diff --git a/.bazelrc b/.bazelrc new file mode 100644 index 0000000..506f05c --- /dev/null +++ b/.bazelrc @@ -0,0 +1,2 @@ +build --host_cxxopt=-std=c++17 +run --workspace_status_command="bash tools/workspace-status.sh" diff --git a/.clang-format b/.clang-format new file mode 100644 index 0000000..1bc010c --- /dev/null +++ b/.clang-format @@ -0,0 +1,2 @@ +Language: Proto +BasedOnStyle: Google diff --git a/.github/workflows/master.yaml b/.github/workflows/master.yaml new file mode 100644 index 0000000..cf09618 --- /dev/null +++ b/.github/workflows/master.yaml @@ -0,0 +1,796 @@ +{ + "jobs": { + "build_and_test": { + "runs-on": "ubuntu-latest", + "steps": [ + { + "name": "Installing Bazel", + "run": "curl -L https://github.com/bazelbuild/bazel/releases/download/6.0.0/bazel-6.0.0-linux-x86_64 > ~/bazel && chmod +x ~/bazel && echo ~ >> ${GITHUB_PATH}" + }, + { + "name": "Check out source code", + "uses": "actions/checkout@v1" + }, + { + "name": "Restore Bazel cache", + "uses": "actions/cache@v1", + "with": { + "key": "bazel", + "path": "~/.cache/bazel" + } + }, + { + "name": "Gazelle", + "run": "bazel run //:gazelle -- update-repos -from_file=go.mod -to_macro go_dependencies.bzl%go_dependencies -prune && bazel run //:gazelle" + }, + { + "name": "Buildifier", + "run": "sed '/^$/d' go_dependencies.bzl > go_dependencies.bzl.new && mv go_dependencies.bzl.new go_dependencies.bzl && bazel run @com_github_bazelbuild_buildtools//:buildifier" + }, + { + "name": "Gofmt", + "run": "bazel run @cc_mvdan_gofumpt//:gofumpt -- -lang 1.19 -w -extra $(pwd)" + }, + { + "name": "Clang format", + "run": "find . -name '*.proto' -exec bazel run @llvm_toolchain_llvm//:bin/clang-format -- -i {} +" + }, + { + "name": "GitHub workflows", + "run": "bazel build //tools/github_workflows && cp bazel-bin/tools/github_workflows/*.yaml .github/workflows" + }, + { + "name": "Protobuf generation", + "run": "find . 
bazel-bin/pkg/proto -name '*.pb.go' -delete || true\nbazel build $(bazel query 'kind(\"go_proto_library\", //...)')\nfind bazel-bin/pkg/proto -name '*.pb.go' | while read f; do\n cat $f > $(echo $f | sed -e 's|.*/pkg/proto/|pkg/proto/|')\ndone\n" + }, + { + "name": "Test style conformance", + "run": "git diff --exit-code HEAD --" + }, + { + "name": "Golint", + "run": "bazel run @org_golang_x_lint//golint -- -set_exit_status $(pwd)/..." + }, + { + "name": "linux_amd64: build and test", + "run": "bazel test --test_output=errors --platforms=@io_bazel_rules_go//go/toolchain:linux_amd64 //..." + }, + { + "name": "linux_amd64: copy bb_noop_worker", + "run": "rm -f bb_noop_worker && bazel run --run_under cp --platforms=@io_bazel_rules_go//go/toolchain:linux_amd64 //cmd/bb_noop_worker $(pwd)/bb_noop_worker" + }, + { + "name": "linux_amd64: upload bb_noop_worker", + "uses": "actions/upload-artifact@v2-preview", + "with": { + "name": "bb_noop_worker.linux_amd64", + "path": "bb_noop_worker" + } + }, + { + "name": "linux_amd64: copy bb_runner", + "run": "rm -f bb_runner && bazel run --run_under cp --platforms=@io_bazel_rules_go//go/toolchain:linux_amd64 //cmd/bb_runner $(pwd)/bb_runner" + }, + { + "name": "linux_amd64: upload bb_runner", + "uses": "actions/upload-artifact@v2-preview", + "with": { + "name": "bb_runner.linux_amd64", + "path": "bb_runner" + } + }, + { + "name": "linux_amd64: copy bb_scheduler", + "run": "rm -f bb_scheduler && bazel run --run_under cp --platforms=@io_bazel_rules_go//go/toolchain:linux_amd64 //cmd/bb_scheduler $(pwd)/bb_scheduler" + }, + { + "name": "linux_amd64: upload bb_scheduler", + "uses": "actions/upload-artifact@v2-preview", + "with": { + "name": "bb_scheduler.linux_amd64", + "path": "bb_scheduler" + } + }, + { + "name": "linux_amd64: copy bb_virtual_tmp", + "run": "rm -f bb_virtual_tmp && bazel run --run_under cp --platforms=@io_bazel_rules_go//go/toolchain:linux_amd64 //cmd/bb_virtual_tmp $(pwd)/bb_virtual_tmp" + }, + { + "name": 
"linux_amd64: upload bb_virtual_tmp", + "uses": "actions/upload-artifact@v2-preview", + "with": { + "name": "bb_virtual_tmp.linux_amd64", + "path": "bb_virtual_tmp" + } + }, + { + "name": "linux_amd64: copy bb_worker", + "run": "rm -f bb_worker && bazel run --run_under cp --platforms=@io_bazel_rules_go//go/toolchain:linux_amd64 //cmd/bb_worker $(pwd)/bb_worker" + }, + { + "name": "linux_amd64: upload bb_worker", + "uses": "actions/upload-artifact@v2-preview", + "with": { + "name": "bb_worker.linux_amd64", + "path": "bb_worker" + } + }, + { + "name": "linux_amd64: copy fake_python", + "run": "rm -f fake_python && bazel run --run_under cp --platforms=@io_bazel_rules_go//go/toolchain:linux_amd64 //cmd/fake_python $(pwd)/fake_python" + }, + { + "name": "linux_amd64: upload fake_python", + "uses": "actions/upload-artifact@v2-preview", + "with": { + "name": "fake_python.linux_amd64", + "path": "fake_python" + } + }, + { + "name": "linux_amd64: copy fake_xcrun", + "run": "rm -f fake_xcrun && bazel run --run_under cp --platforms=@io_bazel_rules_go//go/toolchain:linux_amd64 //cmd/fake_xcrun $(pwd)/fake_xcrun" + }, + { + "name": "linux_amd64: upload fake_xcrun", + "uses": "actions/upload-artifact@v2-preview", + "with": { + "name": "fake_xcrun.linux_amd64", + "path": "fake_xcrun" + } + }, + { + "name": "linux_386: build and test", + "run": "bazel test --test_output=errors --platforms=@io_bazel_rules_go//go/toolchain:linux_386 //..." 
+ }, + { + "name": "linux_386: copy bb_noop_worker", + "run": "rm -f bb_noop_worker && bazel run --run_under cp --platforms=@io_bazel_rules_go//go/toolchain:linux_386 //cmd/bb_noop_worker $(pwd)/bb_noop_worker" + }, + { + "name": "linux_386: upload bb_noop_worker", + "uses": "actions/upload-artifact@v2-preview", + "with": { + "name": "bb_noop_worker.linux_386", + "path": "bb_noop_worker" + } + }, + { + "name": "linux_386: copy bb_runner", + "run": "rm -f bb_runner && bazel run --run_under cp --platforms=@io_bazel_rules_go//go/toolchain:linux_386 //cmd/bb_runner $(pwd)/bb_runner" + }, + { + "name": "linux_386: upload bb_runner", + "uses": "actions/upload-artifact@v2-preview", + "with": { + "name": "bb_runner.linux_386", + "path": "bb_runner" + } + }, + { + "name": "linux_386: copy bb_scheduler", + "run": "rm -f bb_scheduler && bazel run --run_under cp --platforms=@io_bazel_rules_go//go/toolchain:linux_386 //cmd/bb_scheduler $(pwd)/bb_scheduler" + }, + { + "name": "linux_386: upload bb_scheduler", + "uses": "actions/upload-artifact@v2-preview", + "with": { + "name": "bb_scheduler.linux_386", + "path": "bb_scheduler" + } + }, + { + "name": "linux_386: copy bb_virtual_tmp", + "run": "rm -f bb_virtual_tmp && bazel run --run_under cp --platforms=@io_bazel_rules_go//go/toolchain:linux_386 //cmd/bb_virtual_tmp $(pwd)/bb_virtual_tmp" + }, + { + "name": "linux_386: upload bb_virtual_tmp", + "uses": "actions/upload-artifact@v2-preview", + "with": { + "name": "bb_virtual_tmp.linux_386", + "path": "bb_virtual_tmp" + } + }, + { + "name": "linux_386: copy bb_worker", + "run": "rm -f bb_worker && bazel run --run_under cp --platforms=@io_bazel_rules_go//go/toolchain:linux_386 //cmd/bb_worker $(pwd)/bb_worker" + }, + { + "name": "linux_386: upload bb_worker", + "uses": "actions/upload-artifact@v2-preview", + "with": { + "name": "bb_worker.linux_386", + "path": "bb_worker" + } + }, + { + "name": "linux_386: copy fake_python", + "run": "rm -f fake_python && bazel run --run_under cp 
--platforms=@io_bazel_rules_go//go/toolchain:linux_386 //cmd/fake_python $(pwd)/fake_python" + }, + { + "name": "linux_386: upload fake_python", + "uses": "actions/upload-artifact@v2-preview", + "with": { + "name": "fake_python.linux_386", + "path": "fake_python" + } + }, + { + "name": "linux_386: copy fake_xcrun", + "run": "rm -f fake_xcrun && bazel run --run_under cp --platforms=@io_bazel_rules_go//go/toolchain:linux_386 //cmd/fake_xcrun $(pwd)/fake_xcrun" + }, + { + "name": "linux_386: upload fake_xcrun", + "uses": "actions/upload-artifact@v2-preview", + "with": { + "name": "fake_xcrun.linux_386", + "path": "fake_xcrun" + } + }, + { + "name": "linux_arm: build and test", + "run": "bazel build --platforms=@io_bazel_rules_go//go/toolchain:linux_arm //..." + }, + { + "name": "linux_arm: copy bb_noop_worker", + "run": "rm -f bb_noop_worker && bazel run --run_under cp --platforms=@io_bazel_rules_go//go/toolchain:linux_arm //cmd/bb_noop_worker $(pwd)/bb_noop_worker" + }, + { + "name": "linux_arm: upload bb_noop_worker", + "uses": "actions/upload-artifact@v2-preview", + "with": { + "name": "bb_noop_worker.linux_arm", + "path": "bb_noop_worker" + } + }, + { + "name": "linux_arm: copy bb_runner", + "run": "rm -f bb_runner && bazel run --run_under cp --platforms=@io_bazel_rules_go//go/toolchain:linux_arm //cmd/bb_runner $(pwd)/bb_runner" + }, + { + "name": "linux_arm: upload bb_runner", + "uses": "actions/upload-artifact@v2-preview", + "with": { + "name": "bb_runner.linux_arm", + "path": "bb_runner" + } + }, + { + "name": "linux_arm: copy bb_scheduler", + "run": "rm -f bb_scheduler && bazel run --run_under cp --platforms=@io_bazel_rules_go//go/toolchain:linux_arm //cmd/bb_scheduler $(pwd)/bb_scheduler" + }, + { + "name": "linux_arm: upload bb_scheduler", + "uses": "actions/upload-artifact@v2-preview", + "with": { + "name": "bb_scheduler.linux_arm", + "path": "bb_scheduler" + } + }, + { + "name": "linux_arm: copy bb_virtual_tmp", + "run": "rm -f bb_virtual_tmp && bazel run 
--run_under cp --platforms=@io_bazel_rules_go//go/toolchain:linux_arm //cmd/bb_virtual_tmp $(pwd)/bb_virtual_tmp" + }, + { + "name": "linux_arm: upload bb_virtual_tmp", + "uses": "actions/upload-artifact@v2-preview", + "with": { + "name": "bb_virtual_tmp.linux_arm", + "path": "bb_virtual_tmp" + } + }, + { + "name": "linux_arm: copy bb_worker", + "run": "rm -f bb_worker && bazel run --run_under cp --platforms=@io_bazel_rules_go//go/toolchain:linux_arm //cmd/bb_worker $(pwd)/bb_worker" + }, + { + "name": "linux_arm: upload bb_worker", + "uses": "actions/upload-artifact@v2-preview", + "with": { + "name": "bb_worker.linux_arm", + "path": "bb_worker" + } + }, + { + "name": "linux_arm: copy fake_python", + "run": "rm -f fake_python && bazel run --run_under cp --platforms=@io_bazel_rules_go//go/toolchain:linux_arm //cmd/fake_python $(pwd)/fake_python" + }, + { + "name": "linux_arm: upload fake_python", + "uses": "actions/upload-artifact@v2-preview", + "with": { + "name": "fake_python.linux_arm", + "path": "fake_python" + } + }, + { + "name": "linux_arm: copy fake_xcrun", + "run": "rm -f fake_xcrun && bazel run --run_under cp --platforms=@io_bazel_rules_go//go/toolchain:linux_arm //cmd/fake_xcrun $(pwd)/fake_xcrun" + }, + { + "name": "linux_arm: upload fake_xcrun", + "uses": "actions/upload-artifact@v2-preview", + "with": { + "name": "fake_xcrun.linux_arm", + "path": "fake_xcrun" + } + }, + { + "name": "linux_arm64: build and test", + "run": "bazel build --platforms=@io_bazel_rules_go//go/toolchain:linux_arm64 //..." 
+ }, + { + "name": "linux_arm64: copy bb_noop_worker", + "run": "rm -f bb_noop_worker && bazel run --run_under cp --platforms=@io_bazel_rules_go//go/toolchain:linux_arm64 //cmd/bb_noop_worker $(pwd)/bb_noop_worker" + }, + { + "name": "linux_arm64: upload bb_noop_worker", + "uses": "actions/upload-artifact@v2-preview", + "with": { + "name": "bb_noop_worker.linux_arm64", + "path": "bb_noop_worker" + } + }, + { + "name": "linux_arm64: copy bb_runner", + "run": "rm -f bb_runner && bazel run --run_under cp --platforms=@io_bazel_rules_go//go/toolchain:linux_arm64 //cmd/bb_runner $(pwd)/bb_runner" + }, + { + "name": "linux_arm64: upload bb_runner", + "uses": "actions/upload-artifact@v2-preview", + "with": { + "name": "bb_runner.linux_arm64", + "path": "bb_runner" + } + }, + { + "name": "linux_arm64: copy bb_scheduler", + "run": "rm -f bb_scheduler && bazel run --run_under cp --platforms=@io_bazel_rules_go//go/toolchain:linux_arm64 //cmd/bb_scheduler $(pwd)/bb_scheduler" + }, + { + "name": "linux_arm64: upload bb_scheduler", + "uses": "actions/upload-artifact@v2-preview", + "with": { + "name": "bb_scheduler.linux_arm64", + "path": "bb_scheduler" + } + }, + { + "name": "linux_arm64: copy bb_virtual_tmp", + "run": "rm -f bb_virtual_tmp && bazel run --run_under cp --platforms=@io_bazel_rules_go//go/toolchain:linux_arm64 //cmd/bb_virtual_tmp $(pwd)/bb_virtual_tmp" + }, + { + "name": "linux_arm64: upload bb_virtual_tmp", + "uses": "actions/upload-artifact@v2-preview", + "with": { + "name": "bb_virtual_tmp.linux_arm64", + "path": "bb_virtual_tmp" + } + }, + { + "name": "linux_arm64: copy bb_worker", + "run": "rm -f bb_worker && bazel run --run_under cp --platforms=@io_bazel_rules_go//go/toolchain:linux_arm64 //cmd/bb_worker $(pwd)/bb_worker" + }, + { + "name": "linux_arm64: upload bb_worker", + "uses": "actions/upload-artifact@v2-preview", + "with": { + "name": "bb_worker.linux_arm64", + "path": "bb_worker" + } + }, + { + "name": "linux_arm64: copy fake_python", + "run": "rm -f 
fake_python && bazel run --run_under cp --platforms=@io_bazel_rules_go//go/toolchain:linux_arm64 //cmd/fake_python $(pwd)/fake_python" + }, + { + "name": "linux_arm64: upload fake_python", + "uses": "actions/upload-artifact@v2-preview", + "with": { + "name": "fake_python.linux_arm64", + "path": "fake_python" + } + }, + { + "name": "linux_arm64: copy fake_xcrun", + "run": "rm -f fake_xcrun && bazel run --run_under cp --platforms=@io_bazel_rules_go//go/toolchain:linux_arm64 //cmd/fake_xcrun $(pwd)/fake_xcrun" + }, + { + "name": "linux_arm64: upload fake_xcrun", + "uses": "actions/upload-artifact@v2-preview", + "with": { + "name": "fake_xcrun.linux_arm64", + "path": "fake_xcrun" + } + }, + { + "name": "darwin_amd64: build and test", + "run": "bazel build --platforms=@io_bazel_rules_go//go/toolchain:darwin_amd64 //..." + }, + { + "name": "darwin_amd64: copy bb_noop_worker", + "run": "rm -f bb_noop_worker && bazel run --run_under cp --platforms=@io_bazel_rules_go//go/toolchain:darwin_amd64 //cmd/bb_noop_worker $(pwd)/bb_noop_worker" + }, + { + "name": "darwin_amd64: upload bb_noop_worker", + "uses": "actions/upload-artifact@v2-preview", + "with": { + "name": "bb_noop_worker.darwin_amd64", + "path": "bb_noop_worker" + } + }, + { + "name": "darwin_amd64: copy bb_runner", + "run": "rm -f bb_runner && bazel run --run_under cp --platforms=@io_bazel_rules_go//go/toolchain:darwin_amd64 //cmd/bb_runner $(pwd)/bb_runner" + }, + { + "name": "darwin_amd64: upload bb_runner", + "uses": "actions/upload-artifact@v2-preview", + "with": { + "name": "bb_runner.darwin_amd64", + "path": "bb_runner" + } + }, + { + "name": "darwin_amd64: copy bb_scheduler", + "run": "rm -f bb_scheduler && bazel run --run_under cp --platforms=@io_bazel_rules_go//go/toolchain:darwin_amd64 //cmd/bb_scheduler $(pwd)/bb_scheduler" + }, + { + "name": "darwin_amd64: upload bb_scheduler", + "uses": "actions/upload-artifact@v2-preview", + "with": { + "name": "bb_scheduler.darwin_amd64", + "path": "bb_scheduler" + } 
+ }, + { + "name": "darwin_amd64: copy bb_virtual_tmp", + "run": "rm -f bb_virtual_tmp && bazel run --run_under cp --platforms=@io_bazel_rules_go//go/toolchain:darwin_amd64 //cmd/bb_virtual_tmp $(pwd)/bb_virtual_tmp" + }, + { + "name": "darwin_amd64: upload bb_virtual_tmp", + "uses": "actions/upload-artifact@v2-preview", + "with": { + "name": "bb_virtual_tmp.darwin_amd64", + "path": "bb_virtual_tmp" + } + }, + { + "name": "darwin_amd64: copy bb_worker", + "run": "rm -f bb_worker && bazel run --run_under cp --platforms=@io_bazel_rules_go//go/toolchain:darwin_amd64 //cmd/bb_worker $(pwd)/bb_worker" + }, + { + "name": "darwin_amd64: upload bb_worker", + "uses": "actions/upload-artifact@v2-preview", + "with": { + "name": "bb_worker.darwin_amd64", + "path": "bb_worker" + } + }, + { + "name": "darwin_amd64: copy fake_python", + "run": "rm -f fake_python && bazel run --run_under cp --platforms=@io_bazel_rules_go//go/toolchain:darwin_amd64 //cmd/fake_python $(pwd)/fake_python" + }, + { + "name": "darwin_amd64: upload fake_python", + "uses": "actions/upload-artifact@v2-preview", + "with": { + "name": "fake_python.darwin_amd64", + "path": "fake_python" + } + }, + { + "name": "darwin_amd64: copy fake_xcrun", + "run": "rm -f fake_xcrun && bazel run --run_under cp --platforms=@io_bazel_rules_go//go/toolchain:darwin_amd64 //cmd/fake_xcrun $(pwd)/fake_xcrun" + }, + { + "name": "darwin_amd64: upload fake_xcrun", + "uses": "actions/upload-artifact@v2-preview", + "with": { + "name": "fake_xcrun.darwin_amd64", + "path": "fake_xcrun" + } + }, + { + "name": "darwin_arm64: build and test", + "run": "bazel build --platforms=@io_bazel_rules_go//go/toolchain:darwin_arm64 //..." 
+ }, + { + "name": "darwin_arm64: copy bb_noop_worker", + "run": "rm -f bb_noop_worker && bazel run --run_under cp --platforms=@io_bazel_rules_go//go/toolchain:darwin_arm64 //cmd/bb_noop_worker $(pwd)/bb_noop_worker" + }, + { + "name": "darwin_arm64: upload bb_noop_worker", + "uses": "actions/upload-artifact@v2-preview", + "with": { + "name": "bb_noop_worker.darwin_arm64", + "path": "bb_noop_worker" + } + }, + { + "name": "darwin_arm64: copy bb_runner", + "run": "rm -f bb_runner && bazel run --run_under cp --platforms=@io_bazel_rules_go//go/toolchain:darwin_arm64 //cmd/bb_runner $(pwd)/bb_runner" + }, + { + "name": "darwin_arm64: upload bb_runner", + "uses": "actions/upload-artifact@v2-preview", + "with": { + "name": "bb_runner.darwin_arm64", + "path": "bb_runner" + } + }, + { + "name": "darwin_arm64: copy bb_scheduler", + "run": "rm -f bb_scheduler && bazel run --run_under cp --platforms=@io_bazel_rules_go//go/toolchain:darwin_arm64 //cmd/bb_scheduler $(pwd)/bb_scheduler" + }, + { + "name": "darwin_arm64: upload bb_scheduler", + "uses": "actions/upload-artifact@v2-preview", + "with": { + "name": "bb_scheduler.darwin_arm64", + "path": "bb_scheduler" + } + }, + { + "name": "darwin_arm64: copy bb_virtual_tmp", + "run": "rm -f bb_virtual_tmp && bazel run --run_under cp --platforms=@io_bazel_rules_go//go/toolchain:darwin_arm64 //cmd/bb_virtual_tmp $(pwd)/bb_virtual_tmp" + }, + { + "name": "darwin_arm64: upload bb_virtual_tmp", + "uses": "actions/upload-artifact@v2-preview", + "with": { + "name": "bb_virtual_tmp.darwin_arm64", + "path": "bb_virtual_tmp" + } + }, + { + "name": "darwin_arm64: copy bb_worker", + "run": "rm -f bb_worker && bazel run --run_under cp --platforms=@io_bazel_rules_go//go/toolchain:darwin_arm64 //cmd/bb_worker $(pwd)/bb_worker" + }, + { + "name": "darwin_arm64: upload bb_worker", + "uses": "actions/upload-artifact@v2-preview", + "with": { + "name": "bb_worker.darwin_arm64", + "path": "bb_worker" + } + }, + { + "name": "darwin_arm64: copy 
fake_python", + "run": "rm -f fake_python && bazel run --run_under cp --platforms=@io_bazel_rules_go//go/toolchain:darwin_arm64 //cmd/fake_python $(pwd)/fake_python" + }, + { + "name": "darwin_arm64: upload fake_python", + "uses": "actions/upload-artifact@v2-preview", + "with": { + "name": "fake_python.darwin_arm64", + "path": "fake_python" + } + }, + { + "name": "darwin_arm64: copy fake_xcrun", + "run": "rm -f fake_xcrun && bazel run --run_under cp --platforms=@io_bazel_rules_go//go/toolchain:darwin_arm64 //cmd/fake_xcrun $(pwd)/fake_xcrun" + }, + { + "name": "darwin_arm64: upload fake_xcrun", + "uses": "actions/upload-artifact@v2-preview", + "with": { + "name": "fake_xcrun.darwin_arm64", + "path": "fake_xcrun" + } + }, + { + "name": "freebsd_amd64: build and test", + "run": "bazel build --platforms=@io_bazel_rules_go//go/toolchain:freebsd_amd64 //cmd/bb_noop_worker //cmd/bb_runner //cmd/bb_scheduler //cmd/bb_virtual_tmp //cmd/bb_worker //cmd/fake_python //cmd/fake_xcrun" + }, + { + "name": "freebsd_amd64: copy bb_noop_worker", + "run": "rm -f bb_noop_worker && bazel run --run_under cp --platforms=@io_bazel_rules_go//go/toolchain:freebsd_amd64 //cmd/bb_noop_worker $(pwd)/bb_noop_worker" + }, + { + "name": "freebsd_amd64: upload bb_noop_worker", + "uses": "actions/upload-artifact@v2-preview", + "with": { + "name": "bb_noop_worker.freebsd_amd64", + "path": "bb_noop_worker" + } + }, + { + "name": "freebsd_amd64: copy bb_runner", + "run": "rm -f bb_runner && bazel run --run_under cp --platforms=@io_bazel_rules_go//go/toolchain:freebsd_amd64 //cmd/bb_runner $(pwd)/bb_runner" + }, + { + "name": "freebsd_amd64: upload bb_runner", + "uses": "actions/upload-artifact@v2-preview", + "with": { + "name": "bb_runner.freebsd_amd64", + "path": "bb_runner" + } + }, + { + "name": "freebsd_amd64: copy bb_scheduler", + "run": "rm -f bb_scheduler && bazel run --run_under cp --platforms=@io_bazel_rules_go//go/toolchain:freebsd_amd64 //cmd/bb_scheduler $(pwd)/bb_scheduler" + }, + { + 
"name": "freebsd_amd64: upload bb_scheduler", + "uses": "actions/upload-artifact@v2-preview", + "with": { + "name": "bb_scheduler.freebsd_amd64", + "path": "bb_scheduler" + } + }, + { + "name": "freebsd_amd64: copy bb_virtual_tmp", + "run": "rm -f bb_virtual_tmp && bazel run --run_under cp --platforms=@io_bazel_rules_go//go/toolchain:freebsd_amd64 //cmd/bb_virtual_tmp $(pwd)/bb_virtual_tmp" + }, + { + "name": "freebsd_amd64: upload bb_virtual_tmp", + "uses": "actions/upload-artifact@v2-preview", + "with": { + "name": "bb_virtual_tmp.freebsd_amd64", + "path": "bb_virtual_tmp" + } + }, + { + "name": "freebsd_amd64: copy bb_worker", + "run": "rm -f bb_worker && bazel run --run_under cp --platforms=@io_bazel_rules_go//go/toolchain:freebsd_amd64 //cmd/bb_worker $(pwd)/bb_worker" + }, + { + "name": "freebsd_amd64: upload bb_worker", + "uses": "actions/upload-artifact@v2-preview", + "with": { + "name": "bb_worker.freebsd_amd64", + "path": "bb_worker" + } + }, + { + "name": "freebsd_amd64: copy fake_python", + "run": "rm -f fake_python && bazel run --run_under cp --platforms=@io_bazel_rules_go//go/toolchain:freebsd_amd64 //cmd/fake_python $(pwd)/fake_python" + }, + { + "name": "freebsd_amd64: upload fake_python", + "uses": "actions/upload-artifact@v2-preview", + "with": { + "name": "fake_python.freebsd_amd64", + "path": "fake_python" + } + }, + { + "name": "freebsd_amd64: copy fake_xcrun", + "run": "rm -f fake_xcrun && bazel run --run_under cp --platforms=@io_bazel_rules_go//go/toolchain:freebsd_amd64 //cmd/fake_xcrun $(pwd)/fake_xcrun" + }, + { + "name": "freebsd_amd64: upload fake_xcrun", + "uses": "actions/upload-artifact@v2-preview", + "with": { + "name": "fake_xcrun.freebsd_amd64", + "path": "fake_xcrun" + } + }, + { + "name": "windows_amd64: build and test", + "run": "bazel build --platforms=@io_bazel_rules_go//go/toolchain:windows_amd64 //..." 
+ }, + { + "name": "windows_amd64: copy bb_noop_worker", + "run": "rm -f bb_noop_worker.exe && bazel run --run_under cp --platforms=@io_bazel_rules_go//go/toolchain:windows_amd64 //cmd/bb_noop_worker $(pwd)/bb_noop_worker.exe" + }, + { + "name": "windows_amd64: upload bb_noop_worker", + "uses": "actions/upload-artifact@v2-preview", + "with": { + "name": "bb_noop_worker.windows_amd64", + "path": "bb_noop_worker.exe" + } + }, + { + "name": "windows_amd64: copy bb_runner", + "run": "rm -f bb_runner.exe && bazel run --run_under cp --platforms=@io_bazel_rules_go//go/toolchain:windows_amd64 //cmd/bb_runner $(pwd)/bb_runner.exe" + }, + { + "name": "windows_amd64: upload bb_runner", + "uses": "actions/upload-artifact@v2-preview", + "with": { + "name": "bb_runner.windows_amd64", + "path": "bb_runner.exe" + } + }, + { + "name": "windows_amd64: copy bb_scheduler", + "run": "rm -f bb_scheduler.exe && bazel run --run_under cp --platforms=@io_bazel_rules_go//go/toolchain:windows_amd64 //cmd/bb_scheduler $(pwd)/bb_scheduler.exe" + }, + { + "name": "windows_amd64: upload bb_scheduler", + "uses": "actions/upload-artifact@v2-preview", + "with": { + "name": "bb_scheduler.windows_amd64", + "path": "bb_scheduler.exe" + } + }, + { + "name": "windows_amd64: copy bb_virtual_tmp", + "run": "rm -f bb_virtual_tmp.exe && bazel run --run_under cp --platforms=@io_bazel_rules_go//go/toolchain:windows_amd64 //cmd/bb_virtual_tmp $(pwd)/bb_virtual_tmp.exe" + }, + { + "name": "windows_amd64: upload bb_virtual_tmp", + "uses": "actions/upload-artifact@v2-preview", + "with": { + "name": "bb_virtual_tmp.windows_amd64", + "path": "bb_virtual_tmp.exe" + } + }, + { + "name": "windows_amd64: copy bb_worker", + "run": "rm -f bb_worker.exe && bazel run --run_under cp --platforms=@io_bazel_rules_go//go/toolchain:windows_amd64 //cmd/bb_worker $(pwd)/bb_worker.exe" + }, + { + "name": "windows_amd64: upload bb_worker", + "uses": "actions/upload-artifact@v2-preview", + "with": { + "name": 
"bb_worker.windows_amd64", + "path": "bb_worker.exe" + } + }, + { + "name": "windows_amd64: copy fake_python", + "run": "rm -f fake_python.exe && bazel run --run_under cp --platforms=@io_bazel_rules_go//go/toolchain:windows_amd64 //cmd/fake_python $(pwd)/fake_python.exe" + }, + { + "name": "windows_amd64: upload fake_python", + "uses": "actions/upload-artifact@v2-preview", + "with": { + "name": "fake_python.windows_amd64", + "path": "fake_python.exe" + } + }, + { + "name": "windows_amd64: copy fake_xcrun", + "run": "rm -f fake_xcrun.exe && bazel run --run_under cp --platforms=@io_bazel_rules_go//go/toolchain:windows_amd64 //cmd/fake_xcrun $(pwd)/fake_xcrun.exe" + }, + { + "name": "windows_amd64: upload fake_xcrun", + "uses": "actions/upload-artifact@v2-preview", + "with": { + "name": "fake_xcrun.windows_amd64", + "path": "fake_xcrun.exe" + } + }, + { + "env": { + "GITHUB_TOKEN": "${{ secrets.GITHUB_TOKEN }}" + }, + "name": "Install Docker credentials", + "run": "echo \"${GITHUB_TOKEN}\" | docker login ghcr.io -u $ --password-stdin" + }, + { + "name": "Push container bb_noop_worker:bb_noop_worker", + "run": "bazel run --stamp //cmd/bb_noop_worker:bb_noop_worker_container_push" + }, + { + "name": "Push container bb_runner:bb_runner_bare", + "run": "bazel run --stamp //cmd/bb_runner:bb_runner_bare_container_push" + }, + { + "name": "Push container bb_runner:bb_runner_installer", + "run": "bazel run --stamp //cmd/bb_runner:bb_runner_installer_container_push" + }, + { + "name": "Push container bb_scheduler:bb_scheduler", + "run": "bazel run --stamp //cmd/bb_scheduler:bb_scheduler_container_push" + }, + { + "name": "Push container bb_worker:bb_worker", + "run": "bazel run --stamp //cmd/bb_worker:bb_worker_container_push" + } + ] + } + }, + "name": "master", + "on": { + "push": { + "branches": [ + "master" + ] + } + } +} diff --git a/.github/workflows/pull-requests.yaml b/.github/workflows/pull-requests.yaml new file mode 100644 index 0000000..b7306db --- /dev/null +++ 
b/.github/workflows/pull-requests.yaml @@ -0,0 +1,97 @@ +{ + "jobs": { + "build_and_test": { + "runs-on": "ubuntu-latest", + "steps": [ + { + "name": "Installing Bazel", + "run": "curl -L https://github.com/bazelbuild/bazel/releases/download/6.0.0/bazel-6.0.0-linux-x86_64 > ~/bazel && chmod +x ~/bazel && echo ~ >> ${GITHUB_PATH}" + }, + { + "name": "Check out source code", + "uses": "actions/checkout@v1" + }, + { + "name": "Restore Bazel cache", + "uses": "actions/cache@v1", + "with": { + "key": "bazel", + "path": "~/.cache/bazel" + } + }, + { + "name": "Gazelle", + "run": "bazel run //:gazelle -- update-repos -from_file=go.mod -to_macro go_dependencies.bzl%go_dependencies -prune && bazel run //:gazelle" + }, + { + "name": "Buildifier", + "run": "sed '/^$/d' go_dependencies.bzl > go_dependencies.bzl.new && mv go_dependencies.bzl.new go_dependencies.bzl && bazel run @com_github_bazelbuild_buildtools//:buildifier" + }, + { + "name": "Gofmt", + "run": "bazel run @cc_mvdan_gofumpt//:gofumpt -- -lang 1.19 -w -extra $(pwd)" + }, + { + "name": "Clang format", + "run": "find . -name '*.proto' -exec bazel run @llvm_toolchain_llvm//:bin/clang-format -- -i {} +" + }, + { + "name": "GitHub workflows", + "run": "bazel build //tools/github_workflows && cp bazel-bin/tools/github_workflows/*.yaml .github/workflows" + }, + { + "name": "Protobuf generation", + "run": "find . bazel-bin/pkg/proto -name '*.pb.go' -delete || true\nbazel build $(bazel query 'kind(\"go_proto_library\", //...)')\nfind bazel-bin/pkg/proto -name '*.pb.go' | while read f; do\n cat $f > $(echo $f | sed -e 's|.*/pkg/proto/|pkg/proto/|')\ndone\n" + }, + { + "name": "Test style conformance", + "run": "git diff --exit-code HEAD --" + }, + { + "name": "Golint", + "run": "bazel run @org_golang_x_lint//golint -- -set_exit_status $(pwd)/..." + }, + { + "name": "linux_amd64: build and test", + "run": "bazel test --test_output=errors --platforms=@io_bazel_rules_go//go/toolchain:linux_amd64 //..." 
+ }, + { + "name": "linux_386: build and test", + "run": "bazel test --test_output=errors --platforms=@io_bazel_rules_go//go/toolchain:linux_386 //..." + }, + { + "name": "linux_arm: build and test", + "run": "bazel build --platforms=@io_bazel_rules_go//go/toolchain:linux_arm //..." + }, + { + "name": "linux_arm64: build and test", + "run": "bazel build --platforms=@io_bazel_rules_go//go/toolchain:linux_arm64 //..." + }, + { + "name": "darwin_amd64: build and test", + "run": "bazel build --platforms=@io_bazel_rules_go//go/toolchain:darwin_amd64 //..." + }, + { + "name": "darwin_arm64: build and test", + "run": "bazel build --platforms=@io_bazel_rules_go//go/toolchain:darwin_arm64 //..." + }, + { + "name": "freebsd_amd64: build and test", + "run": "bazel build --platforms=@io_bazel_rules_go//go/toolchain:freebsd_amd64 //cmd/bb_noop_worker //cmd/bb_runner //cmd/bb_scheduler //cmd/bb_virtual_tmp //cmd/bb_worker //cmd/fake_python //cmd/fake_xcrun" + }, + { + "name": "windows_amd64: build and test", + "run": "bazel build --platforms=@io_bazel_rules_go//go/toolchain:windows_amd64 //..." 
+ } + ] + } + }, + "name": "pull-requests", + "on": { + "pull_request": { + "branches": [ + "master" + ] + } + } +} diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..dd7becd --- /dev/null +++ b/.gitignore @@ -0,0 +1,2 @@ +.*.swp +/bazel-* diff --git a/AUTHORS b/AUTHORS new file mode 100644 index 0000000..58209df --- /dev/null +++ b/AUTHORS @@ -0,0 +1 @@ +Ed Schouten diff --git a/BUILD.bazel b/BUILD.bazel new file mode 100644 index 0000000..442a8f7 --- /dev/null +++ b/BUILD.bazel @@ -0,0 +1,28 @@ +load("@bazel_gazelle//:def.bzl", "gazelle") + +# gazelle:prefix github.com/buildbarn/bb-remote-execution +# gazelle:resolve proto build/bazel/remote/execution/v2/remote_execution.proto @com_github_bazelbuild_remote_apis//build/bazel/remote/execution/v2:remote_execution_proto +# gazelle:resolve proto go build/bazel/remote/execution/v2/remote_execution.proto @com_github_bazelbuild_remote_apis//build/bazel/remote/execution/v2:execution +# gazelle:resolve proto go google/bytestream/bytestream.proto @org_golang_google_genproto_googleapis_bytestream//:bytestream +# gazelle:resolve proto google/bytestream/bytestream.proto @googleapis//google/bytestream:bytestream_proto +# gazelle:resolve proto go google/rpc/status.proto @org_golang_google_genproto_googleapis_rpc//status +# gazelle:resolve proto proto google/rpc/status.proto @googleapis//google/rpc:status_proto +# gazelle:resolve proto pkg/proto/configuration/auth/auth.proto @com_github_buildbarn_bb_storage//pkg/proto/configuration/auth:auth_proto +# gazelle:resolve proto go pkg/proto/configuration/auth/auth.proto @com_github_buildbarn_bb_storage//pkg/proto/configuration/auth +# gazelle:resolve proto pkg/proto/configuration/blobstore/blobstore.proto @com_github_buildbarn_bb_storage//pkg/proto/configuration/blobstore:blobstore_proto +# gazelle:resolve proto go pkg/proto/configuration/blobstore/blobstore.proto @com_github_buildbarn_bb_storage//pkg/proto/configuration/blobstore +# gazelle:resolve proto 
pkg/proto/configuration/blockdevice/blockdevice.proto @com_github_buildbarn_bb_storage//pkg/proto/configuration/blockdevice:blockdevice_proto +# gazelle:resolve proto go pkg/proto/configuration/blockdevice/blockdevice.proto @com_github_buildbarn_bb_storage//pkg/proto/configuration/blockdevice +# gazelle:resolve proto pkg/proto/configuration/cloud/aws/aws.proto @com_github_buildbarn_bb_storage//pkg/proto/configuration/cloud/aws:aws_proto +# gazelle:resolve proto go pkg/proto/configuration/cloud/aws/aws.proto @com_github_buildbarn_bb_storage//pkg/proto/configuration/cloud/aws +# gazelle:resolve proto pkg/proto/configuration/eviction/eviction.proto @com_github_buildbarn_bb_storage//pkg/proto/configuration/eviction:eviction_proto +# gazelle:resolve proto go pkg/proto/configuration/eviction/eviction.proto @com_github_buildbarn_bb_storage//pkg/proto/configuration/eviction +# gazelle:resolve proto pkg/proto/configuration/global/global.proto @com_github_buildbarn_bb_storage//pkg/proto/configuration/global:global_proto +# gazelle:resolve proto go pkg/proto/configuration/global/global.proto @com_github_buildbarn_bb_storage//pkg/proto/configuration/global +# gazelle:resolve proto pkg/proto/configuration/grpc/grpc.proto @com_github_buildbarn_bb_storage//pkg/proto/configuration/grpc:grpc_proto +# gazelle:resolve proto go pkg/proto/configuration/grpc/grpc.proto @com_github_buildbarn_bb_storage//pkg/proto/configuration/grpc +# gazelle:resolve proto pkg/proto/configuration/http/http.proto @com_github_buildbarn_bb_storage//pkg/proto/configuration/http:http_proto +# gazelle:resolve proto go pkg/proto/configuration/http/http.proto @com_github_buildbarn_bb_storage//pkg/proto/configuration/http +gazelle( + name = "gazelle", +) diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND 
DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/README.md b/README.md new file mode 100644 index 0000000..293f804 --- /dev/null +++ b/README.md @@ -0,0 +1,54 @@ +# Buildbarn Remote Execution [![Build status](https://github.com/buildbarn/bb-remote-execution/workflows/master/badge.svg)](https://github.com/buildbarn/bb-remote-execution/actions) [![PkgGoDev](https://pkg.go.dev/badge/github.com/buildbarn/bb-remote-execution)](https://pkg.go.dev/github.com/buildbarn/bb-remote-execution) [![Go Report Card](https://goreportcard.com/badge/github.com/buildbarn/bb-remote-execution)](https://goreportcard.com/report/github.com/buildbarn/bb-remote-execution) + +Translations: [Chinese](https://github.com/buildbarn/bb-remote-execution/blob/master/doc/zh_CN/README.md) + +This repository provides tools that can be used in combination with +[the Buildbarn storage daemon](https://github.com/buildbarn/bb-storage) +to add support for remote execution, allowing you to create +[a build farm](https://en.wikipedia.org/wiki/Compile_farm) that can be +called into using tools such as [Bazel](https://bazel.build/), +[BuildStream](https://wiki.gnome.org/Projects/BuildStream) and +[recc](https://gitlab.com/bloomberg/recc). + +This repository provides three programs: + +- `bb_scheduler`: A service that receives requests from + [`bb_storage`](https://github.com/buildbarn/bb-storage) to queue build + actions that need to be run. +- `bb_worker`: A service that requests build actions from `bb_scheduler` + and orchestrates their execution. This includes downloading the build + action's input files and uploading its output files. +- `bb_runner`: A service that executes the command associated with the + build action. + +Most setups will run a single instance of `bb_scheduler` and a large +number of pairs of `bb_worker`/`bb_runner` processes. Older versions of +Buildbarn integrated the functionality of `bb_worker` and `bb_runner` +into a single process. 
These processes were decomposed to accomplish the +following: + +- To make it possible to use privilege separation. Privilege separation + is used to prevent build actions from overwriting input files. This + allows `bb_worker` to cache these files across build actions, + exposing it to the build action through hardlinking. +- To make execution pluggable. `bb_worker` communicates with `bb_runner` + using [a simple gRPC-based protocol](https://github.com/buildbarn/bb-remote-execution/blob/master/pkg/proto/runner/runner.proto). + One could, for example, implement a custom runner process that + executes build actions using [QEMU user-mode emulation](https://www.qemu.org/). +- To work around [a race condition](https://github.com/golang/go/issues/22315) + that effectively prevents multi-threaded processes from writing + executables to disk and spawning them. Through this decomposition, + `bb_worker` writes executables to disk, while `bb_runner` spawns them. + +This repository provides container images for each of these components. +For `bb_runner`, it provides one image without a userland, and one that +installs the `bb_runner` into another container on startup. The former +is sufficient for [BuildStream](https://buildstream.build), while the +latter can, for example, be used in combination with Google RBE's +[Ubuntu 16.04 image](https://console.cloud.google.com/marketplace/details/google/rbe-ubuntu16-04). +The advantage of using the Ubuntu 16.04 image is that the Bazel project +provides [ready-to-use toolchain definitions](https://github.com/bazelbuild/bazel-toolchains) +for them. + +Please refer to [the Buildbarn Deployments repository](https://github.com/buildbarn/bb-deployments) +for examples on how to set up these tools. 
diff --git a/WORKSPACE b/WORKSPACE new file mode 100644 index 0000000..00949cc --- /dev/null +++ b/WORKSPACE @@ -0,0 +1,232 @@ +workspace(name = "com_github_buildbarn_bb_remote_execution") + +load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") + +http_archive( + name = "rules_pkg", + sha256 = "8f9ee2dc10c1ae514ee599a8b42ed99fa262b757058f65ad3c384289ff70c4b8", + urls = [ + "https://mirror.bazel.build/github.com/bazelbuild/rules_pkg/releases/download/0.9.1/rules_pkg-0.9.1.tar.gz", + "https://github.com/bazelbuild/rules_pkg/releases/download/0.9.1/rules_pkg-0.9.1.tar.gz", + ], +) + +http_archive( + name = "io_bazel_rules_docker", + sha256 = "b1e80761a8a8243d03ebca8845e9cc1ba6c82ce7c5179ce2b295cd36f7e394bf", + urls = ["https://github.com/bazelbuild/rules_docker/releases/download/v0.25.0/rules_docker-v0.25.0.tar.gz"], +) + +http_archive( + name = "io_bazel_rules_go", + patches = ["//:patches/io_bazel_rules_go/tags-manual.diff"], + sha256 = "278b7ff5a826f3dc10f04feaf0b70d48b68748ccd512d7f98bf442077f043fe3", + urls = [ + "https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.41.0/rules_go-v0.41.0.zip", + "https://github.com/bazelbuild/rules_go/releases/download/v0.41.0/rules_go-v0.41.0.zip", + ], +) + +http_archive( + name = "bazel_gazelle", + patches = [ + "//:patches/bazel_gazelle/dont-flatten-srcs.diff", + "//:patches/bazel_gazelle/issue-1595.diff", + ], + sha256 = "29218f8e0cebe583643cbf93cae6f971be8a2484cdcfa1e45057658df8d54002", + urls = [ + "https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.32.0/bazel-gazelle-v0.32.0.tar.gz", + "https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.32.0/bazel-gazelle-v0.32.0.tar.gz", + ], +) + +load("@bazel_gazelle//:deps.bzl", "go_repository") + +# Override the version of gomock to one that includes support for +# generating mocks for function types. 
We can't do this through go.mod, +# as it causes almost all of our package dependencies to be downgraded. +go_repository( + name = "com_github_golang_mock", + importpath = "github.com/golang/mock", + patches = [ + "@com_github_buildbarn_bb_storage//:patches/com_github_golang_mock/mocks-for-funcs.diff", + "//:patches/com_github_golang_mock/generics.diff", + ], + replace = "github.com/golang/mock", + sum = "h1:DxRM2MRFDKF8JGaT1ZSsCZ9KxoOki+rrOoB011jIEDc=", + version = "v1.6.1-0.20220512030613-73266f9366fc", +) + +# gazelle:repository_macro go_dependencies.bzl%go_dependencies +load(":go_dependencies.bzl", "go_dependencies") + +go_dependencies() + +load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_dependencies") + +go_rules_dependencies() + +go_register_toolchains(version = "1.20.1") + +load("@io_bazel_rules_docker//repositories:repositories.bzl", container_repositories = "repositories") + +container_repositories() + +load("@io_bazel_rules_docker//repositories:deps.bzl", container_deps = "deps") + +container_deps() + +load("@io_bazel_rules_docker//container:container.bzl", "container_pull") + +container_pull( + name = "busybox", + digest = "sha256:a2490cec4484ee6c1068ba3a05f89934010c85242f736280b35343483b2264b6", # 1.31.1-uclibc + registry = "docker.io", + repository = "library/busybox", +) + +load("@bazel_gazelle//:deps.bzl", "gazelle_dependencies") + +gazelle_dependencies() + +load("@io_bazel_rules_docker//go:image.bzl", _go_image_repos = "repositories") + +_go_image_repos() + +load("@com_github_bazelbuild_remote_apis//:repository_rules.bzl", "switched_rules_by_language") + +switched_rules_by_language( + name = "bazel_remote_apis_imports", + go = True, +) + +http_archive( + name = "com_google_protobuf", + sha256 = "a700a49470d301f1190a487a923b5095bf60f08f4ae4cac9f5f7c36883d17971", + strip_prefix = "protobuf-23.4", + urls = ["https://github.com/protocolbuffers/protobuf/releases/download/v23.4/protobuf-23.4.tar.gz"], +) + 
+load("@com_google_protobuf//:protobuf_deps.bzl", "protobuf_deps") + +protobuf_deps() + +http_archive( + name = "googleapis", + sha256 = "361e26593b881e70286a28065859c941e25b96f9c48ba91127293d0a881152d6", + strip_prefix = "googleapis-a3770599794a8d319286df96f03343b6cd0e7f4f", + urls = ["https://github.com/googleapis/googleapis/archive/a3770599794a8d319286df96f03343b6cd0e7f4f.zip"], +) + +load("@googleapis//:repository_rules.bzl", "switched_rules_by_language") + +switched_rules_by_language( + name = "com_google_googleapis_imports", +) + +load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_file") + +http_file( + name = "com_github_krallin_tini_tini_static_amd64", + downloaded_file_path = "tini", + executable = True, + sha256 = "eadb9d6e2dc960655481d78a92d2c8bc021861045987ccd3e27c7eae5af0cf33", + urls = ["https://github.com/krallin/tini/releases/download/v0.18.0/tini-static-amd64"], +) + +http_archive( + name = "com_grail_bazel_toolchain", + canonical_id = "0.7.2", + sha256 = "f7aa8e59c9d3cafde6edb372d9bd25fb4ee7293ab20b916d867cd0baaa642529", + strip_prefix = "bazel-toolchain-0.7.2", + url = "https://github.com/grailbio/bazel-toolchain/archive/0.7.2.tar.gz", +) + +load("@com_grail_bazel_toolchain//toolchain:rules.bzl", "llvm_toolchain") + +llvm_toolchain( + name = "llvm_toolchain", + llvm_version = "14.0.0", +) + +http_archive( + name = "io_bazel_rules_jsonnet", + sha256 = "d20270872ba8d4c108edecc9581e2bb7f320afab71f8caa2f6394b5202e8a2c3", + strip_prefix = "rules_jsonnet-0.4.0", + urls = ["https://github.com/bazelbuild/rules_jsonnet/archive/0.4.0.tar.gz"], +) + +load("@io_bazel_rules_jsonnet//jsonnet:jsonnet.bzl", "jsonnet_repositories") + +jsonnet_repositories() + +load("@google_jsonnet_go//bazel:repositories.bzl", "jsonnet_go_repositories") + +jsonnet_go_repositories() + +load("@google_jsonnet_go//bazel:deps.bzl", "jsonnet_go_dependencies") + +jsonnet_go_dependencies() + +http_archive( + name = "com_github_twbs_bootstrap", + build_file_content = 
"""exports_files(["css/bootstrap.min.css", "js/bootstrap.min.js"])""", + sha256 = "395342b2974e3350560e65752d36aab6573652b11cc6cb5ef79a2e5e83ad64b1", + strip_prefix = "bootstrap-5.1.0-dist", + urls = ["https://github.com/twbs/bootstrap/releases/download/v5.1.0/bootstrap-5.1.0-dist.zip"], +) + +http_archive( + name = "aspect_rules_js", + sha256 = "00e7b97b696af63812df0ca9e9dbd18579f3edd3ab9a56f227238b8405e4051c", + strip_prefix = "rules_js-1.23.0", + url = "https://github.com/aspect-build/rules_js/releases/download/v1.23.0/rules_js-v1.23.0.tar.gz", +) + +load("@aspect_rules_js//js:repositories.bzl", "rules_js_dependencies") + +rules_js_dependencies() + +load("@rules_nodejs//nodejs:repositories.bzl", "DEFAULT_NODE_VERSION", "nodejs_register_toolchains") + +nodejs_register_toolchains( + name = "nodejs", + node_version = DEFAULT_NODE_VERSION, +) + +load("@aspect_rules_js//npm:npm_import.bzl", "npm_translate_lock") + +npm_translate_lock( + name = "npm", + pnpm_lock = "@com_github_buildbarn_bb_storage//:pnpm-lock.yaml", +) + +load("@npm//:repositories.bzl", "npm_repositories") + +npm_repositories() + +http_archive( + name = "rules_antlr", + patches = ["@com_github_buildbarn_go_xdr//:patches/rules_antlr/antlr-4.10.diff"], + sha256 = "26e6a83c665cf6c1093b628b3a749071322f0f70305d12ede30909695ed85591", + strip_prefix = "rules_antlr-0.5.0", + urls = ["https://github.com/marcohu/rules_antlr/archive/0.5.0.tar.gz"], +) + +load("@rules_antlr//antlr:repositories.bzl", "rules_antlr_dependencies") + +rules_antlr_dependencies("4.10") + +http_archive( + name = "io_opentelemetry_proto", + build_file_content = """ +proto_library( + name = "common_proto", + srcs = ["opentelemetry/proto/common/v1/common.proto"], + visibility = ["//visibility:public"], +) +""", + sha256 = "464bc2b348e674a1a03142e403cbccb01be8655b6de0f8bfe733ea31fcd421be", + strip_prefix = "opentelemetry-proto-0.19.0", + urls = ["https://github.com/open-telemetry/opentelemetry-proto/archive/refs/tags/v0.19.0.tar.gz"], +) 
diff --git a/cmd/bb_noop_worker/BUILD.bazel b/cmd/bb_noop_worker/BUILD.bazel new file mode 100644 index 0000000..9925390 --- /dev/null +++ b/cmd/bb_noop_worker/BUILD.bazel @@ -0,0 +1,44 @@ +load("@com_github_buildbarn_bb_storage//tools:container.bzl", "container_push_official") +load("@io_bazel_rules_docker//go:image.bzl", "go_image") +load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library") + +go_library( + name = "bb_noop_worker_lib", + srcs = ["main.go"], + importpath = "github.com/buildbarn/bb-remote-execution/cmd/bb_noop_worker", + visibility = ["//visibility:private"], + deps = [ + "//pkg/blobstore", + "//pkg/builder", + "//pkg/filesystem", + "//pkg/proto/configuration/bb_noop_worker", + "//pkg/proto/remoteworker", + "@com_github_buildbarn_bb_storage//pkg/blobstore/configuration", + "@com_github_buildbarn_bb_storage//pkg/clock", + "@com_github_buildbarn_bb_storage//pkg/digest", + "@com_github_buildbarn_bb_storage//pkg/global", + "@com_github_buildbarn_bb_storage//pkg/program", + "@com_github_buildbarn_bb_storage//pkg/util", + "@org_golang_google_grpc//codes", + "@org_golang_google_grpc//status", + ], +) + +go_binary( + name = "bb_noop_worker", + embed = [":bb_noop_worker_lib"], + visibility = ["//visibility:public"], +) + +go_image( + name = "bb_noop_worker_container", + embed = [":bb_noop_worker_lib"], + pure = "on", + visibility = ["//visibility:public"], +) + +container_push_official( + name = "bb_noop_worker_container_push", + component = "bb-noop-worker", + image = ":bb_noop_worker_container", +) diff --git a/cmd/bb_noop_worker/main.go b/cmd/bb_noop_worker/main.go new file mode 100644 index 0000000..ef6bef3 --- /dev/null +++ b/cmd/bb_noop_worker/main.go @@ -0,0 +1,90 @@ +package main + +import ( + "context" + "net/url" + "os" + + re_blobstore "github.com/buildbarn/bb-remote-execution/pkg/blobstore" + "github.com/buildbarn/bb-remote-execution/pkg/builder" + re_filesystem "github.com/buildbarn/bb-remote-execution/pkg/filesystem" + 
"github.com/buildbarn/bb-remote-execution/pkg/proto/configuration/bb_noop_worker" + "github.com/buildbarn/bb-remote-execution/pkg/proto/remoteworker" + blobstore_configuration "github.com/buildbarn/bb-storage/pkg/blobstore/configuration" + "github.com/buildbarn/bb-storage/pkg/clock" + "github.com/buildbarn/bb-storage/pkg/digest" + "github.com/buildbarn/bb-storage/pkg/global" + "github.com/buildbarn/bb-storage/pkg/program" + "github.com/buildbarn/bb-storage/pkg/util" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// This is an implementation of a remote execution worker that always +// fails to execute actions with an INVALID_ARGUMENT error. This worker +// may be useful when attempting to inspect input roots of actions, as +// it causes the client to print a link to bb_browser immediately. + +func main() { + program.RunMain(func(ctx context.Context, siblingsGroup, dependenciesGroup program.Group) error { + if len(os.Args) != 2 { + return status.Error(codes.InvalidArgument, "Usage: bb_noop_worker bb_noop_worker.jsonnet") + } + var configuration bb_noop_worker.ApplicationConfiguration + if err := util.UnmarshalConfigurationFromFile(os.Args[1], &configuration); err != nil { + return util.StatusWrapf(err, "Failed to read configuration from %s", os.Args[1]) + } + lifecycleState, grpcClientFactory, err := global.ApplyConfiguration(configuration.Global) + if err != nil { + return util.StatusWrap(err, "Failed to apply global configuration options") + } + + // Storage access. This worker loads Command objects from the + // Content Addressable Storage (CAS), as those may contain error + // message templates that this worker respects. 
+ info, err := blobstore_configuration.NewBlobAccessFromConfiguration( + dependenciesGroup, + configuration.ContentAddressableStorage, + blobstore_configuration.NewCASBlobAccessCreator( + grpcClientFactory, + int(configuration.MaximumMessageSizeBytes))) + if err != nil { + return util.StatusWrap(err, "Failed to create Content Addressable Storage") + } + contentAddressableStorage := re_blobstore.NewExistencePreconditionBlobAccess(info.BlobAccess) + + browserURL, err := url.Parse(configuration.BrowserUrl) + if err != nil { + return util.StatusWrap(err, "Failed to parse browser URL") + } + + schedulerConnection, err := grpcClientFactory.NewClientFromConfiguration(configuration.Scheduler) + if err != nil { + return util.StatusWrap(err, "Failed to create scheduler RPC client") + } + schedulerClient := remoteworker.NewOperationQueueClient(schedulerConnection) + + instanceNamePrefix, err := digest.NewInstanceName(configuration.InstanceNamePrefix) + if err != nil { + return util.StatusWrapf(err, "Invalid instance name prefix %#v", configuration.InstanceNamePrefix) + } + + buildClient := builder.NewBuildClient( + schedulerClient, + builder.NewNoopBuildExecutor( + contentAddressableStorage, + int(configuration.MaximumMessageSizeBytes), + browserURL), + re_filesystem.EmptyFilePool, + clock.SystemClock, + configuration.WorkerId, + instanceNamePrefix, + configuration.Platform, + 0) + builder.LaunchWorkerThread(siblingsGroup, buildClient, "noop") + + lifecycleState.MarkReadyAndWait(siblingsGroup) + return nil + }) +} diff --git a/cmd/bb_runner/BUILD.bazel b/cmd/bb_runner/BUILD.bazel new file mode 100644 index 0000000..f8272f6 --- /dev/null +++ b/cmd/bb_runner/BUILD.bazel @@ -0,0 +1,84 @@ +load("@com_github_buildbarn_bb_storage//tools:container.bzl", "container_push_official") +load("@io_bazel_rules_docker//container:container.bzl", "container_image", "container_layer") +load("@io_bazel_rules_docker//go:image.bzl", "go_image") +load("@io_bazel_rules_go//go:def.bzl", "go_binary", 
"go_library") + +go_library( + name = "bb_runner_lib", + srcs = ["main.go"], + importpath = "github.com/buildbarn/bb-remote-execution/cmd/bb_runner", + visibility = ["//visibility:private"], + deps = [ + "//pkg/cleaner", + "//pkg/credentials", + "//pkg/filesystem", + "//pkg/proto/configuration/bb_runner", + "//pkg/proto/runner", + "//pkg/proto/tmp_installer", + "//pkg/runner", + "@com_github_buildbarn_bb_storage//pkg/filesystem", + "@com_github_buildbarn_bb_storage//pkg/filesystem/path", + "@com_github_buildbarn_bb_storage//pkg/global", + "@com_github_buildbarn_bb_storage//pkg/grpc", + "@com_github_buildbarn_bb_storage//pkg/program", + "@com_github_buildbarn_bb_storage//pkg/util", + "@org_golang_google_grpc//:go_default_library", + "@org_golang_google_grpc//codes", + "@org_golang_google_grpc//status", + ], +) + +go_binary( + name = "bb_runner", + embed = [":bb_runner_lib"], + pure = "on", + visibility = ["//visibility:public"], +) + +container_layer( + name = "bb_runner_layer", + files = [":bb_runner"], + visibility = ["//visibility:public"], +) + +container_layer( + name = "tini_layer", + files = ["@com_github_krallin_tini_tini_static_amd64//file"], +) + +container_layer( + name = "install_layer", + files = ["install"], +) + +container_image( + name = "bb_runner_installer", + base = "@busybox//image", + cmd = ["/bb"], + entrypoint = ["/install"], + layers = [ + ":install_layer", + ":bb_runner_layer", + ":tini_layer", + ], + visibility = ["//visibility:public"], +) + +go_image( + name = "bb_runner_bare_container", + embed = [":bb_runner_lib"], + pure = "on", + visibility = ["//visibility:public"], +) + +container_push_official( + name = "bb_runner_bare_container_push", + component = "bb-runner-bare", + image = ":bb_runner_bare_container", +) + +container_push_official( + name = "bb_runner_installer_container_push", + component = "bb-runner-installer", + image = ":bb_runner_installer", +) diff --git a/cmd/bb_runner/install b/cmd/bb_runner/install new file mode 
100644 index 0000000..06923ff --- /dev/null +++ b/cmd/bb_runner/install @@ -0,0 +1,14 @@ +#!/bin/sh + +set -eu + +echo "$*" + +# Can pass the destination directory as the first argument. By default +# it will use /bb. +dest="${1:-/bb}" + +cp /bb_runner /tini "${dest}" + +# Create a file that indicates that installation has finished. +touch "${dest}/installed" diff --git a/cmd/bb_runner/main.go b/cmd/bb_runner/main.go new file mode 100644 index 0000000..000ae00 --- /dev/null +++ b/cmd/bb_runner/main.go @@ -0,0 +1,159 @@ +package main + +import ( + "context" + "os" + "time" + + "github.com/buildbarn/bb-remote-execution/pkg/cleaner" + "github.com/buildbarn/bb-remote-execution/pkg/credentials" + re_filesystem "github.com/buildbarn/bb-remote-execution/pkg/filesystem" + "github.com/buildbarn/bb-remote-execution/pkg/proto/configuration/bb_runner" + runner_pb "github.com/buildbarn/bb-remote-execution/pkg/proto/runner" + "github.com/buildbarn/bb-remote-execution/pkg/proto/tmp_installer" + "github.com/buildbarn/bb-remote-execution/pkg/runner" + "github.com/buildbarn/bb-storage/pkg/filesystem" + "github.com/buildbarn/bb-storage/pkg/filesystem/path" + "github.com/buildbarn/bb-storage/pkg/global" + bb_grpc "github.com/buildbarn/bb-storage/pkg/grpc" + "github.com/buildbarn/bb-storage/pkg/program" + "github.com/buildbarn/bb-storage/pkg/util" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func main() { + program.RunMain(func(ctx context.Context, siblingsGroup, dependenciesGroup program.Group) error { + if len(os.Args) != 2 { + return status.Error(codes.InvalidArgument, "Usage: bb_runner bb_runner.jsonnet") + } + var configuration bb_runner.ApplicationConfiguration + if err := util.UnmarshalConfigurationFromFile(os.Args[1], &configuration); err != nil { + return util.StatusWrapf(err, "Failed to read configuration from %s", os.Args[1]) + } + lifecycleState, grpcClientFactory, err := 
global.ApplyConfiguration(configuration.Global) + if err != nil { + return util.StatusWrap(err, "Failed to apply global configuration options") + } + + buildDirectoryPath, scopeWalker := path.EmptyBuilder.Join(path.NewAbsoluteScopeWalker(path.VoidComponentWalker)) + if err := path.Resolve(configuration.BuildDirectoryPath, scopeWalker); err != nil { + return util.StatusWrap(err, "Failed to resolve build directory") + } + buildDirectoryPathString := buildDirectoryPath.String() + buildDirectory := re_filesystem.NewLazyDirectory( + func() (filesystem.DirectoryCloser, error) { + return filesystem.NewLocalDirectory(buildDirectoryPathString) + }) + + sysProcAttr, processTableCleaningUserID, err := credentials.GetSysProcAttrFromConfiguration(configuration.RunCommandsAs) + if err != nil { + return util.StatusWrap(err, "Failed to extract credentials from configuration") + } + + var commandCreator runner.CommandCreator + if configuration.ChrootIntoInputRoot { + commandCreator, err = runner.NewChrootedCommandCreator(sysProcAttr) + if err != nil { + return util.StatusWrap(err, "Failed to create chrooted command creator") + } + } else { + commandCreator = runner.NewPlainCommandCreator(sysProcAttr) + } + + r := runner.NewLocalRunner( + buildDirectory, + buildDirectoryPath, + commandCreator, + configuration.SetTmpdirEnvironmentVariable) + + // Let bb_runner replace temporary directories with symbolic + // links pointing to the temporary directory set up by + // bb_worker. + for _, symlinkPath := range configuration.SymlinkTemporaryDirectories { + r = runner.NewTemporaryDirectorySymlinkingRunner(r, symlinkPath, buildDirectoryPath) + } + + // Calling into a helper process to set up access to temporary + // directories prior to the execution of build actions. 
+ if configuration.TemporaryDirectoryInstaller != nil { + tmpInstallerConnection, err := grpcClientFactory.NewClientFromConfiguration(configuration.TemporaryDirectoryInstaller) + if err != nil { + return util.StatusWrap(err, "Failed to create temporary directory installer RPC client") + } + tmpInstaller := tmp_installer.NewTemporaryDirectoryInstallerClient(tmpInstallerConnection) + r = runner.NewTemporaryDirectoryInstallingRunner(r, tmpInstaller) + } + + // Kill processes that actions leave behind by daemonizing. + // Ensure that we only match processes belonging to the current + // user that were created after bb_runner is spawned, as we + // don't want to kill unrelated processes. + var cleaners []cleaner.Cleaner + if configuration.CleanProcessTable { + startupTime := time.Now() + cleaners = append( + cleaners, + cleaner.NewProcessTableCleaner( + cleaner.NewFilteringProcessTable( + cleaner.SystemProcessTable, + func(process *cleaner.Process) bool { + return process.UserID == processTableCleaningUserID && + process.CreationTime.After(startupTime) + }))) + } + + // Clean temporary directories, so that files left behind by + // build actions aren't visible to successive actions. This also + // prevents systems from running out of disk space. + for _, d := range configuration.CleanTemporaryDirectories { + directory, err := filesystem.NewLocalDirectory(d) + if err != nil { + return util.StatusWrapf(err, "Failed to open temporary directory %#v", d) + } + cleaners = append(cleaners, cleaner.NewDirectoryCleaner(directory, d)) + } + + if len(configuration.RunCommandCleaner) > 0 { + cleaners = append( + cleaners, + cleaner.NewCommandRunningCleaner( + configuration.RunCommandCleaner[0], + configuration.RunCommandCleaner[1:])) + } + + if len(cleaners) > 0 { + r = runner.NewCleanRunner( + r, + cleaner.NewIdleInvoker(cleaner.NewChainedCleaner(cleaners))) + } + + // Paths that need to be present for the worker to be healthy. 
+ if len(configuration.ReadinessCheckingPathnames) > 0 { + r = runner.NewPathExistenceCheckingRunner(r, configuration.ReadinessCheckingPathnames) + } + + if len(configuration.AppleXcodeDeveloperDirectories) > 0 { + r = runner.NewAppleXcodeResolvingRunner( + r, + configuration.AppleXcodeDeveloperDirectories, + runner.NewCachingAppleXcodeSDKRootResolver( + runner.LocalAppleXcodeSDKRootResolver)) + } + + if err := bb_grpc.NewServersFromConfigurationAndServe( + configuration.GrpcServers, + func(s grpc.ServiceRegistrar) { + runner_pb.RegisterRunnerServer(s, r) + }, + siblingsGroup, + ); err != nil { + return util.StatusWrap(err, "gRPC server failure") + } + + lifecycleState.MarkReadyAndWait(siblingsGroup) + return nil + }) +} diff --git a/cmd/bb_scheduler/BUILD.bazel b/cmd/bb_scheduler/BUILD.bazel new file mode 100644 index 0000000..e6c66ee --- /dev/null +++ b/cmd/bb_scheduler/BUILD.bazel @@ -0,0 +1,104 @@ +load("@com_github_buildbarn_bb_storage//tools:container.bzl", "container_push_official") +load("@io_bazel_rules_docker//go:image.bzl", "go_image") +load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library") +load("@npm//:purgecss/package_json.bzl", purgecss_bin = "bin") + +go_library( + name = "bb_scheduler_lib", + srcs = [ + "build_queue_state_service.go", + "main.go", + ], + embedsrcs = [ + "stylesheet.css", + "templates/error.html", + "templates/footer.html", + "templates/get_build_queue_state.html", + "templates/get_operation_state.html", + "templates/header.html", + "templates/list_drain_state.html", + "templates/list_operation_state.html", + "templates/list_queued_operation_state.html", + "templates/list_worker_state.html", + "templates/size_class_queue_info.html", + "templates/invocation_info.html", + "templates/list_invocation_child_state.html", + ], + importpath = "github.com/buildbarn/bb-remote-execution/cmd/bb_scheduler", + visibility = ["//visibility:private"], + deps = [ + "//pkg/blobstore", + "//pkg/proto/buildqueuestate", + 
"//pkg/proto/configuration/bb_scheduler", + "//pkg/proto/remoteworker", + "//pkg/scheduler", + "//pkg/scheduler/initialsizeclass", + "//pkg/scheduler/routing", + "//pkg/util", + "@com_github_bazelbuild_remote_apis//build/bazel/remote/execution/v2:execution", + "@com_github_buildbarn_bb_storage//pkg/auth", + "@com_github_buildbarn_bb_storage//pkg/blobstore/configuration", + "@com_github_buildbarn_bb_storage//pkg/capabilities", + "@com_github_buildbarn_bb_storage//pkg/clock", + "@com_github_buildbarn_bb_storage//pkg/digest", + "@com_github_buildbarn_bb_storage//pkg/global", + "@com_github_buildbarn_bb_storage//pkg/grpc", + "@com_github_buildbarn_bb_storage//pkg/http", + "@com_github_buildbarn_bb_storage//pkg/program", + "@com_github_buildbarn_bb_storage//pkg/proto/iscc", + "@com_github_buildbarn_bb_storage//pkg/random", + "@com_github_buildbarn_bb_storage//pkg/util", + "@com_github_google_uuid//:uuid", + "@com_github_gorilla_mux//:mux", + "@org_golang_google_grpc//:go_default_library", + "@org_golang_google_grpc//codes", + "@org_golang_google_grpc//status", + "@org_golang_google_protobuf//encoding/protojson", + "@org_golang_google_protobuf//types/known/anypb", + "@org_golang_google_protobuf//types/known/emptypb", + "@org_golang_google_protobuf//types/known/timestamppb", + ], +) + +filegroup( + name = "templates", + srcs = glob(["templates/*.html"]), +) + +purgecss_bin.purgecss_binary( + name = "purgecss", + tags = ["manual"], +) + +# Create a copy of Bootstrap that only contains the style attributes +# used by the HTML templates. 
+genrule( + name = "stylesheet", + srcs = [ + "@com_github_twbs_bootstrap//:css/bootstrap.min.css", + ":templates", + ], + outs = ["stylesheet.css"], + cmd = "BAZEL_BINDIR=$(BINDIR) $(location :purgecss) --css $${PWD}/$(location @com_github_twbs_bootstrap//:css/bootstrap.min.css) --content $$(for i in $(locations templates); do echo $${PWD}/$${i}; done) --output $${PWD}/$@", + tools = [":purgecss"], +) + +go_binary( + name = "bb_scheduler", + embed = [":bb_scheduler_lib"], + pure = "on", + visibility = ["//visibility:public"], +) + +go_image( + name = "bb_scheduler_container", + embed = [":bb_scheduler_lib"], + pure = "on", + visibility = ["//visibility:public"], +) + +container_push_official( + name = "bb_scheduler_container_push", + component = "bb-scheduler", + image = ":bb_scheduler_container", +) diff --git a/cmd/bb_scheduler/build_queue_state_service.go b/cmd/bb_scheduler/build_queue_state_service.go new file mode 100644 index 0000000..699a695 --- /dev/null +++ b/cmd/bb_scheduler/build_queue_state_service.go @@ -0,0 +1,538 @@ +package main + +import ( + "context" + "embed" + "encoding/json" + "html/template" + "log" + "net/http" + "net/url" + "strconv" + "time" + + remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + "github.com/buildbarn/bb-remote-execution/pkg/proto/buildqueuestate" + re_util "github.com/buildbarn/bb-remote-execution/pkg/util" + "github.com/buildbarn/bb-storage/pkg/clock" + "github.com/buildbarn/bb-storage/pkg/digest" + bb_http "github.com/buildbarn/bb-storage/pkg/http" + "github.com/buildbarn/bb-storage/pkg/util" + "github.com/gorilla/mux" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/types/known/anypb" + "google.golang.org/protobuf/types/known/emptypb" + "google.golang.org/protobuf/types/known/timestamppb" +) + +const ( + // pageSize controls how many elements are showed in pages + // containing listings 
of workers, operations, etc. These pages + // can become quite big for large setups. + pageSize = 1000 +) + +var ( + //go:embed templates + templatesFS embed.FS + //go:embed stylesheet.css + stylesheet template.CSS + + templates = template.Must(template.New("templates").Funcs(template.FuncMap{ + "abbreviate": func(s string) string { + if len(s) > 11 { + return s[:8] + "..." + } + return s + }, + "action_url": func(browserURL *url.URL, instanceNamePrefix, instanceNameSuffix string, digestFunctionValue remoteexecution.DigestFunction_Value, actionDigest *remoteexecution.Digest) string { + iPrefix, err := digest.NewInstanceName(instanceNamePrefix) + if err != nil { + return "" + } + iSuffix, err := digest.NewInstanceName(instanceNameSuffix) + if err != nil { + return "" + } + digestFunction, err := digest.NewInstanceNamePatcher(digest.EmptyInstanceName, iPrefix). + PatchInstanceName(iSuffix). + GetDigestFunction(digestFunctionValue, 0) + if err != nil { + return "" + } + d, err := digestFunction.NewDigestFromProto(actionDigest) + if err != nil { + return "" + } + return re_util.GetBrowserURL(browserURL, "action", d) + }, + "get_child_invocation_name": func(parent *buildqueuestate.InvocationName, id *anypb.Any) *buildqueuestate.InvocationName { + return &buildqueuestate.InvocationName{ + SizeClassQueueName: parent.SizeClassQueueName, + Ids: append(append(make([]*anypb.Any, 0, len(parent.Ids)+1), parent.Ids...), id), + } + }, + "get_size_class_queue_name": func(platformQueueName *buildqueuestate.PlatformQueueName, sizeClass uint32) *buildqueuestate.SizeClassQueueName { + return &buildqueuestate.SizeClassQueueName{ + PlatformQueueName: platformQueueName, + SizeClass: sizeClass, + } + }, + "operation_stage_queued": func(o *buildqueuestate.OperationState) *emptypb.Empty { + if s, ok := o.Stage.(*buildqueuestate.OperationState_Queued); ok { + return s.Queued + } + return nil + }, + "operation_stage_executing": func(o *buildqueuestate.OperationState) *emptypb.Empty { + if s, 
ok := o.Stage.(*buildqueuestate.OperationState_Executing); ok { + return s.Executing + } + return nil + }, + "operation_stage_completed": func(o *buildqueuestate.OperationState) *remoteexecution.ExecuteResponse { + if s, ok := o.Stage.(*buildqueuestate.OperationState_Completed); ok { + return s.Completed + } + return nil + }, + "proto_to_json": protojson.MarshalOptions{}.Format, + "proto_to_json_multiline": protojson.MarshalOptions{Multiline: true}.Format, + "error_proto": status.ErrorProto, + "stylesheet": func() template.CSS { return stylesheet }, + "time_future": func(t *timestamppb.Timestamp, now time.Time) string { + if t == nil { + return "∞" + } + if t.CheckValid() != nil { + return "?" + } + return t.AsTime().Sub(now).Truncate(time.Second).String() + }, + "time_past": func(t *timestamppb.Timestamp, now time.Time) string { + if t.CheckValid() != nil { + return "?" + } + return now.Sub(t.AsTime()).Truncate(time.Second).String() + }, + "to_json": func(v interface{}) (string, error) { + b, err := json.MarshalIndent(v, " ", "") + if err != nil { + return "", err + } + return string(b), nil + }, + "to_background_color": func(s string) string { + return "#" + s[:6] + }, + "to_foreground_color": func(s string) string { + return "#" + invertColor(s[:2]) + invertColor(s[2:4]) + invertColor(s[4:6]) + }, + }).ParseFS(templatesFS, "templates/*.html")) +) + +// invertColor takes a single red, green or blue color value and +// transforms it to its high contrast counterpart. This color can be +// used to display high contrast text on top of an arbitrarily colored +// background. 
+func invertColor(s string) string { + if r, _ := strconv.ParseInt(s, 16, 0); r < 128 { + return "ff" + } + return "00" +} + +func renderError(w http.ResponseWriter, err error) { + s := status.Convert(err) + w.WriteHeader(bb_http.StatusCodeFromGRPCCode(s.Code())) + w.Header().Set("X-Content-Type-Options", "nosniff") + if err := templates.ExecuteTemplate(w, "error.html", s); err != nil { + log.Print(err) + } +} + +type buildQueueStateService struct { + buildQueue buildqueuestate.BuildQueueStateServer + clock clock.Clock + browserURL *url.URL +} + +func newBuildQueueStateService(buildQueue buildqueuestate.BuildQueueStateServer, clock clock.Clock, browserURL *url.URL, router *mux.Router) *buildQueueStateService { + s := &buildQueueStateService{ + buildQueue: buildQueue, + clock: clock, + browserURL: browserURL, + } + router.HandleFunc("/", s.handleGetBuildQueueState) + router.HandleFunc("/add_drain", s.handleAddDrain) + router.HandleFunc("/drains", s.handleListDrains) + router.HandleFunc("/invocation_children", s.handleListInvocationChildren) + router.HandleFunc("/kill_operation", s.handleKillOperation) + router.HandleFunc("/operation", s.handleGetOperation) + router.HandleFunc("/operations", s.handleListOperations) + router.HandleFunc("/queued_operations", s.handleListQueuedOperations) + router.HandleFunc("/remove_drain", s.handleRemoveDrain) + router.HandleFunc("/workers", s.handleListWorkers) + return s +} + +func (s *buildQueueStateService) handleGetBuildQueueState(w http.ResponseWriter, req *http.Request) { + ctx := req.Context() + operationsCount, err := s.buildQueue.ListOperations(ctx, &buildqueuestate.ListOperationsRequest{}) + if err != nil { + renderError(w, util.StatusWrap(err, "Failed to list platform queues")) + return + } + response, err := s.buildQueue.ListPlatformQueues(ctx, &emptypb.Empty{}) + if err != nil { + renderError(w, util.StatusWrap(err, "Failed to list platform queues")) + return + } + + if err := templates.ExecuteTemplate(w, 
"get_build_queue_state.html", struct { + Now time.Time + PlatformQueues []*buildqueuestate.PlatformQueueState + OperationsCount uint32 + }{ + Now: s.clock.Now(), + PlatformQueues: response.PlatformQueues, + OperationsCount: operationsCount.PaginationInfo.TotalEntries, + }); err != nil { + log.Print(err) + } +} + +func (s *buildQueueStateService) handleListInvocationChildren(w http.ResponseWriter, req *http.Request) { + query := req.URL.Query() + var invocationName buildqueuestate.InvocationName + if err := protojson.Unmarshal([]byte(query.Get("invocation_name")), &invocationName); err != nil { + renderError(w, util.StatusWrapWithCode(err, codes.InvalidArgument, "Failed to extract invocation name")) + return + } + filterString := query.Get("filter") + filterValue, ok := buildqueuestate.ListInvocationChildrenRequest_Filter_value[filterString] + if !ok { + renderError(w, status.Error(codes.InvalidArgument, "Invalid filter")) + return + } + + ctx := req.Context() + response, err := s.buildQueue.ListInvocationChildren(ctx, &buildqueuestate.ListInvocationChildrenRequest{ + InvocationName: &invocationName, + Filter: buildqueuestate.ListInvocationChildrenRequest_Filter(filterValue), + }) + if err != nil { + renderError(w, util.StatusWrap(err, "Failed to list invocation children")) + return + } + if err := templates.ExecuteTemplate(w, "list_invocation_child_state.html", struct { + InvocationName *buildqueuestate.InvocationName + Children []*buildqueuestate.InvocationChildState + Filter string + Now time.Time + }{ + InvocationName: &invocationName, + Children: response.Children, + Filter: filterString, + Now: s.clock.Now(), + }); err != nil { + log.Print(err) + } +} + +func (s *buildQueueStateService) handleKillOperation(w http.ResponseWriter, req *http.Request) { + req.ParseForm() + ctx := req.Context() + if _, err := s.buildQueue.KillOperations(ctx, &buildqueuestate.KillOperationsRequest{ + Filter: &buildqueuestate.KillOperationsRequest_Filter{ + Type: 
&buildqueuestate.KillOperationsRequest_Filter_OperationName{ + OperationName: req.FormValue("name"), + }, + }, + Status: status.New(codes.Unavailable, "Operation was killed through the web interface").Proto(), + }); err != nil { + renderError(w, util.StatusWrap(err, "Failed to kill operation")) + return + } + http.Redirect(w, req, req.Header.Get("Referer"), http.StatusSeeOther) +} + +func (s *buildQueueStateService) handleGetOperation(w http.ResponseWriter, req *http.Request) { + ctx := req.Context() + operationName := req.URL.Query().Get("name") + response, err := s.buildQueue.GetOperation(ctx, &buildqueuestate.GetOperationRequest{ + OperationName: operationName, + }) + if err != nil { + renderError(w, util.StatusWrap(err, "Failed to get operation")) + return + } + if err := templates.ExecuteTemplate(w, "get_operation_state.html", struct { + BrowserURL *url.URL + Now time.Time + OperationName string + Operation *buildqueuestate.OperationState + }{ + BrowserURL: s.browserURL, + Now: s.clock.Now(), + OperationName: operationName, + Operation: response.Operation, + }); err != nil { + log.Print(err) + } +} + +func (s *buildQueueStateService) handleListOperations(w http.ResponseWriter, req *http.Request) { + query := req.URL.Query() + var filterInvocationID *anypb.Any + if filterInvocationIDString := query.Get("filter_invocation_id"); filterInvocationIDString != "" { + var invocationID anypb.Any + if err := protojson.Unmarshal([]byte(filterInvocationIDString), &invocationID); err != nil { + renderError(w, status.Error(codes.InvalidArgument, "Invalid filter invocation ID")) + return + } + filterInvocationID = &invocationID + } + + filterStageString := query.Get("filter_stage") + filterStageValue, ok := remoteexecution.ExecutionStage_Value_value[filterStageString] + if !ok { + renderError(w, status.Error(codes.InvalidArgument, "Invalid filter stage")) + return + } + + var startAfter *buildqueuestate.ListOperationsRequest_StartAfter + if startAfterParameter := 
query.Get("start_after"); startAfterParameter != "" { + var startAfterMessage buildqueuestate.ListOperationsRequest_StartAfter + if err := protojson.Unmarshal([]byte(startAfterParameter), &startAfterMessage); err != nil { + renderError(w, util.StatusWrapWithCode(err, codes.InvalidArgument, "Failed to parse start after message")) + return + } + startAfter = &startAfterMessage + } + + ctx := req.Context() + response, err := s.buildQueue.ListOperations(ctx, &buildqueuestate.ListOperationsRequest{ + FilterInvocationId: filterInvocationID, + FilterStage: remoteexecution.ExecutionStage_Value(filterStageValue), + PageSize: pageSize, + StartAfter: startAfter, + }) + if err != nil { + renderError(w, util.StatusWrap(err, "Failed to list operations")) + return + } + + var nextStartAfter *buildqueuestate.ListOperationsRequest_StartAfter + if l := response.Operations; len(l) > 0 { + o := l[len(l)-1] + nextStartAfter = &buildqueuestate.ListOperationsRequest_StartAfter{ + OperationName: o.Name, + } + } + + if err := templates.ExecuteTemplate(w, "list_operation_state.html", struct { + BrowserURL *url.URL + Now time.Time + PaginationInfo *buildqueuestate.PaginationInfo + EndIndex int + FilterInvocationID *anypb.Any + FilterStage string + StartAfter *buildqueuestate.ListOperationsRequest_StartAfter + Operations []*buildqueuestate.OperationState + }{ + BrowserURL: s.browserURL, + Now: s.clock.Now(), + PaginationInfo: response.PaginationInfo, + EndIndex: int(response.PaginationInfo.StartIndex) + len(response.Operations), + FilterInvocationID: filterInvocationID, + FilterStage: filterStageString, + StartAfter: nextStartAfter, + Operations: response.Operations, + }); err != nil { + log.Print(err) + } +} + +func (s *buildQueueStateService) handleListQueuedOperations(w http.ResponseWriter, req *http.Request) { + query := req.URL.Query() + var invocationName buildqueuestate.InvocationName + if err := protojson.Unmarshal([]byte(query.Get("invocation_name")), &invocationName); err != nil { + 
renderError(w, util.StatusWrapWithCode(err, codes.InvalidArgument, "Failed to extract invocation name")) + return + } + + var startAfter *buildqueuestate.ListQueuedOperationsRequest_StartAfter + if startAfterParameter := req.URL.Query().Get("start_after"); startAfterParameter != "" { + var startAfterMessage buildqueuestate.ListQueuedOperationsRequest_StartAfter + if err := protojson.Unmarshal([]byte(startAfterParameter), &startAfterMessage); err != nil { + renderError(w, util.StatusWrapWithCode(err, codes.InvalidArgument, "Failed to parse start after message")) + return + } + startAfter = &startAfterMessage + } + + ctx := req.Context() + response, err := s.buildQueue.ListQueuedOperations(ctx, &buildqueuestate.ListQueuedOperationsRequest{ + InvocationName: &invocationName, + PageSize: pageSize, + StartAfter: startAfter, + }) + if err != nil { + renderError(w, util.StatusWrap(err, "Failed to list queued operation state")) + return + } + + var nextStartAfter *buildqueuestate.ListQueuedOperationsRequest_StartAfter + if l := response.QueuedOperations; len(l) > 0 { + o := l[len(l)-1] + nextStartAfter = &buildqueuestate.ListQueuedOperationsRequest_StartAfter{ + Priority: o.Priority, + ExpectedDuration: o.ExpectedDuration, + QueuedTimestamp: o.QueuedTimestamp, + } + } + + if err := templates.ExecuteTemplate(w, "list_queued_operation_state.html", struct { + InvocationName *buildqueuestate.InvocationName + BrowserURL *url.URL + Now time.Time + PaginationInfo *buildqueuestate.PaginationInfo + EndIndex int + StartAfter *buildqueuestate.ListQueuedOperationsRequest_StartAfter + QueuedOperations []*buildqueuestate.OperationState + }{ + InvocationName: &invocationName, + BrowserURL: s.browserURL, + Now: s.clock.Now(), + PaginationInfo: response.PaginationInfo, + EndIndex: int(response.PaginationInfo.StartIndex) + len(response.QueuedOperations), + StartAfter: nextStartAfter, + QueuedOperations: response.QueuedOperations, + }); err != nil { + log.Print(err) + } +} + +func (s 
*buildQueueStateService) handleListWorkers(w http.ResponseWriter, req *http.Request) { + query := req.URL.Query() + var filter buildqueuestate.ListWorkersRequest_Filter + if err := protojson.Unmarshal([]byte(query.Get("filter")), &filter); err != nil { + renderError(w, util.StatusWrapWithCode(err, codes.InvalidArgument, "Failed to extract filter")) + return + } + + var startAfter *buildqueuestate.ListWorkersRequest_StartAfter + if startAfterParameter := req.URL.Query().Get("start_after"); startAfterParameter != "" { + var startAfterMessage buildqueuestate.ListWorkersRequest_StartAfter + if err := protojson.Unmarshal([]byte(startAfterParameter), &startAfterMessage); err != nil { + renderError(w, util.StatusWrapWithCode(err, codes.InvalidArgument, "Failed to parse start after message")) + return + } + startAfter = &startAfterMessage + } + + ctx := req.Context() + response, err := s.buildQueue.ListWorkers(ctx, &buildqueuestate.ListWorkersRequest{ + Filter: &filter, + PageSize: pageSize, + StartAfter: startAfter, + }) + if err != nil { + renderError(w, util.StatusWrap(err, "Failed to list worker state")) + return + } + + var nextStartAfter *buildqueuestate.ListWorkersRequest_StartAfter + if l := response.Workers; len(l) > 0 { + w := l[len(l)-1] + nextStartAfter = &buildqueuestate.ListWorkersRequest_StartAfter{ + WorkerId: w.Id, + } + } + + if err := templates.ExecuteTemplate(w, "list_worker_state.html", struct { + Filter *buildqueuestate.ListWorkersRequest_Filter + BrowserURL *url.URL + Now time.Time + PaginationInfo *buildqueuestate.PaginationInfo + EndIndex int + StartAfter *buildqueuestate.ListWorkersRequest_StartAfter + Workers []*buildqueuestate.WorkerState + }{ + Filter: &filter, + BrowserURL: s.browserURL, + Now: s.clock.Now(), + PaginationInfo: response.PaginationInfo, + EndIndex: int(response.PaginationInfo.StartIndex) + len(response.Workers), + StartAfter: nextStartAfter, + Workers: response.Workers, + }); err != nil { + log.Print(err) + } +} + +func (s 
*buildQueueStateService) handleListDrains(w http.ResponseWriter, req *http.Request) { + query := req.URL.Query() + var sizeClassQueueName buildqueuestate.SizeClassQueueName + if err := protojson.Unmarshal([]byte(query.Get("size_class_queue_name")), &sizeClassQueueName); err != nil { + renderError(w, util.StatusWrapWithCode(err, codes.InvalidArgument, "Failed to extract size class queue name")) + return + } + + ctx := req.Context() + response, err := s.buildQueue.ListDrains(ctx, &buildqueuestate.ListDrainsRequest{ + SizeClassQueueName: &sizeClassQueueName, + }) + if err != nil { + renderError(w, util.StatusWrap(err, "Failed to list drain state")) + return + } + if err := templates.ExecuteTemplate(w, "list_drain_state.html", struct { + SizeClassQueueName *buildqueuestate.SizeClassQueueName + Now time.Time + Drains []*buildqueuestate.DrainState + }{ + SizeClassQueueName: &sizeClassQueueName, + Now: s.clock.Now(), + Drains: response.Drains, + }); err != nil { + log.Print(err) + } +} + +func handleModifyDrain(w http.ResponseWriter, req *http.Request, modifyFunc func(context.Context, *buildqueuestate.AddOrRemoveDrainRequest) (*emptypb.Empty, error)) { + req.ParseForm() + var sizeClassQueueName buildqueuestate.SizeClassQueueName + if err := protojson.Unmarshal([]byte(req.FormValue("size_class_queue_name")), &sizeClassQueueName); err != nil { + renderError(w, util.StatusWrapWithCode(err, codes.InvalidArgument, "Failed to extract size class queue name")) + return + } + + var workerIDPattern map[string]string + if err := json.Unmarshal([]byte(req.FormValue("worker_id_pattern")), &workerIDPattern); err != nil { + renderError(w, util.StatusWrapWithCode(err, codes.InvalidArgument, "Failed to extract worker ID pattern")) + return + } + + ctx := req.Context() + if _, err := modifyFunc(ctx, &buildqueuestate.AddOrRemoveDrainRequest{ + SizeClassQueueName: &sizeClassQueueName, + WorkerIdPattern: workerIDPattern, + }); err != nil { + renderError(w, util.StatusWrap(err, "Failed to 
modify drains")) + return + } + http.Redirect(w, req, req.Header.Get("Referer"), http.StatusSeeOther) +} + +func (s *buildQueueStateService) handleAddDrain(w http.ResponseWriter, req *http.Request) { + handleModifyDrain(w, req, s.buildQueue.AddDrain) +} + +func (s *buildQueueStateService) handleRemoveDrain(w http.ResponseWriter, req *http.Request) { + handleModifyDrain(w, req, s.buildQueue.RemoveDrain) +} diff --git a/cmd/bb_scheduler/main.go b/cmd/bb_scheduler/main.go new file mode 100644 index 0000000..40a06de --- /dev/null +++ b/cmd/bb_scheduler/main.go @@ -0,0 +1,223 @@ +package main + +import ( + "context" + "net/url" + "os" + "path" + "strings" + "time" + + remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + re_blobstore "github.com/buildbarn/bb-remote-execution/pkg/blobstore" + "github.com/buildbarn/bb-remote-execution/pkg/proto/buildqueuestate" + "github.com/buildbarn/bb-remote-execution/pkg/proto/configuration/bb_scheduler" + "github.com/buildbarn/bb-remote-execution/pkg/proto/remoteworker" + "github.com/buildbarn/bb-remote-execution/pkg/scheduler" + "github.com/buildbarn/bb-remote-execution/pkg/scheduler/initialsizeclass" + "github.com/buildbarn/bb-remote-execution/pkg/scheduler/routing" + "github.com/buildbarn/bb-storage/pkg/auth" + blobstore_configuration "github.com/buildbarn/bb-storage/pkg/blobstore/configuration" + "github.com/buildbarn/bb-storage/pkg/capabilities" + "github.com/buildbarn/bb-storage/pkg/clock" + "github.com/buildbarn/bb-storage/pkg/digest" + "github.com/buildbarn/bb-storage/pkg/global" + bb_grpc "github.com/buildbarn/bb-storage/pkg/grpc" + "github.com/buildbarn/bb-storage/pkg/http" + "github.com/buildbarn/bb-storage/pkg/program" + "github.com/buildbarn/bb-storage/pkg/proto/iscc" + "github.com/buildbarn/bb-storage/pkg/random" + "github.com/buildbarn/bb-storage/pkg/util" + "github.com/google/uuid" + "github.com/gorilla/mux" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + 
"google.golang.org/grpc/status" +) + +func main() { + program.RunMain(func(ctx context.Context, siblingsGroup, dependenciesGroup program.Group) error { + if len(os.Args) != 2 { + return status.Error(codes.InvalidArgument, "Usage: bb_scheduler bb_scheduler.jsonnet") + } + var configuration bb_scheduler.ApplicationConfiguration + if err := util.UnmarshalConfigurationFromFile(os.Args[1], &configuration); err != nil { + return util.StatusWrapf(err, "Failed to read configuration from %s", os.Args[1]) + } + lifecycleState, grpcClientFactory, err := global.ApplyConfiguration(configuration.Global) + if err != nil { + return util.StatusWrap(err, "Failed to apply global configuration options") + } + + browserURL, err := url.Parse(configuration.BrowserUrl) + if err != nil { + return util.StatusWrap(err, "Failed to parse browser URL") + } + + // Storage access. The scheduler requires access to the Action + // and Command messages stored in the CAS to obtain platform + // properties. + info, err := blobstore_configuration.NewBlobAccessFromConfiguration( + dependenciesGroup, + configuration.ContentAddressableStorage, + blobstore_configuration.NewCASBlobAccessCreator( + grpcClientFactory, + int(configuration.MaximumMessageSizeBytes))) + if err != nil { + return util.StatusWrap(err, "Failed to create Content Adddressable Storage") + } + contentAddressableStorage := re_blobstore.NewExistencePreconditionBlobAccess(info.BlobAccess) + + // Optional: Initial Size Class Cache (ISCC) access. This data + // store is only used if one or more parts of the ActionRouter + // are configured to use feedback driven initial size class + // analysis. 
+ var previousExecutionStatsStore initialsizeclass.PreviousExecutionStatsStore + if isccConfiguration := configuration.InitialSizeClassCache; isccConfiguration != nil { + info, err := blobstore_configuration.NewBlobAccessFromConfiguration( + dependenciesGroup, + isccConfiguration, + blobstore_configuration.NewISCCBlobAccessCreator( + grpcClientFactory, + int(configuration.MaximumMessageSizeBytes))) + if err != nil { + return util.StatusWrap(err, "Failed to create Initial Size Class Cache") + } + previousExecutionStatsStore = re_blobstore.NewBlobAccessMutableProtoStore[iscc.PreviousExecutionStats]( + info.BlobAccess, + int(configuration.MaximumMessageSizeBytes)) + } + + // Create an action router that is responsible for analyzing + // incoming execution requests and determining how they are + // scheduled. + actionRouter, err := routing.NewActionRouterFromConfiguration(configuration.ActionRouter, contentAddressableStorage, int(configuration.MaximumMessageSizeBytes), previousExecutionStatsStore) + if err != nil { + return util.StatusWrap(err, "Failed to create action router") + } + + authorizerFactory := auth.DefaultAuthorizerFactory + executeAuthorizer, err := authorizerFactory.NewAuthorizerFromConfiguration(configuration.ExecuteAuthorizer) + if err != nil { + return util.StatusWrap(err, "Failed to create execute authorizer") + } + modifyDrainsAuthorizer, err := authorizerFactory.NewAuthorizerFromConfiguration(configuration.ModifyDrainsAuthorizer) + if err != nil { + return util.StatusWrap(err, "Failed to create modify drains authorizer") + } + killOperationsAuthorizer, err := authorizerFactory.NewAuthorizerFromConfiguration(configuration.KillOperationsAuthorizer) + if err != nil { + return util.StatusWrap(err, "Failed to create kill operaitons authorizer") + } + + platformQueueWithNoWorkersTimeout := configuration.PlatformQueueWithNoWorkersTimeout + if err := platformQueueWithNoWorkersTimeout.CheckValid(); err != nil { + return util.StatusWrap(err, "Invalid 
platform queue with no workers timeout") + } + + // Create in-memory build queue. + // TODO: Make timeouts configurable. + generator := random.NewFastSingleThreadedGenerator() + buildQueue := scheduler.NewInMemoryBuildQueue( + contentAddressableStorage, + clock.SystemClock, + uuid.NewRandom, + &scheduler.InMemoryBuildQueueConfiguration{ + ExecutionUpdateInterval: time.Minute, + OperationWithNoWaitersTimeout: time.Minute, + PlatformQueueWithNoWorkersTimeout: platformQueueWithNoWorkersTimeout.AsDuration(), + BusyWorkerSynchronizationInterval: 10 * time.Second, + GetIdleWorkerSynchronizationInterval: func() time.Duration { + // Let synchronization calls block somewhere + // between 0 and 2 minutes. Add jitter to + // prevent recurring traffic spikes. + return random.Duration(generator, 2*time.Minute) + }, + WorkerTaskRetryCount: 9, + WorkerWithNoSynchronizationsTimeout: time.Minute, + }, + int(configuration.MaximumMessageSizeBytes), + actionRouter, + executeAuthorizer, + modifyDrainsAuthorizer, + killOperationsAuthorizer) + + // Create predeclared platform queues. 
+ for _, platformQueue := range configuration.PredeclaredPlatformQueues { + instanceName, err := digest.NewInstanceName(platformQueue.InstanceNamePrefix) + if err != nil { + return util.StatusWrapf(err, "Invalid instance name prefix %#v", platformQueue.InstanceNamePrefix) + } + workerInvocationStickinessLimits := make([]time.Duration, 0, len(platformQueue.WorkerInvocationStickinessLimits)) + for i, d := range platformQueue.WorkerInvocationStickinessLimits { + if err := d.CheckValid(); err != nil { + return util.StatusWrapf(err, "Invalid worker invocation stickiness limit at index %d: %s", i) + } + workerInvocationStickinessLimits = append(workerInvocationStickinessLimits, d.AsDuration()) + } + + if err := buildQueue.RegisterPredeclaredPlatformQueue( + instanceName, + platformQueue.Platform, + workerInvocationStickinessLimits, + int(platformQueue.MaximumQueuedBackgroundLearningOperations), + platformQueue.BackgroundLearningOperationPriority, + platformQueue.MaximumSizeClass, + ); err != nil { + return util.StatusWrap(err, "Failed to register predeclared platform queue") + } + } + + // Spawn gRPC servers for client and worker traffic. 
+ if err := bb_grpc.NewServersFromConfigurationAndServe( + configuration.ClientGrpcServers, + func(s grpc.ServiceRegistrar) { + remoteexecution.RegisterCapabilitiesServer( + s, + capabilities.NewServer(buildQueue)) + remoteexecution.RegisterExecutionServer(s, buildQueue) + }, + siblingsGroup, + ); err != nil { + return util.StatusWrap(err, "Client gRPC server failure") + } + if err := bb_grpc.NewServersFromConfigurationAndServe( + configuration.WorkerGrpcServers, + func(s grpc.ServiceRegistrar) { + remoteworker.RegisterOperationQueueServer(s, buildQueue) + }, + siblingsGroup, + ); err != nil { + return util.StatusWrap(err, "Worker gRPC server failure") + } + if err := bb_grpc.NewServersFromConfigurationAndServe( + configuration.BuildQueueStateGrpcServers, + func(s grpc.ServiceRegistrar) { + buildqueuestate.RegisterBuildQueueStateServer(s, buildQueue) + }, + siblingsGroup, + ); err != nil { + return util.StatusWrap(err, "Build queue state gRPC server failure") + } + + // Web server for metrics and profiling. + router := mux.NewRouter() + routePrefix := path.Join("/", configuration.AdminRoutePrefix) + if !strings.HasSuffix(routePrefix, "/") { + routePrefix += "/" + } + subrouter := router.PathPrefix(routePrefix).Subrouter() + newBuildQueueStateService(buildQueue, clock.SystemClock, browserURL, subrouter) + if err := http.NewServersFromConfigurationAndServe( + configuration.AdminHttpServers, + http.NewMetricsHandler(router, "SchedulerUI"), + siblingsGroup, + ); err != nil { + return util.StatusWrap(err, "Failed to create admin HTTP servers") + } + + lifecycleState.MarkReadyAndWait(siblingsGroup) + return nil + }) +} diff --git a/cmd/bb_scheduler/templates/error.html b/cmd/bb_scheduler/templates/error.html new file mode 100644 index 0000000..34690bd --- /dev/null +++ b/cmd/bb_scheduler/templates/error.html @@ -0,0 +1,7 @@ +{{template "header.html" "danger"}} + +

Error: {{.Code.String}}

+ +

{{.Message}}

+ +{{template "footer.html"}} diff --git a/cmd/bb_scheduler/templates/footer.html b/cmd/bb_scheduler/templates/footer.html new file mode 100644 index 0000000..faf65ca --- /dev/null +++ b/cmd/bb_scheduler/templates/footer.html @@ -0,0 +1,3 @@ + + + diff --git a/cmd/bb_scheduler/templates/get_build_queue_state.html b/cmd/bb_scheduler/templates/get_build_queue_state.html new file mode 100644 index 0000000..bc16394 --- /dev/null +++ b/cmd/bb_scheduler/templates/get_build_queue_state.html @@ -0,0 +1,76 @@ +{{template "header.html" "success"}} + +

Build queue

+ + + + + + +
Total number of operations:{{.OperationsCount}}
+ +

Platform queues

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + {{$now := .Now}} + {{range .PlatformQueues}} + + {{$platformQueueName := .Name}} + + + {{$addDivider := false}} + {{range .SizeClassQueues}} + {{if $addDivider}}{{end}} + {{$addDivider = true}} + + + {{$sizeClassQueueName := get_size_class_queue_name $platformQueueName .SizeClass}} + {{$sizeClassQueueNameJSON := proto_to_json $sizeClassQueueName}} + {{$invocationNameJSON := printf "{\"sizeClassQueueName\":%s}" $sizeClassQueueNameJSON}} + + + + + + + + + + {{end}} + + {{end}} + +
Instance name prefixPlatform propertiesSize classTimeoutRoot invocationAll workersDrains
Queued operationsChildrenWorkers
QueuedActiveAllExecutingIdleIdle synchronizing
{{$platformQueueName.InstanceNamePrefix | printf "%#v"}} + {{range $platformQueueName.Platform.Properties}} + {{.Name}}={{.Value | printf "%#v"}} + {{end}} +
{{.SizeClass}}{{time_future .Timeout $now}}{{.RootInvocation.QueuedOperationsCount}}{{.RootInvocation.QueuedChildrenCount}}{{.RootInvocation.ActiveChildrenCount}}{{.RootInvocation.ChildrenCount}}{{.RootInvocation.ExecutingWorkersCount}}{{.RootInvocation.IdleWorkersCount}}{{.RootInvocation.IdleSynchronizingWorkersCount}}{{.WorkersCount}}{{.DrainsCount}}
+
+ +{{template "footer.html"}} diff --git a/cmd/bb_scheduler/templates/get_operation_state.html b/cmd/bb_scheduler/templates/get_operation_state.html new file mode 100644 index 0000000..d865643 --- /dev/null +++ b/cmd/bb_scheduler/templates/get_operation_state.html @@ -0,0 +1,126 @@ +{{if operation_stage_queued .Operation}} + {{template "header.html" "secondary"}} +{{else}} + {{if operation_stage_executing .Operation}} + {{template "header.html" "secondary"}} + {{else}} + {{with operation_stage_completed .Operation}} + {{with error_proto .Status}} + {{template "header.html" "danger"}} + {{else}} + {{with .Result}} + {{if eq .ExitCode 0}} + {{template "header.html" "success"}} + {{else}} + {{template "header.html" "danger"}} + {{end}} + {{else}} + {{template "header.html" "danger"}} + {{end}} + {{end}} + {{else}} + {{template "header.html" "danger"}} + {{end}} + {{end}} +{{end}} + +

Operation {{.OperationName}}

+ + + {{$now := .Now}} + {{$invocationName := .Operation.InvocationName}} + {{$sizeClassQueueName := $invocationName.SizeClassQueueName}} + {{$platformQueueName := $sizeClassQueueName.PlatformQueueName}} + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Instance name prefix:{{$platformQueueName.InstanceNamePrefix | printf "%#v"}}
Instance name suffix:{{.Operation.InstanceNameSuffix | printf "%#v"}}
Platform properties: + {{range $platformQueueName.Platform.Properties}} + {{.Name}}={{.Value | printf "%#v"}} + {{end}} +
Size class:{{$sizeClassQueueName.SizeClass}}
Invocation IDs: + +
Action digest:{{proto_to_json .Operation.ActionDigest}}
Timeout:{{time_future .Operation.Timeout $now}}
Target ID:{{.Operation.TargetId}}
Priority:{{.Operation.Priority}}
Expected duration:{{.Operation.ExpectedDuration.AsDuration}}
Age:{{time_past .Operation.QueuedTimestamp $now}}
Stage: + {{with operation_stage_queued .Operation}} + Queued + {{else}} + {{with operation_stage_executing .Operation}} + Executing + {{else}} + {{with operation_stage_completed .Operation}} + {{with error_proto .Status}} + Failed with {{.}} + {{else}} + {{with .Result}} + Completed with exit code {{.ExitCode}} + {{else}} + Action result missing + {{end}} + {{end}} + {{else}} + Unknown + {{end}} + {{end}} + {{end}} +
+ +{{with operation_stage_completed .Operation}} +

Execute response

+
{{proto_to_json_multiline .}}
+{{else}} +
+ + +
+{{end}} + +{{template "footer.html"}} diff --git a/cmd/bb_scheduler/templates/header.html b/cmd/bb_scheduler/templates/header.html new file mode 100644 index 0000000..0a48f33 --- /dev/null +++ b/cmd/bb_scheduler/templates/header.html @@ -0,0 +1,14 @@ + + + + Buildbarn Scheduler + + + + + +
diff --git a/cmd/bb_scheduler/templates/invocation_info.html b/cmd/bb_scheduler/templates/invocation_info.html new file mode 100644 index 0000000..a83b26a --- /dev/null +++ b/cmd/bb_scheduler/templates/invocation_info.html @@ -0,0 +1,11 @@ +{{template "size_class_queue_info.html" .SizeClassQueueName}} + + Invocation IDs: + + + + diff --git a/cmd/bb_scheduler/templates/list_drain_state.html b/cmd/bb_scheduler/templates/list_drain_state.html new file mode 100644 index 0000000..5ded7e3 --- /dev/null +++ b/cmd/bb_scheduler/templates/list_drain_state.html @@ -0,0 +1,50 @@ +{{template "header.html" "success"}} + +

Drains

+ + + {{template "size_class_queue_info.html" .SizeClassQueueName}} +
+ +
+ + + + + + + + + + {{$sizeClassQueueName := proto_to_json .SizeClassQueueName}} + {{$now := .Now}} + {{range .Drains}} + + + + + + {{end}} + + + + + + + + + +
Worker ID patternAgeActions
+ {{range $key, $value := .WorkerIdPattern}} + {{$key}}={{$value | printf "%#v"}} + {{end}} + {{time_past .CreatedTimestamp $now}} +
+ + + +
+
+
+ +{{template "footer.html"}} diff --git a/cmd/bb_scheduler/templates/list_invocation_child_state.html b/cmd/bb_scheduler/templates/list_invocation_child_state.html new file mode 100644 index 0000000..9ed6f86 --- /dev/null +++ b/cmd/bb_scheduler/templates/list_invocation_child_state.html @@ -0,0 +1,80 @@ +{{template "header.html" "success"}} + +

Invocation children

+ +{{$invocationName := .InvocationName}} + + {{template "invocation_info.html" $invocationName}} +
+ + + +
+ + + + + + + + + + + + + + + + + + + {{$now := .Now}} + {{range .Children}} + {{$childInvocationName := get_child_invocation_name $invocationName .Id}} + {{$childInvocationNameJSON := proto_to_json $childInvocationName}} + + + + + + + + + + + {{end}} + +
Invocation IDQueued operationsChildrenWorkers
QueuedActiveAllExecutingIdleIdle synchronizing
{{proto_to_json .Id}}{{.State.QueuedOperationsCount}}{{.State.QueuedChildrenCount}}{{.State.ActiveChildrenCount}}{{.State.ChildrenCount}}{{.State.ExecutingWorkersCount}}{{.State.IdleWorkersCount}}{{.State.IdleSynchronizingWorkersCount}}
+
+ +{{template "footer.html"}} diff --git a/cmd/bb_scheduler/templates/list_operation_state.html b/cmd/bb_scheduler/templates/list_operation_state.html new file mode 100644 index 0000000..051a8aa --- /dev/null +++ b/cmd/bb_scheduler/templates/list_operation_state.html @@ -0,0 +1,139 @@ +{{template "header.html" "success"}} + +

Operations

+ +{{with .FilterInvocationID}} + + + + + +
Invocation ID:{{proto_to_json .}}
+{{end}} + +
+ + +
+ +
+ + + + + + + + + + + + {{$browserURL := .BrowserURL}} + {{$now := .Now}} + {{range .Operations}} + + + + + + {{if operation_stage_queued .}} + + {{else}} + {{with operation_stage_executing .}} + + {{else}} + {{with operation_stage_completed .}} + {{with error_proto .Status}} + + {{else}} + {{with .Result}} + {{if eq .ExitCode 0}} + + {{else}} + + {{end}} + {{else}} + + {{end}} + {{end}} + {{else}} + + {{end}} + {{end}} + {{end}} + + {{end}} + +
TimeoutOperation nameAction digestTarget IDStage
{{time_future .Timeout $now}} + {{abbreviate .Name}} + + {{abbreviate .ActionDigest.Hash}} + {{.TargetId}}Queued at priority {{.Priority}}ExecutingFailed with {{.}}Completed with exit code 0Completed with exit code {{.ExitCode}}Action result missingUnknown
+
+ +{{template "footer.html"}} diff --git a/cmd/bb_scheduler/templates/list_queued_operation_state.html b/cmd/bb_scheduler/templates/list_queued_operation_state.html new file mode 100644 index 0000000..21e07f8 --- /dev/null +++ b/cmd/bb_scheduler/templates/list_queued_operation_state.html @@ -0,0 +1,74 @@ +{{template "header.html" "success"}} + +

Queued operations

+ + + {{template "invocation_info.html" .InvocationName}} +
+ +{{$invocationNameJSON := proto_to_json .InvocationName}} + + +
+ + + + + + + + + + + + + + {{$browserURL := .BrowserURL}} + {{$now := .Now}} + {{$instanceNamePrefix := .InvocationName.SizeClassQueueName.PlatformQueueName.InstanceNamePrefix}} + {{range .QueuedOperations}} + + + + + + + + + + {{end}} + +
PriorityExpected durationAgeTimeoutOperation nameAction digestTarget ID
{{.Priority}}{{.ExpectedDuration.AsDuration}}{{time_past .QueuedTimestamp $now}}{{time_future .Timeout $now}} + {{abbreviate .Name}} + + {{abbreviate .ActionDigest.Hash}} + {{.TargetId}}
+
+ +{{template "footer.html"}} diff --git a/cmd/bb_scheduler/templates/list_worker_state.html b/cmd/bb_scheduler/templates/list_worker_state.html new file mode 100644 index 0000000..fe7c459 --- /dev/null +++ b/cmd/bb_scheduler/templates/list_worker_state.html @@ -0,0 +1,132 @@ +{{template "header.html" "success"}} + +

Workers

+ + + {{$sizeClassQueueNameJSON := "unset"}} + {{$invocationNameJSON := "unset"}} + {{$instanceNamePrefix := "unset"}} + {{with .Filter.GetExecuting}} + {{$sizeClassQueueNameJSON = proto_to_json .SizeClassQueueName}} + {{$invocationNameJSON = proto_to_json .}} + {{$instanceNamePrefix = .SizeClassQueueName.PlatformQueueName.InstanceNamePrefix}} + {{template "invocation_info.html" .}} + {{else}} + {{with .Filter.GetIdleSynchronizing}} + {{$sizeClassQueueNameJSON = proto_to_json .SizeClassQueueName}} + {{$invocationNameJSON = proto_to_json .}} + {{$instanceNamePrefix = .SizeClassQueueName.PlatformQueueName.InstanceNamePrefix}} + {{template "invocation_info.html" .}} + {{else}} + {{$sizeClassQueueName := .Filter.GetAll}} + {{$sizeClassQueueNameJSON = proto_to_json $sizeClassQueueName}} + {{$invocationNameJSON = printf "{\"sizeClassQueueName\":%s}" $sizeClassQueueNameJSON}} + {{$instanceNamePrefix = $sizeClassQueueName.PlatformQueueName.InstanceNamePrefix}} + {{template "size_class_queue_info.html" $sizeClassQueueName}} + {{end}} + {{end}} +
+ +
+ + +
+ +
+ + + + + + + + + + + + + {{$browserURL := .BrowserURL}} + {{$now := .Now}} + {{range .Workers}} + + + + {{with .CurrentOperation}} + + + + + {{else}} + + {{end}} + + {{end}} + +
Worker IDWorker timeoutOperation timeoutOperation nameAction digestTarget ID
+ {{range $key, $value := .Id}} + {{$key}}={{$value | printf "%#v"}} + {{end}} + {{with .Timeout}}{{time_future . $now}}{{else}}∞{{end}}{{with .Timeout}}{{time_future . $now}}{{else}}∞{{end}} + {{abbreviate .Name}} + + {{abbreviate .ActionDigest.Hash}} + {{.TargetId}}{{if .Drained}}drained{{else}}idle{{end}}
+
+ +{{template "footer.html"}} diff --git a/cmd/bb_scheduler/templates/size_class_queue_info.html b/cmd/bb_scheduler/templates/size_class_queue_info.html new file mode 100644 index 0000000..1262681 --- /dev/null +++ b/cmd/bb_scheduler/templates/size_class_queue_info.html @@ -0,0 +1,16 @@ + + Instance name prefix: + {{.PlatformQueueName.InstanceNamePrefix | printf "%#v"}} + + + Platform properties: + + {{range .PlatformQueueName.Platform.Properties}} + {{.Name}}={{.Value | printf "%#v"}} + {{end}} + + + + Size class: + {{.SizeClass}} + diff --git a/cmd/bb_virtual_tmp/BUILD.bazel b/cmd/bb_virtual_tmp/BUILD.bazel new file mode 100644 index 0000000..d0d3180 --- /dev/null +++ b/cmd/bb_virtual_tmp/BUILD.bazel @@ -0,0 +1,28 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library") + +go_library( + name = "bb_virtual_tmp_lib", + srcs = ["main.go"], + importpath = "github.com/buildbarn/bb-remote-execution/cmd/bb_virtual_tmp", + visibility = ["//visibility:private"], + deps = [ + "//pkg/filesystem/virtual", + "//pkg/filesystem/virtual/configuration", + "//pkg/proto/configuration/bb_virtual_tmp", + "//pkg/proto/tmp_installer", + "@com_github_buildbarn_bb_storage//pkg/filesystem/path", + "@com_github_buildbarn_bb_storage//pkg/global", + "@com_github_buildbarn_bb_storage//pkg/grpc", + "@com_github_buildbarn_bb_storage//pkg/program", + "@com_github_buildbarn_bb_storage//pkg/util", + "@org_golang_google_grpc//:go_default_library", + "@org_golang_google_grpc//codes", + "@org_golang_google_grpc//status", + ], +) + +go_binary( + name = "bb_virtual_tmp", + embed = [":bb_virtual_tmp_lib"], + visibility = ["//visibility:public"], +) diff --git a/cmd/bb_virtual_tmp/main.go b/cmd/bb_virtual_tmp/main.go new file mode 100644 index 0000000..2d1f8f8 --- /dev/null +++ b/cmd/bb_virtual_tmp/main.go @@ -0,0 +1,90 @@ +package main + +import ( + "context" + "os" + + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/virtual" + virtual_configuration 
"github.com/buildbarn/bb-remote-execution/pkg/filesystem/virtual/configuration" + "github.com/buildbarn/bb-remote-execution/pkg/proto/configuration/bb_virtual_tmp" + "github.com/buildbarn/bb-remote-execution/pkg/proto/tmp_installer" + "github.com/buildbarn/bb-storage/pkg/filesystem/path" + "github.com/buildbarn/bb-storage/pkg/global" + bb_grpc "github.com/buildbarn/bb-storage/pkg/grpc" + "github.com/buildbarn/bb-storage/pkg/program" + "github.com/buildbarn/bb-storage/pkg/util" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// This service provides a virtual file system that merely provides a +// symbolic link named "tmp". By implementing the tmp_installer gRPC +// service, bb_runner is capable of calling into this service to adjust +// the target of this symbolic link, so that it points to the temporary +// directory that bb_worker allocates for every action it runs. +// +// The "tmp" symbolic link is virtualized, meaning that the target +// differs depending on the credentials of the user accessing it. This +// makes it possible to give every build action its own /tmp, even if +// the host operating system does not offer file system namespace +// virtualization (containers/jails), or magic symlinks that evaluate to +// different locations based on the user ID. 
+ +func main() { + program.RunMain(func(ctx context.Context, siblingsGroup, dependenciesGroup program.Group) error { + if len(os.Args) != 2 { + return status.Error(codes.InvalidArgument, "Usage: bb_virtual_tmp bb_virtual_tmp.jsonnet") + } + var configuration bb_virtual_tmp.ApplicationConfiguration + if err := util.UnmarshalConfigurationFromFile(os.Args[1], &configuration); err != nil { + return util.StatusWrapf(err, "Failed to read configuration from %s", os.Args[1]) + } + lifecycleState, _, err := global.ApplyConfiguration(configuration.Global) + if err != nil { + return util.StatusWrap(err, "Failed to apply global configuration options") + } + + // Create symbolic link whose target can be set by users. + buildDirectory, scopeWalker := path.EmptyBuilder.Join(path.VoidScopeWalker) + if err := path.Resolve(configuration.BuildDirectoryPath, scopeWalker); err != nil { + return util.StatusWrap(err, "Failed to resolve build directory path") + } + userSettableSymlink := virtual.NewUserSettableSymlink(buildDirectory) + + // Expose the symbolic link through a virtual file system. + mount, handleAllocator, err := virtual_configuration.NewMountFromConfiguration( + configuration.Mount, + "bb_virtual_tmp", + /* rootDirectory = */ virtual_configuration.LongAttributeCaching, + /* childDirectories = */ virtual_configuration.LongAttributeCaching, + /* leaves = */ virtual_configuration.NoAttributeCaching) + if err != nil { + return util.StatusWrap(err, "Failed to create virtual file system mount") + } + if err := mount.Expose( + siblingsGroup, + handleAllocator.New().AsStatelessDirectory( + virtual.NewStaticDirectory(map[path.Component]virtual.DirectoryChild{ + path.MustNewComponent("tmp"): virtual.DirectoryChild{}. + FromLeaf(handleAllocator.New().AsNativeLeaf(userSettableSymlink)), + }))); err != nil { + return util.StatusWrap(err, "Failed to expose virtual file system mount") + } + + // Allow users to set the target through gRPC. 
+ if err := bb_grpc.NewServersFromConfigurationAndServe( + configuration.GrpcServers, + func(s grpc.ServiceRegistrar) { + tmp_installer.RegisterTemporaryDirectoryInstallerServer(s, userSettableSymlink) + }, + siblingsGroup, + ); err != nil { + return util.StatusWrap(err, "gRPC server failure") + } + + lifecycleState.MarkReadyAndWait(siblingsGroup) + return nil + }) +} diff --git a/cmd/bb_worker/BUILD.bazel b/cmd/bb_worker/BUILD.bazel new file mode 100644 index 0000000..519456d --- /dev/null +++ b/cmd/bb_worker/BUILD.bazel @@ -0,0 +1,88 @@ +load("@com_github_buildbarn_bb_storage//tools:container.bzl", "container_push_official") +load("@io_bazel_rules_docker//go:image.bzl", "go_image") +load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library") + +go_library( + name = "bb_worker_lib", + srcs = [ + "main.go", + "main_nonunix.go", + "main_unix.go", + ], + importpath = "github.com/buildbarn/bb-remote-execution/cmd/bb_worker", + visibility = ["//visibility:private"], + deps = [ + "//pkg/blobstore", + "//pkg/builder", + "//pkg/cas", + "//pkg/cleaner", + "//pkg/clock", + "//pkg/filesystem", + "//pkg/filesystem/virtual", + "//pkg/filesystem/virtual/configuration", + "//pkg/proto/completedactionlogger", + "//pkg/proto/configuration/bb_worker", + "//pkg/proto/remoteworker", + "//pkg/proto/runner", + "@com_github_buildbarn_bb_storage//pkg/blobstore", + "@com_github_buildbarn_bb_storage//pkg/blobstore/configuration", + "@com_github_buildbarn_bb_storage//pkg/clock", + "@com_github_buildbarn_bb_storage//pkg/digest", + "@com_github_buildbarn_bb_storage//pkg/eviction", + "@com_github_buildbarn_bb_storage//pkg/filesystem", + "@com_github_buildbarn_bb_storage//pkg/global", + "@com_github_buildbarn_bb_storage//pkg/program", + "@com_github_buildbarn_bb_storage//pkg/random", + "@com_github_buildbarn_bb_storage//pkg/util", + "@com_github_google_uuid//:uuid", + "@io_opentelemetry_go_otel//:otel", + "@org_golang_google_grpc//codes", + "@org_golang_google_grpc//status", + 
"@org_golang_x_sync//semaphore", + ] + select({ + "@io_bazel_rules_go//go/platform:android": [ + "@com_github_buildbarn_bb_storage//pkg/filesystem/path", + "@org_golang_x_sys//unix", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "@com_github_buildbarn_bb_storage//pkg/filesystem/path", + "@org_golang_x_sys//unix", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "@com_github_buildbarn_bb_storage//pkg/filesystem/path", + "@org_golang_x_sys//unix", + ], + "@io_bazel_rules_go//go/platform:ios": [ + "@com_github_buildbarn_bb_storage//pkg/filesystem/path", + "@org_golang_x_sys//unix", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "@com_github_buildbarn_bb_storage//pkg/filesystem/path", + "@org_golang_x_sys//unix", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "@com_github_buildbarn_bb_storage//pkg/filesystem/path", + ], + "//conditions:default": [], + }), +) + +go_binary( + name = "bb_worker", + embed = [":bb_worker_lib"], + pure = "on", + visibility = ["//visibility:public"], +) + +go_image( + name = "bb_worker_container", + embed = [":bb_worker_lib"], + pure = "on", + visibility = ["//visibility:public"], +) + +container_push_official( + name = "bb_worker_container_push", + component = "bb-worker", + image = ":bb_worker_container", +) diff --git a/cmd/bb_worker/main.go b/cmd/bb_worker/main.go new file mode 100644 index 0000000..488c46d --- /dev/null +++ b/cmd/bb_worker/main.go @@ -0,0 +1,473 @@ +package main + +import ( + "context" + "encoding/json" + "fmt" + "log" + "net/url" + "os" + "regexp" + "sort" + "strconv" + "sync/atomic" + "time" + + re_blobstore "github.com/buildbarn/bb-remote-execution/pkg/blobstore" + "github.com/buildbarn/bb-remote-execution/pkg/builder" + "github.com/buildbarn/bb-remote-execution/pkg/cas" + "github.com/buildbarn/bb-remote-execution/pkg/cleaner" + re_clock "github.com/buildbarn/bb-remote-execution/pkg/clock" + re_filesystem "github.com/buildbarn/bb-remote-execution/pkg/filesystem" + 
"github.com/buildbarn/bb-remote-execution/pkg/filesystem/virtual" + virtual_configuration "github.com/buildbarn/bb-remote-execution/pkg/filesystem/virtual/configuration" + cal_proto "github.com/buildbarn/bb-remote-execution/pkg/proto/completedactionlogger" + "github.com/buildbarn/bb-remote-execution/pkg/proto/configuration/bb_worker" + "github.com/buildbarn/bb-remote-execution/pkg/proto/remoteworker" + runner_pb "github.com/buildbarn/bb-remote-execution/pkg/proto/runner" + "github.com/buildbarn/bb-storage/pkg/blobstore" + blobstore_configuration "github.com/buildbarn/bb-storage/pkg/blobstore/configuration" + "github.com/buildbarn/bb-storage/pkg/clock" + "github.com/buildbarn/bb-storage/pkg/digest" + "github.com/buildbarn/bb-storage/pkg/eviction" + "github.com/buildbarn/bb-storage/pkg/filesystem" + "github.com/buildbarn/bb-storage/pkg/global" + "github.com/buildbarn/bb-storage/pkg/program" + "github.com/buildbarn/bb-storage/pkg/random" + "github.com/buildbarn/bb-storage/pkg/util" + "github.com/google/uuid" + + "golang.org/x/sync/semaphore" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "go.opentelemetry.io/otel" +) + +func main() { + program.RunMain(func(ctx context.Context, siblingsGroup, dependenciesGroup program.Group) error { + if len(os.Args) != 2 { + return status.Error(codes.InvalidArgument, "Usage: bb_worker bb_worker.jsonnet") + } + var configuration bb_worker.ApplicationConfiguration + if err := util.UnmarshalConfigurationFromFile(os.Args[1], &configuration); err != nil { + return util.StatusWrapf(err, "Failed to read configuration from %s", os.Args[1]) + } + lifecycleState, grpcClientFactory, err := global.ApplyConfiguration(configuration.Global) + if err != nil { + return util.StatusWrap(err, "Failed to apply global configuration options") + } + tracerProvider := otel.GetTracerProvider() + + browserURL, err := url.Parse(configuration.BrowserUrl) + if err != nil { + return util.StatusWrap(err, "Failed to parse browser URL") + } + + 
// Create connection with scheduler. + schedulerConnection, err := grpcClientFactory.NewClientFromConfiguration(configuration.Scheduler) + if err != nil { + return util.StatusWrap(err, "Failed to create scheduler RPC client") + } + schedulerClient := remoteworker.NewOperationQueueClient(schedulerConnection) + + // Location for storing temporary file objects. This is + // currently only used by the virtual file system to store + // output files of build actions. Going forward, this may be + // used to store core dumps generated by build actions as well. + filePool, err := re_filesystem.NewFilePoolFromConfiguration(configuration.FilePool) + if err != nil { + return util.StatusWrap(err, "Failed to create file pool") + } + + // Storage access. + globalContentAddressableStorage, actionCache, err := blobstore_configuration.NewCASAndACBlobAccessFromConfiguration( + dependenciesGroup, + configuration.Blobstore, + grpcClientFactory, + int(configuration.MaximumMessageSizeBytes)) + if err != nil { + return err + } + globalContentAddressableStorage = re_blobstore.NewExistencePreconditionBlobAccess(globalContentAddressableStorage) + + var prefetchingDownloadConcurrency *semaphore.Weighted + var fileSystemAccessCache blobstore.BlobAccess + prefetchingConfiguration := configuration.Prefetching + if prefetchingConfiguration != nil { + info, err := blobstore_configuration.NewBlobAccessFromConfiguration( + dependenciesGroup, + prefetchingConfiguration.FileSystemAccessCache, + blobstore_configuration.NewFSACBlobAccessCreator( + grpcClientFactory, + int(configuration.MaximumMessageSizeBytes))) + if err != nil { + return util.StatusWrap(err, "Failed to create File System Access Cache") + } + fileSystemAccessCache = info.BlobAccess + prefetchingDownloadConcurrency = semaphore.NewWeighted(prefetchingConfiguration.DownloadConcurrency) + } + + // Cached read access for Directory objects stored in the + // Content Addressable Storage. 
All workers make use of the same + // cache, to increase the hit rate. This process does not read + // Tree objects. + directoryFetcher, err := cas.NewCachingDirectoryFetcherFromConfiguration( + configuration.DirectoryCache, + cas.NewBlobAccessDirectoryFetcher( + globalContentAddressableStorage, + /* maximumDirectorySizeBytes = */ int(configuration.MaximumMessageSizeBytes), + /* maximumTreeSizeBytes = */ 0)) + if err != nil { + return util.StatusWrap(err, "Failed to create caching directory fetcher") + } + + if len(configuration.BuildDirectories) == 0 { + return status.Error(codes.InvalidArgument, "Cannot start worker without any build directories") + } + + // Setup the RemoteCompletedActionLogger for the + // ActionLoggingBuildExecutor to ensure we only create + // one client per worker rather than one per runner. + type remoteCompletedActionLogger struct { + logger builder.CompletedActionLogger + instanceNamePatcher digest.InstanceNamePatcher + } + remoteCompletedActionLoggers := make([]remoteCompletedActionLogger, 0, len(configuration.CompletedActionLoggers)) + for _, c := range configuration.CompletedActionLoggers { + loggerQueueConnection, err := grpcClientFactory.NewClientFromConfiguration(c.Client) + if err != nil { + return util.StatusWrap(err, "Failed to create a new gRPC client for logging completed actions") + } + client := cal_proto.NewCompletedActionLoggerClient(loggerQueueConnection) + logger := builder.NewRemoteCompletedActionLogger(int(c.MaximumSendQueueSize), client) + instanceNamePrefix, err := digest.NewInstanceName(c.AddInstanceNamePrefix) + if err != nil { + return util.StatusWrapf(err, "Invalid instance name prefix %#v", c.AddInstanceNamePrefix) + } + remoteCompletedActionLoggers = append(remoteCompletedActionLoggers, remoteCompletedActionLogger{ + logger: logger, + instanceNamePatcher: digest.NewInstanceNamePatcher(digest.EmptyInstanceName, instanceNamePrefix), + }) + // TODO: Run this as part of the program.Group, + // so that it gets 
cleaned up upon shutdown. + go func() { + generator := random.NewFastSingleThreadedGenerator() + for { + log.Print("Failure encountered while transmitting completed actions: ", logger.SendAllCompletedActions()) + time.Sleep(random.Duration(generator, 5*time.Second)) + } + }() + } + + outputUploadConcurrency := configuration.OutputUploadConcurrency + if outputUploadConcurrency <= 0 { + return status.Errorf(codes.InvalidArgument, "Nonpositive output upload concurrency: ", outputUploadConcurrency) + } + outputUploadConcurrencySemaphore := semaphore.NewWeighted(outputUploadConcurrency) + + testInfrastructureFailureShutdownState := builder.NewTestInfrastructureFailureShutdownState() + for _, buildDirectoryConfiguration := range configuration.BuildDirectories { + var virtualBuildDirectory virtual.PrepopulatedDirectory + var handleAllocator virtual.StatefulHandleAllocator + var symlinkFactory virtual.SymlinkFactory + var characterDeviceFactory virtual.CharacterDeviceFactory + var naiveBuildDirectory filesystem.DirectoryCloser + var fileFetcher cas.FileFetcher + var buildDirectoryCleaner cleaner.Cleaner + uploadBatchSize := blobstore.RecommendedFindMissingDigestsCount + var maximumExecutionTimeoutCompensation time.Duration + switch backend := buildDirectoryConfiguration.Backend.(type) { + case *bb_worker.BuildDirectoryConfiguration_Virtual: + var mount virtual_configuration.Mount + mount, handleAllocator, err = virtual_configuration.NewMountFromConfiguration( + backend.Virtual.Mount, + "bb_worker", + /* rootDirectory = */ virtual_configuration.ShortAttributeCaching, + /* childDirectories = */ virtual_configuration.LongAttributeCaching, + /* leaves = */ virtual_configuration.LongAttributeCaching) + if err != nil { + return util.StatusWrap(err, "Failed to create build directory mount") + } + + hiddenFilesPattern := func(s string) bool { return false } + if pattern := backend.Virtual.HiddenFilesPattern; pattern != "" { + hiddenFilesRegexp, err := regexp.Compile(pattern) + if 
err != nil { + return util.StatusWrap(err, "Failed to parse hidden files pattern") + } + hiddenFilesPattern = hiddenFilesRegexp.MatchString + } + + initialContentsSorter := sort.Sort + if backend.Virtual.ShuffleDirectoryListings { + initialContentsSorter = virtual.Shuffle + } + symlinkFactory = virtual.NewHandleAllocatingSymlinkFactory( + virtual.BaseSymlinkFactory, + handleAllocator.New()) + characterDeviceFactory = virtual.NewHandleAllocatingCharacterDeviceFactory( + virtual.BaseCharacterDeviceFactory, + handleAllocator.New()) + virtualBuildDirectory = virtual.NewInMemoryPrepopulatedDirectory( + virtual.NewHandleAllocatingFileAllocator( + virtual.NewPoolBackedFileAllocator( + re_filesystem.EmptyFilePool, + util.DefaultErrorLogger), + handleAllocator), + symlinkFactory, + util.DefaultErrorLogger, + handleAllocator, + initialContentsSorter, + hiddenFilesPattern, + clock.SystemClock) + + if err := mount.Expose(dependenciesGroup, virtualBuildDirectory); err != nil { + return util.StatusWrap(err, "Failed to expose build directory mount") + } + + buildDirectoryCleaner = func(ctx context.Context) error { + if err := virtualBuildDirectory.RemoveAllChildren(false); err != nil { + return util.StatusWrapWithCode(err, codes.Internal, "Failed to clean virtual build directory") + } + return nil + } + if err := backend.Virtual.MaximumExecutionTimeoutCompensation.CheckValid(); err != nil { + return util.StatusWrap(err, "Invalid maximum execution timeout compensation") + } + maximumExecutionTimeoutCompensation = backend.Virtual.MaximumExecutionTimeoutCompensation.AsDuration() + case *bb_worker.BuildDirectoryConfiguration_Native: + // Directory where actual builds take place. 
+ nativeConfiguration := backend.Native + naiveBuildDirectory, err = filesystem.NewLocalDirectory(nativeConfiguration.BuildDirectoryPath) + if err != nil { + return util.StatusWrapf(err, "Failed to open build directory %v", nativeConfiguration.BuildDirectoryPath) + } + buildDirectoryCleaner = cleaner.NewDirectoryCleaner(naiveBuildDirectory, nativeConfiguration.BuildDirectoryPath) + + // Create a cache directory that holds input + // files that can be hardlinked into build + // directory. + // + // TODO: Have a single process-wide hardlinking + // cache even if multiple build directories are + // used. This increases cache hit rate. + cacheDirectory, err := filesystem.NewLocalDirectory(nativeConfiguration.CacheDirectoryPath) + if err != nil { + return util.StatusWrap(err, "Failed to open cache directory") + } + if err := cacheDirectory.RemoveAllChildren(); err != nil { + return util.StatusWrap(err, "Failed to clear cache directory") + } + evictionSet, err := eviction.NewSetFromConfiguration[string](nativeConfiguration.CacheReplacementPolicy) + if err != nil { + return util.StatusWrap(err, "Failed to create eviction set for cache directory") + } + fileFetcher = cas.NewHardlinkingFileFetcher( + cas.NewBlobAccessFileFetcher(globalContentAddressableStorage), + cacheDirectory, + int(nativeConfiguration.MaximumCacheFileCount), + nativeConfiguration.MaximumCacheSizeBytes, + eviction.NewMetricsSet(evictionSet, "HardlinkingFileFetcher")) + + // Using a native file system requires us to + // hold on to file descriptors while uploading + // outputs. Limit the batch size to ensure that + // we don't exhaust file descriptors. 
+ uploadBatchSize = 100 + default: + return status.Error(codes.InvalidArgument, "No build directory specified") + } + + buildDirectoryIdleInvoker := cleaner.NewIdleInvoker(buildDirectoryCleaner) + var sharedBuildDirectoryNextParallelActionID atomic.Uint64 + if len(buildDirectoryConfiguration.Runners) == 0 { + return util.StatusWrap(err, "Cannot start worker without any runners") + } + for _, runnerConfiguration := range buildDirectoryConfiguration.Runners { + if runnerConfiguration.Concurrency < 1 { + return util.StatusWrap(err, "Runner concurrency must be positive") + } + concurrencyLength := len(strconv.FormatUint(runnerConfiguration.Concurrency-1, 10)) + + // Obtain raw device numbers of character + // devices that need to be available within the + // input root. + inputRootCharacterDevices, err := getInputRootCharacterDevices( + runnerConfiguration.InputRootCharacterDeviceNodes) + if err != nil { + return err + } + + // Execute commands using a separate runner process. Due to the + // interaction between threads, forking and execve() returning + // ETXTBSY, concurrent execution of build actions can only be + // used in combination with a runner process. Having a separate + // runner process also makes it possible to apply privilege + // separation. + runnerConnection, err := grpcClientFactory.NewClientFromConfiguration(runnerConfiguration.Endpoint) + if err != nil { + return util.StatusWrap(err, "Failed to create runner RPC client") + } + runnerClient := runner_pb.NewRunnerClient(runnerConnection) + + for threadID := uint64(0); threadID < runnerConfiguration.Concurrency; threadID++ { + // Per-worker separate writer of the Content + // Addressable Storage that batches writes after + // completing the build action. 
+ contentAddressableStorageWriter, contentAddressableStorageFlusher := re_blobstore.NewBatchedStoreBlobAccess( + globalContentAddressableStorage, + digest.KeyWithoutInstance, + uploadBatchSize, + outputUploadConcurrencySemaphore) + contentAddressableStorageWriter = blobstore.NewMetricsBlobAccess( + contentAddressableStorageWriter, + clock.SystemClock, + "cas", + "batched_store") + + // When the virtual file system is + // enabled, we can lazily load the input + // root, as opposed to explicitly + // instantiating it before every build. + var executionTimeoutClock clock.Clock + var buildDirectory builder.BuildDirectory + if virtualBuildDirectory != nil { + suspendableClock := re_clock.NewSuspendableClock( + clock.SystemClock, + maximumExecutionTimeoutCompensation, + time.Second/10) + executionTimeoutClock = suspendableClock + buildDirectory = builder.NewVirtualBuildDirectory( + virtualBuildDirectory, + cas.NewSuspendingDirectoryFetcher( + directoryFetcher, + suspendableClock), + re_blobstore.NewSuspendingBlobAccess( + contentAddressableStorageWriter, + suspendableClock), + symlinkFactory, + characterDeviceFactory, + handleAllocator) + } else { + executionTimeoutClock = clock.SystemClock + buildDirectory = builder.NewNaiveBuildDirectory( + naiveBuildDirectory, + directoryFetcher, + fileFetcher, + contentAddressableStorageWriter) + } + + // Create a per-action subdirectory in + // the build directory named after the + // action digest, so that multiple + // actions may be run concurrently. + // + // Also clean the build directory every + // time when going from fully idle to + // executing one action. 
+ buildDirectoryCreator := builder.NewSharedBuildDirectoryCreator( + builder.NewCleanBuildDirectoryCreator( + builder.NewRootBuildDirectoryCreator(buildDirectory), + buildDirectoryIdleInvoker), + &sharedBuildDirectoryNextParallelActionID) + + workerID := map[string]string{} + if runnerConfiguration.Concurrency > 1 { + workerID["thread"] = fmt.Sprintf("%0*d", concurrencyLength, threadID) + } + for k, v := range runnerConfiguration.WorkerId { + workerID[k] = v + } + workerName, err := json.Marshal(workerID) + if err != nil { + return util.StatusWrap(err, "Failed to marshal worker ID") + } + + buildExecutor := builder.NewLocalBuildExecutor( + contentAddressableStorageWriter, + buildDirectoryCreator, + runnerClient, + executionTimeoutClock, + inputRootCharacterDevices, + int(configuration.MaximumMessageSizeBytes), + runnerConfiguration.EnvironmentVariables) + + if prefetchingConfiguration != nil { + buildExecutor = builder.NewPrefetchingBuildExecutor( + buildExecutor, + globalContentAddressableStorage, + directoryFetcher, + prefetchingDownloadConcurrency, + fileSystemAccessCache, + int(configuration.MaximumMessageSizeBytes), + int(prefetchingConfiguration.BloomFilterBitsPerPath), + int(prefetchingConfiguration.BloomFilterMaximumSizeBytes)) + } + + buildExecutor = builder.NewMetricsBuildExecutor( + builder.NewFilePoolStatsBuildExecutor( + builder.NewTimestampedBuildExecutor( + builder.NewStorageFlushingBuildExecutor( + buildExecutor, + contentAddressableStorageFlusher), + clock.SystemClock, + string(workerName)))) + + if len(runnerConfiguration.CostsPerSecond) > 0 { + buildExecutor = builder.NewCostComputingBuildExecutor(buildExecutor, runnerConfiguration.CostsPerSecond) + } + + if maximumConsecutiveFailures := runnerConfiguration.MaximumConsecutiveTestInfrastructureFailures; maximumConsecutiveFailures > 0 { + buildExecutor = builder.NewTestInfrastructureFailureDetectingBuildExecutor( + buildExecutor, + testInfrastructureFailureShutdownState, + 
maximumConsecutiveFailures) + } + + buildExecutor = builder.NewCachingBuildExecutor( + buildExecutor, + globalContentAddressableStorage, + actionCache, + browserURL) + + for _, remoteCompletedActionLogger := range remoteCompletedActionLoggers { + buildExecutor = builder.NewCompletedActionLoggingBuildExecutor( + buildExecutor, + uuid.NewRandom, + remoteCompletedActionLogger.logger, + remoteCompletedActionLogger.instanceNamePatcher) + } + + buildExecutor = builder.NewTracingBuildExecutor( + builder.NewLoggingBuildExecutor( + buildExecutor, + browserURL), + tracerProvider) + + instanceNamePrefix, err := digest.NewInstanceName(runnerConfiguration.InstanceNamePrefix) + if err != nil { + return util.StatusWrapf(err, "Invalid instance name prefix %#v", runnerConfiguration.InstanceNamePrefix) + } + + buildClient := builder.NewBuildClient( + schedulerClient, + buildExecutor, + re_filesystem.NewQuotaEnforcingFilePool( + filePool, + runnerConfiguration.MaximumFilePoolFileCount, + runnerConfiguration.MaximumFilePoolSizeBytes), + clock.SystemClock, + workerID, + instanceNamePrefix, + runnerConfiguration.Platform, + runnerConfiguration.SizeClass) + builder.LaunchWorkerThread(siblingsGroup, buildClient, string(workerName)) + } + } + } + + lifecycleState.MarkReadyAndWait(siblingsGroup) + return nil + }) +} diff --git a/cmd/bb_worker/main_nonunix.go b/cmd/bb_worker/main_nonunix.go new file mode 100644 index 0000000..37114c9 --- /dev/null +++ b/cmd/bb_worker/main_nonunix.go @@ -0,0 +1,19 @@ +//go:build windows +// +build windows + +package main + +import ( + "github.com/buildbarn/bb-storage/pkg/filesystem" + "github.com/buildbarn/bb-storage/pkg/filesystem/path" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func getInputRootCharacterDevices(names []string) (map[path.Component]filesystem.DeviceNumber, error) { + if len(names) > 0 { + return nil, status.Error(codes.Unimplemented, "Character devices are not supported on this platform") + } + return 
map[path.Component]filesystem.DeviceNumber{}, nil +} diff --git a/cmd/bb_worker/main_unix.go b/cmd/bb_worker/main_unix.go new file mode 100644 index 0000000..39323aa --- /dev/null +++ b/cmd/bb_worker/main_unix.go @@ -0,0 +1,37 @@ +//go:build darwin || freebsd || linux +// +build darwin freebsd linux + +package main + +import ( + "path/filepath" + "syscall" + + "github.com/buildbarn/bb-storage/pkg/filesystem" + "github.com/buildbarn/bb-storage/pkg/filesystem/path" + "github.com/buildbarn/bb-storage/pkg/util" + + "golang.org/x/sys/unix" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func getInputRootCharacterDevices(names []string) (map[path.Component]filesystem.DeviceNumber, error) { + inputRootCharacterDevices := map[path.Component]filesystem.DeviceNumber{} + for _, device := range names { + var stat unix.Stat_t + devicePath := filepath.Join("/dev", device) + if err := unix.Stat(devicePath, &stat); err != nil { + return nil, util.StatusWrapf(err, "Unable to stat character device %#v", devicePath) + } + if stat.Mode&syscall.S_IFMT != syscall.S_IFCHR { + return nil, status.Errorf(codes.InvalidArgument, "The specified device %#v is not a character device", devicePath) + } + component, ok := path.NewComponent(device) + if !ok { + return nil, status.Errorf(codes.InvalidArgument, "Device %#v has an invalid name", devicePath) + } + inputRootCharacterDevices[component] = filesystem.NewDeviceNumberFromRaw(uint64(stat.Rdev)) + } + return inputRootCharacterDevices, nil +} diff --git a/cmd/fake_python/BUILD.bazel b/cmd/fake_python/BUILD.bazel new file mode 100644 index 0000000..db3e1e5 --- /dev/null +++ b/cmd/fake_python/BUILD.bazel @@ -0,0 +1,14 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library") + +go_library( + name = "fake_python_lib", + srcs = ["main.go"], + importpath = "github.com/buildbarn/bb-remote-execution/cmd/fake_python", + visibility = ["//visibility:private"], +) + +go_binary( + name = "fake_python", + embed = 
[":fake_python_lib"], + visibility = ["//visibility:public"], +) diff --git a/cmd/fake_python/main.go b/cmd/fake_python/main.go new file mode 100644 index 0000000..59b851f --- /dev/null +++ b/cmd/fake_python/main.go @@ -0,0 +1,73 @@ +package main + +import ( + "bufio" + "io" + "log" + "os" + "path/filepath" + "regexp" + "syscall" +) + +// The Python stub template that Bazel generates for every py_binary() +// always starts with '#!/usr/bin/env python'. This means that even if +// build actions provide their own copy of Python as part of the input +// root, the remote execution environment must also provide a copy for +// evaluating the stub template. +// +// This tool is a drop-in replacement for /usr/bin/python that is only +// capable of accepting Python stub templates, executing them with the +// copy of Python that is stored in the input root. It obtains the path +// of the Python interpreter by extracting the PYTHON_BINARY constant +// declared in the stub template. + +func extractPythonBinary(r io.Reader) string { + s := bufio.NewScanner(r) + pattern := regexp.MustCompile("^PYTHON_BINARY\\s*=\\s*'(.*)'$") + for s.Scan() { + if match := pattern.FindStringSubmatch(s.Text()); match != nil { + return match[1] + } + } + if err := s.Err(); err != nil { + log.Fatal("Failed to read line from Python stub template: ", err) + } + log.Fatal("Python stub template does not contain a valid PYTHON_BINARY declaration") + return "" +} + +func main() { + if len(os.Args) < 2 { + log.Fatal("Fake Python can only be used to run Python stub templates generated by Bazel") + } + + // Extract the "PYTHON_BINARY = '...'" line from the Python stub + // template that was generated by Bazel. + f, err := os.Open(os.Args[1]) + if err != nil { + log.Fatalf("Cannot open Python stub template %#v: %s", os.Args[1], err) + } + pythonBinary := extractPythonBinary(f) + f.Close() + + // Determine the path inside the runfiles directory at which the + // Python interpreter is placed. 
Tests have RUNFILES_DIR set, + // while build actions do not. Look up the runfiles directory + // manually if needed. + runfilesDir, ok := os.LookupEnv("RUNFILES_DIR") + if !ok { + runfilesDir = os.Args[1] + ".runfiles" + } + pythonPath := filepath.Join(runfilesDir, pythonBinary) + + // Execute the Python stub template using the copy of Python + // that it was going to use to run the actual program. + log.Fatalf( + "Failed to execute %#v: %s", + pythonPath, + syscall.Exec( + pythonPath, + append([]string{pythonPath}, os.Args[1:]...), + os.Environ())) +} diff --git a/cmd/fake_xcrun/BUILD.bazel b/cmd/fake_xcrun/BUILD.bazel new file mode 100644 index 0000000..090fcbe --- /dev/null +++ b/cmd/fake_xcrun/BUILD.bazel @@ -0,0 +1,15 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library") + +go_library( + name = "fake_xcrun_lib", + srcs = ["main.go"], + importpath = "github.com/buildbarn/bb-remote-execution/cmd/fake_xcrun", + visibility = ["//visibility:private"], + deps = ["@com_github_spf13_pflag//:pflag"], +) + +go_binary( + name = "fake_xcrun", + embed = [":fake_xcrun_lib"], + visibility = ["//visibility:public"], +) diff --git a/cmd/fake_xcrun/main.go b/cmd/fake_xcrun/main.go new file mode 100644 index 0000000..2a6841a --- /dev/null +++ b/cmd/fake_xcrun/main.go @@ -0,0 +1,51 @@ +package main + +import ( + "fmt" + "log" + "os" + "path/filepath" + "syscall" + + "github.com/spf13/pflag" +) + +// On a system that has none of the components of Xcode installed, tools +// like /usr/bin/cc and /usr/bin/python3 will not be functional. They +// can be made to work by either installing full Xcode or just the Xcode +// CLTools package. +// +// This tool can be used to make the stubs in /usr/bin work without +// installing any of the Xcode components. When installed at +// /Library/Developer/CommandLineTools/usr/bin/xcrun, all invocations of +// stubs /usr/bin will be forwarded to executables in the same directory +// as xcrun. 
+// +// This tool can, for example, be used to forward all invocations of +// development tools in the base system to binaries placed in action +// input roots. + +func main() { + find := pflag.Bool("find", false, "Only find and print the tool path") + pflag.SetInterspersed(false) + pflag.Parse() + args := pflag.Args() + if len(args) == 0 { + log.Fatal("Expected xcrun to be invoked with a utility name") + } + utilityPath := filepath.Join(filepath.Dir(os.Args[0]), args[0]) + if *find { + if len(args) != 1 { + log.Fatal("--find can only be called with a single argument") + } + fmt.Println(utilityPath) + } else { + log.Fatalf( + "Failed to execute %#v: %s", + utilityPath, + syscall.Exec( + utilityPath, + append([]string{utilityPath}, args[1:]...), + os.Environ())) + } +} diff --git a/doc/zh_CN/README.md b/doc/zh_CN/README.md new file mode 100644 index 0000000..7d2957d --- /dev/null +++ b/doc/zh_CN/README.md @@ -0,0 +1,24 @@ +*注:本文档是从2021-01-25的英语版本翻译而来的* + +# Buildbarn 远端执行 + +本仓库里的工具可以与[Buildbarn storage daemon](https://github.com/buildbarn/bb-storage) 共同组成一个[构建集群](https://en.wikipedia.org/wiki/Compile_farm) 来为 [Bazel](https://bazel.build/), [BuildStream](https://wiki.gnome.org/Projects/BuildStream) and [recc](https://gitlab.com/bloomberg/recc)等工具提供远端执行服务。 + +本仓库提供3个应用程序: + +- `bb_scheduler`: 这个服务会接收[`bb_storage`](https://github.com/buildbarn/bb-storage)的请求,并将需要运行的Build Action入队 +- `bb_worker`: 这个服务从`bb_scheduler` 获取Build Action并且编排他们的执行。其中包含了下载Build Action的输入文件和上传Build Action的输出文件。 +- `bb_runner`: 这个服务执行Build Action所关联的命令。 + +大多数的部署会跑一个`bb_scheduler`的实例以及大量成对的`bb_worker`/`bb_runner`进程。老版本的Buildbarn将`bb_worker` 和`bb_runner`的功能集成在一个单进程中。这些进程组合在一起完成以下功能: + +- 权限隔离: 防止Build Action对输入文件进行改写,使得`bb_worker`可以在Build Action间缓存这些文件,并通过硬链接暴露给Build Action +- 执行可插拔:`bb_worker`通过 [gRPC协议](https://github.com/buildbarn/bb-remote-execution/blob/master/pkg/proto/runner/runner.proto)与`bb_runner`通信。例如一个集成了用[QEMU](https://www.qemu.org/)执行Build Action的Runner进程的云服务 +- 
解决[并发竞争的问题](https://github.com/golang/go/issues/22315) : 有效防止多线程的进程向磁盘写可执行程序并且执行他们。Buildbarn提供的解决方案是 `bb_worker`将可执行程序写到磁盘,`bb_runner` 来执行他们。 + + + +本仓库为每个组件提供了容器镜像。对于`bb_runner`来说,提供了一个不包含用户区的容器镜像,也提供了在容器启动时安装`bb_runner`的方案。前者对于[BuildStream](https://buildstream.build/)来说已经足够了,而后者可以用来与Google RBE's [Ubuntu 16.04 容器镜像](https://console.cloud.google.com/marketplace/details/google/rbe-ubuntu16-04)一同组合使用。用 Ubuntu 16.04 容器镜像的优点是Bazel提供了拿来即用的[工具链定义](https://github.com/bazelbuild/bazel-toolchains)。 + +请参考[Buildbarn Deployments 仓库](https://github.com/buildbarn/bb-deployments) 中关于如何设置这些工具的例子。 + diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..20b29a8 --- /dev/null +++ b/go.mod @@ -0,0 +1,96 @@ +module github.com/buildbarn/bb-remote-execution + +go 1.20 + +// https://github.com/grpc-ecosystem/grpc-gateway/commit/5f9757f31b517d98095209636b2b88cd6326572f +replace github.com/grpc-ecosystem/grpc-gateway/v2 => github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.1 + +require ( + cloud.google.com/go/longrunning v0.5.1 + github.com/bazelbuild/remote-apis v0.0.0-20230822133051-6c32c3b917cc + github.com/buildbarn/bb-storage v0.0.0-20231008111112-ba53c0ad05f2 + github.com/buildbarn/go-xdr v0.0.0-20231002195348-0d2d95eab08c + github.com/golang/protobuf v1.5.3 + github.com/google/uuid v1.3.1 + github.com/gorilla/mux v1.8.0 + github.com/hanwen/go-fuse/v2 v2.4.0 + github.com/jmespath/go-jmespath v0.4.0 + github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 + github.com/prometheus/client_golang v1.17.0 + github.com/spf13/pflag v1.0.5 + go.opentelemetry.io/otel v1.19.0 + go.opentelemetry.io/otel/trace v1.19.0 + golang.org/x/sync v0.4.0 + golang.org/x/sys v0.13.0 + google.golang.org/genproto/googleapis/rpc v0.0.0-20231009173412-8bfb1ae86b6c + google.golang.org/grpc v1.58.2 + google.golang.org/protobuf v1.31.0 +) + +require ( + cloud.google.com/go v0.110.8 // indirect + cloud.google.com/go/compute v1.23.0 // indirect + cloud.google.com/go/compute/metadata v0.2.3 // 
indirect + cloud.google.com/go/iam v1.1.2 // indirect + cloud.google.com/go/storage v1.33.0 // indirect + github.com/aohorodnyk/mimeheader v0.0.6 // indirect + github.com/aws/aws-sdk-go-v2 v1.21.0 // indirect + github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.13 // indirect + github.com/aws/aws-sdk-go-v2/config v1.18.41 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.13.39 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.11 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.41 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.35 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.3.42 // indirect + github.com/aws/aws-sdk-go-v2/internal/v4a v1.1.4 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.14 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.36 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.35 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.15.4 // indirect + github.com/aws/aws-sdk-go-v2/service/s3 v1.39.0 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.14.0 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.17.0 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.22.0 // indirect + github.com/aws/smithy-go v1.14.2 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/fxtlabs/primes v0.0.0-20150821004651-dad82d10a449 // indirect + github.com/go-jose/go-jose/v3 v3.0.0 // indirect + github.com/go-logr/logr v1.2.4 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/google/go-cmp v0.5.9 // indirect + github.com/google/go-jsonnet v0.20.0 // indirect + github.com/google/s2a-go v0.1.7 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.2.5 // indirect + 
github.com/googleapis/gax-go/v2 v2.12.0 // indirect + github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 // indirect + github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect + github.com/klauspost/compress v1.17.0 // indirect + github.com/lazybeaver/xorshift v0.0.0-20170702203709-ce511d4823dd // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect + github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16 // indirect + github.com/prometheus/common v0.44.0 // indirect + github.com/prometheus/procfs v0.11.1 // indirect + go.opencensus.io v0.24.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.44.0 // indirect + go.opentelemetry.io/contrib/propagators/b3 v1.19.0 // indirect + go.opentelemetry.io/otel/exporters/jaeger v1.17.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.18.0 // indirect + go.opentelemetry.io/otel/metric v1.19.0 // indirect + go.opentelemetry.io/otel/sdk v1.18.0 // indirect + go.opentelemetry.io/proto/otlp v1.0.0 // indirect + golang.org/x/crypto v0.13.0 // indirect + golang.org/x/net v0.15.0 // indirect + golang.org/x/oauth2 v0.12.0 // indirect + golang.org/x/text v0.13.0 // indirect + golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect + google.golang.org/api v0.142.0 // indirect + google.golang.org/appengine v1.6.7 // indirect + google.golang.org/genproto v0.0.0-20231002182017-d307bd883b97 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20230920204549-e6e6cdab5c13 // indirect + google.golang.org/genproto/googleapis/bytestream v0.0.0-20230920204549-e6e6cdab5c13 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + sigs.k8s.io/yaml v1.1.0 // indirect +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..838f297 --- /dev/null +++ b/go.sum @@ -0,0 +1,349 @@ +cloud.google.com/go v0.26.0/go.mod 
h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.110.8 h1:tyNdfIxjzaWctIiLYOTalaLKZ17SI44SKFW26QbOhME= +cloud.google.com/go v0.110.8/go.mod h1:Iz8AkXJf1qmxC3Oxoep8R1T36w8B92yU29PcBhHO5fk= +cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY= +cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= +cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= +cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +cloud.google.com/go/iam v1.1.2 h1:gacbrBdWcoVmGLozRuStX45YKvJtzIjJdAolzUs1sm4= +cloud.google.com/go/iam v1.1.2/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU= +cloud.google.com/go/longrunning v0.5.1 h1:Fr7TXftcqTudoyRJa113hyaqlGdiBQkp0Gq7tErFDWI= +cloud.google.com/go/longrunning v0.5.1/go.mod h1:spvimkwdz6SPWKEt/XBij79E9fiTkHSQl/fRUUQJYJc= +cloud.google.com/go/storage v1.33.0 h1:PVrDOkIC8qQVa1P3SXGpQvfuJhN2LHOoyZvWs8D2X5M= +cloud.google.com/go/storage v1.33.0/go.mod h1:Hhh/dogNRGca7IWv1RC2YqEn0c0G77ctA/OxflYkiD8= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/aohorodnyk/mimeheader v0.0.6 h1:WCV4NQjtbqnd2N3FT5MEPesan/lfvaLYmt5v4xSaX/M= +github.com/aohorodnyk/mimeheader v0.0.6/go.mod h1:/Gd3t3vszyZYwjNJo2qDxoftZjjVzMdkQZxkiINp3vM= +github.com/aws/aws-sdk-go-v2 v1.21.0 h1:gMT0IW+03wtYJhRqTVYn0wLzwdnK9sRMcxmtfGzRdJc= +github.com/aws/aws-sdk-go-v2 v1.21.0/go.mod h1:/RfNgGmRxI+iFOB1OeJUyxiU+9s88k3pfHvDagGEp0M= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.13 h1:OPLEkmhXf6xFPiz0bLeDArZIDx1NNS4oJyG4nv3Gct0= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.13/go.mod h1:gpAbvyDGQFozTEmlTFO8XcQKHzubdq0LzRyJpG6MiXM= +github.com/aws/aws-sdk-go-v2/config v1.18.41 h1:Go7z97YDsBJVNAaL7pDPKB6LeHEsAkHmFe+CeK30fUQ= +github.com/aws/aws-sdk-go-v2/config v1.18.41/go.mod h1:+yR45+A0LIMKT8bWOKo90Hy9rSrovEmEKoPKLmmVec8= 
+github.com/aws/aws-sdk-go-v2/credentials v1.13.39 h1:UnwBXDIHKDaejSXaRzKR57IdGCizk+z1DEhnsFpus7Q= +github.com/aws/aws-sdk-go-v2/credentials v1.13.39/go.mod h1:OJ9P239A90TnglJEF3qofKiNeEM6PCV/m+aNGV5WC24= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.11 h1:uDZJF1hu0EVT/4bogChk8DyjSF6fof6uL/0Y26Ma7Fg= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.11/go.mod h1:TEPP4tENqBGO99KwVpV9MlOX4NSrSLP8u3KRy2CDwA8= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.41 h1:22dGT7PneFMx4+b3pz7lMTRyN8ZKH7M2cW4GP9yUS2g= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.41/go.mod h1:CrObHAuPneJBlfEJ5T3szXOUkLEThaGfvnhTf33buas= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.35 h1:SijA0mgjV8E+8G45ltVHs0fvKpTj8xmZJ3VwhGKtUSI= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.35/go.mod h1:SJC1nEVVva1g3pHAIdCp7QsRIkMmLAgoDquQ9Rr8kYw= +github.com/aws/aws-sdk-go-v2/internal/ini v1.3.42 h1:GPUcE/Yq7Ur8YSUk6lVkoIMWnJNO0HT18GUzCWCgCI0= +github.com/aws/aws-sdk-go-v2/internal/ini v1.3.42/go.mod h1:rzfdUlfA+jdgLDmPKjd3Chq9V7LVLYo1Nz++Wb91aRo= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.1.4 h1:6lJvvkQ9HmbHZ4h/IEwclwv2mrTW8Uq1SOB/kXy0mfw= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.1.4/go.mod h1:1PrKYwxTM+zjpw9Y41KFtoJCQrJ34Z47Y4VgVbfndjo= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.14 h1:m0QTSI6pZYJTk5WSKx3fm5cNW/DCicVzULBgU/6IyD0= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.14/go.mod h1:dDilntgHy9WnHXsh7dDtUPgHKEfTJIBUTHM8OWm0f/0= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.36 h1:eev2yZX7esGRjqRbnVk1UxMLw4CyVZDpZXRCcy75oQk= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.36/go.mod h1:lGnOkH9NJATw0XEPcAknFBj3zzNTEGRHtSw+CwC1YTg= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.35 h1:CdzPW9kKitgIiLV1+MHobfR5Xg25iYnyzWZhyQuSlDI= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.35/go.mod h1:QGF2Rs33W5MaN9gYdEQOBBFPLwTZkEhRwI33f7KIG0o= 
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.15.4 h1:v0jkRigbSD6uOdwcaUQmgEwG1BkPfAPDqaeNt/29ghg= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.15.4/go.mod h1:LhTyt8J04LL+9cIt7pYJ5lbS/U98ZmXovLOR/4LUsk8= +github.com/aws/aws-sdk-go-v2/service/s3 v1.39.0 h1:VZ2WMkKLio5tVjYfThcy5+pb6YHGd6B6egq75FfM6hU= +github.com/aws/aws-sdk-go-v2/service/s3 v1.39.0/go.mod h1:rDGMZA7f4pbmTtPOk5v5UM2lmX6UAbRnMDJeDvnH7AM= +github.com/aws/aws-sdk-go-v2/service/sso v1.14.0 h1:AR/hlTsCyk1CwlyKnPFvIMvnONydRjDDRT9OGb0i+/g= +github.com/aws/aws-sdk-go-v2/service/sso v1.14.0/go.mod h1:fIAwKQKBFu90pBxx07BFOMJLpRUGu8VOzLJakeY+0K4= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.17.0 h1:UniOmlPJelksyP5dGjfRoFTmLDy4/o0HH1lK2Op7zC8= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.17.0/go.mod h1:yygr8ACQRY2PrEcy3xsUI357stq2AxnFM6DIsR9lij4= +github.com/aws/aws-sdk-go-v2/service/sts v1.22.0 h1:s4bioTgjSFRwOoyEFzAVCmFmoowBgjTR8gkrF/sQ4wk= +github.com/aws/aws-sdk-go-v2/service/sts v1.22.0/go.mod h1:VC7JDqsqiwXukYEDjoHh9U0fOJtNWh04FPQz4ct4GGU= +github.com/aws/smithy-go v1.14.2 h1:MJU9hqBGbvWZdApzpvoF2WAIJDbtjK2NDJSiJP7HblQ= +github.com/aws/smithy-go v1.14.2/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= +github.com/bazelbuild/remote-apis v0.0.0-20230822133051-6c32c3b917cc h1:TPwjNpCdoO7TcTPPMHEkrrlSwd8g2XVf3qflmnivvsU= +github.com/bazelbuild/remote-apis v0.0.0-20230822133051-6c32c3b917cc/go.mod h1:ry8Y6CkQqCVcYsjPOlLXDX2iRVjOnjogdNwhvHmRcz8= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/buildbarn/bb-storage v0.0.0-20231008111112-ba53c0ad05f2 h1:Nco5Az9+h/7a2KdSxFOPkC4ITLlsow1kYskL2eneKRo= +github.com/buildbarn/bb-storage v0.0.0-20231008111112-ba53c0ad05f2/go.mod h1:aCpDc/Uzx5GcSRTrB7NHzx79UkvodGmRvevmVuRjQxY= +github.com/buildbarn/go-xdr 
v0.0.0-20231002195348-0d2d95eab08c h1:lstSRIB5zQnvkSNjzUW8NS5Ox1u/sjL5gksFQ9VQUzo= +github.com/buildbarn/go-xdr v0.0.0-20231002195348-0d2d95eab08c/go.mod h1:VwInghBSUyPtNBhl7o2oCUnxOCTGgySJnRTO1Kh7XuI= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/fxtlabs/primes v0.0.0-20150821004651-dad82d10a449 h1:HOYnhuVrhAVGKdg3rZapII640so7QfXQmkLkefUN/uM= +github.com/fxtlabs/primes v0.0.0-20150821004651-dad82d10a449/go.mod h1:i+vbdOOivRRh2j+WwBkjZXloGN/+KAqfKDwNfUJeugc= +github.com/go-jose/go-jose/v3 v3.0.0 
h1:s6rrhirfEP/CGIoc6p+PZAeogN2SxKav6Wp7+dyMWVo= +github.com/go-jose/go-jose/v3 v3.0.0/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= +github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod 
h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-jsonnet v0.20.0 h1:WG4TTSARuV7bSm4PMB4ohjxe33IHT5WVTrJSU33uT4g= +github.com/google/go-jsonnet v0.20.0/go.mod 
h1:VbgWF9JX7ztlv770x/TolZNGGFfiHEVx9G6ca2eUmeA= +github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= +github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= +github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.2.5 h1:UR4rDjcgpgEnqpIEvkiqTYKBCKLNmlge2eVjoZfySzM= +github.com/googleapis/enterprise-certificate-proxy v0.2.5/go.mod h1:RxW0N9901Cko1VOCW3SXCpWP+mlIEkk2tP7jnHy9a3w= +github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas= +github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU= +github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= +github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI= +github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.1 h1:RoziI+96HlQWrbaVhgOOdFYUHtX81pwA6tCgDS9FNRo= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.1/go.mod h1:Rj8lEaVgLiPn1jTMVXEhATiZhuyXJq167bMYPbJM1CY= +github.com/hanwen/go-fuse/v2 v2.4.0 h1:12OhD7CkXXQdvxG2osIdBQLdXh+nmLXY9unkUIe/xaU= +github.com/hanwen/go-fuse/v2 v2.4.0/go.mod h1:xKwi1cF7nXAOBCXujD5ie0ZKsxc8GGSA1rlMJc+8IJs= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jmespath/go-jmespath v0.4.0/go.mod 
h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= +github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.17.0 h1:Rnbp4K9EjcDuVuHtd0dgA4qNuv9yKDYKK1ulpJwgrqM= +github.com/klauspost/compress v1.17.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= +github.com/lazybeaver/xorshift v0.0.0-20170702203709-ce511d4823dd h1:TfmftEfB1zJiDTFi3Qw1xlbEbfJPKUhEDC19clfBMb8= +github.com/lazybeaver/xorshift v0.0.0-20170702203709-ce511d4823dd/go.mod h1:qXyNSomGEqu0M7ewNl3CLgle09PFHk8++5NrBWCz7+Q= +github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= +github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= 
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q= +github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16 h1:v7DLqVdK4VrYkVD5diGdl4sxJurKJEMnODWRJlxV9oM= +github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= +github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= +github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= +github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI= +github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= 
+github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.44.0 h1:b8xjZxHbLrXAum4SxJd1Rlm7Y/fKaB+6ACI7/e5EfSA= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.44.0/go.mod h1:1ei0a32xOGkFoySu7y1DAHfcuIhC0pNZpvY2huXuMy4= +go.opentelemetry.io/contrib/propagators/b3 v1.19.0 h1:ulz44cpm6V5oAeg5Aw9HyqGFMS6XM7untlMEhD7YzzA= +go.opentelemetry.io/contrib/propagators/b3 v1.19.0/go.mod h1:OzCmE2IVS+asTI+odXQstRGVfXQ4bXv9nMBRK0nNyqQ= +go.opentelemetry.io/otel v1.19.0 h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs= +go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY= +go.opentelemetry.io/otel/exporters/jaeger v1.17.0 h1:D7UpUy2Xc2wsi1Ras6V40q806WM07rqoCWzXu7Sqy+4= +go.opentelemetry.io/otel/exporters/jaeger v1.17.0/go.mod h1:nPCqOnEH9rNLKqH/+rrUjiMzHJdV1BlpKcTwRTyKkKI= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.18.0 h1:IAtl+7gua134xcV3NieDhJHjjOVeJhXAnYf/0hswjUY= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.18.0/go.mod h1:w+pXobnBzh95MNIkeIuAKcHe/Uu/CX2PKIvBP6ipKRA= +go.opentelemetry.io/otel/metric v1.19.0 h1:aTzpGtV0ar9wlV4Sna9sdJyII5jTVJEvKETPiOKwvpE= +go.opentelemetry.io/otel/metric 
v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8= +go.opentelemetry.io/otel/sdk v1.18.0 h1:e3bAB0wB3MljH38sHzpV/qWrOTCFrdZF2ct9F8rBkcY= +go.opentelemetry.io/otel/sdk v1.18.0/go.mod h1:1RCygWV7plY2KmdskZEDDBs4tJeHG92MdHZIluiYs/M= +go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg= +go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo= +go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= +go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.13.0 h1:mvySKfSWJ+UKUii46M40LOvyWfN0s2U+46/jDd0e6Ck= +golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint 
v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210505214959-0714010a04ed/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.15.0 h1:ugBLEUaxABaB5AJqW9enI0ACdci2RUd4eP51NTBvuJ8= +golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod 
h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.12.0 h1:smVPGxink+n1ZI5pkQa8y6fZT0RW0MgCO5bFpepy4B4= +golang.org/x/oauth2 v0.12.0/go.mod h1:A74bZ3aGXgCY0qaIC9Ahg6Lglin4AMAco8cIv9baba4= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.4.0 h1:zxkM55ReGkDlKSM+Fu41A+zmbZuaPVbGMzvvdUPznYQ= +golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210507014357-30e306a8bba5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +google.golang.org/api v0.142.0 h1:mf+7EJ94fi5ZcnpPy+m0Yv2dkz8bKm+UL0snTCuwXlY= +google.golang.org/api v0.142.0/go.mod h1:zJAN5o6HRqR7O+9qJUFOWrZkYE66RH+efPBdTLA4xBA= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= 
+google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20210506142907-4a47615972c2/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto v0.0.0-20231002182017-d307bd883b97 h1:SeZZZx0cP0fqUyA+oRzP9k7cSwJlvDFiROO72uwD6i0= +google.golang.org/genproto v0.0.0-20231002182017-d307bd883b97/go.mod h1:t1VqOqqvce95G3hIDCT5FeO3YUc6Q4Oe24L/+rNMxRk= +google.golang.org/genproto/googleapis/api v0.0.0-20230920204549-e6e6cdab5c13 h1:U7+wNaVuSTaUqNvK2+osJ9ejEZxbjHHk8F2b6Hpx0AE= +google.golang.org/genproto/googleapis/api v0.0.0-20230920204549-e6e6cdab5c13/go.mod h1:RdyHbowztCGQySiCvQPgWQWgWhGnouTdCflKoDBt32U= +google.golang.org/genproto/googleapis/bytestream v0.0.0-20230920204549-e6e6cdab5c13 h1:AzcXcS6RbpBm65S0+/F78J9hFCL0/GZWp8oCRZod780= +google.golang.org/genproto/googleapis/bytestream v0.0.0-20230920204549-e6e6cdab5c13/go.mod h1:qDbnxtViX5J6CvFbxeNUSzKgVlDLJ/6L+caxye9+Flo= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231009173412-8bfb1ae86b6c h1:jHkCUWkseRf+W+edG5hMzr/Uh1xkDREY4caybAq4dpY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231009173412-8bfb1ae86b6c/go.mod h1:4cYg8o5yUbm77w8ZX00LhMVNl/YVBFJRYWDc0uYWMs0= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= 
+google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.58.2 h1:SXUpjxeVF3FKrTYQI4f4KvbGD5u2xccdYdurwowix5I= +google.golang.org/grpc v1.58.2/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= +google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 
h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= diff --git a/go_dependencies.bzl b/go_dependencies.bzl new file mode 100644 index 0000000..1dc284c --- /dev/null +++ b/go_dependencies.bzl @@ -0,0 +1,1615 @@ +load("@bazel_gazelle//:deps.bzl", "go_repository") + +def go_dependencies(): + go_repository( + name = "cc_mvdan_gofumpt", + importpath = "mvdan.cc/gofumpt", + sum = "h1:0EQ+Z56k8tXjj/6TQD25BFNKQXpCvT0rnansIc7Ug5E=", + version = "v0.5.0", + ) + go_repository( + name = "co_honnef_go_tools", + importpath = "honnef.co/go/tools", + sum = "h1:/hemPrYIhOhy8zYrNj+069zDB68us2sMGsfkFJO0iZs=", + version = "v0.0.0-20190523083050-ea95bdfd59fc", + ) + go_repository( + name = "com_github_alecthomas_kingpin_v2", + importpath = "github.com/alecthomas/kingpin/v2", + sum = "h1:H0aULhgmSzN8xQ3nX1uxtdlTHYoPLu5AhHxWrKI6ocU=", + version = "v2.3.2", + ) + go_repository( + name = "com_github_alecthomas_units", + importpath = "github.com/alecthomas/units", + sum = "h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc=", + version = "v0.0.0-20211218093645-b94a6e3cc137", + ) + go_repository( + name = "com_github_antihax_optional", + importpath = "github.com/antihax/optional", + sum = "h1:xK2lYat7ZLaVVcIuj82J8kIro4V6kDe0AUDFboUCwcg=", + version = "v1.0.0", + ) 
+ go_repository( + name = "com_github_antlr_antlr4_runtime_go_antlr", + importpath = "github.com/antlr/antlr4/runtime/Go/antlr", + sum = "h1:rfAZfq1LjIhVCFsBp2MoXxVvgtCyZUOtzsV8azhR1Jk=", + version = "v0.0.0-20220722194653-14703f21b580", + ) + go_repository( + name = "com_github_aohorodnyk_mimeheader", + importpath = "github.com/aohorodnyk/mimeheader", + sum = "h1:WCV4NQjtbqnd2N3FT5MEPesan/lfvaLYmt5v4xSaX/M=", + version = "v0.0.6", + ) + go_repository( + name = "com_github_aws_aws_sdk_go_v2", + importpath = "github.com/aws/aws-sdk-go-v2", + sum = "h1:gMT0IW+03wtYJhRqTVYn0wLzwdnK9sRMcxmtfGzRdJc=", + version = "v1.21.0", + ) + go_repository( + name = "com_github_aws_aws_sdk_go_v2_aws_protocol_eventstream", + importpath = "github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream", + sum = "h1:OPLEkmhXf6xFPiz0bLeDArZIDx1NNS4oJyG4nv3Gct0=", + version = "v1.4.13", + ) + go_repository( + name = "com_github_aws_aws_sdk_go_v2_config", + importpath = "github.com/aws/aws-sdk-go-v2/config", + sum = "h1:Go7z97YDsBJVNAaL7pDPKB6LeHEsAkHmFe+CeK30fUQ=", + version = "v1.18.41", + ) + go_repository( + name = "com_github_aws_aws_sdk_go_v2_credentials", + importpath = "github.com/aws/aws-sdk-go-v2/credentials", + sum = "h1:UnwBXDIHKDaejSXaRzKR57IdGCizk+z1DEhnsFpus7Q=", + version = "v1.13.39", + ) + go_repository( + name = "com_github_aws_aws_sdk_go_v2_feature_ec2_imds", + importpath = "github.com/aws/aws-sdk-go-v2/feature/ec2/imds", + sum = "h1:uDZJF1hu0EVT/4bogChk8DyjSF6fof6uL/0Y26Ma7Fg=", + version = "v1.13.11", + ) + go_repository( + name = "com_github_aws_aws_sdk_go_v2_internal_configsources", + importpath = "github.com/aws/aws-sdk-go-v2/internal/configsources", + sum = "h1:22dGT7PneFMx4+b3pz7lMTRyN8ZKH7M2cW4GP9yUS2g=", + version = "v1.1.41", + ) + go_repository( + name = "com_github_aws_aws_sdk_go_v2_internal_endpoints_v2", + importpath = "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2", + sum = "h1:SijA0mgjV8E+8G45ltVHs0fvKpTj8xmZJ3VwhGKtUSI=", + version = "v2.4.35", + ) + 
go_repository( + name = "com_github_aws_aws_sdk_go_v2_internal_ini", + importpath = "github.com/aws/aws-sdk-go-v2/internal/ini", + sum = "h1:GPUcE/Yq7Ur8YSUk6lVkoIMWnJNO0HT18GUzCWCgCI0=", + version = "v1.3.42", + ) + go_repository( + name = "com_github_aws_aws_sdk_go_v2_internal_v4a", + importpath = "github.com/aws/aws-sdk-go-v2/internal/v4a", + sum = "h1:6lJvvkQ9HmbHZ4h/IEwclwv2mrTW8Uq1SOB/kXy0mfw=", + version = "v1.1.4", + ) + go_repository( + name = "com_github_aws_aws_sdk_go_v2_service_internal_accept_encoding", + importpath = "github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding", + sum = "h1:m0QTSI6pZYJTk5WSKx3fm5cNW/DCicVzULBgU/6IyD0=", + version = "v1.9.14", + ) + go_repository( + name = "com_github_aws_aws_sdk_go_v2_service_internal_checksum", + importpath = "github.com/aws/aws-sdk-go-v2/service/internal/checksum", + sum = "h1:eev2yZX7esGRjqRbnVk1UxMLw4CyVZDpZXRCcy75oQk=", + version = "v1.1.36", + ) + go_repository( + name = "com_github_aws_aws_sdk_go_v2_service_internal_presigned_url", + importpath = "github.com/aws/aws-sdk-go-v2/service/internal/presigned-url", + sum = "h1:CdzPW9kKitgIiLV1+MHobfR5Xg25iYnyzWZhyQuSlDI=", + version = "v1.9.35", + ) + go_repository( + name = "com_github_aws_aws_sdk_go_v2_service_internal_s3shared", + importpath = "github.com/aws/aws-sdk-go-v2/service/internal/s3shared", + sum = "h1:v0jkRigbSD6uOdwcaUQmgEwG1BkPfAPDqaeNt/29ghg=", + version = "v1.15.4", + ) + go_repository( + name = "com_github_aws_aws_sdk_go_v2_service_s3", + importpath = "github.com/aws/aws-sdk-go-v2/service/s3", + sum = "h1:VZ2WMkKLio5tVjYfThcy5+pb6YHGd6B6egq75FfM6hU=", + version = "v1.39.0", + ) + go_repository( + name = "com_github_aws_aws_sdk_go_v2_service_sso", + importpath = "github.com/aws/aws-sdk-go-v2/service/sso", + sum = "h1:AR/hlTsCyk1CwlyKnPFvIMvnONydRjDDRT9OGb0i+/g=", + version = "v1.14.0", + ) + go_repository( + name = "com_github_aws_aws_sdk_go_v2_service_ssooidc", + importpath = "github.com/aws/aws-sdk-go-v2/service/ssooidc", + sum = 
"h1:UniOmlPJelksyP5dGjfRoFTmLDy4/o0HH1lK2Op7zC8=", + version = "v1.17.0", + ) + go_repository( + name = "com_github_aws_aws_sdk_go_v2_service_sts", + importpath = "github.com/aws/aws-sdk-go-v2/service/sts", + sum = "h1:s4bioTgjSFRwOoyEFzAVCmFmoowBgjTR8gkrF/sQ4wk=", + version = "v1.22.0", + ) + go_repository( + name = "com_github_aws_smithy_go", + importpath = "github.com/aws/smithy-go", + sum = "h1:MJU9hqBGbvWZdApzpvoF2WAIJDbtjK2NDJSiJP7HblQ=", + version = "v1.14.2", + ) + go_repository( + name = "com_github_bazelbuild_remote_apis", + importpath = "github.com/bazelbuild/remote-apis", + patches = ["@com_github_buildbarn_bb_storage//:patches/com_github_bazelbuild_remote_apis/golang.diff"], + sum = "h1:TPwjNpCdoO7TcTPPMHEkrrlSwd8g2XVf3qflmnivvsU=", + version = "v0.0.0-20230822133051-6c32c3b917cc", + ) + go_repository( + name = "com_github_benbjohnson_clock", + importpath = "github.com/benbjohnson/clock", + sum = "h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=", + version = "v1.1.0", + ) + go_repository( + name = "com_github_beorn7_perks", + importpath = "github.com/beorn7/perks", + sum = "h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=", + version = "v1.0.1", + ) + go_repository( + name = "com_github_buildbarn_bb_storage", + importpath = "github.com/buildbarn/bb-storage", + sum = "h1:Nco5Az9+h/7a2KdSxFOPkC4ITLlsow1kYskL2eneKRo=", + version = "v0.0.0-20231008111112-ba53c0ad05f2", + ) + go_repository( + name = "com_github_buildbarn_go_xdr", + importpath = "github.com/buildbarn/go-xdr", + sum = "h1:lstSRIB5zQnvkSNjzUW8NS5Ox1u/sjL5gksFQ9VQUzo=", + version = "v0.0.0-20231002195348-0d2d95eab08c", + ) + go_repository( + name = "com_github_burntsushi_toml", + importpath = "github.com/BurntSushi/toml", + sum = "h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=", + version = "v0.3.1", + ) + go_repository( + name = "com_github_census_instrumentation_opencensus_proto", + build_extra_args = ["-exclude=src"], + importpath = "github.com/census-instrumentation/opencensus-proto", + sum 
= "h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g=", + version = "v0.4.1", + ) + go_repository( + name = "com_github_cespare_xxhash_v2", + importpath = "github.com/cespare/xxhash/v2", + sum = "h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=", + version = "v2.2.0", + ) + go_repository( + name = "com_github_client9_misspell", + importpath = "github.com/client9/misspell", + sum = "h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI=", + version = "v0.3.4", + ) + go_repository( + name = "com_github_cncf_udpa_go", + importpath = "github.com/cncf/udpa/go", + sum = "h1:QQ3GSy+MqSHxm/d8nCtnAiZdYFd45cYZPs8vOOIYKfk=", + version = "v0.0.0-20220112060539-c52dc94e7fbe", + ) + go_repository( + name = "com_github_cncf_xds_go", + importpath = "github.com/cncf/xds/go", + sum = "h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k=", + version = "v0.0.0-20230607035331-e9ce68804cb4", + ) + go_repository( + name = "com_github_davecgh_go_spew", + importpath = "github.com/davecgh/go-spew", + sum = "h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=", + version = "v1.1.1", + ) + go_repository( + name = "com_github_dgryski_go_rendezvous", + importpath = "github.com/dgryski/go-rendezvous", + sum = "h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=", + version = "v0.0.0-20200823014737-9f7001d12a5f", + ) + go_repository( + name = "com_github_envoyproxy_go_control_plane", + importpath = "github.com/envoyproxy/go-control-plane", + sum = "h1:wSUXTlLfiAQRWs2F+p+EKOY9rUyis1MyGqJ2DIk5HpM=", + version = "v0.11.1", + ) + go_repository( + name = "com_github_envoyproxy_protoc_gen_validate", + importpath = "github.com/envoyproxy/protoc-gen-validate", + sum = "h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA=", + version = "v1.0.2", + ) + go_repository( + name = "com_github_fatih_color", + importpath = "github.com/fatih/color", + sum = "h1:mRhaKNwANqRgUBGKmnI5ZxEk7QXmjQeCcuYFMX2bfcc=", + version = "v1.12.0", + ) + go_repository( + name = "com_github_fxtlabs_primes", + importpath = "github.com/fxtlabs/primes", + sum = 
"h1:HOYnhuVrhAVGKdg3rZapII640so7QfXQmkLkefUN/uM=", + version = "v0.0.0-20150821004651-dad82d10a449", + ) + go_repository( + name = "com_github_go_jose_go_jose_v3", + importpath = "github.com/go-jose/go-jose/v3", + sum = "h1:s6rrhirfEP/CGIoc6p+PZAeogN2SxKav6Wp7+dyMWVo=", + version = "v3.0.0", + ) + go_repository( + name = "com_github_go_kit_log", + importpath = "github.com/go-kit/log", + sum = "h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU=", + version = "v0.2.1", + ) + go_repository( + name = "com_github_go_logfmt_logfmt", + importpath = "github.com/go-logfmt/logfmt", + sum = "h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA=", + version = "v0.5.1", + ) + go_repository( + name = "com_github_go_logr_logr", + importpath = "github.com/go-logr/logr", + sum = "h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=", + version = "v1.2.4", + ) + go_repository( + name = "com_github_go_logr_stdr", + importpath = "github.com/go-logr/stdr", + sum = "h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=", + version = "v1.2.2", + ) + go_repository( + name = "com_github_go_redis_redis_extra_rediscmd", + importpath = "github.com/go-redis/redis/extra/rediscmd", + sum = "h1:A3bhCsCKsedClEH9/jYlcKqOuBoeeV+H0yDie5t+a6w=", + version = "v0.2.0", + ) + go_repository( + name = "com_github_go_redis_redis_extra_redisotel", + importpath = "github.com/go-redis/redis/extra/redisotel", + sum = "h1:8rrizwFAUUeMgmelyiQi9KeFwmpQhay9E+/rE6qHsBM=", + version = "v0.3.0", + ) + go_repository( + name = "com_github_go_redis_redis_v8", + importpath = "github.com/go-redis/redis/v8", + sum = "h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI=", + version = "v8.11.5", + ) + go_repository( + name = "com_github_go_stack_stack", + importpath = "github.com/go-stack/stack", + sum = "h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=", + version = "v1.8.0", + ) + go_repository( + name = "com_github_gogo_protobuf", + importpath = "github.com/gogo/protobuf", + sum = "h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=", + version = 
"v1.3.2", + ) + go_repository( + name = "com_github_golang_glog", + importpath = "github.com/golang/glog", + sum = "h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE=", + version = "v1.1.0", + ) + go_repository( + name = "com_github_golang_groupcache", + importpath = "github.com/golang/groupcache", + sum = "h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=", + version = "v0.0.0-20210331224755-41bb18bfe9da", + ) + go_repository( + name = "com_github_golang_mock", + importpath = "github.com/golang/mock", + sum = "h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc=", + version = "v1.6.0", + ) + go_repository( + name = "com_github_golang_protobuf", + importpath = "github.com/golang/protobuf", + patches = ["@com_github_buildbarn_bb_storage//:patches/com_github_golang_protobuf/service-registrar.diff"], + sum = "h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=", + version = "v1.5.3", + ) + go_repository( + name = "com_github_golang_snappy", + importpath = "github.com/golang/snappy", + sum = "h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=", + version = "v0.0.4", + ) + go_repository( + name = "com_github_google_go_cmp", + importpath = "github.com/google/go-cmp", + sum = "h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=", + version = "v0.5.9", + ) + go_repository( + name = "com_github_google_go_jsonnet", + build_file_generation = "on", + importpath = "github.com/google/go-jsonnet", + sum = "h1:WG4TTSARuV7bSm4PMB4ohjxe33IHT5WVTrJSU33uT4g=", + version = "v0.20.0", + ) + go_repository( + name = "com_github_google_go_pkcs11", + importpath = "github.com/google/go-pkcs11", + sum = "h1:5meDPB26aJ98f+K9G21f0AqZwo/S5BJMJh8nuhMbdsI=", + version = "v0.2.0", + ) + go_repository( + name = "com_github_google_martian_v3", + importpath = "github.com/google/martian/v3", + sum = "h1:IqNFLAmvJOgVlpdEBiQbDc2EwKW77amAycfTuWKdfvw=", + version = "v3.3.2", + ) + go_repository( + name = "com_github_google_s2a_go", + importpath = "github.com/google/s2a-go", + sum = 
"h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o=", + version = "v0.1.7", + ) + go_repository( + name = "com_github_google_uuid", + importpath = "github.com/google/uuid", + sum = "h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4=", + version = "v1.3.1", + ) + go_repository( + name = "com_github_googleapis_enterprise_certificate_proxy", + importpath = "github.com/googleapis/enterprise-certificate-proxy", + sum = "h1:UR4rDjcgpgEnqpIEvkiqTYKBCKLNmlge2eVjoZfySzM=", + version = "v0.2.5", + ) + go_repository( + name = "com_github_googleapis_gax_go_v2", + build_file_proto_mode = "disable", + importpath = "github.com/googleapis/gax-go/v2", + sum = "h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas=", + version = "v2.12.0", + ) + go_repository( + name = "com_github_gordonklaus_ineffassign", + importpath = "github.com/gordonklaus/ineffassign", + sum = "h1:PVRE9d4AQKmbelZ7emNig1+NT27DUmKZn5qXxfio54U=", + version = "v0.0.0-20210914165742-4cc7213b9bc8", + ) + go_repository( + name = "com_github_gorilla_mux", + importpath = "github.com/gorilla/mux", + sum = "h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=", + version = "v1.8.0", + ) + go_repository( + name = "com_github_grpc_ecosystem_go_grpc_middleware", + importpath = "github.com/grpc-ecosystem/go-grpc-middleware", + sum = "h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI=", + version = "v1.4.0", + ) + go_repository( + name = "com_github_grpc_ecosystem_go_grpc_prometheus", + importpath = "github.com/grpc-ecosystem/go-grpc-prometheus", + patches = ["@com_github_buildbarn_bb_storage//:patches/com_github_grpc_ecosystem_go_grpc_prometheus/client-metrics-prevent-handled-twice.diff"], + sum = "h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=", + version = "v1.2.0", + ) + go_repository( + name = "com_github_grpc_ecosystem_grpc_gateway_v2", + importpath = "github.com/grpc-ecosystem/grpc-gateway/v2", + replace = "github.com/grpc-ecosystem/grpc-gateway/v2", + sum = "h1:RoziI+96HlQWrbaVhgOOdFYUHtX81pwA6tCgDS9FNRo=", + version = "v2.16.1", + ) + 
go_repository( + name = "com_github_hanwen_go_fuse_v2", + importpath = "github.com/hanwen/go-fuse/v2", + patches = [ + "//:patches/com_github_hanwen_go_fuse_v2/direntrylist-offsets-and-testability.diff", + "//:patches/com_github_hanwen_go_fuse_v2/notify-testability.diff", + "//:patches/com_github_hanwen_go_fuse_v2/writeback-cache.diff", + ], + sum = "h1:12OhD7CkXXQdvxG2osIdBQLdXh+nmLXY9unkUIe/xaU=", + version = "v2.4.0", + ) + go_repository( + name = "com_github_jmespath_go_jmespath", + importpath = "github.com/jmespath/go-jmespath", + sum = "h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=", + version = "v0.4.0", + ) + go_repository( + name = "com_github_jmespath_go_jmespath_internal_testify", + importpath = "github.com/jmespath/go-jmespath/internal/testify", + sum = "h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=", + version = "v1.5.1", + ) + go_repository( + name = "com_github_jpillora_backoff", + importpath = "github.com/jpillora/backoff", + sum = "h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA=", + version = "v1.0.0", + ) + go_repository( + name = "com_github_json_iterator_go", + importpath = "github.com/json-iterator/go", + sum = "h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=", + version = "v1.1.12", + ) + go_repository( + name = "com_github_julienschmidt_httprouter", + importpath = "github.com/julienschmidt/httprouter", + sum = "h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U=", + version = "v1.3.0", + ) + go_repository( + name = "com_github_kballard_go_shellquote", + importpath = "github.com/kballard/go-shellquote", + sum = "h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs=", + version = "v0.0.0-20180428030007-95032a82bc51", + ) + go_repository( + name = "com_github_kisielk_errcheck", + importpath = "github.com/kisielk/errcheck", + sum = "h1:e8esj/e4R+SAOwFwN+n3zr0nYeCyeweozKfO23MvHzY=", + version = "v1.5.0", + ) + go_repository( + name = "com_github_kisielk_gotool", + importpath = "github.com/kisielk/gotool", + sum = 
"h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg=", + version = "v1.0.0", + ) + go_repository( + name = "com_github_klauspost_compress", + importpath = "github.com/klauspost/compress", + sum = "h1:Rnbp4K9EjcDuVuHtd0dgA4qNuv9yKDYKK1ulpJwgrqM=", + version = "v1.17.0", + ) + go_repository( + name = "com_github_konsorten_go_windows_terminal_sequences", + importpath = "github.com/konsorten/go-windows-terminal-sequences", + sum = "h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=", + version = "v1.0.1", + ) + go_repository( + name = "com_github_kr_pretty", + importpath = "github.com/kr/pretty", + sum = "h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=", + version = "v0.3.1", + ) + go_repository( + name = "com_github_kr_pty", + importpath = "github.com/kr/pty", + sum = "h1:VkoXIwSboBpnk99O/KFauAEILuNHv5DVFKZMBN/gUgw=", + version = "v1.1.1", + ) + go_repository( + name = "com_github_kr_text", + importpath = "github.com/kr/text", + sum = "h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=", + version = "v0.1.0", + ) + go_repository( + name = "com_github_kylelemons_godebug", + importpath = "github.com/kylelemons/godebug", + sum = "h1:MtvEpTB6LX3vkb4ax0b5D2DHbNAUsen0Gx5wZoq3lV4=", + version = "v0.0.0-20170820004349-d65d576e9348", + ) + go_repository( + name = "com_github_lazybeaver_xorshift", + importpath = "github.com/lazybeaver/xorshift", + sum = "h1:TfmftEfB1zJiDTFi3Qw1xlbEbfJPKUhEDC19clfBMb8=", + version = "v0.0.0-20170702203709-ce511d4823dd", + ) + go_repository( + name = "com_github_mattn_go_colorable", + importpath = "github.com/mattn/go-colorable", + sum = "h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8=", + version = "v0.1.8", + ) + go_repository( + name = "com_github_mattn_go_isatty", + importpath = "github.com/mattn/go-isatty", + sum = "h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=", + version = "v0.0.12", + ) + go_repository( + name = "com_github_matttproud_golang_protobuf_extensions", + importpath = "github.com/matttproud/golang_protobuf_extensions", + sum = 
"h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=", + version = "v1.0.4", + ) + go_repository( + name = "com_github_moby_sys_mountinfo", + importpath = "github.com/moby/sys/mountinfo", + sum = "h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78=", + version = "v0.6.2", + ) + go_repository( + name = "com_github_modern_go_concurrent", + importpath = "github.com/modern-go/concurrent", + sum = "h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=", + version = "v0.0.0-20180306012644-bacd9c7ef1dd", + ) + go_repository( + name = "com_github_modern_go_reflect2", + importpath = "github.com/modern-go/reflect2", + sum = "h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=", + version = "v1.0.2", + ) + go_repository( + name = "com_github_mwitkow_go_conntrack", + importpath = "github.com/mwitkow/go-conntrack", + sum = "h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU=", + version = "v0.0.0-20190716064945-2f068394615f", + ) + go_repository( + name = "com_github_opentracing_opentracing_go", + importpath = "github.com/opentracing/opentracing-go", + sum = "h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU=", + version = "v1.1.0", + ) + go_repository( + name = "com_github_pkg_errors", + importpath = "github.com/pkg/errors", + sum = "h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=", + version = "v0.8.1", + ) + go_repository( + name = "com_github_pmezard_go_difflib", + importpath = "github.com/pmezard/go-difflib", + sum = "h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=", + version = "v1.0.0", + ) + go_repository( + name = "com_github_prometheus_client_golang", + importpath = "github.com/prometheus/client_golang", + sum = "h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q=", + version = "v1.17.0", + ) + go_repository( + name = "com_github_prometheus_client_model", + importpath = "github.com/prometheus/client_model", + sum = "h1:v7DLqVdK4VrYkVD5diGdl4sxJurKJEMnODWRJlxV9oM=", + version = "v0.4.1-0.20230718164431-9a2bf3000d16", + ) + go_repository( + name = "com_github_prometheus_common", + importpath = 
"github.com/prometheus/common", + sum = "h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY=", + version = "v0.44.0", + ) + go_repository( + name = "com_github_prometheus_procfs", + importpath = "github.com/prometheus/procfs", + sum = "h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI=", + version = "v0.11.1", + ) + go_repository( + name = "com_github_rogpeppe_fastuuid", + importpath = "github.com/rogpeppe/fastuuid", + sum = "h1:Ppwyp6VYCF1nvBTXL3trRso7mXMlRrw9ooo375wvi2s=", + version = "v1.2.0", + ) + go_repository( + name = "com_github_rogpeppe_go_internal", + importpath = "github.com/rogpeppe/go-internal", + sum = "h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=", + version = "v1.10.0", + ) + go_repository( + name = "com_github_sergi_go_diff", + importpath = "github.com/sergi/go-diff", + sum = "h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0=", + version = "v1.1.0", + ) + go_repository( + name = "com_github_sirupsen_logrus", + importpath = "github.com/sirupsen/logrus", + sum = "h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=", + version = "v1.4.2", + ) + go_repository( + name = "com_github_spf13_pflag", + importpath = "github.com/spf13/pflag", + sum = "h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=", + version = "v1.0.5", + ) + go_repository( + name = "com_github_stretchr_objx", + importpath = "github.com/stretchr/objx", + sum = "h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=", + version = "v0.5.0", + ) + go_repository( + name = "com_github_stretchr_testify", + importpath = "github.com/stretchr/testify", + sum = "h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=", + version = "v1.8.4", + ) + go_repository( + name = "com_github_xhit_go_str2duration_v2", + importpath = "github.com/xhit/go-str2duration/v2", + sum = "h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8Ydu2Bstc=", + version = "v2.1.0", + ) + go_repository( + name = "com_github_yuin_goldmark", + importpath = "github.com/yuin/goldmark", + sum = "h1:ruQGxdhGHe7FWOJPT0mKs5+pD2Xs1Bm/kdGlHO04FmM=", + version = "v1.2.1", + ) + 
go_repository( + name = "com_google_cloud_go", + importpath = "cloud.google.com/go", + sum = "h1:tyNdfIxjzaWctIiLYOTalaLKZ17SI44SKFW26QbOhME=", + version = "v0.110.8", + ) + go_repository( + name = "com_google_cloud_go_accessapproval", + importpath = "cloud.google.com/go/accessapproval", + sum = "h1:/5YjNhR6lzCvmJZAnByYkfEgWjfAKwYP6nkuTk6nKFE=", + version = "v1.7.1", + ) + go_repository( + name = "com_google_cloud_go_accesscontextmanager", + importpath = "cloud.google.com/go/accesscontextmanager", + sum = "h1:WIAt9lW9AXtqw/bnvrEUaE8VG/7bAAeMzRCBGMkc4+w=", + version = "v1.8.1", + ) + go_repository( + name = "com_google_cloud_go_aiplatform", + importpath = "cloud.google.com/go/aiplatform", + sum = "h1:J89aj+lqwtjn0qpQBMVaiOmDxBkKDEKUwl+GL19RRpc=", + version = "v1.50.0", + ) + go_repository( + name = "com_google_cloud_go_analytics", + importpath = "cloud.google.com/go/analytics", + sum = "h1:TFBC1ZAqX9/jL56GEXdLrVe5vT3I22bDVWyDwZX4IEg=", + version = "v0.21.3", + ) + go_repository( + name = "com_google_cloud_go_apigateway", + importpath = "cloud.google.com/go/apigateway", + sum = "h1:aBSwCQPcp9rZ0zVEUeJbR623palnqtvxJlUyvzsKGQc=", + version = "v1.6.1", + ) + go_repository( + name = "com_google_cloud_go_apigeeconnect", + importpath = "cloud.google.com/go/apigeeconnect", + sum = "h1:6u/jj0P2c3Mcm+H9qLsXI7gYcTiG9ueyQL3n6vCmFJM=", + version = "v1.6.1", + ) + go_repository( + name = "com_google_cloud_go_apigeeregistry", + importpath = "cloud.google.com/go/apigeeregistry", + sum = "h1:hgq0ANLDx7t2FDZDJQrCMtCtddR/pjCqVuvQWGrQbXw=", + version = "v0.7.1", + ) + go_repository( + name = "com_google_cloud_go_appengine", + importpath = "cloud.google.com/go/appengine", + sum = "h1:J+aaUZ6IbTpBegXbmEsh8qZZy864ZVnOoWyfa1XSNbI=", + version = "v1.8.1", + ) + go_repository( + name = "com_google_cloud_go_area120", + importpath = "cloud.google.com/go/area120", + sum = "h1:wiOq3KDpdqXmaHzvZwKdpoM+3lDcqsI2Lwhyac7stss=", + version = "v0.8.1", + ) + go_repository( + name = 
"com_google_cloud_go_artifactregistry", + importpath = "cloud.google.com/go/artifactregistry", + sum = "h1:k6hNqab2CubhWlGcSzunJ7kfxC7UzpAfQ1UPb9PDCKI=", + version = "v1.14.1", + ) + go_repository( + name = "com_google_cloud_go_asset", + importpath = "cloud.google.com/go/asset", + sum = "h1:vlHdznX70eYW4V1y1PxocvF6tEwxJTTarwIGwOhFF3U=", + version = "v1.14.1", + ) + go_repository( + name = "com_google_cloud_go_assuredworkloads", + importpath = "cloud.google.com/go/assuredworkloads", + sum = "h1:yaO0kwS+SnhVSTF7BqTyVGt3DTocI6Jqo+S3hHmCwNk=", + version = "v1.11.1", + ) + go_repository( + name = "com_google_cloud_go_automl", + importpath = "cloud.google.com/go/automl", + sum = "h1:iP9iQurb0qbz+YOOMfKSEjhONA/WcoOIjt6/m+6pIgo=", + version = "v1.13.1", + ) + go_repository( + name = "com_google_cloud_go_baremetalsolution", + importpath = "cloud.google.com/go/baremetalsolution", + sum = "h1:3zztyuQHjfU0C0qEsI9LkC3kf5/TQQ3jUJhbmetUoRA=", + version = "v1.2.0", + ) + go_repository( + name = "com_google_cloud_go_batch", + importpath = "cloud.google.com/go/batch", + sum = "h1:/4ADpZKoKH300HN2SB6aI7lXX/0hnnbR74wxjLHkyQo=", + version = "v1.4.1", + ) + go_repository( + name = "com_google_cloud_go_beyondcorp", + importpath = "cloud.google.com/go/beyondcorp", + sum = "h1:VPg+fZXULQjs8LiMeWdLaB5oe8G9sEoZ0I0j6IMiG1Q=", + version = "v1.0.0", + ) + go_repository( + name = "com_google_cloud_go_bigquery", + importpath = "cloud.google.com/go/bigquery", + sum = "h1:hs44Xxov3XLWQiCx2J8lK5U/ihLqnpm4RVVl5fdtLLI=", + version = "v1.55.0", + ) + go_repository( + name = "com_google_cloud_go_billing", + importpath = "cloud.google.com/go/billing", + sum = "h1:CpagWXb/+QNye+vouomndbc4Gsr0uo+AGR24V16uk8Q=", + version = "v1.17.0", + ) + go_repository( + name = "com_google_cloud_go_binaryauthorization", + importpath = "cloud.google.com/go/binaryauthorization", + sum = "h1:7L6uUWo/xNCfdVNnnzh2M4x5YA732YPgqRdCG8aKVAU=", + version = "v1.7.0", + ) + go_repository( + name = 
"com_google_cloud_go_certificatemanager", + importpath = "cloud.google.com/go/certificatemanager", + sum = "h1:uKsohpE0hiobx1Eak9jNcPCznwfB6gvyQCcS28Ah9E8=", + version = "v1.7.1", + ) + go_repository( + name = "com_google_cloud_go_channel", + importpath = "cloud.google.com/go/channel", + sum = "h1:Hy2EaOiOB7BS1IJmg2lLilEo8uMfFWTy7RgjTzbUqjM=", + version = "v1.17.0", + ) + go_repository( + name = "com_google_cloud_go_cloudbuild", + importpath = "cloud.google.com/go/cloudbuild", + sum = "h1:YTMxmFra7eIjKFgnyQUxOwWNseNqeO38kGh7thy7v4s=", + version = "v1.14.0", + ) + go_repository( + name = "com_google_cloud_go_clouddms", + importpath = "cloud.google.com/go/clouddms", + sum = "h1:vTcaFaFZTZZ11gXB6aZHdAx+zn30P8YJw4X/S3NC+VQ=", + version = "v1.7.0", + ) + go_repository( + name = "com_google_cloud_go_cloudtasks", + importpath = "cloud.google.com/go/cloudtasks", + sum = "h1:cMh9Q6dkvh+Ry5LAPbD/U2aw6KAqdiU6FttwhbTo69w=", + version = "v1.12.1", + ) + go_repository( + name = "com_google_cloud_go_compute", + importpath = "cloud.google.com/go/compute", + sum = "h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY=", + version = "v1.23.0", + ) + go_repository( + name = "com_google_cloud_go_compute_metadata", + importpath = "cloud.google.com/go/compute/metadata", + sum = "h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=", + version = "v0.2.3", + ) + go_repository( + name = "com_google_cloud_go_contactcenterinsights", + importpath = "cloud.google.com/go/contactcenterinsights", + sum = "h1:YR2aPedGVQPpFBZXJnPkqRj8M//8veIZZH5ZvICoXnI=", + version = "v1.10.0", + ) + go_repository( + name = "com_google_cloud_go_container", + importpath = "cloud.google.com/go/container", + sum = "h1:SszQdI0qlyKsImz8/l26rpTZMyqvaH9yfua7rirDZvY=", + version = "v1.26.0", + ) + go_repository( + name = "com_google_cloud_go_containeranalysis", + importpath = "cloud.google.com/go/containeranalysis", + sum = "h1:/EsoP+UTIjvl4yqrLA4WgUG83kwQhqZmbXEfqirT2LM=", + version = "v0.11.0", + ) + go_repository( + name = 
"com_google_cloud_go_datacatalog", + importpath = "cloud.google.com/go/datacatalog", + sum = "h1:qGWrlYvWtK+8jD1jhwq5BsGoSr7S4/LOroV7LwXi00g=", + version = "v1.17.1", + ) + go_repository( + name = "com_google_cloud_go_dataflow", + importpath = "cloud.google.com/go/dataflow", + sum = "h1:VzG2tqsk/HbmOtq/XSfdF4cBvUWRK+S+oL9k4eWkENQ=", + version = "v0.9.1", + ) + go_repository( + name = "com_google_cloud_go_dataform", + importpath = "cloud.google.com/go/dataform", + sum = "h1:xcWso0hKOoxeW72AjBSIp/UfkvpqHNzzS0/oygHlcqY=", + version = "v0.8.1", + ) + go_repository( + name = "com_google_cloud_go_datafusion", + importpath = "cloud.google.com/go/datafusion", + sum = "h1:eX9CZoyhKQW6g1Xj7+RONeDj1mV8KQDKEB9KLELX9/8=", + version = "v1.7.1", + ) + go_repository( + name = "com_google_cloud_go_datalabeling", + importpath = "cloud.google.com/go/datalabeling", + sum = "h1:zxsCD/BLKXhNuRssen8lVXChUj8VxF3ofN06JfdWOXw=", + version = "v0.8.1", + ) + go_repository( + name = "com_google_cloud_go_dataplex", + importpath = "cloud.google.com/go/dataplex", + sum = "h1:wqPAP1vRskOoWwNka1yey2wxxCrxRrcxJf78MyFvrbs=", + version = "v1.9.1", + ) + go_repository( + name = "com_google_cloud_go_dataproc_v2", + importpath = "cloud.google.com/go/dataproc/v2", + sum = "h1:jKijbdsERm2hy/5dFl/LeQN+7CNssLdGXQYBMvMH/M4=", + version = "v2.2.0", + ) + go_repository( + name = "com_google_cloud_go_dataqna", + importpath = "cloud.google.com/go/dataqna", + sum = "h1:ITpUJep04hC9V7C+gcK390HO++xesQFSUJ7S4nSnF3U=", + version = "v0.8.1", + ) + go_repository( + name = "com_google_cloud_go_datastore", + importpath = "cloud.google.com/go/datastore", + sum = "h1:Mq0ApTRdLW3/dyiw+DkjTk0+iGIUvkbzaC8sfPwWTH4=", + version = "v1.14.0", + ) + go_repository( + name = "com_google_cloud_go_datastream", + importpath = "cloud.google.com/go/datastream", + sum = "h1:ra/+jMv36zTAGPfi8TRne1hXme+UsKtdcK4j6bnqQiw=", + version = "v1.10.0", + ) + go_repository( + name = "com_google_cloud_go_deploy", + importpath = 
"cloud.google.com/go/deploy", + sum = "h1:A+w/xpWgz99EYzB6e31gMGAI/P5jTZ2UO7veQK5jQ8o=", + version = "v1.13.0", + ) + go_repository( + name = "com_google_cloud_go_dialogflow", + importpath = "cloud.google.com/go/dialogflow", + sum = "h1:0hBV5ipVbhYNKCyiBoM47bUt+43Kd8eWXhBr+pwUSTw=", + version = "v1.43.0", + ) + go_repository( + name = "com_google_cloud_go_dlp", + importpath = "cloud.google.com/go/dlp", + sum = "h1:tF3wsJ2QulRhRLWPzWVkeDz3FkOGVoMl6cmDUHtfYxw=", + version = "v1.10.1", + ) + go_repository( + name = "com_google_cloud_go_documentai", + importpath = "cloud.google.com/go/documentai", + sum = "h1:cBndyac7kPWwSuhUcgdbnqzszfZ57HBEHfD33DIwsBM=", + version = "v1.22.1", + ) + go_repository( + name = "com_google_cloud_go_domains", + importpath = "cloud.google.com/go/domains", + sum = "h1:rqz6KY7mEg7Zs/69U6m6LMbB7PxFDWmT3QWNXIqhHm0=", + version = "v0.9.1", + ) + go_repository( + name = "com_google_cloud_go_edgecontainer", + importpath = "cloud.google.com/go/edgecontainer", + sum = "h1:zhHWnLzg6AqzE+I3gzJqiIwHfjEBhWctNQEzqb+FaRo=", + version = "v1.1.1", + ) + go_repository( + name = "com_google_cloud_go_errorreporting", + importpath = "cloud.google.com/go/errorreporting", + sum = "h1:kj1XEWMu8P0qlLhm3FwcaFsUvXChV/OraZwA70trRR0=", + version = "v0.3.0", + ) + go_repository( + name = "com_google_cloud_go_essentialcontacts", + importpath = "cloud.google.com/go/essentialcontacts", + sum = "h1:OEJ0MLXXCW/tX1fkxzEZOsv/wRfyFsvDVNaHWBAvoV0=", + version = "v1.6.2", + ) + go_repository( + name = "com_google_cloud_go_eventarc", + importpath = "cloud.google.com/go/eventarc", + sum = "h1:xIP3XZi0Xawx8DEfh++mE2lrIi5kQmCr/KcWhJ1q0J4=", + version = "v1.13.0", + ) + go_repository( + name = "com_google_cloud_go_filestore", + importpath = "cloud.google.com/go/filestore", + sum = "h1:Eiz8xZzMJc5ppBWkuaod/PUdUZGCFR8ku0uS+Ah2fRw=", + version = "v1.7.1", + ) + go_repository( + name = "com_google_cloud_go_firestore", + importpath = "cloud.google.com/go/firestore", + sum = 
"h1:/3S4RssUV4GO/kvgJZB+tayjhOfyAHs+KcpJgRVu/Qk=", + version = "v1.13.0", + ) + go_repository( + name = "com_google_cloud_go_functions", + importpath = "cloud.google.com/go/functions", + sum = "h1:LtAyqvO1TFmNLcROzHZhV0agEJfBi+zfMZsF4RT/a7U=", + version = "v1.15.1", + ) + go_repository( + name = "com_google_cloud_go_gkebackup", + importpath = "cloud.google.com/go/gkebackup", + sum = "h1:Kfha8SOF2tqsu4O4jVle66mk7qNdlJ2KhL3E2YyiNZc=", + version = "v1.3.1", + ) + go_repository( + name = "com_google_cloud_go_gkeconnect", + importpath = "cloud.google.com/go/gkeconnect", + sum = "h1:a1ckRvVznnuvDWESM2zZDzSVFvggeBaVY5+BVB8tbT0=", + version = "v0.8.1", + ) + go_repository( + name = "com_google_cloud_go_gkehub", + importpath = "cloud.google.com/go/gkehub", + sum = "h1:2BLSb8i+Co1P05IYCKATXy5yaaIw/ZqGvVSBTLdzCQo=", + version = "v0.14.1", + ) + go_repository( + name = "com_google_cloud_go_gkemulticloud", + importpath = "cloud.google.com/go/gkemulticloud", + sum = "h1:MluqhtPVZReoriP5+adGIw+ij/RIeRik8KApCW2WMTw=", + version = "v1.0.0", + ) + go_repository( + name = "com_google_cloud_go_gsuiteaddons", + importpath = "cloud.google.com/go/gsuiteaddons", + sum = "h1:mi9jxZpzVjLQibTS/XfPZvl+Jr6D5Bs8pGqUjllRb00=", + version = "v1.6.1", + ) + go_repository( + name = "com_google_cloud_go_iam", + importpath = "cloud.google.com/go/iam", + sum = "h1:gacbrBdWcoVmGLozRuStX45YKvJtzIjJdAolzUs1sm4=", + version = "v1.1.2", + ) + go_repository( + name = "com_google_cloud_go_iap", + importpath = "cloud.google.com/go/iap", + sum = "h1:RNhVq/6OMI99/wjPVhqFxjlBxYOBRdaG6rLpBvyaqYY=", + version = "v1.9.0", + ) + go_repository( + name = "com_google_cloud_go_ids", + importpath = "cloud.google.com/go/ids", + sum = "h1:khXYmSoDDhWGEVxHl4c4IgbwSRR+qE/L4hzP3vaU9Hc=", + version = "v1.4.1", + ) + go_repository( + name = "com_google_cloud_go_iot", + importpath = "cloud.google.com/go/iot", + sum = "h1:yrH0OSmicD5bqGBoMlWG8UltzdLkYzNUwNVUVz7OT54=", + version = "v1.7.1", + ) + go_repository( + name = 
"com_google_cloud_go_kms", + importpath = "cloud.google.com/go/kms", + sum = "h1:lh6qra6oC4AyWe5fUUUBe/S27k12OHAleOOOw6KakdE=", + version = "v1.15.2", + ) + go_repository( + name = "com_google_cloud_go_language", + importpath = "cloud.google.com/go/language", + sum = "h1:KnYolG0T5Oex722ZW/sP5QErhVAVNcqpJ16tVJd9RTw=", + version = "v1.11.0", + ) + go_repository( + name = "com_google_cloud_go_lifesciences", + importpath = "cloud.google.com/go/lifesciences", + sum = "h1:axkANGx1wiBXHiPcJZAE+TDjjYoJRIDzbHC/WYllCBU=", + version = "v0.9.1", + ) + go_repository( + name = "com_google_cloud_go_logging", + importpath = "cloud.google.com/go/logging", + sum = "h1:26skQWPeYhvIasWKm48+Eq7oUqdcdbwsCVwz5Ys0FvU=", + version = "v1.8.1", + ) + go_repository( + name = "com_google_cloud_go_longrunning", + importpath = "cloud.google.com/go/longrunning", + sum = "h1:Fr7TXftcqTudoyRJa113hyaqlGdiBQkp0Gq7tErFDWI=", + version = "v0.5.1", + ) + go_repository( + name = "com_google_cloud_go_managedidentities", + importpath = "cloud.google.com/go/managedidentities", + sum = "h1:2/qZuOeLgUHorSdxSQGtnOu9xQkBn37+j+oZQv/KHJY=", + version = "v1.6.1", + ) + go_repository( + name = "com_google_cloud_go_maps", + importpath = "cloud.google.com/go/maps", + sum = "h1:PdfgpBLhAoSzZrQXP+/zBc78fIPLZSJp5y8+qSMn2UU=", + version = "v1.4.0", + ) + go_repository( + name = "com_google_cloud_go_mediatranslation", + importpath = "cloud.google.com/go/mediatranslation", + sum = "h1:50cF7c1l3BanfKrpnTCaTvhf+Fo6kdF21DG0byG7gYU=", + version = "v0.8.1", + ) + go_repository( + name = "com_google_cloud_go_memcache", + importpath = "cloud.google.com/go/memcache", + sum = "h1:7lkLsF0QF+Mre0O/NvkD9Q5utUNwtzvIYjrOLOs0HO0=", + version = "v1.10.1", + ) + go_repository( + name = "com_google_cloud_go_metastore", + importpath = "cloud.google.com/go/metastore", + sum = "h1:+9DsxUOHvsqvC0ylrRc/JwzbXJaaBpfIK3tX0Lx8Tcc=", + version = "v1.12.0", + ) + go_repository( + name = "com_google_cloud_go_monitoring", + importpath = 
"cloud.google.com/go/monitoring", + sum = "h1:rlndy4K8yknMY9JuGe2aK4SbCh21FXoCdX7SAGHmRgI=", + version = "v1.16.0", + ) + go_repository( + name = "com_google_cloud_go_networkconnectivity", + importpath = "cloud.google.com/go/networkconnectivity", + sum = "h1:kG2PX6URJ9Kvotfdm+hH8WIhrRY77sAKytUGOz+MgN0=", + version = "v1.13.0", + ) + go_repository( + name = "com_google_cloud_go_networkmanagement", + importpath = "cloud.google.com/go/networkmanagement", + sum = "h1:aA6L8aioyM4S6nlPYzp2SvB88lBcByZmqMJM6ReafzU=", + version = "v1.9.0", + ) + go_repository( + name = "com_google_cloud_go_networksecurity", + importpath = "cloud.google.com/go/networksecurity", + sum = "h1:TBLEkMp3AE+6IV/wbIGRNTxnqLXHCTEQWoxRVC18TzY=", + version = "v0.9.1", + ) + go_repository( + name = "com_google_cloud_go_notebooks", + importpath = "cloud.google.com/go/notebooks", + sum = "h1:6x2K1JAWv6RW2yQO6oa+xtKUGOpGQseCmT94vpOt1vc=", + version = "v1.10.0", + ) + go_repository( + name = "com_google_cloud_go_optimization", + importpath = "cloud.google.com/go/optimization", + sum = "h1:sGvPVtBJUKNYAwldhJvFmnM+EEdOXjDzjcly3g0n0Xg=", + version = "v1.5.0", + ) + go_repository( + name = "com_google_cloud_go_orchestration", + importpath = "cloud.google.com/go/orchestration", + sum = "h1:KmN18kE/xa1n91cM5jhCh7s1/UfIguSCisw7nTMUzgE=", + version = "v1.8.1", + ) + go_repository( + name = "com_google_cloud_go_orgpolicy", + importpath = "cloud.google.com/go/orgpolicy", + sum = "h1:I/7dHICQkNwym9erHqmlb50LRU588NPCvkfIY0Bx9jI=", + version = "v1.11.1", + ) + go_repository( + name = "com_google_cloud_go_osconfig", + importpath = "cloud.google.com/go/osconfig", + sum = "h1:dgyEHdfqML6cUW6/MkihNdTVc0INQst0qSE8Ou1ub9c=", + version = "v1.12.1", + ) + go_repository( + name = "com_google_cloud_go_oslogin", + importpath = "cloud.google.com/go/oslogin", + sum = "h1:LdSuG3xBYu2Sgr3jTUULL1XCl5QBx6xwzGqzoDUw1j0=", + version = "v1.10.1", + ) + go_repository( + name = "com_google_cloud_go_phishingprotection", + importpath = 
"cloud.google.com/go/phishingprotection", + sum = "h1:aK/lNmSd1vtbft/vLe2g7edXK72sIQbqr2QyrZN/iME=", + version = "v0.8.1", + ) + go_repository( + name = "com_google_cloud_go_policytroubleshooter", + importpath = "cloud.google.com/go/policytroubleshooter", + sum = "h1:pT4qSiL5o0hBSWHDiOcmes/s301PeLLWEhAr/eMQB/g=", + version = "v1.9.0", + ) + go_repository( + name = "com_google_cloud_go_privatecatalog", + importpath = "cloud.google.com/go/privatecatalog", + sum = "h1:B/18xGo+E0EMS9LOEQ0zXz7F2asMgmVgTYGSI89MHOA=", + version = "v0.9.1", + ) + go_repository( + name = "com_google_cloud_go_pubsub", + importpath = "cloud.google.com/go/pubsub", + sum = "h1:6SPCPvWav64tj0sVX/+npCBKhUi/UjJehy9op/V3p2g=", + version = "v1.33.0", + ) + go_repository( + name = "com_google_cloud_go_pubsublite", + importpath = "cloud.google.com/go/pubsublite", + sum = "h1:pX+idpWMIH30/K7c0epN6V703xpIcMXWRjKJsz0tYGY=", + version = "v1.8.1", + ) + go_repository( + name = "com_google_cloud_go_recaptchaenterprise_v2", + importpath = "cloud.google.com/go/recaptchaenterprise/v2", + sum = "h1:IGkbudobsTXAwmkEYOzPCQPApUCsN4Gbq3ndGVhHQpI=", + version = "v2.7.2", + ) + go_repository( + name = "com_google_cloud_go_recommendationengine", + importpath = "cloud.google.com/go/recommendationengine", + sum = "h1:nMr1OEVHuDambRn+/y4RmNAmnR/pXCuHtH0Y4tCgGRQ=", + version = "v0.8.1", + ) + go_repository( + name = "com_google_cloud_go_recommender", + importpath = "cloud.google.com/go/recommender", + sum = "h1:SuzbMJhDAiPro7tR9QP7EX97+TI31urjsIgNh9XQHl8=", + version = "v1.11.0", + ) + go_repository( + name = "com_google_cloud_go_redis", + importpath = "cloud.google.com/go/redis", + sum = "h1:YrjQnCC7ydk+k30op7DSjSHw1yAYhqYXFcOq1bSXRYA=", + version = "v1.13.1", + ) + go_repository( + name = "com_google_cloud_go_resourcemanager", + importpath = "cloud.google.com/go/resourcemanager", + sum = "h1:QIAMfndPOHR6yTmMUB0ZN+HSeRmPjR/21Smq5/xwghI=", + version = "v1.9.1", + ) + go_repository( + name = 
"com_google_cloud_go_resourcesettings", + importpath = "cloud.google.com/go/resourcesettings", + sum = "h1:Fdyq418U69LhvNPFdlEO29w+DRRjwDA4/pFamm4ksAg=", + version = "v1.6.1", + ) + go_repository( + name = "com_google_cloud_go_retail", + importpath = "cloud.google.com/go/retail", + sum = "h1:gYBrb9u/Hc5s5lUTFXX1Vsbc/9BEvgtioY6ZKaK0DK8=", + version = "v1.14.1", + ) + go_repository( + name = "com_google_cloud_go_run", + importpath = "cloud.google.com/go/run", + sum = "h1:kHeIG8q+N6Zv0nDkBjSOYfK2eWqa5FnaiDPH/7/HirE=", + version = "v1.2.0", + ) + go_repository( + name = "com_google_cloud_go_scheduler", + importpath = "cloud.google.com/go/scheduler", + sum = "h1:yoZbZR8880KgPGLmACOMCiY2tPk+iX4V/dkxqTirlz8=", + version = "v1.10.1", + ) + go_repository( + name = "com_google_cloud_go_secretmanager", + importpath = "cloud.google.com/go/secretmanager", + sum = "h1:cLTCwAjFh9fKvU6F13Y4L9vPcx9yiWPyWXE4+zkuEQs=", + version = "v1.11.1", + ) + go_repository( + name = "com_google_cloud_go_security", + importpath = "cloud.google.com/go/security", + sum = "h1:jR3itwycg/TgGA0uIgTItcVhA55hKWiNJxaNNpQJaZE=", + version = "v1.15.1", + ) + go_repository( + name = "com_google_cloud_go_securitycenter", + importpath = "cloud.google.com/go/securitycenter", + sum = "h1:XOGJ9OpnDtqg8izd7gYk/XUhj8ytjIalyjjsR6oyG0M=", + version = "v1.23.0", + ) + go_repository( + name = "com_google_cloud_go_servicedirectory", + importpath = "cloud.google.com/go/servicedirectory", + sum = "h1:pBWpjCFVGWkzVTkqN3TBBIqNSoSHY86/6RL0soSQ4z8=", + version = "v1.11.0", + ) + go_repository( + name = "com_google_cloud_go_shell", + importpath = "cloud.google.com/go/shell", + sum = "h1:aHbwH9LSqs4r2rbay9f6fKEls61TAjT63jSyglsw7sI=", + version = "v1.7.1", + ) + go_repository( + name = "com_google_cloud_go_spanner", + importpath = "cloud.google.com/go/spanner", + sum = "h1:+HY8C4uztU7XyLz3xMi/LCXdetLEOExhvRFJu2NiVXM=", + version = "v1.49.0", + ) + go_repository( + name = "com_google_cloud_go_speech", + importpath = 
"cloud.google.com/go/speech", + sum = "h1:MCagaq8ObV2tr1kZJcJYgXYbIn8Ai5rp42tyGYw9rls=", + version = "v1.19.0", + ) + go_repository( + name = "com_google_cloud_go_storage", + importpath = "cloud.google.com/go/storage", + sum = "h1:PVrDOkIC8qQVa1P3SXGpQvfuJhN2LHOoyZvWs8D2X5M=", + version = "v1.33.0", + ) + go_repository( + name = "com_google_cloud_go_storagetransfer", + importpath = "cloud.google.com/go/storagetransfer", + sum = "h1:+ZLkeXx0K0Pk5XdDmG0MnUVqIR18lllsihU/yq39I8Q=", + version = "v1.10.0", + ) + go_repository( + name = "com_google_cloud_go_talent", + importpath = "cloud.google.com/go/talent", + sum = "h1:j46ZgD6N2YdpFPux9mc7OAf4YK3tiBCsbLKc8rQx+bU=", + version = "v1.6.2", + ) + go_repository( + name = "com_google_cloud_go_texttospeech", + importpath = "cloud.google.com/go/texttospeech", + sum = "h1:S/pR/GZT9p15R7Y2dk2OXD/3AufTct/NSxT4a7nxByw=", + version = "v1.7.1", + ) + go_repository( + name = "com_google_cloud_go_tpu", + importpath = "cloud.google.com/go/tpu", + sum = "h1:kQf1jgPY04UJBYYjNUO+3GrZtIb57MfGAW2bwgLbR3A=", + version = "v1.6.1", + ) + go_repository( + name = "com_google_cloud_go_trace", + importpath = "cloud.google.com/go/trace", + sum = "h1:EwGdOLCNfYOOPtgqo+D2sDLZmRCEO1AagRTJCU6ztdg=", + version = "v1.10.1", + ) + go_repository( + name = "com_google_cloud_go_translate", + importpath = "cloud.google.com/go/translate", + sum = "h1:0na4gC54Lu05ir00dmUSuMkLAojDe1ALq4hBTUkhwjE=", + version = "v1.9.0", + ) + go_repository( + name = "com_google_cloud_go_video", + importpath = "cloud.google.com/go/video", + sum = "h1:AkjXyJfQ7DtPyDOAbTMeiGcuKsO8/iKSb3fAmTUHYSg=", + version = "v1.20.0", + ) + go_repository( + name = "com_google_cloud_go_videointelligence", + importpath = "cloud.google.com/go/videointelligence", + sum = "h1:MBMWnkQ78GQnRz5lfdTAbBq/8QMCF3wahgtHh3s/J+k=", + version = "v1.11.1", + ) + go_repository( + name = "com_google_cloud_go_vision_v2", + importpath = "cloud.google.com/go/vision/v2", + sum = 
"h1:ccK6/YgPfGHR/CyESz1mvIbsht5Y2xRsWCPqmTNydEw=", + version = "v2.7.2", + ) + go_repository( + name = "com_google_cloud_go_vmmigration", + importpath = "cloud.google.com/go/vmmigration", + sum = "h1:gnjIclgqbEMc+cF5IJuPxp53wjBIlqZ8h9hE8Rkwp7A=", + version = "v1.7.1", + ) + go_repository( + name = "com_google_cloud_go_vmwareengine", + importpath = "cloud.google.com/go/vmwareengine", + sum = "h1:qsJ0CPlOQu/3MFBGklu752v3AkD+Pdu091UmXJ+EjTA=", + version = "v1.0.0", + ) + go_repository( + name = "com_google_cloud_go_vpcaccess", + importpath = "cloud.google.com/go/vpcaccess", + sum = "h1:ram0GzjNWElmbxXMIzeOZUkQ9J8ZAahD6V8ilPGqX0Y=", + version = "v1.7.1", + ) + go_repository( + name = "com_google_cloud_go_webrisk", + importpath = "cloud.google.com/go/webrisk", + sum = "h1:Ssy3MkOMOnyRV5H2bkMQ13Umv7CwB/kugo3qkAX83Fk=", + version = "v1.9.1", + ) + go_repository( + name = "com_google_cloud_go_websecurityscanner", + importpath = "cloud.google.com/go/websecurityscanner", + sum = "h1:CfEF/vZ+xXyAR3zC9iaC/QRdf1MEgS20r5UR17Q4gOg=", + version = "v1.6.1", + ) + go_repository( + name = "com_google_cloud_go_workflows", + importpath = "cloud.google.com/go/workflows", + sum = "h1:cSUlx4PVV9O0vYCl+pHAUmu0996A7eN602d4wjjVHRs=", + version = "v1.12.0", + ) + go_repository( + name = "in_gopkg_check_v1", + importpath = "gopkg.in/check.v1", + sum = "h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=", + version = "v1.0.0-20201130134442-10cb98267c6c", + ) + go_repository( + name = "in_gopkg_yaml_v2", + importpath = "gopkg.in/yaml.v2", + sum = "h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=", + version = "v2.4.0", + ) + go_repository( + name = "in_gopkg_yaml_v3", + importpath = "gopkg.in/yaml.v3", + sum = "h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=", + version = "v3.0.1", + ) + go_repository( + name = "io_k8s_sigs_yaml", + importpath = "sigs.k8s.io/yaml", + sum = "h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=", + version = "v1.1.0", + ) + go_repository( + name = "io_opencensus_go", + 
importpath = "go.opencensus.io", + sum = "h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=", + version = "v0.24.0", + ) + go_repository( + name = "io_opentelemetry_go_contrib_instrumentation_google_golang_org_grpc_otelgrpc", + importpath = "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc", + sum = "h1:b8xjZxHbLrXAum4SxJd1Rlm7Y/fKaB+6ACI7/e5EfSA=", + version = "v0.44.0", + ) + go_repository( + name = "io_opentelemetry_go_contrib_propagators_b3", + importpath = "go.opentelemetry.io/contrib/propagators/b3", + sum = "h1:ulz44cpm6V5oAeg5Aw9HyqGFMS6XM7untlMEhD7YzzA=", + version = "v1.19.0", + ) + go_repository( + name = "io_opentelemetry_go_otel", + build_file_proto_mode = "disable", + importpath = "go.opentelemetry.io/otel", + sum = "h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs=", + version = "v1.19.0", + ) + go_repository( + name = "io_opentelemetry_go_otel_exporters_jaeger", + importpath = "go.opentelemetry.io/otel/exporters/jaeger", + sum = "h1:D7UpUy2Xc2wsi1Ras6V40q806WM07rqoCWzXu7Sqy+4=", + version = "v1.17.0", + ) + go_repository( + name = "io_opentelemetry_go_otel_exporters_otlp_otlptrace", + importpath = "go.opentelemetry.io/otel/exporters/otlp/otlptrace", + sum = "h1:IAtl+7gua134xcV3NieDhJHjjOVeJhXAnYf/0hswjUY=", + version = "v1.18.0", + ) + go_repository( + name = "io_opentelemetry_go_otel_metric", + importpath = "go.opentelemetry.io/otel/metric", + sum = "h1:aTzpGtV0ar9wlV4Sna9sdJyII5jTVJEvKETPiOKwvpE=", + version = "v1.19.0", + ) + go_repository( + name = "io_opentelemetry_go_otel_sdk", + importpath = "go.opentelemetry.io/otel/sdk", + sum = "h1:e3bAB0wB3MljH38sHzpV/qWrOTCFrdZF2ct9F8rBkcY=", + version = "v1.18.0", + ) + go_repository( + name = "io_opentelemetry_go_otel_trace", + importpath = "go.opentelemetry.io/otel/trace", + sum = "h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg=", + version = "v1.19.0", + ) + go_repository( + name = "io_opentelemetry_go_proto_otlp", + importpath = "go.opentelemetry.io/proto/otlp", + sum = 
"h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I=", + version = "v1.0.0", + ) + go_repository( + name = "org_golang_google_api", + importpath = "google.golang.org/api", + sum = "h1:mf+7EJ94fi5ZcnpPy+m0Yv2dkz8bKm+UL0snTCuwXlY=", + version = "v0.142.0", + ) + go_repository( + name = "org_golang_google_appengine", + importpath = "google.golang.org/appengine", + sum = "h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=", + version = "v1.6.7", + ) + go_repository( + name = "org_golang_google_genproto", + importpath = "google.golang.org/genproto", + sum = "h1:SeZZZx0cP0fqUyA+oRzP9k7cSwJlvDFiROO72uwD6i0=", + version = "v0.0.0-20231002182017-d307bd883b97", + ) + go_repository( + name = "org_golang_google_genproto_googleapis_api", + importpath = "google.golang.org/genproto/googleapis/api", + sum = "h1:U7+wNaVuSTaUqNvK2+osJ9ejEZxbjHHk8F2b6Hpx0AE=", + version = "v0.0.0-20230920204549-e6e6cdab5c13", + ) + go_repository( + name = "org_golang_google_genproto_googleapis_bytestream", + importpath = "google.golang.org/genproto/googleapis/bytestream", + sum = "h1:AzcXcS6RbpBm65S0+/F78J9hFCL0/GZWp8oCRZod780=", + version = "v0.0.0-20230920204549-e6e6cdab5c13", + ) + go_repository( + name = "org_golang_google_genproto_googleapis_rpc", + importpath = "google.golang.org/genproto/googleapis/rpc", + sum = "h1:jHkCUWkseRf+W+edG5hMzr/Uh1xkDREY4caybAq4dpY=", + version = "v0.0.0-20231009173412-8bfb1ae86b6c", + ) + go_repository( + name = "org_golang_google_grpc", + build_file_proto_mode = "disable", + importpath = "google.golang.org/grpc", + sum = "h1:SXUpjxeVF3FKrTYQI4f4KvbGD5u2xccdYdurwowix5I=", + version = "v1.58.2", + ) + go_repository( + name = "org_golang_google_protobuf", + build_extra_args = [ + "-exclude=**/testdata", + ], + importpath = "google.golang.org/protobuf", + sum = "h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=", + version = "v1.31.0", + ) + go_repository( + name = "org_golang_x_crypto", + importpath = "golang.org/x/crypto", + sum = 
"h1:mvySKfSWJ+UKUii46M40LOvyWfN0s2U+46/jDd0e6Ck=", + version = "v0.13.0", + ) + go_repository( + name = "org_golang_x_exp", + importpath = "golang.org/x/exp", + sum = "h1:c2HOrn5iMezYjSlGPncknSEr/8x5LELb/ilJbXi9DEA=", + version = "v0.0.0-20190121172915-509febef88a4", + ) + go_repository( + name = "org_golang_x_lint", + importpath = "golang.org/x/lint", + patches = ["@com_github_buildbarn_bb_storage//:patches/org_golang_x_lint/generic.diff"], + sum = "h1:2M3HP5CCK1Si9FQhwnzYhXdG6DXeebvUHFpre8QvbyI=", + version = "v0.0.0-20201208152925-83fdc39ff7b5", + ) + go_repository( + name = "org_golang_x_mod", + importpath = "golang.org/x/mod", + sum = "h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk=", + version = "v0.10.0", + ) + go_repository( + name = "org_golang_x_net", + importpath = "golang.org/x/net", + sum = "h1:ugBLEUaxABaB5AJqW9enI0ACdci2RUd4eP51NTBvuJ8=", + version = "v0.15.0", + ) + go_repository( + name = "org_golang_x_oauth2", + importpath = "golang.org/x/oauth2", + sum = "h1:smVPGxink+n1ZI5pkQa8y6fZT0RW0MgCO5bFpepy4B4=", + version = "v0.12.0", + ) + go_repository( + name = "org_golang_x_sync", + importpath = "golang.org/x/sync", + sum = "h1:zxkM55ReGkDlKSM+Fu41A+zmbZuaPVbGMzvvdUPznYQ=", + version = "v0.4.0", + ) + go_repository( + name = "org_golang_x_sys", + importpath = "golang.org/x/sys", + sum = "h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE=", + version = "v0.13.0", + ) + go_repository( + name = "org_golang_x_term", + importpath = "golang.org/x/term", + sum = "h1:/ZfYdc3zq+q02Rv9vGqTeSItdzZTSNDmfTi0mBAuidU=", + version = "v0.12.0", + ) + go_repository( + name = "org_golang_x_text", + importpath = "golang.org/x/text", + sum = "h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k=", + version = "v0.13.0", + ) + go_repository( + name = "org_golang_x_tools", + build_extra_args = [ + "-exclude=**/testdata", + "-exclude=go/packages/packagestest", + ], + importpath = "golang.org/x/tools", + sum = "h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y=", + version = "v0.8.0", + 
) + go_repository( + name = "org_golang_x_xerrors", + importpath = "golang.org/x/xerrors", + sum = "h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk=", + version = "v0.0.0-20220907171357-04be3eba64a2", + ) + go_repository( + name = "org_uber_go_atomic", + importpath = "go.uber.org/atomic", + sum = "h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=", + version = "v1.7.0", + ) + go_repository( + name = "org_uber_go_goleak", + importpath = "go.uber.org/goleak", + sum = "h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0=", + version = "v1.1.10", + ) + go_repository( + name = "org_uber_go_multierr", + importpath = "go.uber.org/multierr", + sum = "h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=", + version = "v1.6.0", + ) + go_repository( + name = "org_uber_go_zap", + importpath = "go.uber.org/zap", + sum = "h1:CSUJ2mjFszzEWt4CdKISEuChVIXGBn3lAPwkRGyVrc4=", + version = "v1.18.1", + ) diff --git a/internal/mock/BUILD.bazel b/internal/mock/BUILD.bazel new file mode 100644 index 0000000..0cbc538 --- /dev/null +++ b/internal/mock/BUILD.bazel @@ -0,0 +1,425 @@ +# gazelle:ignore + +load("@io_bazel_rules_go//extras:gomock.bzl", "gomock") +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +gomock( + name = "aliases", + out = "aliases.go", + interfaces = [ + "CancelFunc", + "Context", + "ReadCloser", + ], + library = "//internal/mock/aliases", + package = "mock", +) + +gomock( + name = "auth", + out = "auth.go", + interfaces = [ + "Authorizer", + ], + library = "@com_github_buildbarn_bb_storage//pkg/auth", + package = "mock", +) + +gomock( + name = "blobstore", + out = "blobstore.go", + interfaces = ["BlobAccess"], + library = "@com_github_buildbarn_bb_storage//pkg/blobstore", + package = "mock", +) + +gomock( + name = "blobstore_slicing", + out = "blobstore_slicing.go", + interfaces = ["BlobSlicer"], + library = "@com_github_buildbarn_bb_storage//pkg/blobstore/slicing", + package = "mock", +) + +gomock( + name = "blockdevice", + out = "blockdevice.go", + interfaces = 
["BlockDevice"], + library = "@com_github_buildbarn_bb_storage//pkg/blockdevice", + package = "mock", +) + +gomock( + name = "builder", + out = "builder.go", + interfaces = [ + "BuildDirectory", + "BuildDirectoryCreator", + "BuildExecutor", + "CompletedActionLogger", + "ParentPopulatableDirectory", + "StorageFlusher", + "UploadableDirectory", + ], + library = "//pkg/builder", + package = "mock", +) + +gomock( + name = "cas", + out = "cas.go", + interfaces = [ + "CachingDirectoryFetcherEvictionSet", + "DirectoryFetcher", + "DirectoryWalker", + "FileFetcher", + ], + library = "//pkg/cas", + package = "mock", +) + +gomock( + name = "cleaner", + out = "cleaner.go", + interfaces = [ + "Cleaner", + "ProcessTable", + ], + library = "//pkg/cleaner", + package = "mock", +) + +gomock( + name = "clock", + out = "clock.go", + interfaces = [ + "Clock", + "Timer", + ], + library = "@com_github_buildbarn_bb_storage//pkg/clock", + package = "mock", +) + +gomock( + name = "clock_re", + out = "clock_re.go", + interfaces = ["Suspendable"], + library = "//pkg/clock", + package = "mock", +) + +gomock( + name = "completedactionlogger", + out = "completedactionlogger.go", + interfaces = ["CompletedActionLogger_LogCompletedActionsClient"], + library = "//pkg/proto/completedactionlogger", + package = "mock", +) + +gomock( + name = "filesystem", + out = "filesystem.go", + interfaces = [ + "Directory", + "DirectoryCloser", + "FileReader", + "FileReadWriter", + "FileWriter", + ], + library = "@com_github_buildbarn_bb_storage//pkg/filesystem", + package = "mock", +) + +gomock( + name = "filesystem_access", + out = "filesystem_access.go", + interfaces = [ + "ReadDirectoryMonitor", + "UnreadDirectoryMonitor", + ], + library = "//pkg/filesystem/access", + package = "mock", +) + +gomock( + name = "filesystem_re", + out = "filesystem_re.go", + interfaces = [ + "DirectoryOpener", + "FilePool", + "SectorAllocator", + ], + library = "//pkg/filesystem", + package = "mock", +) + +gomock( + name = 
"filesystem_virtual", + out = "filesystem_virtual.go", + interfaces = [ + "CASFileFactory", + "CharacterDeviceFactory", + "ChildFilter", + "Directory", + "DirectoryEntryReporter", + "FileAllocator", + "FileReadMonitor", + "FileReadMonitorFactory", + "FUSERemovalNotifier", + "FUSERemovalNotifierRegistrar", + "HandleResolver", + "InitialContentsFetcher", + "Leaf", + "NativeLeaf", + "ResolvableHandleAllocation", + "ResolvableHandleAllocator", + "StatefulDirectoryHandle", + "StatefulHandleAllocation", + "StatefulHandleAllocator", + "StatelessHandleAllocation", + "StatelessHandleAllocator", + "SymlinkFactory", + ], + library = "//pkg/filesystem/virtual", + mock_names = { + "Directory": "MockVirtualDirectory", + "Leaf": "MockVirtualLeaf", + }, + package = "mock", +) + +gomock( + name = "fuse", + out = "fuse.go", + interfaces = [ + "RawFileSystem", + "ReadDirEntryList", + "ReadDirPlusEntryList", + "ServerCallbacks", + ], + library = "@com_github_hanwen_go_fuse_v2//fuse", + package = "mock", + tags = ["manual"], +) + +gomock( + name = "grpc_go", + out = "grpc_go.go", + interfaces = [ + "ClientConnInterface", + "ClientStream", + ], + library = "@org_golang_google_grpc//:grpc", + package = "mock", +) + +gomock( + name = "initialsizeclass", + out = "initialsizeclass.go", + interfaces = [ + "Learner", + "PreviousExecutionStatsHandle", + "PreviousExecutionStatsStore", + "Selector", + "StrategyCalculator", + ], + library = "//pkg/scheduler/initialsizeclass", + package = "mock", +) + +gomock( + name = "platform", + out = "platform.go", + interfaces = ["KeyExtractor"], + library = "//pkg/scheduler/platform", + mock_names = {"KeyExtractor": "MockPlatformKeyExtractor"}, + package = "mock", +) + +gomock( + name = "random", + out = "random.go", + interfaces = [ + "SingleThreadedGenerator", + "ThreadSafeGenerator", + ], + library = "@com_github_buildbarn_bb_storage//pkg/random", + package = "mock", +) + +gomock( + name = "remoteexecution", + out = "remoteexecution.go", + interfaces = [ 
+ "Execution_ExecuteServer", + "Execution_WaitExecutionServer", + ], + library = "@com_github_bazelbuild_remote_apis//build/bazel/remote/execution/v2:execution", + package = "mock", +) + +gomock( + name = "remoteworker", + out = "remoteworker.go", + interfaces = ["OperationQueueClient"], + library = "//pkg/proto/remoteworker", + package = "mock", +) + +gomock( + name = "routing", + out = "routing.go", + interfaces = ["ActionRouter"], + library = "//pkg/scheduler/routing", + package = "mock", +) + +gomock( + name = "runner", + out = "runner.go", + interfaces = ["AppleXcodeSDKRootResolver"], + library = "//pkg/runner", + package = "mock", +) + +gomock( + name = "runner_pb", + out = "runner_pb.go", + interfaces = [ + "RunnerClient", + "RunnerServer", + ], + library = "//pkg/proto/runner", + package = "mock", +) + +gomock( + name = "storage_builder", + out = "storage_builder.go", + interfaces = ["BuildQueue"], + library = "@com_github_buildbarn_bb_storage//pkg/builder", + package = "mock", +) + +gomock( + name = "storage_util", + out = "storage_util.go", + interfaces = [ + "ErrorLogger", + "UUIDGenerator", + ], + library = "@com_github_buildbarn_bb_storage//pkg/util", + package = "mock", +) + +gomock( + name = "sync", + out = "sync.go", + interfaces = ["TryLocker"], + library = "//pkg/sync", + package = "mock", +) + +gomock( + name = "trace", + out = "trace.go", + interfaces = [ + "Span", + "Tracer", + "TracerProvider", + ], + library = "@io_opentelemetry_go_otel_trace//:trace", + package = "mock", +) + +go_library( + name = "mock", + srcs = [ + ":aliases.go", + ":auth.go", + ":blobstore.go", + ":blobstore_slicing.go", + ":blockdevice.go", + ":builder.go", + ":cas.go", + ":cleaner.go", + ":clock.go", + ":clock_re.go", + ":completedactionlogger.go", + ":filesystem.go", + ":filesystem_access.go", + ":filesystem_re.go", + ":filesystem_virtual.go", + ":grpc_go.go", + ":initialsizeclass.go", + ":platform.go", + ":random.go", + ":remoteexecution.go", + ":remoteworker.go", + 
":routing.go", + ":runner.go", + ":runner_pb.go", + ":storage_builder.go", + ":storage_util.go", + ":sync.go", + ":trace.go", + ] + select({ + "@io_bazel_rules_go//go/platform:darwin": [ + ":fuse.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ + ":fuse.go", + ], + "//conditions:default": [], + }), + importpath = "github.com/buildbarn/bb-remote-execution/internal/mock", + visibility = ["//:__subpackages__"], + deps = [ + "//pkg/blobstore", + "//pkg/builder", + "//pkg/cas", + "//pkg/cleaner", + "//pkg/filesystem", + "//pkg/filesystem/access", + "//pkg/filesystem/virtual", + "//pkg/proto/buildqueuestate", + "//pkg/proto/cas", + "//pkg/proto/completedactionlogger", + "//pkg/proto/outputpathpersistency", + "//pkg/proto/remoteoutputservice", + "//pkg/proto/remoteworker", + "//pkg/proto/runner", + "//pkg/scheduler/initialsizeclass", + "//pkg/scheduler/invocation", + "//pkg/scheduler/platform", + "@com_github_bazelbuild_remote_apis//build/bazel/remote/execution/v2:execution", + "@com_github_buildbarn_bb_storage//pkg/blobstore", + "@com_github_buildbarn_bb_storage//pkg/blobstore/buffer", + "@com_github_buildbarn_bb_storage//pkg/blobstore/slicing", + "@com_github_buildbarn_bb_storage//pkg/builder", + "@com_github_buildbarn_bb_storage//pkg/clock", + "@com_github_buildbarn_bb_storage//pkg/digest", + "@com_github_buildbarn_bb_storage//pkg/filesystem", + "@com_github_buildbarn_bb_storage//pkg/filesystem/path", + "@com_github_buildbarn_bb_storage//pkg/proto/iscc", + "@com_github_buildbarn_bb_storage//pkg/util", + "@com_github_golang_mock//gomock", + "@com_github_google_uuid//:uuid", + "@com_google_cloud_go_longrunning//autogen/longrunningpb", + "@io_opentelemetry_go_otel//attribute", + "@io_opentelemetry_go_otel//codes", + "@io_opentelemetry_go_otel_trace//:trace", + "@org_golang_google_grpc//:grpc", + "@org_golang_google_grpc//metadata", + "@org_golang_google_protobuf//types/known/anypb:go_default_library", + 
"@org_golang_google_protobuf//types/known/emptypb:go_default_library", + ] + select({ + "@io_bazel_rules_go//go/platform:darwin": [ + "@com_github_hanwen_go_fuse_v2//fuse", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "@com_github_hanwen_go_fuse_v2//fuse", + ], + "//conditions:default": [], + }), +) diff --git a/internal/mock/aliases/BUILD.bazel b/internal/mock/aliases/BUILD.bazel new file mode 100644 index 0000000..d3f083c --- /dev/null +++ b/internal/mock/aliases/BUILD.bazel @@ -0,0 +1,8 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "aliases", + srcs = ["aliases.go"], + importpath = "github.com/buildbarn/bb-remote-execution/internal/mock/aliases", + visibility = ["//:__subpackages__"], +) diff --git a/internal/mock/aliases/aliases.go b/internal/mock/aliases/aliases.go new file mode 100644 index 0000000..efde0f5 --- /dev/null +++ b/internal/mock/aliases/aliases.go @@ -0,0 +1,21 @@ +package aliases + +import ( + "context" + "io" +) + +// This file contains aliases for some of the interfaces provided by the +// Go standard library. The only reason this file exists is to allow the +// gomock() Bazel rule to emit mocks for them, as that rule is only +// capable of emitting mocks for interfaces built through a +// go_library(). + +// CancelFunc is an alias of context.CancelFunc. +type CancelFunc = context.CancelFunc + +// Context is an alias of context.Context. +type Context = context.Context + +// ReadCloser is an alias of io.ReadCloser. 
+type ReadCloser = io.ReadCloser diff --git a/patches/bazel_gazelle/dont-flatten-srcs.diff b/patches/bazel_gazelle/dont-flatten-srcs.diff new file mode 100644 index 0000000..53948a4 --- /dev/null +++ b/patches/bazel_gazelle/dont-flatten-srcs.diff @@ -0,0 +1,47 @@ +diff --git language/go/generate.go language/go/generate.go +index 0b23cca..c532ad2 100644 +--- language/go/generate.go ++++ language/go/generate.go +@@ -483,7 +483,7 @@ func (g *generator) generateLib(pkg *goPackage, embed string) *rule.Rule { + } else { + visibility = g.commonVisibility(pkg.importPath) + } +- g.setCommonAttrs(goLibrary, pkg.rel, visibility, pkg.library, embed) ++ g.setCommonAttrs(goLibrary, pkg.rel, visibility, pkg.library, embed, true) + g.setImportAttrs(goLibrary, pkg.importPath) + return goLibrary + } +@@ -512,7 +512,7 @@ func (g *generator) generateBin(pkg *goPackage, library string) *rule.Rule { + return goBinary // empty + } + visibility := g.commonVisibility(pkg.importPath) +- g.setCommonAttrs(goBinary, pkg.rel, visibility, pkg.binary, library) ++ g.setCommonAttrs(goBinary, pkg.rel, visibility, pkg.binary, library, true) + return goBinary + } + +@@ -527,7 +527,7 @@ func (g *generator) generateTest(pkg *goPackage, library string) *rule.Rule { + if pkg.test.hasInternalTest { + embed = library + } +- g.setCommonAttrs(goTest, pkg.rel, nil, pkg.test, embed) ++ g.setCommonAttrs(goTest, pkg.rel, nil, pkg.test, embed, false) + if pkg.hasTestdata { + goTest.SetAttr("data", rule.GlobValue{Patterns: []string{"testdata/**"}}) + } +@@ -603,9 +603,13 @@ func (g *generator) maybeGenerateExtraLib(lib *rule.Rule, pkg *goPackage) *rule. 
+ return r + } + +-func (g *generator) setCommonAttrs(r *rule.Rule, pkgRel string, visibility []string, target goTarget, embed string) { ++func (g *generator) setCommonAttrs(r *rule.Rule, pkgRel string, visibility []string, target goTarget, embed string, flattenSrcs bool) { + if !target.sources.isEmpty() { +- r.SetAttr("srcs", target.sources.buildFlat()) ++ if flattenSrcs { ++ r.SetAttr("srcs", target.sources.buildFlat()) ++ } else { ++ r.SetAttr("srcs", target.sources.build()) ++ } + } + if !target.embedSrcs.isEmpty() { + r.SetAttr("embedsrcs", target.embedSrcs.build()) diff --git a/patches/bazel_gazelle/issue-1595.diff b/patches/bazel_gazelle/issue-1595.diff new file mode 100644 index 0000000..840b453 --- /dev/null +++ b/patches/bazel_gazelle/issue-1595.diff @@ -0,0 +1,35 @@ +diff --git repo/remote.go repo/remote.go +index 361a324..98e8689 100644 +--- repo/remote.go ++++ repo/remote.go +@@ -180,15 +180,25 @@ func NewRemoteCache(knownRepos []Repo) (r *RemoteCache, cleanup func() error) { + // allowed to use them. However, we'll return the same result nearly all + // the time, and simpler is better. + for _, repo := range knownRepos { +- path := pathWithoutSemver(repo.GoPrefix) +- if path == "" || r.root.cache[path] != nil { ++ newPath := pathWithoutSemver(repo.GoPrefix) ++ if newPath == "" { + continue + } +- r.root.cache[path] = r.root.cache[repo.GoPrefix] ++ found := false ++ for prefix := newPath; prefix != "." 
&& prefix != "/"; prefix = path.Dir(prefix) { ++ if _, ok := r.root.cache[prefix]; ok { ++ found = true ++ break ++ } ++ } ++ if found { ++ continue ++ } ++ r.root.cache[newPath] = r.root.cache[repo.GoPrefix] + if e := r.remote.cache[repo.GoPrefix]; e != nil { +- r.remote.cache[path] = e ++ r.remote.cache[newPath] = e + } +- r.mod.cache[path] = r.mod.cache[repo.GoPrefix] ++ r.mod.cache[newPath] = r.mod.cache[repo.GoPrefix] + } + + return r, r.cleanup diff --git a/patches/com_github_golang_mock/generics.diff b/patches/com_github_golang_mock/generics.diff new file mode 100644 index 0000000..cf142e2 --- /dev/null +++ b/patches/com_github_golang_mock/generics.diff @@ -0,0 +1,177 @@ +diff --git mockgen/model/model.go mockgen/model/model.go +index 94d7f4b..7620f20 100644 +--- mockgen/model/model.go ++++ mockgen/model/model.go +@@ -17,8 +17,10 @@ package model + + import ( + "encoding/gob" ++ "errors" + "fmt" + "io" ++ "path" + "reflect" + "strings" + ) +@@ -405,6 +407,138 @@ var errorType = reflect.TypeOf((*error)(nil)).Elem() + + var byteType = reflect.TypeOf(byte(0)) + ++var predeclaredTypeNames = map[string]struct{}{ ++ // Boolean types. ++ "bool": {}, ++ // Numeric types. ++ "uint8": {}, "uint16": {}, "uint32": {}, "uint64": {}, ++ "int8": {}, "int16": {}, "int32": {}, "int64": {}, ++ "float32": {}, "float64": {}, "complex64": {}, "complex128": {}, ++ "byte": {}, "rune": {}, ++ "uint": {}, "int": {}, "uintptr": {}, ++ // String types. ++ "string": {}, ++ // Any types. ++ "interface {}": {}, ++} ++ ++func typeFromReflectName(currentPkg string, expr io.RuneScanner) (Type, error) { ++ cFirst, _, err := expr.ReadRune() ++ if err != nil { ++ return nil, err ++ } ++ ++ switch cFirst { ++ case '*': ++ // Pointer type. ++ nestedType, err := typeFromReflectName(currentPkg, expr) ++ if err != nil { ++ return nil, err ++ } ++ return &PointerType{ ++ Type: nestedType, ++ }, nil ++ case '[': ++ // Array or slice type. 
++ arraySize := -1 ++ for { ++ cSize, _, err := expr.ReadRune() ++ if err != nil { ++ return nil, err ++ } ++ switch cSize { ++ case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': ++ if arraySize < 0 { ++ arraySize = int(cSize - '0') ++ } else { ++ arraySize = arraySize*10 + int(cSize-'0') ++ } ++ case ']': ++ nestedType, err := typeFromReflectName(currentPkg, expr) ++ if err != nil { ++ return nil, err ++ } ++ return &ArrayType{ ++ Len: arraySize, ++ Type: nestedType, ++ }, nil ++ default: ++ return nil, fmt.Errorf("encountered invalid character %c in array size", cSize) ++ } ++ } ++ default: ++ // Type name. ++ var typeName strings.Builder ++ typeName.WriteRune(cFirst) ++ var typeParameters []Type ++ for { ++ c, _, err := expr.ReadRune() ++ if err != nil { ++ if err == io.EOF { ++ break ++ } ++ return nil, err ++ } ++ if c == ',' || c == ']' { ++ // End of nested type name. ++ expr.UnreadRune() ++ break ++ } ++ if c == '[' { ++ // Generic type. ++ for { ++ typeParameter, err := typeFromReflectName(currentPkg, expr) ++ if err != nil { ++ return nil, err ++ } ++ typeParameters = append(typeParameters, typeParameter) ++ cSeparator, _, err := expr.ReadRune() ++ if err != nil { ++ return nil, err ++ } ++ if cSeparator == ']' { ++ break ++ } else if cSeparator != ',' { ++ return nil, errors.New("expected comma separator between type parameters") ++ } ++ } ++ break ++ } ++ typeName.WriteRune(c) ++ } ++ ++ typeNameStr := typeName.String() ++ if _, ok := predeclaredTypeNames[typeNameStr]; ok { ++ return PredeclaredType(typeNameStr), nil ++ } ++ ++ dot := strings.LastIndexByte(typeNameStr, '.') ++ if dot >= 0 { ++ // Type name that is preceded by a package name. ++ pkgName := typeNameStr[:dot] ++ if pkgName == path.Base(currentPkg) { ++ pkgName = currentPkg ++ } ++ return &NamedType{ ++ Package: impPath(pkgName), ++ Type: typeNameStr[dot+1:], ++ TypeParams: &TypeParametersType{ ++ TypeParameters: typeParameters, ++ }, ++ }, nil ++ } ++ ++ // Bare type name. 
++ return &NamedType{ ++ Package: impPath(currentPkg), ++ Type: typeNameStr, ++ TypeParams: &TypeParametersType{ ++ TypeParameters: typeParameters, ++ }, ++ }, nil ++ } ++} ++ + func typeFromType(t reflect.Type) (Type, error) { + // Hack workaround for https://golang.org/issue/3853. + // This explicit check should not be necessary. +@@ -412,11 +544,17 @@ func typeFromType(t reflect.Type) (Type, error) { + return PredeclaredType("byte"), nil + } + +- if imp := t.PkgPath(); imp != "" { +- return &NamedType{ +- Package: impPath(imp), +- Type: t.Name(), +- }, nil ++ if currentPkg := t.PkgPath(); currentPkg != "" { ++ name := t.Name() ++ r := strings.NewReader(name) ++ t, err := typeFromReflectName(currentPkg, r) ++ if err != nil { ++ return nil, fmt.Errorf("failed to parse reflection type name %#v: %v", name, err) ++ } ++ if l := r.Len(); l != 0 { ++ return nil, fmt.Errorf("reflection type name %#v has trailing garbage of length %d", name, l) ++ } ++ return t, nil + } + + // only unnamed or predeclared types after here diff --git a/patches/com_github_hanwen_go_fuse_v2/direntrylist-offsets-and-testability.diff b/patches/com_github_hanwen_go_fuse_v2/direntrylist-offsets-and-testability.diff new file mode 100644 index 0000000..8c3cab1 --- /dev/null +++ b/patches/com_github_hanwen_go_fuse_v2/direntrylist-offsets-and-testability.diff @@ -0,0 +1,261 @@ +diff --git fs/bridge.go fs/bridge.go +index 78d8fc9..fd60dcf 100644 +--- fs/bridge.go ++++ fs/bridge.go +@@ -996,7 +996,7 @@ func (b *rawBridge) getStream(ctx context.Context, inode *Inode) (DirStream, sys + return NewListDirStream(r), 0 + } + +-func (b *rawBridge) ReadDir(cancel <-chan struct{}, input *fuse.ReadIn, out *fuse.DirEntryList) fuse.Status { ++func (b *rawBridge) ReadDir(cancel <-chan struct{}, input *fuse.ReadIn, out fuse.ReadDirEntryList) fuse.Status { + n, f := b.inode(input.NodeId, input.Fh) + + f.mu.Lock() +@@ -1016,7 +1016,7 @@ func (b *rawBridge) ReadDir(cancel <-chan struct{}, input *fuse.ReadIn, out *fus 
+ + f.hasOverflow = false + // always succeeds. +- out.AddDirEntry(f.overflow) ++ out.AddDirEntry(f.overflow, f.dirOffset+1) + f.dirOffset++ + } + +@@ -1035,7 +1035,7 @@ func (b *rawBridge) ReadDir(cancel <-chan struct{}, input *fuse.ReadIn, out *fus + } + + first = false +- if !out.AddDirEntry(e) { ++ if !out.AddDirEntry(e, f.dirOffset+1) { + f.overflow = e + f.hasOverflow = true + return errnoToStatus(errno) +@@ -1046,7 +1046,7 @@ func (b *rawBridge) ReadDir(cancel <-chan struct{}, input *fuse.ReadIn, out *fus + return fuse.OK + } + +-func (b *rawBridge) ReadDirPlus(cancel <-chan struct{}, input *fuse.ReadIn, out *fuse.DirEntryList) fuse.Status { ++func (b *rawBridge) ReadDirPlus(cancel <-chan struct{}, input *fuse.ReadIn, out fuse.ReadDirPlusEntryList) fuse.Status { + n, f := b.inode(input.NodeId, input.Fh) + + f.mu.Lock() +@@ -1085,7 +1085,7 @@ func (b *rawBridge) ReadDirPlus(cancel <-chan struct{}, input *fuse.ReadIn, out + } + first = false + +- entryOut := out.AddDirLookupEntry(e) ++ entryOut := out.AddDirLookupEntry(e, f.dirOffset+1) + if entryOut == nil { + f.overflow = e + f.hasOverflow = true +diff --git fuse/api.go fuse/api.go +index a0ec84f..3522124 100644 +--- fuse/api.go ++++ fuse/api.go +@@ -381,8 +381,8 @@ type RawFileSystem interface { + + // Directory handling + OpenDir(cancel <-chan struct{}, input *OpenIn, out *OpenOut) (status Status) +- ReadDir(cancel <-chan struct{}, input *ReadIn, out *DirEntryList) Status +- ReadDirPlus(cancel <-chan struct{}, input *ReadIn, out *DirEntryList) Status ++ ReadDir(cancel <-chan struct{}, input *ReadIn, out ReadDirEntryList) Status ++ ReadDirPlus(cancel <-chan struct{}, input *ReadIn, out ReadDirPlusEntryList) Status + ReleaseDir(input *ReleaseIn) + FsyncDir(cancel <-chan struct{}, input *FsyncIn) (code Status) + +diff --git fuse/defaultraw.go fuse/defaultraw.go +index df109bd..edb0629 100644 +--- fuse/defaultraw.go ++++ fuse/defaultraw.go +@@ -140,11 +140,11 @@ func (fs *defaultRawFileSystem) Fsync(cancel 
<-chan struct{}, input *FsyncIn) (c + return ENOSYS + } + +-func (fs *defaultRawFileSystem) ReadDir(cancel <-chan struct{}, input *ReadIn, l *DirEntryList) Status { ++func (fs *defaultRawFileSystem) ReadDir(cancel <-chan struct{}, input *ReadIn, l ReadDirEntryList) Status { + return ENOSYS + } + +-func (fs *defaultRawFileSystem) ReadDirPlus(cancel <-chan struct{}, input *ReadIn, l *DirEntryList) Status { ++func (fs *defaultRawFileSystem) ReadDirPlus(cancel <-chan struct{}, input *ReadIn, l ReadDirPlusEntryList) Status { + return ENOSYS + } + +diff --git fuse/direntry.go fuse/direntry.go +index ee824f1..665a690 100644 +--- fuse/direntry.go ++++ fuse/direntry.go +@@ -39,35 +39,28 @@ type DirEntryList struct { + buf []byte + // capacity of the underlying buffer + size int +- // offset is the requested location in the directory. go-fuse +- // currently counts in number of directory entries, but this is an +- // implementation detail and may change in the future. +- // If `offset` and `fs.fileEntry.dirOffset` disagree, then a +- // directory seek has taken place. +- offset uint64 + // pointer to the last serialized _Dirent. Used by FixMode(). + lastDirent *_Dirent + } + + // NewDirEntryList creates a DirEntryList with the given data buffer + // and offset. +-func NewDirEntryList(data []byte, off uint64) *DirEntryList { ++func NewDirEntryList(data []byte) *DirEntryList { + return &DirEntryList{ + buf: data[:0], + size: len(data), +- offset: off, + } + } + + // AddDirEntry tries to add an entry, and reports whether it + // succeeded. +-func (l *DirEntryList) AddDirEntry(e DirEntry) bool { +- return l.Add(0, e.Name, e.Ino, e.Mode) ++func (l *DirEntryList) AddDirEntry(e DirEntry, off uint64) bool { ++ return l.add(0, e.Name, e.Ino, e.Mode, off) + } + + // Add adds a direntry to the DirEntryList, returning whether it + // succeeded. 
+-func (l *DirEntryList) Add(prefix int, name string, inode uint64, mode uint32) bool { ++func (l *DirEntryList) add(prefix int, name string, inode uint64, mode uint32, off uint64) bool { + if inode == 0 { + inode = FUSE_UNKNOWN_INO + } +@@ -82,7 +75,7 @@ func (l *DirEntryList) Add(prefix int, name string, inode uint64, mode uint32) b + l.buf = l.buf[:newLen] + oldLen += prefix + dirent := (*_Dirent)(unsafe.Pointer(&l.buf[oldLen])) +- dirent.Off = l.offset + 1 ++ dirent.Off = off + dirent.Ino = inode + dirent.NameLen = uint32(len(name)) + dirent.Typ = modeToType(mode) +@@ -94,7 +87,6 @@ func (l *DirEntryList) Add(prefix int, name string, inode uint64, mode uint32) b + copy(l.buf[oldLen:], eightPadding[:padding]) + } + +- l.offset = dirent.Off + return true + } + +@@ -109,10 +101,10 @@ func (l *DirEntryList) Add(prefix int, name string, inode uint64, mode uint32) b + // 3) Name (null-terminated) + // 4) Padding to align to 8 bytes + // [repeat] +-func (l *DirEntryList) AddDirLookupEntry(e DirEntry) *EntryOut { ++func (l *DirEntryList) AddDirLookupEntry(e DirEntry, off uint64) *EntryOut { + const entryOutSize = int(unsafe.Sizeof(EntryOut{})) + oldLen := len(l.buf) +- ok := l.Add(entryOutSize, e.Name, e.Ino, e.Mode) ++ ok := l.add(entryOutSize, e.Name, e.Ino, e.Mode, off) + if !ok { + return nil + } +@@ -139,3 +131,13 @@ func (l *DirEntryList) FixMode(mode uint32) { + func (l *DirEntryList) bytes() []byte { + return l.buf + } ++ ++type ReadDirEntryList interface { ++ AddDirEntry(e DirEntry, off uint64) bool ++ FixMode(mode uint32) ++} ++ ++type ReadDirPlusEntryList interface { ++ AddDirLookupEntry(e DirEntry, off uint64) *EntryOut ++ FixMode(mode uint32) ++} +diff --git fuse/nodefs/dir.go fuse/nodefs/dir.go +index 75cf75a..5440333 100644 +--- fuse/nodefs/dir.go ++++ fuse/nodefs/dir.go +@@ -22,7 +22,7 @@ type connectorDir struct { + stream []fuse.DirEntry + } + +-func (d *connectorDir) ReadDir(cancel <-chan struct{}, input *fuse.ReadIn, out *fuse.DirEntryList) (code 
fuse.Status) { ++func (d *connectorDir) ReadDir(cancel <-chan struct{}, input *fuse.ReadIn, out fuse.ReadDirEntryList) (code fuse.Status) { + d.mu.Lock() + defer d.mu.Unlock() + +@@ -54,7 +54,7 @@ func (d *connectorDir) ReadDir(cancel <-chan struct{}, input *fuse.ReadIn, out * + log.Printf("got empty directory entry, mode %o.", e.Mode) + continue + } +- ok := out.AddDirEntry(e) ++ ok := out.AddDirEntry(e, input.Offset+1) + if !ok { + break + } +@@ -62,7 +62,7 @@ func (d *connectorDir) ReadDir(cancel <-chan struct{}, input *fuse.ReadIn, out * + return fuse.OK + } + +-func (d *connectorDir) ReadDirPlus(cancel <-chan struct{}, input *fuse.ReadIn, out *fuse.DirEntryList) (code fuse.Status) { ++func (d *connectorDir) ReadDirPlus(cancel <-chan struct{}, input *fuse.ReadIn, out fuse.ReadDirPlusEntryList) (code fuse.Status) { + d.mu.Lock() + defer d.mu.Unlock() + +@@ -91,7 +91,7 @@ func (d *connectorDir) ReadDirPlus(cancel <-chan struct{}, input *fuse.ReadIn, o + + // we have to be sure entry will fit if we try to add + // it, or we'll mess up the lookup counts. 
+- entryDest := out.AddDirLookupEntry(e) ++ entryDest := out.AddDirLookupEntry(e, input.Offset+1) + if entryDest == nil { + break + } +@@ -108,6 +108,6 @@ func (d *connectorDir) ReadDirPlus(cancel <-chan struct{}, input *fuse.ReadIn, o + } + + type rawDir interface { +- ReadDir(out *fuse.DirEntryList, input *fuse.ReadIn, c *fuse.Context) fuse.Status +- ReadDirPlus(out *fuse.DirEntryList, input *fuse.ReadIn, c *fuse.Context) fuse.Status ++ ReadDir(out fuse.ReadDirEntryList, input *fuse.ReadIn, c *fuse.Context) fuse.Status ++ ReadDirPlus(out fuse.ReadDirPlusEntryList, input *fuse.ReadIn, c *fuse.Context) fuse.Status + } +diff --git fuse/nodefs/fsops.go fuse/nodefs/fsops.go +index 58b4a5e..f89a74a 100644 +--- fuse/nodefs/fsops.go ++++ fuse/nodefs/fsops.go +@@ -172,13 +172,13 @@ func (c *rawBridge) OpenDir(cancel <-chan struct{}, input *fuse.OpenIn, out *fus + return fuse.OK + } + +-func (c *rawBridge) ReadDir(cancel <-chan struct{}, input *fuse.ReadIn, out *fuse.DirEntryList) fuse.Status { ++func (c *rawBridge) ReadDir(cancel <-chan struct{}, input *fuse.ReadIn, out fuse.ReadDirEntryList) fuse.Status { + node := c.toInode(input.NodeId) + opened := node.mount.getOpenedFile(input.Fh) + return opened.dir.ReadDir(cancel, input, out) + } + +-func (c *rawBridge) ReadDirPlus(cancel <-chan struct{}, input *fuse.ReadIn, out *fuse.DirEntryList) fuse.Status { ++func (c *rawBridge) ReadDirPlus(cancel <-chan struct{}, input *fuse.ReadIn, out fuse.ReadDirPlusEntryList) fuse.Status { + node := c.toInode(input.NodeId) + opened := node.mount.getOpenedFile(input.Fh) + return opened.dir.ReadDirPlus(cancel, input, out) +diff --git fuse/opcode.go fuse/opcode.go +index 7b72cb6..b8aba60 100644 +--- fuse/opcode.go ++++ fuse/opcode.go +@@ -186,7 +186,7 @@ func doCreate(server *Server, req *request) { + func doReadDir(server *Server, req *request) { + in := (*ReadIn)(req.inData) + buf := server.allocOut(req, in.Size) +- out := NewDirEntryList(buf, uint64(in.Offset)) ++ out := 
NewDirEntryList(buf) + + code := server.fileSystem.ReadDir(req.cancel, in, out) + req.flatData = out.bytes() +@@ -196,7 +196,7 @@ func doReadDir(server *Server, req *request) { + func doReadDirPlus(server *Server, req *request) { + in := (*ReadIn)(req.inData) + buf := server.allocOut(req, in.Size) +- out := NewDirEntryList(buf, uint64(in.Offset)) ++ out := NewDirEntryList(buf) + + code := server.fileSystem.ReadDirPlus(req.cancel, in, out) + req.flatData = out.bytes() diff --git a/patches/com_github_hanwen_go_fuse_v2/notify-testability.diff b/patches/com_github_hanwen_go_fuse_v2/notify-testability.diff new file mode 100644 index 0000000..1901d2c --- /dev/null +++ b/patches/com_github_hanwen_go_fuse_v2/notify-testability.diff @@ -0,0 +1,126 @@ +diff --git fs/api.go fs/api.go +index 4f4fe92..08a2772 100644 +--- fs/api.go ++++ fs/api.go +@@ -603,7 +603,7 @@ type Options struct { + + // ServerCallbacks can be provided to stub out notification + // functions for testing a filesystem without mounting it. +- ServerCallbacks ServerCallbacks ++ ServerCallbacks fuse.ServerCallbacks + + // Logger is a sink for diagnostic messages. Diagnostic + // messages are printed under conditions where we cannot +diff --git fs/bridge.go fs/bridge.go +index a747018..94b341a 100644 +--- fs/bridge.go ++++ fs/bridge.go +@@ -43,21 +43,10 @@ type fileEntry struct { + wg sync.WaitGroup + } + +-// ServerCallbacks are calls into the kernel to manipulate the inode, +-// entry and page cache. They are stubbed so filesystems can be +-// unittested without mounting them. 
+-type ServerCallbacks interface { +- DeleteNotify(parent uint64, child uint64, name string) fuse.Status +- EntryNotify(parent uint64, name string) fuse.Status +- InodeNotify(node uint64, off int64, length int64) fuse.Status +- InodeRetrieveCache(node uint64, offset int64, dest []byte) (n int, st fuse.Status) +- InodeNotifyStoreCache(node uint64, offset int64, data []byte) fuse.Status +-} +- + type rawBridge struct { + options Options + root *Inode +- server ServerCallbacks ++ server fuse.ServerCallbacks + + // mu protects the following data. Locks for inodes must be + // taken before rawBridge.mu +@@ -1100,7 +1089,7 @@ func (b *rawBridge) StatFs(cancel <-chan struct{}, input *fuse.InHeader, out *fu + return fuse.OK + } + +-func (b *rawBridge) Init(s *fuse.Server) { ++func (b *rawBridge) Init(s fuse.ServerCallbacks) { + b.server = s + } + +diff --git fuse/api.go fuse/api.go +index 433e5ff..93027b3 100644 +--- fuse/api.go ++++ fuse/api.go +@@ -236,6 +236,17 @@ type MountOptions struct { + DisableReadDirPlus bool + } + ++// ServerCallbacks are calls into the kernel to manipulate the inode, ++// entry and page cache. They are stubbed so filesystems can be ++// unittested without mounting them. ++type ServerCallbacks interface { ++ DeleteNotify(parent uint64, child uint64, name string) Status ++ EntryNotify(parent uint64, name string) Status ++ InodeNotify(node uint64, off int64, length int64) Status ++ InodeRetrieveCache(node uint64, offset int64, dest []byte) (n int, st Status) ++ InodeNotifyStoreCache(node uint64, offset int64, data []byte) Status ++} ++ + // RawFileSystem is an interface close to the FUSE wire protocol. + // + // Unless you really know what you are doing, you should not implement +@@ -346,5 +357,5 @@ type RawFileSystem interface { + // This is called on processing the first request. The + // filesystem implementation can use the server argument to + // talk back to the kernel (through notify methods). 
+- Init(*Server) ++ Init(ServerCallbacks) + } +diff --git fuse/defaultraw.go fuse/defaultraw.go +index edb0629..3f30e00 100644 +--- fuse/defaultraw.go ++++ fuse/defaultraw.go +@@ -16,7 +16,7 @@ func NewDefaultRawFileSystem() RawFileSystem { + + type defaultRawFileSystem struct{} + +-func (fs *defaultRawFileSystem) Init(*Server) { ++func (fs *defaultRawFileSystem) Init(ServerCallbacks) { + } + + func (fs *defaultRawFileSystem) String() string { +diff --git fuse/nodefs/fsconnector.go fuse/nodefs/fsconnector.go +index 25945ff..351b802 100644 +--- fuse/nodefs/fsconnector.go ++++ fuse/nodefs/fsconnector.go +@@ -29,7 +29,7 @@ type FileSystemConnector struct { + debug bool + + // Callbacks for talking back to the kernel. +- server *fuse.Server ++ server fuse.ServerCallbacks + + // Translate between uint64 handles and *Inode. + inodeMap handleMap +@@ -81,7 +81,7 @@ func NewFileSystemConnector(root Node, opts *Options) (c *FileSystemConnector) { + } + + // Server returns the fuse.Server that talking to the kernel. +-func (c *FileSystemConnector) Server() *fuse.Server { ++func (c *FileSystemConnector) Server() fuse.ServerCallbacks { + return c.server + } + +diff --git fuse/nodefs/fsops.go fuse/nodefs/fsops.go +index f89a74a..bafc489 100644 +--- fuse/nodefs/fsops.go ++++ fuse/nodefs/fsops.go +@@ -56,7 +56,7 @@ func (c *rawBridge) String() string { + return name + } + +-func (c *rawBridge) Init(s *fuse.Server) { ++func (c *rawBridge) Init(s fuse.ServerCallbacks) { + c.server = s + c.rootNode.Node().OnMount((*FileSystemConnector)(c)) + } diff --git a/patches/com_github_hanwen_go_fuse_v2/writeback-cache.diff b/patches/com_github_hanwen_go_fuse_v2/writeback-cache.diff new file mode 100644 index 0000000..d6abcd0 --- /dev/null +++ b/patches/com_github_hanwen_go_fuse_v2/writeback-cache.diff @@ -0,0 +1,32 @@ +diff --git fuse/api.go fuse/api.go +index a0ec84f..5b7a8ab 100644 +--- fuse/api.go ++++ fuse/api.go +@@ -249,6 +249,13 @@ type MountOptions struct { + // for more details. 
+ SyncRead bool + ++ // Let the kernel use the writeback cache mode, as opposed to ++ // write-through mode. ++ // ++ // See the following page for more details: ++ // https://www.kernel.org/doc/Documentation/filesystems/fuse-io.txt ++ EnableWritebackCache bool ++ + // If set, fuse will first attempt to use syscall.Mount instead of + // fusermount to mount the filesystem. This will not update /etc/mtab + // but might be needed if fusermount is not available. +diff --git fuse/opcode.go fuse/opcode.go +index 7b72cb6..e1ff6f2 100644 +--- fuse/opcode.go ++++ fuse/opcode.go +@@ -115,6 +115,9 @@ func doInit(server *Server, req *request) { + // Clear CAP_READDIRPLUS + server.kernelSettings.Flags &= ^uint32(CAP_READDIRPLUS) + } ++ if server.opts.EnableWritebackCache { ++ server.kernelSettings.Flags |= CAP_WRITEBACK_CACHE ++ } + + dataCacheMode := input.Flags & CAP_AUTO_INVAL_DATA + if server.opts.ExplicitDataCacheControl { diff --git a/patches/io_bazel_rules_go/tags-manual.diff b/patches/io_bazel_rules_go/tags-manual.diff new file mode 100644 index 0000000..4c7c49f --- /dev/null +++ b/patches/io_bazel_rules_go/tags-manual.diff @@ -0,0 +1,19 @@ +diff --git extras/gomock.bzl extras/gomock.bzl +index 7e960eae..47799dfb 100644 +--- extras/gomock.bzl ++++ extras/gomock.bzl +@@ -215,12 +215,14 @@ def _gomock_reflect(name, library, out, mockgen_tool, **kwargs): + library = library, + out = prog_src_out, + mockgen_tool = mockgen_tool, ++ tags = ["manual"], + ) + prog_bin = name + "_gomock_prog_bin" + go_binary( + name = prog_bin, + srcs = [prog_src_out], + deps = [library, mockgen_model_lib], ++ tags = ["manual"], + ) + _gomock_prog_exec( + name = name, diff --git a/pkg/blobstore/BUILD.bazel b/pkg/blobstore/BUILD.bazel new file mode 100644 index 0000000..0f9ed04 --- /dev/null +++ b/pkg/blobstore/BUILD.bazel @@ -0,0 +1,56 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "blobstore", + srcs = [ + "batched_store_blob_access.go", + 
"blob_access_mutable_proto_store.go", + "existence_precondition_blob_access.go", + "mutable_proto_store.go", + "suspending_blob_access.go", + ], + importpath = "github.com/buildbarn/bb-remote-execution/pkg/blobstore", + visibility = ["//visibility:public"], + deps = [ + "//pkg/clock", + "@com_github_bazelbuild_remote_apis//build/bazel/remote/execution/v2:execution", + "@com_github_buildbarn_bb_storage//pkg/blobstore", + "@com_github_buildbarn_bb_storage//pkg/blobstore/buffer", + "@com_github_buildbarn_bb_storage//pkg/blobstore/slicing", + "@com_github_buildbarn_bb_storage//pkg/digest", + "@com_github_buildbarn_bb_storage//pkg/util", + "@com_github_prometheus_client_golang//prometheus", + "@org_golang_google_genproto_googleapis_rpc//errdetails", + "@org_golang_google_grpc//codes", + "@org_golang_google_grpc//status", + "@org_golang_google_protobuf//proto", + "@org_golang_x_sync//errgroup", + "@org_golang_x_sync//semaphore", + ], +) + +go_test( + name = "blobstore_test", + srcs = [ + "batched_store_blob_access_test.go", + "blob_access_mutable_proto_store_test.go", + "existence_precondition_blob_access_test.go", + "suspending_blob_access_test.go", + ], + deps = [ + ":blobstore", + "//internal/mock", + "@com_github_bazelbuild_remote_apis//build/bazel/remote/execution/v2:execution", + "@com_github_buildbarn_bb_storage//pkg/blobstore/buffer", + "@com_github_buildbarn_bb_storage//pkg/digest", + "@com_github_buildbarn_bb_storage//pkg/proto/iscc", + "@com_github_buildbarn_bb_storage//pkg/testutil", + "@com_github_golang_mock//gomock", + "@com_github_stretchr_testify//require", + "@org_golang_google_genproto_googleapis_rpc//errdetails", + "@org_golang_google_grpc//codes", + "@org_golang_google_grpc//status", + "@org_golang_google_protobuf//types/known/timestamppb", + "@org_golang_x_sync//semaphore", + ], +) diff --git a/pkg/blobstore/batched_store_blob_access.go b/pkg/blobstore/batched_store_blob_access.go new file mode 100644 index 0000000..5207bac --- /dev/null +++ 
b/pkg/blobstore/batched_store_blob_access.go @@ -0,0 +1,131 @@ +package blobstore + +import ( + "context" + "sync" + + "github.com/buildbarn/bb-storage/pkg/blobstore" + "github.com/buildbarn/bb-storage/pkg/blobstore/buffer" + "github.com/buildbarn/bb-storage/pkg/digest" + "github.com/buildbarn/bb-storage/pkg/util" + + "golang.org/x/sync/errgroup" + "golang.org/x/sync/semaphore" +) + +type pendingPutOperation struct { + digest digest.Digest + b buffer.Buffer +} + +type batchedStoreBlobAccess struct { + blobstore.BlobAccess + blobKeyFormat digest.KeyFormat + batchSize int + putSemaphore *semaphore.Weighted + + lock sync.Mutex + pendingPutOperations map[string]pendingPutOperation + flushError error +} + +// NewBatchedStoreBlobAccess is an adapter for BlobAccess that causes +// Put() operations to be enqueued. When a sufficient number of +// operations are enqueued, a FindMissing() call is generated to +// determine which blobs actually need to be stored. Writes for blobs +// with the same digest are merged. +// +// This adapter may be used by the worker to speed up the uploading +// phase of actions. +func NewBatchedStoreBlobAccess(blobAccess blobstore.BlobAccess, blobKeyFormat digest.KeyFormat, batchSize int, putSemaphore *semaphore.Weighted) (blobstore.BlobAccess, func(ctx context.Context) error) { + ba := &batchedStoreBlobAccess{ + BlobAccess: blobAccess, + blobKeyFormat: blobKeyFormat, + batchSize: batchSize, + pendingPutOperations: map[string]pendingPutOperation{}, + putSemaphore: putSemaphore, + } + return ba, func(ctx context.Context) error { + ba.lock.Lock() + defer ba.lock.Unlock() + + // Flush last batch of blobs. Return any errors that occurred. + ba.flushLocked(ctx) + err := ba.flushError + ba.flushError = nil + return err + } +} + +func (ba *batchedStoreBlobAccess) flushLocked(ctx context.Context) { + // Ensure that all pending blobs are closed upon termination. 
+ defer func() { + for _, pendingPutOperation := range ba.pendingPutOperations { + pendingPutOperation.b.Discard() + } + ba.pendingPutOperations = map[string]pendingPutOperation{} + }() + + // Determine which blobs are missing. + digests := digest.NewSetBuilder() + for _, pendingPutOperation := range ba.pendingPutOperations { + digests.Add(pendingPutOperation.digest) + } + missing, err := ba.BlobAccess.FindMissing(ctx, digests.Build()) + if err != nil { + ba.flushError = util.StatusWrap(err, "Failed to determine existence of previous batch of blobs") + return + } + + // Upload the missing ones. + group, groupCtx := errgroup.WithContext(ctx) + for _, digest := range missing.Items() { + key := digest.GetKey(ba.blobKeyFormat) + if pendingPutOperation, ok := ba.pendingPutOperations[key]; ok { + if groupCtx.Err() != nil || ba.putSemaphore.Acquire(groupCtx, 1) != nil { + break + } + delete(ba.pendingPutOperations, key) + group.Go(func() error { + err := ba.BlobAccess.Put(groupCtx, pendingPutOperation.digest, pendingPutOperation.b) + ba.putSemaphore.Release(1) + if err != nil { + return util.StatusWrapf(err, "Failed to store previous blob %s", pendingPutOperation.digest) + } + return nil + }) + } + } + if err := group.Wait(); err != nil { + ba.flushError = err + } else if err := util.StatusFromContext(ctx); err != nil { + ba.flushError = err + } +} + +func (ba *batchedStoreBlobAccess) Put(ctx context.Context, digest digest.Digest, b buffer.Buffer) error { + ba.lock.Lock() + defer ba.lock.Unlock() + + // Discard duplicate writes. + key := digest.GetKey(ba.blobKeyFormat) + if _, ok := ba.pendingPutOperations[key]; ok { + b.Discard() + return nil + } + + // Flush the existing blobs if there are too many pending. 
+ if len(ba.pendingPutOperations) >= ba.batchSize { + ba.flushLocked(ctx) + } + if err := ba.flushError; err != nil { + b.Discard() + return err + } + + ba.pendingPutOperations[key] = pendingPutOperation{ + digest: digest, + b: b, + } + return nil +} diff --git a/pkg/blobstore/batched_store_blob_access_test.go b/pkg/blobstore/batched_store_blob_access_test.go new file mode 100644 index 0000000..d9fbe08 --- /dev/null +++ b/pkg/blobstore/batched_store_blob_access_test.go @@ -0,0 +1,193 @@ +package blobstore_test + +import ( + "context" + "testing" + + remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + "github.com/buildbarn/bb-remote-execution/internal/mock" + "github.com/buildbarn/bb-remote-execution/pkg/blobstore" + "github.com/buildbarn/bb-storage/pkg/blobstore/buffer" + "github.com/buildbarn/bb-storage/pkg/digest" + "github.com/buildbarn/bb-storage/pkg/testutil" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" + + "golang.org/x/sync/semaphore" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func TestBatchedStoreBlobAccessSuccess(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + baseBlobAccess := mock.NewMockBlobAccess(ctrl) + putSemaphore := semaphore.NewWeighted(1) + blobAccess, flush := blobstore.NewBatchedStoreBlobAccess(baseBlobAccess, digest.KeyWithoutInstance, 2, putSemaphore) + + // Empty calls to FindMissing() may be generated at any point in + // time. It is up to the storage backend to filter those out. + baseBlobAccess.EXPECT().FindMissing(ctx, digest.EmptySet).Return(digest.EmptySet, nil).AnyTimes() + + // We should be able to enqueue requests for up to two blobs + // without generating any calls on the storage backend. 
+ digestEmpty := digest.MustNewDigest( + "default", + remoteexecution.DigestFunction_MD5, + "d41d8cd98f00b204e9800998ecf8427e", + 0) + for i := 0; i < 10; i++ { + require.NoError(t, blobAccess.Put(ctx, digestEmpty, buffer.NewValidatedBufferFromByteSlice(nil))) + } + + digestHello := digest.MustNewDigest( + "default", + remoteexecution.DigestFunction_MD5, + "8b1a9953c4611296a827abf8c47804d7", + 5) + for i := 0; i < 10; i++ { + require.NoError(t, blobAccess.Put(ctx, digestHello, buffer.NewValidatedBufferFromByteSlice([]byte("Hello")))) + } + + // Attempting to store a third blob should cause the first two + // blobs to be flushed. + baseBlobAccess.EXPECT().FindMissing( + ctx, + digest.NewSetBuilder().Add(digestHello).Add(digestEmpty).Build()).Return( + digest.NewSetBuilder().Add(digestHello).Build(), nil) + baseBlobAccess.EXPECT().Put(gomock.Any(), digestHello, gomock.Any()).DoAndReturn( + func(ctx context.Context, digest digest.Digest, b buffer.Buffer) error { + data, err := b.ToByteSlice(100) + require.NoError(t, err) + require.Equal(t, []byte("Hello"), data) + return nil + }) + + digestGoodbye := digest.MustNewDigest( + "default", + remoteexecution.DigestFunction_MD5, + "6fc422233a40a75a1f028e11c3cd1140", + 7) + require.NoError(t, blobAccess.Put(ctx, digestGoodbye, buffer.NewValidatedBufferFromByteSlice([]byte("Goodbye")))) + + // Flushing should cause the third blob to be written. + baseBlobAccess.EXPECT().FindMissing( + ctx, + digest.NewSetBuilder().Add(digestGoodbye).Build()).Return( + digest.NewSetBuilder().Add(digestGoodbye).Build(), nil) + baseBlobAccess.EXPECT().Put(gomock.Any(), digestGoodbye, gomock.Any()).DoAndReturn( + func(ctx context.Context, digest digest.Digest, b buffer.Buffer) error { + data, err := b.ToByteSlice(100) + require.NoError(t, err) + require.Equal(t, []byte("Goodbye"), data) + return nil + }) + + require.NoError(t, flush(ctx)) + + // Flushing redundantly should have no longer have any effect. 
+ require.NoError(t, flush(ctx)) +} + +func TestBatchedStoreBlobAccessFailure(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + baseBlobAccess := mock.NewMockBlobAccess(ctrl) + putSemaphore := semaphore.NewWeighted(1) + blobAccess, flush := blobstore.NewBatchedStoreBlobAccess(baseBlobAccess, digest.KeyWithoutInstance, 2, putSemaphore) + + // Empty calls to FindMissing() may be generated at any point in + // time. It is up to the storage backend to filter those out. + baseBlobAccess.EXPECT().FindMissing(ctx, digest.EmptySet).Return(digest.EmptySet, nil).AnyTimes() + + // We should be able to enqueue requests for up to two blobs + // without generating any calls on the storage backend. + digestEmpty := digest.MustNewDigest( + "default", + remoteexecution.DigestFunction_MD5, + "d41d8cd98f00b204e9800998ecf8427e", + 0) + for i := 0; i < 10; i++ { + require.NoError(t, blobAccess.Put(ctx, digestEmpty, buffer.NewValidatedBufferFromByteSlice(nil))) + } + + digestHello := digest.MustNewDigest( + "default", + remoteexecution.DigestFunction_MD5, + "8b1a9953c4611296a827abf8c47804d7", + 5) + for i := 0; i < 10; i++ { + require.NoError(t, blobAccess.Put(ctx, digestHello, buffer.NewValidatedBufferFromByteSlice([]byte("Hello")))) + } + + // Attempting to store a third blob should cause the first two + // blobs to be flushed. Due to an I/O failure, we should switch + // to an error state in which we no longer perform I/O until + // flushed. 
+ baseBlobAccess.EXPECT().FindMissing( + ctx, + digest.NewSetBuilder().Add(digestHello).Add(digestEmpty).Build()).Return( + digest.NewSetBuilder().Add(digestHello).Build(), nil) + baseBlobAccess.EXPECT().Put( + gomock.Any(), digestHello, gomock.Any(), + ).DoAndReturn(func(ctx context.Context, digest digest.Digest, b buffer.Buffer) error { + data, err := b.ToByteSlice(100) + require.NoError(t, err) + require.Equal(t, []byte("Hello"), data) + return status.Error(codes.Internal, "Storage backend on fire") + }) + + digestGoodbye := digest.MustNewDigest( + "default", + remoteexecution.DigestFunction_MD5, + "6fc422233a40a75a1f028e11c3cd1140", + 7) + testutil.RequireEqualStatus( + t, + status.Error(codes.Internal, "Failed to store previous blob 3-8b1a9953c4611296a827abf8c47804d7-5-default: Storage backend on fire"), + blobAccess.Put(ctx, digestGoodbye, buffer.NewValidatedBufferFromByteSlice([]byte("Goodbye")))) + + // Future requests to store blobs should be discarded + // immediately, returning same error. + testutil.RequireEqualStatus( + t, + status.Error(codes.Internal, "Failed to store previous blob 3-8b1a9953c4611296a827abf8c47804d7-5-default: Storage backend on fire"), + blobAccess.Put(ctx, digestGoodbye, buffer.NewValidatedBufferFromByteSlice([]byte("Goodbye")))) + + // Flushing should not cause any requests on the backend, due to + // it being in the error state. It should return the error that + // caused it to go into the error state. + testutil.RequireEqualStatus( + t, + status.Error(codes.Internal, "Failed to store previous blob 3-8b1a9953c4611296a827abf8c47804d7-5-default: Storage backend on fire"), + flush(ctx)) + + // Successive stores and flushes should be functional once again. 
+ require.NoError(t, blobAccess.Put(ctx, digestGoodbye, buffer.NewValidatedBufferFromByteSlice([]byte("Goodbye")))) + baseBlobAccess.EXPECT().FindMissing(ctx, digest.NewSetBuilder().Add(digestGoodbye).Build()).Return(digest.EmptySet, nil) + require.NoError(t, flush(ctx)) +} + +func TestBatchedStoreBlobAccessCanceledWhileWaitingOnSemaphore(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + baseBlobAccess := mock.NewMockBlobAccess(ctrl) + putSemaphore := semaphore.NewWeighted(0) + blobAccess, flush := blobstore.NewBatchedStoreBlobAccess(baseBlobAccess, digest.KeyWithoutInstance, 2, putSemaphore) + + // Enqueue a blob for writing. + digestHello := digest.MustNewDigest("default", remoteexecution.DigestFunction_MD5, "8b1a9953c4611296a827abf8c47804d7", 5) + reader := mock.NewMockFileReader(ctrl) + require.NoError(t, blobAccess.Put(ctx, digestHello, buffer.NewValidatedBufferFromReaderAt(reader, 5))) + + // Flushing it should attempt to write it. Because the semaphore + // is set to zero, there is no capacity to do this. As we're + // using a context that is canceled, this should not cause + // flushing to block. 
+ ctxCanceled, cancel := context.WithCancel(ctx) + cancel() + baseBlobAccess.EXPECT().FindMissing(ctxCanceled, digestHello.ToSingletonSet()).Return(digestHello.ToSingletonSet(), nil) + reader.EXPECT().Close() + + testutil.RequireEqualStatus(t, status.Error(codes.Canceled, "context canceled"), flush(ctxCanceled)) +} diff --git a/pkg/blobstore/blob_access_mutable_proto_store.go b/pkg/blobstore/blob_access_mutable_proto_store.go new file mode 100644 index 0000000..6c741c2 --- /dev/null +++ b/pkg/blobstore/blob_access_mutable_proto_store.go @@ -0,0 +1,283 @@ +package blobstore + +import ( + "context" + "sync" + + "github.com/buildbarn/bb-storage/pkg/blobstore" + "github.com/buildbarn/bb-storage/pkg/blobstore/buffer" + "github.com/buildbarn/bb-storage/pkg/digest" + "github.com/buildbarn/bb-storage/pkg/util" + "github.com/prometheus/client_golang/prometheus" + + "golang.org/x/sync/errgroup" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" +) + +var ( + blobAccessMutableProtoHandleMetrics sync.Once + + blobAccessMutableProtoHandlesCreated = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "buildbarn", + Subsystem: "blobstore", + Name: "blob_access_mutable_proto_handles_created_total", + Help: "Number of mutable Protobuf message handles that were created during Get()", + }) + blobAccessMutableProtoHandlesDestroyed = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "buildbarn", + Subsystem: "blobstore", + Name: "blob_access_mutable_proto_handles_destroyed_total", + Help: "Number of mutable Protobuf message handles that were destroyed", + }) + blobAccessMutableProtoHandlesDequeued = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "buildbarn", + Subsystem: "blobstore", + Name: "blob_access_mutable_proto_handles_dequeued_total", + Help: "Number of mutable Protobuf message handles that were dequeued for writing during Get()", + }) + + blobAccessMutableProtoHandlesQueued = 
prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "buildbarn", + Subsystem: "blobstore", + Name: "blob_access_mutable_proto_handles_queued_total", + Help: "Number of mutable Protobuf message handles that were queued for writing", + }) +) + +type blobAccessMutableProtoStore[T any, TProto interface { + *T + proto.Message +}] struct { + initialSizeClassCache blobstore.BlobAccess + maximumMessageSizeBytes int + + lock sync.Mutex + handles map[digest.Digest]*blobAccessMutableProtoHandle[T, TProto] + handlesToWrite []*blobAccessMutableProtoHandle[T, TProto] +} + +// NewBlobAccessMutableProtoStore creates an instance of +// MutableProtoStore that is backed by BlobAccess. +// +// What makes this interface harder to implement is that releasing +// MutableProtoHandle is performed while holding locks. We can't block, +// nor can we propagate errors or perform retries. To solve this, this +// implementation keeps track of a list of all handles that need to be +// written. Every time a handle is created, we write a couple of +// released handles back to storage. This ensures that the number of +// handles remains proportional to actual use. 
+func NewBlobAccessMutableProtoStore[T any, TProto interface { + *T + proto.Message +}](initialSizeClassCache blobstore.BlobAccess, maximumMessageSizeBytes int) MutableProtoStore[TProto] { + blobAccessMutableProtoHandleMetrics.Do(func() { + prometheus.MustRegister(blobAccessMutableProtoHandlesCreated) + prometheus.MustRegister(blobAccessMutableProtoHandlesDestroyed) + prometheus.MustRegister(blobAccessMutableProtoHandlesDequeued) + prometheus.MustRegister(blobAccessMutableProtoHandlesQueued) + }) + + return &blobAccessMutableProtoStore[T, TProto]{ + initialSizeClassCache: initialSizeClassCache, + maximumMessageSizeBytes: maximumMessageSizeBytes, + handles: map[digest.Digest]*blobAccessMutableProtoHandle[T, TProto]{}, + } +} + +type handleToWrite[T any, TProto interface { + *T + proto.Message +}] struct { + handle *blobAccessMutableProtoHandle[T, TProto] + message proto.Message + writingVersion int +} + +func (ss *blobAccessMutableProtoStore[T, TProto]) Get(ctx context.Context, reducedActionDigest digest.Digest) (MutableProtoHandle[TProto], error) { + const writesPerRead = 3 + handlesToWrite := make([]handleToWrite[T, TProto], 0, writesPerRead) + + // See if a handle for the current reduced action digest already + // exists that we can use. Remove it from the write queue to + // prevent unnecessary writes to the ISCC. + ss.lock.Lock() + handleToReturn, hasExistingHandle := ss.handles[reducedActionDigest] + if hasExistingHandle { + handleToReturn.increaseUseCount() + } + + // Extract a couple of handles from previous actions that we can + // write to storage at this point. It is safe to access + // handle.message here, as handle.useCount is guaranteed to be + // zero for handles that are queued for writing. 
+ for i := 0; i < writesPerRead && len(ss.handlesToWrite) > 0; i++ { + newLength := len(ss.handlesToWrite) - 1 + handle := ss.handlesToWrite[newLength] + ss.handlesToWrite[newLength] = nil + ss.handlesToWrite = ss.handlesToWrite[:newLength] + if handle.handlesToWriteIndex != newLength { + panic("Handle has bad write index") + } + handle.handlesToWriteIndex = -1 + handlesToWrite = append(handlesToWrite, handleToWrite[T, TProto]{ + handle: handle, + message: proto.Clone(TProto(&handle.message)), + writingVersion: handle.currentVersion, + }) + } + ss.lock.Unlock() + blobAccessMutableProtoHandlesDequeued.Add(float64(len(handlesToWrite))) + + // If no handle exists, create a new handle containing the + // existing message for the action. + group, ctxWithCancel := errgroup.WithContext(ctx) + if !hasExistingHandle { + handleToReturn = &blobAccessMutableProtoHandle[T, TProto]{ + store: ss, + digest: reducedActionDigest, + useCount: 1, + handlesToWriteIndex: -1, + } + group.Go(func() error { + emptyMessage := new(T) + if m, err := ss.initialSizeClassCache.Get(ctxWithCancel, reducedActionDigest).ToProto(TProto(emptyMessage), ss.maximumMessageSizeBytes); err == nil { + proto.Merge(TProto(&handleToReturn.message), m) + } else if status.Code(err) != codes.NotFound { + return util.StatusWrapf(err, "Failed to read mutable Protobuf message with digest %#v", reducedActionDigest.String()) + } + return nil + }) + } + + // Write statistics for the actions that completed previously. 
+ for _, handleToWriteIter := range handlesToWrite { + handleToWrite := handleToWriteIter + group.Go(func() error { + if err := ss.initialSizeClassCache.Put(ctxWithCancel, handleToWrite.handle.digest, buffer.NewProtoBufferFromProto(handleToWrite.message, buffer.UserProvided)); err != nil { + ss.lock.Lock() + handleToWrite.handle.removeOrQueueForWriteLocked() + ss.lock.Unlock() + return util.StatusWrapf(err, "Failed to write mutable Protobuf message with digest %#v", handleToWrite.handle.digest.String()) + } + ss.lock.Lock() + handleToWrite.handle.writtenVersion = handleToWrite.writingVersion + handleToWrite.handle.removeOrQueueForWriteLocked() + ss.lock.Unlock() + return nil + }) + } + + // Wait for the read and any queued writes to complete. + if err := group.Wait(); err != nil { + ss.lock.Lock() + if hasExistingHandle { + handleToReturn.decreaseUseCount() + } + ss.lock.Unlock() + return nil, err + } + + if !hasExistingHandle { + // Insert the new handle into our bookkeeping. It may be + // the case that another thread beat us to it. Discard + // our newly created handle in that case. + ss.lock.Lock() + if existingHandle, ok := ss.handles[reducedActionDigest]; ok { + handleToReturn = existingHandle + handleToReturn.increaseUseCount() + } else { + ss.handles[reducedActionDigest] = handleToReturn + blobAccessMutableProtoHandlesCreated.Inc() + } + ss.lock.Unlock() + } + return handleToReturn, nil +} + +type blobAccessMutableProtoHandle[T any, TProto interface { + *T + proto.Message +}] struct { + store *blobAccessMutableProtoStore[T, TProto] + digest digest.Digest + + // The number of times we still expect Release() to be called on + // the handle. + useCount int + + // The message that all users of the handle mutate. We keep a + // version number internally to determine whether we need to + // write the message to storage or discard it. + message T + writtenVersion int + currentVersion int + + // The index of this handle in the handlesToWrite list. 
We keep + // track of this index, so that we can remove the handle from + // the list if needed. + handlesToWriteIndex int +} + +func (sh *blobAccessMutableProtoHandle[T, TProto]) GetMutableProto() TProto { + return TProto(&sh.message) +} + +func (sh *blobAccessMutableProtoHandle[T, TProto]) increaseUseCount() { + sh.useCount++ + if i := sh.handlesToWriteIndex; i >= 0 { + // Handle is queued for writing. Remove it, as we'd + // better write it after further changes have been made. + ss := sh.store + newLength := len(ss.handlesToWrite) - 1 + lastHandle := ss.handlesToWrite[newLength] + ss.handlesToWrite[i] = lastHandle + if lastHandle.handlesToWriteIndex != newLength { + panic("Handle has bad write index") + } + lastHandle.handlesToWriteIndex = i + ss.handlesToWrite[newLength] = nil + ss.handlesToWrite = ss.handlesToWrite[:newLength] + sh.handlesToWriteIndex = -1 + blobAccessMutableProtoHandlesDequeued.Inc() + } +} + +func (sh *blobAccessMutableProtoHandle[T, TProto]) decreaseUseCount() { + sh.useCount-- + sh.removeOrQueueForWriteLocked() +} + +func (sh *blobAccessMutableProtoHandle[T, TProto]) removeOrQueueForWriteLocked() { + if sh.useCount == 0 { + ss := sh.store + if sh.writtenVersion == sh.currentVersion { + // No changes were made to the message. Simply + // discard this handle. + delete(ss.handles, sh.digest) + blobAccessMutableProtoHandlesDestroyed.Inc() + } else if sh.handlesToWriteIndex < 0 { + // Changes were made and we're not queued. Place + // handle in the queue. 
+ sh.handlesToWriteIndex = len(ss.handlesToWrite) + ss.handlesToWrite = append(ss.handlesToWrite, sh) + blobAccessMutableProtoHandlesQueued.Inc() + } + } +} + +func (sh *blobAccessMutableProtoHandle[T, TProto]) Release(isDirty bool) { + ss := sh.store + ss.lock.Lock() + defer ss.lock.Unlock() + + if isDirty { + sh.currentVersion = sh.writtenVersion + 1 + } + sh.decreaseUseCount() +} diff --git a/pkg/blobstore/blob_access_mutable_proto_store_test.go b/pkg/blobstore/blob_access_mutable_proto_store_test.go new file mode 100644 index 0000000..c7482b1 --- /dev/null +++ b/pkg/blobstore/blob_access_mutable_proto_store_test.go @@ -0,0 +1,174 @@ +package blobstore_test + +import ( + "context" + "testing" + + remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + "github.com/buildbarn/bb-remote-execution/internal/mock" + "github.com/buildbarn/bb-remote-execution/pkg/blobstore" + "github.com/buildbarn/bb-storage/pkg/blobstore/buffer" + "github.com/buildbarn/bb-storage/pkg/digest" + "github.com/buildbarn/bb-storage/pkg/proto/iscc" + "github.com/buildbarn/bb-storage/pkg/testutil" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/timestamppb" +) + +func TestBlobAccessMutableProtoStore(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + blobAccess := mock.NewMockBlobAccess(ctrl) + store := blobstore.NewBlobAccessMutableProtoStore[iscc.PreviousExecutionStats](blobAccess, 10000) + + t.Run("InitialStorageGetFailure", func(t *testing.T) { + // Errors should be propagated from the backend. + blobAccess.EXPECT().Get(gomock.Any(), digest.MustNewDigest("hello", remoteexecution.DigestFunction_MD5, "a8ade48a0fb410f9c315723ef0aca3e3", 123)). 
+ Return(buffer.NewBufferFromError(status.Error(codes.Internal, "Storage failure"))) + + _, err := store.Get(ctx, digest.MustNewDigest("hello", remoteexecution.DigestFunction_MD5, "a8ade48a0fb410f9c315723ef0aca3e3", 123)) + testutil.RequireEqualStatus(t, status.Error(codes.Internal, "Failed to read mutable Protobuf message with digest \"3-a8ade48a0fb410f9c315723ef0aca3e3-123-hello\": Storage failure"), err) + }) + + t.Run("EmptyMessages", func(t *testing.T) { + // Reading a number of nonexistent messages should + // succeed and not trigger any writes against storage. + blobAccess.EXPECT().Get(gomock.Any(), digest.MustNewDigest("hello", remoteexecution.DigestFunction_MD5, "a8ade48a0fb410f9c315723ef0aca3e3", 123)). + Return(buffer.NewBufferFromError(status.Error(codes.NotFound, "Blob does not exist"))) + + handle1, err := store.Get(ctx, digest.MustNewDigest("hello", remoteexecution.DigestFunction_MD5, "a8ade48a0fb410f9c315723ef0aca3e3", 123)) + require.NoError(t, err) + testutil.RequireEqualProto(t, &iscc.PreviousExecutionStats{}, handle1.GetMutableProto()) + handle1.Release(false) + + blobAccess.EXPECT().Get(gomock.Any(), digest.MustNewDigest("hello", remoteexecution.DigestFunction_MD5, "ad328f7d3be9f12b93ce14e8937a083e", 456)). + Return(buffer.NewBufferFromError(status.Error(codes.NotFound, "Blob does not exist"))) + + handle2, err := store.Get(ctx, digest.MustNewDigest("hello", remoteexecution.DigestFunction_MD5, "ad328f7d3be9f12b93ce14e8937a083e", 456)) + require.NoError(t, err) + testutil.RequireEqualProto(t, &iscc.PreviousExecutionStats{}, handle2.GetMutableProto()) + handle2.Release(false) + + blobAccess.EXPECT().Get(gomock.Any(), digest.MustNewDigest("hello", remoteexecution.DigestFunction_MD5, "4c754f07001495a591b25e486d45b347", 789)). 
+ Return(buffer.NewBufferFromError(status.Error(codes.NotFound, "Blob does not exist"))) + + handle3, err := store.Get(ctx, digest.MustNewDigest("hello", remoteexecution.DigestFunction_MD5, "4c754f07001495a591b25e486d45b347", 789)) + require.NoError(t, err) + testutil.RequireEqualProto(t, &iscc.PreviousExecutionStats{}, handle3.GetMutableProto()) + handle3.Release(false) + }) + + t.Run("ReusingHandle", func(t *testing.T) { + // Create a handle that is backed by an existing stats + // message stored in the Initial Size Class Cache. + blobAccess.EXPECT().Get(gomock.Any(), digest.MustNewDigest("hello", remoteexecution.DigestFunction_MD5, "6467817c5aab2f887b2d88679cc2fd76", 123)). + Return(buffer.NewProtoBufferFromProto(&iscc.PreviousExecutionStats{ + SizeClasses: map[uint32]*iscc.PerSizeClassStats{}, + LastSeenFailure: ×tamppb.Timestamp{Seconds: 1620818827}, + }, buffer.UserProvided)) + + handle1, err := store.Get(ctx, digest.MustNewDigest("hello", remoteexecution.DigestFunction_MD5, "6467817c5aab2f887b2d88679cc2fd76", 123)) + require.NoError(t, err) + testutil.RequireEqualProto(t, &iscc.PreviousExecutionStats{ + SizeClasses: map[uint32]*iscc.PerSizeClassStats{}, + LastSeenFailure: ×tamppb.Timestamp{Seconds: 1620818827}, + }, handle1.GetMutableProto()) + + // Because the first handle hasn't been released, we can + // create other handles without causing the first handle + // to be flushed. + blobAccess.EXPECT().Get(gomock.Any(), digest.MustNewDigest("hello", remoteexecution.DigestFunction_MD5, "57f48d9268744c949c1103bf0e665e28", 456)). + Return(buffer.NewBufferFromError(status.Error(codes.NotFound, "Blob does not exist"))) + + handle2, err := store.Get(ctx, digest.MustNewDigest("hello", remoteexecution.DigestFunction_MD5, "57f48d9268744c949c1103bf0e665e28", 456)) + require.NoError(t, err) + testutil.RequireEqualProto(t, &iscc.PreviousExecutionStats{}, handle2.GetMutableProto()) + handle2.Release(false) + + // Modify and release the original handle. 
This should + // normally cause the next call to Get() to write it... + handle1.GetMutableProto().LastSeenFailure = ×tamppb.Timestamp{Seconds: 1620819007} + handle1.Release(true) + + // ... except if the next call to Get() requests the + // same handle. We should simply reuse the original one. + handle3, err := store.Get(ctx, digest.MustNewDigest("hello", remoteexecution.DigestFunction_MD5, "6467817c5aab2f887b2d88679cc2fd76", 123)) + require.NoError(t, err) + testutil.RequireEqualProto(t, &iscc.PreviousExecutionStats{ + SizeClasses: map[uint32]*iscc.PerSizeClassStats{}, + LastSeenFailure: ×tamppb.Timestamp{Seconds: 1620819007}, + }, handle3.GetMutableProto()) + + // Release it once again. Even though we did not make + // any changes to it, it's still dirty. This means the + // next call to Get() should still try to write it. + handle1.Release(false) + + // Let the next call to Get() write the old handle. + // Unfortunately, at the same time we see a failure + // reading the new handle, meaning the write is + // interrupted. + blobAccess.EXPECT().Get(gomock.Any(), digest.MustNewDigest("hello", remoteexecution.DigestFunction_MD5, "ee2d29afd9b3e8715e68a709c15a6784", 789)). + Return(buffer.NewBufferFromError(status.Error(codes.Internal, "Storage failure"))) + blobAccess.EXPECT().Put(gomock.Any(), digest.MustNewDigest("hello", remoteexecution.DigestFunction_MD5, "6467817c5aab2f887b2d88679cc2fd76", 123), gomock.Any()). 
+ DoAndReturn(func(ctx context.Context, digest digest.Digest, b buffer.Buffer) error { + <-ctx.Done() + require.Equal(t, context.Canceled, ctx.Err()) + b.Discard() + return status.Error(codes.Canceled, "Request canceled") + }) + + _, err = store.Get(ctx, digest.MustNewDigest("hello", remoteexecution.DigestFunction_MD5, "ee2d29afd9b3e8715e68a709c15a6784", 789)) + testutil.RequireEqualStatus(t, status.Error(codes.Internal, "Failed to read mutable Protobuf message with digest \"3-ee2d29afd9b3e8715e68a709c15a6784-789-hello\": Storage failure"), err) + + // Let's try this again. Except that now the write fails. + blobAccess.EXPECT().Get(gomock.Any(), digest.MustNewDigest("hello", remoteexecution.DigestFunction_MD5, "e1e6496be3124289bfb7374bbab057bf", 234)). + DoAndReturn(func(ctx context.Context, digest digest.Digest) buffer.Buffer { + <-ctx.Done() + require.Equal(t, context.Canceled, ctx.Err()) + return buffer.NewBufferFromError(status.Error(codes.Canceled, "Request canceled")) + }) + blobAccess.EXPECT().Put(gomock.Any(), digest.MustNewDigest("hello", remoteexecution.DigestFunction_MD5, "6467817c5aab2f887b2d88679cc2fd76", 123), gomock.Any()). + DoAndReturn(func(ctx context.Context, digest digest.Digest, b buffer.Buffer) error { + b.Discard() + return status.Error(codes.Internal, "Storage failure") + }) + + _, err = store.Get(ctx, digest.MustNewDigest("hello", remoteexecution.DigestFunction_MD5, "e1e6496be3124289bfb7374bbab057bf", 234)) + testutil.RequireEqualStatus(t, status.Error(codes.Internal, "Failed to write mutable Protobuf message with digest \"3-6467817c5aab2f887b2d88679cc2fd76-123-hello\": Storage failure"), err) + + // Now we let both the read and write succeed. + blobAccess.EXPECT().Get(gomock.Any(), digest.MustNewDigest("hello", remoteexecution.DigestFunction_MD5, "e1e6496be3124289bfb7374bbab057bf", 345)). 
+ Return(buffer.NewBufferFromError(status.Error(codes.NotFound, "Blob does not exist"))) + blobAccess.EXPECT().Put(gomock.Any(), digest.MustNewDigest("hello", remoteexecution.DigestFunction_MD5, "6467817c5aab2f887b2d88679cc2fd76", 123), gomock.Any()). + DoAndReturn(func(ctx context.Context, digest digest.Digest, b buffer.Buffer) error { + m, err := b.ToProto(&iscc.PreviousExecutionStats{}, 10000) + require.NoError(t, err) + testutil.RequireEqualProto(t, &iscc.PreviousExecutionStats{ + SizeClasses: map[uint32]*iscc.PerSizeClassStats{}, + LastSeenFailure: ×tamppb.Timestamp{Seconds: 1620819007}, + }, m) + return nil + }) + + handle4, err := store.Get(ctx, digest.MustNewDigest("hello", remoteexecution.DigestFunction_MD5, "e1e6496be3124289bfb7374bbab057bf", 345)) + require.NoError(t, err) + + handle4.Release(false) + + // With the write having completed successfully, future + // attempts to access the store should no longer try to + // write the released handle. + blobAccess.EXPECT().Get(gomock.Any(), digest.MustNewDigest("hello", remoteexecution.DigestFunction_MD5, "b3edf9adbbd9cbfc2673c84cd03e5598", 567)). 
+ Return(buffer.NewBufferFromError(status.Error(codes.NotFound, "Blob does not exist"))) + + handle5, err := store.Get(ctx, digest.MustNewDigest("hello", remoteexecution.DigestFunction_MD5, "b3edf9adbbd9cbfc2673c84cd03e5598", 567)) + require.NoError(t, err) + + handle5.Release(false) + }) +} diff --git a/pkg/blobstore/existence_precondition_blob_access.go b/pkg/blobstore/existence_precondition_blob_access.go new file mode 100644 index 0000000..7e4a2ef --- /dev/null +++ b/pkg/blobstore/existence_precondition_blob_access.go @@ -0,0 +1,68 @@ +package blobstore + +import ( + "context" + + remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + "github.com/buildbarn/bb-storage/pkg/blobstore" + "github.com/buildbarn/bb-storage/pkg/blobstore/buffer" + "github.com/buildbarn/bb-storage/pkg/blobstore/slicing" + "github.com/buildbarn/bb-storage/pkg/digest" + + "google.golang.org/genproto/googleapis/rpc/errdetails" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +type existencePreconditionBlobAccess struct { + blobstore.BlobAccess +} + +// NewExistencePreconditionBlobAccess wraps a BlobAccess into a version +// that returns GRPC status code "FAILED_PRECONDITION" instead of +// "NOT_FOUND" for Get() operations. This is used by worker processes to +// make Execution::Execute() comply with the protocol. 
+func NewExistencePreconditionBlobAccess(blobAccess blobstore.BlobAccess) blobstore.BlobAccess { + return &existencePreconditionBlobAccess{ + BlobAccess: blobAccess, + } +} + +func (ba *existencePreconditionBlobAccess) Get(ctx context.Context, digest digest.Digest) buffer.Buffer { + return buffer.WithErrorHandler( + ba.BlobAccess.Get(ctx, digest), + existencePreconditionErrorHandler{digest: digest}) +} + +func (ba *existencePreconditionBlobAccess) GetFromComposite(ctx context.Context, parentDigest, childDigest digest.Digest, slicer slicing.BlobSlicer) buffer.Buffer { + return buffer.WithErrorHandler( + ba.BlobAccess.GetFromComposite(ctx, parentDigest, childDigest, slicer), + existencePreconditionErrorHandler{digest: parentDigest}) +} + +type existencePreconditionErrorHandler struct { + digest digest.Digest +} + +func (eh existencePreconditionErrorHandler) OnError(observedErr error) (buffer.Buffer, error) { + if s := status.Convert(observedErr); s.Code() == codes.NotFound { + s, err := status.New(codes.FailedPrecondition, s.Message()).WithDetails( + &errdetails.PreconditionFailure{ + Violations: []*errdetails.PreconditionFailure_Violation{ + { + Type: "MISSING", + Subject: digest.NewInstanceNamePatcher(eh.digest.GetInstanceName(), digest.EmptyInstanceName). + PatchDigest(eh.digest). 
+ GetByteStreamReadPath(remoteexecution.Compressor_IDENTITY), + }, + }, + }) + if err != nil { + return nil, err + } + return nil, s.Err() + } + return nil, observedErr +} + +func (eh existencePreconditionErrorHandler) Done() {} diff --git a/pkg/blobstore/existence_precondition_blob_access_test.go b/pkg/blobstore/existence_precondition_blob_access_test.go new file mode 100644 index 0000000..b7f434e --- /dev/null +++ b/pkg/blobstore/existence_precondition_blob_access_test.go @@ -0,0 +1,150 @@ +package blobstore_test + +import ( + "context" + "testing" + + remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + "github.com/buildbarn/bb-remote-execution/internal/mock" + "github.com/buildbarn/bb-remote-execution/pkg/blobstore" + "github.com/buildbarn/bb-storage/pkg/blobstore/buffer" + "github.com/buildbarn/bb-storage/pkg/digest" + "github.com/buildbarn/bb-storage/pkg/testutil" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" + + "google.golang.org/genproto/googleapis/rpc/errdetails" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func TestExistencePreconditionBlobAccessGetSuccess(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + // Let Get() return a reader from which we can read successfully. + bottomBlobAccess := mock.NewMockBlobAccess(ctrl) + bottomBlobAccess.EXPECT().Get( + ctx, + digest.MustNewDigest("debian8", remoteexecution.DigestFunction_MD5, "8b1a9953c4611296a827abf8c47804d7", 5), + ).Return(buffer.NewValidatedBufferFromByteSlice([]byte("Hello"))) + + // Validate that the reader can still be read properly. 
+ data, err := blobstore.NewExistencePreconditionBlobAccess(bottomBlobAccess).Get( + ctx, + digest.MustNewDigest("debian8", remoteexecution.DigestFunction_MD5, "8b1a9953c4611296a827abf8c47804d7", 5), + ).ToByteSlice(100) + require.NoError(t, err) + require.Equal(t, []byte("Hello"), data) +} + +func TestExistencePreconditionBlobAccessGetResourceExhausted(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + // Let Get() return ResourceExhausted. + bottomBlobAccess := mock.NewMockBlobAccess(ctrl) + bottomBlobAccess.EXPECT().Get( + ctx, + digest.MustNewDigest("ubuntu1604", remoteexecution.DigestFunction_SHA1, "c916e71d733d06cb77a4775de5f77fd0b480a7e8", 8), + ).Return(buffer.NewBufferFromError(status.Error(codes.ResourceExhausted, "Out of luck!"))) + + // The error should be passed through unmodified. + _, err := blobstore.NewExistencePreconditionBlobAccess(bottomBlobAccess).Get( + ctx, + digest.MustNewDigest("ubuntu1604", remoteexecution.DigestFunction_SHA1, "c916e71d733d06cb77a4775de5f77fd0b480a7e8", 8), + ).ToByteSlice(100) + testutil.RequireEqualStatus(t, status.Error(codes.ResourceExhausted, "Out of luck!"), err) +} + +func TestExistencePreconditionBlobAccessGetNotFound(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + // Let Get() return NotFound. + bottomBlobAccess := mock.NewMockBlobAccess(ctrl) + bottomBlobAccess.EXPECT().Get( + ctx, + digest.MustNewDigest("ubuntu1604", remoteexecution.DigestFunction_SHA256, "c015ad6ddaf8bb50689d2d7cbf1539dff6dd84473582a08ed1d15d841f4254f4", 7), + ).Return(buffer.NewBufferFromError(status.Error(codes.NotFound, "Blob doesn't exist!"))) + + // The error should be translated to FailedPrecondition. 
+ _, gotErr := blobstore.NewExistencePreconditionBlobAccess(bottomBlobAccess).Get( + ctx, + digest.MustNewDigest("ubuntu1604", remoteexecution.DigestFunction_SHA256, "c015ad6ddaf8bb50689d2d7cbf1539dff6dd84473582a08ed1d15d841f4254f4", 7), + ).ToByteSlice(100) + + wantErr, err := status.New(codes.FailedPrecondition, "Blob doesn't exist!").WithDetails(&errdetails.PreconditionFailure{ + Violations: []*errdetails.PreconditionFailure_Violation{ + { + Type: "MISSING", + Subject: "blobs/c015ad6ddaf8bb50689d2d7cbf1539dff6dd84473582a08ed1d15d841f4254f4/7", + }, + }, + }) + require.NoError(t, err) + + testutil.RequireEqualStatus(t, wantErr.Err(), gotErr) +} + +func TestExistencePreconditionBlobAccessGetFromCompositeNotFound(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + // Let GetFromComposite() return NotFound. + bottomBlobAccess := mock.NewMockBlobAccess(ctrl) + blobSlicer := mock.NewMockBlobSlicer(ctrl) + bottomBlobAccess.EXPECT().GetFromComposite( + ctx, + digest.MustNewDigest("ubuntu1604", remoteexecution.DigestFunction_SHA256, "c015ad6ddaf8bb50689d2d7cbf1539dff6dd84473582a08ed1d15d841f4254f4", 7), + digest.MustNewDigest("ubuntu1604", remoteexecution.DigestFunction_SHA256, "f91881078baff10d91f796347efa85304240db6a162d46edcdd56154e91e1d8a", 3), + blobSlicer, + ).Return(buffer.NewBufferFromError(status.Error(codes.NotFound, "Blob doesn't exist!"))) + + // The error should be translated to FailedPrecondition. The + // digest of the parent is the one that should be attached to + // the error, as that's the one that needs to be reuploaded to + // satisfy the request. 
+ _, gotErr := blobstore.NewExistencePreconditionBlobAccess(bottomBlobAccess).GetFromComposite( + ctx, + digest.MustNewDigest("ubuntu1604", remoteexecution.DigestFunction_SHA256, "c015ad6ddaf8bb50689d2d7cbf1539dff6dd84473582a08ed1d15d841f4254f4", 7), + digest.MustNewDigest("ubuntu1604", remoteexecution.DigestFunction_SHA256, "f91881078baff10d91f796347efa85304240db6a162d46edcdd56154e91e1d8a", 3), + blobSlicer, + ).ToByteSlice(100) + + wantErr, err := status.New(codes.FailedPrecondition, "Blob doesn't exist!").WithDetails(&errdetails.PreconditionFailure{ + Violations: []*errdetails.PreconditionFailure_Violation{ + { + Type: "MISSING", + Subject: "blobs/c015ad6ddaf8bb50689d2d7cbf1539dff6dd84473582a08ed1d15d841f4254f4/7", + }, + }, + }) + require.NoError(t, err) + + testutil.RequireEqualStatus(t, wantErr.Err(), gotErr) +} + +func TestExistencePreconditionBlobAccessPutNotFound(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + // Let Put() return NotFound. + bottomBlobAccess := mock.NewMockBlobAccess(ctrl) + bottomBlobAccess.EXPECT().Put( + ctx, + digest.MustNewDigest("ubuntu1604", remoteexecution.DigestFunction_MD5, "89d5739baabbbe65be35cbe61c88e06d", 6), + gomock.Any()).DoAndReturn( + func(ctx context.Context, digest digest.Digest, b buffer.Buffer) error { + data, err := b.ToByteSlice(100) + require.NoError(t, err) + require.Equal(t, []byte("Foobar"), data) + return status.Error(codes.NotFound, "Storage backend not found") + }) + + // Unlike for Get(), the error should be passed through + // unmodified. This adapter should only alter the results of + // Get() calls. 
+ err := blobstore.NewExistencePreconditionBlobAccess(bottomBlobAccess).Put( + ctx, + digest.MustNewDigest("ubuntu1604", remoteexecution.DigestFunction_MD5, "89d5739baabbbe65be35cbe61c88e06d", 6), + buffer.NewValidatedBufferFromByteSlice([]byte("Foobar"))) + s := status.Convert(err) + require.Equal(t, codes.NotFound, s.Code()) + require.Equal(t, "Storage backend not found", s.Message()) +} diff --git a/pkg/blobstore/mutable_proto_store.go b/pkg/blobstore/mutable_proto_store.go new file mode 100644 index 0000000..9fc2d30 --- /dev/null +++ b/pkg/blobstore/mutable_proto_store.go @@ -0,0 +1,31 @@ +package blobstore + +import ( + "context" + + "github.com/buildbarn/bb-storage/pkg/digest" + + "google.golang.org/protobuf/proto" +) + +// MutableProtoStore is a store for Protobuf messages, allowing them +// both to be read and written. Because multiple operations may interact +// with a single Protobuf message, this interface permits concurrent +// access to the same message. +// +// The Get() function may be called in parallel, yielding a +// MutableProtoHandle. Because these handles are shared, all methods on +// all handles obtained from a single store must be called while holding +// a global lock. The Protobuf message embedded in the handle gets +// invalidated after locks are dropped. +type MutableProtoStore[T proto.Message] interface { + Get(ctx context.Context, reducedActionDigest digest.Digest) (MutableProtoHandle[T], error) +} + +// MutableProtoHandle is a handle that is returned by MutableProtoStore. +// It provides access to a mutable Protobuf message that the caller +// may read and modify in place. 
+type MutableProtoHandle[T proto.Message] interface { + GetMutableProto() T + Release(isDirty bool) +} diff --git a/pkg/blobstore/suspending_blob_access.go b/pkg/blobstore/suspending_blob_access.go new file mode 100644 index 0000000..ab1da25 --- /dev/null +++ b/pkg/blobstore/suspending_blob_access.go @@ -0,0 +1,78 @@ +package blobstore + +import ( + "context" + + remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + "github.com/buildbarn/bb-remote-execution/pkg/clock" + "github.com/buildbarn/bb-storage/pkg/blobstore" + "github.com/buildbarn/bb-storage/pkg/blobstore/buffer" + "github.com/buildbarn/bb-storage/pkg/blobstore/slicing" + "github.com/buildbarn/bb-storage/pkg/digest" +) + +type suspendingBlobAccess struct { + base blobstore.BlobAccess + suspendable clock.Suspendable +} + +// NewSuspendingBlobAccess is a decorator for BlobAccess that simply +// forwards all methods. Before and after each call, it suspends and +// resumes a clock.Suspendable object, respectively. +// +// This decorator is used in combination with SuspendableClock, allowing +// FUSE-based workers to compensate the execution timeout of build +// actions for any time spent downloading the input root. 
+func NewSuspendingBlobAccess(base blobstore.BlobAccess, suspendable clock.Suspendable) blobstore.BlobAccess { + return &suspendingBlobAccess{ + base: base, + suspendable: suspendable, + } +} + +func (ba *suspendingBlobAccess) Get(ctx context.Context, digest digest.Digest) buffer.Buffer { + ba.suspendable.Suspend() + return buffer.WithErrorHandler( + ba.base.Get(ctx, digest), + &resumingErrorHandler{suspendable: ba.suspendable}) +} + +func (ba *suspendingBlobAccess) GetFromComposite(ctx context.Context, parentDigest, childDigest digest.Digest, slicer slicing.BlobSlicer) buffer.Buffer { + ba.suspendable.Suspend() + return buffer.WithErrorHandler( + ba.base.GetFromComposite(ctx, parentDigest, childDigest, slicer), + &resumingErrorHandler{suspendable: ba.suspendable}) +} + +func (ba *suspendingBlobAccess) Put(ctx context.Context, digest digest.Digest, b buffer.Buffer) error { + ba.suspendable.Suspend() + defer ba.suspendable.Resume() + + return ba.base.Put(ctx, digest, b) +} + +func (ba *suspendingBlobAccess) FindMissing(ctx context.Context, digests digest.Set) (digest.Set, error) { + ba.suspendable.Suspend() + defer ba.suspendable.Resume() + + return ba.base.FindMissing(ctx, digests) +} + +func (ba *suspendingBlobAccess) GetCapabilities(ctx context.Context, instanceName digest.InstanceName) (*remoteexecution.ServerCapabilities, error) { + ba.suspendable.Suspend() + defer ba.suspendable.Resume() + + return ba.base.GetCapabilities(ctx, instanceName) +} + +type resumingErrorHandler struct { + suspendable clock.Suspendable +} + +func (eh *resumingErrorHandler) OnError(err error) (buffer.Buffer, error) { + return nil, err +} + +func (eh *resumingErrorHandler) Done() { + eh.suspendable.Resume() +} diff --git a/pkg/blobstore/suspending_blob_access_test.go b/pkg/blobstore/suspending_blob_access_test.go new file mode 100644 index 0000000..f808087 --- /dev/null +++ b/pkg/blobstore/suspending_blob_access_test.go @@ -0,0 +1,112 @@ +package blobstore_test + +import ( + "context" 
+ "io" + "testing" + + remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + "github.com/buildbarn/bb-remote-execution/internal/mock" + "github.com/buildbarn/bb-remote-execution/pkg/blobstore" + "github.com/buildbarn/bb-storage/pkg/blobstore/buffer" + "github.com/buildbarn/bb-storage/pkg/digest" + "github.com/buildbarn/bb-storage/pkg/testutil" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" +) + +func TestSuspendingBlobAccess(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + baseBlobAccess := mock.NewMockBlobAccess(ctrl) + suspendable := mock.NewMockSuspendable(ctrl) + blobAccess := blobstore.NewSuspendingBlobAccess(baseBlobAccess, suspendable) + + exampleDigest := digest.MustNewDigest("hello", remoteexecution.DigestFunction_MD5, "8b1a9953c4611296a827abf8c47804d7", 5) + exampleInstanceName := digest.MustNewInstanceName("hello") + + t.Run("Get", func(t *testing.T) { + r := mock.NewMockReadCloser(ctrl) + gomock.InOrder( + suspendable.EXPECT().Suspend(), + baseBlobAccess.EXPECT().Get(ctx, exampleDigest). + Return(buffer.NewCASBufferFromReader(exampleDigest, r, buffer.UserProvided))) + + b := blobAccess.Get(ctx, exampleDigest) + + gomock.InOrder( + r.EXPECT().Read(gomock.Any()).DoAndReturn(func(p []byte) (int, error) { + return copy(p, "Hello"), io.EOF + }), + r.EXPECT().Close(), + suspendable.EXPECT().Resume()) + + data, err := b.ToByteSlice(1000) + require.NoError(t, err) + require.Equal(t, []byte("Hello"), data) + }) + + t.Run("GetFromComposite", func(t *testing.T) { + llDigest := digest.MustNewDigest("hello", remoteexecution.DigestFunction_MD5, "5b54c0a045f179bcbbbc9abcb8b5cd4c", 2) + blobSlicer := mock.NewMockBlobSlicer(ctrl) + r := mock.NewMockReadCloser(ctrl) + gomock.InOrder( + suspendable.EXPECT().Suspend(), + baseBlobAccess.EXPECT().GetFromComposite(ctx, exampleDigest, llDigest, blobSlicer). 
+ Return(buffer.NewCASBufferFromReader(llDigest, r, buffer.UserProvided))) + + b := blobAccess.GetFromComposite(ctx, exampleDigest, llDigest, blobSlicer) + + gomock.InOrder( + r.EXPECT().Read(gomock.Any()).DoAndReturn(func(p []byte) (int, error) { + return copy(p, "ll"), io.EOF + }), + r.EXPECT().Close(), + suspendable.EXPECT().Resume()) + + data, err := b.ToByteSlice(1000) + require.NoError(t, err) + require.Equal(t, []byte("ll"), data) + }) + + t.Run("Put", func(t *testing.T) { + gomock.InOrder( + suspendable.EXPECT().Suspend(), + baseBlobAccess.EXPECT().Put(ctx, exampleDigest, gomock.Any()).DoAndReturn( + func(ctx context.Context, digest digest.Digest, b buffer.Buffer) error { + data, err := b.ToByteSlice(1000) + require.NoError(t, err) + require.Equal(t, []byte("Hello"), data) + return nil + }), + suspendable.EXPECT().Resume()) + + require.NoError(t, blobAccess.Put(ctx, exampleDigest, buffer.NewValidatedBufferFromByteSlice([]byte("Hello")))) + }) + + t.Run("FindMissing", func(t *testing.T) { + gomock.InOrder( + suspendable.EXPECT().Suspend(), + baseBlobAccess.EXPECT().FindMissing(ctx, digest.EmptySet).Return(digest.EmptySet, nil), + suspendable.EXPECT().Resume()) + + missing, err := blobAccess.FindMissing(ctx, digest.EmptySet) + require.NoError(t, err) + require.Equal(t, digest.EmptySet, missing) + }) + + t.Run("GetCapabilities", func(t *testing.T) { + gomock.InOrder( + suspendable.EXPECT().Suspend(), + baseBlobAccess.EXPECT().GetCapabilities(ctx, exampleInstanceName).Return(&remoteexecution.ServerCapabilities{ + CacheCapabilities: &remoteexecution.CacheCapabilities{}, + }, nil), + suspendable.EXPECT().Resume()) + + serverCapabilities, err := blobAccess.GetCapabilities(ctx, exampleInstanceName) + require.NoError(t, err) + testutil.RequireEqualProto(t, &remoteexecution.ServerCapabilities{ + CacheCapabilities: &remoteexecution.CacheCapabilities{}, + }, serverCapabilities) + }) +} diff --git a/pkg/builder/BUILD.bazel b/pkg/builder/BUILD.bazel new file mode 100644 
index 0000000..1748a3b --- /dev/null +++ b/pkg/builder/BUILD.bazel @@ -0,0 +1,138 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "builder", + srcs = [ + "build_client.go", + "build_directory.go", + "build_directory_creator.go", + "build_executor.go", + "caching_build_executor.go", + "clean_build_directory_creator.go", + "command.go", + "completed_action_logger.go", + "completed_action_logging_build_executor.go", + "cost_computing_build_executor.go", + "file_pool_stats_build_executor.go", + "local_build_executor.go", + "logging_build_executor.go", + "metrics_build_executor.go", + "naive_build_directory.go", + "noop_build_executor.go", + "output_hierarchy.go", + "prefetching_build_executor.go", + "root_build_directory_creator.go", + "shared_build_directory_creator.go", + "storage_flushing_build_executor.go", + "test_infrastructure_failure_detecting_build_executor.go", + "timestamped_build_executor.go", + "tracing_build_executor.go", + "uploadable_directory.go", + "virtual_build_directory.go", + ], + importpath = "github.com/buildbarn/bb-remote-execution/pkg/builder", + visibility = ["//visibility:public"], + deps = [ + "//pkg/cas", + "//pkg/cleaner", + "//pkg/clock", + "//pkg/filesystem", + "//pkg/filesystem/access", + "//pkg/filesystem/virtual", + "//pkg/proto/cas", + "//pkg/proto/completedactionlogger", + "//pkg/proto/remoteworker", + "//pkg/proto/resourceusage", + "//pkg/proto/runner", + "//pkg/util", + "@com_github_bazelbuild_remote_apis//build/bazel/remote/execution/v2:execution", + "@com_github_buildbarn_bb_storage//pkg/blobstore", + "@com_github_buildbarn_bb_storage//pkg/blobstore/buffer", + "@com_github_buildbarn_bb_storage//pkg/clock", + "@com_github_buildbarn_bb_storage//pkg/digest", + "@com_github_buildbarn_bb_storage//pkg/filesystem", + "@com_github_buildbarn_bb_storage//pkg/filesystem/path", + "@com_github_buildbarn_bb_storage//pkg/otel", + "@com_github_buildbarn_bb_storage//pkg/program", + 
"@com_github_buildbarn_bb_storage//pkg/proto/fsac", + "@com_github_buildbarn_bb_storage//pkg/random", + "@com_github_buildbarn_bb_storage//pkg/util", + "@com_github_google_uuid//:uuid", + "@com_github_kballard_go_shellquote//:go-shellquote", + "@com_github_prometheus_client_golang//prometheus", + "@io_opentelemetry_go_otel//attribute", + "@io_opentelemetry_go_otel_trace//:trace", + "@org_golang_google_grpc//codes", + "@org_golang_google_grpc//status", + "@org_golang_google_protobuf//encoding/protojson", + "@org_golang_google_protobuf//encoding/protowire", + "@org_golang_google_protobuf//proto", + "@org_golang_google_protobuf//types/known/anypb", + "@org_golang_google_protobuf//types/known/durationpb", + "@org_golang_google_protobuf//types/known/emptypb", + "@org_golang_google_protobuf//types/known/timestamppb", + "@org_golang_x_sync//errgroup", + "@org_golang_x_sync//semaphore", + ], +) + +go_test( + name = "builder_test", + srcs = [ + "build_client_test.go", + "caching_build_executor_test.go", + "clean_build_directory_creator_test.go", + "command_test.go", + "completed_action_logger_test.go", + "completed_action_logging_build_executor_test.go", + "cost_computing_build_executor_test.go", + "file_pool_stats_build_executor_test.go", + "local_build_executor_test.go", + "naive_build_directory_test.go", + "noop_build_executor_test.go", + "output_hierarchy_test.go", + "prefetching_build_executor_test.go", + "root_build_directory_creator_test.go", + "shared_build_directory_creator_test.go", + "storage_flushing_build_executor_test.go", + "test_infrastructure_failure_detecting_build_executor_test.go", + "timestamped_build_executor_test.go", + "tracing_build_executor_test.go", + ], + deps = [ + ":builder", + "//internal/mock", + "//pkg/cleaner", + "//pkg/clock", + "//pkg/filesystem", + "//pkg/filesystem/access", + "//pkg/proto/cas", + "//pkg/proto/completedactionlogger", + "//pkg/proto/remoteworker", + "//pkg/proto/resourceusage", + "//pkg/proto/runner", + 
"@com_github_bazelbuild_remote_apis//build/bazel/remote/execution/v2:execution", + "@com_github_buildbarn_bb_storage//pkg/blobstore/buffer", + "@com_github_buildbarn_bb_storage//pkg/digest", + "@com_github_buildbarn_bb_storage//pkg/filesystem", + "@com_github_buildbarn_bb_storage//pkg/filesystem/path", + "@com_github_buildbarn_bb_storage//pkg/proto/fsac", + "@com_github_buildbarn_bb_storage//pkg/testutil", + "@com_github_buildbarn_bb_storage//pkg/util", + "@com_github_golang_mock//gomock", + "@com_github_google_uuid//:uuid", + "@com_github_stretchr_testify//require", + "@io_opentelemetry_go_otel//attribute", + "@io_opentelemetry_go_otel_trace//:trace", + "@org_golang_google_genproto_googleapis_rpc//status", + "@org_golang_google_grpc//:go_default_library", + "@org_golang_google_grpc//codes", + "@org_golang_google_grpc//status", + "@org_golang_google_protobuf//proto", + "@org_golang_google_protobuf//types/known/anypb", + "@org_golang_google_protobuf//types/known/durationpb", + "@org_golang_google_protobuf//types/known/emptypb", + "@org_golang_google_protobuf//types/known/timestamppb", + "@org_golang_x_sync//semaphore", + ], +) diff --git a/pkg/builder/build_client.go b/pkg/builder/build_client.go new file mode 100644 index 0000000..08dbe40 --- /dev/null +++ b/pkg/builder/build_client.go @@ -0,0 +1,339 @@ +package builder + +import ( + "context" + "log" + "time" + + remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + "github.com/buildbarn/bb-remote-execution/pkg/filesystem" + "github.com/buildbarn/bb-remote-execution/pkg/proto/remoteworker" + "github.com/buildbarn/bb-storage/pkg/clock" + "github.com/buildbarn/bb-storage/pkg/digest" + "github.com/buildbarn/bb-storage/pkg/otel" + "github.com/buildbarn/bb-storage/pkg/program" + "github.com/buildbarn/bb-storage/pkg/random" + "github.com/buildbarn/bb-storage/pkg/util" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/emptypb" +) 
+ +// BuildClient is a client for the Remote Worker protocol. It can send +// synchronization requests to a scheduler, informing it of the current +// state of the worker, while also obtaining requests for executing +// build actions. +type BuildClient struct { + // Constant fields. + scheduler remoteworker.OperationQueueClient + buildExecutor BuildExecutor + filePool filesystem.FilePool + clock clock.Clock + instanceNamePrefix digest.InstanceName + instanceNamePatcher digest.InstanceNamePatcher + + // Mutable fields that are always set. + request remoteworker.SynchronizeRequest + schedulerMayThinkExecutingUntil *time.Time + nextSynchronizationAt time.Time + + // Mutable fields that are only set when executing an action. + executionCancellation func() + executionUpdates <-chan *remoteworker.CurrentState_Executing +} + +// NewBuildClient creates a new BuildClient instance that is set to the +// initial state (i.e., being idle). +func NewBuildClient(scheduler remoteworker.OperationQueueClient, buildExecutor BuildExecutor, filePool filesystem.FilePool, clock clock.Clock, workerID map[string]string, instanceNamePrefix digest.InstanceName, platform *remoteexecution.Platform, sizeClass uint32) *BuildClient { + return &BuildClient{ + scheduler: scheduler, + buildExecutor: buildExecutor, + filePool: filePool, + clock: clock, + instanceNamePrefix: instanceNamePrefix, + instanceNamePatcher: digest.NewInstanceNamePatcher(digest.EmptyInstanceName, instanceNamePrefix), + + request: remoteworker.SynchronizeRequest{ + WorkerId: workerID, + InstanceNamePrefix: instanceNamePrefix.String(), + Platform: platform, + SizeClass: sizeClass, + CurrentState: &remoteworker.CurrentState{ + WorkerState: &remoteworker.CurrentState_Idle{ + Idle: &emptypb.Empty{}, + }, + }, + }, + nextSynchronizationAt: clock.Now(), + } +} + +func (bc *BuildClient) startExecution(executionRequest *remoteworker.DesiredState_Executing) error { + instanceNameSuffix, err := 
digest.NewInstanceName(executionRequest.InstanceNameSuffix) + if err != nil { + return util.StatusWrapf(err, "Invalid instance name suffix %#v", executionRequest.InstanceNameSuffix) + } + digestFunction, err := bc.instanceNamePatcher.PatchInstanceName(instanceNameSuffix). + GetDigestFunction(executionRequest.DigestFunction, 0) + if err != nil { + return err + } + + bc.stopExecution() + + // Spawn the execution of the build action. + var ctx context.Context + ctx, bc.executionCancellation = context.WithCancel( + otel.NewContextWithW3CTraceContext( + context.Background(), + executionRequest.W3CTraceContext)) + updates := make(chan *remoteworker.CurrentState_Executing, 10) + bc.executionUpdates = updates + go func() { + executeResponse := bc.buildExecutor.Execute( + ctx, + bc.filePool, + nil, + digestFunction, + executionRequest, + updates) + updates <- &remoteworker.CurrentState_Executing{ + ActionDigest: executionRequest.ActionDigest, + ExecutionState: &remoteworker.CurrentState_Executing_Completed{ + Completed: executeResponse, + }, + } + close(updates) + }() + + // Change state to indicate the build has started. + bc.request.CurrentState.WorkerState = &remoteworker.CurrentState_Executing_{ + Executing: &remoteworker.CurrentState_Executing{ + ActionDigest: executionRequest.ActionDigest, + ExecutionState: &remoteworker.CurrentState_Executing_Started{ + Started: &emptypb.Empty{}, + }, + }, + } + return nil +} + +func (bc *BuildClient) stopExecution() { + // Trigger cancellation of the existing build action and wait + // for it to complete. Discard the results. 
+ if bc.executionCancellation != nil { + bc.executionCancellation() + for { + if _, hasUpdate := <-bc.executionUpdates; !hasUpdate { + break + } + } + bc.executionCancellation = nil + bc.executionUpdates = nil + } + + bc.request.CurrentState.WorkerState = &remoteworker.CurrentState_Idle{ + Idle: &emptypb.Empty{}, + } +} + +func (bc *BuildClient) applyExecutionUpdate(update *remoteworker.CurrentState_Executing) { + if update != nil { + // New update received. + bc.request.CurrentState.WorkerState = &remoteworker.CurrentState_Executing_{ + Executing: update, + } + } else { + // Execution has finished. Clean up resources. + bc.executionCancellation() + bc.executionCancellation = nil + bc.executionUpdates = nil + } +} + +func (bc *BuildClient) consumeExecutionUpdatesNonBlocking() { + for { + select { + case update := <-bc.executionUpdates: + bc.applyExecutionUpdate(update) + default: + // No more updates left. + return + } + } +} + +// touchSchedulerMayThinkExecuting updates state on whether the +// scheduler may think the worker is currently executing an action. This +// is used to determine whether it is safe to terminate the worker +// gracefully. +func (bc *BuildClient) touchSchedulerMayThinkExecuting() { + // Assume that if we've missed the desired synchronization time + // provided by the scheduler by more than a minute, the + // scheduler has purged our state. + until := bc.nextSynchronizationAt.Add(time.Minute) + bc.schedulerMayThinkExecutingUntil = &until +} + +// Run a iteration of the Remote Worker client, by performing a single +// synchronization against the scheduler. +func (bc *BuildClient) Run(ctx context.Context) (bool, error) { + // Allow the worker to terminate if the scheduler doesn't think + // we're executing any action, or if we haven't been able to + // successfully synchronize for a prolonged amount of time. 
+ if ctx.Err() != nil && (bc.schedulerMayThinkExecutingUntil == nil || bc.clock.Now().After(*bc.schedulerMayThinkExecutingUntil)) { + return true, nil + } + + // If the scheduler isn't assuming we're executing any action + // right now, perform some readiness checks. This ensures we + // don't dequeue actions from the scheduler while unhealthy. + if bc.schedulerMayThinkExecutingUntil == nil { + if err := bc.buildExecutor.CheckReadiness(ctx); err != nil { + return true, util.StatusWrap(err, "Worker failed readiness check") + } + } + + // When executing an action, see if there are any updates on the + // execution state. + if bc.executionCancellation != nil { + timer, timerChannel := bc.clock.NewTimer(bc.nextSynchronizationAt.Sub(bc.clock.Now())) + select { + case <-timerChannel: + // No meaningful updates. Send the last update + // once again. + case update := <-bc.executionUpdates: + // One or more execution updates available. Send + // a new update with the latest state, + // regardless of the next synchronization time + // returned by the scheduler during the previous + // synchronize call. + timer.Stop() + bc.applyExecutionUpdate(update) + bc.consumeExecutionUpdatesNonBlocking() + if now := bc.clock.Now(); bc.nextSynchronizationAt.After(now) { + bc.nextSynchronizationAt = now + } + } + } + + // Determine whether we should perform call to Synchronize with + // prefer_being_able set to false (potentially blocking) or true + // (non-blocking). + currentStateIsExecuting := false + switch workerState := bc.request.CurrentState.WorkerState.(type) { + case *remoteworker.CurrentState_Idle: + // Even though we are idle, the scheduler may think we + // are executing. This means we were not able to perform + // readiness checks. Forcefully switch to idle, so that + // we can still do this before picking up more work. 
+ bc.request.PreferBeingIdle = bc.schedulerMayThinkExecutingUntil != nil + case *remoteworker.CurrentState_Executing_: + if updateCompleted, ok := workerState.Executing.ExecutionState.(*remoteworker.CurrentState_Executing_Completed); ok { + // In case execution failed with a serious + // error, request that the worker gets a brief + // amount of idle time, so that we can do some + // health checks prior to picking up more work. + bc.request.PreferBeingIdle = status.ErrorProto(updateCompleted.Completed.Status) != nil + } else { + currentStateIsExecuting = true + bc.request.PreferBeingIdle = false + } + default: + panic("Unknown worker state") + } + + // If we need to shut down, we should never be performing + // blocking Synchronize() calls. We should still let the calls + // go through, so that we can either finish the current action, + // or properly ensure the scheduler thinks we're in the idle + // state. + if ctx.Err() != nil { + bc.request.PreferBeingIdle = true + ctx = context.Background() + } + + // Inform scheduler of current worker state, potentially + // requesting new work. If this fails, we might have lost an + // execute request sent by the scheduler, so assume the + // scheduler thinks we may be executing. + response, err := bc.scheduler.Synchronize(ctx, &bc.request) + if bc.schedulerMayThinkExecutingUntil == nil { + bc.touchSchedulerMayThinkExecuting() + } + if err != nil { + return false, util.StatusWrap(err, "Failed to synchronize with scheduler") + } + + // Determine when we should contact the scheduler again in case + // of no activity. + nextSynchronizationAt := response.NextSynchronizationAt + if err := nextSynchronizationAt.CheckValid(); err != nil { + return false, util.StatusWrap(err, "Scheduler response contained invalid synchronization timestamp") + } + bc.nextSynchronizationAt = nextSynchronizationAt.AsTime() + + // Apply desired state changes provided by the scheduler. 
+ if desiredState := response.DesiredState; desiredState != nil { + switch workerState := desiredState.WorkerState.(type) { + case *remoteworker.DesiredState_Executing_: + // Scheduler is requesting us to execute the + // next action, maybe forcing us to to stop + // execution of the current build action. + if err := bc.startExecution(workerState.Executing); err != nil { + return false, err + } + bc.touchSchedulerMayThinkExecuting() + return false, nil + case *remoteworker.DesiredState_Idle: + // Scheduler is forcing us to go back to idle. + bc.stopExecution() + bc.schedulerMayThinkExecutingUntil = nil + return true, nil + default: + return false, status.Error(codes.Internal, "Scheduler provided an unknown desired state") + } + } + + // Scheduler has instructed to continue as is. + if currentStateIsExecuting { + bc.touchSchedulerMayThinkExecuting() + return false, nil + } + bc.schedulerMayThinkExecutingUntil = nil + return true, nil +} + +// LaunchWorkerThread launches a single routine that uses a build client +// to repeatedly synchronizes against the scheduler, requesting a task +// to execute. +func LaunchWorkerThread(group program.Group, buildClient *BuildClient, workerName string) { + group.Go(func(ctx context.Context, siblingsGroup, dependenciesGroup program.Group) error { + generator := random.NewFastSingleThreadedGenerator() + for { + terminationStartedBeforeRun := ctx.Err() != nil + if mayTerminate, err := buildClient.Run(ctx); mayTerminate && ctx.Err() != nil { + log.Printf("Worker %s: terminating", workerName) + return nil + } else if err != nil { + log.Printf("Worker %s: %s", workerName, err) + + // In case of errors, sleep a random amount of + // time. Allow the sleep to be skipped once when + // termination is initiated, so that it happens + // quickly. 
+ if d := random.Duration(generator, 5*time.Second); terminationStartedBeforeRun { + time.Sleep(d) + } else { + t := time.NewTimer(d) + select { + case <-t.C: + case <-ctx.Done(): + t.Stop() + } + } + } + } + }) +} diff --git a/pkg/builder/build_client_test.go b/pkg/builder/build_client_test.go new file mode 100644 index 0000000..68a9973 --- /dev/null +++ b/pkg/builder/build_client_test.go @@ -0,0 +1,377 @@ +package builder_test + +import ( + "context" + "testing" + "time" + + remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + "github.com/buildbarn/bb-remote-execution/internal/mock" + "github.com/buildbarn/bb-remote-execution/pkg/builder" + "github.com/buildbarn/bb-remote-execution/pkg/proto/remoteworker" + "github.com/buildbarn/bb-storage/pkg/digest" + "github.com/buildbarn/bb-storage/pkg/testutil" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/emptypb" + "google.golang.org/protobuf/types/known/timestamppb" +) + +func TestBuildClient(t *testing.T) { + ctrl := gomock.NewController(t) + + operationQueueClient := mock.NewMockOperationQueueClient(ctrl) + buildExecutor := mock.NewMockBuildExecutor(ctrl) + filePool := mock.NewMockFilePool(ctrl) + clock := mock.NewMockClock(ctrl) + clock.EXPECT().Now().Return(time.Unix(1000, 0)) + workerID := map[string]string{"hostname": "example.com"} + digestFunction := digest.MustNewFunction("prefix/suffix", remoteexecution.DigestFunction_SHA1) + platform := &remoteexecution.Platform{ + Properties: []*remoteexecution.Platform_Property{ + {Name: "os", Value: "linux"}, + }, + } + bc := builder.NewBuildClient(operationQueueClient, buildExecutor, filePool, clock, workerID, digest.MustNewInstanceName("prefix"), platform, 4) + + // If synchronizing against the scheduler doesn't yield any + // action to run, the client should remain in the idle state. 
+ buildExecutor.EXPECT().CheckReadiness(context.Background()) + operationQueueClient.EXPECT().Synchronize(context.Background(), testutil.EqProto(t, &remoteworker.SynchronizeRequest{ + WorkerId: workerID, + InstanceNamePrefix: "prefix", + Platform: platform, + SizeClass: 4, + CurrentState: &remoteworker.CurrentState{ + WorkerState: &remoteworker.CurrentState_Idle{ + Idle: &emptypb.Empty{}, + }, + }, + })).Return(&remoteworker.SynchronizeResponse{ + NextSynchronizationAt: ×tamppb.Timestamp{Seconds: 1010}, + }, nil) + mayTerminate, err := bc.Run(context.Background()) + require.Equal(t, true, mayTerminate) + require.NoError(t, err) + + // Let the scheduler return an action to execute. This should + // cause a call against the BuildExecutor. + buildExecutor.EXPECT().CheckReadiness(context.Background()) + desiredStateExecuting1 := &remoteworker.DesiredState_Executing{ + ActionDigest: &remoteexecution.Digest{ + Hash: "da39a3ee5e6b4b0d3255bfef95601890afd80709", + SizeBytes: 123, + }, + Action: &remoteexecution.Action{ + CommandDigest: &remoteexecution.Digest{ + Hash: "61c585c297d00409bd477b6b80759c94ec545ab4", + SizeBytes: 456, + }, + }, + QueuedTimestamp: ×tamppb.Timestamp{Seconds: 1007}, + InstanceNameSuffix: "suffix", + DigestFunction: remoteexecution.DigestFunction_SHA1, + } + operationQueueClient.EXPECT().Synchronize(context.Background(), testutil.EqProto(t, &remoteworker.SynchronizeRequest{ + WorkerId: workerID, + InstanceNamePrefix: "prefix", + Platform: platform, + SizeClass: 4, + CurrentState: &remoteworker.CurrentState{ + WorkerState: &remoteworker.CurrentState_Idle{ + Idle: &emptypb.Empty{}, + }, + }, + })).Return(&remoteworker.SynchronizeResponse{ + NextSynchronizationAt: ×tamppb.Timestamp{Seconds: 1020}, + DesiredState: &remoteworker.DesiredState{ + WorkerState: &remoteworker.DesiredState_Executing_{ + Executing: desiredStateExecuting1, + }, + }, + }, nil) + buildExecutor.EXPECT().Execute( + gomock.Any(), + filePool, + nil, + digestFunction, + 
desiredStateExecuting1, + gomock.Any(), + ).Return(&remoteexecution.ExecuteResponse{ + Result: &remoteexecution.ActionResult{}, + }) + mayTerminate, err = bc.Run(context.Background()) + require.Equal(t, false, mayTerminate) + require.NoError(t, err) + + // Synchronize against the scheduler to report the successful + // completion of the action. This should cause the scheduler to + // immediately return a second action. + desiredStateExecuting2 := &remoteworker.DesiredState_Executing{ + ActionDigest: &remoteexecution.Digest{ + Hash: "8c7bdf20235417b8e3bfa695407e1ff0b43e8223", + SizeBytes: 123, + }, + Action: &remoteexecution.Action{ + CommandDigest: &remoteexecution.Digest{ + Hash: "11483c42a98269d01673aa3157836d2882aad5de", + SizeBytes: 456, + }, + }, + QueuedTimestamp: ×tamppb.Timestamp{Seconds: 1008}, + InstanceNameSuffix: "suffix", + DigestFunction: remoteexecution.DigestFunction_SHA1, + } + clock.EXPECT().Now().Return(time.Unix(1015, 0)).Times(2) + timer1 := mock.NewMockTimer(ctrl) + clock.EXPECT().NewTimer(5*time.Second).Return(timer1, nil) + timer1.EXPECT().Stop().Return(true) + operationQueueClient.EXPECT().Synchronize(context.Background(), testutil.EqProto(t, &remoteworker.SynchronizeRequest{ + WorkerId: workerID, + InstanceNamePrefix: "prefix", + Platform: platform, + SizeClass: 4, + CurrentState: &remoteworker.CurrentState{ + WorkerState: &remoteworker.CurrentState_Executing_{ + Executing: &remoteworker.CurrentState_Executing{ + ActionDigest: &remoteexecution.Digest{ + Hash: "da39a3ee5e6b4b0d3255bfef95601890afd80709", + SizeBytes: 123, + }, + ExecutionState: &remoteworker.CurrentState_Executing_Completed{ + Completed: &remoteexecution.ExecuteResponse{ + Result: &remoteexecution.ActionResult{}, + }, + }, + }, + }, + }, + })).Return(&remoteworker.SynchronizeResponse{ + NextSynchronizationAt: ×tamppb.Timestamp{Seconds: 1025}, + DesiredState: &remoteworker.DesiredState{ + WorkerState: &remoteworker.DesiredState_Executing_{ + Executing: desiredStateExecuting2, 
+ }, + }, + }, nil) + buildExecutor.EXPECT().Execute( + gomock.Any(), + filePool, + nil, + digestFunction, + desiredStateExecuting2, + gomock.Any(), + ).Return(&remoteexecution.ExecuteResponse{ + Status: status.New(codes.Internal, "Failed to contact runner").Proto(), + }) + mayTerminate, err = bc.Run(context.Background()) + require.Equal(t, false, mayTerminate) + require.NoError(t, err) + + // Also synchronize to report the outcome of the second action. + // Unlike the first one, it failed catastrophically. This should + // cause the PreferBeingIdle flag to be set, thereby forcing the + // scheduler to transition the worker into the idle state. This + // gives the caller of BuildClient.Run() some time to perform + // addition health checks before requesting more work. + clock.EXPECT().Now().Return(time.Unix(1020, 0)).Times(2) + timer2 := mock.NewMockTimer(ctrl) + clock.EXPECT().NewTimer(5*time.Second).Return(timer2, nil) + timer2.EXPECT().Stop().Return(true) + operationQueueClient.EXPECT().Synchronize(context.Background(), testutil.EqProto(t, &remoteworker.SynchronizeRequest{ + WorkerId: workerID, + InstanceNamePrefix: "prefix", + Platform: platform, + SizeClass: 4, + CurrentState: &remoteworker.CurrentState{ + WorkerState: &remoteworker.CurrentState_Executing_{ + Executing: &remoteworker.CurrentState_Executing{ + ActionDigest: &remoteexecution.Digest{ + Hash: "8c7bdf20235417b8e3bfa695407e1ff0b43e8223", + SizeBytes: 123, + }, + ExecutionState: &remoteworker.CurrentState_Executing_Completed{ + Completed: &remoteexecution.ExecuteResponse{ + Status: status.New(codes.Internal, "Failed to contact runner").Proto(), + }, + }, + }, + }, + }, + PreferBeingIdle: true, + })).Return(&remoteworker.SynchronizeResponse{ + NextSynchronizationAt: ×tamppb.Timestamp{Seconds: 1055}, + DesiredState: &remoteworker.DesiredState{ + WorkerState: &remoteworker.DesiredState_Idle{ + Idle: &emptypb.Empty{}, + }, + }, + }, nil) + mayTerminate, err = bc.Run(context.Background()) + 
require.Equal(t, true, mayTerminate) + require.NoError(t, err) + + // Because we've transitioned back to idle, the next run can + // once again do a readiness check. As long as it fails, no + // synchronizations should be attempted. + buildExecutor.EXPECT().CheckReadiness(context.Background()). + Return(status.Error(codes.Internal, "Still cannot contact runner")) + mayTerminate, err = bc.Run(context.Background()) + require.Equal(t, true, mayTerminate) + testutil.RequireEqualStatus(t, status.Error(codes.Internal, "Worker failed readiness check: Still cannot contact runner"), err) + + // As soon as the readiness check starts to succeed, we should + // do additional synchronizations. + buildExecutor.EXPECT().CheckReadiness(context.Background()) + operationQueueClient.EXPECT().Synchronize(context.Background(), testutil.EqProto(t, &remoteworker.SynchronizeRequest{ + WorkerId: workerID, + InstanceNamePrefix: "prefix", + Platform: platform, + SizeClass: 4, + CurrentState: &remoteworker.CurrentState{ + WorkerState: &remoteworker.CurrentState_Idle{ + Idle: &emptypb.Empty{}, + }, + }, + })).Return(&remoteworker.SynchronizeResponse{ + NextSynchronizationAt: ×tamppb.Timestamp{Seconds: 1065}, + }, nil) + mayTerminate, err = bc.Run(context.Background()) + require.Equal(t, true, mayTerminate) + require.NoError(t, err) + + // If a call to Synchronize() fails, it may be the case we lost + // an execution request returned by the scheduler. Subsequent + // iterations should thus not perform readiness checking, as it + // could cause delays in us trying to pick up the work again. 
+ buildExecutor.EXPECT().CheckReadiness(context.Background()) + operationQueueClient.EXPECT().Synchronize(context.Background(), testutil.EqProto(t, &remoteworker.SynchronizeRequest{ + WorkerId: workerID, + InstanceNamePrefix: "prefix", + Platform: platform, + SizeClass: 4, + CurrentState: &remoteworker.CurrentState{ + WorkerState: &remoteworker.CurrentState_Idle{ + Idle: &emptypb.Empty{}, + }, + }, + })).Return(nil, status.Error(codes.Unavailable, "Connection refused")) + mayTerminate, err = bc.Run(context.Background()) + require.Equal(t, false, mayTerminate) + testutil.RequireEqualStatus(t, status.Error(codes.Unavailable, "Failed to synchronize with scheduler: Connection refused"), err) + + for i := 0; i < 10; i++ { + operationQueueClient.EXPECT().Synchronize(context.Background(), testutil.EqProto(t, &remoteworker.SynchronizeRequest{ + WorkerId: workerID, + InstanceNamePrefix: "prefix", + Platform: platform, + SizeClass: 4, + CurrentState: &remoteworker.CurrentState{ + WorkerState: &remoteworker.CurrentState_Idle{ + Idle: &emptypb.Empty{}, + }, + }, + PreferBeingIdle: true, + })).Return(nil, status.Error(codes.Unavailable, "Connection refused")) + mayTerminate, err = bc.Run(context.Background()) + require.Equal(t, false, mayTerminate) + testutil.RequireEqualStatus(t, status.Error(codes.Unavailable, "Failed to synchronize with scheduler: Connection refused"), err) + } + + // Once Synchronize() starts to work once again, we may perform + // readiness checking again. 
+ operationQueueClient.EXPECT().Synchronize(context.Background(), testutil.EqProto(t, &remoteworker.SynchronizeRequest{ + WorkerId: workerID, + InstanceNamePrefix: "prefix", + Platform: platform, + SizeClass: 4, + CurrentState: &remoteworker.CurrentState{ + WorkerState: &remoteworker.CurrentState_Idle{ + Idle: &emptypb.Empty{}, + }, + }, + PreferBeingIdle: true, + })).Return(&remoteworker.SynchronizeResponse{ + NextSynchronizationAt: ×tamppb.Timestamp{Seconds: 1070}, + }, nil) + mayTerminate, err = bc.Run(context.Background()) + require.Equal(t, true, mayTerminate) + require.NoError(t, err) + + buildExecutor.EXPECT().CheckReadiness(context.Background()) + operationQueueClient.EXPECT().Synchronize(context.Background(), testutil.EqProto(t, &remoteworker.SynchronizeRequest{ + WorkerId: workerID, + InstanceNamePrefix: "prefix", + Platform: platform, + SizeClass: 4, + CurrentState: &remoteworker.CurrentState{ + WorkerState: &remoteworker.CurrentState_Idle{ + Idle: &emptypb.Empty{}, + }, + }, + })).Return(&remoteworker.SynchronizeResponse{ + NextSynchronizationAt: ×tamppb.Timestamp{Seconds: 1071}, + }, nil) + mayTerminate, err = bc.Run(context.Background()) + require.Equal(t, true, mayTerminate) + require.NoError(t, err) + + // If the build client is in the idle state, attempting to let + // it run with a canceled context should not do anything, as the + // worker is capable of terminating immediately. + canceledCtx, cancel := context.WithCancel(context.Background()) + cancel() + mayTerminate, err = bc.Run(canceledCtx) + require.Equal(t, true, mayTerminate) + require.NoError(t, err) + + // If the build client thinks there is a chance the worker might + // have handed out an operation, it must attempt to contact the + // scheduler to mark itself idle, regardless of the context + // being expired. 
+ buildExecutor.EXPECT().CheckReadiness(context.Background()) + operationQueueClient.EXPECT().Synchronize(context.Background(), testutil.EqProto(t, &remoteworker.SynchronizeRequest{ + WorkerId: workerID, + InstanceNamePrefix: "prefix", + Platform: platform, + SizeClass: 4, + CurrentState: &remoteworker.CurrentState{ + WorkerState: &remoteworker.CurrentState_Idle{ + Idle: &emptypb.Empty{}, + }, + }, + })).Return(nil, status.Error(codes.Unavailable, "Failed to return response to worker")) + mayTerminate, err = bc.Run(context.Background()) + require.Equal(t, false, mayTerminate) + testutil.RequireEqualStatus(t, status.Error(codes.Unavailable, "Failed to synchronize with scheduler: Failed to return response to worker"), err) + + clock.EXPECT().Now().Return(time.Unix(1073, 0)) + operationQueueClient.EXPECT().Synchronize(context.Background(), testutil.EqProto(t, &remoteworker.SynchronizeRequest{ + WorkerId: workerID, + InstanceNamePrefix: "prefix", + Platform: platform, + SizeClass: 4, + CurrentState: &remoteworker.CurrentState{ + WorkerState: &remoteworker.CurrentState_Idle{ + Idle: &emptypb.Empty{}, + }, + }, + PreferBeingIdle: true, + })).Return(nil, status.Error(codes.Unavailable, "Failed to return response to worker")) + mayTerminate, err = bc.Run(canceledCtx) + require.Equal(t, false, mayTerminate) + testutil.RequireEqualStatus(t, status.Error(codes.Unavailable, "Failed to synchronize with scheduler: Failed to return response to worker"), err) + + // Because we don't want termination of the worker to hang if + // the scheduler is unavailable for a longer amount of time, we + // only attempt to synchronize for a while. After that we should + // be permitted to terminate, regardless of whether the + // scheduler thinks the worker is executing. 
+ clock.EXPECT().Now().Return(time.Unix(1200, 0)) + mayTerminate, err = bc.Run(canceledCtx) + require.Equal(t, true, mayTerminate) + require.NoError(t, err) +} diff --git a/pkg/builder/build_directory.go b/pkg/builder/build_directory.go new file mode 100644 index 0000000..b92bbc5 --- /dev/null +++ b/pkg/builder/build_directory.go @@ -0,0 +1,47 @@ +package builder + +import ( + "context" + "os" + + re_filesystem "github.com/buildbarn/bb-remote-execution/pkg/filesystem" + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/access" + "github.com/buildbarn/bb-storage/pkg/digest" + "github.com/buildbarn/bb-storage/pkg/filesystem" + "github.com/buildbarn/bb-storage/pkg/filesystem/path" + "github.com/buildbarn/bb-storage/pkg/util" +) + +// BuildDirectory is a directory that may be used for the purpose of +// running build actions. BuildDirectory shares some operations with +// filesystem.Directory, but it has a couple of custom operations that +// implementations may use to run actions in a more efficient and +// manageable way. +type BuildDirectory interface { + ParentPopulatableDirectory + UploadableDirectory + + // Methods inherited from filesystem.Directory. + Mknod(name path.Component, perm os.FileMode, deviceNumber filesystem.DeviceNumber) error + Remove(name path.Component) error + RemoveAll(name path.Component) error + + // Identical to EnterDirectory(), except that it returns a + // BuildDirectory object. + EnterBuildDirectory(name path.Component) (BuildDirectory, error) + + // Installs a set of hooks into the directory to intercept I/O + // operations. The FilePool may be used to allocate storage + // space. The ErrorLogger may be used to report fatal I/O + // errors. Implementations of BuildDirectory are free to let + // this be a no-op, with the disadvantage that they cannot apply + // resource limits or provide rich I/O error messages. 
+ InstallHooks(filePool re_filesystem.FilePool, errorLogger util.ErrorLogger) + + // Recursively merges the contents of a Directory stored in the + // Content Addressable Storage into a local directory. If this + // process is synchronous, this function can return a + // synchronous error. If this process is lazy/asynchronous, the + // provided ErrorLogger may be used to return an error. + MergeDirectoryContents(ctx context.Context, errorLogger util.ErrorLogger, digest digest.Digest, monitor access.UnreadDirectoryMonitor) error +} diff --git a/pkg/builder/build_directory_creator.go b/pkg/builder/build_directory_creator.go new file mode 100644 index 0000000..bbc1bd8 --- /dev/null +++ b/pkg/builder/build_directory_creator.go @@ -0,0 +1,14 @@ +package builder + +import ( + "context" + + "github.com/buildbarn/bb-storage/pkg/digest" + "github.com/buildbarn/bb-storage/pkg/filesystem/path" +) + +// BuildDirectoryCreator is used by LocalBuildExecutor to obtain build +// directories in which build actions are executed. 
+type BuildDirectoryCreator interface { + GetBuildDirectory(ctx context.Context, actionDigest digest.Digest, mayRunInParallel bool) (BuildDirectory, *path.Trace, error) +} diff --git a/pkg/builder/build_executor.go b/pkg/builder/build_executor.go new file mode 100644 index 0000000..f9ad465 --- /dev/null +++ b/pkg/builder/build_executor.go @@ -0,0 +1,70 @@ +package builder + +import ( + "context" + + remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + "github.com/buildbarn/bb-remote-execution/pkg/filesystem" + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/access" + "github.com/buildbarn/bb-remote-execution/pkg/proto/remoteworker" + "github.com/buildbarn/bb-storage/pkg/digest" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/anypb" +) + +// NewDefaultExecuteResponse creates an ExecuteResponse message that +// contains all fields that BuildExecutor should set by default. +func NewDefaultExecuteResponse(request *remoteworker.DesiredState_Executing) *remoteexecution.ExecuteResponse { + return &remoteexecution.ExecuteResponse{ + Result: &remoteexecution.ActionResult{ + ExecutionMetadata: &remoteexecution.ExecutedActionMetadata{ + AuxiliaryMetadata: append([]*anypb.Any(nil), request.AuxiliaryMetadata...), + }, + }, + } +} + +// attachErrorToExecuteResponse extends an ExecuteResponse to contain an +// error, indicating that the action has failed. If the ExecuteResponse +// already contains an error, it is not overwritten. This is done, +// because the first error is typically the most interesting one to +// return to the user. As successive errors may well be related to the +// first, returning all of them would be noisy. 
+func attachErrorToExecuteResponse(response *remoteexecution.ExecuteResponse, err error) { + if status.ErrorProto(response.Status) == nil { + response.Status = status.Convert(err).Proto() + } +} + +func executeResponseIsSuccessful(response *remoteexecution.ExecuteResponse) bool { + return status.ErrorProto(response.Status) == nil && response.Result.ExitCode == 0 +} + +// GetResultAndGRPCCodeFromExecuteResponse converts an ExecuteResponse +// to a pair of strings that describe the execution outcome. These +// strings can be used as part of metrics labels. +// +// TODO: Move this into some other package, so that pkg/scheduler +// doesn't need to depend on pkg/builder? +func GetResultAndGRPCCodeFromExecuteResponse(response *remoteexecution.ExecuteResponse) (result, grpcCode string) { + if c := status.FromProto(response.Status).Code(); c != codes.OK { + result = "Failure" + grpcCode = c.String() + } else if actionResult := response.Result; actionResult == nil { + result = "ActionResultMissing" + } else if actionResult.ExitCode == 0 { + result = "Success" + } else { + result = "NonZeroExitCode" + } + return +} + +// BuildExecutor is the interface for the ability to run Bazel execute +// requests and yield an execute response. 
+type BuildExecutor interface { + CheckReadiness(ctx context.Context) error + Execute(ctx context.Context, filePool filesystem.FilePool, monitor access.UnreadDirectoryMonitor, digestFunction digest.Function, request *remoteworker.DesiredState_Executing, executionStateUpdates chan<- *remoteworker.CurrentState_Executing) *remoteexecution.ExecuteResponse +} diff --git a/pkg/builder/caching_build_executor.go b/pkg/builder/caching_build_executor.go new file mode 100644 index 0000000..29942bb --- /dev/null +++ b/pkg/builder/caching_build_executor.go @@ -0,0 +1,76 @@ +package builder + +import ( + "context" + "net/url" + + remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + "github.com/buildbarn/bb-remote-execution/pkg/filesystem" + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/access" + cas_proto "github.com/buildbarn/bb-remote-execution/pkg/proto/cas" + "github.com/buildbarn/bb-remote-execution/pkg/proto/remoteworker" + re_util "github.com/buildbarn/bb-remote-execution/pkg/util" + "github.com/buildbarn/bb-storage/pkg/blobstore" + "github.com/buildbarn/bb-storage/pkg/blobstore/buffer" + "github.com/buildbarn/bb-storage/pkg/digest" + "github.com/buildbarn/bb-storage/pkg/util" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +type cachingBuildExecutor struct { + BuildExecutor + contentAddressableStorage blobstore.BlobAccess + actionCache blobstore.BlobAccess + browserURL *url.URL +} + +// NewCachingBuildExecutor creates an adapter for BuildExecutor that +// stores action results in the Action Cache (AC) if they may be cached. +// If they may not be cached, they are stored in the Content Addressable +// Storage (CAS) instead. +// +// In both cases, a link to bb_browser is added to the ExecuteResponse, +// so that the user may inspect the Action and ActionResult in detail. 
+func NewCachingBuildExecutor(base BuildExecutor, contentAddressableStorage, actionCache blobstore.BlobAccess, browserURL *url.URL) BuildExecutor { + return &cachingBuildExecutor{ + BuildExecutor: base, + contentAddressableStorage: contentAddressableStorage, + actionCache: actionCache, + browserURL: browserURL, + } +} + +func (be *cachingBuildExecutor) Execute(ctx context.Context, filePool filesystem.FilePool, monitor access.UnreadDirectoryMonitor, digestFunction digest.Function, request *remoteworker.DesiredState_Executing, executionStateUpdates chan<- *remoteworker.CurrentState_Executing) *remoteexecution.ExecuteResponse { + response := be.BuildExecutor.Execute(ctx, filePool, monitor, digestFunction, request, executionStateUpdates) + if actionDigest, err := digestFunction.NewDigestFromProto(request.ActionDigest); err != nil { + attachErrorToExecuteResponse(response, util.StatusWrap(err, "Failed to extract digest for action")) + } else if action := request.Action; action == nil { + attachErrorToExecuteResponse(response, status.Error(codes.InvalidArgument, "Request does not contain an action")) + } else if !action.DoNotCache && executeResponseIsSuccessful(response) { + // Store result in the Action Cache. + if err := be.actionCache.Put(ctx, actionDigest, buffer.NewProtoBufferFromProto(response.Result, buffer.UserProvided)); err == nil { + response.Message = "Action details (cached result): " + re_util.GetBrowserURL(be.browserURL, "action", actionDigest) + } else { + attachErrorToExecuteResponse(response, util.StatusWrap(err, "Failed to store cached action result")) + } + } else { + // Extension: store the result in the Content + // Addressable Storage, so the user can at least inspect + // it through bb_browser. 
+ if historicalExecuteResponseDigest, err := blobstore.CASPutProto( + ctx, + be.contentAddressableStorage, + &cas_proto.HistoricalExecuteResponse{ + ActionDigest: actionDigest.GetProto(), + ExecuteResponse: response, + }, + actionDigest.GetDigestFunction()); err == nil { + response.Message = "Action details (uncached result): " + re_util.GetBrowserURL(be.browserURL, "historical_execute_response", historicalExecuteResponseDigest) + } else { + attachErrorToExecuteResponse(response, util.StatusWrap(err, "Failed to store historical execute response")) + } + } + return response +} diff --git a/pkg/builder/caching_build_executor_test.go b/pkg/builder/caching_build_executor_test.go new file mode 100644 index 0000000..77845f9 --- /dev/null +++ b/pkg/builder/caching_build_executor_test.go @@ -0,0 +1,433 @@ +package builder_test + +import ( + "context" + "net/url" + "testing" + + remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + "github.com/buildbarn/bb-remote-execution/internal/mock" + "github.com/buildbarn/bb-remote-execution/pkg/builder" + cas_proto "github.com/buildbarn/bb-remote-execution/pkg/proto/cas" + "github.com/buildbarn/bb-remote-execution/pkg/proto/remoteworker" + "github.com/buildbarn/bb-storage/pkg/blobstore/buffer" + "github.com/buildbarn/bb-storage/pkg/digest" + "github.com/buildbarn/bb-storage/pkg/testutil" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" + status_pb "google.golang.org/genproto/googleapis/rpc/status" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// Only when no error is defined in the ExecuteResult and DoNotCache is +// not set, are we allowed to store it in the Action Cache. 
+func TestCachingBuildExecutorCachedSuccess(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + baseBuildExecutor := mock.NewMockBuildExecutor(ctrl) + actionDigest := &remoteexecution.Digest{ + Hash: "64ec88ca00b268e5ba1a35678a1b5316d212f4f366b2477232534a8aeca37f3c", + SizeBytes: 11, + } + action := &remoteexecution.Action{DoNotCache: false} + request := &remoteworker.DesiredState_Executing{ + ActionDigest: actionDigest, + Action: action, + } + filePool := mock.NewMockFilePool(ctrl) + monitor := mock.NewMockUnreadDirectoryMonitor(ctrl) + digestFunction := digest.MustNewFunction("freebsd12", remoteexecution.DigestFunction_SHA256) + var metadata chan<- *remoteworker.CurrentState_Executing = make(chan *remoteworker.CurrentState_Executing, 10) + baseBuildExecutor.EXPECT().Execute(ctx, filePool, monitor, digestFunction, request, metadata).Return(&remoteexecution.ExecuteResponse{ + Result: &remoteexecution.ActionResult{ + StdoutRaw: []byte("Hello, world!"), + }, + }) + contentAddressableStorage := mock.NewMockBlobAccess(ctrl) + actionCache := mock.NewMockBlobAccess(ctrl) + actionCache.EXPECT().Put( + ctx, + digest.MustNewDigest("freebsd12", remoteexecution.DigestFunction_SHA256, "64ec88ca00b268e5ba1a35678a1b5316d212f4f366b2477232534a8aeca37f3c", 11), + gomock.Any()). 
+ DoAndReturn(func(ctx context.Context, digest digest.Digest, b buffer.Buffer) error { + actionResult, err := b.ToProto(&remoteexecution.ActionResult{}, 10000) + require.NoError(t, err) + testutil.RequireEqualProto(t, &remoteexecution.ActionResult{ + StdoutRaw: []byte("Hello, world!"), + }, actionResult) + return nil + }) + cachingBuildExecutor := builder.NewCachingBuildExecutor(baseBuildExecutor, contentAddressableStorage, actionCache, &url.URL{ + Scheme: "https", + Host: "example.com", + Path: "/some/sub/directory", + }) + + executeResponse := cachingBuildExecutor.Execute(ctx, filePool, monitor, digestFunction, request, metadata) + testutil.RequireEqualProto(t, &remoteexecution.ExecuteResponse{ + Result: &remoteexecution.ActionResult{ + StdoutRaw: []byte("Hello, world!"), + }, + Message: "Action details (cached result): https://example.com/some/sub/directory/freebsd12/blobs/sha256/action/64ec88ca00b268e5ba1a35678a1b5316d212f4f366b2477232534a8aeca37f3c-11/", + }, executeResponse) +} + +// An explicit Status object with an 'OK' code should also be treated as +// success. The entry should still be stored in the Action Cache. 
+func TestCachingBuildExecutorCachedSuccessExplicitOK(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + baseBuildExecutor := mock.NewMockBuildExecutor(ctrl) + actionDigest := &remoteexecution.Digest{ + Hash: "64ec88ca00b268e5ba1a35678a1b5316d212f4f366b2477232534a8aeca37f3c", + SizeBytes: 11, + } + action := &remoteexecution.Action{DoNotCache: false} + request := &remoteworker.DesiredState_Executing{ + ActionDigest: actionDigest, + Action: action, + } + filePool := mock.NewMockFilePool(ctrl) + monitor := mock.NewMockUnreadDirectoryMonitor(ctrl) + digestFunction := digest.MustNewFunction("freebsd12", remoteexecution.DigestFunction_SHA256) + var metadata chan<- *remoteworker.CurrentState_Executing = make(chan *remoteworker.CurrentState_Executing, 10) + baseBuildExecutor.EXPECT().Execute(ctx, filePool, monitor, digestFunction, request, metadata).Return(&remoteexecution.ExecuteResponse{ + Result: &remoteexecution.ActionResult{ + StdoutRaw: []byte("Hello, world!"), + }, + Status: &status_pb.Status{Message: "This is not an error, because it has code zero"}, + }) + contentAddressableStorage := mock.NewMockBlobAccess(ctrl) + actionCache := mock.NewMockBlobAccess(ctrl) + actionCache.EXPECT().Put( + ctx, + digest.MustNewDigest("freebsd12", remoteexecution.DigestFunction_SHA256, "64ec88ca00b268e5ba1a35678a1b5316d212f4f366b2477232534a8aeca37f3c", 11), + gomock.Any()). 
+ DoAndReturn(func(ctx context.Context, digest digest.Digest, b buffer.Buffer) error { + actionResult, err := b.ToProto(&remoteexecution.ActionResult{}, 10000) + require.NoError(t, err) + testutil.RequireEqualProto(t, &remoteexecution.ActionResult{ + StdoutRaw: []byte("Hello, world!"), + }, actionResult) + return nil + }) + cachingBuildExecutor := builder.NewCachingBuildExecutor(baseBuildExecutor, contentAddressableStorage, actionCache, &url.URL{ + Scheme: "https", + Host: "example.com", + Path: "/some/sub/directory", + }) + + executeResponse := cachingBuildExecutor.Execute(ctx, filePool, monitor, digestFunction, request, metadata) + testutil.RequireEqualProto(t, &remoteexecution.ExecuteResponse{ + Result: &remoteexecution.ActionResult{ + StdoutRaw: []byte("Hello, world!"), + }, + Status: &status_pb.Status{Message: "This is not an error, because it has code zero"}, + Message: "Action details (cached result): https://example.com/some/sub/directory/freebsd12/blobs/sha256/action/64ec88ca00b268e5ba1a35678a1b5316d212f4f366b2477232534a8aeca37f3c-11/", + }, executeResponse) +} + +// When the exit code of the build action is non-zero, we may store the +// result in the Action Cache. Bazel permits this nowadays. More details: +// https://github.com/bazelbuild/bazel/issues/7137 +// +// This implementation decides to store an entry in the Content +// Addressable Storage regardless, as that allows us to obtain stable +// URLs to build failures. 
+func TestCachingBuildExecutorCachedSuccessNonZeroExitCode(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + baseBuildExecutor := mock.NewMockBuildExecutor(ctrl) + actionDigest := &remoteexecution.Digest{ + Hash: "64ec88ca00b268e5ba1a35678a1b5316d212f4f366b2477232534a8aeca37f3c", + SizeBytes: 11, + } + action := &remoteexecution.Action{DoNotCache: false} + request := &remoteworker.DesiredState_Executing{ + ActionDigest: actionDigest, + Action: action, + } + filePool := mock.NewMockFilePool(ctrl) + monitor := mock.NewMockUnreadDirectoryMonitor(ctrl) + digestFunction := digest.MustNewFunction("freebsd12", remoteexecution.DigestFunction_SHA256) + var metadata chan<- *remoteworker.CurrentState_Executing = make(chan *remoteworker.CurrentState_Executing, 10) + baseBuildExecutor.EXPECT().Execute(ctx, filePool, monitor, digestFunction, request, metadata).Return(&remoteexecution.ExecuteResponse{ + Result: &remoteexecution.ActionResult{ + ExitCode: 127, + StderrRaw: []byte("Compiler error!"), + }, + }) + contentAddressableStorage := mock.NewMockBlobAccess(ctrl) + contentAddressableStorage.EXPECT().Put( + ctx, + digest.MustNewDigest("freebsd12", remoteexecution.DigestFunction_SHA256, "bb1107706f3aa379d68aa61062f56d99d24a667ec18d5756fb6df1ba9baa1fdc", 93), + gomock.Any()). 
+ DoAndReturn(func(ctx context.Context, digest digest.Digest, b buffer.Buffer) error { + historicalExecuteResponse, err := b.ToProto(&cas_proto.HistoricalExecuteResponse{}, 10000) + require.NoError(t, err) + testutil.RequireEqualProto(t, &cas_proto.HistoricalExecuteResponse{ + ActionDigest: &remoteexecution.Digest{ + Hash: "64ec88ca00b268e5ba1a35678a1b5316d212f4f366b2477232534a8aeca37f3c", + SizeBytes: 11, + }, + ExecuteResponse: &remoteexecution.ExecuteResponse{ + Result: &remoteexecution.ActionResult{ + ExitCode: 127, + StderrRaw: []byte("Compiler error!"), + }, + }, + }, historicalExecuteResponse) + return nil + }) + actionCache := mock.NewMockBlobAccess(ctrl) + cachingBuildExecutor := builder.NewCachingBuildExecutor(baseBuildExecutor, contentAddressableStorage, actionCache, &url.URL{ + Scheme: "https", + Host: "example.com", + Path: "/some/sub/directory", + }) + + executeResponse := cachingBuildExecutor.Execute(ctx, filePool, monitor, digestFunction, request, metadata) + testutil.RequireEqualProto(t, &remoteexecution.ExecuteResponse{ + Result: &remoteexecution.ActionResult{ + ExitCode: 127, + StderrRaw: []byte("Compiler error!"), + }, + Message: "Action details (uncached result): https://example.com/some/sub/directory/freebsd12/blobs/sha256/historical_execute_response/bb1107706f3aa379d68aa61062f56d99d24a667ec18d5756fb6df1ba9baa1fdc-93/", + }, executeResponse) +} + +// In case we fail to write an entry into the Action Cache, we should +// return the original response, but with the error attached to it. 
+func TestCachingBuildExecutorCachedStorageFailure(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + baseBuildExecutor := mock.NewMockBuildExecutor(ctrl) + actionDigest := &remoteexecution.Digest{ + Hash: "64ec88ca00b268e5ba1a35678a1b5316d212f4f366b2477232534a8aeca37f3c", + SizeBytes: 11, + } + action := &remoteexecution.Action{DoNotCache: false} + request := &remoteworker.DesiredState_Executing{ + ActionDigest: actionDigest, + Action: action, + } + filePool := mock.NewMockFilePool(ctrl) + monitor := mock.NewMockUnreadDirectoryMonitor(ctrl) + digestFunction := digest.MustNewFunction("freebsd12", remoteexecution.DigestFunction_SHA256) + var metadata chan<- *remoteworker.CurrentState_Executing = make(chan *remoteworker.CurrentState_Executing, 10) + baseBuildExecutor.EXPECT().Execute(ctx, filePool, monitor, digestFunction, request, metadata).Return(&remoteexecution.ExecuteResponse{ + Result: &remoteexecution.ActionResult{ + StdoutRaw: []byte("Hello, world!"), + }, + }) + contentAddressableStorage := mock.NewMockBlobAccess(ctrl) + actionCache := mock.NewMockBlobAccess(ctrl) + actionCache.EXPECT().Put( + ctx, + digest.MustNewDigest("freebsd12", remoteexecution.DigestFunction_SHA256, "64ec88ca00b268e5ba1a35678a1b5316d212f4f366b2477232534a8aeca37f3c", 11), + gomock.Any()). 
+ DoAndReturn(func(ctx context.Context, digest digest.Digest, b buffer.Buffer) error { + actionResult, err := b.ToProto(&remoteexecution.ActionResult{}, 10000) + require.NoError(t, err) + testutil.RequireEqualProto(t, &remoteexecution.ActionResult{ + StdoutRaw: []byte("Hello, world!"), + }, actionResult) + return status.Error(codes.Internal, "Network problems") + }) + cachingBuildExecutor := builder.NewCachingBuildExecutor(baseBuildExecutor, contentAddressableStorage, actionCache, &url.URL{ + Scheme: "https", + Host: "example.com", + Path: "/", + }) + + executeResponse := cachingBuildExecutor.Execute(ctx, filePool, monitor, digestFunction, request, metadata) + testutil.RequireEqualProto(t, &remoteexecution.ExecuteResponse{ + Result: &remoteexecution.ActionResult{ + StdoutRaw: []byte("Hello, world!"), + }, + Status: status.New(codes.Internal, "Failed to store cached action result: Network problems").Proto(), + }, executeResponse) +} + +// When the DoNotCache flag is set, we should not store results in the +// Action Cache. 
+func TestCachingBuildExecutorUncachedDoNotCache(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + baseBuildExecutor := mock.NewMockBuildExecutor(ctrl) + actionDigest := &remoteexecution.Digest{ + Hash: "64ec88ca00b268e5ba1a35678a1b5316d212f4f366b2477232534a8aeca37f3c", + SizeBytes: 11, + } + action := &remoteexecution.Action{DoNotCache: true} + request := &remoteworker.DesiredState_Executing{ + ActionDigest: actionDigest, + Action: action, + } + filePool := mock.NewMockFilePool(ctrl) + monitor := mock.NewMockUnreadDirectoryMonitor(ctrl) + digestFunction := digest.MustNewFunction("freebsd12", remoteexecution.DigestFunction_SHA256) + var metadata chan<- *remoteworker.CurrentState_Executing = make(chan *remoteworker.CurrentState_Executing, 10) + baseBuildExecutor.EXPECT().Execute(ctx, filePool, monitor, digestFunction, request, metadata).Return(&remoteexecution.ExecuteResponse{ + Result: &remoteexecution.ActionResult{ + StdoutRaw: []byte("Hello, world!"), + }, + }) + contentAddressableStorage := mock.NewMockBlobAccess(ctrl) + contentAddressableStorage.EXPECT().Put( + ctx, + digest.MustNewDigest("freebsd12", remoteexecution.DigestFunction_SHA256, "5ed2d5720b99f5575542bb4f89e84b5e00e34ab652292974fdb814ab7dc3c92e", 89), + gomock.Any()). 
+ DoAndReturn(func(ctx context.Context, digest digest.Digest, b buffer.Buffer) error { + historicalExecuteResponse, err := b.ToProto(&cas_proto.HistoricalExecuteResponse{}, 10000) + require.NoError(t, err) + testutil.RequireEqualProto(t, &cas_proto.HistoricalExecuteResponse{ + ActionDigest: &remoteexecution.Digest{ + Hash: "64ec88ca00b268e5ba1a35678a1b5316d212f4f366b2477232534a8aeca37f3c", + SizeBytes: 11, + }, + ExecuteResponse: &remoteexecution.ExecuteResponse{ + Result: &remoteexecution.ActionResult{ + StdoutRaw: []byte("Hello, world!"), + }, + }, + }, historicalExecuteResponse) + return nil + }) + actionCache := mock.NewMockBlobAccess(ctrl) + cachingBuildExecutor := builder.NewCachingBuildExecutor(baseBuildExecutor, contentAddressableStorage, actionCache, &url.URL{ + Scheme: "http", + Host: "example.com", + Path: "/some/sub/directory/", + }) + + executeResponse := cachingBuildExecutor.Execute(ctx, filePool, monitor, digestFunction, request, metadata) + testutil.RequireEqualProto(t, &remoteexecution.ExecuteResponse{ + Result: &remoteexecution.ActionResult{ + StdoutRaw: []byte("Hello, world!"), + }, + Message: "Action details (uncached result): http://example.com/some/sub/directory/freebsd12/blobs/sha256/historical_execute_response/5ed2d5720b99f5575542bb4f89e84b5e00e34ab652292974fdb814ab7dc3c92e-89/", + }, executeResponse) +} + +// We should also not store results in the Action Cache when an error is +// part of the ExecuteResponse. 
+func TestCachingBuildExecutorUncachedError(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + baseBuildExecutor := mock.NewMockBuildExecutor(ctrl) + actionDigest := &remoteexecution.Digest{ + Hash: "64ec88ca00b268e5ba1a35678a1b5316d212f4f366b2477232534a8aeca37f3c", + SizeBytes: 11, + } + action := &remoteexecution.Action{DoNotCache: false} + request := &remoteworker.DesiredState_Executing{ + ActionDigest: actionDigest, + Action: action, + } + filePool := mock.NewMockFilePool(ctrl) + monitor := mock.NewMockUnreadDirectoryMonitor(ctrl) + digestFunction := digest.MustNewFunction("freebsd12", remoteexecution.DigestFunction_SHA256) + var metadata chan<- *remoteworker.CurrentState_Executing = make(chan *remoteworker.CurrentState_Executing, 10) + baseBuildExecutor.EXPECT().Execute(ctx, filePool, monitor, digestFunction, request, metadata).Return(&remoteexecution.ExecuteResponse{ + Result: &remoteexecution.ActionResult{ + StdoutRaw: []byte("Compiling..."), + }, + Status: status.New(codes.DeadlineExceeded, "Build took more than ten seconds").Proto(), + }) + contentAddressableStorage := mock.NewMockBlobAccess(ctrl) + contentAddressableStorage.EXPECT().Put( + ctx, + digest.MustNewDigest("freebsd12", remoteexecution.DigestFunction_SHA256, "a6e4f00dd21540b0b653dcd195b3d54ea4c0b3ca679cf6a69eb7b0dbd378c2cc", 126), + gomock.Any()). 
+ DoAndReturn(func(ctx context.Context, digest digest.Digest, b buffer.Buffer) error { + historicalExecuteResponse, err := b.ToProto(&cas_proto.HistoricalExecuteResponse{}, 10000) + require.NoError(t, err) + testutil.RequireEqualProto(t, &cas_proto.HistoricalExecuteResponse{ + ActionDigest: &remoteexecution.Digest{ + Hash: "64ec88ca00b268e5ba1a35678a1b5316d212f4f366b2477232534a8aeca37f3c", + SizeBytes: 11, + }, + ExecuteResponse: &remoteexecution.ExecuteResponse{ + Result: &remoteexecution.ActionResult{ + StdoutRaw: []byte("Compiling..."), + }, + Status: status.New(codes.DeadlineExceeded, "Build took more than ten seconds").Proto(), + }, + }, historicalExecuteResponse) + return nil + }) + actionCache := mock.NewMockBlobAccess(ctrl) + cachingBuildExecutor := builder.NewCachingBuildExecutor(baseBuildExecutor, contentAddressableStorage, actionCache, &url.URL{ + Scheme: "http", + Host: "example.com", + Path: "/some/sub/directory/", + }) + + executeResponse := cachingBuildExecutor.Execute(ctx, filePool, monitor, digestFunction, request, metadata) + testutil.RequireEqualProto(t, &remoteexecution.ExecuteResponse{ + Result: &remoteexecution.ActionResult{ + StdoutRaw: []byte("Compiling..."), + }, + Status: status.New(codes.DeadlineExceeded, "Build took more than ten seconds").Proto(), + Message: "Action details (uncached result): http://example.com/some/sub/directory/freebsd12/blobs/sha256/historical_execute_response/a6e4f00dd21540b0b653dcd195b3d54ea4c0b3ca679cf6a69eb7b0dbd378c2cc-126/", + }, executeResponse) +} + +// An error generated by uploading the HistoricalExecuteResponse should not +// overwrite the error that is already part of the ExecuteResponse. 
+func TestCachingBuildExecutorUncachedStorageFailure(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + baseBuildExecutor := mock.NewMockBuildExecutor(ctrl) + actionDigest := &remoteexecution.Digest{ + Hash: "64ec88ca00b268e5ba1a35678a1b5316d212f4f366b2477232534a8aeca37f3c", + SizeBytes: 11, + } + action := &remoteexecution.Action{DoNotCache: false} + request := &remoteworker.DesiredState_Executing{ + ActionDigest: actionDigest, + Action: action, + } + filePool := mock.NewMockFilePool(ctrl) + monitor := mock.NewMockUnreadDirectoryMonitor(ctrl) + digestFunction := digest.MustNewFunction("freebsd12", remoteexecution.DigestFunction_SHA256) + var metadata chan<- *remoteworker.CurrentState_Executing = make(chan *remoteworker.CurrentState_Executing, 10) + baseBuildExecutor.EXPECT().Execute(ctx, filePool, monitor, digestFunction, request, metadata).Return(&remoteexecution.ExecuteResponse{ + Result: &remoteexecution.ActionResult{ + StdoutRaw: []byte("Compiling..."), + }, + Status: status.New(codes.DeadlineExceeded, "Build took more than ten seconds").Proto(), + }) + contentAddressableStorage := mock.NewMockBlobAccess(ctrl) + contentAddressableStorage.EXPECT().Put( + ctx, + digest.MustNewDigest("freebsd12", remoteexecution.DigestFunction_SHA256, "a6e4f00dd21540b0b653dcd195b3d54ea4c0b3ca679cf6a69eb7b0dbd378c2cc", 126), + gomock.Any()). 
+ DoAndReturn(func(ctx context.Context, digest digest.Digest, b buffer.Buffer) error { + historicalExecuteResponse, err := b.ToProto(&cas_proto.HistoricalExecuteResponse{}, 10000) + require.NoError(t, err) + testutil.RequireEqualProto(t, &cas_proto.HistoricalExecuteResponse{ + ActionDigest: &remoteexecution.Digest{ + Hash: "64ec88ca00b268e5ba1a35678a1b5316d212f4f366b2477232534a8aeca37f3c", + SizeBytes: 11, + }, + ExecuteResponse: &remoteexecution.ExecuteResponse{ + Result: &remoteexecution.ActionResult{ + StdoutRaw: []byte("Compiling..."), + }, + Status: status.New(codes.DeadlineExceeded, "Build took more than ten seconds").Proto(), + }, + }, historicalExecuteResponse) + return status.Error(codes.Internal, "Cannot store historical execute response") + }) + actionCache := mock.NewMockBlobAccess(ctrl) + cachingBuildExecutor := builder.NewCachingBuildExecutor(baseBuildExecutor, contentAddressableStorage, actionCache, &url.URL{ + Scheme: "http", + Host: "example.com", + Path: "/some/sub/directory/", + }) + + executeResponse := cachingBuildExecutor.Execute(ctx, filePool, monitor, digestFunction, request, metadata) + testutil.RequireEqualProto(t, &remoteexecution.ExecuteResponse{ + Result: &remoteexecution.ActionResult{ + StdoutRaw: []byte("Compiling..."), + }, + Status: status.New(codes.DeadlineExceeded, "Build took more than ten seconds").Proto(), + }, executeResponse) +} diff --git a/pkg/builder/clean_build_directory_creator.go b/pkg/builder/clean_build_directory_creator.go new file mode 100644 index 0000000..91a39cd --- /dev/null +++ b/pkg/builder/clean_build_directory_creator.go @@ -0,0 +1,61 @@ +package builder + +import ( + "context" + + "github.com/buildbarn/bb-remote-execution/pkg/cleaner" + "github.com/buildbarn/bb-storage/pkg/digest" + "github.com/buildbarn/bb-storage/pkg/filesystem/path" + "github.com/buildbarn/bb-storage/pkg/util" +) + +type cleanBuildDirectoryCreator struct { + base BuildDirectoryCreator + idleInvoker *cleaner.IdleInvoker +} + +// 
NewCleanBuildDirectoryCreator is an adapter for BuildDirectoryCreator +// that upon acquisition and release calls into a Cleaner. This Cleaner +// may, for example, be set up to empty out the build directory. This +// guarantees that build actions aren't able to see data left behind by +// ones that ran previously. +func NewCleanBuildDirectoryCreator(base BuildDirectoryCreator, idleInvoker *cleaner.IdleInvoker) BuildDirectoryCreator { + return &cleanBuildDirectoryCreator{ + base: base, + idleInvoker: idleInvoker, + } +} + +func (dc *cleanBuildDirectoryCreator) GetBuildDirectory(ctx context.Context, actionDigest digest.Digest, mayRunInParallel bool) (BuildDirectory, *path.Trace, error) { + if err := dc.idleInvoker.Acquire(ctx); err != nil { + return nil, nil, util.StatusWrap(err, "Failed to clean before acquiring build directory") + } + buildDirectory, buildDirectoryPath, err := dc.base.GetBuildDirectory(ctx, actionDigest, mayRunInParallel) + if err != nil { + dc.idleInvoker.Release(ctx) + return nil, nil, err + } + return &cleanBuildDirectory{ + BuildDirectory: buildDirectory, + idleInvoker: dc.idleInvoker, + context: ctx, + }, buildDirectoryPath, nil +} + +type cleanBuildDirectory struct { + BuildDirectory + idleInvoker *cleaner.IdleInvoker + context context.Context +} + +func (d cleanBuildDirectory) Close() error { + err1 := d.BuildDirectory.Close() + err2 := d.idleInvoker.Release(d.context) + if err1 != nil { + return err1 + } + if err2 != nil { + return util.StatusWrap(err2, "Failed to clean after releasing build directory") + } + return nil +} diff --git a/pkg/builder/clean_build_directory_creator_test.go b/pkg/builder/clean_build_directory_creator_test.go new file mode 100644 index 0000000..29506b6 --- /dev/null +++ b/pkg/builder/clean_build_directory_creator_test.go @@ -0,0 +1,148 @@ +package builder_test + +import ( + "context" + "os" + "testing" + + remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + 
"github.com/buildbarn/bb-remote-execution/internal/mock" + "github.com/buildbarn/bb-remote-execution/pkg/builder" + "github.com/buildbarn/bb-remote-execution/pkg/cleaner" + "github.com/buildbarn/bb-storage/pkg/digest" + "github.com/buildbarn/bb-storage/pkg/filesystem/path" + "github.com/buildbarn/bb-storage/pkg/testutil" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func TestCleanBuildDirectoryCreator(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + baseBuildDirectoryCreator := mock.NewMockBuildDirectoryCreator(ctrl) + baseCleaner := mock.NewMockCleaner(ctrl) + buildDirectoryCreator := builder.NewCleanBuildDirectoryCreator(baseBuildDirectoryCreator, cleaner.NewIdleInvoker(baseCleaner.Call)) + + t.Run("CleanerAcquireFailure", func(t *testing.T) { + // Failure to clean prior to acquiring a build directory + // should be propagated. + baseCleaner.EXPECT().Call(ctx).Return(status.Error(codes.Internal, "Cannot remove files from build directory")) + + _, _, err := buildDirectoryCreator.GetBuildDirectory( + ctx, + digest.MustNewDigest("debian8", remoteexecution.DigestFunction_SHA256, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", 0), + false) + testutil.RequireEqualStatus(t, status.Error(codes.Internal, "Failed to clean before acquiring build directory: Cannot remove files from build directory"), err) + }) + + t.Run("GetBuildDirectoryFailure", func(t *testing.T) { + // If we fail to get the underlying build directory, we + // should release the cleaner, as its use count becomes + // invalid afterwards. This should trigger another + // clean. 
+ baseCleaner.EXPECT().Call(ctx) + baseBuildDirectoryCreator.EXPECT().GetBuildDirectory( + ctx, + digest.MustNewDigest("debian8", remoteexecution.DigestFunction_SHA256, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", 0), + false, + ).Return(nil, nil, status.Error(codes.Internal, "No space left on device")) + baseCleaner.EXPECT().Call(ctx) + + _, _, err := buildDirectoryCreator.GetBuildDirectory( + ctx, + digest.MustNewDigest("debian8", remoteexecution.DigestFunction_SHA256, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", 0), + false) + require.Equal(t, status.Error(codes.Internal, "No space left on device"), err) + }) + + t.Run("CloseFailure", func(t *testing.T) { + // Successfully obtain a build directory. + baseCleaner.EXPECT().Call(ctx) + baseBuildDirectory := mock.NewMockBuildDirectory(ctrl) + baseBuildDirectoryCreator.EXPECT().GetBuildDirectory( + ctx, + digest.MustNewDigest("debian8", remoteexecution.DigestFunction_SHA256, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", 0), + false, + ).Return(baseBuildDirectory, ((*path.Trace)(nil)).Append(path.MustNewComponent("base-directory")), nil) + + buildDirectory, buildDirectoryPath, err := buildDirectoryCreator.GetBuildDirectory( + ctx, + digest.MustNewDigest("debian8", remoteexecution.DigestFunction_SHA256, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", 0), + false) + require.NoError(t, err) + require.Equal(t, ((*path.Trace)(nil)).Append(path.MustNewComponent("base-directory")), buildDirectoryPath) + + // Validate that calls against the directory are forwarded. + baseBuildDirectory.EXPECT().Mkdir(path.MustNewComponent("hello"), os.FileMode(0o700)) + + require.NoError(t, buildDirectory.Mkdir(path.MustNewComponent("hello"), os.FileMode(0o700))) + + // In case closing the directory fails, we should still + // release the IdleInvoker, causing another clean to be + // performed. 
Without it, the reference count on the + // IdleInvoker would leak. + baseBuildDirectory.EXPECT().Close().Return(status.Error(codes.Internal, "Failed to flush data")) + baseCleaner.EXPECT().Call(ctx) + + testutil.RequireEqualStatus( + t, + status.Error(codes.Internal, "Failed to flush data"), + buildDirectory.Close()) + }) + + t.Run("CleanerReleaseFailure", func(t *testing.T) { + // Successfully obtain a build directory. + baseCleaner.EXPECT().Call(ctx) + baseBuildDirectory := mock.NewMockBuildDirectory(ctrl) + baseBuildDirectoryCreator.EXPECT().GetBuildDirectory( + ctx, + digest.MustNewDigest("debian8", remoteexecution.DigestFunction_SHA256, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", 0), + false, + ).Return(baseBuildDirectory, ((*path.Trace)(nil)).Append(path.MustNewComponent("base-directory")), nil) + + buildDirectory, buildDirectoryPath, err := buildDirectoryCreator.GetBuildDirectory( + ctx, + digest.MustNewDigest("debian8", remoteexecution.DigestFunction_SHA256, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", 0), + false) + require.NoError(t, err) + require.Equal(t, ((*path.Trace)(nil)).Append(path.MustNewComponent("base-directory")), buildDirectoryPath) + + // Cleanup failures at the end of a build should also be + // propagated properly. + baseBuildDirectory.EXPECT().Close() + baseCleaner.EXPECT().Call(ctx).Return(status.Error(codes.Internal, "Failed to remove files")) + + testutil.RequireEqualStatus( + t, + status.Error(codes.Internal, "Failed to clean after releasing build directory: Failed to remove files"), + buildDirectory.Close()) + }) + + t.Run("CloseSuccess", func(t *testing.T) { + // Successfully obtain a build directory. 
+		baseCleaner.EXPECT().Call(ctx)
+		baseBuildDirectory := mock.NewMockBuildDirectory(ctrl)
+		baseBuildDirectoryCreator.EXPECT().GetBuildDirectory(
+			ctx,
+			digest.MustNewDigest("debian8", remoteexecution.DigestFunction_SHA256, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", 0),
+			false,
+		).Return(baseBuildDirectory, ((*path.Trace)(nil)).Append(path.MustNewComponent("base-directory")), nil)
+
+		buildDirectory, buildDirectoryPath, err := buildDirectoryCreator.GetBuildDirectory(
+			ctx,
+			digest.MustNewDigest("debian8", remoteexecution.DigestFunction_SHA256, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", 0),
+			false)
+		require.NoError(t, err)
+		require.Equal(t, ((*path.Trace)(nil)).Append(path.MustNewComponent("base-directory")), buildDirectoryPath)
+
+		// Let both releasing of the build directory and running
+		// the cleaner succeed.
+		baseBuildDirectory.EXPECT().Close()
+		baseCleaner.EXPECT().Call(ctx)
+
+		require.NoError(t, buildDirectory.Close())
+	})
+}
diff --git a/pkg/builder/command.go b/pkg/builder/command.go
new file mode 100644
index 0000000..35e2fd1
--- /dev/null
+++ b/pkg/builder/command.go
@@ -0,0 +1,153 @@
+package builder
+
+import (
+	"io"
+	"os"
+	"sort"
+	"syscall"
+
+	remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2"
+	"github.com/buildbarn/bb-storage/pkg/filesystem/path"
+	"github.com/buildbarn/bb-storage/pkg/util"
+	"github.com/kballard/go-shellquote"
+)
+
+// mkdirEmittingDirectory is an implementation of
+// ParentPopulatableDirectory that does little more than track which
+// directories are created. For each leaf directory created, it is
+// capable of emitting an "mkdir -p" shell command.
+type mkdirEmittingDirectory struct { + children map[path.Component]*mkdirEmittingDirectory +} + +func (d *mkdirEmittingDirectory) Close() error { + return nil +} + +func (d *mkdirEmittingDirectory) EnterParentPopulatableDirectory(name path.Component) (ParentPopulatableDirectory, error) { + if child, ok := d.children[name]; ok { + return child, nil + } + return nil, syscall.ENOENT +} + +func (d *mkdirEmittingDirectory) Mkdir(name path.Component, perm os.FileMode) error { + if _, ok := d.children[name]; ok { + return syscall.EEXIST + } + d.children[name] = &mkdirEmittingDirectory{ + children: map[path.Component]*mkdirEmittingDirectory{}, + } + return nil +} + +func (d *mkdirEmittingDirectory) emitCommands(trace *path.Trace, w io.StringWriter) error { + if len(d.children) > 0 { + // This directory has children, so there's no need to + // emit an 'mkdir -p' call for this directory. + l := make(path.ComponentsList, 0, len(d.children)) + for name := range d.children { + l = append(l, name) + } + sort.Sort(l) + for _, name := range l { + if err := d.children[name].emitCommands(trace.Append(name), w); err != nil { + return err + } + } + } else if trace != nil { + // This directory has no children and it's not the root + // directory. Emit a single 'mkdir -p' call. + if _, err := w.WriteString("mkdir -p "); err != nil { + return err + } + if _, err := w.WriteString(shellquote.Join(trace.String())); err != nil { + return err + } + if _, err := w.WriteString("\n"); err != nil { + return err + } + } + return nil +} + +// ConvertCommandToShellScript writes a POSIX shell script to a +// StringWriter that causes a process to be launched in the way encoded +// in a Command message. +// +// Because input roots do not explicitly store parent directories of +// outputs, and actions generally assume that they exist, the resulting +// shell script may contain one or more "mkdir -p" calls to create those +// directories prior to execution. 
+func ConvertCommandToShellScript(command *remoteexecution.Command, w io.StringWriter) error { + // Preamble. + if _, err := w.WriteString("#!/bin/sh\nset -e\n"); err != nil { + return err + } + + // Create parent directories of outputs. + outputHierarchy, err := NewOutputHierarchy(command) + if err != nil { + return err + } + d := mkdirEmittingDirectory{ + children: map[path.Component]*mkdirEmittingDirectory{}, + } + if err := outputHierarchy.CreateParentDirectories(&d); err != nil { + return err + } + if err := d.emitCommands(nil, w); err != nil { + return err + } + + // Switch to the right working directory. + workingDirectory, scopeWalker := path.EmptyBuilder.Join(path.VoidScopeWalker) + if err := path.Resolve(command.WorkingDirectory, scopeWalker); err != nil { + return util.StatusWrap(err, "Failed to resolve working directory") + } + if _, err := w.WriteString("cd "); err != nil { + return err + } + if _, err := w.WriteString(shellquote.Join(workingDirectory.String())); err != nil { + return err + } + if _, err := w.WriteString("\n"); err != nil { + return err + } + + // Set environment variables. + for _, environmentVariable := range command.EnvironmentVariables { + if _, err := w.WriteString("export "); err != nil { + return err + } + if _, err := w.WriteString(environmentVariable.Name); err != nil { + return err + } + if _, err := w.WriteString("="); err != nil { + return err + } + if _, err := w.WriteString(shellquote.Join(environmentVariable.Value)); err != nil { + return err + } + if _, err := w.WriteString("\n"); err != nil { + return err + } + } + + // Execute the command. 
+ if _, err := w.WriteString("exec"); err != nil { + return err + } + for _, argument := range command.Arguments { + if _, err := w.WriteString(" "); err != nil { + return err + } + if _, err := w.WriteString(shellquote.Join(argument)); err != nil { + return err + } + } + if _, err := w.WriteString("\n"); err != nil { + return err + } + return nil +} diff --git a/pkg/builder/command_test.go b/pkg/builder/command_test.go new file mode 100644 index 0000000..5849447 --- /dev/null +++ b/pkg/builder/command_test.go @@ -0,0 +1,34 @@ +package builder_test + +import ( + "strings" + "testing" + + remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + "github.com/buildbarn/bb-remote-execution/pkg/builder" + "github.com/stretchr/testify/require" +) + +func TestConvertCommandToShellScript(t *testing.T) { + var b strings.Builder + require.NoError(t, builder.ConvertCommandToShellScript(&remoteexecution.Command{ + Arguments: []string{"cc", "-o", "../obj/hello world.o", "hello world.c"}, + EnvironmentVariables: []*remoteexecution.Command_EnvironmentVariable{ + {Name: "LD_LIBRARY_PATH", Value: "/lib"}, + {Name: "PATH", Value: "/bin:/sbin:/usr/bin:/usr/sbin"}, + {Name: "FUNKY_CHARACTERS", Value: "~Hello$world*"}, + }, + OutputPaths: []string{"../obj/hello world.o", "../obj/notcreated", "other/dir/bar"}, + WorkingDirectory: "src", + }, &b)) + require.Equal(t, `#!/bin/sh +set -e +mkdir -p obj +mkdir -p src/other/dir +cd src +export LD_LIBRARY_PATH=/lib +export PATH=/bin:/sbin:/usr/bin:/usr/sbin +export FUNKY_CHARACTERS=\~Hello\$world\* +exec cc -o '../obj/hello world.o' 'hello world.c' +`, b.String()) +} diff --git a/pkg/builder/completed_action_logger.go b/pkg/builder/completed_action_logger.go new file mode 100644 index 0000000..e01852c --- /dev/null +++ b/pkg/builder/completed_action_logger.go @@ -0,0 +1,163 @@ +package builder + +import ( + "context" + "sync" + + cal_proto "github.com/buildbarn/bb-remote-execution/pkg/proto/completedactionlogger" + 
"github.com/buildbarn/bb-storage/pkg/util" + "github.com/prometheus/client_golang/prometheus" + + "golang.org/x/sync/errgroup" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +var ( + completedActionLoggerPrometheusMetrics sync.Once + + completedActionLoggerCompletedActionsAcknowledged = prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: "buildbarn", + Subsystem: "builder", + Name: "completed_action_logger_completed_actions_acknowledged_total", + Help: "Number of Completed Actions that the remote server responded to.", + }) + completedActionLoggerCompletedActionsLogged = prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: "buildbarn", + Subsystem: "builder", + Name: "completed_action_logger_completed_actions_logged_total", + Help: "Number of Completed Actions that were queued to be sent to a remote server or discarded.", + }, + []string{"result"}, + ) + completedActionLoggerCompletedActionsLoggedQueued = completedActionLoggerCompletedActionsLogged.WithLabelValues("Queued") + completedActionLoggerCompletedActionsLoggedDiscarded = completedActionLoggerCompletedActionsLogged.WithLabelValues("Discarded") + + completedActionLoggerCompletedActionsSent = prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: "buildbarn", + Subsystem: "builder", + Name: "completed_action_logger_completed_actions_sent_total", + Help: "Number of Completed Actions sent to a remote service.", + }) +) + +// The CompletedActionLogger can be used to record CompletedActions for +// realtime or post-build analysis in a remote service. This is particularly +// useful for understanding how build actions change over time by inspecting +// the aggregated CompletedAction metadata. +type CompletedActionLogger interface { + LogCompletedAction(completedAction *cal_proto.CompletedAction) +} + +// The RemoteCompletedActionLogger type is used to store and send +// CompletedActions for a completedActionLoggingBuildExecutor. 
It keeps track +// of which messages have been previously transmitted and retries them if our +// connection with the server is interrupted. +type RemoteCompletedActionLogger struct { + client cal_proto.CompletedActionLoggerClient + maximumSendQueueSize int + + lock sync.Mutex + sendQueue []*cal_proto.CompletedAction + sendWakeup chan struct{} +} + +// NewRemoteCompletedActionLogger returns a new RemoteCompletedActionLogger +// with a predefined maximum capacity of stored messages. This ensures that +// we don't overwhelm the server in case it is under heavy load +// and cannot respond. +func NewRemoteCompletedActionLogger(queueSize int, client cal_proto.CompletedActionLoggerClient) *RemoteCompletedActionLogger { + completedActionLoggerPrometheusMetrics.Do(func() { + prometheus.MustRegister(completedActionLoggerCompletedActionsAcknowledged) + prometheus.MustRegister(completedActionLoggerCompletedActionsLogged) + prometheus.MustRegister(completedActionLoggerCompletedActionsSent) + }) + + return &RemoteCompletedActionLogger{ + client: client, + maximumSendQueueSize: queueSize, + + lock: sync.Mutex{}, + sendQueue: []*cal_proto.CompletedAction{}, + sendWakeup: make(chan struct{}, queueSize), + } +} + +// LogCompletedAction will add one CompletedAction to the +// RemoteCompletedActionLogger and notify that a message +// is ready to be transmitted. +func (logger *RemoteCompletedActionLogger) LogCompletedAction(completedAction *cal_proto.CompletedAction) { + logger.lock.Lock() + defer logger.lock.Unlock() + if len(logger.sendQueue) < logger.maximumSendQueueSize { + logger.sendQueue = append(logger.sendQueue, completedAction) + close(logger.sendWakeup) + logger.sendWakeup = make(chan struct{}) + completedActionLoggerCompletedActionsLoggedQueued.Inc() + } else { + completedActionLoggerCompletedActionsLoggedDiscarded.Inc() + } +} + +// SendAllCompletedActions is responsible for managing goroutines that perform +// the rpc transmission and response handling. 
+func (logger *RemoteCompletedActionLogger) SendAllCompletedActions() error { + eg, ctx := errgroup.WithContext(context.Background()) + stream, err := logger.client.LogCompletedActions(ctx) + if err != nil { + return err + } + + actionsSent := 0 + + eg.Go(func() error { + for { + logger.lock.Lock() + c := logger.sendWakeup + + actionsToSend := logger.sendQueue[actionsSent:] + actionsSent = len(logger.sendQueue) + logger.lock.Unlock() + + for _, action := range actionsToSend { + if err := stream.Send(action); err != nil { + return util.StatusWrapf(err, "Failed to transmit completed action %#v", action.Uuid) + } + completedActionLoggerCompletedActionsSent.Inc() + } + select { + case <-c: + case <-ctx.Done(): + return util.StatusFromContext(ctx) + } + } + }) + eg.Go(func() error { + for { + if _, err := stream.Recv(); err != nil { + return util.StatusWrap(err, "Failed to receive response from server") + } + + logger.lock.Lock() + if actionsSent == 0 { + logger.lock.Unlock() + return status.Error(codes.FailedPrecondition, "Improper response: No messages left in the queue") + } + + logger.sendQueue = logger.sendQueue[1:] + actionsSent-- + logger.lock.Unlock() + completedActionLoggerCompletedActionsAcknowledged.Inc() + } + }) + err = eg.Wait() + + // Ensure we close the stream properly by calling Recv + // until we get an error response. 
+ for { + if _, err := stream.Recv(); err != nil { + break + } + } + return err +} diff --git a/pkg/builder/completed_action_logger_test.go b/pkg/builder/completed_action_logger_test.go new file mode 100644 index 0000000..aba52ac --- /dev/null +++ b/pkg/builder/completed_action_logger_test.go @@ -0,0 +1,135 @@ +package builder_test + +import ( + "context" + "testing" + + remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + mock "github.com/buildbarn/bb-remote-execution/internal/mock" + "github.com/buildbarn/bb-remote-execution/pkg/builder" + cas_proto "github.com/buildbarn/bb-remote-execution/pkg/proto/cas" + cal_proto "github.com/buildbarn/bb-remote-execution/pkg/proto/completedactionlogger" + "github.com/buildbarn/bb-storage/pkg/testutil" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/anypb" + "google.golang.org/protobuf/types/known/timestamppb" +) + +func TestRemoteCompletedActionLogger(t *testing.T) { + ctrl, _ := gomock.WithContext(context.Background(), t) + conn := mock.NewMockClientConnInterface(ctrl) + client := cal_proto.NewCompletedActionLoggerClient(conn) + logger := builder.NewRemoteCompletedActionLogger(100, client) + + completedAction := &cal_proto.CompletedAction{ + HistoricalExecuteResponse: &cas_proto.HistoricalExecuteResponse{ + ActionDigest: &remoteexecution.Digest{ + Hash: "64ec88ca00b268e5ba1a35678a1b5316d212f4f366b2477232534a8aeca37f3c", + SizeBytes: 11, + }, + ExecuteResponse: &remoteexecution.ExecuteResponse{ + Result: &remoteexecution.ActionResult{ + ExecutionMetadata: &remoteexecution.ExecutedActionMetadata{ + WorkerStartTimestamp: ×tamppb.Timestamp{Nanos: 0}, + WorkerCompletedTimestamp: ×tamppb.Timestamp{Seconds: 1, Nanos: 500000000}, + AuxiliaryMetadata: []*anypb.Any{}, + }, + }, + }, + }, + Uuid: "uuid", + InstanceName: "freebsd12", + } + + 
t.Run("OnlyRecvCallsFailure", func(t *testing.T) { + clientStream := mock.NewMockClientStream(ctrl) + conn.EXPECT().NewStream(gomock.Any(), gomock.Any(), "/buildbarn.completedactionlogger.CompletedActionLogger/LogCompletedActions", gomock.Any()). + Return(clientStream, nil) + + clientStream.EXPECT().RecvMsg(gomock.Any()) + clientStream.EXPECT().RecvMsg(gomock.Any()).Return(status.Error(codes.Internal, "Disk on fire")) + err := logger.SendAllCompletedActions() + testutil.RequireEqualStatus(t, status.Error(codes.FailedPrecondition, "Improper response: No messages left in the queue"), err) + }) + + t.Run("DroppedConnectionRetrySuccess", func(t *testing.T) { + var savedCtx1 context.Context + clientStream1 := mock.NewMockClientStream(ctrl) + conn.EXPECT().NewStream(gomock.Any(), gomock.Any(), "/buildbarn.completedactionlogger.CompletedActionLogger/LogCompletedActions", gomock.Any()). + DoAndReturn(func(ctx context.Context, desc *grpc.StreamDesc, method string, opts ...grpc.CallOption) (grpc.ClientStream, error) { + savedCtx1 = ctx + return clientStream1, nil + }) + + for i := 0; i < 5; i++ { + ch1 := make(chan struct{}) + clientStream1.EXPECT().SendMsg(completedAction).DoAndReturn(func(i interface{}) error { + close(ch1) + return nil + }) + clientStream1.EXPECT().RecvMsg(gomock.Any()).DoAndReturn(func(i interface{}) error { + <-ch1 + return nil + }) + } + clientStream1.EXPECT().SendMsg(completedAction).DoAndReturn(func(i interface{}) error { + return nil + }).MaxTimes(5) + clientStream1.EXPECT().RecvMsg(gomock.Any()).DoAndReturn(func(i interface{}) error { + return status.Error(codes.Unavailable, "Server on fire") + }) + clientStream1.EXPECT().RecvMsg(gomock.Any()).DoAndReturn(func(m interface{}) error { + <-savedCtx1.Done() + require.Equal(t, context.Canceled, savedCtx1.Err()) + return status.Error(codes.Canceled, "Request canceled") + }) + + for i := 0; i < 10; i++ { + logger.LogCompletedAction(completedAction) + } + // Start the client so that it may transmit the 10 
queued messages. + err := logger.SendAllCompletedActions() + testutil.RequireEqualStatus(t, status.Error(codes.Unavailable, "Failed to receive response from server: Server on fire"), err) + + // We expect five more items in the logger's sendQueue at + // this point. Perform the setup to start the logger queue + // again and resend the last message. + var savedCtx2 context.Context + clientStream2 := mock.NewMockClientStream(ctrl) + conn.EXPECT().NewStream(gomock.Any(), gomock.Any(), "/buildbarn.completedactionlogger.CompletedActionLogger/LogCompletedActions", gomock.Any()). + DoAndReturn(func(ctx context.Context, desc *grpc.StreamDesc, method string, opts ...grpc.CallOption) (grpc.ClientStream, error) { + savedCtx2 = ctx + return clientStream2, nil + }) + + // Successfully retry the five remaining messages left in the queue. + for i := 0; i < 5; i++ { + ch3 := make(chan struct{}) + clientStream2.EXPECT().SendMsg(completedAction).DoAndReturn(func(i interface{}) error { + close(ch3) + return nil + }) + clientStream2.EXPECT().RecvMsg(gomock.Any()).DoAndReturn(func(i interface{}) error { + <-ch3 + return nil + }) + } + // All messages have been sent, now force an exit by + // returning an error on subsequent RecvMsg calls. 
+ clientStream2.EXPECT().RecvMsg(gomock.Any()).DoAndReturn(func(i interface{}) error { + return status.Error(codes.Unavailable, "Server out of memory") + }) + clientStream2.EXPECT().RecvMsg(gomock.Any()).DoAndReturn(func(m interface{}) error { + <-savedCtx2.Done() + require.Equal(t, context.Canceled, savedCtx2.Err()) + return status.Error(codes.Canceled, "Request canceled") + }) + err = logger.SendAllCompletedActions() + testutil.RequireEqualStatus(t, status.Error(codes.Unavailable, "Failed to receive response from server: Server out of memory"), err) + }) +} diff --git a/pkg/builder/completed_action_logging_build_executor.go b/pkg/builder/completed_action_logging_build_executor.go new file mode 100644 index 0000000..76e0072 --- /dev/null +++ b/pkg/builder/completed_action_logging_build_executor.go @@ -0,0 +1,52 @@ +package builder + +import ( + "context" + + remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + "github.com/buildbarn/bb-remote-execution/pkg/filesystem" + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/access" + cas_proto "github.com/buildbarn/bb-remote-execution/pkg/proto/cas" + cal_proto "github.com/buildbarn/bb-remote-execution/pkg/proto/completedactionlogger" + "github.com/buildbarn/bb-remote-execution/pkg/proto/remoteworker" + "github.com/buildbarn/bb-storage/pkg/digest" + "github.com/buildbarn/bb-storage/pkg/util" + "github.com/google/uuid" +) + +type completedActionLoggingBuildExecutor struct { + BuildExecutor + uuidGenerator util.UUIDGenerator + logger CompletedActionLogger + instanceNamePatcher digest.InstanceNamePatcher +} + +// NewCompletedActionLoggingBuildExecutor returns a new +// completedActionLoggingBuildExecutor that will transmit CompletedActions +// to an external server for real-time analysis of REv2 Action metadata +// using a CompletedActionLogger. 
+func NewCompletedActionLoggingBuildExecutor(base BuildExecutor, uuidGenerator util.UUIDGenerator, logger CompletedActionLogger, instanceNamePatcher digest.InstanceNamePatcher) BuildExecutor { + return &completedActionLoggingBuildExecutor{ + BuildExecutor: base, + uuidGenerator: uuidGenerator, + logger: logger, + instanceNamePatcher: instanceNamePatcher, + } +} + +func (be *completedActionLoggingBuildExecutor) Execute(ctx context.Context, filePool filesystem.FilePool, monitor access.UnreadDirectoryMonitor, digestFunction digest.Function, request *remoteworker.DesiredState_Executing, executionStateUpdates chan<- *remoteworker.CurrentState_Executing) *remoteexecution.ExecuteResponse { + response := be.BuildExecutor.Execute(ctx, filePool, monitor, digestFunction, request, executionStateUpdates) + + completedAction := &cal_proto.CompletedAction{ + HistoricalExecuteResponse: &cas_proto.HistoricalExecuteResponse{ + ActionDigest: request.ActionDigest, + ExecuteResponse: response, + }, + Uuid: uuid.Must(be.uuidGenerator()).String(), + InstanceName: be.instanceNamePatcher.PatchInstanceName(digestFunction.GetInstanceName()).String(), + DigestFunction: digestFunction.GetEnumValue(), + } + + be.logger.LogCompletedAction(completedAction) + return response +} diff --git a/pkg/builder/completed_action_logging_build_executor_test.go b/pkg/builder/completed_action_logging_build_executor_test.go new file mode 100644 index 0000000..71f91d7 --- /dev/null +++ b/pkg/builder/completed_action_logging_build_executor_test.go @@ -0,0 +1,75 @@ +package builder_test + +import ( + "context" + "testing" + + remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + mock "github.com/buildbarn/bb-remote-execution/internal/mock" + "github.com/buildbarn/bb-remote-execution/pkg/builder" + cas_proto "github.com/buildbarn/bb-remote-execution/pkg/proto/cas" + cal_proto "github.com/buildbarn/bb-remote-execution/pkg/proto/completedactionlogger" + 
"github.com/buildbarn/bb-remote-execution/pkg/proto/remoteworker" + "github.com/buildbarn/bb-storage/pkg/digest" + "github.com/buildbarn/bb-storage/pkg/testutil" + "github.com/golang/mock/gomock" + "github.com/google/uuid" + + "google.golang.org/protobuf/types/known/anypb" + "google.golang.org/protobuf/types/known/timestamppb" +) + +func TestActionLoggingBuildExecutor(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + baseBuildExecutor := mock.NewMockBuildExecutor(ctrl) + actionDigest := &remoteexecution.Digest{ + Hash: "64ec88ca00b268e5ba1a35678a1b5316d212f4f366b2477232534a8aeca37f3c", + SizeBytes: 11, + } + action := &remoteexecution.Action{DoNotCache: false} + request := &remoteworker.DesiredState_Executing{ + ActionDigest: actionDigest, + Action: action, + } + filePool := mock.NewMockFilePool(ctrl) + monitor := mock.NewMockUnreadDirectoryMonitor(ctrl) + digestFunction := digest.MustNewFunction("freebsd12", remoteexecution.DigestFunction_SHA256) + metadata := make(chan *remoteworker.CurrentState_Executing, 10) + + executeResponse := &remoteexecution.ExecuteResponse{ + Result: &remoteexecution.ActionResult{ + ExecutionMetadata: &remoteexecution.ExecutedActionMetadata{ + WorkerStartTimestamp: ×tamppb.Timestamp{Nanos: 0}, + WorkerCompletedTimestamp: ×tamppb.Timestamp{Seconds: 1, Nanos: 500000000}, + AuxiliaryMetadata: []*anypb.Any{}, + }, + }, + } + + baseBuildExecutor.EXPECT().Execute(ctx, filePool, monitor, digestFunction, request, metadata).Return(executeResponse) + + uuidGenerator := mock.NewMockUUIDGenerator(ctrl) + lq := mock.NewMockCompletedActionLogger(ctrl) + completedActionLoggingBuildExecutor := builder.NewCompletedActionLoggingBuildExecutor( + baseBuildExecutor, + uuidGenerator.Call, + lq, + digest.NewInstanceNamePatcher(digest.EmptyInstanceName, digest.MustNewInstanceName("prefix"))) + + uuidGenerator.EXPECT().Call().Return(uuid.Parse("36ebab65-3c4f-4faf-818b-2eabb4cd1b02")) + 
lq.EXPECT().LogCompletedAction(&cal_proto.CompletedAction{ + HistoricalExecuteResponse: &cas_proto.HistoricalExecuteResponse{ + ActionDigest: &remoteexecution.Digest{ + Hash: "64ec88ca00b268e5ba1a35678a1b5316d212f4f366b2477232534a8aeca37f3c", + SizeBytes: 11, + }, + ExecuteResponse: executeResponse, + }, + Uuid: "36ebab65-3c4f-4faf-818b-2eabb4cd1b02", + InstanceName: "prefix/freebsd12", + DigestFunction: remoteexecution.DigestFunction_SHA256, + }) + resp := completedActionLoggingBuildExecutor.Execute(ctx, filePool, monitor, digestFunction, request, metadata) + + testutil.RequireEqualProto(t, resp, executeResponse) +} diff --git a/pkg/builder/cost_computing_build_executor.go b/pkg/builder/cost_computing_build_executor.go new file mode 100644 index 0000000..3ea8d99 --- /dev/null +++ b/pkg/builder/cost_computing_build_executor.go @@ -0,0 +1,54 @@ +package builder + +import ( + "context" + + remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + + re_filesystem "github.com/buildbarn/bb-remote-execution/pkg/filesystem" + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/access" + "github.com/buildbarn/bb-remote-execution/pkg/proto/remoteworker" + "github.com/buildbarn/bb-remote-execution/pkg/proto/resourceusage" + "github.com/buildbarn/bb-storage/pkg/digest" + "github.com/buildbarn/bb-storage/pkg/util" + + "google.golang.org/protobuf/types/known/anypb" +) + +type costComputingBuildExecutor struct { + BuildExecutor + pricingPerSecond map[string]*resourceusage.MonetaryResourceUsage_Expense +} + +// NewCostComputingBuildExecutor wraps an existing BuildExecutor, adding the computed +// cost of the action to the prepopulated AuxiliaryMetadata field of the ActionResult. +// The provided expenses are represented on a per-second basis and are then multiplied +// by the amount of seconds that it took for a worker to complete the Action. 
+func NewCostComputingBuildExecutor(base BuildExecutor, expensesPerSecond map[string]*resourceusage.MonetaryResourceUsage_Expense) BuildExecutor { + return &costComputingBuildExecutor{ + BuildExecutor: base, + pricingPerSecond: expensesPerSecond, + } +} + +func (be *costComputingBuildExecutor) Execute(ctx context.Context, filePool re_filesystem.FilePool, monitor access.UnreadDirectoryMonitor, digestFunction digest.Function, request *remoteworker.DesiredState_Executing, executionStateUpdates chan<- *remoteworker.CurrentState_Executing) *remoteexecution.ExecuteResponse { + response := be.BuildExecutor.Execute(ctx, filePool, monitor, digestFunction, request, executionStateUpdates) + + totalTime := response.Result.ExecutionMetadata.WorkerCompletedTimestamp.AsTime().Sub(response.Result.ExecutionMetadata.WorkerStartTimestamp.AsTime()).Seconds() + costsPerSecond := resourceusage.MonetaryResourceUsage{ + Expenses: map[string]*resourceusage.MonetaryResourceUsage_Expense{}, + } + for costType, exp := range be.pricingPerSecond { + costsPerSecond.Expenses[costType] = &resourceusage.MonetaryResourceUsage_Expense{ + Currency: exp.Currency, + Cost: totalTime * exp.Cost, + } + } + + if monetaryResourceUsage, err := anypb.New(&costsPerSecond); err == nil { + response.Result.ExecutionMetadata.AuxiliaryMetadata = append(response.Result.ExecutionMetadata.AuxiliaryMetadata, monetaryResourceUsage) + } else { + attachErrorToExecuteResponse(response, util.StatusWrap(err, "Failed to marshal monetary resource usage")) + } + return response +} diff --git a/pkg/builder/cost_computing_build_executor_test.go b/pkg/builder/cost_computing_build_executor_test.go new file mode 100644 index 0000000..f5cb3e5 --- /dev/null +++ b/pkg/builder/cost_computing_build_executor_test.go @@ -0,0 +1,79 @@ +package builder_test + +import ( + "context" + "testing" + + remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + "github.com/buildbarn/bb-remote-execution/internal/mock" + 
"github.com/buildbarn/bb-remote-execution/pkg/builder" + "github.com/buildbarn/bb-remote-execution/pkg/proto/remoteworker" + "github.com/buildbarn/bb-remote-execution/pkg/proto/resourceusage" + "github.com/buildbarn/bb-storage/pkg/digest" + "github.com/buildbarn/bb-storage/pkg/testutil" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" + + "google.golang.org/protobuf/types/known/anypb" + "google.golang.org/protobuf/types/known/timestamppb" +) + +func TestCostComputingBuildExecutorSuccess(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + baseBuildExecutor := mock.NewMockBuildExecutor(ctrl) + actionDigest := &remoteexecution.Digest{ + Hash: "64ec88ca00b268e5ba1a35678a1b5316d212f4f366b2477232534a8aeca37f3c", + SizeBytes: 11, + } + action := &remoteexecution.Action{DoNotCache: false} + request := &remoteworker.DesiredState_Executing{ + ActionDigest: actionDigest, + Action: action, + } + filePool := mock.NewMockFilePool(ctrl) + monitor := mock.NewMockUnreadDirectoryMonitor(ctrl) + digestFunction := digest.MustNewFunction("freebsd12", remoteexecution.DigestFunction_SHA256) + metadata := make(chan *remoteworker.CurrentState_Executing, 10) + + startTime := ×tamppb.Timestamp{Nanos: 0} + endTime := ×tamppb.Timestamp{Seconds: 1, Nanos: 500000000} + + baseBuildExecutor.EXPECT().Execute(ctx, filePool, monitor, digestFunction, request, metadata).Return(&remoteexecution.ExecuteResponse{ + Result: &remoteexecution.ActionResult{ + ExecutionMetadata: &remoteexecution.ExecutedActionMetadata{ + WorkerStartTimestamp: startTime, + WorkerCompletedTimestamp: endTime, + AuxiliaryMetadata: []*anypb.Any{}, + }, + }, + }) + monetaryResourceUsage, err := anypb.New(&resourceusage.MonetaryResourceUsage{ + Expenses: map[string]*resourceusage.MonetaryResourceUsage_Expense{ + "EC2": {Currency: "USD", Cost: 0.1851}, + "S3": {Currency: "BTC", Cost: 0.885}, + "Electricity": {Currency: "EUR", Cost: 1.845}, + "Maintenance": {Currency: "JPY", Cost: 
0.18}, + }, + }) + require.NoError(t, err) + + costComputingBuildExecutor := builder.NewCostComputingBuildExecutor(baseBuildExecutor, map[string]*resourceusage.MonetaryResourceUsage_Expense{ + "EC2": {Currency: "USD", Cost: 0.1234}, + "S3": {Currency: "BTC", Cost: 0.59}, + "Electricity": {Currency: "EUR", Cost: 1.23}, + "Maintenance": {Currency: "JPY", Cost: 0.12}, + }) + + executeResponse := costComputingBuildExecutor.Execute(ctx, filePool, monitor, digestFunction, request, metadata) + testutil.RequireEqualProto(t, &remoteexecution.ExecuteResponse{ + Result: &remoteexecution.ActionResult{ + ExecutionMetadata: &remoteexecution.ExecutedActionMetadata{ + WorkerStartTimestamp: startTime, + WorkerCompletedTimestamp: endTime, + AuxiliaryMetadata: []*anypb.Any{ + monetaryResourceUsage, + }, + }, + }, + }, executeResponse) +} diff --git a/pkg/builder/file_pool_stats_build_executor.go b/pkg/builder/file_pool_stats_build_executor.go new file mode 100644 index 0000000..b7888bb --- /dev/null +++ b/pkg/builder/file_pool_stats_build_executor.go @@ -0,0 +1,155 @@ +package builder + +import ( + "context" + "sync" + + remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + re_filesystem "github.com/buildbarn/bb-remote-execution/pkg/filesystem" + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/access" + "github.com/buildbarn/bb-remote-execution/pkg/proto/remoteworker" + "github.com/buildbarn/bb-remote-execution/pkg/proto/resourceusage" + "github.com/buildbarn/bb-storage/pkg/digest" + "github.com/buildbarn/bb-storage/pkg/filesystem" + "github.com/buildbarn/bb-storage/pkg/util" + + "google.golang.org/protobuf/types/known/anypb" +) + +type filePoolStatsBuildExecutor struct { + BuildExecutor +} + +// NewFilePoolStatsBuildExecutor creates a decorator for BuildExecutor +// that annotates ExecuteResponses to contain usage statistics of the +// FilePool. 
FilePools are used to allocate temporary files that are +// generated by the build action (e.g., output files). +func NewFilePoolStatsBuildExecutor(buildExecutor BuildExecutor) BuildExecutor { + return &filePoolStatsBuildExecutor{ + BuildExecutor: buildExecutor, + } +} + +func (be *filePoolStatsBuildExecutor) Execute(ctx context.Context, filePool re_filesystem.FilePool, monitor access.UnreadDirectoryMonitor, digestFunction digest.Function, request *remoteworker.DesiredState_Executing, executionStateUpdates chan<- *remoteworker.CurrentState_Executing) *remoteexecution.ExecuteResponse { + fp := statsCollectingFilePool{base: filePool} + response := be.BuildExecutor.Execute(ctx, &fp, monitor, digestFunction, request, executionStateUpdates) + + fp.lock.Lock() + stats := fp.stats + fp.lock.Unlock() + + if resourceUsage, err := anypb.New(&stats); err == nil { + response.Result.ExecutionMetadata.AuxiliaryMetadata = append(response.Result.ExecutionMetadata.AuxiliaryMetadata, resourceUsage) + } else { + attachErrorToExecuteResponse(response, util.StatusWrap(err, "Failed to marshal file pool resource usage")) + } + return response +} + +// statsCollectingFilePool is a decorator for FilePool that measures the +// number of files created and the number of operations performed. 
+type statsCollectingFilePool struct { + base re_filesystem.FilePool + + lock sync.Mutex + stats resourceusage.FilePoolResourceUsage + totalSize uint64 + totalFiles uint64 +} + +func (fp *statsCollectingFilePool) NewFile() (filesystem.FileReadWriter, error) { + f, err := fp.base.NewFile() + if err != nil { + return nil, err + } + + fp.lock.Lock() + fp.stats.FilesCreated++ + fp.totalFiles++ + if fp.stats.FilesCountPeak < fp.totalFiles { + fp.stats.FilesCountPeak = fp.totalFiles + } + fp.lock.Unlock() + + return &statsCollectingFileReadWriter{ + FileReadWriter: f, + pool: fp, + }, nil +} + +// statsCollectingFileReadWriter is a decorator for +// filesystem.FileReadWriter that measures the number of file operations +// performed. +type statsCollectingFileReadWriter struct { + filesystem.FileReadWriter + pool *statsCollectingFilePool + + size uint64 +} + +func (f *statsCollectingFileReadWriter) updateSizeLocked(newSize uint64) { + fp := f.pool + fp.totalSize -= f.size + f.size = newSize + fp.totalSize += f.size + if fp.stats.FilesSizeBytesPeak < fp.totalSize { + fp.stats.FilesSizeBytesPeak = fp.totalSize + } +} + +func (f *statsCollectingFileReadWriter) ReadAt(p []byte, off int64) (int, error) { + n, err := f.FileReadWriter.ReadAt(p, off) + + fp := f.pool + fp.lock.Lock() + fp.stats.ReadsCount++ + fp.stats.ReadsSizeBytes += uint64(n) + fp.lock.Unlock() + + return n, err +} + +func (f *statsCollectingFileReadWriter) WriteAt(p []byte, off int64) (int, error) { + n, err := f.FileReadWriter.WriteAt(p, off) + + fp := f.pool + fp.lock.Lock() + fp.stats.WritesCount++ + fp.stats.WritesSizeBytes += uint64(n) + if n > 0 { + if newSize := uint64(off) + uint64(n); newSize > f.size { + f.updateSizeLocked(newSize) + } + } + fp.lock.Unlock() + + return n, err +} + +func (f *statsCollectingFileReadWriter) Truncate(length int64) error { + err := f.FileReadWriter.Truncate(length) + + fp := f.pool + fp.lock.Lock() + fp.stats.TruncatesCount++ + if err == nil { + 
f.updateSizeLocked(uint64(length)) + } + fp.lock.Unlock() + + return err +} + +func (f *statsCollectingFileReadWriter) Close() error { + err := f.FileReadWriter.Close() + f.FileReadWriter = nil + + fp := f.pool + fp.lock.Lock() + fp.totalFiles-- + fp.totalSize -= f.size + fp.lock.Unlock() + f.pool = nil + + return err +} diff --git a/pkg/builder/file_pool_stats_build_executor_test.go b/pkg/builder/file_pool_stats_build_executor_test.go new file mode 100644 index 0000000..8d5be7b --- /dev/null +++ b/pkg/builder/file_pool_stats_build_executor_test.go @@ -0,0 +1,104 @@ +package builder_test + +import ( + "context" + "io" + "testing" + + remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + "github.com/buildbarn/bb-remote-execution/internal/mock" + "github.com/buildbarn/bb-remote-execution/pkg/builder" + "github.com/buildbarn/bb-remote-execution/pkg/filesystem" + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/access" + "github.com/buildbarn/bb-remote-execution/pkg/proto/remoteworker" + "github.com/buildbarn/bb-remote-execution/pkg/proto/resourceusage" + "github.com/buildbarn/bb-storage/pkg/digest" + "github.com/buildbarn/bb-storage/pkg/testutil" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" + + "google.golang.org/protobuf/types/known/anypb" +) + +func TestFilePoolStatsBuildExecutorExample(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + // Recurring messages used by this test. + request := &remoteworker.DesiredState_Executing{ + ActionDigest: &remoteexecution.Digest{ + Hash: "d41d8cd98f00b204e9800998ecf8427e", + SizeBytes: 123, + }, + } + + // Expect to see an execution request. Inside the execution + // request, generate some I/O on the file pool to produce + // non-zero counters. 
+ baseBuildExecutor := mock.NewMockBuildExecutor(ctrl) + monitor := mock.NewMockUnreadDirectoryMonitor(ctrl) + baseBuildExecutor.EXPECT().Execute( + ctx, + gomock.Any(), + monitor, + digest.MustNewFunction("hello", remoteexecution.DigestFunction_MD5), + request, + gomock.Any()).DoAndReturn(func(ctx context.Context, filePool filesystem.FilePool, monitor access.UnreadDirectoryMonitor, digestFunction digest.Function, request *remoteworker.DesiredState_Executing, executionStateUpdates chan<- *remoteworker.CurrentState_Executing) *remoteexecution.ExecuteResponse { + f, err := filePool.NewFile() + require.NoError(t, err) + require.NoError(t, f.Truncate(5)) + require.NoError(t, f.Close()) + + f, err = filePool.NewFile() + require.NoError(t, err) + n, err := f.WriteAt([]byte("Hello"), 100) + require.Equal(t, 5, n) + require.NoError(t, err) + var p [10]byte + n, err = f.ReadAt(p[:], 98) + require.Equal(t, 7, n) + require.Equal(t, io.EOF, err) + require.Equal(t, []byte("\x00\x00Hello\x00\x00\x00"), p[:]) + require.NoError(t, f.Truncate(42)) + require.NoError(t, f.Close()) + + return &remoteexecution.ExecuteResponse{ + Result: &remoteexecution.ActionResult{ + ExitCode: 1, + ExecutionMetadata: &remoteexecution.ExecutedActionMetadata{}, + }, + } + }) + + // Perform the execution request. + executionStateUpdates := make(chan *remoteworker.CurrentState_Executing, 3) + buildExecutor := builder.NewFilePoolStatsBuildExecutor(baseBuildExecutor) + executeResponse := buildExecutor.Execute( + ctx, + filesystem.InMemoryFilePool, + monitor, + digest.MustNewFunction("hello", remoteexecution.DigestFunction_MD5), + request, + executionStateUpdates) + + // Validate the execute response, which should now contain the + // file pool resource usage statistics. 
+ resourceUsage, err := anypb.New(&resourceusage.FilePoolResourceUsage{ + FilesCreated: 2, + FilesCountPeak: 1, + FilesSizeBytesPeak: 105, + ReadsCount: 1, + ReadsSizeBytes: 7, + WritesCount: 1, + WritesSizeBytes: 5, + TruncatesCount: 2, + }) + require.NoError(t, err) + testutil.RequireEqualProto(t, &remoteexecution.ExecuteResponse{ + Result: &remoteexecution.ActionResult{ + ExitCode: 1, + ExecutionMetadata: &remoteexecution.ExecutedActionMetadata{ + AuxiliaryMetadata: []*anypb.Any{resourceUsage}, + }, + }, + }, executeResponse) +} diff --git a/pkg/builder/local_build_executor.go b/pkg/builder/local_build_executor.go new file mode 100644 index 0000000..ddc2e66 --- /dev/null +++ b/pkg/builder/local_build_executor.go @@ -0,0 +1,301 @@ +package builder + +import ( + "context" + "os" + "sync" + "time" + + remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + re_clock "github.com/buildbarn/bb-remote-execution/pkg/clock" + re_filesystem "github.com/buildbarn/bb-remote-execution/pkg/filesystem" + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/access" + "github.com/buildbarn/bb-remote-execution/pkg/proto/remoteworker" + runner_pb "github.com/buildbarn/bb-remote-execution/pkg/proto/runner" + "github.com/buildbarn/bb-storage/pkg/blobstore" + "github.com/buildbarn/bb-storage/pkg/clock" + "github.com/buildbarn/bb-storage/pkg/digest" + "github.com/buildbarn/bb-storage/pkg/filesystem" + "github.com/buildbarn/bb-storage/pkg/filesystem/path" + "github.com/buildbarn/bb-storage/pkg/util" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/durationpb" + "google.golang.org/protobuf/types/known/emptypb" +) + +// Filenames of objects to be created inside the build directory. 
+var ( + stdoutComponent = path.MustNewComponent("stdout") + stderrComponent = path.MustNewComponent("stderr") + deviceDirectoryComponent = path.MustNewComponent("dev") + inputRootDirectoryComponent = path.MustNewComponent("root") + temporaryDirectoryComponent = path.MustNewComponent("tmp") +) + +// capturingErrorLogger is an error logger that stores up to a single +// error. When the error is stored, a context cancelation function is +// invoked. This is used by localBuildExecutor to kill a build action in +// case an I/O error occurs on the FUSE file system. +type capturingErrorLogger struct { + lock sync.Mutex + cancel context.CancelFunc + error error +} + +func (el *capturingErrorLogger) Log(err error) { + el.lock.Lock() + defer el.lock.Unlock() + + if el.cancel != nil { + el.error = err + el.cancel() + el.cancel = nil + } +} + +func (el *capturingErrorLogger) GetError() error { + el.lock.Lock() + defer el.lock.Unlock() + + return el.error +} + +type localBuildExecutor struct { + contentAddressableStorage blobstore.BlobAccess + buildDirectoryCreator BuildDirectoryCreator + runner runner_pb.RunnerClient + clock clock.Clock + inputRootCharacterDevices map[path.Component]filesystem.DeviceNumber + maximumMessageSizeBytes int + environmentVariables map[string]string +} + +// NewLocalBuildExecutor returns a BuildExecutor that executes build +// steps on the local system. 
+func NewLocalBuildExecutor(contentAddressableStorage blobstore.BlobAccess, buildDirectoryCreator BuildDirectoryCreator, runner runner_pb.RunnerClient, clock clock.Clock, inputRootCharacterDevices map[path.Component]filesystem.DeviceNumber, maximumMessageSizeBytes int, environmentVariables map[string]string) BuildExecutor { + return &localBuildExecutor{ + contentAddressableStorage: contentAddressableStorage, + buildDirectoryCreator: buildDirectoryCreator, + runner: runner, + clock: clock, + inputRootCharacterDevices: inputRootCharacterDevices, + maximumMessageSizeBytes: maximumMessageSizeBytes, + environmentVariables: environmentVariables, + } +} + +func (be *localBuildExecutor) createCharacterDevices(inputRootDirectory BuildDirectory) error { + if err := inputRootDirectory.Mkdir(deviceDirectoryComponent, 0o777); err != nil && !os.IsExist(err) { + return util.StatusWrap(err, "Unable to create /dev directory in input root") + } + deviceDirectory, err := inputRootDirectory.EnterBuildDirectory(deviceDirectoryComponent) + if err != nil { + return util.StatusWrap(err, "Unable to enter /dev directory in input root") + } + defer deviceDirectory.Close() + for name, number := range be.inputRootCharacterDevices { + if err := deviceDirectory.Mknod(name, os.ModeDevice|os.ModeCharDevice|0o666, number); err != nil { + return util.StatusWrapf(err, "Failed to create character device %#v", name.String()) + } + } + return nil +} + +func (be *localBuildExecutor) CheckReadiness(ctx context.Context) error { + _, err := be.runner.CheckReadiness(ctx, &emptypb.Empty{}) + return err +} + +func (be *localBuildExecutor) Execute(ctx context.Context, filePool re_filesystem.FilePool, monitor access.UnreadDirectoryMonitor, digestFunction digest.Function, request *remoteworker.DesiredState_Executing, executionStateUpdates chan<- *remoteworker.CurrentState_Executing) *remoteexecution.ExecuteResponse { + // Timeout handling. 
+ response := NewDefaultExecuteResponse(request) + action := request.Action + if action == nil { + attachErrorToExecuteResponse(response, status.Error(codes.InvalidArgument, "Request does not contain an action")) + return response + } + if err := action.Timeout.CheckValid(); err != nil { + attachErrorToExecuteResponse( + response, + util.StatusWrapWithCode(err, codes.InvalidArgument, "Invalid execution timeout")) + return response + } + executionTimeout := action.Timeout.AsDuration() + + // Obtain build directory. + actionDigest, err := digestFunction.NewDigestFromProto(request.ActionDigest) + if err != nil { + attachErrorToExecuteResponse(response, util.StatusWrap(err, "Failed to extract digest for action")) + return response + } + buildDirectory, buildDirectoryPath, err := be.buildDirectoryCreator.GetBuildDirectory(ctx, actionDigest, action.DoNotCache) + if err != nil { + attachErrorToExecuteResponse( + response, + util.StatusWrap(err, "Failed to acquire build environment")) + return response + } + defer buildDirectory.Close() + + // Install hooks on build directory to capture file creation and + // I/O error events. + ctxWithIOError, cancelIOError := context.WithCancel(ctx) + defer cancelIOError() + ioErrorCapturer := capturingErrorLogger{cancel: cancelIOError} + buildDirectory.InstallHooks(filePool, &ioErrorCapturer) + + executionStateUpdates <- &remoteworker.CurrentState_Executing{ + ActionDigest: request.ActionDigest, + ExecutionState: &remoteworker.CurrentState_Executing_FetchingInputs{ + FetchingInputs: &emptypb.Empty{}, + }, + } + + // Create input root directory inside of build directory. 
+ if err := buildDirectory.Mkdir(inputRootDirectoryComponent, 0o777); err != nil { + attachErrorToExecuteResponse( + response, + util.StatusWrap(err, "Failed to create input root directory")) + return response + } + inputRootDirectory, err := buildDirectory.EnterBuildDirectory(inputRootDirectoryComponent) + if err != nil { + attachErrorToExecuteResponse( + response, + util.StatusWrap(err, "Failed to enter input root directory")) + return response + } + defer inputRootDirectory.Close() + + inputRootDigest, err := digestFunction.NewDigestFromProto(action.InputRootDigest) + if err != nil { + attachErrorToExecuteResponse( + response, + util.StatusWrap(err, "Failed to extract digest for input root")) + return response + } + if err := inputRootDirectory.MergeDirectoryContents(ctx, &ioErrorCapturer, inputRootDigest, monitor); err != nil { + attachErrorToExecuteResponse(response, err) + return response + } + + if len(be.inputRootCharacterDevices) > 0 { + if err := be.createCharacterDevices(inputRootDirectory); err != nil { + attachErrorToExecuteResponse(response, err) + return response + } + } + + // Create parent directories of output files and directories. + // These are not declared in the input root explicitly. 
+ commandDigest, err := digestFunction.NewDigestFromProto(action.CommandDigest) + if err != nil { + attachErrorToExecuteResponse(response, util.StatusWrap(err, "Failed to extract digest for command")) + return response + } + commandMessage, err := be.contentAddressableStorage.Get(ctx, commandDigest).ToProto(&remoteexecution.Command{}, be.maximumMessageSizeBytes) + if err != nil { + attachErrorToExecuteResponse(response, util.StatusWrap(err, "Failed to obtain command")) + return response + } + command := commandMessage.(*remoteexecution.Command) + outputHierarchy, err := NewOutputHierarchy(command) + if err != nil { + attachErrorToExecuteResponse(response, err) + return response + } + if err := outputHierarchy.CreateParentDirectories(inputRootDirectory); err != nil { + attachErrorToExecuteResponse(response, err) + return response + } + + // Create a directory inside the build directory that build + // actions may use to store temporary files. This ensures that + // temporary files are automatically removed when the build + // action completes. When using FUSE, it also causes quotas to + // be applied to them. + if err := buildDirectory.Mkdir(temporaryDirectoryComponent, 0o777); err != nil { + attachErrorToExecuteResponse( + response, + util.StatusWrap(err, "Failed to create temporary directory inside build directory")) + return response + } + + executionStateUpdates <- &remoteworker.CurrentState_Executing{ + ActionDigest: request.ActionDigest, + ExecutionState: &remoteworker.CurrentState_Executing_Running{ + Running: &emptypb.Empty{}, + }, + } + + environmentVariables := map[string]string{} + for name, value := range be.environmentVariables { + environmentVariables[name] = value + } + for _, environmentVariable := range command.EnvironmentVariables { + environmentVariables[environmentVariable.Name] = environmentVariable.Value + } + + // Invoke the command. 
+ ctxWithTimeout, cancelTimeout := be.clock.NewContextWithTimeout(ctxWithIOError, executionTimeout) + runResponse, runErr := be.runner.Run(ctxWithTimeout, &runner_pb.RunRequest{ + Arguments: command.Arguments, + EnvironmentVariables: environmentVariables, + WorkingDirectory: command.WorkingDirectory, + StdoutPath: buildDirectoryPath.Append(stdoutComponent).String(), + StderrPath: buildDirectoryPath.Append(stderrComponent).String(), + InputRootDirectory: buildDirectoryPath.Append(inputRootDirectoryComponent).String(), + TemporaryDirectory: buildDirectoryPath.Append(temporaryDirectoryComponent).String(), + }) + cancelTimeout() + <-ctxWithTimeout.Done() + + // If an I/O error occurred during execution, attach any errors + // related to it to the response first. These errors should be + // preferred over the cancelation errors that are a result of it. + if err := ioErrorCapturer.GetError(); err != nil { + attachErrorToExecuteResponse(response, util.StatusWrap(err, "I/O error while running command")) + } + + // Attach the exit code or execution error. + if runErr == nil { + response.Result.ExitCode = runResponse.ExitCode + response.Result.ExecutionMetadata.AuxiliaryMetadata = append(response.Result.ExecutionMetadata.AuxiliaryMetadata, runResponse.ResourceUsage...) + } else { + attachErrorToExecuteResponse(response, util.StatusWrap(runErr, "Failed to run command")) + } + + // For FUSE-based workers: Attach the amount of time the action + // ran, minus the time it was delayed reading data from storage. + if unsuspendedDuration, ok := ctxWithTimeout.Value(re_clock.UnsuspendedDurationKey{}).(time.Duration); ok { + response.Result.ExecutionMetadata.VirtualExecutionDuration = durationpb.New(unsuspendedDuration) + } + + executionStateUpdates <- &remoteworker.CurrentState_Executing{ + ActionDigest: request.ActionDigest, + ExecutionState: &remoteworker.CurrentState_Executing_UploadingOutputs{ + UploadingOutputs: &emptypb.Empty{}, + }, + } + + // Upload command output. 
In the common case, the stdout and + // stderr files are empty. If that's the case, don't bother + // setting the digest to keep the ActionResult small. + if stdoutDigest, err := buildDirectory.UploadFile(ctx, stdoutComponent, digestFunction); err != nil { + attachErrorToExecuteResponse(response, util.StatusWrap(err, "Failed to store stdout")) + } else if stdoutDigest.GetSizeBytes() > 0 { + response.Result.StdoutDigest = stdoutDigest.GetProto() + } + if stderrDigest, err := buildDirectory.UploadFile(ctx, stderrComponent, digestFunction); err != nil { + attachErrorToExecuteResponse(response, util.StatusWrap(err, "Failed to store stderr")) + } else if stderrDigest.GetSizeBytes() > 0 { + response.Result.StderrDigest = stderrDigest.GetProto() + } + if err := outputHierarchy.UploadOutputs(ctx, inputRootDirectory, be.contentAddressableStorage, digestFunction, response.Result); err != nil { + attachErrorToExecuteResponse(response, err) + } + + return response +} diff --git a/pkg/builder/local_build_executor_test.go b/pkg/builder/local_build_executor_test.go new file mode 100644 index 0000000..2f5bde8 --- /dev/null +++ b/pkg/builder/local_build_executor_test.go @@ -0,0 +1,1026 @@ +package builder_test + +import ( + "context" + "os" + "testing" + "time" + + remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + "github.com/buildbarn/bb-remote-execution/internal/mock" + "github.com/buildbarn/bb-remote-execution/pkg/builder" + re_clock "github.com/buildbarn/bb-remote-execution/pkg/clock" + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/access" + "github.com/buildbarn/bb-remote-execution/pkg/proto/remoteworker" + runner_pb "github.com/buildbarn/bb-remote-execution/pkg/proto/runner" + "github.com/buildbarn/bb-storage/pkg/blobstore/buffer" + "github.com/buildbarn/bb-storage/pkg/digest" + "github.com/buildbarn/bb-storage/pkg/filesystem" + "github.com/buildbarn/bb-storage/pkg/filesystem/path" + "github.com/buildbarn/bb-storage/pkg/testutil" 
+ "github.com/buildbarn/bb-storage/pkg/util" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/anypb" + "google.golang.org/protobuf/types/known/durationpb" + "google.golang.org/protobuf/types/known/emptypb" +) + +func TestLocalBuildExecutorInvalidActionDigest(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + contentAddressableStorage := mock.NewMockBlobAccess(ctrl) + buildDirectoryCreator := mock.NewMockBuildDirectoryCreator(ctrl) + runner := mock.NewMockRunnerClient(ctrl) + clock := mock.NewMockClock(ctrl) + localBuildExecutor := builder.NewLocalBuildExecutor(contentAddressableStorage, buildDirectoryCreator, runner, clock, nil, 10000, map[string]string{}) + + filePool := mock.NewMockFilePool(ctrl) + monitor := mock.NewMockUnreadDirectoryMonitor(ctrl) + metadata := make(chan *remoteworker.CurrentState_Executing, 10) + executeResponse := localBuildExecutor.Execute( + ctx, + filePool, + monitor, + digest.MustNewFunction("netbsd", remoteexecution.DigestFunction_SHA256), + &remoteworker.DesiredState_Executing{ + ActionDigest: &remoteexecution.Digest{ + Hash: "This is not a valid action digest!", + SizeBytes: 7, + }, + Action: &remoteexecution.Action{ + InputRootDigest: &remoteexecution.Digest{ + Hash: "7777777777777777777777777777777777777777777777777777777777777777", + SizeBytes: 42, + }, + Timeout: &durationpb.Duration{Seconds: 3600}, + }, + }, + metadata) + testutil.RequireEqualProto(t, &remoteexecution.ExecuteResponse{ + Result: &remoteexecution.ActionResult{ + ExecutionMetadata: &remoteexecution.ExecutedActionMetadata{}, + }, + Status: status.New(codes.InvalidArgument, "Failed to extract digest for action: Hash has length 34, while 64 characters were expected").Proto(), + }, executeResponse) +} + +func TestLocalBuildExecutorMissingAction(t *testing.T) { + ctrl, ctx := 
gomock.WithContext(context.Background(), t) + + contentAddressableStorage := mock.NewMockBlobAccess(ctrl) + buildDirectoryCreator := mock.NewMockBuildDirectoryCreator(ctrl) + runner := mock.NewMockRunnerClient(ctrl) + clock := mock.NewMockClock(ctrl) + localBuildExecutor := builder.NewLocalBuildExecutor(contentAddressableStorage, buildDirectoryCreator, runner, clock, nil, 10000, map[string]string{}) + + filePool := mock.NewMockFilePool(ctrl) + monitor := mock.NewMockUnreadDirectoryMonitor(ctrl) + metadata := make(chan *remoteworker.CurrentState_Executing, 10) + executeResponse := localBuildExecutor.Execute( + ctx, + filePool, + monitor, + digest.MustNewFunction("netbsd", remoteexecution.DigestFunction_SHA256), + &remoteworker.DesiredState_Executing{ + ActionDigest: &remoteexecution.Digest{ + Hash: "5555555555555555555555555555555555555555555555555555555555555555", + SizeBytes: 7, + }, + }, + metadata) + testutil.RequireEqualProto(t, &remoteexecution.ExecuteResponse{ + Result: &remoteexecution.ActionResult{ + ExecutionMetadata: &remoteexecution.ExecutedActionMetadata{}, + }, + Status: status.New(codes.InvalidArgument, "Request does not contain an action").Proto(), + }, executeResponse) +} + +func TestLocalBuildExecutorBuildDirectoryCreatorFailedFailed(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + contentAddressableStorage := mock.NewMockBlobAccess(ctrl) + buildDirectoryCreator := mock.NewMockBuildDirectoryCreator(ctrl) + buildDirectoryCreator.EXPECT().GetBuildDirectory( + ctx, + digest.MustNewDigest("netbsd", remoteexecution.DigestFunction_SHA256, "5555555555555555555555555555555555555555555555555555555555555555", 7), + false, + ).Return(nil, nil, status.Error(codes.InvalidArgument, "Platform requirements not provided")) + runner := mock.NewMockRunnerClient(ctrl) + clock := mock.NewMockClock(ctrl) + localBuildExecutor := builder.NewLocalBuildExecutor(contentAddressableStorage, buildDirectoryCreator, runner, clock, nil, 10000, 
map[string]string{}) + + filePool := mock.NewMockFilePool(ctrl) + monitor := mock.NewMockUnreadDirectoryMonitor(ctrl) + metadata := make(chan *remoteworker.CurrentState_Executing, 10) + executeResponse := localBuildExecutor.Execute( + ctx, + filePool, + monitor, + digest.MustNewFunction("netbsd", remoteexecution.DigestFunction_SHA256), + &remoteworker.DesiredState_Executing{ + ActionDigest: &remoteexecution.Digest{ + Hash: "5555555555555555555555555555555555555555555555555555555555555555", + SizeBytes: 7, + }, + Action: &remoteexecution.Action{ + InputRootDigest: &remoteexecution.Digest{ + Hash: "7777777777777777777777777777777777777777777777777777777777777777", + SizeBytes: 42, + }, + Timeout: &durationpb.Duration{Seconds: 3600}, + }, + }, + metadata) + testutil.RequireEqualProto(t, &remoteexecution.ExecuteResponse{ + Result: &remoteexecution.ActionResult{ + ExecutionMetadata: &remoteexecution.ExecutedActionMetadata{}, + }, + Status: status.New(codes.InvalidArgument, "Failed to acquire build environment: Platform requirements not provided").Proto(), + }, executeResponse) +} + +func TestLocalBuildExecutorInputRootPopulationFailed(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + contentAddressableStorage := mock.NewMockBlobAccess(ctrl) + buildDirectoryCreator := mock.NewMockBuildDirectoryCreator(ctrl) + buildDirectory := mock.NewMockBuildDirectory(ctrl) + buildDirectoryCreator.EXPECT().GetBuildDirectory( + ctx, + digest.MustNewDigest("netbsd", remoteexecution.DigestFunction_SHA256, "5555555555555555555555555555555555555555555555555555555555555555", 7), + false, + ).Return(buildDirectory, nil, nil) + filePool := mock.NewMockFilePool(ctrl) + monitor := mock.NewMockUnreadDirectoryMonitor(ctrl) + buildDirectory.EXPECT().InstallHooks(filePool, gomock.Any()) + buildDirectory.EXPECT().Mkdir(path.MustNewComponent("root"), os.FileMode(0o777)) + inputRootDirectory := mock.NewMockBuildDirectory(ctrl) + 
buildDirectory.EXPECT().EnterBuildDirectory(path.MustNewComponent("root")).Return(inputRootDirectory, nil) + inputRootDirectory.EXPECT().MergeDirectoryContents( + ctx, + gomock.Any(), + digest.MustNewDigest("netbsd", remoteexecution.DigestFunction_SHA256, "7777777777777777777777777777777777777777777777777777777777777777", 42), + monitor, + ).Return(status.Error(codes.FailedPrecondition, "Some input files could not be found")) + inputRootDirectory.EXPECT().Close() + buildDirectory.EXPECT().Close() + runner := mock.NewMockRunnerClient(ctrl) + clock := mock.NewMockClock(ctrl) + localBuildExecutor := builder.NewLocalBuildExecutor(contentAddressableStorage, buildDirectoryCreator, runner, clock, nil, 10000, map[string]string{}) + + metadata := make(chan *remoteworker.CurrentState_Executing, 10) + executeResponse := localBuildExecutor.Execute( + ctx, + filePool, + monitor, + digest.MustNewFunction("netbsd", remoteexecution.DigestFunction_SHA256), + &remoteworker.DesiredState_Executing{ + ActionDigest: &remoteexecution.Digest{ + Hash: "5555555555555555555555555555555555555555555555555555555555555555", + SizeBytes: 7, + }, + Action: &remoteexecution.Action{ + InputRootDigest: &remoteexecution.Digest{ + Hash: "7777777777777777777777777777777777777777777777777777777777777777", + SizeBytes: 42, + }, + Timeout: &durationpb.Duration{Seconds: 3600}, + }, + }, + metadata) + testutil.RequireEqualProto(t, &remoteexecution.ExecuteResponse{ + Result: &remoteexecution.ActionResult{ + ExecutionMetadata: &remoteexecution.ExecutedActionMetadata{}, + }, + Status: status.New(codes.FailedPrecondition, "Some input files could not be found").Proto(), + }, executeResponse) +} + +func TestLocalBuildExecutorOutputDirectoryCreationFailure(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + contentAddressableStorage := mock.NewMockBlobAccess(ctrl) + contentAddressableStorage.EXPECT().Get( + gomock.Any(), + digest.MustNewDigest("fedora", 
remoteexecution.DigestFunction_SHA256, "6666666666666666666666666666666666666666666666666666666666666666", 234), + ).Return(buffer.NewProtoBufferFromProto(&remoteexecution.Command{ + Arguments: []string{"touch", "foo"}, + EnvironmentVariables: []*remoteexecution.Command_EnvironmentVariable{ + {Name: "PATH", Value: "/bin:/usr/bin"}, + }, + OutputPaths: []string{"foo/bar/baz"}, + }, buffer.UserProvided)) + buildDirectoryCreator := mock.NewMockBuildDirectoryCreator(ctrl) + buildDirectory := mock.NewMockBuildDirectory(ctrl) + buildDirectoryCreator.EXPECT().GetBuildDirectory( + ctx, + digest.MustNewDigest("fedora", remoteexecution.DigestFunction_SHA256, "5555555555555555555555555555555555555555555555555555555555555555", 7), + false, + ).Return(buildDirectory, nil, nil) + filePool := mock.NewMockFilePool(ctrl) + monitor := mock.NewMockUnreadDirectoryMonitor(ctrl) + buildDirectory.EXPECT().InstallHooks(filePool, gomock.Any()) + buildDirectory.EXPECT().Mkdir(path.MustNewComponent("root"), os.FileMode(0o777)) + inputRootDirectory := mock.NewMockBuildDirectory(ctrl) + buildDirectory.EXPECT().EnterBuildDirectory(path.MustNewComponent("root")).Return(inputRootDirectory, nil) + inputRootDirectory.EXPECT().MergeDirectoryContents( + ctx, + gomock.Any(), + digest.MustNewDigest("fedora", remoteexecution.DigestFunction_SHA256, "7777777777777777777777777777777777777777777777777777777777777777", 42), + monitor, + ).Return(nil) + inputRootDirectory.EXPECT().Mkdir(path.MustNewComponent("foo"), os.FileMode(0o777)).Return(status.Error(codes.Internal, "Out of disk space")) + inputRootDirectory.EXPECT().Close() + buildDirectory.EXPECT().Close() + runner := mock.NewMockRunnerClient(ctrl) + clock := mock.NewMockClock(ctrl) + localBuildExecutor := builder.NewLocalBuildExecutor(contentAddressableStorage, buildDirectoryCreator, runner, clock, nil, 10000, map[string]string{}) + + metadata := make(chan *remoteworker.CurrentState_Executing, 10) + executeResponse := localBuildExecutor.Execute( + 
ctx, + filePool, + monitor, + digest.MustNewFunction("fedora", remoteexecution.DigestFunction_SHA256), + &remoteworker.DesiredState_Executing{ + ActionDigest: &remoteexecution.Digest{ + Hash: "5555555555555555555555555555555555555555555555555555555555555555", + SizeBytes: 7, + }, + Action: &remoteexecution.Action{ + CommandDigest: &remoteexecution.Digest{ + Hash: "6666666666666666666666666666666666666666666666666666666666666666", + SizeBytes: 234, + }, + InputRootDigest: &remoteexecution.Digest{ + Hash: "7777777777777777777777777777777777777777777777777777777777777777", + SizeBytes: 42, + }, + Timeout: &durationpb.Duration{Seconds: 3600}, + }, + }, + metadata) + testutil.RequireEqualProto(t, &remoteexecution.ExecuteResponse{ + Result: &remoteexecution.ActionResult{ + ExecutionMetadata: &remoteexecution.ExecutedActionMetadata{}, + }, + Status: status.New(codes.Internal, "Failed to create output parent directory \"foo\": Out of disk space").Proto(), + }, executeResponse) +} + +func TestLocalBuildExecutorMissingCommand(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + contentAddressableStorage := mock.NewMockBlobAccess(ctrl) + buildDirectoryCreator := mock.NewMockBuildDirectoryCreator(ctrl) + buildDirectory := mock.NewMockBuildDirectory(ctrl) + buildDirectoryCreator.EXPECT().GetBuildDirectory( + ctx, + digest.MustNewDigest("netbsd", remoteexecution.DigestFunction_SHA256, "5555555555555555555555555555555555555555555555555555555555555555", 7), + false, + ).Return(buildDirectory, nil, nil) + filePool := mock.NewMockFilePool(ctrl) + monitor := mock.NewMockUnreadDirectoryMonitor(ctrl) + buildDirectory.EXPECT().InstallHooks(filePool, gomock.Any()) + buildDirectory.EXPECT().Mkdir(path.MustNewComponent("root"), os.FileMode(0o777)) + inputRootDirectory := mock.NewMockBuildDirectory(ctrl) + buildDirectory.EXPECT().EnterBuildDirectory(path.MustNewComponent("root")).Return(inputRootDirectory, nil) + inputRootDirectory.EXPECT().MergeDirectoryContents( 
+ ctx, + gomock.Any(), + digest.MustNewDigest("netbsd", remoteexecution.DigestFunction_SHA256, "7777777777777777777777777777777777777777777777777777777777777777", 42), + monitor, + ).Return(nil) + inputRootDirectory.EXPECT().Close() + buildDirectory.EXPECT().Close() + runner := mock.NewMockRunnerClient(ctrl) + clock := mock.NewMockClock(ctrl) + localBuildExecutor := builder.NewLocalBuildExecutor(contentAddressableStorage, buildDirectoryCreator, runner, clock, nil, 10000, map[string]string{}) + + metadata := make(chan *remoteworker.CurrentState_Executing, 10) + executeResponse := localBuildExecutor.Execute( + ctx, + filePool, + monitor, + digest.MustNewFunction("netbsd", remoteexecution.DigestFunction_SHA256), + &remoteworker.DesiredState_Executing{ + ActionDigest: &remoteexecution.Digest{ + Hash: "5555555555555555555555555555555555555555555555555555555555555555", + SizeBytes: 7, + }, + Action: &remoteexecution.Action{ + InputRootDigest: &remoteexecution.Digest{ + Hash: "7777777777777777777777777777777777777777777777777777777777777777", + SizeBytes: 42, + }, + Timeout: &durationpb.Duration{Seconds: 3600}, + }, + }, + metadata) + testutil.RequireEqualProto(t, &remoteexecution.ExecuteResponse{ + Result: &remoteexecution.ActionResult{ + ExecutionMetadata: &remoteexecution.ExecutedActionMetadata{}, + }, + Status: status.New(codes.InvalidArgument, "Failed to extract digest for command: No digest provided").Proto(), + }, executeResponse) +} + +func TestLocalBuildExecutorOutputSymlinkReadingFailure(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + contentAddressableStorage := mock.NewMockBlobAccess(ctrl) + contentAddressableStorage.EXPECT().Get( + gomock.Any(), + digest.MustNewDigest("nintendo64", remoteexecution.DigestFunction_SHA256, "6666666666666666666666666666666666666666666666666666666666666666", 234), + ).Return(buffer.NewProtoBufferFromProto(&remoteexecution.Command{ + Arguments: []string{"touch", "foo"}, + EnvironmentVariables: 
[]*remoteexecution.Command_EnvironmentVariable{ + {Name: "PATH", Value: "/bin:/usr/bin"}, + }, + OutputPaths: []string{"foo"}, + }, buffer.UserProvided)) + buildDirectory := mock.NewMockBuildDirectory(ctrl) + buildDirectory.EXPECT().UploadFile(ctx, path.MustNewComponent("stdout"), gomock.Any()).Return( + digest.MustNewDigest("nintendo64", remoteexecution.DigestFunction_SHA256, "0000000000000000000000000000000000000000000000000000000000000005", 567), + nil) + buildDirectory.EXPECT().UploadFile(ctx, path.MustNewComponent("stderr"), gomock.Any()).Return( + digest.MustNewDigest("nintendo64", remoteexecution.DigestFunction_SHA256, "0000000000000000000000000000000000000000000000000000000000000006", 678), + nil) + contentAddressableStorage.EXPECT().Put( + ctx, + digest.MustNewDigest("nintendo64", remoteexecution.DigestFunction_SHA256, "102b51b9765a56a3e899f7cf0ee38e5251f9c503b357b330a49183eb7b155604", 2), + gomock.Any()). + DoAndReturn(func(ctx context.Context, digest digest.Digest, b buffer.Buffer) error { + m, err := b.ToProto(&remoteexecution.Tree{}, 10000) + require.NoError(t, err) + testutil.RequireEqualProto(t, &remoteexecution.Tree{ + Root: &remoteexecution.Directory{}, + }, m) + return nil + }) + + buildDirectoryCreator := mock.NewMockBuildDirectoryCreator(ctrl) + buildDirectoryCreator.EXPECT().GetBuildDirectory( + ctx, + digest.MustNewDigest("nintendo64", remoteexecution.DigestFunction_SHA256, "5555555555555555555555555555555555555555555555555555555555555555", 7), + false, + ).Return(buildDirectory, nil, nil) + filePool := mock.NewMockFilePool(ctrl) + monitor := mock.NewMockUnreadDirectoryMonitor(ctrl) + buildDirectory.EXPECT().InstallHooks(filePool, gomock.Any()) + buildDirectory.EXPECT().Mkdir(path.MustNewComponent("root"), os.FileMode(0o777)) + inputRootDirectory := mock.NewMockBuildDirectory(ctrl) + buildDirectory.EXPECT().EnterBuildDirectory(path.MustNewComponent("root")).Return(inputRootDirectory, nil) + inputRootDirectory.EXPECT().MergeDirectoryContents( + 
ctx, + gomock.Any(), + digest.MustNewDigest("nintendo64", remoteexecution.DigestFunction_SHA256, "7777777777777777777777777777777777777777777777777777777777777777", 42), + monitor, + ).Return(nil) + buildDirectory.EXPECT().Mkdir(path.MustNewComponent("tmp"), os.FileMode(0o777)) + runner := mock.NewMockRunnerClient(ctrl) + runner.EXPECT().Run(gomock.Any(), &runner_pb.RunRequest{ + Arguments: []string{"touch", "foo"}, + EnvironmentVariables: map[string]string{"PATH": "/bin:/usr/bin"}, + WorkingDirectory: "", + StdoutPath: "stdout", + StderrPath: "stderr", + InputRootDirectory: "root", + TemporaryDirectory: "tmp", + }).Return(&runner_pb.RunResponse{ + ExitCode: 0, + }, nil) + fooDirectory := mock.NewMockUploadableDirectory(ctrl) + inputRootDirectory.EXPECT().Lstat(path.MustNewComponent("foo")).Return(filesystem.NewFileInfo(path.MustNewComponent("foo"), filesystem.FileTypeDirectory, false), nil) + inputRootDirectory.EXPECT().EnterUploadableDirectory(path.MustNewComponent("foo")).Return(fooDirectory, nil) + fooDirectory.EXPECT().ReadDir().Return([]filesystem.FileInfo{ + filesystem.NewFileInfo(path.MustNewComponent("bar"), filesystem.FileTypeSymlink, false), + }, nil) + fooDirectory.EXPECT().Readlink(path.MustNewComponent("bar")).Return("", status.Error(codes.Internal, "Cosmic rays caused interference")) + fooDirectory.EXPECT().Close() + inputRootDirectory.EXPECT().Close() + buildDirectory.EXPECT().Close() + clock := mock.NewMockClock(ctrl) + clock.EXPECT().NewContextWithTimeout(gomock.Any(), time.Hour).DoAndReturn(func(parent context.Context, timeout time.Duration) (context.Context, context.CancelFunc) { + return context.WithCancel(parent) + }) + localBuildExecutor := builder.NewLocalBuildExecutor(contentAddressableStorage, buildDirectoryCreator, runner, clock, nil, 10000, map[string]string{}) + + metadata := make(chan *remoteworker.CurrentState_Executing, 10) + executeResponse := localBuildExecutor.Execute( + ctx, + filePool, + monitor, + 
digest.MustNewFunction("nintendo64", remoteexecution.DigestFunction_SHA256), + &remoteworker.DesiredState_Executing{ + ActionDigest: &remoteexecution.Digest{ + Hash: "5555555555555555555555555555555555555555555555555555555555555555", + SizeBytes: 7, + }, + Action: &remoteexecution.Action{ + CommandDigest: &remoteexecution.Digest{ + Hash: "6666666666666666666666666666666666666666666666666666666666666666", + SizeBytes: 234, + }, + InputRootDigest: &remoteexecution.Digest{ + Hash: "7777777777777777777777777777777777777777777777777777777777777777", + SizeBytes: 42, + }, + Timeout: &durationpb.Duration{Seconds: 3600}, + }, + }, + metadata) + testutil.RequireEqualProto(t, &remoteexecution.ExecuteResponse{ + Result: &remoteexecution.ActionResult{ + OutputDirectories: []*remoteexecution.OutputDirectory{ + { + Path: "foo", + TreeDigest: &remoteexecution.Digest{ + Hash: "102b51b9765a56a3e899f7cf0ee38e5251f9c503b357b330a49183eb7b155604", + SizeBytes: 2, + }, + IsTopologicallySorted: true, + }, + }, + StdoutDigest: &remoteexecution.Digest{ + Hash: "0000000000000000000000000000000000000000000000000000000000000005", + SizeBytes: 567, + }, + StderrDigest: &remoteexecution.Digest{ + Hash: "0000000000000000000000000000000000000000000000000000000000000006", + SizeBytes: 678, + }, + ExecutionMetadata: &remoteexecution.ExecutedActionMetadata{}, + }, + Status: status.New(codes.Internal, "Failed to read output symlink \"foo/bar\": Cosmic rays caused interference").Proto(), + }, executeResponse) +} + +// TestLocalBuildExecutorSuccess tests a full invocation of a simple +// build step, equivalent to compiling a simple C++ file. +func TestLocalBuildExecutorSuccess(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + // File system operations that should occur against the input + // root directory. Creation of + // bazel-out/k8-fastbuild/bin/_objs/hello. 
+ inputRootDirectory := mock.NewMockBuildDirectory(ctrl) + inputRootDirectory.EXPECT().Mkdir(path.MustNewComponent("bazel-out"), os.FileMode(0o777)).Return(nil) + bazelOutDirectory := mock.NewMockParentPopulatableDirectory(ctrl) + inputRootDirectory.EXPECT().EnterParentPopulatableDirectory(path.MustNewComponent("bazel-out")).Return(bazelOutDirectory, nil) + bazelOutDirectory.EXPECT().Close() + bazelOutDirectory.EXPECT().Mkdir(path.MustNewComponent("k8-fastbuild"), os.FileMode(0o777)).Return(nil) + k8FastbuildDirectory := mock.NewMockParentPopulatableDirectory(ctrl) + bazelOutDirectory.EXPECT().EnterParentPopulatableDirectory(path.MustNewComponent("k8-fastbuild")).Return(k8FastbuildDirectory, nil) + k8FastbuildDirectory.EXPECT().Close() + k8FastbuildDirectory.EXPECT().Mkdir(path.MustNewComponent("bin"), os.FileMode(0o777)).Return(nil) + binDirectory := mock.NewMockParentPopulatableDirectory(ctrl) + k8FastbuildDirectory.EXPECT().EnterParentPopulatableDirectory(path.MustNewComponent("bin")).Return(binDirectory, nil) + binDirectory.EXPECT().Close() + binDirectory.EXPECT().Mkdir(path.MustNewComponent("_objs"), os.FileMode(0o777)).Return(nil) + objsDirectory := mock.NewMockParentPopulatableDirectory(ctrl) + binDirectory.EXPECT().EnterParentPopulatableDirectory(path.MustNewComponent("_objs")).Return(objsDirectory, nil) + objsDirectory.EXPECT().Close() + objsDirectory.EXPECT().Mkdir(path.MustNewComponent("hello"), os.FileMode(0o777)).Return(nil) + + // Uploading of files in bazel-out/k8-fastbuild/bin/_objs/hello. 
+ bazelOutUploadableDirectory := mock.NewMockUploadableDirectory(ctrl) + inputRootDirectory.EXPECT().EnterUploadableDirectory(path.MustNewComponent("bazel-out")).Return(bazelOutUploadableDirectory, nil) + bazelOutUploadableDirectory.EXPECT().Close() + k8sFastbuildUploadableDirectory := mock.NewMockUploadableDirectory(ctrl) + bazelOutUploadableDirectory.EXPECT().EnterUploadableDirectory(path.MustNewComponent("k8-fastbuild")).Return(k8sFastbuildUploadableDirectory, nil) + k8sFastbuildUploadableDirectory.EXPECT().Close() + binUploadableDirectory := mock.NewMockUploadableDirectory(ctrl) + k8sFastbuildUploadableDirectory.EXPECT().EnterUploadableDirectory(path.MustNewComponent("bin")).Return(binUploadableDirectory, nil) + binUploadableDirectory.EXPECT().Close() + objsUploadableDirectory := mock.NewMockUploadableDirectory(ctrl) + binUploadableDirectory.EXPECT().EnterUploadableDirectory(path.MustNewComponent("_objs")).Return(objsUploadableDirectory, nil) + objsUploadableDirectory.EXPECT().Close() + helloUploadableDirectory := mock.NewMockUploadableDirectory(ctrl) + objsUploadableDirectory.EXPECT().EnterUploadableDirectory(path.MustNewComponent("hello")).Return(helloUploadableDirectory, nil) + helloUploadableDirectory.EXPECT().Lstat(path.MustNewComponent("hello.pic.d")).Return(filesystem.NewFileInfo(path.MustNewComponent("hello.pic.d"), filesystem.FileTypeRegularFile, false), nil) + helloUploadableDirectory.EXPECT().Lstat(path.MustNewComponent("hello.pic.o")).Return(filesystem.NewFileInfo(path.MustNewComponent("hello.pic.o"), filesystem.FileTypeRegularFile, true), nil) + helloUploadableDirectory.EXPECT().Close() + + // Read operations against the Content Addressable Storage. 
+ contentAddressableStorage := mock.NewMockBlobAccess(ctrl) + contentAddressableStorage.EXPECT().Get( + gomock.Any(), + digest.MustNewDigest("ubuntu1804", remoteexecution.DigestFunction_SHA256, "0000000000000000000000000000000000000000000000000000000000000002", 234), + ).Return(buffer.NewProtoBufferFromProto(&remoteexecution.Command{ + Arguments: []string{ + "/usr/local/bin/clang", + "-MD", + "-MF", + "bazel-out/k8-fastbuild/bin/_objs/hello/hello.pic.d", + "-c", + "hello.cc", + "-o", + "bazel-out/k8-fastbuild/bin/_objs/hello/hello.pic.o", + }, + EnvironmentVariables: []*remoteexecution.Command_EnvironmentVariable{ + {Name: "BAZEL_DO_NOT_DETECT_CPP_TOOLCHAIN", Value: "1"}, + {Name: "PATH", Value: "/bin:/usr/bin"}, + {Name: "PWD", Value: "/proc/self/cwd"}, + }, + OutputPaths: []string{ + "bazel-out/k8-fastbuild/bin/_objs/hello/hello.pic.d", + "bazel-out/k8-fastbuild/bin/_objs/hello/hello.pic.o", + }, + Platform: &remoteexecution.Platform{ + Properties: []*remoteexecution.Platform_Property{ + { + Name: "container-image", + Value: "docker://gcr.io/cloud-marketplace/google/rbe-debian8@sha256:4893599fb00089edc8351d9c26b31d3f600774cb5addefb00c70fdb6ca797abf", + }, + }, + }, + }, buffer.UserProvided)) + + // Write operations against the Content Addressable Storage. 
+ buildDirectory := mock.NewMockBuildDirectory(ctrl) + buildDirectory.EXPECT().UploadFile(ctx, path.MustNewComponent("stdout"), gomock.Any()).Return( + digest.MustNewDigest("ubuntu1804", remoteexecution.DigestFunction_SHA256, "0000000000000000000000000000000000000000000000000000000000000005", 567), + nil) + buildDirectory.EXPECT().UploadFile(ctx, path.MustNewComponent("stderr"), gomock.Any()).Return( + digest.MustNewDigest("ubuntu1804", remoteexecution.DigestFunction_SHA256, "0000000000000000000000000000000000000000000000000000000000000006", 678), + nil) + helloUploadableDirectory.EXPECT().UploadFile(ctx, path.MustNewComponent("hello.pic.d"), gomock.Any()).Return( + digest.MustNewDigest("ubuntu1804", remoteexecution.DigestFunction_SHA256, "0000000000000000000000000000000000000000000000000000000000000007", 789), + nil) + helloUploadableDirectory.EXPECT().UploadFile(ctx, path.MustNewComponent("hello.pic.o"), gomock.Any()).Return( + digest.MustNewDigest("ubuntu1804", remoteexecution.DigestFunction_SHA256, "0000000000000000000000000000000000000000000000000000000000000008", 890), + nil) + + // Command execution. 
+ buildDirectoryCreator := mock.NewMockBuildDirectoryCreator(ctrl) + buildDirectoryCreator.EXPECT().GetBuildDirectory( + ctx, + digest.MustNewDigest("ubuntu1804", remoteexecution.DigestFunction_SHA256, "0000000000000000000000000000000000000000000000000000000000000001", 123), + false, + ).Return(buildDirectory, ((*path.Trace)(nil)).Append(path.MustNewComponent("0000000000000000")), nil) + filePool := mock.NewMockFilePool(ctrl) + monitor := mock.NewMockUnreadDirectoryMonitor(ctrl) + buildDirectory.EXPECT().InstallHooks(filePool, gomock.Any()) + buildDirectory.EXPECT().Mkdir(path.MustNewComponent("root"), os.FileMode(0o777)) + buildDirectory.EXPECT().EnterBuildDirectory(path.MustNewComponent("root")).Return(inputRootDirectory, nil) + inputRootDirectory.EXPECT().MergeDirectoryContents( + ctx, + gomock.Any(), + digest.MustNewDigest("ubuntu1804", remoteexecution.DigestFunction_SHA256, "0000000000000000000000000000000000000000000000000000000000000003", 345), + monitor, + ).Return(nil) + inputRootDirectory.EXPECT().Mkdir(path.MustNewComponent("dev"), os.FileMode(0o777)) + inputRootDevDirectory := mock.NewMockBuildDirectory(ctrl) + inputRootDirectory.EXPECT().EnterBuildDirectory(path.MustNewComponent("dev")).Return(inputRootDevDirectory, nil) + inputRootDevDirectory.EXPECT().Mknod( + path.MustNewComponent("null"), + os.FileMode(os.ModeDevice|os.ModeCharDevice|0o666), + filesystem.NewDeviceNumberFromMajorMinor(1, 3)) + inputRootDevDirectory.EXPECT().Close() + buildDirectory.EXPECT().Mkdir(path.MustNewComponent("tmp"), os.FileMode(0o777)) + resourceUsage, err := anypb.New(&emptypb.Empty{}) + require.NoError(t, err) + runner := mock.NewMockRunnerClient(ctrl) + runner.EXPECT().Run(gomock.Any(), &runner_pb.RunRequest{ + Arguments: []string{ + "/usr/local/bin/clang", + "-MD", + "-MF", + "bazel-out/k8-fastbuild/bin/_objs/hello/hello.pic.d", + "-c", + "hello.cc", + "-o", + "bazel-out/k8-fastbuild/bin/_objs/hello/hello.pic.o", + }, + EnvironmentVariables: map[string]string{ + 
"BAZEL_DO_NOT_DETECT_CPP_TOOLCHAIN": "1", + "PATH": "/bin:/usr/bin", + "PWD": "/proc/self/cwd", + "TEST_VAR": "123", + }, + WorkingDirectory: "", + StdoutPath: "0000000000000000/stdout", + StderrPath: "0000000000000000/stderr", + InputRootDirectory: "0000000000000000/root", + TemporaryDirectory: "0000000000000000/tmp", + }).Return(&runner_pb.RunResponse{ + ExitCode: 0, + ResourceUsage: []*anypb.Any{resourceUsage}, + }, nil) + inputRootDirectory.EXPECT().Close() + buildDirectory.EXPECT().Close() + clock := mock.NewMockClock(ctrl) + clock.EXPECT().NewContextWithTimeout(gomock.Any(), time.Hour).DoAndReturn(func(parent context.Context, timeout time.Duration) (context.Context, context.CancelFunc) { + return context.WithCancel(context.WithValue(parent, re_clock.UnsuspendedDurationKey{}, 5*time.Second)) + }) + inputRootCharacterDevices := map[path.Component]filesystem.DeviceNumber{ + path.MustNewComponent("null"): filesystem.NewDeviceNumberFromMajorMinor(1, 3), + } + environmentVars := map[string]string{ + "TEST_VAR": "123", + "PWD": "dont-overwrite", + } + localBuildExecutor := builder.NewLocalBuildExecutor(contentAddressableStorage, buildDirectoryCreator, runner, clock, inputRootCharacterDevices, 10000, environmentVars) + + requestMetadata, err := anypb.New(&remoteexecution.RequestMetadata{ + ToolInvocationId: "666b72d8-c43e-4998-866c-9312a31fe86d", + }) + require.NoError(t, err) + metadata := make(chan *remoteworker.CurrentState_Executing, 10) + executeResponse := localBuildExecutor.Execute( + ctx, + filePool, + monitor, + digest.MustNewFunction("ubuntu1804", remoteexecution.DigestFunction_SHA256), + &remoteworker.DesiredState_Executing{ + ActionDigest: &remoteexecution.Digest{ + Hash: "0000000000000000000000000000000000000000000000000000000000000001", + SizeBytes: 123, + }, + Action: &remoteexecution.Action{ + CommandDigest: &remoteexecution.Digest{ + Hash: "0000000000000000000000000000000000000000000000000000000000000002", + SizeBytes: 234, + }, + InputRootDigest: 
&remoteexecution.Digest{ + Hash: "0000000000000000000000000000000000000000000000000000000000000003", + SizeBytes: 345, + }, + Timeout: &durationpb.Duration{Seconds: 3600}, + }, + AuxiliaryMetadata: []*anypb.Any{requestMetadata}, + }, + metadata) + testutil.RequireEqualProto(t, &remoteexecution.ExecuteResponse{ + Result: &remoteexecution.ActionResult{ + OutputFiles: []*remoteexecution.OutputFile{ + { + Path: "bazel-out/k8-fastbuild/bin/_objs/hello/hello.pic.d", + Digest: &remoteexecution.Digest{ + Hash: "0000000000000000000000000000000000000000000000000000000000000007", + SizeBytes: 789, + }, + }, + { + Path: "bazel-out/k8-fastbuild/bin/_objs/hello/hello.pic.o", + Digest: &remoteexecution.Digest{ + Hash: "0000000000000000000000000000000000000000000000000000000000000008", + SizeBytes: 890, + }, + IsExecutable: true, + }, + }, + StdoutDigest: &remoteexecution.Digest{ + Hash: "0000000000000000000000000000000000000000000000000000000000000005", + SizeBytes: 567, + }, + StderrDigest: &remoteexecution.Digest{ + Hash: "0000000000000000000000000000000000000000000000000000000000000006", + SizeBytes: 678, + }, + ExecutionMetadata: &remoteexecution.ExecutedActionMetadata{ + AuxiliaryMetadata: []*anypb.Any{requestMetadata, resourceUsage}, + VirtualExecutionDuration: &durationpb.Duration{Seconds: 5}, + }, + }, + }, executeResponse) +} + +func TestLocalBuildExecutorCachingInvalidTimeout(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + contentAddressableStorage := mock.NewMockBlobAccess(ctrl) + buildDirectoryCreator := mock.NewMockBuildDirectoryCreator(ctrl) + runner := mock.NewMockRunnerClient(ctrl) + clock := mock.NewMockClock(ctrl) + localBuildExecutor := builder.NewLocalBuildExecutor(contentAddressableStorage, buildDirectoryCreator, runner, clock, nil, 10000, map[string]string{}) + + // Execution should fail, as the number of nanoseconds in the + // timeout is not within bounds. 
+ filePool := mock.NewMockFilePool(ctrl) + monitor := mock.NewMockUnreadDirectoryMonitor(ctrl) + metadata := make(chan *remoteworker.CurrentState_Executing, 10) + executeResponse := localBuildExecutor.Execute( + ctx, + filePool, + monitor, + digest.MustNewFunction("ubuntu1804", remoteexecution.DigestFunction_SHA256), + &remoteworker.DesiredState_Executing{ + ActionDigest: &remoteexecution.Digest{ + Hash: "0000000000000000000000000000000000000000000000000000000000000001", + SizeBytes: 123, + }, + Action: &remoteexecution.Action{ + InputRootDigest: &remoteexecution.Digest{ + Hash: "0000000000000000000000000000000000000000000000000000000000000003", + SizeBytes: 345, + }, + Timeout: &durationpb.Duration{ + Nanos: 1000000000, + }, + }, + }, + metadata) + testutil.RequirePrefixedStatus(t, status.Error(codes.InvalidArgument, "Invalid execution timeout: "), status.ErrorProto(executeResponse.Status)) +} + +func TestLocalBuildExecutorInputRootIOFailureDuringExecution(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + // Build directory. 
+ buildDirectory := mock.NewMockBuildDirectory(ctrl) + contentAddressableStorage := mock.NewMockBlobAccess(ctrl) + contentAddressableStorage.EXPECT().Get( + gomock.Any(), + digest.MustNewDigest("ubuntu1804", remoteexecution.DigestFunction_SHA256, "0000000000000000000000000000000000000000000000000000000000000002", 234), + ).Return(buffer.NewProtoBufferFromProto(&remoteexecution.Command{ + Arguments: []string{"clang"}, + }, buffer.UserProvided)) + buildDirectory.EXPECT().UploadFile(ctx, path.MustNewComponent("stdout"), gomock.Any()).Return( + digest.MustNewDigest("ubuntu1804", remoteexecution.DigestFunction_SHA256, "0000000000000000000000000000000000000000000000000000000000000005", 567), + nil) + buildDirectory.EXPECT().UploadFile(ctx, path.MustNewComponent("stderr"), gomock.Any()).Return( + digest.MustNewDigest("ubuntu1804", remoteexecution.DigestFunction_SHA256, "0000000000000000000000000000000000000000000000000000000000000006", 678), + nil) + + // Build environment. + buildDirectoryCreator := mock.NewMockBuildDirectoryCreator(ctrl) + buildDirectoryCreator.EXPECT().GetBuildDirectory( + ctx, + digest.MustNewDigest("ubuntu1804", remoteexecution.DigestFunction_SHA256, "0000000000000000000000000000000000000000000000000000000000000001", 123), + false, + ).Return(buildDirectory, nil, nil) + filePool := mock.NewMockFilePool(ctrl) + monitor := mock.NewMockUnreadDirectoryMonitor(ctrl) + buildDirectory.EXPECT().InstallHooks(filePool, gomock.Any()) + + // Input root creation. Preserve the error logger that is + // provided, so that an I/O error can be triggered during the + // build. 
+ buildDirectory.EXPECT().Mkdir(path.MustNewComponent("root"), os.FileMode(0o777)) + inputRootDirectory := mock.NewMockBuildDirectory(ctrl) + buildDirectory.EXPECT().EnterBuildDirectory(path.MustNewComponent("root")).Return(inputRootDirectory, nil) + var errorLogger util.ErrorLogger + inputRootDirectory.EXPECT().MergeDirectoryContents( + ctx, + gomock.Any(), + digest.MustNewDigest("ubuntu1804", remoteexecution.DigestFunction_SHA256, "0000000000000000000000000000000000000000000000000000000000000003", 345), + monitor, + ).DoAndReturn(func(ctx context.Context, providedErrorLogger util.ErrorLogger, digest digest.Digest, monitor access.UnreadDirectoryMonitor) error { + errorLogger = providedErrorLogger + return nil + }) + buildDirectory.EXPECT().Mkdir(path.MustNewComponent("tmp"), os.FileMode(0o777)) + + // Let an I/O error in the input root trigger during the build. + // The build should be canceled immediately. The error should be + // propagated to the response. + runner := mock.NewMockRunnerClient(ctrl) + runner.EXPECT().Run(gomock.Any(), &runner_pb.RunRequest{ + Arguments: []string{"clang"}, + EnvironmentVariables: map[string]string{}, + WorkingDirectory: "", + StdoutPath: "stdout", + StderrPath: "stderr", + InputRootDirectory: "root", + TemporaryDirectory: "tmp", + }).DoAndReturn(func(ctx context.Context, request *runner_pb.RunRequest, opts ...grpc.CallOption) (*runner_pb.RunResponse, error) { + errorLogger.Log(status.Error(codes.FailedPrecondition, "Blob not found")) + <-ctx.Done() + return nil, util.StatusFromContext(ctx) + }) + inputRootDirectory.EXPECT().Close() + buildDirectory.EXPECT().Close() + clock := mock.NewMockClock(ctrl) + clock.EXPECT().NewContextWithTimeout(gomock.Any(), 15*time.Minute).DoAndReturn(func(parent context.Context, timeout time.Duration) (context.Context, context.CancelFunc) { + return context.WithCancel(parent) + }) + localBuildExecutor := builder.NewLocalBuildExecutor(contentAddressableStorage, buildDirectoryCreator, runner, clock, 
nil, 10000, map[string]string{}) + + metadata := make(chan *remoteworker.CurrentState_Executing, 10) + executeResponse := localBuildExecutor.Execute( + ctx, + filePool, + monitor, + digest.MustNewFunction("ubuntu1804", remoteexecution.DigestFunction_SHA256), + &remoteworker.DesiredState_Executing{ + ActionDigest: &remoteexecution.Digest{ + Hash: "0000000000000000000000000000000000000000000000000000000000000001", + SizeBytes: 123, + }, + Action: &remoteexecution.Action{ + CommandDigest: &remoteexecution.Digest{ + Hash: "0000000000000000000000000000000000000000000000000000000000000002", + SizeBytes: 234, + }, + InputRootDigest: &remoteexecution.Digest{ + Hash: "0000000000000000000000000000000000000000000000000000000000000003", + SizeBytes: 345, + }, + Timeout: &durationpb.Duration{ + Seconds: 900, + }, + }, + }, + metadata) + testutil.RequireEqualProto(t, &remoteexecution.ExecuteResponse{ + Result: &remoteexecution.ActionResult{ + StdoutDigest: &remoteexecution.Digest{ + Hash: "0000000000000000000000000000000000000000000000000000000000000005", + SizeBytes: 567, + }, + StderrDigest: &remoteexecution.Digest{ + Hash: "0000000000000000000000000000000000000000000000000000000000000006", + SizeBytes: 678, + }, + ExecutionMetadata: &remoteexecution.ExecutedActionMetadata{}, + }, + Status: status.New(codes.FailedPrecondition, "I/O error while running command: Blob not found").Proto(), + }, executeResponse) +} + +func TestLocalBuildExecutorTimeoutDuringExecution(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + // Build directory. 
+ buildDirectory := mock.NewMockBuildDirectory(ctrl) + contentAddressableStorage := mock.NewMockBlobAccess(ctrl) + contentAddressableStorage.EXPECT().Get( + gomock.Any(), + digest.MustNewDigest("ubuntu1804", remoteexecution.DigestFunction_SHA256, "0000000000000000000000000000000000000000000000000000000000000002", 234), + ).Return(buffer.NewProtoBufferFromProto(&remoteexecution.Command{ + Arguments: []string{"clang"}, + }, buffer.UserProvided)) + buildDirectory.EXPECT().UploadFile(ctx, path.MustNewComponent("stdout"), gomock.Any()).Return( + digest.MustNewDigest("ubuntu1804", remoteexecution.DigestFunction_SHA256, "0000000000000000000000000000000000000000000000000000000000000005", 567), + nil) + buildDirectory.EXPECT().UploadFile(ctx, path.MustNewComponent("stderr"), gomock.Any()).Return( + digest.MustNewDigest("ubuntu1804", remoteexecution.DigestFunction_SHA256, "0000000000000000000000000000000000000000000000000000000000000006", 678), + nil) + + // Build environment. + buildDirectoryCreator := mock.NewMockBuildDirectoryCreator(ctrl) + buildDirectoryCreator.EXPECT().GetBuildDirectory( + ctx, + digest.MustNewDigest("ubuntu1804", remoteexecution.DigestFunction_SHA256, "0000000000000000000000000000000000000000000000000000000000000001", 123), + false, + ).Return(buildDirectory, nil, nil) + filePool := mock.NewMockFilePool(ctrl) + monitor := mock.NewMockUnreadDirectoryMonitor(ctrl) + buildDirectory.EXPECT().InstallHooks(filePool, gomock.Any()) + + // Input root creation. 
+ buildDirectory.EXPECT().Mkdir(path.MustNewComponent("root"), os.FileMode(0o777)) + inputRootDirectory := mock.NewMockBuildDirectory(ctrl) + buildDirectory.EXPECT().EnterBuildDirectory(path.MustNewComponent("root")).Return(inputRootDirectory, nil) + inputRootDirectory.EXPECT().MergeDirectoryContents( + ctx, + gomock.Any(), + digest.MustNewDigest("ubuntu1804", remoteexecution.DigestFunction_SHA256, "0000000000000000000000000000000000000000000000000000000000000003", 345), + monitor, + ).Return(nil) + buildDirectory.EXPECT().Mkdir(path.MustNewComponent("tmp"), os.FileMode(0o777)) + + // Simulate a timeout by running the command with a timeout of + // zero seconds. This should cause an immediate build failure. + runner := mock.NewMockRunnerClient(ctrl) + runner.EXPECT().Run(gomock.Any(), &runner_pb.RunRequest{ + Arguments: []string{"clang"}, + EnvironmentVariables: map[string]string{}, + WorkingDirectory: "", + StdoutPath: "stdout", + StderrPath: "stderr", + InputRootDirectory: "root", + TemporaryDirectory: "tmp", + }).DoAndReturn(func(ctx context.Context, request *runner_pb.RunRequest, opts ...grpc.CallOption) (*runner_pb.RunResponse, error) { + <-ctx.Done() + return nil, util.StatusFromContext(ctx) + }) + inputRootDirectory.EXPECT().Close() + buildDirectory.EXPECT().Close() + clock := mock.NewMockClock(ctrl) + clock.EXPECT().NewContextWithTimeout(gomock.Any(), time.Hour).DoAndReturn(func(parent context.Context, timeout time.Duration) (context.Context, context.CancelFunc) { + return context.WithTimeout(parent, 0) + }) + localBuildExecutor := builder.NewLocalBuildExecutor(contentAddressableStorage, buildDirectoryCreator, runner, clock, nil, 10000, map[string]string{}) + + metadata := make(chan *remoteworker.CurrentState_Executing, 10) + executeResponse := localBuildExecutor.Execute( + ctx, + filePool, + monitor, + digest.MustNewFunction("ubuntu1804", remoteexecution.DigestFunction_SHA256), + &remoteworker.DesiredState_Executing{ + ActionDigest: 
&remoteexecution.Digest{ + Hash: "0000000000000000000000000000000000000000000000000000000000000001", + SizeBytes: 123, + }, + Action: &remoteexecution.Action{ + CommandDigest: &remoteexecution.Digest{ + Hash: "0000000000000000000000000000000000000000000000000000000000000002", + SizeBytes: 234, + }, + InputRootDigest: &remoteexecution.Digest{ + Hash: "0000000000000000000000000000000000000000000000000000000000000003", + SizeBytes: 345, + }, + Timeout: &durationpb.Duration{Seconds: 3600}, + }, + }, + metadata) + testutil.RequireEqualProto(t, &remoteexecution.ExecuteResponse{ + Result: &remoteexecution.ActionResult{ + StdoutDigest: &remoteexecution.Digest{ + Hash: "0000000000000000000000000000000000000000000000000000000000000005", + SizeBytes: 567, + }, + StderrDigest: &remoteexecution.Digest{ + Hash: "0000000000000000000000000000000000000000000000000000000000000006", + SizeBytes: 678, + }, + ExecutionMetadata: &remoteexecution.ExecutedActionMetadata{}, + }, + Status: status.New(codes.DeadlineExceeded, "Failed to run command: context deadline exceeded").Proto(), + }, executeResponse) +} + +func TestLocalBuildExecutorCharacterDeviceNodeCreationFailed(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + // Build directory. + buildDirectory := mock.NewMockBuildDirectory(ctrl) + contentAddressableStorage := mock.NewMockBlobAccess(ctrl) + + // Build environment. + buildDirectoryCreator := mock.NewMockBuildDirectoryCreator(ctrl) + buildDirectoryCreator.EXPECT().GetBuildDirectory( + ctx, + digest.MustNewDigest("ubuntu1804", remoteexecution.DigestFunction_SHA256, "0000000000000000000000000000000000000000000000000000000000000001", 123), + false, + ).Return(buildDirectory, nil, nil) + filePool := mock.NewMockFilePool(ctrl) + monitor := mock.NewMockUnreadDirectoryMonitor(ctrl) + buildDirectory.EXPECT().InstallHooks(filePool, gomock.Any()) + + // Input root creation. 
+ buildDirectory.EXPECT().Mkdir(path.MustNewComponent("root"), os.FileMode(0o777)) + inputRootDirectory := mock.NewMockBuildDirectory(ctrl) + buildDirectory.EXPECT().EnterBuildDirectory(path.MustNewComponent("root")).Return(inputRootDirectory, nil) + inputRootDirectory.EXPECT().MergeDirectoryContents( + ctx, + gomock.Any(), + digest.MustNewDigest("ubuntu1804", remoteexecution.DigestFunction_SHA256, "0000000000000000000000000000000000000000000000000000000000000003", 345), + monitor, + ).Return(nil) + inputRootDirectory.EXPECT().Mkdir(path.MustNewComponent("dev"), os.FileMode(0o777)) + inputRootDevDirectory := mock.NewMockBuildDirectory(ctrl) + inputRootDirectory.EXPECT().EnterBuildDirectory(path.MustNewComponent("dev")).Return(inputRootDevDirectory, nil) + inputRootDevDirectory.EXPECT().Mknod( + path.MustNewComponent("null"), + os.FileMode(os.ModeDevice|os.ModeCharDevice|0o666), + filesystem.NewDeviceNumberFromMajorMinor(1, 3), + ).Return(status.Error(codes.Internal, "Device node creation failed")) + inputRootDevDirectory.EXPECT().Close() + inputRootDirectory.EXPECT().Close() + buildDirectory.EXPECT().Close() + runner := mock.NewMockRunnerClient(ctrl) + clock := mock.NewMockClock(ctrl) + inputRootCharacterDevices := map[path.Component]filesystem.DeviceNumber{ + path.MustNewComponent("null"): filesystem.NewDeviceNumberFromMajorMinor(1, 3), + } + localBuildExecutor := builder.NewLocalBuildExecutor(contentAddressableStorage, buildDirectoryCreator, runner, clock, inputRootCharacterDevices, 10000, map[string]string{}) + + metadata := make(chan *remoteworker.CurrentState_Executing, 10) + executeResponse := localBuildExecutor.Execute( + ctx, + filePool, + monitor, + digest.MustNewFunction("ubuntu1804", remoteexecution.DigestFunction_SHA256), + &remoteworker.DesiredState_Executing{ + ActionDigest: &remoteexecution.Digest{ + Hash: "0000000000000000000000000000000000000000000000000000000000000001", + SizeBytes: 123, + }, + Action: &remoteexecution.Action{ + InputRootDigest: 
&remoteexecution.Digest{ + Hash: "0000000000000000000000000000000000000000000000000000000000000003", + SizeBytes: 345, + }, + Timeout: &durationpb.Duration{Seconds: 3600}, + }, + }, + metadata) + testutil.RequireEqualProto(t, &remoteexecution.ExecuteResponse{ + Result: &remoteexecution.ActionResult{ + ExecutionMetadata: &remoteexecution.ExecutedActionMetadata{}, + }, + Status: status.New(codes.Internal, "Failed to create character device \"null\": Device node creation failed").Proto(), + }, executeResponse) +} diff --git a/pkg/builder/logging_build_executor.go b/pkg/builder/logging_build_executor.go new file mode 100644 index 0000000..e1917ae --- /dev/null +++ b/pkg/builder/logging_build_executor.go @@ -0,0 +1,51 @@ +package builder + +import ( + "context" + "log" + "net/url" + + remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + re_filesystem "github.com/buildbarn/bb-remote-execution/pkg/filesystem" + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/access" + "github.com/buildbarn/bb-remote-execution/pkg/proto/remoteworker" + re_util "github.com/buildbarn/bb-remote-execution/pkg/util" + "github.com/buildbarn/bb-storage/pkg/digest" + + "google.golang.org/protobuf/encoding/protojson" +) + +type loggingBuildExecutor struct { + BuildExecutor + browserURL *url.URL +} + +// NewLoggingBuildExecutor wraps an existing BuildExecutor, adding basic +// logging. A link to bb_browser is printed prior to executing the +// action. A JSON representation of the ExecuteResponse is logged after +// completion. 
+func NewLoggingBuildExecutor(base BuildExecutor, browserURL *url.URL) BuildExecutor { + return &loggingBuildExecutor{ + BuildExecutor: base, + browserURL: browserURL, + } +} + +func (be *loggingBuildExecutor) Execute(ctx context.Context, filePool re_filesystem.FilePool, monitor access.UnreadDirectoryMonitor, digestFunction digest.Function, request *remoteworker.DesiredState_Executing, executionStateUpdates chan<- *remoteworker.CurrentState_Executing) *remoteexecution.ExecuteResponse { + // Print URL to bb_browser prior to execution. + if actionDigest, err := digestFunction.NewDigestFromProto(request.ActionDigest); err == nil { + log.Printf("Action: %s with timeout %s", re_util.GetBrowserURL(be.browserURL, "action", actionDigest), request.Action.GetTimeout().AsDuration()) + } else { + log.Print("Action: Failed to extract digest: ", err) + } + + response := be.BuildExecutor.Execute(ctx, filePool, monitor, digestFunction, request, executionStateUpdates) + + // Print execution response to log. 
+ if responseJSON, err := protojson.Marshal(response); err == nil { + log.Print("ExecuteResponse: ", string(responseJSON)) + } else { + log.Print("ExecuteResponse: Failed to marshal: ", err) + } + return response +} diff --git a/pkg/builder/metrics_build_executor.go b/pkg/builder/metrics_build_executor.go new file mode 100644 index 0000000..169bd57 --- /dev/null +++ b/pkg/builder/metrics_build_executor.go @@ -0,0 +1,351 @@ +package builder + +import ( + "context" + "sync" + + remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + "github.com/buildbarn/bb-remote-execution/pkg/filesystem" + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/access" + "github.com/buildbarn/bb-remote-execution/pkg/proto/remoteworker" + "github.com/buildbarn/bb-remote-execution/pkg/proto/resourceusage" + "github.com/buildbarn/bb-storage/pkg/digest" + "github.com/buildbarn/bb-storage/pkg/util" + "github.com/prometheus/client_golang/prometheus" + + "google.golang.org/protobuf/types/known/durationpb" + "google.golang.org/protobuf/types/known/timestamppb" +) + +var ( + buildExecutorPrometheusMetrics sync.Once + + // Timestamps stored in ExecutedActionMetadata. + buildExecutorDurationSeconds = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "buildbarn", + Subsystem: "builder", + Name: "build_executor_duration_seconds", + Help: "Amount of time spent per build execution stage, in seconds.", + Buckets: util.DecimalExponentialBuckets(-3, 6, 2), + }, + []string{"result", "grpc_code", "stage"}) + buildExecutorVirtualExecutionDuration = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "buildbarn", + Subsystem: "builder", + Name: "build_executor_virtual_execution_duration", + Help: "Amount of time the execution timeout was compensated, in seconds.", + Buckets: util.DecimalExponentialBuckets(-3, 6, 2), + }, + []string{"result", "grpc_code"}) + + // Metrics for FilePoolResourceUsage. 
+ buildExecutorFilePoolFilesCreated = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "buildbarn", + Subsystem: "builder", + Name: "build_executor_file_pool_files_created", + Help: "Number of files created by a build action.", + Buckets: util.DecimalExponentialBuckets(0, 6, 2), + }, + []string{"result", "grpc_code"}) + buildExecutorFilePoolFilesCountPeak = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "buildbarn", + Subsystem: "builder", + Name: "build_executor_file_pool_files_count_peak", + Help: "Peak number of files created by a build action.", + Buckets: util.DecimalExponentialBuckets(0, 6, 2), + }, + []string{"result", "grpc_code"}) + buildExecutorFilePoolFilesSizeBytesPeak = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "buildbarn", + Subsystem: "builder", + Name: "build_executor_file_pool_files_size_bytes_peak", + Help: "Peak size of files created by a build action, in bytes.", + Buckets: prometheus.ExponentialBuckets(1.0, 2.0, 33), + }, + []string{"result", "grpc_code"}) + buildExecutorFilePoolFilesOperationsCount = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "buildbarn", + Subsystem: "builder", + Name: "build_executor_file_pool_operations_count", + Help: "Number of file pool operations performed by build actions.", + Buckets: util.DecimalExponentialBuckets(0, 6, 2), + }, + []string{"result", "grpc_code", "operation"}) + buildExecutorFilePoolFilesOperationsSizeBytes = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "buildbarn", + Subsystem: "builder", + Name: "build_executor_file_pool_operations_size_bytes", + Help: "Total size of file pool operations performed by build actions, in bytes.", + Buckets: prometheus.ExponentialBuckets(1.0, 2.0, 33), + }, + []string{"result", "grpc_code", "operation"}) + + // Metrics for InputRootResourceUsage. 
+ buildExecutorInputRootDirectoriesResolved = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "buildbarn", + Subsystem: "builder", + Name: "build_executor_input_root_directories_resolved", + Help: "Number of directories in the input root for which directory nodes in the virtual file system were instantiated.", + Buckets: util.DecimalExponentialBuckets(0, 6, 2), + }, + []string{"result", "grpc_code"}) + buildExecutorInputRootDirectoriesRead = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "buildbarn", + Subsystem: "builder", + Name: "build_executor_input_root_directories_read", + Help: "Number of directories in the input root whose contents were read from the Content Addressable Storage (CAS).", + Buckets: util.DecimalExponentialBuckets(0, 6, 2), + }, + []string{"result", "grpc_code"}) + buildExecutorInputRootFilesRead = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "buildbarn", + Subsystem: "builder", + Name: "build_executor_input_root_files_read", + Help: "Number of files in the input root whose contents were read from the Content Addressable Storage (CAS).", + Buckets: util.DecimalExponentialBuckets(0, 6, 2), + }, + []string{"result", "grpc_code"}) + + // Metrics for POSIXResourceUsage. 
+ buildExecutorPOSIXUserTime = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "buildbarn", + Subsystem: "builder", + Name: "build_executor_posix_user_time", + Help: "Amount of time spent in userspace by build actions, in seconds.", + Buckets: util.DecimalExponentialBuckets(-3, 6, 2), + }, + []string{"result", "grpc_code"}) + buildExecutorPOSIXSystemTime = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "buildbarn", + Subsystem: "builder", + Name: "build_executor_posix_system_time", + Help: "Amount of time spent in kernelspace by build actions, in seconds.", + Buckets: util.DecimalExponentialBuckets(-3, 6, 2), + }, + []string{"result", "grpc_code"}) + buildExecutorPOSIXMaximumResidentSetSize = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "buildbarn", + Subsystem: "builder", + Name: "build_executor_posix_maximum_resident_set_size", + Help: "Maximum resident set size of build actions, in bytes.", + Buckets: prometheus.ExponentialBuckets(1024.0, 2.0, 23), + }, + []string{"result", "grpc_code"}) + buildExecutorPOSIXPageReclaims = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "buildbarn", + Subsystem: "builder", + Name: "build_executor_posix_page_reclaims", + Help: "Number of page reclaims caused by build actions.", + Buckets: util.DecimalExponentialBuckets(0, 9, 2), + }, + []string{"result", "grpc_code"}) + buildExecutorPOSIXPageFaults = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "buildbarn", + Subsystem: "builder", + Name: "build_executor_posix_page_faults", + Help: "Number of page faults caused by build actions.", + Buckets: util.DecimalExponentialBuckets(0, 9, 2), + }, + []string{"result", "grpc_code"}) + buildExecutorPOSIXSwaps = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "buildbarn", + Subsystem: "builder", + Name: "build_executor_posix_swaps", + Help: "Number of swaps caused by build actions.", + Buckets: 
util.DecimalExponentialBuckets(0, 9, 2), + }, + []string{"result", "grpc_code"}) + buildExecutorPOSIXBlockInputOperations = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "buildbarn", + Subsystem: "builder", + Name: "build_executor_posix_block_input_operations", + Help: "Number of block input operations performed by build actions.", + Buckets: util.DecimalExponentialBuckets(0, 9, 2), + }, + []string{"result", "grpc_code"}) + buildExecutorPOSIXBlockOutputOperations = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "buildbarn", + Subsystem: "builder", + Name: "build_executor_posix_block_output_operations", + Help: "Number of block output operations performed by build actions.", + Buckets: util.DecimalExponentialBuckets(0, 9, 2), + }, + []string{"result", "grpc_code"}) + buildExecutorPOSIXMessagesSent = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "buildbarn", + Subsystem: "builder", + Name: "build_executor_posix_messages_sent", + Help: "Number of messages sent by build actions.", + Buckets: util.DecimalExponentialBuckets(0, 9, 2), + }, + []string{"result", "grpc_code"}) + buildExecutorPOSIXMessagesReceived = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "buildbarn", + Subsystem: "builder", + Name: "build_executor_posix_messages_received", + Help: "Number of messages received by build actions.", + Buckets: util.DecimalExponentialBuckets(0, 9, 2), + }, + []string{"result", "grpc_code"}) + buildExecutorPOSIXSignalsReceived = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "buildbarn", + Subsystem: "builder", + Name: "build_executor_posix_signals_received", + Help: "Number of signals received by build actions.", + Buckets: util.DecimalExponentialBuckets(0, 6, 2), + }, + []string{"result", "grpc_code"}) + buildExecutorPOSIXVoluntaryContextSwitches = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "buildbarn", + Subsystem: "builder", + Name: 
"build_executor_posix_voluntary_context_switches", + Help: "Number of voluntary context switches caused by build actions.", + Buckets: util.DecimalExponentialBuckets(0, 9, 2), + }, + []string{"result", "grpc_code"}) + buildExecutorPOSIXInvoluntaryContextSwitches = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "buildbarn", + Subsystem: "builder", + Name: "build_executor_posix_involuntary_context_switches", + Help: "Number of involuntary context switches caused by build actions.", + Buckets: util.DecimalExponentialBuckets(0, 9, 2), + }, + []string{"result", "grpc_code"}) +) + +type metricsBuildExecutor struct { + BuildExecutor +} + +// NewMetricsBuildExecutor creates a decorator for BuildExecutor that +// exposes the statistics stored in ExecutedActionMetadata as Prometheus +// metrics. +func NewMetricsBuildExecutor(buildExecutor BuildExecutor) BuildExecutor { + buildExecutorPrometheusMetrics.Do(func() { + prometheus.MustRegister(buildExecutorDurationSeconds) + prometheus.MustRegister(buildExecutorVirtualExecutionDuration) + + prometheus.MustRegister(buildExecutorFilePoolFilesCreated) + prometheus.MustRegister(buildExecutorFilePoolFilesCountPeak) + prometheus.MustRegister(buildExecutorFilePoolFilesSizeBytesPeak) + prometheus.MustRegister(buildExecutorFilePoolFilesOperationsCount) + prometheus.MustRegister(buildExecutorFilePoolFilesOperationsSizeBytes) + + prometheus.MustRegister(buildExecutorInputRootDirectoriesResolved) + prometheus.MustRegister(buildExecutorInputRootDirectoriesRead) + prometheus.MustRegister(buildExecutorInputRootFilesRead) + + prometheus.MustRegister(buildExecutorPOSIXUserTime) + prometheus.MustRegister(buildExecutorPOSIXSystemTime) + prometheus.MustRegister(buildExecutorPOSIXMaximumResidentSetSize) + prometheus.MustRegister(buildExecutorPOSIXPageReclaims) + prometheus.MustRegister(buildExecutorPOSIXPageFaults) + prometheus.MustRegister(buildExecutorPOSIXSwaps) + 
prometheus.MustRegister(buildExecutorPOSIXBlockInputOperations) + prometheus.MustRegister(buildExecutorPOSIXBlockOutputOperations) + prometheus.MustRegister(buildExecutorPOSIXMessagesSent) + prometheus.MustRegister(buildExecutorPOSIXMessagesReceived) + prometheus.MustRegister(buildExecutorPOSIXSignalsReceived) + prometheus.MustRegister(buildExecutorPOSIXVoluntaryContextSwitches) + prometheus.MustRegister(buildExecutorPOSIXInvoluntaryContextSwitches) + }) + + return &metricsBuildExecutor{ + BuildExecutor: buildExecutor, + } +} + +func observeDuration(histogram prometheus.Observer, pb *durationpb.Duration) { + if pb == nil { + return + } + histogram.Observe(pb.AsDuration().Seconds()) +} + +func observeTimestampDelta(histogram prometheus.Observer, pbStart, pbCompleted *timestamppb.Timestamp) { + if pbStart == nil || pbCompleted == nil { + return + } + histogram.Observe(pbCompleted.AsTime().Sub(pbStart.AsTime()).Seconds()) +} + +func (be *metricsBuildExecutor) Execute(ctx context.Context, filePool filesystem.FilePool, monitor access.UnreadDirectoryMonitor, digestFunction digest.Function, request *remoteworker.DesiredState_Executing, executionStateUpdates chan<- *remoteworker.CurrentState_Executing) *remoteexecution.ExecuteResponse { + response := be.BuildExecutor.Execute(ctx, filePool, monitor, digestFunction, request, executionStateUpdates) + result, grpcCode := GetResultAndGRPCCodeFromExecuteResponse(response) + + // Expose metrics for timestamps stored in ExecutedActionMetadata. 
+ metadata := response.Result.ExecutionMetadata + observeTimestampDelta( + buildExecutorDurationSeconds.WithLabelValues(result, grpcCode, "FetchingInputs"), + metadata.InputFetchStartTimestamp, metadata.InputFetchCompletedTimestamp) + observeTimestampDelta( + buildExecutorDurationSeconds.WithLabelValues(result, grpcCode, "Running"), + metadata.ExecutionStartTimestamp, metadata.ExecutionCompletedTimestamp) + observeTimestampDelta( + buildExecutorDurationSeconds.WithLabelValues(result, grpcCode, "UploadingOutputs"), + metadata.OutputUploadStartTimestamp, metadata.OutputUploadCompletedTimestamp) + observeDuration( + buildExecutorVirtualExecutionDuration.WithLabelValues(result, grpcCode), + metadata.VirtualExecutionDuration) + + for _, auxiliaryMetadata := range metadata.AuxiliaryMetadata { + var filePool resourceusage.FilePoolResourceUsage + var inputRoot resourceusage.InputRootResourceUsage + var posix resourceusage.POSIXResourceUsage + if auxiliaryMetadata.UnmarshalTo(&filePool) == nil { + // Expose metrics stored in FilePoolResourceUsage. 
+ buildExecutorFilePoolFilesCreated.WithLabelValues(result, grpcCode).Observe(float64(filePool.FilesCreated)) + buildExecutorFilePoolFilesCountPeak.WithLabelValues(result, grpcCode).Observe(float64(filePool.FilesCountPeak)) + buildExecutorFilePoolFilesSizeBytesPeak.WithLabelValues(result, grpcCode).Observe(float64(filePool.FilesSizeBytesPeak)) + buildExecutorFilePoolFilesOperationsCount.WithLabelValues(result, grpcCode, "Read").Observe(float64(filePool.ReadsCount)) + buildExecutorFilePoolFilesOperationsSizeBytes.WithLabelValues(result, grpcCode, "Read").Observe(float64(filePool.ReadsSizeBytes)) + buildExecutorFilePoolFilesOperationsCount.WithLabelValues(result, grpcCode, "Write").Observe(float64(filePool.WritesCount)) + buildExecutorFilePoolFilesOperationsSizeBytes.WithLabelValues(result, grpcCode, "Write").Observe(float64(filePool.WritesSizeBytes)) + buildExecutorFilePoolFilesOperationsCount.WithLabelValues(result, grpcCode, "Truncate").Observe(float64(filePool.TruncatesCount)) + } else if auxiliaryMetadata.UnmarshalTo(&inputRoot) == nil { + buildExecutorInputRootDirectoriesResolved.WithLabelValues(result, grpcCode).Observe(float64(inputRoot.DirectoriesResolved)) + buildExecutorInputRootDirectoriesRead.WithLabelValues(result, grpcCode).Observe(float64(inputRoot.DirectoriesRead)) + buildExecutorInputRootFilesRead.WithLabelValues(result, grpcCode).Observe(float64(inputRoot.FilesRead)) + } else if auxiliaryMetadata.UnmarshalTo(&posix) == nil { + // Expose metrics stored in POSIXResourceUsage. 
+ observeDuration(buildExecutorPOSIXUserTime.WithLabelValues(result, grpcCode), posix.UserTime) + observeDuration(buildExecutorPOSIXSystemTime.WithLabelValues(result, grpcCode), posix.SystemTime) + buildExecutorPOSIXMaximumResidentSetSize.WithLabelValues(result, grpcCode).Observe(float64(posix.MaximumResidentSetSize)) + buildExecutorPOSIXPageReclaims.WithLabelValues(result, grpcCode).Observe(float64(posix.PageReclaims)) + buildExecutorPOSIXPageFaults.WithLabelValues(result, grpcCode).Observe(float64(posix.PageFaults)) + buildExecutorPOSIXSwaps.WithLabelValues(result, grpcCode).Observe(float64(posix.Swaps)) + buildExecutorPOSIXBlockInputOperations.WithLabelValues(result, grpcCode).Observe(float64(posix.BlockInputOperations)) + buildExecutorPOSIXBlockOutputOperations.WithLabelValues(result, grpcCode).Observe(float64(posix.BlockOutputOperations)) + buildExecutorPOSIXMessagesSent.WithLabelValues(result, grpcCode).Observe(float64(posix.MessagesSent)) + buildExecutorPOSIXMessagesReceived.WithLabelValues(result, grpcCode).Observe(float64(posix.MessagesReceived)) + buildExecutorPOSIXSignalsReceived.WithLabelValues(result, grpcCode).Observe(float64(posix.SignalsReceived)) + buildExecutorPOSIXVoluntaryContextSwitches.WithLabelValues(result, grpcCode).Observe(float64(posix.VoluntaryContextSwitches)) + buildExecutorPOSIXInvoluntaryContextSwitches.WithLabelValues(result, grpcCode).Observe(float64(posix.InvoluntaryContextSwitches)) + } + } + + return response +} diff --git a/pkg/builder/naive_build_directory.go b/pkg/builder/naive_build_directory.go new file mode 100644 index 0000000..26572dc --- /dev/null +++ b/pkg/builder/naive_build_directory.go @@ -0,0 +1,181 @@ +package builder + +import ( + "context" + "io" + "math" + + "github.com/buildbarn/bb-remote-execution/pkg/cas" + re_filesystem "github.com/buildbarn/bb-remote-execution/pkg/filesystem" + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/access" + "github.com/buildbarn/bb-storage/pkg/blobstore" + 
"github.com/buildbarn/bb-storage/pkg/blobstore/buffer" + "github.com/buildbarn/bb-storage/pkg/digest" + "github.com/buildbarn/bb-storage/pkg/filesystem" + "github.com/buildbarn/bb-storage/pkg/filesystem/path" + "github.com/buildbarn/bb-storage/pkg/util" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +type naiveBuildDirectory struct { + filesystem.DirectoryCloser + directoryFetcher cas.DirectoryFetcher + fileFetcher cas.FileFetcher + contentAddressableStorage blobstore.BlobAccess +} + +// NewNaiveBuildDirectory creates a BuildDirectory that is backed by a +// simple filesystem.Directory with all of the operations implemented in +// a naive way. Namely, MergeDirectoryContents() recursively loads all +// directories from the Content Addressable Storage (CAS) and requests +// that all of their files are copied into the build directory. +// +// This implementation is intended to be used in combination with +// regular local file systems. The downside of such file systems is that +// we cannot populate them on demand. All of the input files must be +// present before invoking the build action. 
+func NewNaiveBuildDirectory(directory filesystem.DirectoryCloser, directoryFetcher cas.DirectoryFetcher, fileFetcher cas.FileFetcher, contentAddressableStorage blobstore.BlobAccess) BuildDirectory { + return &naiveBuildDirectory{ + DirectoryCloser: directory, + directoryFetcher: directoryFetcher, + fileFetcher: fileFetcher, + contentAddressableStorage: contentAddressableStorage, + } +} + +func (d *naiveBuildDirectory) EnterBuildDirectory(name path.Component) (BuildDirectory, error) { + child, err := d.EnterDirectory(name) + if err != nil { + return nil, err + } + return &naiveBuildDirectory{ + DirectoryCloser: child, + directoryFetcher: d.directoryFetcher, + fileFetcher: d.fileFetcher, + contentAddressableStorage: d.contentAddressableStorage, + }, nil +} + +func (d *naiveBuildDirectory) EnterParentPopulatableDirectory(name path.Component) (ParentPopulatableDirectory, error) { + return d.EnterBuildDirectory(name) +} + +func (d *naiveBuildDirectory) EnterUploadableDirectory(name path.Component) (UploadableDirectory, error) { + return d.EnterBuildDirectory(name) +} + +func (d *naiveBuildDirectory) InstallHooks(filePool re_filesystem.FilePool, errorLogger util.ErrorLogger) { + // Simply ignore the provided hooks, as POSIX offers no way to + // install them. This means no quota enforcement and detection + // of I/O errors is performed. +} + +func (d *naiveBuildDirectory) mergeDirectoryContents(ctx context.Context, digest digest.Digest, inputDirectory filesystem.Directory, pathTrace *path.Trace) error { + // Obtain directory. + directory, err := d.directoryFetcher.GetDirectory(ctx, digest) + if err != nil { + return util.StatusWrapf(err, "Failed to obtain input directory %#v", pathTrace.String()) + } + + // Create children. 
+ digestFunction := digest.GetDigestFunction() + for _, file := range directory.Files { + component, ok := path.NewComponent(file.Name) + if !ok { + return status.Errorf(codes.InvalidArgument, "File %#v has an invalid name", file.Name) + } + childPathTrace := pathTrace.Append(component) + childDigest, err := digestFunction.NewDigestFromProto(file.Digest) + if err != nil { + return util.StatusWrapf(err, "Failed to extract digest for input file %#v", childPathTrace.String()) + } + if err := d.fileFetcher.GetFile(ctx, childDigest, inputDirectory, component, file.IsExecutable); err != nil { + return util.StatusWrapf(err, "Failed to obtain input file %#v", childPathTrace.String()) + } + } + for _, directory := range directory.Directories { + component, ok := path.NewComponent(directory.Name) + if !ok { + return status.Errorf(codes.InvalidArgument, "Directory %#v has an invalid name", directory.Name) + } + childPathTrace := pathTrace.Append(component) + childDigest, err := digestFunction.NewDigestFromProto(directory.Digest) + if err != nil { + return util.StatusWrapf(err, "Failed to extract digest for input directory %#v", childPathTrace.String()) + } + if err := inputDirectory.Mkdir(component, 0o777); err != nil { + return util.StatusWrapf(err, "Failed to create input directory %#v", childPathTrace.String()) + } + childDirectory, err := inputDirectory.EnterDirectory(component) + if err != nil { + return util.StatusWrapf(err, "Failed to enter input directory %#v", childPathTrace.String()) + } + err = d.mergeDirectoryContents(ctx, childDigest, childDirectory, childPathTrace) + childDirectory.Close() + if err != nil { + return err + } + } + for _, symlink := range directory.Symlinks { + component, ok := path.NewComponent(symlink.Name) + if !ok { + return status.Errorf(codes.InvalidArgument, "Symlink %#v has an invalid name", symlink.Name) + } + childPathTrace := pathTrace.Append(component) + if err := inputDirectory.Symlink(symlink.Target, component); err != nil { + return 
util.StatusWrapf(err, "Failed to create input symlink %#v", childPathTrace.String()) + } + } + return nil +} + +func (d *naiveBuildDirectory) MergeDirectoryContents(ctx context.Context, errorLogger util.ErrorLogger, digest digest.Digest, monitor access.UnreadDirectoryMonitor) error { + return d.mergeDirectoryContents(ctx, digest, d.DirectoryCloser, nil) +} + +func (d *naiveBuildDirectory) UploadFile(ctx context.Context, name path.Component, digestFunction digest.Function) (digest.Digest, error) { + file, err := d.OpenRead(name) + if err != nil { + return digest.BadDigest, err + } + + // Walk through the file to compute the digest. + digestGenerator := digestFunction.NewGenerator(math.MaxInt64) + sizeBytes, err := io.Copy(digestGenerator, io.NewSectionReader(file, 0, math.MaxInt64)) + if err != nil { + file.Close() + return digest.BadDigest, util.StatusWrap(err, "Failed to compute file digest") + } + blobDigest := digestGenerator.Sum() + + // Rewind and store it. Limit uploading to the size that was + // used to compute the digest. This ensures uploads succeed, + // even if more data gets appended in the meantime. This is not + // uncommon, especially for stdout and stderr logs. + if err := d.contentAddressableStorage.Put( + ctx, + blobDigest, + buffer.NewCASBufferFromReader( + blobDigest, + newSectionReadCloser(file, 0, sizeBytes), + buffer.UserProvided)); err != nil { + return digest.BadDigest, util.StatusWrap(err, "Failed to upload file") + } + return blobDigest, nil +} + +// newSectionReadCloser returns an io.ReadCloser that reads from r at a +// given offset, but stops with EOF after n bytes. This function is +// identical to io.NewSectionReader(), except that it provides an +// io.ReadCloser instead of an io.Reader. 
+func newSectionReadCloser(r filesystem.FileReader, off, n int64) io.ReadCloser { + return &struct { + io.SectionReader + io.Closer + }{ + SectionReader: *io.NewSectionReader(r, off, n), + Closer: r, + } +} diff --git a/pkg/builder/naive_build_directory_test.go b/pkg/builder/naive_build_directory_test.go new file mode 100644 index 0000000..4abfea4 --- /dev/null +++ b/pkg/builder/naive_build_directory_test.go @@ -0,0 +1,531 @@ +package builder_test + +import ( + "context" + "io" + "os" + "syscall" + "testing" + + remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + "github.com/buildbarn/bb-remote-execution/internal/mock" + "github.com/buildbarn/bb-remote-execution/pkg/builder" + "github.com/buildbarn/bb-storage/pkg/blobstore/buffer" + "github.com/buildbarn/bb-storage/pkg/digest" + "github.com/buildbarn/bb-storage/pkg/filesystem/path" + "github.com/buildbarn/bb-storage/pkg/testutil" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func TestNaiveBuildDirectorySuccess(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + directoryFetcher := mock.NewMockDirectoryFetcher(ctrl) + directoryFetcher.EXPECT().GetDirectory( + ctx, + digest.MustNewDigest("netbsd", remoteexecution.DigestFunction_SHA256, "7777777777777777777777777777777777777777777777777777777777777777", 42), + ).Return(&remoteexecution.Directory{ + Directories: []*remoteexecution.DirectoryNode{ + { + Name: "directory", + Digest: &remoteexecution.Digest{ + Hash: "8888888888888888888888888888888888888888888888888888888888888888", + SizeBytes: 123, + }, + }, + }, + Files: []*remoteexecution.FileNode{ + { + Name: "non-executable", + Digest: &remoteexecution.Digest{ + Hash: "9999999999999999999999999999999999999999999999999999999999999999", + SizeBytes: 512, + }, + }, + { + Name: "executable", + Digest: &remoteexecution.Digest{ + Hash: 
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + SizeBytes: 512, + }, + IsExecutable: true, + }, + }, + Symlinks: []*remoteexecution.SymlinkNode{ + { + Name: "link-to-executable", + Target: "executable", + }, + }, + }, nil) + directoryFetcher.EXPECT().GetDirectory( + ctx, + digest.MustNewDigest("netbsd", remoteexecution.DigestFunction_SHA256, "8888888888888888888888888888888888888888888888888888888888888888", 123), + ).Return(&remoteexecution.Directory{ + Symlinks: []*remoteexecution.SymlinkNode{ + { + Name: "link-to-non-executable", + Target: "../non-executable", + }, + }, + }, nil) + errorLogger := mock.NewMockErrorLogger(ctrl) + buildDirectory := mock.NewMockDirectoryCloser(ctrl) + buildDirectory.EXPECT().Mkdir(path.MustNewComponent("directory"), os.FileMode(0o777)).Return(nil) + nestedDirectory := mock.NewMockDirectoryCloser(ctrl) + buildDirectory.EXPECT().EnterDirectory(path.MustNewComponent("directory")).Return(nestedDirectory, nil) + nestedDirectory.EXPECT().Symlink("../non-executable", path.MustNewComponent("link-to-non-executable")).Return(nil) + nestedDirectory.EXPECT().Close() + fileFetcher := mock.NewMockFileFetcher(ctrl) + fileFetcher.EXPECT().GetFile( + ctx, + digest.MustNewDigest("netbsd", remoteexecution.DigestFunction_SHA256, "9999999999999999999999999999999999999999999999999999999999999999", 512), + buildDirectory, + path.MustNewComponent("non-executable"), + false).Return(nil) + fileFetcher.EXPECT().GetFile( + ctx, + digest.MustNewDigest("netbsd", remoteexecution.DigestFunction_SHA256, "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", 512), + buildDirectory, + path.MustNewComponent("executable"), + true).Return(nil) + buildDirectory.EXPECT().Symlink("executable", + path.MustNewComponent("link-to-executable")).Return(nil) + contentAddressableStorage := mock.NewMockBlobAccess(ctrl) + inputRootPopulator := builder.NewNaiveBuildDirectory(buildDirectory, directoryFetcher, fileFetcher, contentAddressableStorage) + 
+ err := inputRootPopulator.MergeDirectoryContents( + ctx, + errorLogger, + digest.MustNewDigest("netbsd", remoteexecution.DigestFunction_SHA256, "7777777777777777777777777777777777777777777777777777777777777777", 42), + nil) + require.NoError(t, err) +} + +func TestNaiveBuildDirectoryInputRootNotInStorage(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + directoryFetcher := mock.NewMockDirectoryFetcher(ctrl) + directoryFetcher.EXPECT().GetDirectory( + ctx, + digest.MustNewDigest("netbsd", remoteexecution.DigestFunction_SHA256, "7777777777777777777777777777777777777777777777777777777777777777", 42), + ).Return(nil, status.Error(codes.Internal, "Storage is offline")) + errorLogger := mock.NewMockErrorLogger(ctrl) + buildDirectory := mock.NewMockDirectoryCloser(ctrl) + fileFetcher := mock.NewMockFileFetcher(ctrl) + contentAddressableStorage := mock.NewMockBlobAccess(ctrl) + inputRootPopulator := builder.NewNaiveBuildDirectory(buildDirectory, directoryFetcher, fileFetcher, contentAddressableStorage) + + err := inputRootPopulator.MergeDirectoryContents( + ctx, + errorLogger, + digest.MustNewDigest("netbsd", remoteexecution.DigestFunction_SHA256, "7777777777777777777777777777777777777777777777777777777777777777", 42), + nil) + testutil.RequireEqualStatus(t, status.Error(codes.Internal, "Failed to obtain input directory \".\": Storage is offline"), err) +} + +func TestNaiveBuildDirectoryMissingInputDirectoryDigest(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + directoryFetcher := mock.NewMockDirectoryFetcher(ctrl) + directoryFetcher.EXPECT().GetDirectory( + ctx, + digest.MustNewDigest("netbsd", remoteexecution.DigestFunction_SHA256, "7777777777777777777777777777777777777777777777777777777777777777", 42), + ).Return(&remoteexecution.Directory{ + Directories: []*remoteexecution.DirectoryNode{ + { + Name: "Hello", + Digest: &remoteexecution.Digest{ + Hash: 
"8888888888888888888888888888888888888888888888888888888888888888", + SizeBytes: 123, + }, + }, + }, + }, nil) + directoryFetcher.EXPECT().GetDirectory( + ctx, + digest.MustNewDigest("netbsd", remoteexecution.DigestFunction_SHA256, "8888888888888888888888888888888888888888888888888888888888888888", 123), + ).Return(&remoteexecution.Directory{ + Directories: []*remoteexecution.DirectoryNode{ + { + Name: "World", + }, + }, + }, nil) + errorLogger := mock.NewMockErrorLogger(ctrl) + buildDirectory := mock.NewMockDirectoryCloser(ctrl) + buildDirectory.EXPECT().Mkdir(path.MustNewComponent("Hello"), os.FileMode(0o777)).Return(nil) + helloDirectory := mock.NewMockDirectoryCloser(ctrl) + buildDirectory.EXPECT().EnterDirectory(path.MustNewComponent("Hello")).Return(helloDirectory, nil) + helloDirectory.EXPECT().Close() + fileFetcher := mock.NewMockFileFetcher(ctrl) + contentAddressableStorage := mock.NewMockBlobAccess(ctrl) + inputRootPopulator := builder.NewNaiveBuildDirectory(buildDirectory, directoryFetcher, fileFetcher, contentAddressableStorage) + + err := inputRootPopulator.MergeDirectoryContents( + ctx, + errorLogger, + digest.MustNewDigest("netbsd", remoteexecution.DigestFunction_SHA256, "7777777777777777777777777777777777777777777777777777777777777777", 42), + nil) + testutil.RequireEqualStatus(t, status.Error(codes.InvalidArgument, "Failed to extract digest for input directory \"Hello/World\": No digest provided"), err) +} + +func TestNaiveBuildDirectoryDirectoryCreationFailure(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + directoryFetcher := mock.NewMockDirectoryFetcher(ctrl) + directoryFetcher.EXPECT().GetDirectory( + ctx, + digest.MustNewDigest("netbsd", remoteexecution.DigestFunction_SHA256, "7777777777777777777777777777777777777777777777777777777777777777", 42), + ).Return(&remoteexecution.Directory{ + Directories: []*remoteexecution.DirectoryNode{ + { + Name: "Hello", + Digest: &remoteexecution.Digest{ + Hash: 
"8888888888888888888888888888888888888888888888888888888888888888", + SizeBytes: 123, + }, + }, + }, + }, nil) + directoryFetcher.EXPECT().GetDirectory( + ctx, + digest.MustNewDigest("netbsd", remoteexecution.DigestFunction_SHA256, "8888888888888888888888888888888888888888888888888888888888888888", 123), + ).Return(&remoteexecution.Directory{ + Directories: []*remoteexecution.DirectoryNode{ + { + Name: "World", + Digest: &remoteexecution.Digest{ + Hash: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + SizeBytes: 87, + }, + }, + }, + }, nil) + errorLogger := mock.NewMockErrorLogger(ctrl) + buildDirectory := mock.NewMockDirectoryCloser(ctrl) + buildDirectory.EXPECT().Mkdir(path.MustNewComponent("Hello"), os.FileMode(0o777)).Return(nil) + helloDirectory := mock.NewMockDirectoryCloser(ctrl) + buildDirectory.EXPECT().EnterDirectory(path.MustNewComponent("Hello")).Return(helloDirectory, nil) + helloDirectory.EXPECT().Mkdir(path.MustNewComponent("World"), os.FileMode(0o777)).Return(status.Error(codes.DataLoss, "Disk on fire")) + helloDirectory.EXPECT().Close() + fileFetcher := mock.NewMockFileFetcher(ctrl) + contentAddressableStorage := mock.NewMockBlobAccess(ctrl) + inputRootPopulator := builder.NewNaiveBuildDirectory(buildDirectory, directoryFetcher, fileFetcher, contentAddressableStorage) + + err := inputRootPopulator.MergeDirectoryContents( + ctx, + errorLogger, + digest.MustNewDigest("netbsd", remoteexecution.DigestFunction_SHA256, "7777777777777777777777777777777777777777777777777777777777777777", 42), + nil) + testutil.RequireEqualStatus(t, status.Error(codes.DataLoss, "Failed to create input directory \"Hello/World\": Disk on fire"), err) +} + +func TestNaiveBuildDirectoryDirectoryEnterDirectoryFailure(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + directoryFetcher := mock.NewMockDirectoryFetcher(ctrl) + directoryFetcher.EXPECT().GetDirectory( + ctx, + digest.MustNewDigest("netbsd", 
remoteexecution.DigestFunction_SHA256, "7777777777777777777777777777777777777777777777777777777777777777", 42), + ).Return(&remoteexecution.Directory{ + Directories: []*remoteexecution.DirectoryNode{ + { + Name: "Hello", + Digest: &remoteexecution.Digest{ + Hash: "8888888888888888888888888888888888888888888888888888888888888888", + SizeBytes: 123, + }, + }, + }, + }, nil) + directoryFetcher.EXPECT().GetDirectory( + ctx, + digest.MustNewDigest("netbsd", remoteexecution.DigestFunction_SHA256, "8888888888888888888888888888888888888888888888888888888888888888", 123), + ).Return(&remoteexecution.Directory{ + Directories: []*remoteexecution.DirectoryNode{ + { + Name: "World", + Digest: &remoteexecution.Digest{ + Hash: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + SizeBytes: 87, + }, + }, + }, + }, nil) + errorLogger := mock.NewMockErrorLogger(ctrl) + buildDirectory := mock.NewMockDirectoryCloser(ctrl) + buildDirectory.EXPECT().Mkdir(path.MustNewComponent("Hello"), os.FileMode(0o777)).Return(nil) + helloDirectory := mock.NewMockDirectoryCloser(ctrl) + buildDirectory.EXPECT().EnterDirectory(path.MustNewComponent("Hello")).Return(helloDirectory, nil) + helloDirectory.EXPECT().Mkdir(path.MustNewComponent("World"), os.FileMode(0o777)).Return(nil) + helloDirectory.EXPECT().EnterDirectory(path.MustNewComponent("World")).Return(nil, status.Error(codes.PermissionDenied, "Thou shalt not pass!")) + helloDirectory.EXPECT().Close() + fileFetcher := mock.NewMockFileFetcher(ctrl) + contentAddressableStorage := mock.NewMockBlobAccess(ctrl) + inputRootPopulator := builder.NewNaiveBuildDirectory(buildDirectory, directoryFetcher, fileFetcher, contentAddressableStorage) + + err := inputRootPopulator.MergeDirectoryContents( + ctx, + errorLogger, + digest.MustNewDigest("netbsd", remoteexecution.DigestFunction_SHA256, "7777777777777777777777777777777777777777777777777777777777777777", 42), + nil) + testutil.RequireEqualStatus(t, status.Error(codes.PermissionDenied, 
"Failed to enter input directory \"Hello/World\": Thou shalt not pass!"), err) +} + +func TestNaiveBuildDirectoryMissingInputFileDigest(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + directoryFetcher := mock.NewMockDirectoryFetcher(ctrl) + directoryFetcher.EXPECT().GetDirectory( + ctx, + digest.MustNewDigest("netbsd", remoteexecution.DigestFunction_SHA256, "7777777777777777777777777777777777777777777777777777777777777777", 42), + ).Return(&remoteexecution.Directory{ + Directories: []*remoteexecution.DirectoryNode{ + { + Name: "Hello", + Digest: &remoteexecution.Digest{ + Hash: "8888888888888888888888888888888888888888888888888888888888888888", + SizeBytes: 123, + }, + }, + }, + }, nil) + directoryFetcher.EXPECT().GetDirectory( + ctx, + digest.MustNewDigest("netbsd", remoteexecution.DigestFunction_SHA256, "8888888888888888888888888888888888888888888888888888888888888888", 123), + ).Return(&remoteexecution.Directory{ + Files: []*remoteexecution.FileNode{ + { + Name: "World", + }, + }, + }, nil) + errorLogger := mock.NewMockErrorLogger(ctrl) + buildDirectory := mock.NewMockDirectoryCloser(ctrl) + buildDirectory.EXPECT().Mkdir(path.MustNewComponent("Hello"), os.FileMode(0o777)).Return(nil) + helloDirectory := mock.NewMockDirectoryCloser(ctrl) + buildDirectory.EXPECT().EnterDirectory(path.MustNewComponent("Hello")).Return(helloDirectory, nil) + helloDirectory.EXPECT().Close() + fileFetcher := mock.NewMockFileFetcher(ctrl) + contentAddressableStorage := mock.NewMockBlobAccess(ctrl) + inputRootPopulator := builder.NewNaiveBuildDirectory(buildDirectory, directoryFetcher, fileFetcher, contentAddressableStorage) + + err := inputRootPopulator.MergeDirectoryContents( + ctx, + errorLogger, + digest.MustNewDigest("netbsd", remoteexecution.DigestFunction_SHA256, "7777777777777777777777777777777777777777777777777777777777777777", 42), + nil) + testutil.RequireEqualStatus(t, status.Error(codes.InvalidArgument, "Failed to extract digest for input file 
\"Hello/World\": No digest provided"), err) +} + +func TestNaiveBuildDirectoryFileCreationFailure(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + directoryFetcher := mock.NewMockDirectoryFetcher(ctrl) + directoryFetcher.EXPECT().GetDirectory( + ctx, + digest.MustNewDigest("netbsd", remoteexecution.DigestFunction_SHA256, "7777777777777777777777777777777777777777777777777777777777777777", 42), + ).Return(&remoteexecution.Directory{ + Directories: []*remoteexecution.DirectoryNode{ + { + Name: "Hello", + Digest: &remoteexecution.Digest{ + Hash: "8888888888888888888888888888888888888888888888888888888888888888", + SizeBytes: 123, + }, + }, + }, + }, nil) + directoryFetcher.EXPECT().GetDirectory( + ctx, + digest.MustNewDigest("netbsd", remoteexecution.DigestFunction_SHA256, "8888888888888888888888888888888888888888888888888888888888888888", 123), + ).Return(&remoteexecution.Directory{ + Files: []*remoteexecution.FileNode{ + { + Name: "World", + Digest: &remoteexecution.Digest{ + Hash: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + SizeBytes: 87, + }, + }, + }, + }, nil) + errorLogger := mock.NewMockErrorLogger(ctrl) + buildDirectory := mock.NewMockDirectoryCloser(ctrl) + buildDirectory.EXPECT().Mkdir(path.MustNewComponent("Hello"), os.FileMode(0o777)).Return(nil) + helloDirectory := mock.NewMockDirectoryCloser(ctrl) + buildDirectory.EXPECT().EnterDirectory(path.MustNewComponent("Hello")).Return(helloDirectory, nil) + fileFetcher := mock.NewMockFileFetcher(ctrl) + fileFetcher.EXPECT().GetFile( + ctx, + digest.MustNewDigest("netbsd", remoteexecution.DigestFunction_SHA256, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", 87), + helloDirectory, + path.MustNewComponent("World"), + false).Return(status.Error(codes.DataLoss, "Disk on fire")) + helloDirectory.EXPECT().Close() + contentAddressableStorage := mock.NewMockBlobAccess(ctrl) + inputRootPopulator := builder.NewNaiveBuildDirectory(buildDirectory, 
directoryFetcher, fileFetcher, contentAddressableStorage) + + err := inputRootPopulator.MergeDirectoryContents( + ctx, + errorLogger, + digest.MustNewDigest("netbsd", remoteexecution.DigestFunction_SHA256, "7777777777777777777777777777777777777777777777777777777777777777", 42), + nil) + testutil.RequireEqualStatus(t, status.Error(codes.DataLoss, "Failed to obtain input file \"Hello/World\": Disk on fire"), err) +} + +func TestNaiveBuildDirectorySymlinkCreationFailure(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + directoryFetcher := mock.NewMockDirectoryFetcher(ctrl) + directoryFetcher.EXPECT().GetDirectory( + ctx, + digest.MustNewDigest("netbsd", remoteexecution.DigestFunction_SHA256, "7777777777777777777777777777777777777777777777777777777777777777", 42), + ).Return(&remoteexecution.Directory{ + Directories: []*remoteexecution.DirectoryNode{ + { + Name: "Hello", + Digest: &remoteexecution.Digest{ + Hash: "8888888888888888888888888888888888888888888888888888888888888888", + SizeBytes: 123, + }, + }, + }, + }, nil) + directoryFetcher.EXPECT().GetDirectory( + ctx, + digest.MustNewDigest("netbsd", remoteexecution.DigestFunction_SHA256, "8888888888888888888888888888888888888888888888888888888888888888", 123), + ).Return(&remoteexecution.Directory{ + Symlinks: []*remoteexecution.SymlinkNode{ + { + Name: "World", + Target: "/etc/passwd", + }, + }, + }, nil) + errorLogger := mock.NewMockErrorLogger(ctrl) + buildDirectory := mock.NewMockDirectoryCloser(ctrl) + buildDirectory.EXPECT().Mkdir(path.MustNewComponent("Hello"), os.FileMode(0o777)).Return(nil) + helloDirectory := mock.NewMockDirectoryCloser(ctrl) + buildDirectory.EXPECT().EnterDirectory(path.MustNewComponent("Hello")).Return(helloDirectory, nil) + helloDirectory.EXPECT().Symlink("/etc/passwd", path.MustNewComponent("World")).Return(status.Error(codes.Unimplemented, "This filesystem does not support symbolic links")) + helloDirectory.EXPECT().Close() + fileFetcher := 
mock.NewMockFileFetcher(ctrl) + contentAddressableStorage := mock.NewMockBlobAccess(ctrl) + inputRootPopulator := builder.NewNaiveBuildDirectory(buildDirectory, directoryFetcher, fileFetcher, contentAddressableStorage) + + err := inputRootPopulator.MergeDirectoryContents( + ctx, + errorLogger, + digest.MustNewDigest("netbsd", remoteexecution.DigestFunction_SHA256, "7777777777777777777777777777777777777777777777777777777777777777", 42), + nil) + testutil.RequireEqualStatus(t, status.Error(codes.Unimplemented, "Failed to create input symlink \"Hello/World\": This filesystem does not support symbolic links"), err) +} + +func TestNaiveBuildDirectoryUploadFile(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + buildDirectory := mock.NewMockDirectoryCloser(ctrl) + directoryFetcher := mock.NewMockDirectoryFetcher(ctrl) + fileFetcher := mock.NewMockFileFetcher(ctrl) + contentAddressableStorage := mock.NewMockBlobAccess(ctrl) + inputRootPopulator := builder.NewNaiveBuildDirectory( + buildDirectory, + directoryFetcher, + fileFetcher, + contentAddressableStorage) + + helloWorldDigest := digest.MustNewDigest("default-scheduler", remoteexecution.DigestFunction_MD5, "3e25960a79dbc69b674cd4ec67a72c62", 11) + digestFunction := helloWorldDigest.GetDigestFunction() + + t.Run("NonexistentFile", func(t *testing.T) { + buildDirectory.EXPECT().OpenRead(path.MustNewComponent("hello")).Return(nil, syscall.ENOENT) + + _, err := inputRootPopulator.UploadFile(ctx, path.MustNewComponent("hello"), digestFunction) + require.Equal(t, syscall.ENOENT, err) + }) + + t.Run("IOFailureDuringDigestComputation", func(t *testing.T) { + file := mock.NewMockFileReader(ctrl) + buildDirectory.EXPECT().OpenRead(path.MustNewComponent("hello")).Return(file, nil) + gomock.InOrder( + file.EXPECT().ReadAt(gomock.Any(), int64(0)).DoAndReturn( + func(p []byte, off int64) (int, error) { + return 0, status.Error(codes.Unavailable, "Disk on fire") + }), + file.EXPECT().Close().Return(nil)) + 
+ _, err := inputRootPopulator.UploadFile(ctx, path.MustNewComponent("hello"), digestFunction) + testutil.RequireEqualStatus(t, status.Error(codes.Unavailable, "Failed to compute file digest: Disk on fire"), err) + }) + + t.Run("FileChangedDuringUpload", func(t *testing.T) { + // Changes to the file contents between the digest + // computation and upload phases should be detected. + file := mock.NewMockFileReader(ctrl) + buildDirectory.EXPECT().OpenRead(path.MustNewComponent("hello")).Return(file, nil) + gomock.InOrder( + file.EXPECT().ReadAt(gomock.Any(), int64(0)).DoAndReturn( + func(p []byte, off int64) (int, error) { + require.Greater(t, len(p), 11) + copy(p, "Hello world") + return 11, io.EOF + }), + file.EXPECT().ReadAt(gomock.Any(), int64(0)).DoAndReturn( + func(p []byte, off int64) (int, error) { + require.Greater(t, len(p), 9) + copy(p, "Different") + return 9, io.EOF + }), + file.EXPECT().Close().Return(nil)) + contentAddressableStorage.EXPECT().Put(ctx, helloWorldDigest, gomock.Any()).DoAndReturn( + func(ctx context.Context, digest digest.Digest, b buffer.Buffer) error { + _, err := b.ToByteSlice(100) + testutil.RequireEqualStatus(t, status.Error(codes.InvalidArgument, "Buffer is 9 bytes in size, while 11 bytes were expected"), err) + return err + }) + + _, err := inputRootPopulator.UploadFile(ctx, path.MustNewComponent("hello"), digestFunction) + testutil.RequireEqualStatus(t, status.Error(codes.InvalidArgument, "Failed to upload file: Buffer is 9 bytes in size, while 11 bytes were expected"), err) + }) + + t.Run("SuccessFileGrownDuringUpload", func(t *testing.T) { + // Simulate the case where the file to be uploaded grows + // while being uploaded. The newly added part should be + // ignored, as it wasn't used to compute the digest. + // This is not uncommon, especially for stdout and + // stderr logs. 
+ file := mock.NewMockFileReader(ctrl) + buildDirectory.EXPECT().OpenRead(path.MustNewComponent("hello")).Return(file, nil) + gomock.InOrder( + file.EXPECT().ReadAt(gomock.Any(), int64(0)).DoAndReturn( + func(p []byte, off int64) (int, error) { + require.Greater(t, len(p), 11) + copy(p, "Hello world") + return 11, io.EOF + }), + file.EXPECT().ReadAt(gomock.Any(), int64(0)).DoAndReturn( + func(p []byte, off int64) (int, error) { + require.Len(t, p, 11) + copy(p, "Hello world") + return 11, nil + }), + file.EXPECT().Close().Return(nil)) + contentAddressableStorage.EXPECT().Put(ctx, helloWorldDigest, gomock.Any()).DoAndReturn( + func(ctx context.Context, digest digest.Digest, b buffer.Buffer) error { + data, err := b.ToByteSlice(100) + require.NoError(t, err) + require.Equal(t, []byte("Hello world"), data) + return nil + }) + + digest, err := inputRootPopulator.UploadFile(ctx, path.MustNewComponent("hello"), digestFunction) + require.NoError(t, err) + require.Equal(t, digest, helloWorldDigest) + }) +} diff --git a/pkg/builder/noop_build_executor.go b/pkg/builder/noop_build_executor.go new file mode 100644 index 0000000..0a27644 --- /dev/null +++ b/pkg/builder/noop_build_executor.go @@ -0,0 +1,103 @@ +package builder + +import ( + "context" + "net/url" + "strings" + "text/template" + + remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + "github.com/buildbarn/bb-remote-execution/pkg/filesystem" + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/access" + "github.com/buildbarn/bb-remote-execution/pkg/proto/remoteworker" + re_util "github.com/buildbarn/bb-remote-execution/pkg/util" + "github.com/buildbarn/bb-storage/pkg/blobstore" + "github.com/buildbarn/bb-storage/pkg/digest" + "github.com/buildbarn/bb-storage/pkg/util" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +type noopBuildExecutor struct { + contentAddressableStorage blobstore.BlobAccess + maximumMessageSizeBytes int + browserURL *url.URL +} + 
+// NewNoopBuildExecutor creates a BuildExecutor that always returns an +// error message when attempting to execute an action. +// +// This implementation may be used to force a build client +// to upload the input root of an action into the Content Addressable +// Storage (CAS) without causing it to be executed afterwards. This may +// be useful when attempting to debug actions. +func NewNoopBuildExecutor(contentAddressableStorage blobstore.BlobAccess, maximumMessageSizeBytes int, browserURL *url.URL) BuildExecutor { + return &noopBuildExecutor{ + contentAddressableStorage: contentAddressableStorage, + maximumMessageSizeBytes: maximumMessageSizeBytes, + browserURL: browserURL, + } +} + +func (be *noopBuildExecutor) CheckReadiness(ctx context.Context) error { + return nil +} + +var defaultNoopErrorMessageTemplate = template.Must( + template.New("NoopBuildExecutor"). + Parse("Action has been uploaded, but will not be executed. Action details: {{ .ActionURL }}")) + +func (be *noopBuildExecutor) Execute(ctx context.Context, filePool filesystem.FilePool, monitor access.UnreadDirectoryMonitor, digestFunction digest.Function, request *remoteworker.DesiredState_Executing, executionStateUpdates chan<- *remoteworker.CurrentState_Executing) *remoteexecution.ExecuteResponse { + // Obtain action digest, which can be embedded in the error message. + response := NewDefaultExecuteResponse(request) + actionDigest, err := digestFunction.NewDigestFromProto(request.ActionDigest) + if err != nil { + attachErrorToExecuteResponse(response, util.StatusWrap(err, "Failed to extract digest for action")) + return response + } + + // Extract the error message template from the Command message. 
+ action := request.Action + if action == nil { + attachErrorToExecuteResponse(response, status.Error(codes.InvalidArgument, "Request does not contain an action")) + return response + } + commandDigest, err := digestFunction.NewDigestFromProto(request.Action.CommandDigest) + if err != nil { + attachErrorToExecuteResponse(response, util.StatusWrap(err, "Failed to extract digest for command")) + return response + } + commandMessage, err := be.contentAddressableStorage.Get(ctx, commandDigest).ToProto(&remoteexecution.Command{}, be.maximumMessageSizeBytes) + if err != nil { + attachErrorToExecuteResponse(response, util.StatusWrap(err, "Failed to obtain command")) + return response + } + command := commandMessage.(*remoteexecution.Command) + + errorMessageTemplate := defaultNoopErrorMessageTemplate + for _, environmentVariable := range command.EnvironmentVariables { + if environmentVariable.Name == "NOOP_WORKER_ERROR_MESSAGE_TEMPLATE" { + userProvidedTemplate, err := template.New(environmentVariable.Name).Parse(environmentVariable.Value) + if err != nil { + attachErrorToExecuteResponse(response, util.StatusWrapWithCode(err, codes.InvalidArgument, "Invalid error message template")) + return response + } + errorMessageTemplate = userProvidedTemplate + } + } + + // Generate error message to return. 
+ var errorMessage strings.Builder + if err := errorMessageTemplate.Execute(&errorMessage, struct { + ActionURL string + }{ + ActionURL: re_util.GetBrowserURL(be.browserURL, "action", actionDigest), + }); err != nil { + attachErrorToExecuteResponse(response, util.StatusWrapWithCode(err, codes.InvalidArgument, "Cannot evaluate error message template")) + return response + } + + attachErrorToExecuteResponse(response, status.Error(codes.InvalidArgument, errorMessage.String())) + return response +} diff --git a/pkg/builder/noop_build_executor_test.go b/pkg/builder/noop_build_executor_test.go new file mode 100644 index 0000000..91f2edd --- /dev/null +++ b/pkg/builder/noop_build_executor_test.go @@ -0,0 +1,209 @@ +package builder_test + +import ( + "context" + "net/url" + "testing" + + remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + "github.com/buildbarn/bb-remote-execution/internal/mock" + "github.com/buildbarn/bb-remote-execution/pkg/builder" + "github.com/buildbarn/bb-remote-execution/pkg/proto/remoteworker" + "github.com/buildbarn/bb-storage/pkg/blobstore/buffer" + "github.com/buildbarn/bb-storage/pkg/digest" + "github.com/buildbarn/bb-storage/pkg/testutil" + "github.com/golang/mock/gomock" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func TestNoopBuildExecutor(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + contentAddressableStorage := mock.NewMockBlobAccess(ctrl) + buildExecutor := builder.NewNoopBuildExecutor( + contentAddressableStorage, + /* maximumMessageSizeBytes = */ 10000, + &url.URL{ + Scheme: "http", + Host: "example.com", + Path: "/some/sub/directory/", + }) + + t.Run("NoActionDigest", func(t *testing.T) { + // The client needs to provide an Action digest, so that + // this BuildExecutor can generate a link to bb_browser. 
+ filePool := mock.NewMockFilePool(ctrl) + monitor := mock.NewMockUnreadDirectoryMonitor(ctrl) + testutil.RequireEqualProto( + t, + &remoteexecution.ExecuteResponse{ + Result: &remoteexecution.ActionResult{ + ExecutionMetadata: &remoteexecution.ExecutedActionMetadata{}, + }, + Status: status.New(codes.InvalidArgument, "Failed to extract digest for action: No digest provided").Proto(), + }, + buildExecutor.Execute( + ctx, + filePool, + monitor, + digest.MustNewFunction("build", remoteexecution.DigestFunction_MD5), + &remoteworker.DesiredState_Executing{}, + make(chan *remoteworker.CurrentState_Executing, 10))) + }) + + t.Run("InvalidActionDigest", func(t *testing.T) { + filePool := mock.NewMockFilePool(ctrl) + monitor := mock.NewMockUnreadDirectoryMonitor(ctrl) + testutil.RequireEqualProto( + t, + &remoteexecution.ExecuteResponse{ + Result: &remoteexecution.ActionResult{ + ExecutionMetadata: &remoteexecution.ExecutedActionMetadata{}, + }, + Status: status.New(codes.InvalidArgument, "Failed to extract digest for action: Hash has length 24, while 32 characters were expected").Proto(), + }, + buildExecutor.Execute( + ctx, + filePool, + monitor, + digest.MustNewFunction("build", remoteexecution.DigestFunction_MD5), + &remoteworker.DesiredState_Executing{ + ActionDigest: &remoteexecution.Digest{ + Hash: "This is not a valid hash", + SizeBytes: 123, + }, + Action: &remoteexecution.Action{ + CommandDigest: &remoteexecution.Digest{ + Hash: "0d47d8528c97bdc9862674e6a8090cad", + SizeBytes: 1200, + }, + }, + }, + make(chan *remoteworker.CurrentState_Executing, 10))) + }) + + t.Run("InvalidTemplate", func(t *testing.T) { + // If an invalid template is provided in the + // environment, parsing it should fail. + contentAddressableStorage.EXPECT().Get(ctx, digest.MustNewDigest("build", remoteexecution.DigestFunction_SHA256, "7f53aed4b5489c487be514dd88d3314d966e19b84bc766a972d82246ee6f494f", 150)). 
+ Return(buffer.NewProtoBufferFromProto(&remoteexecution.Command{ + EnvironmentVariables: []*remoteexecution.Command_EnvironmentVariable{ + { + Name: "NOOP_WORKER_ERROR_MESSAGE_TEMPLATE", + Value: "{{ foobarbaz }}", + }, + }, + }, buffer.UserProvided)) + filePool := mock.NewMockFilePool(ctrl) + monitor := mock.NewMockUnreadDirectoryMonitor(ctrl) + testutil.RequireEqualProto( + t, + &remoteexecution.ExecuteResponse{ + Result: &remoteexecution.ActionResult{ + ExecutionMetadata: &remoteexecution.ExecutedActionMetadata{}, + }, + Status: status.New(codes.InvalidArgument, "Invalid error message template: template: NOOP_WORKER_ERROR_MESSAGE_TEMPLATE:1: function \"foobarbaz\" not defined").Proto(), + }, + buildExecutor.Execute( + ctx, + filePool, + monitor, + digest.MustNewFunction("build", remoteexecution.DigestFunction_SHA256), + &remoteworker.DesiredState_Executing{ + ActionDigest: &remoteexecution.Digest{ + Hash: "4e3fbcd2916efea4cc61d57b4a097df2b467e6b2207f6e242457a8705c5dc689", + SizeBytes: 234, + }, + Action: &remoteexecution.Action{ + CommandDigest: &remoteexecution.Digest{ + Hash: "7f53aed4b5489c487be514dd88d3314d966e19b84bc766a972d82246ee6f494f", + SizeBytes: 150, + }, + }, + }, + make(chan *remoteworker.CurrentState_Executing, 10))) + }) + + t.Run("SuccessDefaultTemplate", func(t *testing.T) { + // If no template is provided in the environment + // variables, then a default template should be used. + contentAddressableStorage.EXPECT().Get(ctx, digest.MustNewDigest("build", remoteexecution.DigestFunction_SHA256, "d134371fd7573f7ef77c90e907c8bfaf95f34b82ac8503dbed5e062fb6fe4702", 200)). 
+ Return(buffer.NewProtoBufferFromProto(&remoteexecution.Command{}, buffer.UserProvided)) + filePool := mock.NewMockFilePool(ctrl) + monitor := mock.NewMockUnreadDirectoryMonitor(ctrl) + testutil.RequireEqualProto( + t, + &remoteexecution.ExecuteResponse{ + Result: &remoteexecution.ActionResult{ + ExecutionMetadata: &remoteexecution.ExecutedActionMetadata{}, + }, + Status: status.New(codes.InvalidArgument, "Action has been uploaded, but will not be executed. Action details: http://example.com/some/sub/directory/build/blobs/sha256/action/4b3f66e160293a393a1fe2dd13721368c944d949e11f97985a893a5b76877346-123/").Proto(), + }, + buildExecutor.Execute( + ctx, + filePool, + monitor, + digest.MustNewFunction("build", remoteexecution.DigestFunction_SHA256), + &remoteworker.DesiredState_Executing{ + ActionDigest: &remoteexecution.Digest{ + Hash: "4b3f66e160293a393a1fe2dd13721368c944d949e11f97985a893a5b76877346", + SizeBytes: 123, + }, + Action: &remoteexecution.Action{ + CommandDigest: &remoteexecution.Digest{ + Hash: "d134371fd7573f7ef77c90e907c8bfaf95f34b82ac8503dbed5e062fb6fe4702", + SizeBytes: 200, + }, + }, + }, + make(chan *remoteworker.CurrentState_Executing, 10))) + }) + + t.Run("SuccessCustomTemplate", func(t *testing.T) { + // If a custom template is provided in the environment, + // it should be preferred over the default template. + contentAddressableStorage.EXPECT().Get(ctx, digest.MustNewDigest("build", remoteexecution.DigestFunction_SHA256, "9da17cb226048f5bb3e6a20311b551e73ce8ac0d408e69e737d28a8f3179d0ce", 300)). 
+ Return(buffer.NewProtoBufferFromProto(&remoteexecution.Command{ + EnvironmentVariables: []*remoteexecution.Command_EnvironmentVariable{ + { + Name: "PATH", + Value: "/bin:/sbin:/usr/bin:/usr/sbin", + }, + { + Name: "NOOP_WORKER_ERROR_MESSAGE_TEMPLATE", + Value: "Please visit {{ .ActionURL }} to inspect the action", + }, + }, + }, buffer.UserProvided)) + filePool := mock.NewMockFilePool(ctrl) + monitor := mock.NewMockUnreadDirectoryMonitor(ctrl) + testutil.RequireEqualProto( + t, + &remoteexecution.ExecuteResponse{ + Result: &remoteexecution.ActionResult{ + ExecutionMetadata: &remoteexecution.ExecutedActionMetadata{}, + }, + Status: status.New(codes.InvalidArgument, "Please visit http://example.com/some/sub/directory/build/blobs/sha256/action/e1497d75baa26b50b71b14086d1553586e817385670ac550f36b94cd50b8e25d-456/ to inspect the action").Proto(), + }, + buildExecutor.Execute( + ctx, + filePool, + monitor, + digest.MustNewFunction("build", remoteexecution.DigestFunction_SHA256), + &remoteworker.DesiredState_Executing{ + ActionDigest: &remoteexecution.Digest{ + Hash: "e1497d75baa26b50b71b14086d1553586e817385670ac550f36b94cd50b8e25d", + SizeBytes: 456, + }, + Action: &remoteexecution.Action{ + CommandDigest: &remoteexecution.Digest{ + Hash: "9da17cb226048f5bb3e6a20311b551e73ce8ac0d408e69e737d28a8f3179d0ce", + SizeBytes: 300, + }, + }, + }, + make(chan *remoteworker.CurrentState_Executing, 10))) + }) +} diff --git a/pkg/builder/output_hierarchy.go b/pkg/builder/output_hierarchy.go new file mode 100644 index 0000000..f1522bd --- /dev/null +++ b/pkg/builder/output_hierarchy.go @@ -0,0 +1,546 @@ +package builder + +import ( + "context" + "os" + "sort" + + remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + "github.com/buildbarn/bb-storage/pkg/blobstore" + "github.com/buildbarn/bb-storage/pkg/blobstore/buffer" + "github.com/buildbarn/bb-storage/pkg/digest" + "github.com/buildbarn/bb-storage/pkg/filesystem" + 
"github.com/buildbarn/bb-storage/pkg/filesystem/path" + "github.com/buildbarn/bb-storage/pkg/util" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/proto" +) + +// OutputNode is a node in a directory hierarchy that contains one or +// more locations where output directories and files are expected. +type outputNode struct { + directoriesToUpload map[path.Component][]string + filesToUpload map[path.Component][]string + pathsToUpload map[path.Component][]string + subdirectories map[path.Component]*outputNode +} + +func (on *outputNode) getSubdirectoryNames() []path.Component { + l := make(path.ComponentsList, 0, len(on.subdirectories)) + for k := range on.subdirectories { + l = append(l, k) + } + sort.Sort(l) + return l +} + +func sortToUpload(m map[path.Component][]string) []path.Component { + l := make(path.ComponentsList, 0, len(m)) + for k := range m { + l = append(l, k) + } + sort.Sort(l) + return l +} + +// NewOutputDirectory creates a new outputNode that is in the initial +// state. It contains no locations where output directories or files are +// expected. +func newOutputDirectory() *outputNode { + return &outputNode{ + directoriesToUpload: map[path.Component][]string{}, + filesToUpload: map[path.Component][]string{}, + pathsToUpload: map[path.Component][]string{}, + subdirectories: map[path.Component]*outputNode{}, + } +} + +// CreateParentDirectories is recursive invoked by +// OutputHierarchy.CreateParentDirectories() to create parent +// directories of locations where output directories and files are +// expected. 
+func (on *outputNode) createParentDirectories(d ParentPopulatableDirectory, dPath *path.Trace) error { + for _, name := range on.getSubdirectoryNames() { + childPath := dPath.Append(name) + if err := d.Mkdir(name, 0o777); err != nil && !os.IsExist(err) { + return util.StatusWrapf(err, "Failed to create output parent directory %#v", childPath.String()) + } + + // Recurse if we need to create one or more directories within. + if child := on.subdirectories[name]; len(child.subdirectories) > 0 || len(child.directoriesToUpload) > 0 { + childDirectory, err := d.EnterParentPopulatableDirectory(name) + if err != nil { + return util.StatusWrapf(err, "Failed to enter output parent directory %#v", childPath.String()) + } + err = child.createParentDirectories(childDirectory, childPath) + childDirectory.Close() + if err != nil { + return err + } + } + } + + // Although REv2 explicitly documents that only parents of + // output directories are created (i.e., not the output + // directory itself), Bazel changed its behaviour and now + // creates output directories when using local execution. See + // these issues for details: + // + // https://github.com/bazelbuild/bazel/issues/6262 + // https://github.com/bazelbuild/bazel/issues/6393 + // + // Considering that the 'output_directories' field is deprecated + // in REv2.1 anyway, be consistent with Bazel's local execution. + // Once Bazel switches to REv2.1, it will be forced to solve + // this matter in a protocol conforming way. + for _, name := range sortToUpload(on.directoriesToUpload) { + if _, ok := on.subdirectories[name]; !ok { + childPath := dPath.Append(name) + if err := d.Mkdir(name, 0o777); err != nil && !os.IsExist(err) { + return util.StatusWrapf(err, "Failed to create output directory %#v", childPath.String()) + } + } + } + return nil +} + +// UploadOutputs is recursively invoked by +// OutputHierarchy.UploadOutputs() to upload output directories and +// files from the locations where they are expected. 
+func (on *outputNode) uploadOutputs(s *uploadOutputsState, d UploadableDirectory, dPath *path.Trace) { + // Upload REv2.0 output directories that are expected to be + // present in this directory. + for _, component := range sortToUpload(on.directoriesToUpload) { + childPath := dPath.Append(component) + paths := on.directoriesToUpload[component] + if fileInfo, err := d.Lstat(component); err == nil { + switch fileType := fileInfo.Type(); fileType { + case filesystem.FileTypeDirectory: + s.uploadOutputDirectory(d, component, childPath, paths) + case filesystem.FileTypeSymlink: + s.uploadOutputSymlink(d, component, childPath, &s.actionResult.OutputDirectorySymlinks, paths) + default: + s.saveError(status.Errorf(codes.InvalidArgument, "Output directory %#v is not a directory or symlink", childPath.String())) + } + } else if !os.IsNotExist(err) { + s.saveError(util.StatusWrapf(err, "Failed to read attributes of output directory %#v", childPath.String())) + } + } + + // Upload REv2.0 output files that are expected to be present in + // this directory. + for _, component := range sortToUpload(on.filesToUpload) { + childPath := dPath.Append(component) + paths := on.filesToUpload[component] + if fileInfo, err := d.Lstat(component); err == nil { + switch fileType := fileInfo.Type(); fileType { + case filesystem.FileTypeRegularFile: + s.uploadOutputFile(d, component, childPath, fileInfo.IsExecutable(), paths) + case filesystem.FileTypeSymlink: + s.uploadOutputSymlink(d, component, childPath, &s.actionResult.OutputFileSymlinks, paths) + default: + s.saveError(status.Errorf(codes.InvalidArgument, "Output file %#v is not a regular file or symlink", childPath.String())) + } + } else if !os.IsNotExist(err) { + s.saveError(util.StatusWrapf(err, "Failed to read attributes of output file %#v", childPath.String())) + } + } + + // Upload REv2.1 output paths that are expected to be present in + // this directory. 
+ for _, component := range sortToUpload(on.pathsToUpload) { + childPath := dPath.Append(component) + paths := on.pathsToUpload[component] + if fileInfo, err := d.Lstat(component); err == nil { + switch fileType := fileInfo.Type(); fileType { + case filesystem.FileTypeDirectory: + s.uploadOutputDirectory(d, component, childPath, paths) + case filesystem.FileTypeRegularFile: + s.uploadOutputFile(d, component, childPath, fileInfo.IsExecutable(), paths) + case filesystem.FileTypeSymlink: + s.uploadOutputSymlink(d, component, childPath, &s.actionResult.OutputSymlinks, paths) + default: + s.saveError(status.Errorf(codes.InvalidArgument, "Output path %#v is not a directory, regular file or symlink", childPath.String())) + } + } else if !os.IsNotExist(err) { + s.saveError(util.StatusWrapf(err, "Failed to read attributes of output path %#v", childPath.String())) + } + } + + // Traverse into subdirectories. + for _, component := range on.getSubdirectoryNames() { + childPath := dPath.Append(component) + childNode := on.subdirectories[component] + if childDirectory, err := d.EnterUploadableDirectory(component); err == nil { + childNode.uploadOutputs(s, childDirectory, childPath) + childDirectory.Close() + } else if !os.IsNotExist(err) { + s.saveError(util.StatusWrapf(err, "Failed to enter output parent directory %#v", childPath.String())) + } + } +} + +// UploadOutputsState is used by OutputHierarchy.UploadOutputs() to +// track common parameters during recursion. +type uploadOutputsState struct { + context context.Context + contentAddressableStorage blobstore.BlobAccess + digestFunction digest.Function + actionResult *remoteexecution.ActionResult + + firstError error +} + +// SaveError preserves errors that occur during uploading. Even when +// errors occur, the remainder of the output files is still uploaded. +// This makes debugging easier. 
+func (s *uploadOutputsState) saveError(err error) { + if s.firstError == nil { + s.firstError = err + } +} + +// UploadOutputDirectoryEntered is called to upload a single output +// directory as a remoteexecution.Tree. The root directory is assumed to +// already be opened. +func (s *uploadOutputsState) uploadOutputDirectoryEntered(d UploadableDirectory, dPath *path.Trace, paths []string) { + dState := uploadOutputDirectoryState{ + uploadOutputsState: s, + directoriesSeen: map[digest.Digest]struct{}{}, + } + if rootDirectory, err := dState.uploadDirectory(d, dPath); err == nil { + // Approximate the size of the resulting Tree object, so + // that we may allocate all space at once. + directories := append(dState.directories, rootDirectory) + maximumTreeSizeBytes := 0 + for _, directory := range directories { + maximumTreeSizeBytes += len(directory) + } + maximumTreeSizeBytes += len(directories) * (1 + protowire.SizeVarint(uint64(maximumTreeSizeBytes))) + + // Construct the Tree object. We don't want to use + // proto.Marshal() for this, as it would require us to + // marshal all of the directories a second time. + treeData := make([]byte, 0, maximumTreeSizeBytes) + tag := byte(blobstore.TreeRootFieldNumber<<3) | byte(protowire.BytesType) + for i := len(directories); i > 0; i-- { + directory := directories[i-1] + treeData = append(treeData, tag) + treeData = protowire.AppendVarint(treeData, uint64(len(directory))) + treeData = append(treeData, directory...) 
+ tag = byte(blobstore.TreeChildrenFieldNumber<<3) | byte(protowire.BytesType) + } + + digestGenerator := s.digestFunction.NewGenerator(int64(len(treeData))) + if _, err := digestGenerator.Write(treeData); err != nil { + panic(err) + } + treeDigest := digestGenerator.Sum() + + if err := s.contentAddressableStorage.Put(s.context, treeDigest, buffer.NewValidatedBufferFromByteSlice(treeData)); err == nil { + for _, path := range paths { + s.actionResult.OutputDirectories = append( + s.actionResult.OutputDirectories, + &remoteexecution.OutputDirectory{ + Path: path, + TreeDigest: treeDigest.GetProto(), + IsTopologicallySorted: true, + }) + } + } else { + s.saveError(util.StatusWrapf(err, "Failed to store output directory %#v", dPath.String())) + } + } else { + s.saveError(err) + } +} + +// UploadOutputDirectory is called to upload a single output directory +// as a remoteexecution.Tree. The root directory is opened opened by +// this function. +func (s *uploadOutputsState) uploadOutputDirectory(d UploadableDirectory, name path.Component, childPath *path.Trace, paths []string) { + if childDirectory, err := d.EnterUploadableDirectory(name); err == nil { + s.uploadOutputDirectoryEntered(childDirectory, childPath, paths) + childDirectory.Close() + } else { + s.saveError(util.StatusWrapf(err, "Failed to enter output directory %#v", childPath.String())) + } +} + +// UploadOutputDirectory is called to upload a single output file. 
+func (s *uploadOutputsState) uploadOutputFile(d UploadableDirectory, name path.Component, childPath *path.Trace, isExecutable bool, paths []string) { + if digest, err := d.UploadFile(s.context, name, s.digestFunction); err == nil { + for _, path := range paths { + s.actionResult.OutputFiles = append( + s.actionResult.OutputFiles, + &remoteexecution.OutputFile{ + Path: path, + Digest: digest.GetProto(), + IsExecutable: isExecutable, + }) + } + } else { + s.saveError(util.StatusWrapf(err, "Failed to store output file %#v", childPath.String())) + } +} + +// UploadOutputDirectory is called to read the attributes of a single +// output symlink. +func (s *uploadOutputsState) uploadOutputSymlink(d UploadableDirectory, name path.Component, childPath *path.Trace, outputSymlinks *[]*remoteexecution.OutputSymlink, paths []string) { + if target, err := d.Readlink(name); err == nil { + for _, path := range paths { + *outputSymlinks = append( + *outputSymlinks, + &remoteexecution.OutputSymlink{ + Path: path, + Target: target, + }) + } + } else { + s.saveError(util.StatusWrapf(err, "Failed to read output symlink %#v", childPath.String())) + } +} + +// UploadOutputDirectoryState is used by OutputHierarchy.UploadOutputs() +// to track state specific to uploading a single output directory. +type uploadOutputDirectoryState struct { + *uploadOutputsState + + directories [][]byte + directoriesSeen map[digest.Digest]struct{} +} + +// UploadDirectory is called to upload a single directory. Elements in +// the directory are stored in a remoteexecution.Directory, so that they +// can be placed in a remoteexecution.Tree. 
+func (s *uploadOutputDirectoryState) uploadDirectory(d UploadableDirectory, dPath *path.Trace) ([]byte, error) { + files, err := d.ReadDir() + if err != nil { + return nil, util.StatusWrapf(err, "Failed to read output directory %#v", dPath.String()) + } + + var directory remoteexecution.Directory + for _, file := range files { + name := file.Name() + childPath := dPath.Append(name) + switch fileType := file.Type(); fileType { + case filesystem.FileTypeRegularFile: + if childDigest, err := d.UploadFile(s.context, name, s.digestFunction); err == nil { + directory.Files = append(directory.Files, &remoteexecution.FileNode{ + Name: name.String(), + Digest: childDigest.GetProto(), + IsExecutable: file.IsExecutable(), + }) + } else { + s.saveError(util.StatusWrapf(err, "Failed to store output file %#v", childPath.String())) + } + case filesystem.FileTypeDirectory: + if childDirectory, err := d.EnterUploadableDirectory(name); err == nil { + childData, err := s.uploadDirectory(childDirectory, dPath) + childDirectory.Close() + if err == nil { + // Compute the digest of the child + // directory, so that it may be + // referenced by the parent. + digestGenerator := s.digestFunction.NewGenerator(int64(len(childData))) + if _, err := digestGenerator.Write(childData); err != nil { + panic(err) + } + childDigest := digestGenerator.Sum() + + // There is no need to make the + // directory part of the Tree if we + // have seen an identical directory + // previously. 
+ if _, ok := s.directoriesSeen[childDigest]; !ok { + s.directories = append(s.directories, childData) + s.directoriesSeen[childDigest] = struct{}{} + } + + directory.Directories = append(directory.Directories, &remoteexecution.DirectoryNode{ + Name: name.String(), + Digest: childDigest.GetProto(), + }) + } else { + s.saveError(err) + } + } else { + s.saveError(util.StatusWrapf(err, "Failed to enter output directory %#v", childPath.String())) + } + case filesystem.FileTypeSymlink: + if target, err := d.Readlink(name); err == nil { + directory.Symlinks = append(directory.Symlinks, &remoteexecution.SymlinkNode{ + Name: name.String(), + Target: target, + }) + } else { + s.saveError(util.StatusWrapf(err, "Failed to read output symlink %#v", childPath.String())) + } + } + } + + data, err := proto.Marshal(&directory) + if err != nil { + return nil, util.StatusWrapf(err, "Failed to marshal output directory %#v", dPath.String()) + } + return data, nil +} + +// outputNodePath is an implementation of path.ComponentWalker that is +// used by NewOutputHierarchy() to compute normalized paths of outputs +// of a build action. +// +// It might have been cleaner if path.Resolve() was performed directly +// against the tree of outputNode objects. Unfortunately, the Remote +// Execution protocol requires us to create the parent directories of +// outputs, while the working directory needs to be part of the input +// root explicitly. Operating directly against outputNode objects would +// make it harder to achieve that. 
+type outputNodePath struct { + components []path.Component +} + +func (onp *outputNodePath) OnDirectory(name path.Component) (path.GotDirectoryOrSymlink, error) { + onp.components = append(onp.components, name) + return path.GotDirectory{ + Child: onp, + IsReversible: true, + }, nil +} + +func (onp *outputNodePath) OnTerminal(name path.Component) (*path.GotSymlink, error) { + onp.components = append(onp.components, name) + return nil, nil +} + +func (onp *outputNodePath) OnUp() (path.ComponentWalker, error) { + if len(onp.components) == 0 { + return nil, status.Error(codes.InvalidArgument, "Path resolves to a location outside the input root directory") + } + onp.components = onp.components[:len(onp.components)-1] + return onp, nil +} + +// OutputHierarchy is used by LocalBuildExecutor to track output +// directories and files that are expected to be generated by the build +// action. OutputHierarchy can be used to create parent directories of +// outputs prior to execution, and to upload outputs into the CAS after +// execution. +type OutputHierarchy struct { + root outputNode + rootsToUpload []string +} + +// NewOutputHierarchy creates a new OutputHierarchy that uses the +// working directory and the output paths specified in an REv2 Command +// message. +func NewOutputHierarchy(command *remoteexecution.Command) (*OutputHierarchy, error) { + var workingDirectory outputNodePath + if err := path.Resolve(command.WorkingDirectory, path.NewRelativeScopeWalker(&workingDirectory)); err != nil { + return nil, util.StatusWrap(err, "Invalid working directory") + } + + oh := &OutputHierarchy{ + root: *newOutputDirectory(), + } + + if len(command.OutputPaths) == 0 { + // Register REv2.0 output directories. 
+ for _, outputDirectory := range command.OutputDirectories { + if on, name, err := oh.lookup(workingDirectory, outputDirectory); err != nil { + return nil, util.StatusWrapf(err, "Invalid output directory %#v", outputDirectory) + } else if on == nil { + oh.rootsToUpload = append(oh.rootsToUpload, outputDirectory) + } else { + on.directoriesToUpload[*name] = append(on.directoriesToUpload[*name], outputDirectory) + } + } + + // Register REv2.0 output files. + for _, outputFile := range command.OutputFiles { + if on, name, err := oh.lookup(workingDirectory, outputFile); err != nil { + return nil, util.StatusWrapf(err, "Invalid output file %#v", outputFile) + } else if on == nil { + return nil, status.Errorf(codes.InvalidArgument, "Output file %#v resolves to the input root directory", outputFile) + } else { + on.filesToUpload[*name] = append(on.filesToUpload[*name], outputFile) + } + } + } else { + // Register REv2.1 output paths. + for _, outputPath := range command.OutputPaths { + if on, name, err := oh.lookup(workingDirectory, outputPath); err != nil { + return nil, util.StatusWrapf(err, "Invalid output path %#v", outputPath) + } else if on == nil { + oh.rootsToUpload = append(oh.rootsToUpload, outputPath) + } else { + on.pathsToUpload[*name] = append(on.pathsToUpload[*name], outputPath) + } + } + } + return oh, nil +} + +func (oh *OutputHierarchy) lookup(workingDirectory outputNodePath, targetPath string) (*outputNode, *path.Component, error) { + // Resolve the path of the output relative to the working directory. + outputPath := outputNodePath{ + components: append([]path.Component(nil), workingDirectory.components...), + } + if err := path.Resolve(targetPath, path.NewRelativeScopeWalker(&outputPath)); err != nil { + return nil, nil, err + } + + components := outputPath.components + if len(components) == 0 { + // Path resolves to the root directory. + return nil, nil, nil + } + + // Path resolves to a location inside the root directory, + // meaning it is named. 
Create all parent directories. + on := &oh.root + for _, component := range components[:len(components)-1] { + child, ok := on.subdirectories[component] + if !ok { + child = newOutputDirectory() + on.subdirectories[component] = child + } + on = child + } + return on, &components[len(components)-1], nil +} + +// ParentPopulatableDirectory contains a subset of the methods of +// filesystem.Directory that are required for creating the parent +// directories of output files of a build action. +type ParentPopulatableDirectory interface { + Close() error + EnterParentPopulatableDirectory(name path.Component) (ParentPopulatableDirectory, error) + Mkdir(name path.Component, perm os.FileMode) error +} + +// CreateParentDirectories creates parent directories of outputs. This +// function is called prior to executing the build action. +func (oh *OutputHierarchy) CreateParentDirectories(d ParentPopulatableDirectory) error { + return oh.root.createParentDirectories(d, nil) +} + +// UploadOutputs uploads outputs of the build action into the CAS. This +// function is called after executing the build action. 
+func (oh *OutputHierarchy) UploadOutputs(ctx context.Context, d UploadableDirectory, contentAddressableStorage blobstore.BlobAccess, digestFunction digest.Function, actionResult *remoteexecution.ActionResult) error { + s := uploadOutputsState{ + context: ctx, + contentAddressableStorage: contentAddressableStorage, + digestFunction: digestFunction, + actionResult: actionResult, + } + + if len(oh.rootsToUpload) > 0 { + s.uploadOutputDirectoryEntered(d, nil, oh.rootsToUpload) + } + oh.root.uploadOutputs(&s, d, nil) + return s.firstError +} diff --git a/pkg/builder/output_hierarchy_test.go b/pkg/builder/output_hierarchy_test.go new file mode 100644 index 0000000..8b5b502 --- /dev/null +++ b/pkg/builder/output_hierarchy_test.go @@ -0,0 +1,840 @@ +package builder_test + +import ( + "context" + "os" + "syscall" + "testing" + + remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + "github.com/buildbarn/bb-remote-execution/internal/mock" + "github.com/buildbarn/bb-remote-execution/pkg/builder" + "github.com/buildbarn/bb-storage/pkg/blobstore/buffer" + "github.com/buildbarn/bb-storage/pkg/digest" + "github.com/buildbarn/bb-storage/pkg/filesystem" + "github.com/buildbarn/bb-storage/pkg/filesystem/path" + "github.com/buildbarn/bb-storage/pkg/testutil" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func TestOutputHierarchyCreation(t *testing.T) { + t.Run("AbsoluteWorkingDirectory", func(t *testing.T) { + _, err := builder.NewOutputHierarchy(&remoteexecution.Command{ + WorkingDirectory: "/tmp/hello/../..", + }) + testutil.RequireEqualStatus(t, status.Error(codes.InvalidArgument, "Invalid working directory: Path is absolute, while a relative path was expected"), err) + }) + + t.Run("InvalidWorkingDirectory", func(t *testing.T) { + _, err := builder.NewOutputHierarchy(&remoteexecution.Command{ + WorkingDirectory: "hello/../..", + }) + 
testutil.RequireEqualStatus(t, status.Error(codes.InvalidArgument, "Invalid working directory: Path resolves to a location outside the input root directory"), err) + }) + + t.Run("AbsoluteOutputDirectory", func(t *testing.T) { + _, err := builder.NewOutputHierarchy(&remoteexecution.Command{ + WorkingDirectory: ".", + OutputDirectories: []string{"/etc/passwd"}, + }) + testutil.RequireEqualStatus(t, status.Error(codes.InvalidArgument, "Invalid output directory \"/etc/passwd\": Path is absolute, while a relative path was expected"), err) + }) + + t.Run("InvalidOutputDirectory", func(t *testing.T) { + _, err := builder.NewOutputHierarchy(&remoteexecution.Command{ + WorkingDirectory: "hello", + OutputDirectories: []string{"../.."}, + }) + testutil.RequireEqualStatus(t, status.Error(codes.InvalidArgument, "Invalid output directory \"../..\": Path resolves to a location outside the input root directory"), err) + }) + + t.Run("InvalidOutputFile", func(t *testing.T) { + _, err := builder.NewOutputHierarchy(&remoteexecution.Command{ + WorkingDirectory: "hello", + OutputFiles: []string{".."}, + }) + testutil.RequireEqualStatus(t, status.Error(codes.InvalidArgument, "Output file \"..\" resolves to the input root directory"), err) + }) +} + +func TestOutputHierarchyCreateParentDirectories(t *testing.T) { + ctrl := gomock.NewController(t) + + root := mock.NewMockParentPopulatableDirectory(ctrl) + + t.Run("Noop", func(t *testing.T) { + // No parent directories should be created. + oh, err := builder.NewOutputHierarchy(&remoteexecution.Command{ + WorkingDirectory: ".", + }) + require.NoError(t, err) + require.NoError(t, oh.CreateParentDirectories(root)) + }) + + t.Run("WorkingDirectory", func(t *testing.T) { + // REv2 explicitly states that the working directory + // must be a directory that exists in the input root. + // Using a subdirectory as a working directory should + // not cause any Mkdir() calls. 
+ oh, err := builder.NewOutputHierarchy(&remoteexecution.Command{ + WorkingDirectory: "foo/bar", + }) + require.NoError(t, err) + require.NoError(t, oh.CreateParentDirectories(root)) + }) + + t.Run("StillNoop", func(t *testing.T) { + // All of the provided paths expand to (locations under) + // the root directory. There is thus no need to create + // any output directories. + oh, err := builder.NewOutputHierarchy(&remoteexecution.Command{ + WorkingDirectory: "foo", + OutputDirectories: []string{".."}, + OutputFiles: []string{"../file"}, + OutputPaths: []string{"../path"}, + }) + require.NoError(t, err) + require.NoError(t, oh.CreateParentDirectories(root)) + }) + + t.Run("Success", func(t *testing.T) { + // Create /foo/bar/baz. + root.EXPECT().Mkdir(path.MustNewComponent("foo"), os.FileMode(0o777)) + foo := mock.NewMockParentPopulatableDirectory(ctrl) + root.EXPECT().EnterParentPopulatableDirectory(path.MustNewComponent("foo")).Return(foo, nil) + foo.EXPECT().Mkdir(path.MustNewComponent("bar"), os.FileMode(0o777)) + bar := mock.NewMockParentPopulatableDirectory(ctrl) + foo.EXPECT().EnterParentPopulatableDirectory(path.MustNewComponent("bar")).Return(bar, nil) + bar.EXPECT().Mkdir(path.MustNewComponent("baz"), os.FileMode(0o777)) + bar.EXPECT().Close() + // Create /foo/qux. + foo.EXPECT().Mkdir(path.MustNewComponent("qux"), os.FileMode(0o777)) + foo.EXPECT().Close() + + oh, err := builder.NewOutputHierarchy(&remoteexecution.Command{ + WorkingDirectory: "foo", + OutputDirectories: []string{"bar/baz"}, + OutputFiles: []string{"../foo/qux/xyzzy"}, + }) + require.NoError(t, err) + require.NoError(t, oh.CreateParentDirectories(root)) + }) + + t.Run("SuccessPaths", func(t *testing.T) { + // No /foo/bar/baz since OutputPaths is set. + // Create /alice. 
+ root.EXPECT().Mkdir(path.MustNewComponent("alice"), os.FileMode(0o777)) + + oh, err := builder.NewOutputHierarchy(&remoteexecution.Command{ + WorkingDirectory: "foo", + OutputDirectories: []string{"bar/baz"}, + OutputFiles: []string{"../foo/qux/xyzzy"}, + OutputPaths: []string{"../alice/bob"}, + }) + require.NoError(t, err) + require.NoError(t, oh.CreateParentDirectories(root)) + }) + + t.Run("MkdirFailureParent", func(t *testing.T) { + // Failure to create the parent directory of a location + // where an output file is expected. + root.EXPECT().Mkdir(path.MustNewComponent("foo"), os.FileMode(0o777)) + foo := mock.NewMockParentPopulatableDirectory(ctrl) + root.EXPECT().EnterParentPopulatableDirectory(path.MustNewComponent("foo")).Return(foo, nil) + foo.EXPECT().Mkdir(path.MustNewComponent("bar"), os.FileMode(0o777)).Return(status.Error(codes.Internal, "I/O error")) + foo.EXPECT().Close() + + oh, err := builder.NewOutputHierarchy(&remoteexecution.Command{ + WorkingDirectory: "foo", + OutputFiles: []string{"bar/baz"}, + }) + require.NoError(t, err) + testutil.RequireEqualStatus( + t, + status.Error(codes.Internal, "Failed to create output parent directory \"foo/bar\": I/O error"), + oh.CreateParentDirectories(root)) + }) + + t.Run("MkdirFailureParentExists", func(t *testing.T) { + // This test is identical to the previous, except that + // the error is EEXIST. This should not cause a hard + // failure. 
+ root.EXPECT().Mkdir(path.MustNewComponent("foo"), os.FileMode(0o777)) + foo := mock.NewMockParentPopulatableDirectory(ctrl) + root.EXPECT().EnterParentPopulatableDirectory(path.MustNewComponent("foo")).Return(foo, nil) + foo.EXPECT().Mkdir(path.MustNewComponent("bar"), os.FileMode(0o777)).Return(os.ErrExist) + foo.EXPECT().Close() + + oh, err := builder.NewOutputHierarchy(&remoteexecution.Command{ + WorkingDirectory: "foo", + OutputFiles: []string{"bar/baz"}, + }) + require.NoError(t, err) + require.NoError(t, oh.CreateParentDirectories(root)) + }) + + t.Run("MkdirFailureOutput", func(t *testing.T) { + // Failure to create a location where an output + // directory is expected. + root.EXPECT().Mkdir(path.MustNewComponent("foo"), os.FileMode(0o777)) + foo := mock.NewMockParentPopulatableDirectory(ctrl) + root.EXPECT().EnterParentPopulatableDirectory(path.MustNewComponent("foo")).Return(foo, nil) + foo.EXPECT().Mkdir(path.MustNewComponent("bar"), os.FileMode(0o777)).Return(status.Error(codes.Internal, "I/O error")) + foo.EXPECT().Close() + + oh, err := builder.NewOutputHierarchy(&remoteexecution.Command{ + WorkingDirectory: "foo", + OutputDirectories: []string{"bar"}, + }) + require.NoError(t, err) + testutil.RequireEqualStatus( + t, + status.Error(codes.Internal, "Failed to create output directory \"foo/bar\": I/O error"), + oh.CreateParentDirectories(root)) + }) + + t.Run("MkdirFailureOutputExists", func(t *testing.T) { + // This test is identical to the previous, except that + // the error is EEXIST. This should not cause a hard + // failure. 
+ root.EXPECT().Mkdir(path.MustNewComponent("foo"), os.FileMode(0o777)) + foo := mock.NewMockParentPopulatableDirectory(ctrl) + root.EXPECT().EnterParentPopulatableDirectory(path.MustNewComponent("foo")).Return(foo, nil) + foo.EXPECT().Mkdir(path.MustNewComponent("bar"), os.FileMode(0o777)).Return(os.ErrExist) + foo.EXPECT().Close() + + oh, err := builder.NewOutputHierarchy(&remoteexecution.Command{ + WorkingDirectory: "foo", + OutputDirectories: []string{"bar"}, + }) + require.NoError(t, err) + require.NoError(t, oh.CreateParentDirectories(root)) + }) + + t.Run("EnterFailure", func(t *testing.T) { + root.EXPECT().Mkdir(path.MustNewComponent("foo"), os.FileMode(0o777)) + foo := mock.NewMockParentPopulatableDirectory(ctrl) + root.EXPECT().EnterParentPopulatableDirectory(path.MustNewComponent("foo")).Return(foo, nil) + foo.EXPECT().Mkdir(path.MustNewComponent("bar"), os.FileMode(0o777)) + foo.EXPECT().EnterParentPopulatableDirectory(path.MustNewComponent("bar")).Return(nil, status.Error(codes.Internal, "I/O error")) + foo.EXPECT().Close() + + oh, err := builder.NewOutputHierarchy(&remoteexecution.Command{ + WorkingDirectory: "foo", + OutputDirectories: []string{"bar/baz"}, + }) + require.NoError(t, err) + testutil.RequireEqualStatus( + t, + status.Error(codes.Internal, "Failed to enter output parent directory \"foo/bar\": I/O error"), + oh.CreateParentDirectories(root)) + }) +} + +func TestOutputHierarchyUploadOutputs(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + root := mock.NewMockUploadableDirectory(ctrl) + contentAddressableStorage := mock.NewMockBlobAccess(ctrl) + digestFunction := digest.MustNewDigest("example", remoteexecution.DigestFunction_MD5, "8b1a9953c4611296a827abf8c47804d7", 5).GetDigestFunction() + + t.Run("Noop", func(t *testing.T) { + // Uploading of a build action with no declared outputs + // should not trigger any I/O. 
+ oh, err := builder.NewOutputHierarchy(&remoteexecution.Command{ + WorkingDirectory: ".", + }) + require.NoError(t, err) + var actionResult remoteexecution.ActionResult + require.NoError(t, oh.UploadOutputs(ctx, root, contentAddressableStorage, digestFunction, &actionResult)) + require.Equal(t, remoteexecution.ActionResult{}, actionResult) + }) + + testSuccess := func(t *testing.T, command *remoteexecution.Command, expectedResult remoteexecution.ActionResult) { + // Declare output directories, files and paths. For each + // of these output types, let them match one of the + // valid file types. + foo := mock.NewMockUploadableDirectory(ctrl) + root.EXPECT().EnterUploadableDirectory(path.MustNewComponent("foo")).Return(foo, nil) + + // Calls triggered to obtain the file type of the outputs. + foo.EXPECT().Lstat(path.MustNewComponent("directory-directory")).Return(filesystem.NewFileInfo(path.MustNewComponent("directory-directory"), filesystem.FileTypeDirectory, false), nil) + foo.EXPECT().Lstat(path.MustNewComponent("directory-symlink")).Return(filesystem.NewFileInfo(path.MustNewComponent("directory-symlink"), filesystem.FileTypeSymlink, false), nil) + foo.EXPECT().Lstat(path.MustNewComponent("directory-enoent")).Return(filesystem.FileInfo{}, syscall.ENOENT) + foo.EXPECT().Lstat(path.MustNewComponent("file-regular")).Return(filesystem.NewFileInfo(path.MustNewComponent("file-regular"), filesystem.FileTypeRegularFile, false), nil) + foo.EXPECT().Lstat(path.MustNewComponent("file-executable")).Return(filesystem.NewFileInfo(path.MustNewComponent("file-executable"), filesystem.FileTypeRegularFile, true), nil) + foo.EXPECT().Lstat(path.MustNewComponent("file-symlink")).Return(filesystem.NewFileInfo(path.MustNewComponent("file-symlink"), filesystem.FileTypeSymlink, false), nil) + foo.EXPECT().Lstat(path.MustNewComponent("file-enoent")).Return(filesystem.FileInfo{}, syscall.ENOENT) + 
foo.EXPECT().Lstat(path.MustNewComponent("path-regular")).Return(filesystem.NewFileInfo(path.MustNewComponent("path-regular"), filesystem.FileTypeRegularFile, false), nil) + foo.EXPECT().Lstat(path.MustNewComponent("path-executable")).Return(filesystem.NewFileInfo(path.MustNewComponent("path-executable"), filesystem.FileTypeRegularFile, true), nil) + foo.EXPECT().Lstat(path.MustNewComponent("path-directory")).Return(filesystem.NewFileInfo(path.MustNewComponent("path-directory"), filesystem.FileTypeDirectory, false), nil) + foo.EXPECT().Lstat(path.MustNewComponent("path-symlink")).Return(filesystem.NewFileInfo(path.MustNewComponent("path-symlink"), filesystem.FileTypeSymlink, false), nil) + foo.EXPECT().Lstat(path.MustNewComponent("path-enoent")).Return(filesystem.FileInfo{}, syscall.ENOENT) + + // Inspection/uploading of all non-directory outputs. + foo.EXPECT().Readlink(path.MustNewComponent("directory-symlink")).Return("directory-symlink-target", nil) + foo.EXPECT().UploadFile(ctx, path.MustNewComponent("file-regular"), gomock.Any()). + Return(digest.MustNewDigest("example", remoteexecution.DigestFunction_MD5, "a58c2f2281011ca2e631b39baa1ab657", 12), nil) + foo.EXPECT().UploadFile(ctx, path.MustNewComponent("file-executable"), gomock.Any()). + Return(digest.MustNewDigest("example", remoteexecution.DigestFunction_MD5, "7590e1b46240ecb5ea65a80db7ee6fae", 15), nil) + foo.EXPECT().Readlink(path.MustNewComponent("file-symlink")).Return("file-symlink-target", nil) + foo.EXPECT().UploadFile(ctx, path.MustNewComponent("path-regular"), gomock.Any()). + Return(digest.MustNewDigest("example", remoteexecution.DigestFunction_MD5, "44206648b7bb2f3b0d2ed0c52ad2e269", 12), nil) + foo.EXPECT().UploadFile(ctx, path.MustNewComponent("path-executable"), gomock.Any()). 
+ Return(digest.MustNewDigest("example", remoteexecution.DigestFunction_MD5, "87729325cd08d300fb0e238a3a8da443", 15), nil) + foo.EXPECT().Readlink(path.MustNewComponent("path-symlink")).Return("path-symlink-target", nil) + + // Uploading of /foo/directory-directory. Files with an + // unknown type (UNIX sockets, FIFOs) should be ignored. + // Returning a hard error makes debugging harder (e.g., + // in case the full input root is declared as an output). + directoryDirectory := mock.NewMockUploadableDirectory(ctrl) + foo.EXPECT().EnterUploadableDirectory(path.MustNewComponent("directory-directory")).Return(directoryDirectory, nil) + directoryDirectory.EXPECT().ReadDir().Return([]filesystem.FileInfo{ + filesystem.NewFileInfo(path.MustNewComponent("directory"), filesystem.FileTypeDirectory, false), + filesystem.NewFileInfo(path.MustNewComponent("executable"), filesystem.FileTypeRegularFile, true), + filesystem.NewFileInfo(path.MustNewComponent("other"), filesystem.FileTypeOther, false), + filesystem.NewFileInfo(path.MustNewComponent("regular"), filesystem.FileTypeRegularFile, false), + filesystem.NewFileInfo(path.MustNewComponent("symlink"), filesystem.FileTypeSymlink, false), + }, nil) + directoryDirectoryDirectory := mock.NewMockUploadableDirectory(ctrl) + directoryDirectory.EXPECT().EnterUploadableDirectory(path.MustNewComponent("directory")).Return(directoryDirectoryDirectory, nil) + directoryDirectoryDirectory.EXPECT().ReadDir().Return(nil, nil) + directoryDirectoryDirectory.EXPECT().Close() + directoryDirectory.EXPECT().UploadFile(ctx, path.MustNewComponent("executable"), gomock.Any()). + Return(digest.MustNewDigest("example", remoteexecution.DigestFunction_MD5, "ee7004c7949d83f130592f15d98ca343", 10), nil) + directoryDirectory.EXPECT().UploadFile(ctx, path.MustNewComponent("regular"), gomock.Any()). 
+ Return(digest.MustNewDigest("example", remoteexecution.DigestFunction_MD5, "af37d08ae228a87dc6b265fd1019c97d", 7), nil) + directoryDirectory.EXPECT().Readlink(path.MustNewComponent("symlink")).Return("symlink-target", nil) + directoryDirectory.EXPECT().Close() + contentAddressableStorage.EXPECT().Put( + ctx, + digest.MustNewDigest("example", remoteexecution.DigestFunction_MD5, "55aed4acf40a28132fb2d2de2b5962f0", 184), + gomock.Any()). + DoAndReturn(func(ctx context.Context, digest digest.Digest, b buffer.Buffer) error { + m, err := b.ToProto(&remoteexecution.Tree{}, 10000) + require.NoError(t, err) + testutil.RequireEqualProto(t, &remoteexecution.Tree{ + Root: &remoteexecution.Directory{ + Files: []*remoteexecution.FileNode{ + { + Name: "executable", + Digest: &remoteexecution.Digest{ + Hash: "ee7004c7949d83f130592f15d98ca343", + SizeBytes: 10, + }, + IsExecutable: true, + }, + { + Name: "regular", + Digest: &remoteexecution.Digest{ + Hash: "af37d08ae228a87dc6b265fd1019c97d", + SizeBytes: 7, + }, + }, + }, + Directories: []*remoteexecution.DirectoryNode{ + { + Name: "directory", + Digest: &remoteexecution.Digest{ + Hash: "d41d8cd98f00b204e9800998ecf8427e", + SizeBytes: 0, + }, + }, + }, + Symlinks: []*remoteexecution.SymlinkNode{ + { + Name: "symlink", + Target: "symlink-target", + }, + }, + }, + Children: []*remoteexecution.Directory{ + {}, + }, + }, m) + return nil + }) + + // Uploading of /foo/path-directory. + pathDirectory := mock.NewMockUploadableDirectory(ctrl) + foo.EXPECT().EnterUploadableDirectory(path.MustNewComponent("path-directory")).Return(pathDirectory, nil) + pathDirectory.EXPECT().ReadDir().Return(nil, nil) + pathDirectory.EXPECT().Close() + contentAddressableStorage.EXPECT().Put( + ctx, + digest.MustNewDigest("example", remoteexecution.DigestFunction_MD5, "9dd94c5a4b02914af42e8e6372e0b709", 2), + gomock.Any()). 
+ DoAndReturn(func(ctx context.Context, digest digest.Digest, b buffer.Buffer) error { + m, err := b.ToProto(&remoteexecution.Tree{}, 10000) + require.NoError(t, err) + testutil.RequireEqualProto(t, &remoteexecution.Tree{ + Root: &remoteexecution.Directory{}, + }, m) + return nil + }) + + foo.EXPECT().Close() + + oh, err := builder.NewOutputHierarchy(command) + require.NoError(t, err) + var actionResult remoteexecution.ActionResult + require.NoError(t, oh.UploadOutputs(ctx, root, contentAddressableStorage, digestFunction, &actionResult)) + require.Equal(t, expectedResult, actionResult) + } + + t.Run("Success", func(t *testing.T) { + t.Run("FilesAndDirectories", func(t *testing.T) { + testSuccess(t, &remoteexecution.Command{ + WorkingDirectory: "foo", + OutputDirectories: []string{ + "directory-directory", + "../foo/directory-directory", + "directory-symlink", + "../foo/directory-symlink", + "directory-enoent", + "../foo/directory-enoent", + "path-directory", + "../foo/path-directory", + }, + OutputFiles: []string{ + "file-regular", + "../foo/file-regular", + "file-executable", + "../foo/file-executable", + "file-symlink", + "../foo/file-symlink", + "file-enoent", + "../foo/file-enoent", + "path-regular", + "../foo/path-regular", + "path-executable", + "../foo/path-executable", + "path-symlink", + "../foo/path-symlink", + "path-enoent", + "../foo/path-enoent", + }, + }, remoteexecution.ActionResult{ + OutputDirectories: []*remoteexecution.OutputDirectory{ + { + Path: "directory-directory", + TreeDigest: &remoteexecution.Digest{ + Hash: "55aed4acf40a28132fb2d2de2b5962f0", + SizeBytes: 184, + }, + IsTopologicallySorted: true, + }, + { + Path: "../foo/directory-directory", + TreeDigest: &remoteexecution.Digest{ + Hash: "55aed4acf40a28132fb2d2de2b5962f0", + SizeBytes: 184, + }, + IsTopologicallySorted: true, + }, + { + Path: "path-directory", + TreeDigest: &remoteexecution.Digest{ + Hash: "9dd94c5a4b02914af42e8e6372e0b709", + SizeBytes: 2, + }, + IsTopologicallySorted: 
true, + }, + { + Path: "../foo/path-directory", + TreeDigest: &remoteexecution.Digest{ + Hash: "9dd94c5a4b02914af42e8e6372e0b709", + SizeBytes: 2, + }, + IsTopologicallySorted: true, + }, + }, + OutputDirectorySymlinks: []*remoteexecution.OutputSymlink{ + { + Path: "directory-symlink", + Target: "directory-symlink-target", + }, + { + Path: "../foo/directory-symlink", + Target: "directory-symlink-target", + }, + }, + OutputFiles: []*remoteexecution.OutputFile{ + { + Path: "file-executable", + Digest: &remoteexecution.Digest{ + Hash: "7590e1b46240ecb5ea65a80db7ee6fae", + SizeBytes: 15, + }, + IsExecutable: true, + }, + { + Path: "../foo/file-executable", + Digest: &remoteexecution.Digest{ + Hash: "7590e1b46240ecb5ea65a80db7ee6fae", + SizeBytes: 15, + }, + IsExecutable: true, + }, + { + Path: "file-regular", + Digest: &remoteexecution.Digest{ + Hash: "a58c2f2281011ca2e631b39baa1ab657", + SizeBytes: 12, + }, + }, + { + Path: "../foo/file-regular", + Digest: &remoteexecution.Digest{ + Hash: "a58c2f2281011ca2e631b39baa1ab657", + SizeBytes: 12, + }, + }, + { + Path: "path-executable", + Digest: &remoteexecution.Digest{ + Hash: "87729325cd08d300fb0e238a3a8da443", + SizeBytes: 15, + }, + IsExecutable: true, + }, + { + Path: "../foo/path-executable", + Digest: &remoteexecution.Digest{ + Hash: "87729325cd08d300fb0e238a3a8da443", + SizeBytes: 15, + }, + IsExecutable: true, + }, + { + Path: "path-regular", + Digest: &remoteexecution.Digest{ + Hash: "44206648b7bb2f3b0d2ed0c52ad2e269", + SizeBytes: 12, + }, + }, + { + Path: "../foo/path-regular", + Digest: &remoteexecution.Digest{ + Hash: "44206648b7bb2f3b0d2ed0c52ad2e269", + SizeBytes: 12, + }, + }, + }, + OutputFileSymlinks: []*remoteexecution.OutputSymlink{ + { + Path: "file-symlink", + Target: "file-symlink-target", + }, + { + Path: "../foo/file-symlink", + Target: "file-symlink-target", + }, + { + Path: "path-symlink", + Target: "path-symlink-target", + }, + { + Path: "../foo/path-symlink", + Target: "path-symlink-target", + 
}, + }, + }) + }) + t.Run("Paths", func(t *testing.T) { + testSuccess(t, &remoteexecution.Command{ + WorkingDirectory: "foo", + OutputPaths: []string{ + "file-regular", + "../foo/file-regular", + "file-executable", + "../foo/file-executable", + "file-symlink", + "../foo/file-symlink", + "file-enoent", + "../foo/file-enoent", + "directory-directory", + "../foo/directory-directory", + "directory-symlink", + "../foo/directory-symlink", + "directory-enoent", + "../foo/directory-enoent", + "path-directory", + "../foo/path-directory", + "path-regular", + "../foo/path-regular", + "path-executable", + "../foo/path-executable", + "path-symlink", + "../foo/path-symlink", + "path-enoent", + "../foo/path-enoent", + }, + }, remoteexecution.ActionResult{ + OutputDirectories: []*remoteexecution.OutputDirectory{ + { + Path: "directory-directory", + TreeDigest: &remoteexecution.Digest{ + Hash: "55aed4acf40a28132fb2d2de2b5962f0", + SizeBytes: 184, + }, + IsTopologicallySorted: true, + }, + { + Path: "../foo/directory-directory", + TreeDigest: &remoteexecution.Digest{ + Hash: "55aed4acf40a28132fb2d2de2b5962f0", + SizeBytes: 184, + }, + IsTopologicallySorted: true, + }, + { + Path: "path-directory", + TreeDigest: &remoteexecution.Digest{ + Hash: "9dd94c5a4b02914af42e8e6372e0b709", + SizeBytes: 2, + }, + IsTopologicallySorted: true, + }, + { + Path: "../foo/path-directory", + TreeDigest: &remoteexecution.Digest{ + Hash: "9dd94c5a4b02914af42e8e6372e0b709", + SizeBytes: 2, + }, + IsTopologicallySorted: true, + }, + }, + OutputFiles: []*remoteexecution.OutputFile{ + { + Path: "file-executable", + Digest: &remoteexecution.Digest{ + Hash: "7590e1b46240ecb5ea65a80db7ee6fae", + SizeBytes: 15, + }, + IsExecutable: true, + }, + { + Path: "../foo/file-executable", + Digest: &remoteexecution.Digest{ + Hash: "7590e1b46240ecb5ea65a80db7ee6fae", + SizeBytes: 15, + }, + IsExecutable: true, + }, + { + Path: "file-regular", + Digest: &remoteexecution.Digest{ + Hash: "a58c2f2281011ca2e631b39baa1ab657", 
+ SizeBytes: 12, + }, + }, + { + Path: "../foo/file-regular", + Digest: &remoteexecution.Digest{ + Hash: "a58c2f2281011ca2e631b39baa1ab657", + SizeBytes: 12, + }, + }, + { + Path: "path-executable", + Digest: &remoteexecution.Digest{ + Hash: "87729325cd08d300fb0e238a3a8da443", + SizeBytes: 15, + }, + IsExecutable: true, + }, + { + Path: "../foo/path-executable", + Digest: &remoteexecution.Digest{ + Hash: "87729325cd08d300fb0e238a3a8da443", + SizeBytes: 15, + }, + IsExecutable: true, + }, + { + Path: "path-regular", + Digest: &remoteexecution.Digest{ + Hash: "44206648b7bb2f3b0d2ed0c52ad2e269", + SizeBytes: 12, + }, + }, + { + Path: "../foo/path-regular", + Digest: &remoteexecution.Digest{ + Hash: "44206648b7bb2f3b0d2ed0c52ad2e269", + SizeBytes: 12, + }, + }, + }, + OutputSymlinks: []*remoteexecution.OutputSymlink{ + { + Path: "directory-symlink", + Target: "directory-symlink-target", + }, + { + Path: "../foo/directory-symlink", + Target: "directory-symlink-target", + }, + { + Path: "file-symlink", + Target: "file-symlink-target", + }, + { + Path: "../foo/file-symlink", + Target: "file-symlink-target", + }, + { + Path: "path-symlink", + Target: "path-symlink-target", + }, + { + Path: "../foo/path-symlink", + Target: "path-symlink-target", + }, + }, + }) + }) + }) + + t.Run("RootDirectory", func(t *testing.T) { + // Special case: it is also permitted to add the root + // directory as an REv2.0 output directory. This + // shouldn't cause any Lstat() calls, as the root + // directory always exists. It is also impossible to + // call Lstat() on it, as that would require us to + // traverse upwards. + root.EXPECT().ReadDir().Return(nil, nil) + contentAddressableStorage.EXPECT().Put( + ctx, + digest.MustNewDigest("example", remoteexecution.DigestFunction_MD5, "9dd94c5a4b02914af42e8e6372e0b709", 2), + gomock.Any()). 
+ DoAndReturn(func(ctx context.Context, digest digest.Digest, b buffer.Buffer) error { + m, err := b.ToProto(&remoteexecution.Tree{}, 10000) + require.NoError(t, err) + testutil.RequireEqualProto(t, &remoteexecution.Tree{ + Root: &remoteexecution.Directory{}, + }, m) + return nil + }) + + oh, err := builder.NewOutputHierarchy(&remoteexecution.Command{ + WorkingDirectory: "foo", + OutputDirectories: []string{".."}, + }) + require.NoError(t, err) + var actionResult remoteexecution.ActionResult + require.NoError(t, oh.UploadOutputs(ctx, root, contentAddressableStorage, digestFunction, &actionResult)) + require.Equal(t, remoteexecution.ActionResult{ + OutputDirectories: []*remoteexecution.OutputDirectory{ + { + Path: "..", + TreeDigest: &remoteexecution.Digest{ + Hash: "9dd94c5a4b02914af42e8e6372e0b709", + SizeBytes: 2, + }, + IsTopologicallySorted: true, + }, + }, + }, actionResult) + }) + + t.Run("RootPath", func(t *testing.T) { + // Similar to the previous test, it is also permitted to + // add the root directory as an REv2.1 output path. + root.EXPECT().ReadDir().Return(nil, nil) + contentAddressableStorage.EXPECT().Put( + ctx, + digest.MustNewDigest("example", remoteexecution.DigestFunction_MD5, "9dd94c5a4b02914af42e8e6372e0b709", 2), + gomock.Any()). 
+ DoAndReturn(func(ctx context.Context, digest digest.Digest, b buffer.Buffer) error { + m, err := b.ToProto(&remoteexecution.Tree{}, 10000) + require.NoError(t, err) + testutil.RequireEqualProto(t, &remoteexecution.Tree{ + Root: &remoteexecution.Directory{}, + }, m) + return nil + }) + + oh, err := builder.NewOutputHierarchy(&remoteexecution.Command{ + WorkingDirectory: "foo", + OutputPaths: []string{".."}, + }) + require.NoError(t, err) + var actionResult remoteexecution.ActionResult + require.NoError(t, oh.UploadOutputs(ctx, root, contentAddressableStorage, digestFunction, &actionResult)) + require.Equal(t, remoteexecution.ActionResult{ + OutputDirectories: []*remoteexecution.OutputDirectory{ + { + Path: "..", + TreeDigest: &remoteexecution.Digest{ + Hash: "9dd94c5a4b02914af42e8e6372e0b709", + SizeBytes: 2, + }, + IsTopologicallySorted: true, + }, + }, + }, actionResult) + }) + + t.Run("LstatFailureDirectory", func(t *testing.T) { + // Failure to Lstat() an output directory should cause + // it to be skipped. + root.EXPECT().Lstat(path.MustNewComponent("foo")).Return(filesystem.FileInfo{}, status.Error(codes.Internal, "I/O error")) + + oh, err := builder.NewOutputHierarchy(&remoteexecution.Command{ + WorkingDirectory: "", + OutputDirectories: []string{"foo"}, + }) + require.NoError(t, err) + var actionResult remoteexecution.ActionResult + testutil.RequireEqualStatus( + t, + status.Error(codes.Internal, "Failed to read attributes of output directory \"foo\": I/O error"), + oh.UploadOutputs(ctx, root, contentAddressableStorage, digestFunction, &actionResult)) + require.Equal(t, remoteexecution.ActionResult{}, actionResult) + }) + + t.Run("LstatFailureFile", func(t *testing.T) { + // Failure to Lstat() an output file should cause it to + // be skipped. 
+ root.EXPECT().Lstat(path.MustNewComponent("foo")).Return(filesystem.FileInfo{}, status.Error(codes.Internal, "I/O error")) + + oh, err := builder.NewOutputHierarchy(&remoteexecution.Command{ + WorkingDirectory: "", + OutputFiles: []string{"foo"}, + }) + require.NoError(t, err) + var actionResult remoteexecution.ActionResult + testutil.RequireEqualStatus( + t, + status.Error(codes.Internal, "Failed to read attributes of output file \"foo\": I/O error"), + oh.UploadOutputs(ctx, root, contentAddressableStorage, digestFunction, &actionResult)) + require.Equal(t, remoteexecution.ActionResult{}, actionResult) + }) + + t.Run("LstatFailurePath", func(t *testing.T) { + // Failure to Lstat() an output path should cause it to + // be skipped. + root.EXPECT().Lstat(path.MustNewComponent("foo")).Return(filesystem.FileInfo{}, status.Error(codes.Internal, "I/O error")) + + oh, err := builder.NewOutputHierarchy(&remoteexecution.Command{ + WorkingDirectory: "", + OutputPaths: []string{"foo"}, + }) + require.NoError(t, err) + var actionResult remoteexecution.ActionResult + testutil.RequireEqualStatus( + t, + status.Error(codes.Internal, "Failed to read attributes of output path \"foo\": I/O error"), + oh.UploadOutputs(ctx, root, contentAddressableStorage, digestFunction, &actionResult)) + require.Equal(t, remoteexecution.ActionResult{}, actionResult) + }) + + // TODO: Are there other cases we'd like to unit test? 
+} diff --git a/pkg/builder/prefetching_build_executor.go b/pkg/builder/prefetching_build_executor.go new file mode 100644 index 0000000..b4f4450 --- /dev/null +++ b/pkg/builder/prefetching_build_executor.go @@ -0,0 +1,267 @@ +package builder + +import ( + "context" + "io" + "log" + + remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + "github.com/buildbarn/bb-remote-execution/pkg/cas" + re_filesystem "github.com/buildbarn/bb-remote-execution/pkg/filesystem" + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/access" + "github.com/buildbarn/bb-remote-execution/pkg/proto/remoteworker" + "github.com/buildbarn/bb-storage/pkg/blobstore" + "github.com/buildbarn/bb-storage/pkg/blobstore/buffer" + "github.com/buildbarn/bb-storage/pkg/digest" + "github.com/buildbarn/bb-storage/pkg/filesystem/path" + "github.com/buildbarn/bb-storage/pkg/proto/fsac" + "github.com/buildbarn/bb-storage/pkg/util" + + "golang.org/x/sync/errgroup" + "golang.org/x/sync/semaphore" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/anypb" +) + +type prefetchingBuildExecutor struct { + BuildExecutor + contentAddressableStorage blobstore.BlobAccess + directoryFetcher cas.DirectoryFetcher + fileReadSemaphore *semaphore.Weighted + fileSystemAccessCache blobstore.BlobAccess + maximumMessageSizeBytes int + bloomFilterBitsPerElement int + bloomFilterMaximumSizeBytes int + emptyProfile *fsac.FileSystemAccessProfile +} + +// NewPrefetchingBuildExecutor creates a decorator for BuildExecutor +// that as the action gets executed, prefetches files that are part of +// the action's input root from the Content Addressable Storage (CAS). +// It determines which files to download by making use of Bloom filters +// stored in the File System Access Cache (FSAC). +// +// It also monitors the file system access of the action to be executed. 
+// If this yields a Bloom filter that differs from the one retrieved from +// the FSAC, it stores an updated version. This ensures that the next +// time a similar action is run, only the files that are expected to be +// used are downloaded. +// +// This decorator is only of use on workers that use a virtual build +// directory (FUSE, NFSv4). On workers that use native build +// directories, the monitor is ignored, leading to empty Bloom filters +// being stored. +func NewPrefetchingBuildExecutor(buildExecutor BuildExecutor, contentAddressableStorage blobstore.BlobAccess, directoryFetcher cas.DirectoryFetcher, fileReadSemaphore *semaphore.Weighted, fileSystemAccessCache blobstore.BlobAccess, maximumMessageSizeBytes, bloomFilterBitsPerElement, bloomFilterMaximumSizeBytes int) BuildExecutor { + be := &prefetchingBuildExecutor{ + BuildExecutor: buildExecutor, + contentAddressableStorage: contentAddressableStorage, + directoryFetcher: directoryFetcher, + fileReadSemaphore: fileReadSemaphore, + fileSystemAccessCache: fileSystemAccessCache, + maximumMessageSizeBytes: maximumMessageSizeBytes, + bloomFilterBitsPerElement: bloomFilterBitsPerElement, + bloomFilterMaximumSizeBytes: bloomFilterMaximumSizeBytes, + } + be.emptyProfile = be.computeProfile(access.NewBloomFilterComputingUnreadDirectoryMonitor()) + return be +} + +func (be *prefetchingBuildExecutor) computeProfile(monitor *access.BloomFilterComputingUnreadDirectoryMonitor) *fsac.FileSystemAccessProfile { + bloomFilter, bloomFilterHashFunctions := monitor.GetBloomFilter(be.bloomFilterBitsPerElement, be.bloomFilterMaximumSizeBytes) + return &fsac.FileSystemAccessProfile{ + BloomFilter: bloomFilter, + BloomFilterHashFunctions: bloomFilterHashFunctions, + } +} + +func (be *prefetchingBuildExecutor) Execute(ctx context.Context, filePool re_filesystem.FilePool, monitor access.UnreadDirectoryMonitor, digestFunction digest.Function, request *remoteworker.DesiredState_Executing, executionStateUpdates chan<- 
*remoteworker.CurrentState_Executing) *remoteexecution.ExecuteResponse { + // Obtain the reduced Action digest, which is needed to read + // from, and write to the File System Access Cache (FSAC). + action := request.Action + if action == nil { + response := NewDefaultExecuteResponse(request) + attachErrorToExecuteResponse(response, status.Error(codes.InvalidArgument, "Request does not contain an action")) + return response + } + reducedActionDigest, err := blobstore.GetReducedActionDigest(digestFunction, action) + if err != nil { + response := NewDefaultExecuteResponse(request) + attachErrorToExecuteResponse(response, util.StatusWrap(err, "Cannot compute reduced action digest")) + return response + } + + group, groupCtx := errgroup.WithContext(ctx) + prefetchCtx, cancelPrefetch := context.WithCancel(groupCtx) + + // Fetch the profile from the File System Access Cache. If one + // exists, traverse the input root and prefetch any files and + // directories matched by the Bloom filter. + var existingProfile *fsac.FileSystemAccessProfile + group.Go(func() error { + profileMessage, err := be.fileSystemAccessCache.Get(groupCtx, reducedActionDigest).ToProto(&fsac.FileSystemAccessProfile{}, be.maximumMessageSizeBytes) + if err != nil { + if status.Code(err) == codes.NotFound { + existingProfile = be.emptyProfile + return nil + } + return util.StatusWrap(err, "Failed to fetch file system access profile") + } + existingProfile = profileMessage.(*fsac.FileSystemAccessProfile) + + bloomFilter, err := access.NewBloomFilterReader(existingProfile.BloomFilter, existingProfile.BloomFilterHashFunctions) + if err != nil { + // Don't fail if the profile is malformed, as + // this prevents forward progress. Simply ignore + // its contents, so that it is replaced when the + // action completes. 
+ log.Printf("Cannot read Bloom filter for %s: %s", reducedActionDigest.String(), err) + return nil + } + + directoryPrefetcher := directoryPrefetcher{ + context: prefetchCtx, + group: group, + bloomFilter: bloomFilter, + digestFunction: digestFunction, + contentAddressableStorage: be.contentAddressableStorage, + directoryFetcher: be.directoryFetcher, + fileReadSemaphore: be.fileReadSemaphore, + } + // Prefetching may be interrupted if the action + // completes quickly. These cancelation errors should + // not propagate to the caller. + if err := directoryPrefetcher.prefetchRecursively(nil, access.RootPathHashes, action.InputRootDigest); status.Code(err) != codes.Canceled { + return err + } + return nil + }) + + // While prefetching is happening, already launch the build + // action. It may initially run slower due to local cache + // misses, but should speed up as prefetching nears completion. + bloomFilterMonitor := access.NewBloomFilterComputingUnreadDirectoryMonitor() + var response *remoteexecution.ExecuteResponse + group.Go(func() error { + response = be.BuildExecutor.Execute(groupCtx, filePool, bloomFilterMonitor, digestFunction, request, executionStateUpdates) + cancelPrefetch() + if !executeResponseIsSuccessful(response) { + return dontReadFromFSACError{} + } + return nil + }) + + if err := group.Wait(); err == nil { + // Action completed successfully. Store an updated + // profile in the File System Access Cache if it is + // different from the one fetched previously. 
+ if newProfile := be.computeProfile(bloomFilterMonitor); !proto.Equal(existingProfile, newProfile) { + if err := be.fileSystemAccessCache.Put(ctx, reducedActionDigest, buffer.NewProtoBufferFromProto(newProfile, buffer.UserProvided)); err != nil { + attachErrorToExecuteResponse(response, util.StatusWrap(err, "Failed to store file system access profile")) + } + } + } else if err != (dontReadFromFSACError{}) { + response.Status = status.Convert(err).Proto() + } + + if resourceUsage, err := anypb.New(bloomFilterMonitor.GetInputRootResourceUsage()); err == nil { + response.Result.ExecutionMetadata.AuxiliaryMetadata = append(response.Result.ExecutionMetadata.AuxiliaryMetadata, resourceUsage) + } else { + attachErrorToExecuteResponse(response, util.StatusWrap(err, "Failed to marshal input root resource usage")) + } + return response +} + +// dontReadFromFSACError is a placeholder error type. If an action +// completes unsuccessfully before the read against the File System +// Access Cache (FSAC) completes, there is no point in continuing that. +// This type is used to force the error group to cancel its context, +// causing the read to be interrupted. +type dontReadFromFSACError struct{} + +func (dontReadFromFSACError) Error() string { + panic("This error is merely a placeholder, and should not be returned") +} + +// directoryPrefetcher is used by prefetchingBuildExecutor to +// recursively traverse the input root, only downloading parts of the +// input root that are matched by a Bloom filter. 
+type directoryPrefetcher struct { + context context.Context + group *errgroup.Group + bloomFilter *access.BloomFilterReader + digestFunction digest.Function + contentAddressableStorage blobstore.BlobAccess + directoryFetcher cas.DirectoryFetcher + fileReadSemaphore *semaphore.Weighted +} + +func (dp *directoryPrefetcher) shouldPrefetch(pathHashes access.PathHashes) bool { + return dp.bloomFilter.Contains(pathHashes) +} + +func (dp *directoryPrefetcher) prefetchRecursively(pathTrace *path.Trace, directoryPathHashes access.PathHashes, rawDirectoryDigest *remoteexecution.Digest) error { + if !dp.shouldPrefetch(directoryPathHashes) { + // Neither this directory nor any of its children are + // expected to be accessed. + return nil + } + + directoryDigest, err := dp.digestFunction.NewDigestFromProto(rawDirectoryDigest) + if err != nil { + return util.StatusWrapf(err, "Failed to parse digest for directory %#v", pathTrace.String()) + } + directory, err := dp.directoryFetcher.GetDirectory(dp.context, directoryDigest) + if err != nil { + return util.StatusWrapf(err, "Failed to prefetch directory %#v", pathTrace.String()) + } + + // Directories are traversed sequentially to prevent unbounded + // concurrency. Therefore, schedule prefetching of files before + // directories, so that concurrency is introduced as soon as + // possible. + for _, file := range directory.Files { + component, ok := path.NewComponent(file.Name) + if !ok { + return status.Errorf(codes.InvalidArgument, "File %#v in directory %#v has an invalid name", file.Name, pathTrace.String()) + } + if dp.shouldPrefetch(directoryPathHashes.AppendComponent(component)) { + childPathTrace := pathTrace.Append(component) + fileDigest, err := dp.digestFunction.NewDigestFromProto(file.Digest) + if err != nil { + return util.StatusWrapf(err, "Failed to parse digest for file %#v", childPathTrace.String()) + } + + // Download files at a globally bounded concurrency. 
+ // + // TODO: We currently do a 1 byte read against + // the file, as a BlobAccess.Prefetch() doesn't + // carry its weight just yet. We should revisit + // this once we support chunking/decomposition, + // as in that case it is insufficient. + if dp.context.Err() != nil || dp.fileReadSemaphore.Acquire(dp.context, 1) != nil { + return util.StatusFromContext(dp.context) + } + dp.group.Go(func() error { + var b [1]byte + _, err := dp.contentAddressableStorage.Get(dp.context, fileDigest).ReadAt(b[:], 0) + dp.fileReadSemaphore.Release(1) + if err != nil && err != io.EOF && status.Code(err) != codes.Canceled { + return util.StatusWrapf(err, "Failed to prefetch file %#v", childPathTrace.String()) + } + return nil + }) + } + } + for _, directory := range directory.Directories { + component, ok := path.NewComponent(directory.Name) + if !ok { + return status.Errorf(codes.InvalidArgument, "Directory %#v in directory %#v has an invalid name", directory.Name, pathTrace.String()) + } + if err := dp.prefetchRecursively(pathTrace.Append(component), directoryPathHashes.AppendComponent(component), directory.Digest); err != nil { + return err + } + } + return nil +} diff --git a/pkg/builder/prefetching_build_executor_test.go b/pkg/builder/prefetching_build_executor_test.go new file mode 100644 index 0000000..d7f4d63 --- /dev/null +++ b/pkg/builder/prefetching_build_executor_test.go @@ -0,0 +1,465 @@ +package builder_test + +import ( + "context" + "testing" + + remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + "github.com/buildbarn/bb-remote-execution/internal/mock" + "github.com/buildbarn/bb-remote-execution/pkg/builder" + "github.com/buildbarn/bb-remote-execution/pkg/filesystem" + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/access" + "github.com/buildbarn/bb-remote-execution/pkg/proto/remoteworker" + "github.com/buildbarn/bb-remote-execution/pkg/proto/resourceusage" + "github.com/buildbarn/bb-storage/pkg/blobstore/buffer" + 
"github.com/buildbarn/bb-storage/pkg/digest" + "github.com/buildbarn/bb-storage/pkg/proto/fsac" + "github.com/buildbarn/bb-storage/pkg/testutil" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" + + "golang.org/x/sync/semaphore" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/anypb" +) + +func TestPrefetchingBuildExecutor(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + baseBuildExecutor := mock.NewMockBuildExecutor(ctrl) + contentAddressableStorage := mock.NewMockBlobAccess(ctrl) + directoryFetcher := mock.NewMockDirectoryFetcher(ctrl) + fileReadSemaphore := semaphore.NewWeighted(1) + fileSystemAccessCache := mock.NewMockBlobAccess(ctrl) + buildExecutor := builder.NewPrefetchingBuildExecutor( + baseBuildExecutor, + contentAddressableStorage, + directoryFetcher, + fileReadSemaphore, + fileSystemAccessCache, + /* maximumMessageSizeBytes = */ 10000, + /* bloomFilterBitsPerElement = */ 10, + /* bloomFilterMaximumSizeBytes = */ 1000) + + filePool := mock.NewMockFilePool(ctrl) + baseMonitor := mock.NewMockUnreadDirectoryMonitor(ctrl) + digestFunction := digest.MustNewFunction("hello", remoteexecution.DigestFunction_MD5) + executionStateUpdates := make(chan<- *remoteworker.CurrentState_Executing) + + defaultInputRootResourceUsage, err := anypb.New(&resourceusage.InputRootResourceUsage{ + DirectoriesResolved: 1, + DirectoriesRead: 0, + FilesRead: 0, + }) + require.NoError(t, err) + + t.Run("ActionMissing", func(t *testing.T) { + // If no Action is present, there is no way to compute + // the reduced Action digest that's needed to fetch the + // profile. 
+ testutil.RequireEqualProto( + t, + &remoteexecution.ExecuteResponse{ + Result: &remoteexecution.ActionResult{ + ExecutionMetadata: &remoteexecution.ExecutedActionMetadata{}, + }, + Status: status.New(codes.InvalidArgument, "Request does not contain an action").Proto(), + }, + buildExecutor.Execute( + ctx, + filePool, + baseMonitor, + digestFunction, + &remoteworker.DesiredState_Executing{}, + executionStateUpdates)) + }) + + // The request that will be used in the tests below, and the + // reduced action digest that corresponds with it. + exampleRequest := &remoteworker.DesiredState_Executing{ + Action: &remoteexecution.Action{ + CommandDigest: &remoteexecution.Digest{ + Hash: "e72499b7c6de12b7ad046541f6de8beb", + SizeBytes: 123, + }, + InputRootDigest: &remoteexecution.Digest{ + Hash: "095afbd64b6358665546558583c64d38", + SizeBytes: 456, + }, + Platform: &remoteexecution.Platform{ + Properties: []*remoteexecution.Platform_Property{ + {Name: "os", Value: "linux"}, + }, + }, + }, + } + exampleReducedActionDigest := digest.MustNewDigest("hello", remoteexecution.DigestFunction_MD5, "8f4c066b7911c44acae9e11f42889828", 53) + + t.Run("NonZeroExitCode", func(t *testing.T) { + // If the action fails with a non-zero exit code while + // we're still trying to load the profile, there's no + // point in completing the request. We can return + // immediately. + response := &remoteexecution.ExecuteResponse{ + Result: &remoteexecution.ActionResult{ + ExecutionMetadata: &remoteexecution.ExecutedActionMetadata{}, + ExitCode: 12, + }, + } + baseBuildExecutor.EXPECT().Execute( + gomock.Any(), + filePool, + gomock.Any(), + digestFunction, + testutil.EqProto(t, exampleRequest), + executionStateUpdates, + ).Return(response) + fileSystemAccessCache.EXPECT().Get(gomock.Any(), exampleReducedActionDigest). 
+ DoAndReturn(func(ctx context.Context, blobDigest digest.Digest) buffer.Buffer { + <-ctx.Done() + require.Equal(t, context.Canceled, ctx.Err()) + return buffer.NewBufferFromError(status.Error(codes.Canceled, "Request cancelled")) + }) + + testutil.RequireEqualProto( + t, + response, + buildExecutor.Execute( + ctx, + filePool, + baseMonitor, + digestFunction, + exampleRequest, + executionStateUpdates)) + }) + + t.Run("ExecutionFailure", func(t *testing.T) { + // The same holds if execution fails due to some kind of + // infrastructure issue. + response := &remoteexecution.ExecuteResponse{ + Result: &remoteexecution.ActionResult{ + ExecutionMetadata: &remoteexecution.ExecutedActionMetadata{}, + }, + Status: status.New(codes.Internal, "Cannot fork child process").Proto(), + } + baseBuildExecutor.EXPECT().Execute( + gomock.Any(), + filePool, + gomock.Any(), + digestFunction, + testutil.EqProto(t, exampleRequest), + executionStateUpdates, + ).Return(response) + fileSystemAccessCache.EXPECT().Get(gomock.Any(), exampleReducedActionDigest). + DoAndReturn(func(ctx context.Context, blobDigest digest.Digest) buffer.Buffer { + <-ctx.Done() + require.Equal(t, context.Canceled, ctx.Err()) + return buffer.NewBufferFromError(status.Error(codes.Canceled, "Request cancelled")) + }) + + testutil.RequireEqualProto( + t, + response, + buildExecutor.Execute( + ctx, + filePool, + baseMonitor, + digestFunction, + exampleRequest, + executionStateUpdates)) + }) + + t.Run("FSACGetError", func(t *testing.T) { + // Errors reading from the File System Access Cache + // (FSAC) should be propagated. It should cause the + // execution of the action to be cancelled immediately. 
+ baseBuildExecutor.EXPECT().Execute( + gomock.Any(), + filePool, + gomock.Any(), + digestFunction, + testutil.EqProto(t, exampleRequest), + executionStateUpdates, + ).DoAndReturn(func(ctx context.Context, filePool filesystem.FilePool, monitor access.UnreadDirectoryMonitor, digestFunction digest.Function, request *remoteworker.DesiredState_Executing, executionStateUpdates chan<- *remoteworker.CurrentState_Executing) *remoteexecution.ExecuteResponse { + <-ctx.Done() + require.Equal(t, context.Canceled, ctx.Err()) + return &remoteexecution.ExecuteResponse{ + Result: &remoteexecution.ActionResult{ + ExecutionMetadata: &remoteexecution.ExecutedActionMetadata{}, + }, + Status: status.New(codes.Canceled, "Execution cancelled").Proto(), + } + }) + fileSystemAccessCache.EXPECT().Get(gomock.Any(), exampleReducedActionDigest). + Return(buffer.NewBufferFromError(status.Error(codes.Internal, "Storage offline"))) + + testutil.RequireEqualProto( + t, + &remoteexecution.ExecuteResponse{ + Result: &remoteexecution.ActionResult{ + ExecutionMetadata: &remoteexecution.ExecutedActionMetadata{ + AuxiliaryMetadata: []*anypb.Any{defaultInputRootResourceUsage}, + }, + }, + Status: status.New(codes.Internal, "Failed to fetch file system access profile: Storage offline").Proto(), + }, + buildExecutor.Execute( + ctx, + filePool, + baseMonitor, + digestFunction, + exampleRequest, + executionStateUpdates)) + }) + + t.Run("DirectoryFetcherGetError", func(t *testing.T) { + // Errors fetching directories from the Content + // Addressable Storage (CAS) should be propagated. 
+ baseBuildExecutor.EXPECT().Execute( + gomock.Any(), + filePool, + gomock.Any(), + digestFunction, + testutil.EqProto(t, exampleRequest), + executionStateUpdates, + ).DoAndReturn(func(ctx context.Context, filePool filesystem.FilePool, monitor access.UnreadDirectoryMonitor, digestFunction digest.Function, request *remoteworker.DesiredState_Executing, executionStateUpdates chan<- *remoteworker.CurrentState_Executing) *remoteexecution.ExecuteResponse { + <-ctx.Done() + require.Equal(t, context.Canceled, ctx.Err()) + return &remoteexecution.ExecuteResponse{ + Result: &remoteexecution.ActionResult{ + ExecutionMetadata: &remoteexecution.ExecutedActionMetadata{}, + }, + Status: status.New(codes.Canceled, "Execution cancelled").Proto(), + } + }) + fileSystemAccessCache.EXPECT().Get(gomock.Any(), exampleReducedActionDigest). + Return(buffer.NewProtoBufferFromProto(&fsac.FileSystemAccessProfile{ + BloomFilter: []byte{0xff}, + BloomFilterHashFunctions: 1, + }, buffer.UserProvided)) + directoryFetcher.EXPECT().GetDirectory(gomock.Any(), digest.MustNewDigest("hello", remoteexecution.DigestFunction_MD5, "095afbd64b6358665546558583c64d38", 456)). + Return(nil, status.Error(codes.Internal, "Storage offline")) + + testutil.RequireEqualProto( + t, + &remoteexecution.ExecuteResponse{ + Result: &remoteexecution.ActionResult{ + ExecutionMetadata: &remoteexecution.ExecutedActionMetadata{ + AuxiliaryMetadata: []*anypb.Any{defaultInputRootResourceUsage}, + }, + }, + Status: status.New(codes.Internal, "Failed to prefetch directory \".\": Storage offline").Proto(), + }, + buildExecutor.Execute( + ctx, + filePool, + baseMonitor, + digestFunction, + exampleRequest, + executionStateUpdates)) + }) + + t.Run("ContentAddressableStorageGetError", func(t *testing.T) { + // Similarly, errors fetching files from the Content + // Addressable Storage (CAS) should be propagated. 
+ baseBuildExecutor.EXPECT().Execute( + gomock.Any(), + filePool, + gomock.Any(), + digestFunction, + testutil.EqProto(t, exampleRequest), + executionStateUpdates, + ).DoAndReturn(func(ctx context.Context, filePool filesystem.FilePool, monitor access.UnreadDirectoryMonitor, digestFunction digest.Function, request *remoteworker.DesiredState_Executing, executionStateUpdates chan<- *remoteworker.CurrentState_Executing) *remoteexecution.ExecuteResponse { + <-ctx.Done() + require.Equal(t, context.Canceled, ctx.Err()) + return &remoteexecution.ExecuteResponse{ + Result: &remoteexecution.ActionResult{ + ExecutionMetadata: &remoteexecution.ExecutedActionMetadata{}, + }, + Status: status.New(codes.Canceled, "Execution cancelled").Proto(), + } + }) + fileSystemAccessCache.EXPECT().Get(gomock.Any(), exampleReducedActionDigest). + Return(buffer.NewProtoBufferFromProto(&fsac.FileSystemAccessProfile{ + BloomFilter: []byte{0xff}, + BloomFilterHashFunctions: 1, + }, buffer.UserProvided)) + directoryFetcher.EXPECT().GetDirectory(gomock.Any(), digest.MustNewDigest("hello", remoteexecution.DigestFunction_MD5, "095afbd64b6358665546558583c64d38", 456)). + Return(&remoteexecution.Directory{ + Files: []*remoteexecution.FileNode{ + { + Name: "hello.txt", + Digest: &remoteexecution.Digest{ + Hash: "3ffe1ce0624ece24e5d9b31c2342a6d4", + SizeBytes: 200, + }, + }, + }, + }, nil) + contentAddressableStorage.EXPECT().Get(gomock.Any(), digest.MustNewDigest("hello", remoteexecution.DigestFunction_MD5, "3ffe1ce0624ece24e5d9b31c2342a6d4", 200)). 
+ Return(buffer.NewBufferFromError(status.Error(codes.Internal, "Storage offline"))) + + testutil.RequireEqualProto( + t, + &remoteexecution.ExecuteResponse{ + Result: &remoteexecution.ActionResult{ + ExecutionMetadata: &remoteexecution.ExecutedActionMetadata{ + AuxiliaryMetadata: []*anypb.Any{defaultInputRootResourceUsage}, + }, + }, + Status: status.New(codes.Internal, "Failed to prefetch file \"hello.txt\": Storage offline").Proto(), + }, + buildExecutor.Execute( + ctx, + filePool, + baseMonitor, + digestFunction, + exampleRequest, + executionStateUpdates)) + }) + + t.Run("FSACPutError", func(t *testing.T) { + // If the Bloom filter stored in the profile does not + // match with the paths accessed during execution, we + // should attempt to write an updated profile. Failures + // writing this profile should be propagated. + baseBuildExecutor.EXPECT().Execute( + gomock.Any(), + filePool, + gomock.Any(), + digestFunction, + testutil.EqProto(t, exampleRequest), + executionStateUpdates, + ).Return(&remoteexecution.ExecuteResponse{ + Result: &remoteexecution.ActionResult{ + ExecutionMetadata: &remoteexecution.ExecutedActionMetadata{}, + }, + }) + fileSystemAccessCache.EXPECT().Get(gomock.Any(), exampleReducedActionDigest). + Return(buffer.NewProtoBufferFromProto(&fsac.FileSystemAccessProfile{ + BloomFilter: []byte{0x00, 0x20}, + BloomFilterHashFunctions: 1, + }, buffer.UserProvided)) + fileSystemAccessCache.EXPECT().Put(gomock.Any(), exampleReducedActionDigest, gomock.Any()). 
+			DoAndReturn(func(ctx context.Context, blobDigest digest.Digest, b buffer.Buffer) error {
+				b.Discard()
+				return status.Error(codes.Internal, "Storage offline")
+			})
+
+		testutil.RequireEqualProto(
+			t,
+			&remoteexecution.ExecuteResponse{
+				Result: &remoteexecution.ActionResult{
+					ExecutionMetadata: &remoteexecution.ExecutedActionMetadata{
+						AuxiliaryMetadata: []*anypb.Any{defaultInputRootResourceUsage},
+					},
+				},
+				Status: status.New(codes.Internal, "Failed to store file system access profile: Storage offline").Proto(),
+			},
+			buildExecutor.Execute(
+				ctx,
+				filePool,
+				baseMonitor,
+				digestFunction,
+				exampleRequest,
+				executionStateUpdates))
+	})
+
+	t.Run("FSACPutSkipped", func(t *testing.T) {
+		// If the profile matches what was read from the FSAC,
+		// then there is no need to store an updated profile.
+		baseBuildExecutor.EXPECT().Execute(
+			gomock.Any(),
+			filePool,
+			gomock.Any(),
+			digestFunction,
+			testutil.EqProto(t, exampleRequest),
+			executionStateUpdates,
+		).Return(&remoteexecution.ExecuteResponse{
+			Result: &remoteexecution.ActionResult{
+				ExecutionMetadata: &remoteexecution.ExecutedActionMetadata{},
+			},
+		})
+		fileSystemAccessCache.EXPECT().Get(gomock.Any(), exampleReducedActionDigest).
+			Return(buffer.NewProtoBufferFromProto(&fsac.FileSystemAccessProfile{
+				BloomFilter:              []byte{0x80},
+				BloomFilterHashFunctions: 1,
+			}, buffer.UserProvided))
+
+		testutil.RequireEqualProto(
+			t,
+			&remoteexecution.ExecuteResponse{
+				Result: &remoteexecution.ActionResult{
+					ExecutionMetadata: &remoteexecution.ExecutedActionMetadata{
+						AuxiliaryMetadata: []*anypb.Any{defaultInputRootResourceUsage},
+					},
+				},
+			},
+			buildExecutor.Execute(
+				ctx,
+				filePool,
+				baseMonitor,
+				digestFunction,
+				exampleRequest,
+				executionStateUpdates))
+	})
+
+	t.Run("FSACPutSuccess", func(t *testing.T) {
+		// Successfully overwrite a Bloom filter that was out of
+		// sync with what's observed while executing.
+ baseBuildExecutor.EXPECT().Execute( + gomock.Any(), + filePool, + gomock.Any(), + digestFunction, + testutil.EqProto(t, exampleRequest), + executionStateUpdates, + ).DoAndReturn(func(ctx context.Context, filePool filesystem.FilePool, monitor access.UnreadDirectoryMonitor, digestFunction digest.Function, request *remoteworker.DesiredState_Executing, executionStateUpdates chan<- *remoteworker.CurrentState_Executing) *remoteexecution.ExecuteResponse { + monitor.ReadDirectory() + return &remoteexecution.ExecuteResponse{ + Result: &remoteexecution.ActionResult{ + ExecutionMetadata: &remoteexecution.ExecutedActionMetadata{}, + }, + } + }) + fileSystemAccessCache.EXPECT().Get(gomock.Any(), exampleReducedActionDigest). + Return(buffer.NewProtoBufferFromProto(&fsac.FileSystemAccessProfile{ + BloomFilter: []byte{0x80}, + BloomFilterHashFunctions: 1, + }, buffer.UserProvided)) + fileSystemAccessCache.EXPECT().Put(gomock.Any(), exampleReducedActionDigest, gomock.Any()). + DoAndReturn(func(ctx context.Context, blobDigest digest.Digest, b buffer.Buffer) error { + profile, err := b.ToProto(&fsac.FileSystemAccessProfile{}, 10000) + require.NoError(t, err) + testutil.RequireEqualProto(t, &fsac.FileSystemAccessProfile{ + BloomFilter: []byte{0x0b, 0x2a}, + BloomFilterHashFunctions: 9, + }, profile) + return nil + }) + + inputRootResourceUsage, err := anypb.New(&resourceusage.InputRootResourceUsage{ + DirectoriesResolved: 1, + DirectoriesRead: 1, + FilesRead: 0, + }) + require.NoError(t, err) + testutil.RequireEqualProto( + t, + &remoteexecution.ExecuteResponse{ + Result: &remoteexecution.ActionResult{ + ExecutionMetadata: &remoteexecution.ExecutedActionMetadata{ + AuxiliaryMetadata: []*anypb.Any{inputRootResourceUsage}, + }, + }, + }, + buildExecutor.Execute( + ctx, + filePool, + baseMonitor, + digestFunction, + exampleRequest, + executionStateUpdates)) + }) +} diff --git a/pkg/builder/root_build_directory_creator.go b/pkg/builder/root_build_directory_creator.go new file mode 
100644 index 0000000..89a775e --- /dev/null +++ b/pkg/builder/root_build_directory_creator.go @@ -0,0 +1,38 @@ +package builder + +import ( + "context" + + "github.com/buildbarn/bb-storage/pkg/digest" + "github.com/buildbarn/bb-storage/pkg/filesystem/path" +) + +type rootBuildDirectoryCreator struct { + buildDirectory BuildDirectory +} + +// NewRootBuildDirectoryCreator is a BuildDirectoryCreator that +// repeatedly hands out a single directory present on the current +// system. Additional decorators are used to run builds in +// subdirectories, so that build actions may run in parallel. +func NewRootBuildDirectoryCreator(buildDirectory BuildDirectory) BuildDirectoryCreator { + dc := &rootBuildDirectoryCreator{ + buildDirectory: rootBuildDirectory{ + BuildDirectory: buildDirectory, + }, + } + return dc +} + +func (dc *rootBuildDirectoryCreator) GetBuildDirectory(ctx context.Context, actionDigest digest.Digest, mayRunInParallel bool) (BuildDirectory, *path.Trace, error) { + return dc.buildDirectory, nil, nil +} + +type rootBuildDirectory struct { + BuildDirectory +} + +func (d rootBuildDirectory) Close() error { + // Never call Close() on the root directory, as it will be reused. 
+ return nil +} diff --git a/pkg/builder/root_build_directory_creator_test.go b/pkg/builder/root_build_directory_creator_test.go new file mode 100644 index 0000000..f1c4410 --- /dev/null +++ b/pkg/builder/root_build_directory_creator_test.go @@ -0,0 +1,47 @@ +package builder_test + +import ( + "context" + "os" + "testing" + + remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + "github.com/buildbarn/bb-remote-execution/internal/mock" + "github.com/buildbarn/bb-remote-execution/pkg/builder" + "github.com/buildbarn/bb-storage/pkg/digest" + "github.com/buildbarn/bb-storage/pkg/filesystem/path" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" +) + +func TestRootBuildDirectoryCreator(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + mockBuildDirectory := mock.NewMockBuildDirectory(ctrl) + buildDirectoryCreator := builder.NewRootBuildDirectoryCreator(mockBuildDirectory) + + // Run a simple build action that only performs an Mkdir() call. + // Once terminated, the underlying build directory should not be + // closed, as it is reused by the next build action. + mockBuildDirectory.EXPECT().Mkdir(path.MustNewComponent("hello"), os.FileMode(0o700)) + buildDirectory, buildDirectoryPath, err := buildDirectoryCreator.GetBuildDirectory( + ctx, + digest.MustNewDigest("debian8", remoteexecution.DigestFunction_SHA256, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", 0), + true) + require.NoError(t, err) + require.Nil(t, buildDirectoryPath) + require.NoError(t, buildDirectory.Mkdir(path.MustNewComponent("hello"), os.FileMode(0o700))) + buildDirectory.Close() + + // Run an action similar to the previous one. It should be + // applied against the same underlying build directory. 
+ mockBuildDirectory.EXPECT().Mkdir(path.MustNewComponent("world"), os.FileMode(0o700)) + buildDirectory, buildDirectoryPath, err = buildDirectoryCreator.GetBuildDirectory( + ctx, + digest.MustNewDigest("freebsd", remoteexecution.DigestFunction_SHA256, "7609128715518308672067aab169e24944ead24e3d732aab8a8f0b7013a65564", 5), + true) + require.NoError(t, err) + require.Nil(t, buildDirectoryPath) + require.NoError(t, buildDirectory.Mkdir(path.MustNewComponent("world"), os.FileMode(0o700))) + buildDirectory.Close() +} diff --git a/pkg/builder/shared_build_directory_creator.go b/pkg/builder/shared_build_directory_creator.go new file mode 100644 index 0000000..80aa511 --- /dev/null +++ b/pkg/builder/shared_build_directory_creator.go @@ -0,0 +1,115 @@ +package builder + +import ( + "context" + "log" + "strconv" + "sync/atomic" + + "github.com/buildbarn/bb-storage/pkg/digest" + "github.com/buildbarn/bb-storage/pkg/filesystem/path" + "github.com/buildbarn/bb-storage/pkg/util" + + "google.golang.org/grpc/codes" +) + +type sharedBuildDirectoryCreator struct { + base BuildDirectoryCreator + nextParallelActionID *atomic.Uint64 +} + +// NewSharedBuildDirectoryCreator is an adapter for +// BuildDirectoryCreator that causes build actions to be executed inside +// a subdirectory within the build directory, as opposed to inside the +// build directory itself. The subdirectory is either named after the +// action digest of the build action or uses an incrementing number, +// based on whether collisions may occur. +// +// This adapter can be used to add concurrency to a single worker. When +// executing build actions in parallel, every build action needs its own +// build directory. 
+func NewSharedBuildDirectoryCreator(base BuildDirectoryCreator, nextParallelActionID *atomic.Uint64) BuildDirectoryCreator { + return &sharedBuildDirectoryCreator{ + base: base, + nextParallelActionID: nextParallelActionID, + } +} + +func (dc *sharedBuildDirectoryCreator) GetBuildDirectory(ctx context.Context, actionDigest digest.Digest, mayRunInParallel bool) (BuildDirectory, *path.Trace, error) { + parentDirectory, parentDirectoryPath, err := dc.base.GetBuildDirectory(ctx, actionDigest, mayRunInParallel) + if err != nil { + return nil, nil, err + } + + // Determine the name of the subdirectory. + var name string + if mayRunInParallel { + // Multiple instances of this action may run in + // parallel, as the scheduler is not permitted to + // deduplicate them. This is likely caused by the + // 'do_not_cache' flag being set in the Action message. + // + // Number subdirectories incrementally to prevent + // collisions if multiple of them are scheduled on the + // same worker. + name = strconv.FormatUint(dc.nextParallelActionID.Add(1), 10) + } else { + // This action is guaranteed not to run in parallel, due + // to the scheduler being permitted to deduplicate + // execution requests. Use a directory name based on the + // action digest. This ensures that the working + // directory of the build action is deterministic, + // thereby increasing reproducibility. + // + // Only use a small number of characters from the digest + // to ensure the absolute path of the build directory + // remains short. This avoids reaching PATH_MAX and + // sockaddr_un::sun_path size limits for stronger digest + // functions. 16 characters is more than sufficient to + // prevent collisions. + name = actionDigest.GetHashString()[:16] + } + + // Create the subdirectory. 
+ childDirectoryName := path.MustNewComponent(name) + childDirectoryPath := parentDirectoryPath.Append(childDirectoryName) + if err := parentDirectory.Mkdir(childDirectoryName, 0o777); err != nil { + parentDirectory.Close() + return nil, nil, util.StatusWrapfWithCode(err, codes.Internal, "Failed to create build directory %#v", childDirectoryPath.String()) + } + childDirectory, err := parentDirectory.EnterBuildDirectory(childDirectoryName) + if err != nil { + if err := parentDirectory.Remove(childDirectoryName); err != nil { + log.Printf("Failed to remove action digest build directory %#v upon failure to enter: %s", childDirectoryPath.String(), err) + } + parentDirectory.Close() + return nil, nil, util.StatusWrapfWithCode(err, codes.Internal, "Failed to enter build directory %#v", childDirectoryPath.String()) + } + + return &sharedBuildDirectory{ + BuildDirectory: childDirectory, + parentDirectory: parentDirectory, + childDirectoryName: childDirectoryName, + childDirectoryPath: childDirectoryPath.String(), + }, childDirectoryPath, nil +} + +type sharedBuildDirectory struct { + BuildDirectory + parentDirectory BuildDirectory + childDirectoryName path.Component + childDirectoryPath string +} + +func (d *sharedBuildDirectory) Close() error { + err1 := d.BuildDirectory.Close() + err2 := d.parentDirectory.RemoveAll(d.childDirectoryName) + err3 := d.parentDirectory.Close() + if err1 != nil { + return util.StatusWrapf(err1, "Failed to close build directory %#v", d.childDirectoryPath) + } + if err2 != nil { + return util.StatusWrapfWithCode(err2, codes.Internal, "Failed to remove build directory %#v", d.childDirectoryPath) + } + return err3 +} diff --git a/pkg/builder/shared_build_directory_creator_test.go b/pkg/builder/shared_build_directory_creator_test.go new file mode 100644 index 0000000..ccf88ae --- /dev/null +++ b/pkg/builder/shared_build_directory_creator_test.go @@ -0,0 +1,256 @@ +package builder_test + +import ( + "context" + "os" + "sync/atomic" + "testing" + + 
remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + "github.com/buildbarn/bb-remote-execution/internal/mock" + "github.com/buildbarn/bb-remote-execution/pkg/builder" + "github.com/buildbarn/bb-storage/pkg/digest" + "github.com/buildbarn/bb-storage/pkg/filesystem/path" + "github.com/buildbarn/bb-storage/pkg/testutil" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func TestSharedBuildDirectoryCreatorGetBuildDirectoryFailure(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + // Failure to create environment should simply be forwarded. + baseBuildDirectoryCreator := mock.NewMockBuildDirectoryCreator(ctrl) + baseBuildDirectoryCreator.EXPECT().GetBuildDirectory( + ctx, + digest.MustNewDigest("debian8", remoteexecution.DigestFunction_SHA256, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", 0), + false, + ).Return(nil, nil, status.Error(codes.Internal, "No space left on device")) + + var nextParallelActionID atomic.Uint64 + buildDirectoryCreator := builder.NewSharedBuildDirectoryCreator(baseBuildDirectoryCreator, &nextParallelActionID) + _, _, err := buildDirectoryCreator.GetBuildDirectory( + ctx, + digest.MustNewDigest("debian8", remoteexecution.DigestFunction_SHA256, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", 0), + false) + testutil.RequireEqualStatus(t, status.Error(codes.Internal, "No space left on device"), err) +} + +func TestSharedBuildDirectoryCreatorMkdirFailure(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + // Failure to create a build subdirectory is always an internal error. 
+ baseBuildDirectoryCreator := mock.NewMockBuildDirectoryCreator(ctrl) + baseBuildDirectory := mock.NewMockBuildDirectory(ctrl) + baseBuildDirectoryCreator.EXPECT().GetBuildDirectory( + ctx, + digest.MustNewDigest("debian8", remoteexecution.DigestFunction_SHA256, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", 0), + false, + ).Return(baseBuildDirectory, ((*path.Trace)(nil)).Append(path.MustNewComponent("base-directory")), nil) + baseBuildDirectory.EXPECT().Mkdir(path.MustNewComponent("e3b0c44298fc1c14"), os.FileMode(0o777)).Return( + status.Error(codes.AlreadyExists, "Directory already exists")) + baseBuildDirectory.EXPECT().Close() + + var nextParallelActionID atomic.Uint64 + buildDirectoryCreator := builder.NewSharedBuildDirectoryCreator(baseBuildDirectoryCreator, &nextParallelActionID) + _, _, err := buildDirectoryCreator.GetBuildDirectory( + ctx, + digest.MustNewDigest("debian8", remoteexecution.DigestFunction_SHA256, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", 0), + false) + testutil.RequireEqualStatus(t, status.Error(codes.Internal, "Failed to create build directory \"base-directory/e3b0c44298fc1c14\": Directory already exists"), err) +} + +func TestSharedBuildDirectoryCreatorEnterBuildDirectoryFailure(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + // Failure to enter a build subdirectory is always an internal error. 
+ baseBuildDirectoryCreator := mock.NewMockBuildDirectoryCreator(ctrl) + baseBuildDirectory := mock.NewMockBuildDirectory(ctrl) + baseBuildDirectoryCreator.EXPECT().GetBuildDirectory( + ctx, + digest.MustNewDigest("debian8", remoteexecution.DigestFunction_SHA256, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", 0), + false, + ).Return(baseBuildDirectory, ((*path.Trace)(nil)).Append(path.MustNewComponent("base-directory")), nil) + baseBuildDirectory.EXPECT().Mkdir(path.MustNewComponent("e3b0c44298fc1c14"), os.FileMode(0o777)) + baseBuildDirectory.EXPECT().EnterBuildDirectory(path.MustNewComponent("e3b0c44298fc1c14")).Return(nil, status.Error(codes.ResourceExhausted, "Out of file descriptors")) + baseBuildDirectory.EXPECT().Remove(path.MustNewComponent("e3b0c44298fc1c14")) + baseBuildDirectory.EXPECT().Close() + + var nextParallelActionID atomic.Uint64 + buildDirectoryCreator := builder.NewSharedBuildDirectoryCreator(baseBuildDirectoryCreator, &nextParallelActionID) + _, _, err := buildDirectoryCreator.GetBuildDirectory( + ctx, + digest.MustNewDigest("debian8", remoteexecution.DigestFunction_SHA256, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", 0), + false) + testutil.RequireEqualStatus(t, status.Error(codes.Internal, "Failed to enter build directory \"base-directory/e3b0c44298fc1c14\": Out of file descriptors"), err) +} + +func TestSharedBuildDirectoryCreatorCloseChildFailure(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + // Directory closure errors should be propagated. 
+ baseBuildDirectoryCreator := mock.NewMockBuildDirectoryCreator(ctrl) + baseBuildDirectory := mock.NewMockBuildDirectory(ctrl) + baseBuildDirectoryPath := ((*path.Trace)(nil)).Append(path.MustNewComponent("base-directory")) + baseBuildDirectoryCreator.EXPECT().GetBuildDirectory( + ctx, + digest.MustNewDigest("debian8", remoteexecution.DigestFunction_SHA256, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", 0), + false, + ).Return(baseBuildDirectory, baseBuildDirectoryPath, nil) + baseBuildDirectory.EXPECT().Mkdir(path.MustNewComponent("e3b0c44298fc1c14"), os.FileMode(0o777)) + subDirectory := mock.NewMockBuildDirectory(ctrl) + baseBuildDirectory.EXPECT().EnterBuildDirectory(path.MustNewComponent("e3b0c44298fc1c14")).Return(subDirectory, nil) + subDirectory.EXPECT().Close().Return(status.Error(codes.Internal, "Bad file descriptor")) + baseBuildDirectory.EXPECT().RemoveAll(path.MustNewComponent("e3b0c44298fc1c14")) + baseBuildDirectory.EXPECT().Close() + + var nextParallelActionID atomic.Uint64 + buildDirectoryCreator := builder.NewSharedBuildDirectoryCreator(baseBuildDirectoryCreator, &nextParallelActionID) + buildDirectory, buildDirectoryPath, err := buildDirectoryCreator.GetBuildDirectory( + ctx, + digest.MustNewDigest("debian8", remoteexecution.DigestFunction_SHA256, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", 0), + false) + require.NoError(t, err) + require.Equal(t, baseBuildDirectoryPath.Append(path.MustNewComponent("e3b0c44298fc1c14")), buildDirectoryPath) + testutil.RequireEqualStatus(t, status.Error(codes.Internal, "Failed to close build directory \"base-directory/e3b0c44298fc1c14\": Bad file descriptor"), buildDirectory.Close()) +} + +func TestSharedBuildDirectoryCreatorRemoveAllFailure(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + // Directory removal errors should be propagated. 
Permission + // errors should be converted to internal errors, as they + // indicate problems with the infrastructure. + baseBuildDirectoryCreator := mock.NewMockBuildDirectoryCreator(ctrl) + baseBuildDirectory := mock.NewMockBuildDirectory(ctrl) + baseBuildDirectoryPath := ((*path.Trace)(nil)).Append(path.MustNewComponent("base-directory")) + baseBuildDirectoryCreator.EXPECT().GetBuildDirectory( + ctx, + digest.MustNewDigest("debian8", remoteexecution.DigestFunction_SHA256, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", 0), + false, + ).Return(baseBuildDirectory, baseBuildDirectoryPath, nil) + baseBuildDirectory.EXPECT().Mkdir(path.MustNewComponent("e3b0c44298fc1c14"), os.FileMode(0o777)) + subDirectory := mock.NewMockBuildDirectory(ctrl) + baseBuildDirectory.EXPECT().EnterBuildDirectory(path.MustNewComponent("e3b0c44298fc1c14")).Return(subDirectory, nil) + subDirectory.EXPECT().Close() + baseBuildDirectory.EXPECT().RemoveAll(path.MustNewComponent("e3b0c44298fc1c14")).Return(status.Error(codes.PermissionDenied, "Directory is owned by another user")) + baseBuildDirectory.EXPECT().Close() + + var nextParallelActionID atomic.Uint64 + buildDirectoryCreator := builder.NewSharedBuildDirectoryCreator(baseBuildDirectoryCreator, &nextParallelActionID) + buildDirectory, buildDirectoryPath, err := buildDirectoryCreator.GetBuildDirectory( + ctx, + digest.MustNewDigest("debian8", remoteexecution.DigestFunction_SHA256, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", 0), + false) + require.NoError(t, err) + require.Equal(t, baseBuildDirectoryPath.Append(path.MustNewComponent("e3b0c44298fc1c14")), buildDirectoryPath) + testutil.RequireEqualStatus(t, status.Error(codes.Internal, "Failed to remove build directory \"base-directory/e3b0c44298fc1c14\": Directory is owned by another user"), buildDirectory.Close()) +} + +func TestSharedBuildDirectoryCreatorCloseParentFailure(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), 
t) + + // Directory closure errors on the parent should also be + // propagated, but there is no need to prefix any additional + // info. The base BuildDirectoryCreator will already be + // responsible for injecting more detailed errors. + baseBuildDirectoryCreator := mock.NewMockBuildDirectoryCreator(ctrl) + baseBuildDirectory := mock.NewMockBuildDirectory(ctrl) + baseBuildDirectoryPath := ((*path.Trace)(nil)).Append(path.MustNewComponent("base-directory")) + baseBuildDirectoryCreator.EXPECT().GetBuildDirectory( + ctx, + digest.MustNewDigest("debian8", remoteexecution.DigestFunction_SHA256, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", 0), + false, + ).Return(baseBuildDirectory, baseBuildDirectoryPath, nil) + baseBuildDirectory.EXPECT().Mkdir(path.MustNewComponent("e3b0c44298fc1c14"), os.FileMode(0o777)) + subDirectory := mock.NewMockBuildDirectory(ctrl) + baseBuildDirectory.EXPECT().EnterBuildDirectory(path.MustNewComponent("e3b0c44298fc1c14")).Return(subDirectory, nil) + subDirectory.EXPECT().Close() + baseBuildDirectory.EXPECT().RemoveAll(path.MustNewComponent("e3b0c44298fc1c14")) + baseBuildDirectory.EXPECT().Close().Return(status.Error(codes.Internal, "Bad file descriptor")) + + var nextParallelActionID atomic.Uint64 + buildDirectoryCreator := builder.NewSharedBuildDirectoryCreator(baseBuildDirectoryCreator, &nextParallelActionID) + buildDirectory, buildDirectoryPath, err := buildDirectoryCreator.GetBuildDirectory( + ctx, + digest.MustNewDigest("debian8", remoteexecution.DigestFunction_SHA256, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", 0), + false) + require.NoError(t, err) + require.Equal(t, baseBuildDirectoryPath.Append(path.MustNewComponent("e3b0c44298fc1c14")), buildDirectoryPath) + testutil.RequireEqualStatus(t, status.Error(codes.Internal, "Bad file descriptor"), buildDirectory.Close()) +} + +func TestSharedBuildDirectoryCreatorSuccessNotParallel(t *testing.T) { + ctrl, ctx := 
gomock.WithContext(context.Background(), t) + + // Successful build in a subdirectory for an action that does + // not run in parallel. The subdirectory name is based on the + // action digest. + baseBuildDirectoryCreator := mock.NewMockBuildDirectoryCreator(ctrl) + baseBuildDirectory := mock.NewMockBuildDirectory(ctrl) + baseBuildDirectoryPath := ((*path.Trace)(nil)).Append(path.MustNewComponent("base-directory")) + baseBuildDirectoryCreator.EXPECT().GetBuildDirectory( + ctx, + digest.MustNewDigest("debian8", remoteexecution.DigestFunction_SHA256, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", 0), + false, + ).Return(baseBuildDirectory, baseBuildDirectoryPath, nil) + baseBuildDirectory.EXPECT().Mkdir(path.MustNewComponent("e3b0c44298fc1c14"), os.FileMode(0o777)) + subDirectory := mock.NewMockBuildDirectory(ctrl) + baseBuildDirectory.EXPECT().EnterBuildDirectory(path.MustNewComponent("e3b0c44298fc1c14")).Return(subDirectory, nil) + subDirectory.EXPECT().Close() + baseBuildDirectory.EXPECT().RemoveAll(path.MustNewComponent("e3b0c44298fc1c14")) + baseBuildDirectory.EXPECT().Close() + + var nextParallelActionID atomic.Uint64 + buildDirectoryCreator := builder.NewSharedBuildDirectoryCreator(baseBuildDirectoryCreator, &nextParallelActionID) + buildDirectory, buildDirectoryPath, err := buildDirectoryCreator.GetBuildDirectory( + ctx, + digest.MustNewDigest("debian8", remoteexecution.DigestFunction_SHA256, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", 0), + false) + require.NoError(t, err) + require.Equal(t, baseBuildDirectoryPath.Append(path.MustNewComponent("e3b0c44298fc1c14")), buildDirectoryPath) + require.NoError(t, buildDirectory.Close()) +} + +func TestSharedBuildDirectoryCreatorMkdirSuccessParallel(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + baseBuildDirectoryCreator := mock.NewMockBuildDirectoryCreator(ctrl) + baseBuildDirectory := mock.NewMockBuildDirectory(ctrl) + 
baseBuildDirectoryPath := ((*path.Trace)(nil)).Append(path.MustNewComponent("base-directory")) + var nextParallelActionID atomic.Uint64 + buildDirectoryCreator := builder.NewSharedBuildDirectoryCreator(baseBuildDirectoryCreator, &nextParallelActionID) + + // Build directories for actions that run in parallel are simply + // named incrementally to prevent collisions. + baseBuildDirectoryCreator.EXPECT().GetBuildDirectory( + ctx, + digest.MustNewDigest("debian8", remoteexecution.DigestFunction_SHA256, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", 0), + true, + ).Return(baseBuildDirectory, baseBuildDirectoryPath, nil) + baseBuildDirectory.EXPECT().Mkdir(path.MustNewComponent("1"), os.FileMode(0o777)).Return( + status.Error(codes.Internal, "Foo")) + baseBuildDirectory.EXPECT().Close() + _, _, err := buildDirectoryCreator.GetBuildDirectory( + ctx, + digest.MustNewDigest("debian8", remoteexecution.DigestFunction_SHA256, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", 0), + true) + testutil.RequireEqualStatus(t, status.Error(codes.Internal, "Failed to create build directory \"base-directory/1\": Foo"), err) + + baseBuildDirectoryCreator.EXPECT().GetBuildDirectory( + ctx, + digest.MustNewDigest("debian8", remoteexecution.DigestFunction_SHA256, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", 0), + true, + ).Return(baseBuildDirectory, baseBuildDirectoryPath, nil) + baseBuildDirectory.EXPECT().Mkdir(path.MustNewComponent("2"), os.FileMode(0o777)).Return( + status.Error(codes.Internal, "Foo")) + baseBuildDirectory.EXPECT().Close() + _, _, err = buildDirectoryCreator.GetBuildDirectory( + ctx, + digest.MustNewDigest("debian8", remoteexecution.DigestFunction_SHA256, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", 0), + true) + testutil.RequireEqualStatus(t, status.Error(codes.Internal, "Failed to create build directory \"base-directory/2\": Foo"), err) +} diff --git 
a/pkg/builder/storage_flushing_build_executor.go b/pkg/builder/storage_flushing_build_executor.go new file mode 100644 index 0000000..fd48c26 --- /dev/null +++ b/pkg/builder/storage_flushing_build_executor.go @@ -0,0 +1,57 @@ +package builder + +import ( + "context" + + remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + re_filesystem "github.com/buildbarn/bb-remote-execution/pkg/filesystem" + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/access" + "github.com/buildbarn/bb-remote-execution/pkg/proto/remoteworker" + "github.com/buildbarn/bb-storage/pkg/digest" +) + +// StorageFlusher is a callback that is invoked by +// NewStorageFlushingBuildExecutor to flush contents to storage. +type StorageFlusher func(context.Context) error + +type storageFlushingBuildExecutor struct { + BuildExecutor + flush StorageFlusher +} + +// NewStorageFlushingBuildExecutor is an adapter for BuildExecutor that +// calls a callback after every operation. The callback is typically +// used to flush pending writes to underlying storage, to ensure that +// other processes in the cluster have a consistent view of the +// completion of the operation. 
+func NewStorageFlushingBuildExecutor(base BuildExecutor, flush StorageFlusher) BuildExecutor { + return &storageFlushingBuildExecutor{ + BuildExecutor: base, + flush: flush, + } +} + +func (be *storageFlushingBuildExecutor) Execute(ctx context.Context, filePool re_filesystem.FilePool, monitor access.UnreadDirectoryMonitor, digestFunction digest.Function, request *remoteworker.DesiredState_Executing, executionStateUpdates chan<- *remoteworker.CurrentState_Executing) *remoteexecution.ExecuteResponse { + response := be.BuildExecutor.Execute(ctx, filePool, monitor, digestFunction, request, executionStateUpdates) + if err := be.flush(ctx); err != nil { + attachErrorToExecuteResponse(response, err) + + // Due to flushing failing, some of the outputs + // referenced by the Execute Response may not be present + // in the Content Addressable Storage. Even with the + // error attached to the Execute Response, Bazel will + // try to access some of the outputs. + // + // Prune all digests from the response, as we want Bazel + // to print the error above, as opposed to print errors + // related to fetching nonexistent blobs. 
+ if result := response.Result; result != nil { + result.OutputFiles = nil + result.OutputDirectories = nil + result.StdoutDigest = nil + result.StderrDigest = nil + } + response.ServerLogs = nil + } + return response +} diff --git a/pkg/builder/storage_flushing_build_executor_test.go b/pkg/builder/storage_flushing_build_executor_test.go new file mode 100644 index 0000000..590fd20 --- /dev/null +++ b/pkg/builder/storage_flushing_build_executor_test.go @@ -0,0 +1,143 @@ +package builder_test + +import ( + "context" + "testing" + + remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + "github.com/buildbarn/bb-remote-execution/internal/mock" + "github.com/buildbarn/bb-remote-execution/pkg/builder" + "github.com/buildbarn/bb-remote-execution/pkg/proto/remoteworker" + "github.com/buildbarn/bb-storage/pkg/digest" + "github.com/buildbarn/bb-storage/pkg/testutil" + "github.com/golang/mock/gomock" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" +) + +func TestStorageFlushingBuildExecutor(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + baseBuildExecutor := mock.NewMockBuildExecutor(ctrl) + storageFlusher := mock.NewMockStorageFlusher(ctrl) + buildExecutor := builder.NewStorageFlushingBuildExecutor(baseBuildExecutor, storageFlusher.Call) + + // Execute request and response that are used for all tests. The + // response uses all features supported by the protocol, to test + // that we only strip fields that ought to be omitted. 
+ request := &remoteworker.DesiredState_Executing{ + ActionDigest: &remoteexecution.Digest{ + Hash: "64ec88ca00b268e5ba1a35678a1b5316d212f4f366b2477232534a8aeca37f3c", + SizeBytes: 11, + }, + Action: &remoteexecution.Action{DoNotCache: false}, + } + updates := make(chan<- *remoteworker.CurrentState_Executing) + response := &remoteexecution.ExecuteResponse{ + Result: &remoteexecution.ActionResult{ + OutputFiles: []*remoteexecution.OutputFile{ + { + Path: "output.o", + Digest: &remoteexecution.Digest{ + Hash: "8c2e88f122b6fbcf0a20d562391c93db", + SizeBytes: 3483, + }, + }, + }, + OutputDirectories: []*remoteexecution.OutputDirectory{ + { + Path: "some_directory", + TreeDigest: &remoteexecution.Digest{ + Hash: "0342e9502cf8c4cea71de4c33669b60f", + SizeBytes: 237944, + }, + }, + }, + OutputSymlinks: []*remoteexecution.OutputSymlink{ + { + Path: "output.o.stripped", + Target: "output.o", + }, + { + Path: "some_other_directory", + Target: "some_directory", + }, + }, + ExitCode: 123, + StdoutRaw: []byte("Hello"), + StdoutDigest: &remoteexecution.Digest{ + Hash: "8b1a9953c4611296a827abf8c47804d7", + SizeBytes: 5, + }, + StderrRaw: []byte("Hello"), + StderrDigest: &remoteexecution.Digest{ + Hash: "8b1a9953c4611296a827abf8c47804d7", + SizeBytes: 5, + }, + ExecutionMetadata: &remoteexecution.ExecutedActionMetadata{ + Worker: "builder1.example.com", + }, + }, + ServerLogs: map[string]*remoteexecution.LogFile{ + "kernel_log": { + Digest: &remoteexecution.Digest{ + Hash: "2917c2a7eb23012392098e74a873cd31", + SizeBytes: 9584, + }, + HumanReadable: true, + }, + }, + Message: "Uncached action result: http://....", + } + + filePool := mock.NewMockFilePool(ctrl) + monitor := mock.NewMockUnreadDirectoryMonitor(ctrl) + digestFunction := digest.MustNewFunction("default", remoteexecution.DigestFunction_MD5) + baseBuildExecutor.EXPECT().Execute( + ctx, filePool, monitor, digestFunction, request, updates, + ).Return(proto.Clone(response).(*remoteexecution.ExecuteResponse)).Times(2) + + // 
When flushing succeeds, we should return the response in + // literal form. + t.Run("FlushingSucceeded", func(t *testing.T) { + storageFlusher.EXPECT().Call(ctx).Return(nil) + testutil.RequireEqualProto( + t, + response, + buildExecutor.Execute(ctx, filePool, monitor, digestFunction, request, updates)) + }) + + // When flushing fails, some of the outputs may not have ended + // up in storage. Return the response with all of the digests + // removed. + t.Run("FlushingFailed", func(t *testing.T) { + storageFlusher.EXPECT().Call(ctx).Return(status.Error(codes.Internal, "Failed to flush blobs to storage")) + testutil.RequireEqualProto( + t, + &remoteexecution.ExecuteResponse{ + Result: &remoteexecution.ActionResult{ + OutputSymlinks: []*remoteexecution.OutputSymlink{ + { + Path: "output.o.stripped", + Target: "output.o", + }, + { + Path: "some_other_directory", + Target: "some_directory", + }, + }, + ExitCode: 123, + StdoutRaw: []byte("Hello"), + StderrRaw: []byte("Hello"), + ExecutionMetadata: &remoteexecution.ExecutedActionMetadata{ + Worker: "builder1.example.com", + }, + }, + Status: status.New(codes.Internal, "Failed to flush blobs to storage").Proto(), + Message: "Uncached action result: http://....", + }, + buildExecutor.Execute(ctx, filePool, monitor, digestFunction, request, updates)) + }) +} diff --git a/pkg/builder/test_infrastructure_failure_detecting_build_executor.go b/pkg/builder/test_infrastructure_failure_detecting_build_executor.go new file mode 100644 index 0000000..0fe7600 --- /dev/null +++ b/pkg/builder/test_infrastructure_failure_detecting_build_executor.go @@ -0,0 +1,154 @@ +package builder + +import ( + "context" + "path" + "sync" + "sync/atomic" + + remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + "github.com/buildbarn/bb-remote-execution/pkg/filesystem" + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/access" + "github.com/buildbarn/bb-remote-execution/pkg/proto/remoteworker" + 
"github.com/buildbarn/bb-storage/pkg/digest" + "github.com/buildbarn/bb-storage/pkg/program" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" +) + +// TestInfrastructureFailureShutdownState keeps track of whether a group +// of worker threads have shut down, due to an excessive number of +// consecutive tests failing due to infrastructure failures on a single +// worker thread. +type TestInfrastructureFailureShutdownState struct { + once sync.Once + channel chan struct{} +} + +// NewTestInfrastructureFailureShutdownState creates a new +// TestInfrastructureFailureShutdownState that is in the initial state, +// where no infrastructure failures have occurred. +func NewTestInfrastructureFailureShutdownState() *TestInfrastructureFailureShutdownState { + return &TestInfrastructureFailureShutdownState{ + channel: make(chan struct{}), + } +} + +var errTooManyInfrastructureFailures = status.Error(codes.Unavailable, "Worker has shut down, as too many consecutive tests reported an infrastructure failure") + +func (ts *TestInfrastructureFailureShutdownState) shutDown() error { + ts.once.Do(func() { + close(ts.channel) + }) + return errTooManyInfrastructureFailures +} + +func (ts *TestInfrastructureFailureShutdownState) isShutDown() error { + select { + case <-ts.channel: + return errTooManyInfrastructureFailures + default: + return nil + } +} + +func (ts *TestInfrastructureFailureShutdownState) waitForShutdown(ctx context.Context, siblingsGroup, dependenciesGroup program.Group) error { + select { + case <-ts.channel: + return errTooManyInfrastructureFailures + case <-ctx.Done(): + return nil + } +} + +type testInfrastructureFailureDetectingBuildExecutor struct { + base BuildExecutor + shutdownState *TestInfrastructureFailureShutdownState + maximumConsecutiveFailures uint32 + + currentConsecutiveFailures atomic.Uint32 +} + +// NewTestInfrastructureFailureDetectingBuildExecutor is a decorator for +// BuildExecutor that 
counts the number of consecutive actions that +// generated one or more "test.infrastructure_failure" output files. If +// the count exceeds a configured value, the BuildExecutor will start to +// fail readiness checks. This prevents further work from being +// executed. +// +// This decorator is useful when workers have peripherals attached to +// them that are prone to hardware failures. Bazel allows tests to +// report these failures by creating the file designated by the +// TEST_INFRASTRUCTURE_FAILURE_FILE environment variable. +// +// Please refer to the Bazel test encyclopedia for more details on +// TEST_INFRASTRUCTURE_FAILURE_FILE: +// https://bazel.build/reference/test-encyclopedia +func NewTestInfrastructureFailureDetectingBuildExecutor(base BuildExecutor, shutdownState *TestInfrastructureFailureShutdownState, maximumConsecutiveFailures uint32) BuildExecutor { + return &testInfrastructureFailureDetectingBuildExecutor{ + base: base, + shutdownState: shutdownState, + maximumConsecutiveFailures: maximumConsecutiveFailures, + } +} + +func (be *testInfrastructureFailureDetectingBuildExecutor) CheckReadiness(ctx context.Context) error { + if err := be.shutdownState.isShutDown(); err != nil { + return err + } + + return program.RunLocal(ctx, func(ctx context.Context, siblingsGroup, dependenciesGroup program.Group) error { + dependenciesGroup.Go(be.shutdownState.waitForShutdown) + return be.base.CheckReadiness(ctx) + }) +} + +func (be *testInfrastructureFailureDetectingBuildExecutor) Execute(ctx context.Context, filePool filesystem.FilePool, monitor access.UnreadDirectoryMonitor, digestFunction digest.Function, request *remoteworker.DesiredState_Executing, executionStateUpdates chan<- *remoteworker.CurrentState_Executing) *remoteexecution.ExecuteResponse { + if err := be.shutdownState.isShutDown(); err != nil { + response := NewDefaultExecuteResponse(request) + attachErrorToExecuteResponse(response, err) + return response + } + + var response 
*remoteexecution.ExecuteResponse + if err := program.RunLocal(ctx, func(ctx context.Context, siblingsGroup, dependenciesGroup program.Group) error { + dependenciesGroup.Go(be.shutdownState.waitForShutdown) + response = be.base.Execute(ctx, filePool, monitor, digestFunction, request, executionStateUpdates) + + // Check for the existence of TEST_INFRASTRUCTURE_FAILURE_FILE. + // + // TODO: As this BuildExecutor doesn't have access to the + // Command message, we assume that the file is always called + // "test.infrastructure_failure". This may not be a valid + // assumption. + hasInfrastructureFailure := false + for _, outputFile := range response.Result.OutputFiles { + if path.Base(outputFile.Path) == "test.infrastructure_failure" { + hasInfrastructureFailure = true + break + } + } + + if hasInfrastructureFailure { + if be.currentConsecutiveFailures.Add(1) >= be.maximumConsecutiveFailures { + // Too many consecutive test failures. + // Shut down all worker threads. + return be.shutdownState.shutDown() + } + } else { + // No infrastructure failure occurred, likely + // because the test succeeded. Reset the counter + // for this worker thread. 
+ be.currentConsecutiveFailures.Store(0) + } + return nil + }); err != nil { + newResponse := &remoteexecution.ExecuteResponse{} + proto.Merge(newResponse, response) + attachErrorToExecuteResponse(newResponse, err) + return newResponse + } + return response +} diff --git a/pkg/builder/test_infrastructure_failure_detecting_build_executor_test.go b/pkg/builder/test_infrastructure_failure_detecting_build_executor_test.go new file mode 100644 index 0000000..33c8ad0 --- /dev/null +++ b/pkg/builder/test_infrastructure_failure_detecting_build_executor_test.go @@ -0,0 +1,117 @@ +package builder_test + +import ( + "context" + "testing" + + remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + "github.com/buildbarn/bb-remote-execution/internal/mock" + "github.com/buildbarn/bb-remote-execution/pkg/builder" + "github.com/buildbarn/bb-remote-execution/pkg/proto/remoteworker" + "github.com/buildbarn/bb-storage/pkg/digest" + "github.com/buildbarn/bb-storage/pkg/testutil" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func TestTestInfrastructureFailureDetectingBuildExecutor(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + baseBuildExecutor := mock.NewMockBuildExecutor(ctrl) + buildExecutor := builder.NewTestInfrastructureFailureDetectingBuildExecutor( + baseBuildExecutor, + builder.NewTestInfrastructureFailureShutdownState(), + /* maximumConsecutiveFailures = */ 5) + + // Common values used by the tests below. 
+ filePool := mock.NewMockFilePool(ctrl) + monitor := mock.NewMockUnreadDirectoryMonitor(ctrl) + digestFunction := digest.MustNewFunction("example", remoteexecution.DigestFunction_MD5) + var metadata chan<- *remoteworker.CurrentState_Executing = make(chan *remoteworker.CurrentState_Executing, 10) + request := &remoteworker.DesiredState_Executing{ + ActionDigest: &remoteexecution.Digest{ + Hash: "c7af09d7f0c45d36b46e21616398a1eb", + SizeBytes: 100, + }, + Action: &remoteexecution.Action{}, + } + failedResponse := &remoteexecution.ExecuteResponse{ + Result: &remoteexecution.ActionResult{ + ExecutionMetadata: &remoteexecution.ExecutedActionMetadata{}, + OutputFiles: []*remoteexecution.OutputFile{ + {Path: "bazel-out/linux_x86_64/testlogs/my/test/test.infrastructure_failure"}, + }, + }, + } + successfulResponse := &remoteexecution.ExecuteResponse{ + Result: &remoteexecution.ActionResult{ + ExecutionMetadata: &remoteexecution.ExecutedActionMetadata{}, + OutputFiles: []*remoteexecution.OutputFile{ + {Path: "bazel-out/linux_x86_64/testlogs/my/test/test.outputs/outputs.zip"}, + }, + }, + } + + // By default, calls to CheckReadiness should just be forwarded + // to the underlying BuildExecutor. + baseBuildExecutor.EXPECT().CheckReadiness(gomock.Any()). + Return(status.Error(codes.Internal, "Runner unavailable")) + testutil.RequireEqualStatus(t, status.Error(codes.Internal, "Runner unavailable"), buildExecutor.CheckReadiness(ctx)) + + baseBuildExecutor.EXPECT().CheckReadiness(gomock.Any()) + require.NoError(t, buildExecutor.CheckReadiness(ctx)) + + // Execute a couple of tests that trigger infrastructure + // failures. As this is still right below the configured limit, + // this shouldn't mark the worker in an unhealthy state. 
+ for i := 0; i < 4; i++ { + baseBuildExecutor.EXPECT().Execute(gomock.Any(), filePool, monitor, digestFunction, request, metadata).Return(failedResponse) + testutil.RequireEqualProto(t, failedResponse, buildExecutor.Execute(ctx, filePool, monitor, digestFunction, request, metadata)) + } + + baseBuildExecutor.EXPECT().CheckReadiness(gomock.Any()) + require.NoError(t, buildExecutor.CheckReadiness(ctx)) + + // Now reset the counter of consecutive infrastructure failures by + // executing a successful test. + baseBuildExecutor.EXPECT().Execute(gomock.Any(), filePool, monitor, digestFunction, request, metadata).Return(successfulResponse) + testutil.RequireEqualProto(t, successfulResponse, buildExecutor.Execute(ctx, filePool, monitor, digestFunction, request, metadata)) + + baseBuildExecutor.EXPECT().CheckReadiness(gomock.Any()) + require.NoError(t, buildExecutor.CheckReadiness(ctx)) + + // We may once again trigger a number of tests without marking + // the worker unhealthy. + for i := 0; i < 4; i++ { + baseBuildExecutor.EXPECT().Execute(gomock.Any(), filePool, monitor, digestFunction, request, metadata).Return(failedResponse) + testutil.RequireEqualProto(t, failedResponse, buildExecutor.Execute(ctx, filePool, monitor, digestFunction, request, metadata)) + } + + baseBuildExecutor.EXPECT().CheckReadiness(gomock.Any()) + require.NoError(t, buildExecutor.CheckReadiness(ctx)) + + // Running a fifth failing test should cause the worker to be + // marked unhealthy. 
+ baseBuildExecutor.EXPECT().Execute(gomock.Any(), filePool, monitor, digestFunction, request, metadata).Return(failedResponse) + testutil.RequireEqualProto(t, &remoteexecution.ExecuteResponse{ + Result: &remoteexecution.ActionResult{ + ExecutionMetadata: &remoteexecution.ExecutedActionMetadata{}, + OutputFiles: []*remoteexecution.OutputFile{ + {Path: "bazel-out/linux_x86_64/testlogs/my/test/test.infrastructure_failure"}, + }, + }, + Status: status.New(codes.Unavailable, "Worker has shut down, as too many consecutive tests reported an infrastructure failure").Proto(), + }, buildExecutor.Execute(ctx, filePool, monitor, digestFunction, request, metadata)) + + // Future readiness checks and execution requests should fail. + testutil.RequireEqualStatus(t, status.Error(codes.Unavailable, "Worker has shut down, as too many consecutive tests reported an infrastructure failure"), buildExecutor.CheckReadiness(ctx)) + testutil.RequireEqualProto(t, &remoteexecution.ExecuteResponse{ + Result: &remoteexecution.ActionResult{ + ExecutionMetadata: &remoteexecution.ExecutedActionMetadata{}, + }, + Status: status.New(codes.Unavailable, "Worker has shut down, as too many consecutive tests reported an infrastructure failure").Proto(), + }, buildExecutor.Execute(ctx, filePool, monitor, digestFunction, request, metadata)) +} diff --git a/pkg/builder/timestamped_build_executor.go b/pkg/builder/timestamped_build_executor.go new file mode 100644 index 0000000..d424858 --- /dev/null +++ b/pkg/builder/timestamped_build_executor.go @@ -0,0 +1,104 @@ +package builder + +import ( + "context" + + remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + re_filesystem "github.com/buildbarn/bb-remote-execution/pkg/filesystem" + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/access" + "github.com/buildbarn/bb-remote-execution/pkg/proto/remoteworker" + "github.com/buildbarn/bb-storage/pkg/clock" + "github.com/buildbarn/bb-storage/pkg/digest" + + 
"google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/durationpb" + "google.golang.org/protobuf/types/known/timestamppb" +) + +type timestampedBuildExecutor struct { + BuildExecutor + clock clock.Clock + workerName string +} + +// NewTimestampedBuildExecutor creates a decorator for BuildExecutor +// that augments the ActionResult that is part of the ExecuteResponse +// with ExecutedActionMetadata. More concretely, it ensures that the +// ActionResult contains the name of the worker performing the build and +// timing information. +func NewTimestampedBuildExecutor(buildExecutor BuildExecutor, clock clock.Clock, workerName string) BuildExecutor { + return ×tampedBuildExecutor{ + BuildExecutor: buildExecutor, + clock: clock, + workerName: workerName, + } +} + +func (be *timestampedBuildExecutor) getCurrentTime() *timestamppb.Timestamp { + return timestamppb.New(be.clock.Now()) +} + +func (be *timestampedBuildExecutor) Execute(ctx context.Context, filePool re_filesystem.FilePool, monitor access.UnreadDirectoryMonitor, digestFunction digest.Function, request *remoteworker.DesiredState_Executing, executionStateUpdates chan<- *remoteworker.CurrentState_Executing) *remoteexecution.ExecuteResponse { + // Initial metadata, using the current time as the start timestamp. + metadata := remoteexecution.ExecutedActionMetadata{ + Worker: be.workerName, + QueuedTimestamp: request.QueuedTimestamp, + WorkerStartTimestamp: be.getCurrentTime(), + } + + // Call into the underlying build executor. + baseUpdates := make(chan *remoteworker.CurrentState_Executing) + baseCompletion := make(chan *remoteexecution.ExecuteResponse) + go func() { + baseCompletion <- be.BuildExecutor.Execute(ctx, filePool, monitor, digestFunction, request, baseUpdates) + }() + + var completedTimestamp **timestamppb.Timestamp + for { + select { + case update := <-baseUpdates: + // Complete the previous stage. 
+ now := be.getCurrentTime() + if completedTimestamp != nil { + *completedTimestamp = now + } + + // Start the next stage. + switch update.ExecutionState.(type) { + case *remoteworker.CurrentState_Executing_FetchingInputs: + metadata.InputFetchStartTimestamp = now + completedTimestamp = &metadata.InputFetchCompletedTimestamp + case *remoteworker.CurrentState_Executing_Running: + metadata.ExecutionStartTimestamp = now + completedTimestamp = &metadata.ExecutionCompletedTimestamp + case *remoteworker.CurrentState_Executing_UploadingOutputs: + metadata.OutputUploadStartTimestamp = now + completedTimestamp = &metadata.OutputUploadCompletedTimestamp + default: + completedTimestamp = nil + } + executionStateUpdates <- update + case response := <-baseCompletion: + // Complete the final stage. + now := be.getCurrentTime() + if completedTimestamp != nil { + *completedTimestamp = now + } + + // Merge the metadata into the response. + metadata.WorkerCompletedTimestamp = now + baseMetadata := response.Result.ExecutionMetadata + proto.Merge(baseMetadata, &metadata) + + // If the base BuildExecutor does not provide a + // virtual execution duration, set it to wall + // time. This ensures that feedback driven + // initial size class analysis at least has some + // information to work with. 
+ if baseMetadata.VirtualExecutionDuration == nil && baseMetadata.ExecutionStartTimestamp != nil && baseMetadata.ExecutionCompletedTimestamp != nil { + baseMetadata.VirtualExecutionDuration = durationpb.New(baseMetadata.ExecutionCompletedTimestamp.AsTime().Sub(baseMetadata.ExecutionStartTimestamp.AsTime())) + } + return response + } + } +} diff --git a/pkg/builder/timestamped_build_executor_test.go b/pkg/builder/timestamped_build_executor_test.go new file mode 100644 index 0000000..6f9268c --- /dev/null +++ b/pkg/builder/timestamped_build_executor_test.go @@ -0,0 +1,127 @@ +package builder_test + +import ( + "context" + "testing" + "time" + + remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + "github.com/buildbarn/bb-remote-execution/internal/mock" + "github.com/buildbarn/bb-remote-execution/pkg/builder" + "github.com/buildbarn/bb-remote-execution/pkg/filesystem" + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/access" + "github.com/buildbarn/bb-remote-execution/pkg/proto/remoteworker" + "github.com/buildbarn/bb-storage/pkg/digest" + "github.com/buildbarn/bb-storage/pkg/testutil" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" + + "google.golang.org/protobuf/types/known/anypb" + "google.golang.org/protobuf/types/known/durationpb" + "google.golang.org/protobuf/types/known/emptypb" + "google.golang.org/protobuf/types/known/timestamppb" +) + +func TestTimestampedBuildExecutorExample(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + // Recurring messages used by this test. 
+ actionDigest := &remoteexecution.Digest{ + Hash: "d41d8cd98f00b204e9800998ecf8427e", + SizeBytes: 123, + } + request := &remoteworker.DesiredState_Executing{ + ActionDigest: actionDigest, + QueuedTimestamp: ×tamppb.Timestamp{Seconds: 999}, + } + updateFetchingInputs := &remoteworker.CurrentState_Executing{ + ActionDigest: actionDigest, + ExecutionState: &remoteworker.CurrentState_Executing_FetchingInputs{ + FetchingInputs: &emptypb.Empty{}, + }, + } + updateExecuting := &remoteworker.CurrentState_Executing{ + ActionDigest: actionDigest, + ExecutionState: &remoteworker.CurrentState_Executing_Running{ + Running: &emptypb.Empty{}, + }, + } + updateUploadingOutputs := &remoteworker.CurrentState_Executing{ + ActionDigest: actionDigest, + ExecutionState: &remoteworker.CurrentState_Executing_UploadingOutputs{ + UploadingOutputs: &emptypb.Empty{}, + }, + } + + // Simulate the execution of an action where every stage takes + // one second. + clock := mock.NewMockClock(ctrl) + clock.EXPECT().Now().Return(time.Unix(1000, 0)) + filePool := mock.NewMockFilePool(ctrl) + monitor := mock.NewMockUnreadDirectoryMonitor(ctrl) + baseBuildExecutor := mock.NewMockBuildExecutor(ctrl) + auxiliaryMetadata, err := anypb.New(&emptypb.Empty{}) + require.NoError(t, err) + baseBuildExecutor.EXPECT().Execute( + ctx, + filePool, + monitor, + digest.MustNewFunction("main", remoteexecution.DigestFunction_MD5), + request, + gomock.Any()).DoAndReturn(func(ctx context.Context, filePool filesystem.FilePool, monitor access.UnreadDirectoryMonitor, digestFunction digest.Function, request *remoteworker.DesiredState_Executing, executionStateUpdates chan<- *remoteworker.CurrentState_Executing) *remoteexecution.ExecuteResponse { + clock.EXPECT().Now().Return(time.Unix(1001, 0)) + executionStateUpdates <- updateFetchingInputs + clock.EXPECT().Now().Return(time.Unix(1002, 0)) + executionStateUpdates <- updateExecuting + clock.EXPECT().Now().Return(time.Unix(1003, 0)) + executionStateUpdates <- 
updateUploadingOutputs + clock.EXPECT().Now().Return(time.Unix(1004, 0)) + return &remoteexecution.ExecuteResponse{ + Result: &remoteexecution.ActionResult{ + ExitCode: 1, + ExecutionMetadata: &remoteexecution.ExecutedActionMetadata{ + AuxiliaryMetadata: []*anypb.Any{auxiliaryMetadata}, + }, + }, + } + }) + + // Invoke action through the timestamped build executor. + executionStateUpdates := make(chan *remoteworker.CurrentState_Executing, 3) + buildExecutor := builder.NewTimestampedBuildExecutor(baseBuildExecutor, clock, "builder.example.com") + executeResponse := buildExecutor.Execute( + ctx, + filePool, + monitor, + digest.MustNewFunction("main", remoteexecution.DigestFunction_MD5), + request, + executionStateUpdates) + + // Execution updates should be forwarded literally. + require.Equal(t, <-executionStateUpdates, updateFetchingInputs) + require.Equal(t, <-executionStateUpdates, updateExecuting) + require.Equal(t, <-executionStateUpdates, updateUploadingOutputs) + + // Execute response should be augmented to include metadata. + // Auxiliary metadata that is already part of the execution + // metadata should not be discarded. 
+ testutil.RequireEqualProto(t, &remoteexecution.ExecuteResponse{ + Result: &remoteexecution.ActionResult{ + ExitCode: 1, + ExecutionMetadata: &remoteexecution.ExecutedActionMetadata{ + Worker: "builder.example.com", + QueuedTimestamp: ×tamppb.Timestamp{Seconds: 999}, + WorkerStartTimestamp: ×tamppb.Timestamp{Seconds: 1000}, + InputFetchStartTimestamp: ×tamppb.Timestamp{Seconds: 1001}, + InputFetchCompletedTimestamp: ×tamppb.Timestamp{Seconds: 1002}, + ExecutionStartTimestamp: ×tamppb.Timestamp{Seconds: 1002}, + ExecutionCompletedTimestamp: ×tamppb.Timestamp{Seconds: 1003}, + OutputUploadStartTimestamp: ×tamppb.Timestamp{Seconds: 1003}, + OutputUploadCompletedTimestamp: ×tamppb.Timestamp{Seconds: 1004}, + WorkerCompletedTimestamp: ×tamppb.Timestamp{Seconds: 1004}, + AuxiliaryMetadata: []*anypb.Any{auxiliaryMetadata}, + VirtualExecutionDuration: &durationpb.Duration{Seconds: 1}, + }, + }, + }, executeResponse) +} diff --git a/pkg/builder/tracing_build_executor.go b/pkg/builder/tracing_build_executor.go new file mode 100644 index 0000000..4dab29e --- /dev/null +++ b/pkg/builder/tracing_build_executor.go @@ -0,0 +1,68 @@ +package builder + +import ( + "context" + + remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + re_filesystem "github.com/buildbarn/bb-remote-execution/pkg/filesystem" + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/access" + "github.com/buildbarn/bb-remote-execution/pkg/proto/remoteworker" + "github.com/buildbarn/bb-storage/pkg/digest" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" +) + +type tracingBuildExecutor struct { + BuildExecutor + tracer trace.Tracer +} + +// NewTracingBuildExecutor is a decorator for BuildExecutor that creates +// an OpenTelemetry trace span for every action that is executed. At the +// start of every execution state, an event is added to the span that +// indicates which state is entered. 
+func NewTracingBuildExecutor(buildExecutor BuildExecutor, tracerProvider trace.TracerProvider) BuildExecutor { + return &tracingBuildExecutor{ + BuildExecutor: buildExecutor, + tracer: tracerProvider.Tracer("github.com/buildbarn/bb-remote-execution/pkg/builder"), + } +} + +func (be *tracingBuildExecutor) Execute(ctx context.Context, filePool re_filesystem.FilePool, monitor access.UnreadDirectoryMonitor, digestFunction digest.Function, request *remoteworker.DesiredState_Executing, executionStateUpdates chan<- *remoteworker.CurrentState_Executing) *remoteexecution.ExecuteResponse { + actionDigest := request.ActionDigest + action := request.Action + ctxWithTracing, span := be.tracer.Start(ctx, "BuildExecutor.Execute", trace.WithAttributes( + attribute.String("action_digest.hash", actionDigest.GetHash()), + attribute.Int64("action_digest.size_bytes", actionDigest.GetSizeBytes()), + attribute.String("digest_function", digestFunction.GetEnumValue().String()), + attribute.Bool("do_not_cache", action.GetDoNotCache()), + attribute.String("instance_name", digestFunction.GetInstanceName().String()), + attribute.Float64("timeout", action.GetTimeout().AsDuration().Seconds()), + )) + defer span.End() + + baseUpdates := make(chan *remoteworker.CurrentState_Executing) + baseCompletion := make(chan *remoteexecution.ExecuteResponse) + go func() { + baseCompletion <- be.BuildExecutor.Execute(ctxWithTracing, filePool, monitor, digestFunction, request, baseUpdates) + }() + + for { + select { + case update := <-baseUpdates: + switch update.ExecutionState.(type) { + case *remoteworker.CurrentState_Executing_FetchingInputs: + span.AddEvent("FetchingInputs") + case *remoteworker.CurrentState_Executing_Running: + span.AddEvent("Running") + case *remoteworker.CurrentState_Executing_UploadingOutputs: + span.AddEvent("UploadingOutputs") + } + + executionStateUpdates <- update + case response := <-baseCompletion: + return response + } + } +} diff --git 
a/pkg/builder/tracing_build_executor_test.go b/pkg/builder/tracing_build_executor_test.go new file mode 100644 index 0000000..ad241a8 --- /dev/null +++ b/pkg/builder/tracing_build_executor_test.go @@ -0,0 +1,93 @@ +package builder_test + +import ( + "context" + "testing" + + remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + "github.com/buildbarn/bb-remote-execution/internal/mock" + "github.com/buildbarn/bb-remote-execution/pkg/builder" + re_filesystem "github.com/buildbarn/bb-remote-execution/pkg/filesystem" + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/access" + "github.com/buildbarn/bb-remote-execution/pkg/proto/remoteworker" + "github.com/buildbarn/bb-storage/pkg/digest" + "github.com/buildbarn/bb-storage/pkg/testutil" + "github.com/golang/mock/gomock" + + "google.golang.org/protobuf/types/known/durationpb" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" +) + +func TestTracingBuildExecutor(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + // Creating an instance of TracingBuildExecutor should cause it + // to create a new Tracer object. + baseBuildExecutor := mock.NewMockBuildExecutor(ctrl) + tracerProvider := mock.NewMockTracerProvider(ctrl) + tracer := mock.NewMockTracer(ctrl) + tracerProvider.EXPECT().Tracer("github.com/buildbarn/bb-remote-execution/pkg/builder").Return(tracer) + + buildExecutor := builder.NewTracingBuildExecutor(baseBuildExecutor, tracerProvider) + + // Example execution request, response and execution state updates. 
+ request := &remoteworker.DesiredState_Executing{ + ActionDigest: &remoteexecution.Digest{ + Hash: "caa9adf60f3b5fd05d7cb6f17bac9201ad9d444d01e7b6964901055e6d6a5c4b", + SizeBytes: 142, + }, + Action: &remoteexecution.Action{ + DoNotCache: true, + Timeout: &durationpb.Duration{Seconds: 5}, + }, + } + response := &remoteexecution.ExecuteResponse{} + fetchingInputs := &remoteworker.CurrentState_Executing{ + ExecutionState: &remoteworker.CurrentState_Executing_FetchingInputs{}, + } + running := &remoteworker.CurrentState_Executing{ + ExecutionState: &remoteworker.CurrentState_Executing_Running{}, + } + uploadingOutputs := &remoteworker.CurrentState_Executing{ + ExecutionState: &remoteworker.CurrentState_Executing_UploadingOutputs{}, + } + + // Call Execute() against the TracingBuildExecutor. The call + // should be forwarded to the underlying BuildExecutor in + // literal form, and execution state updates should also be + // forwarded back to the caller. A trace span should be created + // that contains events for each of the execution state updates. 
+ ctxWithTracing := mock.NewMockContext(ctrl) + filePool := mock.NewMockFilePool(ctrl) + monitor := mock.NewMockUnreadDirectoryMonitor(ctrl) + digestFunction := digest.MustNewFunction("hello", remoteexecution.DigestFunction_SHA256) + baseBuildExecutor.EXPECT().Execute(ctxWithTracing, filePool, monitor, digestFunction, testutil.EqProto(t, request), gomock.Any()).DoAndReturn( + func(ctx context.Context, filePool re_filesystem.FilePool, monitor access.UnreadDirectoryMonitor, digestFunction digest.Function, request *remoteworker.DesiredState_Executing, executionStateUpdates chan<- *remoteworker.CurrentState_Executing) *remoteexecution.ExecuteResponse { + executionStateUpdates <- fetchingInputs + executionStateUpdates <- running + executionStateUpdates <- uploadingOutputs + return response + }) + + span := mock.NewMockSpan(ctrl) + tracer.EXPECT().Start(ctx, "BuildExecutor.Execute", trace.WithAttributes( + attribute.String("action_digest.hash", "caa9adf60f3b5fd05d7cb6f17bac9201ad9d444d01e7b6964901055e6d6a5c4b"), + attribute.Int64("action_digest.size_bytes", 142), + attribute.String("digest_function", "SHA256"), + attribute.Bool("do_not_cache", true), + attribute.String("instance_name", "hello"), + attribute.Float64("timeout", 5), + )).Return(ctxWithTracing, span) + span.EXPECT().AddEvent("FetchingInputs") + span.EXPECT().AddEvent("Running") + span.EXPECT().AddEvent("UploadingOutputs") + span.EXPECT().End() + + executionStateUpdates := make(chan *remoteworker.CurrentState_Executing, 3) + testutil.RequireEqualProto(t, response, buildExecutor.Execute(ctx, filePool, monitor, digestFunction, request, executionStateUpdates)) + testutil.RequireEqualProto(t, fetchingInputs, <-executionStateUpdates) + testutil.RequireEqualProto(t, running, <-executionStateUpdates) + testutil.RequireEqualProto(t, uploadingOutputs, <-executionStateUpdates) +} diff --git a/pkg/builder/uploadable_directory.go b/pkg/builder/uploadable_directory.go new file mode 100644 index 0000000..3afa048 --- 
/dev/null +++ b/pkg/builder/uploadable_directory.go @@ -0,0 +1,25 @@ +package builder + +import ( + "context" + + "github.com/buildbarn/bb-storage/pkg/digest" + "github.com/buildbarn/bb-storage/pkg/filesystem" + "github.com/buildbarn/bb-storage/pkg/filesystem/path" +) + +// UploadableDirectory is a directory that can be uploaded into the +// Content Addressable Storage. It is provided to +// OutputHierarchy.UploadOutputs(), which traverses it and uploads paths +// that were specified in the Action message. +type UploadableDirectory interface { + // Methods inherited from filesystem.Directory. + Close() error + EnterUploadableDirectory(name path.Component) (UploadableDirectory, error) + Lstat(name path.Component) (filesystem.FileInfo, error) + ReadDir() ([]filesystem.FileInfo, error) + Readlink(name path.Component) (string, error) + + // Upload a file into the Content Addressable Storage. + UploadFile(ctx context.Context, name path.Component, digestFunction digest.Function) (digest.Digest, error) +} diff --git a/pkg/builder/virtual_build_directory.go b/pkg/builder/virtual_build_directory.go new file mode 100644 index 0000000..5dca189 --- /dev/null +++ b/pkg/builder/virtual_build_directory.go @@ -0,0 +1,162 @@ +package builder + +import ( + "context" + "os" + "syscall" + + "github.com/buildbarn/bb-remote-execution/pkg/cas" + re_filesystem "github.com/buildbarn/bb-remote-execution/pkg/filesystem" + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/access" + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/virtual" + "github.com/buildbarn/bb-storage/pkg/blobstore" + "github.com/buildbarn/bb-storage/pkg/digest" + "github.com/buildbarn/bb-storage/pkg/filesystem" + "github.com/buildbarn/bb-storage/pkg/filesystem/path" + "github.com/buildbarn/bb-storage/pkg/util" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +type virtualBuildDirectoryOptions struct { + directoryFetcher cas.DirectoryFetcher + contentAddressableStorage 
blobstore.BlobAccess + symlinkFactory virtual.SymlinkFactory + characterDeviceFactory virtual.CharacterDeviceFactory + handleAllocator virtual.StatefulHandleAllocator +} + +type virtualBuildDirectory struct { + virtual.PrepopulatedDirectory + options *virtualBuildDirectoryOptions +} + +// NewVirtualBuildDirectory creates a BuildDirectory that is backed by a +// virtual.PrepopulatedDirectory. Instead of creating all files in the +// input root explicitly, it calls PrepopulatedDirectory.CreateChildren +// to add special file and directory nodes whose contents are read on +// demand. +func NewVirtualBuildDirectory(directory virtual.PrepopulatedDirectory, directoryFetcher cas.DirectoryFetcher, contentAddressableStorage blobstore.BlobAccess, symlinkFactory virtual.SymlinkFactory, characterDeviceFactory virtual.CharacterDeviceFactory, handleAllocator virtual.StatefulHandleAllocator) BuildDirectory { + return &virtualBuildDirectory{ + PrepopulatedDirectory: directory, + options: &virtualBuildDirectoryOptions{ + directoryFetcher: directoryFetcher, + contentAddressableStorage: contentAddressableStorage, + symlinkFactory: symlinkFactory, + characterDeviceFactory: characterDeviceFactory, + handleAllocator: handleAllocator, + }, + } +} + +func (d *virtualBuildDirectory) EnterBuildDirectory(name path.Component) (BuildDirectory, error) { + child, err := d.LookupChild(name) + if err != nil { + return nil, err + } + directory, _ := child.GetPair() + if directory == nil { + return nil, syscall.ENOTDIR + } + return &virtualBuildDirectory{ + PrepopulatedDirectory: directory, + options: d.options, + }, nil +} + +func (d *virtualBuildDirectory) Close() error { + // Virtual directories do not need to be released explicitly. 
+ return nil +} + +func (d *virtualBuildDirectory) EnterParentPopulatableDirectory(name path.Component) (ParentPopulatableDirectory, error) { + return d.EnterBuildDirectory(name) +} + +func (d *virtualBuildDirectory) EnterUploadableDirectory(name path.Component) (UploadableDirectory, error) { + return d.EnterBuildDirectory(name) +} + +func (d *virtualBuildDirectory) InstallHooks(filePool re_filesystem.FilePool, errorLogger util.ErrorLogger) { + d.PrepopulatedDirectory.InstallHooks( + virtual.NewHandleAllocatingFileAllocator( + virtual.NewPoolBackedFileAllocator(filePool, errorLogger), + d.options.handleAllocator), + errorLogger) +} + +func (d *virtualBuildDirectory) MergeDirectoryContents(ctx context.Context, errorLogger util.ErrorLogger, digest digest.Digest, monitor access.UnreadDirectoryMonitor) error { + initialContentsFetcher := virtual.NewCASInitialContentsFetcher( + ctx, + cas.NewDecomposedDirectoryWalker(d.options.directoryFetcher, digest), + virtual.NewStatelessHandleAllocatingCASFileFactory( + virtual.NewBlobAccessCASFileFactory( + ctx, + d.options.contentAddressableStorage, + errorLogger), + d.options.handleAllocator.New()), + d.options.symlinkFactory, + digest.GetDigestFunction()) + if monitor != nil { + initialContentsFetcher = virtual.NewAccessMonitoringInitialContentsFetcher(initialContentsFetcher, monitor) + } + children, err := initialContentsFetcher.FetchContents(func(name path.Component) virtual.FileReadMonitor { return nil }) + if err != nil { + return err + } + return d.CreateChildren(children, false) +} + +func (d *virtualBuildDirectory) UploadFile(ctx context.Context, name path.Component, digestFunction digest.Function) (digest.Digest, error) { + child, err := d.LookupChild(name) + if err != nil { + return digest.BadDigest, err + } + if _, leaf := child.GetPair(); leaf != nil { + return leaf.UploadFile(ctx, d.options.contentAddressableStorage, digestFunction) + } + return digest.BadDigest, syscall.EISDIR +} + +func (d *virtualBuildDirectory) 
Lstat(name path.Component) (filesystem.FileInfo, error) { + child, err := d.LookupChild(name) + if err != nil { + return filesystem.FileInfo{}, err + } + if _, leaf := child.GetPair(); leaf != nil { + return virtual.GetFileInfo(name, leaf), nil + } + return filesystem.NewFileInfo(name, filesystem.FileTypeDirectory, false), nil +} + +func (d *virtualBuildDirectory) Mkdir(name path.Component, mode os.FileMode) error { + return d.CreateChildren(map[path.Component]virtual.InitialNode{ + name: virtual.InitialNode{}.FromDirectory(virtual.EmptyInitialContentsFetcher), + }, false) +} + +func (d *virtualBuildDirectory) Mknod(name path.Component, perm os.FileMode, deviceNumber filesystem.DeviceNumber) error { + if perm&os.ModeType != os.ModeDevice|os.ModeCharDevice { + return status.Error(codes.InvalidArgument, "The provided file mode is not for a character device") + } + characterDevice := d.options.characterDeviceFactory.LookupCharacterDevice(deviceNumber) + if err := d.CreateChildren(map[path.Component]virtual.InitialNode{ + name: virtual.InitialNode{}.FromLeaf(characterDevice), + }, false); err != nil { + characterDevice.Unlink() + return err + } + return nil +} + +func (d *virtualBuildDirectory) Readlink(name path.Component) (string, error) { + child, err := d.LookupChild(name) + if err != nil { + return "", err + } + if _, leaf := child.GetPair(); leaf != nil { + return leaf.Readlink() + } + return "", syscall.EISDIR +} diff --git a/pkg/cas/BUILD.bazel b/pkg/cas/BUILD.bazel new file mode 100644 index 0000000..2e1260f --- /dev/null +++ b/pkg/cas/BUILD.bazel @@ -0,0 +1,61 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "cas", + srcs = [ + "blob_access_directory_fetcher.go", + "blob_access_file_fetcher.go", + "caching_directory_fetcher.go", + "configuration.go", + "decomposed_directory_walker.go", + "directory_fetcher.go", + "directory_walker.go", + "file_fetcher.go", + "hardlinking_file_fetcher.go", + 
"suspending_directory_fetcher.go", + ], + importpath = "github.com/buildbarn/bb-remote-execution/pkg/cas", + visibility = ["//visibility:public"], + deps = [ + "//pkg/clock", + "//pkg/proto/configuration/cas", + "@com_github_bazelbuild_remote_apis//build/bazel/remote/execution/v2:execution", + "@com_github_buildbarn_bb_storage//pkg/blobstore", + "@com_github_buildbarn_bb_storage//pkg/blobstore/buffer", + "@com_github_buildbarn_bb_storage//pkg/blobstore/slicing", + "@com_github_buildbarn_bb_storage//pkg/digest", + "@com_github_buildbarn_bb_storage//pkg/eviction", + "@com_github_buildbarn_bb_storage//pkg/filesystem", + "@com_github_buildbarn_bb_storage//pkg/filesystem/path", + "@com_github_buildbarn_bb_storage//pkg/util", + "@org_golang_google_grpc//codes", + "@org_golang_google_grpc//status", + "@org_golang_google_protobuf//encoding/protowire", + "@org_golang_google_protobuf//proto", + ], +) + +go_test( + name = "cas_test", + srcs = [ + "blob_access_directory_fetcher_test.go", + "caching_directory_fetcher_test.go", + "decomposed_directory_walker_test.go", + "hardlinking_file_fetcher_test.go", + ], + deps = [ + ":cas", + "//internal/mock", + "@com_github_bazelbuild_remote_apis//build/bazel/remote/execution/v2:execution", + "@com_github_buildbarn_bb_storage//pkg/blobstore/buffer", + "@com_github_buildbarn_bb_storage//pkg/blobstore/slicing", + "@com_github_buildbarn_bb_storage//pkg/digest", + "@com_github_buildbarn_bb_storage//pkg/eviction", + "@com_github_buildbarn_bb_storage//pkg/filesystem/path", + "@com_github_buildbarn_bb_storage//pkg/testutil", + "@com_github_golang_mock//gomock", + "@com_github_stretchr_testify//require", + "@org_golang_google_grpc//codes", + "@org_golang_google_grpc//status", + ], +) diff --git a/pkg/cas/blob_access_directory_fetcher.go b/pkg/cas/blob_access_directory_fetcher.go new file mode 100644 index 0000000..6479611 --- /dev/null +++ b/pkg/cas/blob_access_directory_fetcher.go @@ -0,0 +1,168 @@ +package cas + +import ( + "context" + "io" + 
+ remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + "github.com/buildbarn/bb-storage/pkg/blobstore" + "github.com/buildbarn/bb-storage/pkg/blobstore/buffer" + "github.com/buildbarn/bb-storage/pkg/blobstore/slicing" + "github.com/buildbarn/bb-storage/pkg/digest" + "github.com/buildbarn/bb-storage/pkg/util" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/encoding/protowire" +) + +type blobAccessDirectoryFetcher struct { + blobAccess blobstore.BlobAccess + slicer treeBlobSlicer + maximumTreeSizeBytes int64 +} + +// NewBlobAccessDirectoryFetcher creates a DirectoryFetcher that reads +// Directory objects from a BlobAccess based store. +func NewBlobAccessDirectoryFetcher(blobAccess blobstore.BlobAccess, maximumDirectorySizeBytes int, maximumTreeSizeBytes int64) DirectoryFetcher { + return &blobAccessDirectoryFetcher{ + blobAccess: blobAccess, + slicer: treeBlobSlicer{ + maximumDirectorySizeBytes: maximumDirectorySizeBytes, + }, + maximumTreeSizeBytes: maximumTreeSizeBytes, + } +} + +func (df *blobAccessDirectoryFetcher) GetDirectory(ctx context.Context, directoryDigest digest.Digest) (*remoteexecution.Directory, error) { + m, err := df.blobAccess.Get(ctx, directoryDigest).ToProto(&remoteexecution.Directory{}, df.slicer.maximumDirectorySizeBytes) + if err != nil { + return nil, err + } + return m.(*remoteexecution.Directory), nil +} + +func (df *blobAccessDirectoryFetcher) GetTreeRootDirectory(ctx context.Context, treeDigest digest.Digest) (*remoteexecution.Directory, error) { + if treeDigest.GetSizeBytes() > df.maximumTreeSizeBytes { + return nil, status.Errorf(codes.InvalidArgument, "Tree exceeds the maximum permitted size of %d bytes", df.maximumTreeSizeBytes) + } + + r := df.blobAccess.Get(ctx, treeDigest).ToReader() + defer r.Close() + + var rootDirectory *remoteexecution.Directory + if err := util.VisitProtoBytesFields(r, func(fieldNumber protowire.Number, offsetBytes, sizeBytes 
int64, fieldReader io.Reader) error { + if fieldNumber == blobstore.TreeRootFieldNumber { + if rootDirectory != nil { + return status.Error(codes.InvalidArgument, "Tree contains multiple root directories") + } + m, err := buffer.NewProtoBufferFromReader( + &remoteexecution.Directory{}, + io.NopCloser(fieldReader), + buffer.UserProvided, + ).ToProto(&remoteexecution.Directory{}, df.slicer.maximumDirectorySizeBytes) + if err != nil { + return err + } + rootDirectory = m.(*remoteexecution.Directory) + } + return nil + }); err != nil { + if _, copyErr := io.Copy(io.Discard, r); copyErr != nil { + err = copyErr + } + return nil, err + } + if rootDirectory == nil { + return nil, status.Error(codes.InvalidArgument, "Tree does not contain a root directory") + } + return rootDirectory, nil +} + +func (df *blobAccessDirectoryFetcher) GetTreeChildDirectory(ctx context.Context, treeDigest, childDigest digest.Digest) (*remoteexecution.Directory, error) { + if treeDigest.GetSizeBytes() > df.maximumTreeSizeBytes { + return nil, status.Errorf(codes.InvalidArgument, "Tree exceeds the maximum permitted size of %d bytes", df.maximumTreeSizeBytes) + } + + m, err := df.blobAccess.GetFromComposite(ctx, treeDigest, childDigest, &df.slicer).ToProto(&remoteexecution.Directory{}, df.slicer.maximumDirectorySizeBytes) + if err != nil { + return nil, err + } + return m.(*remoteexecution.Directory), nil +} + +// treeBlobSlicer is capable of unpacking an REv2 Tree object stored in +// the Content Addressable Storage (CAS) into separate Directory +// objects. This allows implementations of BlobAccess to store the +// contents of the Tree just once, but to create entries in its index +// that refer to each of the Directories contained within. 
+type treeBlobSlicer struct { + maximumDirectorySizeBytes int +} + +func (bs *treeBlobSlicer) Slice(b buffer.Buffer, requestedChildDigest digest.Digest) (buffer.Buffer, []slicing.BlobSlice) { + r := b.ToReader() + defer r.Close() + + requestedSizeBytes := requestedChildDigest.GetSizeBytes() + digestFunction := requestedChildDigest.GetDigestFunction() + var slices []slicing.BlobSlice + var bRequested buffer.Buffer + if err := util.VisitProtoBytesFields(r, func(fieldNumber protowire.Number, offsetBytes, sizeBytes int64, fieldReader io.Reader) error { + if fieldNumber == blobstore.TreeChildrenFieldNumber { + var childDigest digest.Digest + if bRequested == nil && sizeBytes == requestedSizeBytes { + // This directory has the same size as + // the one that is requested, so we may + // need to return it. Duplicate it. + b1, b2 := buffer.NewProtoBufferFromReader( + &remoteexecution.Directory{}, + io.NopCloser(fieldReader), + buffer.UserProvided, + ).CloneCopy(bs.maximumDirectorySizeBytes) + + childDigestGenerator := digestFunction.NewGenerator(sizeBytes) + if err := b1.IntoWriter(childDigestGenerator); err != nil { + b2.Discard() + return err + } + childDigest = childDigestGenerator.Sum() + + if childDigest == requestedChildDigest { + // Found the directory that was + // requested. Return it. + bRequested = b2 + } else { + b2.Discard() + } + } else { + // The directory's size doesn't match, + // so we can compute its checksum + // without unmarshaling it. 
+ childDigestGenerator := digestFunction.NewGenerator(sizeBytes) + if _, err := io.Copy(childDigestGenerator, fieldReader); err != nil { + return err + } + childDigest = childDigestGenerator.Sum() + } + slices = append(slices, slicing.BlobSlice{ + Digest: childDigest, + OffsetBytes: offsetBytes, + SizeBytes: sizeBytes, + }) + } + return nil + }); err != nil { + if bRequested != nil { + bRequested.Discard() + } + if _, copyErr := io.Copy(io.Discard, r); copyErr != nil { + err = copyErr + } + return buffer.NewBufferFromError(err), nil + } + if bRequested == nil { + bRequested = buffer.NewBufferFromError(status.Error(codes.InvalidArgument, "Requested child directory is not contained in the tree")) + } + return bRequested, slices +} diff --git a/pkg/cas/blob_access_directory_fetcher_test.go b/pkg/cas/blob_access_directory_fetcher_test.go new file mode 100644 index 0000000..ad97910 --- /dev/null +++ b/pkg/cas/blob_access_directory_fetcher_test.go @@ -0,0 +1,324 @@ +package cas_test + +import ( + "bytes" + "context" + "io" + "testing" + + remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + "github.com/buildbarn/bb-remote-execution/internal/mock" + "github.com/buildbarn/bb-remote-execution/pkg/cas" + "github.com/buildbarn/bb-storage/pkg/blobstore/buffer" + "github.com/buildbarn/bb-storage/pkg/blobstore/slicing" + "github.com/buildbarn/bb-storage/pkg/digest" + "github.com/buildbarn/bb-storage/pkg/testutil" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func TestBlobAccessDirectoryFetcherGetDirectory(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + blobAccess := mock.NewMockBlobAccess(ctrl) + directoryFetcher := cas.NewBlobAccessDirectoryFetcher(blobAccess, 1000, 10000) + + t.Run("IOError", func(t *testing.T) { + // Failures reading the Directory object should be propagated. 
+ directoryDigest := digest.MustNewDigest("example", remoteexecution.DigestFunction_MD5, "756b15c8f94b519e96135dcfde0e58c5", 50) + + r := mock.NewMockFileReader(ctrl) + r.EXPECT().ReadAt(gomock.Any(), gomock.Any()).Return(0, status.Error(codes.Internal, "I/O error")) + r.EXPECT().Close() + blobAccess.EXPECT().Get(ctx, directoryDigest).Return(buffer.NewValidatedBufferFromReaderAt(r, 100)) + + _, err := directoryFetcher.GetDirectory(ctx, directoryDigest) + testutil.RequireEqualStatus(t, status.Error(codes.Internal, "I/O error"), err) + }) + + t.Run("InvalidDirectory", func(t *testing.T) { + // It is only valid to call GetDirectory() against an + // REv2 Directory object. + directoryDigest := digest.MustNewDigest("example", remoteexecution.DigestFunction_MD5, "764b0da73352b970cfbfc488a0f54934", 30) + + blobAccess.EXPECT().Get(ctx, directoryDigest).Return(buffer.NewValidatedBufferFromByteSlice([]byte("This is not a Directory object"))) + + _, err := directoryFetcher.GetDirectory(ctx, directoryDigest) + testutil.RequirePrefixedStatus(t, status.Error(codes.InvalidArgument, "Failed to unmarshal message: "), err) + }) + + t.Run("Success", func(t *testing.T) { + directoryDigest := digest.MustNewDigest("example", remoteexecution.DigestFunction_MD5, "f5f634611dd11ccba54c7b9d9607c3c2", 100) + exampleDirectory := &remoteexecution.Directory{ + Files: []*remoteexecution.FileNode{ + { + Name: "hello.txt", + Digest: &remoteexecution.Digest{ + Hash: "6f6e6ce3fa3aecc5e8275dbfe43a9790", + SizeBytes: 42, + }, + }, + }, + } + + blobAccess.EXPECT().Get(ctx, directoryDigest).Return(buffer.NewProtoBufferFromProto(exampleDirectory, buffer.UserProvided)) + + directory, err := directoryFetcher.GetDirectory(ctx, directoryDigest) + require.NoError(t, err) + testutil.RequireEqualProto(t, exampleDirectory, directory) + }) +} + +func TestBlobAccessDirectoryFetcherGetTreeRootDirectory(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + blobAccess := 
mock.NewMockBlobAccess(ctrl) + directoryFetcher := cas.NewBlobAccessDirectoryFetcher(blobAccess, 1000, 10000) + + t.Run("TooBig", func(t *testing.T) { + _, err := directoryFetcher.GetTreeRootDirectory(ctx, digest.MustNewDigest("example", remoteexecution.DigestFunction_MD5, "f5f634611dd11ccba54c7b9d9607c3c2", 100000)) + testutil.RequireEqualStatus(t, status.Error(codes.InvalidArgument, "Tree exceeds the maximum permitted size of 10000 bytes"), err) + }) + + t.Run("IOError", func(t *testing.T) { + // Failures reading the Tree object should be propagated. + treeDigest := digest.MustNewDigest("example", remoteexecution.DigestFunction_MD5, "756b15c8f94b519e96135dcfde0e58c5", 50) + + r := mock.NewMockFileReader(ctrl) + r.EXPECT().ReadAt(gomock.Any(), gomock.Any()).Return(0, status.Error(codes.Internal, "I/O error")).AnyTimes() + r.EXPECT().Close() + blobAccess.EXPECT().Get(ctx, treeDigest).Return(buffer.NewValidatedBufferFromReaderAt(r, 100)) + + _, err := directoryFetcher.GetTreeRootDirectory(ctx, treeDigest) + testutil.RequireEqualStatus(t, status.Error(codes.Internal, "I/O error"), err) + }) + + t.Run("InvalidDirectory", func(t *testing.T) { + // It is only valid to call GetTreeRootDirectory() + // against an REv2 Tree object. + treeDigest := digest.MustNewDigest("example", remoteexecution.DigestFunction_MD5, "3478477ca0af085e8d676f9a53b095cb", 25) + + blobAccess.EXPECT().Get(ctx, treeDigest).Return(buffer.NewValidatedBufferFromByteSlice([]byte("This is not a Tree object"))) + + _, err := directoryFetcher.GetTreeRootDirectory(ctx, treeDigest) + testutil.RequireEqualStatus(t, status.Error(codes.InvalidArgument, "Field with number 10 at offset 0 has type 4, while 2 was expected"), err) + }) + + t.Run("MissingRootDirectory", func(t *testing.T) { + // Malformed Tree objects may not have a root directory. 
+ treeDigest := digest.MustNewDigest("example", remoteexecution.DigestFunction_MD5, "f5f634611dd11ccba54c7b9d9607c3c2", 100) + + blobAccess.EXPECT().Get(ctx, treeDigest).Return(buffer.NewProtoBufferFromProto(&remoteexecution.Tree{}, buffer.UserProvided)) + + _, err := directoryFetcher.GetTreeRootDirectory(ctx, treeDigest) + testutil.RequireEqualStatus(t, status.Error(codes.InvalidArgument, "Tree does not contain a root directory"), err) + }) + + t.Run("ChecksumMismatch", func(t *testing.T) { + // If an REv2 Tree object cannot be parsed, it must be + // read in its entirety to ensure this isn't caused by a + // checksum mismatch. + treeDigest := digest.MustNewDigest("example", remoteexecution.DigestFunction_MD5, "ceb78ab91c6d580aceea6618dd6fc5cc", 10000) + + blobAccess.EXPECT().Get(ctx, treeDigest). + Return(buffer.NewCASBufferFromReader(treeDigest, io.NopCloser(bytes.NewBuffer(make([]byte, 10000))), buffer.UserProvided)) + + _, err := directoryFetcher.GetTreeRootDirectory(ctx, treeDigest) + testutil.RequireEqualStatus(t, status.Error(codes.InvalidArgument, "Buffer has checksum b85d6fb9ef4260dcf1ce0a1b0bff80d3, while ceb78ab91c6d580aceea6618dd6fc5cc was expected"), err) + }) + + t.Run("Success", func(t *testing.T) { + treeDigest := digest.MustNewDigest("example", remoteexecution.DigestFunction_MD5, "f5f634611dd11ccba54c7b9d9607c3c2", 100) + exampleDirectory := &remoteexecution.Directory{ + Files: []*remoteexecution.FileNode{ + { + Name: "hello.txt", + Digest: &remoteexecution.Digest{ + Hash: "6f6e6ce3fa3aecc5e8275dbfe43a9790", + SizeBytes: 42, + }, + }, + }, + } + + blobAccess.EXPECT().Get(ctx, treeDigest).Return(buffer.NewProtoBufferFromProto(&remoteexecution.Tree{ + Root: exampleDirectory, + }, buffer.UserProvided)) + + directory, err := directoryFetcher.GetTreeRootDirectory(ctx, treeDigest) + require.NoError(t, err) + testutil.RequireEqualProto(t, exampleDirectory, directory) + }) +} + +func TestBlobAccessDirectoryFetcherGetTreeChildDirectory(t *testing.T) { + 
ctrl, ctx := gomock.WithContext(context.Background(), t) + + blobAccess := mock.NewMockBlobAccess(ctrl) + directoryFetcher := cas.NewBlobAccessDirectoryFetcher(blobAccess, 1000, 10000) + + t.Run("TooBig", func(t *testing.T) { + _, err := directoryFetcher.GetTreeChildDirectory( + ctx, + digest.MustNewDigest("example", remoteexecution.DigestFunction_MD5, "5959bc9570aa7909a09163bb2201f4af", 100000), + digest.MustNewDigest("example", remoteexecution.DigestFunction_MD5, "2c09e7b2ad516c4cd9fc5c244ae08794", 100)) + testutil.RequireEqualStatus(t, status.Error(codes.InvalidArgument, "Tree exceeds the maximum permitted size of 10000 bytes"), err) + }) + + t.Run("IOError", func(t *testing.T) { + // Failures reading the Tree object should be propagated. + treeDigest := digest.MustNewDigest("example", remoteexecution.DigestFunction_MD5, "40d8f0c70941162ee9dfacf8863d23f5", 100) + directoryDigest := digest.MustNewDigest("example", remoteexecution.DigestFunction_MD5, "756b15c8f94b519e96135dcfde0e58c5", 50) + + r := mock.NewMockFileReader(ctrl) + r.EXPECT().ReadAt(gomock.Any(), gomock.Any()).Return(0, status.Error(codes.Internal, "I/O error")).AnyTimes() + r.EXPECT().Close() + blobAccess.EXPECT().GetFromComposite(ctx, treeDigest, directoryDigest, gomock.Any()). + DoAndReturn(func(ctx context.Context, treeDigest, childDigest digest.Digest, slicer slicing.BlobSlicer) buffer.Buffer { + b, slices := slicer.Slice(buffer.NewValidatedBufferFromReaderAt(r, 100), childDigest) + require.Empty(t, slices) + return b + }). + AnyTimes() + + _, err := directoryFetcher.GetTreeChildDirectory( + ctx, + treeDigest, + directoryDigest) + testutil.RequireEqualStatus(t, status.Error(codes.Internal, "I/O error"), err) + }) + + t.Run("InvalidTree", func(t *testing.T) { + // It is only valid to call GetTreeChildDirectory() + // against an REv2 Tree object. 
+ treeDigest := digest.MustNewDigest("example", remoteexecution.DigestFunction_MD5, "3478477ca0af085e8d676f9a53b095cb", 25) + directoryDigest := digest.MustNewDigest("example", remoteexecution.DigestFunction_MD5, "f297d724d679d79d577d46c79fd4d712", 10) + + blobAccess.EXPECT().GetFromComposite(ctx, treeDigest, directoryDigest, gomock.Any()). + DoAndReturn(func(ctx context.Context, treeDigest, childDigest digest.Digest, slicer slicing.BlobSlicer) buffer.Buffer { + b, slices := slicer.Slice(buffer.NewValidatedBufferFromByteSlice([]byte("This is not a Tree object")), childDigest) + require.Empty(t, slices) + return b + }). + AnyTimes() + + _, err := directoryFetcher.GetTreeChildDirectory( + ctx, + treeDigest, + directoryDigest) + testutil.RequireEqualStatus(t, status.Error(codes.InvalidArgument, "Field with number 10 at offset 0 has type 4, while 2 was expected"), err) + }) + + t.Run("ChecksumMismatch", func(t *testing.T) { + // If an REv2 Tree object cannot be parsed, it must be + // read in its entirety to ensure this isn't caused by a + // checksum mismatch. + treeDigest := digest.MustNewDigest("example", remoteexecution.DigestFunction_MD5, "ceb78ab91c6d580aceea6618dd6fc5cc", 10000) + directoryDigest := digest.MustNewDigest("example", remoteexecution.DigestFunction_MD5, "138f65a6fb46dc6d97618a24b4490c19", 10) + + blobAccess.EXPECT().GetFromComposite(ctx, treeDigest, directoryDigest, gomock.Any()). 
+ DoAndReturn(func(ctx context.Context, treeDigest, childDigest digest.Digest, slicer slicing.BlobSlicer) buffer.Buffer { + b, slices := slicer.Slice(buffer.NewCASBufferFromReader(treeDigest, io.NopCloser(bytes.NewBuffer(make([]byte, 10000))), buffer.UserProvided), childDigest) + require.Empty(t, slices) + return b + }) + + _, err := directoryFetcher.GetTreeChildDirectory( + ctx, + treeDigest, + directoryDigest) + testutil.RequireEqualStatus(t, status.Error(codes.InvalidArgument, "Buffer has checksum b85d6fb9ef4260dcf1ce0a1b0bff80d3, while ceb78ab91c6d580aceea6618dd6fc5cc was expected"), err) + }) + + t.Run("ValidTree", func(t *testing.T) { + // Call GetTreeChildDirectory() against a valid Tree + // object. The provided BlobSlicer should be capable of + // extracting the locations of both children. + directory1 := &remoteexecution.Directory{ + Directories: []*remoteexecution.DirectoryNode{ + { + Name: "subdirectory", + Digest: &remoteexecution.Digest{ + Hash: "a7536a0ebdeefa48280e135ea77755f0", + SizeBytes: 51, + }, + }, + }, + } + directory2 := &remoteexecution.Directory{ + Files: []*remoteexecution.FileNode{ + { + Name: "hello.txt", + Digest: &remoteexecution.Digest{ + Hash: "8b1a9953c4611296a827abf8c47804d7", + SizeBytes: 5, + }, + }, + }, + } + tree := &remoteexecution.Tree{ + Root: &remoteexecution.Directory{ + Directories: []*remoteexecution.DirectoryNode{ + { + Name: "directory", + Digest: &remoteexecution.Digest{ + Hash: "ed56cd683c99acdff14b77db249819fc", + SizeBytes: 54, + }, + }, + }, + }, + Children: []*remoteexecution.Directory{ + directory1, + directory2, + }, + } + treeDigest := digest.MustNewDigest("example", remoteexecution.DigestFunction_MD5, "ed56cd683c99acdff14b77db249819fc", 162) + directory1Digest := digest.MustNewDigest("example", remoteexecution.DigestFunction_MD5, "5eede3f7e2a1a66c06ffd3906115a55b", 54) + directory2Digest := digest.MustNewDigest("example", remoteexecution.DigestFunction_MD5, "a7536a0ebdeefa48280e135ea77755f0", 51) + + 
blobAccess.EXPECT().GetFromComposite(ctx, treeDigest, gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, treeDigest, childDigest digest.Digest, slicer slicing.BlobSlicer) buffer.Buffer { + // Call into the slicer to extract + // Directory objects from the Tree. + b, slices := slicer.Slice(buffer.NewProtoBufferFromProto(tree, buffer.UserProvided), childDigest) + require.Equal(t, []slicing.BlobSlice{ + { + Digest: directory1Digest, + OffsetBytes: 55, + SizeBytes: 54, + }, + { + Digest: directory2Digest, + OffsetBytes: 111, + SizeBytes: 51, + }, + }, slices) + return b + }). + AnyTimes() + + fetchedDirectory, err := directoryFetcher.GetTreeChildDirectory( + ctx, + treeDigest, + directory1Digest) + require.NoError(t, err) + testutil.RequireEqualProto(t, directory1, fetchedDirectory) + + fetchedDirectory, err = directoryFetcher.GetTreeChildDirectory( + ctx, + treeDigest, + directory2Digest) + require.NoError(t, err) + testutil.RequireEqualProto(t, directory2, fetchedDirectory) + + _, err = directoryFetcher.GetTreeChildDirectory( + ctx, + treeDigest, + digest.MustNewDigest("example", remoteexecution.DigestFunction_MD5, "cb572cb90e637d1eb64c5358aa398b5e", 400)) + testutil.RequireEqualStatus(t, status.Error(codes.InvalidArgument, "Requested child directory is not contained in the tree"), err) + }) +} diff --git a/pkg/cas/blob_access_file_fetcher.go b/pkg/cas/blob_access_file_fetcher.go new file mode 100644 index 0000000..8ffbf34 --- /dev/null +++ b/pkg/cas/blob_access_file_fetcher.go @@ -0,0 +1,48 @@ +package cas + +import ( + "context" + "os" + + "github.com/buildbarn/bb-storage/pkg/blobstore" + "github.com/buildbarn/bb-storage/pkg/digest" + "github.com/buildbarn/bb-storage/pkg/filesystem" + "github.com/buildbarn/bb-storage/pkg/filesystem/path" +) + +type blobAccessFileFetcher struct { + blobAccess blobstore.BlobAccess +} + +// NewBlobAccessFileFetcher creates a FileFetcher that reads files fom a +// BlobAccess based store. 
+func NewBlobAccessFileFetcher(blobAccess blobstore.BlobAccess) FileFetcher { + return &blobAccessFileFetcher{ + blobAccess: blobAccess, + } +} + +func (ff *blobAccessFileFetcher) GetFile(ctx context.Context, digest digest.Digest, directory filesystem.Directory, name path.Component, isExecutable bool) error { + var mode os.FileMode = 0o444 + if isExecutable { + mode = 0o555 + } + + w, err := directory.OpenAppend(name, filesystem.CreateExcl(mode)) + if err != nil { + return err + } + defer w.Close() + + if err := ff.blobAccess.Get(ctx, digest).IntoWriter(w); err != nil { + // Ensure no traces are left behind upon failure. + directory.Remove(name) + return err + } + time := filesystem.DeterministicFileModificationTimestamp + if err := directory.Chtimes(name, time, time); err != nil { + directory.Remove(name) + return err + } + return nil +} diff --git a/pkg/cas/caching_directory_fetcher.go b/pkg/cas/caching_directory_fetcher.go new file mode 100644 index 0000000..88230fa --- /dev/null +++ b/pkg/cas/caching_directory_fetcher.go @@ -0,0 +1,152 @@ +package cas + +import ( + "context" + "sync" + + remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + "github.com/buildbarn/bb-storage/pkg/digest" + "github.com/buildbarn/bb-storage/pkg/eviction" + + "google.golang.org/protobuf/proto" +) + +// CachingDirectoryFetcherKey is the key type that is used by +// CachingDirectoryFetcher. +// +// A separate boolean field is used to distinguish Directory objects +// that are roots of Tree objects from the others. This is needed +// because the digests of these objects don't match with that of the +// resulting Directory object, while for the others it does. Using the +// same key space for all objects collectively would be insecure. +type CachingDirectoryFetcherKey struct { + DigestKey string + IsTreeRoot bool +} + +// CachingDirectoryFetcherEvictionSet is the eviction set type that is +// accepted by NewCachingDirectoryFetcher(). 
+type CachingDirectoryFetcherEvictionSet = eviction.Set[CachingDirectoryFetcherKey] + +type cachingFetcherObject struct { + directory *remoteexecution.Directory + sizeBytes int64 +} + +type cachingDirectoryFetcher struct { + base DirectoryFetcher + digestKeyFormat digest.KeyFormat + maximumCount int + maximumSizeBytes int64 + + lock sync.Mutex + objects map[CachingDirectoryFetcherKey]cachingFetcherObject + objectsSizeBytes int64 + evictionSet CachingDirectoryFetcherEvictionSet +} + +// NewCachingDirectoryFetcher creates an adapter for DirectoryFetcher +// that caches up a fixed number of unmarshalled objects in memory. This +// reduces the amount of time spent unmarshaling messages, and may +// reduce the amount of network traffic generated. +func NewCachingDirectoryFetcher(base DirectoryFetcher, digestKeyFormat digest.KeyFormat, maximumCount int, maximumSizeBytes int64, evictionSet CachingDirectoryFetcherEvictionSet) DirectoryFetcher { + return &cachingDirectoryFetcher{ + base: base, + digestKeyFormat: digestKeyFormat, + maximumCount: maximumCount, + maximumSizeBytes: maximumSizeBytes, + + objects: map[CachingDirectoryFetcherKey]cachingFetcherObject{}, + evictionSet: evictionSet, + } +} + +func (df *cachingDirectoryFetcher) lookup(key CachingDirectoryFetcherKey) (*remoteexecution.Directory, bool) { + df.lock.Lock() + defer df.lock.Unlock() + + if object, ok := df.objects[key]; ok { + df.evictionSet.Touch(key) + return object.directory, true + } + return nil, false +} + +func (df *cachingDirectoryFetcher) insert(key CachingDirectoryFetcherKey, directory *remoteexecution.Directory, sizeBytes int64) { + df.lock.Lock() + defer df.lock.Unlock() + + if _, ok := df.objects[key]; !ok { + // Make space if needed. 
+ for len(df.objects) > 0 && (len(df.objects) >= df.maximumCount || df.objectsSizeBytes+sizeBytes > df.maximumSizeBytes) { + key := df.evictionSet.Peek() + df.evictionSet.Remove() + df.objectsSizeBytes -= df.objects[key].sizeBytes + delete(df.objects, key) + } + + df.evictionSet.Insert(key) + df.objects[key] = cachingFetcherObject{ + directory: directory, + sizeBytes: sizeBytes, + } + df.objectsSizeBytes += sizeBytes + } +} + +func (df *cachingDirectoryFetcher) GetDirectory(ctx context.Context, directoryDigest digest.Digest) (*remoteexecution.Directory, error) { + key := CachingDirectoryFetcherKey{ + DigestKey: directoryDigest.GetKey(df.digestKeyFormat), + IsTreeRoot: false, + } + if directory, ok := df.lookup(key); ok { + return directory, nil + } + + directory, err := df.base.GetDirectory(ctx, directoryDigest) + if err != nil { + return nil, err + } + + df.insert(key, directory, directoryDigest.GetSizeBytes()) + return directory, nil +} + +func (df *cachingDirectoryFetcher) GetTreeRootDirectory(ctx context.Context, treeDigest digest.Digest) (*remoteexecution.Directory, error) { + key := CachingDirectoryFetcherKey{ + DigestKey: treeDigest.GetKey(df.digestKeyFormat), + IsTreeRoot: true, + } + if directory, ok := df.lookup(key); ok { + return directory, nil + } + + directory, err := df.base.GetTreeRootDirectory(ctx, treeDigest) + if err != nil { + return nil, err + } + + // For this method the size of the resulting Directory message + // is not known up front. Use proto.Size() to explicitly compute + // the size of the cached entry. 
+ df.insert(key, directory, int64(proto.Size(directory))) + return directory, nil +} + +func (df *cachingDirectoryFetcher) GetTreeChildDirectory(ctx context.Context, treeDigest, childDigest digest.Digest) (*remoteexecution.Directory, error) { + key := CachingDirectoryFetcherKey{ + DigestKey: childDigest.GetKey(df.digestKeyFormat), + IsTreeRoot: false, + } + if directory, ok := df.lookup(key); ok { + return directory, nil + } + + directory, err := df.base.GetTreeChildDirectory(ctx, treeDigest, childDigest) + if err != nil { + return nil, err + } + + df.insert(key, directory, childDigest.GetSizeBytes()) + return directory, nil +} diff --git a/pkg/cas/caching_directory_fetcher_test.go b/pkg/cas/caching_directory_fetcher_test.go new file mode 100644 index 0000000..d53a01f --- /dev/null +++ b/pkg/cas/caching_directory_fetcher_test.go @@ -0,0 +1,218 @@ +package cas_test + +import ( + "context" + "fmt" + "testing" + + remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + "github.com/buildbarn/bb-remote-execution/internal/mock" + "github.com/buildbarn/bb-remote-execution/pkg/cas" + "github.com/buildbarn/bb-storage/pkg/digest" + "github.com/buildbarn/bb-storage/pkg/testutil" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func TestCachingDirectoryFetcherGetDirectory(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + baseDirectoryFetcher := mock.NewMockDirectoryFetcher(ctrl) + evictionSet := mock.NewMockCachingDirectoryFetcherEvictionSet(ctrl) + directoryFetcher := cas.NewCachingDirectoryFetcher(baseDirectoryFetcher, digest.KeyWithoutInstance, 10, 1000, evictionSet) + + t.Run("IOError", func(t *testing.T) { + // Errors from underlying storage should be propagated. 
+ directoryDigest := digest.MustNewDigest("example", remoteexecution.DigestFunction_MD5, "756b15c8f94b519e96135dcfde0e58c5", 50) + + baseDirectoryFetcher.EXPECT().GetDirectory(ctx, directoryDigest).Return(nil, status.Error(codes.Internal, "I/O error")) + + _, err := directoryFetcher.GetDirectory(ctx, directoryDigest) + testutil.RequireEqualStatus(t, status.Error(codes.Internal, "I/O error"), err) + }) + + t.Run("Success", func(t *testing.T) { + directoryDigests := []digest.Digest{ + digest.MustNewDigest("example", remoteexecution.DigestFunction_MD5, "dae613d971e8649f28bf07b0d8dfefa8", 100), + digest.MustNewDigest("example", remoteexecution.DigestFunction_MD5, "7c9a2912bf38c338baa07a6df966eabf", 100), + digest.MustNewDigest("example", remoteexecution.DigestFunction_MD5, "7c57a169737a51b43d9853874ef39854", 100), + digest.MustNewDigest("example", remoteexecution.DigestFunction_MD5, "bd9c8a8f4618436dad0f3d1164ff54fc", 100), + digest.MustNewDigest("example", remoteexecution.DigestFunction_MD5, "2b80e2292d40e6664eb458bb0d906d1e", 100), + digest.MustNewDigest("example", remoteexecution.DigestFunction_MD5, "b8168c5e6bcb299c61d46c7163b16216", 100), + digest.MustNewDigest("example", remoteexecution.DigestFunction_MD5, "a00535d5f1ce3b1257baa70f4fcac762", 100), + digest.MustNewDigest("example", remoteexecution.DigestFunction_MD5, "d66cc130b6436e52a503249a7cf39175", 100), + digest.MustNewDigest("example", remoteexecution.DigestFunction_MD5, "557d4a5911854ecd5d1fba42cce80960", 100), + digest.MustNewDigest("example", remoteexecution.DigestFunction_MD5, "ca62de38c5058d8f52a2f6f3eed0efdc", 100), + digest.MustNewDigest("example", remoteexecution.DigestFunction_MD5, "660cc65d02dc9b676d48f8ac50691a05", 100), + } + + // Insert ten directories into the cache. 
+ for i, directoryDigest := range directoryDigests[:10] { + directoryToInsert := &remoteexecution.Directory{ + Symlinks: []*remoteexecution.SymlinkNode{ + { + Name: "symlink", + Target: fmt.Sprintf("target-%d", i), + }, + }, + } + baseDirectoryFetcher.EXPECT().GetDirectory(ctx, directoryDigest).Return(directoryToInsert, nil) + evictionSet.EXPECT().Insert(cas.CachingDirectoryFetcherKey{ + DigestKey: directoryDigest.GetKey(digest.KeyWithoutInstance), + IsTreeRoot: false, + }) + + directory, err := directoryFetcher.GetDirectory(ctx, directoryDigest) + require.NoError(t, err) + testutil.RequireEqualProto(t, directoryToInsert, directory) + } + + // It should be possible to read back one of the ten + // entries above without incurring any additional I/O. + evictionSet.EXPECT().Touch(cas.CachingDirectoryFetcherKey{ + DigestKey: directoryDigests[7].GetKey(digest.KeyWithoutInstance), + IsTreeRoot: false, + }) + + directory, err := directoryFetcher.GetDirectory(ctx, directoryDigests[7]) + require.NoError(t, err) + testutil.RequireEqualProto(t, &remoteexecution.Directory{ + Symlinks: []*remoteexecution.SymlinkNode{ + { + Name: "symlink", + Target: "target-7", + }, + }, + }, directory) + + // Because the cache is saturated at this point, + // inserting another element should cause one of the + // existing entries to be removed. 
+ directoryToInsert := &remoteexecution.Directory{ + Symlinks: []*remoteexecution.SymlinkNode{ + { + Name: "symlink", + Target: "target-10", + }, + }, + } + baseDirectoryFetcher.EXPECT().GetDirectory(ctx, directoryDigests[10]).Return(directoryToInsert, nil) + evictionSet.EXPECT().Peek().Return(cas.CachingDirectoryFetcherKey{ + DigestKey: directoryDigests[3].GetKey(digest.KeyWithoutInstance), + IsTreeRoot: false, + }) + evictionSet.EXPECT().Remove() + evictionSet.EXPECT().Insert(cas.CachingDirectoryFetcherKey{ + DigestKey: directoryDigests[10].GetKey(digest.KeyWithoutInstance), + IsTreeRoot: false, + }) + + directory, err = directoryFetcher.GetDirectory(ctx, directoryDigests[10]) + require.NoError(t, err) + testutil.RequireEqualProto(t, directoryToInsert, directory) + + // The directory that was evicted should no longer be + // readable without performing a read. + baseDirectoryFetcher.EXPECT().GetDirectory(ctx, directoryDigests[3]).Return(nil, status.Error(codes.Internal, "I/O error")) + + _, err = directoryFetcher.GetDirectory(ctx, directoryDigests[3]) + testutil.RequireEqualStatus(t, status.Error(codes.Internal, "I/O error"), err) + }) +} + +func TestCachingDirectoryFetcherGetTreeRootDirectory(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + baseDirectoryFetcher := mock.NewMockDirectoryFetcher(ctrl) + evictionSet := mock.NewMockCachingDirectoryFetcherEvictionSet(ctrl) + directoryFetcher := cas.NewCachingDirectoryFetcher(baseDirectoryFetcher, digest.KeyWithoutInstance, 10, 1000, evictionSet) + + // We assume that the tests of GetDirectory() sufficiently test + // the caching logic of this type. + + t.Run("Success", func(t *testing.T) { + // Insert a Tree's root directory into the cache. The + // key must be made distinct from regular directories + // using the IsTreeRoot flag, as the digest refers to + // that of the tree; not the directory. 
+ treeDigest := digest.MustNewDigest("example", remoteexecution.DigestFunction_MD5, "72e8cc2780afe06ccaf5353ff29e8bf0", 123151) + directoryToInsert := &remoteexecution.Directory{ + Symlinks: []*remoteexecution.SymlinkNode{ + { + Name: "symlink", + Target: "target", + }, + }, + } + baseDirectoryFetcher.EXPECT().GetTreeRootDirectory(ctx, treeDigest).Return(directoryToInsert, nil) + evictionSet.EXPECT().Insert(cas.CachingDirectoryFetcherKey{ + DigestKey: "3-72e8cc2780afe06ccaf5353ff29e8bf0-123151", + IsTreeRoot: true, + }) + + directory, err := directoryFetcher.GetTreeRootDirectory(ctx, treeDigest) + require.NoError(t, err) + testutil.RequireEqualProto(t, directoryToInsert, directory) + + // Request the cached copy of the directory. + evictionSet.EXPECT().Touch(cas.CachingDirectoryFetcherKey{ + DigestKey: "3-72e8cc2780afe06ccaf5353ff29e8bf0-123151", + IsTreeRoot: true, + }) + + directory, err = directoryFetcher.GetTreeRootDirectory(ctx, treeDigest) + require.NoError(t, err) + testutil.RequireEqualProto(t, directoryToInsert, directory) + }) +} + +func TestCachingDirectoryFetcherGetTreeChildDirectory(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + baseDirectoryFetcher := mock.NewMockDirectoryFetcher(ctrl) + evictionSet := mock.NewMockCachingDirectoryFetcherEvictionSet(ctrl) + directoryFetcher := cas.NewCachingDirectoryFetcher(baseDirectoryFetcher, digest.KeyWithoutInstance, 10, 1000, evictionSet) + + // We assume that the tests of GetDirectory() sufficiently test + // the caching logic of this type. + + t.Run("Success", func(t *testing.T) { + // Insert a Tree's child directory into the cache. The + // key must be based on the digest of the child + // directory and can be of the same format as that of + // GetDirectory(), as that one matche the directory's + // contents. 
+ treeDigest := digest.MustNewDigest("example", remoteexecution.DigestFunction_MD5, "72e8cc2780afe06ccaf5353ff29e8bf0", 123151) + childDigest := digest.MustNewDigest("example", remoteexecution.DigestFunction_MD5, "63e6ebccf1fae98faba9c59888991621", 72) + directoryToInsert := &remoteexecution.Directory{ + Symlinks: []*remoteexecution.SymlinkNode{ + { + Name: "symlink", + Target: "target", + }, + }, + } + baseDirectoryFetcher.EXPECT().GetTreeChildDirectory(ctx, treeDigest, childDigest).Return(directoryToInsert, nil) + evictionSet.EXPECT().Insert(cas.CachingDirectoryFetcherKey{ + DigestKey: "3-63e6ebccf1fae98faba9c59888991621-72", + IsTreeRoot: false, + }) + + directory, err := directoryFetcher.GetTreeChildDirectory(ctx, treeDigest, childDigest) + require.NoError(t, err) + testutil.RequireEqualProto(t, directoryToInsert, directory) + + // Request the cached copy of the directory. + evictionSet.EXPECT().Touch(cas.CachingDirectoryFetcherKey{ + DigestKey: "3-63e6ebccf1fae98faba9c59888991621-72", + IsTreeRoot: false, + }) + + directory, err = directoryFetcher.GetTreeChildDirectory(ctx, treeDigest, childDigest) + require.NoError(t, err) + testutil.RequireEqualProto(t, directoryToInsert, directory) + }) +} diff --git a/pkg/cas/configuration.go b/pkg/cas/configuration.go new file mode 100644 index 0000000..7fa9869 --- /dev/null +++ b/pkg/cas/configuration.go @@ -0,0 +1,29 @@ +package cas + +import ( + pb "github.com/buildbarn/bb-remote-execution/pkg/proto/configuration/cas" + "github.com/buildbarn/bb-storage/pkg/digest" + "github.com/buildbarn/bb-storage/pkg/eviction" +) + +// NewCachingDirectoryFetcherFromConfiguration creates a new +// CachingDirectoryFetcher based on parameters provided in a +// configuration file. +func NewCachingDirectoryFetcherFromConfiguration(configuration *pb.CachingDirectoryFetcherConfiguration, base DirectoryFetcher) (DirectoryFetcher, error) { + if configuration == nil { + // No configuration provided. Disable in-memory caching. 
+ return base, nil + } + + evictionSet, err := eviction.NewSetFromConfiguration[CachingDirectoryFetcherKey](configuration.CacheReplacementPolicy) + if err != nil { + return nil, err + } + return NewCachingDirectoryFetcher( + base, + digest.KeyWithoutInstance, + int(configuration.MaximumCount), + configuration.MaximumSizeBytes, + eviction.NewMetricsSet(evictionSet, "CachingDirectoryFetcher"), + ), nil +} diff --git a/pkg/cas/decomposed_directory_walker.go b/pkg/cas/decomposed_directory_walker.go new file mode 100644 index 0000000..2bbcf13 --- /dev/null +++ b/pkg/cas/decomposed_directory_walker.go @@ -0,0 +1,44 @@ +package cas + +import ( + "context" + "fmt" + + remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + "github.com/buildbarn/bb-storage/pkg/digest" +) + +type decomposedDirectoryWalker struct { + fetcher DirectoryFetcher + digest digest.Digest +} + +// NewDecomposedDirectoryWalker creates a DirectoryWalker that assumes +// that all Directory messages are stored as separate objects in the +// Content Addressable Storage (CAS). This is the case for input roots +// of build actions. 
+func NewDecomposedDirectoryWalker(fetcher DirectoryFetcher, digest digest.Digest) DirectoryWalker { + return &decomposedDirectoryWalker{ + fetcher: fetcher, + digest: digest, + } +} + +func (dw *decomposedDirectoryWalker) GetDirectory(ctx context.Context) (*remoteexecution.Directory, error) { + return dw.fetcher.GetDirectory(ctx, dw.digest) +} + +func (dw *decomposedDirectoryWalker) GetChild(digest digest.Digest) DirectoryWalker { + return &decomposedDirectoryWalker{ + fetcher: dw.fetcher, + digest: digest, + } +} + +func (dw *decomposedDirectoryWalker) GetDescription() string { + return fmt.Sprintf("Directory %#v", dw.digest.String()) +} + +func (dw *decomposedDirectoryWalker) GetContainingDigest() digest.Digest { + return dw.digest +} diff --git a/pkg/cas/decomposed_directory_walker_test.go b/pkg/cas/decomposed_directory_walker_test.go new file mode 100644 index 0000000..675898c --- /dev/null +++ b/pkg/cas/decomposed_directory_walker_test.go @@ -0,0 +1,97 @@ +package cas_test + +import ( + "context" + "testing" + + remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + "github.com/buildbarn/bb-remote-execution/internal/mock" + "github.com/buildbarn/bb-remote-execution/pkg/cas" + "github.com/buildbarn/bb-storage/pkg/digest" + "github.com/buildbarn/bb-storage/pkg/testutil" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func TestDecomposedDirectoryWalker(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + directoryFetcher := mock.NewMockDirectoryFetcher(ctrl) + parentDigest := digest.MustNewDigest("example", remoteexecution.DigestFunction_MD5, "6884a9e20905b512d1122a2b1ad8ba16", 123) + parentDirectoryWalker := cas.NewDecomposedDirectoryWalker(directoryFetcher, parentDigest) + + exampleDirectory := &remoteexecution.Directory{ + Directories: []*remoteexecution.DirectoryNode{ + { + Name: "foo", + Digest: 
&remoteexecution.Digest{ + Hash: "4df5f448a5e6b3c41e6aae7a8a9832aa", + SizeBytes: 456, + }, + }, + }, + } + + // Test that the DirectoryWalker loads the right object from the + // CAS, and that error messages use the right prefix. + + t.Run("ParentGetDirectorySuccess", func(t *testing.T) { + directoryFetcher.EXPECT().GetDirectory(ctx, parentDigest). + Return(exampleDirectory, nil) + parentDirectory, err := parentDirectoryWalker.GetDirectory(ctx) + require.NoError(t, err) + testutil.RequireEqualProto(t, exampleDirectory, parentDirectory) + }) + + t.Run("ParentGetDirectoryFailure", func(t *testing.T) { + directoryFetcher.EXPECT().GetDirectory(ctx, parentDigest). + Return(nil, status.Error(codes.Internal, "Server failure")) + _, err := parentDirectoryWalker.GetDirectory(ctx) + require.Equal(t, status.Error(codes.Internal, "Server failure"), err) + }) + + t.Run("ParentGetDescription", func(t *testing.T) { + require.Equal( + t, + "Directory \"3-6884a9e20905b512d1122a2b1ad8ba16-123-example\"", + parentDirectoryWalker.GetDescription()) + }) + + t.Run("ParentGetContainingDigest", func(t *testing.T) { + require.Equal( + t, + parentDigest, + parentDirectoryWalker.GetContainingDigest()) + }) + + childDigest := digest.MustNewDigest("example", remoteexecution.DigestFunction_MD5, "4df5f448a5e6b3c41e6aae7a8a9832aa", 456) + childDirectoryWalker := parentDirectoryWalker.GetChild(childDigest) + + // Repeat the tests above against a child directory, to make + // sure those also load the right object from the CAS. + + t.Run("ChildGetDirectory", func(t *testing.T) { + directoryFetcher.EXPECT().GetDirectory(ctx, childDigest). 
+ Return(exampleDirectory, nil) + childDirectory, err := childDirectoryWalker.GetDirectory(ctx) + require.NoError(t, err) + testutil.RequireEqualProto(t, exampleDirectory, childDirectory) + }) + + t.Run("ChildGetDescription", func(t *testing.T) { + require.Equal( + t, + "Directory \"3-4df5f448a5e6b3c41e6aae7a8a9832aa-456-example\"", + childDirectoryWalker.GetDescription()) + }) + + t.Run("ChildGetContainingDigest", func(t *testing.T) { + require.Equal( + t, + childDigest, + childDirectoryWalker.GetContainingDigest()) + }) +} diff --git a/pkg/cas/directory_fetcher.go b/pkg/cas/directory_fetcher.go new file mode 100644 index 0000000..1432d77 --- /dev/null +++ b/pkg/cas/directory_fetcher.go @@ -0,0 +1,17 @@ +package cas + +import ( + "context" + + remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + "github.com/buildbarn/bb-storage/pkg/digest" +) + +// DirectoryFetcher is responsible for fetching Directory messages from +// the Content Addressable Storage (CAS). These describe the layout of a +// single directory in a build action's input root. +type DirectoryFetcher interface { + GetDirectory(ctx context.Context, directoryDigest digest.Digest) (*remoteexecution.Directory, error) + GetTreeRootDirectory(ctx context.Context, treeDigest digest.Digest) (*remoteexecution.Directory, error) + GetTreeChildDirectory(ctx context.Context, treeDigest, childDigest digest.Digest) (*remoteexecution.Directory, error) +} diff --git a/pkg/cas/directory_walker.go b/pkg/cas/directory_walker.go new file mode 100644 index 0000000..972416e --- /dev/null +++ b/pkg/cas/directory_walker.go @@ -0,0 +1,37 @@ +package cas + +import ( + "context" + + remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + "github.com/buildbarn/bb-storage/pkg/digest" +) + +// DirectoryWalker is identical to a DirectoryFetcher, except that it is +// bound to a specific instance of a directory. 
//
// The goal of this interface is to provide uniform access to Directory
// messages, regardless of the way they are stored in the Content
// Addressable Storage (i.e., as separate objects, or as part of a Tree
// message).
type DirectoryWalker interface {
	// GetDirectory() returns the contents of the current directory.
	GetDirectory(ctx context.Context) (*remoteexecution.Directory, error)

	// GetChild() can be used to obtain a new DirectoryWalker instance
	// that corresponds to one of the children of this directory.
	GetChild(digest digest.Digest) DirectoryWalker

	// GetDescription() gives a textual description of the
	// DirectoryWalker, which may be useful for logging purposes.
	GetDescription() string

	// GetContainingDigest() returns the digest of the Content
	// Addressable Storage object that holds this directory.
	//
	// In the case of plain Directory objects, this function returns
	// the digest provided to GetChild(). In the case of Tree
	// objects, the digest of the containing Tree is returned, which
	// differs from the digest provided to GetChild().
	GetContainingDigest() digest.Digest
}

// ---- pkg/cas/file_fetcher.go ----

package cas

import (
	"context"

	"github.com/buildbarn/bb-storage/pkg/digest"
	"github.com/buildbarn/bb-storage/pkg/filesystem"
	"github.com/buildbarn/bb-storage/pkg/filesystem/path"
)

// FileFetcher is responsible for fetching files from the Content
// Addressable Storage (CAS), storing its contents inside a file on
// disk.
type FileFetcher interface {
	// GetFile() downloads the blob identified by digest and stores
	// it as a file called name inside directory, setting the
	// executable bits when isExecutable is true.
	GetFile(ctx context.Context, digest digest.Digest, directory filesystem.Directory, name path.Component, isExecutable bool) error
}

// ---- pkg/cas/hardlinking_file_fetcher.go ----

package cas

import (
	"context"
	"os"
	"sync"

	"github.com/buildbarn/bb-storage/pkg/digest"
	"github.com/buildbarn/bb-storage/pkg/eviction"
	"github.com/buildbarn/bb-storage/pkg/filesystem"
	"github.com/buildbarn/bb-storage/pkg/filesystem/path"
	"github.com/buildbarn/bb-storage/pkg/util"

	"google.golang.org/grpc/codes"
)

type hardlinkingFileFetcher struct {
	base           FileFetcher
	cacheDirectory filesystem.Directory
	maxFiles       int
	maxSize        int64

	// filesLock guards filesSize and filesTotalSize. evictionLock is
	// only ever acquired while filesLock is held (read or write);
	// keep that order consistent.
	filesLock      sync.RWMutex
	filesSize      map[string]int64
	filesTotalSize int64

	evictionLock sync.Mutex
	evictionSet  eviction.Set[string]
}

// NewHardlinkingFileFetcher is an adapter for FileFetcher that stores
// files in an internal directory. After successfully downloading files
// at the target location, they are hardlinked into the cache. Future
// calls for the same file will hardlink them from the cache to the
// target location. This reduces the amount of network traffic needed.
func NewHardlinkingFileFetcher(base FileFetcher, cacheDirectory filesystem.Directory, maxFiles int, maxSize int64, evictionSet eviction.Set[string]) FileFetcher {
	return &hardlinkingFileFetcher{
		base:           base,
		cacheDirectory: cacheDirectory,
		maxFiles:       maxFiles,
		maxSize:        maxSize,

		filesSize: map[string]int64{},

		evictionSet: evictionSet,
	}
}

// makeSpace evicts cached files until there is room for one more file
// of the given size, both in terms of file count and total byte size.
// Callers must hold filesLock (write) and evictionLock.
func (ff *hardlinkingFileFetcher) makeSpace(size int64) error {
	for len(ff.filesSize) > 0 && (len(ff.filesSize) >= ff.maxFiles || ff.filesTotalSize+size > ff.maxSize) {
		// Remove a file from disk. A missing file is tolerated,
		// as another process may have cleaned the directory.
		key := ff.evictionSet.Peek()
		if err := ff.cacheDirectory.Remove(path.MustNewComponent(key)); err != nil && !os.IsNotExist(err) {
			return util.StatusWrapfWithCode(err, codes.Internal, "Failed to remove cached file %#v", key)
		}

		// Remove file from bookkeeping.
		ff.evictionSet.Remove()
		ff.filesTotalSize -= ff.filesSize[key]
		delete(ff.filesSize, key)
	}
	return nil
}

// GetFile serves the file from the local cache when possible, falling
// back to the underlying FileFetcher and inserting the result into the
// cache on a miss.
func (ff *hardlinkingFileFetcher) GetFile(ctx context.Context, blobDigest digest.Digest, directory filesystem.Directory, name path.Component, isExecutable bool) error {
	// Executable and non-executable copies of the same blob are
	// distinct cache entries, as their file modes differ.
	key := blobDigest.GetKey(digest.KeyWithoutInstance)
	if isExecutable {
		key += "+x"
	} else {
		key += "-x"
	}

	// If the file is present in the cache, hardlink it to the destination.
	wasMissing := false
	ff.filesLock.RLock()
	if _, ok := ff.filesSize[key]; ok {
		ff.evictionLock.Lock()
		ff.evictionSet.Touch(key)
		ff.evictionLock.Unlock()

		if err := ff.cacheDirectory.Link(path.MustNewComponent(key), directory, name); err == nil {
			// Successfully hardlinked the file to its destination.
			ff.filesLock.RUnlock()
			return nil
		} else if !os.IsNotExist(err) {
			ff.filesLock.RUnlock()
			return util.StatusWrapfWithCode(err, codes.Internal, "Failed to create hardlink to cached file %#v", key)
		}

		// The file was part of the cache, even though it did not
		// exist on disk. Some other process may have tampered
		// with the cache directory's contents.
		wasMissing = true
	}
	ff.filesLock.RUnlock()

	// Download the file at the intended location.
	if err := ff.base.GetFile(ctx, blobDigest, directory, name, isExecutable); err != nil {
		return err
	}

	ff.filesLock.Lock()
	defer ff.filesLock.Unlock()
	if _, ok := ff.filesSize[key]; !ok {
		ff.evictionLock.Lock()
		defer ff.evictionLock.Unlock()

		// Remove old files from the cache if necessary.
		sizeBytes := blobDigest.GetSizeBytes()
		if err := ff.makeSpace(sizeBytes); err != nil {
			return err
		}

		// Hardlink the file into the cache. EEXIST is ignored,
		// as a concurrent caller may have linked it already.
		if err := directory.Link(name, ff.cacheDirectory, path.MustNewComponent(key)); err != nil && !os.IsExist(err) {
			return util.StatusWrapfWithCode(err, codes.Internal, "Failed to add cached file %#v", key)
		}
		ff.evictionSet.Insert(key)
		ff.filesSize[key] = sizeBytes
		ff.filesTotalSize += sizeBytes
	} else if wasMissing {
		// Even though the file is part of our bookkeeping, we
		// observed it didn't exist. Repair this inconsistency.
		if err := directory.Link(name, ff.cacheDirectory, path.MustNewComponent(key)); err != nil && !os.IsExist(err) {
			return util.StatusWrapfWithCode(err, codes.Internal, "Failed to repair cached file %#v", key)
		}
	}
	return nil
}

// ---- pkg/cas/hardlinking_file_fetcher_test.go ----

package cas_test

import (
	"context"
	"os"
	"syscall"
	"testing"

	remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2"
	"github.com/buildbarn/bb-remote-execution/internal/mock"
	"github.com/buildbarn/bb-remote-execution/pkg/cas"
	"github.com/buildbarn/bb-storage/pkg/digest"
	"github.com/buildbarn/bb-storage/pkg/eviction"
	"github.com/buildbarn/bb-storage/pkg/filesystem/path"
	"github.com/buildbarn/bb-storage/pkg/testutil"
	"github.com/golang/mock/gomock"
	"github.com/stretchr/testify/require"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func TestHardlinkingFileFetcher(t *testing.T) {
	ctrl, ctx := gomock.WithContext(context.Background(), t)

	baseFileFetcher := mock.NewMockFileFetcher(ctrl)
	cacheDirectory := mock.NewMockDirectory(ctrl)
	fileFetcher := cas.NewHardlinkingFileFetcher(baseFileFetcher, cacheDirectory, 1, 1024, eviction.NewLRUSet[string]())

	blobDigest1 := digest.MustNewDigest("example", remoteexecution.DigestFunction_MD5, "8b1a9953c4611296a827abf8c47804d7", 5)
	buildDirectory :=
mock.NewMockDirectory(ctrl) + + // Errors fetching files from the backend should be propagated. + baseFileFetcher.EXPECT().GetFile(ctx, blobDigest1, buildDirectory, path.MustNewComponent("hello.txt"), false). + Return(status.Error(codes.Internal, "Server not reachable")) + require.Equal( + t, + status.Error(codes.Internal, "Server not reachable"), + fileFetcher.GetFile(ctx, blobDigest1, buildDirectory, path.MustNewComponent("hello.txt"), false)) + + // Failing to link the file into the cache should not cause the + // file to become cached immediately. + baseFileFetcher.EXPECT().GetFile(ctx, blobDigest1, buildDirectory, path.MustNewComponent("hello.txt"), false) + buildDirectory.EXPECT().Link(path.MustNewComponent("hello.txt"), cacheDirectory, path.MustNewComponent("3-8b1a9953c4611296a827abf8c47804d7-5-x")). + Return(syscall.EIO) + testutil.RequireEqualStatus( + t, + status.Error(codes.Internal, "Failed to add cached file \"3-8b1a9953c4611296a827abf8c47804d7-5-x\": input/output error"), + fileFetcher.GetFile(ctx, blobDigest1, buildDirectory, path.MustNewComponent("hello.txt"), false)) + + // Successfully link the file into the cache. + baseFileFetcher.EXPECT().GetFile(ctx, blobDigest1, buildDirectory, path.MustNewComponent("hello.txt"), false) + buildDirectory.EXPECT().Link(path.MustNewComponent("hello.txt"), cacheDirectory, path.MustNewComponent("3-8b1a9953c4611296a827abf8c47804d7-5-x")) + require.NoError( + t, + fileFetcher.GetFile(ctx, blobDigest1, buildDirectory, path.MustNewComponent("hello.txt"), false)) + + // Once the file is cached, hardlinks should be made in the + // opposite direction. + cacheDirectory.EXPECT().Link(path.MustNewComponent("3-8b1a9953c4611296a827abf8c47804d7-5-x"), buildDirectory, path.MustNewComponent("hello.txt")) + require.NoError( + t, + fileFetcher.GetFile(ctx, blobDigest1, buildDirectory, path.MustNewComponent("hello.txt"), false)) + + // Failure when accessing a cached file. 
+ cacheDirectory.EXPECT().Link(path.MustNewComponent("3-8b1a9953c4611296a827abf8c47804d7-5-x"), buildDirectory, path.MustNewComponent("hello.txt")). + Return(syscall.EIO) + testutil.RequireEqualStatus( + t, + status.Error(codes.Internal, "Failed to create hardlink to cached file \"3-8b1a9953c4611296a827abf8c47804d7-5-x\": input/output error"), + fileFetcher.GetFile(ctx, blobDigest1, buildDirectory, path.MustNewComponent("hello.txt"), false)) + + // Recover from the case where the cache directory gets cleaned + // up by another process. If hardlinking returns ENOENT, we + // should fall back to downloading and reinserting the file. + cacheDirectory.EXPECT().Link(path.MustNewComponent("3-8b1a9953c4611296a827abf8c47804d7-5-x"), buildDirectory, path.MustNewComponent("hello.txt")). + Return(syscall.ENOENT) + baseFileFetcher.EXPECT().GetFile(ctx, blobDigest1, buildDirectory, path.MustNewComponent("hello.txt"), false) + buildDirectory.EXPECT().Link(path.MustNewComponent("hello.txt"), cacheDirectory, path.MustNewComponent("3-8b1a9953c4611296a827abf8c47804d7-5-x")) + require.NoError( + t, + fileFetcher.GetFile(ctx, blobDigest1, buildDirectory, path.MustNewComponent("hello.txt"), false)) + + // The above may happen in multiple threads at the same time. + // EEXIST errors should be ignored in that case. + cacheDirectory.EXPECT().Link(path.MustNewComponent("3-8b1a9953c4611296a827abf8c47804d7-5-x"), buildDirectory, path.MustNewComponent("hello.txt")). + Return(syscall.ENOENT) + baseFileFetcher.EXPECT().GetFile(ctx, blobDigest1, buildDirectory, path.MustNewComponent("hello.txt"), false) + buildDirectory.EXPECT().Link(path.MustNewComponent("hello.txt"), cacheDirectory, path.MustNewComponent("3-8b1a9953c4611296a827abf8c47804d7-5-x")). + Return(os.ErrExist) + require.NoError( + t, + fileFetcher.GetFile(ctx, blobDigest1, buildDirectory, path.MustNewComponent("hello.txt"), false)) + + // Errors other than EEXIST should be propagated as usual. 
+ cacheDirectory.EXPECT().Link(path.MustNewComponent("3-8b1a9953c4611296a827abf8c47804d7-5-x"), buildDirectory, path.MustNewComponent("hello.txt")). + Return(syscall.ENOENT) + baseFileFetcher.EXPECT().GetFile(ctx, blobDigest1, buildDirectory, path.MustNewComponent("hello.txt"), false) + buildDirectory.EXPECT().Link(path.MustNewComponent("hello.txt"), cacheDirectory, path.MustNewComponent("3-8b1a9953c4611296a827abf8c47804d7-5-x")). + Return(syscall.EIO) + testutil.RequireEqualStatus( + t, + status.Error(codes.Internal, "Failed to repair cached file \"3-8b1a9953c4611296a827abf8c47804d7-5-x\": input/output error"), + fileFetcher.GetFile(ctx, blobDigest1, buildDirectory, path.MustNewComponent("hello.txt"), false)) + + blobDigest2 := digest.MustNewDigest("example", remoteexecution.DigestFunction_MD5, "6fc422233a40a75a1f028e11c3cd1140", 7) + + // Errors other than ENOENT when removing a file should be + // propagated, as we don't want to silently fill up the disk. + baseFileFetcher.EXPECT().GetFile(ctx, blobDigest2, buildDirectory, path.MustNewComponent("goodbye.txt"), false) + cacheDirectory.EXPECT().Remove(path.MustNewComponent("3-8b1a9953c4611296a827abf8c47804d7-5-x")). + Return(syscall.EIO) + testutil.RequireEqualStatus( + t, + status.Error(codes.Internal, "Failed to remove cached file \"3-8b1a9953c4611296a827abf8c47804d7-5-x\": input/output error"), + fileFetcher.GetFile(ctx, blobDigest2, buildDirectory, path.MustNewComponent("goodbye.txt"), false)) + + // ENOENT errors when removing files should be tolerated. It + // simply means that files in the cache directory were cleaned + // up by another process. + baseFileFetcher.EXPECT().GetFile(ctx, blobDigest2, buildDirectory, path.MustNewComponent("goodbye.txt"), false) + cacheDirectory.EXPECT().Remove(path.MustNewComponent("3-8b1a9953c4611296a827abf8c47804d7-5-x")). 
+ Return(syscall.ENOENT) + buildDirectory.EXPECT().Link(path.MustNewComponent("goodbye.txt"), cacheDirectory, path.MustNewComponent("3-6fc422233a40a75a1f028e11c3cd1140-7-x")) + require.NoError( + t, + fileFetcher.GetFile(ctx, blobDigest2, buildDirectory, path.MustNewComponent("goodbye.txt"), false)) +} diff --git a/pkg/cas/suspending_directory_fetcher.go b/pkg/cas/suspending_directory_fetcher.go new file mode 100644 index 0000000..dabbab4 --- /dev/null +++ b/pkg/cas/suspending_directory_fetcher.go @@ -0,0 +1,50 @@ +package cas + +import ( + "context" + + remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + "github.com/buildbarn/bb-remote-execution/pkg/clock" + "github.com/buildbarn/bb-storage/pkg/digest" +) + +type suspendingDirectoryFetcher struct { + base DirectoryFetcher + suspendable clock.Suspendable +} + +// NewSuspendingDirectoryFetcher is a decorator for DirectoryFetcher +// that simply forwards all methods. Before and after each call, it +// suspends and resumes a clock.Suspendable object, respectively. +// +// This decorator is used in combination with SuspendableClock, allowing +// FUSE/NFSv4-based workers to compensate the execution timeout of build +// actions for any time spent loading directory contents of the input +// root. 
+func NewSuspendingDirectoryFetcher(base DirectoryFetcher, suspendable clock.Suspendable) DirectoryFetcher { + return &suspendingDirectoryFetcher{ + base: base, + suspendable: suspendable, + } +} + +func (df *suspendingDirectoryFetcher) GetDirectory(ctx context.Context, directoryDigest digest.Digest) (*remoteexecution.Directory, error) { + df.suspendable.Suspend() + defer df.suspendable.Resume() + + return df.base.GetDirectory(ctx, directoryDigest) +} + +func (df *suspendingDirectoryFetcher) GetTreeRootDirectory(ctx context.Context, treeDigest digest.Digest) (*remoteexecution.Directory, error) { + df.suspendable.Suspend() + defer df.suspendable.Resume() + + return df.base.GetTreeRootDirectory(ctx, treeDigest) +} + +func (df *suspendingDirectoryFetcher) GetTreeChildDirectory(ctx context.Context, treeDigest, childDigest digest.Digest) (*remoteexecution.Directory, error) { + df.suspendable.Suspend() + defer df.suspendable.Resume() + + return df.base.GetTreeChildDirectory(ctx, treeDigest, childDigest) +} diff --git a/pkg/cleaner/BUILD.bazel b/pkg/cleaner/BUILD.bazel new file mode 100644 index 0000000..f29b286 --- /dev/null +++ b/pkg/cleaner/BUILD.bazel @@ -0,0 +1,68 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "cleaner", + srcs = [ + "chained_cleaner.go", + "command_running_cleaner.go", + "directory_cleaner.go", + "filtering_process_table.go", + "idle_invoker.go", + "process_table.go", + "process_table_cleaner.go", + "process_table_cleaner_disabled.go", + "process_table_cleaner_unix.go", + "system_process_table_darwin.go", + "system_process_table_disabled.go", + "system_process_table_linux.go", + ], + importpath = "github.com/buildbarn/bb-remote-execution/pkg/cleaner", + visibility = ["//visibility:public"], + deps = [ + "@com_github_buildbarn_bb_storage//pkg/filesystem", + "@com_github_buildbarn_bb_storage//pkg/util", + "@org_golang_google_grpc//codes", + ] + select({ + "@io_bazel_rules_go//go/platform:android": [ + 
"@org_golang_x_sys//unix", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "@org_golang_x_sys//unix", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "@org_golang_google_grpc//status", + "@org_golang_x_sys//unix", + ], + "@io_bazel_rules_go//go/platform:ios": [ + "@org_golang_x_sys//unix", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "@org_golang_x_sys//unix", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "@org_golang_google_grpc//status", + ], + "//conditions:default": [], + }), +) + +go_test( + name = "cleaner_test", + srcs = [ + "chained_cleaner_test.go", + "command_running_cleaner_test.go", + "directory_cleaner_test.go", + "filtering_process_table_test.go", + "idle_invoker_test.go", + "system_process_table_test.go", + ], + deps = [ + ":cleaner", + "//internal/mock", + "@com_github_buildbarn_bb_storage//pkg/testutil", + "@com_github_golang_mock//gomock", + "@com_github_stretchr_testify//require", + "@org_golang_google_grpc//codes", + "@org_golang_google_grpc//status", + ], +) diff --git a/pkg/cleaner/chained_cleaner.go b/pkg/cleaner/chained_cleaner.go new file mode 100644 index 0000000..f4545a9 --- /dev/null +++ b/pkg/cleaner/chained_cleaner.go @@ -0,0 +1,20 @@ +package cleaner + +import ( + "context" +) + +// NewChainedCleaner creates a new Cleaner that invokes a series of +// existing Cleaner objects sequentially. If any of them fail, the first +// observed error is returned. 
+func NewChainedCleaner(cleaners []Cleaner) Cleaner { + return func(ctx context.Context) error { + var chainedErr error + for _, cleaner := range cleaners { + if err := cleaner(ctx); chainedErr == nil { + chainedErr = err + } + } + return chainedErr + } +} diff --git a/pkg/cleaner/chained_cleaner_test.go b/pkg/cleaner/chained_cleaner_test.go new file mode 100644 index 0000000..e36a7ed --- /dev/null +++ b/pkg/cleaner/chained_cleaner_test.go @@ -0,0 +1,83 @@ +package cleaner_test + +import ( + "context" + "testing" + + "github.com/buildbarn/bb-remote-execution/internal/mock" + "github.com/buildbarn/bb-remote-execution/pkg/cleaner" + "github.com/buildbarn/bb-storage/pkg/testutil" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func TestChainedCleaner(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + t.Run("Empty", func(t *testing.T) { + require.NoError(t, cleaner.NewChainedCleaner(nil)(ctx)) + }) + + t.Run("SingleFailure", func(t *testing.T) { + cleaner1 := mock.NewMockCleaner(ctrl) + cleaner1.EXPECT().Call(ctx).Return(status.Error(codes.Internal, "Failed to clean process table")) + + testutil.RequireEqualStatus( + t, + status.Error(codes.Internal, "Failed to clean process table"), + cleaner.NewChainedCleaner([]cleaner.Cleaner{ + cleaner1.Call, + })(ctx)) + }) + + t.Run("SingleSuccess", func(t *testing.T) { + cleaner1 := mock.NewMockCleaner(ctrl) + cleaner1.EXPECT().Call(ctx) + + require.NoError(t, cleaner.NewChainedCleaner([]cleaner.Cleaner{ + cleaner1.Call, + })(ctx)) + }) + + t.Run("MultipleFailure", func(t *testing.T) { + // When multiple cleaners are provided, they must be + // invoked in order. Even in case of failures, the + // remaining cleaners need to be run. The error of the + // first cleaner is returned. 
+ cleaner1 := mock.NewMockCleaner(ctrl) + cleaner2 := mock.NewMockCleaner(ctrl) + cleaner3 := mock.NewMockCleaner(ctrl) + gomock.InOrder( + cleaner1.EXPECT().Call(ctx), + cleaner2.EXPECT().Call(ctx).Return(status.Error(codes.Internal, "Failed to clean process table")), + cleaner3.EXPECT().Call(ctx).Return(status.Error(codes.Internal, "Failed to clean directory"))) + + testutil.RequireEqualStatus( + t, + status.Error(codes.Internal, "Failed to clean process table"), + cleaner.NewChainedCleaner([]cleaner.Cleaner{ + cleaner1.Call, + cleaner2.Call, + cleaner3.Call, + })(ctx)) + }) + + t.Run("MultipleSuccess", func(t *testing.T) { + cleaner1 := mock.NewMockCleaner(ctrl) + cleaner2 := mock.NewMockCleaner(ctrl) + cleaner3 := mock.NewMockCleaner(ctrl) + gomock.InOrder( + cleaner1.EXPECT().Call(ctx), + cleaner2.EXPECT().Call(ctx), + cleaner3.EXPECT().Call(ctx)) + + require.NoError(t, cleaner.NewChainedCleaner([]cleaner.Cleaner{ + cleaner1.Call, + cleaner2.Call, + cleaner3.Call, + })(ctx)) + }) +} diff --git a/pkg/cleaner/command_running_cleaner.go b/pkg/cleaner/command_running_cleaner.go new file mode 100644 index 0000000..3ae3cbd --- /dev/null +++ b/pkg/cleaner/command_running_cleaner.go @@ -0,0 +1,21 @@ +package cleaner + +import ( + "context" + "os/exec" + + "github.com/buildbarn/bb-storage/pkg/util" + + "google.golang.org/grpc/codes" +) + +// NewCommandRunningCleaner creates a new Cleaner that executes a command on the +// system and expects it to succeed. 
+func NewCommandRunningCleaner(command string, args []string) Cleaner { + return func(ctx context.Context) error { + if err := exec.CommandContext(ctx, command, args...).Run(); err != nil { + return util.StatusWrapWithCode(err, codes.Internal, "Failed to run cleaning command") + } + return nil + } +} diff --git a/pkg/cleaner/command_running_cleaner_test.go b/pkg/cleaner/command_running_cleaner_test.go new file mode 100644 index 0000000..aa3cfe5 --- /dev/null +++ b/pkg/cleaner/command_running_cleaner_test.go @@ -0,0 +1,29 @@ +package cleaner_test + +import ( + "context" + "testing" + + "github.com/buildbarn/bb-remote-execution/pkg/cleaner" + "github.com/buildbarn/bb-storage/pkg/testutil" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func TestCommandRunningCleaner(t *testing.T) { + _, ctx := gomock.WithContext(context.Background(), t) + + t.Run("Failure", func(t *testing.T) { + testutil.RequireEqualStatus( + t, + status.Error(codes.Internal, "Failed to run cleaning command: exit status 1"), + cleaner.NewCommandRunningCleaner("false", nil)(ctx)) + }) + + t.Run("Success", func(t *testing.T) { + require.NoError(t, cleaner.NewCommandRunningCleaner("true", nil)(ctx)) + }) +} diff --git a/pkg/cleaner/directory_cleaner.go b/pkg/cleaner/directory_cleaner.go new file mode 100644 index 0000000..fc800b3 --- /dev/null +++ b/pkg/cleaner/directory_cleaner.go @@ -0,0 +1,22 @@ +package cleaner + +import ( + "context" + + "github.com/buildbarn/bb-storage/pkg/filesystem" + "github.com/buildbarn/bb-storage/pkg/util" + + "google.golang.org/grpc/codes" +) + +// NewDirectoryCleaner creates a Cleaner that can remove all files +// within a given directory. It can, for example, be used to remove +// files from build directories and system temporary directories. 
+func NewDirectoryCleaner(directory filesystem.Directory, path string) Cleaner { + return func(ctx context.Context) error { + if err := directory.RemoveAllChildren(); err != nil { + return util.StatusWrapfWithCode(err, codes.Internal, "Failed to clean directory %#v", path) + } + return nil + } +} diff --git a/pkg/cleaner/directory_cleaner_test.go b/pkg/cleaner/directory_cleaner_test.go new file mode 100644 index 0000000..de5c642 --- /dev/null +++ b/pkg/cleaner/directory_cleaner_test.go @@ -0,0 +1,37 @@ +package cleaner_test + +import ( + "context" + "syscall" + "testing" + + "github.com/buildbarn/bb-remote-execution/internal/mock" + "github.com/buildbarn/bb-remote-execution/pkg/cleaner" + "github.com/buildbarn/bb-storage/pkg/testutil" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func TestDirectoryCleaner(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + t.Run("Failure", func(t *testing.T) { + directory := mock.NewMockDirectory(ctrl) + directory.EXPECT().RemoveAllChildren().Return(syscall.EACCES) + + testutil.RequireEqualStatus( + t, + status.Error(codes.Internal, "Failed to clean directory \"/tmp\": permission denied"), + cleaner.NewDirectoryCleaner(directory, "/tmp")(ctx)) + }) + + t.Run("Success", func(t *testing.T) { + directory := mock.NewMockDirectory(ctrl) + directory.EXPECT().RemoveAllChildren() + + require.NoError(t, cleaner.NewDirectoryCleaner(directory, "/tmp")(ctx)) + }) +} diff --git a/pkg/cleaner/filtering_process_table.go b/pkg/cleaner/filtering_process_table.go new file mode 100644 index 0000000..fc82a1c --- /dev/null +++ b/pkg/cleaner/filtering_process_table.go @@ -0,0 +1,37 @@ +package cleaner + +// ProcessFilterFunc is a callback that is provided to +// NewFilteringProcessTable to act as a filter function for processes +// returned by GetProcesses(). 
Only processes for which this callback +// function returns true are returned. +type ProcessFilterFunc func(process *Process) bool + +type filteringProcessTable struct { + base ProcessTable + filter ProcessFilterFunc +} + +// NewFilteringProcessTable is a decorator for ProcessTable that only +// causes it to return processes matching a given filter. These are the +// processes that bb_runner should consider terminating. +func NewFilteringProcessTable(base ProcessTable, filter ProcessFilterFunc) ProcessTable { + return &filteringProcessTable{ + base: base, + filter: filter, + } +} + +func (pt *filteringProcessTable) GetProcesses() ([]Process, error) { + unfilteredProcesses, err := pt.base.GetProcesses() + if err != nil { + return nil, err + } + + var filteredProcesses []Process + for _, process := range unfilteredProcesses { + if pt.filter(&process) { + filteredProcesses = append(filteredProcesses, process) + } + } + return filteredProcesses, nil +} diff --git a/pkg/cleaner/filtering_process_table_test.go b/pkg/cleaner/filtering_process_table_test.go new file mode 100644 index 0000000..c135c72 --- /dev/null +++ b/pkg/cleaner/filtering_process_table_test.go @@ -0,0 +1,70 @@ +package cleaner_test + +import ( + "testing" + "time" + + "github.com/buildbarn/bb-remote-execution/internal/mock" + "github.com/buildbarn/bb-remote-execution/pkg/cleaner" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func TestFilteringProcessTable(t *testing.T) { + ctrl := gomock.NewController(t) + + baseProcessTable := mock.NewMockProcessTable(ctrl) + processTable := cleaner.NewFilteringProcessTable( + baseProcessTable, + func(process *cleaner.Process) bool { + return process.UserID == 123 && + process.CreationTime.After(time.Unix(1500000000, 0)) + }) + + t.Run("Failure", func(t *testing.T) { + baseProcessTable.EXPECT().GetProcesses().Return(nil, status.Error(codes.Internal, "Out of memory")) 
+ + // Errors from the base process table should be forwarded. + _, err := processTable.GetProcesses() + require.Equal(t, status.Error(codes.Internal, "Out of memory"), err) + }) + + t.Run("Success", func(t *testing.T) { + baseProcessTable.EXPECT().GetProcesses().Return([]cleaner.Process{ + // Process is running as a different user. It + // should be left alone. + { + ProcessID: 1, + UserID: 122, + CreationTime: time.Unix(1600000000, 0), + }, + // Process is running as the right user, but it + // was started earlier on. It should be left + // alone, as it may be important to the system. + { + ProcessID: 2, + UserID: 123, + CreationTime: time.Unix(1400000000, 0), + }, + // Process that should be matched. + { + ProcessID: 3, + UserID: 123, + CreationTime: time.Unix(1600000000, 0), + }, + }, nil) + + processes, err := processTable.GetProcesses() + require.NoError(t, err) + require.Equal(t, []cleaner.Process{ + { + ProcessID: 3, + UserID: 123, + CreationTime: time.Unix(1600000000, 0), + }, + }, processes) + }) +} diff --git a/pkg/cleaner/idle_invoker.go b/pkg/cleaner/idle_invoker.go new file mode 100644 index 0000000..18838bc --- /dev/null +++ b/pkg/cleaner/idle_invoker.go @@ -0,0 +1,96 @@ +package cleaner + +import ( + "context" + "sync" + + "github.com/buildbarn/bb-storage/pkg/util" +) + +// Cleaner is a function that cleans up some resource provided by the +// operating system, such as stale processes in a process table or files +// in a temporary directory. +type Cleaner func(ctx context.Context) error + +// IdleInvoker is a helper type for invoking a Cleaner function. As it's +// generally not safe to call a Cleaner function while one or more build +// actions are running, they should only be invoked when the system is +// idle. +// +// IdleInvoker keeps track of a use count to determine whether the +// system is idle. When transitioning from idle to busy or from busy to +// idle, the Cleaner function is called. 
+type IdleInvoker struct { + f Cleaner + + lock sync.Mutex + useCount uint + wakeup <-chan struct{} +} + +// NewIdleInvoker creates a new IdleInvoker that is in the idle state. +func NewIdleInvoker(f Cleaner) *IdleInvoker { + return &IdleInvoker{f: f} +} + +func (i *IdleInvoker) clean(ctx context.Context) error { + if i.wakeup != nil { + panic("Cleaning is already in progress") + } + wakeup := make(chan struct{}) + i.wakeup = wakeup + + i.lock.Unlock() + err := i.f(ctx) + i.lock.Lock() + + close(wakeup) + i.wakeup = nil + return err +} + +// Acquire the IdleInvoker by incrementing its use count. If the use +// count transitions from zero to one, the Cleaner is called. +// Acquisition does not take place if the Cleaner returns an error. +func (i *IdleInvoker) Acquire(ctx context.Context) error { + // Wait for existing calls to the Cleaner to finish. + i.lock.Lock() + for i.wakeup != nil { + wakeup := i.wakeup + i.lock.Unlock() + select { + case <-wakeup: + case <-ctx.Done(): + return util.StatusFromContext(ctx) + } + i.lock.Lock() + } + defer i.lock.Unlock() + + // Don't perform cleaning in case we've already been acquired, + // as we don't want to cause disruptions. + if i.useCount == 0 { + if err := i.clean(ctx); err != nil { + return err + } + } + i.useCount++ + return nil +} + +// Release the IdleInvoker by decrementing its use count. If the use +// count transitions from one to zero, the Cleaner is called. The use +// count is always decremented, even if cleaning fails. 
+func (i *IdleInvoker) Release(ctx context.Context) error { + i.lock.Lock() + defer i.lock.Unlock() + + if i.useCount == 0 { + panic("Called Release() on IdleInvoker with a zero use count") + } + i.useCount-- + if i.useCount > 0 { + return nil + } + return i.clean(ctx) +} diff --git a/pkg/cleaner/idle_invoker_test.go b/pkg/cleaner/idle_invoker_test.go new file mode 100644 index 0000000..b95ad7a --- /dev/null +++ b/pkg/cleaner/idle_invoker_test.go @@ -0,0 +1,97 @@ +package cleaner_test + +import ( + "context" + "sync" + "testing" + + "github.com/buildbarn/bb-remote-execution/internal/mock" + "github.com/buildbarn/bb-remote-execution/pkg/cleaner" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" +) + +func TestIdleInvoker(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + mockCleaner := mock.NewMockCleaner(ctrl) + idleInvoker := cleaner.NewIdleInvoker(mockCleaner.Call) + + t.Run("Once", func(t *testing.T) { + // The cleaner function should be called upon + // acquisition and release. + mockCleaner.EXPECT().Call(ctx) + require.NoError(t, idleInvoker.Acquire(ctx)) + + mockCleaner.EXPECT().Call(ctx) + require.NoError(t, idleInvoker.Release(ctx)) + }) + + t.Run("Twice", func(t *testing.T) { + // The cleaner function should only be called once when + // acquired initially, and once during the final + // release. + mockCleaner.EXPECT().Call(ctx) + require.NoError(t, idleInvoker.Acquire(ctx)) + require.NoError(t, idleInvoker.Acquire(ctx)) + + require.NoError(t, idleInvoker.Release(ctx)) + mockCleaner.EXPECT().Call(ctx) + require.NoError(t, idleInvoker.Release(ctx)) + }) + + t.Run("AcquireFailure", func(t *testing.T) { + // Cleaning failures upon acquisition should cause us to + // remain in the initial uninitialized state. This + // causes cleaning to be retried. 
+ mockCleaner.EXPECT().Call(ctx).Return(context.DeadlineExceeded) + require.Equal(t, context.DeadlineExceeded, idleInvoker.Acquire(ctx)) + + mockCleaner.EXPECT().Call(ctx) + require.NoError(t, idleInvoker.Acquire(ctx)) + + mockCleaner.EXPECT().Call(ctx) + require.NoError(t, idleInvoker.Release(ctx)) + }) + + t.Run("ReleaseFailure", func(t *testing.T) { + // Cleaning failures upon release should cause us to + // still release. The error has to be propagated. + mockCleaner.EXPECT().Call(ctx) + require.NoError(t, idleInvoker.Acquire(ctx)) + + mockCleaner.EXPECT().Call(ctx).Return(context.DeadlineExceeded) + require.Equal(t, context.DeadlineExceeded, idleInvoker.Release(ctx)) + + mockCleaner.EXPECT().Call(ctx) + require.NoError(t, idleInvoker.Acquire(ctx)) + + mockCleaner.EXPECT().Call(ctx) + require.NoError(t, idleInvoker.Release(ctx)) + }) + + t.Run("Parallel", func(t *testing.T) { + // The cleaning function should be called exactly once, + // even when used concurrently. + var wg sync.WaitGroup + mockCleaner.EXPECT().Call(ctx) + wg.Add(100) + for i := 0; i < 100; i++ { + go func() { + require.NoError(t, idleInvoker.Acquire(ctx)) + wg.Done() + }() + } + wg.Wait() + + mockCleaner.EXPECT().Call(ctx) + wg.Add(100) + for i := 0; i < 100; i++ { + go func() { + require.NoError(t, idleInvoker.Release(ctx)) + wg.Done() + }() + } + wg.Wait() + }) +} diff --git a/pkg/cleaner/process_table.go b/pkg/cleaner/process_table.go new file mode 100644 index 0000000..fbcc9d0 --- /dev/null +++ b/pkg/cleaner/process_table.go @@ -0,0 +1,19 @@ +package cleaner + +import ( + "time" +) + +// Process running on the operating system. This type contains a subset +// of the information normally displayed by tools such as ps and top. +type Process struct { + ProcessID int + UserID int + CreationTime time.Time +} + +// ProcessTable is an interface for extracting a list of processes +// running on the operating system. 
+type ProcessTable interface { + GetProcesses() ([]Process, error) +} diff --git a/pkg/cleaner/process_table_cleaner.go b/pkg/cleaner/process_table_cleaner.go new file mode 100644 index 0000000..1216074 --- /dev/null +++ b/pkg/cleaner/process_table_cleaner.go @@ -0,0 +1,25 @@ +package cleaner + +import ( + "context" + + "github.com/buildbarn/bb-storage/pkg/util" +) + +// NewProcessTableCleaner creates a decorator for Runner that kills +// processes that were left behind by previous build actions (e.g., +// daemonized processes). +func NewProcessTableCleaner(processTable ProcessTable) Cleaner { + return func(ctx context.Context) error { + processes, err := processTable.GetProcesses() + if err != nil { + return util.StatusWrap(err, "Failed to get processes from process table") + } + for _, process := range processes { + if err := killProcess(int(process.ProcessID)); err != nil { + return util.StatusWrapf(err, "Failed to kill process %d", process.ProcessID) + } + } + return nil + } +} diff --git a/pkg/cleaner/process_table_cleaner_disabled.go b/pkg/cleaner/process_table_cleaner_disabled.go new file mode 100644 index 0000000..911efc0 --- /dev/null +++ b/pkg/cleaner/process_table_cleaner_disabled.go @@ -0,0 +1,13 @@ +//go:build windows +// +build windows + +package cleaner + +import ( + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func killProcess(id int) error { + return status.Error(codes.Unimplemented, "Killing processes is not supported on this platform") +} diff --git a/pkg/cleaner/process_table_cleaner_unix.go b/pkg/cleaner/process_table_cleaner_unix.go new file mode 100644 index 0000000..2ecd262 --- /dev/null +++ b/pkg/cleaner/process_table_cleaner_unix.go @@ -0,0 +1,20 @@ +//go:build darwin || freebsd || linux +// +build darwin freebsd linux + +package cleaner + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +func killProcess(id int) error { + // Ignore EPERM errors, as we may get those if we try to kill + // setuid processes. 
Also ignore ESRCH errors, as we can get + // those if we try to kill a process that already terminated. + if err := unix.Kill(id, syscall.SIGKILL); err != nil && err != syscall.EPERM && err != syscall.ESRCH { + return err + } + return nil +} diff --git a/pkg/cleaner/system_process_table_darwin.go b/pkg/cleaner/system_process_table_darwin.go new file mode 100644 index 0000000..a569900 --- /dev/null +++ b/pkg/cleaner/system_process_table_darwin.go @@ -0,0 +1,38 @@ +//go:build darwin +// +build darwin + +package cleaner + +import ( + "time" + + "github.com/buildbarn/bb-storage/pkg/util" + + "golang.org/x/sys/unix" + "google.golang.org/grpc/codes" +) + +type systemProcessTable struct{} + +func (pt systemProcessTable) GetProcesses() ([]Process, error) { + kinfoProcs, err := unix.SysctlKinfoProcSlice("kern.proc.all") + if err != nil { + return nil, util.StatusWrapWithCode(err, codes.Internal, "Failed to obtain process table") + } + + processes := make([]Process, 0, len(kinfoProcs)) + for _, kinfoProc := range kinfoProcs { + startTime := kinfoProc.Proc.P_starttime + processes = append(processes, Process{ + ProcessID: int(kinfoProc.Proc.P_pid), + UserID: int(kinfoProc.Eproc.Ucred.Uid), + CreationTime: time.Unix(startTime.Sec, int64(startTime.Usec)*1000), + }) + } + return processes, nil +} + +// SystemProcessTable corresponds with the process table of the locally +// running operating system. On this operating system the information is +// extracted from the "kern.proc.all" sysctl. 
+var SystemProcessTable ProcessTable = systemProcessTable{} diff --git a/pkg/cleaner/system_process_table_disabled.go b/pkg/cleaner/system_process_table_disabled.go new file mode 100644 index 0000000..46bd829 --- /dev/null +++ b/pkg/cleaner/system_process_table_disabled.go @@ -0,0 +1,20 @@ +//go:build freebsd || windows
+// +build freebsd windows
+
+package cleaner
+
+import (
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+type systemProcessTable struct{}
+
+func (pt systemProcessTable) GetProcesses() ([]Process, error) {
+	return nil, status.Error(codes.Unimplemented, "Scanning the process table is not supported on this platform")
+}
+
+// SystemProcessTable corresponds with the process table of the locally
+// running operating system. On this operating system this functionality
+// is not available.
+var SystemProcessTable ProcessTable = systemProcessTable{} diff --git a/pkg/cleaner/system_process_table_linux.go b/pkg/cleaner/system_process_table_linux.go new file mode 100644 index 0000000..4b8a98d --- /dev/null +++ b/pkg/cleaner/system_process_table_linux.go @@ -0,0 +1,62 @@ +//go:build linux
+// +build linux
+
+package cleaner
+
+import (
+	"os"
+	"strconv"
+	"time"
+
+	"github.com/buildbarn/bb-storage/pkg/util"
+
+	"golang.org/x/sys/unix"
+	"google.golang.org/grpc/codes"
+)
+
+type systemProcessTable struct{}
+
+func (pt systemProcessTable) GetProcesses() ([]Process, error) {
+	// Open procfs.
+	fd, err := unix.Open("/proc", unix.O_DIRECTORY|unix.O_RDONLY, 0)
+	if err != nil {
+		return nil, util.StatusWrapWithCode(err, codes.Internal, "Failed to open /proc")
+	}
+	f := os.NewFile(uintptr(fd), ".")
+	defer f.Close()
+
+	// Obtain a list of all processes that are currently running. 
+ names, err := f.Readdirnames(-1) + if err != nil { + return nil, util.StatusWrapWithCode(err, codes.Internal, "Failed to obtain directory listing of /proc") + } + + var processes []Process + for _, name := range names { + // Filter out non-process entries (e.g., /proc/cmdline). + pid, err := strconv.ParseInt(name, 10, 0) + if err != nil { + continue + } + + // Stat process directory entries to obtain user ID and + // creation time. + var stat unix.Stat_t + if err := unix.Fstatat(fd, name, &stat, unix.AT_SYMLINK_NOFOLLOW); os.IsNotExist(err) { + continue + } else if err != nil { + return nil, util.StatusWrapfWithCode(err, codes.Internal, "Failed to stat process %d", pid) + } + processes = append(processes, Process{ + ProcessID: int(pid), + UserID: int(stat.Uid), + CreationTime: time.Unix(int64(stat.Ctim.Sec), int64(stat.Ctim.Nsec)), + }) + } + return processes, nil +} + +// SystemProcessTable corresponds with the process table of the locally +// running operating system. On this operating system the information is +// extracted from procfs. +var SystemProcessTable ProcessTable = systemProcessTable{} diff --git a/pkg/cleaner/system_process_table_test.go b/pkg/cleaner/system_process_table_test.go new file mode 100644 index 0000000..6b7a62e --- /dev/null +++ b/pkg/cleaner/system_process_table_test.go @@ -0,0 +1,39 @@ +package cleaner_test + +import ( + "os" + "runtime" + "testing" + "time" + + "github.com/buildbarn/bb-remote-execution/pkg/cleaner" + "github.com/stretchr/testify/require" +) + +func TestSystemProcessTable(t *testing.T) { + // TODO: Implement this functionality on non-Linux platforms. + if runtime.GOOS == "freebsd" || runtime.GOOS == "windows" { + return + } + + processes, err := cleaner.SystemProcessTable.GetProcesses() + require.NoError(t, err) + + // The returned process table should contain the currently + // running process. The user ID and creation time should also be + // sensible. 
+ // TODO: Doesn't testify provide a require.Contains() that takes + // a custom matcher function? + found := false + processID := os.Getpid() + for _, process := range processes { + if process.ProcessID == processID { + found = true + require.Equal(t, os.Getuid(), process.UserID) + require.True(t, process.CreationTime.After(time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC))) + require.False(t, process.CreationTime.After(time.Now())) + break + } + } + require.True(t, found) +} diff --git a/pkg/clock/BUILD.bazel b/pkg/clock/BUILD.bazel new file mode 100644 index 0000000..7f97179 --- /dev/null +++ b/pkg/clock/BUILD.bazel @@ -0,0 +1,20 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "clock", + srcs = ["suspendable_clock.go"], + importpath = "github.com/buildbarn/bb-remote-execution/pkg/clock", + visibility = ["//visibility:public"], + deps = ["@com_github_buildbarn_bb_storage//pkg/clock"], +) + +go_test( + name = "clock_test", + srcs = ["suspendable_clock_test.go"], + deps = [ + ":clock", + "//internal/mock", + "@com_github_golang_mock//gomock", + "@com_github_stretchr_testify//require", + ], +) diff --git a/pkg/clock/suspendable_clock.go b/pkg/clock/suspendable_clock.go new file mode 100644 index 0000000..f35facb --- /dev/null +++ b/pkg/clock/suspendable_clock.go @@ -0,0 +1,289 @@ +package clock + +import ( + "context" + "sync" + "time" + + "github.com/buildbarn/bb-storage/pkg/clock" +) + +// Suspendable is an interface type of the methods of SuspendableClock +// that are used by NewSuspendingBlobAccess() and +// NewSuspendingDirectoryFetcher(). +type Suspendable interface { + Suspend() + Resume() +} + +// UnsuspendedDurationKey instances can be provided to Context.Value() +// to obtain the total amount of time a SuspendableClock associated with +// the Context object was not suspended, since the creation of the +// Context. 
+type UnsuspendedDurationKey struct{} + +// SuspendableClock is a decorator for Clock that allows Timers and +// Contexts with timeouts to be suspended temporarily. This decorator +// can, for example, be used to let FUSE-based workers compensate the +// execution timeout of build actions with time spent performing +// BlobAccess reads. +type SuspendableClock struct { + base clock.Clock + maximumSuspension time.Duration + timeoutThreshold time.Duration + + lock sync.Mutex + suspensionCount int + unsuspensionStart time.Time + totalUnsuspended time.Duration +} + +var ( + _ clock.Clock = (*SuspendableClock)(nil) + _ Suspendable = (*SuspendableClock)(nil) +) + +// NewSuspendableClock creates a new SuspendableClock. +// +// The maximumSuspension argument denotes the maximum amount of time +// Timers and Contexts with timeouts may be suspended. This prevents +// users from bypassing timeouts when creating access patterns that +// cause the clock to be suspended excessively. +func NewSuspendableClock(base clock.Clock, maximumSuspension, timeoutThreshold time.Duration) *SuspendableClock { + return &SuspendableClock{ + base: base, + maximumSuspension: maximumSuspension, + timeoutThreshold: timeoutThreshold, + unsuspensionStart: time.Unix(0, 0), + } +} + +// Suspend the Clock, thereby causing all active Timer and Context +// objects to not trigger (except if the maximum suspension duration is +// reached). +// +// Clocks can be suspended multiple times. The clock will only continue +// to run if Resume() is called an equal number of times. +func (c *SuspendableClock) Suspend() { + c.lock.Lock() + defer c.lock.Unlock() + + if c.suspensionCount == 0 { + c.totalUnsuspended += c.base.Now().Sub(c.unsuspensionStart) + } + c.suspensionCount++ +} + +// Resume the Clock, thereby causing all active Timer and Context +// objects to continue processing. 
+func (c *SuspendableClock) Resume() { + c.lock.Lock() + defer c.lock.Unlock() + + if c.suspensionCount == 0 { + panic("Attempted to release a suspendedable clock that wasn't acquired") + } + c.suspensionCount-- + if c.suspensionCount == 0 { + c.unsuspensionStart = c.base.Now() + } +} + +// Now returns the current time of day. +func (c *SuspendableClock) Now() time.Time { + return c.base.Now() +} + +// getTotalUnsuspendedNow computes the total amount of time the clock +// was not suspended since its creation. If the current time is needed +// to compute the exact duration, a call is made into the underlying +// clock. +func (c *SuspendableClock) getTotalUnsuspendedNow() time.Duration { + totalUnsuspended := c.totalUnsuspended + if c.suspensionCount == 0 { + // Clock is not suspended right now. + totalUnsuspended += c.base.Now().Sub(c.unsuspensionStart) + } + return totalUnsuspended +} + +// getTotalUnsuspendedWithTime is identical to getTotalUnsuspendedNow, +// except that a known time value can be provided. +func (c *SuspendableClock) getTotalUnsuspendedWithTime(now time.Time) time.Duration { + totalUnsuspended := c.totalUnsuspended + if c.suspensionCount == 0 && now.After(c.unsuspensionStart) { + // Clock is not suspended right now. + totalUnsuspended += now.Sub(c.unsuspensionStart) + } + return totalUnsuspended +} + +// NewContextWithTimeout creates a Context object that automatically +// cancels itself after a certain amount of time has passed, taking +// suspensions of the Clock into account. +func (c *SuspendableClock) NewContextWithTimeout(parent context.Context, d time.Duration) (context.Context, context.CancelFunc) { + // Wrap the context into one that the maximum suspension already + // applied. This ensures that Context.Deadline() returns a + // proper upper bound. It also provides us a usable CancelFunc. 
+ baseContext, baseCancel := c.base.NewContextWithTimeout(parent, d+c.maximumSuspension) + baseDoneChannel := baseContext.Done() + + // Create a context object to be returned by this function. + doneChannel := make(chan struct{}) + ctx := &suspendableContext{ + Context: baseContext, + doneChannel: doneChannel, + + lock: &c.lock, + } + + // Gather the initial amount of time not suspended at the start, + // so that we can compute the total amount we've observed upon + // completion. + c.lock.Lock() + go func() { + initialTotalUnsuspended := c.getTotalUnsuspendedNow() + finalTotalUnsuspended := initialTotalUnsuspended + d + for { + c.lock.Unlock() + baseTimer, baseChannel := c.base.NewTimer(d) + select { + case now := <-baseChannel: + // Timer expired. + c.lock.Lock() + currentTotalUnsuspended := c.getTotalUnsuspendedWithTime(now) + d = finalTotalUnsuspended - currentTotalUnsuspended + if d < c.timeoutThreshold { + // Amount of time suspended in + // the meantime is not worth + // creating another timer for. + ctx.err = context.DeadlineExceeded + ctx.unsuspendedDuration = currentTotalUnsuspended - initialTotalUnsuspended + c.lock.Unlock() + close(doneChannel) + return + } + case <-baseDoneChannel: + // Base context got canceled. + c.lock.Lock() + ctx.err = baseContext.Err() + ctx.unsuspendedDuration = c.getTotalUnsuspendedNow() - initialTotalUnsuspended + c.lock.Unlock() + baseTimer.Stop() + close(doneChannel) + return + } + } + }() + + return ctx, baseCancel +} + +// NewTimer creates a channel that publishes the time of day at a point +// of time in the future, taking suspension of the Clock into account. +func (c *SuspendableClock) NewTimer(d time.Duration) (clock.Timer, <-chan time.Time) { + // Create a timer that puts an upper bound on the amount of time + // the timer may be delayed due to suspension. This prevents + // users from delaying the timer indefinitely. 
+ maximumSuspensionTimer, maximumSuspensionChannel := c.base.NewTimer(d + c.maximumSuspension) + + // Create timer handle to be returned by this function. + stopChannel := make(chan struct{}) + t := &suspendableTimer{ + lock: &c.lock, + stopChannel: stopChannel, + } + resultChannel := make(chan time.Time, 1) + + c.lock.Lock() + go func() { + finalTotalUnsuspended := c.getTotalUnsuspendedNow() + d + for { + c.lock.Unlock() + baseTimer, baseChannel := c.base.NewTimer(d) + select { + case now := <-baseChannel: + // Timer expired. + c.lock.Lock() + d = finalTotalUnsuspended - c.getTotalUnsuspendedWithTime(now) + if d < c.timeoutThreshold { + // Amount of time suspended in + // the meantime is not worth + // creating another timer for. + t.stopChannel = nil + c.lock.Unlock() + maximumSuspensionTimer.Stop() + resultChannel <- now + return + } + case now := <-maximumSuspensionChannel: + // Maximum amount of suspension reached. + c.lock.Lock() + t.stopChannel = nil + c.lock.Unlock() + baseTimer.Stop() + resultChannel <- now + return + case <-stopChannel: + // Timer stopped. + maximumSuspensionTimer.Stop() + baseTimer.Stop() + return + } + } + }() + return t, resultChannel +} + +// suspendableContext is the implementation of Context that is returned +// by SuspendableClock.NewContextWithTimeout(). 
+type suspendableContext struct { + context.Context + doneChannel <-chan struct{} + + lock *sync.Mutex + err error + unsuspendedDuration time.Duration +} + +func (ctx *suspendableContext) Done() <-chan struct{} { + return ctx.doneChannel +} + +func (ctx *suspendableContext) Err() error { + ctx.lock.Lock() + defer ctx.lock.Unlock() + + return ctx.err +} + +func (ctx *suspendableContext) Value(key interface{}) interface{} { + if key != (UnsuspendedDurationKey{}) { + return ctx.Context.Value(key) + } + + ctx.lock.Lock() + defer ctx.lock.Unlock() + + return ctx.unsuspendedDuration +} + +// suspendableTimer is the implementation of Timer that is returned by +// SuspendableClock.NewTimer(). Its sole purpose is to provide +// cancelation. +type suspendableTimer struct { + lock *sync.Mutex + stopChannel chan<- struct{} +} + +func (ctx *suspendableTimer) Stop() bool { + ctx.lock.Lock() + defer ctx.lock.Unlock() + + if ctx.stopChannel != nil { + close(ctx.stopChannel) + ctx.stopChannel = nil + return true + } + return false +} diff --git a/pkg/clock/suspendable_clock_test.go b/pkg/clock/suspendable_clock_test.go new file mode 100644 index 0000000..3ad7fc7 --- /dev/null +++ b/pkg/clock/suspendable_clock_test.go @@ -0,0 +1,272 @@ +package clock_test + +import ( + "context" + "testing" + "time" + + "github.com/buildbarn/bb-remote-execution/internal/mock" + "github.com/buildbarn/bb-remote-execution/pkg/clock" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" +) + +func TestSuspendableClockNow(t *testing.T) { + ctrl := gomock.NewController(t) + + baseClock := mock.NewMockClock(ctrl) + suspendableClock := clock.NewSuspendableClock(baseClock, time.Hour, time.Second) + + t.Run("Success", func(t *testing.T) { + // Calls to obtain the time of day should simply be forwarded. 
+ baseClock.EXPECT().Now().Return(time.Unix(123, 0)) + require.Equal(t, time.Unix(123, 0), suspendableClock.Now()) + }) +} + +func TestSuspendableClockNewContextWithTimeout(t *testing.T) { + ctrl := gomock.NewController(t) + + baseClock := mock.NewMockClock(ctrl) + suspendableClock := clock.NewSuspendableClock(baseClock, time.Hour, time.Second) + + t.Run("NoSuspension", func(t *testing.T) { + // When no suspension calls happen against the + // SuspendableClock, the context should time out after + // the provided amount of time. + baseContext := mock.NewMockContext(ctrl) + maximumSuspensionContext := mock.NewMockContext(ctrl) + maximumSuspensionCancel := mock.NewMockCancelFunc(ctrl) + baseClock.EXPECT().NewContextWithTimeout(baseContext, time.Hour+5*time.Second). + Return(maximumSuspensionContext, maximumSuspensionCancel.Call) + baseClock.EXPECT().Now().Return(time.Unix(1018, 0)) + maximumSuspensionDoneChannel := make(chan struct{}) + maximumSuspensionContext.EXPECT().Done().Return(maximumSuspensionDoneChannel) + baseTimer := mock.NewMockTimer(ctrl) + baseChannel := make(chan time.Time) + baseClock.EXPECT().NewTimer(5*time.Second).Return(baseTimer, baseChannel) + + suspendableContext, suspendableCancel := suspendableClock.NewContextWithTimeout(baseContext, 5*time.Second) + suspendableDoneChannel := suspendableContext.Done() + require.Empty(t, suspendableDoneChannel) + + baseChannel <- time.Unix(1023, 0) + + <-suspendableContext.Done() + require.Equal(t, context.DeadlineExceeded, suspendableContext.Err()) + + maximumSuspensionCancel.EXPECT().Call() + suspendableCancel() + + require.Equal(t, 5*time.Second, suspendableContext.Value(clock.UnsuspendedDurationKey{})) + }) + + t.Run("Canceled", func(t *testing.T) { + // Cancellation of the suspendable context should cause + // all associated resources to be freed. 
+ baseContext := mock.NewMockContext(ctrl) + maximumSuspensionContext := mock.NewMockContext(ctrl) + maximumSuspensionCancel := mock.NewMockCancelFunc(ctrl) + baseClock.EXPECT().NewContextWithTimeout(baseContext, time.Hour+5*time.Second). + Return(maximumSuspensionContext, maximumSuspensionCancel.Call) + baseClock.EXPECT().Now().Return(time.Unix(1100, 0)) + maximumSuspensionDoneChannel := make(chan struct{}) + maximumSuspensionContext.EXPECT().Done().Return(maximumSuspensionDoneChannel) + baseTimer := mock.NewMockTimer(ctrl) + baseChannel := make(chan time.Time) + baseClock.EXPECT().NewTimer(5*time.Second).Return(baseTimer, baseChannel) + + suspendableContext, suspendableCancel := suspendableClock.NewContextWithTimeout(baseContext, 5*time.Second) + suspendableDoneChannel := suspendableContext.Done() + require.Empty(t, suspendableDoneChannel) + + baseClock.EXPECT().Now().Return(time.Unix(1103, 0)) + maximumSuspensionCancel.EXPECT().Call() + maximumSuspensionContext.EXPECT().Err().Return(context.Canceled) + baseTimer.EXPECT().Stop().Return(true) + + suspendableCancel() + + close(maximumSuspensionDoneChannel) + + <-suspendableContext.Done() + require.Equal(t, context.Canceled, suspendableContext.Err()) + + require.Equal(t, 3*time.Second, suspendableContext.Value(clock.UnsuspendedDurationKey{})) + }) + + t.Run("Suspension", func(t *testing.T) { + // Create a context with a timeout of five seconds. Suspend + // the clock for one seconds during these five seconds. + // This should cause a second timer to be created with a + // one second timeout. + baseContext := mock.NewMockContext(ctrl) + maximumSuspensionContext := mock.NewMockContext(ctrl) + maximumSuspensionCancel := mock.NewMockCancelFunc(ctrl) + baseClock.EXPECT().NewContextWithTimeout(baseContext, time.Hour+5*time.Second). 
+ Return(maximumSuspensionContext, maximumSuspensionCancel.Call) + baseClock.EXPECT().Now().Return(time.Unix(1220, 0)) + maximumSuspensionDoneChannel := make(chan struct{}) + maximumSuspensionContext.EXPECT().Done().Return(maximumSuspensionDoneChannel) + baseTimer1 := mock.NewMockTimer(ctrl) + baseChannel1 := make(chan time.Time) + baseClock.EXPECT().NewTimer(5*time.Second).Return(baseTimer1, baseChannel1) + + suspendableContext, suspendableCancel := suspendableClock.NewContextWithTimeout(baseContext, 5*time.Second) + suspendableDoneChannel := suspendableContext.Done() + require.Empty(t, suspendableDoneChannel) + + baseClock.EXPECT().Now().Return(time.Unix(1222, 0)) + suspendableClock.Suspend() + + // It is possible to suspend the clock recursively. It + // should have no effect on the bookkeeping. + suspendableClock.Suspend() + suspendableClock.Suspend() + suspendableClock.Resume() + suspendableClock.Resume() + + baseClock.EXPECT().Now().Return(time.Unix(1223, 0)) + suspendableClock.Resume() + + baseTimer2 := mock.NewMockTimer(ctrl) + baseChannel2 := make(chan time.Time) + baseClock.EXPECT().NewTimer(1*time.Second).Return(baseTimer2, baseChannel2) + + baseChannel1 <- time.Unix(1225, 0) + + baseChannel2 <- time.Unix(1226, 0) + + <-suspendableContext.Done() + require.Equal(t, context.DeadlineExceeded, suspendableContext.Err()) + + maximumSuspensionCancel.EXPECT().Call() + suspendableCancel() + + require.Equal(t, 5*time.Second, suspendableContext.Value(clock.UnsuspendedDurationKey{})) + }) +} + +func TestSuspendableClockNewTimer(t *testing.T) { + ctrl := gomock.NewController(t) + + baseClock := mock.NewMockClock(ctrl) + suspendableClock := clock.NewSuspendableClock(baseClock, time.Hour, time.Second) + + t.Run("NoSuspension", func(t *testing.T) { + // When no suspension calls happen against the + // SuspendableClock, a timer created from it should + // behave like an ordinary one. 
+ maximumSuspensionTimer := mock.NewMockTimer(ctrl) + maximumSuspensionChannel := make(chan time.Time) + baseClock.EXPECT().NewTimer(time.Hour+5*time.Second).Return(maximumSuspensionTimer, maximumSuspensionChannel) + baseClock.EXPECT().Now().Return(time.Unix(1018, 0)) + baseTimer := mock.NewMockTimer(ctrl) + baseChannel := make(chan time.Time) + baseClock.EXPECT().NewTimer(5*time.Second).Return(baseTimer, baseChannel) + + suspendableTimer, suspendableChannel := suspendableClock.NewTimer(5 * time.Second) + require.Empty(t, suspendableChannel) + + maximumSuspensionTimer.EXPECT().Stop().Return(true) + + baseChannel <- time.Unix(1023, 0) + require.Equal(t, time.Unix(1023, 0), <-suspendableChannel) + + require.False(t, suspendableTimer.Stop()) + }) + + t.Run("Stopped", func(t *testing.T) { + // Cancellations on the timer returned by + // SuspendableClock should be propagated to the + // underlying instance. + maximumSuspensionTimer := mock.NewMockTimer(ctrl) + maximumSuspensionChannel := make(chan time.Time) + baseClock.EXPECT().NewTimer(time.Hour+5*time.Second).Return(maximumSuspensionTimer, maximumSuspensionChannel) + baseClock.EXPECT().Now().Return(time.Unix(1105, 0)) + baseTimer := mock.NewMockTimer(ctrl) + baseChannel := make(chan time.Time) + baseClock.EXPECT().NewTimer(5*time.Second).Return(baseTimer, baseChannel) + + suspendableTimer, suspendableChannel := suspendableClock.NewTimer(5 * time.Second) + require.Empty(t, suspendableChannel) + + maximumSuspensionTimer.EXPECT().Stop().Return(true) + wait := make(chan struct{}) + baseTimer.EXPECT().Stop().DoAndReturn(func() bool { + close(wait) + return true + }) + require.True(t, suspendableTimer.Stop()) + <-wait + }) + + t.Run("Suspension", func(t *testing.T) { + // Create a timer that runs for five seconds. Suspend + // the clock for one seconds during these five seconds. + // This should cause a second timer to be created with a + // one second timeout. 
+ maximumSuspensionTimer := mock.NewMockTimer(ctrl) + maximumSuspensionChannel := make(chan time.Time) + baseClock.EXPECT().NewTimer(time.Hour+5*time.Second).Return(maximumSuspensionTimer, maximumSuspensionChannel) + baseClock.EXPECT().Now().Return(time.Unix(1220, 0)) + baseTimer1 := mock.NewMockTimer(ctrl) + baseChannel1 := make(chan time.Time) + baseClock.EXPECT().NewTimer(5*time.Second).Return(baseTimer1, baseChannel1) + + _, suspendableChannel := suspendableClock.NewTimer(5 * time.Second) + require.Empty(t, suspendableChannel) + + baseClock.EXPECT().Now().Return(time.Unix(1222, 0)) + suspendableClock.Suspend() + + // It is possible to suspend the clock recursively. It + // should have no effect on the bookkeeping. + suspendableClock.Suspend() + suspendableClock.Suspend() + suspendableClock.Resume() + suspendableClock.Resume() + + baseClock.EXPECT().Now().Return(time.Unix(1223, 0)) + suspendableClock.Resume() + + baseTimer2 := mock.NewMockTimer(ctrl) + baseChannel2 := make(chan time.Time) + baseClock.EXPECT().NewTimer(1*time.Second).Return(baseTimer2, baseChannel2) + + baseChannel1 <- time.Unix(1225, 0) + + maximumSuspensionTimer.EXPECT().Stop().Return(true) + + baseChannel2 <- time.Unix(1226, 0) + require.Equal(t, time.Unix(1226, 0), <-suspendableChannel) + }) + + t.Run("SuspensionTooSmall", func(t *testing.T) { + // Suspend the clock for just a very small amount of + // time. This should not cause a second timer to be + // created, as that would only contribute to more load + // on the system. 
+ maximumSuspensionTimer := mock.NewMockTimer(ctrl) + maximumSuspensionChannel := make(chan time.Time) + baseClock.EXPECT().NewTimer(time.Hour+5*time.Second).Return(maximumSuspensionTimer, maximumSuspensionChannel) + baseClock.EXPECT().Now().Return(time.Unix(1320, 0)) + baseTimer := mock.NewMockTimer(ctrl) + baseChannel := make(chan time.Time) + baseClock.EXPECT().NewTimer(5*time.Second).Return(baseTimer, baseChannel) + + _, suspendableChannel := suspendableClock.NewTimer(5 * time.Second) + require.Empty(t, suspendableChannel) + + baseClock.EXPECT().Now().Return(time.Unix(1322, 0)) + suspendableClock.Suspend() + + baseClock.EXPECT().Now().Return(time.Unix(1322, 500000000)) + suspendableClock.Resume() + + maximumSuspensionTimer.EXPECT().Stop().Return(true) + + baseChannel <- time.Unix(1325, 0) + require.Equal(t, time.Unix(1325, 0), <-suspendableChannel) + }) +} diff --git a/pkg/credentials/BUILD.bazel b/pkg/credentials/BUILD.bazel new file mode 100644 index 0000000..d901706 --- /dev/null +++ b/pkg/credentials/BUILD.bazel @@ -0,0 +1,34 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "credentials", + srcs = [ + "proc_credentials_nonunix.go", + "proc_credentials_unix.go", + ], + importpath = "github.com/buildbarn/bb-remote-execution/pkg/credentials", + visibility = ["//visibility:public"], + deps = select({ + "@io_bazel_rules_go//go/platform:android": [ + "//pkg/proto/configuration/credentials", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "//pkg/proto/configuration/credentials", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "//pkg/proto/configuration/credentials", + ], + "@io_bazel_rules_go//go/platform:ios": [ + "//pkg/proto/configuration/credentials", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "//pkg/proto/configuration/credentials", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "//pkg/proto/configuration/credentials", + "@org_golang_google_grpc//codes", + "@org_golang_google_grpc//status", + ], + 
"//conditions:default": [], + }), +) diff --git a/pkg/credentials/proc_credentials_nonunix.go b/pkg/credentials/proc_credentials_nonunix.go new file mode 100644 index 0000000..d6e52ea --- /dev/null +++ b/pkg/credentials/proc_credentials_nonunix.go @@ -0,0 +1,24 @@ +//go:build windows +// +build windows + +package credentials + +import ( + "os" + "syscall" + + "github.com/buildbarn/bb-remote-execution/pkg/proto/configuration/credentials" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// GetSysProcAttrFromConfiguration returns a SysProcAttr object that can +// be passed to LocalRunner to run child processes with custom +// credentials. +func GetSysProcAttrFromConfiguration(configuration *credentials.UNIXCredentialsConfiguration) (*syscall.SysProcAttr, int, error) { + if configuration != nil { + return nil, 0, status.Error(codes.InvalidArgument, "UNIX credentials cannot be specified on this platform") + } + return &syscall.SysProcAttr{}, os.Getuid(), nil +} diff --git a/pkg/credentials/proc_credentials_unix.go b/pkg/credentials/proc_credentials_unix.go new file mode 100644 index 0000000..effb64b --- /dev/null +++ b/pkg/credentials/proc_credentials_unix.go @@ -0,0 +1,27 @@ +//go:build darwin || freebsd || linux +// +build darwin freebsd linux + +package credentials + +import ( + "os" + "syscall" + + "github.com/buildbarn/bb-remote-execution/pkg/proto/configuration/credentials" +) + +// GetSysProcAttrFromConfiguration returns a SysProcAttr object that can +// be passed to LocalRunner to run child processes with custom +// credentials. 
+func GetSysProcAttrFromConfiguration(configuration *credentials.UNIXCredentialsConfiguration) (*syscall.SysProcAttr, int, error) { + if configuration == nil { + return &syscall.SysProcAttr{}, os.Getuid(), nil + } + return &syscall.SysProcAttr{ + Credential: &syscall.Credential{ + Uid: configuration.UserId, + Gid: configuration.GroupId, + Groups: configuration.AdditionalGroupIds, + }, + }, int(configuration.UserId), nil +} diff --git a/pkg/filesystem/BUILD.bazel b/pkg/filesystem/BUILD.bazel new file mode 100644 index 0000000..0f73432 --- /dev/null +++ b/pkg/filesystem/BUILD.bazel @@ -0,0 +1,54 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "filesystem", + srcs = [ + "bitmap_sector_allocator.go", + "block_device_backed_file_pool.go", + "configuration.go", + "directory_backed_file_pool.go", + "empty_file_pool.go", + "file_pool.go", + "in_memory_file_pool.go", + "lazy_directory.go", + "metrics_file_pool.go", + "quota_enforcing_file_pool.go", + "sector_allocator.go", + ], + importpath = "github.com/buildbarn/bb-remote-execution/pkg/filesystem", + visibility = ["//visibility:public"], + deps = [ + "//pkg/proto/configuration/filesystem", + "@com_github_buildbarn_bb_storage//pkg/blockdevice", + "@com_github_buildbarn_bb_storage//pkg/filesystem", + "@com_github_buildbarn_bb_storage//pkg/filesystem/path", + "@com_github_buildbarn_bb_storage//pkg/util", + "@com_github_prometheus_client_golang//prometheus", + "@org_golang_google_grpc//codes", + "@org_golang_google_grpc//status", + ], +) + +go_test( + name = "filesystem_test", + srcs = [ + "bitmap_sector_allocator_test.go", + "block_device_backed_file_pool_test.go", + "directory_backed_file_pool_test.go", + "empty_file_pool_test.go", + "in_memory_file_pool_test.go", + "lazy_directory_test.go", + "quota_enforcing_file_pool_test.go", + ], + deps = [ + ":filesystem", + "//internal/mock", + "@com_github_buildbarn_bb_storage//pkg/filesystem", + 
"@com_github_buildbarn_bb_storage//pkg/filesystem/path", + "@com_github_buildbarn_bb_storage//pkg/testutil", + "@com_github_golang_mock//gomock", + "@com_github_stretchr_testify//require", + "@org_golang_google_grpc//codes", + "@org_golang_google_grpc//status", + ], +) diff --git a/pkg/filesystem/access/BUILD.bazel b/pkg/filesystem/access/BUILD.bazel new file mode 100644 index 0000000..cd3040d --- /dev/null +++ b/pkg/filesystem/access/BUILD.bazel @@ -0,0 +1,36 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "access", + srcs = [ + "bloom_filter_computing_monitor.go", + "bloom_filter_reader.go", + "monitor.go", + "path_hashes.go", + ], + importpath = "github.com/buildbarn/bb-remote-execution/pkg/filesystem/access", + visibility = ["//visibility:public"], + deps = [ + "//pkg/proto/resourceusage", + "@com_github_buildbarn_bb_storage//pkg/filesystem/path", + "@org_golang_google_grpc//codes", + "@org_golang_google_grpc//status", + ], +) + +go_test( + name = "access_test", + srcs = [ + "bloom_filter_computing_monitor_test.go", + "bloom_filter_reader_test.go", + ], + deps = [ + ":access", + "//pkg/proto/resourceusage", + "@com_github_buildbarn_bb_storage//pkg/filesystem/path", + "@com_github_buildbarn_bb_storage//pkg/testutil", + "@com_github_stretchr_testify//require", + "@org_golang_google_grpc//codes", + "@org_golang_google_grpc//status", + ], +) diff --git a/pkg/filesystem/access/bloom_filter_computing_monitor.go b/pkg/filesystem/access/bloom_filter_computing_monitor.go new file mode 100644 index 0000000..78f0e67 --- /dev/null +++ b/pkg/filesystem/access/bloom_filter_computing_monitor.go @@ -0,0 +1,227 @@ +package access + +import ( + "math" + "sync" + "sync/atomic" + + "github.com/buildbarn/bb-remote-execution/pkg/proto/resourceusage" + "github.com/buildbarn/bb-storage/pkg/filesystem/path" +) + +// bloomFilterComputingState is the shared state that's referenced by +// all instances of 
BloomFilterComputingUnreadDirectoryMonitor and +// bloomFilterComputingReadDirectoryMonitor. It contains all of the +// hashes that should be encoded in the resulting Bloom filter. +type bloomFilterComputingState struct { + directoriesResolved atomic.Uint64 + + lock sync.Mutex + allHashes map[PathHashes]struct{} + directoriesRead uint64 + filesRead uint64 +} + +// BloomFilterComputingUnreadDirectoryMonitor is an implementation of +// UnreadDirectoryMonitor that is capable of computing a Bloom filter of +// all of the paths of files and directories that have been read. +type BloomFilterComputingUnreadDirectoryMonitor struct { + state *bloomFilterComputingState + hashes PathHashes +} + +// NewBloomFilterComputingUnreadDirectoryMonitor creates an +// UnreadDirectoryMonitor that is capable of computing a Bloom filter of +// all of the paths of files and directories that have been read. The +// instance that is returned corresponds to the empty path (root +// directory). +func NewBloomFilterComputingUnreadDirectoryMonitor() *BloomFilterComputingUnreadDirectoryMonitor { + udm := &BloomFilterComputingUnreadDirectoryMonitor{ + state: &bloomFilterComputingState{ + allHashes: map[PathHashes]struct{}{}, + }, + hashes: RootPathHashes, + } + // Assume the root directory is always resolved. + udm.state.directoriesResolved.Store(1) + return udm +} + +var _ UnreadDirectoryMonitor = (*BloomFilterComputingUnreadDirectoryMonitor)(nil) + +// ReadDirectory can be called to indicate that the contents of a +// directory have been read. It causes the directory to be added to the +// resulting Bloom filter. 
+func (udm *BloomFilterComputingUnreadDirectoryMonitor) ReadDirectory() ReadDirectoryMonitor { + s := udm.state + s.lock.Lock() + s.directoriesRead++ + s.allHashes[udm.hashes] = struct{}{} + s.lock.Unlock() + + return &bloomFilterComputingReadDirectoryMonitor{ + state: s, + hashes: udm.hashes, + } +} + +// largestPrimeOffsets is a table of the largest prime numbers below +// powers of 2. To save space, the delta against the powers of 2 are +// stored. Reference: https://oeis.org/A014234. +var largestPrimeOffsets = [...]uint8{ + 1<<3 - 7, + 1<<4 - 13, + 1<<5 - 31, + 1<<6 - 61, + 1<<7 - 127, + 1<<8 - 251, + 1<<9 - 509, + 1<<10 - 1021, + 1<<11 - 2039, + 1<<12 - 4093, + 1<<13 - 8191, + 1<<14 - 16381, + 1<<15 - 32749, + 1<<16 - 65521, + 1<<17 - 131071, + 1<<18 - 262139, + 1<<19 - 524287, + 1<<20 - 1048573, + 1<<21 - 2097143, + 1<<22 - 4194301, + 1<<23 - 8388593, + 1<<24 - 16777213, + 1<<25 - 33554393, + 1<<26 - 67108859, + 1<<27 - 134217689, + 1<<28 - 268435399, + 1<<29 - 536870909, + 1<<30 - 1073741789, + 1<<31 - 2147483647, + 1<<32 - 4294967291, + 1<<33 - 8589934583, + 1<<34 - 17179869143, + 1<<35 - 34359738337, + 1<<36 - 68719476731, + 1<<37 - 137438953447, + 1<<38 - 274877906899, + 1<<39 - 549755813881, + 1<<40 - 1099511627689, + 1<<41 - 2199023255531, + 1<<42 - 4398046511093, + 1<<43 - 8796093022151, + 1<<44 - 17592186044399, + 1<<45 - 35184372088777, + 1<<46 - 70368744177643, + 1<<47 - 140737488355213, + 1<<48 - 281474976710597, + 1<<49 - 562949953421231, + 1<<50 - 1125899906842597, + 1<<51 - 2251799813685119, + 1<<52 - 4503599627370449, + 1<<53 - 9007199254740881, + 1<<54 - 18014398509481951, + 1<<55 - 36028797018963913, + 1<<56 - 72057594037927931, + 1<<57 - 144115188075855859, + 1<<58 - 288230376151711717, + 1<<59 - 576460752303423433, + 1<<60 - 1152921504606846883, + 1<<61 - 2305843009213693951, + 1<<62 - 4611686018427387847, + 1<<63 - 9223372036854775783, +} + +// GetBloomFilter returns the Bloom filter that contains all of the +// paths of files and 
directories that have been read. +// +// The size of the resulting Bloom filter is based on the desired bits +// per element, and the maximum size in bits. In case it is limited by +// the maximum size, the resulting Bloom filter will be oversaturated, +// causing the probability of false positives to increase. +func (udm *BloomFilterComputingUnreadDirectoryMonitor) GetBloomFilter(bitsPerElement, maximumSizeBytes int) ([]byte, uint32) { + s := udm.state + s.lock.Lock() + defer s.lock.Unlock() + + // Determine the size of the Bloom filter, taking both the + // element count and size per element. The resulting size is the + // largest prime below a power of 2, not exceeding the + // configured maximum size. + elementCount := len(s.allHashes) + desiredSizeBits := elementCount * bitsPerElement + sizeBits, sizeBytes := 7, 1 + for shift, largestPrimeOffset := range largestPrimeOffsets { + newSizeBits := 8< maximumSizeBytes { + break + } + sizeBits, sizeBytes = newSizeBits, newSizeBytes + if sizeBits >= desiredSizeBits { + break + } + } + + // Determine the optimal number of hash functions to use. + hashFunctions := uint32(1) + if elementCount > 0 { + hashFunctions = uint32(math.Round(float64(sizeBits) / float64(elementCount) * math.Ln2)) + if hashFunctions < 1 { + hashFunctions = 1 + } + } + + // Construct the Bloom filter using the desired size and number + // of hash functions. The Bloom filter is terminated with a 1 + // bit, so that consumers can reobtain the exact size in bits. + bloomFilter := make([]byte, sizeBytes) + bloomFilter[sizeBytes-1] |= 1 << (sizeBits % 8) + for hashes := range s.allHashes { + hashIterator := hashes.Finalize() + for i := uint32(0); i < hashFunctions; i++ { + bit := hashIterator.GetNextHash() % uint64(sizeBits) + bloomFilter[bit/8] |= 1 << (bit % 8) + } + } + return bloomFilter, hashFunctions +} + +// GetInputRootResourceUsage returns statistics on how many files and +// directories in an input root are being accessed. 
This message can be +// attached to the auxiliary metadata of ActionResult. +func (udm *BloomFilterComputingUnreadDirectoryMonitor) GetInputRootResourceUsage() *resourceusage.InputRootResourceUsage { + s := udm.state + s.lock.Lock() + defer s.lock.Unlock() + + directoriesResolved := s.directoriesResolved.Load() + return &resourceusage.InputRootResourceUsage{ + DirectoriesResolved: directoriesResolved, + DirectoriesRead: s.directoriesRead, + FilesRead: s.filesRead, + } +} + +type bloomFilterComputingReadDirectoryMonitor struct { + state *bloomFilterComputingState + hashes PathHashes +} + +func (sdm *bloomFilterComputingReadDirectoryMonitor) ResolvedDirectory(name path.Component) UnreadDirectoryMonitor { + s := sdm.state + s.directoriesResolved.Add(1) + + return &BloomFilterComputingUnreadDirectoryMonitor{ + state: s, + hashes: sdm.hashes.AppendComponent(name), + } +} + +func (sdm *bloomFilterComputingReadDirectoryMonitor) ReadFile(name path.Component) { + s := sdm.state + s.lock.Lock() + s.filesRead++ + s.allHashes[sdm.hashes.AppendComponent(name)] = struct{}{} + s.lock.Unlock() +} diff --git a/pkg/filesystem/access/bloom_filter_computing_monitor_test.go b/pkg/filesystem/access/bloom_filter_computing_monitor_test.go new file mode 100644 index 0000000..a7011f0 --- /dev/null +++ b/pkg/filesystem/access/bloom_filter_computing_monitor_test.go @@ -0,0 +1,197 @@ +package access_test + +import ( + "testing" + + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/access" + "github.com/buildbarn/bb-remote-execution/pkg/proto/resourceusage" + "github.com/buildbarn/bb-storage/pkg/filesystem/path" + "github.com/buildbarn/bb-storage/pkg/testutil" + "github.com/stretchr/testify/require" +) + +func TestBloomFilterComputingUnreadDirectoryMonitor(t *testing.T) { + rootUnreadDirectoryMonitor := access.NewBloomFilterComputingUnreadDirectoryMonitor() + testutil.RequireEqualProto(t, &resourceusage.InputRootResourceUsage{ + DirectoriesResolved: 1, + DirectoriesRead: 0, + FilesRead: 
0, + }, rootUnreadDirectoryMonitor.GetInputRootResourceUsage()) + + t.Run("Empty", func(t *testing.T) { + // Bloom filters without any paths in them should + // always be 1 byte in size, with no elements set. The + // number of hash functions should be exactly 1, as it + // causes consumers to do the minimal amount of work, + // while ensuring a zero percent false positive rate. + bloomFilter, hashFunctions := rootUnreadDirectoryMonitor.GetBloomFilter(10, 1000) + require.Equal(t, []byte{0x80}, bloomFilter) + require.Equal(t, uint32(1), hashFunctions) + }) + + // Read the contents of the root directory. This should cause us + // to return Bloom filters for a single element. + rootReadDirectoryMonitor := rootUnreadDirectoryMonitor.ReadDirectory() + testutil.RequireEqualProto(t, &resourceusage.InputRootResourceUsage{ + DirectoriesResolved: 1, + DirectoriesRead: 1, + FilesRead: 0, + }, rootUnreadDirectoryMonitor.GetInputRootResourceUsage()) + + t.Run("RootDirectoryRead", func(t *testing.T) { + t.Run("SingleByte", func(t *testing.T) { + bloomFilter, hashFunctions := rootUnreadDirectoryMonitor.GetBloomFilter(1, 1000) + require.Equal(t, []byte{0xc6}, bloomFilter) + require.Equal(t, uint32(5), hashFunctions) + }) + + t.Run("TwoBytes", func(t *testing.T) { + bloomFilter, hashFunctions := rootUnreadDirectoryMonitor.GetBloomFilter(10, 1000) + require.Equal(t, []byte{0x0b, 0x2a}, bloomFilter) + require.Equal(t, uint32(9), hashFunctions) + }) + + t.Run("FourBytes", func(t *testing.T) { + bloomFilter, hashFunctions := rootUnreadDirectoryMonitor.GetBloomFilter(20, 1000) + require.Equal(t, []byte{0xa0, 0xa6, 0xb2, 0xd8}, bloomFilter) + require.Equal(t, uint32(21), hashFunctions) + }) + + t.Run("EightBytes", func(t *testing.T) { + bloomFilter, hashFunctions := rootUnreadDirectoryMonitor.GetBloomFilter(40, 1000) + require.Equal(t, []byte{0x5f, 0x19, 0x50, 0xab, 0x53, 0x4b, 0x19, 0x3b}, bloomFilter) + require.Equal(t, uint32(42), hashFunctions) + }) + + t.Run("SizeConstrained", 
func(t *testing.T) { + // Using 40 bits per elements should normally + // yield an 8 byte Bloom filter. Because the + // maximum is set to 2 bytes, we should get the + // same results as using 10 bits per element. + bloomFilter, hashFunctions := rootUnreadDirectoryMonitor.GetBloomFilter(40, 2) + require.Equal(t, []byte{0x0b, 0x2a}, bloomFilter) + require.Equal(t, uint32(9), hashFunctions) + }) + }) + + // Merely resolving a child directory contained in the root + // directory should not cause the Bloom filter to change. Its + // contents should be read. + childUnreadDirectoryMonitor := rootReadDirectoryMonitor.ResolvedDirectory(path.MustNewComponent("dir")) + testutil.RequireEqualProto(t, &resourceusage.InputRootResourceUsage{ + DirectoriesResolved: 2, + DirectoriesRead: 1, + FilesRead: 0, + }, rootUnreadDirectoryMonitor.GetInputRootResourceUsage()) + + t.Run("ChildDirectoryResolved", func(t *testing.T) { + bloomFilter, hashFunctions := rootUnreadDirectoryMonitor.GetBloomFilter(40, 1000) + require.Equal(t, []byte{0x5f, 0x19, 0x50, 0xab, 0x53, 0x4b, 0x19, 0x3b}, bloomFilter) + require.Equal(t, uint32(42), hashFunctions) + }) + + // Read the child directory's contents. This should cause the + // Bloom filter to get updated. 
+ childReadDirectoryMonitor := childUnreadDirectoryMonitor.ReadDirectory() + testutil.RequireEqualProto(t, &resourceusage.InputRootResourceUsage{ + DirectoriesResolved: 2, + DirectoriesRead: 2, + FilesRead: 0, + }, rootUnreadDirectoryMonitor.GetInputRootResourceUsage()) + + t.Run("ChildDirectoryRead", func(t *testing.T) { + t.Run("SingleByte", func(t *testing.T) { + bloomFilter, hashFunctions := rootUnreadDirectoryMonitor.GetBloomFilter(1, 1000) + require.Equal(t, []byte{0xd4}, bloomFilter) + require.Equal(t, uint32(2), hashFunctions) + }) + + t.Run("TwoBytes", func(t *testing.T) { + bloomFilter, hashFunctions := rootUnreadDirectoryMonitor.GetBloomFilter(5, 1000) + require.Equal(t, uint32(5), hashFunctions) + require.Equal(t, []byte{0x8a, 0x3a}, bloomFilter) + }) + + t.Run("FourBytes", func(t *testing.T) { + bloomFilter, hashFunctions := rootUnreadDirectoryMonitor.GetBloomFilter(10, 1000) + require.Equal(t, []byte{0x92, 0xb7, 0x32, 0xc2}, bloomFilter) + require.Equal(t, uint32(11), hashFunctions) + }) + + t.Run("EightBytes", func(t *testing.T) { + bloomFilter, hashFunctions := rootUnreadDirectoryMonitor.GetBloomFilter(20, 1000) + require.Equal(t, []byte{0x1f, 0x59, 0x51, 0xf0, 0x43, 0xde, 0x18, 0x3f}, bloomFilter) + require.Equal(t, uint32(21), hashFunctions) + }) + }) + + // Read a file in the root directory. 
+ rootReadDirectoryMonitor.ReadFile(path.MustNewComponent("file")) + testutil.RequireEqualProto(t, &resourceusage.InputRootResourceUsage{ + DirectoriesResolved: 2, + DirectoriesRead: 2, + FilesRead: 1, + }, rootUnreadDirectoryMonitor.GetInputRootResourceUsage()) + + t.Run("RootFileRead", func(t *testing.T) { + t.Run("SingleByte", func(t *testing.T) { + bloomFilter, hashFunctions := rootUnreadDirectoryMonitor.GetBloomFilter(1, 1000) + require.Equal(t, []byte{0xf6}, bloomFilter) + require.Equal(t, uint32(2), hashFunctions) + }) + + t.Run("TwoBytes", func(t *testing.T) { + bloomFilter, hashFunctions := rootUnreadDirectoryMonitor.GetBloomFilter(3, 1000) + require.Equal(t, []byte{0x0a, 0x36}, bloomFilter) + require.Equal(t, uint32(3), hashFunctions) + }) + + t.Run("FourBytes", func(t *testing.T) { + bloomFilter, hashFunctions := rootUnreadDirectoryMonitor.GetBloomFilter(6, 1000) + require.Equal(t, []byte{0x92, 0xe5, 0x13, 0xc2}, bloomFilter) + require.Equal(t, uint32(7), hashFunctions) + }) + + t.Run("EightBytes", func(t *testing.T) { + bloomFilter, hashFunctions := rootUnreadDirectoryMonitor.GetBloomFilter(11, 1000) + require.Equal(t, []byte{0x1d, 0xd2, 0x43, 0xf0, 0x63, 0xf2, 0x18, 0x3e}, bloomFilter) + require.Equal(t, uint32(14), hashFunctions) + }) + }) + + // Read a file in the child directory. Even though its name is + // identical to the file in the root directory, the full path + // differs. The resulting Bloom filters should thus differ. 
+ childReadDirectoryMonitor.ReadFile(path.MustNewComponent("file")) + testutil.RequireEqualProto(t, &resourceusage.InputRootResourceUsage{ + DirectoriesResolved: 2, + DirectoriesRead: 2, + FilesRead: 2, + }, rootUnreadDirectoryMonitor.GetInputRootResourceUsage()) + + t.Run("RootFileRead", func(t *testing.T) { + t.Run("SingleByte", func(t *testing.T) { + bloomFilter, hashFunctions := rootUnreadDirectoryMonitor.GetBloomFilter(1, 1000) + require.Equal(t, []byte{0xd6}, bloomFilter) + require.Equal(t, uint32(1), hashFunctions) + }) + + t.Run("TwoBytes", func(t *testing.T) { + bloomFilter, hashFunctions := rootUnreadDirectoryMonitor.GetBloomFilter(3, 1000) + require.Equal(t, []byte{0x4a, 0x2e}, bloomFilter) + require.Equal(t, uint32(2), hashFunctions) + }) + + t.Run("FourBytes", func(t *testing.T) { + bloomFilter, hashFunctions := rootUnreadDirectoryMonitor.GetBloomFilter(5, 1000) + require.Equal(t, []byte{0x36, 0xa5, 0x73, 0xc2}, bloomFilter) + require.Equal(t, uint32(5), hashFunctions) + }) + + t.Run("EightBytes", func(t *testing.T) { + bloomFilter, hashFunctions := rootUnreadDirectoryMonitor.GetBloomFilter(9, 1000) + require.Equal(t, []byte{0x1d, 0xb2, 0x43, 0xf1, 0x61, 0xfa, 0x18, 0x3f}, bloomFilter) + require.Equal(t, uint32(11), hashFunctions) + }) + }) +} diff --git a/pkg/filesystem/access/bloom_filter_reader.go b/pkg/filesystem/access/bloom_filter_reader.go new file mode 100644 index 0000000..95e3bd9 --- /dev/null +++ b/pkg/filesystem/access/bloom_filter_reader.go @@ -0,0 +1,58 @@ +package access + +import ( + "math/bits" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// BloomFilterReader is a helper type for doing lookups against Bloom filters +// that were generated using BloomFilterComputingUnreadDirectoryMonitor. +type BloomFilterReader struct { + bloomFilter []byte + sizeBits int + hashFunctions uint32 +} + +// NewBloomFilterReader checks the validity of a Bloom filter, and +// returns a reader type when valid. 
+func NewBloomFilterReader(bloomFilter []byte, hashFunctions uint32) (*BloomFilterReader, error) { + // Derive the exact size in bits. The last byte contains padding + // of the form "100..." that we need to strip. For that we need + // to count the leading zeros of the last byte. + if len(bloomFilter) == 0 { + return nil, status.Error(codes.InvalidArgument, "Bloom filter is empty") + } + leadingZeros := bits.LeadingZeros8(uint8(bloomFilter[len(bloomFilter)-1])) + if leadingZeros == 8 { + return nil, status.Error(codes.InvalidArgument, "Bloom filter's trailing byte is not properly padded") + } + + // Put an upper limit on the number of hash functions to use, as + // we don't want malformed Bloom filters to lead to deadlocks. + // Even if the number of hash functions is intentionally set + // above this limit, checking fewer is correct. It will merely + // leads to an increase in false positives. + if maximumHashFunctions := uint32(1000); hashFunctions > maximumHashFunctions { + hashFunctions = maximumHashFunctions + } + + return &BloomFilterReader{ + bloomFilter: bloomFilter, + sizeBits: len(bloomFilter)*8 - leadingZeros - 1, + hashFunctions: hashFunctions, + }, nil +} + +// Contains returns whether a path is contained in the Bloom filter. 
+func (r *BloomFilterReader) Contains(pathHashes PathHashes) bool { + hashIterator := pathHashes.Finalize() + for i := uint32(0); i < r.hashFunctions; i++ { + bit := hashIterator.GetNextHash() % uint64(r.sizeBits) + if r.bloomFilter[bit/8]&(1<<(bit%8)) == 0 { + return false + } + } + return true +} diff --git a/pkg/filesystem/access/bloom_filter_reader_test.go b/pkg/filesystem/access/bloom_filter_reader_test.go new file mode 100644 index 0000000..2328894 --- /dev/null +++ b/pkg/filesystem/access/bloom_filter_reader_test.go @@ -0,0 +1,41 @@ +package access_test + +import ( + "testing" + + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/access" + "github.com/buildbarn/bb-storage/pkg/filesystem/path" + "github.com/buildbarn/bb-storage/pkg/testutil" + "github.com/stretchr/testify/require" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func TestBloomFilterReader(t *testing.T) { + t.Run("InvalidBloomFilter", func(t *testing.T) { + _, err := access.NewBloomFilterReader(nil, 123) + testutil.RequireEqualStatus(t, status.Error(codes.InvalidArgument, "Bloom filter is empty"), err) + + _, err = access.NewBloomFilterReader([]byte{0x12, 0x00}, 123) + testutil.RequireEqualStatus(t, status.Error(codes.InvalidArgument, "Bloom filter's trailing byte is not properly padded"), err) + }) + + t.Run("Success", func(t *testing.T) { + // Attempt to read one of the Bloom filters created by + // TestBloomFilterComputingUnreadDirectoryMonitor. + reader, err := access.NewBloomFilterReader([]byte{0x1d, 0xb2, 0x43, 0xf1, 0x61, 0xfa, 0x18, 0x3f}, 11) + require.NoError(t, err) + + require.True(t, reader.Contains(access.RootPathHashes)) + require.True(t, reader.Contains(access.RootPathHashes. + AppendComponent(path.MustNewComponent("dir")))) + require.True(t, reader.Contains(access.RootPathHashes. + AppendComponent(path.MustNewComponent("file")))) + require.True(t, reader.Contains(access.RootPathHashes. + AppendComponent(path.MustNewComponent("dir")). 
+ AppendComponent(path.MustNewComponent("file")))) + require.False(t, reader.Contains(access.RootPathHashes. + AppendComponent(path.MustNewComponent("nonexistent")))) + }) +} diff --git a/pkg/filesystem/access/monitor.go b/pkg/filesystem/access/monitor.go new file mode 100644 index 0000000..8636f1c --- /dev/null +++ b/pkg/filesystem/access/monitor.go @@ -0,0 +1,20 @@ +package access + +import ( + "github.com/buildbarn/bb-storage/pkg/filesystem/path" +) + +// UnreadDirectoryMonitor is used to report file system access activity +// against a directory whose contents have not yet been read. +type UnreadDirectoryMonitor interface { + ReadDirectory() ReadDirectoryMonitor +} + +// ReadDirectoryMonitor is used to report file system access activity +// against a directory whose contents have been read. It is possible to +// report resolution of child directories, and reads against child +// files. +type ReadDirectoryMonitor interface { + ResolvedDirectory(name path.Component) UnreadDirectoryMonitor + ReadFile(name path.Component) +} diff --git a/pkg/filesystem/access/path_hashes.go b/pkg/filesystem/access/path_hashes.go new file mode 100644 index 0000000..6fde0cc --- /dev/null +++ b/pkg/filesystem/access/path_hashes.go @@ -0,0 +1,73 @@ +package access + +import ( + "github.com/buildbarn/bb-storage/pkg/filesystem/path" +) + +const fnv1aPrime = 1099511628211 + +// PathHashes is a set of Bloom filter hashes corresponding to a given +// path in the input root. +type PathHashes struct { + baseHash uint64 +} + +// NewPathHashesFromBaseHash creates a new set of Bloom filter hashes +// corresponding to an explicit base hash value. +func NewPathHashesFromBaseHash(baseHash uint64) PathHashes { + return PathHashes{ + baseHash: baseHash, + } +} + +// GetBaseHash returns the base hash value of a set of Bloom filter +// hashes. This can be used to preserve and restore an instance of +// PathHashes. 
+func (hf PathHashes) GetBaseHash() uint64 { + return hf.baseHash +} + +// RootPathHashes is the set of Bloom filter hashes corresponding to the +// root directory of the input root (i.e., the empty path). +var RootPathHashes = PathHashes{ + // FNV-1a offset basis. + baseHash: 14695981039346656037, +} + +// AppendComponent returns a new set of Bloom filter hashes +// corresponding to a child of the current directory. +func (hf PathHashes) AppendComponent(name path.Component) PathHashes { + // Path separator. + baseHash := (hf.baseHash ^ '/') * fnv1aPrime + + // Filename. + nameStr := name.String() + for i := 0; i < len(nameStr); i++ { + baseHash = (baseHash ^ uint64(nameStr[i])) * fnv1aPrime + } + return PathHashes{ + baseHash: baseHash, + } +} + +// Finalize the set of Bloom filter hashes corresponding to the current +// path, and return a PathHashIterator to extract these hashes. +func (hf PathHashes) Finalize() PathHashIterator { + return PathHashIterator{ + nextHash: hf.baseHash, + } +} + +// PathHashIterator is capable of yielding a sequence of hashes +// corresponding to a given path. +type PathHashIterator struct { + nextHash uint64 +} + +// GetNextHash progresses the PathHashIterator and returns the next hash +// corresponding to a given path. +func (hi *PathHashIterator) GetNextHash() uint64 { + nextHash := hi.nextHash + hi.nextHash = (nextHash ^ '/') * fnv1aPrime + return nextHash +} diff --git a/pkg/filesystem/bitmap_sector_allocator.go b/pkg/filesystem/bitmap_sector_allocator.go new file mode 100644 index 0000000..32d27a1 --- /dev/null +++ b/pkg/filesystem/bitmap_sector_allocator.go @@ -0,0 +1,170 @@ +package filesystem + +import ( + "fmt" + "math/bits" + "sync" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +type bitmapSectorAllocator struct { + lock sync.Mutex + freeBitmap []uint64 // One bits indicate sectors that are free. 
+ nextSector uint32 +} + +const ( + allBits = ^uint64(0) +) + +// NewBitmapSectorAllocator creates a SectorAllocator that stores +// information on which sectors are allocated in a bitmap. Sectors are +// allocated by sequentially scanning the bitmap, continuing where +// previous calls left off. +// +// Due to its simplicity, this allocator would be prone to heavy +// fragmentation if used for general purpose file systems. For FilePool +// this is not a problem, because files have short lifetimes. +// Fragmentation disappears entirely when a worker goes idle, causing +// the FilePool to go empty. +func NewBitmapSectorAllocator(sectorCount uint32) SectorAllocator { + // Construct a bitmap. Make the bitmap a bit too big, so that + // it's always terminated with one or more sectors that are + // permanently in use. This prevents the need for explicit + // bounds checking inside our algorithms. + sa := &bitmapSectorAllocator{ + freeBitmap: make([]uint64, sectorCount/64+1), + } + + // Mark the exact number of sectors as being free. + for i := uint32(0); i < sectorCount/64; i++ { + sa.freeBitmap[i] = allBits + } + sa.freeBitmap[sectorCount/64] = ^(allBits << (sectorCount % 64)) + return sa +} + +func (sa *bitmapSectorAllocator) AllocateContiguous(maximum int) (uint32, int, error) { + sa.lock.Lock() + defer sa.lock.Unlock() + + // Allocate sectors from the current bitmap word. + split := sa.nextSector / 64 + if m := sa.freeBitmap[split] & (allBits << (sa.nextSector % 64)); m != 0 { + return sa.allocateAt(split, m, maximum) + } + + // Allocate sectors from the current location to the end. + for i := split + 1; i < uint32(len(sa.freeBitmap)); i++ { + if m := sa.freeBitmap[i]; m != 0 { + return sa.allocateAt(i, m, maximum) + } + } + + // Allocate sectors from the beginning to the current location. 
+ for i := uint32(0); i <= split; i++ { + if m := sa.freeBitmap[i]; m != 0 { + return sa.allocateAt(i, m, maximum) + } + } + return 0, 0, status.Error(codes.ResourceExhausted, "No free sectors available") +} + +func (sa *bitmapSectorAllocator) allocateAt(index uint32, mask uint64, maximum int) (uint32, int, error) { + // Compute the first sector at which to start allocating. + initialShift := bits.TrailingZeros64(mask) + firstSector := index*64 + uint32(initialShift) + + // Allocate sectors from the first bitmap word. + allocated := bits.TrailingZeros64(^(mask >> initialShift)) + if allocated > maximum { + allocated = maximum + } + sa.freeBitmap[index] &^= ^(allBits << allocated) << initialShift + + if initialShift+allocated == 64 { + // More sectors are requested than available in the + // first bitmap word. Fully allocate as many bitmap + // words as possible. + index++ + maximum -= allocated + for maximum >= 64 && sa.freeBitmap[index] == allBits { + sa.freeBitmap[index] = 0 + index++ + maximum -= 64 + allocated += 64 + } + + // Allocate remaining sectors from a final bitmap word. + available := bits.TrailingZeros64(^sa.freeBitmap[index]) + if available > maximum { + available = maximum + } + sa.freeBitmap[index] &= allBits << available + allocated += available + } + + sa.nextSector = firstSector + uint32(allocated) + return firstSector + 1, allocated, nil +} + +func (sa *bitmapSectorAllocator) freeWithMask(index int, mask uint64) { + if m := sa.freeBitmap[index] & mask; m != 0 { + panic(fmt.Sprintf("Attempted to free sectors %x at index %d, even though they are not allocated", m, index)) + } + sa.freeBitmap[index] |= mask +} + +func (sa *bitmapSectorAllocator) FreeContiguous(firstSector uint32, count int) { + firstSector-- + + sa.lock.Lock() + defer sa.lock.Unlock() + + // Free sectors from the initial bitmap word. 
+ mask := allBits + if count < 64 { + mask = ^(allBits << count) + } + offsetWithinFirstWord := int(firstSector % 64) + index := int(firstSector / 64) + sa.freeWithMask(index, mask<<offsetWithinFirstWord) + if alreadyFreed := 64 - offsetWithinFirstWord; count > alreadyFreed { + // More sectors are freed than available in the first + // bitmap word. Fully free as many bitmap words as + // possible. + count -= alreadyFreed + index++ + for count >= 64 { + if sa.freeBitmap[index] != 0 { + panic(fmt.Sprintf("Attempted to free sectors index %d, even though they are not allocated", index)) + } + sa.freeBitmap[index] = allBits + index++ + count -= 64 + } + + // Free remaining sectors from the final bitmap word. + sa.freeWithMask(index, ^(allBits << count)) + } +} + +func (sa *bitmapSectorAllocator) FreeList(sectors []uint32) { + sa.lock.Lock() + defer sa.lock.Unlock() + + for _, sector := range sectors { + if sector != 0 { + sector-- + i := sector / 64 + b := sector % 64 + if sa.freeBitmap[i]&(1< 0 { + f.fp.sectorAllocator.FreeList(f.sectors) + } + f.fp = nil + f.sectors = nil + return nil +} + +// toDeviceOffset converts a sector number and offset within a sector to +// a byte offset on the block device. +func (f *blockDeviceBackedFile) toDeviceOffset(sector uint32, offsetWithinSector int) int64 { + return int64(sector-1)*int64(f.fp.sectorSizeBytes) + int64(offsetWithinSector) +} + +// getInitialSectorIndex is called by ReadAt() and WriteAt() to +// determine which sectors in a file are affected by the operation.
+func (f *blockDeviceBackedFile) getInitialSectorIndex(off int64, n int) (int, int, int) { + firstSectorIndex := int(off / int64(f.fp.sectorSizeBytes)) + endSectorIndex := int((uint64(off) + uint64(n) + uint64(f.fp.sectorSizeBytes) - 1) / uint64(f.fp.sectorSizeBytes)) + if endSectorIndex > len(f.sectors) { + endSectorIndex = len(f.sectors) + } + offsetWithinSector := int(off % int64(f.fp.sectorSizeBytes)) + return firstSectorIndex, endSectorIndex - 1, offsetWithinSector +} + +// incrementSectorIndex is called by ReadAt() and WriteAt() to progress +// to the next sequence of contiguously stored sectors. +func (f *blockDeviceBackedFile) incrementSectorIndex(sectorIndex, offsetWithinSector *int, n int) { + if (*offsetWithinSector+n)%f.fp.sectorSizeBytes != 0 { + panic("Read or write did not finish at sector boundary") + } + *sectorIndex += (*offsetWithinSector + n) / f.fp.sectorSizeBytes + *offsetWithinSector = 0 +} + +// getSectorsContiguous converts an index of a sector in a file to the +// on-disk sector number. It also computes how many sectors are stored +// contiguously starting at this point. +func (f *blockDeviceBackedFile) getSectorsContiguous(firstSectorIndex, lastSectorIndex int) (uint32, int) { + firstSector := f.sectors[firstSectorIndex] + nContiguous := 1 + if firstSector == 0 { + // A hole in a sparse file. Determine the size of the hole. + for firstSectorIndex+nContiguous <= lastSectorIndex && + f.sectors[firstSectorIndex+nContiguous] == 0 { + nContiguous++ + } + } else { + // A region that contains actual data. Determine how + // many sectors are contiguous. + for firstSectorIndex+nContiguous <= lastSectorIndex && + uint64(f.sectors[firstSectorIndex+nContiguous]) == uint64(firstSector)+uint64(nContiguous) { + nContiguous++ + } + } + return firstSector, nContiguous +} + +// limitBufferToSectorBoundary limits the size of a buffer to a given +// number of sectors. 
This function is used to restrict the size of a +// write to just that part that can be written contiguously. +func (f *blockDeviceBackedFile) limitBufferToSectorBoundary(p []byte, sectorCount, offsetWithinSector int) []byte { + if n := sectorCount*f.fp.sectorSizeBytes - offsetWithinSector; n < len(p) { + return p[:n] + } + return p +} + +func (f *blockDeviceBackedFile) GetNextRegionOffset(off int64, regionType filesystem.RegionType) (int64, error) { + // Short circuit calls that are out of bounds. + if off < 0 { + return 0, status.Errorf(codes.InvalidArgument, "Negative seek offset: %d", off) + } + if uint64(off) >= f.sizeBytes { + return 0, io.EOF + } + + sectorSizeBytes := int64(f.fp.sectorSizeBytes) + sectorIndex := int(off / sectorSizeBytes) + switch regionType { + case filesystem.Data: + if sectorIndex >= len(f.sectors) { + // Inside the hole at the end of the file. + return 0, io.EOF + } + if f.sectors[sectorIndex] != 0 { + // Already inside a sector containing data. + return off, nil + } + // Find the next sector containing data. + for { + sectorIndex++ + if f.sectors[sectorIndex] != 0 { + return int64(sectorIndex) * sectorSizeBytes, nil + } + } + case filesystem.Hole: + if sectorIndex >= len(f.sectors) || f.sectors[sectorIndex] == 0 { + // Already inside a hole. + return off, nil + } + // Find the next sector containing a hole. + for sectorIndex++; sectorIndex < len(f.sectors); sectorIndex++ { + if f.sectors[sectorIndex] == 0 { + return int64(sectorIndex) * sectorSizeBytes, nil + } + } + if allSectors := int64(len(f.sectors)) * sectorSizeBytes; allSectors < int64(f.sizeBytes) { + // File ends with a hole. + return allSectors, nil + } + // File ends in the middle of a sector containing data. + return int64(f.sizeBytes), nil + default: + panic("Unknown region type") + } +} + +// readFromSectors performs a single read against the block device. It +// attempts to read as much data into the output buffer as is possible +// in a single read operation. 
If the file is fragmented, multiple reads +// are necessary, requiring this function to be called repeatedly. +func (f *blockDeviceBackedFile) readFromSectors(p []byte, sectorIndex, lastSectorIndex, offsetWithinSector int) (int, error) { + if sectorIndex >= len(f.sectors) { + // Attempted to read from a hole located at the + // end of the file. Fill up all of the remaining + // space with zero bytes. + for i := 0; i < len(p); i++ { + p[i] = 0 + } + return len(p), nil + } + + sector, sectorsToRead := f.getSectorsContiguous(sectorIndex, lastSectorIndex) + p = f.limitBufferToSectorBoundary(p, sectorsToRead, offsetWithinSector) + if sector == 0 { + // Attempted to read from a sparse region of the file. + // Fill in zero bytes. + for i := 0; i < len(p); i++ { + p[i] = 0 + } + return len(p), nil + } + + // Attempted to read from a region of the file that contains + // actual data. Read data from the block device. + n, err := f.fp.blockDevice.ReadAt(p, f.toDeviceOffset(sector, offsetWithinSector)) + if err != nil && err != io.EOF { + return n, err + } + if n != len(p) { + return n, status.Errorf(codes.Internal, "Read against block device returned %d bytes, while %d bytes were expected", n, len(p)) + } + return n, nil +} + +func (f *blockDeviceBackedFile) ReadAt(p []byte, off int64) (int, error) { + // Short circuit calls that are out of bounds. + if off < 0 { + return 0, status.Errorf(codes.InvalidArgument, "Negative read offset: %d", off) + } + if len(p) == 0 { + return 0, nil + } + + // Limit the read operation to the size of the file. Already + // determine whether this operation will return nil or io.EOF. + if uint64(off) >= f.sizeBytes { + return 0, io.EOF + } + var success error + if end := uint64(off) + uint64(len(p)); end >= f.sizeBytes { + success = io.EOF + p = p[:f.sizeBytes-uint64(off)] + } + + // As the file may be stored on disk non-contiguously or may be + // a sparse file with holes, the read may need to be decomposed + // into smaller ones. 
Each loop iteration performs one read. + sectorIndex, lastSectorIndex, offsetWithinSector := f.getInitialSectorIndex(off, len(p)) + nTotal := 0 + for { + n, err := f.readFromSectors(p, sectorIndex, lastSectorIndex, offsetWithinSector) + nTotal += n + p = p[n:] + if err != nil { + return nTotal, err + } + if len(p) == 0 { + return nTotal, success + } + f.incrementSectorIndex(&sectorIndex, &offsetWithinSector, n) + } +} + +// truncateSectors truncates a file to a given number of sectors. +func (f *blockDeviceBackedFile) truncateSectors(sectorCount int) { + if len(f.sectors) > sectorCount { + f.fp.sectorAllocator.FreeList(f.sectors[sectorCount:]) + f.sectors = f.sectors[:sectorCount] + + // Ensure that no hole remains at the end, as that would + // lead to unnecessary fragmentation when growing the + // file again. + for len(f.sectors) > 0 && f.sectors[len(f.sectors)-1] == 0 { + f.sectors = f.sectors[:len(f.sectors)-1] + } + } +} + +func (f *blockDeviceBackedFile) Sync() error { + // Because FilePool does not provide any persistency, there is + // no need to synchronize any data. + return nil +} + +func (f *blockDeviceBackedFile) Truncate(size int64) error { + if size < 0 { + return status.Errorf(codes.InvalidArgument, "Negative truncation size: %d", size) + } + + sectorIndex := int(size / int64(f.fp.sectorSizeBytes)) + offsetWithinSector := int(size % int64(f.fp.sectorSizeBytes)) + if offsetWithinSector == 0 { + // Truncating to an exact number of sectors. + f.truncateSectors(sectorIndex) + } else { + // Truncating to partially into a sector. + if uint64(size) < f.sizeBytes && sectorIndex < len(f.sectors) && f.sectors[sectorIndex] != 0 { + // The file is being shrunk and the new last + // sector is not a hole. Zero the trailing part + // of the last sector to ensure that growing the + // file later on doesn't bring back old data.
+ sector := f.sectors[sectorIndex] + zeroes := f.fp.zeroSector[:f.fp.sectorSizeBytes-offsetWithinSector] + if diff := f.sizeBytes - uint64(size); uint64(len(zeroes)) > diff { + zeroes = zeroes[:diff] + } + if _, err := f.fp.blockDevice.WriteAt(zeroes, f.toDeviceOffset(sector, offsetWithinSector)); err != nil { + return err + } + } + f.truncateSectors(sectorIndex + 1) + } + + f.sizeBytes = uint64(size) + return nil +} + +// writeToNewSectors is used to write data into new sectors. This +// function is called when holes in a sparse file are filled up or when +// data is appended to the end of a file. +func (f *blockDeviceBackedFile) writeToNewSectors(p []byte, offsetWithinSector int) (int, uint32, int, error) { + // Allocate space to store the data. + sectorsToAllocate := int((uint64(offsetWithinSector) + uint64(len(p)) + uint64(f.fp.sectorSizeBytes) - 1) / uint64(f.fp.sectorSizeBytes)) + firstSector, sectorsAllocated, err := f.fp.sectorAllocator.AllocateContiguous(sectorsToAllocate) + if err != nil { + return 0, 0, 0, err + } + + // We may not have been able to allocate the desired amount of + // space contiguously. Restrict the write to just the space we + // managed to allocate. + p = f.limitBufferToSectorBoundary(p, sectorsAllocated, offsetWithinSector) + nWritten := len(p) + + // Write the first sector separately when we need to introduce + // leading zero padding. + sector := firstSector + if offsetWithinSector > 0 { + buf := make([]byte, f.fp.sectorSizeBytes) + nWritten := copy(buf[offsetWithinSector:], p) + if _, err := f.fp.blockDevice.WriteAt(buf, f.toDeviceOffset(sector, 0)); err != nil { + f.fp.sectorAllocator.FreeContiguous(firstSector, sectorsAllocated) + return 0, 0, 0, err + } + + p = p[nWritten:] + sector++ + } + + // Write as many sectors to the block device as possible. 
+ if fullSectors := len(p) / f.fp.sectorSizeBytes; fullSectors > 0 { + fullSectorsSize := fullSectors * f.fp.sectorSizeBytes + if _, err := f.fp.blockDevice.WriteAt(p[:fullSectorsSize], f.toDeviceOffset(sector, 0)); err != nil { + f.fp.sectorAllocator.FreeContiguous(firstSector, sectorsAllocated) + return 0, 0, 0, err + } + p = p[fullSectorsSize:] + sector += uint32(fullSectors) + } + + // Write the last sector separately when we need to introduce + // trailing zero padding. + if len(p) > 0 { + buf := make([]byte, f.fp.sectorSizeBytes) + copy(buf, p) + if _, err := f.fp.blockDevice.WriteAt(buf, f.toDeviceOffset(sector, 0)); err != nil { + f.fp.sectorAllocator.FreeContiguous(firstSector, sectorsAllocated) + return 0, 0, 0, err + } + } + return nWritten, firstSector, sectorsAllocated, nil +} + +// insertSectorsContiguous inserts a series of contiguous sectors into a +// file. This function is used to update a file after appending data to +// it or filling up a hole in a sparse file. +func (f *blockDeviceBackedFile) insertSectorsContiguous(firstSectorIndex int, firstSector uint32, count int) { + for i := 0; i < count; i++ { + sectorIndex := firstSectorIndex + i + if f.sectors[sectorIndex] != 0 { + panic(fmt.Sprintf("Attempted to replace existing sector at index %d", sectorIndex)) + } + f.sectors[sectorIndex] = firstSector + uint32(i) + } +} + +// writeToSectors performs a single write against the block device. It +// attempts to write as much data from the input buffer as is possible +// in a single write operation. If the file is fragmented, multiple +// writes are necessary, requiring this function to be called +// repeatedly. +func (f *blockDeviceBackedFile) writeToSectors(p []byte, sectorIndex, lastSectorIndex, offsetWithinSector int) (int, error) { + if sectorIndex >= len(f.sectors) { + // Attempted to write past the end-of-file or within a + // hole located at the end of a sparse file. Allocate + // space and grow the file. 
+ bytesWritten, firstSector, sectorsAllocated, err := f.writeToNewSectors(p, offsetWithinSector) + if err != nil { + return 0, err + } + f.sectors = append(f.sectors, make([]uint32, sectorIndex+sectorsAllocated-len(f.sectors))...) + f.insertSectorsContiguous(sectorIndex, firstSector, sectorsAllocated) + return bytesWritten, nil + } + + sector, sectorsToWrite := f.getSectorsContiguous(sectorIndex, lastSectorIndex) + p = f.limitBufferToSectorBoundary(p, sectorsToWrite, offsetWithinSector) + if sector == 0 { + // Attempted to write to a hole within a sparse file. + // Allocate space and insert sectors into the file. + bytesWritten, firstSector, sectorsAllocated, err := f.writeToNewSectors(p, offsetWithinSector) + if err != nil { + return 0, err + } + f.insertSectorsContiguous(sectorIndex, firstSector, sectorsAllocated) + return bytesWritten, nil + } + + // Attempted to overwrite existing sectors of the file. + return f.fp.blockDevice.WriteAt(p, f.toDeviceOffset(sector, offsetWithinSector)) +} + +func (f *blockDeviceBackedFile) WriteAt(p []byte, off int64) (int, error) { + // Short circuit calls that are out of bounds. + if off < 0 { + return 0, status.Errorf(codes.InvalidArgument, "Negative write offset: %d", off) + } + if len(p) == 0 { + return 0, nil + } + + // As the file may be stored on disk non-contiguously or may be + // a sparse file with holes, the write may need to be decomposed + // into smaller ones. Each loop iteration performs one write. + sectorIndex, lastSectorIndex, offsetWithinSector := f.getInitialSectorIndex(off, len(p)) + nTotal := 0 + for { + n, err := f.writeToSectors(p, sectorIndex, lastSectorIndex, offsetWithinSector) + nTotal += n + p = p[n:] + if len(p) == 0 || err != nil { + // Adjust file size if needed. 
+ if newSize := uint64(off) + uint64(nTotal); nTotal > 0 && f.sizeBytes < newSize { + f.sizeBytes = newSize + } + return nTotal, err + } + f.incrementSectorIndex(&sectorIndex, &offsetWithinSector, n) + } +} diff --git a/pkg/filesystem/block_device_backed_file_pool_test.go b/pkg/filesystem/block_device_backed_file_pool_test.go new file mode 100644 index 0000000..c181559 --- /dev/null +++ b/pkg/filesystem/block_device_backed_file_pool_test.go @@ -0,0 +1,321 @@ +package filesystem_test + +import ( + "io" + "math" + "testing" + + "github.com/buildbarn/bb-remote-execution/internal/mock" + re_filesystem "github.com/buildbarn/bb-remote-execution/pkg/filesystem" + "github.com/buildbarn/bb-storage/pkg/filesystem" + "github.com/buildbarn/bb-storage/pkg/testutil" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func TestBlockDeviceBackedFilePool(t *testing.T) { + ctrl := gomock.NewController(t) + + blockDevice := mock.NewMockBlockDevice(ctrl) + sectorAllocator := mock.NewMockSectorAllocator(ctrl) + pool := re_filesystem.NewBlockDeviceBackedFilePool(blockDevice, sectorAllocator, 16) + + t.Run("ReadEmptyFile", func(t *testing.T) { + // Test that reads on an empty file work as expected. 
+ f, err := pool.NewFile() + require.NoError(t, err) + + var p [10]byte + n, err := f.ReadAt(p[:], math.MinInt64) + require.Equal(t, 0, n) + require.Equal(t, status.Error(codes.InvalidArgument, "Negative read offset: -9223372036854775808"), err) + + n, err = f.ReadAt(p[:], -1) + require.Equal(t, 0, n) + require.Equal(t, status.Error(codes.InvalidArgument, "Negative read offset: -1"), err) + + n, err = f.ReadAt(p[:], 0) + require.Equal(t, 0, n) + require.Equal(t, io.EOF, err) + + n, err = f.ReadAt(p[:], 1) + require.Equal(t, 0, n) + require.Equal(t, io.EOF, err) + + n, err = f.ReadAt(p[:], math.MaxInt64) + require.Equal(t, 0, n) + require.Equal(t, io.EOF, err) + + require.NoError(t, f.Close()) + }) + + t.Run("Truncate", func(t *testing.T) { + f, err := pool.NewFile() + require.NoError(t, err) + + // Invalid size. + require.Equal(t, status.Error(codes.InvalidArgument, "Negative truncation size: -9223372036854775808"), f.Truncate(-9223372036854775808)) + require.Equal(t, status.Error(codes.InvalidArgument, "Negative truncation size: -1"), f.Truncate(-1)) + + // Growing and shrinking an empty file should not + // cause any I/O, as it contains no used sectors. + require.NoError(t, f.Truncate(16*1024*1024*1024)) + require.NoError(t, f.Truncate(0)) + + // Add some contents to the file to perform further + // testing. + sectorAllocator.EXPECT().AllocateContiguous(1).Return(uint32(5), 1, nil) + blockDevice.EXPECT().WriteAt([]byte("\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00Foo\x00\x00\x00"), int64(64)).Return(16, nil) + n, err := f.WriteAt([]byte("Foo"), 90) + require.Equal(t, 3, n) + require.NoError(t, err) + + // Growing the file should not cause any I/O. + require.NoError(t, f.Truncate(100)) + + // Shrinking the file should cause trailing bytes in the + // last sector to be zeroed. Simulate the case where + // this fails. The file size should remain as is. + blockDevice.EXPECT().WriteAt([]byte("\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"), int64(69)). 
+ Return(0, status.Error(codes.Internal, "Disk on fire")) + require.Equal(t, status.Error(codes.Internal, "Disk on fire"), f.Truncate(85)) + + // Perform truncations that do succeed. + require.NoError(t, f.Truncate(96)) + blockDevice.EXPECT().WriteAt([]byte("\x00\x00\x00\x00"), int64(76)).Return(16, nil) + require.NoError(t, f.Truncate(92)) + blockDevice.EXPECT().WriteAt([]byte("\x00\x00"), int64(74)).Return(16, nil) + require.NoError(t, f.Truncate(90)) + blockDevice.EXPECT().WriteAt([]byte("\x00\x00\x00\x00\x00\x00\x00\x00\x00"), int64(65)).Return(16, nil) + require.NoError(t, f.Truncate(81)) + + // Continuing to shrink the file should eventually cause + // the final sector to be released. + sectorAllocator.EXPECT().FreeList([]uint32{5}) + require.NoError(t, f.Truncate(80)) + + require.NoError(t, f.Close()) + }) + + t.Run("WritesAndReadOnSingleSector", func(t *testing.T) { + f, err := pool.NewFile() + require.NoError(t, err) + + // The initial write to a sector should cause the full + // sector to be written. This ensures that no + // unnecessary reads are triggered against storage and + // that any leading bytes are zeroed. + sectorAllocator.EXPECT().AllocateContiguous(1).Return(uint32(12), 1, nil) + blockDevice.EXPECT().WriteAt([]byte("\x00\x00Hello\x00\x00\x00\x00\x00\x00\x00\x00\x00"), int64(176)).Return(16, nil) + n, err := f.WriteAt([]byte("Hello"), 2) + require.Equal(t, 5, n) + require.NoError(t, err) + + // Successive writes to the same sector should not add + // any null byte padding. All of that work was already + // done by the previous write operation. + blockDevice.EXPECT().WriteAt([]byte("world"), int64(184)).Return(5, nil) + n, err = f.WriteAt([]byte("world"), 8) + require.Equal(t, 5, n) + require.NoError(t, err) + + // Reads should be limited to the end-of-file. 
+ blockDevice.EXPECT().ReadAt(gomock.Len(13), int64(176)).DoAndReturn( + func(p []byte, off int64) (int, error) { + return copy(p, []byte("\x00\x00Hello\x00world")), nil + }) + var buf [16]byte + n, err = f.ReadAt(buf[:], 0) + require.Equal(t, 13, n) + require.Equal(t, io.EOF, err) + require.Equal(t, []byte("\x00\x00Hello\x00world"), buf[:n]) + + sectorAllocator.EXPECT().FreeList([]uint32{12}) + require.NoError(t, f.Close()) + }) + + t.Run("WriteFragmentation", func(t *testing.T) { + f, err := pool.NewFile() + require.NoError(t, err) + + // Simulate the case where 137 bytes of data needs to be + // written, requiring 10 sectors of storage space. The + // sector allocator is not able to give us 10 contiguous + // sectors, meaning multiple allocations of smaller + // regions are performed. + sectorAllocator.EXPECT().AllocateContiguous(10).Return(uint32(75), 3, nil) + blockDevice.EXPECT().WriteAt([]byte("\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00Lorem "), int64(1184)).Return(16, nil) + blockDevice.EXPECT().WriteAt([]byte("ipsum dolor sit amet, consectetu"), int64(1200)).Return(32, nil) + + sectorAllocator.EXPECT().AllocateContiguous(7).Return(uint32(21), 4, nil) + blockDevice.EXPECT().WriteAt([]byte("r adipiscing elit. Suspendisse quis mollis eros, sit amet pellen"), int64(320)).Return(64, nil) + + sectorAllocator.EXPECT().AllocateContiguous(3).Return(uint32(105), 2, nil) + blockDevice.EXPECT().WriteAt([]byte("tesque lectus. Quisque non ex ni"), int64(1664)).Return(32, nil) + + sectorAllocator.EXPECT().AllocateContiguous(1).Return(uint32(40), 1, nil) + blockDevice.EXPECT().WriteAt([]byte("sl.\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"), int64(624)).Return(16, nil) + + n, err := f.WriteAt([]byte( + "Lorem ipsum dolor sit amet, consectetur adipiscing elit. "+ + "Suspendisse quis mollis eros, sit amet pellentesque lectus. 
 "+ "Quisque non ex nisl."), 42) + require.Equal(t, 137, n) + require.NoError(t, err) + + sectorAllocator.EXPECT().FreeList([]uint32{0, 0, 75, 76, 77, 21, 22, 23, 24, 105, 106, 40}) + require.NoError(t, f.Close()) + }) + + t.Run("WriteSectorAllocatorFailure", func(t *testing.T) { + f, err := pool.NewFile() + require.NoError(t, err) + + // Failure to allocate sectors should cause the write to + // fail as well. Any previously allocated sectors should + // still be attached to the file and freed later on. + sectorAllocator.EXPECT().AllocateContiguous(5).Return(uint32(75), 1, nil) + blockDevice.EXPECT().WriteAt([]byte("\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00Lorem "), int64(1184)).Return(16, nil) + + sectorAllocator.EXPECT().AllocateContiguous(4).Return(uint32(0), 0, status.Error(codes.ResourceExhausted, "Out of storage space")) + + n, err := f.WriteAt([]byte("Lorem ipsum dolor sit amet, consectetur adipiscing elit."), 42) + require.Equal(t, 6, n) + require.Equal(t, status.Error(codes.ResourceExhausted, "Out of storage space"), err) + + sectorAllocator.EXPECT().FreeList([]uint32{0, 0, 75}) + require.NoError(t, f.Close()) + }) + + t.Run("WriteIOFailure", func(t *testing.T) { + f, err := pool.NewFile() + require.NoError(t, err) + + // Write failures to freshly allocated sectors should + // cause them to not be attached to the file. The + // sectors should be released immediately. 
+ sectorAllocator.EXPECT().AllocateContiguous(5).Return(uint32(75), 1, nil) + blockDevice.EXPECT().WriteAt([]byte("\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00Lorem "), int64(1184)).Return(16, nil) + + sectorAllocator.EXPECT().AllocateContiguous(4).Return(uint32(39), 1, nil) + blockDevice.EXPECT().WriteAt([]byte("ipsum dolor sit "), int64(608)).Return(0, status.Error(codes.Internal, "Disk failure")) + sectorAllocator.EXPECT().FreeContiguous(uint32(39), 1) + + n, err := f.WriteAt([]byte("Lorem ipsum dolor sit amet, consectetur adipiscing elit."), 42) + require.Equal(t, 6, n) + require.Equal(t, status.Error(codes.Internal, "Disk failure"), err) + + sectorAllocator.EXPECT().FreeList([]uint32{0, 0, 75}) + require.NoError(t, f.Close()) + }) + + t.Run("GetNextRegionOffset", func(t *testing.T) { + // Test the behavior on empty files. + f, err := pool.NewFile() + require.NoError(t, err) + + _, err = f.GetNextRegionOffset(-1, filesystem.Data) + require.Equal(t, status.Error(codes.InvalidArgument, "Negative seek offset: -1"), err) + _, err = f.GetNextRegionOffset(-1, filesystem.Hole) + require.Equal(t, status.Error(codes.InvalidArgument, "Negative seek offset: -1"), err) + + _, err = f.GetNextRegionOffset(0, filesystem.Data) + require.Equal(t, io.EOF, err) + _, err = f.GetNextRegionOffset(0, filesystem.Hole) + require.Equal(t, io.EOF, err) + + _, err = f.GetNextRegionOffset(1, filesystem.Data) + require.Equal(t, io.EOF, err) + _, err = f.GetNextRegionOffset(1, filesystem.Hole) + require.Equal(t, io.EOF, err) + + // Test the behavior on a sparse file that starts with a + // hole and ends with data. 
+ sectorAllocator.EXPECT().AllocateContiguous(1).Return(uint32(5), 1, nil) + blockDevice.EXPECT().WriteAt([]byte("Hello\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"), int64(64)).Return(16, nil) + n, err := f.WriteAt([]byte("Hello"), 128) + require.Equal(t, 5, n) + require.NoError(t, err) + + nextOffset, err := f.GetNextRegionOffset(0, filesystem.Data) + require.NoError(t, err) + require.Equal(t, int64(128), nextOffset) + nextOffset, err = f.GetNextRegionOffset(0, filesystem.Hole) + require.NoError(t, err) + require.Equal(t, int64(0), nextOffset) + + nextOffset, err = f.GetNextRegionOffset(1, filesystem.Data) + require.NoError(t, err) + require.Equal(t, int64(128), nextOffset) + nextOffset, err = f.GetNextRegionOffset(1, filesystem.Hole) + require.NoError(t, err) + require.Equal(t, int64(1), nextOffset) + + nextOffset, err = f.GetNextRegionOffset(128-1, filesystem.Data) + require.NoError(t, err) + require.Equal(t, int64(128), nextOffset) + nextOffset, err = f.GetNextRegionOffset(128-1, filesystem.Hole) + require.NoError(t, err) + require.Equal(t, int64(128-1), nextOffset) + + nextOffset, err = f.GetNextRegionOffset(128, filesystem.Data) + require.NoError(t, err) + require.Equal(t, int64(128), nextOffset) + nextOffset, err = f.GetNextRegionOffset(128, filesystem.Hole) + require.NoError(t, err) + require.Equal(t, int64(128+5), nextOffset) + + nextOffset, err = f.GetNextRegionOffset(128+4, filesystem.Data) + require.NoError(t, err) + require.Equal(t, int64(128+4), nextOffset) + nextOffset, err = f.GetNextRegionOffset(128+4, filesystem.Hole) + require.NoError(t, err) + require.Equal(t, int64(128+5), nextOffset) + + _, err = f.GetNextRegionOffset(128+5, filesystem.Data) + require.Equal(t, io.EOF, err) + _, err = f.GetNextRegionOffset(128+5, filesystem.Hole) + require.Equal(t, io.EOF, err) + + // Test the behavior on a sparse file that ends with a hole. 
+ require.NoError(t, f.Truncate(384)) + + nextOffset, err = f.GetNextRegionOffset(128, filesystem.Data) + require.NoError(t, err) + require.Equal(t, int64(128), nextOffset) + nextOffset, err = f.GetNextRegionOffset(128, filesystem.Hole) + require.NoError(t, err) + require.Equal(t, int64(128+16), nextOffset) + + _, err = f.GetNextRegionOffset(256, filesystem.Data) + require.Equal(t, io.EOF, err) + nextOffset, err = f.GetNextRegionOffset(256, filesystem.Hole) + require.NoError(t, err) + require.Equal(t, int64(256), nextOffset) + + _, err = f.GetNextRegionOffset(384-1, filesystem.Data) + require.Equal(t, io.EOF, err) + nextOffset, err = f.GetNextRegionOffset(384-1, filesystem.Hole) + require.NoError(t, err) + require.Equal(t, int64(384-1), nextOffset) + + _, err = f.GetNextRegionOffset(384, filesystem.Data) + require.Equal(t, io.EOF, err) + _, err = f.GetNextRegionOffset(384, filesystem.Hole) + require.Equal(t, io.EOF, err) + + sectorAllocator.EXPECT().FreeList([]uint32{0, 0, 0, 0, 0, 0, 0, 0, 5}) + require.NoError(t, f.Close()) + }) + + t.Run("WriteAt", func(t *testing.T) { + f, err := pool.NewFile() + require.NoError(t, err) + + _, err = f.WriteAt([]byte{0}, -1) + testutil.RequireEqualStatus(t, status.Error(codes.InvalidArgument, "Negative write offset: -1"), err) + }) +} diff --git a/pkg/filesystem/configuration.go b/pkg/filesystem/configuration.go new file mode 100644 index 0000000..330b84d --- /dev/null +++ b/pkg/filesystem/configuration.go @@ -0,0 +1,55 @@ +package filesystem + +import ( + "math" + + pb "github.com/buildbarn/bb-remote-execution/pkg/proto/configuration/filesystem" + "github.com/buildbarn/bb-storage/pkg/blockdevice" + "github.com/buildbarn/bb-storage/pkg/filesystem" + "github.com/buildbarn/bb-storage/pkg/util" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// NewFilePoolFromConfiguration constructs a FilePool based on +// parameters provided in a configuration file. 
+func NewFilePoolFromConfiguration(configuration *pb.FilePoolConfiguration) (FilePool, error) { + if configuration == nil { + // No configuration provided. Because there are setups + // in which it's not required to use a file pool, let's + // return an empty file pool by default. + return EmptyFilePool, nil + } + + var filePool FilePool + switch backend := configuration.Backend.(type) { + case *pb.FilePoolConfiguration_InMemory: + filePool = InMemoryFilePool + case *pb.FilePoolConfiguration_DirectoryPath: + directory, err := filesystem.NewLocalDirectory(backend.DirectoryPath) + if err != nil { + return nil, util.StatusWrapf(err, "Failed to open directory %#v", backend.DirectoryPath) + } + if err := directory.RemoveAllChildren(); err != nil { + directory.Close() + return nil, util.StatusWrapf(err, "Failed to empty out directory %#v", backend.DirectoryPath) + } + filePool = NewDirectoryBackedFilePool(directory) + case *pb.FilePoolConfiguration_BlockDevice: + blockDevice, sectorSizeBytes, sectorCount, err := blockdevice.NewBlockDeviceFromConfiguration(backend.BlockDevice, true) + if err != nil { + return nil, util.StatusWrap(err, "Failed to create block device") + } + if sectorCount > math.MaxUint32 { + return nil, util.StatusWrapf(err, "Block device has %d sectors, while only %d may be addressed", sectorCount, uint32(math.MaxUint32)) + } + filePool = NewBlockDeviceBackedFilePool( + blockDevice, + NewBitmapSectorAllocator(uint32(sectorCount)), + sectorSizeBytes) + default: + return nil, status.Error(codes.InvalidArgument, "Configuration did not contain a supported file pool backend") + } + return NewMetricsFilePool(filePool), nil +} diff --git a/pkg/filesystem/directory_backed_file_pool.go b/pkg/filesystem/directory_backed_file_pool.go new file mode 100644 index 0000000..2cbe127 --- /dev/null +++ b/pkg/filesystem/directory_backed_file_pool.go @@ -0,0 +1,106 @@ +package filesystem + +import ( + "io" + "os" + "strconv" + "sync/atomic" + + 
"github.com/buildbarn/bb-storage/pkg/filesystem" + "github.com/buildbarn/bb-storage/pkg/filesystem/path" +) + +type directoryBackedFilePool struct { + directory filesystem.Directory + + nextID atomic.Uint64 +} + +// NewDirectoryBackedFilePool creates a FilePool that stores all data +// written to files into a single directory on disk. Files stored in the +// underlying directory are simply identified by an incrementing number. +// +// As many files may exist at a given point in time, this implementation +// does not keep any backing files open. This would exhaust the worker's +// file descriptor table. Files are opened on demand. +// +// TODO: Maybe use an eviction.Set to keep a small number of files open? +func NewDirectoryBackedFilePool(directory filesystem.Directory) FilePool { + return &directoryBackedFilePool{ + directory: directory, + } +} + +func (fp *directoryBackedFilePool) NewFile() (filesystem.FileReadWriter, error) { + return &lazyOpeningSelfDeletingFile{ + directory: fp.directory, + name: path.MustNewComponent(strconv.FormatUint(fp.nextID.Add(1), 10)), + }, nil +} + +// lazyOpeningSelfDeletingFile is a file descriptor that forwards +// operations to a file that is opened on demand. Upon closure, the +// underlying file is unlinked. +type lazyOpeningSelfDeletingFile struct { + directory filesystem.Directory + name path.Component +} + +func (f *lazyOpeningSelfDeletingFile) Close() error { + if err := f.directory.Remove(f.name); err != nil && !os.IsNotExist(err) { + return err + } + return nil +} + +func (f *lazyOpeningSelfDeletingFile) GetNextRegionOffset(off int64, regionType filesystem.RegionType) (int64, error) { + fh, err := f.directory.OpenRead(f.name) + if os.IsNotExist(err) { + // Empty file that doesn't explicitly exist in the + // backing store yet. Treat it as if it's a zero-length + // file. 
+ return 0, io.EOF + } else if err != nil { + return 0, err + } + defer fh.Close() + return fh.GetNextRegionOffset(off, regionType) +} + +func (f *lazyOpeningSelfDeletingFile) ReadAt(p []byte, off int64) (int, error) { + fh, err := f.directory.OpenRead(f.name) + if os.IsNotExist(err) { + // Empty file that doesn't explicitly exist in the + // backing store yet. Treat it as if it's a zero-length + // file. + return 0, io.EOF + } else if err != nil { + return 0, err + } + defer fh.Close() + return fh.ReadAt(p, off) +} + +func (f *lazyOpeningSelfDeletingFile) Sync() error { + // Because FilePool does not provide any persistency, there is + // no need to synchronize any data. + return nil +} + +func (f *lazyOpeningSelfDeletingFile) Truncate(size int64) error { + fh, err := f.directory.OpenWrite(f.name, filesystem.CreateReuse(0o600)) + if err != nil { + return err + } + defer fh.Close() + return fh.Truncate(size) +} + +func (f *lazyOpeningSelfDeletingFile) WriteAt(p []byte, off int64) (int, error) { + fh, err := f.directory.OpenWrite(f.name, filesystem.CreateReuse(0o600)) + if err != nil { + return 0, err + } + defer fh.Close() + return fh.WriteAt(p, off) +} diff --git a/pkg/filesystem/directory_backed_file_pool_test.go b/pkg/filesystem/directory_backed_file_pool_test.go new file mode 100644 index 0000000..8be35bf --- /dev/null +++ b/pkg/filesystem/directory_backed_file_pool_test.go @@ -0,0 +1,95 @@ +package filesystem_test + +import ( + "io" + "syscall" + "testing" + + "github.com/buildbarn/bb-remote-execution/internal/mock" + re_filesystem "github.com/buildbarn/bb-remote-execution/pkg/filesystem" + "github.com/buildbarn/bb-storage/pkg/filesystem" + "github.com/buildbarn/bb-storage/pkg/filesystem/path" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" +) + +func TestDirectoryBackedFilePool(t *testing.T) { + ctrl := gomock.NewController(t) + + directory := mock.NewMockDirectory(ctrl) + fp := re_filesystem.NewDirectoryBackedFilePool(directory) + + 
t.Run("EmptyFile", func(t *testing.T) { + f, err := fp.NewFile() + require.NoError(t, err) + + // Underlying file should not yet exist. This should be + // interpreted as if the file is empty. + directory.EXPECT().OpenRead(path.MustNewComponent("1")).Return(nil, syscall.ENOENT) + var p [10]byte + n, err := f.ReadAt(p[:], 0) + require.Equal(t, 0, n) + require.Equal(t, io.EOF, err) + + // GetNextRegionOffset() should behave similarly. + directory.EXPECT().OpenRead(path.MustNewComponent("1")).Return(nil, syscall.ENOENT) + _, err = f.GetNextRegionOffset(0, filesystem.Data) + require.Equal(t, io.EOF, err) + + directory.EXPECT().OpenRead(path.MustNewComponent("1")).Return(nil, syscall.ENOENT) + _, err = f.GetNextRegionOffset(0, filesystem.Hole) + require.Equal(t, io.EOF, err) + + directory.EXPECT().Remove(path.MustNewComponent("1")).Return(syscall.ENOENT) + require.NoError(t, f.Close()) + }) + + t.Run("NonEmptyFile", func(t *testing.T) { + f, err := fp.NewFile() + require.NoError(t, err) + + // Write a piece of text into the file. + fileWriter := mock.NewMockFileWriter(ctrl) + directory.EXPECT().OpenWrite(path.MustNewComponent("2"), filesystem.CreateReuse(0o600)).Return(fileWriter, nil) + fileWriter.EXPECT().WriteAt([]byte("Hello, world"), int64(123)).Return(12, nil) + fileWriter.EXPECT().Close() + n, err := f.WriteAt([]byte("Hello, world"), 123) + require.Equal(t, 12, n) + require.NoError(t, err) + + // Truncate a part of it. + fileWriter = mock.NewMockFileWriter(ctrl) + directory.EXPECT().OpenWrite(path.MustNewComponent("2"), filesystem.CreateReuse(0o600)).Return(fileWriter, nil) + fileWriter.EXPECT().Truncate(int64(128)) + fileWriter.EXPECT().Close() + require.NoError(t, f.Truncate(128)) + + // Read back the end of the file. 
+ fileReader := mock.NewMockFileReader(ctrl) + directory.EXPECT().OpenRead(path.MustNewComponent("2")).Return(fileReader, nil) + fileReader.EXPECT().ReadAt(gomock.Any(), int64(120)).DoAndReturn( + func(p []byte, off int64) (int, error) { + require.Len(t, p, 10) + copy(p, "\x00\x00\x00Hello") + return 8, io.EOF + }) + fileReader.EXPECT().Close() + var p [10]byte + n, err = f.ReadAt(p[:], 120) + require.Equal(t, 8, n) + require.Equal(t, io.EOF, err) + require.Equal(t, []byte("\x00\x00\x00Hello"), p[:8]) + + // Calls for GetNextRegionOffset() should be forwarded. + fileReader = mock.NewMockFileReader(ctrl) + directory.EXPECT().OpenRead(path.MustNewComponent("2")).Return(fileReader, nil) + fileReader.EXPECT().GetNextRegionOffset(int64(0), filesystem.Hole).Return(int64(123), nil) + fileReader.EXPECT().Close() + off, err := f.GetNextRegionOffset(0, filesystem.Hole) + require.NoError(t, err) + require.Equal(t, int64(123), off) + + directory.EXPECT().Remove(path.MustNewComponent("2")).Return(nil) + require.NoError(t, f.Close()) + }) +} diff --git a/pkg/filesystem/empty_file_pool.go b/pkg/filesystem/empty_file_pool.go new file mode 100644 index 0000000..1e60af3 --- /dev/null +++ b/pkg/filesystem/empty_file_pool.go @@ -0,0 +1,20 @@ +package filesystem + +import ( + "github.com/buildbarn/bb-storage/pkg/filesystem" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +type emptyFilePool struct{} + +func (fp emptyFilePool) NewFile() (filesystem.FileReadWriter, error) { + return nil, status.Error(codes.ResourceExhausted, "Cannot create file in empty file pool") +} + +// EmptyFilePool is a FilePool that does not permit the creation of new +// files. It is used as the default FilePool for the root of the +// worker's FUSE file system to disallow the creation of files not bound +// to a specific build action. 
+var EmptyFilePool FilePool = emptyFilePool{} diff --git a/pkg/filesystem/empty_file_pool_test.go b/pkg/filesystem/empty_file_pool_test.go new file mode 100644 index 0000000..e5696fa --- /dev/null +++ b/pkg/filesystem/empty_file_pool_test.go @@ -0,0 +1,16 @@ +package filesystem_test + +import ( + "testing" + + "github.com/buildbarn/bb-remote-execution/pkg/filesystem" + "github.com/stretchr/testify/require" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func TestEmptyFilePool(t *testing.T) { + _, err := filesystem.EmptyFilePool.NewFile() + require.Equal(t, err, status.Error(codes.ResourceExhausted, "Cannot create file in empty file pool")) +} diff --git a/pkg/filesystem/file_pool.go b/pkg/filesystem/file_pool.go new file mode 100644 index 0000000..1dc9120 --- /dev/null +++ b/pkg/filesystem/file_pool.go @@ -0,0 +1,15 @@ +package filesystem + +import ( + "github.com/buildbarn/bb-storage/pkg/filesystem" +) + +// FilePool is an allocator for temporary files. Files are created by +// calling NewFile(). They are automatically removed by calling Close(). +// +// File handles returned by NewFile() are not thread-safe. Additional +// locking needs to be done at higher levels to permit safe concurrent +// access. 
+type FilePool interface { + NewFile() (filesystem.FileReadWriter, error) +} diff --git a/pkg/filesystem/in_memory_file_pool.go b/pkg/filesystem/in_memory_file_pool.go new file mode 100644 index 0000000..04f0c52 --- /dev/null +++ b/pkg/filesystem/in_memory_file_pool.go @@ -0,0 +1,81 @@ +package filesystem + +import ( + "io" + + "github.com/buildbarn/bb-storage/pkg/filesystem" +) + +type inMemoryFilePool struct{} + +func (fp inMemoryFilePool) NewFile() (filesystem.FileReadWriter, error) { + return &inMemoryFile{}, nil +} + +type inMemoryFile struct { + data []byte +} + +func (f *inMemoryFile) Close() error { + f.data = nil + return nil +} + +func (f *inMemoryFile) GetNextRegionOffset(off int64, regionType filesystem.RegionType) (int64, error) { + // Files are stored in a byte slice contiguously, so there is no + // sparseness. + if off >= int64(len(f.data)) { + return 0, io.EOF + } + switch regionType { + case filesystem.Data: + return off, nil + case filesystem.Hole: + return int64(len(f.data)), nil + default: + panic("Unknown region type") + } +} + +func (f *inMemoryFile) ReadAt(p []byte, off int64) (int, error) { + if int(off) >= len(f.data) { + return 0, io.EOF + } + if n := copy(p, f.data[off:]); n < len(p) { + return n, io.EOF + } + return len(p), nil +} + +func (f *inMemoryFile) Sync() error { + // Because FilePool does not provide any persistency, there is + // no need to synchronize any data. + return nil +} + +func (f *inMemoryFile) Truncate(size int64) error { + if len(f.data) >= int(size) { + // Truncate the file. + f.data = f.data[:size] + } else { + // Grow the file. + f.data = append(f.data, make([]byte, int(size)-len(f.data))...) + } + return nil +} + +func (f *inMemoryFile) WriteAt(p []byte, off int64) (int, error) { + // Zero-sized writes should not cause the file to grow. + if len(p) == 0 { + return 0, nil + } + + if size := int(off) + len(p); len(f.data) < size { + // Grow the file. + f.data = append(f.data, make([]byte, size-len(f.data))...) 
+ } + return copy(f.data[off:], p), nil +} + +// InMemoryFilePool is a FilePool that stores all data in memory. +var InMemoryFilePool FilePool = inMemoryFilePool{} diff --git a/pkg/filesystem/in_memory_file_pool_test.go b/pkg/filesystem/in_memory_file_pool_test.go new file mode 100644 index 0000000..079b4c9 --- /dev/null +++ b/pkg/filesystem/in_memory_file_pool_test.go @@ -0,0 +1,65 @@ +package filesystem_test + +import ( + "io" + "testing" + + "github.com/buildbarn/bb-remote-execution/pkg/filesystem" + "github.com/stretchr/testify/require" +) + +func TestInMemoryFilePool(t *testing.T) { + fp := filesystem.InMemoryFilePool + + t.Run("EmptyFile", func(t *testing.T) { + f, err := fp.NewFile() + require.NoError(t, err) + + var p [10]byte + n, err := f.ReadAt(p[:], 0) + require.Equal(t, 0, n) + require.Equal(t, io.EOF, err) + + require.NoError(t, f.Close()) + }) + + t.Run("NonEmptyFile", func(t *testing.T) { + f, err := fp.NewFile() + require.NoError(t, err) + + // Write a piece of text into the file. + n, err := f.WriteAt([]byte("Hello, world"), 123) + require.Equal(t, 12, n) + require.NoError(t, err) + + // Truncate a part of it. + require.NoError(t, f.Truncate(128)) + + // Read back the end of the file. + var p [10]byte + n, err = f.ReadAt(p[:], 120) + require.Equal(t, 8, n) + require.Equal(t, io.EOF, err) + require.Equal(t, []byte("\x00\x00\x00Hello"), p[:8]) + + require.NoError(t, f.Close()) + }) + + t.Run("ZeroSizedWrite", func(t *testing.T) { + f, err := fp.NewFile() + require.NoError(t, err) + + // A zero-sized write should not cause the file to + // actually grow. The read should still return EOF. 
+ n, err := f.WriteAt(nil, 123) + require.Equal(t, 0, n) + require.NoError(t, err) + + var p [10]byte + n, err = f.ReadAt(p[:], 0) + require.Equal(t, 0, n) + require.Equal(t, io.EOF, err) + + require.NoError(t, f.Close()) + }) +} diff --git a/pkg/filesystem/lazy_directory.go b/pkg/filesystem/lazy_directory.go new file mode 100644 index 0000000..78abceb --- /dev/null +++ b/pkg/filesystem/lazy_directory.go @@ -0,0 +1,247 @@ +package filesystem + +import ( + "os" + "time" + + "github.com/buildbarn/bb-storage/pkg/filesystem" + "github.com/buildbarn/bb-storage/pkg/filesystem/path" + "github.com/buildbarn/bb-storage/pkg/util" + + "google.golang.org/grpc/codes" +) + +// DirectoryOpener is a callback that is used by LazyDirectory to open +// the underlying directory on demand. +type DirectoryOpener func() (filesystem.DirectoryCloser, error) + +type lazyDirectory struct { + directoryOpener DirectoryOpener +} + +// NewLazyDirectory creates a directory handle that forwards all calls +// to a directory that is created on demand. The primary use case for +// this adapter is for the FUSE-based runner. +// +// A runner process may get started before the worker is able to create +// its FUSE mount point. This would cause the runner to obtain a handle +// to the build directory underneath the FUSE mount, causing builds to +// fail due to missing input files. +// +// Relatedly, if the worker would start before the runner, but end up +// crashing/restarting, the runner would still have a directory handle +// pointing to a stale FUSE mount. +// +// This wrapper prevents these problems by ensuring that we never hold +// on to a file descriptor to the build directory. 
+func NewLazyDirectory(directoryOpener DirectoryOpener) filesystem.Directory { + return &lazyDirectory{ + directoryOpener: directoryOpener, + } +} + +func (d *lazyDirectory) openUnderlying() (filesystem.DirectoryCloser, error) { + underlying, err := d.directoryOpener() + if err != nil { + return nil, util.StatusWrapWithCode(err, codes.Internal, "Failed to open underlying directory") + } + return underlying, nil +} + +func (d *lazyDirectory) EnterDirectory(name path.Component) (filesystem.DirectoryCloser, error) { + underlying, err := d.openUnderlying() + if err != nil { + return nil, err + } + defer underlying.Close() + return underlying.EnterDirectory(name) +} + +func (d *lazyDirectory) OpenAppend(name path.Component, creationMode filesystem.CreationMode) (filesystem.FileAppender, error) { + underlying, err := d.openUnderlying() + if err != nil { + return nil, err + } + defer underlying.Close() + return underlying.OpenAppend(name, creationMode) +} + +func (d *lazyDirectory) OpenRead(name path.Component) (filesystem.FileReader, error) { + underlying, err := d.openUnderlying() + if err != nil { + return nil, err + } + defer underlying.Close() + return underlying.OpenRead(name) +} + +func (d *lazyDirectory) OpenReadWrite(name path.Component, creationMode filesystem.CreationMode) (filesystem.FileReadWriter, error) { + underlying, err := d.openUnderlying() + if err != nil { + return nil, err + } + defer underlying.Close() + return underlying.OpenReadWrite(name, creationMode) +} + +func (d *lazyDirectory) OpenWrite(name path.Component, creationMode filesystem.CreationMode) (filesystem.FileWriter, error) { + underlying, err := d.openUnderlying() + if err != nil { + return nil, err + } + defer underlying.Close() + return underlying.OpenWrite(name, creationMode) +} + +func (d *lazyDirectory) Link(oldName path.Component, newDirectory filesystem.Directory, newName path.Component) error { + underlying, err := d.openUnderlying() + if err != nil { + return err + } + defer 
underlying.Close() + return underlying.Link(oldName, newDirectory, newName) +} + +func (d *lazyDirectory) Clonefile(oldName path.Component, newDirectory filesystem.Directory, newName path.Component) error { + underlying, err := d.openUnderlying() + if err != nil { + return err + } + defer underlying.Close() + return underlying.Clonefile(oldName, newDirectory, newName) +} + +func (d *lazyDirectory) Lstat(name path.Component) (filesystem.FileInfo, error) { + underlying, err := d.openUnderlying() + if err != nil { + return filesystem.FileInfo{}, err + } + defer underlying.Close() + return underlying.Lstat(name) +} + +func (d *lazyDirectory) Mkdir(name path.Component, perm os.FileMode) error { + underlying, err := d.openUnderlying() + if err != nil { + return err + } + defer underlying.Close() + return underlying.Mkdir(name, perm) +} + +func (d *lazyDirectory) Mknod(name path.Component, perm os.FileMode, deviceNumber filesystem.DeviceNumber) error { + underlying, err := d.openUnderlying() + if err != nil { + return err + } + defer underlying.Close() + return underlying.Mknod(name, perm, deviceNumber) +} + +func (d *lazyDirectory) ReadDir() ([]filesystem.FileInfo, error) { + underlying, err := d.openUnderlying() + if err != nil { + return nil, err + } + defer underlying.Close() + return underlying.ReadDir() +} + +func (d *lazyDirectory) Readlink(name path.Component) (string, error) { + underlying, err := d.openUnderlying() + if err != nil { + return "", err + } + defer underlying.Close() + return underlying.Readlink(name) +} + +func (d *lazyDirectory) Remove(name path.Component) error { + underlying, err := d.openUnderlying() + if err != nil { + return err + } + defer underlying.Close() + return underlying.Remove(name) +} + +func (d *lazyDirectory) RemoveAll(name path.Component) error { + underlying, err := d.openUnderlying() + if err != nil { + return err + } + defer underlying.Close() + return underlying.RemoveAll(name) +} + +func (d *lazyDirectory) 
RemoveAllChildren() error { + underlying, err := d.openUnderlying() + if err != nil { + return err + } + defer underlying.Close() + return underlying.RemoveAllChildren() +} + +func (d *lazyDirectory) Rename(oldName path.Component, newDirectory filesystem.Directory, newName path.Component) error { + underlying, err := d.openUnderlying() + if err != nil { + return err + } + defer underlying.Close() + return underlying.Rename(oldName, newDirectory, newName) +} + +func (d *lazyDirectory) Symlink(oldName string, newName path.Component) error { + underlying, err := d.openUnderlying() + if err != nil { + return err + } + defer underlying.Close() + return underlying.Symlink(oldName, newName) +} + +func (d *lazyDirectory) Sync() error { + underlying, err := d.openUnderlying() + if err != nil { + return err + } + defer underlying.Close() + return underlying.Sync() +} + +func (d *lazyDirectory) Chtimes(name path.Component, atime, mtime time.Time) error { + underlying, err := d.openUnderlying() + if err != nil { + return err + } + defer underlying.Close() + return underlying.Chtimes(name, atime, mtime) +} + +func (d *lazyDirectory) IsWritable() (bool, error) { + underlying, err := d.openUnderlying() + if err != nil { + return false, err + } + defer underlying.Close() + return underlying.IsWritable() +} + +func (d *lazyDirectory) IsWritableChild(name path.Component) (bool, error) { + underlying, err := d.openUnderlying() + if err != nil { + return false, err + } + defer underlying.Close() + return underlying.IsWritableChild(name) +} + +func (d *lazyDirectory) Apply(arg interface{}) error { + underlying, err := d.openUnderlying() + if err != nil { + return err + } + defer underlying.Close() + return underlying.Apply(arg) +} diff --git a/pkg/filesystem/lazy_directory_test.go b/pkg/filesystem/lazy_directory_test.go new file mode 100644 index 0000000..4cb2396 --- /dev/null +++ b/pkg/filesystem/lazy_directory_test.go @@ -0,0 +1,189 @@ +package filesystem_test + +import ( + "os" + 
"testing" + + "github.com/buildbarn/bb-remote-execution/internal/mock" + re_filesystem "github.com/buildbarn/bb-remote-execution/pkg/filesystem" + "github.com/buildbarn/bb-storage/pkg/filesystem" + "github.com/buildbarn/bb-storage/pkg/filesystem/path" + "github.com/buildbarn/bb-storage/pkg/testutil" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func TestLazyDirectory(t *testing.T) { + ctrl := gomock.NewController(t) + + directoryOpener := mock.NewMockDirectoryOpener(ctrl) + directory := re_filesystem.NewLazyDirectory(directoryOpener.Call) + + t.Run("EnterSuccess", func(t *testing.T) { + underlyingDirectory := mock.NewMockDirectoryCloser(ctrl) + directoryOpener.EXPECT().Call().Return(underlyingDirectory, nil) + childDirectory := mock.NewMockDirectoryCloser(ctrl) + underlyingDirectory.EXPECT().EnterDirectory(path.MustNewComponent("sub")).Return(childDirectory, nil) + underlyingDirectory.EXPECT().Close().Return(nil) + + // Call should be forwarded literally. + returnedDirectory, err := directory.EnterDirectory(path.MustNewComponent("sub")) + require.NoError(t, err) + require.Equal(t, returnedDirectory, childDirectory) + }) + + t.Run("EnterFailure", func(t *testing.T) { + directoryOpener.EXPECT().Call().Return(nil, status.Error(codes.PermissionDenied, "Not allowed to access build directory")) + + // Error code should be transformed to Internal. We + // don't want to propagate the underlying error code, as + // that could cause confusion/invalid behaviour (e.g., + // NotFound). 
+ _, err := directory.EnterDirectory(path.MustNewComponent("sub")) + testutil.RequireEqualStatus(t, err, status.Error(codes.Internal, "Failed to open underlying directory: Not allowed to access build directory")) + }) + + t.Run("LinkSuccess", func(t *testing.T) { + underlyingDirectory := mock.NewMockDirectoryCloser(ctrl) + directoryOpener.EXPECT().Call().Return(underlyingDirectory, nil) + otherDirectory := mock.NewMockDirectoryCloser(ctrl) + underlyingDirectory.EXPECT().Link(path.MustNewComponent("old"), otherDirectory, path.MustNewComponent("new")).Return(nil) + underlyingDirectory.EXPECT().Close().Return(nil) + + // Call should be forwarded literally. + err := directory.Link(path.MustNewComponent("old"), otherDirectory, path.MustNewComponent("new")) + require.NoError(t, err) + }) + + t.Run("LstatSuccess", func(t *testing.T) { + underlyingDirectory := mock.NewMockDirectoryCloser(ctrl) + directoryOpener.EXPECT().Call().Return(underlyingDirectory, nil) + underlyingDirectory.EXPECT().Lstat(path.MustNewComponent("foo")).Return(filesystem.NewFileInfo(path.MustNewComponent("foo"), filesystem.FileTypeDirectory, false), nil) + underlyingDirectory.EXPECT().Close().Return(nil) + + // Call should be forwarded literally. + fileInfo, err := directory.Lstat(path.MustNewComponent("foo")) + require.NoError(t, err) + require.Equal(t, fileInfo, filesystem.NewFileInfo(path.MustNewComponent("foo"), filesystem.FileTypeDirectory, false)) + }) + + t.Run("MkdirSuccess", func(t *testing.T) { + underlyingDirectory := mock.NewMockDirectoryCloser(ctrl) + directoryOpener.EXPECT().Call().Return(underlyingDirectory, nil) + underlyingDirectory.EXPECT().Mkdir(path.MustNewComponent("sub"), os.FileMode(0o777)).Return(nil) + underlyingDirectory.EXPECT().Close().Return(nil) + + // Call should be forwarded literally. 
+ err := directory.Mkdir(path.MustNewComponent("sub"), 0o777) + require.NoError(t, err) + }) + + t.Run("OpenReadSuccess", func(t *testing.T) { + underlyingDirectory := mock.NewMockDirectoryCloser(ctrl) + directoryOpener.EXPECT().Call().Return(underlyingDirectory, nil) + childFile := mock.NewMockFileReader(ctrl) + underlyingDirectory.EXPECT().OpenRead(path.MustNewComponent("file")).Return(childFile, nil) + underlyingDirectory.EXPECT().Close().Return(nil) + + // Call should be forwarded literally. + f, err := directory.OpenRead(path.MustNewComponent("file")) + require.NoError(t, err) + require.Equal(t, f, childFile) + }) + + t.Run("ReadDirSuccess", func(t *testing.T) { + underlyingDirectory := mock.NewMockDirectoryCloser(ctrl) + directoryOpener.EXPECT().Call().Return(underlyingDirectory, nil) + underlyingDirectory.EXPECT().ReadDir().Return([]filesystem.FileInfo{ + filesystem.NewFileInfo(path.MustNewComponent("a"), filesystem.FileTypeDirectory, false), + filesystem.NewFileInfo(path.MustNewComponent("b"), filesystem.FileTypeRegularFile, false), + }, nil) + underlyingDirectory.EXPECT().Close().Return(nil) + + // Call should be forwarded literally. + contents, err := directory.ReadDir() + require.NoError(t, err) + require.Equal(t, contents, []filesystem.FileInfo{ + filesystem.NewFileInfo(path.MustNewComponent("a"), filesystem.FileTypeDirectory, false), + filesystem.NewFileInfo(path.MustNewComponent("b"), filesystem.FileTypeRegularFile, false), + }) + }) + + t.Run("ReadlinkSuccess", func(t *testing.T) { + underlyingDirectory := mock.NewMockDirectoryCloser(ctrl) + directoryOpener.EXPECT().Call().Return(underlyingDirectory, nil) + underlyingDirectory.EXPECT().Readlink(path.MustNewComponent("symlink")).Return("target", nil) + underlyingDirectory.EXPECT().Close().Return(nil) + + // Call should be forwarded literally. 
+ target, err := directory.Readlink(path.MustNewComponent("symlink")) + require.NoError(t, err) + require.Equal(t, target, "target") + }) + + t.Run("RemoveSuccess", func(t *testing.T) { + underlyingDirectory := mock.NewMockDirectoryCloser(ctrl) + directoryOpener.EXPECT().Call().Return(underlyingDirectory, nil) + underlyingDirectory.EXPECT().Remove(path.MustNewComponent("file")).Return(nil) + underlyingDirectory.EXPECT().Close().Return(nil) + + // Call should be forwarded literally. + err := directory.Remove(path.MustNewComponent("file")) + require.NoError(t, err) + }) + + t.Run("RemoveAllSuccess", func(t *testing.T) { + underlyingDirectory := mock.NewMockDirectoryCloser(ctrl) + directoryOpener.EXPECT().Call().Return(underlyingDirectory, nil) + underlyingDirectory.EXPECT().RemoveAll(path.MustNewComponent("directory")).Return(nil) + underlyingDirectory.EXPECT().Close().Return(nil) + + // Call should be forwarded literally. + err := directory.RemoveAll(path.MustNewComponent("directory")) + require.NoError(t, err) + }) + + t.Run("RemoveAllChildrenSuccess", func(t *testing.T) { + underlyingDirectory := mock.NewMockDirectoryCloser(ctrl) + directoryOpener.EXPECT().Call().Return(underlyingDirectory, nil) + underlyingDirectory.EXPECT().RemoveAllChildren().Return(nil) + underlyingDirectory.EXPECT().Close().Return(nil) + + // Call should be forwarded literally. 
+ err := directory.RemoveAllChildren() + require.NoError(t, err) + }) + + t.Run("RenameSuccess", func(t *testing.T) { + underlyingDirectory := mock.NewMockDirectoryCloser(ctrl) + directoryOpener.EXPECT().Call().Return(underlyingDirectory, nil) + otherDirectory := mock.NewMockDirectoryCloser(ctrl) + underlyingDirectory.EXPECT().Rename(path.MustNewComponent("old"), otherDirectory, path.MustNewComponent("new")) + underlyingDirectory.EXPECT().Close() + + require.NoError(t, directory.Rename(path.MustNewComponent("old"), otherDirectory, path.MustNewComponent("new"))) + }) + + t.Run("SymlinkSuccess", func(t *testing.T) { + underlyingDirectory := mock.NewMockDirectoryCloser(ctrl) + directoryOpener.EXPECT().Call().Return(underlyingDirectory, nil) + underlyingDirectory.EXPECT().Symlink("old", path.MustNewComponent("new")).Return(nil) + underlyingDirectory.EXPECT().Close().Return(nil) + + // Call should be forwarded literally. + err := directory.Symlink("old", path.MustNewComponent("new")) + require.NoError(t, err) + }) + + t.Run("SyncSuccess", func(t *testing.T) { + underlyingDirectory := mock.NewMockDirectoryCloser(ctrl) + directoryOpener.EXPECT().Call().Return(underlyingDirectory, nil) + underlyingDirectory.EXPECT().Sync() + underlyingDirectory.EXPECT().Close() + + require.NoError(t, directory.Sync()) + }) +} diff --git a/pkg/filesystem/metrics_file_pool.go b/pkg/filesystem/metrics_file_pool.go new file mode 100644 index 0000000..7938cd9 --- /dev/null +++ b/pkg/filesystem/metrics_file_pool.go @@ -0,0 +1,66 @@ +package filesystem + +import ( + "sync" + + "github.com/buildbarn/bb-storage/pkg/filesystem" + "github.com/prometheus/client_golang/prometheus" +) + +var ( + filePoolPrometheusMetrics sync.Once + + filePoolFilesCreated = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "buildbarn", + Subsystem: "filesystem", + Name: "file_pool_files_created_total", + Help: "Number of times a file was created that is backed by a file pool.", + }) + filePoolFilesClosed = 
prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "buildbarn", + Subsystem: "filesystem", + Name: "file_pool_files_closed_total", + Help: "Number of times a file was closed that is backed by a file pool.", + }) +) + +type metricsFilePool struct { + base FilePool +} + +// NewMetricsFilePool creates a decorator for FilePool that exposes +// Prometheus metrics on how many files are created and closed. +func NewMetricsFilePool(base FilePool) FilePool { + filePoolPrometheusMetrics.Do(func() { + prometheus.MustRegister(filePoolFilesCreated) + prometheus.MustRegister(filePoolFilesClosed) + }) + + return &metricsFilePool{ + base: base, + } +} + +func (fp *metricsFilePool) NewFile() (filesystem.FileReadWriter, error) { + f, err := fp.base.NewFile() + if err != nil { + return nil, err + } + filePoolFilesCreated.Inc() + return &metricsFile{ + FileReadWriter: f, + }, nil +} + +type metricsFile struct { + filesystem.FileReadWriter +} + +func (f *metricsFile) Close() error { + err := f.FileReadWriter.Close() + f.FileReadWriter = nil + filePoolFilesClosed.Inc() + return err +} diff --git a/pkg/filesystem/quota_enforcing_file_pool.go b/pkg/filesystem/quota_enforcing_file_pool.go new file mode 100644 index 0000000..0b07131 --- /dev/null +++ b/pkg/filesystem/quota_enforcing_file_pool.go @@ -0,0 +1,137 @@ +package filesystem + +import ( + "sync/atomic" + + "github.com/buildbarn/bb-storage/pkg/filesystem" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// quotaMetric is a simple 64-bit counter from/to which can be +// subtracted/added atomically. It is used to store the number of files +// and bytes of space available. 
+type quotaMetric struct {
+	remaining atomic.Int64
+}
+
+func (m *quotaMetric) allocate(v int64) bool {
+	for {
+		remaining := m.remaining.Load()
+		if remaining < v {
+			return false
+		}
+		if m.remaining.CompareAndSwap(remaining, remaining-v) {
+			return true
+		}
+	}
+}
+
+func (m *quotaMetric) release(v int64) {
+	m.remaining.Add(v)
+}
+
+type quotaEnforcingFilePool struct {
+	base FilePool
+
+	filesRemaining quotaMetric
+	bytesRemaining quotaMetric
+}
+
+// NewQuotaEnforcingFilePool creates a FilePool that enforces disk
+// quotas. It limits how many files may be extracted from an underlying
+// FilePool, while also limiting the total size of all files that are
+// extracted. Space is reclaimed by either truncating files or closing
+// them.
+func NewQuotaEnforcingFilePool(base FilePool, maximumFileCount, maximumTotalSize int64) FilePool {
+	fp := &quotaEnforcingFilePool{
+		base: base,
+	}
+	fp.filesRemaining.remaining.Store(maximumFileCount)
+	fp.bytesRemaining.remaining.Store(maximumTotalSize)
+	return fp
+}
+
+func (fp *quotaEnforcingFilePool) NewFile() (filesystem.FileReadWriter, error) {
+	if !fp.filesRemaining.allocate(1) {
+		return nil, status.Error(codes.InvalidArgument, "File count quota reached")
+	}
+	f, err := fp.base.NewFile()
+	if err != nil {
+		fp.filesRemaining.release(1)
+		return nil, err
+	}
+	return &quotaEnforcingFile{
+		FileReadWriter: f,
+		pool:           fp,
+	}, nil
+}
+
+type quotaEnforcingFile struct {
+	filesystem.FileReadWriter
+
+	pool *quotaEnforcingFilePool
+	size int64
+}
+
+func (f *quotaEnforcingFile) Close() error {
+	// Close underlying file.
+	err := f.FileReadWriter.Close()
+	f.FileReadWriter = nil
+
+	// Release associated resources.
+	f.pool.filesRemaining.release(1)
+	f.pool.bytesRemaining.release(f.size)
+	f.pool = nil
+	return err
+}
+
+func (f *quotaEnforcingFile) Truncate(size int64) error {
+	if size < f.size {
+		// File is shrinking.
+ if err := f.FileReadWriter.Truncate(size); err != nil { + return err + } + f.pool.bytesRemaining.release(f.size - size) + } else if size > f.size { + // File is growing. + additionalSpace := size - f.size + if !f.pool.bytesRemaining.allocate(additionalSpace) { + return status.Error(codes.InvalidArgument, "File size quota reached") + } + if err := f.FileReadWriter.Truncate(size); err != nil { + f.pool.bytesRemaining.release(additionalSpace) + return err + } + } + f.size = size + return nil +} + +func (f *quotaEnforcingFile) WriteAt(p []byte, off int64) (int, error) { + // No need to allocate space if the file is not growing. + desiredSize := off + int64(len(p)) + if desiredSize <= f.size { + return f.FileReadWriter.WriteAt(p, off) + } + + // File is growing. Allocate space prior to writing. Release it, + // potentially partially, upon failure. + if !f.pool.bytesRemaining.allocate(desiredSize - f.size) { + return 0, status.Error(codes.InvalidArgument, "File size quota reached") + } + n, err := f.FileReadWriter.WriteAt(p, off) + actualSize := int64(0) + if n > 0 { + actualSize = off + int64(n) + } + if actualSize < f.size { + actualSize = f.size + } + if actualSize < desiredSize { + f.pool.bytesRemaining.release(desiredSize - actualSize) + } + f.size = actualSize + return n, err +} diff --git a/pkg/filesystem/quota_enforcing_file_pool_test.go b/pkg/filesystem/quota_enforcing_file_pool_test.go new file mode 100644 index 0000000..6332335 --- /dev/null +++ b/pkg/filesystem/quota_enforcing_file_pool_test.go @@ -0,0 +1,139 @@ +package filesystem_test + +import ( + "io" + "testing" + + "github.com/buildbarn/bb-remote-execution/internal/mock" + re_filesystem "github.com/buildbarn/bb-remote-execution/pkg/filesystem" + "github.com/buildbarn/bb-storage/pkg/filesystem" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// testRemainingQuota is a helper function for the +// 
QuotaEnforcingFilePool tests to check that a certain amount of space +// is available within the pool. +func testRemainingQuota(t *testing.T, ctrl *gomock.Controller, underlyingPool *mock.MockFilePool, pool re_filesystem.FilePool, filesRemaining int, bytesRemaining int64) { + // Check that the remaining number of files is available by + // allocating all of them. + underlyingFiles := make([]*mock.MockFileReadWriter, filesRemaining) + files := make([]filesystem.FileReadWriter, filesRemaining) + for i := 0; i < filesRemaining; i++ { + underlyingFiles[i] = mock.NewMockFileReadWriter(ctrl) + underlyingPool.EXPECT().NewFile().Return(underlyingFiles[i], nil) + var err error + files[i], err = pool.NewFile() + require.NoError(t, err) + } + _, err := pool.NewFile() + require.Equal(t, err, status.Error(codes.InvalidArgument, "File count quota reached")) + for i := 0; i < filesRemaining; i++ { + underlyingFiles[i].EXPECT().Close().Return(nil) + require.NoError(t, files[i].Close()) + } + + // Check that the remaining amount of space is available by + // allocating one file and truncating it to the exact size. + underlyingFile := mock.NewMockFileReadWriter(ctrl) + underlyingPool.EXPECT().NewFile().Return(underlyingFile, nil) + f, err := pool.NewFile() + require.NoError(t, err) + if bytesRemaining != 0 { + underlyingFile.EXPECT().Truncate(bytesRemaining).Return(nil) + } + require.NoError(t, f.Truncate(bytesRemaining)) + require.Equal(t, f.Truncate(bytesRemaining+1), status.Error(codes.InvalidArgument, "File size quota reached")) + underlyingFile.EXPECT().Close().Return(nil) + require.NoError(t, f.Close()) +} + +func TestQuotaEnforcingFilePoolExample(t *testing.T) { + ctrl := gomock.NewController(t) + + // An empty pool should have the advertised amount of space available. 
+ underlyingPool := mock.NewMockFilePool(ctrl) + pool := re_filesystem.NewQuotaEnforcingFilePool(underlyingPool, 10, 1000) + testRemainingQuota(t, ctrl, underlyingPool, pool, 10, 1000) + + // Failure to allocate a file from the underlying pool should + // not affect the quota. + underlyingPool.EXPECT().NewFile().Return(nil, status.Error(codes.Internal, "I/O error")) + _, err := pool.NewFile() + require.Equal(t, err, status.Error(codes.Internal, "I/O error")) + testRemainingQuota(t, ctrl, underlyingPool, pool, 10, 1000) + + // Successfully allocate a file. + underlyingFile := mock.NewMockFileReadWriter(ctrl) + underlyingPool.EXPECT().NewFile().Return(underlyingFile, nil) + f, err := pool.NewFile() + require.NoError(t, err) + testRemainingQuota(t, ctrl, underlyingPool, pool, 9, 1000) + + // Read calls should be forwarded properly. + var p [10]byte + underlyingFile.EXPECT().ReadAt(p[:], int64(123)).Return(0, io.EOF) + n, err := f.ReadAt(p[:], 123) + require.Equal(t, 0, n) + require.Equal(t, io.EOF, err) + testRemainingQuota(t, ctrl, underlyingPool, pool, 9, 1000) + + // Writes that would cause the file to grow beyond the maximum + // size should be disallowed. + n, err = f.WriteAt(p[:], 991) + require.Equal(t, 0, n) + require.Equal(t, err, status.Error(codes.InvalidArgument, "File size quota reached")) + testRemainingQuota(t, ctrl, underlyingPool, pool, 9, 1000) + + // A failed write should initially allocate all of the required + // space, but release the full amount once more. + underlyingFile.EXPECT().WriteAt(p[:], int64(990)).Return(0, status.Error(codes.Internal, "Cannot write data at all")) + n, err = f.WriteAt(p[:], 990) + require.Equal(t, 0, n) + require.Equal(t, err, status.Error(codes.Internal, "Cannot write data at all")) + testRemainingQuota(t, ctrl, underlyingPool, pool, 9, 1000) + + // A short write should initially allocate all of the required + // space, but release the amount of data that was not written. 
+ underlyingFile.EXPECT().WriteAt(p[:], int64(990)).Return(7, status.Error(codes.Internal, "Disk died in the middle of the write")) + n, err = f.WriteAt(p[:], 990) + require.Equal(t, 7, n) + require.Equal(t, err, status.Error(codes.Internal, "Disk died in the middle of the write")) + testRemainingQuota(t, ctrl, underlyingPool, pool, 9, 3) + + // I/O error while shrinking file should not cause the quotas to + // be affected. + underlyingFile.EXPECT().Truncate(int64(123)).Return(status.Error(codes.Internal, "Failed to adjust inode")) + require.Equal(t, f.Truncate(123), status.Error(codes.Internal, "Failed to adjust inode")) + testRemainingQuota(t, ctrl, underlyingPool, pool, 9, 3) + + // Successfully shrinking the file. + underlyingFile.EXPECT().Truncate(int64(123)).Return(nil) + require.NoError(t, f.Truncate(123)) + testRemainingQuota(t, ctrl, underlyingPool, pool, 9, 877) + + // Growing the file past the permitted size should not be + // allowed. + require.Equal(t, f.Truncate(1001), status.Error(codes.InvalidArgument, "File size quota reached")) + testRemainingQuota(t, ctrl, underlyingPool, pool, 9, 877) + + // I/O error while growing file should not cause the quotas to + // be affected. + underlyingFile.EXPECT().Truncate(int64(1000)).Return(status.Error(codes.Internal, "Failed to adjust inode")) + require.Equal(t, f.Truncate(1000), status.Error(codes.Internal, "Failed to adjust inode")) + testRemainingQuota(t, ctrl, underlyingPool, pool, 9, 877) + + // Successfully growing the file. + underlyingFile.EXPECT().Truncate(int64(1000)).Return(nil) + require.NoError(t, f.Truncate(1000)) + testRemainingQuota(t, ctrl, underlyingPool, pool, 9, 0) + + // Closing the file should bring the pool back in the initial + // state. 
+ underlyingFile.EXPECT().Close().Return(nil) + require.NoError(t, f.Close()) + testRemainingQuota(t, ctrl, underlyingPool, pool, 10, 1000) +} diff --git a/pkg/filesystem/sector_allocator.go b/pkg/filesystem/sector_allocator.go new file mode 100644 index 0000000..5b60438 --- /dev/null +++ b/pkg/filesystem/sector_allocator.go @@ -0,0 +1,24 @@ +package filesystem + +// SectorAllocator is used by BlockDeviceBackedFilePool to allocate +// space on the block device that is needed to store files. +type SectorAllocator interface { + // Allocate a contiguous range of sectors. + // + // Under high utilization, it may not be possible to allocate + // all space contiguously. In that case, this function returns + // fewer sectors than requested. Repeated calls to this function + // are necessary to request the desired amount of space, albeit + // fragmented. + // + // Sector numbers handed out by this function start at one. + // Zero can be used by the user of this interface for special + // purposes (e.g., sparse files). + AllocateContiguous(maximum int) (uint32, int, error) + // Free a contiguous range of sectors. It is invalid to call + // this function with the first sector number being zero. + FreeContiguous(first uint32, count int) + // Free a potentially fragmented list of sectors. Elements with + // value zero are ignored. 
+ FreeList(sectors []uint32) +} diff --git a/pkg/filesystem/virtual/BUILD.bazel b/pkg/filesystem/virtual/BUILD.bazel new file mode 100644 index 0000000..1d75ef7 --- /dev/null +++ b/pkg/filesystem/virtual/BUILD.bazel @@ -0,0 +1,107 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "virtual", + srcs = [ + "access_monitoring_initial_contents_fetcher.go", + "attributes.go", + "base_symlink_factory.go", + "blob_access_cas_file_factory.go", + "byte_range_lock_set.go", + "cas_file_factory.go", + "cas_initial_contents_fetcher.go", + "character_device_factory.go", + "child.go", + "directory.go", + "empty_initial_contents_fetcher.go", + "file_allocator.go", + "fuse_handle_allocator.go", + "handle_allocating_file_allocator.go", + "handle_allocating_symlink_factory.go", + "handle_allocator.go", + "in_memory_prepopulated_directory.go", + "initial_contents_fetcher.go", + "leaf.go", + "native_leaf.go", + "nfs_handle_allocator.go", + "node.go", + "permissions.go", + "placeholder_file.go", + "pool_backed_file_allocator.go", + "prepopulated_directory.go", + "read_only_directory.go", + "resolvable_digest_handle_allocator.go", + "resolvable_handle_allocating_cas_file_factory.go", + "sorter.go", + "special_file.go", + "stateless_handle_allocating_cas_file_factory.go", + "static_directory.go", + "status.go", + "symlink_factory.go", + "user_settable_symlink.go", + ], + importpath = "github.com/buildbarn/bb-remote-execution/pkg/filesystem/virtual", + visibility = ["//visibility:public"], + deps = [ + "//pkg/cas", + "//pkg/filesystem", + "//pkg/filesystem/access", + "//pkg/proto/outputpathpersistency", + "//pkg/proto/remoteoutputservice", + "//pkg/proto/tmp_installer", + "//pkg/sync", + "@com_github_bazelbuild_remote_apis//build/bazel/remote/execution/v2:execution", + "@com_github_buildbarn_bb_storage//pkg/auth", + "@com_github_buildbarn_bb_storage//pkg/blobstore", + "@com_github_buildbarn_bb_storage//pkg/blobstore/buffer", + 
"@com_github_buildbarn_bb_storage//pkg/clock", + "@com_github_buildbarn_bb_storage//pkg/digest", + "@com_github_buildbarn_bb_storage//pkg/filesystem", + "@com_github_buildbarn_bb_storage//pkg/filesystem/path", + "@com_github_buildbarn_bb_storage//pkg/random", + "@com_github_buildbarn_bb_storage//pkg/util", + "@org_golang_google_grpc//codes", + "@org_golang_google_grpc//status", + "@org_golang_google_protobuf//encoding/protojson", + "@org_golang_google_protobuf//types/known/emptypb", + ], +) + +go_test( + name = "virtual_test", + srcs = [ + "access_monitoring_initial_contents_fetcher_test.go", + "blob_access_cas_file_factory_test.go", + "byte_range_lock_set_test.go", + "cas_initial_contents_fetcher_test.go", + "character_device_factory_test.go", + "fuse_handle_allocator_test.go", + "in_memory_prepopulated_directory_test.go", + "nfs_handle_allocator_test.go", + "pool_backed_file_allocator_test.go", + "stateless_handle_allocating_cas_file_factory_test.go", + "static_directory_test.go", + "user_settable_symlink_test.go", + ], + deps = [ + ":virtual", + "//internal/mock", + "//pkg/proto/outputpathpersistency", + "//pkg/proto/remoteoutputservice", + "//pkg/proto/tmp_installer", + "@com_github_bazelbuild_remote_apis//build/bazel/remote/execution/v2:execution", + "@com_github_buildbarn_bb_storage//pkg/auth", + "@com_github_buildbarn_bb_storage//pkg/blobstore/buffer", + "@com_github_buildbarn_bb_storage//pkg/clock", + "@com_github_buildbarn_bb_storage//pkg/digest", + "@com_github_buildbarn_bb_storage//pkg/filesystem", + "@com_github_buildbarn_bb_storage//pkg/filesystem/path", + "@com_github_buildbarn_bb_storage//pkg/proto/auth", + "@com_github_buildbarn_bb_storage//pkg/testutil", + "@com_github_golang_mock//gomock", + "@com_github_stretchr_testify//require", + "@org_golang_google_grpc//codes", + "@org_golang_google_grpc//status", + "@org_golang_google_protobuf//types/known/structpb", + ], +) diff --git a/pkg/filesystem/virtual/access_monitoring_initial_contents_fetcher.go 
b/pkg/filesystem/virtual/access_monitoring_initial_contents_fetcher.go new file mode 100644 index 0000000..2de6d23 --- /dev/null +++ b/pkg/filesystem/virtual/access_monitoring_initial_contents_fetcher.go @@ -0,0 +1,59 @@ +package virtual + +import ( + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/access" + "github.com/buildbarn/bb-storage/pkg/filesystem/path" +) + +type accessMonitoringInitialContentsFetcher struct { + InitialContentsFetcher + unreadDirectoryMonitor access.UnreadDirectoryMonitor +} + +// NewAccessMonitoringInitialContentsFetcher decorates an +// InitialContentsFetcher, so that any read access to files and +// directories is reported to an UnreadDirectoryMonitor. This can be +// used to create file system access profiles of build actions. +func NewAccessMonitoringInitialContentsFetcher(base InitialContentsFetcher, rootDirectoryMonitor access.UnreadDirectoryMonitor) InitialContentsFetcher { + return &accessMonitoringInitialContentsFetcher{ + InitialContentsFetcher: base, + unreadDirectoryMonitor: rootDirectoryMonitor, + } +} + +func (icf *accessMonitoringInitialContentsFetcher) FetchContents(fileReadMonitorFactory FileReadMonitorFactory) (map[path.Component]InitialNode, error) { + // Call into underlying initial contents fetcher. Wrap the file + // read monitors that are installed on the files, so that we can + // detect file access. + readDirectoryMonitor := icf.unreadDirectoryMonitor.ReadDirectory() + contents, err := icf.InitialContentsFetcher.FetchContents(func(name path.Component) FileReadMonitor { + if fileReadMonitor := fileReadMonitorFactory(name); fileReadMonitor != nil { + return func() { + fileReadMonitor() + readDirectoryMonitor.ReadFile(name) + } + } + return func() { + readDirectoryMonitor.ReadFile(name) + } + }) + if err != nil { + return nil, err + } + + // Wrap all of the child directories, so that we can detect + // directory access. 
+ wrappedContents := make(map[path.Component]InitialNode, len(contents)) + for name, node := range contents { + childInitialContentsFetcher, leaf := node.GetPair() + if childInitialContentsFetcher != nil { + wrappedContents[name] = InitialNode{}.FromDirectory(&accessMonitoringInitialContentsFetcher{ + InitialContentsFetcher: childInitialContentsFetcher, + unreadDirectoryMonitor: readDirectoryMonitor.ResolvedDirectory(name), + }) + } else { + wrappedContents[name] = InitialNode{}.FromLeaf(leaf) + } + } + return wrappedContents, nil +} diff --git a/pkg/filesystem/virtual/access_monitoring_initial_contents_fetcher_test.go b/pkg/filesystem/virtual/access_monitoring_initial_contents_fetcher_test.go new file mode 100644 index 0000000..10e6e6e --- /dev/null +++ b/pkg/filesystem/virtual/access_monitoring_initial_contents_fetcher_test.go @@ -0,0 +1,96 @@ +package virtual_test + +import ( + "testing" + + "github.com/buildbarn/bb-remote-execution/internal/mock" + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/virtual" + "github.com/buildbarn/bb-storage/pkg/filesystem/path" + "github.com/buildbarn/bb-storage/pkg/testutil" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func TestAccessMonitoringInitialContentsFetcher(t *testing.T) { + ctrl := gomock.NewController(t) + + baseInitialContentsFetcher := mock.NewMockInitialContentsFetcher(ctrl) + rootUnreadDirectoryMonitor := mock.NewMockUnreadDirectoryMonitor(ctrl) + initialContentsFetcher := virtual.NewAccessMonitoringInitialContentsFetcher( + baseInitialContentsFetcher, + rootUnreadDirectoryMonitor) + + t.Run("FetchContentsFailed", func(t *testing.T) { + // If fetching the initial contents of a directory + // fails, we should not mark the directory as being + // read. The reason being that it may succeed later on. + // We can't call into ReadDirectory() multiple times. 
		rootReadDirectoryMonitor := mock.NewMockReadDirectoryMonitor(ctrl)
		rootUnreadDirectoryMonitor.EXPECT().ReadDirectory().Return(rootReadDirectoryMonitor)
		baseFileReadMonitorFactory := mock.NewMockFileReadMonitorFactory(ctrl)
		baseInitialContentsFetcher.EXPECT().FetchContents(gomock.Any()).
			Return(nil, status.Error(codes.Internal, "Network error"))

		_, err := initialContentsFetcher.FetchContents(baseFileReadMonitorFactory.Call)
		testutil.RequireEqualStatus(t, status.Error(codes.Internal, "Network error"), err)
	})

	t.Run("FetchContentsSucceeded", func(t *testing.T) {
		// Reading the directory's contents should report it as being
		// read. It should return children that are wrapped as well.
		baseChildInitialContentsFetcher := mock.NewMockInitialContentsFetcher(ctrl)
		baseChildFile := mock.NewMockNativeLeaf(ctrl)
		baseChildFileReadMonitor := mock.NewMockFileReadMonitor(ctrl)
		baseFileReadMonitorFactory := mock.NewMockFileReadMonitorFactory(ctrl)
		baseFileReadMonitorFactory.EXPECT().Call(path.MustNewComponent("file")).Return(baseChildFileReadMonitor.Call)
		var childFileReadMonitor virtual.FileReadMonitor
		baseInitialContentsFetcher.EXPECT().FetchContents(gomock.Any()).
			DoAndReturn(func(fileReadMonitorFactory virtual.FileReadMonitorFactory) (map[path.Component]virtual.InitialNode, error) {
				childFileReadMonitor = fileReadMonitorFactory(path.MustNewComponent("file"))
				return map[path.Component]virtual.InitialNode{
					path.MustNewComponent("dir"):  virtual.InitialNode{}.FromDirectory(baseChildInitialContentsFetcher),
					path.MustNewComponent("file"): virtual.InitialNode{}.FromLeaf(baseChildFile),
				}, nil
			})
		rootReadDirectoryMonitor := mock.NewMockReadDirectoryMonitor(ctrl)
		rootUnreadDirectoryMonitor.EXPECT().ReadDirectory().Return(rootReadDirectoryMonitor)
		childUnreadDirectoryMonitor := mock.NewMockUnreadDirectoryMonitor(ctrl)
		rootReadDirectoryMonitor.EXPECT().ResolvedDirectory(path.MustNewComponent("dir")).Return(childUnreadDirectoryMonitor)

		rootContents, err := initialContentsFetcher.FetchContents(baseFileReadMonitorFactory.Call)
		require.NoError(t, err)
		require.Len(t, rootContents, 2)

		t.Run("ChildDirectory", func(t *testing.T) {
			childInitialContentsFetcher, _ := rootContents[path.MustNewComponent("dir")].GetPair()

			t.Run("FetchContentsSucceeded", func(t *testing.T) {
				baseChildInitialContentsFetcher.EXPECT().FetchContents(gomock.Any()).Return(map[path.Component]virtual.InitialNode{}, nil)
				childReadDirectoryMonitor := mock.NewMockReadDirectoryMonitor(ctrl)
				childUnreadDirectoryMonitor.EXPECT().ReadDirectory().Return(childReadDirectoryMonitor)
				baseChildFileReadMonitorFactory := mock.NewMockFileReadMonitorFactory(ctrl)

				childContents, err := childInitialContentsFetcher.FetchContents(baseChildFileReadMonitorFactory.Call)
				require.NoError(t, err)
				require.Empty(t, childContents)
			})
		})

		t.Run("ChildFile", func(t *testing.T) {
			_, childFile := rootContents[path.MustNewComponent("file")].GetPair()
			require.Equal(t, baseChildFile, childFile)

			// If a notification is sent that the file's
			// contents have been read, it should be
			// duplicated both to the base file read
			// monitor, and the read directory monitor.
			baseChildFileReadMonitor.EXPECT().Call()
			rootReadDirectoryMonitor.EXPECT().ReadFile(path.MustNewComponent("file"))

			childFileReadMonitor()
		})
	})
}

// File: pkg/filesystem/virtual/attributes.go

package virtual

import (
	"time"

	"github.com/buildbarn/bb-storage/pkg/filesystem"
)

// AttributesMask is a bitmask of status attributes that need to be
// requested through Node.VirtualGetAttributes().
type AttributesMask uint32

const (
	// AttributesMaskChangeID requests the change ID, which clients
	// can use to determine if file data, directory contents, or
	// attributes of the node have changed.
	AttributesMaskChangeID AttributesMask = 1 << iota
	// AttributesMaskDeviceNumber requests the raw device number
	// (st_rdev).
	AttributesMaskDeviceNumber
	// AttributesMaskFileHandle requests an identifier of the file
	// that contains sufficient information to be able to resolve it
	// at a later point in time.
	AttributesMaskFileHandle
	// AttributesMaskFileType requests the file type (upper 4 bits
	// of st_mode).
	AttributesMaskFileType
	// AttributesMaskInodeNumber requests the inode number (st_ino).
	AttributesMaskInodeNumber
	// AttributesMaskLastDataModificationTime requests the last data
	// modification time (st_mtim).
	AttributesMaskLastDataModificationTime
	// AttributesMaskLinkCount requests the link count (st_nlink).
	AttributesMaskLinkCount
	// AttributesMaskPermissions requests the permissions (lowest 12
	// bits of st_mode).
	AttributesMaskPermissions
	// AttributesMaskSizeBytes requests the file size (st_size).
	AttributesMaskSizeBytes
)

// Attributes of a file, normally requested through stat() or readdir().
// A bitmask is used to track which attributes are set.
type Attributes struct {
	// fieldsPresent records which of the fields below hold valid
	// values; getters consult it before returning a field.
	fieldsPresent AttributesMask

	changeID                 uint64
	deviceNumber             filesystem.DeviceNumber
	fileHandle               []byte
	fileType                 filesystem.FileType
	inodeNumber              uint64
	lastDataModificationTime time.Time
	linkCount                uint32
	permissions              Permissions
	sizeBytes                uint64
}

// GetChangeID returns the change ID, which clients can use to determine
// if file data, directory contents, or attributes of the node have
// changed. Panics if the attribute was requested but never set.
func (a *Attributes) GetChangeID() uint64 {
	if a.fieldsPresent&AttributesMaskChangeID == 0 {
		panic("The change ID attribute is mandatory, meaning it should be set when requested")
	}
	return a.changeID
}

// SetChangeID sets the change ID, which clients can use to determine if
// file data, directory contents, or attributes of the node have
// changed.
func (a *Attributes) SetChangeID(changeID uint64) *Attributes {
	a.changeID = changeID
	a.fieldsPresent |= AttributesMaskChangeID
	return a
}

// GetDeviceNumber returns the raw device number (st_rdev), and whether
// the attribute is present.
func (a *Attributes) GetDeviceNumber() (filesystem.DeviceNumber, bool) {
	return a.deviceNumber, a.fieldsPresent&AttributesMaskDeviceNumber != 0
}

// SetDeviceNumber sets the raw device number (st_rdev).
func (a *Attributes) SetDeviceNumber(deviceNumber filesystem.DeviceNumber) *Attributes {
	a.deviceNumber = deviceNumber
	a.fieldsPresent |= AttributesMaskDeviceNumber
	return a
}

// GetFileHandle returns an identifier of the file that contains
// sufficient information to be able to resolve it at a later point in
// time. Panics if the attribute was requested but never set.
func (a *Attributes) GetFileHandle() []byte {
	if a.fieldsPresent&AttributesMaskFileHandle == 0 {
		panic("The file handle attribute is mandatory, meaning it should be set when requested")
	}
	return a.fileHandle
}

// SetFileHandle sets an identifier of the file that contains
// sufficient information to be able to resolve it at a later point in
// time.
func (a *Attributes) SetFileHandle(fileHandle []byte) *Attributes {
	a.fileHandle = fileHandle
	a.fieldsPresent |= AttributesMaskFileHandle
	return a
}

// GetFileType returns the file type (upper 4 bits of st_mode). Panics
// if the attribute was requested but never set.
func (a *Attributes) GetFileType() filesystem.FileType {
	if a.fieldsPresent&AttributesMaskFileType == 0 {
		panic("The file type attribute is mandatory, meaning it should be set when requested")
	}
	return a.fileType
}

// SetFileType sets the file type (upper 4 bits of st_mode).
func (a *Attributes) SetFileType(fileType filesystem.FileType) *Attributes {
	a.fileType = fileType
	a.fieldsPresent |= AttributesMaskFileType
	return a
}

// GetInodeNumber returns the inode number (st_ino). Panics if the
// attribute was requested but never set.
func (a *Attributes) GetInodeNumber() uint64 {
	if a.fieldsPresent&AttributesMaskInodeNumber == 0 {
		panic("The inode number attribute is mandatory, meaning it should be set when requested")
	}
	return a.inodeNumber
}

// SetInodeNumber sets the inode number (st_ino).
func (a *Attributes) SetInodeNumber(inodeNumber uint64) *Attributes {
	a.inodeNumber = inodeNumber
	a.fieldsPresent |= AttributesMaskInodeNumber
	return a
}

// GetLastDataModificationTime returns the last data modification time
// (st_mtim), and whether the attribute is present.
func (a *Attributes) GetLastDataModificationTime() (time.Time, bool) {
	return a.lastDataModificationTime, a.fieldsPresent&AttributesMaskLastDataModificationTime != 0
}

// SetLastDataModificationTime sets the last data modification time
// (st_mtim).
func (a *Attributes) SetLastDataModificationTime(lastDataModificationTime time.Time) *Attributes {
	a.lastDataModificationTime = lastDataModificationTime
	a.fieldsPresent |= AttributesMaskLastDataModificationTime
	return a
}

// GetLinkCount returns the link count (st_nlink). Panics if the
// attribute was requested but never set.
func (a *Attributes) GetLinkCount() uint32 {
	if a.fieldsPresent&AttributesMaskLinkCount == 0 {
		panic("The link count attribute is mandatory, meaning it should be set when requested")
	}
	return a.linkCount
}

// SetLinkCount sets the link count (st_nlink).
func (a *Attributes) SetLinkCount(linkCount uint32) *Attributes {
	a.linkCount = linkCount
	a.fieldsPresent |= AttributesMaskLinkCount
	return a
}

// GetPermissions returns the mode (lowest 12 bits of st_mode), and
// whether the attribute is present.
func (a *Attributes) GetPermissions() (Permissions, bool) {
	return a.permissions, a.fieldsPresent&AttributesMaskPermissions != 0
}

// SetPermissions sets the mode (lowest 12 bits of st_mode).
func (a *Attributes) SetPermissions(permissions Permissions) *Attributes {
	a.permissions = permissions
	a.fieldsPresent |= AttributesMaskPermissions
	return a
}

// GetSizeBytes returns the file size (st_size), and whether the
// attribute is present.
func (a *Attributes) GetSizeBytes() (uint64, bool) {
	return a.sizeBytes, a.fieldsPresent&AttributesMaskSizeBytes != 0
}

// SetSizeBytes sets the file size (st_size).
func (a *Attributes) SetSizeBytes(sizeBytes uint64) *Attributes {
	a.sizeBytes = sizeBytes
	a.fieldsPresent |= AttributesMaskSizeBytes
	return a
}

// File: pkg/filesystem/virtual/base_symlink_factory.go

package virtual

import (
	"context"
	"unicode/utf8"

	remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2"
	"github.com/buildbarn/bb-remote-execution/pkg/proto/outputpathpersistency"
	"github.com/buildbarn/bb-remote-execution/pkg/proto/remoteoutputservice"
	"github.com/buildbarn/bb-storage/pkg/digest"
	"github.com/buildbarn/bb-storage/pkg/filesystem"
	"github.com/buildbarn/bb-storage/pkg/filesystem/path"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

type symlinkFactory struct{}

// LookupSymlink returns an immutable symlink node pointing at target.
func (symlinkFactory) LookupSymlink(target []byte) NativeLeaf {
	return symlink{target: target}
}

// BaseSymlinkFactory can be used to create simple immutable symlink nodes.
var BaseSymlinkFactory SymlinkFactory = symlinkFactory{}

type symlink struct {
	placeholderFile

	// target is the raw symlink target; it is not required to be
	// valid UTF-8 at construction time.
	target []byte
}

// Readlink returns the symlink target as a string, failing if the
// target is not valid UTF-8.
func (f symlink) Readlink() (string, error) {
	if !utf8.Valid(f.target) {
		return "", status.Error(codes.InvalidArgument, "Symbolic link contents are not valid UTF-8")
	}
	return string(f.target), nil
}

func (f symlink) GetOutputServiceFileStatus(digestFunction *digest.Function) (*remoteoutputservice.FileStatus, error) {
	target, err := f.Readlink()
	if err != nil {
		return nil, err
	}
	return &remoteoutputservice.FileStatus{
		FileType: &remoteoutputservice.FileStatus_Symlink_{
			Symlink: &remoteoutputservice.FileStatus_Symlink{
				Target: target,
			},
		},
	}, nil
}

func (f symlink) AppendOutputPathPersistencyDirectoryNode(directory *outputpathpersistency.Directory, name path.Component) {
	// Symlinks whose target is not valid UTF-8 are silently
	// omitted from the persisted directory.
	if target, err := f.Readlink(); err == nil {
		directory.Symlinks = append(directory.Symlinks, &remoteexecution.SymlinkNode{
			Name:   name.String(),
			Target: target,
		})
	}
}

func (f symlink) VirtualGetAttributes(ctx context.Context, requested AttributesMask, attributes *Attributes) {
	attributes.SetChangeID(0)
	attributes.SetFileType(filesystem.FileTypeSymlink)
	attributes.SetPermissions(PermissionsRead | PermissionsWrite | PermissionsExecute)
	attributes.SetSizeBytes(uint64(len(f.target)))
}

func (f symlink) VirtualReadlink(ctx context.Context) ([]byte, Status) {
	return f.target, StatusOK
}

func (f symlink) VirtualSetAttributes(ctx context.Context, in *Attributes, requested AttributesMask, out *Attributes) Status {
	// Symlinks cannot be resized.
	if _, ok := in.GetSizeBytes(); ok {
		return StatusErrInval
	}
	f.VirtualGetAttributes(ctx, requested, out)
	return StatusOK
}

// File: pkg/filesystem/virtual/blob_access_cas_file_factory.go
package virtual

import (
	"context"
	"syscall"

	remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2"
	"github.com/buildbarn/bb-remote-execution/pkg/proto/outputpathpersistency"
	"github.com/buildbarn/bb-remote-execution/pkg/proto/remoteoutputservice"
	"github.com/buildbarn/bb-storage/pkg/blobstore"
	"github.com/buildbarn/bb-storage/pkg/digest"
	"github.com/buildbarn/bb-storage/pkg/filesystem"
	"github.com/buildbarn/bb-storage/pkg/filesystem/path"
	"github.com/buildbarn/bb-storage/pkg/util"
)

type blobAccessCASFileFactory struct {
	context                   context.Context
	contentAddressableStorage blobstore.BlobAccess
	errorLogger               util.ErrorLogger
}

// NewBlobAccessCASFileFactory creates a CASFileFactory that can be used
// to create FUSE files that are directly backed by BlobAccess. Files
// created by this factory are entirely immutable; it is only possible
// to read their contents.
func NewBlobAccessCASFileFactory(ctx context.Context, contentAddressableStorage blobstore.BlobAccess, errorLogger util.ErrorLogger) CASFileFactory {
	return &blobAccessCASFileFactory{
		context:                   ctx,
		contentAddressableStorage: contentAddressableStorage,
		errorLogger:               errorLogger,
	}
}

// LookupFile returns an immutable file node for the given CAS digest.
// The readMonitor argument must be nil; monitoring is the
// responsibility of a wrapping factory.
func (cff *blobAccessCASFileFactory) LookupFile(blobDigest digest.Digest, isExecutable bool, readMonitor FileReadMonitor) NativeLeaf {
	if readMonitor != nil {
		panic("The read monitor should have been set up by StatelessHandleAllocatingCASFileFactory")
	}
	baseFile := blobAccessCASFile{
		factory: cff,
		digest:  blobDigest,
	}
	if isExecutable {
		return &executableBlobAccessCASFile{blobAccessCASFile: baseFile}
	}
	return &regularBlobAccessCASFile{blobAccessCASFile: baseFile}
}

// blobAccessCASFile is the base type for all BlobAccess backed CAS
// files. This type is intentionally kept as small as possible, as many
// instances may be created. All shared options are shared in the
// factory object.
type blobAccessCASFile struct {
	factory *blobAccessCASFileFactory
	digest  digest.Digest
}

func (f *blobAccessCASFile) Link() Status {
	// As this file is stateless, we don't need to do any explicit
	// bookkeeping for hardlinks.
	return StatusOK
}

func (f *blobAccessCASFile) Readlink() (string, error) {
	// CAS files are regular files, never symlinks.
	return "", syscall.EINVAL
}

func (f *blobAccessCASFile) Unlink() {
}

func (f *blobAccessCASFile) UploadFile(ctx context.Context, contentAddressableStorage blobstore.BlobAccess, digestFunction digest.Function) (digest.Digest, error) {
	// This file is already backed by the Content Addressable
	// Storage. There is thus no need to upload it once again.
	//
	// The client that created this build action already called
	// FindMissingBlobs() on this file, so there's also a high
	// degree of certainty that this file won't disappear from the
	// Content Addressable Storage any time soon.
	return f.digest, nil
}

func (f *blobAccessCASFile) GetContainingDigests() digest.Set {
	return f.digest.ToSingletonSet()
}

func (f *blobAccessCASFile) GetOutputServiceFileStatus(digestFunction *digest.Function) (*remoteoutputservice.FileStatus, error) {
	fileStatusFile := remoteoutputservice.FileStatus_File{}
	if digestFunction != nil {
		// Assume that the file uses the same hash algorithm as
		// the provided digest function. Incompatible files are
		// removed from storage at the start of the build.
		fileStatusFile.Digest = f.digest.GetProto()
	}
	return &remoteoutputservice.FileStatus{
		FileType: &remoteoutputservice.FileStatus_File_{
			File: &fileStatusFile,
		},
	}, nil
}

func (f *blobAccessCASFile) VirtualAllocate(off, size uint64) Status {
	return StatusErrWrongType
}

// virtualGetAttributesCommon fills in the attributes that are identical
// for regular and executable CAS files.
func (f *blobAccessCASFile) virtualGetAttributesCommon(attributes *Attributes) {
	attributes.SetChangeID(0)
	attributes.SetFileType(filesystem.FileTypeRegularFile)
	attributes.SetSizeBytes(uint64(f.digest.GetSizeBytes()))
}

func (f *blobAccessCASFile) VirtualSeek(offset uint64, regionType filesystem.RegionType) (*uint64, Status) {
	// The file is fully backed by data, so the entire range
	// [0, sizeBytes) is one data region, followed by the implicit
	// hole at end-of-file.
	sizeBytes := uint64(f.digest.GetSizeBytes())
	switch regionType {
	case filesystem.Data:
		if offset >= sizeBytes {
			return nil, StatusErrNXIO
		}
		return &offset, StatusOK
	case filesystem.Hole:
		if offset >= sizeBytes {
			return nil, StatusErrNXIO
		}
		return &sizeBytes, StatusOK
	default:
		panic("Requests for other seek modes should have been intercepted")
	}
}

func (f *blobAccessCASFile) VirtualRead(buf []byte, off uint64) (int, bool, Status) {
	size := uint64(f.digest.GetSizeBytes())
	buf, eof := BoundReadToFileSize(buf, off, size)
	if len(buf) > 0 {
		if n, err := f.factory.contentAddressableStorage.Get(f.factory.context, f.digest).ReadAt(buf, int64(off)); n != len(buf) {
			f.factory.errorLogger.Log(util.StatusWrapf(err, "Failed to read from %s at offset %d", f.digest, off))
			return 0, false, StatusErrIO
		}
	}
	return len(buf), eof, StatusOK
}

func (f *blobAccessCASFile) VirtualReadlink(ctx context.Context) ([]byte, Status) {
	return nil, StatusErrInval
}

func (f *blobAccessCASFile) VirtualClose(shareAccess ShareMask) {}

// virtualSetAttributesCommon validates attribute changes that are
// handled identically for regular and executable CAS files.
func (f *blobAccessCASFile) virtualSetAttributesCommon(in *Attributes) Status {
	// TODO: chmod() calls against CAS backed files should not be
	// permitted. Unfortunately, we allowed it in the past. When
	// using bb_clientd's Remote Output Service, we see Bazel
	// performing such calls, so we can't forbid it right now.
	/*
		if _, ok := in.GetPermissions(); ok {
			return StatusErrPerm
		}
	*/
	if _, ok := in.GetSizeBytes(); ok {
		return StatusErrAccess
	}
	return StatusOK
}

func (f *blobAccessCASFile) VirtualWrite(buf []byte, off uint64) (int, Status) {
	panic("Request to write to read-only file should have been intercepted")
}

// regularBlobAccessCASFile is the type of BlobAccess backed files that
// are not executable (-x).
type regularBlobAccessCASFile struct {
	blobAccessCASFile
}

func (f *regularBlobAccessCASFile) AppendOutputPathPersistencyDirectoryNode(directory *outputpathpersistency.Directory, name path.Component) {
	directory.Files = append(directory.Files, &remoteexecution.FileNode{
		Name:         name.String(),
		Digest:       f.digest.GetProto(),
		IsExecutable: false,
	})
}

func (f *regularBlobAccessCASFile) VirtualGetAttributes(ctx context.Context, requested AttributesMask, attributes *Attributes) {
	f.virtualGetAttributesCommon(attributes)
	attributes.SetPermissions(PermissionsRead)
}

func (f *regularBlobAccessCASFile) VirtualOpenSelf(ctx context.Context, shareAccess ShareMask, options *OpenExistingOptions, requested AttributesMask, attributes *Attributes) Status {
	// Only read-only access is permitted; truncation would modify
	// immutable CAS contents.
	if shareAccess&^ShareMaskRead != 0 || options.Truncate {
		return StatusErrAccess
	}
	f.VirtualGetAttributes(ctx, requested, attributes)
	return StatusOK
}

func (f *regularBlobAccessCASFile) VirtualSetAttributes(ctx context.Context, in *Attributes, requested AttributesMask, out *Attributes) Status {
	if s := f.virtualSetAttributesCommon(in); s != StatusOK {
		return s
	}
	f.VirtualGetAttributes(ctx, requested, out)
	return StatusOK
}

// executableBlobAccessCASFile is the type of BlobAccess backed files
// that are executable (+x).
type executableBlobAccessCASFile struct {
	blobAccessCASFile
}

func (f *executableBlobAccessCASFile) AppendOutputPathPersistencyDirectoryNode(directory *outputpathpersistency.Directory, name path.Component) {
	directory.Files = append(directory.Files, &remoteexecution.FileNode{
		Name:         name.String(),
		Digest:       f.digest.GetProto(),
		IsExecutable: true,
	})
}

func (f *executableBlobAccessCASFile) VirtualGetAttributes(ctx context.Context, requested AttributesMask, attributes *Attributes) {
	f.virtualGetAttributesCommon(attributes)
	attributes.SetPermissions(PermissionsRead | PermissionsExecute)
}

func (f *executableBlobAccessCASFile) VirtualOpenSelf(ctx context.Context, shareAccess ShareMask, options *OpenExistingOptions, requested AttributesMask, attributes *Attributes) Status {
	// Only read-only access is permitted; truncation would modify
	// immutable CAS contents.
	if shareAccess&^ShareMaskRead != 0 || options.Truncate {
		return StatusErrAccess
	}
	f.VirtualGetAttributes(ctx, requested, attributes)
	return StatusOK
}

func (f *executableBlobAccessCASFile) VirtualSetAttributes(ctx context.Context, in *Attributes, requested AttributesMask, out *Attributes) Status {
	if s := f.virtualSetAttributesCommon(in); s != StatusOK {
		return s
	}
	f.VirtualGetAttributes(ctx, requested, out)
	return StatusOK
}

// File: pkg/filesystem/virtual/blob_access_cas_file_factory_test.go

package virtual_test

import (
	"context"
	"testing"

	remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2"
	"github.com/buildbarn/bb-remote-execution/internal/mock"
	"github.com/buildbarn/bb-remote-execution/pkg/filesystem/virtual"
	"github.com/buildbarn/bb-remote-execution/pkg/proto/outputpathpersistency"
	"github.com/buildbarn/bb-remote-execution/pkg/proto/remoteoutputservice"
	"github.com/buildbarn/bb-storage/pkg/digest"
	"github.com/buildbarn/bb-storage/pkg/filesystem"
	"github.com/buildbarn/bb-storage/pkg/filesystem/path"
	"github.com/buildbarn/bb-storage/pkg/testutil"
	"github.com/golang/mock/gomock"
	"github.com/stretchr/testify/require"
)

// blobAccessCASFileFactoryAttributesMask is the set of attributes
// requested by the tests below.
const blobAccessCASFileFactoryAttributesMask = virtual.AttributesMaskChangeID |
	virtual.AttributesMaskFileType |
	virtual.AttributesMaskPermissions |
	virtual.AttributesMaskSizeBytes

func TestBlobAccessCASFileFactoryVirtualSeek(t *testing.T) {
	ctrl, ctx := gomock.WithContext(context.Background(), t)

	contentAddressableStorage := mock.NewMockBlobAccess(ctrl)
	errorLogger := mock.NewMockErrorLogger(ctrl)
	casFileFactory := virtual.NewBlobAccessCASFileFactory(
		ctx,
		contentAddressableStorage,
		errorLogger)

	digest := digest.MustNewDigest("example", remoteexecution.DigestFunction_MD5, "8b1a9953c4611296a827abf8c47804d7", 123)
	f := casFileFactory.LookupFile(digest, false, nil)
	var out virtual.Attributes
	f.VirtualGetAttributes(ctx, blobAccessCASFileFactoryAttributesMask, &out)
	require.Equal(
		t,
		(&virtual.Attributes{}).
			SetChangeID(0).
			SetFileType(filesystem.FileTypeRegularFile).
			SetPermissions(virtual.PermissionsRead).
			SetSizeBytes(123),
		&out)

	t.Run("SEEK_DATA", func(t *testing.T) {
		offset, s := f.VirtualSeek(0, filesystem.Data)
		require.Equal(t, virtual.StatusOK, s)
		require.Equal(t, uint64(0), *offset)

		offset, s = f.VirtualSeek(122, filesystem.Data)
		require.Equal(t, virtual.StatusOK, s)
		require.Equal(t, uint64(122), *offset)

		_, s = f.VirtualSeek(123, filesystem.Data)
		require.Equal(t, virtual.StatusErrNXIO, s)
	})

	t.Run("SEEK_HOLE", func(t *testing.T) {
		offset, s := f.VirtualSeek(0, filesystem.Hole)
		require.Equal(t, virtual.StatusOK, s)
		require.Equal(t, uint64(123), *offset)

		offset, s = f.VirtualSeek(122, filesystem.Hole)
		require.Equal(t, virtual.StatusOK, s)
		require.Equal(t, uint64(123), *offset)

		_, s = f.VirtualSeek(123, filesystem.Hole)
		require.Equal(t, virtual.StatusErrNXIO, s)
	})
}

func TestBlobAccessCASFileFactoryGetContainingDigests(t *testing.T) {
	ctrl, ctx := gomock.WithContext(context.Background(), t)

	contentAddressableStorage := mock.NewMockBlobAccess(ctrl)
	errorLogger := mock.NewMockErrorLogger(ctrl)
	casFileFactory := virtual.NewBlobAccessCASFileFactory(
		ctx,
		contentAddressableStorage,
		errorLogger)

	digest := digest.MustNewDigest("example", remoteexecution.DigestFunction_MD5, "d7ac2672607ba20a44d01d03a6685b24", 400)
	f := casFileFactory.LookupFile(digest, true, nil)
	var out virtual.Attributes
	f.VirtualGetAttributes(ctx, blobAccessCASFileFactoryAttributesMask, &out)
	require.Equal(
		t,
		(&virtual.Attributes{}).
			SetChangeID(0).
			SetFileType(filesystem.FileTypeRegularFile).
			SetPermissions(virtual.PermissionsRead|virtual.PermissionsExecute).
			SetSizeBytes(400),
		&out)

	require.Equal(t, digest.ToSingletonSet(), f.GetContainingDigests())
}

func TestBlobAccessCASFileFactoryGetOutputServiceFileStatus(t *testing.T) {
	ctrl, ctx := gomock.WithContext(context.Background(), t)

	contentAddressableStorage := mock.NewMockBlobAccess(ctrl)
	errorLogger := mock.NewMockErrorLogger(ctrl)
	casFileFactory := virtual.NewBlobAccessCASFileFactory(
		ctx,
		contentAddressableStorage,
		errorLogger)

	digest := digest.MustNewDigest("example", remoteexecution.DigestFunction_MD5, "8b1a9953c4611296a827abf8c47804d7", 123)
	f := casFileFactory.LookupFile(digest, false, nil)
	var out virtual.Attributes
	f.VirtualGetAttributes(ctx, blobAccessCASFileFactoryAttributesMask, &out)
	require.Equal(
		t,
		(&virtual.Attributes{}).
			SetChangeID(0).
			SetFileType(filesystem.FileTypeRegularFile).
			SetPermissions(virtual.PermissionsRead).
			SetSizeBytes(123),
		&out)

	// When the provided digest.Function is nil, we should only
	// report that this is a file.
	fileStatus, err := f.GetOutputServiceFileStatus(nil)
	require.NoError(t, err)
	testutil.RequireEqualProto(t, &remoteoutputservice.FileStatus{
		FileType: &remoteoutputservice.FileStatus_File_{
			File: &remoteoutputservice.FileStatus_File{},
		},
	}, fileStatus)

	// When the provided digest.Function is set, we should return
	// the digest of the file as well. There is no need to perform
	// any I/O, as the digest is already embedded in the file.
	digestFunction := digest.GetDigestFunction()
	fileStatus, err = f.GetOutputServiceFileStatus(&digestFunction)
	require.NoError(t, err)
	testutil.RequireEqualProto(t, &remoteoutputservice.FileStatus{
		FileType: &remoteoutputservice.FileStatus_File_{
			File: &remoteoutputservice.FileStatus_File{
				Digest: &remoteexecution.Digest{
					Hash:      "8b1a9953c4611296a827abf8c47804d7",
					SizeBytes: 123,
				},
			},
		},
	}, fileStatus)
}

func TestBlobAccessCASFileFactoryAppendOutputPathPersistencyDirectoryNode(t *testing.T) {
	ctrl, ctx := gomock.WithContext(context.Background(), t)

	contentAddressableStorage := mock.NewMockBlobAccess(ctrl)
	errorLogger := mock.NewMockErrorLogger(ctrl)
	casFileFactory := virtual.NewBlobAccessCASFileFactory(
		ctx,
		contentAddressableStorage,
		errorLogger)

	digest1 := digest.MustNewDigest("example", remoteexecution.DigestFunction_MD5, "8b1a9953c4611296a827abf8c47804d7", 123)
	f1 := casFileFactory.LookupFile(digest1, false, nil)
	var out1 virtual.Attributes
	f1.VirtualGetAttributes(ctx, blobAccessCASFileFactoryAttributesMask, &out1)
	require.Equal(
		t,
		(&virtual.Attributes{}).
			SetChangeID(0).
			SetFileType(filesystem.FileTypeRegularFile).
			SetPermissions(virtual.PermissionsRead).
			SetSizeBytes(123),
		&out1)

	digest2 := digest.MustNewDigest("example", remoteexecution.DigestFunction_MD5, "0282d25bf4aefdb9cb50ccc78d974f0a", 456)
	f2 := casFileFactory.LookupFile(digest2, true, nil)
	var out2 virtual.Attributes
	f2.VirtualGetAttributes(ctx, blobAccessCASFileFactoryAttributesMask, &out2)
	require.Equal(
		t,
		(&virtual.Attributes{}).
			SetChangeID(0).
			SetFileType(filesystem.FileTypeRegularFile).
			SetPermissions(virtual.PermissionsRead|virtual.PermissionsExecute).
			SetSizeBytes(456),
		&out2)

	var directory outputpathpersistency.Directory
	f1.AppendOutputPathPersistencyDirectoryNode(&directory, path.MustNewComponent("hello"))
	f2.AppendOutputPathPersistencyDirectoryNode(&directory, path.MustNewComponent("world"))
	testutil.RequireEqualProto(t, &outputpathpersistency.Directory{
		Files: []*remoteexecution.FileNode{
			{
				Name: "hello",
				Digest: &remoteexecution.Digest{
					Hash:      "8b1a9953c4611296a827abf8c47804d7",
					SizeBytes: 123,
				},
				IsExecutable: false,
			},
			{
				Name: "world",
				Digest: &remoteexecution.Digest{
					Hash:      "0282d25bf4aefdb9cb50ccc78d974f0a",
					SizeBytes: 456,
				},
				IsExecutable: true,
			},
		},
	}, &directory)
}

// File: pkg/filesystem/virtual/byte_range_lock_set.go

package virtual

// ByteRangeLockType is an enumeration that controls what kind of lock
// needs to be acquired.
type ByteRangeLockType int

const (
	// ByteRangeLockTypeUnlocked indicates that a byte range should
	// be unlocked. This value can only be provided to Set(); not
	// Test(). It is equivalent to POSIX's F_UNLCK.
	ByteRangeLockTypeUnlocked ByteRangeLockType = iota
	// ByteRangeLockTypeLockedExclusive indicates that a byte range
	// should be locked exclusively for writing. It is equivalent to
	// POSIX's F_WRLCK.
	ByteRangeLockTypeLockedExclusive
	// ByteRangeLockTypeLockedShared indicates that a byte range
	// should be locked shared for reading. It is equivalent to
	// F_RDLCK.
	ByteRangeLockTypeLockedShared
)

// ByteRangeLock holds information on a lock held on a non-empty range
// of bytes. Each lock has an owner associated with it. This field is
// only checked for equality, allowing it to merge adjacent or
// overlapping locks held by the same owner.
+type ByteRangeLock[Owner comparable] struct { + Start uint64 + End uint64 + Owner Owner + Type ByteRangeLockType +} + +type byteRangeLockEntry[Owner comparable] struct { + previous *byteRangeLockEntry[Owner] + next *byteRangeLockEntry[Owner] + lock ByteRangeLock[Owner] +} + +func (le *byteRangeLockEntry[Owner]) insertBefore(leNew *byteRangeLockEntry[Owner]) { + leNew.previous = le.previous + leNew.next = le + leNew.previous.next = leNew + le.previous = leNew +} + +func (le *byteRangeLockEntry[Owner]) remove() { + le.previous.next = le.next + le.next.previous = le.previous + le.previous = nil + le.next = nil +} + +// ByteRangeLockSet is a set for ByteRangeLocks applied against the same +// file. The set is modeled as a linked list, where entries are sorted +// by starting address. All entries describe disjoint non-empty ranges +// of bytes, except for shared locks with distinct owners. +type ByteRangeLockSet[Owner comparable] struct { + list byteRangeLockEntry[Owner] +} + +// Initialize the ByteRangeLockSet, so that it does not contain any +// locks. +func (ls *ByteRangeLockSet[Owner]) Initialize() { + ls.list.previous = &ls.list + ls.list.next = &ls.list +} + +// Set a byte range to a given lock. Calls to this method must generally +// be preceded by calls to Test(), as it is assumed the lock to be +// inserted does not conflict with the locks that are currently held. +// +// This method returns the increase (positive) or decrease (negative) of +// the number of entries in the set. This can be used by the caller to +// determine when the owner of the lock can be released. +func (ls *ByteRangeLockSet[Owner]) Set(lProvided *ByteRangeLock[Owner]) int { + leNew := &byteRangeLockEntry[Owner]{ + lock: *lProvided, + } + lNew := &leNew.lock + + // Find the spot where to insert the new entry. 
+ var leTrailing *byteRangeLockEntry[Owner] + leSearch := ls.list.next + for { + lSearch := &leSearch.lock + if leSearch == &ls.list || lNew.Start <= lSearch.Start { + // Found the spot where the new entry should be + // inserted. + break + } + + // The new entry starts after the existing entry. + if lSearch.Owner == lNew.Owner { + if lNew.Type == lSearch.Type { + if lNew.Start <= lSearch.End { + // Beginning of the new entry touches + // or overlaps with an entry of the + // same type. Grow the entry and + // insert it, so it gets combined. + lNew.Start = lSearch.Start + break + } + } else { + if lNew.Start < lSearch.End { + // Beginning of the new entry overlaps + // with an entry of a different type. + // Truncate the existing entry. + if lNew.End < lSearch.End { + // New entry punches a hole in + // the existing entry. Split it. + if leTrailing != nil { + panic("New entry has multiple trailing overlapping entries, which is impossible") + } + leTrailing = &byteRangeLockEntry[Owner]{ + lock: ByteRangeLock[Owner]{ + Start: lNew.End, + End: lSearch.End, + Owner: lSearch.Owner, + Type: lSearch.Type, + }, + } + lSearch.End = lNew.Start + } + lSearch.End = lNew.Start + } + } + } + leSearch = leSearch.next + } + + // Insert the new entry, except if we're unlocking. + delta := 0 + if lNew.Type != ByteRangeLockTypeUnlocked { + leSearch.insertBefore(leNew) + delta++ + } + + // Merge successive entries into the new entry. + for { + lSearch := &leSearch.lock + if leSearch == &ls.list || lNew.End < lSearch.Start { + // New entry does not affect any entries beyond + // this point. + break + } + + leSearchNext := leSearch.next + if lSearch.Owner == lNew.Owner { + if lNew.End >= lSearch.End { + // New entry completely overlaps with an + // existing entry. Remove the existing + // entry. + leSearch.remove() + delta-- + } else { + if lNew.Type == lSearch.Type { + // End of new entry touches or + // overlaps with an entry of the + // same type. Combine them. 
+ lNew.End = lSearch.End + leSearch.remove() + delta-- + } else { + // End of new entry touches or + // overlaps with an entry of a + // different type. Remove the + // leading part of the entry and + // move it to a location further + // down the list. + if leTrailing != nil { + panic("New entry has multiple trailing overlapping entries, which is impossible") + } + leTrailing = leSearch + lSearch.Start = lNew.End + leSearch.remove() + delta-- + } + } + } + leSearch = leSearchNext + } + + // Insert the trailing part of the entry whose leading part got + // overwritten, or in which a hole got punched. + if leTrailing != nil { + leSearch.insertBefore(leTrailing) + delta++ + } + return delta +} + +// Test whether a new lock to be inserted does not conflict with any of +// the other locks registered in the set. +func (ls *ByteRangeLockSet[Owner]) Test(lTest *ByteRangeLock[Owner]) *ByteRangeLock[Owner] { + leSearch := ls.list.next + for { + lSearch := &leSearch.lock + if leSearch == &ls.list || lSearch.Start >= lTest.End { + // At the end of the list, or there are no + // longer any entries that follow that have any + // overlap. + return nil + } + if lSearch.Owner != lTest.Owner && lSearch.End > lTest.Start && (lSearch.Type == ByteRangeLockTypeLockedExclusive || lTest.Type == ByteRangeLockTypeLockedExclusive) { + // Found a conflicting entry. + return lSearch + } + leSearch = leSearch.next + } +} diff --git a/pkg/filesystem/virtual/byte_range_lock_set_test.go b/pkg/filesystem/virtual/byte_range_lock_set_test.go new file mode 100644 index 0000000..434c9e1 --- /dev/null +++ b/pkg/filesystem/virtual/byte_range_lock_set_test.go @@ -0,0 +1,107 @@ +package virtual_test + +import ( + "math" + "testing" + + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/virtual" + "github.com/stretchr/testify/require" +) + +func TestByteRangeLockSet(t *testing.T) { + var ls virtual.ByteRangeLockSet[rune] + ls.Initialize() + + // Create two locks that are disjoint. 
+ require.Equal(t, 1, ls.Set(&virtual.ByteRangeLock[rune]{ + Start: 0, + End: 10, + Owner: 'A', + Type: virtual.ByteRangeLockTypeLockedExclusive, + })) + + require.Equal(t, 1, ls.Set(&virtual.ByteRangeLock[rune]{ + Start: 20, + End: 30, + Owner: 'A', + Type: virtual.ByteRangeLockTypeLockedExclusive, + })) + + // Placing a third lock in between should cause the locks to be + // merged into one. + require.Equal(t, -1, ls.Set(&virtual.ByteRangeLock[rune]{ + Start: 10, + End: 20, + Owner: 'A', + Type: virtual.ByteRangeLockTypeLockedExclusive, + })) + + // Downgrading a range in the middle to a shared lock should + // cause the exclusive lock to be split and a new shared lock to + // be inserted in between. + require.Equal(t, 2, ls.Set(&virtual.ByteRangeLock[rune]{ + Start: 10, + End: 20, + Owner: 'A', + Type: virtual.ByteRangeLockTypeLockedShared, + })) + + // Now that locks have been set up, check whether querying locks + // through Test() works as expected. + require.Equal( + t, + &virtual.ByteRangeLock[rune]{ + Start: 10, + End: 20, + Owner: 'A', + Type: virtual.ByteRangeLockTypeLockedShared, + }, + ls.Test(&virtual.ByteRangeLock[rune]{ + Start: 14, + End: 16, + Owner: 'B', + Type: virtual.ByteRangeLockTypeLockedExclusive, + })) + require.Equal( + t, + &virtual.ByteRangeLock[rune]{ + Start: 0, + End: 10, + Owner: 'A', + Type: virtual.ByteRangeLockTypeLockedExclusive, + }, + ls.Test(&virtual.ByteRangeLock[rune]{ + Start: 8, + End: 12, + Owner: 'B', + Type: virtual.ByteRangeLockTypeLockedShared, + })) + require.Nil(t, ls.Test(&virtual.ByteRangeLock[rune]{ + Start: 14, + End: 16, + Owner: 'B', + Type: virtual.ByteRangeLockTypeLockedShared, + })) + require.Nil(t, ls.Test(&virtual.ByteRangeLock[rune]{ + Start: 14, + End: 16, + Owner: 'A', + Type: virtual.ByteRangeLockTypeLockedExclusive, + })) + + // Fully unlocking the file should cause all locks to be + // dropped. 
+ require.Equal(t, -3, ls.Set(&virtual.ByteRangeLock[rune]{ + Start: 0, + End: math.MaxUint64, + Owner: 'A', + Type: virtual.ByteRangeLockTypeUnlocked, + })) + + require.Equal(t, 0, ls.Set(&virtual.ByteRangeLock[rune]{ + Start: 0, + End: math.MaxUint64, + Owner: 'A', + Type: virtual.ByteRangeLockTypeUnlocked, + })) +} diff --git a/pkg/filesystem/virtual/cas_file_factory.go b/pkg/filesystem/virtual/cas_file_factory.go new file mode 100644 index 0000000..a25ccc9 --- /dev/null +++ b/pkg/filesystem/virtual/cas_file_factory.go @@ -0,0 +1,11 @@ +package virtual + +import ( + "github.com/buildbarn/bb-storage/pkg/digest" +) + +// CASFileFactory is a factory type for files whose contents correspond +// with an object stored in the Content Addressable Storage (CAS). +type CASFileFactory interface { + LookupFile(digest digest.Digest, isExecutable bool, readMonitor FileReadMonitor) NativeLeaf +} diff --git a/pkg/filesystem/virtual/cas_initial_contents_fetcher.go b/pkg/filesystem/virtual/cas_initial_contents_fetcher.go new file mode 100644 index 0000000..6168165 --- /dev/null +++ b/pkg/filesystem/virtual/cas_initial_contents_fetcher.go @@ -0,0 +1,189 @@ +package virtual + +import ( + "context" + + "github.com/buildbarn/bb-remote-execution/pkg/cas" + "github.com/buildbarn/bb-storage/pkg/digest" + "github.com/buildbarn/bb-storage/pkg/filesystem/path" + "github.com/buildbarn/bb-storage/pkg/util" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +type casInitialContentsFetcherOptions struct { + context context.Context + casFileFactory CASFileFactory + symlinkFactory SymlinkFactory + digestFunction digest.Function +} + +type casInitialContentsFetcher struct { + options *casInitialContentsFetcherOptions + directoryWalker cas.DirectoryWalker +} + +// NewCASInitialContentsFetcher creates an InitialContentsFetcher that +// lazily instantiates a full directory hierarchy based on directory +// objects stored in the Content Addressable Storage (CAS). 
+// +// Upon request, it loads the root directory of the tree and converts +// all of the children to either additional InitialContentFetchers +// (directories), FileBackedFiles (regular files) or Symlinks (symbolic +// links). +func NewCASInitialContentsFetcher(ctx context.Context, directoryWalker cas.DirectoryWalker, casFileFactory CASFileFactory, symlinkFactory SymlinkFactory, digestFunction digest.Function) InitialContentsFetcher { + return &casInitialContentsFetcher{ + options: &casInitialContentsFetcherOptions{ + context: ctx, + casFileFactory: casFileFactory, + symlinkFactory: symlinkFactory, + digestFunction: digestFunction, + }, + directoryWalker: directoryWalker, + } +} + +func (icf *casInitialContentsFetcher) fetchContentsUnwrapped(fileReadMonitorFactory FileReadMonitorFactory) (map[path.Component]InitialNode, error) { + directory, err := icf.directoryWalker.GetDirectory(icf.options.context) + if err != nil { + return nil, err + } + + // Create InitialContentsFetchers for all child directories. + // These can yield even more InitialContentsFetchers for + // grandchildren. 
+ children := make(map[path.Component]InitialNode, len(directory.Directories)+len(directory.Files)+len(directory.Symlinks)) + for _, entry := range directory.Directories { + component, ok := path.NewComponent(entry.Name) + if !ok { + return nil, status.Errorf(codes.InvalidArgument, "Directory %#v has an invalid name", entry.Name) + } + if _, ok := children[component]; ok { + return nil, status.Errorf(codes.InvalidArgument, "Directory contains multiple children named %#v", entry.Name) + } + + childDigest, err := icf.options.digestFunction.NewDigestFromProto(entry.Digest) + if err != nil { + return nil, util.StatusWrapf(err, "Failed to obtain digest for directory %#v", entry.Name) + } + children[component] = InitialNode{}.FromDirectory(&casInitialContentsFetcher{ + options: icf.options, + directoryWalker: icf.directoryWalker.GetChild(childDigest), + }) + } + + // Ensure that leaves are properly unlinked if this method fails. + leavesToUnlink := make([]NativeLeaf, 0, len(directory.Files)+len(directory.Symlinks)) + defer func() { + for _, leaf := range leavesToUnlink { + leaf.Unlink() + } + }() + + // Create Content Addressable Storage backed read-only files. + for _, entry := range directory.Files { + component, ok := path.NewComponent(entry.Name) + if !ok { + return nil, status.Errorf(codes.InvalidArgument, "File %#v has an invalid name", entry.Name) + } + if _, ok := children[component]; ok { + return nil, status.Errorf(codes.InvalidArgument, "Directory contains multiple children named %#v", entry.Name) + } + + childDigest, err := icf.options.digestFunction.NewDigestFromProto(entry.Digest) + if err != nil { + return nil, util.StatusWrapf(err, "Failed to obtain digest for file %#v", entry.Name) + } + leaf := icf.options.casFileFactory.LookupFile(childDigest, entry.IsExecutable, fileReadMonitorFactory(component)) + children[component] = InitialNode{}.FromLeaf(leaf) + leavesToUnlink = append(leavesToUnlink, leaf) + } + + // Create symbolic links. 
+ for _, entry := range directory.Symlinks { + component, ok := path.NewComponent(entry.Name) + if !ok { + return nil, status.Errorf(codes.InvalidArgument, "Symlink %#v has an invalid name", entry.Name) + } + if _, ok := children[component]; ok { + return nil, status.Errorf(codes.InvalidArgument, "Directory contains multiple children named %#v", entry.Name) + } + + leaf := icf.options.symlinkFactory.LookupSymlink([]byte(entry.Target)) + children[component] = InitialNode{}.FromLeaf(leaf) + leavesToUnlink = append(leavesToUnlink, leaf) + } + + leavesToUnlink = nil + return children, nil +} + +func (icf *casInitialContentsFetcher) FetchContents(fileReadMonitorFactory FileReadMonitorFactory) (map[path.Component]InitialNode, error) { + children, err := icf.fetchContentsUnwrapped(fileReadMonitorFactory) + if err != nil { + return nil, util.StatusWrap(err, icf.directoryWalker.GetDescription()) + } + return children, nil +} + +func (icf *casInitialContentsFetcher) GetContainingDigests(ctx context.Context) (digest.Set, error) { + gatherer := casContainingDigestsGatherer{ + context: ctx, + digestFunction: icf.options.digestFunction, + digests: digest.NewSetBuilder(), + directoriesGathered: map[digest.Digest]struct{}{}, + } + err := gatherer.traverse(icf.directoryWalker) + if err != nil { + return digest.EmptySet, err + } + return gatherer.digests.Build(), nil +} + +// casContainingDigestsGatherer is used by casInitialContentsFetcher's +// GetContainingDigests() to compute the transitive closure of digests +// referenced by a hierarchy of Directory objects. +type casContainingDigestsGatherer struct { + context context.Context + digestFunction digest.Function + digests digest.SetBuilder + directoriesGathered map[digest.Digest]struct{} +} + +func (g *casContainingDigestsGatherer) traverse(directoryWalker cas.DirectoryWalker) error { + // Add the directory itself. 
+ g.digests.Add(directoryWalker.GetContainingDigest()) + + directory, err := directoryWalker.GetDirectory(g.context) + if err != nil { + return util.StatusWrap(err, directoryWalker.GetDescription()) + } + + // Recursively traverse all child directories. Ignore + // directories that were processed before, as we don't want to + // be tricked into performing an exponential number of + // traversals against malicious Tree objects. + for _, entry := range directory.Directories { + childDigest, err := g.digestFunction.NewDigestFromProto(entry.Digest) + if err != nil { + return util.StatusWrapf(err, "%s: Failed to obtain digest for directory %#v", directoryWalker.GetDescription(), entry.Name) + } + if _, ok := g.directoriesGathered[childDigest]; !ok { + g.directoriesGathered[childDigest] = struct{}{} + if err := g.traverse(directoryWalker.GetChild(childDigest)); err != nil { + return err + } + } + } + + for _, entry := range directory.Files { + childDigest, err := g.digestFunction.NewDigestFromProto(entry.Digest) + if err != nil { + return util.StatusWrapf(err, "%s: Failed to obtain digest for file %#v", directoryWalker.GetDescription(), entry.Name) + } + g.digests.Add(childDigest) + } + + return nil +} diff --git a/pkg/filesystem/virtual/cas_initial_contents_fetcher_test.go b/pkg/filesystem/virtual/cas_initial_contents_fetcher_test.go new file mode 100644 index 0000000..da2a9bd --- /dev/null +++ b/pkg/filesystem/virtual/cas_initial_contents_fetcher_test.go @@ -0,0 +1,371 @@ +package virtual_test + +import ( + "context" + "testing" + + remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + "github.com/buildbarn/bb-remote-execution/internal/mock" + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/virtual" + "github.com/buildbarn/bb-storage/pkg/digest" + "github.com/buildbarn/bb-storage/pkg/filesystem/path" + "github.com/buildbarn/bb-storage/pkg/testutil" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" + + 
"google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func TestCASInitialContentsFetcherFetchContents(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + directoryWalker := mock.NewMockDirectoryWalker(ctrl) + casFileFactory := mock.NewMockCASFileFactory(ctrl) + symlinkFactory := mock.NewMockSymlinkFactory(ctrl) + initialContentsFetcher := virtual.NewCASInitialContentsFetcher( + ctx, + directoryWalker, + casFileFactory, + symlinkFactory, + digest.MustNewFunction("hello", remoteexecution.DigestFunction_MD5)) + + t.Run("DirectoryWalkerFailure", func(t *testing.T) { + // Errors from the backend should be propagated. + fileReadMonitorFactory := mock.NewMockFileReadMonitorFactory(ctrl) + directoryWalker.EXPECT().GetDirectory(ctx). + Return(nil, status.Error(codes.Internal, "Server failure")) + directoryWalker.EXPECT().GetDescription().Return("Root directory") + + _, err := initialContentsFetcher.FetchContents(fileReadMonitorFactory.Call) + testutil.RequireEqualStatus(t, status.Error(codes.Internal, "Root directory: Server failure"), err) + }) + + t.Run("ChildDirectoryInvalidName", func(t *testing.T) { + // Directories containing entries with invalid names + // should be rejected, as they cannot be instantiated. 
+ fileReadMonitorFactory := mock.NewMockFileReadMonitorFactory(ctrl) + directoryWalker.EXPECT().GetDirectory(ctx).Return(&remoteexecution.Directory{ + Directories: []*remoteexecution.DirectoryNode{ + { + Name: "..", + Digest: &remoteexecution.Digest{ + Hash: "4df5f448a5e6b3c41e6aae7a8a9832aa", + SizeBytes: 123, + }, + }, + }, + }, nil) + directoryWalker.EXPECT().GetDescription().Return("Root directory") + + _, err := initialContentsFetcher.FetchContents(fileReadMonitorFactory.Call) + testutil.RequireEqualStatus(t, status.Error(codes.InvalidArgument, "Root directory: Directory \"..\" has an invalid name"), err) + }) + + t.Run("ChildDirectoryInvalidDigest", func(t *testing.T) { + fileReadMonitorFactory := mock.NewMockFileReadMonitorFactory(ctrl) + directoryWalker.EXPECT().GetDirectory(ctx).Return(&remoteexecution.Directory{ + Directories: []*remoteexecution.DirectoryNode{ + { + Name: "hello", + Digest: &remoteexecution.Digest{ + Hash: "Not a valid digest", + SizeBytes: 123, + }, + }, + }, + }, nil) + directoryWalker.EXPECT().GetDescription().Return("Root directory") + + _, err := initialContentsFetcher.FetchContents(fileReadMonitorFactory.Call) + testutil.RequireEqualStatus(t, status.Error(codes.InvalidArgument, "Root directory: Failed to obtain digest for directory \"hello\": Hash has length 18, while 32 characters were expected"), err) + }) + + t.Run("ChildFileInvalidDigest", func(t *testing.T) { + // If an error occurs after creating the first file, any + // previously created files should be unlinked prior to + // returning, so that the files don't leak. 
+ fileReadMonitorFactory := mock.NewMockFileReadMonitorFactory(ctrl) + directoryWalker.EXPECT().GetDirectory(ctx).Return(&remoteexecution.Directory{ + Files: []*remoteexecution.FileNode{ + { + Name: "file1", + Digest: &remoteexecution.Digest{ + Hash: "ded43ceff96666255cbb89a40cb9d1bd", + SizeBytes: 1200, + }, + }, + { + Name: "file2", + Digest: &remoteexecution.Digest{ + Hash: "Not a valid digest", + SizeBytes: 1300, + }, + }, + }, + }, nil) + file1 := mock.NewMockNativeLeaf(ctrl) + fileReadMonitor1 := mock.NewMockFileReadMonitor(ctrl) + fileReadMonitorFactory.EXPECT().Call(path.MustNewComponent("file1")).Return(fileReadMonitor1.Call) + casFileFactory.EXPECT().LookupFile( + digest.MustNewDigest("hello", remoteexecution.DigestFunction_MD5, "ded43ceff96666255cbb89a40cb9d1bd", 1200), + /* isExecutable = */ false, + gomock.Any(), + ).Return(file1) + file1.EXPECT().Unlink() + directoryWalker.EXPECT().GetDescription().Return("Root directory") + + _, err := initialContentsFetcher.FetchContents(fileReadMonitorFactory.Call) + testutil.RequireEqualStatus(t, status.Error(codes.InvalidArgument, "Root directory: Failed to obtain digest for file \"file2\": Hash has length 18, while 32 characters were expected"), err) + }) + + t.Run("DuplicateNames", func(t *testing.T) { + fileReadMonitorFactory := mock.NewMockFileReadMonitorFactory(ctrl) + directoryWalker.EXPECT().GetDirectory(ctx).Return(&remoteexecution.Directory{ + Files: []*remoteexecution.FileNode{ + { + Name: "hello", + Digest: &remoteexecution.Digest{ + Hash: "0970ca3d192dde1268a19b44bbecadcf", + SizeBytes: 3000, + }, + }, + }, + Symlinks: []*remoteexecution.SymlinkNode{ + { + Name: "hello", + Target: "target", + }, + }, + }, nil) + file1 := mock.NewMockNativeLeaf(ctrl) + fileReadMonitor1 := mock.NewMockFileReadMonitor(ctrl) + fileReadMonitorFactory.EXPECT().Call(path.MustNewComponent("hello")).Return(fileReadMonitor1.Call) + casFileFactory.EXPECT().LookupFile( + digest.MustNewDigest("hello", 
remoteexecution.DigestFunction_MD5, "0970ca3d192dde1268a19b44bbecadcf", 3000), + /* isExecutable = */ false, + gomock.Any(), + ).Return(file1) + file1.EXPECT().Unlink() + directoryWalker.EXPECT().GetDescription().Return("Root directory") + + _, err := initialContentsFetcher.FetchContents(fileReadMonitorFactory.Call) + testutil.RequireEqualStatus(t, status.Error(codes.InvalidArgument, "Root directory: Directory contains multiple children named \"hello\""), err) + }) + + t.Run("Success", func(t *testing.T) { + // Let the InitialContentsFetcher successfully parse a + // Directory object. + fileReadMonitorFactory := mock.NewMockFileReadMonitorFactory(ctrl) + directoryWalker.EXPECT().GetDirectory(ctx).Return(&remoteexecution.Directory{ + Directories: []*remoteexecution.DirectoryNode{ + { + Name: "directory", + Digest: &remoteexecution.Digest{ + Hash: "4b3b03436604cb9d831b91c71a8c1952", + SizeBytes: 123, + }, + }, + }, + Files: []*remoteexecution.FileNode{ + { + Name: "executable", + Digest: &remoteexecution.Digest{ + Hash: "946fbe7108add776d3e3094f512c3483", + SizeBytes: 456, + }, + IsExecutable: true, + }, + { + Name: "file", + Digest: &remoteexecution.Digest{ + Hash: "c0607941dd5b3ca8e175a1bfbfd1c0ea", + SizeBytes: 789, + }, + }, + }, + Symlinks: []*remoteexecution.SymlinkNode{ + { + Name: "symlink", + Target: "target", + }, + }, + }, nil) + childDirectoryWalker := mock.NewMockDirectoryWalker(ctrl) + directoryWalker.EXPECT().GetChild(digest.MustNewDigest("hello", remoteexecution.DigestFunction_MD5, "4b3b03436604cb9d831b91c71a8c1952", 123)). 
+ Return(childDirectoryWalker) + executableLeaf := mock.NewMockNativeLeaf(ctrl) + executableReadMonitor := mock.NewMockFileReadMonitor(ctrl) + fileReadMonitorFactory.EXPECT().Call(path.MustNewComponent("executable")).Return(executableReadMonitor.Call) + casFileFactory.EXPECT().LookupFile( + digest.MustNewDigest("hello", remoteexecution.DigestFunction_MD5, "946fbe7108add776d3e3094f512c3483", 456), + /* isExecutable = */ true, + gomock.Any(), + ).Return(executableLeaf) + fileLeaf := mock.NewMockNativeLeaf(ctrl) + fileReadMonitor := mock.NewMockFileReadMonitor(ctrl) + fileReadMonitorFactory.EXPECT().Call(path.MustNewComponent("file")).Return(fileReadMonitor.Call) + casFileFactory.EXPECT().LookupFile( + digest.MustNewDigest("hello", remoteexecution.DigestFunction_MD5, "c0607941dd5b3ca8e175a1bfbfd1c0ea", 789), + /* isExecutable = */ false, + gomock.Any(), + ).Return(fileLeaf) + symlinkLeaf := mock.NewMockNativeLeaf(ctrl) + symlinkFactory.EXPECT().LookupSymlink([]byte("target")).Return(symlinkLeaf) + + children, err := initialContentsFetcher.FetchContents(fileReadMonitorFactory.Call) + require.NoError(t, err) + childInitialContentsFetcher, _ := children[path.MustNewComponent("directory")].GetPair() + require.Equal(t, map[path.Component]virtual.InitialNode{ + path.MustNewComponent("directory"): virtual.InitialNode{}.FromDirectory(childInitialContentsFetcher), + path.MustNewComponent("executable"): virtual.InitialNode{}.FromLeaf(executableLeaf), + path.MustNewComponent("file"): virtual.InitialNode{}.FromLeaf(fileLeaf), + path.MustNewComponent("symlink"): virtual.InitialNode{}.FromLeaf(symlinkLeaf), + }, children) + + // Check that the InitialContentsFetcher that is created + // for the subdirectory calls into the right DirectoryWalker. + childFileReadMonitorFactory := mock.NewMockFileReadMonitorFactory(ctrl) + childDirectoryWalker.EXPECT().GetDirectory(ctx). 
+ Return(&remoteexecution.Directory{}, nil) + + grandchildren, err := childInitialContentsFetcher.FetchContents(childFileReadMonitorFactory.Call) + require.NoError(t, err) + require.Empty(t, grandchildren) + }) +} + +func TestCASInitialContentsFetcherGetContainingDigests(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + directoryWalker := mock.NewMockDirectoryWalker(ctrl) + casFileFactory := mock.NewMockCASFileFactory(ctrl) + symlinkFactory := mock.NewMockSymlinkFactory(ctrl) + initialContentsFetcher := virtual.NewCASInitialContentsFetcher( + ctx, + directoryWalker, + casFileFactory, + symlinkFactory, + digest.MustNewFunction("hello", remoteexecution.DigestFunction_MD5)) + + t.Run("DirectoryWalkerFailure", func(t *testing.T) { + // Errors from the backend should be propagated. + directoryWalker.EXPECT().GetContainingDigest(). + Return(digest.MustNewDigest("hello", remoteexecution.DigestFunction_MD5, "7f390b0d6fb7831b0172bd7ce3e54256", 12)) + directoryWalker.EXPECT().GetDirectory(ctx). + Return(nil, status.Error(codes.Internal, "Server failure")) + directoryWalker.EXPECT().GetDescription().Return("Root directory") + + _, err := initialContentsFetcher.GetContainingDigests(ctx) + testutil.RequireEqualStatus(t, status.Error(codes.Internal, "Root directory: Server failure"), err) + }) + + t.Run("ChildDirectoryInvalidDigest", func(t *testing.T) { + directoryWalker.EXPECT().GetContainingDigest(). 
+ Return(digest.MustNewDigest("hello", remoteexecution.DigestFunction_MD5, "7f390b0d6fb7831b0172bd7ce3e54256", 12)) + directoryWalker.EXPECT().GetDirectory(ctx).Return(&remoteexecution.Directory{ + Directories: []*remoteexecution.DirectoryNode{ + { + Name: "hello", + Digest: &remoteexecution.Digest{ + Hash: "Not a valid digest", + SizeBytes: 123, + }, + }, + }, + }, nil) + directoryWalker.EXPECT().GetDescription().Return("Root directory") + + _, err := initialContentsFetcher.GetContainingDigests(ctx) + testutil.RequireEqualStatus(t, status.Error(codes.InvalidArgument, "Root directory: Failed to obtain digest for directory \"hello\": Hash has length 18, while 32 characters were expected"), err) + }) + + t.Run("ChildFileInvalidDigest", func(t *testing.T) { + directoryWalker.EXPECT().GetContainingDigest(). + Return(digest.MustNewDigest("hello", remoteexecution.DigestFunction_MD5, "7f390b0d6fb7831b0172bd7ce3e54256", 12)) + directoryWalker.EXPECT().GetDirectory(ctx).Return(&remoteexecution.Directory{ + Files: []*remoteexecution.FileNode{ + { + Name: "hello", + Digest: &remoteexecution.Digest{ + Hash: "Not a valid digest", + SizeBytes: 123, + }, + }, + }, + }, nil) + directoryWalker.EXPECT().GetDescription().Return("Root directory") + + _, err := initialContentsFetcher.GetContainingDigests(ctx) + testutil.RequireEqualStatus(t, status.Error(codes.InvalidArgument, "Root directory: Failed to obtain digest for file \"hello\": Hash has length 18, while 32 characters were expected"), err) + }) + + t.Run("Success", func(t *testing.T) { + // Successfully compute the transitive closure of + // digests referenced by a directory hierarchy. Each + // directory should only be processed once to prevent + // exponential running times on malicious Tree objects. 
+ directoryDigest := digest.MustNewDigest("hello", remoteexecution.DigestFunction_MD5, "7f390b0d6fb7831b0172bd7ce3e54256", 12) + directoryWalker.EXPECT().GetContainingDigest().Return(directoryDigest) + directoryWalker.EXPECT().GetDirectory(ctx).Return(&remoteexecution.Directory{ + Directories: []*remoteexecution.DirectoryNode{ + { + Name: "directory1", + Digest: &remoteexecution.Digest{ + Hash: "4b3b03436604cb9d831b91c71a8c1952", + SizeBytes: 123, + }, + }, + { + Name: "directory2", + Digest: &remoteexecution.Digest{ + Hash: "4b3b03436604cb9d831b91c71a8c1952", + SizeBytes: 123, + }, + }, + }, + Files: []*remoteexecution.FileNode{ + { + Name: "file", + Digest: &remoteexecution.Digest{ + Hash: "c0607941dd5b3ca8e175a1bfbfd1c0ea", + SizeBytes: 789, + }, + }, + }, + Symlinks: []*remoteexecution.SymlinkNode{ + { + Name: "symlink", + Target: "target", + }, + }, + }, nil) + childDirectoryWalker := mock.NewMockDirectoryWalker(ctrl) + childDirectoryDigest := digest.MustNewDigest("hello", remoteexecution.DigestFunction_MD5, "4b3b03436604cb9d831b91c71a8c1952", 123) + directoryWalker.EXPECT().GetChild(childDirectoryDigest).Return(childDirectoryWalker) + childDirectoryWalker.EXPECT().GetContainingDigest().Return(childDirectoryDigest) + childDirectoryWalker.EXPECT().GetDirectory(ctx).Return(&remoteexecution.Directory{ + Files: []*remoteexecution.FileNode{ + { + Name: "file", + Digest: &remoteexecution.Digest{ + Hash: "19dc69325bd8dfcd75cefbb6144ea3bb", + SizeBytes: 42, + }, + }, + }, + }, nil) + + digests, err := initialContentsFetcher.GetContainingDigests(ctx) + require.NoError(t, err) + require.Equal( + t, + digest.NewSetBuilder(). + Add(directoryDigest). + Add(childDirectoryDigest). + Add(digest.MustNewDigest("hello", remoteexecution.DigestFunction_MD5, "c0607941dd5b3ca8e175a1bfbfd1c0ea", 789)). + Add(digest.MustNewDigest("hello", remoteexecution.DigestFunction_MD5, "19dc69325bd8dfcd75cefbb6144ea3bb", 42)). 
+ Build(), + digests) + }) +} diff --git a/pkg/filesystem/virtual/character_device_factory.go b/pkg/filesystem/virtual/character_device_factory.go new file mode 100644 index 0000000..844cdfa --- /dev/null +++ b/pkg/filesystem/virtual/character_device_factory.go @@ -0,0 +1,74 @@ +package virtual + +import ( + "bytes" + "encoding/binary" + "io" + "math" + + "github.com/buildbarn/bb-storage/pkg/filesystem" +) + +// CharacterDeviceFactory is a factory type for character devices. +// Character devices are immutable files; it is not possible to change +// the device after it has been created. +type CharacterDeviceFactory interface { + LookupCharacterDevice(deviceNumber filesystem.DeviceNumber) NativeLeaf +} + +type baseCharacterDeviceFactory struct{} + +func (baseCharacterDeviceFactory) LookupCharacterDevice(deviceNumber filesystem.DeviceNumber) NativeLeaf { + return NewSpecialFile(filesystem.FileTypeCharacterDevice, &deviceNumber) +} + +// BaseCharacterDeviceFactory can be used to create simple immutable +// character device nodes. +var BaseCharacterDeviceFactory CharacterDeviceFactory = baseCharacterDeviceFactory{} + +type handleAllocatingCharacterDeviceFactory struct { + base CharacterDeviceFactory + allocator ResolvableHandleAllocator +} + +// NewHandleAllocatingCharacterDeviceFactory creates a decorator for +// CharacterDeviceFactory that creates character devices that have a +// handle associated with them. +// +// Because device numbers are small, this implementation uses a +// resolvable handle allocator, meaning that the major and minor number +// of the device are stored in the file handle. 
+func NewHandleAllocatingCharacterDeviceFactory(base CharacterDeviceFactory, allocation ResolvableHandleAllocation) CharacterDeviceFactory { + cdf := &handleAllocatingCharacterDeviceFactory{ + base: base, + } + cdf.allocator = allocation.AsResolvableAllocator(cdf.resolve) + return cdf +} + +func (cdf *handleAllocatingCharacterDeviceFactory) LookupCharacterDevice(deviceNumber filesystem.DeviceNumber) NativeLeaf { + // Convert the device number to a binary identifier. + major, minor := deviceNumber.ToMajorMinor() + var identifier [binary.MaxVarintLen32 * 2]byte + length := binary.PutUvarint(identifier[:], uint64(major)) + length += binary.PutUvarint(identifier[length:], uint64(minor)) + + return cdf.allocator. + New(bytes.NewBuffer(identifier[:length])). + AsNativeLeaf(cdf.base.LookupCharacterDevice(deviceNumber)) +} + +func (cdf *handleAllocatingCharacterDeviceFactory) resolve(r io.ByteReader) (DirectoryChild, Status) { + // Convert the binary identifier to a device number. + major, err := binary.ReadUvarint(r) + if err != nil || major > math.MaxUint32 { + return DirectoryChild{}, StatusErrBadHandle + } + minor, err := binary.ReadUvarint(r) + if err != nil || minor > math.MaxUint32 { + return DirectoryChild{}, StatusErrBadHandle + } + deviceNumber := filesystem.NewDeviceNumberFromMajorMinor(uint32(major), uint32(minor)) + + return DirectoryChild{}.FromLeaf(cdf.LookupCharacterDevice(deviceNumber)), StatusOK +} diff --git a/pkg/filesystem/virtual/character_device_factory_test.go b/pkg/filesystem/virtual/character_device_factory_test.go new file mode 100644 index 0000000..cc2f98f --- /dev/null +++ b/pkg/filesystem/virtual/character_device_factory_test.go @@ -0,0 +1,86 @@ +package virtual_test + +import ( + "bytes" + "io" + "testing" + + "github.com/buildbarn/bb-remote-execution/internal/mock" + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/virtual" + "github.com/buildbarn/bb-storage/pkg/filesystem" + "github.com/golang/mock/gomock" + 
"github.com/stretchr/testify/require" +) + +func TestHandleAllocatingCharacterDeviceFactory(t *testing.T) { + ctrl := gomock.NewController(t) + + baseCharacterDeviceFactory := mock.NewMockCharacterDeviceFactory(ctrl) + rootHandleAllocation := mock.NewMockResolvableHandleAllocation(ctrl) + handleAllocator := mock.NewMockResolvableHandleAllocator(ctrl) + var handleResolver virtual.HandleResolver + rootHandleAllocation.EXPECT().AsResolvableAllocator(gomock.Any()). + DoAndReturn(func(hr virtual.HandleResolver) virtual.ResolvableHandleAllocator { + handleResolver = hr + return handleAllocator + }) + characterDeviceFactory := virtual.NewHandleAllocatingCharacterDeviceFactory( + baseCharacterDeviceFactory, + rootHandleAllocation) + + t.Run("Lookup", func(t *testing.T) { + // Look up /dev/null (on Linux: major 1, minor 3). + deviceNumber := filesystem.NewDeviceNumberFromMajorMinor(1, 3) + underlyingLeaf := mock.NewMockNativeLeaf(ctrl) + baseCharacterDeviceFactory.EXPECT().LookupCharacterDevice(deviceNumber).Return(underlyingLeaf) + wrappedLeaf := mock.NewMockNativeLeaf(ctrl) + leafHandleAllocation := mock.NewMockResolvableHandleAllocation(ctrl) + handleAllocator.EXPECT().New(gomock.Any()). + DoAndReturn(func(id io.WriterTo) virtual.ResolvableHandleAllocation { + idBuf := bytes.NewBuffer(nil) + n, err := id.WriteTo(idBuf) + require.NoError(t, err) + require.Equal(t, int64(2), n) + require.Equal(t, []byte{1, 3}, idBuf.Bytes()) + return leafHandleAllocation + }) + leafHandleAllocation.EXPECT().AsNativeLeaf(underlyingLeaf).Return(wrappedLeaf) + + require.Equal(t, wrappedLeaf, characterDeviceFactory.LookupCharacterDevice(deviceNumber)) + }) + + t.Run("ResolverEmpty", func(t *testing.T) { + // An empty file handle should not resolve. + _, s := handleResolver(bytes.NewBuffer(nil)) + require.Equal(t, virtual.StatusErrBadHandle, s) + }) + + t.Run("ResolverSingleNumber", func(t *testing.T) { + // Only provided a major number. 
+ _, s := handleResolver(bytes.NewBuffer([]byte{1})) + require.Equal(t, virtual.StatusErrBadHandle, s) + }) + + t.Run("ResolverTwoNumbers", func(t *testing.T) { + // Provided both a major and minor number. + deviceNumber := filesystem.NewDeviceNumberFromMajorMinor(1, 3) + underlyingLeaf := mock.NewMockNativeLeaf(ctrl) + baseCharacterDeviceFactory.EXPECT().LookupCharacterDevice(deviceNumber).Return(underlyingLeaf) + wrappedLeaf := mock.NewMockNativeLeaf(ctrl) + leafHandleAllocation := mock.NewMockResolvableHandleAllocation(ctrl) + handleAllocator.EXPECT().New(gomock.Any()). + DoAndReturn(func(id io.WriterTo) virtual.ResolvableHandleAllocation { + idBuf := bytes.NewBuffer(nil) + n, err := id.WriteTo(idBuf) + require.NoError(t, err) + require.Equal(t, int64(2), n) + require.Equal(t, []byte{1, 3}, idBuf.Bytes()) + return leafHandleAllocation + }) + leafHandleAllocation.EXPECT().AsNativeLeaf(underlyingLeaf).Return(wrappedLeaf) + + actualChild, s := handleResolver(bytes.NewBuffer([]byte{1, 3})) + require.Equal(t, virtual.StatusOK, s) + require.Equal(t, virtual.DirectoryChild{}.FromLeaf(wrappedLeaf), actualChild) + }) +} diff --git a/pkg/filesystem/virtual/child.go b/pkg/filesystem/virtual/child.go new file mode 100644 index 0000000..e70ef9d --- /dev/null +++ b/pkg/filesystem/virtual/child.go @@ -0,0 +1,52 @@ +package virtual + +// Child is a variant type that either contains a directory or leaf +// object. +// +// TODO: In principle it should be possible to eliminate the 'kind' +// field, if it weren't for the fact that we can't compare TDirectory +// and TLeaf against nil. There is no nullable constraint. +// https://github.com/golang/go/issues/53656 +type Child[TDirectory any, TLeaf any, TNode any] struct { + kind int + directory TDirectory + leaf TLeaf +} + +// FromDirectory creates a Child that contains a directory. 
+func (Child[TDirectory, TLeaf, TNode]) FromDirectory(directory TDirectory) Child[TDirectory, TLeaf, TNode] { + return Child[TDirectory, TLeaf, TNode]{kind: 1, directory: directory} +} + +// FromLeaf creates a Child that contains a leaf. +func (Child[TDirectory, TLeaf, TNode]) FromLeaf(leaf TLeaf) Child[TDirectory, TLeaf, TNode] { + return Child[TDirectory, TLeaf, TNode]{kind: 2, leaf: leaf} +} + +// IsSet returns true if the Child contains either a directory or leaf. +func (c Child[TDirectory, TLeaf, TNode]) IsSet() bool { + return c.kind != 0 +} + +// GetNode returns the value of the child as a single object, making it +// possible to call into methods that are both provided by the directory +// and leaf types. +func (c Child[TDirectory, TLeaf, TNode]) GetNode() TNode { + switch c.kind { + case 1: + // These casts are unnecessary, and are only needed + // because type parameters can not constrain each other. + // https://groups.google.com/g/golang-nuts/c/VrbEngj8Itg/m/54_l5f73BQAJ + return any(c.directory).(TNode) + case 2: + return any(c.leaf).(TNode) + default: + panic("Child is not set") + } +} + +// GetPair returns the value of the child as a directory or leaf object, +// making it possible to call into directory/leaf specific methods. 
+func (c Child[TDirectory, TLeaf, TNode]) GetPair() (TDirectory, TLeaf) { + return c.directory, c.leaf +} diff --git a/pkg/filesystem/virtual/configuration/BUILD.bazel b/pkg/filesystem/virtual/configuration/BUILD.bazel new file mode 100644 index 0000000..de5065c --- /dev/null +++ b/pkg/filesystem/virtual/configuration/BUILD.bazel @@ -0,0 +1,59 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "configuration", + srcs = [ + "attribute_caching_duration.go", + "configuration.go", + "fuse_mount_disabled.go", + "fuse_mount_enabled.go", + "nfsv4_mount_darwin.go", + "nfsv4_mount_disabled.go", + "remove_stale_mounts.go", + ], + importpath = "github.com/buildbarn/bb-remote-execution/pkg/filesystem/virtual/configuration", + visibility = ["//visibility:public"], + deps = [ + "//pkg/filesystem/virtual", + "//pkg/filesystem/virtual/nfsv4", + "//pkg/proto/configuration/filesystem/virtual", + "@com_github_buildbarn_bb_storage//pkg/clock", + "@com_github_buildbarn_bb_storage//pkg/eviction", + "@com_github_buildbarn_bb_storage//pkg/program", + "@com_github_buildbarn_bb_storage//pkg/random", + "@com_github_buildbarn_bb_storage//pkg/util", + "@com_github_buildbarn_go_xdr//pkg/protocols/nfsv4", + "@com_github_buildbarn_go_xdr//pkg/rpcserver", + "@com_github_jmespath_go_jmespath//:go-jmespath", + "@org_golang_google_grpc//codes", + "@org_golang_google_grpc//status", + ] + select({ + "@io_bazel_rules_go//go/platform:android": [ + "//pkg/filesystem/virtual/fuse", + "@com_github_buildbarn_bb_storage//pkg/filesystem", + "@com_github_hanwen_go_fuse_v2//fuse", + "@org_golang_x_sys//unix", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "//pkg/filesystem/virtual/fuse", + "@com_github_buildbarn_bb_storage//pkg/filesystem", + "@com_github_buildbarn_go_xdr//pkg/protocols/darwin_nfs_sys_prot", + "@com_github_hanwen_go_fuse_v2//fuse", + "@org_golang_x_sys//unix", + ], + "@io_bazel_rules_go//go/platform:ios": [ + "//pkg/filesystem/virtual/fuse", + 
"@com_github_buildbarn_bb_storage//pkg/filesystem", + "@com_github_buildbarn_go_xdr//pkg/protocols/darwin_nfs_sys_prot", + "@com_github_hanwen_go_fuse_v2//fuse", + "@org_golang_x_sys//unix", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "//pkg/filesystem/virtual/fuse", + "@com_github_buildbarn_bb_storage//pkg/filesystem", + "@com_github_hanwen_go_fuse_v2//fuse", + "@org_golang_x_sys//unix", + ], + "//conditions:default": [], + }), +) diff --git a/pkg/filesystem/virtual/configuration/attribute_caching_duration.go b/pkg/filesystem/virtual/configuration/attribute_caching_duration.go new file mode 100644 index 0000000..c952ec9 --- /dev/null +++ b/pkg/filesystem/virtual/configuration/attribute_caching_duration.go @@ -0,0 +1,66 @@ +package configuration + +import ( + "time" +) + +// AttributeCachingDuration specifies the amount of time attributes of +// files may be cached by an NFSv4 file system that accesses a virtual +// file system. +// +// Unlike FUSE, NFSv4.0 provides no facilities for letting the server +// invalidate information on files cached by the client. To work around +// this, most NFS clients provide mount options such as 'acregmin', +// 'acregmax', 'acdirmin' and 'acdirmax' that permit specifying the +// amount of time attributes may be cached. +type AttributeCachingDuration struct { + minimum time.Duration + maximum time.Duration +} + +// Min returns the lowest attribute cache duration. This can be used to +// combine multiple attribute caching durations into a single value, in +// case the NFS client makes no distinction between individual values. +func (a AttributeCachingDuration) Min(b AttributeCachingDuration) AttributeCachingDuration { + if a.minimum > b.minimum { + a.minimum = b.minimum + } + if a.maximum > b.maximum { + a.maximum = b.maximum + } + return a +} + +// NoAttributeCaching indicates that the NFS client should not cache any +// file attributes, such as size, modification time, permissions and +// symlink target. 
+// +// This is a good policy for bb_virtual_tmp's symbolic link, whose +// target may not be cached. +var NoAttributeCaching = AttributeCachingDuration{ + minimum: 0, + maximum: 0, +} + +// ShortAttributeCaching indicates that the NFS client should only cache +// file attributes for a short amount of time. +// +// This is a good policy for bb_worker's root directory. The contents of +// this directory change regularly, but there is a very low probability +// of immediate reuse of build action subdirectories having the same +// name. It is therefore desirable to have a limited amount of caching. +var ShortAttributeCaching = AttributeCachingDuration{ + minimum: time.Second, + maximum: time.Second, +} + +// LongAttributeCaching indicates that the NFS client may cache file +// attributes for a long amount of time. +// +// This is a good policy for files or directories that are either fully +// immutable, or are only mutated through operations performed by the +// client through the mount point. 
+var LongAttributeCaching = AttributeCachingDuration{ + minimum: time.Minute, + maximum: 5 * time.Minute, +} diff --git a/pkg/filesystem/virtual/configuration/configuration.go b/pkg/filesystem/virtual/configuration/configuration.go new file mode 100644 index 0000000..ff46305 --- /dev/null +++ b/pkg/filesystem/virtual/configuration/configuration.go @@ -0,0 +1,126 @@ +package configuration + +import ( + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/virtual" + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/virtual/nfsv4" + pb "github.com/buildbarn/bb-remote-execution/pkg/proto/configuration/filesystem/virtual" + "github.com/buildbarn/bb-storage/pkg/clock" + "github.com/buildbarn/bb-storage/pkg/eviction" + "github.com/buildbarn/bb-storage/pkg/program" + "github.com/buildbarn/bb-storage/pkg/random" + "github.com/buildbarn/bb-storage/pkg/util" + nfsv4_xdr "github.com/buildbarn/go-xdr/pkg/protocols/nfsv4" + "github.com/buildbarn/go-xdr/pkg/rpcserver" + "github.com/jmespath/go-jmespath" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// Mount of a virtual file system that has been created using +// NewMountFromConfiguration(), but that hasn't been exposed to the +// kernel or network yet. Before calling Expose(), the caller has the +// possibility to construct a root directory. 
+type Mount interface { + Expose(terminationGroup program.Group, rootDirectory virtual.Directory) error +} + +type fuseMount struct { + mountPath string + configuration *pb.FUSEMountConfiguration + handleAllocator *virtual.FUSEStatefulHandleAllocator + fsName string +} + +type nfsv4Mount struct { + mountPath string + configuration *pb.NFSv4MountConfiguration + handleAllocator *virtual.NFSStatefulHandleAllocator + authenticator rpcserver.Authenticator + fsName string + rootDirectoryAttributeCaching AttributeCachingDuration + childDirectoriesAttributeCaching AttributeCachingDuration + leavesAttributeCaching AttributeCachingDuration +} + +func (m *nfsv4Mount) Expose(terminationGroup program.Group, rootDirectory virtual.Directory) error { + // Random values that the client can use to detect that the + // server has been restarted and lost all state. + var verifier nfsv4_xdr.Verifier4 + random.FastThreadSafeGenerator.Read(verifier[:]) + var stateIDOtherPrefix [4]byte + random.FastThreadSafeGenerator.Read(stateIDOtherPrefix[:]) + + enforcedLeaseTime := m.configuration.EnforcedLeaseTime + if err := enforcedLeaseTime.CheckValid(); err != nil { + return util.StatusWrap(err, "Invalid enforced lease time") + } + announcedLeaseTime := m.configuration.AnnouncedLeaseTime + if err := announcedLeaseTime.CheckValid(); err != nil { + return util.StatusWrap(err, "Invalid announced lease time") + } + + // Create an RPC server that offers the NFSv4 program. 
+ rpcServer := rpcserver.NewServer(map[uint32]rpcserver.Service{ + nfsv4_xdr.NFS4_PROGRAM_PROGRAM_NUMBER: nfsv4_xdr.NewNfs4ProgramService( + nfsv4.NewMetricsProgram( + nfsv4.NewBaseProgram( + rootDirectory, + m.handleAllocator.ResolveHandle, + random.NewFastSingleThreadedGenerator(), + verifier, + stateIDOtherPrefix, + clock.SystemClock, + enforcedLeaseTime.AsDuration(), + announcedLeaseTime.AsDuration()))), + }, m.authenticator) + + return m.mount(terminationGroup, rpcServer) +} + +// NewMountFromConfiguration creates a new FUSE mount based on options +// specified in a configuration message and starts processing of +// incoming requests. +func NewMountFromConfiguration(configuration *pb.MountConfiguration, fsName string, rootDirectoryAttributeCaching, childDirectoriesAttributeCaching, leavesAttributeCaching AttributeCachingDuration) (Mount, virtual.StatefulHandleAllocator, error) { + switch backend := configuration.Backend.(type) { + case *pb.MountConfiguration_Fuse: + handleAllocator := virtual.NewFUSEHandleAllocator(random.FastThreadSafeGenerator) + return &fuseMount{ + mountPath: configuration.MountPath, + configuration: backend.Fuse, + handleAllocator: handleAllocator, + fsName: fsName, + }, handleAllocator, nil + case *pb.MountConfiguration_Nfsv4: + handleAllocator := virtual.NewNFSHandleAllocator(random.NewFastSingleThreadedGenerator()) + + authenticator := rpcserver.AllowAuthenticator + if systemAuthentication := backend.Nfsv4.SystemAuthentication; systemAuthentication != nil { + compiledExpression, err := jmespath.Compile(systemAuthentication.MetadataJmespathExpression) + if err != nil { + return nil, nil, util.StatusWrap(err, "Failed to compile system authentication metadata JMESPath expression") + } + evictionSet, err := eviction.NewSetFromConfiguration[nfsv4.SystemAuthenticatorCacheKey](systemAuthentication.CacheReplacementPolicy) + if err != nil { + return nil, nil, util.StatusWrap(err, "Failed to create system authentication eviction set") + } + 
authenticator = nfsv4.NewSystemAuthenticator( + compiledExpression, + int(systemAuthentication.MaximumCacheSize), + eviction.NewMetricsSet(evictionSet, "SystemAuthenticator")) + } + + return &nfsv4Mount{ + mountPath: configuration.MountPath, + configuration: backend.Nfsv4, + handleAllocator: handleAllocator, + authenticator: authenticator, + fsName: fsName, + rootDirectoryAttributeCaching: rootDirectoryAttributeCaching, + childDirectoriesAttributeCaching: childDirectoriesAttributeCaching, + leavesAttributeCaching: leavesAttributeCaching, + }, handleAllocator, nil + default: + return nil, nil, status.Error(codes.InvalidArgument, "No virtual file system backend configuration provided") + } +} diff --git a/pkg/filesystem/virtual/configuration/fuse_mount_disabled.go b/pkg/filesystem/virtual/configuration/fuse_mount_disabled.go new file mode 100644 index 0000000..366412e --- /dev/null +++ b/pkg/filesystem/virtual/configuration/fuse_mount_disabled.go @@ -0,0 +1,16 @@ +//go:build freebsd || windows +// +build freebsd windows + +package configuration + +import ( + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/virtual" + "github.com/buildbarn/bb-storage/pkg/program" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func (m *fuseMount) Expose(terminationGroup program.Group, rootDirectory virtual.Directory) error { + return status.Error(codes.Unimplemented, "FUSE is not supported on this platform") +} diff --git a/pkg/filesystem/virtual/configuration/fuse_mount_enabled.go b/pkg/filesystem/virtual/configuration/fuse_mount_enabled.go new file mode 100644 index 0000000..548fdba --- /dev/null +++ b/pkg/filesystem/virtual/configuration/fuse_mount_enabled.go @@ -0,0 +1,98 @@ +//go:build darwin || linux +// +build darwin linux + +package configuration + +import ( + "time" + + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/virtual" + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/virtual/fuse" + 
"github.com/buildbarn/bb-storage/pkg/clock"
+	"github.com/buildbarn/bb-storage/pkg/filesystem"
+	"github.com/buildbarn/bb-storage/pkg/program"
+	"github.com/buildbarn/bb-storage/pkg/util"
+	go_fuse "github.com/hanwen/go-fuse/v2/fuse"
+	"github.com/jmespath/go-jmespath"
+)
+
+func (m *fuseMount) Expose(terminationGroup program.Group, rootDirectory virtual.Directory) error {
+	// Parse configuration options.
+	var directoryEntryValidity time.Duration
+	if d := m.configuration.DirectoryEntryValidity; d != nil {
+		if err := d.CheckValid(); err != nil {
+			return util.StatusWrap(err, "Failed to parse directory entry validity")
+		}
+		directoryEntryValidity = d.AsDuration()
+	}
+	var inodeAttributeValidity time.Duration
+	if d := m.configuration.InodeAttributeValidity; d != nil {
+		if err := d.CheckValid(); err != nil {
+			return util.StatusWrap(err, "Failed to parse inode attribute validity")
+		}
+		inodeAttributeValidity = d.AsDuration()
+	}
+
+	authenticator := fuse.AllowAuthenticator
+	if expression := m.configuration.InHeaderAuthenticationMetadataJmespathExpression; expression != "" {
+		compiledExpression, err := jmespath.Compile(expression)
+		if err != nil {
+			return util.StatusWrap(err, "Failed to compile in-header authentication metadata JMESPath expression")
+		}
+		authenticator = fuse.NewInHeaderAuthenticator(compiledExpression)
+	}
+
+	// Launch the FUSE server.
+ removeStaleMounts(m.mountPath) + deterministicTimestamp := uint64(filesystem.DeterministicFileModificationTimestamp.Unix()) + server, err := go_fuse.NewServer( + fuse.NewMetricsRawFileSystem( + fuse.NewDefaultAttributesInjectingRawFileSystem( + fuse.NewSimpleRawFileSystem( + rootDirectory, + m.handleAllocator.RegisterRemovalNotifier, + authenticator), + directoryEntryValidity, + inodeAttributeValidity, + &go_fuse.Attr{ + Atime: deterministicTimestamp, + Ctime: deterministicTimestamp, + Mtime: deterministicTimestamp, + }), + clock.SystemClock), + m.mountPath, + &go_fuse.MountOptions{ + // The name isn't strictly necessary, but is + // filled in to prevent runc from crashing with + // this error: + // https://github.com/opencontainers/runc/blob/v1.0.0-rc10/libcontainer/mount/mount_linux.go#L69 + // + // Newer versions of runc use an improved parser + // that's more reliable: + // https://github.com/moby/sys/blob/master/mountinfo/mountinfo_linux.go + FsName: m.fsName, + AllowOther: m.configuration.AllowOther, + DirectMount: m.configuration.DirectMount, + // Speed up workloads that perform many tiny + // writes. This means data is only guaranteed to + // make it into the virtual file system after + // calling close()/fsync()/munmap()/msync(). + EnableWritebackCache: true, + }) + if err != nil { + return util.StatusWrap(err, "Failed to create FUSE server") + } + // TODO: Run this as part of the program.Group, so that it gets + // cleaned up upon shutdown. + go server.Serve() + + // Adjust configuration options that can only be set after the + // FUSE server has been launched. 
+ if err := fuse.SetLinuxBackingDevInfoTunables( + m.mountPath, + m.configuration.LinuxBackingDevInfoTunables, + ); err != nil { + return util.StatusWrap(err, "Failed to set Linux Backing Device Info tunables") + } + return nil +} diff --git a/pkg/filesystem/virtual/configuration/nfsv4_mount_darwin.go b/pkg/filesystem/virtual/configuration/nfsv4_mount_darwin.go new file mode 100644 index 0000000..13c35e7 --- /dev/null +++ b/pkg/filesystem/virtual/configuration/nfsv4_mount_darwin.go @@ -0,0 +1,251 @@ +//go:build darwin +// +build darwin + +package configuration + +import ( + "bytes" + "context" + "io" + "log" + "net" + "os" + "os/exec" + "regexp" + "strconv" + "sync" + "time" + "unsafe" + + pb "github.com/buildbarn/bb-remote-execution/pkg/proto/configuration/filesystem/virtual" + "github.com/buildbarn/bb-storage/pkg/program" + "github.com/buildbarn/bb-storage/pkg/util" + nfs_sys_prot "github.com/buildbarn/go-xdr/pkg/protocols/darwin_nfs_sys_prot" + "github.com/buildbarn/go-xdr/pkg/rpcserver" + + "golang.org/x/sys/unix" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +var ( + initializeNFSOnce sync.Once + + macOSBuildVersionPattern = regexp.MustCompile("^([0-9]+)([A-Z])([0-9]+)") +) + +func writeNfstime32(d time.Duration, w io.Writer) { + nanos := d.Nanoseconds() + t := nfs_sys_prot.Nfstime32{ + Seconds: int32(nanos / 1e9), + Nseconds: uint32(nanos % 1e9), + } + t.WriteTo(w) +} + +func writeAttributeCachingDuration(d *AttributeCachingDuration, w io.Writer) { + writeNfstime32(d.minimum, w) + writeNfstime32(d.maximum, w) +} + +type macOSBuildVersion struct { + major int64 + minor byte + daily int64 +} + +// getMacOSBuildVersion returns the build version of the currently +// running instance of macOS. For example, on macOS 13.0.1, it will +// return (22, 'A', 400). 
+func getMacOSBuildVersion() (macOSBuildVersion, error) { + osVersion, err := unix.Sysctl("kern.osversion") + if err != nil { + return macOSBuildVersion{}, util.StatusWrap(err, "Failed to obtain the version of macOS running on the system") + } + submatches := macOSBuildVersionPattern.FindStringSubmatch(osVersion) + if submatches == nil { + return macOSBuildVersion{}, status.Errorf(codes.Internal, "Cannot parse macOS version %#v", osVersion) + } + major, err := strconv.ParseInt(submatches[1], 10, 64) + if err != nil { + return macOSBuildVersion{}, util.StatusWrapf(err, "Invalid macOS major version %#v", submatches[1]) + } + daily, err := strconv.ParseInt(submatches[3], 10, 64) + if err != nil { + return macOSBuildVersion{}, util.StatusWrapf(err, "Invalid macOS daily version %#v", submatches[3]) + } + return macOSBuildVersion{ + major: major, + minor: submatches[2][0], + daily: daily, + }, nil +} + +func (bv macOSBuildVersion) greaterEqual(major int64, minor byte, daily int64) bool { + return bv.major > major || (bv.major == major && (bv.minor > minor || (bv.minor == minor && bv.daily >= daily))) +} + +func (m *nfsv4Mount) mount(terminationGroup program.Group, rpcServer *rpcserver.Server) error { + // Extract the version of macOS used. We need to know this, as + // it determines which mount options are supported. + buildVersion, err := getMacOSBuildVersion() + if err != nil { + return err + } + + // macOS may require us to perform certain initialisation steps + // before attempting to create the NFS mount, such as loading + // the kernel extension containing the NFS client. + // + // Instead of trying to mimic those steps, call mount_nfs(8) in + // such a way that the arguments are valid, but is guaranteed to + // fail quickly. 
+	initializeNFSOnce.Do(func() {
+		exec.Command("/sbin/mount_nfs", "0.0.0.0:/", "/").Run()
+	})
+
+	darwinConfiguration, ok := m.configuration.OperatingSystem.(*pb.NFSv4MountConfiguration_Darwin)
+	if !ok {
+		return status.Error(codes.InvalidArgument, "Darwin specific NFSv4 server configuration options not provided")
+	}
+
+	// Expose the NFSv4 server on a UNIX socket.
+	osConfiguration := darwinConfiguration.Darwin
+	if err := os.Remove(osConfiguration.SocketPath); err != nil && !os.IsNotExist(err) {
+		return util.StatusWrapf(err, "Could not remove stale socket for NFSv4 server %#v", osConfiguration.SocketPath)
+	}
+	sock, err := net.Listen("unix", osConfiguration.SocketPath)
+	if err != nil {
+		return util.StatusWrap(err, "Failed to create listening socket for NFSv4 server")
+	}
+	// TODO: Run this as part of the program.Group, so that it gets
+	// cleaned up upon shutdown.
+	go func() {
+		for {
+			c, err := sock.Accept()
+			if err != nil {
+				log.Print("Got accept error: ", err); continue
+			}
+			go func() {
+				err := rpcServer.HandleConnection(c, c)
+				c.Close()
+				if err != nil {
+					log.Print("Failure handling NFSv4 connection: ", err)
+				}
+			}()
+		}
+	}()
+
+	// Construct attributes that are provided to mount(2). For NFS,
+	// these attributes are stored in an XDR message. Similar to how
+	// NFSv4's fattr4 works, the attributes need to be emitted in
+	// increasing order by bitmask field.
+	attrMask := make(nfs_sys_prot.Bitmap, nfs_sys_prot.NFS_MATTR_BITMAP_LEN)
+	var attrVals bytes.Buffer
+
+	// Don't bother setting up a callback service, as we don't issue
+	// CB_NOTIFY operations. Using this option is also a requirement
+	// for making NFSv4 over UNIX sockets work.
+ attrMask[0] |= 1 << nfs_sys_prot.NFS_MATTR_FLAGS + flags := nfs_sys_prot.NfsMattrFlags{ + Mask: []uint32{ + (1 << nfs_sys_prot.NFS_MFLAG_NOCALLBACK) | + (1 << nfs_sys_prot.NFS_MFLAG_SKIP_RENEW), + }, + Value: []uint32{ + (1 << nfs_sys_prot.NFS_MFLAG_NOCALLBACK) | + (1 << nfs_sys_prot.NFS_MFLAG_SKIP_RENEW), + }, + } + flags.WriteTo(&attrVals) + + // Explicitly request the use of NFSv4.0. + attrMask[0] |= 1 << nfs_sys_prot.NFS_MATTR_NFS_VERSION + nfs_sys_prot.WriteNfsMattrNfsVersion(&attrVals, 4) + attrMask[0] |= 1 << nfs_sys_prot.NFS_MATTR_NFS_MINOR_VERSION + nfs_sys_prot.WriteNfsMattrNfsMinorVersion(&attrVals, 0) + + // Set attribute caching durations. This needs to be set at + // mount time, as NFSv4 provides no facilities for conveying + // this on a per GETATTR response basis. + attrMask[0] |= (1 << nfs_sys_prot.NFS_MATTR_ATTRCACHE_REG_MIN) | (1 << nfs_sys_prot.NFS_MATTR_ATTRCACHE_REG_MAX) + writeAttributeCachingDuration(&m.leavesAttributeCaching, &attrVals) + attrMask[0] |= (1 << nfs_sys_prot.NFS_MATTR_ATTRCACHE_DIR_MIN) | (1 << nfs_sys_prot.NFS_MATTR_ATTRCACHE_DIR_MAX) + supportsRootDirectoryAttributeCachingTimeouts := buildVersion.greaterEqual(23, 'E', 86) + if supportsRootDirectoryAttributeCachingTimeouts { + writeAttributeCachingDuration(&m.childDirectoriesAttributeCaching, &attrVals) + } else { + // This version of macOS does not support the + // 'acrootdirmin' and 'acrootdirmax' mount options. The + // 'acdirmin' and 'acdirmax' option controls the + // attribute caching duration for all directories in the + // file system. + directoriesAttributeCaching := m.rootDirectoryAttributeCaching.Min(m.childDirectoriesAttributeCaching) + writeAttributeCachingDuration(&directoriesAttributeCaching, &attrVals) + } + + // "ticotsord" is the X/Open Transport Interface (XTI) + // equivalent of AF_LOCAL with SOCK_STREAM. 
+ attrMask[0] |= 1 << nfs_sys_prot.NFS_MATTR_SOCKET_TYPE + nfs_sys_prot.WriteNfsMattrSocketType(&attrVals, "ticotsord") + + attrMask[0] |= 1 << nfs_sys_prot.NFS_MATTR_FS_LOCATIONS + fsLocations := nfs_sys_prot.NfsFsLocations{ + NfslLocation: []nfs_sys_prot.NfsFsLocation{{ + NfslServer: []nfs_sys_prot.NfsFsServer{{ + NfssName: m.fsName, + NfssAddress: []string{osConfiguration.SocketPath}, + }}, + }}, + } + fsLocations.WriteTo(&attrVals) + + attrMask[0] |= 1 << nfs_sys_prot.NFS_MATTR_LOCAL_NFS_PORT + nfs_sys_prot.WriteNfsMattrLocalNfsPort(&attrVals, osConfiguration.SocketPath) + + if m.leavesAttributeCaching == NoAttributeCaching { + attrMask[1] |= 1 << (nfs_sys_prot.NFS_MATTR_READLINK_NOCACHE - 32) + nfs_sys_prot.NFS_READLINK_CACHE_MODE_FULLY_UNCACHED.WriteTo(&attrVals) + } + + if supportsRootDirectoryAttributeCachingTimeouts { + attrMask[1] |= (1 << (nfs_sys_prot.NFS_MATTR_ATTRCACHE_ROOTDIR_MIN - 32)) | (1 << (nfs_sys_prot.NFS_MATTR_ATTRCACHE_ROOTDIR_MAX - 32)) + writeAttributeCachingDuration(&m.rootDirectoryAttributeCaching, &attrVals) + } + + // Construct the nfs_mount_args message and serialize it. + for attrMask[len(attrMask)-1] == 0 { + attrMask = attrMask[:len(attrMask)-1] + } + mountArgs := nfs_sys_prot.NfsMountArgs{ + ArgsVersion: nfs_sys_prot.NFS_ARGSVERSION_XDR, + XdrArgsVersion: nfs_sys_prot.NFS_XDRARGS_VERSION_0, + NfsMountAttrs: nfs_sys_prot.NfsMattr{ + Attrmask: attrMask, + AttrVals: attrVals.Bytes(), + }, + } + mountArgs.ArgsLength = uint32(mountArgs.GetEncodedSizeBytes()) + + mountArgsBuf := bytes.NewBuffer(make([]byte, 0, mountArgs.ArgsLength)) + if _, err := mountArgs.WriteTo(mountArgsBuf); err != nil { + return util.StatusWrap(err, "Failed to marshal NFS mount arguments") + } + + // Call mount(2) with the serialized nfs_mount_args message. 
+ mountPath := m.mountPath + removeStaleMounts(mountPath) + if err := unix.Mount("nfs", mountPath, 0, unsafe.Pointer(&mountArgsBuf.Bytes()[0])); err != nil { + return util.StatusWrap(err, "Mounting NFS volume failed") + } + + // Automatically unmount upon shutdown. + terminationGroup.Go(func(ctx context.Context, siblingsGroup, dependenciesGroup program.Group) error { + <-ctx.Done() + if err := unix.Unmount(mountPath, 0); err != nil { + return util.StatusWrapf(err, "Failed to unmount %#v", mountPath) + } + return nil + }) + return nil +} diff --git a/pkg/filesystem/virtual/configuration/nfsv4_mount_disabled.go b/pkg/filesystem/virtual/configuration/nfsv4_mount_disabled.go new file mode 100644 index 0000000..09ea112 --- /dev/null +++ b/pkg/filesystem/virtual/configuration/nfsv4_mount_disabled.go @@ -0,0 +1,16 @@ +//go:build freebsd || linux || windows +// +build freebsd linux windows + +package configuration + +import ( + "github.com/buildbarn/bb-storage/pkg/program" + "github.com/buildbarn/go-xdr/pkg/rpcserver" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func (m *nfsv4Mount) mount(terminationGroup program.Group, rpcServer *rpcserver.Server) error { + return status.Error(codes.Unimplemented, "NFSv4 is not supported on this platform") +} diff --git a/pkg/filesystem/virtual/configuration/remove_stale_mounts.go b/pkg/filesystem/virtual/configuration/remove_stale_mounts.go new file mode 100644 index 0000000..494b67d --- /dev/null +++ b/pkg/filesystem/virtual/configuration/remove_stale_mounts.go @@ -0,0 +1,17 @@ +//go:build darwin || linux +// +build darwin linux + +package configuration + +import ( + "golang.org/x/sys/unix" +) + +// removeStaleMounts cleans up stale FUSE/NFSv4 mounts that were left +// behind by a previous invocation of the program. As FUSE apparently +// allows multiple mounts to be placed on top of a single inode, we must +// call unmount() repeatedly. 
+func removeStaleMounts(path string) { + for unix.Unmount(path, 0) == nil { + } +} diff --git a/pkg/filesystem/virtual/directory.go b/pkg/filesystem/virtual/directory.go new file mode 100644 index 0000000..4d83d14 --- /dev/null +++ b/pkg/filesystem/virtual/directory.go @@ -0,0 +1,104 @@ +package virtual + +import ( + "context" + + "github.com/buildbarn/bb-storage/pkg/filesystem" + "github.com/buildbarn/bb-storage/pkg/filesystem/path" +) + +// DirectoryEntryReporter is used by VirtualReadDir() to report +// individual directory entries. These methods may be called while locks +// on the underlying directory are held. This means that it's not safe +// to call methods of the child directory, as that could cause +// deadlocks. +type DirectoryEntryReporter interface { + // TODO: Can't use DirectoryChild in the arguments here, due to + // https://github.com/golang/go/issues/50259. + ReportEntry(nextCookie uint64, name path.Component, child Child[Directory, Leaf, Node], attributes *Attributes) bool +} + +// ChangeInfo contains a pair of change IDs of a directory, before and +// after performing a directory mutating operation. This information +// needs to be returned by various NFSv4 operations. +type ChangeInfo struct { + Before uint64 + After uint64 +} + +// DirectoryChild is either a Directory or a Leaf, as returned by +// Directory.VirtualLookup(). +type DirectoryChild = Child[Directory, Leaf, Node] + +// Directory node that is exposed through FUSE using +// SimpleRawFileSystem, or through NFSv4. The names of all of these +// operations are prefixed with 'Virtual' to ensure they don't collide +// with filesystem.Directory. +type Directory interface { + Node + + // VirtualOpenChild opens a regular file within the directory. + // + // When createAttributes is nil, this method will fail with + // StatusErrNoEnt if the file does not exist. When not nil, a + // file will be created. 
+ // + // When existingOptions is nil, this method will fail with + // StatusErrExist if the file already exists. When not nil, an + // existing file will be opened. + // + // Either one or both of createAttributes and existingOptions + // need to be provided. + VirtualOpenChild(ctx context.Context, name path.Component, shareAccess ShareMask, createAttributes *Attributes, existingOptions *OpenExistingOptions, requested AttributesMask, openedFileAttributes *Attributes) (Leaf, AttributesMask, ChangeInfo, Status) + // VirtualLink links an existing file into the directory. + VirtualLink(ctx context.Context, name path.Component, leaf Leaf, requested AttributesMask, attributes *Attributes) (ChangeInfo, Status) + // VirtualLookup obtains the inode corresponding with a child + // stored within the directory. + // + // TODO: Can't use DirectoryChild in the return type here, due to + // https://github.com/golang/go/issues/50259. + VirtualLookup(ctx context.Context, name path.Component, requested AttributesMask, out *Attributes) (Child[Directory, Leaf, Node], Status) + // VirtualMkdir creates an empty directory within the current + // directory. + VirtualMkdir(name path.Component, requested AttributesMask, attributes *Attributes) (Directory, ChangeInfo, Status) + // VirtualMknod creates a character FIFO or UNIX domain socket + // within the current directory. + VirtualMknod(ctx context.Context, name path.Component, fileType filesystem.FileType, requested AttributesMask, attributes *Attributes) (Leaf, ChangeInfo, Status) + // VirtualReadDir reports files and directories stored within + // the directory. + VirtualReadDir(ctx context.Context, firstCookie uint64, requested AttributesMask, reporter DirectoryEntryReporter) Status + // VirtualRename renames a file stored in the current directory, + // potentially moving it to another directory. 
+ VirtualRename(oldName path.Component, newDirectory Directory, newName path.Component) (ChangeInfo, ChangeInfo, Status) + // VirtualRemove removes an empty directory or leaf node stored + // within the current directory. Depending on the parameters, + // this method behaves like rmdir(), unlink() or a mixture of + // the two. The latter is needed by NFSv4. + VirtualRemove(name path.Component, removeDirectory, removeLeaf bool) (ChangeInfo, Status) + // VirtualSymlink creates a symbolic link within the current + // directory. + VirtualSymlink(ctx context.Context, pointedTo []byte, linkName path.Component, requested AttributesMask, attributes *Attributes) (Leaf, ChangeInfo, Status) +} + +const ( + // ImplicitDirectoryLinkCount is the value that should be + // assigned to fuse.attr.Nlink for directory nodes for which the + // directory contents are not defined explicitly. These may be + // directories that are lazy-loading, or have an infinite number + // of children due to them being defined programmatically. + // + // It is important that we return a link count lower than two + // for these directories. Tools like GNU find(1) rely on an + // accurate link count to rule out the existence of child + // directories. A link count below two instructs them to disable + // such optimizations, forcing them to read directory listings. + // See the "-noleaf" option in find(1)'s man page for details. + // + // File systems such as btrfs also set the link count of + // directories to one. + ImplicitDirectoryLinkCount uint32 = 1 + // EmptyDirectoryLinkCount is the value that should be assigned + // to fuse.attr.Nlink for directory nodes that do not have any + // child directories. 
+ EmptyDirectoryLinkCount uint32 = 2 +) diff --git a/pkg/filesystem/virtual/empty_initial_contents_fetcher.go b/pkg/filesystem/virtual/empty_initial_contents_fetcher.go new file mode 100644 index 0000000..9bdace1 --- /dev/null +++ b/pkg/filesystem/virtual/empty_initial_contents_fetcher.go @@ -0,0 +1,23 @@ +package virtual + +import ( + "context" + + "github.com/buildbarn/bb-storage/pkg/digest" + "github.com/buildbarn/bb-storage/pkg/filesystem/path" +) + +type emptyInitialContentsFetcher struct{} + +func (f emptyInitialContentsFetcher) FetchContents(fileReadMonitorFactory FileReadMonitorFactory) (map[path.Component]InitialNode, error) { + return map[path.Component]InitialNode{}, nil +} + +func (f emptyInitialContentsFetcher) GetContainingDigests(ctx context.Context) (digest.Set, error) { + return digest.EmptySet, nil +} + +// EmptyInitialContentsFetcher is an instance of InitialContentsFetcher +// that yields no children. It can be used in case an empty directory +// needs to be created. +var EmptyInitialContentsFetcher InitialContentsFetcher = emptyInitialContentsFetcher{} diff --git a/pkg/filesystem/virtual/file_allocator.go b/pkg/filesystem/virtual/file_allocator.go new file mode 100644 index 0000000..2648bb7 --- /dev/null +++ b/pkg/filesystem/virtual/file_allocator.go @@ -0,0 +1,11 @@ +package virtual + +// FileAllocator is called into by InMemoryPrepopulatedDirectory to +// create new files within the file system. Such files could either be +// stored in memory, on disk, remotely, etc. +// +// Files returned by this interface should have a link count of 1, and +// are opened using the provided share access mask. 
+type FileAllocator interface { + NewFile(isExecutable bool, size uint64, shareAccess ShareMask) (NativeLeaf, Status) +} diff --git a/pkg/filesystem/virtual/fuse/BUILD.bazel b/pkg/filesystem/virtual/fuse/BUILD.bazel new file mode 100644 index 0000000..e01b41c --- /dev/null +++ b/pkg/filesystem/virtual/fuse/BUILD.bazel @@ -0,0 +1,190 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "fuse", + srcs = [ + "allow_authenticator.go", + "authenticator.go", + "default_attributes_injecting_raw_file_system.go", + "in_header_authenticator.go", + "metrics_raw_file_system.go", + "simple_raw_file_system.go", + "sysfs_disabled.go", + "sysfs_linux.go", + ], + importpath = "github.com/buildbarn/bb-remote-execution/pkg/filesystem/virtual/fuse", + visibility = ["//visibility:public"], + deps = select({ + "@io_bazel_rules_go//go/platform:aix": [ + "@org_golang_google_grpc//codes", + "@org_golang_google_grpc//status", + ], + "@io_bazel_rules_go//go/platform:android": [ + "//pkg/filesystem/virtual", + "@com_github_buildbarn_bb_storage//pkg/auth", + "@com_github_buildbarn_bb_storage//pkg/clock", + "@com_github_buildbarn_bb_storage//pkg/filesystem", + "@com_github_buildbarn_bb_storage//pkg/filesystem/path", + "@com_github_buildbarn_bb_storage//pkg/util", + "@com_github_hanwen_go_fuse_v2//fuse", + "@com_github_jmespath_go_jmespath//:go-jmespath", + "@com_github_prometheus_client_golang//prometheus", + "@org_golang_x_sys//unix", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "//pkg/filesystem/virtual", + "@com_github_buildbarn_bb_storage//pkg/auth", + "@com_github_buildbarn_bb_storage//pkg/clock", + "@com_github_buildbarn_bb_storage//pkg/filesystem", + "@com_github_buildbarn_bb_storage//pkg/filesystem/path", + "@com_github_buildbarn_bb_storage//pkg/util", + "@com_github_hanwen_go_fuse_v2//fuse", + "@com_github_jmespath_go_jmespath//:go-jmespath", + "@com_github_prometheus_client_golang//prometheus", + "@org_golang_google_grpc//codes", + 
"@org_golang_google_grpc//status", + "@org_golang_x_sys//unix", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "@org_golang_google_grpc//codes", + "@org_golang_google_grpc//status", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "@org_golang_google_grpc//codes", + "@org_golang_google_grpc//status", + ], + "@io_bazel_rules_go//go/platform:illumos": [ + "@org_golang_google_grpc//codes", + "@org_golang_google_grpc//status", + ], + "@io_bazel_rules_go//go/platform:ios": [ + "//pkg/filesystem/virtual", + "@com_github_buildbarn_bb_storage//pkg/auth", + "@com_github_buildbarn_bb_storage//pkg/clock", + "@com_github_buildbarn_bb_storage//pkg/filesystem", + "@com_github_buildbarn_bb_storage//pkg/filesystem/path", + "@com_github_buildbarn_bb_storage//pkg/util", + "@com_github_hanwen_go_fuse_v2//fuse", + "@com_github_jmespath_go_jmespath//:go-jmespath", + "@com_github_prometheus_client_golang//prometheus", + "@org_golang_google_grpc//codes", + "@org_golang_google_grpc//status", + "@org_golang_x_sys//unix", + ], + "@io_bazel_rules_go//go/platform:js": [ + "@org_golang_google_grpc//codes", + "@org_golang_google_grpc//status", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "//pkg/filesystem/virtual", + "@com_github_buildbarn_bb_storage//pkg/auth", + "@com_github_buildbarn_bb_storage//pkg/clock", + "@com_github_buildbarn_bb_storage//pkg/filesystem", + "@com_github_buildbarn_bb_storage//pkg/filesystem/path", + "@com_github_buildbarn_bb_storage//pkg/util", + "@com_github_hanwen_go_fuse_v2//fuse", + "@com_github_jmespath_go_jmespath//:go-jmespath", + "@com_github_prometheus_client_golang//prometheus", + "@org_golang_x_sys//unix", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "@org_golang_google_grpc//codes", + "@org_golang_google_grpc//status", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "@org_golang_google_grpc//codes", + "@org_golang_google_grpc//status", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "@org_golang_google_grpc//codes", + 
"@org_golang_google_grpc//status", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "@org_golang_google_grpc//codes", + "@org_golang_google_grpc//status", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "@org_golang_google_grpc//codes", + "@org_golang_google_grpc//status", + ], + "//conditions:default": [], + }), +) + +go_test( + name = "fuse_test", + srcs = select({ + "@io_bazel_rules_go//go/platform:android": [ + "default_attributes_injecting_raw_file_system_test.go", + "in_header_authenticator_test.go", + "simple_raw_file_system_test.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "default_attributes_injecting_raw_file_system_test.go", + "in_header_authenticator_test.go", + "simple_raw_file_system_test.go", + ], + "@io_bazel_rules_go//go/platform:ios": [ + "default_attributes_injecting_raw_file_system_test.go", + "in_header_authenticator_test.go", + "simple_raw_file_system_test.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "default_attributes_injecting_raw_file_system_test.go", + "in_header_authenticator_test.go", + "simple_raw_file_system_test.go", + ], + "//conditions:default": [], + }), + deps = select({ + "@io_bazel_rules_go//go/platform:android": [ + ":fuse", + "//internal/mock", + "//pkg/filesystem/virtual", + "@com_github_buildbarn_bb_storage//pkg/auth", + "@com_github_buildbarn_bb_storage//pkg/filesystem", + "@com_github_buildbarn_bb_storage//pkg/filesystem/path", + "@com_github_golang_mock//gomock", + "@com_github_hanwen_go_fuse_v2//fuse", + "@com_github_jmespath_go_jmespath//:go-jmespath", + "@com_github_stretchr_testify//require", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + ":fuse", + "//internal/mock", + "//pkg/filesystem/virtual", + "@com_github_buildbarn_bb_storage//pkg/auth", + "@com_github_buildbarn_bb_storage//pkg/filesystem", + "@com_github_buildbarn_bb_storage//pkg/filesystem/path", + "@com_github_golang_mock//gomock", + "@com_github_hanwen_go_fuse_v2//fuse", + 
"@com_github_jmespath_go_jmespath//:go-jmespath", + "@com_github_stretchr_testify//require", + ], + "@io_bazel_rules_go//go/platform:ios": [ + ":fuse", + "//internal/mock", + "//pkg/filesystem/virtual", + "@com_github_buildbarn_bb_storage//pkg/auth", + "@com_github_buildbarn_bb_storage//pkg/filesystem", + "@com_github_buildbarn_bb_storage//pkg/filesystem/path", + "@com_github_golang_mock//gomock", + "@com_github_hanwen_go_fuse_v2//fuse", + "@com_github_jmespath_go_jmespath//:go-jmespath", + "@com_github_stretchr_testify//require", + ], + "@io_bazel_rules_go//go/platform:linux": [ + ":fuse", + "//internal/mock", + "//pkg/filesystem/virtual", + "@com_github_buildbarn_bb_storage//pkg/auth", + "@com_github_buildbarn_bb_storage//pkg/filesystem", + "@com_github_buildbarn_bb_storage//pkg/filesystem/path", + "@com_github_golang_mock//gomock", + "@com_github_hanwen_go_fuse_v2//fuse", + "@com_github_jmespath_go_jmespath//:go-jmespath", + "@com_github_stretchr_testify//require", + ], + "//conditions:default": [], + }), +) diff --git a/pkg/filesystem/virtual/fuse/allow_authenticator.go b/pkg/filesystem/virtual/fuse/allow_authenticator.go new file mode 100644 index 0000000..fe2ac7f --- /dev/null +++ b/pkg/filesystem/virtual/fuse/allow_authenticator.go @@ -0,0 +1,21 @@ +//go:build darwin || linux +// +build darwin linux + +package fuse + +import ( + "context" + + "github.com/hanwen/go-fuse/v2/fuse" +) + +type allowAuthenticator struct{} + +func (allowAuthenticator) Authenticate(ctx context.Context, caller *fuse.Caller) (context.Context, fuse.Status) { + return ctx, fuse.OK +} + +// AllowAuthenticator is an implementation of Authenticator that simply +// permits all incoming requests. No authentication metadata is attached +// to the context. 
+var AllowAuthenticator Authenticator = allowAuthenticator{} diff --git a/pkg/filesystem/virtual/fuse/authenticator.go b/pkg/filesystem/virtual/fuse/authenticator.go new file mode 100644 index 0000000..b972eef --- /dev/null +++ b/pkg/filesystem/virtual/fuse/authenticator.go @@ -0,0 +1,15 @@ +//go:build darwin || linux +// +build darwin linux + +package fuse + +import ( + "context" + + "github.com/hanwen/go-fuse/v2/fuse" +) + +// Authenticator of incoming FUSE requests. +type Authenticator interface { + Authenticate(ctx context.Context, caller *fuse.Caller) (context.Context, fuse.Status) +} diff --git a/pkg/filesystem/virtual/fuse/default_attributes_injecting_raw_file_system.go b/pkg/filesystem/virtual/fuse/default_attributes_injecting_raw_file_system.go new file mode 100644 index 0000000..8dd6b2b --- /dev/null +++ b/pkg/filesystem/virtual/fuse/default_attributes_injecting_raw_file_system.go @@ -0,0 +1,108 @@ +//go:build darwin || linux +// +build darwin linux + +package fuse + +import ( + "time" + + "github.com/hanwen/go-fuse/v2/fuse" +) + +type defaultAttributesInjectingRawFileSystem struct { + fuse.RawFileSystem + + attrOut fuse.AttrOut + entryOut fuse.EntryOut +} + +// NewDefaultAttributesInjectingRawFileSystem creates a decorator for +// RawFileSystem that places default values into AttrOut and EntryOut +// structures before they are passed on to FUSE operations. This means +// their values are only retained if the underlying implementation +// doesn't fill in values explicitly. +// +// Use cases of this decorator include filling in default +// entry/attribute validity durations, file modification times, file +// ownership, etc. 
+func NewDefaultAttributesInjectingRawFileSystem(base fuse.RawFileSystem, entryValid, attrValid time.Duration, attr *fuse.Attr) fuse.RawFileSystem { + entryValidNsec := entryValid.Nanoseconds() + attrValidNsec := attrValid.Nanoseconds() + return &defaultAttributesInjectingRawFileSystem{ + RawFileSystem: base, + + attrOut: fuse.AttrOut{ + AttrValid: uint64(attrValidNsec / 1e9), + AttrValidNsec: uint32(attrValidNsec % 1e9), + Attr: *attr, + }, + entryOut: fuse.EntryOut{ + EntryValid: uint64(entryValidNsec / 1e9), + EntryValidNsec: uint32(entryValidNsec % 1e9), + AttrValid: uint64(attrValidNsec / 1e9), + AttrValidNsec: uint32(attrValidNsec % 1e9), + Attr: *attr, + }, + } +} + +func (rfs *defaultAttributesInjectingRawFileSystem) Lookup(cancel <-chan struct{}, header *fuse.InHeader, name string, out *fuse.EntryOut) fuse.Status { + *out = rfs.entryOut + return rfs.RawFileSystem.Lookup(cancel, header, name, out) +} + +func (rfs *defaultAttributesInjectingRawFileSystem) GetAttr(cancel <-chan struct{}, input *fuse.GetAttrIn, out *fuse.AttrOut) fuse.Status { + *out = rfs.attrOut + return rfs.RawFileSystem.GetAttr(cancel, input, out) +} + +func (rfs *defaultAttributesInjectingRawFileSystem) SetAttr(cancel <-chan struct{}, input *fuse.SetAttrIn, out *fuse.AttrOut) fuse.Status { + *out = rfs.attrOut + return rfs.RawFileSystem.SetAttr(cancel, input, out) +} + +func (rfs *defaultAttributesInjectingRawFileSystem) Mknod(cancel <-chan struct{}, input *fuse.MknodIn, name string, out *fuse.EntryOut) fuse.Status { + *out = rfs.entryOut + return rfs.RawFileSystem.Mknod(cancel, input, name, out) +} + +func (rfs *defaultAttributesInjectingRawFileSystem) Mkdir(cancel <-chan struct{}, input *fuse.MkdirIn, name string, out *fuse.EntryOut) fuse.Status { + *out = rfs.entryOut + return rfs.RawFileSystem.Mkdir(cancel, input, name, out) +} + +func (rfs *defaultAttributesInjectingRawFileSystem) Link(cancel <-chan struct{}, input *fuse.LinkIn, filename string, out *fuse.EntryOut) fuse.Status { + 
*out = rfs.entryOut + return rfs.RawFileSystem.Link(cancel, input, filename, out) +} + +func (rfs *defaultAttributesInjectingRawFileSystem) Symlink(cancel <-chan struct{}, header *fuse.InHeader, pointedTo, linkName string, out *fuse.EntryOut) fuse.Status { + *out = rfs.entryOut + return rfs.RawFileSystem.Symlink(cancel, header, pointedTo, linkName, out) +} + +func (rfs *defaultAttributesInjectingRawFileSystem) Create(cancel <-chan struct{}, input *fuse.CreateIn, name string, out *fuse.CreateOut) fuse.Status { + out.EntryOut = rfs.entryOut + return rfs.RawFileSystem.Create(cancel, input, name, out) +} + +func (rfs *defaultAttributesInjectingRawFileSystem) ReadDirPlus(cancel <-chan struct{}, input *fuse.ReadIn, out fuse.ReadDirPlusEntryList) fuse.Status { + return rfs.RawFileSystem.ReadDirPlus(cancel, input, &defaultAttributesInjectingReadDirPlusEntryList{ + ReadDirPlusEntryList: out, + entryOut: &rfs.entryOut, + }) +} + +type defaultAttributesInjectingReadDirPlusEntryList struct { + fuse.ReadDirPlusEntryList + + entryOut *fuse.EntryOut +} + +func (el *defaultAttributesInjectingReadDirPlusEntryList) AddDirLookupEntry(e fuse.DirEntry, off uint64) *fuse.EntryOut { + if entryOut := el.ReadDirPlusEntryList.AddDirLookupEntry(e, off); entryOut != nil { + *entryOut = *el.entryOut + return entryOut + } + return nil +} diff --git a/pkg/filesystem/virtual/fuse/default_attributes_injecting_raw_file_system_test.go b/pkg/filesystem/virtual/fuse/default_attributes_injecting_raw_file_system_test.go new file mode 100644 index 0000000..beaad0d --- /dev/null +++ b/pkg/filesystem/virtual/fuse/default_attributes_injecting_raw_file_system_test.go @@ -0,0 +1,210 @@ +//go:build darwin || linux +// +build darwin linux + +package fuse_test + +import ( + "testing" + "time" + + "github.com/buildbarn/bb-remote-execution/internal/mock" + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/virtual/fuse" + "github.com/golang/mock/gomock" + go_fuse "github.com/hanwen/go-fuse/v2/fuse" + 
"github.com/stretchr/testify/require" +) + +func TestDefaultAttributesInjectingRawFileSystem(t *testing.T) { + ctrl := gomock.NewController(t) + + base := mock.NewMockRawFileSystem(ctrl) + rfs := fuse.NewDefaultAttributesInjectingRawFileSystem( + base, + time.Minute+time.Second/2, + time.Minute/2+time.Second/4, + &go_fuse.Attr{ + Atime: 1596207097, + Mtime: 1596207531, + }) + + t.Run("Lookup", func(t *testing.T) { + // Lookup() is an example of an operation that returns + // an EntryOut through an output parameter. + base.EXPECT().Lookup( + nil, + &go_fuse.InHeader{NodeId: 5}, + "hello", + gomock.Any(), + ).DoAndReturn(func(cancel <-chan struct{}, header *go_fuse.InHeader, name string, out *go_fuse.EntryOut) go_fuse.Status { + out.NodeId = 6 + out.Ino = 6 + out.Size = 12 + out.Mode = go_fuse.S_IFLNK | 0o777 + out.Nlink = 1 + out.Mtime = 123 + return go_fuse.OK + }) + + var entryOut go_fuse.EntryOut + require.Equal( + t, + go_fuse.OK, + rfs.Lookup(nil, &go_fuse.InHeader{NodeId: 5}, "hello", &entryOut)) + require.Equal( + t, + go_fuse.EntryOut{ + NodeId: 6, + EntryValid: 60, + EntryValidNsec: 500000000, + AttrValid: 30, + AttrValidNsec: 250000000, + Attr: go_fuse.Attr{ + Ino: 6, + Size: 12, + Mode: go_fuse.S_IFLNK | 0o777, + Nlink: 1, + Atime: 1596207097, + Mtime: 123, + }, + }, + entryOut) + }) + + t.Run("GetAttr", func(t *testing.T) { + // GetAttr() is an example of an operation that returns + // an AttrOut through an output parameter. 
+ base.EXPECT().GetAttr( + nil, + &go_fuse.GetAttrIn{InHeader: go_fuse.InHeader{NodeId: 5}}, + gomock.Any(), + ).DoAndReturn(func(cancel <-chan struct{}, input *go_fuse.GetAttrIn, out *go_fuse.AttrOut) go_fuse.Status { + out.Ino = 6 + out.Size = 12 + out.Mode = go_fuse.S_IFLNK | 0o777 + out.Nlink = 1 + out.Mtime = 123 + return go_fuse.OK + }) + + var attrOut go_fuse.AttrOut + require.Equal( + t, + go_fuse.OK, + rfs.GetAttr(nil, &go_fuse.GetAttrIn{InHeader: go_fuse.InHeader{NodeId: 5}}, &attrOut)) + require.Equal( + t, + go_fuse.AttrOut{ + AttrValid: 30, + AttrValidNsec: 250000000, + Attr: go_fuse.Attr{ + Ino: 6, + Size: 12, + Mode: go_fuse.S_IFLNK | 0o777, + Nlink: 1, + Atime: 1596207097, + Mtime: 123, + }, + }, + attrOut) + }) + + t.Run("ReadDirPlus", func(t *testing.T) { + // ReadDirPlus() returns EntryOut objects through a + // separate DirEntryList handle. + entryList := mock.NewMockReadDirPlusEntryList(ctrl) + base.EXPECT().ReadDirPlus( + nil, + &go_fuse.ReadIn{InHeader: go_fuse.InHeader{NodeId: 5}}, + gomock.Any(), + ).DoAndReturn(func(cancel <-chan struct{}, input *go_fuse.ReadIn, out go_fuse.ReadDirPlusEntryList) go_fuse.Status { + e := out.AddDirLookupEntry(go_fuse.DirEntry{ + Mode: go_fuse.S_IFLNK, + Name: "symlink", + Ino: 6, + }, 5) + e.NodeId = 6 + e.Ino = 6 + e.Size = 12 + e.Mode = go_fuse.S_IFLNK | 0o777 + e.Nlink = 1 + e.Mtime = 123 + + e = out.AddDirLookupEntry(go_fuse.DirEntry{ + Mode: go_fuse.S_IFREG, + Name: "file", + Ino: 7, + }, 6) + e.NodeId = 7 + e.Ino = 7 + e.Size = 42 + e.Mode = go_fuse.S_IFREG | 0o644 + e.Nlink = 2 + e.Mtime = 123 + + require.Nil(t, out.AddDirLookupEntry(go_fuse.DirEntry{ + Mode: go_fuse.S_IFDIR, + Name: "directory", + Ino: 8, + }, 7)) + return go_fuse.OK + }) + var entry1 go_fuse.EntryOut + entryList.EXPECT().AddDirLookupEntry(go_fuse.DirEntry{ + Mode: go_fuse.S_IFLNK, + Name: "symlink", + Ino: 6, + }, uint64(5)).Return(&entry1) + var entry2 go_fuse.EntryOut + entryList.EXPECT().AddDirLookupEntry(go_fuse.DirEntry{ + 
Mode: go_fuse.S_IFREG, + Name: "file", + Ino: 7, + }, uint64(6)).Return(&entry2) + entryList.EXPECT().AddDirLookupEntry(go_fuse.DirEntry{ + Mode: go_fuse.S_IFDIR, + Name: "directory", + Ino: 8, + }, uint64(7)) + + require.Equal( + t, + go_fuse.OK, + rfs.ReadDirPlus(nil, &go_fuse.ReadIn{InHeader: go_fuse.InHeader{NodeId: 5}}, entryList)) + require.Equal( + t, + go_fuse.EntryOut{ + NodeId: 6, + EntryValid: 60, + EntryValidNsec: 500000000, + AttrValid: 30, + AttrValidNsec: 250000000, + Attr: go_fuse.Attr{ + Ino: 6, + Size: 12, + Mode: go_fuse.S_IFLNK | 0o777, + Nlink: 1, + Atime: 1596207097, + Mtime: 123, + }, + }, + entry1) + require.Equal( + t, + go_fuse.EntryOut{ + NodeId: 7, + EntryValid: 60, + EntryValidNsec: 500000000, + AttrValid: 30, + AttrValidNsec: 250000000, + Attr: go_fuse.Attr{ + Ino: 7, + Size: 42, + Mode: go_fuse.S_IFREG | 0o644, + Nlink: 2, + Atime: 1596207097, + Mtime: 123, + }, + }, + entry2) + }) +} diff --git a/pkg/filesystem/virtual/fuse/in_header_authenticator.go b/pkg/filesystem/virtual/fuse/in_header_authenticator.go new file mode 100644 index 0000000..328416f --- /dev/null +++ b/pkg/filesystem/virtual/fuse/in_header_authenticator.go @@ -0,0 +1,46 @@ +//go:build darwin || linux +// +build darwin linux + +package fuse + +import ( + "context" + "log" + + "github.com/buildbarn/bb-storage/pkg/auth" + "github.com/hanwen/go-fuse/v2/fuse" + "github.com/jmespath/go-jmespath" +) + +type inHeaderAuthenticator struct { + metadataExtractor *jmespath.JMESPath +} + +// NewInHeaderAuthenticator creates an Authenticator that obtains +// authentication metadata from an incoming FUSE request by inspecting +// the "fuse_in_header" structure that's provided by the kernel. This +// structure contains the user ID, group ID and process ID of the +// calling process. 
+func NewInHeaderAuthenticator(metadataExtractor *jmespath.JMESPath) Authenticator { + return &inHeaderAuthenticator{ + metadataExtractor: metadataExtractor, + } +} + +func (a *inHeaderAuthenticator) Authenticate(ctx context.Context, caller *fuse.Caller) (context.Context, fuse.Status) { + raw, err := a.metadataExtractor.Search(map[string]any{ + "uid": caller.Uid, + "gid": caller.Gid, + "pid": caller.Pid, + }) + if err != nil { + log.Print("Failed to perform authentication metadata extraction: ", err) + return nil, fuse.EIO + } + authenticationMetadata, err := auth.NewAuthenticationMetadataFromRaw(raw) + if err != nil { + log.Print("Failed to create authentication metadata: ", err) + return nil, fuse.EIO + } + return auth.NewContextWithAuthenticationMetadata(ctx, authenticationMetadata), fuse.OK +} diff --git a/pkg/filesystem/virtual/fuse/in_header_authenticator_test.go b/pkg/filesystem/virtual/fuse/in_header_authenticator_test.go new file mode 100644 index 0000000..587f7e5 --- /dev/null +++ b/pkg/filesystem/virtual/fuse/in_header_authenticator_test.go @@ -0,0 +1,35 @@ +//go:build darwin || linux +// +build darwin linux + +package fuse_test + +import ( + "context" + "testing" + + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/virtual/fuse" + "github.com/buildbarn/bb-storage/pkg/auth" + go_fuse "github.com/hanwen/go-fuse/v2/fuse" + "github.com/jmespath/go-jmespath" + "github.com/stretchr/testify/require" +) + +func TestInHeaderAuthenticator(t *testing.T) { + authenticator := fuse.NewInHeaderAuthenticator(jmespath.MustCompile("{\"public\": @}")) + + ctxWithMetadata, s := authenticator.Authenticate(context.Background(), &go_fuse.Caller{ + Owner: go_fuse.Owner{ + Uid: 1000, + Gid: 100, + }, + Pid: 10847, + }) + require.Equal(t, go_fuse.OK, s) + require.Equal(t, map[string]any{ + "public": map[string]any{ + "uid": 1000.0, + "gid": 100.0, + "pid": 10847.0, + }, + }, auth.AuthenticationMetadataFromContext(ctxWithMetadata).GetRaw()) +} diff --git 
a/pkg/filesystem/virtual/fuse/metrics_raw_file_system.go b/pkg/filesystem/virtual/fuse/metrics_raw_file_system.go new file mode 100644 index 0000000..5897814 --- /dev/null +++ b/pkg/filesystem/virtual/fuse/metrics_raw_file_system.go @@ -0,0 +1,466 @@ +//go:build darwin || linux +// +build darwin linux + +package fuse + +import ( + "sync" + "syscall" + "time" + + "github.com/buildbarn/bb-storage/pkg/clock" + "github.com/buildbarn/bb-storage/pkg/util" + "github.com/hanwen/go-fuse/v2/fuse" + "github.com/prometheus/client_golang/prometheus" + + "golang.org/x/sys/unix" +) + +var ( + rawFileSystemOperationsPrometheusMetrics sync.Once + + rawFileSystemOperationsDurationSeconds = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "buildbarn", + Subsystem: "fuse", + Name: "raw_file_system_operations_duration_seconds", + Help: "Amount of time spent per operation on raw file system objects, in seconds.", + Buckets: util.DecimalExponentialBuckets(-3, 6, 2), + }, + []string{"operation", "status_code"}) + rawFileSystemCallbacks = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "buildbarn", + Subsystem: "fuse", + Name: "raw_file_system_callbacks_total", + Help: "Total number of callbacks invoked by raw file system objects.", + }, + []string{"callback", "status_code"}) +) + +// operationHistogram holds references to Prometheus metrics for a single +// FUSE operation that can never fail. +type operationHistogram struct { + ok prometheus.Observer +} + +func newOperationHistogram(operation string) operationHistogram { + return operationHistogram{ + ok: rawFileSystemOperationsDurationSeconds.WithLabelValues(operation, "OK"), + } +} + +func (m *operationHistogram) observe(timeStart, timeStop time.Time) { + m.ok.Observe(timeStop.Sub(timeStart).Seconds()) +} + +// operationHistogramWithStatus holds references to Prometheus metrics for +// a single FUSE operation that can fail with a fuse.Status. 
+type operationHistogramWithStatus struct { + ok prometheus.Observer + failure prometheus.ObserverVec +} + +func newOperationHistogramWithStatus(operation string) operationHistogramWithStatus { + return operationHistogramWithStatus{ + ok: rawFileSystemOperationsDurationSeconds.WithLabelValues(operation, "OK"), + failure: rawFileSystemOperationsDurationSeconds.MustCurryWith(map[string]string{"operation": operation}), + } +} + +func (m *operationHistogramWithStatus) observe(s fuse.Status, timeStart, timeStop time.Time) { + d := timeStop.Sub(timeStart).Seconds() + if s == fuse.OK { + m.ok.Observe(d) + } else { + // Use unix.ErrnoName() instead of fuse.Status.String(). + // The latter inserts OS specific errno integer values + // into the error message, which is not desirable in + // heterogeneous environments. + m.failure.WithLabelValues(unix.ErrnoName(syscall.Errno(s))).Observe(d) + } +} + +// callbackCounterWithStatus holds references to Prometheus metrics for +// a single FUSE server callback that can fail with a fuse.Status. +type callbackCounterWithStatus struct { + ok prometheus.Counter + failure *prometheus.CounterVec +} + +func newCallbackCounterWithStatus(callback string) callbackCounterWithStatus { + return callbackCounterWithStatus{ + ok: rawFileSystemCallbacks.WithLabelValues(callback, "OK"), + failure: rawFileSystemCallbacks.MustCurryWith(map[string]string{"callback": callback}), + } +} + +func (m *callbackCounterWithStatus) inc(s fuse.Status) { + if s == fuse.OK { + m.ok.Inc() + } else { + m.failure.WithLabelValues(unix.ErrnoName(syscall.Errno(s))).Inc() + } +} + +var ( + // Already populate the HistogramVec with entries for all operations. 
+ operationHistogramLookup = newOperationHistogramWithStatus("Lookup") + operationHistogramForget = newOperationHistogram("Forget") + operationHistogramGetAttr = newOperationHistogramWithStatus("GetAttr") + operationHistogramSetAttr = newOperationHistogramWithStatus("SetAttr") + operationHistogramMknod = newOperationHistogramWithStatus("Mknod") + operationHistogramMkdir = newOperationHistogramWithStatus("Mkdir") + operationHistogramUnlink = newOperationHistogramWithStatus("Unlink") + operationHistogramRmdir = newOperationHistogramWithStatus("Rmdir") + operationHistogramRename = newOperationHistogramWithStatus("Rename") + operationHistogramLink = newOperationHistogramWithStatus("Link") + operationHistogramSymlink = newOperationHistogramWithStatus("Symlink") + operationHistogramReadlink = newOperationHistogramWithStatus("Readlink") + operationHistogramAccess = newOperationHistogramWithStatus("Access") + operationHistogramGetXAttr = newOperationHistogramWithStatus("GetXAttr") + operationHistogramListXAttr = newOperationHistogramWithStatus("ListXAttr") + operationHistogramSetXAttr = newOperationHistogramWithStatus("SetXAttr") + operationHistogramRemoveXAttr = newOperationHistogramWithStatus("RemoveXAttr") + operationHistogramCreate = newOperationHistogramWithStatus("Create") + operationHistogramOpen = newOperationHistogramWithStatus("Open") + operationHistogramRead = newOperationHistogramWithStatus("Read") + operationHistogramLseek = newOperationHistogramWithStatus("Lseek") + operationHistogramGetLk = newOperationHistogramWithStatus("GetLk") + operationHistogramSetLk = newOperationHistogramWithStatus("SetLk") + operationHistogramSetLkw = newOperationHistogramWithStatus("SetLkw") + operationHistogramRelease = newOperationHistogram("Release") + operationHistogramWrite = newOperationHistogramWithStatus("Write") + operationHistogramCopyFileRange = newOperationHistogramWithStatus("CopyFileRange") + operationHistogramFlush = newOperationHistogramWithStatus("Flush") + 
operationHistogramFsync = newOperationHistogramWithStatus("Fsync") + operationHistogramFallocate = newOperationHistogramWithStatus("Fallocate") + operationHistogramOpenDir = newOperationHistogramWithStatus("OpenDir") + operationHistogramReadDir = newOperationHistogramWithStatus("ReadDir") + operationHistogramReadDirPlus = newOperationHistogramWithStatus("ReadDirPlus") + operationHistogramReleaseDir = newOperationHistogram("ReleaseDir") + operationHistogramFsyncDir = newOperationHistogramWithStatus("FsyncDir") + operationHistogramStatFs = newOperationHistogramWithStatus("StatFs") + + callbackCounterDeleteNotify = newCallbackCounterWithStatus("DeleteNotify") + callbackCounterEntryNotify = newCallbackCounterWithStatus("EntryNotify") + callbackCounterInodeNotify = newCallbackCounterWithStatus("InodeNotify") + callbackCounterInodeRetrieveCache = newCallbackCounterWithStatus("InodeRetrieveCache") + callbackCounterInodeNotifyStoreCache = newCallbackCounterWithStatus("InodeNotifyStoreCache") +) + +type metricsRawFileSystem struct { + base fuse.RawFileSystem + clock clock.Clock +} + +// NewMetricsRawFileSystem creates a decorator for fuse.RawFileSystem +// that exposes Prometheus metrics for each of the operations invoked. 
// NewMetricsRawFileSystem creates a decorator for fuse.RawFileSystem
// that exposes Prometheus metrics on the duration of every FUSE
// operation, and on the number of server callbacks issued, grouped by
// status. The two collectors are registered exactly once, on the
// first call (guarded by .Do()).
func NewMetricsRawFileSystem(base fuse.RawFileSystem, clock clock.Clock) fuse.RawFileSystem {
	rawFileSystemOperationsPrometheusMetrics.Do(func() {
		prometheus.MustRegister(rawFileSystemOperationsDurationSeconds)
		prometheus.MustRegister(rawFileSystemCallbacks)
	})

	return &metricsRawFileSystem{
		base:  base,
		clock: clock,
	}
}

func (rfs *metricsRawFileSystem) String() string {
	return rfs.base.String()
}

func (rfs *metricsRawFileSystem) SetDebug(debug bool) {
	rfs.base.SetDebug(debug)
}

// Every method below follows the same pattern: read the clock, forward
// the call to the underlying file system, and fold the elapsed time
// into that operation's histogram. Operations that return a
// fuse.Status pass it to observe() as well, so durations are bucketed
// by outcome; operations without a status (Forget, Release,
// ReleaseDir) record the duration only.

func (rfs *metricsRawFileSystem) Lookup(cancel <-chan struct{}, header *fuse.InHeader, name string, out *fuse.EntryOut) fuse.Status {
	timeStart := rfs.clock.Now()
	s := rfs.base.Lookup(cancel, header, name, out)
	operationHistogramLookup.observe(s, timeStart, rfs.clock.Now())
	return s
}

func (rfs *metricsRawFileSystem) Forget(nodeID, nLookup uint64) {
	timeStart := rfs.clock.Now()
	rfs.base.Forget(nodeID, nLookup)
	operationHistogramForget.observe(timeStart, rfs.clock.Now())
}

func (rfs *metricsRawFileSystem) GetAttr(cancel <-chan struct{}, input *fuse.GetAttrIn, out *fuse.AttrOut) fuse.Status {
	timeStart := rfs.clock.Now()
	s := rfs.base.GetAttr(cancel, input, out)
	operationHistogramGetAttr.observe(s, timeStart, rfs.clock.Now())
	return s
}

func (rfs *metricsRawFileSystem) SetAttr(cancel <-chan struct{}, input *fuse.SetAttrIn, out *fuse.AttrOut) fuse.Status {
	timeStart := rfs.clock.Now()
	s := rfs.base.SetAttr(cancel, input, out)
	operationHistogramSetAttr.observe(s, timeStart, rfs.clock.Now())
	return s
}

func (rfs *metricsRawFileSystem) Mknod(cancel <-chan struct{}, input *fuse.MknodIn, name string, out *fuse.EntryOut) fuse.Status {
	timeStart := rfs.clock.Now()
	s := rfs.base.Mknod(cancel, input, name, out)
	operationHistogramMknod.observe(s, timeStart, rfs.clock.Now())
	return s
}

func (rfs *metricsRawFileSystem) Mkdir(cancel <-chan struct{}, input *fuse.MkdirIn, name string, out *fuse.EntryOut) fuse.Status {
	timeStart := rfs.clock.Now()
	s := rfs.base.Mkdir(cancel, input, name, out)
	operationHistogramMkdir.observe(s, timeStart, rfs.clock.Now())
	return s
}

func (rfs *metricsRawFileSystem) Unlink(cancel <-chan struct{}, header *fuse.InHeader, name string) fuse.Status {
	timeStart := rfs.clock.Now()
	s := rfs.base.Unlink(cancel, header, name)
	operationHistogramUnlink.observe(s, timeStart, rfs.clock.Now())
	return s
}

func (rfs *metricsRawFileSystem) Rmdir(cancel <-chan struct{}, header *fuse.InHeader, name string) fuse.Status {
	timeStart := rfs.clock.Now()
	s := rfs.base.Rmdir(cancel, header, name)
	operationHistogramRmdir.observe(s, timeStart, rfs.clock.Now())
	return s
}

func (rfs *metricsRawFileSystem) Rename(cancel <-chan struct{}, input *fuse.RenameIn, oldName, newName string) fuse.Status {
	timeStart := rfs.clock.Now()
	s := rfs.base.Rename(cancel, input, oldName, newName)
	operationHistogramRename.observe(s, timeStart, rfs.clock.Now())
	return s
}

func (rfs *metricsRawFileSystem) Link(cancel <-chan struct{}, input *fuse.LinkIn, filename string, out *fuse.EntryOut) fuse.Status {
	timeStart := rfs.clock.Now()
	s := rfs.base.Link(cancel, input, filename, out)
	operationHistogramLink.observe(s, timeStart, rfs.clock.Now())
	return s
}

func (rfs *metricsRawFileSystem) Symlink(cancel <-chan struct{}, header *fuse.InHeader, pointedTo, linkName string, out *fuse.EntryOut) fuse.Status {
	timeStart := rfs.clock.Now()
	s := rfs.base.Symlink(cancel, header, pointedTo, linkName, out)
	operationHistogramSymlink.observe(s, timeStart, rfs.clock.Now())
	return s
}

func (rfs *metricsRawFileSystem) Readlink(cancel <-chan struct{}, header *fuse.InHeader) ([]byte, fuse.Status) {
	timeStart := rfs.clock.Now()
	out, s := rfs.base.Readlink(cancel, header)
	operationHistogramReadlink.observe(s, timeStart, rfs.clock.Now())
	return out, s
}

func (rfs *metricsRawFileSystem) Access(cancel <-chan struct{}, input *fuse.AccessIn) fuse.Status {
	timeStart := rfs.clock.Now()
	s := rfs.base.Access(cancel, input)
	operationHistogramAccess.observe(s, timeStart, rfs.clock.Now())
	return s
}

func (rfs *metricsRawFileSystem) GetXAttr(cancel <-chan struct{}, header *fuse.InHeader, attr string, dest []byte) (uint32, fuse.Status) {
	timeStart := rfs.clock.Now()
	r, s := rfs.base.GetXAttr(cancel, header, attr, dest)
	operationHistogramGetXAttr.observe(s, timeStart, rfs.clock.Now())
	return r, s
}

func (rfs *metricsRawFileSystem) ListXAttr(cancel <-chan struct{}, header *fuse.InHeader, dest []byte) (uint32, fuse.Status) {
	timeStart := rfs.clock.Now()
	r, s := rfs.base.ListXAttr(cancel, header, dest)
	operationHistogramListXAttr.observe(s, timeStart, rfs.clock.Now())
	return r, s
}

func (rfs *metricsRawFileSystem) SetXAttr(cancel <-chan struct{}, input *fuse.SetXAttrIn, attr string, data []byte) fuse.Status {
	timeStart := rfs.clock.Now()
	s := rfs.base.SetXAttr(cancel, input, attr, data)
	operationHistogramSetXAttr.observe(s, timeStart, rfs.clock.Now())
	return s
}

func (rfs *metricsRawFileSystem) RemoveXAttr(cancel <-chan struct{}, header *fuse.InHeader, attr string) fuse.Status {
	timeStart := rfs.clock.Now()
	s := rfs.base.RemoveXAttr(cancel, header, attr)
	operationHistogramRemoveXAttr.observe(s, timeStart, rfs.clock.Now())
	return s
}

func (rfs *metricsRawFileSystem) Create(cancel <-chan struct{}, input *fuse.CreateIn, name string, out *fuse.CreateOut) fuse.Status {
	timeStart := rfs.clock.Now()
	s := rfs.base.Create(cancel, input, name, out)
	operationHistogramCreate.observe(s, timeStart, rfs.clock.Now())
	return s
}

func (rfs *metricsRawFileSystem) Open(cancel <-chan struct{}, input *fuse.OpenIn, out *fuse.OpenOut) fuse.Status {
	timeStart := rfs.clock.Now()
	s := rfs.base.Open(cancel, input, out)
	operationHistogramOpen.observe(s, timeStart, rfs.clock.Now())
	return s
}

func (rfs *metricsRawFileSystem) Read(cancel <-chan struct{}, input *fuse.ReadIn, buf []byte) (fuse.ReadResult, fuse.Status) {
	timeStart := rfs.clock.Now()
	r, s := rfs.base.Read(cancel, input, buf)
	operationHistogramRead.observe(s, timeStart, rfs.clock.Now())
	return r, s
}

func (rfs *metricsRawFileSystem) Lseek(cancel <-chan struct{}, in *fuse.LseekIn, out *fuse.LseekOut) fuse.Status {
	timeStart := rfs.clock.Now()
	s := rfs.base.Lseek(cancel, in, out)
	operationHistogramLseek.observe(s, timeStart, rfs.clock.Now())
	return s
}

func (rfs *metricsRawFileSystem) GetLk(cancel <-chan struct{}, input *fuse.LkIn, out *fuse.LkOut) fuse.Status {
	timeStart := rfs.clock.Now()
	s := rfs.base.GetLk(cancel, input, out)
	operationHistogramGetLk.observe(s, timeStart, rfs.clock.Now())
	return s
}

func (rfs *metricsRawFileSystem) SetLk(cancel <-chan struct{}, input *fuse.LkIn) fuse.Status {
	timeStart := rfs.clock.Now()
	s := rfs.base.SetLk(cancel, input)
	operationHistogramSetLk.observe(s, timeStart, rfs.clock.Now())
	return s
}

func (rfs *metricsRawFileSystem) SetLkw(cancel <-chan struct{}, input *fuse.LkIn) fuse.Status {
	timeStart := rfs.clock.Now()
	s := rfs.base.SetLkw(cancel, input)
	operationHistogramSetLkw.observe(s, timeStart, rfs.clock.Now())
	return s
}

func (rfs *metricsRawFileSystem) Release(cancel <-chan struct{}, input *fuse.ReleaseIn) {
	timeStart := rfs.clock.Now()
	rfs.base.Release(cancel, input)
	operationHistogramRelease.observe(timeStart, rfs.clock.Now())
}

func (rfs *metricsRawFileSystem) Write(cancel <-chan struct{}, input *fuse.WriteIn, data []byte) (uint32, fuse.Status) {
	timeStart := rfs.clock.Now()
	r, s := rfs.base.Write(cancel, input, data)
	operationHistogramWrite.observe(s, timeStart, rfs.clock.Now())
	return r, s
}

func (rfs *metricsRawFileSystem) CopyFileRange(cancel <-chan struct{}, input *fuse.CopyFileRangeIn) (uint32, fuse.Status) {
	timeStart := rfs.clock.Now()
	r, s := rfs.base.CopyFileRange(cancel, input)
	operationHistogramCopyFileRange.observe(s, timeStart, rfs.clock.Now())
	return r, s
}

func (rfs *metricsRawFileSystem) Flush(cancel <-chan struct{}, input *fuse.FlushIn) fuse.Status {
	timeStart := rfs.clock.Now()
	s := rfs.base.Flush(cancel, input)
	operationHistogramFlush.observe(s, timeStart, rfs.clock.Now())
	return s
}

func (rfs *metricsRawFileSystem) Fsync(cancel <-chan struct{}, input *fuse.FsyncIn) fuse.Status {
	timeStart := rfs.clock.Now()
	s := rfs.base.Fsync(cancel, input)
	operationHistogramFsync.observe(s, timeStart, rfs.clock.Now())
	return s
}

func (rfs *metricsRawFileSystem) Fallocate(cancel <-chan struct{}, input *fuse.FallocateIn) fuse.Status {
	timeStart := rfs.clock.Now()
	s := rfs.base.Fallocate(cancel, input)
	operationHistogramFallocate.observe(s, timeStart, rfs.clock.Now())
	return s
}

func (rfs *metricsRawFileSystem) OpenDir(cancel <-chan struct{}, input *fuse.OpenIn, out *fuse.OpenOut) fuse.Status {
	timeStart := rfs.clock.Now()
	s := rfs.base.OpenDir(cancel, input, out)
	operationHistogramOpenDir.observe(s, timeStart, rfs.clock.Now())
	return s
}

func (rfs *metricsRawFileSystem) ReadDir(cancel <-chan struct{}, input *fuse.ReadIn, out fuse.ReadDirEntryList) fuse.Status {
	timeStart := rfs.clock.Now()
	s := rfs.base.ReadDir(cancel, input, out)
	operationHistogramReadDir.observe(s, timeStart, rfs.clock.Now())
	return s
}

func (rfs *metricsRawFileSystem) ReadDirPlus(cancel <-chan struct{}, input *fuse.ReadIn, out fuse.ReadDirPlusEntryList) fuse.Status {
	timeStart := rfs.clock.Now()
	s := rfs.base.ReadDirPlus(cancel, input, out)
	operationHistogramReadDirPlus.observe(s, timeStart, rfs.clock.Now())
	return s
}

func (rfs *metricsRawFileSystem) ReleaseDir(input *fuse.ReleaseIn) {
	timeStart := rfs.clock.Now()
	rfs.base.ReleaseDir(input)
	operationHistogramReleaseDir.observe(timeStart, rfs.clock.Now())
}

func (rfs *metricsRawFileSystem) FsyncDir(cancel <-chan struct{}, input *fuse.FsyncIn) fuse.Status {
	timeStart := rfs.clock.Now()
	s := rfs.base.FsyncDir(cancel, input)
	operationHistogramFsyncDir.observe(s, timeStart, rfs.clock.Now())
	return s
}

func (rfs *metricsRawFileSystem) StatFs(cancel <-chan struct{}, input *fuse.InHeader, out *fuse.StatfsOut) fuse.Status {
	timeStart := rfs.clock.Now()
	s := rfs.base.StatFs(cancel, input, out)
	operationHistogramStatFs.observe(s, timeStart, rfs.clock.Now())
	return s
}

// Init wraps the server callbacks object handed out by go-fuse, so
// that callbacks issued by the underlying file system are counted as
// well.
func (rfs *metricsRawFileSystem) Init(server fuse.ServerCallbacks) {
	rfs.base.Init(&metricsServerCallbacks{
		base: server,
	})
}

// metricsServerCallbacks is a decorator for fuse.ServerCallbacks that
// counts each callback invocation, grouped by the status it returns.
type metricsServerCallbacks struct {
	base fuse.ServerCallbacks
}

func (sc *metricsServerCallbacks) DeleteNotify(parent, child uint64, name string) fuse.Status {
	s := sc.base.DeleteNotify(parent, child, name)
	callbackCounterDeleteNotify.inc(s)
	return s
}

func (sc *metricsServerCallbacks) EntryNotify(parent uint64, name string) fuse.Status {
	s := sc.base.EntryNotify(parent, name)
	callbackCounterEntryNotify.inc(s)
	return s
}

func (sc *metricsServerCallbacks) InodeNotify(node uint64, off, length int64) fuse.Status {
	s := sc.base.InodeNotify(node, off, length)
	callbackCounterInodeNotify.inc(s)
	return s
}

func (sc *metricsServerCallbacks) InodeRetrieveCache(node uint64, offset int64, dest []byte) (int, fuse.Status) {
	r, s := sc.base.InodeRetrieveCache(node, offset, dest)
	callbackCounterInodeRetrieveCache.inc(s)
	return r, s
}

func (sc *metricsServerCallbacks) InodeNotifyStoreCache(node uint64, offset int64, data []byte) fuse.Status {
	s := sc.base.InodeNotifyStoreCache(node, offset, data)
	callbackCounterInodeNotifyStoreCache.inc(s)
	return s
}
diff --git a/pkg/filesystem/virtual/fuse/simple_raw_file_system.go b/pkg/filesystem/virtual/fuse/simple_raw_file_system.go
new file mode 100644
index 0000000..a0d4d60
--- /dev/null
+++ b/pkg/filesystem/virtual/fuse/simple_raw_file_system.go
@@ -0,0 +1,908 @@
//go:build darwin || linux
+build darwin linux + +package fuse + +import ( + "context" + "fmt" + "log" + "sync" + "syscall" + "time" + + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/virtual" + "github.com/buildbarn/bb-storage/pkg/filesystem" + "github.com/buildbarn/bb-storage/pkg/filesystem/path" + "github.com/hanwen/go-fuse/v2/fuse" + + "golang.org/x/sys/unix" +) + +const ( + // AttributesMaskForFUSEAttr is the attributes mask to use for + // VirtualGetAttributes() to populate all relevant fields of + // fuse.Attr. + AttributesMaskForFUSEAttr = virtual.AttributesMaskDeviceNumber | + virtual.AttributesMaskFileType | + virtual.AttributesMaskInodeNumber | + virtual.AttributesMaskLastDataModificationTime | + virtual.AttributesMaskLinkCount | + virtual.AttributesMaskPermissions | + virtual.AttributesMaskSizeBytes + // AttributesMaskForFUSEDirEntry is the attributes mask to use + // for VirtualReadDir() to populate all relevant fields of + // fuse.DirEntry. + AttributesMaskForFUSEDirEntry = virtual.AttributesMaskFileType | + virtual.AttributesMaskInodeNumber +) + +func toFUSEStatus(s virtual.Status) fuse.Status { + switch s { + case virtual.StatusOK: + return fuse.OK + case virtual.StatusErrAccess: + return fuse.EACCES + case virtual.StatusErrExist: + return fuse.Status(syscall.EEXIST) + case virtual.StatusErrInval: + return fuse.EINVAL + case virtual.StatusErrIO: + return fuse.EIO + case virtual.StatusErrIsDir: + return fuse.EISDIR + case virtual.StatusErrNoEnt: + return fuse.ENOENT + case virtual.StatusErrNotDir: + return fuse.ENOTDIR + case virtual.StatusErrNotEmpty: + return fuse.Status(syscall.ENOTEMPTY) + case virtual.StatusErrNXIO: + return fuse.Status(syscall.ENXIO) + case virtual.StatusErrPerm: + return fuse.EPERM + case virtual.StatusErrROFS: + return fuse.EROFS + case virtual.StatusErrStale: + return fuse.Status(syscall.ESTALE) + case virtual.StatusErrSymlink: + return fuse.Status(syscall.EOPNOTSUPP) + case virtual.StatusErrWrongType: + return fuse.EBADF + case 
virtual.StatusErrXDev: + return fuse.EXDEV + default: + panic("Unknown status") + } +} + +type directoryEntry struct { + directory virtual.Directory + nLookup uint64 +} + +type leafEntry struct { + leaf virtual.Leaf + nLookup uint64 +} + +type simpleRawFileSystem struct { + removalNotifierRegistrar virtual.FUSERemovalNotifierRegistrar + authenticator Authenticator + + // Maps to resolve node IDs to directories and leaves. + nodeLock sync.RWMutex + directories map[uint64]directoryEntry + leaves map[uint64]leafEntry +} + +// NewSimpleRawFileSystem creates a go-fuse RawFileSystem that converts +// flat FUSE operations to calls against a hierarchy of Directory and +// Leaf objects. +// +// This implementation is comparable to the RawFileSystem +// implementations created using go-fuse's fs.NewNodeFS() and +// nodefs.FileSystemConnector.RawFS(), except that it is simpler. It +// does not contain an inode number allocator, nor does it attempt to +// keep track of files stored in a directory. Tracking this information +// is the responsibility of the Directory and Leaf implementations. +// +// FUSE as a protocol makes no true distinction between Directory and +// Leaf objects. This implementation could therefore have been +// simplified a bit by merging these two interface types together. +// Separation between these two interfaces was added to make it easier +// to understand which operations actually get called against a given +// object type. 
+func NewSimpleRawFileSystem(rootDirectory virtual.Directory, removalNotifierRegistrar virtual.FUSERemovalNotifierRegistrar, authenticator Authenticator) fuse.RawFileSystem { + return &simpleRawFileSystem{ + removalNotifierRegistrar: removalNotifierRegistrar, + authenticator: authenticator, + + directories: map[uint64]directoryEntry{ + fuse.FUSE_ROOT_ID: { + directory: rootDirectory, + nLookup: 1, + }, + }, + leaves: map[uint64]leafEntry{}, + } +} + +func toFUSEFileType(fileType filesystem.FileType) uint32 { + switch fileType { + case filesystem.FileTypeBlockDevice: + return syscall.S_IFBLK + case filesystem.FileTypeCharacterDevice: + return syscall.S_IFCHR + case filesystem.FileTypeDirectory: + return syscall.S_IFDIR + case filesystem.FileTypeFIFO: + return syscall.S_IFIFO + case filesystem.FileTypeRegularFile: + return syscall.S_IFREG + case filesystem.FileTypeSocket: + return syscall.S_IFSOCK + case filesystem.FileTypeSymlink: + return syscall.S_IFLNK + default: + panic("Unknown file type") + } +} + +func populateAttr(attributes *virtual.Attributes, out *fuse.Attr) { + if deviceNumber, ok := attributes.GetDeviceNumber(); ok { + out.Rdev = uint32(deviceNumber.ToRaw()) + } + + out.Ino = attributes.GetInodeNumber() + out.Nlink = attributes.GetLinkCount() + out.Mode = toFUSEFileType(attributes.GetFileType()) + + if lastDataModificationTime, ok := attributes.GetLastDataModificationTime(); ok { + nanos := lastDataModificationTime.UnixNano() + out.Mtime = uint64(nanos / 1e9) + out.Mtimensec = uint32(nanos % 1e9) + } + + permissions, ok := attributes.GetPermissions() + if !ok { + panic("Attributes do not contain mandatory permissions attribute") + } + out.Mode |= uint32(permissions.ToMode()) + + sizeBytes, ok := attributes.GetSizeBytes() + if !ok { + panic("Attributes do not contain mandatory size attribute") + } + out.Size = sizeBytes +} + +func populateEntryOut(attributes *virtual.Attributes, out *fuse.EntryOut) { + populateAttr(attributes, &out.Attr) + out.NodeId = 
out.Ino +} + +func (rfs *simpleRawFileSystem) getDirectoryLocked(nodeID uint64) virtual.Directory { + if entry, ok := rfs.directories[nodeID]; ok { + return entry.directory + } + panic(fmt.Sprintf("Node ID %d does not correspond to a known directory", nodeID)) +} + +func (rfs *simpleRawFileSystem) getLeafLocked(nodeID uint64) virtual.Leaf { + if entry, ok := rfs.leaves[nodeID]; ok { + return entry.leaf + } + panic(fmt.Sprintf("Node ID %d does not correspond to a known leaf", nodeID)) +} + +func (rfs *simpleRawFileSystem) getNodeLocked(nodeID uint64) virtual.Node { + if entry, ok := rfs.directories[nodeID]; ok { + return entry.directory + } + if entry, ok := rfs.leaves[nodeID]; ok { + return entry.leaf + } + panic(fmt.Sprintf("Node ID %d does not correspond to a known directory or leaf", nodeID)) +} + +func (rfs *simpleRawFileSystem) addDirectory(i virtual.Directory, attributes *virtual.Attributes, out *fuse.EntryOut) { + populateEntryOut(attributes, out) + + rfs.nodeLock.Lock() + defer rfs.nodeLock.Unlock() + + if _, ok := rfs.leaves[out.NodeId]; ok { + panic(fmt.Sprintf("Directory %d has the same node ID as an existing leaf", out.NodeId)) + } + + // Increment lookup count of directory. + rfs.directories[out.NodeId] = directoryEntry{ + directory: i, + nLookup: rfs.directories[out.NodeId].nLookup + 1, + } +} + +func (rfs *simpleRawFileSystem) addLeaf(i virtual.Leaf, attributes *virtual.Attributes, out *fuse.EntryOut) { + populateEntryOut(attributes, out) + + rfs.nodeLock.Lock() + defer rfs.nodeLock.Unlock() + + if _, ok := rfs.directories[out.NodeId]; ok { + panic(fmt.Sprintf("Leaf %d has the same node ID as an existing directory", out.NodeId)) + } + + // Increment lookup count of leaf. + rfs.leaves[out.NodeId] = leafEntry{ + leaf: i, + nLookup: rfs.leaves[out.NodeId].nLookup + 1, + } +} + +// channelBackedContext is an implementation of context.Context around +// the cancellation channel that go-fuse provides. 
It does not have any +// values or deadline associated with it. +type channelBackedContext struct { + cancel <-chan struct{} +} + +var _ context.Context = channelBackedContext{} + +func (ctx channelBackedContext) Deadline() (time.Time, bool) { + var t time.Time + return t, false +} + +func (ctx channelBackedContext) Done() <-chan struct{} { + return ctx.cancel +} + +func (ctx channelBackedContext) Err() error { + select { + case <-ctx.cancel: + return context.Canceled + default: + return nil + } +} + +func (ctx channelBackedContext) Value(key any) any { + return nil +} + +func (rfs *simpleRawFileSystem) createContext(cancel <-chan struct{}, caller *fuse.Caller) (context.Context, fuse.Status) { + return rfs.authenticator.Authenticate(channelBackedContext{cancel: cancel}, caller) +} + +func (rfs *simpleRawFileSystem) String() string { + return "SimpleRawFileSystem" +} + +func (rfs *simpleRawFileSystem) SetDebug(debug bool) {} + +func (rfs *simpleRawFileSystem) Lookup(cancel <-chan struct{}, header *fuse.InHeader, name string, out *fuse.EntryOut) fuse.Status { + ctx, s := rfs.createContext(cancel, &header.Caller) + if s != fuse.OK { + return s + } + + rfs.nodeLock.RLock() + i := rfs.getDirectoryLocked(header.NodeId) + rfs.nodeLock.RUnlock() + + var attributes virtual.Attributes + child, vs := i.VirtualLookup(ctx, path.MustNewComponent(name), AttributesMaskForFUSEAttr, &attributes) + if vs != virtual.StatusOK { + // TODO: Should we add support for generating negative + // cache entries? Preliminary testing shows that this + // doesn't seem effective, probably because build + // actions are short lived. 
+ return toFUSEStatus(vs) + } + if directory, leaf := child.GetPair(); directory != nil { + rfs.addDirectory(directory, &attributes, out) + } else { + rfs.addLeaf(leaf, &attributes, out) + } + return fuse.OK +} + +func (rfs *simpleRawFileSystem) Forget(nodeID, nLookup uint64) { + rfs.nodeLock.Lock() + defer rfs.nodeLock.Unlock() + + // Decrement lookup count of the directory or leaf node. We can + // remove the entry from our bookkeeping if the lookup count + // reaches zero. + if entry, ok := rfs.directories[nodeID]; ok { + if entry.nLookup < nLookup { + panic(fmt.Sprintf("Attempted to forget directory %d %d times, while it was only looked up %d times", nodeID, nLookup, entry.nLookup)) + } + entry.nLookup -= nLookup + if entry.nLookup == 0 { + delete(rfs.directories, nodeID) + } else { + rfs.directories[nodeID] = entry + } + } else if entry, ok := rfs.leaves[nodeID]; ok { + if entry.nLookup < nLookup { + panic(fmt.Sprintf("Attempted to forget leaf %d %d times, while it was only looked up %d times", nodeID, nLookup, entry.nLookup)) + } + entry.nLookup -= nLookup + if entry.nLookup == 0 { + delete(rfs.leaves, nodeID) + } else { + rfs.leaves[nodeID] = entry + } + } else { + panic(fmt.Sprintf("Attempted to forget node %d %d times, even though no directory or leaf under that ID exists", nodeID, nLookup)) + } +} + +func (rfs *simpleRawFileSystem) GetAttr(cancel <-chan struct{}, input *fuse.GetAttrIn, out *fuse.AttrOut) fuse.Status { + ctx, s := rfs.createContext(cancel, &input.Caller) + if s != fuse.OK { + return s + } + + rfs.nodeLock.RLock() + i := rfs.getNodeLocked(input.NodeId) + rfs.nodeLock.RUnlock() + + var attributes virtual.Attributes + i.VirtualGetAttributes(ctx, AttributesMaskForFUSEAttr, &attributes) + populateAttr(&attributes, &out.Attr) + return fuse.OK +} + +func (rfs *simpleRawFileSystem) SetAttr(cancel <-chan struct{}, input *fuse.SetAttrIn, out *fuse.AttrOut) fuse.Status { + ctx, s := rfs.createContext(cancel, &input.Caller) + if s != fuse.OK { + 
return s + } + + rfs.nodeLock.RLock() + i := rfs.getNodeLocked(input.NodeId) + rfs.nodeLock.RUnlock() + + var attributesIn virtual.Attributes + if input.Valid&(fuse.FATTR_UID|fuse.FATTR_GID) != 0 { + return fuse.EPERM + } + if input.Valid&fuse.FATTR_MODE != 0 { + attributesIn.SetPermissions(virtual.NewPermissionsFromMode(input.Mode)) + } + if input.Valid&fuse.FATTR_SIZE != 0 { + attributesIn.SetSizeBytes(input.Size) + } + + var attributesOut virtual.Attributes + if s := i.VirtualSetAttributes(ctx, &attributesIn, AttributesMaskForFUSEAttr, &attributesOut); s != virtual.StatusOK { + return toFUSEStatus(s) + } + populateAttr(&attributesOut, &out.Attr) + return fuse.OK +} + +func (rfs *simpleRawFileSystem) Mknod(cancel <-chan struct{}, input *fuse.MknodIn, name string, out *fuse.EntryOut) fuse.Status { + ctx, s := rfs.createContext(cancel, &input.Caller) + if s != fuse.OK { + return s + } + + rfs.nodeLock.RLock() + i := rfs.getDirectoryLocked(input.NodeId) + rfs.nodeLock.RUnlock() + + var fileType filesystem.FileType + switch input.Mode & syscall.S_IFMT { + case syscall.S_IFIFO: + fileType = filesystem.FileTypeFIFO + case syscall.S_IFSOCK: + fileType = filesystem.FileTypeSocket + default: + return fuse.EPERM + } + + var attributes virtual.Attributes + child, _, vs := i.VirtualMknod(ctx, path.MustNewComponent(name), fileType, AttributesMaskForFUSEAttr, &attributes) + if vs != virtual.StatusOK { + return toFUSEStatus(vs) + } + rfs.addLeaf(child, &attributes, out) + return fuse.OK +} + +func (rfs *simpleRawFileSystem) Mkdir(cancel <-chan struct{}, input *fuse.MkdirIn, name string, out *fuse.EntryOut) fuse.Status { + rfs.nodeLock.RLock() + i := rfs.getDirectoryLocked(input.NodeId) + rfs.nodeLock.RUnlock() + + var attributes virtual.Attributes + child, _, s := i.VirtualMkdir(path.MustNewComponent(name), AttributesMaskForFUSEAttr, &attributes) + if s != virtual.StatusOK { + return toFUSEStatus(s) + } + rfs.addDirectory(child, &attributes, out) + return fuse.OK +} + +func 
(rfs *simpleRawFileSystem) Unlink(cancel <-chan struct{}, header *fuse.InHeader, name string) fuse.Status { + rfs.nodeLock.RLock() + i := rfs.getDirectoryLocked(header.NodeId) + rfs.nodeLock.RUnlock() + + _, s := i.VirtualRemove(path.MustNewComponent(name), false, true) + return toFUSEStatus(s) +} + +func (rfs *simpleRawFileSystem) Rmdir(cancel <-chan struct{}, header *fuse.InHeader, name string) fuse.Status { + rfs.nodeLock.RLock() + i := rfs.getDirectoryLocked(header.NodeId) + rfs.nodeLock.RUnlock() + + _, s := i.VirtualRemove(path.MustNewComponent(name), true, false) + return toFUSEStatus(s) +} + +func (rfs *simpleRawFileSystem) Rename(cancel <-chan struct{}, input *fuse.RenameIn, oldName, newName string) fuse.Status { + rfs.nodeLock.RLock() + iOld := rfs.getDirectoryLocked(input.NodeId) + iNew := rfs.getDirectoryLocked(input.Newdir) + rfs.nodeLock.RUnlock() + + _, _, s := iOld.VirtualRename(path.MustNewComponent(oldName), iNew, path.MustNewComponent(newName)) + return toFUSEStatus(s) +} + +func (rfs *simpleRawFileSystem) Link(cancel <-chan struct{}, input *fuse.LinkIn, filename string, out *fuse.EntryOut) fuse.Status { + ctx, s := rfs.createContext(cancel, &input.Caller) + if s != fuse.OK { + return s + } + + rfs.nodeLock.RLock() + iParent := rfs.getDirectoryLocked(input.NodeId) + iChild := rfs.getLeafLocked(input.Oldnodeid) + rfs.nodeLock.RUnlock() + + var attributes virtual.Attributes + if _, s := iParent.VirtualLink(ctx, path.MustNewComponent(filename), iChild, AttributesMaskForFUSEAttr, &attributes); s != virtual.StatusOK { + return toFUSEStatus(s) + } + rfs.addLeaf(iChild, &attributes, out) + return fuse.OK +} + +func (rfs *simpleRawFileSystem) Symlink(cancel <-chan struct{}, header *fuse.InHeader, pointedTo, linkName string, out *fuse.EntryOut) fuse.Status { + ctx, s := rfs.createContext(cancel, &header.Caller) + if s != fuse.OK { + return s + } + + rfs.nodeLock.RLock() + i := rfs.getDirectoryLocked(header.NodeId) + rfs.nodeLock.RUnlock() + + var 
attributes virtual.Attributes + child, _, vs := i.VirtualSymlink(ctx, []byte(pointedTo), path.MustNewComponent(linkName), AttributesMaskForFUSEAttr, &attributes) + if vs != virtual.StatusOK { + return toFUSEStatus(vs) + } + rfs.addLeaf(child, &attributes, out) + return fuse.OK +} + +func (rfs *simpleRawFileSystem) Readlink(cancel <-chan struct{}, header *fuse.InHeader) ([]byte, fuse.Status) { + ctx, s := rfs.createContext(cancel, &header.Caller) + if s != fuse.OK { + return nil, s + } + + rfs.nodeLock.RLock() + i := rfs.getLeafLocked(header.NodeId) + rfs.nodeLock.RUnlock() + + target, vs := i.VirtualReadlink(ctx) + return target, toFUSEStatus(vs) +} + +func (rfs *simpleRawFileSystem) Access(cancel <-chan struct{}, input *fuse.AccessIn) fuse.Status { + ctx, s := rfs.createContext(cancel, &input.Caller) + if s != fuse.OK { + return s + } + + rfs.nodeLock.RLock() + i := rfs.getNodeLocked(input.NodeId) + rfs.nodeLock.RUnlock() + + var requiredPermissions virtual.Permissions + if input.Mask&fuse.R_OK != 0 { + requiredPermissions |= virtual.PermissionsRead + } + if input.Mask&fuse.W_OK != 0 { + requiredPermissions |= virtual.PermissionsWrite + } + if input.Mask&fuse.X_OK != 0 { + requiredPermissions |= virtual.PermissionsExecute + } + + var attributes virtual.Attributes + i.VirtualGetAttributes(ctx, virtual.AttributesMaskPermissions, &attributes) + permissions, ok := attributes.GetPermissions() + if !ok { + panic("Node did not return permissions attribute, even though it was requested") + } + if requiredPermissions&^permissions != 0 { + return fuse.EACCES + } + return fuse.OK +} + +func (rfs *simpleRawFileSystem) GetXAttr(cancel <-chan struct{}, header *fuse.InHeader, attr string, dest []byte) (uint32, fuse.Status) { + // By returning ENOSYS here, the Linux FUSE driver will set + // fuse_conn::no_getxattr. This will completely eliminate + // getxattr() calls going forward. 
More details: + // + // https://github.com/torvalds/linux/blob/371e8fd02969383204b1f6023451125dbc20dfbd/fs/fuse/xattr.c#L60-L61 + // https://github.com/torvalds/linux/blob/371e8fd02969383204b1f6023451125dbc20dfbd/fs/fuse/xattr.c#L85-L88 + // + // Similar logic is used for some of the other operations. + return 0, fuse.ENOSYS +} + +func (rfs *simpleRawFileSystem) ListXAttr(cancel <-chan struct{}, header *fuse.InHeader, dest []byte) (uint32, fuse.Status) { + return 0, fuse.ENOSYS +} + +func (rfs *simpleRawFileSystem) SetXAttr(cancel <-chan struct{}, input *fuse.SetXAttrIn, attr string, data []byte) fuse.Status { + return fuse.ENOSYS +} + +func (rfs *simpleRawFileSystem) RemoveXAttr(cancel <-chan struct{}, header *fuse.InHeader, attr string) fuse.Status { + return fuse.ENOSYS +} + +// oflagsToShareMask converts access modes stored in open() flags to a +// ShareMask, indicating which operations are expected to be called +// against the file descriptor. +func oflagsToShareMask(oflags uint32) (virtual.ShareMask, fuse.Status) { + switch oflags & syscall.O_ACCMODE { + case syscall.O_RDONLY: + return virtual.ShareMaskRead, fuse.OK + case syscall.O_WRONLY: + return virtual.ShareMaskWrite, fuse.OK + case syscall.O_RDWR: + return virtual.ShareMaskRead | virtual.ShareMaskWrite, fuse.OK + default: + return 0, fuse.EINVAL + } +} + +// oflagsToOpenExistingOptions converts options stored in open() flags +// that pertain to handling of existing files to an OpenExistingOptions +// struct, which may be provided to VirtualOpen*(). 
+func oflagsToOpenExistingOptions(oflags uint32, options *virtual.OpenExistingOptions) { + options.Truncate = oflags&syscall.O_TRUNC != 0 +} + +func (rfs *simpleRawFileSystem) Create(cancel <-chan struct{}, input *fuse.CreateIn, name string, out *fuse.CreateOut) fuse.Status { + ctx, s := rfs.createContext(cancel, &input.Caller) + if s != fuse.OK { + return s + } + + rfs.nodeLock.RLock() + i := rfs.getDirectoryLocked(input.NodeId) + rfs.nodeLock.RUnlock() + + // Translate access mode. + shareAccess, s := oflagsToShareMask(input.Flags) + if s != fuse.OK { + return s + } + + // Take O_EXCL and O_TRUNC flags into consideration. + var existingOptions *virtual.OpenExistingOptions + if input.Flags&syscall.O_EXCL == 0 { + existingOptions = &virtual.OpenExistingOptions{} + oflagsToOpenExistingOptions(input.Flags, existingOptions) + } + + var openedFileAttributes virtual.Attributes + child, _, _, vs := i.VirtualOpenChild( + ctx, + path.MustNewComponent(name), + shareAccess, + (&virtual.Attributes{}).SetPermissions(virtual.NewPermissionsFromMode(input.Mode)), + existingOptions, + AttributesMaskForFUSEAttr, + &openedFileAttributes) + if vs != virtual.StatusOK { + return toFUSEStatus(vs) + } + rfs.addLeaf(child, &openedFileAttributes, &out.EntryOut) + return fuse.OK +} + +func (rfs *simpleRawFileSystem) Open(cancel <-chan struct{}, input *fuse.OpenIn, out *fuse.OpenOut) fuse.Status { + ctx, s := rfs.createContext(cancel, &input.Caller) + if s != fuse.OK { + return s + } + + rfs.nodeLock.RLock() + i := rfs.getLeafLocked(input.NodeId) + rfs.nodeLock.RUnlock() + + shareAccess, s := oflagsToShareMask(input.Flags) + if s != fuse.OK { + return s + } + var options virtual.OpenExistingOptions + oflagsToOpenExistingOptions(input.Flags, &options) + + return toFUSEStatus(i.VirtualOpenSelf(ctx, shareAccess, &options, 0, &virtual.Attributes{})) +} + +func (rfs *simpleRawFileSystem) Read(cancel <-chan struct{}, input *fuse.ReadIn, buf []byte) (fuse.ReadResult, fuse.Status) { + 
rfs.nodeLock.RLock() + i := rfs.getLeafLocked(input.NodeId) + rfs.nodeLock.RUnlock() + + nRead, _, s := i.VirtualRead(buf, input.Offset) + if s != virtual.StatusOK { + return nil, toFUSEStatus(s) + } + return fuse.ReadResultData(buf[:nRead]), fuse.OK +} + +func (rfs *simpleRawFileSystem) Lseek(cancel <-chan struct{}, in *fuse.LseekIn, out *fuse.LseekOut) fuse.Status { + rfs.nodeLock.RLock() + i := rfs.getLeafLocked(in.NodeId) + rfs.nodeLock.RUnlock() + + var regionType filesystem.RegionType + switch in.Whence { + case unix.SEEK_DATA: + regionType = filesystem.Data + case unix.SEEK_HOLE: + regionType = filesystem.Hole + default: + panic("Requests for other seek modes should have been intercepted") + } + + offset, s := i.VirtualSeek(in.Offset, regionType) + if s != virtual.StatusOK { + return toFUSEStatus(s) + } + if offset == nil { + return fuse.Status(syscall.ENXIO) + } + out.Offset = *offset + return fuse.OK +} + +func (rfs *simpleRawFileSystem) GetLk(cancel <-chan struct{}, input *fuse.LkIn, out *fuse.LkOut) fuse.Status { + return fuse.ENOSYS +} + +func (rfs *simpleRawFileSystem) SetLk(cancel <-chan struct{}, input *fuse.LkIn) fuse.Status { + return fuse.ENOSYS +} + +func (rfs *simpleRawFileSystem) SetLkw(cancel <-chan struct{}, input *fuse.LkIn) fuse.Status { + return fuse.ENOSYS +} + +func (rfs *simpleRawFileSystem) Release(cancel <-chan struct{}, input *fuse.ReleaseIn) { + rfs.nodeLock.RLock() + i := rfs.getLeafLocked(input.NodeId) + rfs.nodeLock.RUnlock() + + shareAccess, s := oflagsToShareMask(input.Flags) + if s != fuse.OK { + panic("Input flags cannot be converted to share mask") + } + + i.VirtualClose(shareAccess) +} + +func (rfs *simpleRawFileSystem) Write(cancel <-chan struct{}, input *fuse.WriteIn, data []byte) (uint32, fuse.Status) { + rfs.nodeLock.RLock() + i := rfs.getLeafLocked(input.NodeId) + rfs.nodeLock.RUnlock() + + n, s := i.VirtualWrite(data, input.Offset) + return uint32(n), toFUSEStatus(s) +} + +func (rfs *simpleRawFileSystem) 
CopyFileRange(cancel <-chan struct{}, input *fuse.CopyFileRangeIn) (uint32, fuse.Status) {
+	return 0, fuse.ENOTSUP
+}
+
+func (rfs *simpleRawFileSystem) Flush(cancel <-chan struct{}, input *fuse.FlushIn) fuse.Status {
+	return fuse.OK
+}
+
+func (rfs *simpleRawFileSystem) Fsync(cancel <-chan struct{}, input *fuse.FsyncIn) fuse.Status {
+	return fuse.OK
+}
+
+func (rfs *simpleRawFileSystem) Fallocate(cancel <-chan struct{}, input *fuse.FallocateIn) fuse.Status {
+	rfs.nodeLock.RLock()
+	i := rfs.getLeafLocked(input.NodeId)
+	rfs.nodeLock.RUnlock()
+
+	return toFUSEStatus(i.VirtualAllocate(input.Offset, input.Length))
+}
+
+func (rfs *simpleRawFileSystem) OpenDir(cancel <-chan struct{}, input *fuse.OpenIn, out *fuse.OpenOut) fuse.Status {
+	ctx, s := rfs.createContext(cancel, &input.Caller)
+	if s != fuse.OK {
+		return s
+	}
+
+	rfs.nodeLock.RLock()
+	i := rfs.getDirectoryLocked(input.NodeId)
+	rfs.nodeLock.RUnlock()
+
+	var attributes virtual.Attributes
+	i.VirtualGetAttributes(ctx, virtual.AttributesMaskPermissions, &attributes)
+	permissions, ok := attributes.GetPermissions()
+	if !ok {
+		panic("Node did not return permissions attribute, even though it was requested")
+	}
+	if permissions&virtual.PermissionsRead == 0 {
+		return fuse.EACCES
+	}
+	return fuse.OK
+}
+
+// Directory entries that need to be prepended to the results of all
+// ReadDir() and ReadDirPlus() operations. The inode number is not
+// filled in for these entries, which is permitted.
+var dotDotEntries = []fuse.DirEntry{ + {Mode: fuse.S_IFDIR, Name: "."}, + {Mode: fuse.S_IFDIR, Name: ".."}, +} + +const dotDotEntriesCount uint64 = 2 + +func toFUSEDirEntry(name path.Component, attributes *virtual.Attributes) fuse.DirEntry { + return fuse.DirEntry{ + Mode: toFUSEFileType(attributes.GetFileType()), + Name: name.String(), + Ino: attributes.GetInodeNumber(), + } +} + +type readDirReporter struct { + out fuse.ReadDirEntryList +} + +func (r *readDirReporter) ReportEntry(nextCookie uint64, name path.Component, child virtual.DirectoryChild, attributes *virtual.Attributes) bool { + return r.out.AddDirEntry(toFUSEDirEntry(name, attributes), dotDotEntriesCount+nextCookie) +} + +func (rfs *simpleRawFileSystem) ReadDir(cancel <-chan struct{}, input *fuse.ReadIn, out fuse.ReadDirEntryList) fuse.Status { + ctx, s := rfs.createContext(cancel, &input.Caller) + if s != fuse.OK { + return s + } + + // Inject "." and ".." entries at the start of the results. + offset := input.Offset + for ; offset < dotDotEntriesCount; offset++ { + if !out.AddDirEntry(dotDotEntries[offset], offset+1) { + return fuse.OK + } + } + + rfs.nodeLock.RLock() + i := rfs.getDirectoryLocked(input.NodeId) + rfs.nodeLock.RUnlock() + return toFUSEStatus( + i.VirtualReadDir( + ctx, + offset-dotDotEntriesCount, + AttributesMaskForFUSEDirEntry, + &readDirReporter{out: out})) +} + +type readDirPlusReporter struct { + rfs *simpleRawFileSystem + out fuse.ReadDirPlusEntryList +} + +func (r *readDirPlusReporter) ReportEntry(nextCookie uint64, name path.Component, child virtual.DirectoryChild, attributes *virtual.Attributes) bool { + if e := r.out.AddDirLookupEntry(toFUSEDirEntry(name, attributes), dotDotEntriesCount+nextCookie); e != nil { + if directory, leaf := child.GetPair(); directory != nil { + r.rfs.addDirectory(directory, attributes, e) + } else { + r.rfs.addLeaf(leaf, attributes, e) + } + return true + } + return false +} + +func (rfs *simpleRawFileSystem) ReadDirPlus(cancel <-chan struct{}, 
input *fuse.ReadIn, out fuse.ReadDirPlusEntryList) fuse.Status { + ctx, s := rfs.createContext(cancel, &input.Caller) + if s != fuse.OK { + return s + } + + // Return "." and ".." entries at the start of the results. + // These don't need to be looked up, as the kernel tracks these + // for us automatically. + offset := input.Offset + for ; offset < dotDotEntriesCount; offset++ { + if out.AddDirLookupEntry(dotDotEntries[offset], offset+1) == nil { + return fuse.OK + } + } + + rfs.nodeLock.RLock() + i := rfs.getDirectoryLocked(input.NodeId) + rfs.nodeLock.RUnlock() + return toFUSEStatus( + i.VirtualReadDir( + ctx, + offset-dotDotEntriesCount, + AttributesMaskForFUSEAttr, + &readDirPlusReporter{rfs: rfs, out: out})) +} + +func (rfs *simpleRawFileSystem) ReleaseDir(input *fuse.ReleaseIn) {} + +func (rfs *simpleRawFileSystem) FsyncDir(cancel <-chan struct{}, input *fuse.FsyncIn) fuse.Status { + return fuse.OK +} + +func (rfs *simpleRawFileSystem) StatFs(cancel <-chan struct{}, input *fuse.InHeader, out *fuse.StatfsOut) fuse.Status { + // Announce support for filenames up to 255 bytes in size. This + // seems to be the common limit for UNIX file systems. Setting + // this value is necessary to make pathconf(path, _PC_NAME_MAX) + // work. + out.NameLen = 255 + return fuse.OK +} + +func (rfs *simpleRawFileSystem) Init(server fuse.ServerCallbacks) { + // Obtain the inode number of the root directory. + rfs.nodeLock.RLock() + rootDirectory := rfs.directories[fuse.FUSE_ROOT_ID].directory + rfs.nodeLock.RUnlock() + var attributes virtual.Attributes + rootDirectory.VirtualGetAttributes(context.Background(), virtual.AttributesMaskInodeNumber, &attributes) + rootDirectoryInodeNumber := attributes.GetInodeNumber() + + rfs.removalNotifierRegistrar(func(parent uint64, name path.Component) { + // EntryNotify can be called to report that directory entries + // have been removed. This causes them to be removed from the + // directory entry cache used by FUSE as well. 
+ if parent == rootDirectoryInodeNumber { + // Even though we permit the root directory to + // have an arbitrary inode number, FUSE requires + // that the root directory uses node ID 1. + if s := server.EntryNotify(fuse.FUSE_ROOT_ID, name.String()); s != fuse.OK && s != fuse.ENOENT { + log.Printf("Failed to invalidate %#v in root directory: %s", name.String(), s) + } + } else { + // Discard invalidations for directory entries + // if the containing directory isn't known to + // the kernel. These requests would fail anyway. + rfs.nodeLock.RLock() + _, ok := rfs.directories[parent] + rfs.nodeLock.RUnlock() + if ok { + if s := server.EntryNotify(parent, name.String()); s != fuse.OK && s != fuse.ENOENT { + log.Printf("Failed to invalidate %#v in directory %d: %s", name.String(), parent, s) + } + } + } + }) +} diff --git a/pkg/filesystem/virtual/fuse/simple_raw_file_system_test.go b/pkg/filesystem/virtual/fuse/simple_raw_file_system_test.go new file mode 100644 index 0000000..7bea79a --- /dev/null +++ b/pkg/filesystem/virtual/fuse/simple_raw_file_system_test.go @@ -0,0 +1,1317 @@ +//go:build darwin || linux +// +build darwin linux + +package fuse_test + +import ( + "context" + "syscall" + "testing" + "time" + + "github.com/buildbarn/bb-remote-execution/internal/mock" + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/virtual" + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/virtual/fuse" + "github.com/buildbarn/bb-storage/pkg/filesystem" + "github.com/buildbarn/bb-storage/pkg/filesystem/path" + "github.com/golang/mock/gomock" + go_fuse "github.com/hanwen/go-fuse/v2/fuse" + "github.com/stretchr/testify/require" +) + +func TestSimpleRawFileSystemAccess(t *testing.T) { + ctrl := gomock.NewController(t) + + rootDirectory := mock.NewMockVirtualDirectory(ctrl) + removalNotifierRegistrar := mock.NewMockFUSERemovalNotifierRegistrar(ctrl) + rfs := fuse.NewSimpleRawFileSystem(rootDirectory, removalNotifierRegistrar.Call, fuse.AllowAuthenticator) + + 
t.Run("Failure", func(t *testing.T) { + rootDirectory.EXPECT().VirtualGetAttributes(gomock.Any(), virtual.AttributesMaskPermissions, gomock.Any()).DoAndReturn( + func(ctx context.Context, requested virtual.AttributesMask, out *virtual.Attributes) { + out.SetPermissions(virtual.PermissionsWrite | virtual.PermissionsExecute) + }) + + require.Equal(t, go_fuse.EACCES, rfs.Access(nil, &go_fuse.AccessIn{ + InHeader: go_fuse.InHeader{ + NodeId: go_fuse.FUSE_ROOT_ID, + }, + Mask: go_fuse.R_OK, + })) + }) + + t.Run("Success", func(t *testing.T) { + rootDirectory.EXPECT().VirtualGetAttributes(gomock.Any(), virtual.AttributesMaskPermissions, gomock.Any()).DoAndReturn( + func(ctx context.Context, requested virtual.AttributesMask, out *virtual.Attributes) { + out.SetPermissions(virtual.PermissionsRead | virtual.PermissionsWrite | virtual.PermissionsExecute) + }) + + require.Equal(t, go_fuse.OK, rfs.Access(nil, &go_fuse.AccessIn{ + InHeader: go_fuse.InHeader{ + NodeId: go_fuse.FUSE_ROOT_ID, + }, + Mask: go_fuse.R_OK | go_fuse.X_OK, + })) + }) +} + +func TestSimpleRawFileSystemLookup(t *testing.T) { + ctrl := gomock.NewController(t) + + rootDirectory := mock.NewMockVirtualDirectory(ctrl) + removalNotifierRegistrar := mock.NewMockFUSERemovalNotifierRegistrar(ctrl) + rfs := fuse.NewSimpleRawFileSystem(rootDirectory, removalNotifierRegistrar.Call, fuse.AllowAuthenticator) + + t.Run("NotFound", func(t *testing.T) { + // Lookup failure errors should be propagated. + rootDirectory.EXPECT().VirtualLookup(gomock.Any(), path.MustNewComponent("nonexistent"), fuse.AttributesMaskForFUSEAttr, gomock.Any()). + Return(virtual.DirectoryChild{}, virtual.StatusErrNoEnt) + + var entryOut go_fuse.EntryOut + require.Equal(t, go_fuse.ENOENT, rfs.Lookup(nil, &go_fuse.InHeader{ + NodeId: go_fuse.FUSE_ROOT_ID, + }, "nonexistent", &entryOut)) + }) + + t.Run("Directory", func(t *testing.T) { + // Looking up a directory should cause the attributes to + // be returned. 
The inode number should be used as the + // node ID, so that future operations can refer to it. + childDirectory := mock.NewMockVirtualDirectory(ctrl) + rootDirectory.EXPECT().VirtualLookup(gomock.Any(), path.MustNewComponent("directory"), fuse.AttributesMaskForFUSEAttr, gomock.Any()).DoAndReturn( + func(ctx context.Context, name path.Component, requested virtual.AttributesMask, out *virtual.Attributes) (virtual.DirectoryChild, virtual.Status) { + out.SetFileType(filesystem.FileTypeDirectory) + out.SetInodeNumber(123) + out.SetLastDataModificationTime(time.Unix(1654790759, 405372932)) + out.SetLinkCount(5) + out.SetPermissions(virtual.PermissionsExecute) + out.SetSizeBytes(2048) + return virtual.DirectoryChild{}.FromDirectory(childDirectory), virtual.StatusOK + }) + + var entryOut go_fuse.EntryOut + require.Equal(t, go_fuse.OK, rfs.Lookup(nil, &go_fuse.InHeader{ + NodeId: go_fuse.FUSE_ROOT_ID, + }, "directory", &entryOut)) + require.Equal(t, go_fuse.EntryOut{ + NodeId: 123, + Attr: go_fuse.Attr{ + Mode: go_fuse.S_IFDIR | 0o111, + Ino: 123, + Mtime: 1654790759, + Mtimensec: 405372932, + Nlink: 5, + Size: 2048, + }, + }, entryOut) + + // Performing a successive lookup against the node ID of + // the directory should call into that directory; not + // the root directory. + childDirectory.EXPECT().VirtualLookup(gomock.Any(), path.MustNewComponent("nonexistent"), fuse.AttributesMaskForFUSEAttr, gomock.Any()). + Return(virtual.DirectoryChild{}, virtual.StatusErrNoEnt) + + require.Equal(t, go_fuse.ENOENT, rfs.Lookup(nil, &go_fuse.InHeader{ + NodeId: 123, + }, "nonexistent", &entryOut)) + }) + + t.Run("File", func(t *testing.T) { + // Looking up a file should work similarly to the above. 
+ childFile := mock.NewMockVirtualLeaf(ctrl) + rootDirectory.EXPECT().VirtualLookup(gomock.Any(), path.MustNewComponent("file"), fuse.AttributesMaskForFUSEAttr, gomock.Any()).DoAndReturn( + func(ctx context.Context, name path.Component, requested virtual.AttributesMask, out *virtual.Attributes) (virtual.DirectoryChild, virtual.Status) { + out.SetFileType(filesystem.FileTypeRegularFile) + out.SetInodeNumber(456) + out.SetLinkCount(1) + out.SetPermissions(virtual.PermissionsRead) + out.SetSizeBytes(1300) + return virtual.DirectoryChild{}.FromLeaf(childFile), virtual.StatusOK + }) + + var entryOut go_fuse.EntryOut + require.Equal(t, go_fuse.OK, rfs.Lookup(nil, &go_fuse.InHeader{ + NodeId: go_fuse.FUSE_ROOT_ID, + }, "file", &entryOut)) + require.Equal(t, go_fuse.EntryOut{ + NodeId: 456, + Attr: go_fuse.Attr{ + Mode: go_fuse.S_IFREG | 0o444, + Ino: 456, + Nlink: 1, + Size: 1300, + }, + }, entryOut) + }) +} + +func TestSimpleRawFileSystemForget(t *testing.T) { + ctrl := gomock.NewController(t) + + rootDirectory := mock.NewMockVirtualDirectory(ctrl) + removalNotifierRegistrar := mock.NewMockFUSERemovalNotifierRegistrar(ctrl) + rfs := fuse.NewSimpleRawFileSystem(rootDirectory, removalNotifierRegistrar.Call, fuse.AllowAuthenticator) + + for i := 0; i < 10; i++ { + // Perform ten lookups of the same directory. 
+ childDirectory := mock.NewMockVirtualDirectory(ctrl) + for j := 0; j < 10; j++ { + rootDirectory.EXPECT().VirtualLookup(gomock.Any(), path.MustNewComponent("directory"), fuse.AttributesMaskForFUSEAttr, gomock.Any()).DoAndReturn( + func(ctx context.Context, name path.Component, requested virtual.AttributesMask, out *virtual.Attributes) (virtual.DirectoryChild, virtual.Status) { + out.SetFileType(filesystem.FileTypeDirectory) + out.SetInodeNumber(123) + out.SetLinkCount(4) + out.SetPermissions(virtual.PermissionsExecute) + out.SetSizeBytes(2048) + return virtual.DirectoryChild{}.FromDirectory(childDirectory), virtual.StatusOK + }) + + var entryOut go_fuse.EntryOut + require.Equal(t, go_fuse.OK, rfs.Lookup(nil, &go_fuse.InHeader{ + NodeId: go_fuse.FUSE_ROOT_ID, + }, "directory", &entryOut)) + require.Equal(t, go_fuse.EntryOut{ + NodeId: 123, + Attr: go_fuse.Attr{ + Mode: go_fuse.S_IFDIR | 0o111, + Ino: 123, + Nlink: 4, + Size: 2048, + }, + }, entryOut) + } + + // Operations against the node ID of the directory + // should all be forwarded to the directory object. + childDirectory.EXPECT().VirtualGetAttributes(gomock.Any(), fuse.AttributesMaskForFUSEAttr, gomock.Any()).DoAndReturn( + func(ctx context.Context, requested virtual.AttributesMask, out *virtual.Attributes) { + out.SetFileType(filesystem.FileTypeDirectory) + out.SetInodeNumber(123) + out.SetLinkCount(4) + out.SetPermissions(virtual.PermissionsRead | virtual.PermissionsExecute) + out.SetSizeBytes(2048) + }) + + var directoryAttrOut go_fuse.AttrOut + require.Equal(t, go_fuse.OK, rfs.GetAttr(nil, &go_fuse.GetAttrIn{ + InHeader: go_fuse.InHeader{ + NodeId: 123, + }, + }, &directoryAttrOut)) + require.Equal(t, go_fuse.AttrOut{ + Attr: go_fuse.Attr{ + Mode: go_fuse.S_IFDIR | 0o555, + Ino: 123, + Nlink: 4, + Size: 2048, + }, + }, directoryAttrOut) + + // Forget the directory a total of ten times. 
This + // should cause the node ID to be released, meaning that + // it's safe to reuse the same ID for another purpose. + rfs.Forget(123, 3) + rfs.Forget(123, 2) + rfs.Forget(123, 5) + + // Perform ten lookups of the same file. This file + // reuses the node ID that was previously used by the + // directory. This should be safe, as this node ID was + // forgotten. + childFile := mock.NewMockVirtualLeaf(ctrl) + for j := 0; j < 10; j++ { + rootDirectory.EXPECT().VirtualLookup(gomock.Any(), path.MustNewComponent("file"), fuse.AttributesMaskForFUSEAttr, gomock.Any()).DoAndReturn( + func(ctx context.Context, name path.Component, requested virtual.AttributesMask, out *virtual.Attributes) (virtual.DirectoryChild, virtual.Status) { + out.SetFileType(filesystem.FileTypeRegularFile) + out.SetInodeNumber(123) + out.SetLinkCount(1) + out.SetPermissions(virtual.PermissionsRead) + out.SetSizeBytes(42) + return virtual.DirectoryChild{}.FromLeaf(childFile), virtual.StatusOK + }) + + var entryOut go_fuse.EntryOut + require.Equal(t, go_fuse.OK, rfs.Lookup(nil, &go_fuse.InHeader{ + NodeId: go_fuse.FUSE_ROOT_ID, + }, "file", &entryOut)) + require.Equal(t, go_fuse.EntryOut{ + NodeId: 123, + Attr: go_fuse.Attr{ + Mode: go_fuse.S_IFREG | 0o444, + Ino: 123, + Nlink: 1, + Size: 42, + }, + }, entryOut) + } + + // Operations against the node ID should now all go to + // the file -- not the directory. 
+ childFile.EXPECT().VirtualGetAttributes(gomock.Any(), fuse.AttributesMaskForFUSEAttr, gomock.Any()).DoAndReturn( + func(ctx context.Context, requested virtual.AttributesMask, out *virtual.Attributes) { + out.SetFileType(filesystem.FileTypeRegularFile) + out.SetInodeNumber(123) + out.SetLinkCount(1) + out.SetPermissions(virtual.PermissionsRead | virtual.PermissionsExecute) + out.SetSizeBytes(5) + }) + + var fileAttrOut go_fuse.AttrOut + require.Equal(t, go_fuse.OK, rfs.GetAttr(nil, &go_fuse.GetAttrIn{ + InHeader: go_fuse.InHeader{ + NodeId: 123, + }, + }, &fileAttrOut)) + require.Equal(t, go_fuse.AttrOut{ + Attr: go_fuse.Attr{ + Mode: go_fuse.S_IFREG | 0o555, + Ino: 123, + Nlink: 1, + Size: 5, + }, + }, fileAttrOut) + + // Also forget the file a total of ten times. + rfs.Forget(123, 9) + rfs.Forget(123, 1) + } +} + +func TestSimpleRawFileSystemGetAttr(t *testing.T) { + ctrl := gomock.NewController(t) + + rootDirectory := mock.NewMockVirtualDirectory(ctrl) + removalNotifierRegistrar := mock.NewMockFUSERemovalNotifierRegistrar(ctrl) + rfs := fuse.NewSimpleRawFileSystem(rootDirectory, removalNotifierRegistrar.Call, fuse.AllowAuthenticator) + + t.Run("Success", func(t *testing.T) { + rootDirectory.EXPECT().VirtualGetAttributes(gomock.Any(), fuse.AttributesMaskForFUSEAttr, gomock.Any()).DoAndReturn( + func(ctx context.Context, requested virtual.AttributesMask, out *virtual.Attributes) { + out.SetFileType(filesystem.FileTypeDirectory) + out.SetInodeNumber(42) + out.SetLinkCount(7) + out.SetPermissions(virtual.PermissionsExecute) + out.SetSizeBytes(12) + }) + + var attrOut go_fuse.AttrOut + require.Equal(t, go_fuse.OK, rfs.GetAttr(nil, &go_fuse.GetAttrIn{ + InHeader: go_fuse.InHeader{ + NodeId: go_fuse.FUSE_ROOT_ID, + }, + }, &attrOut)) + require.Equal(t, go_fuse.AttrOut{ + Attr: go_fuse.Attr{ + Mode: go_fuse.S_IFDIR | 0o111, + Ino: 42, + Nlink: 7, + Size: 12, + }, + }, attrOut) + }) +} + +func TestSimpleRawFileSystemSetAttr(t *testing.T) { + ctrl := 
gomock.NewController(t) + + rootDirectory := mock.NewMockVirtualDirectory(ctrl) + removalNotifierRegistrar := mock.NewMockFUSERemovalNotifierRegistrar(ctrl) + rfs := fuse.NewSimpleRawFileSystem(rootDirectory, removalNotifierRegistrar.Call, fuse.AllowAuthenticator) + + t.Run("Chown", func(t *testing.T) { + // chown() operations are not supported. + var attrOut go_fuse.AttrOut + require.Equal(t, go_fuse.EPERM, rfs.SetAttr(nil, &go_fuse.SetAttrIn{ + SetAttrInCommon: go_fuse.SetAttrInCommon{ + InHeader: go_fuse.InHeader{ + NodeId: go_fuse.FUSE_ROOT_ID, + }, + Valid: go_fuse.FATTR_UID | go_fuse.FATTR_GID, + Owner: go_fuse.Owner{ + Uid: 1000, + Gid: 1000, + }, + }, + }, &attrOut)) + }) + + t.Run("Failure", func(t *testing.T) { + // A truncate() call that is denied. + rootDirectory.EXPECT().VirtualSetAttributes( + gomock.Any(), + (&virtual.Attributes{}).SetSizeBytes(400), + fuse.AttributesMaskForFUSEAttr, + gomock.Any(), + ).Return(virtual.StatusErrAccess) + + var attrOut go_fuse.AttrOut + require.Equal(t, go_fuse.EACCES, rfs.SetAttr(nil, &go_fuse.SetAttrIn{ + SetAttrInCommon: go_fuse.SetAttrInCommon{ + InHeader: go_fuse.InHeader{ + NodeId: go_fuse.FUSE_ROOT_ID, + }, + Valid: go_fuse.FATTR_SIZE, + Size: 400, + }, + }, &attrOut)) + }) + + t.Run("Success", func(t *testing.T) { + // A chmod() call that is permitted. 
+ rootDirectory.EXPECT().VirtualSetAttributes( + gomock.Any(), + (&virtual.Attributes{}).SetPermissions(virtual.PermissionsRead|virtual.PermissionsExecute), + fuse.AttributesMaskForFUSEAttr, + gomock.Any(), + ).DoAndReturn(func(ctx context.Context, in *virtual.Attributes, requested virtual.AttributesMask, out *virtual.Attributes) virtual.Status { + out.SetFileType(filesystem.FileTypeDirectory) + out.SetInodeNumber(9000) + out.SetLinkCount(12) + out.SetPermissions(virtual.PermissionsRead | virtual.PermissionsExecute) + out.SetSizeBytes(42) + return virtual.StatusOK + }) + + var attrOut go_fuse.AttrOut + require.Equal(t, go_fuse.OK, rfs.SetAttr(nil, &go_fuse.SetAttrIn{ + SetAttrInCommon: go_fuse.SetAttrInCommon{ + InHeader: go_fuse.InHeader{ + NodeId: go_fuse.FUSE_ROOT_ID, + }, + Valid: go_fuse.FATTR_MODE, + Mode: 0o500, + }, + }, &attrOut)) + require.Equal(t, go_fuse.AttrOut{ + Attr: go_fuse.Attr{ + Mode: go_fuse.S_IFDIR | 0o555, + Ino: 9000, + Nlink: 12, + Size: 42, + }, + }, attrOut) + }) +} + +func TestSimpleRawFileSystemMknod(t *testing.T) { + ctrl := gomock.NewController(t) + + rootDirectory := mock.NewMockVirtualDirectory(ctrl) + removalNotifierRegistrar := mock.NewMockFUSERemovalNotifierRegistrar(ctrl) + rfs := fuse.NewSimpleRawFileSystem(rootDirectory, removalNotifierRegistrar.Call, fuse.AllowAuthenticator) + + t.Run("BlockDevice", func(t *testing.T) { + // An mknod() call for a block device should be + // rejected. Creating those would be a security issue. + var entryOut go_fuse.EntryOut + require.Equal(t, go_fuse.EPERM, rfs.Mknod(nil, &go_fuse.MknodIn{ + InHeader: go_fuse.InHeader{ + NodeId: go_fuse.FUSE_ROOT_ID, + }, + Mode: syscall.S_IFBLK | 0o777, + Rdev: 456, + }, "hello", &entryOut)) + }) + + t.Run("Failure", func(t *testing.T) { + // An mknod() call for a socket that is denied. + rootDirectory.EXPECT().VirtualMknod(gomock.Any(), path.MustNewComponent("hello"), filesystem.FileTypeSocket, fuse.AttributesMaskForFUSEAttr, gomock.Any()). 
+ Return(nil, virtual.ChangeInfo{}, virtual.StatusErrPerm) + + var entryOut go_fuse.EntryOut + require.Equal(t, go_fuse.EPERM, rfs.Mknod(nil, &go_fuse.MknodIn{ + InHeader: go_fuse.InHeader{ + NodeId: go_fuse.FUSE_ROOT_ID, + }, + Mode: syscall.S_IFSOCK | 0o777, + Rdev: 456, + }, "hello", &entryOut)) + }) + + t.Run("Success", func(t *testing.T) { + // An mknod() call for a FIFO that succeeds. + childLeaf := mock.NewMockVirtualLeaf(ctrl) + rootDirectory.EXPECT().VirtualMknod(gomock.Any(), path.MustNewComponent("hello"), filesystem.FileTypeFIFO, fuse.AttributesMaskForFUSEAttr, gomock.Any()).DoAndReturn( + func(ctx context.Context, name path.Component, fileType filesystem.FileType, requested virtual.AttributesMask, out *virtual.Attributes) (virtual.Leaf, virtual.ChangeInfo, virtual.Status) { + out.SetFileType(filesystem.FileTypeFIFO) + out.SetInodeNumber(123) + out.SetLinkCount(1) + out.SetPermissions(virtual.PermissionsRead | virtual.PermissionsWrite) + out.SetSizeBytes(100) + return childLeaf, virtual.ChangeInfo{ + Before: 41, + After: 42, + }, virtual.StatusOK + }) + + var entryOut go_fuse.EntryOut + require.Equal(t, go_fuse.OK, rfs.Mknod(nil, &go_fuse.MknodIn{ + InHeader: go_fuse.InHeader{ + NodeId: go_fuse.FUSE_ROOT_ID, + }, + Mode: go_fuse.S_IFIFO | 0o700, + }, "hello", &entryOut)) + require.Equal(t, go_fuse.EntryOut{ + NodeId: 123, + Attr: go_fuse.Attr{ + Mode: go_fuse.S_IFIFO | 0o666, + Ino: 123, + Nlink: 1, + Size: 100, + }, + }, entryOut) + }) +} + +func TestSimpleRawFileSystemMkdir(t *testing.T) { + ctrl := gomock.NewController(t) + + rootDirectory := mock.NewMockVirtualDirectory(ctrl) + removalNotifierRegistrar := mock.NewMockFUSERemovalNotifierRegistrar(ctrl) + rfs := fuse.NewSimpleRawFileSystem(rootDirectory, removalNotifierRegistrar.Call, fuse.AllowAuthenticator) + + t.Run("Failure", func(t *testing.T) { + // An mkdir() call that fails due to an I/O error. 
+ rootDirectory.EXPECT().VirtualMkdir(path.MustNewComponent("hello"), fuse.AttributesMaskForFUSEAttr, gomock.Any()). + Return(nil, virtual.ChangeInfo{}, virtual.StatusErrIO) + + var entryOut go_fuse.EntryOut + require.Equal(t, go_fuse.EIO, rfs.Mkdir(nil, &go_fuse.MkdirIn{ + InHeader: go_fuse.InHeader{ + NodeId: go_fuse.FUSE_ROOT_ID, + }, + Mode: 0o777, + }, "hello", &entryOut)) + }) + + t.Run("Success", func(t *testing.T) { + // An mkdir() call that succeeds. + childDirectory := mock.NewMockVirtualDirectory(ctrl) + rootDirectory.EXPECT().VirtualMkdir(path.MustNewComponent("hello"), fuse.AttributesMaskForFUSEAttr, gomock.Any()).DoAndReturn( + func(name path.Component, requested virtual.AttributesMask, out *virtual.Attributes) (virtual.Directory, virtual.ChangeInfo, virtual.Status) { + out.SetFileType(filesystem.FileTypeDirectory) + out.SetInodeNumber(123) + out.SetLinkCount(12) + out.SetPermissions(virtual.PermissionsRead | virtual.PermissionsWrite | virtual.PermissionsExecute) + out.SetSizeBytes(500) + return childDirectory, virtual.ChangeInfo{ + Before: 13, + After: 14, + }, virtual.StatusOK + }) + + var entryOut go_fuse.EntryOut + require.Equal(t, go_fuse.OK, rfs.Mkdir(nil, &go_fuse.MkdirIn{ + InHeader: go_fuse.InHeader{ + NodeId: go_fuse.FUSE_ROOT_ID, + }, + Mode: 0o777, + }, "hello", &entryOut)) + require.Equal(t, go_fuse.EntryOut{ + NodeId: 123, + Attr: go_fuse.Attr{ + Mode: go_fuse.S_IFDIR | 0o777, + Ino: 123, + Nlink: 12, + Size: 500, + }, + }, entryOut) + }) +} + +func TestSimpleRawFileSystemUnlink(t *testing.T) { + ctrl := gomock.NewController(t) + + rootDirectory := mock.NewMockVirtualDirectory(ctrl) + removalNotifierRegistrar := mock.NewMockFUSERemovalNotifierRegistrar(ctrl) + rfs := fuse.NewSimpleRawFileSystem(rootDirectory, removalNotifierRegistrar.Call, fuse.AllowAuthenticator) + + t.Run("Failure", func(t *testing.T) { + // An unlink() call that fails due to an I/O error. 
+ rootDirectory.EXPECT().VirtualRemove(path.MustNewComponent("hello"), false, true). + Return(virtual.ChangeInfo{}, virtual.StatusErrIO) + + require.Equal(t, go_fuse.EIO, rfs.Unlink(nil, &go_fuse.InHeader{ + NodeId: go_fuse.FUSE_ROOT_ID, + }, "hello")) + }) + + t.Run("Success", func(t *testing.T) { + // An unlink() call that succeeds. + rootDirectory.EXPECT().VirtualRemove(path.MustNewComponent("hello"), false, true). + Return(virtual.ChangeInfo{ + Before: 5, + After: 6, + }, virtual.StatusOK) + + require.Equal(t, go_fuse.OK, rfs.Unlink(nil, &go_fuse.InHeader{ + NodeId: go_fuse.FUSE_ROOT_ID, + }, "hello")) + }) +} + +func TestSimpleRawFileSystemRmdir(t *testing.T) { + ctrl := gomock.NewController(t) + + rootDirectory := mock.NewMockVirtualDirectory(ctrl) + removalNotifierRegistrar := mock.NewMockFUSERemovalNotifierRegistrar(ctrl) + rfs := fuse.NewSimpleRawFileSystem(rootDirectory, removalNotifierRegistrar.Call, fuse.AllowAuthenticator) + + t.Run("Failure", func(t *testing.T) { + // An rmdir() call that fails due to an I/O error. + rootDirectory.EXPECT().VirtualRemove(path.MustNewComponent("hello"), true, false). + Return(virtual.ChangeInfo{}, virtual.StatusErrIO) + + require.Equal(t, go_fuse.EIO, rfs.Rmdir(nil, &go_fuse.InHeader{ + NodeId: go_fuse.FUSE_ROOT_ID, + }, "hello")) + }) + + t.Run("Success", func(t *testing.T) { + // An rmdir() call that succeeds. + rootDirectory.EXPECT().VirtualRemove(path.MustNewComponent("hello"), true, false). 
+ Return(virtual.ChangeInfo{ + Before: 5, + After: 6, + }, virtual.StatusOK) + + require.Equal(t, go_fuse.OK, rfs.Rmdir(nil, &go_fuse.InHeader{ + NodeId: go_fuse.FUSE_ROOT_ID, + }, "hello")) + }) +} + +func TestSimpleRawFileSystemSymlink(t *testing.T) { + ctrl := gomock.NewController(t) + + rootDirectory := mock.NewMockVirtualDirectory(ctrl) + removalNotifierRegistrar := mock.NewMockFUSERemovalNotifierRegistrar(ctrl) + rfs := fuse.NewSimpleRawFileSystem(rootDirectory, removalNotifierRegistrar.Call, fuse.AllowAuthenticator) + + t.Run("Failure", func(t *testing.T) { + rootDirectory.EXPECT().VirtualSymlink( + gomock.Any(), + []byte("target"), + path.MustNewComponent("symlink"), + fuse.AttributesMaskForFUSEAttr, + gomock.Any(), + ).Return(nil, virtual.ChangeInfo{}, virtual.StatusErrExist) + + var entryOut go_fuse.EntryOut + require.Equal(t, go_fuse.Status(syscall.EEXIST), rfs.Symlink(nil, &go_fuse.InHeader{ + NodeId: go_fuse.FUSE_ROOT_ID, + }, "target", "symlink", &entryOut)) + }) + + t.Run("Success", func(t *testing.T) { + // Create a symbolic link. 
+ symlink := mock.NewMockVirtualLeaf(ctrl) + rootDirectory.EXPECT().VirtualSymlink( + gomock.Any(), + []byte("target"), + path.MustNewComponent("symlink"), + fuse.AttributesMaskForFUSEAttr, + gomock.Any(), + ).DoAndReturn(func(ctx context.Context, pointedTo []byte, linkName path.Component, requested virtual.AttributesMask, out *virtual.Attributes) (virtual.Leaf, virtual.ChangeInfo, virtual.Status) { + out.SetFileType(filesystem.FileTypeSymlink) + out.SetInodeNumber(123) + out.SetLinkCount(1) + out.SetPermissions(virtual.PermissionsRead | virtual.PermissionsWrite | virtual.PermissionsExecute) + out.SetSizeBytes(6) + return symlink, virtual.ChangeInfo{ + Before: 12, + After: 13, + }, virtual.StatusOK + }) + + var entryOut go_fuse.EntryOut + require.Equal(t, go_fuse.OK, rfs.Symlink(nil, &go_fuse.InHeader{ + NodeId: go_fuse.FUSE_ROOT_ID, + }, "target", "symlink", &entryOut)) + require.Equal(t, go_fuse.EntryOut{ + NodeId: 123, + Attr: go_fuse.Attr{ + Mode: go_fuse.S_IFLNK | 0o777, + Ino: 123, + Nlink: 1, + Size: 6, + }, + }, entryOut) + + // Future calls of the node should be forwarded to the + // right symlink instance. 
+ symlink.EXPECT().VirtualReadlink(gomock.Any()).Return([]byte("target"), virtual.StatusOK) + + target, s := rfs.Readlink(nil, &go_fuse.InHeader{NodeId: 123}) + require.Equal(t, go_fuse.OK, s) + require.Equal(t, []byte("target"), target) + }) +} + +func TestSimpleRawFileSystemCreate(t *testing.T) { + ctrl := gomock.NewController(t) + + rootDirectory := mock.NewMockVirtualDirectory(ctrl) + removalNotifierRegistrar := mock.NewMockFUSERemovalNotifierRegistrar(ctrl) + rfs := fuse.NewSimpleRawFileSystem(rootDirectory, removalNotifierRegistrar.Call, fuse.AllowAuthenticator) + + t.Run("ReadWriteCreateExcl", func(t *testing.T) { + rootDirectory.EXPECT().VirtualOpenChild( + gomock.Any(), + path.MustNewComponent("excl"), + virtual.ShareMaskRead|virtual.ShareMaskWrite, + (&virtual.Attributes{}).SetPermissions(virtual.PermissionsRead|virtual.PermissionsWrite|virtual.PermissionsExecute), + nil, + fuse.AttributesMaskForFUSEAttr, + gomock.Any(), + ).Return(nil, virtual.AttributesMask(0), virtual.ChangeInfo{}, virtual.StatusErrExist) + + var createOut go_fuse.CreateOut + require.Equal(t, go_fuse.Status(syscall.EEXIST), rfs.Create(nil, &go_fuse.CreateIn{ + InHeader: go_fuse.InHeader{ + NodeId: go_fuse.FUSE_ROOT_ID, + }, + Flags: syscall.O_CREAT | syscall.O_EXCL | syscall.O_RDWR, + Mode: 0o777, + }, "excl", &createOut)) + }) + + t.Run("WriteTruncate", func(t *testing.T) { + rootDirectory.EXPECT().VirtualOpenChild( + gomock.Any(), + path.MustNewComponent("trunc"), + virtual.ShareMaskWrite, + (&virtual.Attributes{}).SetPermissions(virtual.PermissionsRead|virtual.PermissionsWrite), + &virtual.OpenExistingOptions{Truncate: true}, + fuse.AttributesMaskForFUSEAttr, + gomock.Any(), + ).Return(nil, virtual.AttributesMask(0), virtual.ChangeInfo{}, virtual.StatusErrNoEnt) + + var createOut go_fuse.CreateOut + require.Equal(t, go_fuse.ENOENT, rfs.Create(nil, &go_fuse.CreateIn{ + InHeader: go_fuse.InHeader{ + NodeId: go_fuse.FUSE_ROOT_ID, + }, + Flags: syscall.O_CREAT | syscall.O_TRUNC | 
syscall.O_WRONLY, + Mode: 0o666, + }, "trunc", &createOut)) + }) +} + +func TestSimpleRawFileSystemOpenDir(t *testing.T) { + ctrl := gomock.NewController(t) + + rootDirectory := mock.NewMockVirtualDirectory(ctrl) + removalNotifierRegistrar := mock.NewMockFUSERemovalNotifierRegistrar(ctrl) + rfs := fuse.NewSimpleRawFileSystem(rootDirectory, removalNotifierRegistrar.Call, fuse.AllowAuthenticator) + + t.Run("PermissionDenied", func(t *testing.T) { + // FUSE on Linux doesn't check permissions on the + // directory prior to opening. Do that on the caller's + // behalf. + rootDirectory.EXPECT().VirtualGetAttributes(gomock.Any(), virtual.AttributesMaskPermissions, gomock.Any()).DoAndReturn( + func(ctx context.Context, requested virtual.AttributesMask, out *virtual.Attributes) { + out.SetPermissions(virtual.PermissionsExecute) + }) + + require.Equal(t, go_fuse.EACCES, rfs.OpenDir(nil, &go_fuse.OpenIn{ + InHeader: go_fuse.InHeader{ + NodeId: go_fuse.FUSE_ROOT_ID, + }, + }, &go_fuse.OpenOut{})) + }) + + // Further testing coverage is provided as part of ReadDir() and + // ReadDirPlus() tests. +} + +func TestSimpleRawFileSystemReadDir(t *testing.T) { + ctrl := gomock.NewController(t) + + rootDirectory := mock.NewMockVirtualDirectory(ctrl) + removalNotifierRegistrar := mock.NewMockFUSERemovalNotifierRegistrar(ctrl) + rfs := fuse.NewSimpleRawFileSystem(rootDirectory, removalNotifierRegistrar.Call, fuse.AllowAuthenticator) + + // Open the root directory. 
+ rootDirectory.EXPECT().VirtualGetAttributes(gomock.Any(), virtual.AttributesMaskPermissions, gomock.Any()).DoAndReturn( + func(ctx context.Context, requested virtual.AttributesMask, out *virtual.Attributes) { + out.SetPermissions(virtual.PermissionsRead) + }) + + var openOut go_fuse.OpenOut + require.Equal(t, go_fuse.OK, rfs.OpenDir(nil, &go_fuse.OpenIn{ + InHeader: go_fuse.InHeader{ + NodeId: go_fuse.FUSE_ROOT_ID, + }, + }, &openOut)) + require.Equal(t, go_fuse.OpenOut{}, openOut) + + t.Run("Failure", func(t *testing.T) { + // Directory listing failures should be propagated. + rootDirectory.EXPECT().VirtualReadDir( + gomock.Any(), + uint64(0), + fuse.AttributesMaskForFUSEDirEntry, + gomock.Any(), + ).Return(virtual.StatusErrIO) + entryList := mock.NewMockReadDirEntryList(ctrl) + gomock.InOrder( + entryList.EXPECT().AddDirEntry(go_fuse.DirEntry{ + Name: ".", + Mode: go_fuse.S_IFDIR, + }, uint64(1)).Return(true), + entryList.EXPECT().AddDirEntry(go_fuse.DirEntry{ + Name: "..", + Mode: go_fuse.S_IFDIR, + }, uint64(2)).Return(true), + ) + + require.Equal(t, go_fuse.EIO, rfs.ReadDir(nil, &go_fuse.ReadIn{ + InHeader: go_fuse.InHeader{ + NodeId: go_fuse.FUSE_ROOT_ID, + }, + }, entryList)) + }) + + t.Run("FromStart", func(t *testing.T) { + // "." and ".." entries should be prepended. This should + // cause all cookies of child entries to be incremented + // by two to make space. + rootDirectory.EXPECT().VirtualReadDir( + gomock.Any(), + uint64(0), + fuse.AttributesMaskForFUSEDirEntry, + gomock.Any(), + ).DoAndReturn(func(ctx context.Context, firstCookie uint64, requested virtual.AttributesMask, reporter virtual.DirectoryEntryReporter) virtual.Status { + require.True(t, reporter.ReportEntry( + uint64(1), + path.MustNewComponent("directory"), + virtual.DirectoryChild{}.FromDirectory(mock.NewMockVirtualDirectory(ctrl)), + (&virtual.Attributes{}). + SetFileType(filesystem.FileTypeDirectory). 
+ SetInodeNumber(27))) + require.True(t, reporter.ReportEntry( + uint64(2), + path.MustNewComponent("file"), + virtual.DirectoryChild{}.FromLeaf(mock.NewMockVirtualLeaf(ctrl)), + (&virtual.Attributes{}). + SetFileType(filesystem.FileTypeRegularFile). + SetInodeNumber(42))) + require.True(t, reporter.ReportEntry( + uint64(3), + path.MustNewComponent("symlink"), + virtual.DirectoryChild{}.FromLeaf(mock.NewMockVirtualLeaf(ctrl)), + (&virtual.Attributes{}). + SetFileType(filesystem.FileTypeSymlink). + SetInodeNumber(83))) + return virtual.StatusOK + }) + entryList := mock.NewMockReadDirEntryList(ctrl) + gomock.InOrder( + entryList.EXPECT().AddDirEntry(go_fuse.DirEntry{ + Name: ".", + Mode: go_fuse.S_IFDIR, + }, uint64(1)).Return(true), + entryList.EXPECT().AddDirEntry(go_fuse.DirEntry{ + Name: "..", + Mode: go_fuse.S_IFDIR, + }, uint64(2)).Return(true), + entryList.EXPECT().AddDirEntry(go_fuse.DirEntry{ + Name: "directory", + Mode: go_fuse.S_IFDIR, + Ino: 27, + }, uint64(3)).Return(true), + entryList.EXPECT().AddDirEntry(go_fuse.DirEntry{ + Name: "file", + Mode: go_fuse.S_IFREG, + Ino: 42, + }, uint64(4)).Return(true), + entryList.EXPECT().AddDirEntry(go_fuse.DirEntry{ + Name: "symlink", + Mode: go_fuse.S_IFLNK, + Ino: 83, + }, uint64(5)).Return(true), + ) + + require.Equal(t, go_fuse.OK, rfs.ReadDir(nil, &go_fuse.ReadIn{ + InHeader: go_fuse.InHeader{ + NodeId: go_fuse.FUSE_ROOT_ID, + }, + }, entryList)) + }) + + t.Run("BetweenDots", func(t *testing.T) { + // Perform a partial read, starting between the "." and + // ".." directory entries. 
+ rootDirectory.EXPECT().VirtualReadDir( + gomock.Any(), + uint64(0), + fuse.AttributesMaskForFUSEDirEntry, + gomock.Any(), + ).DoAndReturn(func(ctx context.Context, firstCookie uint64, requested virtual.AttributesMask, reporter virtual.DirectoryEntryReporter) virtual.Status { + require.True(t, reporter.ReportEntry( + uint64(1), + path.MustNewComponent("directory"), + virtual.DirectoryChild{}.FromDirectory(mock.NewMockVirtualDirectory(ctrl)), + (&virtual.Attributes{}). + SetFileType(filesystem.FileTypeDirectory). + SetInodeNumber(27))) + return virtual.StatusOK + }) + entryList := mock.NewMockReadDirEntryList(ctrl) + gomock.InOrder( + entryList.EXPECT().AddDirEntry(go_fuse.DirEntry{ + Name: "..", + Mode: go_fuse.S_IFDIR, + }, uint64(2)).Return(true), + entryList.EXPECT().AddDirEntry(go_fuse.DirEntry{ + Name: "directory", + Mode: go_fuse.S_IFDIR, + Ino: 27, + }, uint64(3)).Return(true), + ) + + require.Equal(t, go_fuse.OK, rfs.ReadDir(nil, &go_fuse.ReadIn{ + InHeader: go_fuse.InHeader{ + NodeId: go_fuse.FUSE_ROOT_ID, + }, + Offset: 1, + }, entryList)) + }) + + t.Run("PastDots", func(t *testing.T) { + // Perform a partial read, starting right after the "." + // and ".." directory entries. + rootDirectory.EXPECT().VirtualReadDir( + gomock.Any(), + uint64(0), + fuse.AttributesMaskForFUSEDirEntry, + gomock.Any(), + ).DoAndReturn(func(ctx context.Context, firstCookie uint64, requested virtual.AttributesMask, reporter virtual.DirectoryEntryReporter) virtual.Status { + require.True(t, reporter.ReportEntry( + uint64(1), + path.MustNewComponent("directory"), + virtual.DirectoryChild{}.FromDirectory(mock.NewMockVirtualDirectory(ctrl)), + (&virtual.Attributes{}). + SetFileType(filesystem.FileTypeDirectory). 
+ SetInodeNumber(27))) + return virtual.StatusOK + }) + entryList := mock.NewMockReadDirEntryList(ctrl) + gomock.InOrder( + entryList.EXPECT().AddDirEntry(go_fuse.DirEntry{ + Name: "directory", + Mode: go_fuse.S_IFDIR, + Ino: 27, + }, uint64(3)).Return(true), + ) + + require.Equal(t, go_fuse.OK, rfs.ReadDir(nil, &go_fuse.ReadIn{ + InHeader: go_fuse.InHeader{ + NodeId: go_fuse.FUSE_ROOT_ID, + }, + Offset: 2, + }, entryList)) + }) + + t.Run("AtOffset", func(t *testing.T) { + // Perform a partial read at an arbitrary offset. + rootDirectory.EXPECT().VirtualReadDir( + gomock.Any(), + uint64(52), + fuse.AttributesMaskForFUSEDirEntry, + gomock.Any(), + ).DoAndReturn(func(ctx context.Context, firstCookie uint64, requested virtual.AttributesMask, reporter virtual.DirectoryEntryReporter) virtual.Status { + require.False(t, reporter.ReportEntry( + uint64(55), + path.MustNewComponent("directory"), + virtual.DirectoryChild{}.FromDirectory(mock.NewMockVirtualDirectory(ctrl)), + (&virtual.Attributes{}). + SetFileType(filesystem.FileTypeDirectory). + SetInodeNumber(27))) + return virtual.StatusOK + }) + entryList := mock.NewMockReadDirEntryList(ctrl) + gomock.InOrder( + entryList.EXPECT().AddDirEntry(go_fuse.DirEntry{ + Name: "directory", + Mode: go_fuse.S_IFDIR, + Ino: 27, + }, uint64(57)), + ) + + require.Equal(t, go_fuse.OK, rfs.ReadDir(nil, &go_fuse.ReadIn{ + InHeader: go_fuse.InHeader{ + NodeId: go_fuse.FUSE_ROOT_ID, + }, + Offset: 54, + }, entryList)) + }) + + // Close the root directory. + rfs.ReleaseDir(&go_fuse.ReleaseIn{ + InHeader: go_fuse.InHeader{ + NodeId: go_fuse.FUSE_ROOT_ID, + }, + }) +} + +func TestSimpleRawFileSystemReadDirPlus(t *testing.T) { + ctrl := gomock.NewController(t) + + rootDirectory := mock.NewMockVirtualDirectory(ctrl) + removalNotifierRegistrar := mock.NewMockFUSERemovalNotifierRegistrar(ctrl) + rfs := fuse.NewSimpleRawFileSystem(rootDirectory, removalNotifierRegistrar.Call, fuse.AllowAuthenticator) + + // Open the root directory. 
+ rootDirectory.EXPECT().VirtualGetAttributes(gomock.Any(), virtual.AttributesMaskPermissions, gomock.Any()).DoAndReturn( + func(ctx context.Context, requested virtual.AttributesMask, out *virtual.Attributes) { + out.SetPermissions(virtual.PermissionsRead) + }) + + var openOut go_fuse.OpenOut + require.Equal(t, go_fuse.OK, rfs.OpenDir(nil, &go_fuse.OpenIn{ + InHeader: go_fuse.InHeader{ + NodeId: go_fuse.FUSE_ROOT_ID, + }, + }, &openOut)) + require.Equal(t, go_fuse.OpenOut{}, openOut) + + t.Run("Failure", func(t *testing.T) { + // Directory listing failures should be propagated. + rootDirectory.EXPECT().VirtualReadDir( + gomock.Any(), + uint64(0), + fuse.AttributesMaskForFUSEAttr, + gomock.Any(), + ).Return(virtual.StatusErrIO) + entryList := mock.NewMockReadDirPlusEntryList(ctrl) + var entryOutDot go_fuse.EntryOut + gomock.InOrder( + entryList.EXPECT().AddDirLookupEntry(go_fuse.DirEntry{ + Name: ".", + Mode: go_fuse.S_IFDIR, + }, uint64(1)).Return(&entryOutDot), + entryList.EXPECT().AddDirLookupEntry(go_fuse.DirEntry{ + Name: "..", + Mode: go_fuse.S_IFDIR, + }, uint64(2)).Return(&entryOutDot), + ) + + require.Equal(t, go_fuse.EIO, rfs.ReadDirPlus(nil, &go_fuse.ReadIn{ + InHeader: go_fuse.InHeader{ + NodeId: go_fuse.FUSE_ROOT_ID, + }, + }, entryList)) + require.Equal(t, go_fuse.EntryOut{}, entryOutDot) + }) + + t.Run("FromStart", func(t *testing.T) { + // "." and ".." entries should be prepended. This should + // cause all cookies of child entries to be incremented + // by two to make space. 
+ childDirectory := mock.NewMockVirtualDirectory(ctrl) + childLeaf := mock.NewMockVirtualLeaf(ctrl) + rootDirectory.EXPECT().VirtualReadDir( + gomock.Any(), + uint64(0), + fuse.AttributesMaskForFUSEAttr, + gomock.Any(), + ).DoAndReturn(func(ctx context.Context, firstCookie uint64, requested virtual.AttributesMask, reporter virtual.DirectoryEntryReporter) virtual.Status { + require.True(t, reporter.ReportEntry( + uint64(1), + path.MustNewComponent("directory"), + virtual.DirectoryChild{}.FromDirectory(childDirectory), + (&virtual.Attributes{}). + SetFileType(filesystem.FileTypeDirectory). + SetInodeNumber(2). + SetLinkCount(12). + SetPermissions(virtual.PermissionsExecute). + SetSizeBytes(4096))) + require.True(t, reporter.ReportEntry( + uint64(2), + path.MustNewComponent("file"), + virtual.DirectoryChild{}.FromLeaf(childLeaf), + (&virtual.Attributes{}). + SetFileType(filesystem.FileTypeRegularFile). + SetInodeNumber(3). + SetLinkCount(4). + SetPermissions(0). + SetSizeBytes(8192))) + return virtual.StatusOK + }) + entryList := mock.NewMockReadDirPlusEntryList(ctrl) + var entryOutDot, entryOutDirectory, entryOutFile go_fuse.EntryOut + gomock.InOrder( + entryList.EXPECT().AddDirLookupEntry(go_fuse.DirEntry{ + Name: ".", + Mode: go_fuse.S_IFDIR, + }, uint64(1)).Return(&entryOutDot), + entryList.EXPECT().AddDirLookupEntry(go_fuse.DirEntry{ + Name: "..", + Mode: go_fuse.S_IFDIR, + }, uint64(2)).Return(&entryOutDot), + entryList.EXPECT().AddDirLookupEntry(go_fuse.DirEntry{ + Name: "directory", + Mode: go_fuse.S_IFDIR, + Ino: 2, + }, uint64(3)).Return(&entryOutDirectory), + entryList.EXPECT().AddDirLookupEntry(go_fuse.DirEntry{ + Name: "file", + Mode: go_fuse.S_IFREG, + Ino: 3, + }, uint64(4)).Return(&entryOutFile), + ) + + require.Equal(t, go_fuse.OK, rfs.ReadDirPlus(nil, &go_fuse.ReadIn{ + InHeader: go_fuse.InHeader{ + NodeId: go_fuse.FUSE_ROOT_ID, + }, + }, entryList)) + require.Equal(t, go_fuse.EntryOut{}, entryOutDot) + require.Equal(t, go_fuse.EntryOut{}, 
entryOutDot) + require.Equal(t, go_fuse.EntryOut{ + NodeId: 2, + Attr: go_fuse.Attr{ + Mode: go_fuse.S_IFDIR | 0o111, + Ino: 2, + Nlink: 12, + Size: 4096, + }, + }, entryOutDirectory) + require.Equal(t, go_fuse.EntryOut{ + NodeId: 3, + Attr: go_fuse.Attr{ + Mode: go_fuse.S_IFREG, + Ino: 3, + Nlink: 4, + Size: 8192, + }, + }, entryOutFile) + + // VirtualGetAttributes() should only be called on + // objects contained within the resulting directory + // listing. + childDirectory.EXPECT().VirtualGetAttributes(gomock.Any(), fuse.AttributesMaskForFUSEAttr, gomock.Any()).DoAndReturn( + func(ctx context.Context, requested virtual.AttributesMask, out *virtual.Attributes) { + out.SetFileType(filesystem.FileTypeDirectory) + out.SetInodeNumber(2) + out.SetLinkCount(12) + out.SetPermissions(virtual.PermissionsExecute) + out.SetSizeBytes(4096) + }) + childLeaf.EXPECT().VirtualGetAttributes(gomock.Any(), fuse.AttributesMaskForFUSEAttr, gomock.Any()).DoAndReturn( + func(ctx context.Context, requested virtual.AttributesMask, out *virtual.Attributes) { + out.SetFileType(filesystem.FileTypeRegularFile) + out.SetInodeNumber(3) + out.SetLinkCount(4) + out.SetPermissions(0) + out.SetSizeBytes(8192) + }) + + // The entries returned by ReadDirPlus() have been + // registered automatically, meaning they can be + // accessed without a separate Lookup(). 
+ var directoryAttrOut go_fuse.AttrOut + require.Equal(t, go_fuse.OK, rfs.GetAttr(nil, &go_fuse.GetAttrIn{ + InHeader: go_fuse.InHeader{ + NodeId: 2, + }, + }, &directoryAttrOut)) + require.Equal(t, go_fuse.AttrOut{ + Attr: go_fuse.Attr{ + Mode: go_fuse.S_IFDIR | 0o111, + Ino: 2, + Nlink: 12, + Size: 4096, + }, + }, directoryAttrOut) + + var leafAttrOut go_fuse.AttrOut + require.Equal(t, go_fuse.OK, rfs.GetAttr(nil, &go_fuse.GetAttrIn{ + InHeader: go_fuse.InHeader{ + NodeId: 3, + }, + }, &leafAttrOut)) + require.Equal(t, go_fuse.AttrOut{ + Attr: go_fuse.Attr{ + Mode: go_fuse.S_IFREG, + Ino: 3, + Nlink: 4, + Size: 8192, + }, + }, leafAttrOut) + + // Release the children returned by ReadDirPlus(). + rfs.Forget(2, 1) + rfs.Forget(3, 1) + }) + + // For partial reads, we defer to the test of ReadDir(). + + // Close the root directory. + rfs.ReleaseDir(&go_fuse.ReleaseIn{ + InHeader: go_fuse.InHeader{ + NodeId: go_fuse.FUSE_ROOT_ID, + }, + }) +} + +func TestSimpleRawFileSystemReadlink(t *testing.T) { + ctrl := gomock.NewController(t) + + rootDirectory := mock.NewMockVirtualDirectory(ctrl) + removalNotifierRegistrar := mock.NewMockFUSERemovalNotifierRegistrar(ctrl) + rfs := fuse.NewSimpleRawFileSystem(rootDirectory, removalNotifierRegistrar.Call, fuse.AllowAuthenticator) + + symlink := mock.NewMockVirtualLeaf(ctrl) + rootDirectory.EXPECT().VirtualLookup(gomock.Any(), path.MustNewComponent("symlink"), fuse.AttributesMaskForFUSEAttr, gomock.Any()).DoAndReturn( + func(ctx context.Context, name path.Component, requested virtual.AttributesMask, out *virtual.Attributes) (virtual.DirectoryChild, virtual.Status) { + out.SetFileType(filesystem.FileTypeSymlink) + out.SetInodeNumber(2) + out.SetLinkCount(1) + out.SetPermissions(virtual.PermissionsRead | virtual.PermissionsWrite | virtual.PermissionsExecute) + out.SetSizeBytes(6) + return virtual.DirectoryChild{}.FromLeaf(symlink), virtual.StatusOK + }) + + var entryOut go_fuse.EntryOut + require.Equal(t, go_fuse.OK, rfs.Lookup(nil, 
&go_fuse.InHeader{ + NodeId: go_fuse.FUSE_ROOT_ID, + }, "symlink", &entryOut)) + require.Equal(t, go_fuse.EntryOut{ + NodeId: 2, + Attr: go_fuse.Attr{ + Mode: go_fuse.S_IFLNK | 0o777, + Ino: 2, + Nlink: 1, + Size: 6, + }, + }, entryOut) + + t.Run("IOError", func(t *testing.T) { + symlink.EXPECT().VirtualReadlink(gomock.Any()).Return(nil, virtual.StatusErrIO) + + _, s := rfs.Readlink(nil, &go_fuse.InHeader{NodeId: 2}) + require.Equal(t, go_fuse.EIO, s) + }) + + t.Run("WrongFileType", func(t *testing.T) { + symlink.EXPECT().VirtualReadlink(gomock.Any()).Return(nil, virtual.StatusErrInval) + + _, s := rfs.Readlink(nil, &go_fuse.InHeader{NodeId: 2}) + require.Equal(t, go_fuse.EINVAL, s) + }) + + t.Run("Success", func(t *testing.T) { + symlink.EXPECT().VirtualReadlink(gomock.Any()).Return([]byte("target"), virtual.StatusOK) + + target, s := rfs.Readlink(nil, &go_fuse.InHeader{NodeId: 2}) + require.Equal(t, go_fuse.OK, s) + require.Equal(t, []byte("target"), target) + }) +} + +func TestSimpleRawFileSystemStatFs(t *testing.T) { + ctrl := gomock.NewController(t) + + rootDirectory := mock.NewMockVirtualDirectory(ctrl) + removalNotifierRegistrar := mock.NewMockFUSERemovalNotifierRegistrar(ctrl) + rfs := fuse.NewSimpleRawFileSystem(rootDirectory, removalNotifierRegistrar.Call, fuse.AllowAuthenticator) + + t.Run("Success", func(t *testing.T) { + // OSXFUSE lets the statvfs() system call succeed, even + // if StatFs() returns ENOSYS. Linux is more strict, in + // that this causes statvfs() to fail. + // + // Even though we don't provide any meaningful + // statistics, return success to get consistent behavior + // across platforms. 
+ var statfsOut go_fuse.StatfsOut + require.Equal(t, go_fuse.OK, rfs.StatFs(nil, &go_fuse.InHeader{ + NodeId: go_fuse.FUSE_ROOT_ID, + }, &statfsOut)) + require.Equal(t, go_fuse.StatfsOut{ + NameLen: 255, + }, statfsOut) + }) +} + +func TestSimpleRawFileSystemInit(t *testing.T) { + ctrl := gomock.NewController(t) + + rootDirectory := mock.NewMockVirtualDirectory(ctrl) + removalNotifierRegistrar := mock.NewMockFUSERemovalNotifierRegistrar(ctrl) + rfs := fuse.NewSimpleRawFileSystem(rootDirectory, removalNotifierRegistrar.Call, fuse.AllowAuthenticator) + + // An Init() operation should cause SimpleRawFileSystem to + // register a removal notifier that forwards calls to + // EntryNotify() on the FUSE server. + // + // Because the FUSE server expects that we use + // go_fuse.FUSE_ROOT_ID for the root directory, we should do a + // one-time lookup of the inode number of the root directory, so + // that we can distinguish it from the other directories going + // forward. + rootDirectory.EXPECT().VirtualGetAttributes(gomock.Any(), virtual.AttributesMaskInodeNumber, gomock.Any()). + Do(func(ctx context.Context, requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetInodeNumber(123) + }) + var removalNotifier virtual.FUSERemovalNotifier + removalNotifierRegistrar.EXPECT().Call(gomock.Any()).Do(func(rn virtual.FUSERemovalNotifier) { + removalNotifier = rn + }) + mockServerCallbacks := mock.NewMockServerCallbacks(ctrl) + rfs.Init(mockServerCallbacks) + + t.Run("RootDirectory", func(t *testing.T) { + // Calls for the root directory should have their inode + // number translated to FUSE_ROOT_ID, as that is the + // node ID that the FUSE protocol uses for the root + // directory object. 
+ mockServerCallbacks.EXPECT().EntryNotify(uint64(go_fuse.FUSE_ROOT_ID), "Hello") + removalNotifier(123, path.MustNewComponent("Hello")) + }) + + t.Run("ChildDirectory", func(t *testing.T) { + // Add a second directory to the map of directories + // tracked by SimpleRawFileSystem. + childDirectory := mock.NewMockVirtualDirectory(ctrl) + rootDirectory.EXPECT().VirtualLookup(gomock.Any(), path.MustNewComponent("directory"), fuse.AttributesMaskForFUSEAttr, gomock.Any()).DoAndReturn( + func(ctx context.Context, name path.Component, requested virtual.AttributesMask, out *virtual.Attributes) (virtual.DirectoryChild, virtual.Status) { + out.SetFileType(filesystem.FileTypeDirectory) + out.SetInodeNumber(456) + out.SetLinkCount(1) + out.SetPermissions(virtual.PermissionsExecute) + out.SetSizeBytes(1200) + return virtual.DirectoryChild{}.FromDirectory(childDirectory), virtual.StatusOK + }) + + var entryOut go_fuse.EntryOut + require.Equal(t, go_fuse.OK, rfs.Lookup(nil, &go_fuse.InHeader{ + NodeId: go_fuse.FUSE_ROOT_ID, + }, "directory", &entryOut)) + require.Equal(t, go_fuse.EntryOut{ + NodeId: 456, + Attr: go_fuse.Attr{ + Mode: go_fuse.S_IFDIR | 0o111, + Ino: 456, + Nlink: 1, + Size: 1200, + }, + }, entryOut) + + // Calls to EntryNotify() should be forwarded to the + // underlying server in unmodified form. + mockServerCallbacks.EXPECT().EntryNotify(uint64(456), "hello") + removalNotifier(456, path.MustNewComponent("hello")) + + // Once the kernel requests that the directory is + // forgotten, there is no longer any need for calling + // into the kernel to forget directory entries. + rfs.Forget(456, 1) + removalNotifier(456, path.MustNewComponent("hello")) + }) +} + +// TODO: Add testing coverage for other calls as well. 
diff --git a/pkg/filesystem/virtual/fuse/sysfs_disabled.go b/pkg/filesystem/virtual/fuse/sysfs_disabled.go new file mode 100644 index 0000000..90870b3 --- /dev/null +++ b/pkg/filesystem/virtual/fuse/sysfs_disabled.go @@ -0,0 +1,23 @@ +//go:build !linux +// +build !linux + +package fuse + +import ( + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// SetLinuxBackingDevInfoTunables adjusts tunables of the Backing Dev +// Info (BDI) belonging to a FUSE mount. These tunables can, for +// example, be used to increase the maximum number of dirty pages +// belonging to the mount. +// +// This is a placeholder implementation for operating systems other than +// Linux. +func SetLinuxBackingDevInfoTunables(mountPath string, variables map[string]string) error { + if len(variables) > 0 { + return status.Error(codes.Unimplemented, "Setting Linux Backing Dev Info tunables is only supported on Linux") + } + return nil +} diff --git a/pkg/filesystem/virtual/fuse/sysfs_linux.go b/pkg/filesystem/virtual/fuse/sysfs_linux.go new file mode 100644 index 0000000..eb7e116 --- /dev/null +++ b/pkg/filesystem/virtual/fuse/sysfs_linux.go @@ -0,0 +1,47 @@ +//go:build linux +// +build linux + +package fuse + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/buildbarn/bb-storage/pkg/util" + + "golang.org/x/sys/unix" +) + +// SetLinuxBackingDevInfoTunables adjusts tunables of the Linux Backing +// Dev Info (BDI) belonging to a FUSE mount. These tunables can, for +// example, be used to increase the maximum number of dirty pages +// belonging to the mount. +// +// This implementation applies the tunables through Linux's sysfs. +func SetLinuxBackingDevInfoTunables(mountPath string, variables map[string]string) error { + // Construct the path in sysfs of the max_ratio file. The path + // is based on the major/minor number of the mount's st_dev. 
+ var sb unix.Stat_t + if err := unix.Stat(mountPath, &sb); err != nil { + return util.StatusWrapf(err, "Failed to obtain device number from FUSE mount %#v", mountPath) + } + bdiPath := fmt.Sprintf("/sys/class/bdi/%d:%d", unix.Major(sb.Dev), unix.Minor(sb.Dev)) + + for key, value := range variables { + keyPath := filepath.Join(bdiPath, key) + f, err := os.OpenFile(keyPath, os.O_TRUNC|os.O_WRONLY, 0o666) + if err != nil { + return util.StatusWrapf(err, "Failed to open %#v corresponding to FUSE mount %#v", keyPath, mountPath) + } + _, err1 := f.Write([]byte(value)) + err2 := f.Close() + if err1 != nil { + return util.StatusWrapf(err1, "Failed to write to %#v corresponding to FUSE mount %#v", keyPath, mountPath) + } + if err2 != nil { + return util.StatusWrapf(err2, "Failed to close %#v corresponding to FUSE mount %#v", keyPath, mountPath) + } + } + return nil +} diff --git a/pkg/filesystem/virtual/fuse_handle_allocator.go b/pkg/filesystem/virtual/fuse_handle_allocator.go new file mode 100644 index 0000000..0540488 --- /dev/null +++ b/pkg/filesystem/virtual/fuse_handle_allocator.go @@ -0,0 +1,409 @@ +package virtual + +import ( + "context" + "io" + "sync" + "sync/atomic" + + "github.com/buildbarn/bb-storage/pkg/filesystem/path" + "github.com/buildbarn/bb-storage/pkg/random" +) + +// fnv1aHasher is a helper type for computing FNV-1a hashes. +type fnv1aHasher struct { + hash uint64 +} + +func (w *fnv1aHasher) Write(p []byte) (int, error) { + for _, c := range p { + w.hash ^= uint64(c) + w.hash *= 1099511628211 + } + return len(p), nil +} + +// FUSERemovalNotifier is a callback method that can be registered to +// report the removal of files from stateful directories. +type FUSERemovalNotifier func(parent uint64, name path.Component) + +// FUSERemovalNotifierRegistrar has the same signature as +// FUSEStatefulHandleAllocator.RegisterRemovalNotifier(). It has been +// added to aid testing. 
+type FUSERemovalNotifierRegistrar func(removalNotifier FUSERemovalNotifier) + +type fuseHandleOptions struct { + randomNumberGenerator random.ThreadSafeGenerator + + removalNotifiersLock sync.RWMutex + removalNotifiers []FUSERemovalNotifier +} + +// FUSEStatefulHandleAllocator creates a handle allocator for the +// purpose of exposing the virtual file system through FUSE. It is +// responsible for decorating all files in the file system, so that they +// have inode numbers and link counts. Inode numbers are unique for +// stateful (mutable) files, while they are identical for stateless +// files that share the same identifiers, meaning they can be +// deduplicated by the kernel. +// +// The FUSE protocol is stateful, in the sense that the kernel and the +// userspace service share state on which nodes in the file system have +// been resolved. The kernel will never issue requests against objects +// that have not been resolved yet, or for which the kernel has issued a +// FORGET operation. This means that this handle allocator does not need +// to provide any mechanism for resolving arbitrary files present in the +// file system, making its implementation simple. There is thus no +// distinction between stateless and resolvable files. +type FUSEStatefulHandleAllocator struct { + options *fuseHandleOptions +} + +var _ StatefulHandleAllocator = (*FUSEStatefulHandleAllocator)(nil) + +// NewFUSEHandleAllocator creates a new FUSEStatefulHandleAllocator. +func NewFUSEHandleAllocator(randomNumberGenerator random.ThreadSafeGenerator) *FUSEStatefulHandleAllocator { + return &FUSEStatefulHandleAllocator{ + options: &fuseHandleOptions{ + randomNumberGenerator: randomNumberGenerator, + }, + } +} + +// RegisterRemovalNotifier adds a new file removal notifier to the +// handle allocator. 
Any future calls to Release() against +// DirectoryHandles returned by this handle allocator will call into the +// FUSERemovalNotifier, providing it the inode number of the parent +// directory. +// +// This method is used by the FUSE server to register a callback that +// sends "entry notify" events to the kernel, causing the directory +// entry to be removed from the kernel's cache. +func (hr *FUSEStatefulHandleAllocator) RegisterRemovalNotifier(removalNotifier FUSERemovalNotifier) { + hr.options.removalNotifiersLock.Lock() + hr.options.removalNotifiers = append(hr.options.removalNotifiers, removalNotifier) + hr.options.removalNotifiersLock.Unlock() +} + +// New creates a new stateful handle allocation. +func (hr *FUSEStatefulHandleAllocator) New() StatefulHandleAllocation { + return &fuseStatefulHandleAllocation{ + options: hr.options, + } +} + +type fuseStatefulHandleAllocation struct { + options *fuseHandleOptions +} + +func (hn *fuseStatefulHandleAllocation) AsStatelessAllocator() StatelessHandleAllocator { + hr := &fuseStatelessHandleAllocator{ + inodeNumberSeed: hn.options.randomNumberGenerator.Uint64(), + } + *hn = fuseStatefulHandleAllocation{} + return hr +} + +func (hn *fuseStatefulHandleAllocation) AsResolvableAllocator(resolver HandleResolver) ResolvableHandleAllocator { + hr := &fuseResolvableHandleAllocator{ + inodeNumberSeed: hn.options.randomNumberGenerator.Uint64(), + } + *hn = fuseStatefulHandleAllocation{} + return hr +} + +func (hn *fuseStatefulHandleAllocation) AsStatefulDirectory(directory Directory) StatefulDirectoryHandle { + dh := &fuseStatefulDirectoryHandle{ + options: hn.options, + inodeNumber: hn.options.randomNumberGenerator.Uint64(), + } + *hn = fuseStatefulHandleAllocation{} + return dh +} + +func (hn *fuseStatefulHandleAllocation) AsStatelessDirectory(directory Directory) Directory { + d := &fuseStatelessDirectory{ + Directory: directory, + inodeNumber: hn.options.randomNumberGenerator.Uint64(), + } + *hn = 
fuseStatefulHandleAllocation{} + return d +} + +func (hn *fuseStatefulHandleAllocation) AsNativeLeaf(leaf NativeLeaf) NativeLeaf { + l := &fuseStatefulNativeLeaf{ + NativeLeaf: leaf, + inodeNumber: hn.options.randomNumberGenerator.Uint64(), + } + l.linkCount.Store(1) + *hn = fuseStatefulHandleAllocation{} + return l +} + +func (hn *fuseStatefulHandleAllocation) AsLeaf(leaf Leaf) Leaf { + panic("Regular leaf objects cannot be used in stateful contexts, as they cannot be linked/unlinked") +} + +type fuseStatelessHandleAllocator struct { + inodeNumberSeed uint64 +} + +func (hr *fuseStatelessHandleAllocator) New(w io.WriterTo) StatelessHandleAllocation { + hasher := fnv1aHasher{ + hash: hr.inodeNumberSeed, + } + if _, err := w.WriteTo(&hasher); err != nil { + panic(err) + } + return &fuseStatelessHandleAllocation{ + currentInodeNumber: hasher.hash, + } +} + +type fuseStatelessHandleAllocation struct { + currentInodeNumber uint64 +} + +func (hn *fuseStatelessHandleAllocation) AsStatelessAllocator() StatelessHandleAllocator { + hr := &fuseStatelessHandleAllocator{ + inodeNumberSeed: hn.currentInodeNumber, + } + *hn = fuseStatelessHandleAllocation{} + return hr +} + +func (hn *fuseStatelessHandleAllocation) AsResolvableAllocator(resolver HandleResolver) ResolvableHandleAllocator { + hr := &fuseResolvableHandleAllocator{ + inodeNumberSeed: hn.currentInodeNumber, + } + *hn = fuseStatelessHandleAllocation{} + return hr +} + +func (hn *fuseStatelessHandleAllocation) AsStatelessDirectory(directory Directory) Directory { + d := &fuseStatelessDirectory{ + Directory: directory, + inodeNumber: hn.currentInodeNumber, + } + *hn = fuseStatelessHandleAllocation{} + return d +} + +func (hn *fuseStatelessHandleAllocation) AsNativeLeaf(leaf NativeLeaf) NativeLeaf { + return &fuseStatelessNativeLeaf{ + NativeLeaf: leaf, + inodeNumber: hn.currentInodeNumber, + } +} + +func (hn *fuseStatelessHandleAllocation) AsLeaf(leaf Leaf) Leaf { + return &fuseStatelessLeaf{ + Leaf: leaf, + inodeNumber: 
hn.currentInodeNumber, + } +} + +type fuseResolvableHandleAllocator struct { + inodeNumberSeed uint64 +} + +func (hr *fuseResolvableHandleAllocator) New(w io.WriterTo) ResolvableHandleAllocation { + hasher := fnv1aHasher{ + hash: hr.inodeNumberSeed, + } + if _, err := w.WriteTo(&hasher); err != nil { + panic(err) + } + // Because we don't care about actually resolving files, we + // treat stateless and resolvable files equally. + return &fuseStatelessHandleAllocation{ + currentInodeNumber: hasher.hash, + } +} + +// fuseStatefulDirectoryHandle is a handle for stateful directories that +// augments the results of VirtualGetAttributes() to contain an inode +// number. It also provides a removal notifier that can call into the +// FUSE server. +type fuseStatefulDirectoryHandle struct { + options *fuseHandleOptions + inodeNumber uint64 +} + +func (dh *fuseStatefulDirectoryHandle) GetAttributes(requested AttributesMask, attributes *Attributes) { + attributes.SetInodeNumber(dh.inodeNumber) +} + +func (dh *fuseStatefulDirectoryHandle) NotifyRemoval(name path.Component) { + dh.options.removalNotifiersLock.RLock() + removalNotifiers := dh.options.removalNotifiers + dh.options.removalNotifiersLock.RUnlock() + + for _, removalNotifier := range removalNotifiers { + removalNotifier(dh.inodeNumber, name) + } +} + +func (dh *fuseStatefulDirectoryHandle) Release() {} + +// fuseStatelessDirectory is a decorator for stateless Directory objects +// that augments the results of VirtualGetAttributes() to contain an +// inode number. 
type fuseStatelessDirectory struct {
	Directory
	inodeNumber uint64 // constant inode number assigned at allocation time
}

// VirtualGetAttributes forwards the request to the underlying directory
// for any attributes this decorator does not provide itself, then fills
// in the inode number.
func (d *fuseStatelessDirectory) VirtualGetAttributes(ctx context.Context, requested AttributesMask, attributes *Attributes) {
	// Only call into the underlying directory if attributes other
	// than the inode number were requested.
	if remaining := requested &^ AttributesMaskInodeNumber; remaining != 0 {
		d.Directory.VirtualGetAttributes(ctx, remaining, attributes)
	}
	attributes.SetInodeNumber(d.inodeNumber)
}

// VirtualSetAttributes forwards the call to the underlying directory
// and, on success, patches the returned attributes with the inode
// number, mirroring VirtualGetAttributes().
func (d *fuseStatelessDirectory) VirtualSetAttributes(ctx context.Context, in *Attributes, requested AttributesMask, attributes *Attributes) Status {
	if s := d.Directory.VirtualSetAttributes(ctx, in, requested, attributes); s != StatusOK {
		return s
	}
	attributes.SetInodeNumber(d.inodeNumber)
	return StatusOK
}

// fuseStatefulNativeLeaf is a decorator for NativeLeaf that augments
// the results of VirtualGetAttributes() to contain an inode number and
// link count. Link() and Unlink() calls are intercepted: Link() only
// bumps the local link count, and Unlink() is only forwarded to the
// underlying leaf once the link count drops to zero.
type fuseStatefulNativeLeaf struct {
	NativeLeaf
	inodeNumber uint64
	linkCount   atomic.Uint32 // number of live links; 0 means fully unlinked
}

// Link increments the link count. It fails with StatusErrStale if the
// leaf has already been fully unlinked (link count zero).
func (l *fuseStatefulNativeLeaf) Link() Status {
	// Lock-free increment: retry the compare-and-swap until it
	// succeeds, re-checking for the unlinked state on every attempt.
	for {
		current := l.linkCount.Load()
		if current == 0 {
			// Attempted to link a file that was already unlinked.
			return StatusErrStale
		}
		if l.linkCount.CompareAndSwap(current, current+1) {
			return StatusOK
		}
	}
}

// Unlink decrements the link count, forwarding the Unlink() to the
// underlying leaf only when the count reaches zero.
func (l *fuseStatefulNativeLeaf) Unlink() {
	// Adding ^uint32(0) (i.e. max uint32) atomically decrements by one.
	if l.linkCount.Add(^uint32(0)) == 0 {
		l.NativeLeaf.Unlink()
	}
}

// injectAttributes overlays the decorator-provided inode number and
// current link count onto the attributes.
func (l *fuseStatefulNativeLeaf) injectAttributes(attributes *Attributes) {
	attributes.SetInodeNumber(l.inodeNumber)
	attributes.SetLinkCount(l.linkCount.Load())
	// The change ID should normally also be affected by the link
	// count. We don't bother overriding the change ID here, as FUSE
	// does not depend on it.
}

// VirtualGetAttributes forwards the request to the underlying leaf for
// any attributes this decorator does not provide itself, then fills in
// the inode number and link count.
func (l *fuseStatefulNativeLeaf) VirtualGetAttributes(ctx context.Context, requested AttributesMask, attributes *Attributes) {
	if remaining := requested &^ (AttributesMaskInodeNumber | AttributesMaskLinkCount); remaining != 0 {
		l.NativeLeaf.VirtualGetAttributes(ctx, remaining, attributes)
	}
	l.injectAttributes(attributes)
}

// VirtualSetAttributes forwards the call and, on success, patches the
// returned attributes in the same way as VirtualGetAttributes().
func (l *fuseStatefulNativeLeaf) VirtualSetAttributes(ctx context.Context, in *Attributes, requested AttributesMask, attributes *Attributes) Status {
	if s := l.NativeLeaf.VirtualSetAttributes(ctx, in, requested, attributes); s != StatusOK {
		return s
	}
	l.injectAttributes(attributes)
	return StatusOK
}

// VirtualOpenSelf forwards the call and, on success, patches the
// returned attributes in the same way as VirtualGetAttributes().
func (l *fuseStatefulNativeLeaf) VirtualOpenSelf(ctx context.Context, shareAccess ShareMask, options *OpenExistingOptions, requested AttributesMask, attributes *Attributes) Status {
	if s := l.NativeLeaf.VirtualOpenSelf(ctx, shareAccess, options, requested, attributes); s != StatusOK {
		return s
	}
	l.injectAttributes(attributes)
	return StatusOK
}

// fuseStatelessNativeLeaf is a decorator for NativeLeaf that augments
// the results of VirtualGetAttributes() to contain an inode number and
// link count. For these kinds of files, the link count is just a
// constant.
+type fuseStatelessNativeLeaf struct { + NativeLeaf + inodeNumber uint64 +} + +func (l *fuseStatelessNativeLeaf) Link() Status { + return StatusOK +} + +func (l *fuseStatelessNativeLeaf) Unlink() {} + +func (l *fuseStatelessNativeLeaf) injectAttributes(attributes *Attributes) { + attributes.SetInodeNumber(l.inodeNumber) + attributes.SetLinkCount(StatelessLeafLinkCount) +} + +func (l *fuseStatelessNativeLeaf) VirtualGetAttributes(ctx context.Context, requested AttributesMask, attributes *Attributes) { + if remaining := requested &^ (AttributesMaskInodeNumber | AttributesMaskLinkCount); remaining != 0 { + l.NativeLeaf.VirtualGetAttributes(ctx, remaining, attributes) + } + l.injectAttributes(attributes) +} + +func (l *fuseStatelessNativeLeaf) VirtualSetAttributes(ctx context.Context, in *Attributes, requested AttributesMask, attributes *Attributes) Status { + if s := l.NativeLeaf.VirtualSetAttributes(ctx, in, requested, attributes); s != StatusOK { + return s + } + l.injectAttributes(attributes) + return StatusOK +} + +func (l *fuseStatelessNativeLeaf) VirtualOpenSelf(ctx context.Context, shareAccess ShareMask, options *OpenExistingOptions, requested AttributesMask, attributes *Attributes) Status { + if s := l.NativeLeaf.VirtualOpenSelf(ctx, shareAccess, options, requested, attributes); s != StatusOK { + return s + } + l.injectAttributes(attributes) + return StatusOK +} + +// fuseStatelessLeaf is a decorator for Leaf that augments the results +// of VirtualGetAttributes() to contain an inode number and link count. +// For these kinds of files, the link count is just a constant. 
+type fuseStatelessLeaf struct { + Leaf + inodeNumber uint64 +} + +func (l *fuseStatelessLeaf) injectAttributes(attributes *Attributes) { + attributes.SetInodeNumber(l.inodeNumber) + attributes.SetLinkCount(StatelessLeafLinkCount) +} + +func (l *fuseStatelessLeaf) VirtualGetAttributes(ctx context.Context, requested AttributesMask, attributes *Attributes) { + if remaining := requested &^ (AttributesMaskInodeNumber | AttributesMaskLinkCount); remaining != 0 { + l.Leaf.VirtualGetAttributes(ctx, remaining, attributes) + } + l.injectAttributes(attributes) +} + +func (l *fuseStatelessLeaf) VirtualSetAttributes(ctx context.Context, in *Attributes, requested AttributesMask, attributes *Attributes) Status { + if s := l.Leaf.VirtualSetAttributes(ctx, in, requested, attributes); s != StatusOK { + return s + } + l.injectAttributes(attributes) + return StatusOK +} + +func (l *fuseStatelessLeaf) VirtualOpenSelf(ctx context.Context, shareAccess ShareMask, options *OpenExistingOptions, requested AttributesMask, attributes *Attributes) Status { + if s := l.Leaf.VirtualOpenSelf(ctx, shareAccess, options, requested, attributes); s != StatusOK { + return s + } + l.injectAttributes(attributes) + return StatusOK +} diff --git a/pkg/filesystem/virtual/fuse_handle_allocator_test.go b/pkg/filesystem/virtual/fuse_handle_allocator_test.go new file mode 100644 index 0000000..2215f39 --- /dev/null +++ b/pkg/filesystem/virtual/fuse_handle_allocator_test.go @@ -0,0 +1,202 @@ +package virtual_test + +import ( + "bytes" + "context" + "testing" + + "github.com/buildbarn/bb-remote-execution/internal/mock" + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/virtual" + "github.com/buildbarn/bb-storage/pkg/filesystem/path" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" +) + +func TestFUSEHandleAllocator(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + randomNumberGenerator := mock.NewMockThreadSafeGenerator(ctrl) + handleAllocator := 
virtual.NewFUSEHandleAllocator(randomNumberGenerator) + attributesMask := virtual.AttributesMaskInodeNumber | virtual.AttributesMaskLinkCount | virtual.AttributesMaskSizeBytes + + removalNotifier := mock.NewMockFUSERemovalNotifier(ctrl) + handleAllocator.RegisterRemovalNotifier(removalNotifier.Call) + + t.Run("StatefulDirectory", func(t *testing.T) { + // Create a stateful directory. The handle that is + // returned should add an inode number to the + // attributes. + baseDirectory := mock.NewMockVirtualDirectory(ctrl) + + randomNumberGenerator.EXPECT().Uint64().Return(uint64(0xfccd1fc99a8c3425)) + directoryHandle := handleAllocator.New().AsStatefulDirectory(baseDirectory) + + var attr virtual.Attributes + directoryHandle.GetAttributes(attributesMask, &attr) + require.Equal( + t, + (&virtual.Attributes{}).SetInodeNumber(0xfccd1fc99a8c3425), + &attr) + + // Removal notifications should be forwarded, to the + // FUSE server, containing the inode number of the + // parent directory. + removalNotifier.EXPECT().Call(uint64(0xfccd1fc99a8c3425), path.MustNewComponent("output.o")) + directoryHandle.NotifyRemoval(path.MustNewComponent("output.o")) + + directoryHandle.Release() + }) + + t.Run("StatelessDirectory", func(t *testing.T) { + // Create a stateless directory and wrap it. Only an + // inode number should be added, as the directory is + // still responsible for providing its own link count. + // The link count is based on the number of child + // directories. + baseDirectory := mock.NewMockVirtualDirectory(ctrl) + baseDirectory.EXPECT().VirtualGetAttributes(ctx, virtual.AttributesMaskLinkCount|virtual.AttributesMaskSizeBytes, gomock.Any()). 
+ Do(func(ctx context.Context, attributesMask virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetLinkCount(17) + attributes.SetSizeBytes(42) + }).AnyTimes() + + randomNumberGenerator.EXPECT().Uint64().Return(uint64(0xa44671c491369d36)) + wrappedDirectory := handleAllocator.New().AsStatelessDirectory(baseDirectory) + + var attr virtual.Attributes + wrappedDirectory.VirtualGetAttributes(ctx, attributesMask, &attr) + require.Equal( + t, + (&virtual.Attributes{}). + SetInodeNumber(0xa44671c491369d36). + SetLinkCount(17). + SetSizeBytes(42), + &attr) + }) + + t.Run("StatefulNativeLeaf", func(t *testing.T) { + // Create a stateful file and wrap it. A link count and + // inode number should be added. + baseLeaf := mock.NewMockNativeLeaf(ctrl) + baseLeaf.EXPECT().VirtualGetAttributes(ctx, virtual.AttributesMaskSizeBytes, gomock.Any()). + Do(func(ctx context.Context, attributesMask virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetSizeBytes(42) + }).AnyTimes() + + randomNumberGenerator.EXPECT().Uint64().Return(uint64(0xf999bb2fd22421d8)) + wrappedLeaf := handleAllocator.New().AsNativeLeaf(baseLeaf) + + var attr1 virtual.Attributes + wrappedLeaf.VirtualGetAttributes(ctx, attributesMask, &attr1) + require.Equal( + t, + (&virtual.Attributes{}). + SetInodeNumber(0xf999bb2fd22421d8). + SetLinkCount(1). + SetSizeBytes(42), + &attr1) + + // Hardlinking it should cause the link count to be + // increased. + require.Equal(t, virtual.StatusOK, wrappedLeaf.Link()) + + var attr2 virtual.Attributes + wrappedLeaf.VirtualGetAttributes(ctx, attributesMask, &attr2) + require.Equal( + t, + (&virtual.Attributes{}). + SetInodeNumber(0xf999bb2fd22421d8). + SetLinkCount(2). + SetSizeBytes(42), + &attr2) + + // Unlinking it twice should cause the underlying leaf + // node to be unlinked. 
+ wrappedLeaf.Unlink() + baseLeaf.EXPECT().Unlink() + wrappedLeaf.Unlink() + + var attr3 virtual.Attributes + wrappedLeaf.VirtualGetAttributes(ctx, attributesMask, &attr3) + require.Equal( + t, + (&virtual.Attributes{}). + SetInodeNumber(0xf999bb2fd22421d8). + SetLinkCount(0). + SetSizeBytes(42), + &attr3) + + // Attempting to link it again should fail, as files + // cannot be brought back after being unlinked. + require.Equal(t, virtual.StatusErrStale, wrappedLeaf.Link()) + + var attr4 virtual.Attributes + wrappedLeaf.VirtualGetAttributes(ctx, attributesMask, &attr4) + require.Equal( + t, + (&virtual.Attributes{}). + SetInodeNumber(0xf999bb2fd22421d8). + SetLinkCount(0). + SetSizeBytes(42), + &attr4) + }) + + t.Run("StatelessNativeLeaf", func(t *testing.T) { + // Create a stateless file and wrap it. A link count and + // inode number should be added. As the file is + // stateless, the link count uses a placeholder value. + // + // The inode number of the leaf corresponds with the + // FNV-1a hash of "Hello", using 0x6aae40a05f45b861 as + // the offset basis. + baseLeaf := mock.NewMockNativeLeaf(ctrl) + baseLeaf.EXPECT().VirtualGetAttributes(ctx, virtual.AttributesMaskSizeBytes, gomock.Any()). + Do(func(ctx context.Context, attributesMask virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetSizeBytes(123) + }).AnyTimes() + + randomNumberGenerator.EXPECT().Uint64().Return(uint64(0x6aae40a05f45b861)) + wrappedLeaf := handleAllocator. + New(). + AsStatelessAllocator(). + New(bytes.NewBuffer([]byte("Hello"))). + AsNativeLeaf(baseLeaf) + + var attr1 virtual.Attributes + wrappedLeaf.VirtualGetAttributes(ctx, attributesMask, &attr1) + require.Equal( + t, + (&virtual.Attributes{}). + SetInodeNumber(0x2fac04c71c5c810f). + SetLinkCount(virtual.StatelessLeafLinkCount). + SetSizeBytes(123), + &attr1) + + // Hardlinking should have no effect. 
+		require.Equal(t, virtual.StatusOK, wrappedLeaf.Link())
+
+		var attr2 virtual.Attributes
+		wrappedLeaf.VirtualGetAttributes(ctx, attributesMask, &attr2)
+		require.Equal(
+			t,
+			(&virtual.Attributes{}).
+				SetInodeNumber(0x2fac04c71c5c810f).
+				SetLinkCount(virtual.StatelessLeafLinkCount).
+				SetSizeBytes(123),
+			&attr2)
+
+		// Unlinking also has no effect.
+		wrappedLeaf.Unlink()
+		wrappedLeaf.Unlink()
+
+		var attr3 virtual.Attributes
+		wrappedLeaf.VirtualGetAttributes(ctx, attributesMask, &attr3)
+		require.Equal(
+			t,
+			(&virtual.Attributes{}).
+				SetInodeNumber(0x2fac04c71c5c810f).
+				SetLinkCount(virtual.StatelessLeafLinkCount).
+				SetSizeBytes(123),
+			&attr3)
+	})
+}
diff --git a/pkg/filesystem/virtual/handle_allocating_file_allocator.go b/pkg/filesystem/virtual/handle_allocating_file_allocator.go
new file mode 100644
index 0000000..97e5131
--- /dev/null
+++ b/pkg/filesystem/virtual/handle_allocating_file_allocator.go
@@ -0,0 +1,25 @@
+package virtual
+
+type handleAllocatingFileAllocator struct {
+	base      FileAllocator
+	allocator StatefulHandleAllocator
+}
+
+// NewHandleAllocatingFileAllocator creates a decorator for
+// FileAllocator that creates mutable files that have a stateful handle
+// associated with them. This gives every mutable file its own inode
+// number and link count.
+func NewHandleAllocatingFileAllocator(base FileAllocator, allocator StatefulHandleAllocator) FileAllocator { + return &handleAllocatingFileAllocator{ + base: base, + allocator: allocator, + } +} + +func (fa *handleAllocatingFileAllocator) NewFile(isExecutable bool, size uint64, shareAccess ShareMask) (NativeLeaf, Status) { + leaf, s := fa.base.NewFile(isExecutable, size, shareAccess) + if s != StatusOK { + return nil, s + } + return fa.allocator.New().AsNativeLeaf(leaf), StatusOK +} diff --git a/pkg/filesystem/virtual/handle_allocating_symlink_factory.go b/pkg/filesystem/virtual/handle_allocating_symlink_factory.go new file mode 100644 index 0000000..644f9f8 --- /dev/null +++ b/pkg/filesystem/virtual/handle_allocating_symlink_factory.go @@ -0,0 +1,28 @@ +package virtual + +type handleAllocatingSymlinkFactory struct { + base SymlinkFactory + allocator StatelessHandleAllocator +} + +// NewHandleAllocatingSymlinkFactory is a decorator for SymlinkFactory +// that creates symbolic link nodes that have a stateless handle +// associated with them. +// +// Because symbolic link contents can be long, it is not possible to use +// a resolvable allocator here, as that would cause the full contents of +// the symbolic link to become part of the file handle, which is +// undesirable. In the case of NFS we want these nodes to be explicitly +// tracked, using an invisible link count. +func NewHandleAllocatingSymlinkFactory(base SymlinkFactory, allocation StatelessHandleAllocation) SymlinkFactory { + return &handleAllocatingSymlinkFactory{ + base: base, + allocator: allocation.AsStatelessAllocator(), + } +} + +func (sf *handleAllocatingSymlinkFactory) LookupSymlink(target []byte) NativeLeaf { + return sf.allocator. + New(ByteSliceID(target)). 
+ AsNativeLeaf(sf.base.LookupSymlink(target)) +} diff --git a/pkg/filesystem/virtual/handle_allocator.go b/pkg/filesystem/virtual/handle_allocator.go new file mode 100644 index 0000000..833ee91 --- /dev/null +++ b/pkg/filesystem/virtual/handle_allocator.go @@ -0,0 +1,129 @@ +package virtual + +import ( + "encoding/binary" + "io" + + "github.com/buildbarn/bb-storage/pkg/filesystem/path" +) + +// StatefulHandleAllocator is responsible for allocating new file +// handles, giving files and directories stored in the file system their +// own identity and lifetime. The exact meaning of identity is +// implementation specific. In the case of FUSE it can be an inode +// number/node ID, while in the case of NFSv4 it corresponds to the +// value of a 128-byte nfs_fh4. +type StatefulHandleAllocator interface { + New() StatefulHandleAllocation +} + +// StatefulHandleAllocation corresponds to an allocation of a file +// handle that is unique, meaning it can be used to identify stateful, +// mutable files or directories. +// +// Exactly one call to one of the methods on this interface needs to be +// performed to convert the allocation to an actual use of the file +// handle. +type StatefulHandleAllocation interface { + StatelessHandleAllocation + + AsStatefulDirectory(directory Directory) StatefulDirectoryHandle +} + +// StatefulDirectoryHandle is a handle that needs to be embedded into +// stateful directories. It can be used to report mutations to the +// directory through NotifyRemoval(), or report deletion through +// Release(). +// +// The directory type that embeds the handle must call GetAttributes() +// as part of Directory.VirtualGetAttributes() to augment the attributes +// with an inode number and/or file handle. 
+type StatefulDirectoryHandle interface { + GetAttributes(requested AttributesMask, attributes *Attributes) + NotifyRemoval(name path.Component) + Release() +} + +// StatelessHandleAllocator is responsible for allocating file handles +// of files that are stateless, immutable files. +// +// For every handle that is allocated, an identifier needs to be +// provided in the form of an io.WriterTo. This may be used to give +// files with the same identifier the same underlying file handle or +// inode number. +// +// NOTE: Care must be taken that the provided identifier is properly +// terminated or is prefixed with its own length. Stateless handle +// allocators may be nested, causing their identifiers to be +// concatenated. This could cause ambiguity if this rule is not +// followed. +type StatelessHandleAllocator interface { + New(id io.WriterTo) StatelessHandleAllocation +} + +// StatelessHandleAllocation corresponds to an allocation of a file +// handle that is stateless, meaning it can be used to identify +// stateless files or directories. +type StatelessHandleAllocation interface { + ResolvableHandleAllocation + + AsStatelessAllocator() StatelessHandleAllocator +} + +// ResolvableHandleAllocator is responsible for allocating file handles +// that are not only stateless, but can also be trivially reconstructed +// based on a small, bounded amount of information. +// +// This kind of allocator is generally used by bb_clientd's cas/ +// subdirectory. This directory allows for the exploration of arbitrary +// objects stored in the Content Addressable Storage (CAS). This +// allocator can be used to store the digest of such objects in the file +// handle, meaning bb_clientd doesn't need to track state for each of +// the files individually. +// +// The exact amount of space that can be stored in a file handle is +// protocol specific. NFSv3 and NFSv4 use file handles that are 64 and +// 128 bytes in size, respectively. 
+type ResolvableHandleAllocator interface {
+	New(id io.WriterTo) ResolvableHandleAllocation
+}
+
+// HandleResolver is a method that is used by ResolvableHandleAllocator
+// to reconstruct files based on the identifiers provided to New().
+//
+// TODO: Implementations of this method must currently make sure that
+// directories and leaves that are returned are decorated with a handle
+// allocation. Can't we let implementations of ResolvableHandleAllocator
+// do this? That way resolvers may remain simple.
+type HandleResolver func(r io.ByteReader) (DirectoryChild, Status)
+
+// ResolvableHandleAllocation corresponds to an allocation of a file
+// handle that is not only stateless, but can also be trivially
+// reconstructed based on a small, bounded amount of information.
+//
+// TODO: ResolvableHandleAllocators can be nested. The resolver provided
+// to AsResolvableAllocator() is only used for the first level. We could
+// eliminate the argument, but that makes composition harder.
+type ResolvableHandleAllocation interface {
+	AsResolvableAllocator(resolver HandleResolver) ResolvableHandleAllocator
+	AsStatelessDirectory(directory Directory) Directory
+	AsNativeLeaf(leaf NativeLeaf) NativeLeaf
+	AsLeaf(leaf Leaf) Leaf
+}
+
+// ByteSliceID is a helper type for consumers of
+// StatelessHandleAllocator and ResolvableHandleAllocator. It uses the
+// contents of a byte slice as an object ID. The byte slice will be
+// prefixed with its own length, so that no ambiguity exists if
+// allocators are nested.
+type ByteSliceID []byte
+
+// WriteTo writes the length of a byte slice and its contents to the Writer.
+func (data ByteSliceID) WriteTo(w io.Writer) (nTotal int64, err error) {
+	// Emit the slice's length as a varint prefix, so that nested
+	// allocators can concatenate identifiers unambiguously.
+	var targetSize [binary.MaxVarintLen64]byte
+	n, err := w.Write(targetSize[:binary.PutUvarint(targetSize[:], uint64(len(data)))])
+	nTotal += int64(n)
+	if err != nil {
+		// Stop at the first failed write, as required by the
+		// io.WriterTo contract. Attempting the second write
+		// here would clobber the error.
+		return
+	}
+	n, err = w.Write(data)
+	nTotal += int64(n)
+	return
+}
diff --git a/pkg/filesystem/virtual/in_memory_prepopulated_directory.go b/pkg/filesystem/virtual/in_memory_prepopulated_directory.go
new file mode 100644
index 0000000..4d52f12
--- /dev/null
+++ b/pkg/filesystem/virtual/in_memory_prepopulated_directory.go
@@ -0,0 +1,1118 @@
+package virtual
+
+import (
+	"context"
+	"fmt"
+	"sort"
+	"sync"
+	"syscall"
+	"time"
+
+	re_sync "github.com/buildbarn/bb-remote-execution/pkg/sync"
+	"github.com/buildbarn/bb-storage/pkg/clock"
+	"github.com/buildbarn/bb-storage/pkg/filesystem"
+	"github.com/buildbarn/bb-storage/pkg/filesystem/path"
+	"github.com/buildbarn/bb-storage/pkg/util"
+)
+
+// StringMatcher is a function type that has the same signature as
+// regexp.Regexp's MatchString() method. It is used by
+// InMemoryPrepopulatedDirectory to determine which files should be
+// hidden from directory listings.
+type StringMatcher func(s string) bool
+
+// inMemoryFilesystem contains state that is shared across all
+// inMemoryPrepopulatedDirectory objects that form a single hierarchy.
+type inMemoryFilesystem struct {
+	symlinkFactory          SymlinkFactory
+	statefulHandleAllocator StatefulHandleAllocator
+	initialContentsSorter   Sorter
+	hiddenFilesMatcher      StringMatcher
+	clock                   clock.Clock
+}
+
+// inMemorySubtree contains state that is shared across all
+// inMemoryPrepopulatedDirectory objects in a subtree of an
+// inMemoryFilesystem.
+//
+// Every subtree in the filesystem may have its own file allocator. This
+// permits us to apply per-action disk quotas. It may also have its own
+// error logger, which allows us to notify LocalBuildExecutor of disk
+// I/O errors.
+type inMemorySubtree struct { + filesystem *inMemoryFilesystem + fileAllocator FileAllocator + errorLogger util.ErrorLogger +} + +func (s *inMemorySubtree) createNewDirectory(initialContentsFetcher InitialContentsFetcher) *inMemoryPrepopulatedDirectory { + d := &inMemoryPrepopulatedDirectory{ + subtree: s, + initialContentsFetcher: initialContentsFetcher, + contents: inMemoryDirectoryContents{ + lastDataModificationTime: s.filesystem.clock.Now(), + }, + } + d.handle = s.filesystem.statefulHandleAllocator.New().AsStatefulDirectory(d) + return d +} + +// inMemoryDirectoryChild contains exactly one reference to an object +// that's embedded in a parent directory. +type inMemoryDirectoryChild = Child[*inMemoryPrepopulatedDirectory, NativeLeaf, Node] + +// inMemoryDirectoryEntry is a directory entry for an object stored in +// inMemoryDirectoryContents. +type inMemoryDirectoryEntry struct { + child inMemoryDirectoryChild + + // For VirtualReadDir(). + cookie uint64 + name path.Component + previous *inMemoryDirectoryEntry + next *inMemoryDirectoryEntry +} + +// inMemoryDirectoryContents contains the listing of all children stored +// in an inMemoryPrepopulatedDirectory. Entries are stored both in a map +// and a list. The latter is needed for readdir() to behave +// deterministically. The isDeleted flag may be set when empty and no +// new children may be added. +type inMemoryDirectoryContents struct { + entriesMap map[path.Component]*inMemoryDirectoryEntry + entriesList inMemoryDirectoryEntry + isDeleted bool + changeID uint64 + lastDataModificationTime time.Time +} + +// initialize a directory by making it empty. +func (c *inMemoryDirectoryContents) initialize() { + c.entriesMap = map[path.Component]*inMemoryDirectoryEntry{} + c.entriesList.previous = &c.entriesList + c.entriesList.next = &c.entriesList +} + +// attach an existing directory or leaf to the directory contents. 
+func (c *inMemoryDirectoryContents) attach(subtree *inMemorySubtree, name path.Component, child inMemoryDirectoryChild) {
+	if err := c.mayAttach(name); err != 0 {
+		panic(fmt.Sprintf("Directory %#v may not be attached: %s", name, err))
+	}
+	// Insert the new entry at the tail of the doubly linked entries
+	// list, and record it in the name lookup map.
+	entry := &inMemoryDirectoryEntry{
+		child: child,
+
+		name:     name,
+		cookie:   c.changeID,
+		previous: c.entriesList.previous,
+		next:     &c.entriesList,
+	}
+	c.entriesMap[name] = entry
+	entry.previous.next = entry
+	entry.next.previous = entry
+	c.touch(subtree)
+}
+
+// attachNewDirectory adds a new directory to the directory contents. The
+// initial contents of this new directory may be specified in the form
+// of an InitialContentsFetcher, which gets evaluated lazily.
+func (c *inMemoryDirectoryContents) attachNewDirectory(subtree *inMemorySubtree, name path.Component, initialContentsFetcher InitialContentsFetcher) *inMemoryPrepopulatedDirectory {
+	newDirectory := subtree.createNewDirectory(initialContentsFetcher)
+	c.attach(subtree, name, inMemoryDirectoryChild{}.FromDirectory(newDirectory))
+	return newDirectory
+}
+
+// Detach the entry from the directory. Clear the entry to prevent
+// foot-shooting. This allows VirtualReadDir() to detect that iteration
+// was interrupted.
+func (c *inMemoryDirectoryContents) detach(subtree *inMemorySubtree, entry *inMemoryDirectoryEntry) { + delete(c.entriesMap, entry.name) + entry.previous.next = entry.next + entry.next.previous = entry.previous + entry.previous = nil + entry.next = nil + c.touch(subtree) +} + +func (c *inMemoryDirectoryContents) mayAttach(name path.Component) syscall.Errno { + if c.isDeleted { + return syscall.ENOENT + } + if _, ok := c.entriesMap[name]; ok { + return syscall.EEXIST + } + return 0 +} + +func (c *inMemoryDirectoryContents) virtualMayAttach(name path.Component) Status { + if c.isDeleted { + return StatusErrNoEnt + } + if _, ok := c.entriesMap[name]; ok { + return StatusErrExist + } + return StatusOK +} + +func (c *inMemoryDirectoryContents) touch(subtree *inMemorySubtree) { + c.changeID++ + c.lastDataModificationTime = subtree.filesystem.clock.Now() +} + +func (c *inMemoryDirectoryContents) isDeletable(hiddenFilesMatcher StringMatcher) bool { + for entry := c.entriesList.next; entry != &c.entriesList; entry = entry.next { + if directory, _ := entry.child.GetPair(); directory != nil || !hiddenFilesMatcher(entry.name.String()) { + return false + } + } + return true +} + +func (c *inMemoryDirectoryContents) createChildren(subtree *inMemorySubtree, children map[path.Component]InitialNode) { + // Either sort or shuffle the children before inserting them + // into the directory. This either makes VirtualReadDir() behave + // deterministically, or not, based on preference. 
+ namesList := make(path.ComponentsList, 0, len(children)) + for name := range children { + namesList = append(namesList, name) + } + subtree.filesystem.initialContentsSorter(namesList) + + for _, name := range namesList { + if directory, leaf := children[name].GetPair(); directory != nil { + c.attachNewDirectory(subtree, name, directory) + } else { + c.attach(subtree, name, inMemoryDirectoryChild{}.FromLeaf(leaf)) + } + } +} + +func (c *inMemoryDirectoryContents) getEntryAtCookie(firstCookie uint64) *inMemoryDirectoryEntry { + entry := c.entriesList.next + for { + if entry == &c.entriesList || entry.cookie >= firstCookie { + return entry + } + entry = entry.next + } +} + +// getAndLockIfDirectory obtains a child from the current directory, and +// immediately locks it if it is a directory. To prevent possible +// deadlocks, we must respect the lock order. This may require this +// function to drop the lock on current directories prior to picking up +// the lock of the child directory. +func (c *inMemoryDirectoryContents) getAndLockIfDirectory(name path.Component, lockPile *re_sync.LockPile) (*inMemoryDirectoryEntry, bool) { + for { + entry, ok := c.entriesMap[name] + if !ok { + // No child node present. + return nil, false + } + directory, _ := entry.child.GetPair() + if directory == nil { + // Not a directory. + return entry, true + } + childDirectoryLock := &directory.lock + if lockPile.Lock(childDirectoryLock) { + // Lock acquisition of child succeeded without + // dropping any of the existing locks. + return entry, true + } + if c.entriesMap[name] == entry { + // Even though we dropped locks, no race occurred. 
+ return entry, true + } + lockPile.Unlock(childDirectoryLock) + } +} + +func (c *inMemoryDirectoryContents) getDirectoriesAndLeavesCount(hiddenFilesMatcher StringMatcher) (directoriesCount, leavesCount int) { + for entry := c.entriesList.next; entry != &c.entriesList; entry = entry.next { + if directory, _ := entry.child.GetPair(); directory != nil { + directoriesCount++ + } else if !hiddenFilesMatcher(entry.name.String()) { + leavesCount++ + } + } + return +} + +// inMemoryPrepopulatedDirectory is an implementation of PrepopulatedDirectory that +// keeps all directory metadata stored in memory. Actual file data and +// metadata is not managed by this implementation. Files are allocated +// by calling into a provided FileAllocator. +// +// inMemoryPrepopulatedDirectory uses fine-grained locking. Every directory has +// its own mutex that protects its maps of child directories and leaf +// nodes. As various operations require the acquisition of multiple +// locks (e.g., Rename() locking up to three directories), util.LockPile +// is used for deadlock avoidance. To ensure consistency, locks on one +// or more directories may be held when calling into the FileAllocator +// or NativeLeaf nodes. +type inMemoryPrepopulatedDirectory struct { + subtree *inMemorySubtree + handle StatefulDirectoryHandle + + lock sync.Mutex + initialContentsFetcher InitialContentsFetcher + contents inMemoryDirectoryContents +} + +// NewInMemoryPrepopulatedDirectory creates a new PrepopulatedDirectory +// that keeps all directory metadata stored in memory. As the filesystem +// API does not allow traversing the hierarchy upwards, this directory +// can be considered the root directory of the hierarchy. 
+func NewInMemoryPrepopulatedDirectory(fileAllocator FileAllocator, symlinkFactory SymlinkFactory, errorLogger util.ErrorLogger, handleAllocator StatefulHandleAllocator, initialContentsSorter Sorter, hiddenFilesMatcher StringMatcher, clock clock.Clock) PrepopulatedDirectory { + subtree := &inMemorySubtree{ + filesystem: &inMemoryFilesystem{ + symlinkFactory: symlinkFactory, + statefulHandleAllocator: handleAllocator, + initialContentsSorter: initialContentsSorter, + hiddenFilesMatcher: hiddenFilesMatcher, + clock: clock, + }, + fileAllocator: fileAllocator, + errorLogger: errorLogger, + } + return subtree.createNewDirectory(EmptyInitialContentsFetcher) +} + +// Initialize the directory with the intended contents if not done so +// already. This function is used by inMemoryPrepopulatedDirectory's operations +// to gain access to the directory's contents. +func (i *inMemoryPrepopulatedDirectory) getContents() (*inMemoryDirectoryContents, error) { + if i.initialContentsFetcher != nil { + children, err := i.initialContentsFetcher.FetchContents(func(name path.Component) FileReadMonitor { return nil }) + if err != nil { + return nil, err + } + i.initialContentsFetcher = nil + i.contents.initialize() + i.contents.createChildren(i.subtree, children) + } + return &i.contents, nil +} + +func (i *inMemoryPrepopulatedDirectory) markDeleted() { + if !i.contents.isDeleted { + if i.initialContentsFetcher != nil || !i.contents.isDeletable(i.subtree.filesystem.hiddenFilesMatcher) { + panic("Attempted to delete a directory that was not empty") + } + + // The directory may still contain hidden files. Remove + // these prior to marking the directory as deleted. + // + // TODO: This should call i.handle.NotifyRemoval(), but + // that cannot be done while locks are held. Is this + // even necessary, considering that the directory is + // removed entirely? 
+ for i.contents.entriesList.next != &i.contents.entriesList { + entry := i.contents.entriesList.next + i.contents.detach(i.subtree, entry) + _, leaf := entry.child.GetPair() + leaf.Unlink() + } + + i.contents.isDeleted = true + i.handle.Release() + } +} + +func (i *inMemoryPrepopulatedDirectory) LookupChild(name path.Component) (PrepopulatedDirectoryChild, error) { + i.lock.Lock() + defer i.lock.Unlock() + + contents, err := i.getContents() + if err != nil { + return PrepopulatedDirectoryChild{}, err + } + + if entry, ok := contents.entriesMap[name]; ok { + child := &entry.child + directory, leaf := child.GetPair() + if directory != nil { + return PrepopulatedDirectoryChild{}.FromDirectory(directory), nil + } + return PrepopulatedDirectoryChild{}.FromLeaf(leaf), nil + } + return PrepopulatedDirectoryChild{}, syscall.ENOENT +} + +func (i *inMemoryPrepopulatedDirectory) LookupAllChildren() ([]DirectoryPrepopulatedDirEntry, []LeafPrepopulatedDirEntry, error) { + i.lock.Lock() + defer i.lock.Unlock() + + contents, err := i.getContents() + if err != nil { + return nil, nil, err + } + + directoriesCount, leavesCount := contents.getDirectoriesAndLeavesCount(i.subtree.filesystem.hiddenFilesMatcher) + directories := make(directoryPrepopulatedDirEntryList, 0, directoriesCount) + leaves := make(leafPrepopulatedDirEntryList, 0, leavesCount) + for entry := contents.entriesList.next; entry != &contents.entriesList; entry = entry.next { + if directory, leaf := entry.child.GetPair(); directory != nil { + directories = append(directories, DirectoryPrepopulatedDirEntry{ + Child: directory, + Name: entry.name, + }) + } else if !i.subtree.filesystem.hiddenFilesMatcher(entry.name.String()) { + leaves = append(leaves, LeafPrepopulatedDirEntry{ + Child: leaf, + Name: entry.name, + }) + } + } + + sort.Sort(directories) + sort.Sort(leaves) + return directories, leaves, nil +} + +func (i *inMemoryPrepopulatedDirectory) ReadDir() ([]filesystem.FileInfo, error) { + i.lock.Lock() + defer 
i.lock.Unlock() + + contents, err := i.getContents() + if err != nil { + return nil, err + } + + entries := make(filesystem.FileInfoList, 0, len(contents.entriesMap)) + for entry := contents.entriesList.next; entry != &contents.entriesList; entry = entry.next { + if directory, leaf := entry.child.GetPair(); directory != nil { + entries = append(entries, + filesystem.NewFileInfo(entry.name, filesystem.FileTypeDirectory, false)) + } else if !i.subtree.filesystem.hiddenFilesMatcher(entry.name.String()) { + entries = append(entries, GetFileInfo(entry.name, leaf)) + } + } + sort.Sort(entries) + return entries, nil +} + +func (i *inMemoryPrepopulatedDirectory) Remove(name path.Component) error { + lockPile := re_sync.LockPile{} + defer lockPile.UnlockAll() + lockPile.Lock(&i.lock) + + contents, err := i.getContents() + if err != nil { + return err + } + + if entry, ok := contents.getAndLockIfDirectory(name, &lockPile); ok { + if directory, leaf := entry.child.GetPair(); directory != nil { + // The directory has a child directory under + // that name. Perform an rmdir(). + childContents, err := directory.getContents() + if err != nil { + return err + } + if !childContents.isDeletable(i.subtree.filesystem.hiddenFilesMatcher) { + return syscall.ENOTEMPTY + } + directory.markDeleted() + } else { + // The directory has a child file/symlink under + // that name. Perform an unlink(). 
+ leaf.Unlink() + } + contents.detach(i.subtree, entry) + lockPile.UnlockAll() + i.handle.NotifyRemoval(name) + return nil + } + + return syscall.ENOENT +} + +func (i *inMemoryPrepopulatedDirectory) RemoveAll(name path.Component) error { + i.lock.Lock() + + contents, err := i.getContents() + if err != nil { + i.lock.Unlock() + return err + } + + if entry, ok := contents.entriesMap[name]; ok { + contents.detach(i.subtree, entry) + i.lock.Unlock() + i.handle.NotifyRemoval(name) + if directory, leaf := entry.child.GetPair(); directory != nil { + // The directory has a child directory under + // that name. Perform a recursive removal. + directory.removeAllChildren(true) + } else { + // The directory has a child file/symlink under + // that name. Perform an unlink(). + leaf.Unlink() + } + return nil + } + + i.lock.Unlock() + return syscall.ENOENT +} + +func (i *inMemoryPrepopulatedDirectory) RemoveAllChildren(deleteSelf bool) error { + i.removeAllChildren(deleteSelf) + return nil +} + +func (i *inMemoryPrepopulatedDirectory) removeAllChildren(deleteSelf bool) { + i.lock.Lock() + if i.initialContentsFetcher != nil { + // The directory has not been initialized. Instead of + // initializing it as intended and removing all + // contents, forcefully initialize it as an empty + // directory. + i.initialContentsFetcher = nil + i.contents.initialize() + if deleteSelf { + i.markDeleted() + } + i.lock.Unlock() + } else { + // Detach all contents from the directory. + var entries *inMemoryDirectoryEntry + for i.contents.entriesList.next != &i.contents.entriesList { + entry := i.contents.entriesList.next + i.contents.detach(i.subtree, entry) + entry.previous = entries + entries = entry + } + if deleteSelf { + i.markDeleted() + } + i.lock.Unlock() + + i.postRemoveChildren(entries) + } +} + +// postRemoveChildren is called after bulk unlinking files and +// directories and dropping the parent directory lock. 
It invalidates +// all entries in the FUSE directory entry cache and recursively removes +// all files. +func (i *inMemoryPrepopulatedDirectory) postRemoveChildren(entries *inMemoryDirectoryEntry) { + for entry := entries; entry != nil; entry = entry.previous { + i.handle.NotifyRemoval(entry.name) + if directory, leaf := entry.child.GetPair(); directory != nil { + directory.removeAllChildren(true) + } else { + leaf.Unlink() + } + } +} + +func (i *inMemoryPrepopulatedDirectory) InstallHooks(fileAllocator FileAllocator, errorLogger util.ErrorLogger) { + i.lock.Lock() + defer i.lock.Unlock() + + i.subtree = &inMemorySubtree{ + filesystem: i.subtree.filesystem, + fileAllocator: fileAllocator, + errorLogger: errorLogger, + } +} + +func (i *inMemoryPrepopulatedDirectory) CreateChildren(children map[path.Component]InitialNode, overwrite bool) error { + i.lock.Lock() + contents, err := i.getContents() + if err != nil { + i.lock.Unlock() + return err + } + + if contents.isDeleted { + i.lock.Unlock() + return syscall.ENOENT + } + + // Remove entries that are about to be overwritten. 
+ var overwrittenEntries *inMemoryDirectoryEntry + if overwrite { + for name := range children { + if entry, ok := contents.entriesMap[name]; ok { + contents.detach(i.subtree, entry) + entry.previous = overwrittenEntries + overwrittenEntries = entry + } + } + } else { + for name := range children { + if _, ok := contents.entriesMap[name]; ok { + i.lock.Unlock() + return syscall.EEXIST + } + } + } + + contents.createChildren(i.subtree, children) + i.lock.Unlock() + + i.postRemoveChildren(overwrittenEntries) + return nil +} + +func (i *inMemoryPrepopulatedDirectory) CreateAndEnterPrepopulatedDirectory(name path.Component) (PrepopulatedDirectory, error) { + i.lock.Lock() + + contents, err := i.getContents() + if err != nil { + i.lock.Unlock() + return nil, err + } + + if entry, ok := contents.entriesMap[name]; ok { + directory, leaf := entry.child.GetPair() + if directory != nil { + // Already a directory. + i.lock.Unlock() + return directory, nil + } + // Not a directory. Replace it. + contents.detach(i.subtree, entry) + leaf.Unlink() + newChild := contents.attachNewDirectory(i.subtree, name, EmptyInitialContentsFetcher) + i.lock.Unlock() + i.handle.NotifyRemoval(name) + return newChild, nil + } + + if contents.isDeleted { + return nil, syscall.ENOENT + } + child := contents.attachNewDirectory(i.subtree, name, EmptyInitialContentsFetcher) + i.lock.Unlock() + return child, nil +} + +func (i *inMemoryPrepopulatedDirectory) filterChildrenRecursive(childFilter ChildFilter) bool { + i.lock.Lock() + if initialContentsFetcher := i.initialContentsFetcher; initialContentsFetcher != nil { + // Directory is not initialized. There is no need to + // instantiate it. Simply provide the + // InitialContentsFetcher to the callback. + i.lock.Unlock() + return childFilter(InitialNode{}.FromDirectory(initialContentsFetcher), func() error { + return i.RemoveAllChildren(false) + }) + } + + // Directory is already initialized. Gather the contents. 
+ type leafInfo struct { + name path.Component + leaf NativeLeaf + } + directoriesCount, leavesCount := i.contents.getDirectoriesAndLeavesCount(i.subtree.filesystem.hiddenFilesMatcher) + directories := make([]*inMemoryPrepopulatedDirectory, 0, directoriesCount) + leaves := make([]leafInfo, 0, leavesCount) + for entry := i.contents.entriesList.next; entry != &i.contents.entriesList; entry = entry.next { + if directory, leaf := entry.child.GetPair(); directory != nil { + directories = append(directories, directory) + } else { + leaves = append(leaves, leafInfo{ + name: entry.name, + leaf: leaf, + }) + } + } + i.lock.Unlock() + + // Invoke the callback for all children. + for _, child := range leaves { + name := child.name + if !childFilter(InitialNode{}.FromLeaf(child.leaf), func() error { + return i.Remove(name) + }) { + return false + } + } + for _, child := range directories { + if !child.filterChildrenRecursive(childFilter) { + return false + } + } + return true +} + +func (i *inMemoryPrepopulatedDirectory) FilterChildren(childFilter ChildFilter) error { + i.filterChildrenRecursive(childFilter) + return nil +} + +func (i *inMemoryPrepopulatedDirectory) virtualGetContents() (*inMemoryDirectoryContents, Status) { + contents, err := i.getContents() + if err != nil { + i.subtree.errorLogger.Log(util.StatusWrap(err, "Failed to initialize directory")) + return nil, StatusErrIO + } + return contents, StatusOK +} + +func (i *inMemoryPrepopulatedDirectory) VirtualOpenChild(ctx context.Context, name path.Component, shareAccess ShareMask, createAttributes *Attributes, existingOptions *OpenExistingOptions, requested AttributesMask, openedFileAttributes *Attributes) (Leaf, AttributesMask, ChangeInfo, Status) { + i.lock.Lock() + defer i.lock.Unlock() + + contents, s := i.virtualGetContents() + if s != StatusOK { + return nil, 0, ChangeInfo{}, s + } + + if entry, ok := contents.entriesMap[name]; ok { + // File already exists. 
+ if existingOptions == nil { + return nil, 0, ChangeInfo{}, StatusErrExist + } + directory, leaf := entry.child.GetPair() + if directory != nil { + return nil, 0, ChangeInfo{}, StatusErrIsDir + } + s := leaf.VirtualOpenSelf(ctx, shareAccess, existingOptions, requested, openedFileAttributes) + return leaf, existingOptions.ToAttributesMask(), ChangeInfo{ + Before: contents.changeID, + After: contents.changeID, + }, s + } + + // File doesn't exist. + if contents.isDeleted || createAttributes == nil { + return nil, 0, ChangeInfo{}, StatusErrNoEnt + } + + // Create new file with attributes provided. + var respected AttributesMask + isExecutable := false + if permissions, ok := createAttributes.GetPermissions(); ok { + respected |= AttributesMaskPermissions + isExecutable = permissions&PermissionsExecute != 0 + } + size := uint64(0) + if sizeBytes, ok := createAttributes.GetSizeBytes(); ok { + respected |= AttributesMaskSizeBytes + size = sizeBytes + } + leaf, s := i.subtree.fileAllocator.NewFile(isExecutable, size, shareAccess) + if s != StatusOK { + return nil, 0, ChangeInfo{}, s + } + + // Attach file to the directory. 
+ changeIDBefore := contents.changeID + contents.attach(i.subtree, name, inMemoryDirectoryChild{}.FromLeaf(leaf)) + leaf.VirtualGetAttributes(ctx, requested, openedFileAttributes) + return leaf, respected, ChangeInfo{ + Before: changeIDBefore, + After: contents.changeID, + }, StatusOK +} + +const inMemoryPrepopulatedDirectoryLockedAttributesMask = AttributesMaskChangeID | AttributesMaskLastDataModificationTime + +func (i *inMemoryPrepopulatedDirectory) VirtualGetAttributes(ctx context.Context, requested AttributesMask, attributes *Attributes) { + i.virtualGetAttributesUnlocked(requested, attributes) + if requested&inMemoryPrepopulatedDirectoryLockedAttributesMask != 0 { + i.lock.Lock() + i.virtualGetAttributesLocked(requested, attributes) + i.lock.Unlock() + } +} + +func (i *inMemoryPrepopulatedDirectory) virtualGetAttributesUnlocked(requested AttributesMask, attributes *Attributes) { + attributes.SetFileType(filesystem.FileTypeDirectory) + // To be consistent with traditional UNIX file systems, this + // would need to be 2 + len(i.directories), but that would + // require us to initialize the directory, which is undesirable. + attributes.SetLinkCount(ImplicitDirectoryLinkCount) + attributes.SetPermissions(PermissionsRead | PermissionsWrite | PermissionsExecute) + attributes.SetSizeBytes(0) + i.handle.GetAttributes(requested, attributes) +} + +func (i *inMemoryPrepopulatedDirectory) virtualGetAttributesLocked(requested AttributesMask, attributes *Attributes) { + attributes.SetChangeID(i.contents.changeID) + attributes.SetLastDataModificationTime(i.contents.lastDataModificationTime) +} + +func (i *inMemoryPrepopulatedDirectory) VirtualLink(ctx context.Context, name path.Component, leaf Leaf, requested AttributesMask, out *Attributes) (ChangeInfo, Status) { + child, ok := leaf.(NativeLeaf) + if !ok { + // The file is not the kind that can be embedded into + // inMemoryPrepopulatedDirectory. 
+ return ChangeInfo{}, StatusErrXDev + } + + i.lock.Lock() + defer i.lock.Unlock() + + contents, s := i.virtualGetContents() + if s != StatusOK { + return ChangeInfo{}, s + } + + if s := contents.virtualMayAttach(name); s != StatusOK { + return ChangeInfo{}, s + } + if s := child.Link(); s != StatusOK { + return ChangeInfo{}, s + } + changeIDBefore := contents.changeID + contents.attach(i.subtree, name, inMemoryDirectoryChild{}.FromLeaf(child)) + + child.VirtualGetAttributes(ctx, requested, out) + return ChangeInfo{ + Before: changeIDBefore, + After: contents.changeID, + }, StatusOK +} + +func (i *inMemoryPrepopulatedDirectory) VirtualLookup(ctx context.Context, name path.Component, requested AttributesMask, out *Attributes) (DirectoryChild, Status) { + lockPile := re_sync.LockPile{} + defer lockPile.UnlockAll() + lockPile.Lock(&i.lock) + + contents, s := i.virtualGetContents() + if s != StatusOK { + return DirectoryChild{}, s + } + + // Depending on which attributes need to be returned, we either + // need to lock the child directory or not. We can't just call + // into VirtualGetAttributes() on the child directory, as that + // might cause a deadlock. 
+ if requested&inMemoryPrepopulatedDirectoryLockedAttributesMask != 0 { + if entry, ok := contents.getAndLockIfDirectory(name, &lockPile); ok { + directory, leaf := entry.child.GetPair() + if directory != nil { + directory.virtualGetAttributesUnlocked(requested, out) + directory.virtualGetAttributesLocked(requested, out) + return DirectoryChild{}.FromDirectory(directory), StatusOK + } + leaf.VirtualGetAttributes(ctx, requested, out) + return DirectoryChild{}.FromLeaf(leaf), StatusOK + } + } else { + if entry, ok := contents.entriesMap[name]; ok { + directory, leaf := entry.child.GetPair() + if directory != nil { + directory.virtualGetAttributesUnlocked(requested, out) + return DirectoryChild{}.FromDirectory(directory), StatusOK + } + leaf.VirtualGetAttributes(ctx, requested, out) + return DirectoryChild{}.FromLeaf(leaf), StatusOK + } + } + return DirectoryChild{}, StatusErrNoEnt +} + +func (i *inMemoryPrepopulatedDirectory) VirtualMkdir(name path.Component, requested AttributesMask, out *Attributes) (Directory, ChangeInfo, Status) { + i.lock.Lock() + defer i.lock.Unlock() + + contents, s := i.virtualGetContents() + if s != StatusOK { + return nil, ChangeInfo{}, s + } + + if s := contents.virtualMayAttach(name); s != StatusOK { + return nil, ChangeInfo{}, s + } + changeIDBefore := contents.changeID + child := contents.attachNewDirectory(i.subtree, name, EmptyInitialContentsFetcher) + + // Even though the child directory is not locked explicitly, the + // following is safe, as the directory has not been returned yet. 
+ child.virtualGetAttributesUnlocked(requested, out) + child.virtualGetAttributesLocked(requested, out) + return child, ChangeInfo{ + Before: changeIDBefore, + After: contents.changeID, + }, StatusOK +} + +func (i *inMemoryPrepopulatedDirectory) VirtualMknod(ctx context.Context, name path.Component, fileType filesystem.FileType, requested AttributesMask, out *Attributes) (Leaf, ChangeInfo, Status) { + i.lock.Lock() + defer i.lock.Unlock() + + contents, s := i.virtualGetContents() + if s != StatusOK { + return nil, ChangeInfo{}, s + } + + if s := contents.virtualMayAttach(name); s != StatusOK { + return nil, ChangeInfo{}, s + } + // Every FIFO or UNIX domain socket needs to have its own inode + // number, as the kernel uses that to tell instances apart. We + // therefore consider it to be stateful, like a writable file. + child := i.subtree.filesystem.statefulHandleAllocator. + New(). + AsNativeLeaf(NewSpecialFile(fileType, nil)) + changeIDBefore := contents.changeID + contents.attach(i.subtree, name, inMemoryDirectoryChild{}.FromLeaf(child)) + + child.VirtualGetAttributes(ctx, requested, out) + return child, ChangeInfo{ + Before: changeIDBefore, + After: contents.changeID, + }, StatusOK +} + +func (i *inMemoryPrepopulatedDirectory) VirtualReadDir(ctx context.Context, firstCookie uint64, requested AttributesMask, reporter DirectoryEntryReporter) Status { + lockPile := re_sync.LockPile{} + defer lockPile.UnlockAll() + lockPile.Lock(&i.lock) + + contents, s := i.virtualGetContents() + if s != StatusOK { + return s + } + + for entry := contents.getEntryAtCookie(firstCookie); entry != &contents.entriesList; { + if directory, leaf := entry.child.GetPair(); directory != nil { + var attributes Attributes + directory.virtualGetAttributesUnlocked(requested, &attributes) + + // The caller requested attributes that can only + // be obtained by locking the child directory. 
+ // This may require us to briefly drop the lock + // on the parent directory, which may invalidate + // the current directory entry. + // + // Because we clear directory entries while + // detaching, we can detect this and retry by + // seeking through the directory once again. + if requested&inMemoryPrepopulatedDirectoryLockedAttributesMask != 0 { + if !lockPile.Lock(&directory.lock) && entry.next == nil { + lockPile.Unlock(&directory.lock) + entry = contents.getEntryAtCookie(entry.cookie) + continue + } + directory.virtualGetAttributesLocked(requested, &attributes) + lockPile.Unlock(&directory.lock) + } + + if !reporter.ReportEntry(entry.cookie+1, entry.name, DirectoryChild{}.FromDirectory(directory), &attributes) { + break + } + } else if !i.subtree.filesystem.hiddenFilesMatcher(entry.name.String()) { + var attributes Attributes + leaf.VirtualGetAttributes(ctx, requested, &attributes) + if !reporter.ReportEntry(entry.cookie+1, entry.name, DirectoryChild{}.FromLeaf(leaf), &attributes) { + break + } + } + entry = entry.next + } + return StatusOK +} + +func (i *inMemoryPrepopulatedDirectory) VirtualRename(oldName path.Component, newDirectory Directory, newName path.Component) (ChangeInfo, ChangeInfo, Status) { + iOld := i + iNew, ok := newDirectory.(*inMemoryPrepopulatedDirectory) + if !ok { + return ChangeInfo{}, ChangeInfo{}, StatusErrXDev + } + + lockPile := re_sync.LockPile{} + defer lockPile.UnlockAll() + lockPile.Lock(&iOld.lock, &iNew.lock) + + oldContents, s := iOld.virtualGetContents() + if s != StatusOK { + return ChangeInfo{}, ChangeInfo{}, s + } + newContents, s := iNew.virtualGetContents() + if s != StatusOK { + return ChangeInfo{}, ChangeInfo{}, s + } + + oldChangeIDBefore := oldContents.changeID + newChangeIDBefore := newContents.changeID + if newEntry, ok := newContents.getAndLockIfDirectory(newName, &lockPile); ok { + oldEntry, ok := oldContents.entriesMap[oldName] + if !ok { + return ChangeInfo{}, ChangeInfo{}, StatusErrNoEnt + } + oldChild := 
oldEntry.child + oldDirectory, oldLeaf := oldChild.GetPair() + newChild := newEntry.child + if newDirectory, newLeaf := newChild.GetPair(); newDirectory != nil { + // Renaming to a location at which a directory + // already exists. + if oldDirectory == nil { + return ChangeInfo{}, ChangeInfo{}, StatusErrIsDir + } + // Renaming a directory to itself is always + // permitted, even when not empty. + if newDirectory != oldDirectory { + if iOld.subtree.filesystem != iNew.subtree.filesystem { + return ChangeInfo{}, ChangeInfo{}, StatusErrXDev + } + newChildContents, s := newDirectory.virtualGetContents() + if s != StatusOK { + return ChangeInfo{}, ChangeInfo{}, s + } + if !newChildContents.isDeletable(i.subtree.filesystem.hiddenFilesMatcher) { + return ChangeInfo{}, ChangeInfo{}, StatusErrNotEmpty + } + oldContents.detach(i.subtree, oldEntry) + // TODO: Pick up an interlock and check for + // potential creation of cyclic directory + // structures. + newContents.detach(i.subtree, newEntry) + newDirectory.markDeleted() + newContents.attach(i.subtree, newName, oldChild) + } + } else { + // Renaming to a location at which a leaf + // already exists. + if oldDirectory != nil { + return ChangeInfo{}, ChangeInfo{}, StatusErrNotDir + } + // POSIX requires that renaming a file to itself + // has no effect. After running the following + // sequence of commands, both "a" and "b" should + // still exist: "touch a; ln a b; mv a b". + if newLeaf != oldLeaf { + oldContents.detach(i.subtree, oldEntry) + newContents.detach(i.subtree, newEntry) + newLeaf.Unlink() + newContents.attach(i.subtree, newName, oldChild) + } + } + } else { + // Renaming to a location where no file exists. 
+ if newContents.isDeleted { + return ChangeInfo{}, ChangeInfo{}, StatusErrNoEnt + } + oldEntry, ok := oldContents.entriesMap[oldName] + if !ok { + return ChangeInfo{}, ChangeInfo{}, StatusErrNoEnt + } + oldChild := oldEntry.child + if oldDirectory, _ := oldChild.GetPair(); oldDirectory != nil { + if iOld.subtree.filesystem != iNew.subtree.filesystem { + return ChangeInfo{}, ChangeInfo{}, StatusErrXDev + } + } + oldContents.detach(i.subtree, oldEntry) + newContents.attach(i.subtree, newName, oldChild) + } + return ChangeInfo{ + Before: oldChangeIDBefore, + After: oldContents.changeID, + }, ChangeInfo{ + Before: newChangeIDBefore, + After: newContents.changeID, + }, StatusOK +} + +func (i *inMemoryPrepopulatedDirectory) VirtualRemove(name path.Component, removeDirectory, removeLeaf bool) (ChangeInfo, Status) { + lockPile := re_sync.LockPile{} + defer lockPile.UnlockAll() + lockPile.Lock(&i.lock) + + contents, s := i.virtualGetContents() + if s != StatusOK { + return ChangeInfo{}, s + } + + if entry, ok := contents.getAndLockIfDirectory(name, &lockPile); ok { + if directory, leaf := entry.child.GetPair(); directory != nil { + if !removeDirectory { + return ChangeInfo{}, StatusErrPerm + } + childContents, s := directory.virtualGetContents() + if s != StatusOK { + return ChangeInfo{}, s + } + if !childContents.isDeletable(i.subtree.filesystem.hiddenFilesMatcher) { + return ChangeInfo{}, StatusErrNotEmpty + } + directory.markDeleted() + } else { + if !removeLeaf { + return ChangeInfo{}, StatusErrNotDir + } + leaf.Unlink() + } + changeIDBefore := contents.changeID + contents.detach(i.subtree, entry) + return ChangeInfo{ + Before: changeIDBefore, + After: contents.changeID, + }, StatusOK + } + + return ChangeInfo{}, StatusErrNoEnt +} + +func (i *inMemoryPrepopulatedDirectory) VirtualSetAttributes(ctx context.Context, in *Attributes, requested AttributesMask, out *Attributes) Status { + if _, ok := in.GetSizeBytes(); ok { + return StatusErrInval + } + 
i.VirtualGetAttributes(ctx, requested, out) + return StatusOK +} + +func (i *inMemoryPrepopulatedDirectory) VirtualSymlink(ctx context.Context, pointedTo []byte, linkName path.Component, requested AttributesMask, out *Attributes) (Leaf, ChangeInfo, Status) { + i.lock.Lock() + defer i.lock.Unlock() + + contents, s := i.virtualGetContents() + if s != StatusOK { + return nil, ChangeInfo{}, s + } + + if s := contents.virtualMayAttach(linkName); s != StatusOK { + return nil, ChangeInfo{}, s + } + child := i.subtree.filesystem.symlinkFactory.LookupSymlink(pointedTo) + changeIDBefore := contents.changeID + contents.attach(i.subtree, linkName, inMemoryDirectoryChild{}.FromLeaf(child)) + + child.VirtualGetAttributes(ctx, requested, out) + return child, ChangeInfo{ + Before: changeIDBefore, + After: contents.changeID, + }, StatusOK +} + +// directoryPrepopulatedDirEntryList is a list of DirectoryDirEntry +// objects returned by LookupAllChildren(). This type may be used to +// sort elements in the list by name. +type directoryPrepopulatedDirEntryList []DirectoryPrepopulatedDirEntry + +func (l directoryPrepopulatedDirEntryList) Len() int { + return len(l) +} + +func (l directoryPrepopulatedDirEntryList) Less(i, j int) bool { + return l[i].Name.String() < l[j].Name.String() +} + +func (l directoryPrepopulatedDirEntryList) Swap(i, j int) { + l[i], l[j] = l[j], l[i] +} + +// leafPrepopulatedDirEntryList is a list of LeafPrepopulatedDirEntry +// objects returned by LookupAllChildren(). This type may be used to +// sort elements in the list by name. 
+type leafPrepopulatedDirEntryList []LeafPrepopulatedDirEntry + +func (l leafPrepopulatedDirEntryList) Len() int { + return len(l) +} + +func (l leafPrepopulatedDirEntryList) Less(i, j int) bool { + return l[i].Name.String() < l[j].Name.String() +} + +func (l leafPrepopulatedDirEntryList) Swap(i, j int) { + l[i], l[j] = l[j], l[i] +} diff --git a/pkg/filesystem/virtual/in_memory_prepopulated_directory_test.go b/pkg/filesystem/virtual/in_memory_prepopulated_directory_test.go new file mode 100644 index 0000000..140a79d --- /dev/null +++ b/pkg/filesystem/virtual/in_memory_prepopulated_directory_test.go @@ -0,0 +1,1614 @@ +package virtual_test + +import ( + "context" + "os" + "regexp" + "sort" + "syscall" + "testing" + "time" + + "github.com/buildbarn/bb-remote-execution/internal/mock" + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/virtual" + "github.com/buildbarn/bb-storage/pkg/clock" + "github.com/buildbarn/bb-storage/pkg/filesystem" + "github.com/buildbarn/bb-storage/pkg/filesystem/path" + "github.com/buildbarn/bb-storage/pkg/testutil" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +const inMemoryPrepopulatedDirectoryAttributesMask = virtual.AttributesMaskChangeID | + virtual.AttributesMaskFileType | + virtual.AttributesMaskInodeNumber | + virtual.AttributesMaskLastDataModificationTime | + virtual.AttributesMaskLinkCount | + virtual.AttributesMaskPermissions | + virtual.AttributesMaskSizeBytes + +const specialFileAttributesMask = virtual.AttributesMaskChangeID | + virtual.AttributesMaskFileType | + virtual.AttributesMaskInodeNumber | + virtual.AttributesMaskLinkCount | + virtual.AttributesMaskPermissions | + virtual.AttributesMaskSizeBytes + +func inMemoryPrepopulatedDirectoryExpectMkdir(ctrl *gomock.Controller, handleAllocator *mock.MockStatefulHandleAllocator) *mock.MockStatefulDirectoryHandle { + handleAllocation := 
mock.NewMockStatefulHandleAllocation(ctrl) + handleAllocator.EXPECT().New().Return(handleAllocation) + directoryHandle := mock.NewMockStatefulDirectoryHandle(ctrl) + handleAllocation.EXPECT().AsStatefulDirectory(gomock.Any()).Return(directoryHandle) + return directoryHandle +} + +var hiddenFilesPatternForTesting = regexp.MustCompile("^\\._") + +func TestInMemoryPrepopulatedDirectoryLookupChildNonExistent(t *testing.T) { + ctrl := gomock.NewController(t) + + fileAllocator := mock.NewMockFileAllocator(ctrl) + symlinkFactory := mock.NewMockSymlinkFactory(ctrl) + errorLogger := mock.NewMockErrorLogger(ctrl) + handleAllocator := mock.NewMockStatefulHandleAllocator(ctrl) + inMemoryPrepopulatedDirectoryExpectMkdir(ctrl, handleAllocator) + d := virtual.NewInMemoryPrepopulatedDirectory(fileAllocator, symlinkFactory, errorLogger, handleAllocator, sort.Sort, hiddenFilesPatternForTesting.MatchString, clock.SystemClock) + + _, err := d.LookupChild(path.MustNewComponent("nonexistent")) + require.True(t, os.IsNotExist(err)) +} + +func TestInMemoryPrepopulatedDirectoryLookupChildFile(t *testing.T) { + ctrl := gomock.NewController(t) + + fileAllocator := mock.NewMockFileAllocator(ctrl) + symlinkFactory := mock.NewMockSymlinkFactory(ctrl) + errorLogger := mock.NewMockErrorLogger(ctrl) + handleAllocator := mock.NewMockStatefulHandleAllocator(ctrl) + inMemoryPrepopulatedDirectoryExpectMkdir(ctrl, handleAllocator) + d := virtual.NewInMemoryPrepopulatedDirectory(fileAllocator, symlinkFactory, errorLogger, handleAllocator, sort.Sort, hiddenFilesPatternForTesting.MatchString, clock.SystemClock) + + leaf := mock.NewMockNativeLeaf(ctrl) + require.NoError(t, d.CreateChildren(map[path.Component]virtual.InitialNode{ + path.MustNewComponent("file"): virtual.InitialNode{}.FromLeaf(leaf), + }, false)) + + child, err := d.LookupChild(path.MustNewComponent("file")) + require.NoError(t, err) + require.Equal(t, virtual.PrepopulatedDirectoryChild{}.FromLeaf(leaf), child) +} + +func 
TestInMemoryPrepopulatedDirectoryLookupChildDirectory(t *testing.T) { + ctrl := gomock.NewController(t) + + fileAllocator := mock.NewMockFileAllocator(ctrl) + symlinkFactory := mock.NewMockSymlinkFactory(ctrl) + errorLogger := mock.NewMockErrorLogger(ctrl) + handleAllocator := mock.NewMockStatefulHandleAllocator(ctrl) + inMemoryPrepopulatedDirectoryExpectMkdir(ctrl, handleAllocator) + d := virtual.NewInMemoryPrepopulatedDirectory(fileAllocator, symlinkFactory, errorLogger, handleAllocator, sort.Sort, hiddenFilesPatternForTesting.MatchString, clock.SystemClock) + + inMemoryPrepopulatedDirectoryExpectMkdir(ctrl, handleAllocator) + require.NoError(t, d.CreateChildren(map[path.Component]virtual.InitialNode{ + path.MustNewComponent("subdir"): virtual.InitialNode{}.FromDirectory(virtual.EmptyInitialContentsFetcher), + }, false)) + + child, err := d.LookupChild(path.MustNewComponent("subdir")) + require.NoError(t, err) + + childDirectory, childLeaf := child.GetPair() + require.NotNil(t, childDirectory) + require.Nil(t, childLeaf) +} + +func TestInMemoryPrepopulatedDirectoryLookupAllChildrenFailure(t *testing.T) { + ctrl := gomock.NewController(t) + + fileAllocator := mock.NewMockFileAllocator(ctrl) + symlinkFactory := mock.NewMockSymlinkFactory(ctrl) + errorLogger := mock.NewMockErrorLogger(ctrl) + handleAllocator := mock.NewMockStatefulHandleAllocator(ctrl) + inMemoryPrepopulatedDirectoryExpectMkdir(ctrl, handleAllocator) + d := virtual.NewInMemoryPrepopulatedDirectory(fileAllocator, symlinkFactory, errorLogger, handleAllocator, sort.Sort, hiddenFilesPatternForTesting.MatchString, clock.SystemClock) + + inMemoryPrepopulatedDirectoryExpectMkdir(ctrl, handleAllocator) + initialContentsFetcher := mock.NewMockInitialContentsFetcher(ctrl) + require.NoError(t, d.CreateChildren(map[path.Component]virtual.InitialNode{ + path.MustNewComponent("subdir"): virtual.InitialNode{}.FromDirectory(initialContentsFetcher), + }, false)) + + child, err := 
d.LookupChild(path.MustNewComponent("subdir")) + require.NoError(t, err) + + childDirectory, childLeaf := child.GetPair() + require.NotNil(t, childDirectory) + require.Nil(t, childLeaf) + + // When LookupAllChildren() is called in an uninitialized + // directory and initialization fails, the error should be + // propagated to the caller. + initialContentsFetcher.EXPECT().FetchContents(gomock.Any()). + Return(nil, status.Error(codes.Internal, "Network error")) + _, _, err = childDirectory.LookupAllChildren() + testutil.RequireEqualStatus(t, status.Error(codes.Internal, "Network error"), err) +} + +func TestInMemoryPrepopulatedDirectoryLookupAllChildrenSuccess(t *testing.T) { + ctrl := gomock.NewController(t) + + fileAllocator := mock.NewMockFileAllocator(ctrl) + symlinkFactory := mock.NewMockSymlinkFactory(ctrl) + errorLogger := mock.NewMockErrorLogger(ctrl) + handleAllocator := mock.NewMockStatefulHandleAllocator(ctrl) + inMemoryPrepopulatedDirectoryExpectMkdir(ctrl, handleAllocator) + d := virtual.NewInMemoryPrepopulatedDirectory(fileAllocator, symlinkFactory, errorLogger, handleAllocator, sort.Sort, hiddenFilesPatternForTesting.MatchString, clock.SystemClock) + + // Populate the directory with files and directories. + leaf1 := mock.NewMockNativeLeaf(ctrl) + leaf2 := mock.NewMockNativeLeaf(ctrl) + require.NoError(t, d.CreateChildren(map[path.Component]virtual.InitialNode{ + path.MustNewComponent("leaf1"): virtual.InitialNode{}.FromLeaf(leaf1), + path.MustNewComponent("._leaf2"): virtual.InitialNode{}.FromLeaf(leaf2), + }, false)) + + inMemoryPrepopulatedDirectoryExpectMkdir(ctrl, handleAllocator) + subdir1, err := d.CreateAndEnterPrepopulatedDirectory(path.MustNewComponent("subdir1")) + require.NoError(t, err) + inMemoryPrepopulatedDirectoryExpectMkdir(ctrl, handleAllocator) + subdir2, err := d.CreateAndEnterPrepopulatedDirectory(path.MustNewComponent("subdir2")) + require.NoError(t, err) + + // All children should be returned in sorted order. 
Hidden + // entries should be omitted. + directories, leaves, err := d.LookupAllChildren() + require.NoError(t, err) + require.Equal(t, []virtual.DirectoryPrepopulatedDirEntry{ + {Name: path.MustNewComponent("subdir1"), Child: subdir1}, + {Name: path.MustNewComponent("subdir2"), Child: subdir2}, + }, directories) + require.Equal(t, []virtual.LeafPrepopulatedDirEntry{ + {Name: path.MustNewComponent("leaf1"), Child: leaf1}, + }, leaves) +} + +func TestInMemoryPrepopulatedDirectoryReadDir(t *testing.T) { + ctrl := gomock.NewController(t) + + fileAllocator := mock.NewMockFileAllocator(ctrl) + symlinkFactory := mock.NewMockSymlinkFactory(ctrl) + errorLogger := mock.NewMockErrorLogger(ctrl) + handleAllocator := mock.NewMockStatefulHandleAllocator(ctrl) + inMemoryPrepopulatedDirectoryExpectMkdir(ctrl, handleAllocator) + d := virtual.NewInMemoryPrepopulatedDirectory(fileAllocator, symlinkFactory, errorLogger, handleAllocator, sort.Sort, hiddenFilesPatternForTesting.MatchString, clock.SystemClock) + + // Prepare file system. + inMemoryPrepopulatedDirectoryExpectMkdir(ctrl, handleAllocator) + leaf1 := mock.NewMockNativeLeaf(ctrl) + leaf2 := mock.NewMockNativeLeaf(ctrl) + require.NoError(t, d.CreateChildren(map[path.Component]virtual.InitialNode{ + path.MustNewComponent("directory"): virtual.InitialNode{}.FromDirectory(virtual.EmptyInitialContentsFetcher), + path.MustNewComponent("file"): virtual.InitialNode{}.FromLeaf(leaf1), + path.MustNewComponent("._hidden_file"): virtual.InitialNode{}.FromLeaf(leaf2), + }, false)) + + // Validate directory listing. 
+ leaf1.EXPECT().VirtualGetAttributes( + gomock.Any(), + virtual.AttributesMaskFileType|virtual.AttributesMaskPermissions, + gomock.Any(), + ).Do(func(ctx context.Context, requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetFileType(filesystem.FileTypeRegularFile) + attributes.SetPermissions(virtual.PermissionsRead) + }) + files, err := d.ReadDir() + require.NoError(t, err) + require.Equal(t, files, + []filesystem.FileInfo{ + filesystem.NewFileInfo(path.MustNewComponent("directory"), filesystem.FileTypeDirectory, false), + filesystem.NewFileInfo(path.MustNewComponent("file"), filesystem.FileTypeRegularFile, false), + }) +} + +func TestInMemoryPrepopulatedDirectoryRemoveNonExistent(t *testing.T) { + ctrl := gomock.NewController(t) + + fileAllocator := mock.NewMockFileAllocator(ctrl) + symlinkFactory := mock.NewMockSymlinkFactory(ctrl) + errorLogger := mock.NewMockErrorLogger(ctrl) + handleAllocator := mock.NewMockStatefulHandleAllocator(ctrl) + inMemoryPrepopulatedDirectoryExpectMkdir(ctrl, handleAllocator) + d := virtual.NewInMemoryPrepopulatedDirectory(fileAllocator, symlinkFactory, errorLogger, handleAllocator, sort.Sort, hiddenFilesPatternForTesting.MatchString, clock.SystemClock) + + require.True(t, os.IsNotExist(d.Remove(path.MustNewComponent("nonexistent")))) +} + +func TestInMemoryPrepopulatedDirectoryRemoveDirectory(t *testing.T) { + ctrl := gomock.NewController(t) + + fileAllocator := mock.NewMockFileAllocator(ctrl) + symlinkFactory := mock.NewMockSymlinkFactory(ctrl) + errorLogger := mock.NewMockErrorLogger(ctrl) + handleAllocator := mock.NewMockStatefulHandleAllocator(ctrl) + dHandle := inMemoryPrepopulatedDirectoryExpectMkdir(ctrl, handleAllocator) + d := virtual.NewInMemoryPrepopulatedDirectory(fileAllocator, symlinkFactory, errorLogger, handleAllocator, sort.Sort, hiddenFilesPatternForTesting.MatchString, clock.SystemClock) + + subdirHandle := inMemoryPrepopulatedDirectoryExpectMkdir(ctrl, handleAllocator) + 
require.NoError(t, d.CreateChildren(map[path.Component]virtual.InitialNode{ + path.MustNewComponent("directory"): virtual.InitialNode{}.FromDirectory(virtual.EmptyInitialContentsFetcher), + }, false)) + + // Test that removing a directory through filesystem.Directory + // also triggers FUSE invalidations. + dHandle.EXPECT().NotifyRemoval(path.MustNewComponent("directory")) + subdirHandle.EXPECT().Release() + require.NoError(t, d.Remove(path.MustNewComponent("directory"))) +} + +func TestInMemoryPrepopulatedDirectoryRemoveDirectoryNotEmpty(t *testing.T) { + ctrl := gomock.NewController(t) + + fileAllocator := mock.NewMockFileAllocator(ctrl) + symlinkFactory := mock.NewMockSymlinkFactory(ctrl) + errorLogger := mock.NewMockErrorLogger(ctrl) + handleAllocator := mock.NewMockStatefulHandleAllocator(ctrl) + inMemoryPrepopulatedDirectoryExpectMkdir(ctrl, handleAllocator) + d := virtual.NewInMemoryPrepopulatedDirectory(fileAllocator, symlinkFactory, errorLogger, handleAllocator, sort.Sort, hiddenFilesPatternForTesting.MatchString, clock.SystemClock) + + inMemoryPrepopulatedDirectoryExpectMkdir(ctrl, handleAllocator) + initialContentsFetcher := mock.NewMockInitialContentsFetcher(ctrl) + require.NoError(t, d.CreateChildren(map[path.Component]virtual.InitialNode{ + path.MustNewComponent("directory"): virtual.InitialNode{}.FromDirectory(initialContentsFetcher), + }, false)) + leaf := mock.NewMockNativeLeaf(ctrl) + initialContentsFetcher.EXPECT().FetchContents(gomock.Any()).Return(map[path.Component]virtual.InitialNode{ + path.MustNewComponent("file"): virtual.InitialNode{}.FromLeaf(leaf), + }, nil) + + require.Equal(t, syscall.ENOTEMPTY, d.Remove(path.MustNewComponent("directory"))) +} + +func TestInMemoryPrepopulatedDirectoryRemoveFile(t *testing.T) { + ctrl := gomock.NewController(t) + + fileAllocator := mock.NewMockFileAllocator(ctrl) + symlinkFactory := mock.NewMockSymlinkFactory(ctrl) + errorLogger := mock.NewMockErrorLogger(ctrl) + handleAllocator := 
mock.NewMockStatefulHandleAllocator(ctrl) + dHandle := inMemoryPrepopulatedDirectoryExpectMkdir(ctrl, handleAllocator) + d := virtual.NewInMemoryPrepopulatedDirectory(fileAllocator, symlinkFactory, errorLogger, handleAllocator, sort.Sort, hiddenFilesPatternForTesting.MatchString, clock.SystemClock) + + leaf := mock.NewMockNativeLeaf(ctrl) + require.NoError(t, d.CreateChildren(map[path.Component]virtual.InitialNode{ + path.MustNewComponent("file"): virtual.InitialNode{}.FromLeaf(leaf), + }, false)) + + leaf.EXPECT().Unlink() + dHandle.EXPECT().NotifyRemoval(path.MustNewComponent("file")) + require.NoError(t, d.Remove(path.MustNewComponent("file"))) +} + +// TODO: Add testing coverage for RemoveAll(). + +func TestInMemoryPrepopulatedDirectoryCreateChildrenSuccess(t *testing.T) { + ctrl := gomock.NewController(t) + + // Initial parent directory. + fileAllocator := mock.NewMockFileAllocator(ctrl) + symlinkFactory := mock.NewMockSymlinkFactory(ctrl) + errorLogger := mock.NewMockErrorLogger(ctrl) + handleAllocator := mock.NewMockStatefulHandleAllocator(ctrl) + inMemoryPrepopulatedDirectoryExpectMkdir(ctrl, handleAllocator) + d := virtual.NewInMemoryPrepopulatedDirectory(fileAllocator, symlinkFactory, errorLogger, handleAllocator, sort.Sort, hiddenFilesPatternForTesting.MatchString, clock.SystemClock) + + // Merge another directory and file into it. + inMemoryPrepopulatedDirectoryExpectMkdir(ctrl, handleAllocator) + subdirectoryFetcher := mock.NewMockInitialContentsFetcher(ctrl) + topLevelFile := mock.NewMockNativeLeaf(ctrl) + require.NoError(t, d.CreateChildren(map[path.Component]virtual.InitialNode{ + path.MustNewComponent("dir"): virtual.InitialNode{}.FromDirectory(subdirectoryFetcher), + path.MustNewComponent("file"): virtual.InitialNode{}.FromLeaf(topLevelFile), + }, false)) + + // Validate top-level directory listing. 
+ topLevelFile.EXPECT().VirtualGetAttributes( + gomock.Any(), + virtual.AttributesMaskFileType|virtual.AttributesMaskPermissions, + gomock.Any(), + ).Do(func(ctx context.Context, requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetFileType(filesystem.FileTypeRegularFile) + attributes.SetPermissions(virtual.PermissionsRead | virtual.PermissionsWrite) + }) + entries, err := d.ReadDir() + require.NoError(t, err) + require.Equal(t, entries, + []filesystem.FileInfo{ + filesystem.NewFileInfo(path.MustNewComponent("dir"), filesystem.FileTypeDirectory, false), + filesystem.NewFileInfo(path.MustNewComponent("file"), filesystem.FileTypeRegularFile, false), + }) + + // Validate subdirectory listing. + child, err := d.LookupChild(path.MustNewComponent("dir")) + require.NoError(t, err) + subdirectoryFile := mock.NewMockNativeLeaf(ctrl) + subdirectoryFetcher.EXPECT().FetchContents(gomock.Any()).Return(map[path.Component]virtual.InitialNode{ + path.MustNewComponent("file"): virtual.InitialNode{}.FromLeaf(subdirectoryFile), + }, nil) + subdirectoryFile.EXPECT().VirtualGetAttributes( + gomock.Any(), + virtual.AttributesMaskFileType|virtual.AttributesMaskPermissions, + gomock.Any(), + ).Do(func(ctx context.Context, requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetFileType(filesystem.FileTypeRegularFile) + attributes.SetPermissions(virtual.PermissionsRead | virtual.PermissionsWrite) + }) + subdirectory, _ := child.GetPair() + entries, err = subdirectory.ReadDir() + require.NoError(t, err) + require.Equal(t, entries, + []filesystem.FileInfo{ + filesystem.NewFileInfo(path.MustNewComponent("file"), filesystem.FileTypeRegularFile, false), + }) +} + +func TestInMemoryPrepopulatedDirectoryCreateChildrenInRemovedDirectory(t *testing.T) { + ctrl := gomock.NewController(t) + + fileAllocator := mock.NewMockFileAllocator(ctrl) + symlinkFactory := mock.NewMockSymlinkFactory(ctrl) + errorLogger := mock.NewMockErrorLogger(ctrl) + 
handleAllocator := mock.NewMockStatefulHandleAllocator(ctrl) + dHandle := inMemoryPrepopulatedDirectoryExpectMkdir(ctrl, handleAllocator) + d := virtual.NewInMemoryPrepopulatedDirectory(fileAllocator, symlinkFactory, errorLogger, handleAllocator, sort.Sort, hiddenFilesPatternForTesting.MatchString, clock.SystemClock) + + // Create a reference to a removed child directory. + childHandle := inMemoryPrepopulatedDirectoryExpectMkdir(ctrl, handleAllocator) + child, err := d.CreateAndEnterPrepopulatedDirectory(path.MustNewComponent("directory")) + require.NoError(t, err) + dHandle.EXPECT().NotifyRemoval(path.MustNewComponent("directory")) + childHandle.EXPECT().Release() + require.NoError(t, d.Remove(path.MustNewComponent("directory"))) + + // Merging files into the removed directory should fail. + require.Equal(t, syscall.ENOENT, child.CreateChildren(map[path.Component]virtual.InitialNode{}, false)) +} + +func TestInMemoryPrepopulatedDirectoryInstallHooks(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + // Initial top-level directory with custom hooks installed. + fileAllocator1 := mock.NewMockFileAllocator(ctrl) + symlinkFactory1 := mock.NewMockSymlinkFactory(ctrl) + errorLogger1 := mock.NewMockErrorLogger(ctrl) + handleAllocator := mock.NewMockStatefulHandleAllocator(ctrl) + inMemoryPrepopulatedDirectoryExpectMkdir(ctrl, handleAllocator) + d := virtual.NewInMemoryPrepopulatedDirectory(fileAllocator1, symlinkFactory1, errorLogger1, handleAllocator, sort.Sort, hiddenFilesPatternForTesting.MatchString, clock.SystemClock) + fileAllocator2 := mock.NewMockFileAllocator(ctrl) + errorLogger2 := mock.NewMockErrorLogger(ctrl) + d.InstallHooks(fileAllocator2, errorLogger2) + + // Validate that the top-level directory uses both the new file + // allocator and error logger. + fileAllocator2.EXPECT().NewFile(false, uint64(0), virtual.ShareMaskWrite). 
+ Return(nil, virtual.StatusErrIO) + var attr virtual.Attributes + _, _, _, s := d.VirtualOpenChild( + ctx, + path.MustNewComponent("foo"), + virtual.ShareMaskWrite, + (&virtual.Attributes{}).SetPermissions(virtual.PermissionsRead|virtual.PermissionsWrite), + nil, + virtual.AttributesMask(0), + &attr) + require.Equal(t, virtual.StatusErrIO, s) + + // Validate that a subdirectory uses the new file allocator + // and error logger as well. + inMemoryPrepopulatedDirectoryExpectMkdir(ctrl, handleAllocator) + child, err := d.CreateAndEnterPrepopulatedDirectory(path.MustNewComponent("dir")) + require.NoError(t, err) + fileAllocator2.EXPECT().NewFile(false, uint64(0), virtual.ShareMaskWrite). + Return(nil, virtual.StatusErrIO) + _, _, _, s = child.VirtualOpenChild( + ctx, + path.MustNewComponent("foo"), + virtual.ShareMaskWrite, + (&virtual.Attributes{}).SetPermissions(virtual.PermissionsRead|virtual.PermissionsWrite), + nil, + virtual.AttributesMask(0), + &attr) + require.Equal(t, virtual.StatusErrIO, s) +} + +func TestInMemoryPrepopulatedDirectoryFilterChildren(t *testing.T) { + ctrl := gomock.NewController(t) + + fileAllocator := mock.NewMockFileAllocator(ctrl) + symlinkFactory := mock.NewMockSymlinkFactory(ctrl) + errorLogger := mock.NewMockErrorLogger(ctrl) + handleAllocator := mock.NewMockStatefulHandleAllocator(ctrl) + dHandle := inMemoryPrepopulatedDirectoryExpectMkdir(ctrl, handleAllocator) + d := virtual.NewInMemoryPrepopulatedDirectory(fileAllocator, symlinkFactory, errorLogger, handleAllocator, sort.Sort, hiddenFilesPatternForTesting.MatchString, clock.SystemClock) + + // In the initial state, InMemoryPrepopulatedDirectory will have + // an EmptyInitialContentsFetcher associated with it. 
+ childFilter1 := mock.NewMockChildFilter(ctrl) + childFilter1.EXPECT().Call(virtual.InitialNode{}.FromDirectory(virtual.EmptyInitialContentsFetcher), gomock.Any()).Return(true) + require.NoError(t, d.FilterChildren(childFilter1.Call)) + + // After attempting to access the directory's contents, the + // InitialContentsFetcher should be evaluated. Successive + // FilterChildren() calls will no longer report it. + entries, err := d.ReadDir() + require.NoError(t, err) + require.Empty(t, entries) + + childFilter2 := mock.NewMockChildFilter(ctrl) + require.NoError(t, d.FilterChildren(childFilter2.Call)) + + // Create some children and call FilterChildren() again. All + // children should be reported. Remove some of them. + inMemoryPrepopulatedDirectoryExpectMkdir(ctrl, handleAllocator) + directory1 := mock.NewMockInitialContentsFetcher(ctrl) + inMemoryPrepopulatedDirectoryExpectMkdir(ctrl, handleAllocator) + directory2 := mock.NewMockInitialContentsFetcher(ctrl) + leaf1 := mock.NewMockNativeLeaf(ctrl) + leaf2 := mock.NewMockNativeLeaf(ctrl) + require.NoError(t, d.CreateChildren(map[path.Component]virtual.InitialNode{ + path.MustNewComponent("directory1"): virtual.InitialNode{}.FromDirectory(directory1), + path.MustNewComponent("directory2"): virtual.InitialNode{}.FromDirectory(directory2), + path.MustNewComponent("leaf1"): virtual.InitialNode{}.FromLeaf(leaf1), + path.MustNewComponent("leaf2"): virtual.InitialNode{}.FromLeaf(leaf2), + }, false)) + + childFilter3 := mock.NewMockChildFilter(ctrl) + childFilter3.EXPECT().Call(virtual.InitialNode{}.FromDirectory(directory1), gomock.Any()). + DoAndReturn(func(initialNode virtual.InitialNode, remove func() error) bool { + require.NoError(t, remove()) + return true + }) + childFilter3.EXPECT().Call(virtual.InitialNode{}.FromDirectory(directory2), gomock.Any()).Return(true) + childFilter3.EXPECT().Call(virtual.InitialNode{}.FromLeaf(leaf1), gomock.Any()). 
+ DoAndReturn(func(initialNode virtual.InitialNode, remove func() error) bool { + leaf1.EXPECT().Unlink() + dHandle.EXPECT().NotifyRemoval(path.MustNewComponent("leaf1")) + require.NoError(t, remove()) + return true + }) + childFilter3.EXPECT().Call(virtual.InitialNode{}.FromLeaf(leaf2), gomock.Any()).Return(true) + require.NoError(t, d.FilterChildren(childFilter3.Call)) + + // Another call to FilterChildren() should only report the + // children that were not removed previously. + childFilter4 := mock.NewMockChildFilter(ctrl) + childFilter4.EXPECT().Call(virtual.InitialNode{}.FromDirectory(directory2), gomock.Any()).Return(true) + childFilter4.EXPECT().Call(virtual.InitialNode{}.FromLeaf(leaf2), gomock.Any()).Return(true) + require.NoError(t, d.FilterChildren(childFilter4.Call)) +} + +func TestInMemoryPrepopulatedDirectoryVirtualOpenChildFileExists(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + fileAllocator := mock.NewMockFileAllocator(ctrl) + symlinkFactory := mock.NewMockSymlinkFactory(ctrl) + errorLogger := mock.NewMockErrorLogger(ctrl) + handleAllocator := mock.NewMockStatefulHandleAllocator(ctrl) + inMemoryPrepopulatedDirectoryExpectMkdir(ctrl, handleAllocator) + d := virtual.NewInMemoryPrepopulatedDirectory(fileAllocator, symlinkFactory, errorLogger, handleAllocator, sort.Sort, hiddenFilesPatternForTesting.MatchString, clock.SystemClock) + + // Create a file at the desired target location. + leaf := mock.NewMockNativeLeaf(ctrl) + require.NoError(t, d.CreateChildren(map[path.Component]virtual.InitialNode{ + path.MustNewComponent("target"): virtual.InitialNode{}.FromLeaf(leaf), + }, false)) + + // Trying to create the file through FUSE should fail. 
+ var attr virtual.Attributes + _, _, _, s := d.VirtualOpenChild( + ctx, + path.MustNewComponent("target"), + virtual.ShareMaskWrite, + (&virtual.Attributes{}).SetPermissions(virtual.PermissionsRead|virtual.PermissionsWrite), + nil, + virtual.AttributesMask(0), + &attr) + require.Equal(t, virtual.StatusErrExist, s) +} + +func TestInMemoryPrepopulatedDirectoryVirtualOpenChildDirectoryExists(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + fileAllocator := mock.NewMockFileAllocator(ctrl) + symlinkFactory := mock.NewMockSymlinkFactory(ctrl) + errorLogger := mock.NewMockErrorLogger(ctrl) + handleAllocator := mock.NewMockStatefulHandleAllocator(ctrl) + inMemoryPrepopulatedDirectoryExpectMkdir(ctrl, handleAllocator) + d := virtual.NewInMemoryPrepopulatedDirectory(fileAllocator, symlinkFactory, errorLogger, handleAllocator, sort.Sort, hiddenFilesPatternForTesting.MatchString, clock.SystemClock) + + // Create a directory at the desired target location. + inMemoryPrepopulatedDirectoryExpectMkdir(ctrl, handleAllocator) + require.NoError(t, d.CreateChildren(map[path.Component]virtual.InitialNode{ + path.MustNewComponent("target"): virtual.InitialNode{}.FromDirectory(virtual.EmptyInitialContentsFetcher), + }, false)) + + // Trying to create the file through FUSE should fail. + var attr virtual.Attributes + _, _, _, s := d.VirtualOpenChild( + ctx, + path.MustNewComponent("target"), + virtual.ShareMaskWrite, + (&virtual.Attributes{}).SetPermissions(virtual.PermissionsRead|virtual.PermissionsWrite), + nil, + virtual.AttributesMask(0), + &attr) + require.Equal(t, virtual.StatusErrExist, s) +} + +func TestInMemoryPrepopulatedDirectoryVirtualOpenChildAllocationFailure(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + fileAllocator := mock.NewMockFileAllocator(ctrl) + symlinkFactory := mock.NewMockSymlinkFactory(ctrl) + fileAllocator.EXPECT().NewFile(false, uint64(0), virtual.ShareMaskWrite). 
+ Return(nil, virtual.StatusErrIO) + errorLogger := mock.NewMockErrorLogger(ctrl) + handleAllocator := mock.NewMockStatefulHandleAllocator(ctrl) + inMemoryPrepopulatedDirectoryExpectMkdir(ctrl, handleAllocator) + d := virtual.NewInMemoryPrepopulatedDirectory(fileAllocator, symlinkFactory, errorLogger, handleAllocator, sort.Sort, hiddenFilesPatternForTesting.MatchString, clock.SystemClock) + + // File allocation errors should translate to EIO. The actual + // error should get forwarded to the error logger. + var attr virtual.Attributes + _, _, _, s := d.VirtualOpenChild( + ctx, + path.MustNewComponent("target"), + virtual.ShareMaskWrite, + (&virtual.Attributes{}).SetPermissions(virtual.PermissionsRead|virtual.PermissionsWrite), + nil, + virtual.AttributesMask(0), + &attr) + require.Equal(t, virtual.StatusErrIO, s) +} + +func TestInMemoryPrepopulatedDirectoryVirtualOpenChildInRemovedDirectory(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + fileAllocator := mock.NewMockFileAllocator(ctrl) + symlinkFactory := mock.NewMockSymlinkFactory(ctrl) + errorLogger := mock.NewMockErrorLogger(ctrl) + handleAllocator := mock.NewMockStatefulHandleAllocator(ctrl) + dHandle := inMemoryPrepopulatedDirectoryExpectMkdir(ctrl, handleAllocator) + d := virtual.NewInMemoryPrepopulatedDirectory(fileAllocator, symlinkFactory, errorLogger, handleAllocator, sort.Sort, hiddenFilesPatternForTesting.MatchString, clock.SystemClock) + + // Create a reference to a removed child directory. + childHandle := inMemoryPrepopulatedDirectoryExpectMkdir(ctrl, handleAllocator) + child, err := d.CreateAndEnterPrepopulatedDirectory(path.MustNewComponent("directory")) + require.NoError(t, err) + dHandle.EXPECT().NotifyRemoval(path.MustNewComponent("directory")) + childHandle.EXPECT().Release() + require.NoError(t, d.Remove(path.MustNewComponent("directory"))) + + // Trying to create the file through FUSE should return ENOENT. 
+ var attr virtual.Attributes + _, _, _, s := child.VirtualOpenChild( + ctx, + path.MustNewComponent("target"), + virtual.ShareMaskWrite, + (&virtual.Attributes{}).SetPermissions(virtual.PermissionsRead|virtual.PermissionsWrite), + nil, + virtual.AttributesMask(0), + &attr) + require.Equal(t, virtual.StatusErrNoEnt, s) + } + + func TestInMemoryPrepopulatedDirectoryVirtualOpenChildSuccess(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + fileAllocator := mock.NewMockFileAllocator(ctrl) + symlinkFactory := mock.NewMockSymlinkFactory(ctrl) + child := mock.NewMockNativeLeaf(ctrl) + fileAllocator.EXPECT().NewFile(false, uint64(0), virtual.ShareMaskWrite). + Return(child, virtual.StatusOK) + child.EXPECT().VirtualGetAttributes( + ctx, + virtual.AttributesMaskInodeNumber, + gomock.Any(), + ).Do(func(ctx context.Context, requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetInodeNumber(123) + }) + child.EXPECT().VirtualGetAttributes( + gomock.Any(), + virtual.AttributesMaskFileType|virtual.AttributesMaskPermissions, + gomock.Any(), + ).Do(func(ctx context.Context, requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetFileType(filesystem.FileTypeRegularFile) + attributes.SetPermissions(virtual.PermissionsRead) + }) + errorLogger := mock.NewMockErrorLogger(ctrl) + handleAllocator := mock.NewMockStatefulHandleAllocator(ctrl) + inMemoryPrepopulatedDirectoryExpectMkdir(ctrl, handleAllocator) + d := virtual.NewInMemoryPrepopulatedDirectory(fileAllocator, symlinkFactory, errorLogger, handleAllocator, sort.Sort, hiddenFilesPatternForTesting.MatchString, clock.SystemClock) + + // Creation of the file should fully succeed. The file + // should be present within the directory afterwards. 
+ var attr virtual.Attributes + newChild, respected, changeInfo, s := d.VirtualOpenChild( + ctx, + path.MustNewComponent("target"), + virtual.ShareMaskWrite, + (&virtual.Attributes{}).SetPermissions(virtual.PermissionsRead|virtual.PermissionsWrite), + nil, + virtual.AttributesMaskInodeNumber, + &attr) + require.Equal(t, virtual.StatusOK, s) + require.Equal(t, child, newChild) + require.Equal(t, virtual.AttributesMaskPermissions, respected) + require.Equal(t, virtual.ChangeInfo{ + Before: 0, + After: 1, + }, changeInfo) + require.Equal(t, *(&virtual.Attributes{}).SetInodeNumber(123), attr) + + entries, err := d.ReadDir() + require.NoError(t, err) + require.Equal(t, entries, + []filesystem.FileInfo{ + filesystem.NewFileInfo(path.MustNewComponent("target"), filesystem.FileTypeRegularFile, false), + }) +} + +func TestInMemoryPrepopulatedDirectoryVirtualGetAttributes(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + fileAllocator := mock.NewMockFileAllocator(ctrl) + symlinkFactory := mock.NewMockSymlinkFactory(ctrl) + errorLogger := mock.NewMockErrorLogger(ctrl) + handleAllocator := mock.NewMockStatefulHandleAllocator(ctrl) + dHandle := inMemoryPrepopulatedDirectoryExpectMkdir(ctrl, handleAllocator) + clock := mock.NewMockClock(ctrl) + clock.EXPECT().Now().Return(time.Unix(1000, 0)) + d := virtual.NewInMemoryPrepopulatedDirectory(fileAllocator, symlinkFactory, errorLogger, handleAllocator, sort.Sort, hiddenFilesPatternForTesting.MatchString, clock) + + dHandle.EXPECT().GetAttributes(inMemoryPrepopulatedDirectoryAttributesMask, gomock.Any()). + Do(func(attributesMask virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetInodeNumber(100) + }) + var attr1 virtual.Attributes + d.VirtualGetAttributes(ctx, inMemoryPrepopulatedDirectoryAttributesMask, &attr1) + require.Equal( + t, + *(&virtual.Attributes{}). + SetChangeID(0). + SetFileType(filesystem.FileTypeDirectory). + SetInodeNumber(100). 
+ SetLastDataModificationTime(time.Unix(1000, 0)). + SetLinkCount(virtual.ImplicitDirectoryLinkCount). + SetPermissions(virtual.PermissionsRead | virtual.PermissionsWrite | virtual.PermissionsExecute). + SetSizeBytes(0), + attr1) +} + +func TestInMemoryPrepopulatedDirectoryVirtualLinkExists(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + fileAllocator := mock.NewMockFileAllocator(ctrl) + symlinkFactory := mock.NewMockSymlinkFactory(ctrl) + child := mock.NewMockNativeLeaf(ctrl) + errorLogger := mock.NewMockErrorLogger(ctrl) + handleAllocator := mock.NewMockStatefulHandleAllocator(ctrl) + inMemoryPrepopulatedDirectoryExpectMkdir(ctrl, handleAllocator) + d := virtual.NewInMemoryPrepopulatedDirectory(fileAllocator, symlinkFactory, errorLogger, handleAllocator, sort.Sort, hiddenFilesPatternForTesting.MatchString, clock.SystemClock) + + // Attempting to link to a file that already exists should fail. + inMemoryPrepopulatedDirectoryExpectMkdir(ctrl, handleAllocator) + require.NoError(t, d.CreateChildren(map[path.Component]virtual.InitialNode{ + path.MustNewComponent("dir"): virtual.InitialNode{}.FromDirectory(virtual.EmptyInitialContentsFetcher), + }, false)) + var attr virtual.Attributes + _, s := d.VirtualLink(ctx, path.MustNewComponent("dir"), child, virtual.AttributesMask(0), &attr) + require.Equal(t, virtual.StatusErrExist, s) +} + +func TestInMemoryPrepopulatedDirectoryVirtualLinkInRemovedDirectory(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + fileAllocator := mock.NewMockFileAllocator(ctrl) + symlinkFactory := mock.NewMockSymlinkFactory(ctrl) + target := mock.NewMockNativeLeaf(ctrl) + errorLogger := mock.NewMockErrorLogger(ctrl) + handleAllocator := mock.NewMockStatefulHandleAllocator(ctrl) + dHandle := inMemoryPrepopulatedDirectoryExpectMkdir(ctrl, handleAllocator) + d := virtual.NewInMemoryPrepopulatedDirectory(fileAllocator, symlinkFactory, errorLogger, handleAllocator, sort.Sort, 
hiddenFilesPatternForTesting.MatchString, clock.SystemClock) + + // Create a reference to a removed child directory. + childHandle := inMemoryPrepopulatedDirectoryExpectMkdir(ctrl, handleAllocator) + child, err := d.CreateAndEnterPrepopulatedDirectory(path.MustNewComponent("directory")) + require.NoError(t, err) + dHandle.EXPECT().NotifyRemoval(path.MustNewComponent("directory")) + childHandle.EXPECT().Release() + require.NoError(t, d.Remove(path.MustNewComponent("directory"))) + + // Linking a file into it should fail with ENOENT. + var attr virtual.Attributes + _, s := child.VirtualLink(ctx, path.MustNewComponent("target"), target, virtual.AttributesMask(0), &attr) + require.Equal(t, virtual.StatusErrNoEnt, s) +} + +func TestInMemoryPrepopulatedDirectoryVirtualLinkNotNativeLeaf(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + fileAllocator := mock.NewMockFileAllocator(ctrl) + symlinkFactory := mock.NewMockSymlinkFactory(ctrl) + errorLogger := mock.NewMockErrorLogger(ctrl) + handleAllocator := mock.NewMockStatefulHandleAllocator(ctrl) + inMemoryPrepopulatedDirectoryExpectMkdir(ctrl, handleAllocator) + d := virtual.NewInMemoryPrepopulatedDirectory(fileAllocator, symlinkFactory, errorLogger, handleAllocator, sort.Sort, hiddenFilesPatternForTesting.MatchString, clock.SystemClock) + + // Trying to link a file that does not implement NativeLeaf is + // not possible. We can only store leaf nodes that implement + // this interface. 
+ child := mock.NewMockVirtualLeaf(ctrl) + var attr virtual.Attributes + _, s := d.VirtualLink(ctx, path.MustNewComponent("target"), child, virtual.AttributesMask(0), &attr) + require.Equal(t, virtual.StatusErrXDev, s) +} + +func TestInMemoryPrepopulatedDirectoryVirtualLinkStale(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + fileAllocator := mock.NewMockFileAllocator(ctrl) + symlinkFactory := mock.NewMockSymlinkFactory(ctrl) + errorLogger := mock.NewMockErrorLogger(ctrl) + handleAllocator := mock.NewMockStatefulHandleAllocator(ctrl) + inMemoryPrepopulatedDirectoryExpectMkdir(ctrl, handleAllocator) + d := virtual.NewInMemoryPrepopulatedDirectory(fileAllocator, symlinkFactory, errorLogger, handleAllocator, sort.Sort, hiddenFilesPatternForTesting.MatchString, clock.SystemClock) + + // Attempting to link a file that has already been removed + // should fail. + child := mock.NewMockNativeLeaf(ctrl) + child.EXPECT().Link().Return(virtual.StatusErrStale) + + var attr virtual.Attributes + _, s := d.VirtualLink(ctx, path.MustNewComponent("target"), child, virtual.AttributesMaskInodeNumber, &attr) + require.Equal(t, virtual.StatusErrStale, s) +} + +func TestInMemoryPrepopulatedDirectoryVirtualLinkSuccess(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + fileAllocator := mock.NewMockFileAllocator(ctrl) + symlinkFactory := mock.NewMockSymlinkFactory(ctrl) + child := mock.NewMockNativeLeaf(ctrl) + child.EXPECT().Link() + child.EXPECT().VirtualGetAttributes( + ctx, + virtual.AttributesMaskInodeNumber, + gomock.Any(), + ).Do(func(ctx context.Context, requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetInodeNumber(123) + }) + errorLogger := mock.NewMockErrorLogger(ctrl) + handleAllocator := mock.NewMockStatefulHandleAllocator(ctrl) + inMemoryPrepopulatedDirectoryExpectMkdir(ctrl, handleAllocator) + d := virtual.NewInMemoryPrepopulatedDirectory(fileAllocator, symlinkFactory, errorLogger, 
handleAllocator, sort.Sort, hiddenFilesPatternForTesting.MatchString, clock.SystemClock) + + // We should return the attributes of the existing leaf. + var attr virtual.Attributes + changeInfo, s := d.VirtualLink(ctx, path.MustNewComponent("target"), child, virtual.AttributesMaskInodeNumber, &attr) + require.Equal(t, virtual.StatusOK, s) + require.Equal(t, virtual.ChangeInfo{ + Before: 0, + After: 1, + }, changeInfo) + require.Equal(t, *(&virtual.Attributes{}).SetInodeNumber(123), attr) +} + +func TestInMemoryPrepopulatedDirectoryVirtualLookup(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + fileAllocator := mock.NewMockFileAllocator(ctrl) + symlinkFactory := mock.NewMockSymlinkFactory(ctrl) + errorLogger := mock.NewMockErrorLogger(ctrl) + handleAllocator := mock.NewMockStatefulHandleAllocator(ctrl) + inMemoryPrepopulatedDirectoryExpectMkdir(ctrl, handleAllocator) + clock := mock.NewMockClock(ctrl) + clock.EXPECT().Now().Return(time.Unix(1000, 0)) + d := virtual.NewInMemoryPrepopulatedDirectory(fileAllocator, symlinkFactory, errorLogger, handleAllocator, sort.Sort, hiddenFilesPatternForTesting.MatchString, clock) + + // Create an example directory and file that we'll try to look up. 
+ subdirHandle := inMemoryPrepopulatedDirectoryExpectMkdir(ctrl, handleAllocator) + file := mock.NewMockNativeLeaf(ctrl) + clock.EXPECT().Now().Return(time.Unix(1001, 0)).Times(3) + require.NoError(t, d.CreateChildren(map[path.Component]virtual.InitialNode{ + path.MustNewComponent("dir"): virtual.InitialNode{}.FromDirectory(virtual.EmptyInitialContentsFetcher), + path.MustNewComponent("file"): virtual.InitialNode{}.FromLeaf(file), + }, false)) + + t.Run("NotFound", func(*testing.T) { + var attr virtual.Attributes + _, s := d.VirtualLookup(ctx, path.MustNewComponent("missing"), virtual.AttributesMask(0), &attr) + require.Equal(t, virtual.StatusErrNoEnt, s) + }) + + t.Run("FoundDirectory", func(*testing.T) { + subdirHandle.EXPECT().GetAttributes(inMemoryPrepopulatedDirectoryAttributesMask, gomock.Any()). + Do(func(attributesMask virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetInodeNumber(101) + }) + + var attr virtual.Attributes + newChild, s := d.VirtualLookup(ctx, path.MustNewComponent("dir"), inMemoryPrepopulatedDirectoryAttributesMask, &attr) + require.Equal(t, virtual.StatusOK, s) + require.Equal( + t, + *(&virtual.Attributes{}). + SetChangeID(0). + SetFileType(filesystem.FileTypeDirectory). + SetInodeNumber(101). + SetLastDataModificationTime(time.Unix(1001, 0)). + SetLinkCount(virtual.ImplicitDirectoryLinkCount). + SetPermissions(virtual.PermissionsRead | virtual.PermissionsWrite | virtual.PermissionsExecute). 
+ SetSizeBytes(0), + attr) + + newDirectory, newLeaf := newChild.GetPair() + require.NotNil(t, newDirectory) + require.Nil(t, newLeaf) + }) + + t.Run("FoundFile", func(*testing.T) { + file.EXPECT().VirtualGetAttributes( + ctx, + virtual.AttributesMaskInodeNumber, + gomock.Any(), + ).Do(func(ctx context.Context, requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetInodeNumber(3) + }) + + var attr virtual.Attributes + newChild, s := d.VirtualLookup(ctx, path.MustNewComponent("file"), virtual.AttributesMaskInodeNumber, &attr) + require.Equal(t, virtual.StatusOK, s) + require.Equal(t, virtual.DirectoryChild{}.FromLeaf(file), newChild) + require.Equal(t, *(&virtual.Attributes{}).SetInodeNumber(3), attr) + }) +} + +func TestInMemoryPrepopulatedDirectoryVirtualMkdir(t *testing.T) { + ctrl := gomock.NewController(t) + + fileAllocator := mock.NewMockFileAllocator(ctrl) + symlinkFactory := mock.NewMockSymlinkFactory(ctrl) + errorLogger := mock.NewMockErrorLogger(ctrl) + handleAllocator := mock.NewMockStatefulHandleAllocator(ctrl) + inMemoryPrepopulatedDirectoryExpectMkdir(ctrl, handleAllocator) + clock := mock.NewMockClock(ctrl) + clock.EXPECT().Now().Return(time.Unix(1000, 0)) + d := virtual.NewInMemoryPrepopulatedDirectory(fileAllocator, symlinkFactory, errorLogger, handleAllocator, sort.Sort, hiddenFilesPatternForTesting.MatchString, clock) + + t.Run("FailureInitialContentsFetcher", func(t *testing.T) { + // Create a subdirectory that has an initial contents fetcher. 
+ inMemoryPrepopulatedDirectoryExpectMkdir(ctrl, handleAllocator) + initialContentsFetcher := mock.NewMockInitialContentsFetcher(ctrl) + clock.EXPECT().Now().Return(time.Unix(1001, 0)).Times(2) + require.NoError(t, d.CreateChildren(map[path.Component]virtual.InitialNode{ + path.MustNewComponent("subdir"): virtual.InitialNode{}.FromDirectory(initialContentsFetcher), + }, false)) + + child, err := d.LookupChild(path.MustNewComponent("subdir")) + require.NoError(t, err) + + childDirectory, childLeaf := child.GetPair() + require.NotNil(t, childDirectory) + require.Nil(t, childLeaf) + + // Creating a directory in a directory whose initial + // contents cannot be fetched, should fail. The reason + // being that we can't accurately determine whether a + // file under that name is already present. + initialContentsFetcher.EXPECT().FetchContents(gomock.Any()). + Return(nil, status.Error(codes.Internal, "Network error")) + errorLogger.EXPECT().Log(testutil.EqStatus(t, status.Error(codes.Internal, "Failed to initialize directory: Network error"))) + + _, _, s := childDirectory.VirtualMkdir(path.MustNewComponent("subsubdir"), 0, &virtual.Attributes{}) + require.Equal(t, virtual.StatusErrIO, s) + }) + + t.Run("FailureExist", func(t *testing.T) { + // The operation should fail if a file or directory + // already exists under the provided name. 
+ existingFile := mock.NewMockNativeLeaf(ctrl) + clock.EXPECT().Now().Return(time.Unix(1002, 0)) + require.NoError(t, d.CreateChildren(map[path.Component]virtual.InitialNode{ + path.MustNewComponent("existing_file"): virtual.InitialNode{}.FromLeaf(existingFile), + }, false)) + + _, _, s := d.VirtualMkdir(path.MustNewComponent("existing_file"), 0, &virtual.Attributes{}) + require.Equal(t, virtual.StatusErrExist, s) + }) + + t.Run("Success", func(t *testing.T) { + clock.EXPECT().Now().Return(time.Unix(1003, 0)).Times(2) + subdirHandle := inMemoryPrepopulatedDirectoryExpectMkdir(ctrl, handleAllocator) + subdirHandle.EXPECT().GetAttributes(inMemoryPrepopulatedDirectoryAttributesMask, gomock.Any()). + Do(func(attributesMask virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetInodeNumber(101) + }) + + var out virtual.Attributes + leaf, changeInfo, s := d.VirtualMkdir(path.MustNewComponent("dir"), inMemoryPrepopulatedDirectoryAttributesMask, &out) + require.Equal(t, virtual.StatusOK, s) + require.NotNil(t, leaf) + require.Equal(t, virtual.ChangeInfo{ + Before: 2, + After: 3, + }, changeInfo) + require.Equal( + t, + *(&virtual.Attributes{}). + SetChangeID(0). + SetFileType(filesystem.FileTypeDirectory). + SetInodeNumber(101). + SetLastDataModificationTime(time.Unix(1003, 0)). + SetLinkCount(virtual.ImplicitDirectoryLinkCount). + SetPermissions(virtual.PermissionsRead | virtual.PermissionsWrite | virtual.PermissionsExecute). 
+ SetSizeBytes(0), + out) + }) +} + +func TestInMemoryPrepopulatedDirectoryVirtualMknodExists(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + fileAllocator := mock.NewMockFileAllocator(ctrl) + symlinkFactory := mock.NewMockSymlinkFactory(ctrl) + errorLogger := mock.NewMockErrorLogger(ctrl) + handleAllocator := mock.NewMockStatefulHandleAllocator(ctrl) + inMemoryPrepopulatedDirectoryExpectMkdir(ctrl, handleAllocator) + d := virtual.NewInMemoryPrepopulatedDirectory(fileAllocator, symlinkFactory, errorLogger, handleAllocator, sort.Sort, hiddenFilesPatternForTesting.MatchString, clock.SystemClock) + + // Files may not be overwritten by mknod(). + inMemoryPrepopulatedDirectoryExpectMkdir(ctrl, handleAllocator) + require.NoError(t, d.CreateChildren(map[path.Component]virtual.InitialNode{ + path.MustNewComponent("dir"): virtual.InitialNode{}.FromDirectory(virtual.EmptyInitialContentsFetcher), + }, false)) + var attr virtual.Attributes + _, _, s := d.VirtualMknod(ctx, path.MustNewComponent("dir"), filesystem.FileTypeFIFO, virtual.AttributesMask(0), &attr) + require.Equal(t, virtual.StatusErrExist, s) +} + +func TestInMemoryPrepopulatedDirectoryVirtualMknodSuccess(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + fileAllocator := mock.NewMockFileAllocator(ctrl) + symlinkFactory := mock.NewMockSymlinkFactory(ctrl) + errorLogger := mock.NewMockErrorLogger(ctrl) + handleAllocator := mock.NewMockStatefulHandleAllocator(ctrl) + inMemoryPrepopulatedDirectoryExpectMkdir(ctrl, handleAllocator) + d := virtual.NewInMemoryPrepopulatedDirectory(fileAllocator, symlinkFactory, errorLogger, handleAllocator, sort.Sort, hiddenFilesPatternForTesting.MatchString, clock.SystemClock) + + // Create a FIFO and a UNIX domain socket. + fifoHandleAllocation := mock.NewMockStatefulHandleAllocation(ctrl) + handleAllocator.EXPECT().New().Return(fifoHandleAllocation) + fifoHandleAllocation.EXPECT().AsNativeLeaf(gomock.Any()). 
+ DoAndReturn(func(leaf virtual.NativeLeaf) virtual.NativeLeaf { return leaf }) + var fifoAttr virtual.Attributes + fifoNode, changeInfo, s := d.VirtualMknod(ctx, path.MustNewComponent("fifo"), filesystem.FileTypeFIFO, specialFileAttributesMask, &fifoAttr) + require.Equal(t, virtual.StatusOK, s) + require.NotNil(t, fifoNode) + require.Equal(t, virtual.ChangeInfo{ + Before: 0, + After: 1, + }, changeInfo) + require.Equal( + t, + *(&virtual.Attributes{}). + SetChangeID(0). + SetPermissions(virtual.PermissionsRead | virtual.PermissionsWrite). + SetFileType(filesystem.FileTypeFIFO). + SetSizeBytes(0), + fifoAttr) + + socketHandleAllocation := mock.NewMockStatefulHandleAllocation(ctrl) + handleAllocator.EXPECT().New().Return(socketHandleAllocation) + socketHandleAllocation.EXPECT().AsNativeLeaf(gomock.Any()). + DoAndReturn(func(leaf virtual.NativeLeaf) virtual.NativeLeaf { return leaf }) + var socketAttr virtual.Attributes + socketNode, changeInfo, s := d.VirtualMknod(ctx, path.MustNewComponent("socket"), filesystem.FileTypeSocket, specialFileAttributesMask, &socketAttr) + require.Equal(t, virtual.StatusOK, s) + require.NotNil(t, socketNode) + require.Equal(t, virtual.ChangeInfo{ + Before: 1, + After: 2, + }, changeInfo) + require.Equal( + t, + *(&virtual.Attributes{}). + SetChangeID(0). + SetPermissions(virtual.PermissionsRead | virtual.PermissionsWrite). + SetFileType(filesystem.FileTypeSocket). + SetSizeBytes(0), + socketAttr) + + // Check whether the devices are reported properly using the + // native ReadDir() method. + entries, err := d.ReadDir() + require.NoError(t, err) + require.Equal(t, + []filesystem.FileInfo{ + filesystem.NewFileInfo(path.MustNewComponent("fifo"), filesystem.FileTypeFIFO, false), + filesystem.NewFileInfo(path.MustNewComponent("socket"), filesystem.FileTypeSocket, false), + }, entries) + + // Check whether the devices are reported properly using the + // VirtualReadDir() method. 
+	reporter := mock.NewMockDirectoryEntryReporter(ctrl)
+	reporter.EXPECT().ReportEntry(uint64(1), path.MustNewComponent("fifo"), virtual.DirectoryChild{}.FromLeaf(fifoNode), &fifoAttr).Return(true)
+	reporter.EXPECT().ReportEntry(uint64(2), path.MustNewComponent("socket"), virtual.DirectoryChild{}.FromLeaf(socketNode), &socketAttr).Return(true)
+	require.Equal(t, virtual.StatusOK, d.VirtualReadDir(ctx, 0, specialFileAttributesMask, reporter))
+}
+
+func TestInMemoryPrepopulatedDirectoryVirtualReadDir(t *testing.T) {
+	ctrl, ctx := gomock.WithContext(context.Background(), t)
+
+	fileAllocator := mock.NewMockFileAllocator(ctrl)
+	symlinkFactory := mock.NewMockSymlinkFactory(ctrl)
+	errorLogger := mock.NewMockErrorLogger(ctrl)
+	handleAllocator := mock.NewMockStatefulHandleAllocator(ctrl)
+	inMemoryPrepopulatedDirectoryExpectMkdir(ctrl, handleAllocator)
+	clock := mock.NewMockClock(ctrl)
+	clock.EXPECT().Now().Return(time.Unix(1000, 0))
+	d := virtual.NewInMemoryPrepopulatedDirectory(fileAllocator, symlinkFactory, errorLogger, handleAllocator, sort.Sort, hiddenFilesPatternForTesting.MatchString, clock)
+
+	// Populate the directory with a subdirectory that is
+	// uninitialized and a file.
+	childDirectoryHandle := inMemoryPrepopulatedDirectoryExpectMkdir(ctrl, handleAllocator)
+	childDirectory := mock.NewMockInitialContentsFetcher(ctrl)
+	childFile1 := mock.NewMockNativeLeaf(ctrl)
+	childFile2 := mock.NewMockNativeLeaf(ctrl)
+	clock.EXPECT().Now().Return(time.Unix(1001, 0)).Times(4)
+	require.NoError(t, d.CreateChildren(map[path.Component]virtual.InitialNode{
+		path.MustNewComponent("directory"): virtual.InitialNode{}.FromDirectory(childDirectory),
+		path.MustNewComponent("file"): virtual.InitialNode{}.FromLeaf(childFile1),
+		path.MustNewComponent("._hidden_file"): virtual.InitialNode{}.FromLeaf(childFile2),
+	}, false))
+
+	// Obtaining the directory listing through VirtualReadDir() should
+	// not cause the child directory to be initialized.
We don't + // depend on any of its properties to populate its DirEntry, nor + // are we returning a handle to it. A successive VirtualLookup() + // call will initialize the directory. + childDirectoryHandle.EXPECT().GetAttributes(inMemoryPrepopulatedDirectoryAttributesMask, gomock.Any()). + Do(func(attributesMask virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetInodeNumber(101) + }) + childFile1.EXPECT().VirtualGetAttributes( + ctx, + inMemoryPrepopulatedDirectoryAttributesMask, + gomock.Any(), + ).Do(func(ctx context.Context, requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetFileType(filesystem.FileTypeRegularFile) + attributes.SetInodeNumber(123) + }) + reporter := mock.NewMockDirectoryEntryReporter(ctrl) + reporter.EXPECT().ReportEntry( + uint64(2), + path.MustNewComponent("directory"), + gomock.Any(), + (&virtual.Attributes{}). + SetChangeID(0). + SetFileType(filesystem.FileTypeDirectory). + SetInodeNumber(101). + SetLastDataModificationTime(time.Unix(1001, 0)). + SetLinkCount(virtual.ImplicitDirectoryLinkCount). + SetPermissions(virtual.PermissionsRead|virtual.PermissionsWrite|virtual.PermissionsExecute). + SetSizeBytes(0), + ).Return(true) + reporter.EXPECT().ReportEntry( + uint64(3), + path.MustNewComponent("file"), + virtual.DirectoryChild{}.FromLeaf(childFile1), + (&virtual.Attributes{}). + SetFileType(filesystem.FileTypeRegularFile). 
+ SetInodeNumber(123), + ).Return(true) + + require.Equal(t, virtual.StatusOK, d.VirtualReadDir(ctx, 0, inMemoryPrepopulatedDirectoryAttributesMask, reporter)) +} + +func TestInMemoryPrepopulatedDirectoryVirtualRenameSelfDirectory(t *testing.T) { + ctrl := gomock.NewController(t) + + fileAllocator := mock.NewMockFileAllocator(ctrl) + symlinkFactory := mock.NewMockSymlinkFactory(ctrl) + errorLogger := mock.NewMockErrorLogger(ctrl) + handleAllocator := mock.NewMockStatefulHandleAllocator(ctrl) + inMemoryPrepopulatedDirectoryExpectMkdir(ctrl, handleAllocator) + d := virtual.NewInMemoryPrepopulatedDirectory(fileAllocator, symlinkFactory, errorLogger, handleAllocator, sort.Sort, hiddenFilesPatternForTesting.MatchString, clock.SystemClock) + + // Renaming a directory to itself should be permitted, even when + // it is not empty. + inMemoryPrepopulatedDirectoryExpectMkdir(ctrl, handleAllocator) + child, err := d.CreateAndEnterPrepopulatedDirectory(path.MustNewComponent("dir")) + require.NoError(t, err) + inMemoryPrepopulatedDirectoryExpectMkdir(ctrl, handleAllocator) + require.NoError(t, child.CreateChildren(map[path.Component]virtual.InitialNode{ + path.MustNewComponent("subdir"): virtual.InitialNode{}.FromDirectory(virtual.EmptyInitialContentsFetcher), + }, false)) + changeInfo1, changeInfo2, s := d.VirtualRename(path.MustNewComponent("dir"), d, path.MustNewComponent("dir")) + require.Equal(t, virtual.StatusOK, s) + require.Equal(t, virtual.ChangeInfo{ + Before: 1, + After: 1, + }, changeInfo1) + require.Equal(t, virtual.ChangeInfo{ + Before: 1, + After: 1, + }, changeInfo2) + + entries, err := d.ReadDir() + require.NoError(t, err) + require.Equal(t, entries, + []filesystem.FileInfo{ + filesystem.NewFileInfo(path.MustNewComponent("dir"), filesystem.FileTypeDirectory, false), + }) +} + +func TestInMemoryPrepopulatedDirectoryVirtualRenameSelfFile(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + fileAllocator := mock.NewMockFileAllocator(ctrl) 
+ symlinkFactory := mock.NewMockSymlinkFactory(ctrl) + errorLogger := mock.NewMockErrorLogger(ctrl) + handleAllocator := mock.NewMockStatefulHandleAllocator(ctrl) + inMemoryPrepopulatedDirectoryExpectMkdir(ctrl, handleAllocator) + d := virtual.NewInMemoryPrepopulatedDirectory(fileAllocator, symlinkFactory, errorLogger, handleAllocator, sort.Sort, hiddenFilesPatternForTesting.MatchString, clock.SystemClock) + + leaf := mock.NewMockNativeLeaf(ctrl) + require.NoError(t, d.CreateChildren(map[path.Component]virtual.InitialNode{ + path.MustNewComponent("a"): virtual.InitialNode{}.FromLeaf(leaf), + }, false)) + + leaf.EXPECT().VirtualGetAttributes( + ctx, + virtual.AttributesMaskInodeNumber, + gomock.Any(), + ).Do(func(ctx context.Context, requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetInodeNumber(3) + }) + var out virtual.Attributes + leaf.EXPECT().Link() + changeInfo, s := d.VirtualLink(ctx, path.MustNewComponent("b"), leaf, virtual.AttributesMaskInodeNumber, &out) + require.Equal(t, virtual.StatusOK, s) + require.Equal(t, virtual.ChangeInfo{ + Before: 1, + After: 2, + }, changeInfo) + require.Equal(t, *(&virtual.Attributes{}).SetInodeNumber(3), out) + + // Renaming a file to itself should have no effect. This even + // applies to hard links. Though not intuitive, this means that + // the source file may continue to exist. 
+ changeInfo1, changeInfo2, s := d.VirtualRename(path.MustNewComponent("a"), d, path.MustNewComponent("b")) + require.Equal(t, virtual.StatusOK, s) + require.Equal(t, virtual.ChangeInfo{ + Before: 2, + After: 2, + }, changeInfo1) + require.Equal(t, virtual.ChangeInfo{ + Before: 2, + After: 2, + }, changeInfo2) + + leaf.EXPECT().VirtualGetAttributes( + gomock.Any(), + virtual.AttributesMaskFileType|virtual.AttributesMaskPermissions, + gomock.Any(), + ).Do(func(ctx context.Context, requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetFileType(filesystem.FileTypeRegularFile) + attributes.SetPermissions(0) + }).Times(2) + entries, err := d.ReadDir() + require.NoError(t, err) + require.Equal(t, entries, + []filesystem.FileInfo{ + filesystem.NewFileInfo(path.MustNewComponent("a"), filesystem.FileTypeRegularFile, false), + filesystem.NewFileInfo(path.MustNewComponent("b"), filesystem.FileTypeRegularFile, false), + }) +} + +func TestInMemoryPrepopulatedDirectoryVirtualRenameDirectoryInRemovedDirectory(t *testing.T) { + ctrl := gomock.NewController(t) + + fileAllocator := mock.NewMockFileAllocator(ctrl) + symlinkFactory := mock.NewMockSymlinkFactory(ctrl) + errorLogger := mock.NewMockErrorLogger(ctrl) + handleAllocator := mock.NewMockStatefulHandleAllocator(ctrl) + dHandle := inMemoryPrepopulatedDirectoryExpectMkdir(ctrl, handleAllocator) + d := virtual.NewInMemoryPrepopulatedDirectory(fileAllocator, symlinkFactory, errorLogger, handleAllocator, sort.Sort, hiddenFilesPatternForTesting.MatchString, clock.SystemClock) + + // Create a reference to a removed child directory. 
+ childHandle := inMemoryPrepopulatedDirectoryExpectMkdir(ctrl, handleAllocator) + child, err := d.CreateAndEnterPrepopulatedDirectory(path.MustNewComponent("removed")) + require.NoError(t, err) + dHandle.EXPECT().NotifyRemoval(path.MustNewComponent("removed")) + childHandle.EXPECT().Release() + require.NoError(t, d.Remove(path.MustNewComponent("removed"))) + + // Moving a directory into it should fail with ENOENT. + inMemoryPrepopulatedDirectoryExpectMkdir(ctrl, handleAllocator) + require.NoError(t, d.CreateChildren(map[path.Component]virtual.InitialNode{ + path.MustNewComponent("dir"): virtual.InitialNode{}.FromDirectory(virtual.EmptyInitialContentsFetcher), + }, false)) + _, _, s := d.VirtualRename(path.MustNewComponent("dir"), child, path.MustNewComponent("dir")) + require.Equal(t, virtual.StatusErrNoEnt, s) + + entries, err := d.ReadDir() + require.NoError(t, err) + require.Equal(t, entries, + []filesystem.FileInfo{ + filesystem.NewFileInfo(path.MustNewComponent("dir"), filesystem.FileTypeDirectory, false), + }) +} + +func TestInMemoryPrepopulatedDirectoryVirtualRenameFileInRemovedDirectory(t *testing.T) { + ctrl := gomock.NewController(t) + + fileAllocator := mock.NewMockFileAllocator(ctrl) + symlinkFactory := mock.NewMockSymlinkFactory(ctrl) + errorLogger := mock.NewMockErrorLogger(ctrl) + handleAllocator := mock.NewMockStatefulHandleAllocator(ctrl) + dHandle := inMemoryPrepopulatedDirectoryExpectMkdir(ctrl, handleAllocator) + d := virtual.NewInMemoryPrepopulatedDirectory(fileAllocator, symlinkFactory, errorLogger, handleAllocator, sort.Sort, hiddenFilesPatternForTesting.MatchString, clock.SystemClock) + + // Create a reference to a removed child directory. 
+ childHandle := inMemoryPrepopulatedDirectoryExpectMkdir(ctrl, handleAllocator) + child, err := d.CreateAndEnterPrepopulatedDirectory(path.MustNewComponent("removed")) + require.NoError(t, err) + dHandle.EXPECT().NotifyRemoval(path.MustNewComponent("removed")) + childHandle.EXPECT().Release() + require.NoError(t, d.Remove(path.MustNewComponent("removed"))) + + // Moving a file into it should fail with ENOENT. + leaf := mock.NewMockNativeLeaf(ctrl) + require.NoError(t, d.CreateChildren(map[path.Component]virtual.InitialNode{ + path.MustNewComponent("file"): virtual.InitialNode{}.FromLeaf(leaf), + }, false)) + _, _, s := d.VirtualRename(path.MustNewComponent("file"), child, path.MustNewComponent("file")) + require.Equal(t, virtual.StatusErrNoEnt, s) + + leaf.EXPECT().VirtualGetAttributes( + gomock.Any(), + virtual.AttributesMaskFileType|virtual.AttributesMaskPermissions, + gomock.Any(), + ).Do(func(ctx context.Context, requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetFileType(filesystem.FileTypeRegularFile) + attributes.SetPermissions(0) + }) + entries, err := d.ReadDir() + require.NoError(t, err) + require.Equal(t, entries, + []filesystem.FileInfo{ + filesystem.NewFileInfo(path.MustNewComponent("file"), filesystem.FileTypeRegularFile, false), + }) +} + +func TestInMemoryPrepopulatedDirectoryVirtualRenameDirectoryTwice(t *testing.T) { + ctrl := gomock.NewController(t) + + fileAllocator := mock.NewMockFileAllocator(ctrl) + symlinkFactory := mock.NewMockSymlinkFactory(ctrl) + errorLogger := mock.NewMockErrorLogger(ctrl) + handleAllocator := mock.NewMockStatefulHandleAllocator(ctrl) + inMemoryPrepopulatedDirectoryExpectMkdir(ctrl, handleAllocator) + d := virtual.NewInMemoryPrepopulatedDirectory(fileAllocator, symlinkFactory, errorLogger, handleAllocator, sort.Sort, hiddenFilesPatternForTesting.MatchString, clock.SystemClock) + + // Create two empty directories. 
+ inMemoryPrepopulatedDirectoryExpectMkdir(ctrl, handleAllocator) + childA, err := d.CreateAndEnterPrepopulatedDirectory(path.MustNewComponent("a")) + require.NoError(t, err) + childBHandle := inMemoryPrepopulatedDirectoryExpectMkdir(ctrl, handleAllocator) + childB, err := d.CreateAndEnterPrepopulatedDirectory(path.MustNewComponent("b")) + require.NoError(t, err) + + // Move "a" to "b" to "c". Afterwards, only "c" should remain. + childBHandle.EXPECT().Release() + changeInfo1, changeInfo2, s := d.VirtualRename(path.MustNewComponent("a"), d, path.MustNewComponent("b")) + require.Equal(t, virtual.StatusOK, s) + require.Equal(t, virtual.ChangeInfo{ + Before: 2, + After: 5, + }, changeInfo1) + require.Equal(t, virtual.ChangeInfo{ + Before: 2, + After: 5, + }, changeInfo2) + changeInfo1, changeInfo2, s = d.VirtualRename(path.MustNewComponent("b"), d, path.MustNewComponent("c")) + require.Equal(t, virtual.StatusOK, s) + require.Equal(t, virtual.ChangeInfo{ + Before: 5, + After: 7, + }, changeInfo1) + require.Equal(t, virtual.ChangeInfo{ + Before: 5, + After: 7, + }, changeInfo2) + + entries, err := d.ReadDir() + require.NoError(t, err) + require.Equal(t, entries, + []filesystem.FileInfo{ + filesystem.NewFileInfo(path.MustNewComponent("c"), filesystem.FileTypeDirectory, false), + }) + + // Directory "a" got moved over "b", meaning that only the + // former should still be usable. The latter has been deleted. 
+ inMemoryPrepopulatedDirectoryExpectMkdir(ctrl, handleAllocator) + require.NoError(t, childA.CreateChildren(map[path.Component]virtual.InitialNode{ + path.MustNewComponent("subdirectory"): virtual.InitialNode{}.FromDirectory(virtual.EmptyInitialContentsFetcher), + }, false)) + require.Equal(t, syscall.ENOENT, childB.CreateChildren(map[path.Component]virtual.InitialNode{ + path.MustNewComponent("subdirectory"): virtual.InitialNode{}.FromDirectory(virtual.EmptyInitialContentsFetcher), + }, false)) +} + +func TestInMemoryPrepopulatedDirectoryVirtualRenameCrossDevice1(t *testing.T) { + ctrl := gomock.NewController(t) + + fileAllocator := mock.NewMockFileAllocator(ctrl) + symlinkFactory := mock.NewMockSymlinkFactory(ctrl) + errorLogger := mock.NewMockErrorLogger(ctrl) + handleAllocator := mock.NewMockStatefulHandleAllocator(ctrl) + inMemoryPrepopulatedDirectoryExpectMkdir(ctrl, handleAllocator) + d1 := virtual.NewInMemoryPrepopulatedDirectory(fileAllocator, symlinkFactory, errorLogger, handleAllocator, sort.Sort, hiddenFilesPatternForTesting.MatchString, clock.SystemClock) + + d2 := mock.NewMockVirtualDirectory(ctrl) + + // Attempting to rename a file to a directory that is of a + // completely different type is not possible. We can only rename + // objects between instances of InMemoryPrepopulatedDirectory. 
+ _, _, s := d1.VirtualRename(path.MustNewComponent("src"), d2, path.MustNewComponent("dst")) + require.Equal(t, virtual.StatusErrXDev, s) +} + +func TestInMemoryPrepopulatedDirectoryVirtualRenameCrossDevice2(t *testing.T) { + ctrl := gomock.NewController(t) + + fileAllocator1 := mock.NewMockFileAllocator(ctrl) + symlinkFactory1 := mock.NewMockSymlinkFactory(ctrl) + errorLogger1 := mock.NewMockErrorLogger(ctrl) + handleAllocator1 := mock.NewMockStatefulHandleAllocator(ctrl) + inMemoryPrepopulatedDirectoryExpectMkdir(ctrl, handleAllocator1) + d1 := virtual.NewInMemoryPrepopulatedDirectory(fileAllocator1, symlinkFactory1, errorLogger1, handleAllocator1, sort.Sort, hiddenFilesPatternForTesting.MatchString, clock.SystemClock) + + fileAllocator2 := mock.NewMockFileAllocator(ctrl) + symlinkFactory2 := mock.NewMockSymlinkFactory(ctrl) + errorLogger2 := mock.NewMockErrorLogger(ctrl) + handleAllocator2 := mock.NewMockStatefulHandleAllocator(ctrl) + inMemoryPrepopulatedDirectoryExpectMkdir(ctrl, handleAllocator2) + d2 := virtual.NewInMemoryPrepopulatedDirectory(fileAllocator2, symlinkFactory2, errorLogger2, handleAllocator2, sort.Sort, hiddenFilesPatternForTesting.MatchString, clock.SystemClock) + + // It should not be possible to rename directories from one + // hierarchy to another, as this completely messes up + // InMemoryPrepopulatedDirectory's internal bookkeeping. 
+	inMemoryPrepopulatedDirectoryExpectMkdir(ctrl, handleAllocator1)
+	require.NoError(t, d1.CreateChildren(map[path.Component]virtual.InitialNode{
+		path.MustNewComponent("src"): virtual.InitialNode{}.FromDirectory(virtual.EmptyInitialContentsFetcher),
+	}, false))
+	inMemoryPrepopulatedDirectoryExpectMkdir(ctrl, handleAllocator2)
+	require.NoError(t, d2.CreateChildren(map[path.Component]virtual.InitialNode{
+		path.MustNewComponent("dst"): virtual.InitialNode{}.FromDirectory(virtual.EmptyInitialContentsFetcher),
+	}, false))
+	_, _, s := d1.VirtualRename(path.MustNewComponent("src"), d2, path.MustNewComponent("dst"))
+	require.Equal(t, virtual.StatusErrXDev, s)
+	_, _, s = d1.VirtualRename(path.MustNewComponent("src"), d2, path.MustNewComponent("nonexistent"))
+	require.Equal(t, virtual.StatusErrXDev, s)
+
+	// Renaming leaf files between directory hierarchies is
+	// completely safe. It's generally not useful to do this, but
+	// even if we disallowed this explicitly, it would still be
+	// possible to achieve this by hardlinking.
+ leaf := mock.NewMockNativeLeaf(ctrl) + require.NoError(t, d1.CreateChildren(map[path.Component]virtual.InitialNode{ + path.MustNewComponent("leaf"): virtual.InitialNode{}.FromLeaf(leaf), + }, false)) + changeInfo1, changeInfo2, s := d1.VirtualRename(path.MustNewComponent("leaf"), d2, path.MustNewComponent("leaf")) + require.Equal(t, virtual.StatusOK, s) + require.Equal(t, virtual.ChangeInfo{ + Before: 2, + After: 3, + }, changeInfo1) + require.Equal(t, virtual.ChangeInfo{ + Before: 1, + After: 2, + }, changeInfo2) +} + +func TestInMemoryPrepopulatedDirectoryVirtualRemove(t *testing.T) { + ctrl := gomock.NewController(t) + + fileAllocator := mock.NewMockFileAllocator(ctrl) + symlinkFactory := mock.NewMockSymlinkFactory(ctrl) + errorLogger := mock.NewMockErrorLogger(ctrl) + handleAllocator := mock.NewMockStatefulHandleAllocator(ctrl) + inMemoryPrepopulatedDirectoryExpectMkdir(ctrl, handleAllocator) + d := virtual.NewInMemoryPrepopulatedDirectory(fileAllocator, symlinkFactory, errorLogger, handleAllocator, sort.Sort, hiddenFilesPatternForTesting.MatchString, clock.SystemClock) + + t.Run("NotFound", func(t *testing.T) { + // Attempting to remove a file that does not exist. + _, s := d.VirtualRemove(path.MustNewComponent("nonexistent"), true, true) + require.Equal(t, virtual.StatusErrNoEnt, s) + }) + + t.Run("NoDirectoryRemoval", func(t *testing.T) { + // Attempting to remove a directory, even though + // directory removal should not be performed. 
+ inMemoryPrepopulatedDirectoryExpectMkdir(ctrl, handleAllocator) + initialContentsFetcher := mock.NewMockInitialContentsFetcher(ctrl) + require.NoError(t, d.CreateChildren(map[path.Component]virtual.InitialNode{ + path.MustNewComponent("no_directory_removal"): virtual.InitialNode{}.FromDirectory(initialContentsFetcher), + }, false)) + + _, s := d.VirtualRemove(path.MustNewComponent("no_directory_removal"), false, true) + require.Equal(t, virtual.StatusErrPerm, s) + }) + + t.Run("NoLeafRemoval", func(t *testing.T) { + // Attempting to remove a leaf, even though leaf removal + // should not be performed. + leaf := mock.NewMockNativeLeaf(ctrl) + require.NoError(t, d.CreateChildren(map[path.Component]virtual.InitialNode{ + path.MustNewComponent("no_file_removal"): virtual.InitialNode{}.FromLeaf(leaf), + }, false)) + + _, s := d.VirtualRemove(path.MustNewComponent("no_file_removal"), true, false) + require.Equal(t, virtual.StatusErrNotDir, s) + }) + + t.Run("ChildDirectoryInitializationFailure", func(t *testing.T) { + // If we cannot load the directory contents, we don't + // know whether the directory is empty and can be + // removed. + inMemoryPrepopulatedDirectoryExpectMkdir(ctrl, handleAllocator) + initialContentsFetcher := mock.NewMockInitialContentsFetcher(ctrl) + require.NoError(t, d.CreateChildren(map[path.Component]virtual.InitialNode{ + path.MustNewComponent("broken_directory"): virtual.InitialNode{}.FromDirectory(initialContentsFetcher), + }, false)) + initialContentsFetcher.EXPECT().FetchContents(gomock.Any()). 
+ Return(nil, status.Error(codes.Internal, "Network error")) + errorLogger.EXPECT().Log(testutil.EqStatus(t, status.Error(codes.Internal, "Failed to initialize directory: Network error"))) + + _, s := d.VirtualRemove(path.MustNewComponent("broken_directory"), true, false) + require.Equal(t, virtual.StatusErrIO, s) + }) + + t.Run("ChildDirectoryNotEmpty", func(t *testing.T) { + inMemoryPrepopulatedDirectoryExpectMkdir(ctrl, handleAllocator) + initialContentsFetcher := mock.NewMockInitialContentsFetcher(ctrl) + require.NoError(t, d.CreateChildren(map[path.Component]virtual.InitialNode{ + path.MustNewComponent("non_empty_directory"): virtual.InitialNode{}.FromDirectory(initialContentsFetcher), + }, false)) + leaf := mock.NewMockNativeLeaf(ctrl) + initialContentsFetcher.EXPECT().FetchContents(gomock.Any()).Return(map[path.Component]virtual.InitialNode{ + path.MustNewComponent("file"): virtual.InitialNode{}.FromLeaf(leaf), + }, nil) + + _, s := d.VirtualRemove(path.MustNewComponent("non_empty_directory"), true, false) + require.Equal(t, virtual.StatusErrNotEmpty, s) + }) + + t.Run("SuccessFile", func(t *testing.T) { + leaf := mock.NewMockNativeLeaf(ctrl) + require.NoError(t, d.CreateChildren(map[path.Component]virtual.InitialNode{ + path.MustNewComponent("success"): virtual.InitialNode{}.FromLeaf(leaf), + }, false)) + leaf.EXPECT().Unlink() + + changeInfo, s := d.VirtualRemove(path.MustNewComponent("success"), true, true) + require.Equal(t, virtual.StatusOK, s) + require.Equal(t, virtual.ChangeInfo{ + Before: 5, + After: 6, + }, changeInfo) + }) + + t.Run("SuccessDirectory", func(t *testing.T) { + // Directories may be removed, even if they are not + // empty. In that case they should exclusively consist + // of hidden files. 
+ dHandle := inMemoryPrepopulatedDirectoryExpectMkdir(ctrl, handleAllocator) + initialContentsFetcher := mock.NewMockInitialContentsFetcher(ctrl) + require.NoError(t, d.CreateChildren(map[path.Component]virtual.InitialNode{ + path.MustNewComponent("directory_with_hidden_files"): virtual.InitialNode{}.FromDirectory(initialContentsFetcher), + }, false)) + leaf1 := mock.NewMockNativeLeaf(ctrl) + leaf2 := mock.NewMockNativeLeaf(ctrl) + initialContentsFetcher.EXPECT().FetchContents(gomock.Any()).Return(map[path.Component]virtual.InitialNode{ + path.MustNewComponent("._hidden_file1"): virtual.InitialNode{}.FromLeaf(leaf1), + path.MustNewComponent("._hidden_file2"): virtual.InitialNode{}.FromLeaf(leaf2), + }, nil) + leaf1.EXPECT().Unlink() + leaf2.EXPECT().Unlink() + dHandle.EXPECT().Release() + + changeInfo, s := d.VirtualRemove(path.MustNewComponent("directory_with_hidden_files"), true, true) + require.Equal(t, virtual.StatusOK, s) + require.Equal(t, virtual.ChangeInfo{ + Before: 7, + After: 8, + }, changeInfo) + }) +} + +func TestInMemoryPrepopulatedDirectoryVirtualSymlink(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + fileAllocator := mock.NewMockFileAllocator(ctrl) + symlinkFactory := mock.NewMockSymlinkFactory(ctrl) + errorLogger := mock.NewMockErrorLogger(ctrl) + handleAllocator := mock.NewMockStatefulHandleAllocator(ctrl) + inMemoryPrepopulatedDirectoryExpectMkdir(ctrl, handleAllocator) + d := virtual.NewInMemoryPrepopulatedDirectory(fileAllocator, symlinkFactory, errorLogger, handleAllocator, sort.Sort, hiddenFilesPatternForTesting.MatchString, clock.SystemClock) + + t.Run("FailureInitialContentsFetcher", func(t *testing.T) { + // Create a subdirectory that has an initial contents fetcher. 
+ inMemoryPrepopulatedDirectoryExpectMkdir(ctrl, handleAllocator) + initialContentsFetcher := mock.NewMockInitialContentsFetcher(ctrl) + require.NoError(t, d.CreateChildren(map[path.Component]virtual.InitialNode{ + path.MustNewComponent("subdir"): virtual.InitialNode{}.FromDirectory(initialContentsFetcher), + }, false)) + + child, err := d.LookupChild(path.MustNewComponent("subdir")) + require.NoError(t, err) + + childDirectory, childLeaf := child.GetPair() + require.NotNil(t, childDirectory) + require.Nil(t, childLeaf) + + // Creating a symlink in a directory whose initial + // contents cannot be fetched, should fail. The reason + // being that we can't accurately determine whether a + // file under that name is already present. + initialContentsFetcher.EXPECT().FetchContents(gomock.Any()). + Return(nil, status.Error(codes.Internal, "Network error")) + errorLogger.EXPECT().Log(testutil.EqStatus(t, status.Error(codes.Internal, "Failed to initialize directory: Network error"))) + + _, _, s := childDirectory.VirtualSymlink(ctx, []byte("target"), path.MustNewComponent("symlink"), 0, &virtual.Attributes{}) + require.Equal(t, virtual.StatusErrIO, s) + }) + + t.Run("FailureExist", func(t *testing.T) { + // The operation should fail if a file or directory + // already exists under the provided name. 
+ existingFile := mock.NewMockNativeLeaf(ctrl) + require.NoError(t, d.CreateChildren(map[path.Component]virtual.InitialNode{ + path.MustNewComponent("existing_file"): virtual.InitialNode{}.FromLeaf(existingFile), + }, false)) + + _, _, s := d.VirtualSymlink(ctx, []byte("target"), path.MustNewComponent("existing_file"), 0, &virtual.Attributes{}) + require.Equal(t, virtual.StatusErrExist, s) + }) + + t.Run("Success", func(t *testing.T) { + leaf := mock.NewMockNativeLeaf(ctrl) + symlinkFactory.EXPECT().LookupSymlink([]byte("target")).Return(leaf) + leaf.EXPECT().VirtualGetAttributes( + ctx, + virtual.AttributesMaskInodeNumber, + gomock.Any(), + ).Do(func(ctx context.Context, requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetInodeNumber(3) + }) + + var out virtual.Attributes + actualLeaf, changeInfo, s := d.VirtualSymlink(ctx, []byte("target"), path.MustNewComponent("symlink"), virtual.AttributesMaskInodeNumber, &out) + require.Equal(t, virtual.StatusOK, s) + require.NotNil(t, actualLeaf) + require.Equal(t, virtual.ChangeInfo{ + Before: 2, + After: 3, + }, changeInfo) + require.Equal(t, (&virtual.Attributes{}).SetInodeNumber(3), &out) + }) +} diff --git a/pkg/filesystem/virtual/initial_contents_fetcher.go b/pkg/filesystem/virtual/initial_contents_fetcher.go new file mode 100644 index 0000000..6e7f2f0 --- /dev/null +++ b/pkg/filesystem/virtual/initial_contents_fetcher.go @@ -0,0 +1,55 @@ +package virtual + +import ( + "context" + + "github.com/buildbarn/bb-storage/pkg/digest" + "github.com/buildbarn/bb-storage/pkg/filesystem/path" +) + +// InitialNode is the value type of the map of directory entries +// returned by InitialContentsFetcher.FetchContents(). Either Directory +// or Leaf is set, but not both. 
+type InitialNode = Child[InitialContentsFetcher, NativeLeaf, any]
+
+// FileReadMonitor is used by the regular files created through the
+// InitialContentsFetcher to indicate that one or more calls against
+// VirtualRead() have occurred. This is used by
+// AccessMonitoringInitialContentsFetcher to monitor file access.
+type FileReadMonitor func()
+
+// FileReadMonitorFactory is a factory type for FileReadMonitor that is
+// provided to the InitialContentsFetcher, so that the
+// InitialContentsFetcher can attach the resulting monitors to any files
+// that are returned.
+//
+// If this function returns nil, no monitor is attached to the file.
+type FileReadMonitorFactory func(name path.Component) FileReadMonitor
+
+// InitialContentsFetcher is called into by PrepopulatedDirectory when a
+// directory whose contents need to be instantiated lazily is accessed.
+// The results returned by FetchContents() are used to populate the
+// directory.
+//
+// FetchContents() should be called until it succeeds at most once. It
+// is possible that FetchContents() is never called. This may happen if
+// the directory in question is never accessed.
+type InitialContentsFetcher interface {
+	FetchContents(fileReadMonitorFactory FileReadMonitorFactory) (map[path.Component]InitialNode, error)
+
+	// GetContainingDigests() returns a set of digests of objects in
+	// the Content Addressable Storage that back the directories and
+	// leaf nodes yielded by this InitialContentsFetcher.
+	//
+	// The set returned by this function may be passed to
+	// ContentAddressableStorage.FindMissingBlobs() to check whether
+	// all the files underneath this directory still exist, and to
+	// prevent them from being removed in the near future.
+	//
+	// This API assumes that the resulting set is small enough to
+	// fit in memory. For hierarchies backed by Tree objects, this
+	// will generally hold.
It may not be safe to call this method + // on InitialContentsFetchers that expand to infinitely big + // hierarchies. + GetContainingDigests(ctx context.Context) (digest.Set, error) +} diff --git a/pkg/filesystem/virtual/leaf.go b/pkg/filesystem/virtual/leaf.go new file mode 100644 index 0000000..f907d12 --- /dev/null +++ b/pkg/filesystem/virtual/leaf.go @@ -0,0 +1,97 @@ +package virtual + +import ( + "context" + "math/bits" + + "github.com/buildbarn/bb-storage/pkg/filesystem" +) + +// ShareMask is a bitmask of operations that are permitted against a +// Leaf that has been opened. +type ShareMask uint32 + +const ( + // ShareMaskRead permits calls to VirtualRead(). + ShareMaskRead ShareMask = 1 << iota + // ShareMaskWrite permits calls to VirtualWrite(). + ShareMaskWrite +) + +// Count the number of permitted operations. +func (sm ShareMask) Count() uint { + return uint(bits.OnesCount32(uint32(sm))) +} + +// OpenExistingOptions contains options that describe what should happen +// with a file when opened. The Truncate option corresponds to open()'s +// O_TRUNC option. This option has no effect on freshly created files, +// as those are always empty. +type OpenExistingOptions struct { + Truncate bool +} + +// ToAttributesMask converts open options to an AttributeMask, +// indicating which file attributes were affected by the operation. +func (o *OpenExistingOptions) ToAttributesMask() (m AttributesMask) { + if o.Truncate { + m |= AttributesMaskSizeBytes + } + return +} + +// Leaf node that is exposed through FUSE using SimpleRawFileSystem, or +// through NFSv4. Examples of leaf nodes are regular files, sockets, +// FIFOs, symbolic links and devices. +// +// TODO: Should all methods take an instance of Context? 
+type Leaf interface { + Node + + VirtualAllocate(off, size uint64) Status + VirtualSeek(offset uint64, regionType filesystem.RegionType) (*uint64, Status) + VirtualOpenSelf(ctx context.Context, shareAccess ShareMask, options *OpenExistingOptions, requested AttributesMask, attributes *Attributes) Status + VirtualRead(buf []byte, offset uint64) (n int, eof bool, s Status) + VirtualReadlink(ctx context.Context) ([]byte, Status) + VirtualClose(shareAccess ShareMask) + VirtualWrite(buf []byte, offset uint64) (int, Status) +} + +// StatelessLeafLinkCount is the value that should be assigned to +// fuse.Attr.Nlink for leaf nodes that don't track an explicit link +// count, such as files backed by the Content Addressable Storage (CAS). +// +// The Linux kernel doesn't treat fuse.Attr.Nlink as an opaque value. +// Its value gets stored in the kernel inode structure's i_nlink field. +// This cached value may later be incremented and decremented inside +// fuse_link() and fuse_unlink(), meaning that it may reach zero. Once +// zero, the kernel thinks the file is unlinked, causing future link() +// calls to fail. +// +// This means that the value of fuse.Attr.Nlink should ideally reflect +// the true number of paths under which these files are visible. For +// stateless files that is impossible to achieve, as they may appear in +// an arbitrary number of places that aren't known up front. Solve this +// by using a constant value that is sufficiently high for most use +// cases. 
+// +// References: +// - https://github.com/torvalds/linux/blob/01c70267053d6718820ac0902d8823d5dd2a6adb/fs/fuse/inode.c#L161 +// - https://github.com/torvalds/linux/blob/01c70267053d6718820ac0902d8823d5dd2a6adb/fs/fuse/dir.c#L874 +// - https://github.com/torvalds/linux/blob/01c70267053d6718820ac0902d8823d5dd2a6adb/fs/fuse/dir.c#L726 +// - https://github.com/torvalds/linux/blob/01c70267053d6718820ac0902d8823d5dd2a6adb/fs/namei.c#L4066-L4067 +const StatelessLeafLinkCount = 9999 + +// BoundReadToFileSize is a helper function for implementations of +// VirtualRead() to limit the read size to the actual file size. +func BoundReadToFileSize(buf []byte, offset, size uint64) ([]byte, bool) { + if offset >= size { + // Read starting at or past end-of-file. + return nil, true + } + if remaining := size - offset; uint64(len(buf)) >= remaining { + // Read ending at or past end-of-file. + return buf[:remaining], true + } + return buf, false +} diff --git a/pkg/filesystem/virtual/native_leaf.go b/pkg/filesystem/virtual/native_leaf.go new file mode 100644 index 0000000..11ad2ae --- /dev/null +++ b/pkg/filesystem/virtual/native_leaf.go @@ -0,0 +1,55 @@ +package virtual + +import ( + "context" + + "github.com/buildbarn/bb-remote-execution/pkg/proto/outputpathpersistency" + "github.com/buildbarn/bb-remote-execution/pkg/proto/remoteoutputservice" + "github.com/buildbarn/bb-storage/pkg/blobstore" + "github.com/buildbarn/bb-storage/pkg/digest" + "github.com/buildbarn/bb-storage/pkg/filesystem/path" +) + +// NativeLeaf objects are non-directory nodes that can be placed in a +// PrepopulatedDirectory. +type NativeLeaf interface { + Leaf + + // Operations called into by implementations of + // PrepopulatedDirectory. The Link() operation may fail, for the + // reason that Directory.VirtualLink() may be called on leaf + // nodes that have been removed concurrently. + Link() Status + Unlink() + + // Additional operations that are used by consumers of + // PrepopulatedDirectory. 
+ // + // TODO: Remove these once Go supports generics. We could turn + // PrepopulatedDirectory and InitialContentsFetcher into + // parameterized types, where the leaves could be any type + // that's based on NativeLeaf. + Readlink() (string, error) + UploadFile(ctx context.Context, contentAddressableStorage blobstore.BlobAccess, digestFunction digest.Function) (digest.Digest, error) + // GetContainingDigests() returns a set of digests of objects in + // the Content Addressable Storage that back the contents of + // this file. + // + // The set returned by this function may be passed to + // ContentAddressableStorage.FindMissingBlobs() to check whether + // the file still exists in its entirety, and to prevent + // the file from being removed in the near future. + GetContainingDigests() digest.Set + // GetOutputServiceFileStatus() returns the status of the leaf + // node in the form of a FileStatus message that is used by the + // Remote Output Service protocol. + // + // When digestFunction is not nil, FileStatus responses for + // regular files should include the digest. + GetOutputServiceFileStatus(digestFunction *digest.Function) (*remoteoutputservice.FileStatus, error) + // AppendOutputPathPersistencyDirectoryNode() appends a FileNode or + // SymlinkNode entry to a Directory message that is used to + // persist the state of a Remote Output Service output path to + // disk. 
+ AppendOutputPathPersistencyDirectoryNode(directory *outputpathpersistency.Directory, name path.Component) +} diff --git a/pkg/filesystem/virtual/nfs_handle_allocator.go b/pkg/filesystem/virtual/nfs_handle_allocator.go new file mode 100644 index 0000000..a97b145 --- /dev/null +++ b/pkg/filesystem/virtual/nfs_handle_allocator.go @@ -0,0 +1,660 @@ +package virtual + +import ( + "bytes" + "context" + "encoding/binary" + "io" + "sync" + + "github.com/buildbarn/bb-storage/pkg/filesystem/path" + "github.com/buildbarn/bb-storage/pkg/random" +) + +// fileHandleToInodeNumber converts a file handle to an inode number. +func fileHandleToInodeNumber(fileHandle []byte) uint64 { + hasher := fnv1aHasher{ + hash: binary.LittleEndian.Uint64(fileHandle), + } + if _, err := bytes.NewBuffer(fileHandle[8:]).WriteTo(&hasher); err != nil { + panic(err) + } + return hasher.hash +} + +// inodeNumberToBaseFileHandle converts the inode number of a +// non-resolvable file to a file handle. File handles for such files are +// simply identical to the inode number. +func inodeNumberToBaseFileHandle(inodeNumber uint64) [8]byte { + var fileHandle [8]byte + binary.LittleEndian.PutUint64(fileHandle[:], inodeNumber) + return fileHandle +} + +// setAttributesForFileHandle sets the file handle and inode number +// attributes for a given file. 
+func setAttributesForFileHandle(fileHandle []byte, requested AttributesMask, attributes *Attributes) { + attributes.SetFileHandle(fileHandle) + if requested&AttributesMaskInodeNumber != 0 { + attributes.SetInodeNumber(fileHandleToInodeNumber(fileHandle)) + } +} + +type nfsHandlePool struct { + lock sync.RWMutex + randomNumberGenerator random.SingleThreadedGenerator + directories map[uint64]Directory + statefulLeaves map[uint64]*nfsStatefulNativeLeaf + statelessLeaves map[uint64]*nfsStatelessNativeLeaf + resolvers map[uint64]HandleResolver +} + +func (hp *nfsHandlePool) createStatelessDirectoryLocked(inodeNumber uint64, underlyingDirectory Directory) Directory { + // Reuse an existing directory if one exists. + if directory, ok := hp.directories[inodeNumber]; ok { + return directory + } + + fileHandle := inodeNumberToBaseFileHandle(inodeNumber) + directory := &nfsStatelessDirectory{ + Directory: underlyingDirectory, + fileHandle: fileHandle[:], + } + hp.directories[inodeNumber] = directory + return directory +} + +func (hp *nfsHandlePool) createResolvableAllocatorLocked(inodeNumber uint64, resolver HandleResolver) ResolvableHandleAllocator { + if _, ok := hp.resolvers[inodeNumber]; !ok { + hp.resolvers[inodeNumber] = resolver + } + + fileHandlePrefix := inodeNumberToBaseFileHandle(inodeNumber) + return &nfsResolvableHandleAllocator{ + fileHandlePrefix: fileHandlePrefix[:], + } +} + +// NFSStatefulHandleAllocator creates a handle allocator for the purpose +// of exposing the virtual file system through NFS. It is responsible +// for decorating all files in the file system, so that they have file +// handles, inode numbers and link counts. File handles and inode +// numbers are unique for stateful (mutable) files, while they are +// identical for stateless files that share the same identifiers, +// meaning they can be deduplicated by the kernel. 
+// +// The NFS protocol is stateless, in the sense that the client and +// server share no state on which nodes in the file system have been +// resolved. The client does not inform the server that it has released +// files from its cache. This means that the server needs to be able to +// resolve all file handles that are either still present in the file +// system or are still opened by the client. This handle allocator is +// capable of only doing the former. The latter can be supported at a +// higher level. +// +// To work well with infinitely big directory structures (e.g., +// bb_clientd's "cas" directory), this implementation makes use of the +// handle resolver function provided to AsResolvableAllocator(). Instead +// of tracking these nodes explicitly, it generates longer file handles +// that have the value provided to ResolvableHandleAllocator.New() as +// a suffix, making it possible to regenerate these nodes on the fly. +type NFSStatefulHandleAllocator struct { + pool *nfsHandlePool +} + +var _ StatefulHandleAllocator = (*NFSStatefulHandleAllocator)(nil) + +// NewNFSHandleAllocator creates a new NFSStatefulHandleAllocator that +// does not have any resolvable objects. +func NewNFSHandleAllocator(randomNumberGenerator random.SingleThreadedGenerator) *NFSStatefulHandleAllocator { + return &NFSStatefulHandleAllocator{ + pool: &nfsHandlePool{ + randomNumberGenerator: randomNumberGenerator, + directories: map[uint64]Directory{}, + statefulLeaves: map[uint64]*nfsStatefulNativeLeaf{}, + statelessLeaves: map[uint64]*nfsStatelessNativeLeaf{}, + resolvers: map[uint64]HandleResolver{}, + }, + } +} + +// ResolveHandle resolves a directory or leaf object that corresponds +// with a file handle previously returned by Attributes.GetFileHandle(). +// +// Only files that are linked into the file system are guaranteed to be +// resolvable. Files that have been unlinked, but are still opened have +// to be tracked at a higher level. 
+func (hr *NFSStatefulHandleAllocator) ResolveHandle(r io.ByteReader) (DirectoryChild, Status) { + // The first eight bytes of the handle always correspond to a + // base inode number. + var inodeNumberBytes [8]byte + for i := 0; i < len(inodeNumberBytes); i++ { + c, err := r.ReadByte() + if err != nil { + return DirectoryChild{}, StatusErrBadHandle + } + inodeNumberBytes[i] = c + } + inodeNumber := binary.LittleEndian.Uint64(inodeNumberBytes[:]) + + p := hr.pool + p.lock.RLock() + if directory, ok := p.directories[inodeNumber]; ok { + p.lock.RUnlock() + return DirectoryChild{}.FromDirectory(directory), StatusOK + } + if leaf, ok := p.statefulLeaves[inodeNumber]; ok { + p.lock.RUnlock() + return DirectoryChild{}.FromLeaf(leaf), StatusOK + } + if leaf, ok := p.statelessLeaves[inodeNumber]; ok { + p.lock.RUnlock() + return DirectoryChild{}.FromLeaf(leaf), StatusOK + } + if resolver, ok := p.resolvers[inodeNumber]; ok { + p.lock.RUnlock() + return resolver(r) + } + p.lock.RUnlock() + return DirectoryChild{}, StatusErrStale +} + +// New creates a new stateful handle allocation. 
+func (hr *NFSStatefulHandleAllocator) New() StatefulHandleAllocation { + return &nfsStatefulHandleAllocation{ + pool: hr.pool, + } +} + +type nfsStatefulHandleAllocation struct { + pool *nfsHandlePool +} + +func (hn *nfsStatefulHandleAllocation) AsStatelessAllocator() StatelessHandleAllocator { + hp := hn.pool + hp.lock.Lock() + inodeNumberSeed := hp.randomNumberGenerator.Uint64() + hp.lock.Unlock() + *hn = nfsStatefulHandleAllocation{} + return &nfsStatelessHandleAllocator{ + pool: hp, + inodeNumberSeed: inodeNumberSeed, + } +} + +func (hn *nfsStatefulHandleAllocation) AsResolvableAllocator(resolver HandleResolver) ResolvableHandleAllocator { + hp := hn.pool + hp.lock.Lock() + hr := hp.createResolvableAllocatorLocked(hp.randomNumberGenerator.Uint64(), resolver) + hp.lock.Unlock() + *hn = nfsStatefulHandleAllocation{} + return hr +} + +func (hn *nfsStatefulHandleAllocation) AsStatefulDirectory(directory Directory) StatefulDirectoryHandle { + hp := hn.pool + hp.lock.Lock() + inodeNumber := hp.randomNumberGenerator.Uint64() + hp.directories[inodeNumber] = directory + hp.lock.Unlock() + + *hn = nfsStatefulHandleAllocation{} + return &nfsStatefulDirectoryHandle{ + pool: hp, + inodeNumber: inodeNumber, + } +} + +func (hn *nfsStatefulHandleAllocation) AsStatelessDirectory(underlyingDirectory Directory) Directory { + hp := hn.pool + hp.lock.Lock() + directory := hp.createStatelessDirectoryLocked(hp.randomNumberGenerator.Uint64(), underlyingDirectory) + hp.lock.Unlock() + *hn = nfsStatefulHandleAllocation{} + return directory +} + +func (hn *nfsStatefulHandleAllocation) AsNativeLeaf(underlyingLeaf NativeLeaf) NativeLeaf { + hp := hn.pool + hp.lock.Lock() + inodeNumber := hp.randomNumberGenerator.Uint64() + fileHandle := inodeNumberToBaseFileHandle(inodeNumber) + leaf := &nfsStatefulNativeLeaf{ + NativeLeaf: underlyingLeaf, + pool: hp, + fileHandle: fileHandle[:], + linkCount: 1, + } + hp.statefulLeaves[inodeNumber] = leaf + hp.lock.Unlock() + + *hn = 
nfsStatefulHandleAllocation{} + return leaf +} + +func (hn *nfsStatefulHandleAllocation) AsLeaf(underlyingLeaf Leaf) Leaf { + panic("Regular leaf objects cannot be used in stateful contexts, as they cannot be linked/unlinked") +} + +type nfsStatelessHandleAllocator struct { + pool *nfsHandlePool + inodeNumberSeed uint64 +} + +func (hr *nfsStatelessHandleAllocator) New(w io.WriterTo) StatelessHandleAllocation { + hasher := fnv1aHasher{ + hash: hr.inodeNumberSeed, + } + if _, err := w.WriteTo(&hasher); err != nil { + panic(err) + } + return &nfsStatelessHandleAllocation{ + pool: hr.pool, + currentInodeNumber: hasher.hash, + } +} + +type nfsStatelessHandleAllocation struct { + pool *nfsHandlePool + currentInodeNumber uint64 +} + +func (hn *nfsStatelessHandleAllocation) AsStatelessAllocator() StatelessHandleAllocator { + hr := &nfsStatelessHandleAllocator{ + pool: hn.pool, + inodeNumberSeed: hn.currentInodeNumber, + } + *hn = nfsStatelessHandleAllocation{} + return hr +} + +func (hn *nfsStatelessHandleAllocation) AsResolvableAllocator(resolver HandleResolver) ResolvableHandleAllocator { + hp := hn.pool + hp.lock.Lock() + hr := hp.createResolvableAllocatorLocked(hn.currentInodeNumber, resolver) + hp.lock.Unlock() + *hn = nfsStatelessHandleAllocation{} + return hr +} + +func (hn *nfsStatelessHandleAllocation) AsStatelessDirectory(underlyingDirectory Directory) Directory { + hp := hn.pool + hp.lock.Lock() + directory := hp.createStatelessDirectoryLocked(hn.currentInodeNumber, underlyingDirectory) + hp.lock.Unlock() + *hn = nfsStatelessHandleAllocation{} + return directory +} + +func (hn *nfsStatelessHandleAllocation) AsNativeLeaf(underlyingLeaf NativeLeaf) NativeLeaf { + hp := hn.pool + hp.lock.Lock() + + // Reuse an existing leaf if one exists. + if leaf, ok := hp.statelessLeaves[hn.currentInodeNumber]; ok { + leaf.linkCount++ + hp.lock.Unlock() + underlyingLeaf.Unlink() + return leaf + } + + // None exists. Create a new one. 
+ fileHandle := inodeNumberToBaseFileHandle(hn.currentInodeNumber) + leaf := &nfsStatelessNativeLeaf{ + NativeLeaf: underlyingLeaf, + pool: hp, + fileHandle: fileHandle[:], + linkCount: 1, + } + hp.statelessLeaves[hn.currentInodeNumber] = leaf + hp.lock.Unlock() + + *hn = nfsStatelessHandleAllocation{} + return leaf +} + +func (hn *nfsStatelessHandleAllocation) AsLeaf(underlyingLeaf Leaf) Leaf { + panic("Regular leaf objects cannot be used in stateless contexts, as they cannot be linked/unlinked") +} + +type nfsResolvableHandleAllocator struct { + fileHandlePrefix []byte +} + +func (hr *nfsResolvableHandleAllocator) New(w io.WriterTo) ResolvableHandleAllocation { + fileHandle := bytes.NewBuffer(hr.fileHandlePrefix[:len(hr.fileHandlePrefix):len(hr.fileHandlePrefix)]) + if _, err := w.WriteTo(fileHandle); err != nil { + panic(err) + } + return &nfsResolvableHandleAllocation{ + currentFileHandle: fileHandle.Bytes(), + } +} + +type nfsResolvableHandleAllocation struct { + currentFileHandle []byte +} + +func (hn *nfsResolvableHandleAllocation) AsResolvableAllocator(resolver HandleResolver) ResolvableHandleAllocator { + hr := &nfsResolvableHandleAllocator{ + fileHandlePrefix: hn.currentFileHandle, + } + *hn = nfsResolvableHandleAllocation{} + return hr +} + +func (hn *nfsResolvableHandleAllocation) AsStatelessDirectory(underlyingDirectory Directory) Directory { + directory := &nfsStatelessDirectory{ + Directory: underlyingDirectory, + fileHandle: hn.currentFileHandle, + } + *hn = nfsResolvableHandleAllocation{} + return directory +} + +func (hn *nfsResolvableHandleAllocation) AsNativeLeaf(underlyingLeaf NativeLeaf) NativeLeaf { + leaf := &nfsResolvableNativeLeaf{ + NativeLeaf: underlyingLeaf, + fileHandle: hn.currentFileHandle, + } + *hn = nfsResolvableHandleAllocation{} + return leaf +} + +func (hn *nfsResolvableHandleAllocation) AsLeaf(underlyingLeaf Leaf) Leaf { + leaf := &nfsResolvableLeaf{ + Leaf: underlyingLeaf, + fileHandle: hn.currentFileHandle, + } + *hn = 
nfsResolvableHandleAllocation{} + return leaf +} + +// nfsStatefulDirectoryHandle is a handle for stateful directories that +// augments the results of VirtualGetAttributes() to contain a file +// handle and inode number. +type nfsStatefulDirectoryHandle struct { + pool *nfsHandlePool + inodeNumber uint64 +} + +func (dh *nfsStatefulDirectoryHandle) GetAttributes(requested AttributesMask, attributes *Attributes) { + fileHandle := inodeNumberToBaseFileHandle(dh.inodeNumber) + attributes.SetFileHandle(fileHandle[:]) + attributes.SetInodeNumber(dh.inodeNumber) +} + +func (dh *nfsStatefulDirectoryHandle) NotifyRemoval(name path.Component) { + // Removal notification could be supported using NFSv4.1's + // CB_NOTIFY operation. Unfortunately, none of the major client + // implementations seem to support it. + // https://github.com/torvalds/linux/blob/b05bf5c63b326ce1da84ef42498d8e0e292e694c/fs/nfs/callback_xdr.c#L779-L783 +} + +func (dh *nfsStatefulDirectoryHandle) Release() { + hp := dh.pool + hp.lock.Lock() + delete(hp.directories, dh.inodeNumber) + hp.lock.Unlock() +} + +// nfsStatelessDirectory is a decorator for stateless Directory objects +// that augments the results of VirtualGetAttributes() to contain a file +// handle and inode number. 
+type nfsStatelessDirectory struct { + Directory + fileHandle []byte +} + +func (d *nfsStatelessDirectory) VirtualGetAttributes(ctx context.Context, requested AttributesMask, attributes *Attributes) { + if remaining := requested &^ (AttributesMaskFileHandle | AttributesMaskInodeNumber); remaining != 0 { + d.Directory.VirtualGetAttributes(ctx, remaining, attributes) + } + setAttributesForFileHandle(d.fileHandle, requested, attributes) +} + +func (d *nfsStatelessDirectory) VirtualSetAttributes(ctx context.Context, in *Attributes, requested AttributesMask, attributes *Attributes) Status { + if s := d.Directory.VirtualSetAttributes(ctx, in, requested, attributes); s != StatusOK { + return s + } + setAttributesForFileHandle(d.fileHandle, requested, attributes) + return StatusOK +} + +// nfsStatefulNativeLeaf is a decorator for NativeLeaf that augments +// the results of VirtualGetAttributes() to contain a file handle, inode +// number and link count. Link() and Unlink() calls are intercepted, and +// are only forwarded if the link count drops to zero. +type nfsStatefulNativeLeaf struct { + NativeLeaf + pool *nfsHandlePool + fileHandle []byte + + // Protected by pool.lock. 
+ linkCount uint32 + changeID uint64 +} + +func (l *nfsStatefulNativeLeaf) Link() Status { + hp := l.pool + hp.lock.Lock() + defer hp.lock.Unlock() + + if l.linkCount == 0 { + return StatusErrStale + } + l.linkCount++ + l.changeID++ + return StatusOK +} + +func (l *nfsStatefulNativeLeaf) Unlink() { + inodeNumber := fileHandleToInodeNumber(l.fileHandle) + + hp := l.pool + hp.lock.Lock() + if l.linkCount == 0 { + panic("Attempted to unlink file with link count zero") + } + l.linkCount-- + l.changeID++ + if l.linkCount == 0 { + delete(hp.statefulLeaves, inodeNumber) + hp.lock.Unlock() + l.NativeLeaf.Unlink() + } else { + hp.lock.Unlock() + } +} + +func (l *nfsStatefulNativeLeaf) injectAttributes(requested AttributesMask, attributes *Attributes) { + setAttributesForFileHandle(l.fileHandle, requested, attributes) + if requested&(AttributesMaskChangeID|AttributesMaskLinkCount) != 0 { + hp := l.pool + hp.lock.RLock() + if requested&AttributesMaskChangeID != 0 { + attributes.SetChangeID(attributes.GetChangeID() + l.changeID) + } + attributes.SetLinkCount(l.linkCount) + hp.lock.RUnlock() + } +} + +func (l *nfsStatefulNativeLeaf) VirtualGetAttributes(ctx context.Context, requested AttributesMask, attributes *Attributes) { + if remaining := requested &^ (AttributesMaskFileHandle | AttributesMaskInodeNumber | AttributesMaskLinkCount); remaining != 0 { + l.NativeLeaf.VirtualGetAttributes(ctx, remaining, attributes) + } + l.injectAttributes(requested, attributes) +} + +func (l *nfsStatefulNativeLeaf) VirtualSetAttributes(ctx context.Context, in *Attributes, requested AttributesMask, attributes *Attributes) Status { + if s := l.NativeLeaf.VirtualSetAttributes(ctx, in, requested, attributes); s != StatusOK { + return s + } + l.injectAttributes(requested, attributes) + return StatusOK +} + +func (l *nfsStatefulNativeLeaf) VirtualOpenSelf(ctx context.Context, shareAccess ShareMask, options *OpenExistingOptions, requested AttributesMask, attributes *Attributes) Status { + if s := 
l.NativeLeaf.VirtualOpenSelf(ctx, shareAccess, options, requested, attributes); s != StatusOK { + return s + } + l.injectAttributes(requested, attributes) + return StatusOK +} + +// nfsStatelessNativeLeaf is a decorator for NativeLeaf that augments +// the results of VirtualGetAttributes() to contain a file handle, inode +// number and link count. +// +// Even though these files are stateless, we need to track an actual +// link count to determine when it's safe to release the file handle +// from nfsHandlePool. We do report a constant link count back to the +// user, both to prevent invalidation of the attributes and for +// consistency with FUSE. +type nfsStatelessNativeLeaf struct { + NativeLeaf + pool *nfsHandlePool + fileHandle []byte + + // Protected by pool.lock. + linkCount uint32 +} + +func (l *nfsStatelessNativeLeaf) Link() Status { + hp := l.pool + hp.lock.Lock() + defer hp.lock.Unlock() + + if l.linkCount == 0 { + return StatusErrStale + } + l.linkCount++ + return StatusOK +} + +func (l *nfsStatelessNativeLeaf) Unlink() { + inodeNumber := fileHandleToInodeNumber(l.fileHandle) + + hp := l.pool + hp.lock.Lock() + if l.linkCount == 0 { + panic("Attempted to unlink file with link count zero") + } + l.linkCount-- + if l.linkCount == 0 { + delete(hp.statelessLeaves, inodeNumber) + hp.lock.Unlock() + l.NativeLeaf.Unlink() + } else { + hp.lock.Unlock() + } +} + +func (l *nfsStatelessNativeLeaf) injectAttributes(requested AttributesMask, attributes *Attributes) { + setAttributesForFileHandle(l.fileHandle, requested, attributes) + attributes.SetLinkCount(StatelessLeafLinkCount) +} + +func (l *nfsStatelessNativeLeaf) VirtualGetAttributes(ctx context.Context, requested AttributesMask, attributes *Attributes) { + if remaining := requested &^ (AttributesMaskFileHandle | AttributesMaskInodeNumber | AttributesMaskLinkCount); remaining != 0 { + l.NativeLeaf.VirtualGetAttributes(ctx, remaining, attributes) + } + l.injectAttributes(requested, attributes) +} + +func (l 
*nfsStatelessNativeLeaf) VirtualSetAttributes(ctx context.Context, in *Attributes, requested AttributesMask, attributes *Attributes) Status { + if s := l.NativeLeaf.VirtualSetAttributes(ctx, in, requested, attributes); s != StatusOK { + return s + } + l.injectAttributes(requested, attributes) + return StatusOK +} + +func (l *nfsStatelessNativeLeaf) VirtualOpenSelf(ctx context.Context, shareAccess ShareMask, options *OpenExistingOptions, requested AttributesMask, attributes *Attributes) Status { + if s := l.NativeLeaf.VirtualOpenSelf(ctx, shareAccess, options, requested, attributes); s != StatusOK { + return s + } + l.injectAttributes(requested, attributes) + return StatusOK +} + +// nfsResolvableNativeLeaf is a decorator for NativeLeaf that augments +// the results of VirtualGetAttributes() to contain a file handle, inode +// number and link count. For these kinds of files, the link count is +// just a constant. +type nfsResolvableNativeLeaf struct { + NativeLeaf + fileHandle []byte +} + +func (l *nfsResolvableNativeLeaf) Link() Status { + return StatusOK +} + +func (l *nfsResolvableNativeLeaf) Unlink() {} + +func (l *nfsResolvableNativeLeaf) injectAttributes(requested AttributesMask, attributes *Attributes) { + setAttributesForFileHandle(l.fileHandle, requested, attributes) + attributes.SetLinkCount(StatelessLeafLinkCount) +} + +func (l *nfsResolvableNativeLeaf) VirtualGetAttributes(ctx context.Context, requested AttributesMask, attributes *Attributes) { + if remaining := requested &^ (AttributesMaskFileHandle | AttributesMaskInodeNumber | AttributesMaskLinkCount); remaining != 0 { + l.NativeLeaf.VirtualGetAttributes(ctx, remaining, attributes) + } + l.injectAttributes(requested, attributes) +} + +func (l *nfsResolvableNativeLeaf) VirtualSetAttributes(ctx context.Context, in *Attributes, requested AttributesMask, attributes *Attributes) Status { + if s := l.NativeLeaf.VirtualSetAttributes(ctx, in, requested, attributes); s != StatusOK { + return s + } + 
l.injectAttributes(requested, attributes) + return StatusOK +} + +func (l *nfsResolvableNativeLeaf) VirtualOpenSelf(ctx context.Context, shareAccess ShareMask, options *OpenExistingOptions, requested AttributesMask, attributes *Attributes) Status { + if s := l.NativeLeaf.VirtualOpenSelf(ctx, shareAccess, options, requested, attributes); s != StatusOK { + return s + } + l.injectAttributes(requested, attributes) + return StatusOK +} + +// nfsResolvableLeaf is a decorator for Leaf that augments the results +// of VirtualGetAttributes() to contain a file handle, inode number and +// link count. For these kinds of files, the link count is just a +// constant. +type nfsResolvableLeaf struct { + Leaf + fileHandle []byte +} + +func (l *nfsResolvableLeaf) injectAttributes(requested AttributesMask, attributes *Attributes) { + setAttributesForFileHandle(l.fileHandle, requested, attributes) + attributes.SetLinkCount(StatelessLeafLinkCount) +} + +func (l *nfsResolvableLeaf) VirtualGetAttributes(ctx context.Context, requested AttributesMask, attributes *Attributes) { + if remaining := requested &^ (AttributesMaskFileHandle | AttributesMaskInodeNumber | AttributesMaskLinkCount); remaining != 0 { + l.Leaf.VirtualGetAttributes(ctx, remaining, attributes) + } + l.injectAttributes(requested, attributes) +} + +func (l *nfsResolvableLeaf) VirtualSetAttributes(ctx context.Context, in *Attributes, requested AttributesMask, attributes *Attributes) Status { + if s := l.Leaf.VirtualSetAttributes(ctx, in, requested, attributes); s != StatusOK { + return s + } + l.injectAttributes(requested, attributes) + return StatusOK +} + +func (l *nfsResolvableLeaf) VirtualOpenSelf(ctx context.Context, shareAccess ShareMask, options *OpenExistingOptions, requested AttributesMask, attributes *Attributes) Status { + if s := l.Leaf.VirtualOpenSelf(ctx, shareAccess, options, requested, attributes); s != StatusOK { + return s + } + l.injectAttributes(requested, attributes) + return StatusOK +} diff --git 
a/pkg/filesystem/virtual/nfs_handle_allocator_test.go b/pkg/filesystem/virtual/nfs_handle_allocator_test.go new file mode 100644 index 0000000..5c3d5e2 --- /dev/null +++ b/pkg/filesystem/virtual/nfs_handle_allocator_test.go @@ -0,0 +1,276 @@ +package virtual_test + +import ( + "bytes" + "context" + "testing" + + "github.com/buildbarn/bb-remote-execution/internal/mock" + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/virtual" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" +) + +func TestNFSHandleAllocator(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + randomNumberGenerator := mock.NewMockSingleThreadedGenerator(ctrl) + handleAllocator := virtual.NewNFSHandleAllocator(randomNumberGenerator) + attributesMask := virtual.AttributesMaskChangeID | + virtual.AttributesMaskFileHandle | + virtual.AttributesMaskInodeNumber | + virtual.AttributesMaskLinkCount | + virtual.AttributesMaskSizeBytes + + t.Run("StatefulDirectory", func(t *testing.T) { + // Create a stateful directory. The handle that is + // returned should add a file handle and inode number to + // the attributes. + baseDirectory := mock.NewMockVirtualDirectory(ctrl) + + randomNumberGenerator.EXPECT().Uint64().Return(uint64(0xfccd1fc99a8c3425)) + directoryHandle := handleAllocator.New().AsStatefulDirectory(baseDirectory) + + fileHandle := []byte{0x25, 0x34, 0x8c, 0x9a, 0xc9, 0x1f, 0xcd, 0xfc} + var attr virtual.Attributes + directoryHandle.GetAttributes(attributesMask, &attr) + require.Equal( + t, + (&virtual.Attributes{}). + SetFileHandle(fileHandle). + SetInodeNumber(0xfccd1fc99a8c3425), + &attr) + + // The directory should be resolvable. + resolvedChild, s := handleAllocator.ResolveHandle(bytes.NewBuffer(fileHandle)) + require.Equal(t, virtual.StatusOK, s) + require.Equal(t, virtual.DirectoryChild{}.FromDirectory(baseDirectory), resolvedChild) + + // After releasing the directory, it should no longer be + // resolvable. 
+ directoryHandle.Release() + + _, s = handleAllocator.ResolveHandle(bytes.NewBuffer(fileHandle)) + require.Equal(t, virtual.StatusErrStale, s) + }) + + t.Run("StatelessDirectory", func(t *testing.T) { + // Create a stateless directory and wrap it. Only a file + // handle and inode number should be added, as the + // directory is still responsible for providing its own + // link count. The link count is based on the number of + // child directories. + baseDirectory := mock.NewMockVirtualDirectory(ctrl) + baseDirectory.EXPECT().VirtualGetAttributes(ctx, virtual.AttributesMaskChangeID|virtual.AttributesMaskLinkCount|virtual.AttributesMaskSizeBytes, gomock.Any()). + Do(func(ctx context.Context, attributesMask virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetChangeID(0) + attributes.SetLinkCount(17) + attributes.SetSizeBytes(42) + }).AnyTimes() + + randomNumberGenerator.EXPECT().Uint64().Return(uint64(0xa44671c491369d36)) + wrappedDirectory := handleAllocator.New().AsStatelessDirectory(baseDirectory) + + fileHandle := []byte{0x36, 0x9d, 0x36, 0x91, 0xc4, 0x71, 0x46, 0xa4} + var attr virtual.Attributes + wrappedDirectory.VirtualGetAttributes(ctx, attributesMask, &attr) + require.Equal( + t, + (&virtual.Attributes{}). + SetChangeID(0). + SetFileHandle(fileHandle). + SetInodeNumber(0xa44671c491369d36). + SetLinkCount(17). + SetSizeBytes(42), + &attr) + + // The directory should be resolvable. + resolvedChild, s := handleAllocator.ResolveHandle(bytes.NewBuffer(fileHandle)) + require.Equal(t, virtual.StatusOK, s) + require.Equal(t, virtual.DirectoryChild{}.FromDirectory(wrappedDirectory), resolvedChild) + }) + + t.Run("StatefulNativeLeaf", func(t *testing.T) { + // Create a stateful file and wrap it. A file handle, link + // count and inode number should be added. + baseLeaf := mock.NewMockNativeLeaf(ctrl) + baseLeaf.EXPECT().VirtualGetAttributes(ctx, virtual.AttributesMaskChangeID|virtual.AttributesMaskSizeBytes, gomock.Any()). 
+ Do(func(ctx context.Context, attributesMask virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetChangeID(7) + attributes.SetSizeBytes(42) + }).AnyTimes() + + randomNumberGenerator.EXPECT().Uint64().Return(uint64(0xf999bb2fd22421d8)) + wrappedLeaf := handleAllocator.New().AsNativeLeaf(baseLeaf) + + fileHandle := []byte{0xd8, 0x21, 0x24, 0xd2, 0x2f, 0xbb, 0x99, 0xf9} + var attr1 virtual.Attributes + wrappedLeaf.VirtualGetAttributes(ctx, attributesMask, &attr1) + require.Equal( + t, + (&virtual.Attributes{}). + SetChangeID(7). + SetFileHandle(fileHandle). + SetInodeNumber(0xf999bb2fd22421d8). + SetLinkCount(1). + SetSizeBytes(42), + &attr1) + + // The leaf should be resolvable. + resolvedChild, s := handleAllocator.ResolveHandle(bytes.NewBuffer(fileHandle)) + require.Equal(t, virtual.StatusOK, s) + require.Equal(t, virtual.DirectoryChild{}.FromLeaf(wrappedLeaf), resolvedChild) + + // Hardlinking it should cause the link count to be + // increased. + require.Equal(t, virtual.StatusOK, wrappedLeaf.Link()) + + var attr2 virtual.Attributes + wrappedLeaf.VirtualGetAttributes(ctx, attributesMask, &attr2) + require.Equal( + t, + (&virtual.Attributes{}). + SetChangeID(8). + SetFileHandle(fileHandle). + SetInodeNumber(0xf999bb2fd22421d8). + SetLinkCount(2). + SetSizeBytes(42), + &attr2) + + // Unlinking it twice should cause the underlying leaf + // node to be unlinked. It should then no longer be + // resolvable. + wrappedLeaf.Unlink() + baseLeaf.EXPECT().Unlink() + wrappedLeaf.Unlink() + + var attr3 virtual.Attributes + wrappedLeaf.VirtualGetAttributes(ctx, attributesMask, &attr3) + require.Equal( + t, + (&virtual.Attributes{}). + SetChangeID(10). + SetFileHandle(fileHandle). + SetInodeNumber(0xf999bb2fd22421d8). + SetLinkCount(0). 
+ SetSizeBytes(42), + &attr3) + + _, s = handleAllocator.ResolveHandle(bytes.NewBuffer(fileHandle)) + require.Equal(t, virtual.StatusErrStale, s) + + // Attempting to link it again should fail, as files + // cannot be brought back after being unlinked. + require.Equal(t, virtual.StatusErrStale, wrappedLeaf.Link()) + + var attr4 virtual.Attributes + wrappedLeaf.VirtualGetAttributes(ctx, attributesMask, &attr4) + require.Equal( + t, + (&virtual.Attributes{}). + SetChangeID(10). + SetFileHandle(fileHandle). + SetInodeNumber(0xf999bb2fd22421d8). + SetLinkCount(0). + SetSizeBytes(42), + &attr4) + }) + + t.Run("StatelessNativeLeaf", func(t *testing.T) { + // Create a stateless file and wrap it. A link count and + // inode number should be added. As the file is + // stateless, the reported link count uses a placeholder + // value. It does have a link count under the hood to + // determine when the file no longer needs to be + // resolvable. + // + // The inode number of the leaf corresponds with the + // FNV-1a hash of "Hello", using 0x6aae40a05f45b861 as + // the offset basis. + baseLeaf := mock.NewMockNativeLeaf(ctrl) + baseLeaf.EXPECT().VirtualGetAttributes(ctx, virtual.AttributesMaskChangeID|virtual.AttributesMaskSizeBytes, gomock.Any()). + Do(func(ctx context.Context, attributesMask virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetChangeID(0) + attributes.SetSizeBytes(123) + }).AnyTimes() + + randomNumberGenerator.EXPECT().Uint64().Return(uint64(0x6aae40a05f45b861)) + wrappedLeaf := handleAllocator. + New(). + AsStatelessAllocator(). + New(bytes.NewBuffer([]byte("Hello"))). + AsNativeLeaf(baseLeaf) + + fileHandle := []byte{0x0f, 0x81, 0x5c, 0x1c, 0xc7, 0x04, 0xac, 0x2f} + var attr1 virtual.Attributes + wrappedLeaf.VirtualGetAttributes(ctx, attributesMask, &attr1) + require.Equal( + t, + (&virtual.Attributes{}). + SetChangeID(0). + SetFileHandle(fileHandle). + SetInodeNumber(0x2fac04c71c5c810f). + SetLinkCount(virtual.StatelessLeafLinkCount). 
+ SetSizeBytes(123), + &attr1) + + // The leaf should be resolvable. + resolvedChild, s := handleAllocator.ResolveHandle(bytes.NewBuffer(fileHandle)) + require.Equal(t, virtual.StatusOK, s) + require.Equal(t, virtual.DirectoryChild{}.FromLeaf(wrappedLeaf), resolvedChild) + + // Hardlinking should have no visible effect, even + // though a link count under the hood is adjusted. + require.Equal(t, virtual.StatusOK, wrappedLeaf.Link()) + + var attr2 virtual.Attributes + wrappedLeaf.VirtualGetAttributes(ctx, attributesMask, &attr2) + require.Equal( + t, + (&virtual.Attributes{}). + SetChangeID(0). + SetFileHandle(fileHandle). + SetInodeNumber(0x2fac04c71c5c810f). + SetLinkCount(virtual.StatelessLeafLinkCount). + SetSizeBytes(123), + &attr2) + + // Unlinking it twice should cause the underlying leaf + // node to be unlinked. It should then no longer be + // resolvable. + wrappedLeaf.Unlink() + baseLeaf.EXPECT().Unlink() + wrappedLeaf.Unlink() + + var attr3 virtual.Attributes + wrappedLeaf.VirtualGetAttributes(ctx, attributesMask, &attr3) + require.Equal( + t, + (&virtual.Attributes{}). + SetChangeID(0). + SetFileHandle(fileHandle). + SetInodeNumber(0x2fac04c71c5c810f). + SetLinkCount(virtual.StatelessLeafLinkCount). + SetSizeBytes(123), + &attr3) + + _, s = handleAllocator.ResolveHandle(bytes.NewBuffer(fileHandle)) + require.Equal(t, virtual.StatusErrStale, s) + + // Attempting to link it again should fail, as files + // cannot be brought back after being unlinked. + require.Equal(t, virtual.StatusErrStale, wrappedLeaf.Link()) + + var attr4 virtual.Attributes + wrappedLeaf.VirtualGetAttributes(ctx, attributesMask, &attr4) + require.Equal( + t, + (&virtual.Attributes{}). + SetChangeID(0). + SetFileHandle(fileHandle). + SetInodeNumber(0x2fac04c71c5c810f). + SetLinkCount(virtual.StatelessLeafLinkCount). 
+ SetSizeBytes(123), + &attr4) + }) +} diff --git a/pkg/filesystem/virtual/nfsv4/BUILD.bazel b/pkg/filesystem/virtual/nfsv4/BUILD.bazel new file mode 100644 index 0000000..75ca6f1 --- /dev/null +++ b/pkg/filesystem/virtual/nfsv4/BUILD.bazel @@ -0,0 +1,52 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "nfsv4", + srcs = [ + "base_program.go", + "metrics_program.go", + "system_authenticator.go", + ], + importpath = "github.com/buildbarn/bb-remote-execution/pkg/filesystem/virtual/nfsv4", + visibility = ["//visibility:public"], + deps = [ + "//pkg/filesystem/virtual", + "@com_github_buildbarn_bb_storage//pkg/auth", + "@com_github_buildbarn_bb_storage//pkg/clock", + "@com_github_buildbarn_bb_storage//pkg/eviction", + "@com_github_buildbarn_bb_storage//pkg/filesystem", + "@com_github_buildbarn_bb_storage//pkg/filesystem/path", + "@com_github_buildbarn_bb_storage//pkg/random", + "@com_github_buildbarn_go_xdr//pkg/protocols/nfsv4", + "@com_github_buildbarn_go_xdr//pkg/protocols/rpcv2", + "@com_github_buildbarn_go_xdr//pkg/rpcserver", + "@com_github_buildbarn_go_xdr//pkg/runtime", + "@com_github_jmespath_go_jmespath//:go-jmespath", + "@com_github_prometheus_client_golang//prometheus", + ], +) + +go_test( + name = "nfsv4_test", + srcs = [ + "base_program_test.go", + "system_authenticator_test.go", + ], + deps = [ + ":nfsv4", + "//internal/mock", + "//pkg/filesystem/virtual", + "@com_github_buildbarn_bb_storage//pkg/auth", + "@com_github_buildbarn_bb_storage//pkg/eviction", + "@com_github_buildbarn_bb_storage//pkg/filesystem", + "@com_github_buildbarn_bb_storage//pkg/filesystem/path", + "@com_github_buildbarn_bb_storage//pkg/proto/auth", + "@com_github_buildbarn_bb_storage//pkg/testutil", + "@com_github_buildbarn_go_xdr//pkg/protocols/nfsv4", + "@com_github_buildbarn_go_xdr//pkg/protocols/rpcv2", + "@com_github_golang_mock//gomock", + "@com_github_jmespath_go_jmespath//:go-jmespath", + "@com_github_stretchr_testify//require", + 
"@org_golang_google_protobuf//types/known/structpb", + ], +) diff --git a/pkg/filesystem/virtual/nfsv4/base_program.go b/pkg/filesystem/virtual/nfsv4/base_program.go new file mode 100644 index 0000000..be57b4f --- /dev/null +++ b/pkg/filesystem/virtual/nfsv4/base_program.go @@ -0,0 +1,3421 @@ +package nfsv4 + +import ( + "bytes" + "context" + "io" + "math" + "sync" + "time" + + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/virtual" + "github.com/buildbarn/bb-storage/pkg/clock" + "github.com/buildbarn/bb-storage/pkg/filesystem" + "github.com/buildbarn/bb-storage/pkg/filesystem/path" + "github.com/buildbarn/bb-storage/pkg/random" + "github.com/buildbarn/go-xdr/pkg/protocols/nfsv4" + "github.com/buildbarn/go-xdr/pkg/protocols/rpcv2" + "github.com/buildbarn/go-xdr/pkg/runtime" + "github.com/prometheus/client_golang/prometheus" +) + +// stateIDOtherPrefixLength is the number of bytes of a state ID's +// 'other' field that are set to a constant value. This permits the +// server to detect whether state IDs belong to a previous incarnation +// of the server. 
+const stateIDOtherPrefixLength = 4 + +var ( + baseProgramPrometheusMetrics sync.Once + + baseProgramOpenOwnersCreated = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "buildbarn", + Subsystem: "nfsv4", + Name: "base_program_open_owners_created_total", + Help: "Number of open-owners created through NFSv4 OPEN operations.", + }) + baseProgramOpenOwnersRemoved = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "buildbarn", + Subsystem: "nfsv4", + Name: "base_program_open_owners_removed_total", + Help: "Number of open-owners removed due to inactivity.", + }) + + baseProgramOpenOwnerFilesCreated = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "buildbarn", + Subsystem: "nfsv4", + Name: "base_program_open_owner_files_created_total", + Help: "Number of open-owner files created through NFSv4 OPEN operations.", + }) + baseProgramOpenOwnerFilesRemoved = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "buildbarn", + Subsystem: "nfsv4", + Name: "base_program_open_owner_files_removed_total", + Help: "Number of open-owner files removed, either through NFSv4 CLOSE operations or due to inactivity on the open-owner.", + }) +) + +type baseProgram struct { + rootFileHandle fileHandle + handleResolver virtual.HandleResolver + rebootVerifier nfsv4.Verifier4 + stateIDOtherPrefix [stateIDOtherPrefixLength]byte + clock clock.Clock + enforcedLeaseTime time.Duration + announcedLeaseTime nfsv4.NfsLease4 + + lock sync.Mutex + now time.Time + randomNumberGenerator random.SingleThreadedGenerator + clientsByLongID map[string]*clientState + clientConfirmationsByKey map[clientConfirmationKey]*clientConfirmationState + clientConfirmationsByShortID map[nfsv4.Clientid4]*clientConfirmationState + openOwnerFilesByOther map[regularStateIDOther]*openOwnerFileState + openedFilesByHandle map[string]*openedFileState + lockOwnerFilesByOther map[regularStateIDOther]*lockOwnerFileState + idleClientConfirmations clientConfirmationState + unusedOpenOwners 
openOwnerState +} + +// NewBaseProgram creates an nfsv4.Nfs4Program that forwards all +// operations to a virtual file system. It implements most of the +// features of NFSv4.0. +func NewBaseProgram(rootDirectory virtual.Directory, handleResolver virtual.HandleResolver, randomNumberGenerator random.SingleThreadedGenerator, rebootVerifier nfsv4.Verifier4, stateIDOtherPrefix [stateIDOtherPrefixLength]byte, clock clock.Clock, enforcedLeaseTime, announcedLeaseTime time.Duration) nfsv4.Nfs4Program { + baseProgramPrometheusMetrics.Do(func() { + prometheus.MustRegister(baseProgramOpenOwnersCreated) + prometheus.MustRegister(baseProgramOpenOwnersRemoved) + + prometheus.MustRegister(baseProgramOpenOwnerFilesCreated) + prometheus.MustRegister(baseProgramOpenOwnerFilesRemoved) + }) + + var attributes virtual.Attributes + rootDirectory.VirtualGetAttributes(context.Background(), virtual.AttributesMaskFileHandle, &attributes) + p := &baseProgram{ + rootFileHandle: fileHandle{ + handle: attributes.GetFileHandle(), + node: virtual.DirectoryChild{}.FromDirectory(rootDirectory), + }, + handleResolver: handleResolver, + rebootVerifier: rebootVerifier, + stateIDOtherPrefix: stateIDOtherPrefix, + clock: clock, + enforcedLeaseTime: enforcedLeaseTime, + announcedLeaseTime: nfsv4.NfsLease4(announcedLeaseTime.Seconds()), + + randomNumberGenerator: randomNumberGenerator, + clientsByLongID: map[string]*clientState{}, + clientConfirmationsByKey: map[clientConfirmationKey]*clientConfirmationState{}, + clientConfirmationsByShortID: map[nfsv4.Clientid4]*clientConfirmationState{}, + openOwnerFilesByOther: map[regularStateIDOther]*openOwnerFileState{}, + openedFilesByHandle: map[string]*openedFileState{}, + lockOwnerFilesByOther: map[regularStateIDOther]*lockOwnerFileState{}, + } + p.idleClientConfirmations.previousIdle = &p.idleClientConfirmations + p.idleClientConfirmations.nextIdle = &p.idleClientConfirmations + p.unusedOpenOwners.previousUnused = &p.unusedOpenOwners + 
p.unusedOpenOwners.nextUnused = &p.unusedOpenOwners + return p +} + +func (*baseProgram) NfsV4Nfsproc4Null(ctx context.Context) error { + return nil +} + +func (p *baseProgram) NfsV4Nfsproc4Compound(ctx context.Context, arguments *nfsv4.Compound4args) (*nfsv4.Compound4res, error) { + // Create compound state and process all operations sequentially + // against it. + state := compoundState{program: p} + resarray := make([]nfsv4.NfsResop4, 0, len(arguments.Argarray)) + status := nfsv4.NFS4_OK + for _, operation := range arguments.Argarray { + switch op := operation.(type) { + case *nfsv4.NfsArgop4_OP_ACCESS: + res := state.opAccess(ctx, &op.Opaccess) + resarray = append(resarray, &nfsv4.NfsResop4_OP_ACCESS{ + Opaccess: res, + }) + status = res.GetStatus() + case *nfsv4.NfsArgop4_OP_CLOSE: + res := state.opClose(&op.Opclose) + resarray = append(resarray, &nfsv4.NfsResop4_OP_CLOSE{ + Opclose: res, + }) + status = res.GetStatus() + case *nfsv4.NfsArgop4_OP_COMMIT: + res := state.opCommit(&op.Opcommit) + resarray = append(resarray, &nfsv4.NfsResop4_OP_COMMIT{ + Opcommit: res, + }) + status = res.GetStatus() + case *nfsv4.NfsArgop4_OP_CREATE: + res := state.opCreate(ctx, &op.Opcreate) + resarray = append(resarray, &nfsv4.NfsResop4_OP_CREATE{ + Opcreate: res, + }) + status = res.GetStatus() + case *nfsv4.NfsArgop4_OP_DELEGPURGE: + res := state.opDelegpurge(&op.Opdelegpurge) + resarray = append(resarray, &nfsv4.NfsResop4_OP_DELEGPURGE{ + Opdelegpurge: res, + }) + status = res.Status + case *nfsv4.NfsArgop4_OP_DELEGRETURN: + res := state.opDelegreturn(&op.Opdelegreturn) + resarray = append(resarray, &nfsv4.NfsResop4_OP_DELEGRETURN{ + Opdelegreturn: res, + }) + status = res.Status + case *nfsv4.NfsArgop4_OP_GETATTR: + res := state.opGetattr(ctx, &op.Opgetattr) + resarray = append(resarray, &nfsv4.NfsResop4_OP_GETATTR{ + Opgetattr: res, + }) + status = res.GetStatus() + case *nfsv4.NfsArgop4_OP_GETFH: + res := state.opGetfh() + resarray = append(resarray, 
&nfsv4.NfsResop4_OP_GETFH{ + Opgetfh: res, + }) + status = res.GetStatus() + case *nfsv4.NfsArgop4_OP_LINK: + res := state.opLink(ctx, &op.Oplink) + resarray = append(resarray, &nfsv4.NfsResop4_OP_LINK{ + Oplink: res, + }) + status = res.GetStatus() + case *nfsv4.NfsArgop4_OP_LOCK: + res := state.opLock(&op.Oplock) + resarray = append(resarray, &nfsv4.NfsResop4_OP_LOCK{ + Oplock: res, + }) + status = res.GetStatus() + case *nfsv4.NfsArgop4_OP_LOCKT: + res := state.opLockt(&op.Oplockt) + resarray = append(resarray, &nfsv4.NfsResop4_OP_LOCKT{ + Oplockt: res, + }) + status = res.GetStatus() + case *nfsv4.NfsArgop4_OP_LOCKU: + res := state.opLocku(&op.Oplocku) + resarray = append(resarray, &nfsv4.NfsResop4_OP_LOCKU{ + Oplocku: res, + }) + status = res.GetStatus() + case *nfsv4.NfsArgop4_OP_LOOKUP: + res := state.opLookup(ctx, &op.Oplookup) + resarray = append(resarray, &nfsv4.NfsResop4_OP_LOOKUP{ + Oplookup: res, + }) + status = res.Status + case *nfsv4.NfsArgop4_OP_LOOKUPP: + res := state.opLookupp(ctx) + resarray = append(resarray, &nfsv4.NfsResop4_OP_LOOKUPP{ + Oplookupp: res, + }) + status = res.Status + case *nfsv4.NfsArgop4_OP_NVERIFY: + res := state.opNverify(ctx, &op.Opnverify) + resarray = append(resarray, &nfsv4.NfsResop4_OP_NVERIFY{ + Opnverify: res, + }) + status = res.Status + case *nfsv4.NfsArgop4_OP_OPEN: + res := state.opOpen(ctx, &op.Opopen) + resarray = append(resarray, &nfsv4.NfsResop4_OP_OPEN{ + Opopen: res, + }) + status = res.GetStatus() + case *nfsv4.NfsArgop4_OP_OPENATTR: + res := state.opOpenattr(&op.Opopenattr) + resarray = append(resarray, &nfsv4.NfsResop4_OP_OPENATTR{ + Opopenattr: res, + }) + status = res.Status + case *nfsv4.NfsArgop4_OP_OPEN_CONFIRM: + res := state.opOpenConfirm(&op.OpopenConfirm) + resarray = append(resarray, &nfsv4.NfsResop4_OP_OPEN_CONFIRM{ + OpopenConfirm: res, + }) + status = res.GetStatus() + case *nfsv4.NfsArgop4_OP_OPEN_DOWNGRADE: + res := state.opOpenDowngrade(&op.OpopenDowngrade) + resarray = append(resarray, 
&nfsv4.NfsResop4_OP_OPEN_DOWNGRADE{ + OpopenDowngrade: res, + }) + status = res.GetStatus() + case *nfsv4.NfsArgop4_OP_PUTFH: + res := state.opPutfh(&op.Opputfh) + resarray = append(resarray, &nfsv4.NfsResop4_OP_PUTFH{ + Opputfh: res, + }) + status = res.Status + case *nfsv4.NfsArgop4_OP_PUTPUBFH: + res := state.opPutpubfh() + resarray = append(resarray, &nfsv4.NfsResop4_OP_PUTPUBFH{ + Opputpubfh: res, + }) + status = res.Status + case *nfsv4.NfsArgop4_OP_PUTROOTFH: + res := state.opPutrootfh() + resarray = append(resarray, &nfsv4.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: res, + }) + status = res.Status + case *nfsv4.NfsArgop4_OP_READ: + res := state.opRead(ctx, &op.Opread) + resarray = append(resarray, &nfsv4.NfsResop4_OP_READ{ + Opread: res, + }) + status = res.GetStatus() + case *nfsv4.NfsArgop4_OP_READDIR: + res := state.opReaddir(ctx, &op.Opreaddir) + resarray = append(resarray, &nfsv4.NfsResop4_OP_READDIR{ + Opreaddir: res, + }) + status = res.GetStatus() + case *nfsv4.NfsArgop4_OP_READLINK: + res := state.opReadlink(ctx) + resarray = append(resarray, &nfsv4.NfsResop4_OP_READLINK{ + Opreadlink: res, + }) + status = res.GetStatus() + case *nfsv4.NfsArgop4_OP_RELEASE_LOCKOWNER: + res := state.opReleaseLockowner(&op.OpreleaseLockowner) + resarray = append(resarray, &nfsv4.NfsResop4_OP_RELEASE_LOCKOWNER{ + OpreleaseLockowner: res, + }) + status = res.Status + case *nfsv4.NfsArgop4_OP_REMOVE: + res := state.opRemove(&op.Opremove) + resarray = append(resarray, &nfsv4.NfsResop4_OP_REMOVE{ + Opremove: res, + }) + status = res.GetStatus() + case *nfsv4.NfsArgop4_OP_RENAME: + res := state.opRename(&op.Oprename) + resarray = append(resarray, &nfsv4.NfsResop4_OP_RENAME{ + Oprename: res, + }) + status = res.GetStatus() + case *nfsv4.NfsArgop4_OP_RENEW: + res := state.opRenew(&op.Oprenew) + resarray = append(resarray, &nfsv4.NfsResop4_OP_RENEW{ + Oprenew: res, + }) + status = res.Status + case *nfsv4.NfsArgop4_OP_RESTOREFH: + res := state.opRestorefh() + resarray = 
append(resarray, &nfsv4.NfsResop4_OP_RESTOREFH{ + Oprestorefh: res, + }) + status = res.Status + case *nfsv4.NfsArgop4_OP_SAVEFH: + res := state.opSavefh() + resarray = append(resarray, &nfsv4.NfsResop4_OP_SAVEFH{ + Opsavefh: res, + }) + status = res.Status + case *nfsv4.NfsArgop4_OP_SECINFO: + res := state.opSecinfo(ctx, &op.Opsecinfo) + resarray = append(resarray, &nfsv4.NfsResop4_OP_SECINFO{ + Opsecinfo: res, + }) + status = res.GetStatus() + case *nfsv4.NfsArgop4_OP_SETATTR: + res := state.opSetattr(ctx, &op.Opsetattr) + resarray = append(resarray, &nfsv4.NfsResop4_OP_SETATTR{ + Opsetattr: res, + }) + status = res.Status + case *nfsv4.NfsArgop4_OP_SETCLIENTID: + res := state.opSetclientid(&op.Opsetclientid) + resarray = append(resarray, &nfsv4.NfsResop4_OP_SETCLIENTID{ + Opsetclientid: res, + }) + status = res.GetStatus() + case *nfsv4.NfsArgop4_OP_SETCLIENTID_CONFIRM: + res := state.opSetclientidConfirm(&op.OpsetclientidConfirm) + resarray = append(resarray, &nfsv4.NfsResop4_OP_SETCLIENTID_CONFIRM{ + OpsetclientidConfirm: res, + }) + status = res.Status + case *nfsv4.NfsArgop4_OP_VERIFY: + res := state.opVerify(ctx, &op.Opverify) + resarray = append(resarray, &nfsv4.NfsResop4_OP_VERIFY{ + Opverify: res, + }) + status = res.Status + case *nfsv4.NfsArgop4_OP_WRITE: + res := state.opWrite(ctx, &op.Opwrite) + resarray = append(resarray, &nfsv4.NfsResop4_OP_WRITE{ + Opwrite: res, + }) + status = res.GetStatus() + default: + res := nfsv4.Illegal4res{Status: nfsv4.NFS4ERR_OP_ILLEGAL} + resarray = append(resarray, &nfsv4.NfsResop4_OP_ILLEGAL{ + Opillegal: res, + }) + status = res.Status + } + if status != nfsv4.NFS4_OK { + // Terminate evaluation of further operations + // upon failure. + break + } + } + return &nfsv4.Compound4res{ + Status: status, + Tag: arguments.Tag, + Resarray: resarray, + }, nil +} + +// enter acquires the lock on the NFSv4 server. 
After acquiring the +// lock, it cleans up state belonging to clients and open-owners that +// have stopped contacting the server. +func (p *baseProgram) enter() { + for { + now := p.clock.Now() + p.lock.Lock() + if p.now.Before(now) { + p.now = now + } + + // Remove clients that have not renewed their state in + // some time. Close all of the files and release all + // locks owned by these clients. + var ll leavesToClose + minimumLastSeen := p.now.Add(-p.enforcedLeaseTime) + for p.idleClientConfirmations.nextIdle != &p.idleClientConfirmations && p.idleClientConfirmations.nextIdle.lastSeen.Before(minimumLastSeen) { + p.idleClientConfirmations.nextIdle.remove(p, &ll) + } + + // Remove open-owners that no longer have any open files + // associated with them, or are unconfirmed, and have + // not been used for some time. If the client decides to + // use the same open-owner once again, the next OPEN + // operation will need to be confirmed using + // OPEN_CONFIRM. + for p.unusedOpenOwners.nextUnused != &p.unusedOpenOwners && p.unusedOpenOwners.nextUnused.lastUsed.Before(minimumLastSeen) { + p.unusedOpenOwners.nextUnused.remove(p, &ll) + } + + // If the code above ended up yielding files that need + // to be closed, we close the files and retry. + if ll.empty() { + return + } + p.lock.Unlock() + ll.closeAll() + } +} + +func (p *baseProgram) leave() { + p.lock.Unlock() +} + +// getConfirmedClientByShortID looks up a confirmed client by short +// client ID. 
+func (p *baseProgram) getConfirmedClientByShortID(shortID nfsv4.Clientid4) (*confirmedClientState, nfsv4.Nfsstat4) { + clientConfirmation, ok := p.clientConfirmationsByShortID[shortID] + if !ok { + return nil, nfsv4.NFS4ERR_STALE_CLIENTID + } + confirmedClient := clientConfirmation.client.confirmed + if confirmedClient == nil || confirmedClient.confirmation != clientConfirmation { + return nil, nfsv4.NFS4ERR_STALE_CLIENTID + } + return confirmedClient, nfsv4.NFS4_OK +} + +// getOpenOwnerByOtherForTransaction looks up an open-owner by +// open-owner state ID. It waits for any existing transactions to +// complete. This makes it possible to start a new transaction. +func (p *baseProgram) getOpenOwnerByOtherForTransaction(other regularStateIDOther) (*openOwnerState, nfsv4.Nfsstat4) { + for { + oofs, ok := p.openOwnerFilesByOther[other] + if !ok { + return nil, nfsv4.NFS4ERR_BAD_STATEID + } + if oos := oofs.openOwner; oos.waitForCurrentTransactionCompletion(p) { + return oos, nfsv4.NFS4_OK + } + } +} + +// getLockOwnerByOtherForTransaction looks up a lock-owner by lock-owner +// state ID, for the purpose of starting a new transaction. +// +// Unlike getOpenOwnerByOtherForTransaction() it does not need to wait +// for other transactions to complete, as we don't need to support any +// blocking operations against locks. +func (p *baseProgram) getLockOwnerByOtherForTransaction(other regularStateIDOther) (*lockOwnerState, nfsv4.Nfsstat4) { + lofs, ok := p.lockOwnerFilesByOther[other] + if !ok { + return nil, nfsv4.NFS4ERR_BAD_STATEID + } + return lofs.lockOwner, nfsv4.NFS4_OK +} + +// newRegularStateID allocates a new open-owner or lock-owner state ID. +func (p *baseProgram) newRegularStateID(seqID nfsv4.Seqid4) (stateID regularStateID) { + stateID.seqID = seqID + p.randomNumberGenerator.Read(stateID.other[:]) + return +} + +// internalizeStateID converts a state ID that's provided as part of a +// request to the format that's used internally. 
+// +// This method returns a nil state ID when the provided state ID is +// special (i.e., an anonymous state ID or READ bypass state ID). +func (p *baseProgram) internalizeStateID(stateID *nfsv4.Stateid4) (*regularStateID, nfsv4.Nfsstat4) { + switch stateID.Other { + case [nfsv4.NFS4_OTHER_SIZE]byte{}: + // Anonymous state ID. + if stateID.Seqid != 0 { + return nil, nfsv4.NFS4ERR_BAD_STATEID + } + return nil, nfsv4.NFS4_OK + case [...]byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}: + // READ bypass state ID. + if stateID.Seqid != 0xffffffff { + return nil, nfsv4.NFS4ERR_BAD_STATEID + } + return nil, nfsv4.NFS4_OK + default: + // Regular state ID. Only permit state IDs with a given + // prefix, so that we can accurately distinguish between + // NFS4ERR_STALE_STATEID and NFS4ERR_BAD_STATEID. + var prefix [stateIDOtherPrefixLength]byte + copy(prefix[:], stateID.Other[:]) + if prefix != p.stateIDOtherPrefix { + // State ID is from before a reboot/restart. + return nil, nfsv4.NFS4ERR_STALE_STATEID + } + internalStateID := ®ularStateID{seqID: stateID.Seqid} + copy(internalStateID.other[:], stateID.Other[stateIDOtherPrefixLength:]) + return internalStateID, nfsv4.NFS4_OK + } +} + +// internalizeRegularStateID is identical to internalizeStateID, except +// that it denies the use of special state IDs. +func (p *baseProgram) internalizeRegularStateID(stateID *nfsv4.Stateid4) (regularStateID, nfsv4.Nfsstat4) { + internalStateID, st := p.internalizeStateID(stateID) + if st != nfsv4.NFS4_OK { + return regularStateID{}, st + } + if internalStateID == nil { + return regularStateID{}, nfsv4.NFS4ERR_BAD_STATEID + } + return *internalStateID, nfsv4.NFS4_OK +} + +// externalizeStateID converts a regular state ID that's encoded in the +// internal format to the format used by the NFSv4 protocol. 
+func (p *baseProgram) externalizeStateID(stateID regularStateID) nfsv4.Stateid4 { + externalStateID := nfsv4.Stateid4{Seqid: stateID.seqID} + copy(externalStateID.Other[:], p.stateIDOtherPrefix[:]) + copy(externalStateID.Other[stateIDOtherPrefixLength:], stateID.other[:]) + return externalStateID +} + +// writeAttributes converts file attributes returned by the virtual file +// system into the NFSv4 wire format. It also returns a bitmask +// indicating which attributes were actually emitted. +func (p *baseProgram) writeAttributes(attributes *virtual.Attributes, attrRequest nfsv4.Bitmap4, w io.Writer) nfsv4.Bitmap4 { + attrMask := make(nfsv4.Bitmap4, len(attrRequest)) + if len(attrRequest) > 0 { + // Attributes 0 to 31. + f := attrRequest[0] + var s uint32 + if b := uint32(1 << nfsv4.FATTR4_SUPPORTED_ATTRS); f&b != 0 { + s |= b + nfsv4.WriteBitmap4(w, nfsv4.Bitmap4{ + (1 << nfsv4.FATTR4_SUPPORTED_ATTRS) | + (1 << nfsv4.FATTR4_TYPE) | + (1 << nfsv4.FATTR4_FH_EXPIRE_TYPE) | + (1 << nfsv4.FATTR4_CHANGE) | + (1 << nfsv4.FATTR4_SIZE) | + (1 << nfsv4.FATTR4_LINK_SUPPORT) | + (1 << nfsv4.FATTR4_SYMLINK_SUPPORT) | + (1 << nfsv4.FATTR4_NAMED_ATTR) | + (1 << nfsv4.FATTR4_FSID) | + (1 << nfsv4.FATTR4_UNIQUE_HANDLES) | + (1 << nfsv4.FATTR4_LEASE_TIME) | + (1 << nfsv4.FATTR4_RDATTR_ERROR) | + (1 << nfsv4.FATTR4_FILEHANDLE) | + (1 << nfsv4.FATTR4_FILEID), + (1 << (nfsv4.FATTR4_MODE - 32)) | + (1 << (nfsv4.FATTR4_NUMLINKS - 32)) | + (1 << (nfsv4.FATTR4_TIME_ACCESS - 32)) | + (1 << (nfsv4.FATTR4_TIME_METADATA - 32)) | + (1 << (nfsv4.FATTR4_TIME_MODIFY - 32)), + }) + } + if b := uint32(1 << nfsv4.FATTR4_TYPE); f&b != 0 { + s |= b + switch attributes.GetFileType() { + case filesystem.FileTypeRegularFile: + nfsv4.NF4REG.WriteTo(w) + case filesystem.FileTypeDirectory: + nfsv4.NF4DIR.WriteTo(w) + case filesystem.FileTypeSymlink: + nfsv4.NF4LNK.WriteTo(w) + case filesystem.FileTypeBlockDevice: + nfsv4.NF4BLK.WriteTo(w) + case filesystem.FileTypeCharacterDevice: + nfsv4.NF4CHR.WriteTo(w) 
+ case filesystem.FileTypeFIFO: + nfsv4.NF4FIFO.WriteTo(w) + case filesystem.FileTypeSocket: + nfsv4.NF4SOCK.WriteTo(w) + default: + panic("Unknown file type") + } + } + if b := uint32(1 << nfsv4.FATTR4_FH_EXPIRE_TYPE); f&b != 0 { + s |= b + // Using HandleResolver, we can resolve any + // object in the file system until it is removed + // from the file system. + nfsv4.WriteUint32T(w, nfsv4.FH4_PERSISTENT) + } + if b := uint32(1 << nfsv4.FATTR4_CHANGE); f&b != 0 { + s |= b + nfsv4.WriteChangeid4(w, attributes.GetChangeID()) + } + if b := uint32(1 << nfsv4.FATTR4_SIZE); f&b != 0 { + sizeBytes, ok := attributes.GetSizeBytes() + if !ok { + panic("FATTR4_SIZE is a required attribute") + } + s |= b + nfsv4.WriteUint64T(w, sizeBytes) + } + if b := uint32(1 << nfsv4.FATTR4_LINK_SUPPORT); f&b != 0 { + s |= b + runtime.WriteBool(w, true) + } + if b := uint32(1 << nfsv4.FATTR4_SYMLINK_SUPPORT); f&b != 0 { + s |= b + runtime.WriteBool(w, true) + } + if b := uint32(1 << nfsv4.FATTR4_NAMED_ATTR); f&b != 0 { + s |= b + runtime.WriteBool(w, false) + } + if b := uint32(1 << nfsv4.FATTR4_FSID); f&b != 0 { + s |= b + fsid := nfsv4.Fsid4{ + Major: 1, + Minor: 1, + } + fsid.WriteTo(w) + } + if b := uint32(1 << nfsv4.FATTR4_UNIQUE_HANDLES); f&b != 0 { + s |= b + runtime.WriteBool(w, true) + } + if b := uint32(1 << nfsv4.FATTR4_LEASE_TIME); f&b != 0 { + s |= b + nfsv4.WriteNfsLease4(w, p.announcedLeaseTime) + } + if b := uint32(1 << nfsv4.FATTR4_FILEHANDLE); f&b != 0 { + s |= b + nfsv4.WriteNfsFh4(w, attributes.GetFileHandle()) + } + if b := uint32(1 << nfsv4.FATTR4_FILEID); f&b != 0 { + s |= b + nfsv4.WriteUint64T(w, attributes.GetInodeNumber()) + } + attrMask[0] = s + } + if len(attrRequest) > 1 { + // Attributes 32 to 63. 
+ f := attrRequest[1] + var s uint32 + if b := uint32(1 << (nfsv4.FATTR4_MODE - 32)); f&b != 0 { + if permissions, ok := attributes.GetPermissions(); ok { + s |= b + nfsv4.WriteMode4(w, permissions.ToMode()) + } + } + if b := uint32(1 << (nfsv4.FATTR4_NUMLINKS - 32)); f&b != 0 { + s |= b + nfsv4.WriteUint32T(w, attributes.GetLinkCount()) + } + if b := uint32(1 << (nfsv4.FATTR4_TIME_ACCESS - 32)); f&b != 0 { + s |= b + deterministicNfstime4.WriteTo(w) + } + if b := uint32(1 << (nfsv4.FATTR4_TIME_METADATA - 32)); f&b != 0 { + s |= b + deterministicNfstime4.WriteTo(w) + } + if b := uint32(1 << (nfsv4.FATTR4_TIME_MODIFY - 32)); f&b != 0 { + s |= b + t := deterministicNfstime4 + if lastDataModificationTime, ok := attributes.GetLastDataModificationTime(); ok { + t = timeToNfstime4(lastDataModificationTime) + } + t.WriteTo(w) + } + attrMask[1] = s + } + return attrMask +} + +// attributesToFattr4 converts attributes returned by the virtual file +// system layer to an NFSv4 fattr4 structure. As required by the +// protocol, attributes are stored in the order of the FATTR4_* +// constants. +func (p *baseProgram) attributesToFattr4(attributes *virtual.Attributes, attrRequest nfsv4.Bitmap4) nfsv4.Fattr4 { + w := bytes.NewBuffer(nil) + attrMask := p.writeAttributes(attributes, attrRequest, w) + return nfsv4.Fattr4{ + Attrmask: attrMask, + AttrVals: w.Bytes(), + } +} + +// regularStateID is an internal representation of non-special +// open-owner or lock-owner state IDs. +type regularStateID struct { + seqID nfsv4.Seqid4 + other regularStateIDOther +} + +// regularStateIDOther is an internal representation of the 'other' +// field of non-special open-owner or lock-owner state IDs. +type regularStateIDOther [nfsv4.NFS4_OTHER_SIZE - stateIDOtherPrefixLength]byte + +// compoundState contains the state that needs to be tracked during the +// lifetime of a single NFSv4 COMPOUND procedure. 
It provides +// implementations of each of the operations contained in the COMPOUND +// procedure. +type compoundState struct { + program *baseProgram + + currentFileHandle fileHandle + savedFileHandle fileHandle +} + +// getOpenOwnerFileByStateID obtains an open-owner file by open state +// ID. It also checks whether the open state ID corresponds to the +// current file handle, and that the client provided sequence ID matches +// the server's value. +func (s *compoundState) getOpenOwnerFileByStateID(stateID regularStateID, allowUnconfirmed bool) (*openOwnerFileState, nfsv4.Nfsstat4) { + p := s.program + oofs, ok := p.openOwnerFilesByOther[stateID.other] + if !ok { + return nil, nfsv4.NFS4ERR_BAD_STATEID + } + if !s.currentFileHandle.node.IsSet() { + return nil, nfsv4.NFS4ERR_NOFILEHANDLE + } + if oofs.shareAccess == 0 { + // File has already been closed. + return nil, nfsv4.NFS4ERR_BAD_STATEID + } + if !bytes.Equal(s.currentFileHandle.handle, oofs.openedFile.handle) { + return nil, nfsv4.NFS4ERR_BAD_STATEID + } + if !oofs.openOwner.confirmed && !allowUnconfirmed { + // The state ID was returned by a previous OPEN call + // that still requires a call to OPEN_CONFIRM. We should + // treat the state ID as non-existent until OPEN_CONFIRM + // has been called. + // + // More details: RFC 7530, section 16.18.5, paragraph 6. + return nil, nfsv4.NFS4ERR_BAD_STATEID + } + if st := compareStateSeqID(stateID.seqID, oofs.stateID.seqID); st != nfsv4.NFS4_OK { + return nil, st + } + return oofs, nfsv4.NFS4_OK +} + +// getLockOwnerFileByStateID obtains a lock-owner file by state ID. It +// also checks whether the lock state ID corresponds to the current file +// handle, and that the client provided sequence ID matches the server's +// value. 
+func (s *compoundState) getLockOwnerFileByStateID(stateID regularStateID) (*lockOwnerFileState, nfsv4.Nfsstat4) { + p := s.program + lofs, ok := p.lockOwnerFilesByOther[stateID.other] + if !ok { + return nil, nfsv4.NFS4ERR_BAD_STATEID + } + if !s.currentFileHandle.node.IsSet() { + return nil, nfsv4.NFS4ERR_NOFILEHANDLE + } + if !bytes.Equal(s.currentFileHandle.handle, lofs.openOwnerFile.openedFile.handle) { + return nil, nfsv4.NFS4ERR_BAD_STATEID + } + if st := compareStateSeqID(stateID.seqID, lofs.stateID.seqID); st != nfsv4.NFS4_OK { + return nil, st + } + return lofs, nfsv4.NFS4_OK +} + +// getOpenedLeaf is used by READ and WRITE operations to obtain an +// opened leaf corresponding to a file handle and open-owner state ID. +// +// When a special state ID is provided, it ensures the file is +// temporarily opened for the duration of the operation. When a +// non-special state ID is provided, it ensures that the file was +// originally opened with the correct share access mask. +func (s *compoundState) getOpenedLeaf(ctx context.Context, stateID *nfsv4.Stateid4, shareAccess virtual.ShareMask) (virtual.Leaf, func(), nfsv4.Nfsstat4) { + p := s.program + internalStateID, st := p.internalizeStateID(stateID) + if st != nfsv4.NFS4_OK { + return nil, nil, st + } + + if internalStateID == nil { + // Client provided the anonymous state ID or READ bypass + // state ID. Temporarily open the file to perform the + // operation. 
+ currentLeaf, st := s.currentFileHandle.getLeaf() + if st != nfsv4.NFS4_OK { + return nil, nil, st + } + if vs := currentLeaf.VirtualOpenSelf( + ctx, + shareAccess, + &virtual.OpenExistingOptions{}, + 0, + &virtual.Attributes{}, + ); vs != virtual.StatusOK { + return nil, nil, toNFSv4Status(vs) + } + return currentLeaf, func() { + currentLeaf.VirtualClose(shareAccess) + }, nfsv4.NFS4_OK + } + + p.enter() + defer p.leave() + + oofs, st := s.getOpenOwnerFileByStateID(*internalStateID, false) + switch st { + case nfsv4.NFS4_OK: + if shareAccess&^oofs.shareAccess != 0 { + // Attempting to write to a file opened for + // reading, or vice versa. + return nil, nil, nfsv4.NFS4ERR_OPENMODE + } + case nfsv4.NFS4ERR_BAD_STATEID: + // Client may have provided a lock state ID. + lofs, st := s.getLockOwnerFileByStateID(*internalStateID) + if st != nfsv4.NFS4_OK { + return nil, nil, st + } + oofs = lofs.openOwnerFile + if shareAccess&^lofs.shareAccess != 0 { + // Attempted to write to a file that was opened + // for reading at the time the lock-owner state + // was established, or vice versa. + // + // More details: RFC 7530, section 9.1.6, + // paragraph 7. + return nil, nil, nfsv4.NFS4ERR_OPENMODE + } + default: + return nil, nil, st + } + + // Ensure that both the client and file are not released while + // the I/O operation is taking place. + clientConfirmation := oofs.openOwner.confirmedClient.confirmation + clientConfirmation.hold(p) + clonedShareAccess := oofs.shareCount.clone(shareAccess) + return oofs.openedFile.leaf, func() { + var ll leavesToClose + p.enter() + oofs.downgradeShareAccess(&clonedShareAccess, 0, &ll) + clientConfirmation.release(p) + p.leave() + ll.closeAll() + }, nfsv4.NFS4_OK +} + +// verifyAttributes is the common implementation of the VERIFY and +// NVERIFY operations. 
+func (s *compoundState) verifyAttributes(ctx context.Context, fattr *nfsv4.Fattr4) nfsv4.Nfsstat4 { + currentNode, _, st := s.currentFileHandle.getNode() + if st != nfsv4.NFS4_OK { + return st + } + + // Request attributes of the file. Don't actually store them in + // a fattr4 structure. Use comparingWriter to check whether the + // generated attributes are equal to the ones provided. + attrRequest := fattr.Attrmask + var attributes virtual.Attributes + currentNode.VirtualGetAttributes(ctx, attrRequestToAttributesMask(attrRequest), &attributes) + w := comparingWriter{ + reference: fattr.AttrVals, + status: nfsv4.NFS4ERR_SAME, + } + p := s.program + attrMask := p.writeAttributes(&attributes, attrRequest, &w) + + for i := 0; i < len(attrRequest); i++ { + if attrMask[i] != attrRequest[i] { + // One or more of the provided attributes were + // not generated. This either means that the + // client provided unsupported attributes or + // ones that are write-only. + if attrRequest[0]&(1< 0 { + // Provided attributes contain trailing data. + return nfsv4.NFS4ERR_BADXDR + } + return w.status +} + +func (s *compoundState) opAccess(ctx context.Context, args *nfsv4.Access4args) nfsv4.Access4res { + currentNode, isDirectory, st := s.currentFileHandle.getNode() + if st != nfsv4.NFS4_OK { + return &nfsv4.Access4res_default{Status: st} + } + + // Depending on whether the node is a directory or a leaf, we + // need to report different NFSv4 acccess permissions. + readMask := uint32(nfsv4.ACCESS4_READ) + writeMask := uint32(nfsv4.ACCESS4_EXTEND | nfsv4.ACCESS4_MODIFY) + executeMask := uint32(0) + if isDirectory { + writeMask |= nfsv4.ACCESS4_DELETE + executeMask |= nfsv4.ACCESS4_LOOKUP + } else { + executeMask |= nfsv4.ACCESS4_EXECUTE + } + + // Request node permissions and convert them to NFSv4 values. 
+ var attributes virtual.Attributes + currentNode.VirtualGetAttributes(ctx, virtual.AttributesMaskPermissions, &attributes) + permissions, ok := attributes.GetPermissions() + if !ok { + panic("Permissions attribute requested, but not returned") + } + var access nfsv4.Uint32T + if permissions&virtual.PermissionsRead != 0 { + access |= readMask + } + if permissions&virtual.PermissionsWrite != 0 { + access |= writeMask + } + if permissions&virtual.PermissionsExecute != 0 { + access |= executeMask + } + + return &nfsv4.Access4res_NFS4_OK{ + Resok4: nfsv4.Access4resok{ + Supported: (readMask | writeMask | executeMask) & args.Access, + Access: access & args.Access, + }, + } +} + +func (s *compoundState) opClose(args *nfsv4.Close4args) nfsv4.Close4res { + p := s.program + openStateID, st := p.internalizeRegularStateID(&args.OpenStateid) + if st != nfsv4.NFS4_OK { + return &nfsv4.Close4res_default{Status: st} + } + + var ll leavesToClose + defer ll.closeAll() + + p.enter() + defer p.leave() + + oos, st := p.getOpenOwnerByOtherForTransaction(openStateID.other) + if st != nfsv4.NFS4_OK { + return &nfsv4.Close4res_default{Status: st} + } + transaction, lastResponse, st := oos.startTransaction(p, args.Seqid, &ll, unconfirmedOpenOwnerPolicyDeny) + if st != nfsv4.NFS4_OK { + if r, ok := lastResponse.(nfsv4.Close4res); ok { + if okResponse, ok := lastResponse.(*nfsv4.Close4res_NFS4_OK); !ok || isNextStateID(&okResponse.OpenStateid, &args.OpenStateid) { + return r + } + } + return &nfsv4.Close4res_default{Status: st} + } + response, closedFile := s.txClose(openStateID, &ll) + transaction.complete(&openOwnerLastResponse{ + response: response, + closedFile: closedFile, + }) + return response +} + +func (s *compoundState) txClose(openStateID regularStateID, ll *leavesToClose) (nfsv4.Close4res, *openOwnerFileState) { + oofs, st := s.getOpenOwnerFileByStateID(openStateID, false) + if st != nfsv4.NFS4_OK { + return &nfsv4.Close4res_default{Status: st}, nil + } + + // Only half-close the 
file, so that the state ID remains valid + // for doing replays of the CLOSE request. + // + // More details: RFC 7530, section 9.10.1. + p := s.program + oofs.removeStart(p, ll) + oofs.stateID.seqID = nextSeqID(oofs.stateID.seqID) + + return &nfsv4.Close4res_NFS4_OK{ + OpenStateid: p.externalizeStateID(oofs.stateID), + }, oofs +} + +func (s *compoundState) opCommit(args *nfsv4.Commit4args) nfsv4.Commit4res { + // As this implementation is purely built for the purpose of + // doing builds, there is no need to actually commit to storage. + if _, st := s.currentFileHandle.getLeaf(); st != nfsv4.NFS4_OK { + return &nfsv4.Commit4res_default{Status: st} + } + return &nfsv4.Commit4res_NFS4_OK{ + Resok4: nfsv4.Commit4resok{ + Writeverf: s.program.rebootVerifier, + }, + } +} + +func (s *compoundState) opCreate(ctx context.Context, args *nfsv4.Create4args) nfsv4.Create4res { + currentDirectory, st := s.currentFileHandle.getDirectory() + if st != nfsv4.NFS4_OK { + return &nfsv4.Create4res_default{Status: st} + } + name, st := nfsv4NewComponent(args.Objname) + if st != nfsv4.NFS4_OK { + return &nfsv4.Create4res_default{Status: st} + } + + var attributes virtual.Attributes + var changeInfo virtual.ChangeInfo + var fileHandle fileHandle + var vs virtual.Status + switch objectType := args.Objtype.(type) { + case *nfsv4.Createtype4_NF4BLK, *nfsv4.Createtype4_NF4CHR: + // Character and block devices can only be provided as + // part of input roots, if workers are set up to provide + // them. They can't be created through the virtual file + // system. 
+ return &nfsv4.Create4res_default{Status: nfsv4.NFS4ERR_PERM} + case *nfsv4.Createtype4_NF4DIR: + var directory virtual.Directory + directory, changeInfo, vs = currentDirectory.VirtualMkdir(name, virtual.AttributesMaskFileHandle, &attributes) + fileHandle.node = virtual.DirectoryChild{}.FromDirectory(directory) + case *nfsv4.Createtype4_NF4FIFO: + var leaf virtual.Leaf + leaf, changeInfo, vs = currentDirectory.VirtualMknod(ctx, name, filesystem.FileTypeFIFO, virtual.AttributesMaskFileHandle, &attributes) + fileHandle.node = virtual.DirectoryChild{}.FromLeaf(leaf) + case *nfsv4.Createtype4_NF4LNK: + var leaf virtual.Leaf + leaf, changeInfo, vs = currentDirectory.VirtualSymlink(ctx, objectType.Linkdata, name, virtual.AttributesMaskFileHandle, &attributes) + fileHandle.node = virtual.DirectoryChild{}.FromLeaf(leaf) + case *nfsv4.Createtype4_NF4SOCK: + var leaf virtual.Leaf + leaf, changeInfo, vs = currentDirectory.VirtualMknod(ctx, name, filesystem.FileTypeSocket, virtual.AttributesMaskFileHandle, &attributes) + fileHandle.node = virtual.DirectoryChild{}.FromLeaf(leaf) + default: + return &nfsv4.Create4res_default{Status: nfsv4.NFS4ERR_BADTYPE} + } + if vs != virtual.StatusOK { + return &nfsv4.Create4res_default{Status: toNFSv4Status(vs)} + } + fileHandle.handle = attributes.GetFileHandle() + + s.currentFileHandle = fileHandle + return &nfsv4.Create4res_NFS4_OK{ + Resok4: nfsv4.Create4resok{ + Cinfo: toNFSv4ChangeInfo(&changeInfo), + }, + } +} + +func (s *compoundState) opDelegpurge(args *nfsv4.Delegpurge4args) nfsv4.Delegpurge4res { + // This implementation does not support CLAIM_DELEGATE_PREV, so + // there is no need to implement DELEGPURGE. + return nfsv4.Delegpurge4res{Status: nfsv4.NFS4ERR_NOTSUPP} +} + +func (s *compoundState) opDelegreturn(args *nfsv4.Delegreturn4args) nfsv4.Delegreturn4res { + // This implementation never hands out any delegations to the + // client, meaning that any state ID provided to this operation + // is invalid. 
+ return nfsv4.Delegreturn4res{Status: nfsv4.NFS4ERR_BAD_STATEID} +} + +func (s *compoundState) opGetattr(ctx context.Context, args *nfsv4.Getattr4args) nfsv4.Getattr4res { + currentNode, _, st := s.currentFileHandle.getNode() + if st != nfsv4.NFS4_OK { + return &nfsv4.Getattr4res_default{Status: st} + } + var attributes virtual.Attributes + currentNode.VirtualGetAttributes(ctx, attrRequestToAttributesMask(args.AttrRequest), &attributes) + p := s.program + return &nfsv4.Getattr4res_NFS4_OK{ + Resok4: nfsv4.Getattr4resok{ + ObjAttributes: p.attributesToFattr4(&attributes, args.AttrRequest), + }, + } +} + +func (s *compoundState) opGetfh() nfsv4.Getfh4res { + _, _, st := s.currentFileHandle.getNode() + if st != nfsv4.NFS4_OK { + return &nfsv4.Getfh4res_default{Status: st} + } + return &nfsv4.Getfh4res_NFS4_OK{ + Resok4: nfsv4.Getfh4resok{ + Object: s.currentFileHandle.handle, + }, + } +} + +func (s *compoundState) opLink(ctx context.Context, args *nfsv4.Link4args) nfsv4.Link4res { + sourceLeaf, st := s.savedFileHandle.getLeaf() + if st != nfsv4.NFS4_OK { + return &nfsv4.Link4res_default{Status: st} + } + targetDirectory, st := s.currentFileHandle.getDirectory() + if st != nfsv4.NFS4_OK { + return &nfsv4.Link4res_default{Status: st} + } + name, st := nfsv4NewComponent(args.Newname) + if st != nfsv4.NFS4_OK { + return &nfsv4.Link4res_default{Status: st} + } + changeInfo, vs := targetDirectory.VirtualLink(ctx, name, sourceLeaf, 0, &virtual.Attributes{}) + if vs != virtual.StatusOK { + return &nfsv4.Link4res_default{Status: toNFSv4Status(vs)} + } + return &nfsv4.Link4res_NFS4_OK{ + Resok4: nfsv4.Link4resok{ + Cinfo: toNFSv4ChangeInfo(&changeInfo), + }, + } +} + +func (s *compoundState) opLock(args *nfsv4.Lock4args) nfsv4.Lock4res { + var ll leavesToClose + defer ll.closeAll() + + p := s.program + p.enter() + defer p.leave() + + switch locker := args.Locker.(type) { + case *nfsv4.Locker4_TRUE: + // Create a new lock-owner file. 
+ owner := &locker.OpenOwner + openStateID, st := p.internalizeRegularStateID(&owner.OpenStateid) + if st != nfsv4.NFS4_OK { + return &nfsv4.Lock4res_default{Status: st} + } + + oos, st := p.getOpenOwnerByOtherForTransaction(openStateID.other) + if st != nfsv4.NFS4_OK { + return &nfsv4.Lock4res_default{Status: st} + } + + transaction, lastResponse, st := oos.startTransaction(p, owner.OpenSeqid, &ll, unconfirmedOpenOwnerPolicyDeny) + if st != nfsv4.NFS4_OK { + if r, ok := lastResponse.(nfsv4.Lock4res); ok { + return r + } + return &nfsv4.Lock4res_default{Status: st} + } + response := s.txLockInitial(args, openStateID, owner) + transaction.complete(&openOwnerLastResponse{ + response: response, + }) + return response + case *nfsv4.Locker4_FALSE: + // Add additional lock to existing lock-owner file. + owner := &locker.LockOwner + lockStateID, st := p.internalizeRegularStateID(&owner.LockStateid) + if st != nfsv4.NFS4_OK { + return &nfsv4.Lock4res_default{Status: st} + } + + los, st := p.getLockOwnerByOtherForTransaction(lockStateID.other) + if st != nfsv4.NFS4_OK { + return &nfsv4.Lock4res_default{Status: st} + } + + transaction, lastResponse, st := los.startTransaction(p, owner.LockSeqid, false) + if st != nfsv4.NFS4_OK { + if r, ok := lastResponse.(nfsv4.Lock4res); ok { + if okResponse, ok := lastResponse.(*nfsv4.Lock4res_NFS4_OK); !ok || isNextStateID(&okResponse.Resok4.LockStateid, &owner.LockStateid) { + return r + } + } + return &nfsv4.Lock4res_default{Status: st} + } + response := s.txLockSuccessive(args, lockStateID, owner) + transaction.complete(response) + return response + default: + // Incorrectly encoded boolean value. 
+ return &nfsv4.Lock4res_default{Status: nfsv4.NFS4ERR_BADXDR} + } +} + +func (s *compoundState) txLockInitial(args *nfsv4.Lock4args, openStateID regularStateID, owner *nfsv4.OpenToLockOwner4) nfsv4.Lock4res { + oofs, st := s.getOpenOwnerFileByStateID(openStateID, false) + if st != nfsv4.NFS4_OK { + return &nfsv4.Lock4res_default{Status: st} + } + + oos := oofs.openOwner + if owner.LockOwner.Clientid != oos.confirmedClient.confirmation.key.shortClientID { + // Provided lock-owner's client ID does not match with + // that of the open-owner. + return &nfsv4.Lock4res_default{Status: nfsv4.NFS4ERR_INVAL} + } + + confirmedClient := oos.confirmedClient + lockOwnerKey := string(owner.LockOwner.Owner) + los, ok := confirmedClient.lockOwners[lockOwnerKey] + initialTransaction := false + if !ok { + // Lock-owner does not yet exist. Create a new one. + los = &lockOwnerState{ + confirmedClient: confirmedClient, + owner: owner.LockOwner.Owner, + } + confirmedClient.lockOwners[lockOwnerKey] = los + initialTransaction = true + } else { + if _, ok := oofs.lockOwnerFiles[los]; ok { + // Lock-owner has already been associated with + // this file. We should have gone through + // txLockSuccessive() instead. + // + // More details: RFC 7530, section 16.10.5, + // bullet point 2. + return &nfsv4.Lock4res_default{Status: nfsv4.NFS4ERR_BAD_SEQID} + } + } + + // Start a nested transaction on the lock-owner. + p := s.program + transaction, lastResponse, st := los.startTransaction(p, owner.LockSeqid, initialTransaction) + if st != nfsv4.NFS4_OK { + if initialTransaction { + panic("Failed to start transaction on a new lock-owner, which is impossible. This would cause the lock-owner to leak.") + } + if r, ok := lastResponse.(nfsv4.Lock4res); ok { + return r + } + return &nfsv4.Lock4res_default{Status: st} + } + + // Create a new lock-owner file. Set the sequence ID to zero, as + // txLockCommon() will already bump it to one. 
+ lofs := &lockOwnerFileState{ + lockOwner: los, + openOwnerFile: oofs, + shareAccess: oofs.shareCount.clone(oofs.shareAccess), + lockOwnerIndex: len(los.files), + stateID: p.newRegularStateID(0), + } + p.lockOwnerFilesByOther[lofs.stateID.other] = lofs + oofs.lockOwnerFiles[los] = lofs + los.files = append(los.files, lofs) + + response := s.txLockCommon(args, lofs) + transaction.complete(response) + + // Upon failure, undo the creation of the newly created + // lock-owner file. This may also remove the lock-owner if it + // references no other files. + if response.GetStatus() != nfsv4.NFS4_OK { + if lofs.lockCount > 0 { + panic("Failed to acquire lock on a newly created lock-owner file, yet its lock count is non-zero") + } + lofs.remove(p, nil) + } + return response +} + +func (s *compoundState) txLockSuccessive(args *nfsv4.Lock4args, lockStateID regularStateID, owner *nfsv4.ExistLockOwner4) nfsv4.Lock4res { + lofs, st := s.getLockOwnerFileByStateID(lockStateID) + if st != nfsv4.NFS4_OK { + return &nfsv4.Lock4res_default{Status: st} + } + return s.txLockCommon(args, lofs) +} + +func (s *compoundState) txLockCommon(args *nfsv4.Lock4args, lofs *lockOwnerFileState) nfsv4.Lock4res { + start, end, st := offsetLengthToStartEnd(args.Offset, args.Length) + if st != nfsv4.NFS4_OK { + return &nfsv4.Lock4res_default{Status: st} + } + lockType, st := nfsLockType4ToByteRangeLockType(args.Locktype) + if st != nfsv4.NFS4_OK { + return &nfsv4.Lock4res_default{Status: st} + } + + lock := &virtual.ByteRangeLock[*lockOwnerState]{ + Owner: lofs.lockOwner, + Start: start, + End: end, + Type: lockType, + } + + // Test whether the new lock conflicts with an existing one. 
+ openedFile := lofs.openOwnerFile.openedFile + if conflictingLock := openedFile.locks.Test(lock); conflictingLock != nil { + return &nfsv4.Lock4res_NFS4ERR_DENIED{ + Denied: byteRangeLockToLock4Denied(conflictingLock), + } + } + + lofs.lockCount += openedFile.locks.Set(lock) + if lofs.lockCount < 0 { + panic("Negative lock count") + } + lofs.stateID.seqID = nextSeqID(lofs.stateID.seqID) + p := s.program + return &nfsv4.Lock4res_NFS4_OK{ + Resok4: nfsv4.Lock4resok{ + LockStateid: p.externalizeStateID(lofs.stateID), + }, + } +} + +func (s *compoundState) opLockt(args *nfsv4.Lockt4args) nfsv4.Lockt4res { + _, st := s.currentFileHandle.getLeaf() + if st != nfsv4.NFS4_OK { + return &nfsv4.Lockt4res_default{Status: st} + } + handleKey := string(s.currentFileHandle.handle) + + p := s.program + p.enter() + defer p.leave() + + start, end, st := offsetLengthToStartEnd(args.Offset, args.Length) + if st != nfsv4.NFS4_OK { + return &nfsv4.Lockt4res_default{Status: st} + } + lockType, st := nfsLockType4ToByteRangeLockType(args.Locktype) + if st != nfsv4.NFS4_OK { + return &nfsv4.Lockt4res_default{Status: st} + } + + openedFile, ok := p.openedFilesByHandle[handleKey] + if !ok { + // File isn't opened by anyone, meaning no locks may + // cause a conflict. Just return success. + return &nfsv4.Lockt4res_NFS4_OK{} + } + + confirmedClient, st := p.getConfirmedClientByShortID(args.Owner.Clientid) + if st != nfsv4.NFS4_OK { + return &nfsv4.Lockt4res_default{Status: st} + } + confirmedClient.confirmation.hold(p) + defer confirmedClient.confirmation.release(p) + + // Attempt to obtain the lock owner that is provided in the + // arguments. It may be the case that none exists, in which case + // we just pass a nil value to ByteRangeLockSet.Test(), + // indicating a lock-owner that differs from any existing one. 
+ los := confirmedClient.lockOwners[string(args.Owner.Owner)] + lock := &virtual.ByteRangeLock[*lockOwnerState]{ + Owner: los, + Start: start, + End: end, + Type: lockType, + } + + if conflictingLock := openedFile.locks.Test(lock); conflictingLock != nil { + return &nfsv4.Lockt4res_NFS4ERR_DENIED{ + Denied: byteRangeLockToLock4Denied(conflictingLock), + } + } + return &nfsv4.Lockt4res_NFS4_OK{} +} + +func (s *compoundState) opLocku(args *nfsv4.Locku4args) nfsv4.Locku4res { + p := s.program + p.enter() + defer p.leave() + + lockStateID, st := p.internalizeRegularStateID(&args.LockStateid) + if st != nfsv4.NFS4_OK { + return &nfsv4.Locku4res_default{Status: st} + } + + los, st := p.getLockOwnerByOtherForTransaction(lockStateID.other) + if st != nfsv4.NFS4_OK { + return &nfsv4.Locku4res_default{Status: st} + } + + transaction, lastResponse, st := los.startTransaction(p, args.Seqid, false) + if st != nfsv4.NFS4_OK { + if r, ok := lastResponse.(nfsv4.Locku4res); ok { + if okResponse, ok := lastResponse.(*nfsv4.Locku4res_NFS4_OK); !ok || isNextStateID(&okResponse.LockStateid, &args.LockStateid) { + return r + } + } + return &nfsv4.Locku4res_default{Status: st} + } + response := s.txLocku(args, lockStateID) + transaction.complete(response) + return response +} + +func (s *compoundState) txLocku(args *nfsv4.Locku4args, lockStateID regularStateID) nfsv4.Locku4res { + lofs, st := s.getLockOwnerFileByStateID(lockStateID) + if st != nfsv4.NFS4_OK { + return &nfsv4.Locku4res_default{Status: st} + } + start, end, st := offsetLengthToStartEnd(args.Offset, args.Length) + if st != nfsv4.NFS4_OK { + return &nfsv4.Locku4res_default{Status: st} + } + + lock := &virtual.ByteRangeLock[*lockOwnerState]{ + Owner: lofs.lockOwner, + Start: start, + End: end, + Type: virtual.ByteRangeLockTypeUnlocked, + } + + lofs.lockCount += lofs.openOwnerFile.openedFile.locks.Set(lock) + if lofs.lockCount < 0 { + panic("Negative lock count") + } + lofs.stateID.seqID = nextSeqID(lofs.stateID.seqID) + p := 
s.program + return &nfsv4.Locku4res_NFS4_OK{ + LockStateid: p.externalizeStateID(lofs.stateID), + } +} + +func (s *compoundState) opLookup(ctx context.Context, args *nfsv4.Lookup4args) nfsv4.Lookup4res { + currentDirectory, st := s.currentFileHandle.getDirectoryOrSymlink(ctx) + if st != nfsv4.NFS4_OK { + return nfsv4.Lookup4res{Status: st} + } + name, st := nfsv4NewComponent(args.Objname) + if st != nfsv4.NFS4_OK { + return nfsv4.Lookup4res{Status: st} + } + var attributes virtual.Attributes + child, vs := currentDirectory.VirtualLookup(ctx, name, virtual.AttributesMaskFileHandle, &attributes) + if vs != virtual.StatusOK { + return nfsv4.Lookup4res{Status: toNFSv4Status(vs)} + } + s.currentFileHandle = fileHandle{ + handle: attributes.GetFileHandle(), + node: child, + } + return nfsv4.Lookup4res{Status: nfsv4.NFS4_OK} +} + +func (s *compoundState) opLookupp(ctx context.Context) nfsv4.Lookupp4res { + if _, st := s.currentFileHandle.getDirectoryOrSymlink(ctx); st != nfsv4.NFS4_OK { + return nfsv4.Lookupp4res{Status: st} + } + + // TODO: Do we want to implement this method as well? For most + // directory types (e.g., CAS backed directories) this method is + // hard to implement, as they don't necessarily have a single + // parent. + return nfsv4.Lookupp4res{Status: nfsv4.NFS4ERR_NOENT} +} + +func (s *compoundState) opNverify(ctx context.Context, args *nfsv4.Nverify4args) nfsv4.Nverify4res { + if st := s.verifyAttributes(ctx, &args.ObjAttributes); st != nfsv4.NFS4ERR_NOT_SAME { + return nfsv4.Nverify4res{Status: st} + } + return nfsv4.Nverify4res{Status: nfsv4.NFS4_OK} +} + +func (s *compoundState) opOpen(ctx context.Context, args *nfsv4.Open4args) nfsv4.Open4res { + var ll leavesToClose + defer ll.closeAll() + + p := s.program + p.enter() + defer p.leave() + + openOwnerKey := string(args.Owner.Owner) + var oos *openOwnerState + for { + // Obtain confirmed client state. 
+ confirmedClient, st := p.getConfirmedClientByShortID(args.Owner.Clientid) + if st != nfsv4.NFS4_OK { + return &nfsv4.Open4res_default{Status: st} + } + + var ok bool + oos, ok = confirmedClient.openOwners[openOwnerKey] + if !ok { + // Open-owner has never been seen before. Create + // a new one that is in the unconfirmed state. + oos = &openOwnerState{ + confirmedClient: confirmedClient, + key: openOwnerKey, + filesByHandle: map[string]*openOwnerFileState{}, + } + confirmedClient.openOwners[openOwnerKey] = oos + baseProgramOpenOwnersCreated.Inc() + } + + if oos.waitForCurrentTransactionCompletion(p) { + break + } + } + + transaction, lastResponse, st := oos.startTransaction(p, args.Seqid, &ll, unconfirmedOpenOwnerPolicyReinitialize) + if st != nfsv4.NFS4_OK { + if r, ok := lastResponse.(nfsv4.Open4res); ok { + // Last call was also an OPEN. Return cached response. + return r + } + return &nfsv4.Open4res_default{Status: st} + } + response := s.txOpen(ctx, args, oos, &ll) + transaction.complete(&openOwnerLastResponse{ + response: response, + }) + return response +} + +func (s *compoundState) txOpen(ctx context.Context, args *nfsv4.Open4args, oos *openOwnerState, ll *leavesToClose) nfsv4.Open4res { + // This method may need to drop the lock, as VirtualOpenChild may + // block. This is safe to do within open-owner transactions. + // Provide logic for re-establishing the lock in failure cases. + p := s.program + isLocked := true + defer func() { + if !isLocked { + p.enter() + } + }() + + // Convert share_* fields. + shareAccess, st := shareAccessToShareMask(args.ShareAccess) + if st != nfsv4.NFS4_OK { + return &nfsv4.Open4res_default{Status: st} + } + + // As with most UNIX-like systems, we don't support share_deny. + // Only permit this field to be set to OPEN4_SHARE_DENY_NONE, + // behaving as if there's an implicit OPEN4_SHARE_ACCESS_BOTH on + // all files. + // + // More details: RFC 7530, section 16.16.5, paragraph 6. 
+ switch args.ShareDeny { + case nfsv4.OPEN4_SHARE_DENY_NONE: + case nfsv4.OPEN4_SHARE_DENY_READ, nfsv4.OPEN4_SHARE_DENY_WRITE, nfsv4.OPEN4_SHARE_DENY_BOTH: + return &nfsv4.Open4res_default{Status: nfsv4.NFS4ERR_SHARE_DENIED} + default: + return &nfsv4.Open4res_default{Status: nfsv4.NFS4ERR_INVAL} + } + + // Convert openhow. + var createAttributes *virtual.Attributes + var existingOptions *virtual.OpenExistingOptions + if openHow, ok := args.Openhow.(*nfsv4.Openflag4_OPEN4_CREATE); ok { + createAttributes = &virtual.Attributes{} + switch how := openHow.How.(type) { + case *nfsv4.Createhow4_UNCHECKED4: + // Create a file, allowing the file to already exist. + if st := fattr4ToAttributes(&how.Createattrs, createAttributes); st != nfsv4.NFS4_OK { + return &nfsv4.Open4res_default{Status: st} + } + existingOptions = &virtual.OpenExistingOptions{} + if sizeBytes, ok := createAttributes.GetSizeBytes(); ok && sizeBytes == 0 { + existingOptions.Truncate = true + } + case *nfsv4.Createhow4_GUARDED4: + // Create a file, disallowing the file to already exist. + if st := fattr4ToAttributes(&how.Createattrs, createAttributes); st != nfsv4.NFS4_OK { + return &nfsv4.Open4res_default{Status: st} + } + case *nfsv4.Createhow4_EXCLUSIVE4: + // Create a file, allowing the file to exist if + // it was created by a previous call that + // provided the same verifier. + // TODO: Implement this! + default: + return &nfsv4.Open4res_default{Status: nfsv4.NFS4ERR_INVAL} + } + } else { + // Don't create a new file. Only open an existing file. + existingOptions = &virtual.OpenExistingOptions{} + } + + // Convert claim. As we don't support delegations, we can only + // meaningfully support CLAIM_NULL and CLAIM_PREVIOUS. 
+ switch claim := args.Claim.(type) { + case *nfsv4.OpenClaim4_CLAIM_NULL: + p.leave() + isLocked = false + + currentDirectory, st := s.currentFileHandle.getDirectory() + if st != nfsv4.NFS4_OK { + return &nfsv4.Open4res_default{Status: st} + } + + name, st := nfsv4NewComponent(claim.File) + if st != nfsv4.NFS4_OK { + return &nfsv4.Open4res_default{Status: st} + } + + // Open the file. + var attributes virtual.Attributes + leaf, respected, changeInfo, vs := currentDirectory.VirtualOpenChild( + ctx, + name, + shareAccess, + createAttributes, + existingOptions, + virtual.AttributesMaskFileHandle, + &attributes) + if vs != virtual.StatusOK { + return &nfsv4.Open4res_default{Status: toNFSv4Status(vs)} + } + + handle := attributes.GetFileHandle() + handleKey := string(handle) + + s.currentFileHandle = fileHandle{ + handle: handle, + node: virtual.DirectoryChild{}.FromLeaf(leaf), + } + + response := &nfsv4.Open4res_NFS4_OK{ + Resok4: nfsv4.Open4resok{ + Cinfo: toNFSv4ChangeInfo(&changeInfo), + Rflags: nfsv4.OPEN4_RESULT_LOCKTYPE_POSIX, + Attrset: attributesMaskToBitmap4(respected), + Delegation: &nfsv4.OpenDelegation4_OPEN_DELEGATE_NONE{}, + }, + } + + p.enter() + isLocked = true + + oofs, ok := oos.filesByHandle[handleKey] + if ok { + // This file has already been opened by this open-owner, + // meaning we should upgrade the existing opened file. + // The newly opened file can be closed again. + // + // More details: RFC 7530, section 9.11. + oofs.upgrade(shareAccess, leaf, ll) + } else { + openedFile, ok := p.openedFilesByHandle[handleKey] + if ok { + openedFile.openOwnersCount.increase() + } else { + // This file has not been opened by any + // open-owner. Keep track of it, so that we + // don't need to call into HandleResolver. This + // ensures that the file remains accessible + // while opened, even when unlinked. 
+ openedFile = &openedFileState{ + handle: handle, + handleKey: handleKey, + leaf: leaf, + openOwnersCount: 1, + } + openedFile.locks.Initialize() + p.openedFilesByHandle[handleKey] = openedFile + } + + // This file has not been opened by this open-owner. + // Create a new state ID. + oofs = &openOwnerFileState{ + openOwner: oos, + openedFile: openedFile, + stateID: p.newRegularStateID(1), + lockOwnerFiles: map[*lockOwnerState]*lockOwnerFileState{}, + } + if oofs.shareCount.upgrade(&oofs.shareAccess, shareAccess) != 0 { + panic("Share access reservations can't overlap for newly created files") + } + oos.filesByHandle[handleKey] = oofs + p.openOwnerFilesByOther[oofs.stateID.other] = oofs + baseProgramOpenOwnerFilesCreated.Inc() + } + + response.Resok4.Stateid = p.externalizeStateID(oofs.stateID) + if !oos.confirmed { + // The first time that this open-owner is used. Request + // that the caller issues an OPEN_CONFIRM operation. + response.Resok4.Rflags |= nfsv4.OPEN4_RESULT_CONFIRM + } + return response + case *nfsv4.OpenClaim4_CLAIM_PREVIOUS: + // Check whether the current open-owner has opened the + // file before, using the same delegation type. + currentLeaf, st := s.currentFileHandle.getLeaf() + if st != nfsv4.NFS4_OK { + return &nfsv4.Open4res_default{Status: st} + } + oofs, ok := oos.filesByHandle[string(s.currentFileHandle.handle)] + if !ok || claim.DelegateType != nfsv4.OPEN_DELEGATE_NONE { + return &nfsv4.Open4res_default{Status: nfsv4.NFS4ERR_RECLAIM_BAD} + } + + if !oos.confirmed { + // The server MUST NOT require confirmation on a + // reclaim-type open. In this implementation + // this should be impossible to reach, as + // sending OPEN against an unconfirmed + // open-owner causes it to be reinitialized. + // + // More details: RFC 7530, section 9.1.11, + // bullet point 2. 
+ panic("If the open-owner is unconfirmed, starting the transaction should have reinitialized it") + } + + if existingOptions == nil { + return &nfsv4.Open4res_default{Status: nfsv4.NFS4ERR_EXIST} + } + + p.leave() + isLocked = false + + // Reopen the file, as the share reservations may be + // upgraded, or truncation may be requested. + var attributes virtual.Attributes + if vs := currentLeaf.VirtualOpenSelf( + ctx, + shareAccess, + existingOptions, + 0, + &attributes, + ); vs != virtual.StatusOK { + return &nfsv4.Open4res_default{Status: toNFSv4Status(vs)} + } + + p.enter() + isLocked = true + + oofs.upgrade(shareAccess, currentLeaf, ll) + return &nfsv4.Open4res_NFS4_OK{ + Resok4: nfsv4.Open4resok{ + Stateid: p.externalizeStateID(oofs.stateID), + Rflags: nfsv4.OPEN4_RESULT_LOCKTYPE_POSIX, + Attrset: attributesMaskToBitmap4(existingOptions.ToAttributesMask()), + Delegation: &nfsv4.OpenDelegation4_OPEN_DELEGATE_NONE{}, + }, + } + case *nfsv4.OpenClaim4_CLAIM_DELEGATE_CUR: + return &nfsv4.Open4res_default{Status: nfsv4.NFS4ERR_RECLAIM_BAD} + case *nfsv4.OpenClaim4_CLAIM_DELEGATE_PREV: + return &nfsv4.Open4res_default{Status: nfsv4.NFS4ERR_NOTSUPP} + default: + return &nfsv4.Open4res_default{Status: nfsv4.NFS4ERR_INVAL} + } +} + +func (s *compoundState) opOpenattr(args *nfsv4.Openattr4args) nfsv4.Openattr4res { + // This implementation does not support named attributes. 
+ if _, _, st := s.currentFileHandle.getNode(); st != nfsv4.NFS4_OK { + return nfsv4.Openattr4res{Status: st} + } + return nfsv4.Openattr4res{Status: nfsv4.NFS4ERR_NOTSUPP} +} + +func (s *compoundState) opOpenConfirm(args *nfsv4.OpenConfirm4args) nfsv4.OpenConfirm4res { + p := s.program + openStateID, st := p.internalizeRegularStateID(&args.OpenStateid) + if st != nfsv4.NFS4_OK { + return &nfsv4.OpenConfirm4res_default{Status: st} + } + + var ll leavesToClose + defer ll.closeAll() + + p.enter() + defer p.leave() + + oos, st := p.getOpenOwnerByOtherForTransaction(openStateID.other) + if st != nfsv4.NFS4_OK { + return &nfsv4.OpenConfirm4res_default{Status: st} + } + transaction, lastResponse, st := oos.startTransaction(p, args.Seqid, &ll, unconfirmedOpenOwnerPolicyAllow) + if st != nfsv4.NFS4_OK { + if r, ok := lastResponse.(nfsv4.OpenConfirm4res); ok { + if okResponse, ok := lastResponse.(*nfsv4.OpenConfirm4res_NFS4_OK); !ok || isNextStateID(&okResponse.Resok4.OpenStateid, &args.OpenStateid) { + return r + } + } + return &nfsv4.OpenConfirm4res_default{Status: st} + } + response := s.txOpenConfirm(openStateID) + transaction.complete(&openOwnerLastResponse{ + response: response, + }) + return response +} + +func (s *compoundState) txOpenConfirm(openStateID regularStateID) nfsv4.OpenConfirm4res { + oofs, st := s.getOpenOwnerFileByStateID(openStateID, true) + if st != nfsv4.NFS4_OK { + return &nfsv4.OpenConfirm4res_default{Status: st} + } + oofs.openOwner.confirmed = true + oofs.stateID.seqID = nextSeqID(oofs.stateID.seqID) + + p := s.program + return &nfsv4.OpenConfirm4res_NFS4_OK{ + Resok4: nfsv4.OpenConfirm4resok{ + OpenStateid: p.externalizeStateID(oofs.stateID), + }, + } +} + +func (s *compoundState) opOpenDowngrade(args *nfsv4.OpenDowngrade4args) nfsv4.OpenDowngrade4res { + p := s.program + openStateID, st := p.internalizeRegularStateID(&args.OpenStateid) + if st != nfsv4.NFS4_OK { + return &nfsv4.OpenDowngrade4res_default{Status: st} + } + + var ll leavesToClose 
+ defer ll.closeAll() + + p.enter() + defer p.leave() + + oos, st := p.getOpenOwnerByOtherForTransaction(openStateID.other) + if st != nfsv4.NFS4_OK { + return &nfsv4.OpenDowngrade4res_default{Status: st} + } + transaction, lastResponse, st := oos.startTransaction(p, args.Seqid, &ll, unconfirmedOpenOwnerPolicyDeny) + if st != nfsv4.NFS4_OK { + if r, ok := lastResponse.(nfsv4.OpenDowngrade4res); ok { + if okResponse, ok := lastResponse.(*nfsv4.OpenDowngrade4res_NFS4_OK); !ok || isNextStateID(&okResponse.Resok4.OpenStateid, &args.OpenStateid) { + return r + } + } + return &nfsv4.OpenDowngrade4res_default{Status: st} + } + response := s.txOpenDowngrade(args, openStateID, &ll) + transaction.complete(&openOwnerLastResponse{ + response: response, + }) + return response +} + +func (s *compoundState) txOpenDowngrade(args *nfsv4.OpenDowngrade4args, openStateID regularStateID, ll *leavesToClose) nfsv4.OpenDowngrade4res { + oofs, st := s.getOpenOwnerFileByStateID(openStateID, false) + if st != nfsv4.NFS4_OK { + return &nfsv4.OpenDowngrade4res_default{Status: st} + } + + shareAccess, st := shareAccessToShareMask(args.ShareAccess) + if st != nfsv4.NFS4_OK { + return &nfsv4.OpenDowngrade4res_default{Status: st} + } + if shareAccess&^oofs.shareAccess != 0 || args.ShareDeny != nfsv4.OPEN4_SHARE_DENY_NONE { + // Attempted to upgrade. The client should have called OPEN. + // + // More details: RFC 7530, section 16.19.4, paragraph 2. 
+ return &nfsv4.OpenDowngrade4res_default{Status: nfsv4.NFS4ERR_INVAL} + } + + oofs.downgradeShareAccess(&oofs.shareAccess, shareAccess, ll) + oofs.stateID.seqID = nextSeqID(oofs.stateID.seqID) + + p := s.program + return &nfsv4.OpenDowngrade4res_NFS4_OK{ + Resok4: nfsv4.OpenDowngrade4resok{ + OpenStateid: p.externalizeStateID(oofs.stateID), + }, + } +} + +func (s *compoundState) opPutfh(args *nfsv4.Putfh4args) nfsv4.Putfh4res { + p := s.program + p.enter() + if openedFile, ok := p.openedFilesByHandle[string(args.Object)]; ok { + // File is opened at least once. Return this copy, so + // that we're guaranteed to work, even if the file has + // been removed from the file system. + s.currentFileHandle = fileHandle{ + handle: openedFile.handle, + node: virtual.DirectoryChild{}.FromLeaf(openedFile.leaf), + } + p.leave() + } else { + // File is currently not open. Call into the handle + // resolver to do a lookup. + p.leave() + child, vs := p.handleResolver(bytes.NewBuffer(args.Object)) + if vs != virtual.StatusOK { + return nfsv4.Putfh4res{Status: toNFSv4Status(vs)} + } + s.currentFileHandle = fileHandle{ + handle: args.Object, + node: child, + } + } + return nfsv4.Putfh4res{Status: nfsv4.NFS4_OK} +} + +func (s *compoundState) opPutpubfh() nfsv4.Putpubfh4res { + p := s.program + s.currentFileHandle = p.rootFileHandle + return nfsv4.Putpubfh4res{Status: nfsv4.NFS4_OK} +} + +func (s *compoundState) opPutrootfh() nfsv4.Putrootfh4res { + p := s.program + s.currentFileHandle = p.rootFileHandle + return nfsv4.Putrootfh4res{Status: nfsv4.NFS4_OK} +} + +func (s *compoundState) opRead(ctx context.Context, args *nfsv4.Read4args) nfsv4.Read4res { + currentLeaf, cleanup, st := s.getOpenedLeaf(ctx, &args.Stateid, virtual.ShareMaskRead) + if st != nfsv4.NFS4_OK { + return &nfsv4.Read4res_default{Status: st} + } + defer cleanup() + + buf := make([]byte, args.Count) + n, eof, vs := currentLeaf.VirtualRead(buf, args.Offset) + if vs != virtual.StatusOK { + return 
&nfsv4.Read4res_default{Status: toNFSv4Status(vs)} + } + return &nfsv4.Read4res_NFS4_OK{ + Resok4: nfsv4.Read4resok{ + Eof: eof, + Data: buf[:n], + }, + } +} + +func (s *compoundState) opReaddir(ctx context.Context, args *nfsv4.Readdir4args) nfsv4.Readdir4res { + currentDirectory, st := s.currentFileHandle.getDirectory() + if st != nfsv4.NFS4_OK { + return &nfsv4.Readdir4res_default{Status: st} + } + + // Validate the cookie verifier. + // TODO: The macOS NFSv4 client may sometimes not set the cookie + // verifier properly, so we allow it to be zero. Remove this + // logic in due time. Issue: rdar://91034875 + p := s.program + if args.Cookie != 0 && args.Cookieverf != p.rebootVerifier && (args.Cookieverf != nfsv4.Verifier4{}) { + return &nfsv4.Readdir4res_default{Status: nfsv4.NFS4ERR_NOT_SAME} + } + + // Restore read offset. + firstCookie := uint64(0) + if args.Cookie > lastReservedCookie { + firstCookie = args.Cookie - lastReservedCookie + } + + // Empty response. + res := nfsv4.Readdir4res_NFS4_OK{ + Resok4: nfsv4.Readdir4resok{ + Cookieverf: p.rebootVerifier, + Reply: nfsv4.Dirlist4{ + Eof: true, + }, + }, + } + + // Attach entries. + reporter := readdirReporter{ + program: p, + attrRequest: args.AttrRequest, + maxCount: args.Maxcount, + dirCount: args.Dircount, + + currentMaxCount: nfsv4.Count4(res.Resok4.GetEncodedSizeBytes()), + nextEntry: &res.Resok4.Reply.Entries, + endOfFile: &res.Resok4.Reply.Eof, + } + if vs := currentDirectory.VirtualReadDir( + ctx, + firstCookie, + attrRequestToAttributesMask(args.AttrRequest), + &reporter, + ); vs != virtual.StatusOK { + return &nfsv4.Readdir4res_default{Status: toNFSv4Status(vs)} + } + if res.Resok4.Reply.Entries == nil && !res.Resok4.Reply.Eof { + // Not enough space to store a single entry. 
+ return &nfsv4.Readdir4res_default{Status: nfsv4.NFS4ERR_TOOSMALL} + } + return &res +} + +func (s *compoundState) opReadlink(ctx context.Context) nfsv4.Readlink4res { + currentLeaf, st := s.currentFileHandle.getLeaf() + if st != nfsv4.NFS4_OK { + if st == nfsv4.NFS4ERR_ISDIR { + return &nfsv4.Readlink4res_default{Status: nfsv4.NFS4ERR_INVAL} + } + return &nfsv4.Readlink4res_default{Status: st} + } + target, vs := currentLeaf.VirtualReadlink(ctx) + if vs != virtual.StatusOK { + return &nfsv4.Readlink4res_default{Status: toNFSv4Status(vs)} + } + return &nfsv4.Readlink4res_NFS4_OK{ + Resok4: nfsv4.Readlink4resok{ + Link: target, + }, + } +} + +func (s *compoundState) opReleaseLockowner(args *nfsv4.ReleaseLockowner4args) nfsv4.ReleaseLockowner4res { + var ll leavesToClose + defer ll.closeAll() + + p := s.program + p.enter() + defer p.leave() + + confirmedClient, st := p.getConfirmedClientByShortID(args.LockOwner.Clientid) + if st != nfsv4.NFS4_OK { + return nfsv4.ReleaseLockowner4res{Status: st} + } + confirmedClient.confirmation.hold(p) + defer confirmedClient.confirmation.release(p) + + lockOwnerKey := string(args.LockOwner.Owner) + if los, ok := confirmedClient.lockOwners[lockOwnerKey]; ok { + // Check whether any of the files associated with this + // lock-owner still have locks held. In that case the + // client should call LOCKU first. + // + // More details: RFC 7530, section 16.37.4, last sentence. + for _, lofs := range los.files { + if lofs.lockCount > 0 { + return nfsv4.ReleaseLockowner4res{Status: nfsv4.NFS4ERR_LOCKS_HELD} + } + } + + // None of the files have locks held. Remove the state + // associated with all files. The final call to remove() + // will also remove the lock-owner state. 
+ for len(los.files) > 0 { + lofs := los.files[len(los.files)-1] + lofs.remove(p, &ll) + } + if _, ok := confirmedClient.lockOwners[lockOwnerKey]; ok { + panic("Removing all lock-owner files did not remove lock-owner") + } + } + + return nfsv4.ReleaseLockowner4res{Status: nfsv4.NFS4_OK} +} + +func (s *compoundState) opRename(args *nfsv4.Rename4args) nfsv4.Rename4res { + oldDirectory, st := s.savedFileHandle.getDirectory() + if st != nfsv4.NFS4_OK { + return &nfsv4.Rename4res_default{Status: st} + } + oldName, st := nfsv4NewComponent(args.Oldname) + if st != nfsv4.NFS4_OK { + return &nfsv4.Rename4res_default{Status: st} + } + newDirectory, st := s.currentFileHandle.getDirectory() + if st != nfsv4.NFS4_OK { + return &nfsv4.Rename4res_default{Status: st} + } + newName, st := nfsv4NewComponent(args.Newname) + if st != nfsv4.NFS4_OK { + return &nfsv4.Rename4res_default{Status: nfsv4.NFS4ERR_BADNAME} + } + + oldChangeInfo, newChangeInfo, vs := oldDirectory.VirtualRename(oldName, newDirectory, newName) + if vs != virtual.StatusOK { + return &nfsv4.Rename4res_default{Status: toNFSv4Status(vs)} + } + return &nfsv4.Rename4res_NFS4_OK{ + Resok4: nfsv4.Rename4resok{ + SourceCinfo: toNFSv4ChangeInfo(&oldChangeInfo), + TargetCinfo: toNFSv4ChangeInfo(&newChangeInfo), + }, + } +} + +func (s *compoundState) opRemove(args *nfsv4.Remove4args) nfsv4.Remove4res { + currentDirectory, st := s.currentFileHandle.getDirectory() + if st != nfsv4.NFS4_OK { + return &nfsv4.Remove4res_default{Status: st} + } + name, st := nfsv4NewComponent(args.Target) + if st != nfsv4.NFS4_OK { + return &nfsv4.Remove4res_default{Status: st} + } + + changeInfo, vs := currentDirectory.VirtualRemove(name, true, true) + if vs != virtual.StatusOK { + return &nfsv4.Remove4res_default{Status: toNFSv4Status(vs)} + } + return &nfsv4.Remove4res_NFS4_OK{ + Resok4: nfsv4.Remove4resok{ + Cinfo: toNFSv4ChangeInfo(&changeInfo), + }, + } +} + +func (s *compoundState) opRenew(args *nfsv4.Renew4args) nfsv4.Renew4res { + p := 
s.program + p.enter() + defer p.leave() + + confirmedClient, st := p.getConfirmedClientByShortID(args.Clientid) + if st != nfsv4.NFS4_OK { + return nfsv4.Renew4res{Status: st} + } + + // Hold and release the client, so that the time at which the + // client gets garbage collected is extended. + confirmedClient.confirmation.hold(p) + defer confirmedClient.confirmation.release(p) + + return nfsv4.Renew4res{Status: nfsv4.NFS4_OK} +} + +func (s *compoundState) opRestorefh() nfsv4.Restorefh4res { + if !s.savedFileHandle.node.IsSet() { + return nfsv4.Restorefh4res{Status: nfsv4.NFS4ERR_RESTOREFH} + } + s.currentFileHandle = s.savedFileHandle + return nfsv4.Restorefh4res{Status: nfsv4.NFS4_OK} +} + +func (s *compoundState) opSavefh() nfsv4.Savefh4res { + _, _, st := s.currentFileHandle.getNode() + if st == nfsv4.NFS4_OK { + s.savedFileHandle = s.currentFileHandle + } + return nfsv4.Savefh4res{Status: st} +} + +func (s *compoundState) opSecinfo(ctx context.Context, args *nfsv4.Secinfo4args) nfsv4.Secinfo4res { + // The standard states that the SECINFO operation is expected to + // be used by the NFS client when the error value of + // NFS4ERR_WRONGSEC is returned from another NFS operation. In + // practice, we even see it being called if no such error was + // returned. + // + // Because this NFS server is intended to be used for loopback + // purposes only, simply announce the use of AUTH_NONE. 
+ currentDirectory, st := s.currentFileHandle.getDirectory() + if st != nfsv4.NFS4_OK { + return &nfsv4.Secinfo4res_default{Status: st} + } + name, st := nfsv4NewComponent(args.Name) + if st != nfsv4.NFS4_OK { + return &nfsv4.Secinfo4res_default{Status: st} + } + if _, vs := currentDirectory.VirtualLookup(ctx, name, 0, &virtual.Attributes{}); vs != virtual.StatusOK { + return &nfsv4.Secinfo4res_default{Status: toNFSv4Status(vs)} + } + return &nfsv4.Secinfo4res_NFS4_OK{ + Resok4: []nfsv4.Secinfo4{ + &nfsv4.Secinfo4_default{ + Flavor: rpcv2.AUTH_NONE, + }, + }, + } +} + +func (s *compoundState) opSetattr(ctx context.Context, args *nfsv4.Setattr4args) nfsv4.Setattr4res { + // TODO: Respect the state ID, if provided! + currentNode, _, st := s.currentFileHandle.getNode() + if st != nfsv4.NFS4_OK { + return nfsv4.Setattr4res{Status: st} + } + var attributes virtual.Attributes + if st := fattr4ToAttributes(&args.ObjAttributes, &attributes); st != nfsv4.NFS4_OK { + return nfsv4.Setattr4res{Status: st} + } + if vs := currentNode.VirtualSetAttributes(ctx, &attributes, 0, &virtual.Attributes{}); vs != virtual.StatusOK { + return nfsv4.Setattr4res{Status: toNFSv4Status(vs)} + } + return nfsv4.Setattr4res{ + Status: st, + Attrsset: args.ObjAttributes.Attrmask, + } +} + +func (s *compoundState) opSetclientid(args *nfsv4.Setclientid4args) nfsv4.Setclientid4res { + p := s.program + p.enter() + defer p.leave() + + // As we don't care about using the client callback, our + // implementation of SETCLIENTID can be a lot simpler than + // what's described by the spec. SETCLIENTID can normally be + // used to update the client callback as well, which is + // something we don't need to care about. + + longID := string(args.Client.Id) + clientVerifier := args.Client.Verifier + client, ok := p.clientsByLongID[longID] + if !ok { + // Client has not been observed before. Create it. 
+ client = &clientState{ + longID: longID, + confirmationsByClientVerifier: map[nfsv4.Verifier4]*clientConfirmationState{}, + } + p.clientsByLongID[longID] = client + } + + confirmation, ok := client.confirmationsByClientVerifier[clientVerifier] + if !ok { + // Create a new confirmation record for SETCLIENTID_CONFIRM. + confirmation = &clientConfirmationState{ + client: client, + clientVerifier: clientVerifier, + key: clientConfirmationKey{ + shortClientID: p.randomNumberGenerator.Uint64(), + }, + } + p.randomNumberGenerator.Read(confirmation.key.serverVerifier[:]) + client.confirmationsByClientVerifier[clientVerifier] = confirmation + p.clientConfirmationsByKey[confirmation.key] = confirmation + p.clientConfirmationsByShortID[confirmation.key.shortClientID] = confirmation + confirmation.insertIntoIdleList(p) + } + + return &nfsv4.Setclientid4res_NFS4_OK{ + Resok4: nfsv4.Setclientid4resok{ + Clientid: confirmation.key.shortClientID, + SetclientidConfirm: confirmation.key.serverVerifier, + }, + } +} + +func (s *compoundState) opSetclientidConfirm(args *nfsv4.SetclientidConfirm4args) nfsv4.SetclientidConfirm4res { + var ll leavesToClose + defer ll.closeAll() + + p := s.program + p.enter() + defer p.leave() + + key := clientConfirmationKey{ + shortClientID: args.Clientid, + serverVerifier: args.SetclientidConfirm, + } + confirmation, ok := p.clientConfirmationsByKey[key] + if !ok { + return nfsv4.SetclientidConfirm4res{Status: nfsv4.NFS4ERR_STALE_CLIENTID} + } + + client := confirmation.client + if confirmedClient := client.confirmed; confirmedClient == nil || confirmedClient.confirmation != confirmation { + // Client record has not been confirmed yet. + confirmation.hold(p) + defer confirmation.release(p) + + if confirmedClient != nil { + // The client has another confirmed entry. + // Remove all state, such as open files and locks. 
+ oldConfirmation := confirmedClient.confirmation + if oldConfirmation.holdCount > 0 { + // The client is currently running one + // or more blocking operations. This + // prevents us from closing files and + // releasing locks. + return nfsv4.SetclientidConfirm4res{Status: nfsv4.NFS4ERR_DELAY} + } + oldConfirmation.remove(p, &ll) + } + + if client.confirmed != nil { + panic("Attempted to replace confirmed client record") + } + client.confirmed = &confirmedClientState{ + confirmation: confirmation, + openOwners: map[string]*openOwnerState{}, + lockOwners: map[string]*lockOwnerState{}, + } + } + + return nfsv4.SetclientidConfirm4res{Status: nfsv4.NFS4_OK} +} + +func (s *compoundState) opWrite(ctx context.Context, args *nfsv4.Write4args) nfsv4.Write4res { + currentLeaf, cleanup, st := s.getOpenedLeaf(ctx, &args.Stateid, virtual.ShareMaskWrite) + if st != nfsv4.NFS4_OK { + return &nfsv4.Write4res_default{Status: st} + } + defer cleanup() + + n, vs := currentLeaf.VirtualWrite(args.Data, args.Offset) + if vs != virtual.StatusOK { + return &nfsv4.Write4res_default{Status: toNFSv4Status(vs)} + } + return &nfsv4.Write4res_NFS4_OK{ + Resok4: nfsv4.Write4resok{ + Count: nfsv4.Count4(n), + Committed: nfsv4.FILE_SYNC4, + Writeverf: s.program.rebootVerifier, + }, + } +} + +func (s *compoundState) opVerify(ctx context.Context, args *nfsv4.Verify4args) nfsv4.Verify4res { + if st := s.verifyAttributes(ctx, &args.ObjAttributes); st != nfsv4.NFS4ERR_SAME { + return nfsv4.Verify4res{Status: st} + } + return nfsv4.Verify4res{Status: nfsv4.NFS4_OK} +} + +// comparingWriter is an io.Writer that merely compares data that is +// written to a reference value. +type comparingWriter struct { + reference []byte + status nfsv4.Nfsstat4 +} + +func (w *comparingWriter) Write(p []byte) (int, error) { + if w.status == nfsv4.NFS4ERR_SAME { + if len(p) > len(w.reference) { + if bytes.Equal(p[:len(w.reference)], w.reference) { + // Reference value is a prefix of the provided + // data. 
With XDR this is never possible. + *w = comparingWriter{status: nfsv4.NFS4ERR_BADXDR} + } else { + *w = comparingWriter{status: nfsv4.NFS4ERR_NOT_SAME} + } + } else { + if bytes.Equal(p, w.reference[:len(p)]) { + w.reference = w.reference[len(p):] + } else { + *w = comparingWriter{status: nfsv4.NFS4ERR_NOT_SAME} + } + } + } + return len(p), nil +} + +type referenceCount int + +func (rc *referenceCount) increase() { + if *rc <= 0 { + panic("Attempted to increase zero reference count") + } + (*rc)++ +} + +func (rc *referenceCount) decrease() bool { + if *rc <= 0 { + panic("Attempted to decrease zero reference count") + } + (*rc)-- + return *rc == 0 +} + +// shareCount keeps track of the total number of share reservations +// belonging to a given open-owner file and all of its associated +// lock-owner files. This is necessary to ensure that a file remains +// readable/writable while locks are held or I/O operations take place, +// even if OPEN_DOWNGRADE is called. +type shareCount struct { + readers referenceCount + writers referenceCount +} + +// Upgrade the set of share reservations, for example as a result of an +// opened file being upgraded from read/write-only to both readable and +// writable. +func (sc *shareCount) upgrade(shareAccess *virtual.ShareMask, newShareAccess virtual.ShareMask) virtual.ShareMask { + var overlap virtual.ShareMask + if newShareAccess&virtual.ShareMaskRead != 0 { + if sc.readers > 0 { + overlap |= virtual.ShareMaskRead + } + if *shareAccess&virtual.ShareMaskRead == 0 { + sc.readers++ + } + } + if newShareAccess&virtual.ShareMaskWrite != 0 { + if sc.writers > 0 { + overlap |= virtual.ShareMaskWrite + } + if *shareAccess&virtual.ShareMaskWrite == 0 { + sc.writers++ + } + } + *shareAccess |= newShareAccess + return overlap +} + +// Downgrade the set of share reservations, for example as a result of +// closing a file, downgrading an opened file, or releasing a lock +// owner. 
+func (sc *shareCount) downgrade(shareAccess *virtual.ShareMask, newShareAccess virtual.ShareMask) virtual.ShareMask { + cleared := *shareAccess &^ newShareAccess + var becameZero virtual.ShareMask + if cleared&virtual.ShareMaskRead != 0 && sc.readers.decrease() { + becameZero |= virtual.ShareMaskRead + } + if cleared&virtual.ShareMaskWrite != 0 && sc.writers.decrease() { + becameZero |= virtual.ShareMaskWrite + } + *shareAccess = newShareAccess + return becameZero +} + +// Clone share reservations from an open-owner file into a lock-owner file. +func (sc *shareCount) clone(shareAccess virtual.ShareMask) virtual.ShareMask { + if shareAccess&virtual.ShareMaskRead != 0 { + sc.readers.increase() + } + if shareAccess&virtual.ShareMaskWrite != 0 { + sc.writers.increase() + } + return shareAccess +} + +// fileHandle contains information on the current or saved file handle +// that is tracked in a COMPOUND procedure. +type fileHandle struct { + handle nfsv4.NfsFh4 + node virtual.DirectoryChild +} + +func (fh *fileHandle) getNode() (virtual.Node, bool, nfsv4.Nfsstat4) { + if directory, leaf := fh.node.GetPair(); directory != nil { + return directory, true, nfsv4.NFS4_OK + } else if leaf != nil { + return leaf, false, nfsv4.NFS4_OK + } + return nil, false, nfsv4.NFS4ERR_NOFILEHANDLE +} + +func (fh *fileHandle) getDirectory() (virtual.Directory, nfsv4.Nfsstat4) { + if directory, leaf := fh.node.GetPair(); directory != nil { + return directory, nfsv4.NFS4_OK + } else if leaf != nil { + return nil, nfsv4.NFS4ERR_NOTDIR + } + return nil, nfsv4.NFS4ERR_NOFILEHANDLE +} + +func (fh *fileHandle) getDirectoryOrSymlink(ctx context.Context) (virtual.Directory, nfsv4.Nfsstat4) { + if directory, leaf := fh.node.GetPair(); directory != nil { + return directory, nfsv4.NFS4_OK + } else if leaf != nil { + // This call requires that we return NFS4ERR_SYMLINK if + // we stumble upon a symlink. That way the client knows + // that symlink expansion needs to be performed. 
+ var attributes virtual.Attributes + leaf.VirtualGetAttributes(ctx, virtual.AttributesMaskFileType, &attributes) + if attributes.GetFileType() == filesystem.FileTypeSymlink { + return nil, nfsv4.NFS4ERR_SYMLINK + } + return nil, nfsv4.NFS4ERR_NOTDIR + } + return nil, nfsv4.NFS4ERR_NOFILEHANDLE +} + +func (fh *fileHandle) getLeaf() (virtual.Leaf, nfsv4.Nfsstat4) { + if directory, leaf := fh.node.GetPair(); directory != nil { + return nil, nfsv4.NFS4ERR_ISDIR + } else if leaf != nil { + return leaf, nfsv4.NFS4_OK + } + return nil, nfsv4.NFS4ERR_NOFILEHANDLE +} + +// toNFSv4Status converts a status code returned by the virtual file +// system to its NFSv4 equivalent. +func toNFSv4Status(s virtual.Status) nfsv4.Nfsstat4 { + switch s { + case virtual.StatusErrAccess: + return nfsv4.NFS4ERR_ACCESS + case virtual.StatusErrBadHandle: + return nfsv4.NFS4ERR_BADHANDLE + case virtual.StatusErrExist: + return nfsv4.NFS4ERR_EXIST + case virtual.StatusErrInval: + return nfsv4.NFS4ERR_INVAL + case virtual.StatusErrIO: + return nfsv4.NFS4ERR_IO + case virtual.StatusErrIsDir: + return nfsv4.NFS4ERR_ISDIR + case virtual.StatusErrNoEnt: + return nfsv4.NFS4ERR_NOENT + case virtual.StatusErrNotDir: + return nfsv4.NFS4ERR_NOTDIR + case virtual.StatusErrNotEmpty: + return nfsv4.NFS4ERR_NOTEMPTY + case virtual.StatusErrNXIO: + return nfsv4.NFS4ERR_NXIO + case virtual.StatusErrPerm: + return nfsv4.NFS4ERR_PERM + case virtual.StatusErrROFS: + return nfsv4.NFS4ERR_ROFS + case virtual.StatusErrStale: + return nfsv4.NFS4ERR_STALE + case virtual.StatusErrSymlink: + return nfsv4.NFS4ERR_SYMLINK + case virtual.StatusErrXDev: + return nfsv4.NFS4ERR_XDEV + default: + panic("Unknown status") + } +} + +// toNFSv4ChangeInfo converts directory change information returned by +// the virtual file system to its NFSv4 equivalent. 
+func toNFSv4ChangeInfo(changeInfo *virtual.ChangeInfo) nfsv4.ChangeInfo4 { + // Implementations of virtual.Directory should make sure that + // mutations are implemented atomically, so it's safe to report + // the operation as being atomic. + return nfsv4.ChangeInfo4{ + Atomic: true, + Before: changeInfo.Before, + After: changeInfo.After, + } +} + +// clientState keeps track of all state corresponding to a single +// client. For every client we track one or more confirmations that can +// be completed using SETCLIENTID_CONFIRM. If SETCLIENTID_CONFIRM is +// called at least once, we track a confirmed client state. +type clientState struct { + longID string + + confirmationsByClientVerifier map[nfsv4.Verifier4]*clientConfirmationState + confirmed *confirmedClientState +} + +// clientConfirmationState keeps track of all state corresponding to a +// single client confirmation record created through SETCLIENTID. +type clientConfirmationState struct { + client *clientState + clientVerifier nfsv4.Verifier4 + key clientConfirmationKey + + nextIdle *clientConfirmationState + previousIdle *clientConfirmationState + lastSeen time.Time + holdCount int +} + +// removeFromIdleList removes the client confirmation from the list of +// clients that are currently not performing any operations against the +// server. +func (ccs *clientConfirmationState) removeFromIdleList() { + ccs.previousIdle.nextIdle = ccs.nextIdle + ccs.nextIdle.previousIdle = ccs.previousIdle + ccs.previousIdle = nil + ccs.nextIdle = nil +} + +// insertIntoIdleList inserts the client confirmation into the list of +// clients that are currently not performing any operations against the +// server. 
+func (ccs *clientConfirmationState) insertIntoIdleList(p *baseProgram) { + ccs.previousIdle = p.idleClientConfirmations.previousIdle + ccs.nextIdle = &p.idleClientConfirmations + ccs.previousIdle.nextIdle = ccs + ccs.nextIdle.previousIdle = ccs + ccs.lastSeen = p.now +} + +// hold the client confirmation in such a way that it's not garbage +// collected. This needs to be called prior to performing a blocking +// operation. +func (ccs *clientConfirmationState) hold(p *baseProgram) { + if ccs.holdCount == 0 { + ccs.removeFromIdleList() + } + ccs.holdCount++ +} + +// release the client confirmation in such a way that it may be garbage +// collected. +func (ccs *clientConfirmationState) release(p *baseProgram) { + if ccs.holdCount == 0 { + panic("Attempted to decrease zero hold count") + } + ccs.holdCount-- + if ccs.holdCount == 0 { + ccs.insertIntoIdleList(p) + } +} + +// remove the client confirmation. If the client was confirmed through +// SETCLIENTID_CONFIRM, all open files and acquired locks will be +// released. +func (ccs *clientConfirmationState) remove(p *baseProgram, ll *leavesToClose) { + if ccs.holdCount != 0 { + panic("Attempted to remove a client confirmation that was running one or more blocking operations") + } + + client := ccs.client + confirmedClient := client.confirmed + if confirmedClient != nil && confirmedClient.confirmation == ccs { + // This client confirmation record was confirmed, + // meaning that removing it should also close all opened + // files and release all locks. + for _, oos := range confirmedClient.openOwners { + oos.remove(p, ll) + } + if len(confirmedClient.lockOwners) != 0 { + panic("Removing open-owners should have removed lock-owners as well") + } + client.confirmed = nil + } + + // Remove the client confirmation. 
+ delete(client.confirmationsByClientVerifier, ccs.clientVerifier) + delete(p.clientConfirmationsByKey, ccs.key) + delete(p.clientConfirmationsByShortID, ccs.key.shortClientID) + ccs.removeFromIdleList() + + // Remove the client if it no longer contains any confirmations. + if len(client.confirmationsByClientVerifier) == 0 { + delete(p.clientsByLongID, client.longID) + } +} + +// confirmedClientState stores all state for a client that has been +// confirmed through SETCLIENTID_CONFIRM. +type confirmedClientState struct { + confirmation *clientConfirmationState + openOwners map[string]*openOwnerState + lockOwners map[string]*lockOwnerState +} + +// clientConfirmationKey contains the information that a client must +// provide through SETCLIENTID_CONFIRM to confirm the client's +// registration. +type clientConfirmationKey struct { + shortClientID uint64 + serverVerifier nfsv4.Verifier4 +} + +// openOwnerState stores information on a single open-owner, which is a +// single process running on the client that opens files through the +// mount. +type openOwnerState struct { + confirmedClient *confirmedClientState + key string + + // When not nil, an OPEN or OPEN_CONFIRM operation is in progress. + currentTransactionWait <-chan struct{} + + confirmed bool + lastSeqID nfsv4.Seqid4 + lastResponse *openOwnerLastResponse + filesByHandle map[string]*openOwnerFileState + + // Double linked list for open-owners that are unused. These + // need to be garbage collected after some time, as the client + // does not do that explicitly. + nextUnused *openOwnerState + previousUnused *openOwnerState + lastUsed time.Time +} + +// waitForCurrentTransactionCompletion blocks until any transaction that +// is running right now completes. Because it needs to drop the lock +// while waiting, this method returns a boolean value indicating whether +// it's safe to progress. If not, the caller should retry the lookup of +// the open-owner state. 
+func (oos *openOwnerState) waitForCurrentTransactionCompletion(p *baseProgram) bool { + if wait := oos.currentTransactionWait; wait != nil { + p.leave() + <-wait + p.enter() + return false + } + return true +} + +// forgetLastResponse can be called when the cached response of the last +// transaction needs to be removed. This is at the start of any +// subsequent transaction, or when reinitializing/removing the +// open-owner state. +// +// This method MUST be called before making any mutations to the +// open-owner state, as it also removes resources that were released +// during the previous transaction. +func (oos *openOwnerState) forgetLastResponse(p *baseProgram) { + if oolr := oos.lastResponse; oolr != nil { + oos.lastResponse = nil + if oofs := oolr.closedFile; oofs != nil { + oofs.removeFinalize(p) + } + } +} + +// reinitialize the open-owner state in such a way that no files are +// opened. This method can be called when an unconfirmed open-owner is +// repurposed, or prior to forcefully removing an open-owner. +func (oos *openOwnerState) reinitialize(p *baseProgram, ll *leavesToClose) { + if oos.currentTransactionWait != nil { + panic("Attempted to reinitialize an open-owner while a transaction is in progress") + } + + oos.forgetLastResponse(p) + for _, oofs := range oos.filesByHandle { + oofs.removeStart(p, ll) + oofs.removeFinalize(p) + } +} + +// isUnused returns whether the open-owner state is unused, meaning that +// it should be garbage collected if a sufficient amount of time passes. +func (oos *openOwnerState) isUnused() bool { + return len(oos.filesByHandle) == 0 || + (len(oos.filesByHandle) == 1 && oos.lastResponse != nil && oos.lastResponse.closedFile != nil) || + !oos.confirmed +} + +// removeFromUnusedList removes the open-owner state from the list of +// open-owner states that have no open files or are not confirmed. These +// are garbage collected if a sufficient amount of time passes. 
+func (oos *openOwnerState) removeFromUnusedList() { + oos.previousUnused.nextUnused = oos.nextUnused + oos.nextUnused.previousUnused = oos.previousUnused + oos.previousUnused = nil + oos.nextUnused = nil +} + +// remove the open-owner state. All opened files will be closed. +func (oos *openOwnerState) remove(p *baseProgram, ll *leavesToClose) { + oos.reinitialize(p, ll) + if oos.nextUnused != nil { + oos.removeFromUnusedList() + } + delete(oos.confirmedClient.openOwners, oos.key) + baseProgramOpenOwnersRemoved.Inc() +} + +// unconfirmedOpenOwnerPolicy is an enumeration that describes how +// startTransaction() should behave when called against an open-owner +// that has not been confirmed. +type unconfirmedOpenOwnerPolicy int + +const ( + // Allow the transaction to take place against unconfirmed + // open-owners. This should be used by OPEN_CONFIRM. + unconfirmedOpenOwnerPolicyAllow = iota + // Don't allow the transaction to take place against unconfirmed + // open-owners. This should be used by CLOSE, OPEN_DOWNGRADE, + // etc.. + unconfirmedOpenOwnerPolicyDeny + // Allow the transaction to take place against unconfirmed + // open-owners, but do reinitialize them before progressing. + // This should be used by OPEN, as it should assume that the + // previously sent operation was a replay. + // + // More details: RFC 7530, section 16.18.5, paragraph 5. + unconfirmedOpenOwnerPolicyReinitialize +) + +func (oos *openOwnerState) startTransaction(p *baseProgram, seqID nfsv4.Seqid4, ll *leavesToClose, policy unconfirmedOpenOwnerPolicy) (*openOwnerTransaction, interface{}, nfsv4.Nfsstat4) { + if oos.currentTransactionWait != nil { + panic("Attempted to start a new transaction while another one is in progress") + } + + if lastResponse := oos.lastResponse; lastResponse != nil && seqID == oos.lastSeqID { + // Replay of the last request, meaning we should return + // a cached response. 
This can only be done when the + // type of operation is the same, which is determined by + // the caller. + // + // More details: RFC 7530, section 9.1.9, bullet point 3. + return nil, lastResponse.response, nfsv4.NFS4ERR_BAD_SEQID + } + + if oos.confirmed { + // For confirmed open-owners, only permit operations + // that start the next transaction. + if seqID != nextSeqID(oos.lastSeqID) { + return nil, nil, nfsv4.NFS4ERR_BAD_SEQID + } + } else { + switch policy { + case unconfirmedOpenOwnerPolicyAllow: + if seqID != nextSeqID(oos.lastSeqID) { + return nil, nil, nfsv4.NFS4ERR_BAD_SEQID + } + case unconfirmedOpenOwnerPolicyDeny: + return nil, nil, nfsv4.NFS4ERR_BAD_SEQID + case unconfirmedOpenOwnerPolicyReinitialize: + oos.reinitialize(p, ll) + } + } + + // Start a new transaction. Because the client has sent a + // request with a new sequence ID, we know it will no longer + // attempt to retry the previous operation. Release the last + // response and any state IDs that were closed during the + // previous operation. + oos.forgetLastResponse(p) + wait := make(chan struct{}, 1) + oos.currentTransactionWait = wait + + if oos.nextUnused != nil { + // Prevent garbage collection of the open-owner while + // operation takes place. It will be reinserted upon + // completion of the transaction, if needed. + oos.removeFromUnusedList() + } + + // Prevent garbage collection of the client. + oos.confirmedClient.confirmation.hold(p) + + return &openOwnerTransaction{ + program: p, + state: oos, + seqID: seqID, + wait: wait, + }, nil, nfsv4.NFS4_OK +} + +// openOwnerTransaction is a helper object for completing a transaction +// that was created using startTransaction(). 
+type openOwnerTransaction struct { + program *baseProgram + state *openOwnerState + seqID nfsv4.Seqid4 + wait chan<- struct{} +} + +func (oot *openOwnerTransaction) complete(lastResponse *openOwnerLastResponse) { + close(oot.wait) + oos := oot.state + oos.currentTransactionWait = nil + + if transactionShouldComplete(lastResponse.response.GetStatus()) { + oos.lastSeqID = oot.seqID + oos.lastResponse = lastResponse + } + + p := oot.program + if oos.isUnused() { + // Open-owner should be garbage collected. Insert it + // into the list of open-owners to be removed. + oos.previousUnused = p.unusedOpenOwners.previousUnused + oos.nextUnused = &p.unusedOpenOwners + oos.previousUnused.nextUnused = oos + oos.nextUnused.previousUnused = oos + oos.lastUsed = p.now + } + + // Re-enable garbage collection of the client. + oos.confirmedClient.confirmation.release(p) +} + +// responseMessage is an interface for response messages of an +// open-owner or lock-owner transaction. +type responseMessage interface{ GetStatus() nfsv4.Nfsstat4 } + +// openOwnerLastResponse contains information on the outcome of the last +// transaction of a given open-owner. This information is needed both to +// respond to retries, but also to definitively remove state IDs closed +// by the last transaction. +type openOwnerLastResponse struct { + response responseMessage + closedFile *openOwnerFileState +} + +// openOwnerFileState stores information on a file that is currently +// opened within the context of a single open-owner. +type openOwnerFileState struct { + // Constant fields. + openedFile *openedFileState + + // Variable fields. + openOwner *openOwnerState + shareAccess virtual.ShareMask + stateID regularStateID + shareCount shareCount + lockOwnerFiles map[*lockOwnerState]*lockOwnerFileState +} + +// downgradeShareAccess downgrades the share reservations of an +// open-owner file or lock-owner-file. 
If this causes a given share +// reservation to become unused by the open-owner file and all of its +// lock-owner files, it schedules (partial) closure of the underlying +// virtual.Leaf object. +// +// This method is called at the end of CLOSE and RELEASE_LOCKOWNER, but +// also at the end of READ or WRITE. A call to CLOSE/RELEASE_LOCKOWNER +// may not immediately close a file if one or more READ/WRITE operations +// are still in progress. +func (oofs *openOwnerFileState) downgradeShareAccess(shareAccess *virtual.ShareMask, newShareAccess virtual.ShareMask, ll *leavesToClose) { + if shareAccessToClose := oofs.shareCount.downgrade(shareAccess, newShareAccess); shareAccessToClose != 0 { + ll.leaves = append(ll.leaves, leafToClose{ + leaf: oofs.openedFile.leaf, + shareAccess: shareAccessToClose, + }) + } +} + +func (oofs *openOwnerFileState) removeStart(p *baseProgram, ll *leavesToClose) { + // Release lock state IDs associated with the file. This should + // be done as part of CLOSE; not LOCKU. If these have one or + // more byte ranges locked, we unlock them. It would also be + // permitted to return NFS4ERR_LOCKS_HELD, requiring that the + // client issues LOCKU operations before retrying, but that is + // less efficient. + // + // More details: + // - RFC 7530, section 9.1.4.4, paragraph 1. + // - RFC 7530, section 9.10, paragraph 3. + // - RFC 7530, section 16.2.4, paragraph 2. + for _, lofs := range oofs.lockOwnerFiles { + lofs.remove(p, ll) + } + + oofs.downgradeShareAccess(&oofs.shareAccess, 0, ll) +} + +// removeFinalize removes an opened file from the open-owner state. This +// method is not called during CLOSE, but during the next transaction on +// an open-owner. This ensures that its state ID remains resolvable, +// allowing the CLOSE operation to be retried. +func (oofs *openOwnerFileState) removeFinalize(p *baseProgram) { + // Disconnect the openOwnerFileState. 
+ handleKey := oofs.openedFile.handleKey + delete(oofs.openOwner.filesByHandle, handleKey) + delete(p.openOwnerFilesByOther, oofs.stateID.other) + oofs.openOwner = nil + baseProgramOpenOwnerFilesRemoved.Inc() + + // Disconnect the openedFileState. Do leave it attached to the + // openOwnerFileState, so that in-flight READ and WRITE + // operations can still safely call close(). + if oofs.openedFile.openOwnersCount.decrease() { + delete(p.openedFilesByHandle, handleKey) + } +} + +// Upgrade the share access mask of an opened file (e.g., from reading +// to reading and writing). This needs to be performed if an open-owner +// attempts to open the same file multiple times. +func (oofs *openOwnerFileState) upgrade(shareAccess virtual.ShareMask, leaf virtual.Leaf, ll *leavesToClose) { + if overlap := oofs.shareCount.upgrade(&oofs.shareAccess, shareAccess); overlap != 0 { + // As there is overlap between the share access masks, + // the file is opened redundantly. Close it. + ll.leaves = append(ll.leaves, leafToClose{ + leaf: leaf, + shareAccess: overlap, + }) + } + oofs.stateID.seqID = nextSeqID(oofs.stateID.seqID) +} + +// openedFileState stores information on a file that is currently opened +// at least once. It is stored in the openedFilesByHandle map. This +// allows these files to be resolvable through PUTFH, even if they are +// no longer linked in the file system. +type openedFileState struct { + // Constant fields. + handle nfsv4.NfsFh4 + handleKey string + leaf virtual.Leaf + + // Variable fields. + openOwnersCount referenceCount + locks virtual.ByteRangeLockSet[*lockOwnerState] +} + +// lockOwnerState represents byte-range locking state associated with a +// given opened file and given lock-owner. Because lock-owners are bound +// to a single file (i.e., they can't contain locks belonging to +// different files), it is contained in the openedFileState. +// +// More details: RFC 7530, section 16.10.5, paragraph 6. 
+type lockOwnerState struct { + confirmedClient *confirmedClientState + owner []byte + + lastSeqID nfsv4.Seqid4 + lastResponse responseMessage + files []*lockOwnerFileState +} + +func (los *lockOwnerState) forgetLastResponse(p *baseProgram) { + los.lastResponse = nil +} + +func (los *lockOwnerState) startTransaction(p *baseProgram, seqID nfsv4.Seqid4, initialTransaction bool) (*lockOwnerTransaction, interface{}, nfsv4.Nfsstat4) { + if lastResponse := los.lastResponse; lastResponse != nil && seqID == los.lastSeqID { + // Replay of the last request, meaning we should return + // a cached response. This can only be done when the + // type of operation is the same, which is determined by + // the caller. + // + // More details: RFC 7530, section 9.1.9, bullet point 3. + return nil, lastResponse, nfsv4.NFS4ERR_BAD_SEQID + } + + if !initialTransaction && seqID != nextSeqID(los.lastSeqID) { + return nil, nil, nfsv4.NFS4ERR_BAD_SEQID + } + + // Start a new transaction. Because the client has sent a + // request with a new sequence ID, we know it will no longer + // attempt to retry the previous operation. Release the last + // response and any state IDs that were closed during the + // previous operation. + los.forgetLastResponse(p) + + // Prevent garbage collection of the client. + los.confirmedClient.confirmation.hold(p) + + return &lockOwnerTransaction{ + program: p, + state: los, + seqID: seqID, + }, nil, nfsv4.NFS4_OK +} + +type lockOwnerTransaction struct { + program *baseProgram + state *lockOwnerState + seqID nfsv4.Seqid4 +} + +func (lot *lockOwnerTransaction) complete(lastResponse responseMessage) { + los := lot.state + if transactionShouldComplete(lastResponse.GetStatus()) { + los.lastSeqID = lot.seqID + los.lastResponse = lastResponse + } + + // Re-enable garbage collection of the client. + p := lot.program + los.confirmedClient.confirmation.release(p) +} + +type lockOwnerFileState struct { + // Constant fields. 
+ lockOwner *lockOwnerState + openOwnerFile *openOwnerFileState + shareAccess virtual.ShareMask + + // Variable fields. + lockOwnerIndex int + stateID regularStateID + lockCount int +} + +func (lofs *lockOwnerFileState) remove(p *baseProgram, ll *leavesToClose) { + if lofs.lockCount > 0 { + // Lock-owner still has one or more locks held on this + // file. Issue an unlock operation that spans the full + // range of the file to release all locks at once. + lock := &virtual.ByteRangeLock[*lockOwnerState]{ + Owner: lofs.lockOwner, + Start: 0, + End: math.MaxUint64, + Type: virtual.ByteRangeLockTypeUnlocked, + } + lofs.lockCount += lofs.openOwnerFile.openedFile.locks.Set(lock) + if lofs.lockCount != 0 { + panic("Failed to release locks") + } + } + + // Remove the lock-owner file from maps. + delete(p.lockOwnerFilesByOther, lofs.stateID.other) + los := lofs.lockOwner + oofs := lofs.openOwnerFile + delete(oofs.lockOwnerFiles, los) + oofs.downgradeShareAccess(&lofs.shareAccess, 0, ll) + + // Remove the lock-owner file from the list in the lock-owner. + // We do need to make sure the list remains contiguous. + lastIndex := len(los.files) - 1 + lastLOFS := los.files[lastIndex] + lastLOFS.lockOwnerIndex = lofs.lockOwnerIndex + los.files[lastLOFS.lockOwnerIndex] = lastLOFS + los.files[lastIndex] = nil + los.files = los.files[:lastIndex] + lofs.lockOwnerIndex = -1 + + // Remove the lock-owner if there are no longer any files + // associated with it. + if len(los.files) == 0 { + delete(los.confirmedClient.lockOwners, string(los.owner)) + } +} + +// leafToClose contains information on a virtual file system leaf node +// that needs to be closed at the end of the current operation, after +// locks have been released. +type leafToClose struct { + leaf virtual.Leaf + shareAccess virtual.ShareMask +} + +// leavesToClose is a list of virtual file system leaf nodes that need +// to be closed at the end of the current operation, after locks have +// been released. 
+type leavesToClose struct { + leaves []leafToClose +} + +func (ll *leavesToClose) empty() bool { + return len(ll.leaves) == 0 +} + +func (ll *leavesToClose) closeAll() { + for _, l := range ll.leaves { + l.leaf.VirtualClose(l.shareAccess) + } +} + +// attrRequestToAttributesMask converts a bitmap of NFSv4 attributes to +// their virtual file system counterparts. This method is used by +// GETATTR and READDIR to determine which attributes need to be +// requested. +func attrRequestToAttributesMask(attrRequest nfsv4.Bitmap4) virtual.AttributesMask { + var attributesMask virtual.AttributesMask + if len(attrRequest) > 0 { + // Attributes 0 to 31. + f := attrRequest[0] + if f&uint32(1< 1 { + // Attributes 32 to 63. + f := attrRequest[1] + if f&uint32(1<<(nfsv4.FATTR4_MODE-32)) != 0 { + attributesMask |= virtual.AttributesMaskPermissions + } + if f&uint32(1<<(nfsv4.FATTR4_NUMLINKS-32)) != 0 { + attributesMask |= virtual.AttributesMaskLinkCount + } + if f&uint32(1<<(nfsv4.FATTR4_TIME_MODIFY-32)) != 0 { + attributesMask |= virtual.AttributesMaskLastDataModificationTime + } + } + return attributesMask +} + +// deterministicNfstime4 is a timestamp that is reported as the access, +// metadata and modify time of all files. If these timestamps were not +// returned, clients would use 1970-01-01T00:00:00Z. As this tends to +// confuse many tools, a deterministic timestamp is used instead. +var deterministicNfstime4 = timeToNfstime4(filesystem.DeterministicFileModificationTimestamp) + +func attributesMaskToBitmap4(in virtual.AttributesMask) []uint32 { + out := make([]uint32, 2) + if in&virtual.AttributesMaskPermissions != 0 { + out[1] |= (1 << (nfsv4.FATTR4_MODE - 32)) + } + if in&virtual.AttributesMaskSizeBytes != 0 { + out[0] |= (1 << nfsv4.FATTR4_SIZE) + } + for len(out) > 0 && out[len(out)-1] == 0 { + out = out[:len(out)-1] + } + return out +} + +// nextSeqID increments a sequence ID according to the rules described +// in RFC 7530, section 9.1.3. 
+func nextSeqID(seqID nfsv4.Seqid4) nfsv4.Seqid4 { + if seqID == math.MaxUint32 { + return 1 + } + return seqID + 1 +} + +// isNextStateID returns true if state ID a is the successor of state ID b. +// +// At a minimum, the standard states that when returning a cached +// response to operations such as CLOSE, LOCK, LOCKU, OPEN_CONFIRM, and +// OPEN_DOWNGRADE, it is sufficient to compare the original operation +// type and the operation's sequence ID. This function can be used to +// increase strictness by ensuring that the state IDs in the request +// also match the originally provided values. +// +// More details: RFC 7530, section 9.1.9, bullet point 3. +func isNextStateID(a, b *nfsv4.Stateid4) bool { + return a.Other == b.Other && a.Seqid == nextSeqID(b.Seqid) +} + +// toShareMask converts NFSv4 share_access values that are part of OPEN +// and OPEN_DOWNGRADE requests to our equivalent ShareMask values. +func shareAccessToShareMask(in uint32) (virtual.ShareMask, nfsv4.Nfsstat4) { + switch in { + case nfsv4.OPEN4_SHARE_ACCESS_READ: + return virtual.ShareMaskRead, nfsv4.NFS4_OK + case nfsv4.OPEN4_SHARE_ACCESS_WRITE: + return virtual.ShareMaskWrite, nfsv4.NFS4_OK + case nfsv4.OPEN4_SHARE_ACCESS_BOTH: + return virtual.ShareMaskRead | virtual.ShareMaskWrite, nfsv4.NFS4_OK + default: + return 0, nfsv4.NFS4ERR_INVAL + } +} + +// Even though no "." and ".." entries should be returned, the NFSv4 +// spec requires that cookie values 0, 1 and 2 are never returned. +// Offset all responses by this value. +const lastReservedCookie = 2 + +// readdirReporter is an implementation of DirectoryEntryReporter that +// reports the contents of a directory in the NFSv4 directory entry +// format. 
+type readdirReporter struct { + program *baseProgram + attrRequest nfsv4.Bitmap4 + maxCount nfsv4.Count4 + dirCount nfsv4.Count4 + + currentMaxCount nfsv4.Count4 + currentDirCount nfsv4.Count4 + nextEntry **nfsv4.Entry4 + endOfFile *bool +} + +func (r *readdirReporter) ReportEntry(nextCookie uint64, name path.Component, child virtual.DirectoryChild, attributes *virtual.Attributes) bool { + // The dircount field is a hint of the maximum number of bytes + // of directory information that should be returned. Only the + // size of the XDR encoded filename and cookie should contribute + // to its value. + filename := name.String() + if r.dirCount != 0 { + r.currentDirCount += nfsv4.Count4(nfsv4.GetComponent4EncodedSizeBytes(filename) + nfsv4.NfsCookie4EncodedSizeBytes) + if r.currentDirCount > r.dirCount { + *r.endOfFile = false + return false + } + } + + p := r.program + entry := nfsv4.Entry4{ + Cookie: lastReservedCookie + nextCookie, + Name: filename, + Attrs: p.attributesToFattr4(attributes, r.attrRequest), + } + + // The maxcount field is the maximum number of bytes for the + // READDIR4resok structure. + r.currentMaxCount += nfsv4.Count4(entry.GetEncodedSizeBytes()) + if r.currentMaxCount > r.maxCount { + *r.endOfFile = false + return false + } + + *r.nextEntry = &entry + r.nextEntry = &entry.Nextentry + return true +} + +// fattr4ToAttributes converts a client-provided NFSv4 fattr4 to a set +// of virtual file system attributes. Only attributes that are both +// writable and supported by this implementation are accepted. +func fattr4ToAttributes(in *nfsv4.Fattr4, out *virtual.Attributes) nfsv4.Nfsstat4 { + r := bytes.NewBuffer(in.AttrVals) + if len(in.Attrmask) > 0 { + // Attributes 0 to 31. + f := in.Attrmask[0] + if f&^(1< 1 { + // Attributes 32 to 63. 
+		f := in.Attrmask[1]
+		if f&^(1<<(nfsv4.FATTR4_MODE-32)) != 0 {
+			return nfsv4.NFS4ERR_ATTRNOTSUPP
+		}
+		if f&(1<<(nfsv4.FATTR4_MODE-32)) != 0 {
+			mode, _, err := nfsv4.ReadMode4(r)
+			if err != nil {
+				return nfsv4.NFS4ERR_BADXDR
+			}
+			out.SetPermissions(virtual.NewPermissionsFromMode(mode))
+		}
+	}
+	for i := 2; i < len(in.Attrmask); i++ {
+		// Attributes 64 or higher.
+		if in.Attrmask[i] != 0 {
+			return nfsv4.NFS4ERR_ATTRNOTSUPP
+		}
+	}
+	if r.Len() != 0 {
+		// Provided attributes contain trailing data.
+		return nfsv4.NFS4ERR_BADXDR
+	}
+	return nfsv4.NFS4_OK
+}
+
+// transactionShouldComplete returns whether a transaction should be
+// completed, based on the resulting status code of the transaction.
+// Even in the case where an error occurs, the sequence number of the
+// client should still be advanced. The only exception is if the
+// operation fails with any of the errors listed below.
+//
+// More details: RFC 7530, section 9.1.7, last paragraph.
+func transactionShouldComplete(st nfsv4.Nfsstat4) bool {
+	return st != nfsv4.NFS4ERR_STALE_CLIENTID &&
+		st != nfsv4.NFS4ERR_STALE_STATEID &&
+		st != nfsv4.NFS4ERR_BAD_STATEID &&
+		st != nfsv4.NFS4ERR_BAD_SEQID &&
+		st != nfsv4.NFS4ERR_BADXDR &&
+		st != nfsv4.NFS4ERR_RESOURCE &&
+		st != nfsv4.NFS4ERR_NOFILEHANDLE &&
+		st != nfsv4.NFS4ERR_MOVED
+}
+
+// compareStateSeqID compares a client-provided sequence ID value with
+// one present on the server. The error that needs to be returned in
+// case of non-matching sequence IDs depends on whether the value lies
+// in the past or future.
+//
+// More details: RFC 7530, section 9.1.3, last paragraph.
+func compareStateSeqID(clientValue, serverValue nfsv4.Seqid4) nfsv4.Nfsstat4 {
+	if clientValue == serverValue {
+		return nfsv4.NFS4_OK
+	}
+	if int32(clientValue-serverValue) > 0 {
+		return nfsv4.NFS4ERR_BAD_STATEID
+	}
+	return nfsv4.NFS4ERR_OLD_STATEID
+}
+
+// nfsLockType4ToByteRangeLockType converts an NFSv4 lock type to a
+// virtual file system byte range lock type. 
As this implementation does +// not attempt to provide any fairness, no distinction is made between +// waiting and non-waiting lock type variants. +func nfsLockType4ToByteRangeLockType(in nfsv4.NfsLockType4) (virtual.ByteRangeLockType, nfsv4.Nfsstat4) { + switch in { + case nfsv4.READ_LT, nfsv4.READW_LT: + return virtual.ByteRangeLockTypeLockedShared, nfsv4.NFS4_OK + case nfsv4.WRITE_LT, nfsv4.WRITEW_LT: + return virtual.ByteRangeLockTypeLockedExclusive, nfsv4.NFS4_OK + default: + return 0, nfsv4.NFS4ERR_INVAL + } +} + +// offsetLengthToStartEnd converts an (offset, length) pair to a +// (start, end) pair. The former is used by NFSv4, while the latter is +// used by ByteRangeLock. +// +// More details: RFC 7530, section 16.10.4, paragraph 2. +func offsetLengthToStartEnd(offset, length uint64) (uint64, uint64, nfsv4.Nfsstat4) { + switch length { + case 0: + return 0, 0, nfsv4.NFS4ERR_INVAL + case math.MaxUint64: + // A length of all ones indicates end-of-file. + return offset, math.MaxUint64, nfsv4.NFS4_OK + default: + if length > math.MaxUint64-offset { + // The end exceeds the maximum 64-bit unsigned + // integer value. + return 0, 0, nfsv4.NFS4ERR_INVAL + } + return offset, offset + length, nfsv4.NFS4_OK + } +} + +// byteRangeLockToLock4Denied converts information on a conflicting byte +// range lock into a LOCK4denied response. 
+func byteRangeLockToLock4Denied(lock *virtual.ByteRangeLock[*lockOwnerState]) nfsv4.Lock4denied { + length := uint64(math.MaxUint64) + if lock.End != math.MaxUint64 { + length = lock.End - lock.Start + } + var lockType nfsv4.NfsLockType4 + switch lock.Type { + case virtual.ByteRangeLockTypeLockedShared: + lockType = nfsv4.READ_LT + case virtual.ByteRangeLockTypeLockedExclusive: + lockType = nfsv4.WRITE_LT + default: + panic("Unexpected lock type") + } + los := lock.Owner + return nfsv4.Lock4denied{ + Offset: lock.Start, + Length: length, + Locktype: lockType, + Owner: nfsv4.LockOwner4{ + Clientid: los.confirmedClient.confirmation.key.shortClientID, + Owner: los.owner, + }, + } +} + +// nfsv4NewComponent converts a filename string that's provided as part +// of an incoming request to a pathname component that can be provided +// to the virtual file system layer. +func nfsv4NewComponent(name string) (path.Component, nfsv4.Nfsstat4) { + if name == "" { + // Inherently invalid name. + return path.Component{}, nfsv4.NFS4ERR_INVAL + } + component, ok := path.NewComponent(name) + if !ok { + // Name that is invalid for this implementation. + return path.Component{}, nfsv4.NFS4ERR_BADNAME + } + return component, nfsv4.NFS4_OK +} + +// timeToNfstime4 converts a timestamp to its NFSv4 equivalent. 
+func timeToNfstime4(t time.Time) nfsv4.Nfstime4 { + nanos := t.UnixNano() + return nfsv4.Nfstime4{ + Seconds: nanos / 1e9, + Nseconds: uint32(nanos % 1e9), + } +} diff --git a/pkg/filesystem/virtual/nfsv4/base_program_test.go b/pkg/filesystem/virtual/nfsv4/base_program_test.go new file mode 100644 index 0000000..5a60f8b --- /dev/null +++ b/pkg/filesystem/virtual/nfsv4/base_program_test.go @@ -0,0 +1,6960 @@ +package nfsv4_test + +import ( + "bytes" + "context" + "io" + "testing" + "time" + + "github.com/buildbarn/bb-remote-execution/internal/mock" + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/virtual" + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/virtual/nfsv4" + "github.com/buildbarn/bb-storage/pkg/filesystem" + "github.com/buildbarn/bb-storage/pkg/filesystem/path" + nfsv4_xdr "github.com/buildbarn/go-xdr/pkg/protocols/nfsv4" + "github.com/buildbarn/go-xdr/pkg/protocols/rpcv2" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" +) + +func handleResolverExpectCall(t *testing.T, handleResolver *mock.MockHandleResolver, expectedID []byte, child virtual.DirectoryChild, status virtual.Status) { + handleResolver.EXPECT().Call(gomock.Any()). + DoAndReturn(func(id io.WriterTo) (virtual.DirectoryChild, virtual.Status) { + idBuf := bytes.NewBuffer(nil) + n, err := id.WriteTo(idBuf) + require.NoError(t, err) + require.Equal(t, int64(len(expectedID)), n) + require.Equal(t, expectedID, idBuf.Bytes()) + return child, status + }) +} + +func randomNumberGeneratorExpectRead(randomNumberGenerator *mock.MockSingleThreadedGenerator, data []byte) { + randomNumberGenerator.EXPECT().Read(gomock.Len(len(data))). 
+ DoAndReturn(func(p []byte) (int, error) { + return copy(p, data), nil + }) +} + +func setClientIDForTesting(ctx context.Context, t *testing.T, randomNumberGenerator *mock.MockSingleThreadedGenerator, program nfsv4_xdr.Nfs4Program, shortClientID nfsv4_xdr.Clientid4) { + randomNumberGenerator.EXPECT().Uint64().Return(uint64(shortClientID)) + randomNumberGeneratorExpectRead(randomNumberGenerator, []byte{0xf8, 0x6e, 0x57, 0x12, 0x9c, 0x7a, 0x62, 0x8a}) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "setclientid", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_SETCLIENTID{ + Opsetclientid: nfsv4_xdr.Setclientid4args{ + Client: nfsv4_xdr.NfsClientId4{ + Verifier: nfsv4_xdr.Verifier4{0x95, 0x38, 0xc4, 0xfc, 0x81, 0x3e, 0x92, 0x2a}, + Id: []byte{0xa6, 0x9d, 0x64, 0x34, 0xdb, 0xcb, 0x09, 0x53}, + }, + Callback: nfsv4_xdr.CbClient4{ + CbProgram: 0x8554a7c7, + CbLocation: nfsv4_xdr.Clientaddr4{ + RNetid: "tcp", + RAddr: "127.0.0.1.196.95", + }, + }, + CallbackIdent: 0xa2bef9ca, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "setclientid", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_SETCLIENTID{ + Opsetclientid: &nfsv4_xdr.Setclientid4res_NFS4_OK{ + Resok4: nfsv4_xdr.Setclientid4resok{ + Clientid: shortClientID, + SetclientidConfirm: nfsv4_xdr.Verifier4{0xf8, 0x6e, 0x57, 0x12, 0x9c, 0x7a, 0x62, 0x8a}, + }, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) + + res, err = program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "setclientid", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_SETCLIENTID_CONFIRM{ + OpsetclientidConfirm: nfsv4_xdr.SetclientidConfirm4args{ + Clientid: shortClientID, + SetclientidConfirm: nfsv4_xdr.Verifier4{0xf8, 0x6e, 0x57, 0x12, 0x9c, 0x7a, 0x62, 0x8a}, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "setclientid", + Resarray: []nfsv4_xdr.NfsResop4{ + 
&nfsv4_xdr.NfsResop4_OP_SETCLIENTID_CONFIRM{ + OpsetclientidConfirm: nfsv4_xdr.SetclientidConfirm4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) +} + +func openUnconfirmedFileForTesting(ctx context.Context, t *testing.T, randomNumberGenerator *mock.MockSingleThreadedGenerator, program nfsv4_xdr.Nfs4Program, rootDirectory *mock.MockVirtualDirectory, leaf *mock.MockVirtualLeaf, fileHandle nfsv4_xdr.NfsFh4, shortClientID nfsv4_xdr.Clientid4, seqID nfsv4_xdr.Seqid4, stateIDOther [nfsv4_xdr.NFS4_OTHER_SIZE]byte) { + rootDirectory.EXPECT().VirtualOpenChild( + ctx, + path.MustNewComponent("Hello"), + virtual.ShareMaskRead, + nil, + &virtual.OpenExistingOptions{}, + virtual.AttributesMaskFileHandle, + gomock.Any(), + ).DoAndReturn(func(ctx context.Context, name path.Component, shareAccess virtual.ShareMask, createAttributes *virtual.Attributes, existingOptions *virtual.OpenExistingOptions, requested virtual.AttributesMask, openedFileAttributes *virtual.Attributes) (virtual.Leaf, virtual.AttributesMask, virtual.ChangeInfo, virtual.Status) { + openedFileAttributes.SetFileHandle(fileHandle) + return leaf, 0, virtual.ChangeInfo{ + Before: 0x29291f1b07caf9ea, + After: 0x360e671892329978, + }, virtual.StatusOK + }) + randomNumberGeneratorExpectRead(randomNumberGenerator, stateIDOther[4:]) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "open", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_OPEN{ + Opopen: nfsv4_xdr.Open4args{ + Seqid: seqID, + ShareAccess: nfsv4_xdr.OPEN4_SHARE_ACCESS_READ, + ShareDeny: nfsv4_xdr.OPEN4_SHARE_DENY_NONE, + Owner: nfsv4_xdr.OpenOwner4{ + Clientid: shortClientID, + Owner: []byte{0xc4, 0x85, 0x50, 0x6b, 0xa5, 0xec, 0x8e, 0x2c}, + }, + Openhow: &nfsv4_xdr.Openflag4_default{}, + Claim: &nfsv4_xdr.OpenClaim4_CLAIM_NULL{ + File: "Hello", + }, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_GETFH{}, + }, + }) + require.NoError(t, err) + 
require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "open", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_OPEN{ + Opopen: &nfsv4_xdr.Open4res_NFS4_OK{ + Resok4: nfsv4_xdr.Open4resok{ + Stateid: nfsv4_xdr.Stateid4{ + Seqid: 1, + Other: stateIDOther, + }, + Cinfo: nfsv4_xdr.ChangeInfo4{ + Atomic: true, + Before: 0x29291f1b07caf9ea, + After: 0x360e671892329978, + }, + Rflags: nfsv4_xdr.OPEN4_RESULT_CONFIRM | nfsv4_xdr.OPEN4_RESULT_LOCKTYPE_POSIX, + Attrset: nfsv4_xdr.Bitmap4{}, + Delegation: &nfsv4_xdr.OpenDelegation4_OPEN_DELEGATE_NONE{}, + }, + }, + }, + &nfsv4_xdr.NfsResop4_OP_GETFH{ + Opgetfh: &nfsv4_xdr.Getfh4res_NFS4_OK{ + Resok4: nfsv4_xdr.Getfh4resok{ + Object: fileHandle, + }, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) +} + +func openConfirmForTesting(ctx context.Context, t *testing.T, randomNumberGenerator *mock.MockSingleThreadedGenerator, program nfsv4_xdr.Nfs4Program, fileHandle nfsv4_xdr.NfsFh4, seqID nfsv4_xdr.Seqid4, stateIDOther [nfsv4_xdr.NFS4_OTHER_SIZE]byte) { + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "open_confirm", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: fileHandle, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_OPEN_CONFIRM{ + OpopenConfirm: nfsv4_xdr.OpenConfirm4args{ + OpenStateid: nfsv4_xdr.Stateid4{ + Seqid: 1, + Other: stateIDOther, + }, + Seqid: seqID, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "open_confirm", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_OPEN_CONFIRM{ + OpopenConfirm: &nfsv4_xdr.OpenConfirm4res_NFS4_OK{ + Resok4: nfsv4_xdr.OpenConfirm4resok{ + OpenStateid: nfsv4_xdr.Stateid4{ + Seqid: 2, + Other: stateIDOther, + }, + 
}, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) +} + +func TestBaseProgramCompound_OP_ACCESS(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + rootDirectory := mock.NewMockVirtualDirectory(ctrl) + rootDirectory.EXPECT().VirtualGetAttributes(gomock.Any(), virtual.AttributesMaskFileHandle, gomock.Any()). + Do(func(ctx context.Context, requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetFileHandle([]byte{0x63, 0x40, 0xb6, 0x51, 0x6d, 0xa1, 0x7f, 0xcb}) + }) + handleResolver := mock.NewMockHandleResolver(ctrl) + randomNumberGenerator := mock.NewMockSingleThreadedGenerator(ctrl) + rebootVerifier := nfsv4_xdr.Verifier4{0x96, 0x63, 0x54, 0xf1, 0xa2, 0x6b, 0x8c, 0x61} + stateIDOtherPrefix := [...]byte{0x68, 0x78, 0x20, 0xb7} + clock := mock.NewMockClock(ctrl) + program := nfsv4.NewBaseProgram(rootDirectory, handleResolver.Call, randomNumberGenerator, rebootVerifier, stateIDOtherPrefix, clock, 2*time.Minute, time.Minute) + + t.Run("NoFileHandle", func(t *testing.T) { + // Calling ACCESS without a file handle should fail. + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "access", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_ACCESS{ + Opaccess: nfsv4_xdr.Access4args{ + Access: nfsv4_xdr.ACCESS4_READ, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "access", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_ACCESS{ + Opaccess: &nfsv4_xdr.Access4res_default{ + Status: nfsv4_xdr.NFS4ERR_NOFILEHANDLE, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_NOFILEHANDLE, + }, res) + }) + + t.Run("Directory", func(t *testing.T) { + // Access checks against a directory. + rootDirectory.EXPECT().VirtualGetAttributes(ctx, virtual.AttributesMaskPermissions, gomock.Any()). 
+ Do(func(ctx context.Context, requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetPermissions(virtual.PermissionsExecute) + }) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "access", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_ACCESS{ + Opaccess: nfsv4_xdr.Access4args{ + Access: nfsv4_xdr.ACCESS4_EXECUTE | nfsv4_xdr.ACCESS4_LOOKUP | nfsv4_xdr.ACCESS4_READ, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "access", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_ACCESS{ + Opaccess: &nfsv4_xdr.Access4res_NFS4_OK{ + Resok4: nfsv4_xdr.Access4resok{ + Supported: nfsv4_xdr.ACCESS4_LOOKUP | nfsv4_xdr.ACCESS4_READ, + Access: nfsv4_xdr.ACCESS4_LOOKUP, + }, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) + }) + + t.Run("File", func(t *testing.T) { + // Access checks against a file. + leaf := mock.NewMockVirtualLeaf(ctrl) + clock.EXPECT().Now().Return(time.Unix(1000, 0)) + handleResolverExpectCall(t, handleResolver, []byte{1, 2, 3}, virtual.DirectoryChild{}.FromLeaf(leaf), virtual.StatusOK) + leaf.EXPECT().VirtualGetAttributes(ctx, virtual.AttributesMaskPermissions, gomock.Any()). 
+ Do(func(ctx context.Context, requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetPermissions(virtual.PermissionsRead | virtual.PermissionsWrite) + }) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "access", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{1, 2, 3}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_ACCESS{ + Opaccess: nfsv4_xdr.Access4args{ + Access: nfsv4_xdr.ACCESS4_EXECUTE | nfsv4_xdr.ACCESS4_LOOKUP | nfsv4_xdr.ACCESS4_READ, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "access", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_ACCESS{ + Opaccess: &nfsv4_xdr.Access4res_NFS4_OK{ + Resok4: nfsv4_xdr.Access4resok{ + Supported: nfsv4_xdr.ACCESS4_EXECUTE | nfsv4_xdr.ACCESS4_READ, + Access: nfsv4_xdr.ACCESS4_READ, + }, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) + }) +} + +func TestBaseProgramCompound_OP_CLOSE(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + rootDirectory := mock.NewMockVirtualDirectory(ctrl) + rootDirectory.EXPECT().VirtualGetAttributes(gomock.Any(), virtual.AttributesMaskFileHandle, gomock.Any()). 
+ Do(func(ctx context.Context, requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetFileHandle([]byte{0x14, 0x55, 0xb5, 0x51, 0x02, 0x31, 0xd6, 0x75}) + }) + handleResolver := mock.NewMockHandleResolver(ctrl) + randomNumberGenerator := mock.NewMockSingleThreadedGenerator(ctrl) + rebootVerifier := nfsv4_xdr.Verifier4{0x9f, 0xa8, 0x23, 0x40, 0x68, 0x9f, 0x3e, 0xac} + stateIDOtherPrefix := [...]byte{0xf5, 0x47, 0xa8, 0x88} + clock := mock.NewMockClock(ctrl) + program := nfsv4.NewBaseProgram(rootDirectory, handleResolver.Call, randomNumberGenerator, rebootVerifier, stateIDOtherPrefix, clock, 2*time.Minute, time.Minute) + + t.Run("AnonymousStateID", func(t *testing.T) { + // Calling CLOSE against the anonymous state ID is of + // course not permitted. This operation only works when + // called against regular state IDs. + leaf := mock.NewMockVirtualLeaf(ctrl) + clock.EXPECT().Now().Return(time.Unix(1000, 0)) + handleResolverExpectCall(t, handleResolver, []byte{1, 2, 3}, virtual.DirectoryChild{}.FromLeaf(leaf), virtual.StatusOK) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "close", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{1, 2, 3}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_CLOSE{ + Opclose: nfsv4_xdr.Close4args{ + Seqid: 0x33cfa3a9, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "close", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_CLOSE{ + Opclose: &nfsv4_xdr.Close4res_default{ + Status: nfsv4_xdr.NFS4ERR_BAD_STATEID, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_BAD_STATEID, + }, res) + }) + + t.Run("StaleStateID", func(t *testing.T) { + // Providing an arbitrary state ID that does not start + // with a known prefix should return + // NFS4ERR_STALE_STATEID, 
as it's likely from before a + // restart. + leaf := mock.NewMockVirtualLeaf(ctrl) + clock.EXPECT().Now().Return(time.Unix(1001, 0)) + handleResolverExpectCall(t, handleResolver, []byte{1, 2, 3}, virtual.DirectoryChild{}.FromLeaf(leaf), virtual.StatusOK) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "close", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{1, 2, 3}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_CLOSE{ + Opclose: nfsv4_xdr.Close4args{ + Seqid: 0x299f061e, + OpenStateid: nfsv4_xdr.Stateid4{ + Seqid: 0x7746b4d2, + Other: [...]byte{ + 0x36, 0xeb, 0x77, 0x13, + 0x42, 0xfa, 0x7f, 0xbc, + 0xe2, 0x36, 0x20, 0x1b, + }, + }, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "close", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_CLOSE{ + Opclose: &nfsv4_xdr.Close4res_default{ + Status: nfsv4_xdr.NFS4ERR_STALE_STATEID, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_STALE_STATEID, + }, res) + }) + + t.Run("BadStateID", func(t *testing.T) { + // Providing an arbitrary state ID that does start with + // the known prefix should return NFS4ERR_BAD_STATEID. 
+ leaf := mock.NewMockVirtualLeaf(ctrl) + clock.EXPECT().Now().Return(time.Unix(1002, 0)) + clock.EXPECT().Now().Return(time.Unix(1003, 0)) + handleResolverExpectCall(t, handleResolver, []byte{1, 2, 3}, virtual.DirectoryChild{}.FromLeaf(leaf), virtual.StatusOK) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "close", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{1, 2, 3}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_CLOSE{ + Opclose: nfsv4_xdr.Close4args{ + Seqid: 0xf4cf976e, + OpenStateid: nfsv4_xdr.Stateid4{ + Seqid: 0x444b408c, + Other: [...]byte{ + 0xf5, 0x47, 0xa8, 0x88, + 0x91, 0x2a, 0x94, 0x35, + 0x7f, 0xc9, 0x06, 0x70, + }, + }, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "close", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_CLOSE{ + Opclose: &nfsv4_xdr.Close4res_default{ + Status: nfsv4_xdr.NFS4ERR_BAD_STATEID, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_BAD_STATEID, + }, res) + }) + + // The remainder of the test assumes the availability of a client ID. + clock.EXPECT().Now().Return(time.Unix(1004, 0)) + clock.EXPECT().Now().Return(time.Unix(1005, 0)) + setClientIDForTesting(ctx, t, randomNumberGenerator, program, 0xc4cf32ab1168aabc) + + // Open a file for reading, but don't confirm it yet. 
+ leaf := mock.NewMockVirtualLeaf(ctrl) + clock.EXPECT().Now().Return(time.Unix(1006, 0)) + clock.EXPECT().Now().Return(time.Unix(1007, 0)) + openUnconfirmedFileForTesting( + ctx, + t, + randomNumberGenerator, + program, + rootDirectory, + leaf, + nfsv4_xdr.NfsFh4{0x1f, 0x5b, 0x1f, 0x0e, 0x8c, 0xf4, 0xf5, 0x40}, + /* shortClientID = */ 0xc4cf32ab1168aabc, + /* seqID = */ 241, + /* stateIDOther = */ [...]byte{ + 0xf5, 0x47, 0xa8, 0x88, + 0x74, 0x62, 0xab, 0x46, + 0x26, 0x1d, 0x14, 0x7f, + }) + + t.Run("UnconfirmedStateID", func(t *testing.T) { + // CLOSE can't be called against an open-owner that + // hasn't been confirmed yet. + clock.EXPECT().Now().Return(time.Unix(1008, 0)) + clock.EXPECT().Now().Return(time.Unix(1009, 0)) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "close", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{0x1f, 0x5b, 0x1f, 0x0e, 0x8c, 0xf4, 0xf5, 0x40}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_CLOSE{ + Opclose: nfsv4_xdr.Close4args{ + Seqid: 242, + OpenStateid: nfsv4_xdr.Stateid4{ + Seqid: 1, + Other: [...]byte{ + 0xf5, 0x47, 0xa8, 0x88, + 0x74, 0x62, 0xab, 0x46, + 0x26, 0x1d, 0x14, 0x7f, + }, + }, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "close", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_CLOSE{ + Opclose: &nfsv4_xdr.Close4res_default{ + Status: nfsv4_xdr.NFS4ERR_BAD_SEQID, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_BAD_SEQID, + }, res) + }) + + // Confirm the open-owner for the remainder of the test. 
+ clock.EXPECT().Now().Return(time.Unix(1010, 0)) + clock.EXPECT().Now().Return(time.Unix(1011, 0)) + openConfirmForTesting( + ctx, + t, + randomNumberGenerator, + program, + nfsv4_xdr.NfsFh4{0x1f, 0x5b, 0x1f, 0x0e, 0x8c, 0xf4, 0xf5, 0x40}, + /* seqID = */ 242, + /* stateIDOther = */ [...]byte{ + 0xf5, 0x47, 0xa8, 0x88, + 0x74, 0x62, 0xab, 0x46, + 0x26, 0x1d, 0x14, 0x7f, + }) + + t.Run("OldStateID", func(t *testing.T) { + // Can't call CLOSE on a state ID from the past. + clock.EXPECT().Now().Return(time.Unix(1012, 0)) + clock.EXPECT().Now().Return(time.Unix(1013, 0)) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "close", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{0x1f, 0x5b, 0x1f, 0x0e, 0x8c, 0xf4, 0xf5, 0x40}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_CLOSE{ + Opclose: nfsv4_xdr.Close4args{ + Seqid: 243, + OpenStateid: nfsv4_xdr.Stateid4{ + Seqid: 1, + Other: [...]byte{ + 0xf5, 0x47, 0xa8, 0x88, + 0x74, 0x62, 0xab, 0x46, + 0x26, 0x1d, 0x14, 0x7f, + }, + }, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "close", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_CLOSE{ + Opclose: &nfsv4_xdr.Close4res_default{ + Status: nfsv4_xdr.NFS4ERR_OLD_STATEID, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_OLD_STATEID, + }, res) + }) + + t.Run("FuturisticStateID", func(t *testing.T) { + // Can't call CLOSE on a state ID from the future. 
+ clock.EXPECT().Now().Return(time.Unix(1014, 0)) + clock.EXPECT().Now().Return(time.Unix(1015, 0)) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "close", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{0x1f, 0x5b, 0x1f, 0x0e, 0x8c, 0xf4, 0xf5, 0x40}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_CLOSE{ + Opclose: nfsv4_xdr.Close4args{ + Seqid: 244, + OpenStateid: nfsv4_xdr.Stateid4{ + Seqid: 3, + Other: [...]byte{ + 0xf5, 0x47, 0xa8, 0x88, + 0x74, 0x62, 0xab, 0x46, + 0x26, 0x1d, 0x14, 0x7f, + }, + }, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "close", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_CLOSE{ + Opclose: &nfsv4_xdr.Close4res_default{ + Status: nfsv4_xdr.NFS4ERR_BAD_STATEID, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_BAD_STATEID, + }, res) + }) + + t.Run("NoFileHandle", func(t *testing.T) { + // Calling CLOSE without a file handle should fail. + clock.EXPECT().Now().Return(time.Unix(1016, 0)) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "close", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_CLOSE{ + Opclose: nfsv4_xdr.Close4args{ + Seqid: 244, + OpenStateid: nfsv4_xdr.Stateid4{ + Seqid: 2, + Other: [...]byte{ + 0xf5, 0x47, 0xa8, 0x88, + 0x74, 0x62, 0xab, 0x46, + 0x26, 0x1d, 0x14, 0x7f, + }, + }, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "close", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_CLOSE{ + Opclose: &nfsv4_xdr.Close4res_default{ + Status: nfsv4_xdr.NFS4ERR_NOFILEHANDLE, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_NOFILEHANDLE, + }, res) + }) + + t.Run("Success", func(t *testing.T) { + // Actually close the file. 
It should be safe to call + // this multiple times, as it should just return a + // cached response. + for i := int64(0); i < 2*10; i++ { + clock.EXPECT().Now().Return(time.Unix(1017+i, 0)) + } + leaf.EXPECT().VirtualClose(virtual.ShareMaskRead) + + for i := uint32(0); i < 10; i++ { + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "close", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{0x1f, 0x5b, 0x1f, 0x0e, 0x8c, 0xf4, 0xf5, 0x40}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_CLOSE{ + Opclose: nfsv4_xdr.Close4args{ + Seqid: 244, + OpenStateid: nfsv4_xdr.Stateid4{ + Seqid: 2, + Other: [...]byte{ + 0xf5, 0x47, 0xa8, 0x88, + 0x74, 0x62, 0xab, 0x46, + 0x26, 0x1d, 0x14, 0x7f, + }, + }, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "close", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_CLOSE{ + Opclose: &nfsv4_xdr.Close4res_NFS4_OK{ + OpenStateid: nfsv4_xdr.Stateid4{ + Seqid: 3, + Other: [...]byte{ + 0xf5, 0x47, 0xa8, 0x88, + 0x74, 0x62, 0xab, 0x46, + 0x26, 0x1d, 0x14, 0x7f, + }, + }, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) + } + }) + + t.Run("RetransmissionWithMismatchingStateID", func(t *testing.T) { + // At a minimum, the standard states that when returning + // a cached response it is sufficient to compare the + // original operation type and sequence ID. Let's be a + // bit more strict and actually check whether the + // provided state ID matches the one that was provided + // as part of the original request. + // + // More details: RFC 7530, section 9.1.9, bullet point 3. 
+ clock.EXPECT().Now().Return(time.Unix(1037, 0)) + clock.EXPECT().Now().Return(time.Unix(1038, 0)) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "close", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{0x1f, 0x5b, 0x1f, 0x0e, 0x8c, 0xf4, 0xf5, 0x40}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_CLOSE{ + Opclose: nfsv4_xdr.Close4args{ + Seqid: 244, + OpenStateid: nfsv4_xdr.Stateid4{ + Seqid: 3, + Other: [...]byte{ + 0xf5, 0x47, 0xa8, 0x88, + 0x74, 0x62, 0xab, 0x46, + 0x26, 0x1d, 0x14, 0x7f, + }, + }, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "close", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_CLOSE{ + Opclose: &nfsv4_xdr.Close4res_default{ + Status: nfsv4_xdr.NFS4ERR_BAD_SEQID, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_BAD_SEQID, + }, res) + }) + + t.Run("CloseAfterClosed", func(t *testing.T) { + // We should no longer be able to interact with the + // state ID after closing it. Attempting to close a file + // that has already been closed should just return + // NFS4ERR_BAD_STATEID. 
+ clock.EXPECT().Now().Return(time.Unix(1039, 0)) + clock.EXPECT().Now().Return(time.Unix(1040, 0)) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "close", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{0x1f, 0x5b, 0x1f, 0x0e, 0x8c, 0xf4, 0xf5, 0x40}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_CLOSE{ + Opclose: nfsv4_xdr.Close4args{ + Seqid: 245, + OpenStateid: nfsv4_xdr.Stateid4{ + Seqid: 3, + Other: [...]byte{ + 0xf5, 0x47, 0xa8, 0x88, + 0x74, 0x62, 0xab, 0x46, + 0x26, 0x1d, 0x14, 0x7f, + }, + }, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "close", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_CLOSE{ + Opclose: &nfsv4_xdr.Close4res_default{ + Status: nfsv4_xdr.NFS4ERR_BAD_STATEID, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_BAD_STATEID, + }, res) + }) +} + +func TestBaseProgramCompound_OP_COMMIT(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + rootDirectory := mock.NewMockVirtualDirectory(ctrl) + rootDirectory.EXPECT().VirtualGetAttributes(gomock.Any(), virtual.AttributesMaskFileHandle, gomock.Any()). 
+ Do(func(ctx context.Context, requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetFileHandle([]byte{0x5e, 0x1e, 0xca, 0x70, 0xcc, 0x9d, 0x5e, 0xd5}) + }) + handleResolver := mock.NewMockHandleResolver(ctrl) + randomNumberGenerator := mock.NewMockSingleThreadedGenerator(ctrl) + rebootVerifier := nfsv4_xdr.Verifier4{0x1a, 0xa6, 0x7e, 0x3b, 0xf7, 0x29, 0xa4, 0x7b} + stateIDOtherPrefix := [...]byte{0x24, 0xa7, 0x48, 0xbc} + clock := mock.NewMockClock(ctrl) + program := nfsv4.NewBaseProgram(rootDirectory, handleResolver.Call, randomNumberGenerator, rebootVerifier, stateIDOtherPrefix, clock, 2*time.Minute, time.Minute) + + t.Run("NoFileHandle", func(t *testing.T) { + // Calling COMMIT without a file handle should fail. + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "fsync", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_COMMIT{ + Opcommit: nfsv4_xdr.Commit4args{ + Offset: 10, + Count: 20, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "fsync", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_COMMIT{ + Opcommit: &nfsv4_xdr.Commit4res_default{ + Status: nfsv4_xdr.NFS4ERR_NOFILEHANDLE, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_NOFILEHANDLE, + }, res) + }) + + t.Run("NotFile", func(t *testing.T) { + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "fsync", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_COMMIT{ + Opcommit: nfsv4_xdr.Commit4args{ + Offset: 10, + Count: 20, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "fsync", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_COMMIT{ + Opcommit: &nfsv4_xdr.Commit4res_default{ + Status: nfsv4_xdr.NFS4ERR_ISDIR, + }, + }, + }, + 
Status: nfsv4_xdr.NFS4ERR_ISDIR, + }, res) + }) + + t.Run("Success", func(t *testing.T) { + leaf := mock.NewMockVirtualLeaf(ctrl) + clock.EXPECT().Now().Return(time.Unix(1000, 0)) + handleResolverExpectCall(t, handleResolver, []byte{1, 2, 3}, virtual.DirectoryChild{}.FromLeaf(leaf), virtual.StatusOK) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "fsync", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{1, 2, 3}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_COMMIT{ + Opcommit: nfsv4_xdr.Commit4args{ + Offset: 10, + Count: 20, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "fsync", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_COMMIT{ + Opcommit: &nfsv4_xdr.Commit4res_NFS4_OK{ + Resok4: nfsv4_xdr.Commit4resok{ + Writeverf: rebootVerifier, + }, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) + }) +} + +func TestBaseProgramCompound_OP_CREATE(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + rootDirectory := mock.NewMockVirtualDirectory(ctrl) + rootDirectory.EXPECT().VirtualGetAttributes(gomock.Any(), virtual.AttributesMaskFileHandle, gomock.Any()). 
+ Do(func(ctx context.Context, requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetFileHandle([]byte{0x9b, 0xe9, 0x83, 0x67, 0x8d, 0x92, 0x5e, 0x62}) + }) + handleResolver := mock.NewMockHandleResolver(ctrl) + randomNumberGenerator := mock.NewMockSingleThreadedGenerator(ctrl) + rebootVerifier := nfsv4_xdr.Verifier4{0x8d, 0x3d, 0xe8, 0x2e, 0xee, 0x3b, 0xca, 0x60} + stateIDOtherPrefix := [...]byte{0x60, 0xf5, 0x56, 0x97} + clock := mock.NewMockClock(ctrl) + program := nfsv4.NewBaseProgram(rootDirectory, handleResolver.Call, randomNumberGenerator, rebootVerifier, stateIDOtherPrefix, clock, 2*time.Minute, time.Minute) + + t.Run("NoFileHandle", func(t *testing.T) { + // Calling CREATE without a file handle should fail. + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "create", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_CREATE{ + Opcreate: nfsv4_xdr.Create4args{ + Objtype: &nfsv4_xdr.Createtype4_NF4DIR{}, + Objname: "Hello", + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "create", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_CREATE{ + Opcreate: &nfsv4_xdr.Create4res_default{ + Status: nfsv4_xdr.NFS4ERR_NOFILEHANDLE, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_NOFILEHANDLE, + }, res) + }) + + t.Run("NotDirectory", func(t *testing.T) { + leaf := mock.NewMockVirtualLeaf(ctrl) + clock.EXPECT().Now().Return(time.Unix(1000, 0)) + handleResolverExpectCall(t, handleResolver, []byte{1, 2, 3}, virtual.DirectoryChild{}.FromLeaf(leaf), virtual.StatusOK) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "lookup", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{1, 2, 3}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_CREATE{ + Opcreate: nfsv4_xdr.Create4args{ + Objtype: &nfsv4_xdr.Createtype4_NF4DIR{}, + Objname: "Hello", + }, + }, + }, + 
}) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "lookup", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_CREATE{ + Opcreate: &nfsv4_xdr.Create4res_default{ + Status: nfsv4_xdr.NFS4ERR_NOTDIR, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_NOTDIR, + }, res) + }) + + t.Run("BadName", func(t *testing.T) { + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "create", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_CREATE{ + Opcreate: nfsv4_xdr.Create4args{ + Objtype: &nfsv4_xdr.Createtype4_NF4DIR{}, + Objname: "..", + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "create", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_CREATE{ + Opcreate: &nfsv4_xdr.Create4res_default{ + Status: nfsv4_xdr.NFS4ERR_BADNAME, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_BADNAME, + }, res) + }) + + t.Run("MissingName", func(t *testing.T) { + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "create", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_CREATE{ + Opcreate: nfsv4_xdr.Create4args{ + Objtype: &nfsv4_xdr.Createtype4_NF4DIR{}, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "create", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_CREATE{ + Opcreate: &nfsv4_xdr.Create4res_default{ + Status: nfsv4_xdr.NFS4ERR_INVAL, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_INVAL, + }, res) + }) + + t.Run("BadType", func(t *testing.T) { + res, err 
:= program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "create", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_CREATE{ + Opcreate: nfsv4_xdr.Create4args{ + Objtype: &nfsv4_xdr.Createtype4_default{ + Type: nfsv4_xdr.NF4REG, + }, + Objname: "file", + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "create", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_CREATE{ + Opcreate: &nfsv4_xdr.Create4res_default{ + Status: nfsv4_xdr.NFS4ERR_BADTYPE, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_BADTYPE, + }, res) + }) + + t.Run("SymlinkFailure", func(t *testing.T) { + rootDirectory.EXPECT().VirtualSymlink( + ctx, + []byte("target"), + path.MustNewComponent("symlink"), + virtual.AttributesMaskFileHandle, + gomock.Any(), + ).Return(nil, virtual.ChangeInfo{}, virtual.StatusErrAccess) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "create", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_CREATE{ + Opcreate: nfsv4_xdr.Create4args{ + Objtype: &nfsv4_xdr.Createtype4_NF4LNK{ + Linkdata: nfsv4_xdr.Linktext4("target"), + }, + Objname: "symlink", + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "create", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_CREATE{ + Opcreate: &nfsv4_xdr.Create4res_default{ + Status: nfsv4_xdr.NFS4ERR_ACCESS, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_ACCESS, + }, res) + }) + + t.Run("SymlinkSuccess", func(t *testing.T) { + leaf := mock.NewMockVirtualLeaf(ctrl) + rootDirectory.EXPECT().VirtualSymlink( + ctx, + []byte("target"), + 
path.MustNewComponent("symlink"), + virtual.AttributesMaskFileHandle, + gomock.Any(), + ).DoAndReturn(func(ctx context.Context, target []byte, name path.Component, requested virtual.AttributesMask, attributes *virtual.Attributes) (virtual.Leaf, virtual.ChangeInfo, virtual.Status) { + attributes.SetFileHandle([]byte{0xbe, 0xb7, 0xe9, 0xb1, 0xbb, 0x21, 0x9a, 0xa8}) + return leaf, virtual.ChangeInfo{ + Before: 0x803325cc21deffd8, + After: 0xa1b8abe75e185bb5, + }, virtual.StatusOK + }) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "create", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_CREATE{ + Opcreate: nfsv4_xdr.Create4args{ + Objtype: &nfsv4_xdr.Createtype4_NF4LNK{ + Linkdata: nfsv4_xdr.Linktext4("target"), + }, + Objname: "symlink", + }, + }, + &nfsv4_xdr.NfsArgop4_OP_GETFH{}, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "create", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_CREATE{ + Opcreate: &nfsv4_xdr.Create4res_NFS4_OK{ + Resok4: nfsv4_xdr.Create4resok{ + Cinfo: nfsv4_xdr.ChangeInfo4{ + Atomic: true, + Before: 0x803325cc21deffd8, + After: 0xa1b8abe75e185bb5, + }, + }, + }, + }, + &nfsv4_xdr.NfsResop4_OP_GETFH{ + Opgetfh: &nfsv4_xdr.Getfh4res_NFS4_OK{ + Resok4: nfsv4_xdr.Getfh4resok{ + Object: nfsv4_xdr.NfsFh4{ + 0xbe, 0xb7, 0xe9, 0xb1, 0xbb, 0x21, 0x9a, 0xa8, + }, + }, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) + }) + + t.Run("BlockDeviceFailure", func(t *testing.T) { + // Disallow the creation of block devices. There is no + // need for build actions to do that. 
+ res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "create", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_CREATE{ + Opcreate: nfsv4_xdr.Create4args{ + Objtype: &nfsv4_xdr.Createtype4_NF4BLK{ + Devdata: nfsv4_xdr.Specdata4{ + Specdata1: 8, + Specdata2: 0, + }, + }, + Objname: "sda", + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "create", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_CREATE{ + Opcreate: &nfsv4_xdr.Create4res_default{ + Status: nfsv4_xdr.NFS4ERR_PERM, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_PERM, + }, res) + }) + + t.Run("CharacterDeviceFailure", func(t *testing.T) { + // Disallow the creation of character devices. There is no + // need for build actions to do that. + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "create", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_CREATE{ + Opcreate: nfsv4_xdr.Create4args{ + Objtype: &nfsv4_xdr.Createtype4_NF4CHR{ + Devdata: nfsv4_xdr.Specdata4{ + Specdata1: 1, + Specdata2: 3, + }, + }, + Objname: "null", + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "create", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_CREATE{ + Opcreate: &nfsv4_xdr.Create4res_default{ + Status: nfsv4_xdr.NFS4ERR_PERM, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_PERM, + }, res) + }) + + t.Run("SocketSuccess", func(t *testing.T) { + leaf := mock.NewMockVirtualLeaf(ctrl) + rootDirectory.EXPECT().VirtualMknod( + ctx, + path.MustNewComponent("socket"), + filesystem.FileTypeSocket, + virtual.AttributesMaskFileHandle, + 
gomock.Any(), + ).DoAndReturn(func(ctx context.Context, name path.Component, fileType filesystem.FileType, requested virtual.AttributesMask, attributes *virtual.Attributes) (virtual.Leaf, virtual.ChangeInfo, virtual.Status) { + attributes.SetFileHandle([]byte{0xe0, 0x45, 0x9a, 0xca, 0x4f, 0x67, 0x7c, 0xaa}) + return leaf, virtual.ChangeInfo{ + Before: 0xf46dd045aaf43210, + After: 0xc687134057752dbb, + }, virtual.StatusOK + }) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "create", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_CREATE{ + Opcreate: nfsv4_xdr.Create4args{ + Objtype: &nfsv4_xdr.Createtype4_NF4SOCK{}, + Objname: "socket", + }, + }, + &nfsv4_xdr.NfsArgop4_OP_GETFH{}, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "create", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_CREATE{ + Opcreate: &nfsv4_xdr.Create4res_NFS4_OK{ + Resok4: nfsv4_xdr.Create4resok{ + Cinfo: nfsv4_xdr.ChangeInfo4{ + Atomic: true, + Before: 0xf46dd045aaf43210, + After: 0xc687134057752dbb, + }, + }, + }, + }, + &nfsv4_xdr.NfsResop4_OP_GETFH{ + Opgetfh: &nfsv4_xdr.Getfh4res_NFS4_OK{ + Resok4: nfsv4_xdr.Getfh4resok{ + Object: nfsv4_xdr.NfsFh4{ + 0xe0, 0x45, 0x9a, 0xca, 0x4f, 0x67, 0x7c, 0xaa, + }, + }, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) + }) + + t.Run("FIFOSuccess", func(t *testing.T) { + leaf := mock.NewMockVirtualLeaf(ctrl) + rootDirectory.EXPECT().VirtualMknod( + ctx, + path.MustNewComponent("fifo"), + filesystem.FileTypeFIFO, + virtual.AttributesMaskFileHandle, + gomock.Any(), + ).DoAndReturn(func(ctx context.Context, name path.Component, fileType filesystem.FileType, requested virtual.AttributesMask, attributes *virtual.Attributes) (virtual.Leaf, virtual.ChangeInfo, virtual.Status) { + 
attributes.SetFileHandle([]byte{0x73, 0x9c, 0x31, 0x40, 0x63, 0x49, 0xbb, 0x09}) + return leaf, virtual.ChangeInfo{ + Before: 0x1e80315f7745fc50, + After: 0xe280a823543ce5ac, + }, virtual.StatusOK + }) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "create", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_CREATE{ + Opcreate: nfsv4_xdr.Create4args{ + Objtype: &nfsv4_xdr.Createtype4_NF4FIFO{}, + Objname: "fifo", + }, + }, + &nfsv4_xdr.NfsArgop4_OP_GETFH{}, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "create", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_CREATE{ + Opcreate: &nfsv4_xdr.Create4res_NFS4_OK{ + Resok4: nfsv4_xdr.Create4resok{ + Cinfo: nfsv4_xdr.ChangeInfo4{ + Atomic: true, + Before: 0x1e80315f7745fc50, + After: 0xe280a823543ce5ac, + }, + }, + }, + }, + &nfsv4_xdr.NfsResop4_OP_GETFH{ + Opgetfh: &nfsv4_xdr.Getfh4res_NFS4_OK{ + Resok4: nfsv4_xdr.Getfh4resok{ + Object: nfsv4_xdr.NfsFh4{ + 0x73, 0x9c, 0x31, 0x40, 0x63, 0x49, 0xbb, 0x09, + }, + }, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) + }) + + t.Run("DirectorySuccess", func(t *testing.T) { + directory := mock.NewMockVirtualDirectory(ctrl) + rootDirectory.EXPECT().VirtualMkdir( + path.MustNewComponent("dir"), + virtual.AttributesMaskFileHandle, + gomock.Any(), + ).DoAndReturn(func(name path.Component, requested virtual.AttributesMask, attributes *virtual.Attributes) (virtual.Directory, virtual.ChangeInfo, virtual.Status) { + attributes.SetFileHandle([]byte{0x19, 0xe5, 0x26, 0x1b, 0xee, 0x25, 0x4a, 0x76}) + return directory, virtual.ChangeInfo{ + Before: 0x60a4a64a5af2116f, + After: 0x58e160960c2d0339, + }, virtual.StatusOK + }) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "create", + Argarray: 
[]nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_CREATE{ + Opcreate: nfsv4_xdr.Create4args{ + Objtype: &nfsv4_xdr.Createtype4_NF4DIR{}, + Objname: "dir", + }, + }, + &nfsv4_xdr.NfsArgop4_OP_GETFH{}, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "create", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_CREATE{ + Opcreate: &nfsv4_xdr.Create4res_NFS4_OK{ + Resok4: nfsv4_xdr.Create4resok{ + Cinfo: nfsv4_xdr.ChangeInfo4{ + Atomic: true, + Before: 0x60a4a64a5af2116f, + After: 0x58e160960c2d0339, + }, + }, + }, + }, + &nfsv4_xdr.NfsResop4_OP_GETFH{ + Opgetfh: &nfsv4_xdr.Getfh4res_NFS4_OK{ + Resok4: nfsv4_xdr.Getfh4resok{ + Object: nfsv4_xdr.NfsFh4{ + 0x19, 0xe5, 0x26, 0x1b, 0xee, 0x25, 0x4a, 0x76, + }, + }, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) + }) +} + +func TestBaseProgramCompound_OP_DELEGPURGE(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + rootDirectory := mock.NewMockVirtualDirectory(ctrl) + rootDirectory.EXPECT().VirtualGetAttributes(gomock.Any(), virtual.AttributesMaskFileHandle, gomock.Any()). 
+ Do(func(ctx context.Context, requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetFileHandle([]byte{0x45, 0x22, 0xbb, 0xf6, 0xf0, 0x61, 0x71, 0x6d}) + }) + handleResolver := mock.NewMockHandleResolver(ctrl) + randomNumberGenerator := mock.NewMockSingleThreadedGenerator(ctrl) + rebootVerifier := nfsv4_xdr.Verifier4{0x0b, 0xb3, 0x0d, 0xa3, 0x50, 0x11, 0x6b, 0x38} + stateIDOtherPrefix := [...]byte{0x17, 0x18, 0x71, 0xc6} + clock := mock.NewMockClock(ctrl) + program := nfsv4.NewBaseProgram(rootDirectory, handleResolver.Call, randomNumberGenerator, rebootVerifier, stateIDOtherPrefix, clock, 2*time.Minute, time.Minute) + + t.Run("NotSupported", func(t *testing.T) { + // As we don't support CLAIM_DELEGATE_PREV, this method + // is required to return NFS4ERR_NOTSUPP. + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "stat", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_DELEGPURGE{ + Opdelegpurge: nfsv4_xdr.Delegpurge4args{ + Clientid: 0xc08f7e033702ee2c, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "stat", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_DELEGPURGE{ + Opdelegpurge: nfsv4_xdr.Delegpurge4res{ + Status: nfsv4_xdr.NFS4ERR_NOTSUPP, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_NOTSUPP, + }, res) + }) +} + +// TODO: DELEGRETURN + +func TestBaseProgramCompound_OP_GETATTR(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + rootDirectory := mock.NewMockVirtualDirectory(ctrl) + rootDirectory.EXPECT().VirtualGetAttributes(gomock.Any(), virtual.AttributesMaskFileHandle, gomock.Any()). 
+ Do(func(ctx context.Context, requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetFileHandle([]byte{0x9b, 0x51, 0x40, 0x9b, 0x8c, 0x7a, 0x54, 0x47}) + }) + handleResolver := mock.NewMockHandleResolver(ctrl) + randomNumberGenerator := mock.NewMockSingleThreadedGenerator(ctrl) + rebootVerifier := nfsv4_xdr.Verifier4{0x5e, 0x5f, 0xfe, 0x34, 0x05, 0x98, 0x9d, 0xf1} + stateIDOtherPrefix := [...]byte{0x3d, 0xc0, 0x5d, 0xd2} + clock := mock.NewMockClock(ctrl) + program := nfsv4.NewBaseProgram(rootDirectory, handleResolver.Call, randomNumberGenerator, rebootVerifier, stateIDOtherPrefix, clock, 2*time.Minute, time.Minute) + + t.Run("NoFileHandle", func(t *testing.T) { + // Calling GETATTR without a file handle should fail. + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "stat", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_GETATTR{ + Opgetattr: nfsv4_xdr.Getattr4args{ + AttrRequest: nfsv4_xdr.Bitmap4{ + (1 << nfsv4_xdr.FATTR4_TYPE) | + (1 << nfsv4_xdr.FATTR4_FILEID), + }, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "stat", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_GETATTR{ + Opgetattr: &nfsv4_xdr.Getattr4res_default{ + Status: nfsv4_xdr.NFS4ERR_NOFILEHANDLE, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_NOFILEHANDLE, + }, res) + }) + + t.Run("NoAttributes", func(t *testing.T) { + // Request absolutely no attributes. 
+ rootDirectory.EXPECT().VirtualGetAttributes(ctx, virtual.AttributesMask(0), gomock.Any()) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "stat", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_GETATTR{}, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "stat", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_GETATTR{ + Opgetattr: &nfsv4_xdr.Getattr4res_NFS4_OK{ + Resok4: nfsv4_xdr.Getattr4resok{ + ObjAttributes: nfsv4_xdr.Fattr4{ + Attrmask: nfsv4_xdr.Bitmap4{}, + }, + }, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) + }) + + t.Run("AllAttributes", func(t *testing.T) { + // Request all supported attributes. + rootDirectory.EXPECT().VirtualGetAttributes( + ctx, + virtual.AttributesMaskChangeID|virtual.AttributesMaskFileHandle|virtual.AttributesMaskFileType|virtual.AttributesMaskInodeNumber|virtual.AttributesMaskLastDataModificationTime|virtual.AttributesMaskLinkCount|virtual.AttributesMaskPermissions|virtual.AttributesMaskSizeBytes, + gomock.Any(), + ).Do(func(ctx context.Context, requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetChangeID(0xeaab7253dad16ee5) + attributes.SetFileHandle([]byte{0xcd, 0xe9, 0xc7, 0x4c, 0x8b, 0x8d, 0x58, 0xef, 0xd9, 0x9f}) + attributes.SetFileType(filesystem.FileTypeDirectory) + attributes.SetInodeNumber(0xfcadd45521cb1db2) + attributes.SetLastDataModificationTime(time.Unix(1654791566, 4839067173)) + attributes.SetLinkCount(12) + attributes.SetPermissions(virtual.PermissionsRead | virtual.PermissionsExecute) + attributes.SetSizeBytes(8192) + }) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "stat", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_GETATTR{ + 
Opgetattr: nfsv4_xdr.Getattr4args{ + AttrRequest: nfsv4_xdr.Bitmap4{ + (1 << nfsv4_xdr.FATTR4_SUPPORTED_ATTRS) | + (1 << nfsv4_xdr.FATTR4_TYPE) | + (1 << nfsv4_xdr.FATTR4_FH_EXPIRE_TYPE) | + (1 << nfsv4_xdr.FATTR4_CHANGE) | + (1 << nfsv4_xdr.FATTR4_SIZE) | + (1 << nfsv4_xdr.FATTR4_LINK_SUPPORT) | + (1 << nfsv4_xdr.FATTR4_SYMLINK_SUPPORT) | + (1 << nfsv4_xdr.FATTR4_NAMED_ATTR) | + (1 << nfsv4_xdr.FATTR4_FSID) | + (1 << nfsv4_xdr.FATTR4_UNIQUE_HANDLES) | + (1 << nfsv4_xdr.FATTR4_LEASE_TIME) | + (1 << nfsv4_xdr.FATTR4_FILEHANDLE) | + (1 << nfsv4_xdr.FATTR4_FILEID), + (1 << (nfsv4_xdr.FATTR4_MODE - 32)) | + (1 << (nfsv4_xdr.FATTR4_NUMLINKS - 32)) | + (1 << (nfsv4_xdr.FATTR4_TIME_ACCESS - 32)) | + (1 << (nfsv4_xdr.FATTR4_TIME_METADATA - 32)) | + (1 << (nfsv4_xdr.FATTR4_TIME_MODIFY - 32)), + }, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "stat", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_GETATTR{ + Opgetattr: &nfsv4_xdr.Getattr4res_NFS4_OK{ + Resok4: nfsv4_xdr.Getattr4resok{ + ObjAttributes: nfsv4_xdr.Fattr4{ + Attrmask: nfsv4_xdr.Bitmap4{ + (1 << nfsv4_xdr.FATTR4_SUPPORTED_ATTRS) | + (1 << nfsv4_xdr.FATTR4_TYPE) | + (1 << nfsv4_xdr.FATTR4_FH_EXPIRE_TYPE) | + (1 << nfsv4_xdr.FATTR4_CHANGE) | + (1 << nfsv4_xdr.FATTR4_SIZE) | + (1 << nfsv4_xdr.FATTR4_LINK_SUPPORT) | + (1 << nfsv4_xdr.FATTR4_SYMLINK_SUPPORT) | + (1 << nfsv4_xdr.FATTR4_NAMED_ATTR) | + (1 << nfsv4_xdr.FATTR4_FSID) | + (1 << nfsv4_xdr.FATTR4_UNIQUE_HANDLES) | + (1 << nfsv4_xdr.FATTR4_LEASE_TIME) | + (1 << nfsv4_xdr.FATTR4_FILEHANDLE) | + (1 << nfsv4_xdr.FATTR4_FILEID), + (1 << (nfsv4_xdr.FATTR4_MODE - 32)) | + (1 << (nfsv4_xdr.FATTR4_NUMLINKS - 32)) | + (1 << (nfsv4_xdr.FATTR4_TIME_ACCESS - 32)) | + (1 << (nfsv4_xdr.FATTR4_TIME_METADATA - 32)) | + (1 << (nfsv4_xdr.FATTR4_TIME_MODIFY - 32)), + }, + AttrVals: 
nfsv4_xdr.Attrlist4{ + // FATTR4_SUPPORTED_ATTRS. + 0x00, 0x00, 0x00, 0x02, + 0x00, 0x18, 0x0f, 0xff, + 0x00, 0x30, 0x80, 0x0a, + // FATTR4_TYPE == NF4DIR. + 0x00, 0x00, 0x00, 0x02, + // FATTR4_FH_EXPIRE_TYPE == FH4_PERSISTENT. + 0x00, 0x00, 0x00, 0x00, + // FATTR4_CHANGE. + 0xea, 0xab, 0x72, 0x53, 0xda, 0xd1, 0x6e, 0xe5, + // FATTR4_SIZE. + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, + // FATTR4_LINK_SUPPORT == TRUE. + 0x00, 0x00, 0x00, 0x01, + // FATTR4_SYMLINK_SUPPORT == TRUE. + 0x00, 0x00, 0x00, 0x01, + // FATTR4_NAMED_ATTR == FALSE. + 0x00, 0x00, 0x00, 0x00, + // FATTR4_FSID. + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // FATTR4_UNIQUE_HANDLES == TRUE. + 0x00, 0x00, 0x00, 0x01, + // FATTR4_LEASE_TIME == 60 seconds. + 0x00, 0x00, 0x00, 0x3c, + // FATTR4_FILEHANDLE. + 0x00, 0x00, 0x00, 0x0a, + 0xcd, 0xe9, 0xc7, 0x4c, 0x8b, 0x8d, 0x58, 0xef, 0xd9, 0x9f, 0x00, 0x00, + // FATTR4_FILEID. + 0xfc, 0xad, 0xd4, 0x55, 0x21, 0xcb, 0x1d, 0xb2, + // FATTR4_MODE. + 0x00, 0x00, 0x01, 0x6d, + // FATTR4_NUMLINKS. + 0x00, 0x00, 0x00, 0x0c, + // FATTR4_TIME_ACCESS == 2000-01-01T00:00:00Z. + 0x00, 0x00, 0x00, 0x00, 0x38, 0x6d, 0x43, 0x80, + 0x00, 0x00, 0x00, 0x00, + // FATTR4_TIME_METADATA == 2000-01-01T00:00:00Z. + 0x00, 0x00, 0x00, 0x00, 0x38, 0x6d, 0x43, 0x80, + 0x00, 0x00, 0x00, 0x00, + // FATTR4_TIME_MODIFY == 2022-06-09T16:19:26.4839067173Z. + 0x00, 0x00, 0x00, 0x00, 0x62, 0xa2, 0x1d, 0x92, + 0x32, 0x03, 0x26, 0x25, + }, + }, + }, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) + }) +} + +func TestBaseProgramCompound_OP_GETFH(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + rootDirectory := mock.NewMockVirtualDirectory(ctrl) + rootDirectory.EXPECT().VirtualGetAttributes(gomock.Any(), virtual.AttributesMaskFileHandle, gomock.Any()). 
+ Do(func(ctx context.Context, requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetFileHandle([]byte{0x85, 0xc5, 0x54, 0x77, 0x90, 0x7c, 0xf1, 0xf9}) + }) + handleResolver := mock.NewMockHandleResolver(ctrl) + randomNumberGenerator := mock.NewMockSingleThreadedGenerator(ctrl) + rebootVerifier := nfsv4_xdr.Verifier4{0x3c, 0x79, 0xba, 0xfe, 0xd6, 0x87, 0x1e, 0x32} + stateIDOtherPrefix := [...]byte{0x95, 0xce, 0xb4, 0x96} + clock := mock.NewMockClock(ctrl) + program := nfsv4.NewBaseProgram(rootDirectory, handleResolver.Call, randomNumberGenerator, rebootVerifier, stateIDOtherPrefix, clock, 2*time.Minute, time.Minute) + + t.Run("NoFileHandle", func(t *testing.T) { + // Calling GETFH without a file handle should fail. + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "getfh", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_GETFH{}, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "getfh", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_GETFH{ + Opgetfh: &nfsv4_xdr.Getfh4res_default{ + Status: nfsv4_xdr.NFS4ERR_NOFILEHANDLE, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_NOFILEHANDLE, + }, res) + }) + + t.Run("Success", func(t *testing.T) { + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "getfh", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_GETFH{}, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "getfh", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_GETFH{ + Opgetfh: &nfsv4_xdr.Getfh4res_NFS4_OK{ + Resok4: nfsv4_xdr.Getfh4resok{ + Object: nfsv4_xdr.NfsFh4{ + 0x85, 0xc5, 0x54, 0x77, 0x90, 0x7c, 0xf1, 0xf9, + }, + }, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) + }) +} + +func 
TestBaseProgramCompound_OP_ILLEGAL(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + rootDirectory := mock.NewMockVirtualDirectory(ctrl) + rootDirectory.EXPECT().VirtualGetAttributes(gomock.Any(), virtual.AttributesMaskFileHandle, gomock.Any()). + Do(func(ctx context.Context, requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetFileHandle([]byte{0x0e, 0xad, 0xf1, 0x83, 0xb1, 0xc0, 0xfc, 0x6f}) + }) + handleResolver := mock.NewMockHandleResolver(ctrl) + randomNumberGenerator := mock.NewMockSingleThreadedGenerator(ctrl) + rebootVerifier := nfsv4_xdr.Verifier4{0x42, 0x51, 0x65, 0x8b, 0xd2, 0x27, 0xc4, 0x13} + stateIDOtherPrefix := [...]byte{0x01, 0x22, 0xe2, 0xaa} + clock := mock.NewMockClock(ctrl) + program := nfsv4.NewBaseProgram(rootDirectory, handleResolver.Call, randomNumberGenerator, rebootVerifier, stateIDOtherPrefix, clock, 2*time.Minute, time.Minute) + + t.Run("Failure", func(t *testing.T) { + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "illegal", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_ILLEGAL{}, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "illegal", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_ILLEGAL{ + Opillegal: nfsv4_xdr.Illegal4res{ + Status: nfsv4_xdr.NFS4ERR_OP_ILLEGAL, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_OP_ILLEGAL, + }, res) + }) +} + +func TestBaseProgramCompound_OP_LINK(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + rootDirectory := mock.NewMockVirtualDirectory(ctrl) + rootDirectory.EXPECT().VirtualGetAttributes(gomock.Any(), virtual.AttributesMaskFileHandle, gomock.Any()). 
+ Do(func(ctx context.Context, requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetFileHandle([]byte{0x27, 0xec, 0x12, 0x85, 0xcb, 0x2d, 0x57, 0xe2}) + }) + handleResolver := mock.NewMockHandleResolver(ctrl) + randomNumberGenerator := mock.NewMockSingleThreadedGenerator(ctrl) + rebootVerifier := nfsv4_xdr.Verifier4{0x8d, 0x94, 0x96, 0x9c, 0xe9, 0x4b, 0xcf, 0xf5} + stateIDOtherPrefix := [...]byte{0xdf, 0xdb, 0x0d, 0x38} + clock := mock.NewMockClock(ctrl) + program := nfsv4.NewBaseProgram(rootDirectory, handleResolver.Call, randomNumberGenerator, rebootVerifier, stateIDOtherPrefix, clock, 2*time.Minute, time.Minute) + + t.Run("NoFileHandle1", func(t *testing.T) { + // Calling LINK without any file handles should fail. + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "link", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_LINK{ + Oplink: nfsv4_xdr.Link4args{ + Newname: "Hello", + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "link", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_LINK{ + Oplink: &nfsv4_xdr.Link4res_default{ + Status: nfsv4_xdr.NFS4ERR_NOFILEHANDLE, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_NOFILEHANDLE, + }, res) + }) + + t.Run("NoFileHandle2", func(t *testing.T) { + // Calling LINK without a saved file handle should fail. 
+ res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "link", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_LINK{ + Oplink: nfsv4_xdr.Link4args{ + Newname: "Hello", + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "link", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_LINK{ + Oplink: &nfsv4_xdr.Link4res_default{ + Status: nfsv4_xdr.NFS4ERR_NOFILEHANDLE, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_NOFILEHANDLE, + }, res) + }) + + t.Run("BadName", func(t *testing.T) { + // Calling LINK with a bad filename should fail. + leaf := mock.NewMockVirtualLeaf(ctrl) + clock.EXPECT().Now().Return(time.Unix(1000, 0)) + handleResolverExpectCall(t, handleResolver, []byte{0x62, 0xfc, 0x0c, 0x8c, 0x94, 0x86, 0x8d, 0xc7}, virtual.DirectoryChild{}.FromLeaf(leaf), virtual.StatusOK) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "link", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{ + 0x62, 0xfc, 0x0c, 0x8c, 0x94, 0x86, 0x8d, 0xc7, + }, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_SAVEFH{}, + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_LINK{ + Oplink: nfsv4_xdr.Link4args{ + Newname: "..", + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "link", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_SAVEFH{ + Opsavefh: nfsv4_xdr.Savefh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_LINK{ + Oplink: 
&nfsv4_xdr.Link4res_default{ + Status: nfsv4_xdr.NFS4ERR_BADNAME, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_BADNAME, + }, res) + }) + + t.Run("MissingName", func(t *testing.T) { + // Calling LINK with a name of length zero should fail + // with NFS4ERR_INVAL. + leaf := mock.NewMockVirtualLeaf(ctrl) + clock.EXPECT().Now().Return(time.Unix(1001, 0)) + handleResolverExpectCall(t, handleResolver, []byte{0x62, 0xfc, 0x0c, 0x8c, 0x94, 0x86, 0x8d, 0xc7}, virtual.DirectoryChild{}.FromLeaf(leaf), virtual.StatusOK) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "link", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{ + 0x62, 0xfc, 0x0c, 0x8c, 0x94, 0x86, 0x8d, 0xc7, + }, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_SAVEFH{}, + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_LINK{}, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "link", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_SAVEFH{ + Opsavefh: nfsv4_xdr.Savefh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_LINK{ + Oplink: &nfsv4_xdr.Link4res_default{ + Status: nfsv4_xdr.NFS4ERR_INVAL, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_INVAL, + }, res) + }) + + t.Run("SourceIsDirectory", func(t *testing.T) { + // Calling LINK with a directory as a source object should fail. 
+ directory := mock.NewMockVirtualDirectory(ctrl) + clock.EXPECT().Now().Return(time.Unix(1002, 0)) + handleResolverExpectCall(t, handleResolver, []byte{0x92, 0xcc, 0xd9, 0x59, 0xef, 0xf3, 0xef, 0x0a}, virtual.DirectoryChild{}.FromDirectory(directory), virtual.StatusOK) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "link", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{ + 0x92, 0xcc, 0xd9, 0x59, 0xef, 0xf3, 0xef, 0x0a, + }, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_SAVEFH{}, + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_LINK{ + Oplink: nfsv4_xdr.Link4args{ + Newname: "Hello", + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "link", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_SAVEFH{ + Opsavefh: nfsv4_xdr.Savefh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_LINK{ + Oplink: &nfsv4_xdr.Link4res_default{ + Status: nfsv4_xdr.NFS4ERR_ISDIR, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_ISDIR, + }, res) + }) + + t.Run("LinkCreationFailure", func(t *testing.T) { + // All arguments are correct, but the underlying + // directory does not allow the link to be created. 
+ leaf := mock.NewMockVirtualLeaf(ctrl) + clock.EXPECT().Now().Return(time.Unix(1003, 0)) + handleResolverExpectCall(t, handleResolver, []byte{0x98, 0x55, 0x2f, 0xf4, 0x06, 0xa1, 0xea, 0xbd}, virtual.DirectoryChild{}.FromLeaf(leaf), virtual.StatusOK) + rootDirectory.EXPECT().VirtualLink( + ctx, + path.MustNewComponent("Hello"), + leaf, + virtual.AttributesMask(0), + gomock.Any(), + ).Return(virtual.ChangeInfo{}, virtual.StatusErrXDev) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "link", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{ + 0x98, 0x55, 0x2f, 0xf4, 0x06, 0xa1, 0xea, 0xbd, + }, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_SAVEFH{}, + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_LINK{ + Oplink: nfsv4_xdr.Link4args{ + Newname: "Hello", + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "link", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_SAVEFH{ + Opsavefh: nfsv4_xdr.Savefh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_LINK{ + Oplink: &nfsv4_xdr.Link4res_default{ + Status: nfsv4_xdr.NFS4ERR_XDEV, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_XDEV, + }, res) + }) + + t.Run("Success", func(t *testing.T) { + leaf := mock.NewMockVirtualLeaf(ctrl) + clock.EXPECT().Now().Return(time.Unix(1004, 0)) + handleResolverExpectCall(t, handleResolver, []byte{0x98, 0x55, 0x2f, 0xf4, 0x06, 0xa1, 0xea, 0xbd}, virtual.DirectoryChild{}.FromLeaf(leaf), virtual.StatusOK) + rootDirectory.EXPECT().VirtualLink( + ctx, + path.MustNewComponent("Hello"), + leaf, + virtual.AttributesMask(0), + gomock.Any(), + ).Return(virtual.ChangeInfo{ + Before: 
0x6eee6c2bf6db7101, + After: 0x5d2447d9e6bec4b8, + }, virtual.StatusOK) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "link", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{ + 0x98, 0x55, 0x2f, 0xf4, 0x06, 0xa1, 0xea, 0xbd, + }, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_SAVEFH{}, + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_LINK{ + Oplink: nfsv4_xdr.Link4args{ + Newname: "Hello", + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "link", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_SAVEFH{ + Opsavefh: nfsv4_xdr.Savefh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_LINK{ + Oplink: &nfsv4_xdr.Link4res_NFS4_OK{ + Resok4: nfsv4_xdr.Link4resok{ + Cinfo: nfsv4_xdr.ChangeInfo4{ + Atomic: true, + Before: 0x6eee6c2bf6db7101, + After: 0x5d2447d9e6bec4b8, + }, + }, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) + }) +} + +func TestBaseProgramCompound_OP_LOOKUP(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + rootDirectory := mock.NewMockVirtualDirectory(ctrl) + rootDirectory.EXPECT().VirtualGetAttributes(gomock.Any(), virtual.AttributesMaskFileHandle, gomock.Any()). 
+ Do(func(ctx context.Context, requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetFileHandle([]byte{0x5a, 0x8a, 0xf7, 0x7b, 0x6f, 0x5e, 0xbc, 0xff}) + }) + handleResolver := mock.NewMockHandleResolver(ctrl) + randomNumberGenerator := mock.NewMockSingleThreadedGenerator(ctrl) + rebootVerifier := nfsv4_xdr.Verifier4{0xf5, 0x66, 0xea, 0xae, 0x76, 0x70, 0xd1, 0x5b} + stateIDOtherPrefix := [...]byte{0x2d, 0x48, 0xd3, 0x9b} + clock := mock.NewMockClock(ctrl) + program := nfsv4.NewBaseProgram(rootDirectory, handleResolver.Call, randomNumberGenerator, rebootVerifier, stateIDOtherPrefix, clock, 2*time.Minute, time.Minute) + + t.Run("NoFileHandle", func(t *testing.T) { + // Calling LOOKUP without a file handle should fail. + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "lookup", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_LOOKUP{ + Oplookup: nfsv4_xdr.Lookup4args{ + Objname: "Hello", + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "lookup", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_LOOKUP{ + Oplookup: nfsv4_xdr.Lookup4res{ + Status: nfsv4_xdr.NFS4ERR_NOFILEHANDLE, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_NOFILEHANDLE, + }, res) + }) + + t.Run("BadName", func(t *testing.T) { + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "lookup", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_LOOKUP{ + Oplookup: nfsv4_xdr.Lookup4args{ + Objname: "..", + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "lookup", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_LOOKUP{ + Oplookup: nfsv4_xdr.Lookup4res{ + Status: nfsv4_xdr.NFS4ERR_BADNAME, + }, + }, + }, + Status: 
nfsv4_xdr.NFS4ERR_BADNAME, + }, res) + }) + + t.Run("MissingName", func(t *testing.T) { + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "lookup", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_LOOKUP{}, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "lookup", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_LOOKUP{ + Oplookup: nfsv4_xdr.Lookup4res{ + Status: nfsv4_xdr.NFS4ERR_INVAL, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_INVAL, + }, res) + }) + + t.Run("NotDirectory", func(t *testing.T) { + // When called against files other than symbolic links, + // LOOKUP should return NFS4ERR_NOTDIR. + leaf := mock.NewMockVirtualLeaf(ctrl) + clock.EXPECT().Now().Return(time.Unix(1000, 0)) + handleResolverExpectCall(t, handleResolver, []byte{1, 2, 3}, virtual.DirectoryChild{}.FromLeaf(leaf), virtual.StatusOK) + leaf.EXPECT().VirtualGetAttributes(ctx, virtual.AttributesMaskFileType, gomock.Any()). 
+ Do(func(ctx context.Context, requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetFileType(filesystem.FileTypeRegularFile) + }) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "lookup", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{1, 2, 3}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_LOOKUP{ + Oplookup: nfsv4_xdr.Lookup4args{ + Objname: "Hello", + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "lookup", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_LOOKUP{ + Oplookup: nfsv4_xdr.Lookup4res{ + Status: nfsv4_xdr.NFS4ERR_NOTDIR, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_NOTDIR, + }, res) + }) + + t.Run("Symlink", func(t *testing.T) { + // When called against symbolic links, LOOKUP should + // return NFS4ERR_SYMLINK. That way the client knows it + // may need to do symlink expansion. + leaf := mock.NewMockVirtualLeaf(ctrl) + clock.EXPECT().Now().Return(time.Unix(1001, 0)) + handleResolverExpectCall(t, handleResolver, []byte{4, 5, 6}, virtual.DirectoryChild{}.FromLeaf(leaf), virtual.StatusOK) + leaf.EXPECT().VirtualGetAttributes(ctx, virtual.AttributesMaskFileType, gomock.Any()). 
+ Do(func(ctx context.Context, requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetFileType(filesystem.FileTypeSymlink) + }) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "lookup", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{4, 5, 6}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_LOOKUP{ + Oplookup: nfsv4_xdr.Lookup4args{ + Objname: "Hello", + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "lookup", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_LOOKUP{ + Oplookup: nfsv4_xdr.Lookup4res{ + Status: nfsv4_xdr.NFS4ERR_SYMLINK, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_SYMLINK, + }, res) + }) + + t.Run("NotFound", func(t *testing.T) { + rootDirectory.EXPECT().VirtualLookup( + gomock.Any(), + path.MustNewComponent("Hello"), + virtual.AttributesMaskFileHandle, + gomock.Any(), + ).Return(virtual.DirectoryChild{}, virtual.StatusErrNoEnt) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "lookup", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_LOOKUP{ + Oplookup: nfsv4_xdr.Lookup4args{ + Objname: "Hello", + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "lookup", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_LOOKUP{ + Oplookup: nfsv4_xdr.Lookup4res{ + Status: nfsv4_xdr.NFS4ERR_NOENT, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_NOENT, + }, res) + }) + + t.Run("Success", func(t *testing.T) { + leaf := mock.NewMockVirtualLeaf(ctrl) + rootDirectory.EXPECT().VirtualLookup( + ctx, + path.MustNewComponent("Hello"), + 
virtual.AttributesMaskFileHandle, + gomock.Any(), + ).DoAndReturn(func(ctx context.Context, name path.Component, requested virtual.AttributesMask, attributes *virtual.Attributes) (virtual.DirectoryChild, virtual.Status) { + attributes.SetFileHandle([]byte{0x98, 0xb2, 0xdc, 0x6e, 0x34, 0xa2, 0xcf, 0xa5}) + return virtual.DirectoryChild{}.FromLeaf(leaf), virtual.StatusOK + }) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "lookup", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_LOOKUP{ + Oplookup: nfsv4_xdr.Lookup4args{ + Objname: "Hello", + }, + }, + &nfsv4_xdr.NfsArgop4_OP_GETFH{}, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "lookup", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_LOOKUP{ + Oplookup: nfsv4_xdr.Lookup4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_GETFH{ + Opgetfh: &nfsv4_xdr.Getfh4res_NFS4_OK{ + Resok4: nfsv4_xdr.Getfh4resok{ + Object: nfsv4_xdr.NfsFh4{ + 0x98, 0xb2, 0xdc, 0x6e, 0x34, 0xa2, 0xcf, 0xa5, + }, + }, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) + }) +} + +// TODO: LOOKUPP + +func TestBaseProgramCompound_OP_NVERIFY(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + rootDirectory := mock.NewMockVirtualDirectory(ctrl) + rootDirectory.EXPECT().VirtualGetAttributes(gomock.Any(), virtual.AttributesMaskFileHandle, gomock.Any()). 
+ Do(func(ctx context.Context, requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetFileHandle([]byte{0xe0, 0x7a, 0x5b, 0x53, 0x03, 0x7a, 0x0a, 0x6f}) + }) + handleResolver := mock.NewMockHandleResolver(ctrl) + randomNumberGenerator := mock.NewMockSingleThreadedGenerator(ctrl) + rebootVerifier := nfsv4_xdr.Verifier4{0xab, 0x23, 0xe8, 0x04, 0x79, 0x23, 0x0a, 0x27} + stateIDOtherPrefix := [...]byte{0x41, 0x40, 0x91, 0x69} + clock := mock.NewMockClock(ctrl) + program := nfsv4.NewBaseProgram(rootDirectory, handleResolver.Call, randomNumberGenerator, rebootVerifier, stateIDOtherPrefix, clock, 2*time.Minute, time.Minute) + + // Only basic testing coverage for NVERIFY is provided, as it is + // assumed most of the logic is shared with VERIFY. + + t.Run("Match", func(t *testing.T) { + rootDirectory.EXPECT().VirtualGetAttributes(ctx, virtual.AttributesMaskFileType|virtual.AttributesMaskInodeNumber, gomock.Any()). + Do(func(ctx context.Context, requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetFileType(filesystem.FileTypeDirectory) + attributes.SetInodeNumber(0x676b7bcb66d92ed6) + }) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "nverify", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_NVERIFY{ + Opnverify: nfsv4_xdr.Nverify4args{ + ObjAttributes: nfsv4_xdr.Fattr4{ + Attrmask: nfsv4_xdr.Bitmap4{ + (1 << nfsv4_xdr.FATTR4_TYPE) | + (1 << nfsv4_xdr.FATTR4_FILEID), + }, + AttrVals: nfsv4_xdr.Attrlist4{ + // FATTR4_TYPE == NF4DIR. + 0x00, 0x00, 0x00, 0x02, + // FATTR4_FILEID. 
+ 0x67, 0x6b, 0x7b, 0xcb, 0x66, 0xd9, 0x2e, 0xd6, + }, + }, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "nverify", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_NVERIFY{ + Opnverify: nfsv4_xdr.Nverify4res{ + Status: nfsv4_xdr.NFS4ERR_SAME, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_SAME, + }, res) + }) + + t.Run("Mismatch", func(t *testing.T) { + rootDirectory.EXPECT().VirtualGetAttributes(ctx, virtual.AttributesMaskFileType, gomock.Any()). + Do(func(ctx context.Context, requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetFileType(filesystem.FileTypeDirectory) + }) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "nverify", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_NVERIFY{ + Opnverify: nfsv4_xdr.Nverify4args{ + ObjAttributes: nfsv4_xdr.Fattr4{ + Attrmask: nfsv4_xdr.Bitmap4{ + 1 << nfsv4_xdr.FATTR4_TYPE, + }, + AttrVals: nfsv4_xdr.Attrlist4{ + // FATTR4_TYPE == NF4BLK. + 0x00, 0x00, 0x00, 0x03, + }, + }, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "nverify", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_NVERIFY{ + Opnverify: nfsv4_xdr.Nverify4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) + }) +} + +func TestBaseProgramCompound_OP_OPEN(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + rootDirectory := mock.NewMockVirtualDirectory(ctrl) + rootDirectory.EXPECT().VirtualGetAttributes(gomock.Any(), virtual.AttributesMaskFileHandle, gomock.Any()). 
+ Do(func(ctx context.Context, requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetFileHandle([]byte{0x3d, 0xed, 0x6d, 0xff, 0x3e, 0x69, 0x19, 0xcb}) + }) + handleResolver := mock.NewMockHandleResolver(ctrl) + randomNumberGenerator := mock.NewMockSingleThreadedGenerator(ctrl) + rebootVerifier := nfsv4_xdr.Verifier4{0x18, 0xe4, 0x47, 0xf1, 0x31, 0x1c, 0xe2, 0x94} + stateIDOtherPrefix := [...]byte{0x5c, 0x71, 0xa6, 0x0d} + clock := mock.NewMockClock(ctrl) + program := nfsv4.NewBaseProgram(rootDirectory, handleResolver.Call, randomNumberGenerator, rebootVerifier, stateIDOtherPrefix, clock, 2*time.Minute, time.Minute) + + clock.EXPECT().Now().Return(time.Unix(1000, 0)) + clock.EXPECT().Now().Return(time.Unix(1001, 0)) + setClientIDForTesting(ctx, t, randomNumberGenerator, program, 0xc9beea1ca8ba0be3) + + t.Run("ClaimPrevious", func(t *testing.T) { + t.Run("UnopenedFile", func(t *testing.T) { + // Calling CLAIM_PREVIOUS against a file handle + // that hasn't been opened before should fail. 
+ clock.EXPECT().Now().Return(time.Unix(1002, 0)) + leaf := mock.NewMockVirtualLeaf(ctrl) + handleResolverExpectCall(t, handleResolver, []byte{0x04, 0x72, 0xba, 0x69, 0x15, 0xeb, 0x78, 0x97}, virtual.DirectoryChild{}.FromLeaf(leaf), virtual.StatusOK) + clock.EXPECT().Now().Return(time.Unix(1003, 0)) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "open", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{0x04, 0x72, 0xba, 0x69, 0x15, 0xeb, 0x78, 0x97}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_OPEN{ + Opopen: nfsv4_xdr.Open4args{ + Seqid: 342, + ShareAccess: nfsv4_xdr.OPEN4_SHARE_ACCESS_WRITE, + ShareDeny: nfsv4_xdr.OPEN4_SHARE_DENY_NONE, + Owner: nfsv4_xdr.OpenOwner4{ + Clientid: 0xc9beea1ca8ba0be3, + Owner: []byte{0xda, 0x95, 0x91, 0x97, 0x45, 0xea, 0xb6, 0x79}, + }, + Openhow: &nfsv4_xdr.Openflag4_default{}, + Claim: &nfsv4_xdr.OpenClaim4_CLAIM_PREVIOUS{ + DelegateType: nfsv4_xdr.OPEN_DELEGATE_NONE, + }, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "open", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_OPEN{ + Opopen: &nfsv4_xdr.Open4res_default{ + Status: nfsv4_xdr.NFS4ERR_RECLAIM_BAD, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_RECLAIM_BAD, + }, res) + }) + + t.Run("ClaimAgainstUnconfirmed", func(t *testing.T) { + // We should not be able to use CLAIM_PREVIOUS + // against unconfirmed open-owners. + // + // More details: RFC 7530, section 9.1.11, + // bullet point 2. 
+ leaf := mock.NewMockVirtualLeaf(ctrl) + clock.EXPECT().Now().Return(time.Unix(1004, 0)) + clock.EXPECT().Now().Return(time.Unix(1005, 0)) + openUnconfirmedFileForTesting( + ctx, + t, + randomNumberGenerator, + program, + rootDirectory, + leaf, + nfsv4_xdr.NfsFh4{0xd7, 0x5a, 0xe0, 0xe7, 0xc3, 0x56, 0x29, 0x78}, + /* shortClientID = */ 0xc9beea1ca8ba0be3, + /* seqID = */ 342, + /* stateIDOther = */ [...]byte{ + 0x5c, 0x71, 0xa6, 0x0d, + 0xe3, 0x12, 0x33, 0xb2, + 0x43, 0xda, 0x5f, 0xaf, + }) + + clock.EXPECT().Now().Return(time.Unix(1006, 0)) + clock.EXPECT().Now().Return(time.Unix(1007, 0)) + leaf.EXPECT().VirtualClose(virtual.ShareMaskRead) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "open", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{0xd7, 0x5a, 0xe0, 0xe7, 0xc3, 0x56, 0x29, 0x78}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_OPEN{ + Opopen: nfsv4_xdr.Open4args{ + Seqid: 343, + ShareAccess: nfsv4_xdr.OPEN4_SHARE_ACCESS_READ, + ShareDeny: nfsv4_xdr.OPEN4_SHARE_DENY_NONE, + Owner: nfsv4_xdr.OpenOwner4{ + Clientid: 0xc9beea1ca8ba0be3, + Owner: []byte{0xc4, 0x85, 0x50, 0x6b, 0xa5, 0xec, 0x8e, 0x2c}, + }, + Openhow: &nfsv4_xdr.Openflag4_default{}, + Claim: &nfsv4_xdr.OpenClaim4_CLAIM_PREVIOUS{ + DelegateType: nfsv4_xdr.OPEN_DELEGATE_NONE, + }, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "open", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_OPEN{ + Opopen: &nfsv4_xdr.Open4res_default{ + Status: nfsv4_xdr.NFS4ERR_RECLAIM_BAD, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_RECLAIM_BAD, + }, res) + }) + + // The remainder of the tests for CLAIM_PREVIOUS assume + // that a file has been opened properly. 
+ leaf := mock.NewMockVirtualLeaf(ctrl) + clock.EXPECT().Now().Return(time.Unix(1008, 0)) + clock.EXPECT().Now().Return(time.Unix(1009, 0)) + openUnconfirmedFileForTesting( + ctx, + t, + randomNumberGenerator, + program, + rootDirectory, + leaf, + nfsv4_xdr.NfsFh4{0x28, 0xef, 0x5d, 0x33, 0xfb, 0x4b, 0x0f, 0x77}, + /* shortClientID = */ 0xc9beea1ca8ba0be3, + /* seqID = */ 344, + /* stateIDOther = */ [...]byte{ + 0x5c, 0x71, 0xa6, 0x0d, + 0xe2, 0x73, 0x1b, 0xe6, + 0x3b, 0x79, 0x04, 0x81, + }) + clock.EXPECT().Now().Return(time.Unix(1010, 0)) + clock.EXPECT().Now().Return(time.Unix(1011, 0)) + openConfirmForTesting( + ctx, + t, + randomNumberGenerator, + program, + nfsv4_xdr.NfsFh4{0x28, 0xef, 0x5d, 0x33, 0xfb, 0x4b, 0x0f, 0x77}, + /* seqID = */ 345, + /* stateIDOther = */ [...]byte{ + 0x5c, 0x71, 0xa6, 0x0d, + 0xe2, 0x73, 0x1b, 0xe6, + 0x3b, 0x79, 0x04, 0x81, + }) + + t.Run("MismatchingDelegateType", func(t *testing.T) { + // When calling CLAIM_PREVIOUS, the client must + // provide a delegate type that is compatible + // with the state on the server. As we don't + // support delegations, the client MUST provide + // OPEN_DELEGATE_NONE. 
+ clock.EXPECT().Now().Return(time.Unix(1012, 0)) + clock.EXPECT().Now().Return(time.Unix(1013, 0)) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "open", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{0x28, 0xef, 0x5d, 0x33, 0xfb, 0x4b, 0x0f, 0x77}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_OPEN{ + Opopen: nfsv4_xdr.Open4args{ + Seqid: 346, + ShareAccess: nfsv4_xdr.OPEN4_SHARE_ACCESS_READ, + ShareDeny: nfsv4_xdr.OPEN4_SHARE_DENY_NONE, + Owner: nfsv4_xdr.OpenOwner4{ + Clientid: 0xc9beea1ca8ba0be3, + Owner: []byte{0xc4, 0x85, 0x50, 0x6b, 0xa5, 0xec, 0x8e, 0x2c}, + }, + Openhow: &nfsv4_xdr.Openflag4_default{}, + Claim: &nfsv4_xdr.OpenClaim4_CLAIM_PREVIOUS{ + DelegateType: nfsv4_xdr.OPEN_DELEGATE_READ, + }, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_GETFH{}, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "open", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_OPEN{ + Opopen: &nfsv4_xdr.Open4res_default{ + Status: nfsv4_xdr.NFS4ERR_RECLAIM_BAD, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_RECLAIM_BAD, + }, res) + }) + + t.Run("Guarded", func(t *testing.T) { + // CLAIM_PREVIOUS should always open a file that + // already exists. It should therefore be + // impossible to use it in combination with + // GUARDED4. 
+ clock.EXPECT().Now().Return(time.Unix(1014, 0)) + clock.EXPECT().Now().Return(time.Unix(1015, 0)) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "open", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{0x28, 0xef, 0x5d, 0x33, 0xfb, 0x4b, 0x0f, 0x77}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_OPEN{ + Opopen: nfsv4_xdr.Open4args{ + Seqid: 347, + ShareAccess: nfsv4_xdr.OPEN4_SHARE_ACCESS_READ, + ShareDeny: nfsv4_xdr.OPEN4_SHARE_DENY_NONE, + Owner: nfsv4_xdr.OpenOwner4{ + Clientid: 0xc9beea1ca8ba0be3, + Owner: []byte{0xc4, 0x85, 0x50, 0x6b, 0xa5, 0xec, 0x8e, 0x2c}, + }, + Openhow: &nfsv4_xdr.Openflag4_OPEN4_CREATE{ + How: &nfsv4_xdr.Createhow4_GUARDED4{}, + }, + Claim: &nfsv4_xdr.OpenClaim4_CLAIM_PREVIOUS{ + DelegateType: nfsv4_xdr.OPEN_DELEGATE_NONE, + }, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "open", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_OPEN{ + Opopen: &nfsv4_xdr.Open4res_default{ + Status: nfsv4_xdr.NFS4ERR_EXIST, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_EXIST, + }, res) + }) + + t.Run("OpenFailure", func(t *testing.T) { + // RFC 7530 doesn't explicitly disallow + // CLAIM_PREVIOUS to be used to upgrade the + // share reservations or truncate the file. This + // requires reopening the file, which may fail. 
+ clock.EXPECT().Now().Return(time.Unix(1016, 0)) + clock.EXPECT().Now().Return(time.Unix(1017, 0)) + leaf.EXPECT().VirtualOpenSelf( + ctx, + virtual.ShareMaskWrite, + &virtual.OpenExistingOptions{Truncate: true}, + virtual.AttributesMask(0), + gomock.Any(), + ).Return(virtual.StatusErrAccess) + clock.EXPECT().Now().Return(time.Unix(1018, 0)) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "open", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{0x28, 0xef, 0x5d, 0x33, 0xfb, 0x4b, 0x0f, 0x77}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_OPEN{ + Opopen: nfsv4_xdr.Open4args{ + Seqid: 348, + ShareAccess: nfsv4_xdr.OPEN4_SHARE_ACCESS_WRITE, + ShareDeny: nfsv4_xdr.OPEN4_SHARE_DENY_NONE, + Owner: nfsv4_xdr.OpenOwner4{ + Clientid: 0xc9beea1ca8ba0be3, + Owner: []byte{0xc4, 0x85, 0x50, 0x6b, 0xa5, 0xec, 0x8e, 0x2c}, + }, + Openhow: &nfsv4_xdr.Openflag4_OPEN4_CREATE{ + How: &nfsv4_xdr.Createhow4_UNCHECKED4{ + Createattrs: nfsv4_xdr.Fattr4{ + Attrmask: nfsv4_xdr.Bitmap4{ + (1 << nfsv4_xdr.FATTR4_SIZE), + }, + AttrVals: nfsv4_xdr.Attrlist4{ + // FATTR4_SIZE == 0. 
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + }, + }, + }, + }, + Claim: &nfsv4_xdr.OpenClaim4_CLAIM_PREVIOUS{ + DelegateType: nfsv4_xdr.OPEN_DELEGATE_NONE, + }, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "open", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_OPEN{ + Opopen: &nfsv4_xdr.Open4res_default{ + Status: nfsv4_xdr.NFS4ERR_ACCESS, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_ACCESS, + }, res) + }) + + t.Run("Success", func(t *testing.T) { + clock.EXPECT().Now().Return(time.Unix(1019, 0)) + clock.EXPECT().Now().Return(time.Unix(1020, 0)) + leaf.EXPECT().VirtualOpenSelf(ctx, virtual.ShareMaskRead, &virtual.OpenExistingOptions{}, virtual.AttributesMask(0), gomock.Any()) + leaf.EXPECT().VirtualClose(virtual.ShareMaskRead) + clock.EXPECT().Now().Return(time.Unix(1021, 0)) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "open", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{0x28, 0xef, 0x5d, 0x33, 0xfb, 0x4b, 0x0f, 0x77}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_OPEN{ + Opopen: nfsv4_xdr.Open4args{ + Seqid: 349, + ShareAccess: nfsv4_xdr.OPEN4_SHARE_ACCESS_READ, + ShareDeny: nfsv4_xdr.OPEN4_SHARE_DENY_NONE, + Owner: nfsv4_xdr.OpenOwner4{ + Clientid: 0xc9beea1ca8ba0be3, + Owner: []byte{0xc4, 0x85, 0x50, 0x6b, 0xa5, 0xec, 0x8e, 0x2c}, + }, + Openhow: &nfsv4_xdr.Openflag4_default{}, + Claim: &nfsv4_xdr.OpenClaim4_CLAIM_PREVIOUS{ + DelegateType: nfsv4_xdr.OPEN_DELEGATE_NONE, + }, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "open", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_OPEN{ + Opopen: 
&nfsv4_xdr.Open4res_NFS4_OK{ + Resok4: nfsv4_xdr.Open4resok{ + Stateid: nfsv4_xdr.Stateid4{ + Seqid: 3, + Other: [...]byte{ + 0x5c, 0x71, 0xa6, 0x0d, + 0xe2, 0x73, 0x1b, 0xe6, + 0x3b, 0x79, 0x04, 0x81, + }, + }, + Rflags: nfsv4_xdr.OPEN4_RESULT_LOCKTYPE_POSIX, + Attrset: nfsv4_xdr.Bitmap4{}, + Delegation: &nfsv4_xdr.OpenDelegation4_OPEN_DELEGATE_NONE{}, + }, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) + }) + }) + + // TODO: We're missing testing coverage for CLAIM_NULL, + // EXCLUSIVE4, etc. +} + +func TestBaseProgramCompound_OP_OPENATTR(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + rootDirectory := mock.NewMockVirtualDirectory(ctrl) + rootDirectory.EXPECT().VirtualGetAttributes(gomock.Any(), virtual.AttributesMaskFileHandle, gomock.Any()). + Do(func(ctx context.Context, requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetFileHandle([]byte{0x03, 0x86, 0xd4, 0xcb, 0x44, 0x7c, 0x7e, 0x77}) + }) + handleResolver := mock.NewMockHandleResolver(ctrl) + randomNumberGenerator := mock.NewMockSingleThreadedGenerator(ctrl) + rebootVerifier := nfsv4_xdr.Verifier4{0xe6, 0x7e, 0xb7, 0xdb, 0x52, 0x9c, 0x7c, 0x86} + stateIDOtherPrefix := [...]byte{0x06, 0x00, 0x7c, 0x9d} + clock := mock.NewMockClock(ctrl) + program := nfsv4.NewBaseProgram(rootDirectory, handleResolver.Call, randomNumberGenerator, rebootVerifier, stateIDOtherPrefix, clock, 2*time.Minute, time.Minute) + + t.Run("NoFileHandle", func(t *testing.T) { + // Calling OPENATTR without a file handle should fail. 
+ res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "openattr", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_OPENATTR{ + Opopenattr: nfsv4_xdr.Openattr4args{ + Createdir: true, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "openattr", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_OPENATTR{ + Opopenattr: nfsv4_xdr.Openattr4res{ + Status: nfsv4_xdr.NFS4ERR_NOFILEHANDLE, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_NOFILEHANDLE, + }, res) + }) + + t.Run("NotSupported", func(t *testing.T) { + // This implementation does not support named attributes. + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "openattr", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_OPENATTR{ + Opopenattr: nfsv4_xdr.Openattr4args{ + Createdir: true, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "openattr", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_OPENATTR{ + Opopenattr: nfsv4_xdr.Openattr4res{ + Status: nfsv4_xdr.NFS4ERR_NOTSUPP, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_NOTSUPP, + }, res) + }) +} + +func TestBaseProgramCompound_OP_OPEN_CONFIRM(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + rootDirectory := mock.NewMockVirtualDirectory(ctrl) + rootDirectory.EXPECT().VirtualGetAttributes(gomock.Any(), virtual.AttributesMaskFileHandle, gomock.Any()). 
+ Do(func(ctx context.Context, requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetFileHandle([]byte{0x2e, 0x8d, 0x48, 0x03, 0xc4, 0xc3, 0x2d, 0x6c}) + }) + handleResolver := mock.NewMockHandleResolver(ctrl) + randomNumberGenerator := mock.NewMockSingleThreadedGenerator(ctrl) + rebootVerifier := nfsv4_xdr.Verifier4{0x42, 0xa8, 0x3f, 0xd1, 0xde, 0x65, 0x74, 0x2a} + stateIDOtherPrefix := [...]byte{0xfa, 0xc3, 0xf7, 0x18} + clock := mock.NewMockClock(ctrl) + program := nfsv4.NewBaseProgram(rootDirectory, handleResolver.Call, randomNumberGenerator, rebootVerifier, stateIDOtherPrefix, clock, 2*time.Minute, time.Minute) + + clock.EXPECT().Now().Return(time.Unix(1000, 0)) + clock.EXPECT().Now().Return(time.Unix(1001, 0)) + setClientIDForTesting(ctx, t, randomNumberGenerator, program, 0x2e5550c498b2b463) + + t.Run("RetransmissionSuccess", func(t *testing.T) { + // It should be valid to send OPEN_CONFIRM repeatedly in + // case of connection drops. + leaf := mock.NewMockVirtualLeaf(ctrl) + clock.EXPECT().Now().Return(time.Unix(1002, 0)) + clock.EXPECT().Now().Return(time.Unix(1003, 0)) + openUnconfirmedFileForTesting( + ctx, + t, + randomNumberGenerator, + program, + rootDirectory, + leaf, + nfsv4_xdr.NfsFh4{0xff, 0x27, 0xc7, 0x8f, 0xd5, 0x6a, 0xfb, 0xee}, + /* shortClientID = */ 0x2e5550c498b2b463, + /* seqID = */ 1205, + /* stateIDOther = */ [...]byte{ + 0xfa, 0xc3, 0xf7, 0x18, + 0x80, 0x57, 0x5b, 0x95, + 0x08, 0x16, 0x41, 0x0a, + }) + + for i := int64(0); i < 10; i++ { + clock.EXPECT().Now().Return(time.Unix(1004+i*2, 0)) + clock.EXPECT().Now().Return(time.Unix(1005+i*2, 0)) + openConfirmForTesting( + ctx, + t, + randomNumberGenerator, + program, + nfsv4_xdr.NfsFh4{0xff, 0x27, 0xc7, 0x8f, 0xd5, 0x6a, 0xfb, 0xee}, + /* seqID = */ 1206, + /* stateIDOther = */ [...]byte{ + 0xfa, 0xc3, 0xf7, 0x18, + 0x80, 0x57, 0x5b, 0x95, + 0x08, 0x16, 0x41, 0x0a, + }) + } + }) + + t.Run("RetransmissionWithMismatchingStateID", func(t *testing.T) { + // At a 
minimum, the standard states that when returning + // a cached response it is sufficient to compare the + // original operation type and sequence ID. Let's be a + // bit more strict and actually check whether the + // provided state ID matches the one that was provided + // as part of the original request. + // + // More details: RFC 7530, section 9.1.9, bullet point 3. + clock.EXPECT().Now().Return(time.Unix(1024, 0)) + clock.EXPECT().Now().Return(time.Unix(1025, 0)) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "close", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{0xff, 0x27, 0xc7, 0x8f, 0xd5, 0x6a, 0xfb, 0xee}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_OPEN_CONFIRM{ + OpopenConfirm: nfsv4_xdr.OpenConfirm4args{ + Seqid: 1206, + OpenStateid: nfsv4_xdr.Stateid4{ + Seqid: 3, + Other: [...]byte{ + 0xfa, 0xc3, 0xf7, 0x18, + 0x80, 0x57, 0x5b, 0x95, + 0x08, 0x16, 0x41, 0x0a, + }, + }, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "close", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_OPEN_CONFIRM{ + OpopenConfirm: &nfsv4_xdr.OpenConfirm4res_default{ + Status: nfsv4_xdr.NFS4ERR_BAD_SEQID, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_BAD_SEQID, + }, res) + }) + + // TODO: Any more cases we want to test? +} + +func TestBaseProgramCompound_OP_OPEN_DOWNGRADE(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + rootDirectory := mock.NewMockVirtualDirectory(ctrl) + rootDirectory.EXPECT().VirtualGetAttributes(gomock.Any(), virtual.AttributesMaskFileHandle, gomock.Any()). 
+ Do(func(ctx context.Context, requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetFileHandle([]byte{0x17, 0x7e, 0x65, 0xc0, 0x10, 0xaf, 0x8c, 0x24}) + }) + handleResolver := mock.NewMockHandleResolver(ctrl) + randomNumberGenerator := mock.NewMockSingleThreadedGenerator(ctrl) + rebootVerifier := nfsv4_xdr.Verifier4{0x4d, 0x0d, 0xc1, 0xca, 0xd9, 0xeb, 0x73, 0xc9} + stateIDOtherPrefix := [...]byte{0x2c, 0xa4, 0xce, 0xdc} + clock := mock.NewMockClock(ctrl) + program := nfsv4.NewBaseProgram(rootDirectory, handleResolver.Call, randomNumberGenerator, rebootVerifier, stateIDOtherPrefix, clock, 2*time.Minute, time.Minute) + + t.Run("AnonymousStateID", func(t *testing.T) { + // Calling OPEN_DOWNGRADE against the anonymous state ID + // is of course not permitted. This operation only works + // when called against regular state IDs. + leaf := mock.NewMockVirtualLeaf(ctrl) + clock.EXPECT().Now().Return(time.Unix(1000, 0)) + handleResolverExpectCall(t, handleResolver, []byte{1, 2, 3}, virtual.DirectoryChild{}.FromLeaf(leaf), virtual.StatusOK) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "open_downgrade", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{1, 2, 3}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_OPEN_DOWNGRADE{ + OpopenDowngrade: nfsv4_xdr.OpenDowngrade4args{ + Seqid: 0x33cfa3a9, + ShareAccess: nfsv4_xdr.OPEN4_SHARE_ACCESS_READ, + ShareDeny: nfsv4_xdr.OPEN4_SHARE_DENY_NONE, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "open_downgrade", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_OPEN_DOWNGRADE{ + OpopenDowngrade: &nfsv4_xdr.OpenDowngrade4res_default{ + Status: nfsv4_xdr.NFS4ERR_BAD_STATEID, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_BAD_STATEID, + }, res) + 
}) + + t.Run("StaleStateID", func(t *testing.T) { + // Providing an arbitrary state ID that does not start + // with a known prefix should return + // NFS4ERR_STALE_STATEID, as it's likely from before a + // restart. + leaf := mock.NewMockVirtualLeaf(ctrl) + clock.EXPECT().Now().Return(time.Unix(1001, 0)) + handleResolverExpectCall(t, handleResolver, []byte{1, 2, 3}, virtual.DirectoryChild{}.FromLeaf(leaf), virtual.StatusOK) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "open_downgrade", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{1, 2, 3}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_OPEN_DOWNGRADE{ + OpopenDowngrade: nfsv4_xdr.OpenDowngrade4args{ + OpenStateid: nfsv4_xdr.Stateid4{ + Seqid: 0xcc2e292c, + Other: [...]byte{ + 0xf1, 0x96, 0x5a, 0xa1, + 0xb8, 0x8b, 0x1b, 0x27, + 0xdf, 0x8c, 0xc4, 0x5b, + }, + }, + Seqid: 0xce84c893, + ShareAccess: nfsv4_xdr.OPEN4_SHARE_ACCESS_READ, + ShareDeny: nfsv4_xdr.OPEN4_SHARE_DENY_NONE, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "open_downgrade", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_OPEN_DOWNGRADE{ + OpopenDowngrade: &nfsv4_xdr.OpenDowngrade4res_default{ + Status: nfsv4_xdr.NFS4ERR_STALE_STATEID, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_STALE_STATEID, + }, res) + }) + + // The remainder of the test assumes the availability of a + // client ID and an opened file. 
+ clock.EXPECT().Now().Return(time.Unix(1002, 0)) + clock.EXPECT().Now().Return(time.Unix(1003, 0)) + setClientIDForTesting(ctx, t, randomNumberGenerator, program, 0xc14e56bbe220a24e) + + leaf := mock.NewMockVirtualLeaf(ctrl) + clock.EXPECT().Now().Return(time.Unix(1004, 0)) + clock.EXPECT().Now().Return(time.Unix(1005, 0)) + openUnconfirmedFileForTesting( + ctx, + t, + randomNumberGenerator, + program, + rootDirectory, + leaf, + nfsv4_xdr.NfsFh4{0x3d, 0x11, 0xc3, 0xe7, 0x71, 0x5c, 0xd5, 0x2a}, + /* shortClientID = */ 0xc14e56bbe220a24e, + /* seqID = */ 427, + /* stateIDOther = */ [...]byte{ + 0x2c, 0xa4, 0xce, 0xdc, + 0xfd, 0x2b, 0x5f, 0x18, + 0xe7, 0x2d, 0xf9, 0x61, + }) + + clock.EXPECT().Now().Return(time.Unix(1006, 0)) + clock.EXPECT().Now().Return(time.Unix(1007, 0)) + openConfirmForTesting( + ctx, + t, + randomNumberGenerator, + program, + nfsv4_xdr.NfsFh4{0x3d, 0x11, 0xc3, 0xe7, 0x71, 0x5c, 0xd5, 0x2a}, + /* seqID = */ 428, + /* stateIDOther = */ [...]byte{ + 0x2c, 0xa4, 0xce, 0xdc, + 0xfd, 0x2b, 0x5f, 0x18, + 0xe7, 0x2d, 0xf9, 0x61, + }) + + t.Run("Upgrade", func(t *testing.T) { + // It's not permitted to use OPEN_DOWNGRADE to upgrade + // the share reservations of a file. A file that's been + // opened for reading can't be upgraded to reading and + // writing. 
+ clock.EXPECT().Now().Return(time.Unix(1010, 0)) + clock.EXPECT().Now().Return(time.Unix(1011, 0)) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "open_downgrade", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{0x3d, 0x11, 0xc3, 0xe7, 0x71, 0x5c, 0xd5, 0x2a}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_OPEN_DOWNGRADE{ + OpopenDowngrade: nfsv4_xdr.OpenDowngrade4args{ + OpenStateid: nfsv4_xdr.Stateid4{ + Seqid: 2, + Other: [...]byte{ + 0x2c, 0xa4, 0xce, 0xdc, + 0xfd, 0x2b, 0x5f, 0x18, + 0xe7, 0x2d, 0xf9, 0x61, + }, + }, + Seqid: 429, + ShareAccess: nfsv4_xdr.OPEN4_SHARE_ACCESS_BOTH, + ShareDeny: nfsv4_xdr.OPEN4_SHARE_DENY_NONE, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "open_downgrade", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_OPEN_DOWNGRADE{ + OpopenDowngrade: &nfsv4_xdr.OpenDowngrade4res_default{ + Status: nfsv4_xdr.NFS4ERR_INVAL, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_INVAL, + }, res) + }) + + t.Run("Noop", func(t *testing.T) { + // Though pointless, it is permitted to downgrade a file + // to exactly the same set of share reservations. 
+ clock.EXPECT().Now().Return(time.Unix(1012, 0)) + clock.EXPECT().Now().Return(time.Unix(1013, 0)) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "open_downgrade", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{0x3d, 0x11, 0xc3, 0xe7, 0x71, 0x5c, 0xd5, 0x2a}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_OPEN_DOWNGRADE{ + OpopenDowngrade: nfsv4_xdr.OpenDowngrade4args{ + OpenStateid: nfsv4_xdr.Stateid4{ + Seqid: 2, + Other: [...]byte{ + 0x2c, 0xa4, 0xce, 0xdc, + 0xfd, 0x2b, 0x5f, 0x18, + 0xe7, 0x2d, 0xf9, 0x61, + }, + }, + Seqid: 430, + ShareAccess: nfsv4_xdr.OPEN4_SHARE_ACCESS_READ, + ShareDeny: nfsv4_xdr.OPEN4_SHARE_DENY_NONE, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "open_downgrade", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_OPEN_DOWNGRADE{ + OpopenDowngrade: &nfsv4_xdr.OpenDowngrade4res_NFS4_OK{ + Resok4: nfsv4_xdr.OpenDowngrade4resok{ + OpenStateid: nfsv4_xdr.Stateid4{ + Seqid: 3, + Other: [...]byte{ + 0x2c, 0xa4, 0xce, 0xdc, + 0xfd, 0x2b, 0x5f, 0x18, + 0xe7, 0x2d, 0xf9, 0x61, + }, + }, + }, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) + }) + + t.Run("SuccessWithImmediateClose", func(t *testing.T) { + // When upgrading the file to being both readable and + // writable, a subsequent downgrade to read-only should + // close the file for writing. 
+ clock.EXPECT().Now().Return(time.Unix(1019, 0)) + clock.EXPECT().Now().Return(time.Unix(1020, 0)) + leaf.EXPECT().VirtualOpenSelf(ctx, virtual.ShareMaskRead|virtual.ShareMaskWrite, &virtual.OpenExistingOptions{}, virtual.AttributesMask(0), gomock.Any()) + clock.EXPECT().Now().Return(time.Unix(1021, 0)) + leaf.EXPECT().VirtualClose(virtual.ShareMaskRead) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "open", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{0x3d, 0x11, 0xc3, 0xe7, 0x71, 0x5c, 0xd5, 0x2a}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_OPEN{ + Opopen: nfsv4_xdr.Open4args{ + Seqid: 431, + ShareAccess: nfsv4_xdr.OPEN4_SHARE_ACCESS_BOTH, + ShareDeny: nfsv4_xdr.OPEN4_SHARE_DENY_NONE, + Owner: nfsv4_xdr.OpenOwner4{ + Clientid: 0xc14e56bbe220a24e, + Owner: []byte{0xc4, 0x85, 0x50, 0x6b, 0xa5, 0xec, 0x8e, 0x2c}, + }, + Openhow: &nfsv4_xdr.Openflag4_default{}, + Claim: &nfsv4_xdr.OpenClaim4_CLAIM_PREVIOUS{ + DelegateType: nfsv4_xdr.OPEN_DELEGATE_NONE, + }, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "open", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_OPEN{ + Opopen: &nfsv4_xdr.Open4res_NFS4_OK{ + Resok4: nfsv4_xdr.Open4resok{ + Stateid: nfsv4_xdr.Stateid4{ + Seqid: 4, + Other: [...]byte{ + 0x2c, 0xa4, 0xce, 0xdc, + 0xfd, 0x2b, 0x5f, 0x18, + 0xe7, 0x2d, 0xf9, 0x61, + }, + }, + Rflags: nfsv4_xdr.OPEN4_RESULT_LOCKTYPE_POSIX, + Attrset: nfsv4_xdr.Bitmap4{}, + Delegation: &nfsv4_xdr.OpenDelegation4_OPEN_DELEGATE_NONE{}, + }, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) + + clock.EXPECT().Now().Return(time.Unix(1022, 0)) + clock.EXPECT().Now().Return(time.Unix(1023, 0)) + leaf.EXPECT().VirtualClose(virtual.ShareMaskWrite) + + res, err = program.NfsV4Nfsproc4Compound(ctx, 
&nfsv4_xdr.Compound4args{ + Tag: "open_downgrade", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{0x3d, 0x11, 0xc3, 0xe7, 0x71, 0x5c, 0xd5, 0x2a}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_OPEN_DOWNGRADE{ + OpopenDowngrade: nfsv4_xdr.OpenDowngrade4args{ + OpenStateid: nfsv4_xdr.Stateid4{ + Seqid: 4, + Other: [...]byte{ + 0x2c, 0xa4, 0xce, 0xdc, + 0xfd, 0x2b, 0x5f, 0x18, + 0xe7, 0x2d, 0xf9, 0x61, + }, + }, + Seqid: 432, + ShareAccess: nfsv4_xdr.OPEN4_SHARE_ACCESS_READ, + ShareDeny: nfsv4_xdr.OPEN4_SHARE_DENY_NONE, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "open_downgrade", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_OPEN_DOWNGRADE{ + OpopenDowngrade: &nfsv4_xdr.OpenDowngrade4res_NFS4_OK{ + Resok4: nfsv4_xdr.OpenDowngrade4resok{ + OpenStateid: nfsv4_xdr.Stateid4{ + Seqid: 5, + Other: [...]byte{ + 0x2c, 0xa4, 0xce, 0xdc, + 0xfd, 0x2b, 0x5f, 0x18, + 0xe7, 0x2d, 0xf9, 0x61, + }, + }, + }, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) + }) + + t.Run("SuccessWithDelayedClose", func(t *testing.T) { + // If a lock has been acquired against a file, a call to + // OPEN_DOWNGRADE won't have an immediate effect. This + // is because the lock-owner state ID can still be used + // to access the file using its original share + // reservations. Calling RELEASE_LOCKOWNER should cause + // the file to be closed partially. 
+ clock.EXPECT().Now().Return(time.Unix(1022, 0)) + clock.EXPECT().Now().Return(time.Unix(1023, 0)) + leaf.EXPECT().VirtualOpenSelf(ctx, virtual.ShareMaskWrite, &virtual.OpenExistingOptions{}, virtual.AttributesMask(0), gomock.Any()) + clock.EXPECT().Now().Return(time.Unix(1024, 0)) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "open", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{0x3d, 0x11, 0xc3, 0xe7, 0x71, 0x5c, 0xd5, 0x2a}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_OPEN{ + Opopen: nfsv4_xdr.Open4args{ + Seqid: 433, + ShareAccess: nfsv4_xdr.OPEN4_SHARE_ACCESS_WRITE, + ShareDeny: nfsv4_xdr.OPEN4_SHARE_DENY_NONE, + Owner: nfsv4_xdr.OpenOwner4{ + Clientid: 0xc14e56bbe220a24e, + Owner: []byte{0xc4, 0x85, 0x50, 0x6b, 0xa5, 0xec, 0x8e, 0x2c}, + }, + Openhow: &nfsv4_xdr.Openflag4_default{}, + Claim: &nfsv4_xdr.OpenClaim4_CLAIM_PREVIOUS{ + DelegateType: nfsv4_xdr.OPEN_DELEGATE_NONE, + }, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "open", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_OPEN{ + Opopen: &nfsv4_xdr.Open4res_NFS4_OK{ + Resok4: nfsv4_xdr.Open4resok{ + Stateid: nfsv4_xdr.Stateid4{ + Seqid: 6, + Other: [...]byte{ + 0x2c, 0xa4, 0xce, 0xdc, + 0xfd, 0x2b, 0x5f, 0x18, + 0xe7, 0x2d, 0xf9, 0x61, + }, + }, + Rflags: nfsv4_xdr.OPEN4_RESULT_LOCKTYPE_POSIX, + Attrset: nfsv4_xdr.Bitmap4{}, + Delegation: &nfsv4_xdr.OpenDelegation4_OPEN_DELEGATE_NONE{}, + }, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) + + clock.EXPECT().Now().Return(time.Unix(1025, 0)) + clock.EXPECT().Now().Return(time.Unix(1026, 0)) + randomNumberGeneratorExpectRead(randomNumberGenerator, []byte{0x38, 0x51, 0x33, 0xcc, 0x1c, 0x67, 0x79, 0xb5}) + + resLock, err := program.NfsV4Nfsproc4Compound(ctx, 
&nfsv4_xdr.Compound4args{ + Tag: "lock", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{0x3d, 0x11, 0xc3, 0xe7, 0x71, 0x5c, 0xd5, 0x2a}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_LOCK{ + Oplock: nfsv4_xdr.Lock4args{ + Locktype: nfsv4_xdr.WRITE_LT, + Reclaim: false, + Offset: 100, + Length: 100, + Locker: &nfsv4_xdr.Locker4_TRUE{ + OpenOwner: nfsv4_xdr.OpenToLockOwner4{ + OpenSeqid: 434, + OpenStateid: nfsv4_xdr.Stateid4{ + Seqid: 6, + Other: [...]byte{ + 0x2c, 0xa4, 0xce, 0xdc, + 0xfd, 0x2b, 0x5f, 0x18, + 0xe7, 0x2d, 0xf9, 0x61, + }, + }, + LockSeqid: 923, + LockOwner: nfsv4_xdr.LockOwner4{ + Clientid: 0xc14e56bbe220a24e, + Owner: []byte{0x33, 0xee, 0xa6, 0xdb, 0xdd, 0xb8, 0x8c, 0xeb}, + }, + }, + }, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "lock", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_LOCK{ + Oplock: &nfsv4_xdr.Lock4res_NFS4_OK{ + Resok4: nfsv4_xdr.Lock4resok{ + LockStateid: nfsv4_xdr.Stateid4{ + Seqid: 1, + Other: [...]byte{ + 0x2c, 0xa4, 0xce, 0xdc, + 0x38, 0x51, 0x33, 0xcc, + 0x1c, 0x67, 0x79, 0xb5, + }, + }, + }, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, resLock) + + clock.EXPECT().Now().Return(time.Unix(1027, 0)) + clock.EXPECT().Now().Return(time.Unix(1028, 0)) + + res, err = program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "open_downgrade", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{0x3d, 0x11, 0xc3, 0xe7, 0x71, 0x5c, 0xd5, 0x2a}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_OPEN_DOWNGRADE{ + OpopenDowngrade: nfsv4_xdr.OpenDowngrade4args{ + OpenStateid: nfsv4_xdr.Stateid4{ + Seqid: 6, + Other: [...]byte{ + 0x2c, 0xa4, 0xce, 0xdc, + 0xfd, 0x2b, 0x5f, 0x18, + 0xe7, 0x2d, 0xf9, 0x61, + }, + }, + Seqid: 435, 
+ ShareAccess: nfsv4_xdr.OPEN4_SHARE_ACCESS_READ, + ShareDeny: nfsv4_xdr.OPEN4_SHARE_DENY_NONE, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "open_downgrade", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_OPEN_DOWNGRADE{ + OpopenDowngrade: &nfsv4_xdr.OpenDowngrade4res_NFS4_OK{ + Resok4: nfsv4_xdr.OpenDowngrade4resok{ + OpenStateid: nfsv4_xdr.Stateid4{ + Seqid: 7, + Other: [...]byte{ + 0x2c, 0xa4, 0xce, 0xdc, + 0xfd, 0x2b, 0x5f, 0x18, + 0xe7, 0x2d, 0xf9, 0x61, + }, + }, + }, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) + + clock.EXPECT().Now().Return(time.Unix(1029, 0)) + clock.EXPECT().Now().Return(time.Unix(1030, 0)) + + res, err = program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "unlock", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{0x3d, 0x11, 0xc3, 0xe7, 0x71, 0x5c, 0xd5, 0x2a}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_LOCKU{ + Oplocku: nfsv4_xdr.Locku4args{ + Locktype: nfsv4_xdr.WRITE_LT, + Seqid: 924, + LockStateid: nfsv4_xdr.Stateid4{ + Seqid: 1, + Other: [...]byte{ + 0x2c, 0xa4, 0xce, 0xdc, + 0x38, 0x51, 0x33, 0xcc, + 0x1c, 0x67, 0x79, 0xb5, + }, + }, + Offset: 100, + Length: 100, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "unlock", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_LOCKU{ + Oplocku: &nfsv4_xdr.Locku4res_NFS4_OK{ + LockStateid: nfsv4_xdr.Stateid4{ + Seqid: 2, + Other: [...]byte{ + 0x2c, 0xa4, 0xce, 0xdc, + 0x38, 0x51, 0x33, 0xcc, + 0x1c, 0x67, 0x79, 0xb5, + }, + }, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) + + clock.EXPECT().Now().Return(time.Unix(1031, 0)) + 
leaf.EXPECT().VirtualClose(virtual.ShareMaskWrite) + + res, err = program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "release_lockowner", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_RELEASE_LOCKOWNER{ + OpreleaseLockowner: nfsv4_xdr.ReleaseLockowner4args{ + LockOwner: nfsv4_xdr.LockOwner4{ + Clientid: 0xc14e56bbe220a24e, + Owner: []byte{0x33, 0xee, 0xa6, 0xdb, 0xdd, 0xb8, 0x8c, 0xeb}, + }, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "release_lockowner", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_RELEASE_LOCKOWNER{ + OpreleaseLockowner: nfsv4_xdr.ReleaseLockowner4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) + }) + + // Closing the file should release the underlying file. + clock.EXPECT().Now().Return(time.Unix(1032, 0)) + clock.EXPECT().Now().Return(time.Unix(1033, 0)) + leaf.EXPECT().VirtualClose(virtual.ShareMaskRead) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "close", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{0x3d, 0x11, 0xc3, 0xe7, 0x71, 0x5c, 0xd5, 0x2a}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_CLOSE{ + Opclose: nfsv4_xdr.Close4args{ + Seqid: 436, + OpenStateid: nfsv4_xdr.Stateid4{ + Seqid: 7, + Other: [...]byte{ + 0x2c, 0xa4, 0xce, 0xdc, + 0xfd, 0x2b, 0x5f, 0x18, + 0xe7, 0x2d, 0xf9, 0x61, + }, + }, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "close", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_CLOSE{ + Opclose: &nfsv4_xdr.Close4res_NFS4_OK{ + OpenStateid: nfsv4_xdr.Stateid4{ + Seqid: 8, + Other: [...]byte{ + 0x2c, 0xa4, 0xce, 0xdc, + 0xfd, 0x2b, 0x5f, 0x18, + 0xe7, 0x2d, 0xf9, 0x61, + }, + }, + }, + }, + }, + Status: 
nfsv4_xdr.NFS4_OK, + }, res) +} + +// TODO: PUTFH +// TODO: PUTPUBFH +// TODO: PUTROOTFH + +func TestBaseProgramCompound_OP_READ(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + rootDirectory := mock.NewMockVirtualDirectory(ctrl) + rootDirectory.EXPECT().VirtualGetAttributes(gomock.Any(), virtual.AttributesMaskFileHandle, gomock.Any()). + Do(func(ctx context.Context, requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetFileHandle([]byte{0x37, 0xfd, 0xd0, 0xfc, 0x45, 0x2b, 0x79, 0x32}) + }) + handleResolver := mock.NewMockHandleResolver(ctrl) + randomNumberGenerator := mock.NewMockSingleThreadedGenerator(ctrl) + rebootVerifier := nfsv4_xdr.Verifier4{0x58, 0x61, 0xb4, 0xff, 0x82, 0x40, 0x8f, 0x1a} + stateIDOtherPrefix := [...]byte{0x55, 0xc7, 0xc6, 0xa0} + clock := mock.NewMockClock(ctrl) + program := nfsv4.NewBaseProgram(rootDirectory, handleResolver.Call, randomNumberGenerator, rebootVerifier, stateIDOtherPrefix, clock, 2*time.Minute, time.Minute) + + t.Run("StaleStateID", func(t *testing.T) { + // Providing a state ID that uses an unknown prefix + // should cause READ to fail with NFS4ERR_STALE_STATEID, + // as it likely refers to a state ID from before a + // restart. 
+ leaf := mock.NewMockVirtualLeaf(ctrl) + clock.EXPECT().Now().Return(time.Unix(1000, 0)) + handleResolverExpectCall(t, handleResolver, []byte{1, 2, 3}, virtual.DirectoryChild{}.FromLeaf(leaf), virtual.StatusOK) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "read", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{1, 2, 3}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_READ{ + Opread: nfsv4_xdr.Read4args{ + Stateid: nfsv4_xdr.Stateid4{ + Seqid: 0xce56be4e, + Other: [...]byte{ + 0x88, 0xa8, 0x5a, 0x60, + 0x01, 0xa8, 0x3e, 0xff, + 0x36, 0xe4, 0xcf, 0xd8, + }, + }, + Offset: 1000, + Count: 100, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "read", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_READ{ + Opread: &nfsv4_xdr.Read4res_default{ + Status: nfsv4_xdr.NFS4ERR_STALE_STATEID, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_STALE_STATEID, + }, res) + }) + + t.Run("BadStateID", func(t *testing.T) { + // The prefix of the state ID matches, but it does not + // correspond to a known value. 
+ leaf := mock.NewMockVirtualLeaf(ctrl) + clock.EXPECT().Now().Return(time.Unix(1001, 0)) + clock.EXPECT().Now().Return(time.Unix(1002, 0)) + handleResolverExpectCall(t, handleResolver, []byte{1, 2, 3}, virtual.DirectoryChild{}.FromLeaf(leaf), virtual.StatusOK) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "read", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{1, 2, 3}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_READ{ + Opread: nfsv4_xdr.Read4args{ + Stateid: nfsv4_xdr.Stateid4{ + Seqid: 0xce56be4e, + Other: [...]byte{ + 0x55, 0xc7, 0xc6, 0xa0, + 0xdf, 0xa1, 0xb4, 0x3b, + 0xb2, 0x4c, 0x2b, 0x5f, + }, + }, + Offset: 1000, + Count: 100, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "read", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_READ{ + Opread: &nfsv4_xdr.Read4res_default{ + Status: nfsv4_xdr.NFS4ERR_BAD_STATEID, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_BAD_STATEID, + }, res) + }) + + t.Run("BadReadBypassStateID", func(t *testing.T) { + // The standard requires that if the "other" field in + // the state ID is all zeroes or all ones, the "seqid" + // field must match. 
+ leaf := mock.NewMockVirtualLeaf(ctrl) + clock.EXPECT().Now().Return(time.Unix(1003, 0)) + handleResolverExpectCall(t, handleResolver, []byte{1, 2, 3}, virtual.DirectoryChild{}.FromLeaf(leaf), virtual.StatusOK) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "read", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{1, 2, 3}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_READ{ + Opread: nfsv4_xdr.Read4args{ + Stateid: nfsv4_xdr.Stateid4{ + Seqid: 123, + Other: [...]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + }, + }, + Offset: 1000, + Count: 100, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "read", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_READ{ + Opread: &nfsv4_xdr.Read4res_default{ + Status: nfsv4_xdr.NFS4ERR_BAD_STATEID, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_BAD_STATEID, + }, res) + }) + + t.Run("AnonymousStateIDNoFileHandle", func(t *testing.T) { + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "read", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_READ{ + Opread: nfsv4_xdr.Read4args{ + Offset: 1000, + Count: 100, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "read", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_READ{ + Opread: &nfsv4_xdr.Read4res_default{ + Status: nfsv4_xdr.NFS4ERR_NOFILEHANDLE, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_NOFILEHANDLE, + }, res) + }) + + t.Run("AnonymousStateIDOpenFailure", func(t *testing.T) { + // A state ID consisting exclusively of zero bits is + // referred to as the anonymous state ID. It should + // cause the underlying file to be opened temporarily. 
+ // Failures when doing so should propagate. + leaf := mock.NewMockVirtualLeaf(ctrl) + clock.EXPECT().Now().Return(time.Unix(1004, 0)) + handleResolverExpectCall(t, handleResolver, []byte{4, 5, 6}, virtual.DirectoryChild{}.FromLeaf(leaf), virtual.StatusOK) + leaf.EXPECT().VirtualOpenSelf(ctx, virtual.ShareMaskRead, &virtual.OpenExistingOptions{}, virtual.AttributesMask(0), gomock.Any()).Return(virtual.StatusErrIO) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "read", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{4, 5, 6}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_READ{ + Opread: nfsv4_xdr.Read4args{ + Offset: 1000, + Count: 100, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "read", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_READ{ + Opread: &nfsv4_xdr.Read4res_default{ + Status: nfsv4_xdr.NFS4ERR_IO, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_IO, + }, res) + }) + + t.Run("AnonymousStateIDReadFailure", func(t *testing.T) { + leaf := mock.NewMockVirtualLeaf(ctrl) + clock.EXPECT().Now().Return(time.Unix(1005, 0)) + handleResolverExpectCall(t, handleResolver, []byte{4, 5, 6}, virtual.DirectoryChild{}.FromLeaf(leaf), virtual.StatusOK) + gomock.InOrder( + leaf.EXPECT().VirtualOpenSelf(ctx, virtual.ShareMaskRead, &virtual.OpenExistingOptions{}, virtual.AttributesMask(0), gomock.Any()), + leaf.EXPECT().VirtualRead(gomock.Len(100), uint64(1000)).Return(0, false, virtual.StatusErrIO), + leaf.EXPECT().VirtualClose(virtual.ShareMaskRead)) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "read", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{4, 5, 6}, + }, + }, + 
&nfsv4_xdr.NfsArgop4_OP_READ{ + Opread: nfsv4_xdr.Read4args{ + Offset: 1000, + Count: 100, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "read", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_READ{ + Opread: &nfsv4_xdr.Read4res_default{ + Status: nfsv4_xdr.NFS4ERR_IO, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_IO, + }, res) + }) + + t.Run("AnonymousStateIDSuccess", func(t *testing.T) { + leaf := mock.NewMockVirtualLeaf(ctrl) + clock.EXPECT().Now().Return(time.Unix(1006, 0)) + handleResolverExpectCall(t, handleResolver, []byte{4, 5, 6}, virtual.DirectoryChild{}.FromLeaf(leaf), virtual.StatusOK) + gomock.InOrder( + leaf.EXPECT().VirtualOpenSelf(ctx, virtual.ShareMaskRead, &virtual.OpenExistingOptions{}, virtual.AttributesMask(0), gomock.Any()), + leaf.EXPECT().VirtualRead(gomock.Len(100), uint64(1000)). + DoAndReturn(func(buf []byte, offset uint64) (int, bool, virtual.Status) { + return copy(buf, "Hello"), true, virtual.StatusOK + }), + leaf.EXPECT().VirtualClose(virtual.ShareMaskRead)) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "read", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{4, 5, 6}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_READ{ + Opread: nfsv4_xdr.Read4args{ + Offset: 1000, + Count: 100, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "read", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_READ{ + Opread: &nfsv4_xdr.Read4res_NFS4_OK{ + Resok4: nfsv4_xdr.Read4resok{ + Eof: true, + Data: []byte("Hello"), + }, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) + }) + + // The remainder of the test assumes the 
availability of a client ID. + clock.EXPECT().Now().Return(time.Unix(1007, 0)) + clock.EXPECT().Now().Return(time.Unix(1008, 0)) + setClientIDForTesting(ctx, t, randomNumberGenerator, program, 0xf86e57129c7a628a) + + // Open a file for reading, but don't confirm it yet. + leaf := mock.NewMockVirtualLeaf(ctrl) + clock.EXPECT().Now().Return(time.Unix(1009, 0)) + clock.EXPECT().Now().Return(time.Unix(1010, 0)) + openUnconfirmedFileForTesting( + ctx, + t, + randomNumberGenerator, + program, + rootDirectory, + leaf, + nfsv4_xdr.NfsFh4{0xd8, 0x47, 0x07, 0x55, 0x44, 0x96, 0x88, 0x8d}, + /* shortClientID = */ 0xf86e57129c7a628a, + /* seqID = */ 7010, + /* stateIDOther = */ [...]byte{ + 0x55, 0xc7, 0xc6, 0xa0, + 0xe0, 0x17, 0x83, 0x9c, + 0x17, 0x7d, 0xa2, 0x16, + }) + + t.Run("UnconfirmedStateID", func(t *testing.T) { + // The state ID belongs to an open-owner that has not + // been confirmed using OPEN_CONFIRM yet. The READ + // operation should not be permitted. + clock.EXPECT().Now().Return(time.Unix(1011, 0)) + clock.EXPECT().Now().Return(time.Unix(1012, 0)) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "read", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{0xd8, 0x47, 0x07, 0x55, 0x44, 0x96, 0x88, 0x8d}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_READ{ + Opread: nfsv4_xdr.Read4args{ + Stateid: nfsv4_xdr.Stateid4{ + Seqid: 1, + Other: [...]byte{ + 0x55, 0xc7, 0xc6, 0xa0, + 0xe0, 0x17, 0x83, 0x9c, + 0x17, 0x7d, 0xa2, 0x16, + }, + }, + Offset: 1000, + Count: 100, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "read", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_READ{ + Opread: &nfsv4_xdr.Read4res_default{ + Status: nfsv4_xdr.NFS4ERR_BAD_STATEID, + }, + }, + }, + Status: 
nfsv4_xdr.NFS4ERR_BAD_STATEID, + }, res) + }) + + // Confirm the open-owner for the remainder of the test. + clock.EXPECT().Now().Return(time.Unix(1013, 0)) + clock.EXPECT().Now().Return(time.Unix(1014, 0)) + openConfirmForTesting( + ctx, + t, + randomNumberGenerator, + program, + nfsv4_xdr.NfsFh4{0xd8, 0x47, 0x07, 0x55, 0x44, 0x96, 0x88, 0x8d}, + /* seqID = */ 7011, + /* stateIDOther = */ [...]byte{ + 0x55, 0xc7, 0xc6, 0xa0, + 0xe0, 0x17, 0x83, 0x9c, + 0x17, 0x7d, 0xa2, 0x16, + }) + + t.Run("OldStateID", func(t *testing.T) { + // The OPEN_CONFIRM call above increased the sequence ID + // of the state ID to 2. Calling READ with a lower value + // should cause us to return NFS4ERR_OLD_STATEID. + clock.EXPECT().Now().Return(time.Unix(1015, 0)) + clock.EXPECT().Now().Return(time.Unix(1016, 0)) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "read", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{0xd8, 0x47, 0x07, 0x55, 0x44, 0x96, 0x88, 0x8d}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_READ{ + Opread: nfsv4_xdr.Read4args{ + Stateid: nfsv4_xdr.Stateid4{ + Seqid: 1, + Other: [...]byte{ + 0x55, 0xc7, 0xc6, 0xa0, + 0xe0, 0x17, 0x83, 0x9c, + 0x17, 0x7d, 0xa2, 0x16, + }, + }, + Offset: 1000, + Count: 100, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "read", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_READ{ + Opread: &nfsv4_xdr.Read4res_default{ + Status: nfsv4_xdr.NFS4ERR_OLD_STATEID, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_OLD_STATEID, + }, res) + }) + + t.Run("FuturisticStateID", func(t *testing.T) { + // Similarly, using sequence ID 3 is too new, as it's + // never been handed out by the server. 
+ clock.EXPECT().Now().Return(time.Unix(1017, 0)) + clock.EXPECT().Now().Return(time.Unix(1018, 0)) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "read", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{0xd8, 0x47, 0x07, 0x55, 0x44, 0x96, 0x88, 0x8d}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_READ{ + Opread: nfsv4_xdr.Read4args{ + Stateid: nfsv4_xdr.Stateid4{ + Seqid: 3, + Other: [...]byte{ + 0x55, 0xc7, 0xc6, 0xa0, + 0xe0, 0x17, 0x83, 0x9c, + 0x17, 0x7d, 0xa2, 0x16, + }, + }, + Offset: 1000, + Count: 100, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "read", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_READ{ + Opread: &nfsv4_xdr.Read4res_default{ + Status: nfsv4_xdr.NFS4ERR_BAD_STATEID, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_BAD_STATEID, + }, res) + }) + + t.Run("OpenStateIDSuccess", func(t *testing.T) { + clock.EXPECT().Now().Return(time.Unix(1019, 0)) + clock.EXPECT().Now().Return(time.Unix(1020, 0)) + clock.EXPECT().Now().Return(time.Unix(1021, 0)) + leaf.EXPECT().VirtualRead(gomock.Len(100), uint64(1000)). 
+ DoAndReturn(func(buf []byte, offset uint64) (int, bool, virtual.Status) { + return copy(buf, "Hello"), true, virtual.StatusOK + }) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "read", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{0xd8, 0x47, 0x07, 0x55, 0x44, 0x96, 0x88, 0x8d}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_READ{ + Opread: nfsv4_xdr.Read4args{ + Stateid: nfsv4_xdr.Stateid4{ + Seqid: 2, + Other: [...]byte{ + 0x55, 0xc7, 0xc6, 0xa0, + 0xe0, 0x17, 0x83, 0x9c, + 0x17, 0x7d, 0xa2, 0x16, + }, + }, + Offset: 1000, + Count: 100, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "read", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_READ{ + Opread: &nfsv4_xdr.Read4res_NFS4_OK{ + Resok4: nfsv4_xdr.Read4resok{ + Eof: true, + Data: []byte("Hello"), + }, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) + }) + + // Acquire a lock on the file to test the behaviour of READ when + // called with a lock state ID. 
+ clock.EXPECT().Now().Return(time.Unix(1022, 0)) + clock.EXPECT().Now().Return(time.Unix(1023, 0)) + randomNumberGeneratorExpectRead(randomNumberGenerator, []byte{0xe8, 0xf2, 0xf2, 0x43, 0xc1, 0x91, 0x76, 0x91}) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "lock", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{0xd8, 0x47, 0x07, 0x55, 0x44, 0x96, 0x88, 0x8d}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_LOCK{ + Oplock: nfsv4_xdr.Lock4args{ + Locktype: nfsv4_xdr.WRITE_LT, + Reclaim: false, + Offset: 100, + Length: 100, + Locker: &nfsv4_xdr.Locker4_TRUE{ + OpenOwner: nfsv4_xdr.OpenToLockOwner4{ + OpenSeqid: 7012, + OpenStateid: nfsv4_xdr.Stateid4{ + Seqid: 2, + Other: [...]byte{ + 0x55, 0xc7, 0xc6, 0xa0, + 0xe0, 0x17, 0x83, 0x9c, + 0x17, 0x7d, 0xa2, 0x16, + }, + }, + LockSeqid: 9640, + LockOwner: nfsv4_xdr.LockOwner4{ + Clientid: 0xf86e57129c7a628a, + Owner: []byte{0x58, 0xa8, 0x53, 0x4c, 0xf8, 0xe8, 0xaa, 0xf3}, + }, + }, + }, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "lock", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_LOCK{ + Oplock: &nfsv4_xdr.Lock4res_NFS4_OK{ + Resok4: nfsv4_xdr.Lock4resok{ + LockStateid: nfsv4_xdr.Stateid4{ + Seqid: 1, + Other: [...]byte{ + 0x55, 0xc7, 0xc6, 0xa0, + 0xe8, 0xf2, 0xf2, 0x43, + 0xc1, 0x91, 0x76, 0x91, + }, + }, + }, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) + + t.Run("LockStateIDSuccess", func(t *testing.T) { + // It's also permitted to call READ using a lock state ID. + clock.EXPECT().Now().Return(time.Unix(1024, 0)) + clock.EXPECT().Now().Return(time.Unix(1025, 0)) + clock.EXPECT().Now().Return(time.Unix(1026, 0)) + leaf.EXPECT().VirtualRead(gomock.Len(100), uint64(1000)). 
+ DoAndReturn(func(buf []byte, offset uint64) (int, bool, virtual.Status) { + return copy(buf, "Hello"), true, virtual.StatusOK + }) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "read", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{0xd8, 0x47, 0x07, 0x55, 0x44, 0x96, 0x88, 0x8d}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_READ{ + Opread: nfsv4_xdr.Read4args{ + Stateid: nfsv4_xdr.Stateid4{ + Seqid: 1, + Other: [...]byte{ + 0x55, 0xc7, 0xc6, 0xa0, + 0xe8, 0xf2, 0xf2, 0x43, + 0xc1, 0x91, 0x76, 0x91, + }, + }, + Offset: 1000, + Count: 100, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "read", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_READ{ + Opread: &nfsv4_xdr.Read4res_NFS4_OK{ + Resok4: nfsv4_xdr.Read4resok{ + Eof: true, + Data: []byte("Hello"), + }, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) + }) + + // Close the file for the remainder of the test. 
+ clock.EXPECT().Now().Return(time.Unix(1027, 0)) + clock.EXPECT().Now().Return(time.Unix(1028, 0)) + leaf.EXPECT().VirtualClose(virtual.ShareMaskRead) + + res, err = program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "close", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{0xd8, 0x47, 0x07, 0x55, 0x44, 0x96, 0x88, 0x8d}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_CLOSE{ + Opclose: nfsv4_xdr.Close4args{ + Seqid: 7013, + OpenStateid: nfsv4_xdr.Stateid4{ + Seqid: 2, + Other: [...]byte{ + 0x55, 0xc7, 0xc6, 0xa0, + 0xe0, 0x17, 0x83, 0x9c, + 0x17, 0x7d, 0xa2, 0x16, + }, + }, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "close", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_CLOSE{ + Opclose: &nfsv4_xdr.Close4res_NFS4_OK{ + OpenStateid: nfsv4_xdr.Stateid4{ + Seqid: 3, + Other: [...]byte{ + 0x55, 0xc7, 0xc6, 0xa0, + 0xe0, 0x17, 0x83, 0x9c, + 0x17, 0x7d, 0xa2, 0x16, + }, + }, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) + + t.Run("ClosedStateIDBefore", func(t *testing.T) { + // Normally a subsequent operation on a state ID with a + // sequence ID that's too low should return + // NFS4ERR_OLD_STATEID. Because the state ID has been + // closed altogether, we should see NFS4ERR_BAD_STATEID + // instead. 
+ clock.EXPECT().Now().Return(time.Unix(1029, 0)) + clock.EXPECT().Now().Return(time.Unix(1030, 0)) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "read", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{0xd8, 0x47, 0x07, 0x55, 0x44, 0x96, 0x88, 0x8d}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_READ{ + Opread: nfsv4_xdr.Read4args{ + Stateid: nfsv4_xdr.Stateid4{ + Seqid: 2, + Other: [...]byte{ + 0x55, 0xc7, 0xc6, 0xa0, + 0xe0, 0x17, 0x83, 0x9c, + 0x17, 0x7d, 0xa2, 0x16, + }, + }, + Offset: 1000, + Count: 100, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "read", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_READ{ + Opread: &nfsv4_xdr.Read4res_default{ + Status: nfsv4_xdr.NFS4ERR_BAD_STATEID, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_BAD_STATEID, + }, res) + }) + + t.Run("ClosedStateIDAfter", func(t *testing.T) { + // Similar to the above, using the file with the exact + // state ID should also return NFS4ERR_BAD_STATEID. 
+ clock.EXPECT().Now().Return(time.Unix(1030, 0)) + clock.EXPECT().Now().Return(time.Unix(1031, 0)) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "read", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{0xd8, 0x47, 0x07, 0x55, 0x44, 0x96, 0x88, 0x8d}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_READ{ + Opread: nfsv4_xdr.Read4args{ + Stateid: nfsv4_xdr.Stateid4{ + Seqid: 3, + Other: [...]byte{ + 0x55, 0xc7, 0xc6, 0xa0, + 0xe0, 0x17, 0x83, 0x9c, + 0x17, 0x7d, 0xa2, 0x16, + }, + }, + Offset: 1000, + Count: 100, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "read", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_READ{ + Opread: &nfsv4_xdr.Read4res_default{ + Status: nfsv4_xdr.NFS4ERR_BAD_STATEID, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_BAD_STATEID, + }, res) + }) +} + +func TestBaseProgramCompound_OP_READDIR(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + rootDirectory := mock.NewMockVirtualDirectory(ctrl) + rootDirectory.EXPECT().VirtualGetAttributes(gomock.Any(), virtual.AttributesMaskFileHandle, gomock.Any()). 
+ Do(func(ctx context.Context, requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetFileHandle([]byte{0x52, 0x5e, 0x17, 0x6e, 0xad, 0x2f, 0xc3, 0xf9}) + }) + handleResolver := mock.NewMockHandleResolver(ctrl) + randomNumberGenerator := mock.NewMockSingleThreadedGenerator(ctrl) + rebootVerifier := nfsv4_xdr.Verifier4{0x80, 0x29, 0x6e, 0xe3, 0x1a, 0xf1, 0xec, 0x41} + stateIDOtherPrefix := [...]byte{0xce, 0x11, 0x76, 0xe8} + clock := mock.NewMockClock(ctrl) + program := nfsv4.NewBaseProgram(rootDirectory, handleResolver.Call, randomNumberGenerator, rebootVerifier, stateIDOtherPrefix, clock, 2*time.Minute, time.Minute) + + t.Run("NoFileHandle", func(t *testing.T) { + // Calling READDIR without a file handle should fail. + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "readdir", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_READDIR{ + Opreaddir: nfsv4_xdr.Readdir4args{ + Maxcount: 1000, + AttrRequest: nfsv4_xdr.Bitmap4{ + (1 << nfsv4_xdr.FATTR4_TYPE) | + (1 << nfsv4_xdr.FATTR4_FILEID), + }, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "readdir", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_READDIR{ + Opreaddir: &nfsv4_xdr.Readdir4res_default{ + Status: nfsv4_xdr.NFS4ERR_NOFILEHANDLE, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_NOFILEHANDLE, + }, res) + }) + + t.Run("NotDir", func(t *testing.T) { + // Calling READDIR with a non-directory file handle + // should fail. 
+ leaf := mock.NewMockVirtualLeaf(ctrl) + rootDirectory.EXPECT().VirtualLookup( + ctx, + path.MustNewComponent("file"), + virtual.AttributesMaskFileHandle, + gomock.Any(), + ).DoAndReturn(func(ctx context.Context, name path.Component, requested virtual.AttributesMask, attributes *virtual.Attributes) (virtual.DirectoryChild, virtual.Status) { + attributes.SetFileHandle([]byte{0x1c, 0xae, 0xab, 0x22, 0xdf, 0xf4, 0x9e, 0x93}) + return virtual.DirectoryChild{}.FromLeaf(leaf), virtual.StatusOK + }) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "readdir", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_LOOKUP{ + Oplookup: nfsv4_xdr.Lookup4args{ + Objname: "file", + }, + }, + &nfsv4_xdr.NfsArgop4_OP_READDIR{ + Opreaddir: nfsv4_xdr.Readdir4args{ + Maxcount: 1000, + AttrRequest: nfsv4_xdr.Bitmap4{ + (1 << nfsv4_xdr.FATTR4_TYPE) | + (1 << nfsv4_xdr.FATTR4_FILEID), + }, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "readdir", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_LOOKUP{ + Oplookup: nfsv4_xdr.Lookup4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_READDIR{ + Opreaddir: &nfsv4_xdr.Readdir4res_default{ + Status: nfsv4_xdr.NFS4ERR_NOTDIR, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_NOTDIR, + }, res) + }) + + t.Run("EmptyDirectory", func(t *testing.T) { + // Returning no results should cause EOF to be set. 
+ rootDirectory.EXPECT().VirtualReadDir( + ctx, + uint64(0), + virtual.AttributesMaskFileType|virtual.AttributesMaskInodeNumber, + gomock.Any(), + ).Return(virtual.StatusOK) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "readdir", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_READDIR{ + Opreaddir: nfsv4_xdr.Readdir4args{ + Maxcount: 1000, + AttrRequest: nfsv4_xdr.Bitmap4{ + (1 << nfsv4_xdr.FATTR4_TYPE) | + (1 << nfsv4_xdr.FATTR4_FILEID), + }, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "readdir", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_READDIR{ + Opreaddir: &nfsv4_xdr.Readdir4res_NFS4_OK{ + Resok4: nfsv4_xdr.Readdir4resok{ + Cookieverf: rebootVerifier, + Reply: nfsv4_xdr.Dirlist4{ + Eof: true, + }, + }, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) + }) + + t.Run("TooSmall", func(t *testing.T) { + // The requested entry is going to be 56 bytes in size. + // If READDIR is called with maxcount set to 59, the + // request should fail with NFS4ERR_TOOSMALL. + rootDirectory.EXPECT().VirtualReadDir( + ctx, + uint64(0), + virtual.AttributesMaskFileType|virtual.AttributesMaskInodeNumber, + gomock.Any(), + ).DoAndReturn(func(ctx context.Context, firstCookie uint64, attributesMask virtual.AttributesMask, reporter virtual.DirectoryEntryReporter) virtual.Status { + leaf := mock.NewMockVirtualLeaf(ctrl) + require.False(t, reporter.ReportEntry( + uint64(1), + path.MustNewComponent("file"), + virtual.DirectoryChild{}.FromLeaf(leaf), + (&virtual.Attributes{}). + SetFileType(filesystem.FileTypeRegularFile). 
+ SetInodeNumber(123))) + return virtual.StatusOK + }) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "readdir", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_READDIR{ + Opreaddir: nfsv4_xdr.Readdir4args{ + Maxcount: 59, + AttrRequest: nfsv4_xdr.Bitmap4{ + (1 << nfsv4_xdr.FATTR4_TYPE) | + (1 << nfsv4_xdr.FATTR4_FILEID), + }, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "readdir", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_READDIR{ + Opreaddir: &nfsv4_xdr.Readdir4res_default{ + Status: nfsv4_xdr.NFS4ERR_TOOSMALL, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_TOOSMALL, + }, res) + }) + + t.Run("JustBigEnough", func(t *testing.T) { + // The same test as the one above, but with a maxcount + // of 60 bytes. This should make the call succeed. + rootDirectory.EXPECT().VirtualReadDir( + ctx, + uint64(0), + virtual.AttributesMaskFileType|virtual.AttributesMaskInodeNumber, + gomock.Any(), + ).DoAndReturn(func(ctx context.Context, firstCookie uint64, attributesMask virtual.AttributesMask, reporter virtual.DirectoryEntryReporter) virtual.Status { + leaf := mock.NewMockVirtualLeaf(ctrl) + require.True(t, reporter.ReportEntry( + uint64(1), + path.MustNewComponent("file"), + virtual.DirectoryChild{}.FromLeaf(leaf), + (&virtual.Attributes{}). + SetFileType(filesystem.FileTypeRegularFile). 
+ SetInodeNumber(123))) + return virtual.StatusOK + }) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "readdir", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_READDIR{ + Opreaddir: nfsv4_xdr.Readdir4args{ + Maxcount: 60, + AttrRequest: nfsv4_xdr.Bitmap4{ + (1 << nfsv4_xdr.FATTR4_TYPE) | + (1 << nfsv4_xdr.FATTR4_FILEID), + }, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "readdir", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_READDIR{ + Opreaddir: &nfsv4_xdr.Readdir4res_NFS4_OK{ + Resok4: nfsv4_xdr.Readdir4resok{ + Cookieverf: rebootVerifier, + Reply: nfsv4_xdr.Dirlist4{ + Eof: true, + Entries: &nfsv4_xdr.Entry4{ + Cookie: 3, + Name: "file", + Attrs: nfsv4_xdr.Fattr4{ + Attrmask: nfsv4_xdr.Bitmap4{ + (1 << nfsv4_xdr.FATTR4_TYPE) | + (1 << nfsv4_xdr.FATTR4_FILEID), + }, + AttrVals: nfsv4_xdr.Attrlist4{ + // FATTR4_TYPE == NF4REG. + 0x00, 0x00, 0x00, 0x01, + // FATTR4_FILEID == 123. + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7b, + }, + }, + }, + }, + }, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) + }) + + t.Run("IncorrectVerifier", func(t *testing.T) { + // Passing in a cookie rebootVerifier that doesn't match with + // what was handed out previously should cause an + // NFS4ERR_NOT_SAME error. 
+ res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "readdir", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_READDIR{ + Opreaddir: nfsv4_xdr.Readdir4args{ + Cookie: 72, + Cookieverf: nfsv4_xdr.Verifier4{ + 0xb, 0xa, 0xd, 0xc, 0x00, 0xc, 0x1, 0xe, + }, + Maxcount: 1000, + AttrRequest: nfsv4_xdr.Bitmap4{ + (1 << nfsv4_xdr.FATTR4_TYPE) | + (1 << nfsv4_xdr.FATTR4_FILEID), + }, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "readdir", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_READDIR{ + Opreaddir: &nfsv4_xdr.Readdir4res_default{ + Status: nfsv4_xdr.NFS4ERR_NOT_SAME, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_NOT_SAME, + }, res) + }) +} + +func TestBaseProgramCompound_OP_READLINK(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + rootDirectory := mock.NewMockVirtualDirectory(ctrl) + rootDirectory.EXPECT().VirtualGetAttributes(gomock.Any(), virtual.AttributesMaskFileHandle, gomock.Any()). + Do(func(ctx context.Context, requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetFileHandle([]byte{0xe7, 0x09, 0xea, 0x64, 0xd4, 0x5a, 0xf2, 0x87}) + }) + handleResolver := mock.NewMockHandleResolver(ctrl) + randomNumberGenerator := mock.NewMockSingleThreadedGenerator(ctrl) + rebootVerifier := nfsv4_xdr.Verifier4{0xa8, 0x90, 0x8c, 0x43, 0xb7, 0xd6, 0x0f, 0x74} + stateIDOtherPrefix := [...]byte{0x46, 0x64, 0x44, 0x31} + clock := mock.NewMockClock(ctrl) + program := nfsv4.NewBaseProgram(rootDirectory, handleResolver.Call, randomNumberGenerator, rebootVerifier, stateIDOtherPrefix, clock, 2*time.Minute, time.Minute) + + t.Run("NoFileHandle", func(t *testing.T) { + // Calling READLINK without a file handle should fail. 
+ res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "readlink", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_READLINK{}, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "readlink", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_READLINK{ + Opreadlink: &nfsv4_xdr.Readlink4res_default{ + Status: nfsv4_xdr.NFS4ERR_NOFILEHANDLE, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_NOFILEHANDLE, + }, res) + }) + + t.Run("Directory", func(t *testing.T) { + // Even though most file operations will return + // NFS4ERR_ISDIR when called against a directory, + // READLINK is required to return NFS4ERR_INVAL. + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "readlink", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_READLINK{}, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "readlink", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_READLINK{ + Opreadlink: &nfsv4_xdr.Readlink4res_default{ + Status: nfsv4_xdr.NFS4ERR_INVAL, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_INVAL, + }, res) + }) + + t.Run("Failure", func(t *testing.T) { + leaf := mock.NewMockVirtualLeaf(ctrl) + clock.EXPECT().Now().Return(time.Unix(1000, 0)) + handleResolverExpectCall(t, handleResolver, []byte{1, 2, 3}, virtual.DirectoryChild{}.FromLeaf(leaf), virtual.StatusOK) + leaf.EXPECT().VirtualReadlink(ctx).Return(nil, virtual.StatusErrIO) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "readlink", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{1, 2, 3}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_READLINK{}, + }, + }) + require.NoError(t, err) + require.Equal(t, 
&nfsv4_xdr.Compound4res{ + Tag: "readlink", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_READLINK{ + Opreadlink: &nfsv4_xdr.Readlink4res_default{ + Status: nfsv4_xdr.NFS4ERR_IO, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_IO, + }, res) + }) + + t.Run("Success", func(t *testing.T) { + leaf := mock.NewMockVirtualLeaf(ctrl) + clock.EXPECT().Now().Return(time.Unix(1001, 0)) + handleResolverExpectCall(t, handleResolver, []byte{4, 5, 6}, virtual.DirectoryChild{}.FromLeaf(leaf), virtual.StatusOK) + leaf.EXPECT().VirtualReadlink(ctx).Return([]byte("target"), virtual.StatusOK) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "readlink", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{4, 5, 6}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_READLINK{}, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "readlink", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_READLINK{ + Opreadlink: &nfsv4_xdr.Readlink4res_NFS4_OK{ + Resok4: nfsv4_xdr.Readlink4resok{ + Link: nfsv4_xdr.Linktext4("target"), + }, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) + }) +} + +func TestBaseProgramCompound_OP_RELEASE_LOCKOWNER(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + rootDirectory := mock.NewMockVirtualDirectory(ctrl) + rootDirectory.EXPECT().VirtualGetAttributes(gomock.Any(), virtual.AttributesMaskFileHandle, gomock.Any()). 
+ Do(func(ctx context.Context, requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetFileHandle([]byte{0x8e, 0x16, 0xec, 0x1a, 0x60, 0x6a, 0x9d, 0x3d}) + }) + handleResolver := mock.NewMockHandleResolver(ctrl) + randomNumberGenerator := mock.NewMockSingleThreadedGenerator(ctrl) + rebootVerifier := nfsv4_xdr.Verifier4{0x27, 0xe1, 0xcd, 0x6a, 0x3f, 0xf8, 0xb7, 0xb2} + stateIDOtherPrefix := [...]byte{0xab, 0x4f, 0xf6, 0x1c} + clock := mock.NewMockClock(ctrl) + program := nfsv4.NewBaseProgram(rootDirectory, handleResolver.Call, randomNumberGenerator, rebootVerifier, stateIDOtherPrefix, clock, 2*time.Minute, time.Minute) + + t.Run("StaleClientID", func(t *testing.T) { + // Calling RELEASE_LOCKOWNER against a non-existent + // short client ID should result in failure. + clock.EXPECT().Now().Return(time.Unix(1000, 0)) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "release_lockowner", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_RELEASE_LOCKOWNER{ + OpreleaseLockowner: nfsv4_xdr.ReleaseLockowner4args{ + LockOwner: nfsv4_xdr.LockOwner4{ + Clientid: 0xf7fdfdc38f805b08, + Owner: []byte{0xac, 0xce, 0x68, 0x2f, 0x60, 0x36, 0x4f, 0xbf}, + }, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "release_lockowner", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_RELEASE_LOCKOWNER{ + OpreleaseLockowner: nfsv4_xdr.ReleaseLockowner4res{ + Status: nfsv4_xdr.NFS4ERR_STALE_CLIENTID, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_STALE_CLIENTID, + }, res) + }) + + // The remainder of the test assumes the availability of a client ID. + clock.EXPECT().Now().Return(time.Unix(1001, 0)) + clock.EXPECT().Now().Return(time.Unix(1002, 0)) + setClientIDForTesting(ctx, t, randomNumberGenerator, program, 0xf7fdfdc38f805b08) + + t.Run("SuccessNoOp", func(t *testing.T) { + // Now that a client ID has been allocated, the + // RELEASE_LOCKOWNER call should succeed. 
Because we + // haven't acquired any locks yet, it should still be a + // no-op. + clock.EXPECT().Now().Return(time.Unix(1003, 0)) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "release_lockowner", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_RELEASE_LOCKOWNER{ + OpreleaseLockowner: nfsv4_xdr.ReleaseLockowner4args{ + LockOwner: nfsv4_xdr.LockOwner4{ + Clientid: 0xf7fdfdc38f805b08, + Owner: []byte{0xad, 0x75, 0x31, 0x9f, 0xe7, 0xef, 0x5a, 0x00}, + }, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "release_lockowner", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_RELEASE_LOCKOWNER{ + OpreleaseLockowner: nfsv4_xdr.ReleaseLockowner4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) + }) + + // Open a file and acquire a lock on it for the remainder of + // this test. + leaf := mock.NewMockVirtualLeaf(ctrl) + clock.EXPECT().Now().Return(time.Unix(1004, 0)) + clock.EXPECT().Now().Return(time.Unix(1005, 0)) + openUnconfirmedFileForTesting( + ctx, + t, + randomNumberGenerator, + program, + rootDirectory, + leaf, + nfsv4_xdr.NfsFh4{0xa7, 0x7b, 0xdf, 0xee, 0x60, 0xed, 0x37, 0x9d}, + /* shortClientID = */ 0xf7fdfdc38f805b08, + /* seqID = */ 30430, + /* stateIDOther = */ [...]byte{ + 0xab, 0x4f, 0xf6, 0x1c, + 0x1e, 0x72, 0x2a, 0xe1, + 0x85, 0x8e, 0x31, 0x01, + }) + clock.EXPECT().Now().Return(time.Unix(1006, 0)) + clock.EXPECT().Now().Return(time.Unix(1007, 0)) + openConfirmForTesting( + ctx, + t, + randomNumberGenerator, + program, + nfsv4_xdr.NfsFh4{0xa7, 0x7b, 0xdf, 0xee, 0x60, 0xed, 0x37, 0x9d}, + /* seqID = */ 30431, + /* stateIDOther = */ [...]byte{ + 0xab, 0x4f, 0xf6, 0x1c, + 0x1e, 0x72, 0x2a, 0xe1, + 0x85, 0x8e, 0x31, 0x01, + }) + + clock.EXPECT().Now().Return(time.Unix(1008, 0)) + clock.EXPECT().Now().Return(time.Unix(1009, 0)) + randomNumberGeneratorExpectRead(randomNumberGenerator, []byte{0xe8, 0xef, 0xf4, 0x3d, 
0x9b, 0x99, 0x0e, 0xf1}) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "lock", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{0xa7, 0x7b, 0xdf, 0xee, 0x60, 0xed, 0x37, 0x9d}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_LOCK{ + Oplock: nfsv4_xdr.Lock4args{ + Locktype: nfsv4_xdr.WRITE_LT, + Reclaim: false, + Offset: 100, + Length: 100, + Locker: &nfsv4_xdr.Locker4_TRUE{ + OpenOwner: nfsv4_xdr.OpenToLockOwner4{ + OpenSeqid: 30432, + OpenStateid: nfsv4_xdr.Stateid4{ + Seqid: 2, + Other: [...]byte{ + 0xab, 0x4f, 0xf6, 0x1c, + 0x1e, 0x72, 0x2a, 0xe1, + 0x85, 0x8e, 0x31, 0x01, + }, + }, + LockSeqid: 16946, + LockOwner: nfsv4_xdr.LockOwner4{ + Clientid: 0xf7fdfdc38f805b08, + Owner: []byte{0xad, 0x75, 0x31, 0x9f, 0xe7, 0xef, 0x5a, 0x00}, + }, + }, + }, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "lock", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_LOCK{ + Oplock: &nfsv4_xdr.Lock4res_NFS4_OK{ + Resok4: nfsv4_xdr.Lock4resok{ + LockStateid: nfsv4_xdr.Stateid4{ + Seqid: 1, + Other: [...]byte{ + 0xab, 0x4f, 0xf6, 0x1c, + 0xe8, 0xef, 0xf4, 0x3d, + 0x9b, 0x99, 0x0e, 0xf1, + }, + }, + }, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) + + t.Run("LocksHeld", func(t *testing.T) { + // Now that this lock-owner holds one or more locks, + // RELEASE_LOCKOWNER can no longer be called. 
+ clock.EXPECT().Now().Return(time.Unix(1010, 0)) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "release_lockowner", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_RELEASE_LOCKOWNER{ + OpreleaseLockowner: nfsv4_xdr.ReleaseLockowner4args{ + LockOwner: nfsv4_xdr.LockOwner4{ + Clientid: 0xf7fdfdc38f805b08, + Owner: []byte{0xad, 0x75, 0x31, 0x9f, 0xe7, 0xef, 0x5a, 0x00}, + }, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "release_lockowner", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_RELEASE_LOCKOWNER{ + OpreleaseLockowner: nfsv4_xdr.ReleaseLockowner4res{ + Status: nfsv4_xdr.NFS4ERR_LOCKS_HELD, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_LOCKS_HELD, + }, res) + }) + + // Drop the lock. + clock.EXPECT().Now().Return(time.Unix(1011, 0)) + clock.EXPECT().Now().Return(time.Unix(1012, 0)) + + res, err = program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "unlock", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{0xa7, 0x7b, 0xdf, 0xee, 0x60, 0xed, 0x37, 0x9d}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_LOCKU{ + Oplocku: nfsv4_xdr.Locku4args{ + Locktype: nfsv4_xdr.WRITE_LT, + Seqid: 16947, + LockStateid: nfsv4_xdr.Stateid4{ + Seqid: 1, + Other: [...]byte{ + 0xab, 0x4f, 0xf6, 0x1c, + 0xe8, 0xef, 0xf4, 0x3d, + 0x9b, 0x99, 0x0e, 0xf1, + }, + }, + Offset: 50, + Length: 200, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "unlock", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_LOCKU{ + Oplocku: &nfsv4_xdr.Locku4res_NFS4_OK{ + LockStateid: nfsv4_xdr.Stateid4{ + Seqid: 2, + Other: [...]byte{ + 0xab, 0x4f, 0xf6, 0x1c, + 0xe8, 0xef, 0xf4, 0x3d, + 0x9b, 0x99, 0x0e, 0xf1, + }, + }, + }, + }, + }, + Status: 
nfsv4_xdr.NFS4_OK, + }, res) + + t.Run("SuccessAfterUnlock", func(t *testing.T) { + // Now that the file has been unlocked, it should be + // possible to call RELEASE_LOCKOWNER once again. + clock.EXPECT().Now().Return(time.Unix(1013, 0)) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "release_lockowner", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_RELEASE_LOCKOWNER{ + OpreleaseLockowner: nfsv4_xdr.ReleaseLockowner4args{ + LockOwner: nfsv4_xdr.LockOwner4{ + Clientid: 0xf7fdfdc38f805b08, + Owner: []byte{0xad, 0x75, 0x31, 0x9f, 0xe7, 0xef, 0x5a, 0x00}, + }, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "release_lockowner", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_RELEASE_LOCKOWNER{ + OpreleaseLockowner: nfsv4_xdr.ReleaseLockowner4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) + + // As a consequence, future LOCK operations for the + // lock-owner should fail, as long as no + // open_to_lock_owner4 is provided. 
+ clock.EXPECT().Now().Return(time.Unix(1014, 0)) + clock.EXPECT().Now().Return(time.Unix(1015, 0)) + + res, err = program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "lock", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{0xa7, 0x7b, 0xdf, 0xee, 0x60, 0xed, 0x37, 0x9d}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_LOCK{ + Oplock: nfsv4_xdr.Lock4args{ + Locktype: nfsv4_xdr.WRITE_LT, + Reclaim: false, + Offset: 100, + Length: 100, + Locker: &nfsv4_xdr.Locker4_FALSE{ + LockOwner: nfsv4_xdr.ExistLockOwner4{ + LockSeqid: 16947, + LockStateid: nfsv4_xdr.Stateid4{ + Seqid: 2, + Other: [...]byte{ + 0xab, 0x4f, 0xf6, 0x1c, + 0xe8, 0xef, 0xf4, 0x3d, + 0x9b, 0x99, 0x0e, 0xf1, + }, + }, + }, + }, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "lock", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_LOCK{ + Oplock: &nfsv4_xdr.Lock4res_default{ + Status: nfsv4_xdr.NFS4ERR_BAD_STATEID, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_BAD_STATEID, + }, res) + }) +} + +func TestBaseProgramCompound_OP_REMOVE(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + rootDirectory := mock.NewMockVirtualDirectory(ctrl) + rootDirectory.EXPECT().VirtualGetAttributes(gomock.Any(), virtual.AttributesMaskFileHandle, gomock.Any()). 
+ Do(func(ctx context.Context, requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetFileHandle([]byte{0xe3, 0x85, 0x4a, 0x60, 0x0d, 0xaf, 0x14, 0x20}) + }) + handleResolver := mock.NewMockHandleResolver(ctrl) + randomNumberGenerator := mock.NewMockSingleThreadedGenerator(ctrl) + rebootVerifier := nfsv4_xdr.Verifier4{0xe7, 0x77, 0x33, 0xf4, 0x21, 0xad, 0x7a, 0x1b} + stateIDOtherPrefix := [...]byte{0x4b, 0x46, 0x62, 0x3c} + clock := mock.NewMockClock(ctrl) + program := nfsv4.NewBaseProgram(rootDirectory, handleResolver.Call, randomNumberGenerator, rebootVerifier, stateIDOtherPrefix, clock, 2*time.Minute, time.Minute) + + t.Run("NoFileHandle", func(t *testing.T) { + // Calling REMOVE without a file handle should fail. + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "unlink", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_REMOVE{ + Opremove: nfsv4_xdr.Remove4args{ + Target: "file", + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "unlink", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_REMOVE{ + Opremove: &nfsv4_xdr.Remove4res_default{ + Status: nfsv4_xdr.NFS4ERR_NOFILEHANDLE, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_NOFILEHANDLE, + }, res) + }) + + t.Run("NotDirectory", func(t *testing.T) { + leaf := mock.NewMockVirtualLeaf(ctrl) + clock.EXPECT().Now().Return(time.Unix(1000, 0)) + handleResolverExpectCall(t, handleResolver, []byte{1, 2, 3}, virtual.DirectoryChild{}.FromLeaf(leaf), virtual.StatusOK) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "unlink", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{1, 2, 3}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_REMOVE{ + Opremove: nfsv4_xdr.Remove4args{ + Target: "file", + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "unlink", + 
Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_REMOVE{ + Opremove: &nfsv4_xdr.Remove4res_default{ + Status: nfsv4_xdr.NFS4ERR_NOTDIR, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_NOTDIR, + }, res) + }) + + t.Run("BadName", func(t *testing.T) { + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "unlink", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_REMOVE{ + Opremove: nfsv4_xdr.Remove4args{ + Target: "..", + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "unlink", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_REMOVE{ + Opremove: &nfsv4_xdr.Remove4res_default{ + Status: nfsv4_xdr.NFS4ERR_BADNAME, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_BADNAME, + }, res) + }) + + t.Run("MissingName", func(t *testing.T) { + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "unlink", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_REMOVE{}, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "unlink", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_REMOVE{ + Opremove: &nfsv4_xdr.Remove4res_default{ + Status: nfsv4_xdr.NFS4ERR_INVAL, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_INVAL, + }, res) + }) + + t.Run("Failure", func(t *testing.T) { + rootDirectory.EXPECT().VirtualRemove( + path.MustNewComponent("file"), + true, + true, + ).Return(virtual.ChangeInfo{}, virtual.StatusErrAccess) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: 
"unlink", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_REMOVE{ + Opremove: nfsv4_xdr.Remove4args{ + Target: "file", + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "unlink", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_REMOVE{ + Opremove: &nfsv4_xdr.Remove4res_default{ + Status: nfsv4_xdr.NFS4ERR_ACCESS, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_ACCESS, + }, res) + }) + + t.Run("Success", func(t *testing.T) { + rootDirectory.EXPECT().VirtualRemove( + path.MustNewComponent("file"), + true, + true, + ).Return(virtual.ChangeInfo{ + Before: 0x65821b4665becdc0, + After: 0x9c6360fa70cc3aea, + }, virtual.StatusOK) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "unlink", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_REMOVE{ + Opremove: nfsv4_xdr.Remove4args{ + Target: "file", + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "unlink", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_REMOVE{ + Opremove: &nfsv4_xdr.Remove4res_NFS4_OK{ + Resok4: nfsv4_xdr.Remove4resok{ + Cinfo: nfsv4_xdr.ChangeInfo4{ + Atomic: true, + Before: 0x65821b4665becdc0, + After: 0x9c6360fa70cc3aea, + }, + }, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) + }) +} + +// TODO: RENAME +// TODO: RENEW + +func TestBaseProgramCompound_OP_RESTOREFH(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + rootDirectory := mock.NewMockVirtualDirectory(ctrl) + rootDirectory.EXPECT().VirtualGetAttributes(gomock.Any(), virtual.AttributesMaskFileHandle, gomock.Any()). 
+ Do(func(ctx context.Context, requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetFileHandle([]byte{0x16, 0xb9, 0x45, 0x1d, 0x06, 0x85, 0xc4, 0xbb}) + }) + handleResolver := mock.NewMockHandleResolver(ctrl) + randomNumberGenerator := mock.NewMockSingleThreadedGenerator(ctrl) + rebootVerifier := nfsv4_xdr.Verifier4{0x5f, 0x98, 0x5c, 0xdf, 0x8a, 0xac, 0x4d, 0x97} + stateIDOtherPrefix := [...]byte{0xd4, 0x7c, 0xd1, 0x8f} + clock := mock.NewMockClock(ctrl) + program := nfsv4.NewBaseProgram(rootDirectory, handleResolver.Call, randomNumberGenerator, rebootVerifier, stateIDOtherPrefix, clock, 2*time.Minute, time.Minute) + + t.Run("NoSavedFileHandle", func(t *testing.T) { + // Calling RESTOREFH without a saved file handle should fail. + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "restorefh", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_RESTOREFH{}, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "restorefh", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_RESTOREFH{ + Oprestorefh: nfsv4_xdr.Restorefh4res{ + Status: nfsv4_xdr.NFS4ERR_RESTOREFH, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_RESTOREFH, + }, res) + }) + + t.Run("Success", func(t *testing.T) { + // RESTOREFH should restore the file that was saved + // previously. The current file handle for successive + // operations should apply to that file instead. 
+ leaf := mock.NewMockVirtualLeaf(ctrl) + clock.EXPECT().Now().Return(time.Unix(1000, 0)) + handleResolverExpectCall(t, handleResolver, []byte{1, 2, 3}, virtual.DirectoryChild{}.FromLeaf(leaf), virtual.StatusOK) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "restorefh", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{1, 2, 3}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_SAVEFH{}, + &nfsv4_xdr.NfsArgop4_OP_GETFH{}, + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_RESTOREFH{}, + &nfsv4_xdr.NfsArgop4_OP_GETFH{}, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "restorefh", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_SAVEFH{ + Opsavefh: nfsv4_xdr.Savefh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_GETFH{ + Opgetfh: &nfsv4_xdr.Getfh4res_NFS4_OK{ + Resok4: nfsv4_xdr.Getfh4resok{ + Object: nfsv4_xdr.NfsFh4{1, 2, 3}, + }, + }, + }, + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_RESTOREFH{ + Oprestorefh: nfsv4_xdr.Restorefh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_GETFH{ + Opgetfh: &nfsv4_xdr.Getfh4res_NFS4_OK{ + Resok4: nfsv4_xdr.Getfh4resok{ + Object: nfsv4_xdr.NfsFh4{1, 2, 3}, + }, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) + }) +} + +func TestBaseProgramCompound_OP_SAVEFH(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + rootDirectory := mock.NewMockVirtualDirectory(ctrl) + rootDirectory.EXPECT().VirtualGetAttributes(gomock.Any(), virtual.AttributesMaskFileHandle, gomock.Any()). 
+ Do(func(ctx context.Context, requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetFileHandle([]byte{0xc4, 0x2b, 0x0e, 0x04, 0xde, 0x15, 0x66, 0x77}) + }) + handleResolver := mock.NewMockHandleResolver(ctrl) + randomNumberGenerator := mock.NewMockSingleThreadedGenerator(ctrl) + rebootVerifier := nfsv4_xdr.Verifier4{0xe9, 0xf5, 0x40, 0xa0, 0x20, 0xd9, 0x2c, 0x52} + stateIDOtherPrefix := [...]byte{0xf1, 0xd0, 0x0e, 0xa0} + clock := mock.NewMockClock(ctrl) + program := nfsv4.NewBaseProgram(rootDirectory, handleResolver.Call, randomNumberGenerator, rebootVerifier, stateIDOtherPrefix, clock, 2*time.Minute, time.Minute) + + t.Run("NoFileHandle", func(t *testing.T) { + // Calling SAVEFH without a file handle should fail. + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "savefh", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_SAVEFH{}, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "savefh", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_SAVEFH{ + Opsavefh: nfsv4_xdr.Savefh4res{ + Status: nfsv4_xdr.NFS4ERR_NOFILEHANDLE, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_NOFILEHANDLE, + }, res) + }) + + // The success case is tested as part of OP_RESTOREFH. +} + +func TestBaseProgramCompound_OP_SECINFO(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + rootDirectory := mock.NewMockVirtualDirectory(ctrl) + rootDirectory.EXPECT().VirtualGetAttributes(gomock.Any(), virtual.AttributesMaskFileHandle, gomock.Any()). 
+ Do(func(ctx context.Context, requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetFileHandle([]byte{0x0a, 0xa2, 0x92, 0x2f, 0x06, 0x66, 0xd8, 0x80}) + }) + handleResolver := mock.NewMockHandleResolver(ctrl) + randomNumberGenerator := mock.NewMockSingleThreadedGenerator(ctrl) + rebootVerifier := nfsv4_xdr.Verifier4{0x70, 0x34, 0xc6, 0x7a, 0x25, 0x6e, 0x08, 0xc0} + stateIDOtherPrefix := [...]byte{0xf9, 0x44, 0xa6, 0x25} + clock := mock.NewMockClock(ctrl) + program := nfsv4.NewBaseProgram(rootDirectory, handleResolver.Call, randomNumberGenerator, rebootVerifier, stateIDOtherPrefix, clock, 2*time.Minute, time.Minute) + + t.Run("NoFileHandle", func(t *testing.T) { + // Calling SECINFO without a file handle should fail. + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "secinfo", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_SECINFO{ + Opsecinfo: nfsv4_xdr.Secinfo4args{ + Name: "Hello", + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "secinfo", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_SECINFO{ + Opsecinfo: &nfsv4_xdr.Secinfo4res_default{ + Status: nfsv4_xdr.NFS4ERR_NOFILEHANDLE, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_NOFILEHANDLE, + }, res) + }) + + t.Run("NotDirectory", func(t *testing.T) { + // Even though LOOKUP may return NFS4ERR_SYMLINK when + // called against a symbolic link, SECINFO has no such + // requirement. It should always return NFS4ERR_NOTDIR. 
+ leaf := mock.NewMockVirtualLeaf(ctrl) + clock.EXPECT().Now().Return(time.Unix(1000, 0)) + handleResolverExpectCall(t, handleResolver, []byte{1, 2, 3}, virtual.DirectoryChild{}.FromLeaf(leaf), virtual.StatusOK) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "secinfo", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{1, 2, 3}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_SECINFO{ + Opsecinfo: nfsv4_xdr.Secinfo4args{ + Name: "Hello", + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "secinfo", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_SECINFO{ + Opsecinfo: &nfsv4_xdr.Secinfo4res_default{ + Status: nfsv4_xdr.NFS4ERR_NOTDIR, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_NOTDIR, + }, res) + }) + + t.Run("BadName", func(t *testing.T) { + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "secinfo", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_SECINFO{ + Opsecinfo: nfsv4_xdr.Secinfo4args{ + Name: "..", + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "secinfo", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_SECINFO{ + Opsecinfo: &nfsv4_xdr.Secinfo4res_default{ + Status: nfsv4_xdr.NFS4ERR_BADNAME, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_BADNAME, + }, res) + }) + + t.Run("MissingName", func(t *testing.T) { + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "secinfo", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_SECINFO{}, + }, + }) + require.NoError(t, err) + 
require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "secinfo", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_SECINFO{ + Opsecinfo: &nfsv4_xdr.Secinfo4res_default{ + Status: nfsv4_xdr.NFS4ERR_INVAL, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_INVAL, + }, res) + }) + + t.Run("NotFound", func(t *testing.T) { + rootDirectory.EXPECT().VirtualLookup( + gomock.Any(), + path.MustNewComponent("Hello"), + virtual.AttributesMask(0), + gomock.Any(), + ).Return(virtual.DirectoryChild{}, virtual.StatusErrNoEnt) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "secinfo", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_SECINFO{ + Opsecinfo: nfsv4_xdr.Secinfo4args{ + Name: "Hello", + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "secinfo", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_SECINFO{ + Opsecinfo: &nfsv4_xdr.Secinfo4res_default{ + Status: nfsv4_xdr.NFS4ERR_NOENT, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_NOENT, + }, res) + }) + + t.Run("Success", func(t *testing.T) { + leaf := mock.NewMockNativeLeaf(ctrl) + rootDirectory.EXPECT().VirtualLookup( + gomock.Any(), + path.MustNewComponent("Hello"), + virtual.AttributesMask(0), + gomock.Any(), + ).Return(virtual.DirectoryChild{}.FromLeaf(leaf), virtual.StatusOK) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "secinfo", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_SECINFO{ + Opsecinfo: nfsv4_xdr.Secinfo4args{ + Name: "Hello", + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "secinfo", + Resarray: 
[]nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_SECINFO{ + Opsecinfo: &nfsv4_xdr.Secinfo4res_NFS4_OK{ + Resok4: []nfsv4_xdr.Secinfo4{ + &nfsv4_xdr.Secinfo4_default{ + Flavor: rpcv2.AUTH_NONE, + }, + }, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) + }) +} + +// TODO: SETATTR +// TODO: SETCLIENTID + +func TestBaseProgramCompound_OP_SETCLIENTID_CONFIRM(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + rootDirectory := mock.NewMockVirtualDirectory(ctrl) + rootDirectory.EXPECT().VirtualGetAttributes(gomock.Any(), virtual.AttributesMaskFileHandle, gomock.Any()). + Do(func(ctx context.Context, requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetFileHandle([]byte{0x3d, 0x01, 0x56, 0xaf, 0xab, 0x16, 0xe9, 0x23}) + }) + handleResolver := mock.NewMockHandleResolver(ctrl) + randomNumberGenerator := mock.NewMockSingleThreadedGenerator(ctrl) + rebootVerifier := nfsv4_xdr.Verifier4{0x73, 0xaf, 0xeb, 0xd6, 0x5b, 0x96, 0x74, 0xde} + stateIDOtherPrefix := [...]byte{0xdb, 0xd3, 0xb5, 0x41} + clock := mock.NewMockClock(ctrl) + program := nfsv4.NewBaseProgram(rootDirectory, handleResolver.Call, randomNumberGenerator, rebootVerifier, stateIDOtherPrefix, clock, 2*time.Minute, time.Minute) + + t.Run("NoKnownClientID", func(t *testing.T) { + // Calling SETCLIENTID_CONFIRM without calling + // SETCLIENTID first doesn't work. 
+ clock.EXPECT().Now().Return(time.Unix(1000, 0)) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "setclientid_confirm", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_SETCLIENTID_CONFIRM{ + OpsetclientidConfirm: nfsv4_xdr.SetclientidConfirm4args{ + Clientid: 0x90fee2857d7b5f5b, + SetclientidConfirm: nfsv4_xdr.Verifier4{0xa1, 0x30, 0xf6, 0x1a, 0xc0, 0xac, 0x1f, 0x36}, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "setclientid_confirm", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_SETCLIENTID_CONFIRM{ + OpsetclientidConfirm: nfsv4_xdr.SetclientidConfirm4res{ + Status: nfsv4_xdr.NFS4ERR_STALE_CLIENTID, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_STALE_CLIENTID, + }, res) + }) + + t.Run("TooSlow", func(t *testing.T) { + // As the server was created with a maximum lease time + // of 120 seconds, we should see SETCLIENTID_CONFIRM + // fail if there are 121 seconds in between. + clock.EXPECT().Now().Return(time.Unix(1100, 0)) + randomNumberGenerator.EXPECT().Uint64().Return(uint64(0xabd34c548970a69b)) + randomNumberGeneratorExpectRead(randomNumberGenerator, []byte{0xbd, 0x89, 0xa7, 0x95, 0xc4, 0x18, 0xd0, 0xd0}) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "setclientid", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_SETCLIENTID{ + Opsetclientid: nfsv4_xdr.Setclientid4args{ + Client: nfsv4_xdr.NfsClientId4{ + Verifier: nfsv4_xdr.Verifier4{0xcd, 0xef, 0x89, 0x02, 0x4c, 0x39, 0x2d, 0xeb}, + Id: []byte{0x06, 0x3f, 0xfe, 0x38, 0x30, 0xc5, 0xa8, 0xbc}, + }, + Callback: nfsv4_xdr.CbClient4{ + CbProgram: 0x7b3f75b9, + CbLocation: nfsv4_xdr.Clientaddr4{ + RNetid: "tcp", + RAddr: "127.0.0.1.200.123", + }, + }, + CallbackIdent: 0x1d004919, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "setclientid", + Resarray: []nfsv4_xdr.NfsResop4{ + 
&nfsv4_xdr.NfsResop4_OP_SETCLIENTID{ + Opsetclientid: &nfsv4_xdr.Setclientid4res_NFS4_OK{ + Resok4: nfsv4_xdr.Setclientid4resok{ + Clientid: 0xabd34c548970a69b, + SetclientidConfirm: nfsv4_xdr.Verifier4{0xbd, 0x89, 0xa7, 0x95, 0xc4, 0x18, 0xd0, 0xd0}, + }, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) + + clock.EXPECT().Now().Return(time.Unix(1221, 0)) + + res, err = program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "setclientid_confirm", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_SETCLIENTID_CONFIRM{ + OpsetclientidConfirm: nfsv4_xdr.SetclientidConfirm4args{ + Clientid: 0xabd34c548970a69b, + SetclientidConfirm: nfsv4_xdr.Verifier4{0xbd, 0x89, 0xa7, 0x95, 0xc4, 0x18, 0xd0, 0xd0}, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "setclientid_confirm", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_SETCLIENTID_CONFIRM{ + OpsetclientidConfirm: nfsv4_xdr.SetclientidConfirm4res{ + Status: nfsv4_xdr.NFS4ERR_STALE_CLIENTID, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_STALE_CLIENTID, + }, res) + }) + + t.Run("Success", func(t *testing.T) { + // Successfully confirm a client. 
+ clock.EXPECT().Now().Return(time.Unix(1300, 0)) + randomNumberGenerator.EXPECT().Uint64().Return(uint64(0x23078b2a3f2e1856)) + randomNumberGeneratorExpectRead(randomNumberGenerator, []byte{0xd8, 0x4d, 0xc4, 0x51, 0xcb, 0xe9, 0xec, 0xb9}) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "setclientid", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_SETCLIENTID{ + Opsetclientid: nfsv4_xdr.Setclientid4args{ + Client: nfsv4_xdr.NfsClientId4{ + Verifier: nfsv4_xdr.Verifier4{0xd4, 0x3e, 0x5a, 0x75, 0x93, 0x4f, 0x01, 0x7c}, + Id: []byte{0x75, 0x89, 0x89, 0xbf, 0x89, 0x10, 0x20, 0xd3}, + }, + Callback: nfsv4_xdr.CbClient4{ + CbProgram: 0xc32f5c62, + CbLocation: nfsv4_xdr.Clientaddr4{ + RNetid: "tcp", + RAddr: "127.0.0.1.200.472", + }, + }, + CallbackIdent: 0xf5dc603e, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "setclientid", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_SETCLIENTID{ + Opsetclientid: &nfsv4_xdr.Setclientid4res_NFS4_OK{ + Resok4: nfsv4_xdr.Setclientid4resok{ + Clientid: 0x23078b2a3f2e1856, + SetclientidConfirm: nfsv4_xdr.Verifier4{0xd8, 0x4d, 0xc4, 0x51, 0xcb, 0xe9, 0xec, 0xb9}, + }, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) + + clock.EXPECT().Now().Return(time.Unix(1301, 0)) + + res, err = program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "setclientid_confirm", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_SETCLIENTID_CONFIRM{ + OpsetclientidConfirm: nfsv4_xdr.SetclientidConfirm4args{ + Clientid: 0x23078b2a3f2e1856, + SetclientidConfirm: nfsv4_xdr.Verifier4{0xd8, 0x4d, 0xc4, 0x51, 0xcb, 0xe9, 0xec, 0xb9}, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "setclientid_confirm", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_SETCLIENTID_CONFIRM{ + OpsetclientidConfirm: nfsv4_xdr.SetclientidConfirm4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + 
}, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) + }) + + // Open a file for the remainder of this test. + leaf := mock.NewMockVirtualLeaf(ctrl) + clock.EXPECT().Now().Return(time.Unix(1302, 0)) + clock.EXPECT().Now().Return(time.Unix(1303, 0)) + openUnconfirmedFileForTesting( + ctx, + t, + randomNumberGenerator, + program, + rootDirectory, + leaf, + nfsv4_xdr.NfsFh4{0xc0, 0xa3, 0xb8, 0x99, 0x08, 0x03, 0xe8, 0x45}, + /* shortClientID = */ 0x23078b2a3f2e1856, + /* seqID = */ 3726, + /* stateIDOther = */ [...]byte{ + 0xdb, 0xd3, 0xb5, 0x41, + 0xc3, 0x2f, 0x5c, 0x62, + 0xf5, 0xdc, 0x60, 0x3e, + }) + + t.Run("Idempotence", func(t *testing.T) { + // Sending the same confirmation as before should cause + // no meaningful change. We should see the same response + // as before. + clock.EXPECT().Now().Return(time.Unix(1304, 0)) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "setclientid_confirm", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_SETCLIENTID_CONFIRM{ + OpsetclientidConfirm: nfsv4_xdr.SetclientidConfirm4args{ + Clientid: 0x23078b2a3f2e1856, + SetclientidConfirm: nfsv4_xdr.Verifier4{0xd8, 0x4d, 0xc4, 0x51, 0xcb, 0xe9, 0xec, 0xb9}, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "setclientid_confirm", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_SETCLIENTID_CONFIRM{ + OpsetclientidConfirm: nfsv4_xdr.SetclientidConfirm4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) + }) + + t.Run("DifferentVerifier", func(t *testing.T) { + // Sending a new SETCLIENTID request with the same ID, + // but a different verifier should cause the server to + // return a new verifier as well. 
+ clock.EXPECT().Now().Return(time.Unix(1305, 0)) + randomNumberGenerator.EXPECT().Uint64().Return(uint64(0x23078b2a3f2e1856)) + randomNumberGeneratorExpectRead(randomNumberGenerator, []byte{0x76, 0x23, 0x20, 0xcb, 0xb5, 0x5d, 0xed, 0x61}) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "setclientid", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_SETCLIENTID{ + Opsetclientid: nfsv4_xdr.Setclientid4args{ + Client: nfsv4_xdr.NfsClientId4{ + Verifier: nfsv4_xdr.Verifier4{0x2c, 0xf9, 0x38, 0xc4, 0xc6, 0xea, 0x03, 0x72}, + Id: []byte{0x75, 0x89, 0x89, 0xbf, 0x89, 0x10, 0x20, 0xd3}, + }, + Callback: nfsv4_xdr.CbClient4{ + CbProgram: 0xc32f5c62, + CbLocation: nfsv4_xdr.Clientaddr4{ + RNetid: "tcp", + RAddr: "127.0.0.1.200.472", + }, + }, + CallbackIdent: 0xf5dc603e, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "setclientid", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_SETCLIENTID{ + Opsetclientid: &nfsv4_xdr.Setclientid4res_NFS4_OK{ + Resok4: nfsv4_xdr.Setclientid4resok{ + Clientid: 0x23078b2a3f2e1856, + SetclientidConfirm: nfsv4_xdr.Verifier4{0x76, 0x23, 0x20, 0xcb, 0xb5, 0x5d, 0xed, 0x61}, + }, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) + + // Confirming the new verifier should cause the server + // to discard all state associated with the previous + // one, as it indicated the client rebooted. 
+ clock.EXPECT().Now().Return(time.Unix(1306, 0)) + leaf.EXPECT().VirtualClose(virtual.ShareMaskRead) + + res, err = program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "setclientid_confirm", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_SETCLIENTID_CONFIRM{ + OpsetclientidConfirm: nfsv4_xdr.SetclientidConfirm4args{ + Clientid: 0x23078b2a3f2e1856, + SetclientidConfirm: nfsv4_xdr.Verifier4{0x76, 0x23, 0x20, 0xcb, 0xb5, 0x5d, 0xed, 0x61}, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "setclientid_confirm", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_SETCLIENTID_CONFIRM{ + OpsetclientidConfirm: nfsv4_xdr.SetclientidConfirm4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) + }) +} + +func TestBaseProgramCompound_OP_VERIFY(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + rootDirectory := mock.NewMockVirtualDirectory(ctrl) + rootDirectory.EXPECT().VirtualGetAttributes(gomock.Any(), virtual.AttributesMaskFileHandle, gomock.Any()). + Do(func(ctx context.Context, requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetFileHandle([]byte{0xe1, 0x79, 0xc1, 0x39, 0x2a, 0xef, 0xbb, 0xde}) + }) + handleResolver := mock.NewMockHandleResolver(ctrl) + randomNumberGenerator := mock.NewMockSingleThreadedGenerator(ctrl) + rebootVerifier := nfsv4_xdr.Verifier4{0x71, 0x69, 0x6c, 0x7c, 0x90, 0x79, 0x3b, 0x13} + stateIDOtherPrefix := [...]byte{0x19, 0xed, 0x93, 0x5f} + clock := mock.NewMockClock(ctrl) + program := nfsv4.NewBaseProgram(rootDirectory, handleResolver.Call, randomNumberGenerator, rebootVerifier, stateIDOtherPrefix, clock, 2*time.Minute, time.Minute) + + t.Run("NoFileHandle", func(t *testing.T) { + // Calling VERIFY without a file handle should fail. 
+ res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "verify", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_VERIFY{ + Opverify: nfsv4_xdr.Verify4args{ + ObjAttributes: nfsv4_xdr.Fattr4{ + Attrmask: nfsv4_xdr.Bitmap4{ + 1 << nfsv4_xdr.FATTR4_TYPE, + }, + AttrVals: nfsv4_xdr.Attrlist4{ + // FATTR4_TYPE == NF4DIR. + 0x00, 0x00, 0x00, 0x02, + }, + }, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "verify", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_VERIFY{ + Opverify: nfsv4_xdr.Verify4res{ + Status: nfsv4_xdr.NFS4ERR_NOFILEHANDLE, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_NOFILEHANDLE, + }, res) + }) + + t.Run("BadXDR1", func(t *testing.T) { + // If the client provides attributes that are an exact + // prefix of what we compute ourselves, then the data + // provided by the client must be corrupted. XDR would + // never allow that. + rootDirectory.EXPECT().VirtualGetAttributes(ctx, virtual.AttributesMaskFileType, gomock.Any()). + Do(func(ctx context.Context, requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetFileType(filesystem.FileTypeDirectory) + }) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "verify", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_VERIFY{ + Opverify: nfsv4_xdr.Verify4args{ + ObjAttributes: nfsv4_xdr.Fattr4{ + Attrmask: nfsv4_xdr.Bitmap4{ + 1 << nfsv4_xdr.FATTR4_TYPE, + }, + AttrVals: nfsv4_xdr.Attrlist4{ + // FATTR4_TYPE, truncated. 
+ 0x00, 0x00, 0x00, + }, + }, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "verify", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_VERIFY{ + Opverify: nfsv4_xdr.Verify4res{ + Status: nfsv4_xdr.NFS4ERR_BADXDR, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_BADXDR, + }, res) + }) + + t.Run("BadXDR2", func(t *testing.T) { + // The same holds for when the client provides more data + // than we generate. + rootDirectory.EXPECT().VirtualGetAttributes(ctx, virtual.AttributesMaskFileType, gomock.Any()). + Do(func(ctx context.Context, requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetFileType(filesystem.FileTypeDirectory) + }) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "verify", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_VERIFY{ + Opverify: nfsv4_xdr.Verify4args{ + ObjAttributes: nfsv4_xdr.Fattr4{ + Attrmask: nfsv4_xdr.Bitmap4{ + 1 << nfsv4_xdr.FATTR4_TYPE, + }, + AttrVals: nfsv4_xdr.Attrlist4{ + // FATTR4_TYPE == NF4DIR. + 0x00, 0x00, 0x00, 0x02, + // Trailing garbage. + 0xde, 0xad, 0xc0, 0xde, + }, + }, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "verify", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_VERIFY{ + Opverify: nfsv4_xdr.Verify4res{ + Status: nfsv4_xdr.NFS4ERR_BADXDR, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_BADXDR, + }, res) + }) + + t.Run("UnsupportedAttribute", func(t *testing.T) { + // We don't support the 'system' attribute. Providing it + // as part of VERIFY should cause us to return + // NFS4ERR_ATTRNOTSUPP. 
+ rootDirectory.EXPECT().VirtualGetAttributes(ctx, virtual.AttributesMaskFileType, gomock.Any()). + Do(func(ctx context.Context, requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetFileType(filesystem.FileTypeDirectory) + }) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "verify", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_VERIFY{ + Opverify: nfsv4_xdr.Verify4args{ + ObjAttributes: nfsv4_xdr.Fattr4{ + Attrmask: nfsv4_xdr.Bitmap4{ + 1 << nfsv4_xdr.FATTR4_TYPE, + 1 << (nfsv4_xdr.FATTR4_SYSTEM - 32), + }, + AttrVals: nfsv4_xdr.Attrlist4{ + // FATTR4_TYPE == NF4DIR. + 0x00, 0x00, 0x00, 0x02, + // FATTR4_SYSTEM == TRUE. + 0x00, 0x00, 0x00, 0x01, + }, + }, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "verify", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_VERIFY{ + Opverify: nfsv4_xdr.Verify4res{ + Status: nfsv4_xdr.NFS4ERR_ATTRNOTSUPP, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_ATTRNOTSUPP, + }, res) + }) + + t.Run("InvalidAttribute", func(t *testing.T) { + // The 'rdattr_error' attribute is only returned as part + // of READDIR. It cannot be provided to VERIFY. + rootDirectory.EXPECT().VirtualGetAttributes(ctx, virtual.AttributesMaskFileType, gomock.Any()). 
+ Do(func(ctx context.Context, requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetFileType(filesystem.FileTypeDirectory) + }) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "verify", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_VERIFY{ + Opverify: nfsv4_xdr.Verify4args{ + ObjAttributes: nfsv4_xdr.Fattr4{ + Attrmask: nfsv4_xdr.Bitmap4{ + (1 << nfsv4_xdr.FATTR4_TYPE) | + (1 << nfsv4_xdr.FATTR4_RDATTR_ERROR), + }, + AttrVals: nfsv4_xdr.Attrlist4{ + // FATTR4_TYPE == NF4DIR. + 0x00, 0x00, 0x00, 0x02, + // FATTR4_RDATTR_ERROR == NFS4ERR_IO. + 0x00, 0x00, 0x00, 0x05, + }, + }, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "verify", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_VERIFY{ + Opverify: nfsv4_xdr.Verify4res{ + Status: nfsv4_xdr.NFS4ERR_INVAL, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_INVAL, + }, res) + }) + + t.Run("Mismatch", func(t *testing.T) { + rootDirectory.EXPECT().VirtualGetAttributes(ctx, virtual.AttributesMaskFileType, gomock.Any()). + Do(func(ctx context.Context, requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetFileType(filesystem.FileTypeDirectory) + }) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "verify", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_VERIFY{ + Opverify: nfsv4_xdr.Verify4args{ + ObjAttributes: nfsv4_xdr.Fattr4{ + Attrmask: nfsv4_xdr.Bitmap4{ + 1 << nfsv4_xdr.FATTR4_TYPE, + }, + AttrVals: nfsv4_xdr.Attrlist4{ + // FATTR4_TYPE == NF4BLK. 
+ 0x00, 0x00, 0x00, 0x03, + }, + }, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "verify", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_VERIFY{ + Opverify: nfsv4_xdr.Verify4res{ + Status: nfsv4_xdr.NFS4ERR_NOT_SAME, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_NOT_SAME, + }, res) + }) + + t.Run("Match", func(t *testing.T) { + rootDirectory.EXPECT().VirtualGetAttributes(ctx, virtual.AttributesMaskFileType|virtual.AttributesMaskInodeNumber, gomock.Any()). + Do(func(ctx context.Context, requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetFileType(filesystem.FileTypeDirectory) + attributes.SetInodeNumber(0x676b7bcb66d92ed6) + }) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "verify", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_VERIFY{ + Opverify: nfsv4_xdr.Verify4args{ + ObjAttributes: nfsv4_xdr.Fattr4{ + Attrmask: nfsv4_xdr.Bitmap4{ + (1 << nfsv4_xdr.FATTR4_TYPE) | + (1 << nfsv4_xdr.FATTR4_FILEID), + }, + AttrVals: nfsv4_xdr.Attrlist4{ + // FATTR4_TYPE == NF4DIR. + 0x00, 0x00, 0x00, 0x02, + // FATTR4_FILEID. 
+ 0x67, 0x6b, 0x7b, 0xcb, 0x66, 0xd9, 0x2e, 0xd6, + }, + }, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "verify", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_VERIFY{ + Opverify: nfsv4_xdr.Verify4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) + }) +} + +// TODO: WRITE diff --git a/pkg/filesystem/virtual/nfsv4/metrics_program.go b/pkg/filesystem/virtual/nfsv4/metrics_program.go new file mode 100644 index 0000000..623be55 --- /dev/null +++ b/pkg/filesystem/virtual/nfsv4/metrics_program.go @@ -0,0 +1,86 @@ +package nfsv4 + +import ( + "context" + "sync" + + "github.com/buildbarn/go-xdr/pkg/protocols/nfsv4" + "github.com/prometheus/client_golang/prometheus" +) + +var ( + programPrometheusMetrics sync.Once + + programCompoundOperations = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "buildbarn", + Subsystem: "nfsv4", + Name: "program_compound_operations_total", + Help: "Number of operations provided as part of calls to NFSv4 COMPOUND.", + }, + []string{"operation", "status"}) + programCompoundOperationsOK map[nfsv4.NfsOpnum4]prometheus.Counter +) + +type metricsProgram struct { + nfsv4.Nfs4Program +} + +// NewMetricsProgram creates a decorator for nfsv4.Nfs4Program that +// exposes Prometheus metrics for all compound operations called. +// +// Right now it only provides counters for the number of operations +// called. Timing of operation is not exposed, as it can only be +// computed at the procedure level, which isn't meaningful in practice. +func NewMetricsProgram(base nfsv4.Nfs4Program) nfsv4.Nfs4Program { + programPrometheusMetrics.Do(func() { + prometheus.MustRegister(programCompoundOperations) + + // Already create counters for all of the operations + // where status is NFS4_OK. 
This allows us to skip calls + // to WithLabelValues() in the common case. + programCompoundOperationsOK = map[nfsv4.NfsOpnum4]prometheus.Counter{} + for code, name := range nfsv4.NfsOpnum4_name { + programCompoundOperationsOK[code] = programCompoundOperations.WithLabelValues(name, nfsv4.Nfsstat4_name[nfsv4.NFS4_OK]) + } + }) + + return &metricsProgram{ + Nfs4Program: base, + } +} + +func (p *metricsProgram) NfsV4Nfsproc4Compound(ctx context.Context, arguments *nfsv4.Compound4args) (*nfsv4.Compound4res, error) { + compoundRes, err := p.Nfs4Program.NfsV4Nfsproc4Compound(ctx, arguments) + if err != nil { + return nil, err + } + + for i, res := range compoundRes.Resarray { + operation := res.GetResop() + status := nfsv4.NFS4_OK + if i == len(compoundRes.Resarray)-1 { + status = compoundRes.Status + } + if status == nfsv4.NFS4_OK { + if counter, ok := programCompoundOperationsOK[operation]; ok { + // Fast path: a known operation has succeeded. + counter.Inc() + continue + } + } + + // Slow path: either the operation is not known, or it failed. 
+ operationStr, ok := nfsv4.NfsOpnum4_name[operation] + if !ok { + operationStr = "UNKNOWN" + } + statusStr, ok := nfsv4.Nfsstat4_name[status] + if !ok { + statusStr = "UNKNOWN" + } + programCompoundOperations.WithLabelValues(operationStr, statusStr).Inc() + } + + return compoundRes, nil +} diff --git a/pkg/filesystem/virtual/nfsv4/system_authenticator.go b/pkg/filesystem/virtual/nfsv4/system_authenticator.go new file mode 100644 index 0000000..123656c --- /dev/null +++ b/pkg/filesystem/virtual/nfsv4/system_authenticator.go @@ -0,0 +1,127 @@ +package nfsv4 + +import ( + "bytes" + "context" + "crypto/sha256" + "log" + "sync" + + "github.com/buildbarn/bb-storage/pkg/auth" + "github.com/buildbarn/bb-storage/pkg/eviction" + "github.com/buildbarn/go-xdr/pkg/protocols/rpcv2" + "github.com/buildbarn/go-xdr/pkg/rpcserver" + "github.com/jmespath/go-jmespath" +) + +// SystemAuthenticatorCacheKey is the key type that's used by +// the system authenticato's eviction set. +type SystemAuthenticatorCacheKey [sha256.Size]byte + +type systemAuthenticator struct { + metadataExtractor *jmespath.JMESPath + + lock sync.Mutex + cache map[SystemAuthenticatorCacheKey]*auth.AuthenticationMetadata + maximumCacheSize int + evictionSet eviction.Set[SystemAuthenticatorCacheKey] +} + +// NewSystemAuthenticator is an RPCv2 Authenticator that requires that +// requests provide credentials of flavor AUTH_SYS, as described in RFC +// 5531, appendix A. The body of the credentials are converted to an +// AuthenticationMetadata object using a JMESPath expression. The +// resulting metadata is attached to the Context. 
+func NewSystemAuthenticator(metadataExtractor *jmespath.JMESPath, maximumCacheSize int, evictionSet eviction.Set[SystemAuthenticatorCacheKey]) rpcserver.Authenticator { + return &systemAuthenticator{ + metadataExtractor: metadataExtractor, + + maximumCacheSize: maximumCacheSize, + cache: map[SystemAuthenticatorCacheKey]*auth.AuthenticationMetadata{}, + evictionSet: evictionSet, + } +} + +func (a *systemAuthenticator) Authenticate(ctx context.Context, credentials, verifier *rpcv2.OpaqueAuth) (context.Context, rpcv2.OpaqueAuth, rpcv2.AuthStat) { + switch credentials.Flavor { + case rpcv2.AUTH_SYS: + key := sha256.Sum256(credentials.Body) + + a.lock.Lock() + defer a.lock.Unlock() + + authenticationMetadata, ok := a.cache[key] + if ok { + // Client either ignores AUTH_SHORT, or the + // request got retransmitted. Return the cached + // credentials. + a.evictionSet.Touch(key) + } else { + // Parse system authentication data. + var credentialsBody rpcv2.AuthsysParms + b := bytes.NewBuffer(credentials.Body) + if _, err := credentialsBody.ReadFrom(b); err != nil || b.Len() != 0 { + return nil, rpcv2.OpaqueAuth{}, rpcv2.AUTH_BADCRED + } + + // Convert to authentication metadata. + gids := make([]any, 0, len(credentialsBody.Gids)) + for _, gid := range credentialsBody.Gids { + gids = append(gids, gid) + } + raw, err := a.metadataExtractor.Search(map[string]any{ + "stamp": credentialsBody.Stamp, + "machinename": credentialsBody.Machinename, + "uid": credentialsBody.Uid, + "gid": credentialsBody.Gid, + "gids": gids, + }) + if err != nil { + return nil, rpcv2.OpaqueAuth{}, rpcv2.AUTH_BADCRED + } + authenticationMetadata, err = auth.NewAuthenticationMetadataFromRaw(raw) + if err != nil { + log.Print("Failed to create authentication metadata: ", err) + return nil, rpcv2.OpaqueAuth{}, rpcv2.AUTH_BADCRED + } + + // Insert the authentication metadata into the cache. 
+ for len(a.cache) > 0 && len(a.cache) > a.maximumCacheSize { + delete(a.cache, a.evictionSet.Peek()) + a.evictionSet.Remove() + } + a.cache[key] = authenticationMetadata + a.evictionSet.Insert(key) + } + return auth.NewContextWithAuthenticationMetadata(ctx, authenticationMetadata), + rpcv2.OpaqueAuth{ + Flavor: rpcv2.AUTH_SHORT, + Body: key[:], + }, + rpcv2.AUTH_OK + case rpcv2.AUTH_SHORT: + // Client is referring back to previously used + // credentials. Look up the credentials from the cache. + if len(credentials.Body) != sha256.Size { + return nil, rpcv2.OpaqueAuth{}, rpcv2.AUTH_BADCRED + } + var key SystemAuthenticatorCacheKey + copy(key[:], credentials.Body) + + a.lock.Lock() + defer a.lock.Unlock() + + if authenticationMetadata, ok := a.cache[key]; ok { + a.evictionSet.Touch(key) + return auth.NewContextWithAuthenticationMetadata(ctx, authenticationMetadata), + rpcv2.OpaqueAuth{Flavor: rpcv2.AUTH_NONE}, + rpcv2.AUTH_OK + } + + // Client needs to provide the original credentials + // again, as they are no longer present in the cache. 
+ return nil, rpcv2.OpaqueAuth{}, rpcv2.AUTH_REJECTEDCRED + default: + return nil, rpcv2.OpaqueAuth{}, rpcv2.AUTH_BADCRED + } +} diff --git a/pkg/filesystem/virtual/nfsv4/system_authenticator_test.go b/pkg/filesystem/virtual/nfsv4/system_authenticator_test.go new file mode 100644 index 0000000..e79429e --- /dev/null +++ b/pkg/filesystem/virtual/nfsv4/system_authenticator_test.go @@ -0,0 +1,197 @@ +package nfsv4_test + +import ( + "context" + "testing" + + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/virtual/nfsv4" + "github.com/buildbarn/bb-storage/pkg/auth" + "github.com/buildbarn/bb-storage/pkg/eviction" + auth_pb "github.com/buildbarn/bb-storage/pkg/proto/auth" + "github.com/buildbarn/bb-storage/pkg/testutil" + "github.com/buildbarn/go-xdr/pkg/protocols/rpcv2" + "github.com/jmespath/go-jmespath" + "github.com/stretchr/testify/require" + + "google.golang.org/protobuf/types/known/structpb" +) + +func TestSystemAuthenticator(t *testing.T) { + ctx := context.Background() + + evictionSet := eviction.NewLRUSet[nfsv4.SystemAuthenticatorCacheKey]() + authenticator := nfsv4.NewSystemAuthenticator(jmespath.MustCompile("{\"public\": @}"), 10, evictionSet) + + t.Run("InvalidFlavor", func(t *testing.T) { + // Flavor of requests must be AUTH_SHORT or AUTH_SYS. + _, _, s := authenticator.Authenticate( + ctx, + &rpcv2.OpaqueAuth{Flavor: rpcv2.AUTH_NONE}, + &rpcv2.OpaqueAuth{Flavor: rpcv2.AUTH_NONE}) + require.Equal(t, rpcv2.AUTH_BADCRED, s) + }) + + t.Run("AuthSysInvalidBody", func(t *testing.T) { + // Body of AUTH_SYS credentials can't be empty. + _, _, s := authenticator.Authenticate( + ctx, + &rpcv2.OpaqueAuth{Flavor: rpcv2.AUTH_SYS}, + &rpcv2.OpaqueAuth{Flavor: rpcv2.AUTH_NONE}) + require.Equal(t, rpcv2.AUTH_BADCRED, s) + }) + + t.Run("AuthSysTrailingGarbage", func(t *testing.T) { + // AUTH_SYS credentials are malformed, because they have + // some trailing garbage. 
+ _, _, s := authenticator.Authenticate( + ctx, + &rpcv2.OpaqueAuth{ + Flavor: rpcv2.AUTH_SYS, + Body: []byte{ + // stamp. + 0x7b, 0xfe, 0x88, 0xfc, + // machinename. + 0x00, 0x00, 0x00, 0x09, + 0x6c, 0x6f, 0x63, 0x61, + 0x6c, 0x68, 0x6f, 0x73, + 0x74, 0x00, 0x00, 0x00, + // uid. + 0x00, 0x00, 0x03, 0xe8, + // gid. + 0x00, 0x00, 0x00, 0x64, + // gids. + 0x00, 0x00, 0x00, 0x02, + 0x00, 0x00, 0x00, 0x0c, + 0x00, 0x00, 0x00, 0x14, + // Trailing garbage. + 0xf2, 0x61, 0x9e, 0xca, + }, + }, + &rpcv2.OpaqueAuth{Flavor: rpcv2.AUTH_NONE}) + require.Equal(t, rpcv2.AUTH_BADCRED, s) + }) + + t.Run("AuthShortInvalidBodyLength", func(t *testing.T) { + // This implementation requires that AUTH_SHORT + // credentials have a given length. + _, _, s := authenticator.Authenticate( + ctx, + &rpcv2.OpaqueAuth{ + Flavor: rpcv2.AUTH_SHORT, + Body: []byte{1, 2, 3}, + }, + &rpcv2.OpaqueAuth{Flavor: rpcv2.AUTH_NONE}) + require.Equal(t, rpcv2.AUTH_BADCRED, s) + }) + + t.Run("AuthShortUnknownCredentials", func(t *testing.T) { + // If we receive AUTH_SHORT credentials using a key that + // is not part of the cache, we should return + // AUTH_REJECTEDCRED, so that the client can retry the + // request using the full AUTH_SYS credentials. + _, _, s := authenticator.Authenticate( + ctx, + &rpcv2.OpaqueAuth{ + Flavor: rpcv2.AUTH_SHORT, + Body: []byte{ + 0x7d, 0x23, 0x54, 0xf5, 0x3c, 0xdf, 0x0d, 0x29, + 0x0a, 0xb1, 0x7c, 0xe6, 0xa8, 0xcc, 0xf2, 0x66, + 0x52, 0x30, 0x12, 0xb1, 0x17, 0x8c, 0xe5, 0xe0, + 0xbb, 0xe1, 0xd6, 0x1f, 0x37, 0x42, 0xb2, 0x42, + }, + }, + &rpcv2.OpaqueAuth{Flavor: rpcv2.AUTH_NONE}) + require.Equal(t, rpcv2.AUTH_REJECTEDCRED, s) + }) + + t.Run("Success", func(t *testing.T) { + // Credentials are well formed, meaning that they will + // be attached to the Context object. A short + // authentication key will be returned, which may be + // used to refer to the credentials later. 
+ ctx1, verifier1, s := authenticator.Authenticate( + ctx, + &rpcv2.OpaqueAuth{ + Flavor: rpcv2.AUTH_SYS, + Body: []byte{ + // stamp. + 0x7b, 0xfe, 0x88, 0xfc, + // machinename. + 0x00, 0x00, 0x00, 0x09, + 0x6c, 0x6f, 0x63, 0x61, + 0x6c, 0x68, 0x6f, 0x73, + 0x74, 0x00, 0x00, 0x00, + // uid. + 0x00, 0x00, 0x03, 0xe8, + // gid. + 0x00, 0x00, 0x00, 0x64, + // gids. + 0x00, 0x00, 0x00, 0x02, + 0x00, 0x00, 0x00, 0x0c, + 0x00, 0x00, 0x00, 0x14, + }, + }, + &rpcv2.OpaqueAuth{Flavor: rpcv2.AUTH_NONE}) + require.Equal(t, rpcv2.AUTH_OK, s) + metadata1, _ := auth.AuthenticationMetadataFromContext(ctx1).GetPublicProto() + testutil.RequireEqualProto(t, &auth_pb.AuthenticationMetadata{ + Public: structpb.NewStructValue(&structpb.Struct{ + Fields: map[string]*structpb.Value{ + "stamp": structpb.NewNumberValue(0x7bfe88fc), + "machinename": structpb.NewStringValue("localhost"), + "uid": structpb.NewNumberValue(1000), + "gid": structpb.NewNumberValue(100), + "gids": structpb.NewListValue(&structpb.ListValue{ + Values: []*structpb.Value{ + structpb.NewNumberValue(12), + structpb.NewNumberValue(20), + }, + }), + }, + }), + }, metadata1) + require.Equal(t, rpcv2.OpaqueAuth{ + Flavor: rpcv2.AUTH_SHORT, + Body: []byte{ + 0x68, 0xe7, 0xce, 0xa7, 0x9b, 0x51, 0x58, 0x83, + 0x16, 0x5f, 0x74, 0xbc, 0x46, 0x44, 0x4e, 0x50, + 0x12, 0x26, 0xc1, 0xb4, 0xa0, 0xd4, 0xbf, 0x51, + 0xb5, 0xc3, 0xdc, 0xe2, 0x4d, 0x7b, 0x07, 0x69, + }, + }, verifier1) + + // During subsequent requests we may use the short + // authentication key to refer to the same credentials. 
+ ctx2, verifier2, s := authenticator.Authenticate( + ctx, + &rpcv2.OpaqueAuth{ + Flavor: rpcv2.AUTH_SHORT, + Body: []byte{ + 0x68, 0xe7, 0xce, 0xa7, 0x9b, 0x51, 0x58, 0x83, + 0x16, 0x5f, 0x74, 0xbc, 0x46, 0x44, 0x4e, 0x50, + 0x12, 0x26, 0xc1, 0xb4, 0xa0, 0xd4, 0xbf, 0x51, + 0xb5, 0xc3, 0xdc, 0xe2, 0x4d, 0x7b, 0x07, 0x69, + }, + }, + &rpcv2.OpaqueAuth{Flavor: rpcv2.AUTH_NONE}) + require.Equal(t, rpcv2.AUTH_OK, s) + metadata2, _ := auth.AuthenticationMetadataFromContext(ctx2).GetPublicProto() + testutil.RequireEqualProto(t, &auth_pb.AuthenticationMetadata{ + Public: structpb.NewStructValue(&structpb.Struct{ + Fields: map[string]*structpb.Value{ + "stamp": structpb.NewNumberValue(0x7bfe88fc), + "machinename": structpb.NewStringValue("localhost"), + "uid": structpb.NewNumberValue(1000), + "gid": structpb.NewNumberValue(100), + "gids": structpb.NewListValue(&structpb.ListValue{ + Values: []*structpb.Value{ + structpb.NewNumberValue(12), + structpb.NewNumberValue(20), + }, + }), + }, + }), + }, metadata2) + require.Equal(t, rpcv2.OpaqueAuth{Flavor: rpcv2.AUTH_NONE}, verifier2) + }) +} diff --git a/pkg/filesystem/virtual/node.go b/pkg/filesystem/virtual/node.go new file mode 100644 index 0000000..4fa4e6b --- /dev/null +++ b/pkg/filesystem/virtual/node.go @@ -0,0 +1,30 @@ +package virtual + +import ( + "context" + + "github.com/buildbarn/bb-storage/pkg/filesystem" + "github.com/buildbarn/bb-storage/pkg/filesystem/path" +) + +// Node is the intersection between Directory and Leaf. These are the +// operations that can be applied to both kinds of objects. +type Node interface { + VirtualGetAttributes(ctx context.Context, requested AttributesMask, attributes *Attributes) + VirtualSetAttributes(ctx context.Context, in *Attributes, requested AttributesMask, attributes *Attributes) Status +} + +// GetFileInfo extracts the attributes of a node and returns it in the +// form of a FileInfo object. 
+func GetFileInfo(name path.Component, node Node) filesystem.FileInfo { + var attributes Attributes + node.VirtualGetAttributes(context.TODO(), AttributesMaskFileType|AttributesMaskPermissions, &attributes) + permissions, ok := attributes.GetPermissions() + if !ok { + panic("Node did not return permissions attribute, even though it was requested") + } + return filesystem.NewFileInfo( + name, + attributes.GetFileType(), + permissions&PermissionsExecute != 0) +} diff --git a/pkg/filesystem/virtual/permissions.go b/pkg/filesystem/virtual/permissions.go new file mode 100644 index 0000000..707f5a5 --- /dev/null +++ b/pkg/filesystem/virtual/permissions.go @@ -0,0 +1,49 @@ +package virtual + +// Permissions of a file. Unlike regular UNIX file system, no +// distinction is made between owner, group and all permissions. This is +// because the virtual file system is effectively single user. +type Permissions uint8 + +const ( + // PermissionsRead indicates that file contents may be read, or + // that files in a directory may be listed. + PermissionsRead Permissions = 1 << iota + // PermissionsWrite indicates that file contents may be written + // to, or that files in a directory may be added, removed or + // renamed. + PermissionsWrite + // PermissionsExecute indicates that a file is executable, or + // that files in a directory may be looked up. + PermissionsExecute +) + +// NewPermissionsFromMode creates a set of permissions from a +// traditional UNIX style mode. +func NewPermissionsFromMode(m uint32) (p Permissions) { + if m&0o444 != 0 { + p |= PermissionsRead + } + if m&0o222 != 0 { + p |= PermissionsWrite + } + if m&0o111 != 0 { + p |= PermissionsExecute + } + return +} + +// ToMode converts a set of permissions to a traditional UNIX style +// mode. The permissions for the owner, group and all will be identical. 
+func (p Permissions) ToMode() (m uint32) { + if p&PermissionsRead != 0 { + m |= 0o444 + } + if p&PermissionsWrite != 0 { + m |= 0o222 + } + if p&PermissionsExecute != 0 { + m |= 0o111 + } + return +} diff --git a/pkg/filesystem/virtual/placeholder_file.go b/pkg/filesystem/virtual/placeholder_file.go new file mode 100644 index 0000000..572c4fc --- /dev/null +++ b/pkg/filesystem/virtual/placeholder_file.go @@ -0,0 +1,58 @@ +package virtual + +import ( + "context" + + "github.com/buildbarn/bb-storage/pkg/blobstore" + "github.com/buildbarn/bb-storage/pkg/digest" + "github.com/buildbarn/bb-storage/pkg/filesystem" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// placeholderFile provides a common implementation for the part of the +// Leaf interface that is identical for all types of files that are +// merely placeholders on the file system, such as symbolic links, +// character devices and FIFOs. +type placeholderFile struct{} + +func (placeholderFile) Link() Status { + return StatusOK +} + +func (placeholderFile) Unlink() { +} + +func (placeholderFile) UploadFile(ctx context.Context, contentAddressableStorage blobstore.BlobAccess, digestFunction digest.Function) (digest.Digest, error) { + return digest.BadDigest, status.Error(codes.InvalidArgument, "This file cannot be uploaded, as it is a placeholder") +} + +func (placeholderFile) GetContainingDigests() digest.Set { + return digest.EmptySet +} + +func (placeholderFile) VirtualAllocate(off, size uint64) Status { + return StatusErrWrongType +} + +func (placeholderFile) VirtualClose(shareAccess ShareMask) {} + +func (placeholderFile) VirtualOpenSelf(ctx context.Context, shareAccess ShareMask, options *OpenExistingOptions, requested AttributesMask, attributes *Attributes) Status { + // Even though this file may not necessarily be a symbolic link, + // the NFSv4 specification requires that NFS4ERR_SYMLINK is + // returned for all irregular files. 
+	return StatusErrSymlink
+}
+
+func (placeholderFile) VirtualRead(buf []byte, offset uint64) (int, bool, Status) {
+	panic("Request to read from special file should have been intercepted")
+}
+
+func (placeholderFile) VirtualSeek(offset uint64, regionType filesystem.RegionType) (*uint64, Status) {
+	panic("Request to seek on special file should have been intercepted")
+}
+
+func (placeholderFile) VirtualWrite(buf []byte, off uint64) (int, Status) {
+	panic("Request to write to special file should have been intercepted")
+}
diff --git a/pkg/filesystem/virtual/pool_backed_file_allocator.go b/pkg/filesystem/virtual/pool_backed_file_allocator.go
new file mode 100644
index 0000000..891e416
--- /dev/null
+++ b/pkg/filesystem/virtual/pool_backed_file_allocator.go
@@ -0,0 +1,436 @@
+package virtual
+
+import (
+	"context"
+	"io"
+	"math"
+	"sync"
+	"syscall"
+
+	remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2"
+	re_filesystem "github.com/buildbarn/bb-remote-execution/pkg/filesystem"
+	"github.com/buildbarn/bb-remote-execution/pkg/proto/outputpathpersistency"
+	"github.com/buildbarn/bb-remote-execution/pkg/proto/remoteoutputservice"
+	"github.com/buildbarn/bb-storage/pkg/blobstore"
+	"github.com/buildbarn/bb-storage/pkg/blobstore/buffer"
+	"github.com/buildbarn/bb-storage/pkg/digest"
+	"github.com/buildbarn/bb-storage/pkg/filesystem"
+	"github.com/buildbarn/bb-storage/pkg/filesystem/path"
+	"github.com/buildbarn/bb-storage/pkg/util"
+
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+type poolBackedFileAllocator struct {
+	pool        re_filesystem.FilePool
+	errorLogger util.ErrorLogger
+}
+
+// NewPoolBackedFileAllocator creates an allocator for a leaf node that
+// may be stored in a PrepopulatedDirectory, representing a mutable
+// regular file. All operations to mutate file contents (reads, writes
+// and truncations) are forwarded to a file obtained from a FilePool.
+// +// When the file becomes unreachable (i.e., both its link count and open +// file descriptor count reach zero), Close() is called on the +// underlying backing file descriptor. This may be used to request +// deletion from underlying storage. +func NewPoolBackedFileAllocator(pool re_filesystem.FilePool, errorLogger util.ErrorLogger) FileAllocator { + return &poolBackedFileAllocator{ + pool: pool, + errorLogger: errorLogger, + } +} + +func (fa *poolBackedFileAllocator) NewFile(isExecutable bool, size uint64, shareAccess ShareMask) (NativeLeaf, Status) { + file, err := fa.pool.NewFile() + if err != nil { + fa.errorLogger.Log(util.StatusWrapf(err, "Failed to create new file")) + return nil, StatusErrIO + } + if size > 0 { + if err := file.Truncate(int64(size)); err != nil { + fa.errorLogger.Log(util.StatusWrapf(err, "Failed to truncate file to length %d", size)) + file.Close() + return nil, StatusErrIO + } + } + return &fileBackedFile{ + errorLogger: fa.errorLogger, + + file: file, + isExecutable: isExecutable, + size: size, + referenceCount: 1 + shareAccess.Count(), + unfreezeWakeup: make(chan struct{}), + cachedDigest: digest.BadDigest, + }, StatusOK +} + +type fileBackedFile struct { + errorLogger util.ErrorLogger + + lock sync.RWMutex + file filesystem.FileReadWriter + isExecutable bool + size uint64 + referenceCount uint + openFrozenDescriptors uint + unfreezeWakeup chan struct{} + cachedDigest digest.Digest + changeID uint64 +} + +// lockMutatingData picks up the exclusive lock of the file and waits +// for any pending uploads of the file to complete. This function needs +// to be called in operations that mutate f.file and f.size. 
+func (f *fileBackedFile) lockMutatingData() { + f.lock.Lock() + for f.openFrozenDescriptors > 0 { + c := f.unfreezeWakeup + f.lock.Unlock() + <-c + f.lock.Lock() + } +} + +func (f *fileBackedFile) acquireFrozen() bool { + f.lock.Lock() + defer f.lock.Unlock() + + if f.referenceCount == 0 { + return false + } + f.referenceCount++ + f.openFrozenDescriptors++ + return true +} + +func (f *fileBackedFile) release(count uint, frozen bool) { + f.lock.Lock() + defer f.lock.Unlock() + + if f.referenceCount < count { + panic("Invalid reference count") + } + f.referenceCount -= count + + if frozen { + if f.openFrozenDescriptors < count { + panic("Invalid open frozen descriptor count") + } + f.openFrozenDescriptors -= count + if f.openFrozenDescriptors == 0 { + close(f.unfreezeWakeup) + f.unfreezeWakeup = make(chan struct{}) + } + } + f.closeIfNeeded() +} + +func (f *fileBackedFile) closeIfNeeded() { + if f.referenceCount == 0 { + f.file.Close() + f.file = nil + } +} + +func (f *fileBackedFile) Link() Status { + f.lock.Lock() + defer f.lock.Unlock() + + if f.referenceCount == 0 { + return StatusErrStale + } + f.referenceCount++ + return StatusOK +} + +func (f *fileBackedFile) Readlink() (string, error) { + return "", syscall.EINVAL +} + +func (f *fileBackedFile) Unlink() { + f.release(1, false) +} + +func (f *fileBackedFile) getCachedDigest() digest.Digest { + f.lock.RLock() + defer f.lock.RUnlock() + return f.cachedDigest +} + +// updateCachedDigest returns the digest of the file. It either returns +// a cached value, or computes the digest and caches it. It is only safe +// to call this function while the file is frozen (i.e., calling +// f.acquireFrozen()). +func (f *fileBackedFile) updateCachedDigest(digestFunction digest.Function) (digest.Digest, error) { + // Check whether the cached digest we have is still valid. 
+ if cachedDigest := f.getCachedDigest(); cachedDigest != digest.BadDigest && cachedDigest.UsesDigestFunction(digestFunction) { + return cachedDigest, nil + } + + // If not, compute a new digest. + digestGenerator := digestFunction.NewGenerator(math.MaxInt64) + if _, err := io.Copy(digestGenerator, io.NewSectionReader(f, 0, math.MaxInt64)); err != nil { + return digest.BadDigest, util.StatusWrapWithCode(err, codes.Internal, "Failed to compute file digest") + } + newDigest := digestGenerator.Sum() + + // Store the resulting cached digest. + f.lock.Lock() + f.cachedDigest = newDigest + f.lock.Unlock() + return newDigest, nil +} + +func (f *fileBackedFile) UploadFile(ctx context.Context, contentAddressableStorage blobstore.BlobAccess, digestFunction digest.Function) (digest.Digest, error) { + // Create a file handle that temporarily freezes the contents of + // this file. This ensures that the file's contents don't change + // between the digest computation and upload phase. This allows + // us to safely use NewValidatedBufferFromFileReader(). + if !f.acquireFrozen() { + return digest.BadDigest, status.Error(codes.NotFound, "File was unlinked before uploading could start") + } + + blobDigest, err := f.updateCachedDigest(digestFunction) + if err != nil { + f.Close() + return digest.BadDigest, err + } + + if err := contentAddressableStorage.Put( + ctx, + blobDigest, + buffer.NewValidatedBufferFromReaderAt(f, blobDigest.GetSizeBytes())); err != nil { + return digest.BadDigest, util.StatusWrap(err, "Failed to upload file") + } + return blobDigest, nil +} + +func (f *fileBackedFile) GetContainingDigests() digest.Set { + return digest.EmptySet +} + +func (f *fileBackedFile) GetOutputServiceFileStatus(digestFunction *digest.Function) (*remoteoutputservice.FileStatus, error) { + fileStatus := &remoteoutputservice.FileStatus_File{} + if digestFunction != nil { + // TODO: Omit the digest if the file is opened for + // writing. 
The kernel may still hold on to data that + // needs to be written, meaning that digests computed on + // this end are inaccurate. + if !f.acquireFrozen() { + return nil, status.Error(codes.NotFound, "File was unlinked before digest computation could start") + } + blobDigest, err := f.updateCachedDigest(*digestFunction) + f.release(1, true) + if err != nil { + return nil, err + } + fileStatus.Digest = blobDigest.GetProto() + } + return &remoteoutputservice.FileStatus{ + FileType: &remoteoutputservice.FileStatus_File_{ + File: fileStatus, + }, + }, nil +} + +func (f *fileBackedFile) AppendOutputPathPersistencyDirectoryNode(directory *outputpathpersistency.Directory, name path.Component) { + // Because bb_clientd is mostly intended to be used in + // combination with remote execution, we don't want to spend too + // much effort persisting locally created output files. Those + // may easily exceed the size of the state file, making + // finalization of builds expensive. + // + // Most of the time people still enable remote caching for + // locally running actions, or have Build Event Streams enabled. + // In that case there is a fair chance that the file is present + // in the CAS anyway. + // + // In case we have a cached digest for the file available, let's + // generate an entry for it in the persistent state file. This + // means that after a restart, the file is silently converted to + // a CAS-backed file. If it turns out this assumption is + // incorrect, StartBuild() will clean up the file for us. 
+	if cachedDigest := f.getCachedDigest(); cachedDigest != digest.BadDigest {
+		directory.Files = append(directory.Files, &remoteexecution.FileNode{
+			Name:         name.String(),
+			Digest:       cachedDigest.GetProto(),
+			IsExecutable: f.isExecutable,
+		})
+	}
+}
+
+func (f *fileBackedFile) Close() error {
+	f.release(1, true)
+	return nil
+}
+
+func (f *fileBackedFile) ReadAt(b []byte, off int64) (int, error) {
+	f.lock.Lock()
+	defer f.lock.Unlock()
+
+	return f.file.ReadAt(b, off)
+}
+
+func (f *fileBackedFile) VirtualAllocate(off, size uint64) Status {
+	f.lockMutatingData()
+	defer f.lock.Unlock()
+
+	if end := uint64(off) + uint64(size); f.size < end {
+		if s := f.virtualTruncate(end); s != StatusOK {
+			return s
+		}
+	}
+	return StatusOK
+}
+
+// virtualGetAttributesUnlocked gets file attributes that can be
+// obtained without picking up any locks.
+func (f *fileBackedFile) virtualGetAttributesUnlocked(attributes *Attributes) {
+	attributes.SetFileType(filesystem.FileTypeRegularFile)
+}
+
+// virtualGetAttributesLocked gets file attributes that can only be
+// obtained while picking up the file's lock.
+func (f *fileBackedFile) virtualGetAttributesLocked(attributes *Attributes) {
+	attributes.SetChangeID(f.changeID)
+	permissions := PermissionsRead | PermissionsWrite
+	if f.isExecutable {
+		permissions |= PermissionsExecute
+	}
+	attributes.SetPermissions(permissions)
+	attributes.SetSizeBytes(f.size)
+}
+
+func (f *fileBackedFile) VirtualGetAttributes(ctx context.Context, requested AttributesMask, attributes *Attributes) {
+	// Only pick up the file's lock when the caller requests
+	// attributes that require locking.
+ f.virtualGetAttributesUnlocked(attributes) + if requested&(AttributesMaskChangeID|AttributesMaskPermissions|AttributesMaskSizeBytes) != 0 { + f.lock.RLock() + f.virtualGetAttributesLocked(attributes) + f.lock.RUnlock() + } +} + +func (f *fileBackedFile) VirtualSeek(offset uint64, regionType filesystem.RegionType) (*uint64, Status) { + f.lock.Lock() + if offset >= f.size { + f.lock.Unlock() + return nil, StatusErrNXIO + } + off, err := f.file.GetNextRegionOffset(int64(offset), regionType) + f.lock.Unlock() + if err == io.EOF { + // NFSv4's SEEK operation with NFS4_CONTENT_DATA differs + // from lseek(). If there is a hole at the end of the + // file, we should return success with sr_eof set, + // instead of failing with ENXIO. + return nil, StatusOK + } else if err != nil { + f.errorLogger.Log(util.StatusWrapf(err, "Failed to get next region offset at offset %d", offset)) + return nil, StatusErrIO + } + result := uint64(off) + return &result, StatusOK +} + +func (f *fileBackedFile) VirtualOpenSelf(ctx context.Context, shareAccess ShareMask, options *OpenExistingOptions, requested AttributesMask, attributes *Attributes) Status { + f.lock.Lock() + defer f.lock.Unlock() + + if f.referenceCount == 0 { + return StatusErrStale + } + + // Handling of O_TRUNC. 
+ if options.Truncate { + if s := f.virtualTruncate(0); s != StatusOK { + return s + } + } + + f.referenceCount += shareAccess.Count() + f.virtualGetAttributesUnlocked(attributes) + f.virtualGetAttributesLocked(attributes) + return StatusOK +} + +func (f *fileBackedFile) VirtualRead(buf []byte, off uint64) (int, bool, Status) { + f.lock.Lock() + defer f.lock.Unlock() + + buf, eof := BoundReadToFileSize(buf, off, f.size) + if len(buf) > 0 { + if n, err := f.file.ReadAt(buf, int64(off)); n != len(buf) { + f.errorLogger.Log(util.StatusWrapf(err, "Failed to read from file at offset %d", off)) + return 0, false, StatusErrIO + } + } + return len(buf), eof, StatusOK +} + +func (f *fileBackedFile) VirtualReadlink(ctx context.Context) ([]byte, Status) { + return nil, StatusErrInval +} + +func (f *fileBackedFile) VirtualClose(shareAccess ShareMask) { + f.release(shareAccess.Count(), false) +} + +func (f *fileBackedFile) virtualTruncate(size uint64) Status { + if err := f.file.Truncate(int64(size)); err != nil { + f.errorLogger.Log(util.StatusWrapf(err, "Failed to truncate file to length %d", size)) + return StatusErrIO + } + f.cachedDigest = digest.BadDigest + f.size = size + f.changeID++ + return StatusOK +} + +func (f *fileBackedFile) VirtualSetAttributes(ctx context.Context, in *Attributes, requested AttributesMask, out *Attributes) Status { + sizeBytes, hasSizeBytes := in.GetSizeBytes() + if hasSizeBytes { + f.lockMutatingData() + } else { + f.lock.Lock() + } + defer f.lock.Unlock() + + if hasSizeBytes { + if s := f.virtualTruncate(sizeBytes); s != StatusOK { + return s + } + } + if permissions, ok := in.GetPermissions(); ok { + f.isExecutable = (permissions & PermissionsExecute) != 0 + f.changeID++ + } + + f.virtualGetAttributesUnlocked(out) + f.virtualGetAttributesLocked(out) + return StatusOK +} + +func (f *fileBackedFile) VirtualWrite(buf []byte, offset uint64) (int, Status) { + f.lockMutatingData() + defer f.lock.Unlock() + + nWritten, err := f.file.WriteAt(buf, 
int64(offset)) + if nWritten > 0 { + f.cachedDigest = digest.BadDigest + if end := offset + uint64(nWritten); f.size < end { + f.size = end + } + f.changeID++ + } + if err != nil { + f.errorLogger.Log(util.StatusWrapf(err, "Failed to write to file at offset %d", offset)) + return nWritten, StatusErrIO + } + return nWritten, StatusOK +} diff --git a/pkg/filesystem/virtual/pool_backed_file_allocator_test.go b/pkg/filesystem/virtual/pool_backed_file_allocator_test.go new file mode 100644 index 0000000..1adea38 --- /dev/null +++ b/pkg/filesystem/virtual/pool_backed_file_allocator_test.go @@ -0,0 +1,584 @@ +package virtual_test + +import ( + "context" + "io" + "syscall" + "testing" + + remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + "github.com/buildbarn/bb-remote-execution/internal/mock" + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/virtual" + "github.com/buildbarn/bb-remote-execution/pkg/proto/outputpathpersistency" + "github.com/buildbarn/bb-remote-execution/pkg/proto/remoteoutputservice" + "github.com/buildbarn/bb-storage/pkg/blobstore/buffer" + "github.com/buildbarn/bb-storage/pkg/digest" + "github.com/buildbarn/bb-storage/pkg/filesystem" + "github.com/buildbarn/bb-storage/pkg/filesystem/path" + "github.com/buildbarn/bb-storage/pkg/testutil" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func TestPoolBackedFileAllocatorGetOutputServiceFileStatus(t *testing.T) { + ctrl := gomock.NewController(t) + + // Create a file and initialize it with some contents. + pool := mock.NewMockFilePool(ctrl) + underlyingFile := mock.NewMockFileReadWriter(ctrl) + pool.EXPECT().NewFile().Return(underlyingFile, nil) + errorLogger := mock.NewMockErrorLogger(ctrl) + + f, s := virtual.NewPoolBackedFileAllocator(pool, errorLogger). 
+ NewFile(false, 0, virtual.ShareMaskRead|virtual.ShareMaskWrite) + require.Equal(t, virtual.StatusOK, s) + + underlyingFile.EXPECT().WriteAt([]byte("Hello"), int64(0)).Return(5, nil) + n, s := f.VirtualWrite([]byte("Hello"), 0) + require.Equal(t, virtual.StatusOK, s) + require.Equal(t, 5, n) + + // When the provided digest.Function is nil, we should only + // report that this is a file. + fileStatus, err := f.GetOutputServiceFileStatus(nil) + require.NoError(t, err) + testutil.RequireEqualProto(t, &remoteoutputservice.FileStatus{ + FileType: &remoteoutputservice.FileStatus_File_{ + File: &remoteoutputservice.FileStatus_File{}, + }, + }, fileStatus) + + // When the provided digest.Function is set, the digest of the + // file should be computed on demand. This is more efficient + // than letting the build client read the file through the FUSE + // file system. + underlyingFile.EXPECT().ReadAt(gomock.Any(), int64(0)).DoAndReturn( + func(p []byte, off int64) (int, error) { + return copy(p, "Hello"), io.EOF + }) + digestFunction1 := digest.MustNewFunction("Hello", remoteexecution.DigestFunction_MD5) + fileStatus, err = f.GetOutputServiceFileStatus(&digestFunction1) + require.NoError(t, err) + testutil.RequireEqualProto(t, &remoteoutputservice.FileStatus{ + FileType: &remoteoutputservice.FileStatus_File_{ + File: &remoteoutputservice.FileStatus_File{ + Digest: &remoteexecution.Digest{ + Hash: "8b1a9953c4611296a827abf8c47804d7", + SizeBytes: 5, + }, + }, + }, + }, fileStatus) + + // Calling the function a second time should not generate any + // reads against the file, as the contents of the file have not + // changed. A cached value should be returned. 
+ fileStatus, err = f.GetOutputServiceFileStatus(&digestFunction1) + require.NoError(t, err) + testutil.RequireEqualProto(t, &remoteoutputservice.FileStatus{ + FileType: &remoteoutputservice.FileStatus_File_{ + File: &remoteoutputservice.FileStatus_File{ + Digest: &remoteexecution.Digest{ + Hash: "8b1a9953c4611296a827abf8c47804d7", + SizeBytes: 5, + }, + }, + }, + }, fileStatus) + + // Change the file's contents to invalidate the cached digest. A + // successive call to GetOutputServiceFileStatus() should + // recompute the digest. + underlyingFile.EXPECT().WriteAt([]byte(" world"), int64(5)).Return(6, nil) + n, s = f.VirtualWrite([]byte(" world"), 5) + require.Equal(t, virtual.StatusOK, s) + require.Equal(t, 6, n) + + underlyingFile.EXPECT().ReadAt(gomock.Any(), int64(0)).DoAndReturn( + func(p []byte, off int64) (int, error) { + return copy(p, "Hello world"), io.EOF + }) + fileStatus, err = f.GetOutputServiceFileStatus(&digestFunction1) + require.NoError(t, err) + testutil.RequireEqualProto(t, &remoteoutputservice.FileStatus{ + FileType: &remoteoutputservice.FileStatus_File_{ + File: &remoteoutputservice.FileStatus_File{ + Digest: &remoteexecution.Digest{ + Hash: "3e25960a79dbc69b674cd4ec67a72c62", + SizeBytes: 11, + }, + }, + }, + }, fileStatus) + + // The cached digest should be ignored in case the instance name + // or hashing function is changed. 
+ underlyingFile.EXPECT().ReadAt(gomock.Any(), int64(0)).DoAndReturn( + func(p []byte, off int64) (int, error) { + return copy(p, "Hello world"), io.EOF + }) + digestFunction2 := digest.MustNewFunction("Hello", remoteexecution.DigestFunction_SHA256) + fileStatus, err = f.GetOutputServiceFileStatus(&digestFunction2) + require.NoError(t, err) + testutil.RequireEqualProto(t, &remoteoutputservice.FileStatus{ + FileType: &remoteoutputservice.FileStatus_File_{ + File: &remoteoutputservice.FileStatus_File{ + Digest: &remoteexecution.Digest{ + Hash: "64ec88ca00b268e5ba1a35678a1b5316d212f4f366b2477232534a8aeca37f3c", + SizeBytes: 11, + }, + }, + }, + }, fileStatus) + + // Once a cached digest is present, it should also become part + // of output path persistent state file. + var directory outputpathpersistency.Directory + f.AppendOutputPathPersistencyDirectoryNode(&directory, path.MustNewComponent("hello.txt")) + testutil.RequireEqualProto(t, &outputpathpersistency.Directory{ + Files: []*remoteexecution.FileNode{ + { + Name: "hello.txt", + Digest: &remoteexecution.Digest{ + Hash: "64ec88ca00b268e5ba1a35678a1b5316d212f4f366b2477232534a8aeca37f3c", + SizeBytes: 11, + }, + }, + }, + }, &directory) + + underlyingFile.EXPECT().Close() + f.Unlink() + f.VirtualClose(virtual.ShareMaskRead | virtual.ShareMaskWrite) +} + +// For plain lseek() operations such as SEEK_SET, SEEK_CUR and SEEK_END, +// the kernel never calls into userspace, as the kernel is capable of +// handling those requests directly. However, For SEEK_HOLE and +// SEEK_DATA, the kernel does create calls, as the kernel is unaware of +// which parts of the file contain holes. 
+func TestPoolBackedFileAllocatorVirtualSeek(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + pool := mock.NewMockFilePool(ctrl) + underlyingFile := mock.NewMockFileReadWriter(ctrl) + pool.EXPECT().NewFile().Return(underlyingFile, nil) + errorLogger := mock.NewMockErrorLogger(ctrl) + + f, s := virtual.NewPoolBackedFileAllocator(pool, errorLogger). + NewFile(false, 0, virtual.ShareMaskRead|virtual.ShareMaskWrite) + require.Equal(t, virtual.StatusOK, s) + + // Grow the file. + underlyingFile.EXPECT().Truncate(int64(1000)) + + require.Equal(t, virtual.StatusOK, f.VirtualSetAttributes( + ctx, + (&virtual.Attributes{}).SetSizeBytes(1000), + 0, + &virtual.Attributes{})) + + t.Run("Failure", func(t *testing.T) { + // I/O errors on the file should be captured. + underlyingFile.EXPECT().GetNextRegionOffset(int64(123), filesystem.Data). + Return(int64(0), status.Error(codes.Internal, "Disk on fire")) + errorLogger.EXPECT().Log(testutil.EqStatus(t, status.Error(codes.Internal, "Failed to get next region offset at offset 123: Disk on fire"))) + + _, s := f.VirtualSeek(123, filesystem.Data) + require.Equal(t, virtual.StatusErrIO, s) + }) + + t.Run("AtEndOfFile", func(t *testing.T) { + // End-of-file errors should be converted to ENXIO, as + // described in the lseek() manual page. + _, s := f.VirtualSeek(1000, filesystem.Hole) + require.Equal(t, virtual.StatusErrNXIO, s) + }) + + t.Run("PastEndOfFile", func(t *testing.T) { + _, s := f.VirtualSeek(1001, filesystem.Hole) + require.Equal(t, virtual.StatusErrNXIO, s) + }) + + t.Run("SuccessData", func(t *testing.T) { + underlyingFile.EXPECT().GetNextRegionOffset(int64(789), filesystem.Data). + Return(int64(790), nil) + + offset, s := f.VirtualSeek(789, filesystem.Data) + require.Equal(t, virtual.StatusOK, s) + require.Equal(t, uint64(790), *offset) + }) + + t.Run("SuccessFinalHole", func(t *testing.T) { + underlyingFile.EXPECT().GetNextRegionOffset(int64(912), filesystem.Data). 
+ Return(int64(0), io.EOF) + + offset, s := f.VirtualSeek(912, filesystem.Data) + require.Equal(t, virtual.StatusOK, s) + require.Nil(t, offset) + }) +} + +// Removal of files through the filesystem.Directory interface will not +// update the name cache of go-virtual. References to inodes may continue +// to exist after inodes are removed from the directory hierarchy. This +// could cause go-fuse to call Open() on a file that is already closed. +// Nothing bad should happen when this occurs. +func TestPoolBackedFileAllocatorVirtualOpenSelfStaleAfterUnlink(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + pool := mock.NewMockFilePool(ctrl) + underlyingFile := mock.NewMockFileReadWriter(ctrl) + pool.EXPECT().NewFile().Return(underlyingFile, nil) + underlyingFile.EXPECT().Close() + errorLogger := mock.NewMockErrorLogger(ctrl) + + f, s := virtual.NewPoolBackedFileAllocator(pool, errorLogger). + NewFile(false, 0, virtual.ShareMaskWrite) + require.Equal(t, virtual.StatusOK, s) + + f.VirtualClose(virtual.ShareMaskWrite) + f.Unlink() + + require.Equal( + t, + virtual.StatusErrStale, + f.VirtualOpenSelf(ctx, virtual.ShareMaskRead, &virtual.OpenExistingOptions{}, 0, &virtual.Attributes{})) +} + +// This test is the same as the above, except that the file reference +// count drops from one to zero due to Release() (i.e., file descriptor +// closure), as opposed to Unlink(). +func TestPoolBackedFileAllocatorVirtualOpenSelfStaleAfterClose(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + pool := mock.NewMockFilePool(ctrl) + underlyingFile := mock.NewMockFileReadWriter(ctrl) + pool.EXPECT().NewFile().Return(underlyingFile, nil) + underlyingFile.EXPECT().Close() + errorLogger := mock.NewMockErrorLogger(ctrl) + + f, s := virtual.NewPoolBackedFileAllocator(pool, errorLogger). 
+ NewFile(false, 0, virtual.ShareMaskWrite) + require.Equal(t, virtual.StatusOK, s) + + f.Unlink() + f.VirtualClose(virtual.ShareMaskWrite) + + require.Equal( + t, + virtual.StatusErrStale, + f.VirtualOpenSelf(ctx, virtual.ShareMaskRead, &virtual.OpenExistingOptions{}, 0, &virtual.Attributes{})) +} + +func TestPoolBackedFileAllocatorVirtualRead(t *testing.T) { + ctrl := gomock.NewController(t) + + pool := mock.NewMockFilePool(ctrl) + underlyingFile := mock.NewMockFileReadWriter(ctrl) + pool.EXPECT().NewFile().Return(underlyingFile, nil) + errorLogger := mock.NewMockErrorLogger(ctrl) + + f, s := virtual.NewPoolBackedFileAllocator(pool, errorLogger). + NewFile(false, 0, virtual.ShareMaskRead|virtual.ShareMaskWrite) + require.Equal(t, virtual.StatusOK, s) + + // Let initial tests assume an empty file. + t.Run("EmptyFileAtStart", func(t *testing.T) { + var p [10]byte + n, eof, s := f.VirtualRead(p[:], 0) + require.Equal(t, virtual.StatusOK, s) + require.Equal(t, 0, n) + require.True(t, eof) + }) + + t.Run("EmptyFilePastEnd", func(t *testing.T) { + var p [10]byte + n, eof, s := f.VirtualRead(p[:], 10) + require.Equal(t, virtual.StatusOK, s) + require.Equal(t, 0, n) + require.True(t, eof) + }) + + // Let the remainder of the tests assume a non-empty file. + underlyingFile.EXPECT().WriteAt([]byte("Hello"), int64(0)).Return(5, nil) + n, s := f.VirtualWrite([]byte("Hello"), 0) + require.Equal(t, virtual.StatusOK, s) + require.Equal(t, 5, n) + + t.Run("IOFailure", func(t *testing.T) { + // Read errors should be converted to EIO errors. In + // order to capture error details, the underlying error + // is forwarded to an error logger. + underlyingFile.EXPECT().ReadAt(gomock.Len(3), int64(2)). 
+ Return(0, status.Error(codes.Unavailable, "Storage backends offline")) + errorLogger.EXPECT().Log(testutil.EqStatus(t, status.Error(codes.Unavailable, "Failed to read from file at offset 2: Storage backends offline"))) + + var p [10]byte + _, _, s := f.VirtualRead(p[:], 2) + require.Equal(t, virtual.StatusErrIO, s) + }) + + t.Run("EOF", func(t *testing.T) { + // Read EOF errors should not be converted to EIO + // errors. They should simply be translated to + // go_fuse.OK, as POSIX read() returns zero to indicate + // end-of-file. + underlyingFile.EXPECT().ReadAt(gomock.Len(3), int64(2)).DoAndReturn( + func(p []byte, off int64) (int, error) { + return copy(p, "llo"), io.EOF + }) + + var p [10]byte + n, eof, s := f.VirtualRead(p[:], 2) + require.Equal(t, virtual.StatusOK, s) + require.Equal(t, 3, n) + require.True(t, eof) + require.Equal(t, []byte("llo"), p[:3]) + }) + + underlyingFile.EXPECT().Close() + + f.VirtualClose(virtual.ShareMaskRead | virtual.ShareMaskWrite) + f.Unlink() +} + +// Truncation errors should be converted to EIO errors. In order to +// capture error details, the underlying error is forwarded to an error +// logger. +func TestPoolBackedFileAllocatorFUSETruncateFailure(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + pool := mock.NewMockFilePool(ctrl) + underlyingFile := mock.NewMockFileReadWriter(ctrl) + pool.EXPECT().NewFile().Return(underlyingFile, nil) + underlyingFile.EXPECT().Truncate(int64(42)).Return(status.Error(codes.Unavailable, "Storage backends offline")) + underlyingFile.EXPECT().Close() + + errorLogger := mock.NewMockErrorLogger(ctrl) + errorLogger.EXPECT().Log(testutil.EqStatus(t, status.Error(codes.Unavailable, "Failed to truncate file to length 42: Storage backends offline"))) + + f, s := virtual.NewPoolBackedFileAllocator(pool, errorLogger). 
+ NewFile(false, 0, virtual.ShareMaskWrite) + require.Equal(t, virtual.StatusOK, s) + + require.Equal(t, virtual.StatusErrIO, f.VirtualSetAttributes( + ctx, + (&virtual.Attributes{}).SetSizeBytes(42), + 0, + &virtual.Attributes{})) + f.VirtualClose(virtual.ShareMaskWrite) + f.Unlink() +} + +// Write errors should be converted to EIO errors. In order to capture +// error details, the underlying error is forwarded to an error logger. +func TestPoolBackedFileAllocatorVirtualWriteFailure(t *testing.T) { + ctrl := gomock.NewController(t) + + pool := mock.NewMockFilePool(ctrl) + underlyingFile := mock.NewMockFileReadWriter(ctrl) + pool.EXPECT().NewFile().Return(underlyingFile, nil) + var p [10]byte + underlyingFile.EXPECT().WriteAt(p[:], int64(42)).Return(0, status.Error(codes.Unavailable, "Storage backends offline")) + underlyingFile.EXPECT().Close() + + errorLogger := mock.NewMockErrorLogger(ctrl) + errorLogger.EXPECT().Log(testutil.EqStatus(t, status.Error(codes.Unavailable, "Failed to write to file at offset 42: Storage backends offline"))) + + f, s := virtual.NewPoolBackedFileAllocator(pool, errorLogger). + NewFile(false, 0, virtual.ShareMaskWrite) + require.Equal(t, virtual.StatusOK, s) + _, s = f.VirtualWrite(p[:], 42) + require.Equal(t, virtual.StatusErrIO, s) + f.VirtualClose(virtual.ShareMaskWrite) + f.Unlink() +} + +func TestPoolBackedFileAllocatorFUSEUploadFile(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + // Create a file backed by a FilePool. + pool := mock.NewMockFilePool(ctrl) + underlyingFile := mock.NewMockFileReadWriter(ctrl) + pool.EXPECT().NewFile().Return(underlyingFile, nil) + errorLogger := mock.NewMockErrorLogger(ctrl) + + f, s := virtual.NewPoolBackedFileAllocator(pool, errorLogger). + NewFile(false, 0, virtual.ShareMaskWrite) + require.Equal(t, virtual.StatusOK, s) + + // Initialize the file with the contents "Hello". 
+ underlyingFile.EXPECT().WriteAt([]byte("Hello"), int64(0)).Return(5, nil) + n, s := f.VirtualWrite([]byte("Hello"), 0) + require.Equal(t, virtual.StatusOK, s) + require.Equal(t, 5, n) + + fileDigest := digest.MustNewDigest("example", remoteexecution.DigestFunction_MD5, "8b1a9953c4611296a827abf8c47804d7", 5) + digestFunction := fileDigest.GetDigestFunction() + + t.Run("DigestComputationIOFailure", func(t *testing.T) { + underlyingFile.EXPECT().ReadAt(gomock.Any(), int64(0)).Return(0, syscall.EIO) + contentAddressableStorage := mock.NewMockBlobAccess(ctrl) + + _, err := f.UploadFile(ctx, contentAddressableStorage, digestFunction) + testutil.RequireEqualStatus(t, status.Error(codes.Internal, "Failed to compute file digest: input/output error"), err) + }) + + t.Run("UploadFailure", func(t *testing.T) { + underlyingFile.EXPECT().ReadAt(gomock.Any(), int64(0)).DoAndReturn(func(p []byte, off int64) (int, error) { + copy(p, "Hello") + return 5, io.EOF + }) + contentAddressableStorage := mock.NewMockBlobAccess(ctrl) + contentAddressableStorage.EXPECT().Put(ctx, fileDigest, gomock.Any()). + DoAndReturn(func(ctx context.Context, digest digest.Digest, b buffer.Buffer) error { + b.Discard() + return status.Error(codes.Internal, "Server on fire") + }) + + _, err := f.UploadFile(ctx, contentAddressableStorage, digestFunction) + testutil.RequireEqualStatus(t, status.Error(codes.Internal, "Failed to upload file: Server on fire"), err) + }) + + t.Run("Success", func(t *testing.T) { + underlyingFile.EXPECT().ReadAt(gomock.Any(), int64(0)).DoAndReturn(func(p []byte, off int64) (int, error) { + copy(p, "Hello") + return 5, io.EOF + }) + contentAddressableStorage := mock.NewMockBlobAccess(ctrl) + contentAddressableStorage.EXPECT().Put(ctx, fileDigest, gomock.Any()). + DoAndReturn(func(ctx context.Context, digest digest.Digest, b buffer.Buffer) error { + // As long as we haven't completely read + // the file, any operation that modifies + // the file's contents should block. 
+ // Tests for affected operations below. + a1 := make(chan struct{}) + go func() { + require.Equal(t, virtual.StatusOK, f.VirtualAllocate(1, 1)) + close(a1) + }() + + a2 := make(chan struct{}) + go func() { + require.Equal(t, virtual.StatusOK, f.VirtualSetAttributes( + ctx, + (&virtual.Attributes{}).SetSizeBytes(123), + 0, + &virtual.Attributes{})) + close(a2) + }() + + a3 := make(chan struct{}) + go func() { + n, s := f.VirtualWrite([]byte("Foo"), 123) + require.Equal(t, virtual.StatusOK, s) + require.Equal(t, 3, n) + close(a3) + }() + + // Even though VirtualSetAttributes() + // with a size (truncate()) should + // block, it is perfectly fine to change + // the file's permissions. + require.Equal(t, virtual.StatusOK, f.VirtualSetAttributes( + ctx, + (&virtual.Attributes{}).SetPermissions(virtual.PermissionsRead|virtual.PermissionsWrite|virtual.PermissionsExecute), + 0, + &virtual.Attributes{})) + + underlyingFile.EXPECT().Truncate(int64(123)).Times(1) + underlyingFile.EXPECT().WriteAt([]byte("Foo"), gomock.Any()).Return(3, nil) + + // Complete reading the file. + data, err := b.ToByteSlice(10) + require.NoError(t, err) + require.Equal(t, []byte("Hello"), data) + + // All mutable operations should now be + // able to complete. + <-a1 + <-a2 + <-a3 + return nil + }) + + uploadedDigest, err := f.UploadFile(ctx, contentAddressableStorage, digestFunction) + require.NoError(t, err) + require.Equal(t, fileDigest, uploadedDigest) + }) + + underlyingFile.EXPECT().Close() + f.VirtualClose(virtual.ShareMaskWrite) + f.Unlink() + + t.Run("Stale", func(t *testing.T) { + contentAddressableStorage := mock.NewMockBlobAccess(ctrl) + + // Uploading a file that has already been released + // should fail. It should not cause accidental access to + // the closed file handle. 
+ _, err := f.UploadFile(ctx, contentAddressableStorage, digestFunction) + testutil.RequireEqualStatus(t, status.Error(codes.NotFound, "File was unlinked before uploading could start"), err) + }) +} + +func TestPoolBackedFileAllocatorVirtualClose(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + // Create a new file. + pool := mock.NewMockFilePool(ctrl) + underlyingFile := mock.NewMockFileReadWriter(ctrl) + pool.EXPECT().NewFile().Return(underlyingFile, nil) + errorLogger := mock.NewMockErrorLogger(ctrl) + + f, s := virtual.NewPoolBackedFileAllocator(pool, errorLogger). + NewFile(false, 0, virtual.ShareMaskWrite) + require.Equal(t, virtual.StatusOK, s) + + // Initially it should be opened exactly once. Open it a couple + // more times. + for i := 0; i < 10; i++ { + require.Equal( + t, + virtual.StatusOK, + f.VirtualOpenSelf( + ctx, + virtual.ShareMaskRead, + &virtual.OpenExistingOptions{}, + 0, + &virtual.Attributes{})) + } + for i := 0; i < 10; i++ { + require.Equal( + t, + virtual.StatusOK, + f.VirtualOpenSelf( + ctx, + virtual.ShareMaskRead|virtual.ShareMaskWrite, + &virtual.OpenExistingOptions{}, + 0, + &virtual.Attributes{})) + } + + // Unlinking the file should not cause the underlying file to be + // released, as it's opened. + f.Unlink() + + // The underlying file should be released only when the close + // count matches the number of times the file was opened. 
+ for i := 0; i < 10; i++ { + f.VirtualClose(virtual.ShareMaskRead) + } + for i := 0; i < 10; i++ { + f.VirtualClose(virtual.ShareMaskRead | virtual.ShareMaskWrite) + } + for i := 0; i < 100; i++ { + f.VirtualClose(0) + } + underlyingFile.EXPECT().Close() + f.VirtualClose(virtual.ShareMaskWrite) +} diff --git a/pkg/filesystem/virtual/prepopulated_directory.go b/pkg/filesystem/virtual/prepopulated_directory.go new file mode 100644 index 0000000..9e74f0c --- /dev/null +++ b/pkg/filesystem/virtual/prepopulated_directory.go @@ -0,0 +1,116 @@ +package virtual + +import ( + "github.com/buildbarn/bb-storage/pkg/filesystem" + "github.com/buildbarn/bb-storage/pkg/filesystem/path" + "github.com/buildbarn/bb-storage/pkg/util" +) + +// ChildRemover is a callback that is provided to ChildFilter to remove +// the provided file or contents of the provided directory. +type ChildRemover func() error + +// ChildFilter is a callback that is invoked by +// PrepopulatedDirectory.FilterChildren() for each of the children +// underneath the current directory hierarchy. +// +// For each of the children, an InitialNode object is provided that +// describes the contents of that file or directory. In addition to +// that, a callback is provided that can remove the file or the contents +// of the directory. This callback may be invoked synchronously or +// asynchronously, potentially after FilterChildren() has completed. +// +// The boolean return value of this function signals whether traversal +// should continue. When false, traversal will stop immediately. +type ChildFilter func(node InitialNode, remove ChildRemover) bool + +// DirectoryPrepopulatedDirEntry contains information about a directory +// node that is stored in a PrepopulatedDirectory. +type DirectoryPrepopulatedDirEntry struct { + Child PrepopulatedDirectory + Name path.Component +} + +// LeafPrepopulatedDirEntry contains information about a leaf node that +// is stored in a PrepopulatedDirectory. 
+type LeafPrepopulatedDirEntry struct { + Child NativeLeaf + Name path.Component +} + +// PrepopulatedDirectoryChild is either a PrepopulatedDirectory or a +// NativeLeaf, as returned by PrepopulatedDirectory.LookupChild(). +type PrepopulatedDirectoryChild = Child[PrepopulatedDirectory, NativeLeaf, Node] + +// PrepopulatedDirectory is a Directory that is writable and can contain +// files of type NativeLeaf. +// +// By making use of InitialContentsFetcher, it is possible to create +// subdirectories that are prepopulated with files and directories. +// These will be instantiated only when accessed. This feature is used +// by bb_worker to lazily load the input root while a build action is +// being executed. Similarly, it is used by bb_clientd to lazily +// instantiate the contents of a Tree object. +type PrepopulatedDirectory interface { + Directory + + // LookupChild() looks up a file or directory contained in a + // PrepopulatedDirectory. This method is similar to + // VirtualLookup(), except that it returns the native types + // managed by PrepopulatedDirectory. + // + // TODO: Can't use PrepopulatedDirectoryChild in the return type + // here, due to https://github.com/golang/go/issues/50259. + LookupChild(name path.Component) (Child[PrepopulatedDirectory, NativeLeaf, Node], error) + // LookupAllChildren() looks up all files and directories + // contained in a PrepopulatedDirectory. This method is similar + // to VirtualReadDir(), except that it returns the native types + // managed by PrepopulatedDirectory. Entries are returned in + // alphabetical order. + LookupAllChildren() ([]DirectoryPrepopulatedDirEntry, []LeafPrepopulatedDirEntry, error) + // CreateChildren() creates one or more files or directories in + // the current directory. + // + // If the overwrite flag is set, existing files and directories + // will be replaced. If the overwrite flag is not set, the call + // will fail if one or more entries already exist. 
No changes + // will be made to the directory in that case. + CreateChildren(children map[path.Component]InitialNode, overwrite bool) error + // CreateAndEnterPrepopulatedDirectory() is similar to + // LookupChild(), except that it creates the specified directory + // if it does not yet exist. If a file already exists, it will + // be removed. + CreateAndEnterPrepopulatedDirectory(name path.Component) (PrepopulatedDirectory, error) + // RemoveAllChildren() removes all files and directories + // contained in the current directory. + // + // This method is identical to the one filesystem.Directory, + // except that the forbidNewChildren flag may be set to + // permanently mark the directory in such a way that no further + // files may be added. When called on the root directory, all + // resources associated with the directory hierarchy will be + // released. + RemoveAllChildren(forbidNewChildren bool) error + // InstallHooks sets up hooks for creating files and logging + // errors that occur under the directory subtree. + // + // This function is identical to BuildDirectory.InstallHooks(), + // except that it uses the FUSE specific FileAllocator instead + // of FilePool. + InstallHooks(fileAllocator FileAllocator, errorLogger util.ErrorLogger) + // FilterChildren() can be used to traverse over all of the + // InitialContentsFetcher and NativeLeaf objects stored in this + // directory hierarchy. For each of the objects, a callback is + // provided that can be used to remove the file or the contents + // of the directory associated with this object. + // + // This function can be used by bb_clientd to purge files or + // directories that are no longer present in the Content + // Addressable Storage at the start of the build. + FilterChildren(childFilter ChildFilter) error + + // Functions inherited from filesystem.Directory. 
+ ReadDir() ([]filesystem.FileInfo, error) + RemoveAll(name path.Component) error + Remove(name path.Component) error +} diff --git a/pkg/filesystem/virtual/read_only_directory.go b/pkg/filesystem/virtual/read_only_directory.go new file mode 100644 index 0000000..31ddc3a --- /dev/null +++ b/pkg/filesystem/virtual/read_only_directory.go @@ -0,0 +1,77 @@ +package virtual + +import ( + "context" + + "github.com/buildbarn/bb-storage/pkg/filesystem" + "github.com/buildbarn/bb-storage/pkg/filesystem/path" +) + +// ReadOnlyDirectory can be embedded into a Directory to disable all +// operations that mutate the directory contents. +type ReadOnlyDirectory struct{} + +// VirtualLink is an implementation of the link() system call that +// treats the target directory as being read-only. +func (ReadOnlyDirectory) VirtualLink(ctx context.Context, name path.Component, leaf Leaf, requested AttributesMask, out *Attributes) (ChangeInfo, Status) { + return ChangeInfo{}, StatusErrROFS +} + +// VirtualMkdir is an implementation of the mkdir() system call that +// treats the target directory as being read-only. +func (ReadOnlyDirectory) VirtualMkdir(name path.Component, requested AttributesMask, out *Attributes) (Directory, ChangeInfo, Status) { + return nil, ChangeInfo{}, StatusErrROFS +} + +// VirtualMknod is an implementation of the mknod() system call that +// treats the target directory as being read-only. +func (ReadOnlyDirectory) VirtualMknod(ctx context.Context, name path.Component, fileType filesystem.FileType, requested AttributesMask, out *Attributes) (Leaf, ChangeInfo, Status) { + return nil, ChangeInfo{}, StatusErrROFS +} + +// VirtualRename is an implementation of the rename() system call that +// treats the target directory as being read-only. 
+func (ReadOnlyDirectory) VirtualRename(oldName path.Component, newDirectory Directory, newName path.Component) (ChangeInfo, ChangeInfo, Status) {
+	return ChangeInfo{}, ChangeInfo{}, StatusErrROFS
+}
+
+// VirtualRemove is an implementation of the unlink() and rmdir() system
+// calls that treats the target directory as being read-only.
+func (ReadOnlyDirectory) VirtualRemove(name path.Component, removeDirectory, removeLeaf bool) (ChangeInfo, Status) {
+	return ChangeInfo{}, StatusErrROFS
+}
+
+// VirtualSetAttributes is an implementation of the chmod(),
+// utimensat(), etc. system calls that treats the target directory as
+// being read-only.
+func (ReadOnlyDirectory) VirtualSetAttributes(ctx context.Context, in *Attributes, requested AttributesMask, out *Attributes) Status {
+	return StatusErrROFS
+}
+
+// VirtualSymlink is an implementation of the symlink() system call that
+// treats the target directory as being read-only.
+func (ReadOnlyDirectory) VirtualSymlink(ctx context.Context, pointedTo []byte, linkName path.Component, requested AttributesMask, out *Attributes) (Leaf, ChangeInfo, Status) {
+	return nil, ChangeInfo{}, StatusErrROFS
+}
+
+// ReadOnlyDirectoryOpenChildWrongFileType is a helper function for
+// implementing Directory.VirtualOpenChild() for read-only directories.
+// It can be used to obtain return values in case the directory already
+// contains a file under a given name.
+func ReadOnlyDirectoryOpenChildWrongFileType(existingOptions *OpenExistingOptions, s Status) (Leaf, AttributesMask, ChangeInfo, Status) {
+	if existingOptions == nil {
+		return nil, 0, ChangeInfo{}, StatusErrExist
+	}
+	return nil, 0, ChangeInfo{}, s
+}
+
+// ReadOnlyDirectoryOpenChildDoesntExist is a helper function for
+// implementing Directory.VirtualOpenChild() for read-only directories.
+// It can be used to obtain return values in case the directory doesn't
+// contain any file under a given name.
+func ReadOnlyDirectoryOpenChildDoesntExist(createAttributes *Attributes) (Leaf, AttributesMask, ChangeInfo, Status) {
+	if createAttributes == nil {
+		return nil, 0, ChangeInfo{}, StatusErrNoEnt
+	}
+	return nil, 0, ChangeInfo{}, StatusErrROFS
+}
diff --git a/pkg/filesystem/virtual/resolvable_digest_handle_allocator.go b/pkg/filesystem/virtual/resolvable_digest_handle_allocator.go
new file mode 100644
index 0000000..f2e1655
--- /dev/null
+++ b/pkg/filesystem/virtual/resolvable_digest_handle_allocator.go
@@ -0,0 +1,59 @@
+package virtual
+
+import (
+	"bytes"
+	"io"
+
+	"github.com/buildbarn/bb-storage/pkg/digest"
+)
+
+// DigestHandleResolver is a handle resolver that needs to be
+// implemented by users of ResolvableDigestHandleAllocator. This
+// callback is responsible for looking up a file or directory that
+// corresponds to a given REv2 digest that was reobtained from a file
+// handle.
+type DigestHandleResolver func(blobDigest digest.Digest, remainder io.ByteReader) (DirectoryChild, Status)
+
+// ResolvableDigestHandleAllocator is a convenience type for the handle
+// allocator API, used for identifying objects by REv2 digest. It is
+// used by bb_clientd's "cas" directory to give unique file handles to
+// objects stored in the Content Addressable Storage.
+//
+// Because REv2 objects that include their instance name are too long to
+// fit in NFS file handles, this type uses a stateless allocator for the
+// instance name part. It uses a resolvable allocator only for the
+// object hash and size, as those are generally small enough to fit in
+// the file handle.
+//
+// This means that every time a new instance name is used, a resolver
+// gets leaked. This is acceptable for most use cases, as the instance
+// names in use tend to be limited.
+type ResolvableDigestHandleAllocator struct {
+	allocator StatelessHandleAllocator
+	resolver  DigestHandleResolver
+}
+
+// NewResolvableDigestHandleAllocator creates a new
+// ResolvableDigestHandleAllocator.
+func NewResolvableDigestHandleAllocator(allocation StatelessHandleAllocation, resolver DigestHandleResolver) *ResolvableDigestHandleAllocator {
+	return &ResolvableDigestHandleAllocator{
+		allocator: allocation.AsStatelessAllocator(),
+		resolver:  resolver,
+	}
+}
+
+// New creates a new handle allocation for an object with a given digest.
+func (a *ResolvableDigestHandleAllocator) New(blobDigest digest.Digest) ResolvableHandleAllocation {
+	instanceName := blobDigest.GetInstanceName()
+	resolver := a.resolver
+	return a.allocator.
+		New(ByteSliceID([]byte(instanceName.String()))).
+		AsResolvableAllocator(func(r io.ByteReader) (DirectoryChild, Status) {
+			blobDigest, err := instanceName.NewDigestFromCompactBinary(r)
+			if err != nil {
+				return DirectoryChild{}, StatusErrBadHandle
+			}
+			return resolver(blobDigest, r)
+		}).
+		New(bytes.NewBuffer(blobDigest.GetCompactBinary()))
+}
diff --git a/pkg/filesystem/virtual/resolvable_handle_allocating_cas_file_factory.go b/pkg/filesystem/virtual/resolvable_handle_allocating_cas_file_factory.go
new file mode 100644
index 0000000..f345850
--- /dev/null
+++ b/pkg/filesystem/virtual/resolvable_handle_allocating_cas_file_factory.go
@@ -0,0 +1,64 @@
+package virtual
+
+import (
+	"bytes"
+	"io"
+
+	"github.com/buildbarn/bb-storage/pkg/digest"
+)
+
+type resolvableHandleAllocatingCASFileFactory struct {
+	base      CASFileFactory
+	allocator *ResolvableDigestHandleAllocator
+}
+
+// NewResolvableHandleAllocatingCASFileFactory creates a decorator for
+// CASFileFactory that creates read-only files for files stored in the
+// Content Addressable Storage that have a stateless handle associated
+// with them.
+//
+// This decorator is intended to be used in places where CASFileFactory
+// is used to hand out files with an indefinite lifetime, such as
+// bb_clientd's "cas" directory. File handles will be larger, as the
+// hash, size and executable bit of the file will be stored in the
+// file handle.
+func NewResolvableHandleAllocatingCASFileFactory(base CASFileFactory, allocation StatelessHandleAllocation) CASFileFactory { + cff := &resolvableHandleAllocatingCASFileFactory{ + base: base, + } + cff.allocator = NewResolvableDigestHandleAllocator(allocation, cff.resolve) + return cff +} + +func (cff *resolvableHandleAllocatingCASFileFactory) LookupFile(blobDigest digest.Digest, isExecutable bool, fileReadMonitor FileReadMonitor) NativeLeaf { + if fileReadMonitor != nil { + panic("Cannot monitor reads against CAS files with a resolvable handle, as the monitor would get lost across lookups") + } + + var isExecutableField [1]byte + if isExecutable { + isExecutableField[0] = 1 + } + return cff.allocator. + New(blobDigest). + AsResolvableAllocator(func(r io.ByteReader) (DirectoryChild, Status) { + return cff.resolve(blobDigest, r) + }). + New(bytes.NewBuffer(isExecutableField[:])). + AsNativeLeaf(cff.base.LookupFile(blobDigest, isExecutable, nil)) +} + +func (cff *resolvableHandleAllocatingCASFileFactory) resolve(blobDigest digest.Digest, remainder io.ByteReader) (DirectoryChild, Status) { + isExecutable, err := remainder.ReadByte() + if err != nil { + return DirectoryChild{}, StatusErrBadHandle + } + switch isExecutable { + case 0: + return DirectoryChild{}.FromLeaf(cff.LookupFile(blobDigest, false, nil)), StatusOK + case 1: + return DirectoryChild{}.FromLeaf(cff.LookupFile(blobDigest, true, nil)), StatusOK + default: + return DirectoryChild{}, StatusErrBadHandle + } +} diff --git a/pkg/filesystem/virtual/sorter.go b/pkg/filesystem/virtual/sorter.go new file mode 100644 index 0000000..1fda06a --- /dev/null +++ b/pkg/filesystem/virtual/sorter.go @@ -0,0 +1,24 @@ +package virtual + +import ( + "sort" + + "github.com/buildbarn/bb-storage/pkg/random" +) + +// Sorter is a function type for a sorting algorithm. Its signature is +// identical to the sort.Sort() function. 
+//
+// This type is used by InMemoryPrepopulatedDirectory to make the policy
+// for sorting the results of VirtualReadDir() configurable. Depending
+// on the use case, it is desirable to use a deterministic algorithm
+// (e.g., alphabetic sorting) or a nondeterministic one (e.g., random
+// shuffling).
+type Sorter func(data sort.Interface)
+
+var _ Sorter = sort.Sort
+
+// Shuffle elements in a list using the Fisher-Yates algorithm.
+func Shuffle(data sort.Interface) {
+	random.FastThreadSafeGenerator.Shuffle(data.Len(), data.Swap)
+}
diff --git a/pkg/filesystem/virtual/special_file.go b/pkg/filesystem/virtual/special_file.go
new file mode 100644
index 0000000..0189ef7
--- /dev/null
+++ b/pkg/filesystem/virtual/special_file.go
@@ -0,0 +1,66 @@
+package virtual
+
+import (
+	"context"
+	"syscall"
+
+	"github.com/buildbarn/bb-remote-execution/pkg/proto/outputpathpersistency"
+	"github.com/buildbarn/bb-remote-execution/pkg/proto/remoteoutputservice"
+	"github.com/buildbarn/bb-storage/pkg/digest"
+	"github.com/buildbarn/bb-storage/pkg/filesystem"
+	"github.com/buildbarn/bb-storage/pkg/filesystem/path"
+)
+
+type specialFile struct {
+	placeholderFile
+
+	fileType     filesystem.FileType
+	deviceNumber *filesystem.DeviceNumber
+}
+
+// NewSpecialFile creates a node that may be used as a character device,
+// block device, FIFO or UNIX domain socket. Nodes of these types are
+// mere placeholders. The kernel is responsible for capturing calls to
+// open() and connect().
+func NewSpecialFile(fileType filesystem.FileType, deviceNumber *filesystem.DeviceNumber) NativeLeaf { + return &specialFile{ + fileType: fileType, + deviceNumber: deviceNumber, + } +} + +func (f *specialFile) Readlink() (string, error) { + return "", syscall.EINVAL +} + +func (f *specialFile) GetOutputServiceFileStatus(digestFunction *digest.Function) (*remoteoutputservice.FileStatus, error) { + return &remoteoutputservice.FileStatus{}, nil +} + +func (f *specialFile) AppendOutputPathPersistencyDirectoryNode(directory *outputpathpersistency.Directory, name path.Component) { + // UNIX sockets or FIFOs do not need to be preserved across + // restarts of bb_clientd, so there is no need to emit any + // persistency state. +} + +func (f *specialFile) VirtualGetAttributes(ctx context.Context, requested AttributesMask, attributes *Attributes) { + attributes.SetChangeID(0) + if f.deviceNumber != nil { + attributes.SetDeviceNumber(*f.deviceNumber) + } + attributes.SetFileType(f.fileType) + attributes.SetPermissions(PermissionsRead | PermissionsWrite) + attributes.SetSizeBytes(0) +} + +func (f *specialFile) VirtualReadlink(ctx context.Context) ([]byte, Status) { + return nil, StatusErrInval +} + +func (f *specialFile) VirtualSetAttributes(ctx context.Context, in *Attributes, requested AttributesMask, out *Attributes) Status { + if _, ok := in.GetSizeBytes(); ok { + return StatusErrInval + } + f.VirtualGetAttributes(ctx, requested, out) + return StatusOK +} diff --git a/pkg/filesystem/virtual/stateless_handle_allocating_cas_file_factory.go b/pkg/filesystem/virtual/stateless_handle_allocating_cas_file_factory.go new file mode 100644 index 0000000..db7a50c --- /dev/null +++ b/pkg/filesystem/virtual/stateless_handle_allocating_cas_file_factory.go @@ -0,0 +1,86 @@ +package virtual + +import ( + "io" + "sync" + + "github.com/buildbarn/bb-storage/pkg/digest" +) + +type statelessHandleAllocatingCASFileFactory struct { + base CASFileFactory + allocator StatelessHandleAllocator +} + 
+// NewStatelessHandleAllocatingCASFileFactory creates a decorator for +// CASFileFactory that creates read-only files for files stored in the +// Content Addressable Storage that have a stateless handle associated +// with them. +// +// This decorator is intended to be used in places where CASFileFactory +// is used to place files in mutable directories that properly track +// lifetimes of files. By making these files stateless, as opposed to +// resolvable, implementations of HandleAllocator may deduplicate +// multiple instances of the same file in the file system. +func NewStatelessHandleAllocatingCASFileFactory(base CASFileFactory, allocation StatelessHandleAllocation) CASFileFactory { + cff := &statelessHandleAllocatingCASFileFactory{ + base: base, + } + cff.allocator = allocation.AsStatelessAllocator() + return cff +} + +func (cff *statelessHandleAllocatingCASFileFactory) LookupFile(blobDigest digest.Digest, isExecutable bool, readMonitor FileReadMonitor) NativeLeaf { + leaf := cff.base.LookupFile(blobDigest, isExecutable, nil) + if readMonitor != nil { + leaf = &readMonitoringNativeLeaf{ + NativeLeaf: leaf, + monitor: readMonitor, + } + } + return cff.allocator. + New(&casFileID{ + blobDigest: blobDigest, + isExecutable: isExecutable, + }). + AsNativeLeaf(leaf) +} + +// casFileID is capable of converting the parameters that were used to +// construct a file through CASFileFactory to a unique identifier to be +// provided to StatelessHandleAllocator. 
+type casFileID struct { + blobDigest digest.Digest + isExecutable bool +} + +func (id *casFileID) WriteTo(w io.Writer) (nTotal int64, err error) { + n, _ := ByteSliceID(id.blobDigest.GetKey(digest.KeyWithInstance)).WriteTo(w) + nTotal += n + if id.isExecutable { + n, _ := w.Write([]byte{1}) + nTotal += int64(n) + } else { + n, _ := w.Write([]byte{0}) + nTotal += int64(n) + } + return +} + +// readMonitoringNativeLeaf is a decorator for NativeLeaf that reports +// read operations against files to a FileReadMonitor. +type readMonitoringNativeLeaf struct { + NativeLeaf + once sync.Once + monitor FileReadMonitor +} + +func (l *readMonitoringNativeLeaf) reportRead() { + l.monitor() + l.monitor = nil +} + +func (l *readMonitoringNativeLeaf) VirtualRead(buf []byte, off uint64) (int, bool, Status) { + l.once.Do(l.reportRead) + return l.NativeLeaf.VirtualRead(buf, off) +} diff --git a/pkg/filesystem/virtual/stateless_handle_allocating_cas_file_factory_test.go b/pkg/filesystem/virtual/stateless_handle_allocating_cas_file_factory_test.go new file mode 100644 index 0000000..0e1409e --- /dev/null +++ b/pkg/filesystem/virtual/stateless_handle_allocating_cas_file_factory_test.go @@ -0,0 +1,138 @@ +package virtual_test + +import ( + "bytes" + "io" + "testing" + + remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + "github.com/buildbarn/bb-remote-execution/internal/mock" + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/virtual" + "github.com/buildbarn/bb-storage/pkg/digest" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" +) + +func TestStatelessHandleAllocatingCASFileFactory(t *testing.T) { + ctrl := gomock.NewController(t) + + baseCASFileFactory := mock.NewMockCASFileFactory(ctrl) + handleAllocation := mock.NewMockStatelessHandleAllocation(ctrl) + handleAllocator := mock.NewMockStatelessHandleAllocator(ctrl) + handleAllocation.EXPECT().AsStatelessAllocator().Return(handleAllocator) + casFileFactory := 
virtual.NewStatelessHandleAllocatingCASFileFactory(baseCASFileFactory, handleAllocation) + + t.Run("NotExecutable", func(t *testing.T) { + blobDigest := digest.MustNewDigest("hello", remoteexecution.DigestFunction_SHA256, "bc126902a442931481d7f89552a41b1891cf06dd8d3675062eede66d104d97b4", 123) + underlyingLeaf := mock.NewMockNativeLeaf(ctrl) + baseCASFileFactory.EXPECT().LookupFile( + blobDigest, + /* isExecutable = */ false, + /* readMonitor = */ nil, + ).Return(underlyingLeaf) + wrappedLeaf := mock.NewMockNativeLeaf(ctrl) + leafHandleAllocation := mock.NewMockStatelessHandleAllocation(ctrl) + handleAllocator.EXPECT().New(gomock.Any()).DoAndReturn(func(id io.WriterTo) virtual.StatelessHandleAllocation { + idBuf := bytes.NewBuffer(nil) + n, err := id.WriteTo(idBuf) + require.NoError(t, err) + require.Equal(t, int64(78), n) + require.Equal(t, []byte( + // Length of digest. + "\x4c"+ + // Digest. + "1-bc126902a442931481d7f89552a41b1891cf06dd8d3675062eede66d104d97b4-123-hello"+ + // Executable flag. 
+ "\x00"), idBuf.Bytes()) + return leafHandleAllocation + }) + leafHandleAllocation.EXPECT().AsNativeLeaf(underlyingLeaf).Return(wrappedLeaf) + + require.Equal(t, wrappedLeaf, casFileFactory.LookupFile(blobDigest, false, nil)) + }) + + t.Run("Executable", func(t *testing.T) { + blobDigest := digest.MustNewDigest("foobar", remoteexecution.DigestFunction_MD5, "c8a4ddfcd3a5a0caf4cc1d64883df421", 456) + underlyingLeaf := mock.NewMockNativeLeaf(ctrl) + baseCASFileFactory.EXPECT().LookupFile( + blobDigest, + /* isExecutable = */ true, + /* readMonitor = */ nil, + ).Return(underlyingLeaf) + wrappedLeaf := mock.NewMockNativeLeaf(ctrl) + leafHandleAllocation := mock.NewMockStatelessHandleAllocation(ctrl) + handleAllocator.EXPECT().New(gomock.Any()).DoAndReturn(func(id io.WriterTo) virtual.StatelessHandleAllocation { + idBuf := bytes.NewBuffer(nil) + n, err := id.WriteTo(idBuf) + require.NoError(t, err) + require.Equal(t, int64(47), n) + require.Equal(t, []byte( + // Length of digest. + "\x2d"+ + // Digest. + "3-c8a4ddfcd3a5a0caf4cc1d64883df421-456-foobar"+ + // Executable flag. + "\x01"), idBuf.Bytes()) + return leafHandleAllocation + }) + leafHandleAllocation.EXPECT().AsNativeLeaf(underlyingLeaf).Return(wrappedLeaf) + + require.Equal(t, wrappedLeaf, casFileFactory.LookupFile(blobDigest, true, nil)) + }) + + t.Run("WithReadMonitor", func(t *testing.T) { + // Create a CAS file that has a read monitor installed. + // This should cause the returned file to be wrapped + // twice: once to intercept VirtualRead() calls, and + // once by the HandleAllocator. 
+ blobDigest := digest.MustNewDigest("foobar", remoteexecution.DigestFunction_MD5, "1234fc8071156282a346e0563ef92b6f", 123) + underlyingLeaf := mock.NewMockNativeLeaf(ctrl) + baseCASFileFactory.EXPECT().LookupFile( + blobDigest, + /* isExecutable = */ true, + /* readMonitor = */ nil, + ).Return(underlyingLeaf) + wrappedLeaf := mock.NewMockNativeLeaf(ctrl) + leafHandleAllocation := mock.NewMockStatelessHandleAllocation(ctrl) + handleAllocator.EXPECT().New(gomock.Any()).DoAndReturn(func(id io.WriterTo) virtual.StatelessHandleAllocation { + idBuf := bytes.NewBuffer(nil) + n, err := id.WriteTo(idBuf) + require.NoError(t, err) + require.Equal(t, int64(47), n) + require.Equal(t, []byte( + // Length of digest. + "\x2d"+ + // Digest. + "3-1234fc8071156282a346e0563ef92b6f-123-foobar"+ + // Executable flag. + "\x01"), idBuf.Bytes()) + return leafHandleAllocation + }) + var monitoringLeaf virtual.NativeLeaf + leafHandleAllocation.EXPECT().AsNativeLeaf(gomock.Any()).DoAndReturn(func(leaf virtual.NativeLeaf) virtual.NativeLeaf { + monitoringLeaf = leaf + return wrappedLeaf + }) + fileReadMonitor := mock.NewMockFileReadMonitor(ctrl) + + require.Equal(t, wrappedLeaf, casFileFactory.LookupFile(blobDigest, true, fileReadMonitor.Call)) + + // Reading the file's contents should cause it to be reported + // as being read. This should only happen just once. + fileReadMonitor.EXPECT().Call() + underlyingLeaf.EXPECT().VirtualRead(gomock.Len(5), uint64(0)). + DoAndReturn(func(buf []byte, off uint64) (int, bool, virtual.Status) { + copy(buf, "Hello") + return 5, false, virtual.StatusOK + }). 
+ Times(10) + + for i := 0; i < 10; i++ { + var buf [5]byte + n, eof, s := monitoringLeaf.VirtualRead(buf[:], 0) + require.Equal(t, 5, n) + require.False(t, eof) + require.Equal(t, virtual.StatusOK, s) + } + }) +} diff --git a/pkg/filesystem/virtual/static_directory.go b/pkg/filesystem/virtual/static_directory.go new file mode 100644 index 0000000..0cba6c3 --- /dev/null +++ b/pkg/filesystem/virtual/static_directory.go @@ -0,0 +1,112 @@ +package virtual + +import ( + "context" + "sort" + + "github.com/buildbarn/bb-storage/pkg/filesystem" + "github.com/buildbarn/bb-storage/pkg/filesystem/path" +) + +type staticDirectoryEntry struct { + name path.Component + child DirectoryChild +} + +type staticDirectoryEntryList []staticDirectoryEntry + +func (l staticDirectoryEntryList) Len() int { + return len(l) +} + +func (l staticDirectoryEntryList) Less(i, j int) bool { + return l[i].name.String() < l[j].name.String() +} + +func (l staticDirectoryEntryList) Swap(i, j int) { + l[i], l[j] = l[j], l[i] +} + +type staticDirectory struct { + ReadOnlyDirectory + + entries []staticDirectoryEntry + linkCount uint32 +} + +// NewStaticDirectory creates a Directory that contains a hardcoded list +// of child files or directories. The contents of this directory are +// immutable. +func NewStaticDirectory(directories map[path.Component]DirectoryChild) Directory { + // Place all directory entries in a sorted list. This allows us + // to do lookups by performing a binary search, while also + // making it possible to implement readdir() deterministically. 
+ entries := make(staticDirectoryEntryList, 0, len(directories)) + linkCount := EmptyDirectoryLinkCount + for name, child := range directories { + entries = append(entries, staticDirectoryEntry{ + name: name, + child: child, + }) + if directory, _ := child.GetPair(); directory != nil { + linkCount++ + } + } + sort.Sort(entries) + + return &staticDirectory{ + entries: entries, + linkCount: linkCount, + } +} + +func (d *staticDirectory) VirtualGetAttributes(ctx context.Context, requested AttributesMask, attributes *Attributes) { + attributes.SetChangeID(0) + attributes.SetFileType(filesystem.FileTypeDirectory) + attributes.SetLinkCount(d.linkCount) + attributes.SetPermissions(PermissionsRead | PermissionsExecute) + attributes.SetSizeBytes(0) +} + +func (d *staticDirectory) VirtualLookup(ctx context.Context, name path.Component, requested AttributesMask, out *Attributes) (DirectoryChild, Status) { + if i := sort.Search(len(d.entries), func(i int) bool { + return d.entries[i].name.String() >= name.String() + }); i < len(d.entries) && d.entries[i].name == name { + child := d.entries[i].child + child.GetNode().VirtualGetAttributes(ctx, requested, out) + return (child), StatusOK + } + return DirectoryChild{}, StatusErrNoEnt +} + +func (d *staticDirectory) VirtualOpenChild(ctx context.Context, name path.Component, shareAccess ShareMask, createAttributes *Attributes, existingOptions *OpenExistingOptions, requested AttributesMask, openedFileAttributes *Attributes) (Leaf, AttributesMask, ChangeInfo, Status) { + if i := sort.Search(len(d.entries), func(i int) bool { + return d.entries[i].name.String() >= name.String() + }); i < len(d.entries) && d.entries[i].name == name { + if existingOptions == nil { + return nil, 0, ChangeInfo{}, StatusErrExist + } + directory, leaf := d.entries[i].child.GetPair() + if directory != nil { + return nil, 0, ChangeInfo{}, StatusErrIsDir + } + s := leaf.VirtualOpenSelf(ctx, shareAccess, existingOptions, requested, openedFileAttributes) + return 
leaf, existingOptions.ToAttributesMask(), ChangeInfo{ + Before: 0, + After: 0, + }, s + } + return ReadOnlyDirectoryOpenChildDoesntExist(createAttributes) +} + +func (d *staticDirectory) VirtualReadDir(ctx context.Context, firstCookie uint64, requested AttributesMask, reporter DirectoryEntryReporter) Status { + for i := firstCookie; i < uint64(len(d.entries)); i++ { + entry := d.entries[i] + var attributes Attributes + entry.child.GetNode().VirtualGetAttributes(ctx, requested, &attributes) + if !reporter.ReportEntry(i+1, entry.name, entry.child, &attributes) { + break + } + } + return StatusOK +} diff --git a/pkg/filesystem/virtual/static_directory_test.go b/pkg/filesystem/virtual/static_directory_test.go new file mode 100644 index 0000000..10a6920 --- /dev/null +++ b/pkg/filesystem/virtual/static_directory_test.go @@ -0,0 +1,214 @@ +package virtual_test + +import ( + "context" + "testing" + + "github.com/buildbarn/bb-remote-execution/internal/mock" + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/virtual" + "github.com/buildbarn/bb-storage/pkg/filesystem/path" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" +) + +func TestStaticDirectoryVirtualLookup(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + child := mock.NewMockVirtualDirectory(ctrl) + d := virtual.NewStaticDirectory(map[path.Component]virtual.DirectoryChild{ + path.MustNewComponent("child"): virtual.DirectoryChild{}.FromDirectory(child), + }) + + t.Run("NotFound", func(t *testing.T) { + var out virtual.Attributes + _, s := d.VirtualLookup(ctx, path.MustNewComponent("nonexistent"), 0, &out) + require.Equal(t, virtual.StatusErrNoEnt, s) + }) + + t.Run("Success", func(t *testing.T) { + child.EXPECT().VirtualGetAttributes( + ctx, + virtual.AttributesMaskInodeNumber, + gomock.Any(), + ).Do(func(ctx context.Context, requested virtual.AttributesMask, out *virtual.Attributes) { + out.SetInodeNumber(456) + }) + + var out virtual.Attributes + 
actualChild, s := d.VirtualLookup(ctx, path.MustNewComponent("child"), virtual.AttributesMaskInodeNumber, &out) + require.Equal(t, virtual.StatusOK, s) + require.Equal(t, virtual.DirectoryChild{}.FromDirectory(child), actualChild) + require.Equal(t, *(&virtual.Attributes{}).SetInodeNumber(456), out) + }) +} + +func TestStaticDirectoryVirtualOpenChild(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + child := mock.NewMockVirtualDirectory(ctrl) + d := virtual.NewStaticDirectory(map[path.Component]virtual.DirectoryChild{ + path.MustNewComponent("child"): virtual.DirectoryChild{}.FromDirectory(child), + }) + + t.Run("NotFound", func(t *testing.T) { + // Child does not exist, and we're not instructed to + // create anything. + var out virtual.Attributes + _, _, _, s := d.VirtualOpenChild( + ctx, + path.MustNewComponent("nonexistent"), + virtual.ShareMaskRead, + nil, + &virtual.OpenExistingOptions{}, + virtual.AttributesMaskInodeNumber, + &out) + require.Equal(t, virtual.StatusErrNoEnt, s) + }) + + t.Run("ReadOnlyFileSystem", func(t *testing.T) { + // Child does not exist, and we're don't support + // creating anything. + var out virtual.Attributes + _, _, _, s := d.VirtualOpenChild( + ctx, + path.MustNewComponent("nonexistent"), + virtual.ShareMaskWrite, + (&virtual.Attributes{}).SetPermissions(virtual.PermissionsRead|virtual.PermissionsWrite), + &virtual.OpenExistingOptions{}, + virtual.AttributesMaskInodeNumber, + &out) + require.Equal(t, virtual.StatusErrROFS, s) + }) + + t.Run("Exists", func(t *testing.T) { + // A directory already exists under this name, so we + // can't create a file. 
+ var out virtual.Attributes + _, _, _, s := d.VirtualOpenChild( + ctx, + path.MustNewComponent("child"), + virtual.ShareMaskWrite, + (&virtual.Attributes{}).SetPermissions(virtual.PermissionsRead|virtual.PermissionsWrite), + nil, + virtual.AttributesMaskInodeNumber, + &out) + require.Equal(t, virtual.StatusErrExist, s) + }) + + t.Run("IsDirectory", func(t *testing.T) { + // A directory already exists under this name, so we + // can't open it as a file. + var out virtual.Attributes + _, _, _, s := d.VirtualOpenChild( + ctx, + path.MustNewComponent("child"), + virtual.ShareMaskWrite, + (&virtual.Attributes{}).SetPermissions(virtual.PermissionsRead|virtual.PermissionsWrite), + &virtual.OpenExistingOptions{}, + virtual.AttributesMaskInodeNumber, + &out) + require.Equal(t, virtual.StatusErrIsDir, s) + }) +} + +func TestStaticDirectoryVirtualReadDir(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + childA := mock.NewMockVirtualDirectory(ctrl) + childB := mock.NewMockVirtualDirectory(ctrl) + d := virtual.NewStaticDirectory(map[path.Component]virtual.DirectoryChild{ + path.MustNewComponent("a"): virtual.DirectoryChild{}.FromDirectory(childA), + path.MustNewComponent("b"): virtual.DirectoryChild{}.FromDirectory(childB), + }) + + t.Run("FromStart", func(t *testing.T) { + // Read the directory in its entirety. 
+ reporter := mock.NewMockDirectoryEntryReporter(ctrl) + childA.EXPECT().VirtualGetAttributes(ctx, virtual.AttributesMaskInodeNumber, gomock.Any()).Do( + func(ctx context.Context, requested virtual.AttributesMask, out *virtual.Attributes) { + out.SetInodeNumber(123) + }) + reporter.EXPECT().ReportEntry( + uint64(1), + path.MustNewComponent("a"), + virtual.DirectoryChild{}.FromDirectory(childA), + (&virtual.Attributes{}).SetInodeNumber(123), + ).Return(true) + childB.EXPECT().VirtualGetAttributes(ctx, virtual.AttributesMaskInodeNumber, gomock.Any()).Do( + func(ctx context.Context, requested virtual.AttributesMask, out *virtual.Attributes) { + out.SetInodeNumber(456) + }) + reporter.EXPECT().ReportEntry( + uint64(2), + path.MustNewComponent("b"), + virtual.DirectoryChild{}.FromDirectory(childB), + (&virtual.Attributes{}).SetInodeNumber(456), + ).Return(true) + + require.Equal( + t, + virtual.StatusOK, + d.VirtualReadDir(ctx, 0, virtual.AttributesMaskInodeNumber, reporter)) + }) + + t.Run("NoSpace", func(t *testing.T) { + // Not even enough space to store the first entry. + reporter := mock.NewMockDirectoryEntryReporter(ctrl) + childA.EXPECT().VirtualGetAttributes(ctx, virtual.AttributesMaskInodeNumber, gomock.Any()).Do( + func(ctx context.Context, requested virtual.AttributesMask, out *virtual.Attributes) { + out.SetInodeNumber(123) + }) + reporter.EXPECT().ReportEntry( + uint64(1), + path.MustNewComponent("a"), + virtual.DirectoryChild{}.FromDirectory(childA), + (&virtual.Attributes{}).SetInodeNumber(123), + ).Return(false) + + require.Equal( + t, + virtual.StatusOK, + d.VirtualReadDir(ctx, 0, virtual.AttributesMaskInodeNumber, reporter)) + }) + + t.Run("Partial", func(t *testing.T) { + // Only read the last entry. 
+ reporter := mock.NewMockDirectoryEntryReporter(ctrl) + childB.EXPECT().VirtualGetAttributes(ctx, virtual.AttributesMaskInodeNumber, gomock.Any()).Do( + func(ctx context.Context, requested virtual.AttributesMask, out *virtual.Attributes) { + out.SetInodeNumber(456) + }) + reporter.EXPECT().ReportEntry( + uint64(2), + path.MustNewComponent("b"), + virtual.DirectoryChild{}.FromDirectory(childB), + (&virtual.Attributes{}).SetInodeNumber(456), + ).Return(true) + + require.Equal( + t, + virtual.StatusOK, + d.VirtualReadDir(ctx, 1, virtual.AttributesMaskInodeNumber, reporter)) + }) + + t.Run("AtEOF", func(t *testing.T) { + // Reading at the end-of-file should yield no entries. + reporter := mock.NewMockDirectoryEntryReporter(ctrl) + + require.Equal( + t, + virtual.StatusOK, + d.VirtualReadDir(ctx, 2, virtual.AttributesMaskInodeNumber, reporter)) + }) + + t.Run("BeyondEOF", func(t *testing.T) { + // Reading past the end-of-file should not cause incorrect + // behaviour. + reporter := mock.NewMockDirectoryEntryReporter(ctrl) + + require.Equal( + t, + virtual.StatusOK, + d.VirtualReadDir(ctx, 3, virtual.AttributesMaskInodeNumber, reporter)) + }) +} diff --git a/pkg/filesystem/virtual/status.go b/pkg/filesystem/virtual/status.go new file mode 100644 index 0000000..b47a614 --- /dev/null +++ b/pkg/filesystem/virtual/status.go @@ -0,0 +1,65 @@ +package virtual + +// Status response of operations applied against Node objects. +type Status int + +const ( + // StatusOK indicates that the operation succeeded. + StatusOK Status = iota + // StatusErrAccess indicates that the operation failed due to + // permission being denied. + StatusErrAccess + // StatusErrBadHandle indicates that the provided file handle + // failed internal consistency checks. + StatusErrBadHandle + // StatusErrExist indicates that a file system object of the + // specified target name (when creating, renaming or linking) + // already exists. 
+	StatusErrExist
+	// StatusErrInval indicates that the arguments for this
+	// operation are not valid.
+	StatusErrInval
+	// StatusErrIO indicates that the operation failed due to an I/O
+	// error.
+	StatusErrIO
+	// StatusErrIsDir indicates that a request is made against a
+	// directory when the current operation does not allow a
+	// directory as a target.
+	StatusErrIsDir
+	// StatusErrNoEnt indicates that the operation failed due to a
+	// file not existing.
+	StatusErrNoEnt
+	// StatusErrNotDir indicates that a request is made against a
+	// leaf when the current operation does not allow a leaf as a
+	// target.
+	StatusErrNotDir
+	// StatusErrNotEmpty indicates that an attempt was made to remove a
+	// directory that was not empty.
+	StatusErrNotEmpty
+	// StatusErrNXIO indicates that a request is made beyond the
+	// limits of the file or device.
+	StatusErrNXIO
+	// StatusErrPerm indicates that the operation was not allowed
+	// because the caller is neither a privileged user (root) nor
+	// the owner of the target of the operation.
+	StatusErrPerm
+	// StatusErrROFS indicates that a modifying operation was
+	// attempted on a read-only file system.
+	StatusErrROFS
+	// StatusErrStale indicates that the file system object referred
+	// to by the file handle no longer exists, or access to it has
+	// been revoked.
+	StatusErrStale
+	// StatusErrSymlink indicates that a request is made against a
+	// symbolic link when the current operation does not allow a
+	// symbolic link as a target.
+	StatusErrSymlink
+	// StatusErrWrongType indicates that a request is made against
+	// an object that is of an invalid type for the current
+	// operation, and there is no more specific error (such as
+	// StatusErrIsDir or StatusErrSymlink) that applies.
+	StatusErrWrongType
+	// StatusErrXDev indicates an attempt to do an operation, such
+	// as linking, that inappropriately crosses a boundary.
+ StatusErrXDev +) diff --git a/pkg/filesystem/virtual/symlink_factory.go b/pkg/filesystem/virtual/symlink_factory.go new file mode 100644 index 0000000..c7673a4 --- /dev/null +++ b/pkg/filesystem/virtual/symlink_factory.go @@ -0,0 +1,9 @@ +package virtual + +// SymlinkFactory is a factory type for symbolic links. Symbolic links are +// immutable files; the target to which they point can only be altered by +// replacing the node entirely (e.g., by first unlinking it from the +// directory). +type SymlinkFactory interface { + LookupSymlink(target []byte) NativeLeaf +} diff --git a/pkg/filesystem/virtual/user_settable_symlink.go b/pkg/filesystem/virtual/user_settable_symlink.go new file mode 100644 index 0000000..fd45752 --- /dev/null +++ b/pkg/filesystem/virtual/user_settable_symlink.go @@ -0,0 +1,148 @@ +package virtual + +import ( + "context" + "sync" + + "github.com/buildbarn/bb-remote-execution/pkg/proto/outputpathpersistency" + "github.com/buildbarn/bb-remote-execution/pkg/proto/remoteoutputservice" + "github.com/buildbarn/bb-remote-execution/pkg/proto/tmp_installer" + "github.com/buildbarn/bb-storage/pkg/auth" + "github.com/buildbarn/bb-storage/pkg/digest" + "github.com/buildbarn/bb-storage/pkg/filesystem" + "github.com/buildbarn/bb-storage/pkg/filesystem/path" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/types/known/emptypb" +) + +// UserSettableSymlink is an implementation of a symbolic link, whose +// target can be modified using the TemporaryDirectoryInstaller gRPC +// API. +// +// Instead of just pointing to a single target, this type is capable of +// storing one target per user. Both when reading and adjusting the +// symbolic link's target, the public authentication metadata is used to +// identify the user. 
+type UserSettableSymlink struct { + placeholderFile + + buildDirectory *path.Builder + + lock sync.Mutex + targets map[string][]byte + changeID uint64 +} + +var ( + _ NativeLeaf = (*UserSettableSymlink)(nil) + _ tmp_installer.TemporaryDirectoryInstallerServer = (*UserSettableSymlink)(nil) +) + +// NewUserSettableSymlink creates a UserSettableSymlink that doesn't +// have any targets configured. +func NewUserSettableSymlink(buildDirectory *path.Builder) *UserSettableSymlink { + return &UserSettableSymlink{ + buildDirectory: buildDirectory, + targets: map[string][]byte{}, + } +} + +// CheckReadiness returns whether the target of the symbolic link is +// capable of being mutated. +func (f *UserSettableSymlink) CheckReadiness(ctx context.Context, request *emptypb.Empty) (*emptypb.Empty, error) { + return &emptypb.Empty{}, nil +} + +// InstallTemporaryDirectory sets the target of the symbolic link for +// the user stored in the authentication metadata. +func (f *UserSettableSymlink) InstallTemporaryDirectory(ctx context.Context, request *tmp_installer.InstallTemporaryDirectoryRequest) (*emptypb.Empty, error) { + publicAuthenticationMetadata, _ := auth.AuthenticationMetadataFromContext(ctx).GetPublicProto() + key := protojson.Format(publicAuthenticationMetadata) + + temporaryDirectory, scopeWalker := f.buildDirectory.Join(path.NewRelativeScopeWalker(path.VoidComponentWalker)) + if err := path.Resolve(request.TemporaryDirectory, scopeWalker); err != nil { + return nil, err + } + target := []byte(temporaryDirectory.String()) + + f.lock.Lock() + f.targets[key] = target + f.lock.Unlock() + return &emptypb.Empty{}, nil +} + +// Readlink returns the target of the symbolic link. This method always +// fails, as it's called in places where no Context is available. 
+func (f *UserSettableSymlink) Readlink() (string, error) { + return "", status.Error(codes.InvalidArgument, "Target of user settable symlinks can only be obtained through the virtual file system") +} + +// GetOutputServiceFileStatus returns the status of the symbolic link, +// so that it may be reported through the Remote Output Service. This +// method is a no-op, as this type is not used in combination with the +// Remote Output Service. +func (f *UserSettableSymlink) GetOutputServiceFileStatus(digestFunction *digest.Function) (*remoteoutputservice.FileStatus, error) { + return &remoteoutputservice.FileStatus{}, nil +} + +// AppendOutputPathPersistencyDirectoryNode returns the status of the +// symbolic link, so that it may be persisted on disk. This method is a +// no-op, as this type is not used as part of build output paths. +func (f *UserSettableSymlink) AppendOutputPathPersistencyDirectoryNode(directory *outputpathpersistency.Directory, name path.Component) { +} + +// VirtualGetAttributes returns the file system attributes of the +// symbolic link. +func (f *UserSettableSymlink) VirtualGetAttributes(ctx context.Context, requested AttributesMask, attributes *Attributes) { + attributes.SetFileType(filesystem.FileTypeSymlink) + attributes.SetPermissions(PermissionsRead | PermissionsWrite | PermissionsExecute) + + if requested&(AttributesMaskChangeID|AttributesMaskSizeBytes) != 0 { + var key string + if requested&AttributesMaskSizeBytes != 0 { + publicAuthenticationMetadata, _ := auth.AuthenticationMetadataFromContext(ctx).GetPublicProto() + key = protojson.Format(publicAuthenticationMetadata) + } + + f.lock.Lock() + if requested&AttributesMaskChangeID != 0 { + // Clients may use the change ID to determine + // whether the target of the symbolic link + // changes. Ensure no caching is performed by + // incrementing the change ID when requested. 
+ attributes.SetChangeID(f.changeID) + f.changeID++ + } + if requested&AttributesMaskSizeBytes != 0 { + attributes.SetSizeBytes(uint64(len(f.targets[key]))) + } + f.lock.Unlock() + } +} + +// VirtualReadlink returns the target of the symbolic link for the +// calling user. +func (f *UserSettableSymlink) VirtualReadlink(ctx context.Context) ([]byte, Status) { + publicAuthenticationMetadata, _ := auth.AuthenticationMetadataFromContext(ctx).GetPublicProto() + key := protojson.Format(publicAuthenticationMetadata) + + f.lock.Lock() + defer f.lock.Unlock() + + if target, ok := f.targets[key]; ok { + return target, StatusOK + } + return nil, StatusErrNoEnt +} + +// VirtualSetAttributes adjusts the attributes of the symbolic link. +func (f *UserSettableSymlink) VirtualSetAttributes(ctx context.Context, in *Attributes, requested AttributesMask, out *Attributes) Status { + if _, ok := in.GetSizeBytes(); ok { + return StatusErrInval + } + f.VirtualGetAttributes(ctx, requested, out) + return StatusOK +} diff --git a/pkg/filesystem/virtual/user_settable_symlink_test.go b/pkg/filesystem/virtual/user_settable_symlink_test.go new file mode 100644 index 0000000..b90252e --- /dev/null +++ b/pkg/filesystem/virtual/user_settable_symlink_test.go @@ -0,0 +1,124 @@ +package virtual_test + +import ( + "context" + "testing" + + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/virtual" + "github.com/buildbarn/bb-remote-execution/pkg/proto/tmp_installer" + "github.com/buildbarn/bb-storage/pkg/auth" + "github.com/buildbarn/bb-storage/pkg/filesystem" + "github.com/buildbarn/bb-storage/pkg/filesystem/path" + auth_pb "github.com/buildbarn/bb-storage/pkg/proto/auth" + "github.com/buildbarn/bb-storage/pkg/testutil" + "github.com/stretchr/testify/require" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/structpb" +) + +func TestUserSettableSymlink(t *testing.T) { + buildDirectory, scopeWalker := 
path.EmptyBuilder.Join(path.VoidScopeWalker) + require.NoError(t, path.Resolve("/var/build", scopeWalker)) + symlink := virtual.NewUserSettableSymlink(buildDirectory) + + ctx1 := auth.NewContextWithAuthenticationMetadata( + context.Background(), + auth.MustNewAuthenticationMetadataFromProto(&auth_pb.AuthenticationMetadata{ + Public: structpb.NewStringValue("user1"), + })) + ctx2 := auth.NewContextWithAuthenticationMetadata( + context.Background(), + auth.MustNewAuthenticationMetadataFromProto(&auth_pb.AuthenticationMetadata{ + Public: structpb.NewStringValue("user2"), + })) + + t.Run("InstallTemporaryDirectory", func(t *testing.T) { + // Attempt to set some symlink target paths for the + // tests that follow. + + t.Run("InvalidPath", func(t *testing.T) { + _, err := symlink.InstallTemporaryDirectory(context.Background(), &tmp_installer.InstallTemporaryDirectoryRequest{ + TemporaryDirectory: "/foo", + }) + testutil.RequireEqualStatus(t, status.Error(codes.InvalidArgument, "Path is absolute, while a relative path was expected"), err) + }) + + t.Run("Success", func(t *testing.T) { + _, err := symlink.InstallTemporaryDirectory(ctx1, &tmp_installer.InstallTemporaryDirectoryRequest{ + TemporaryDirectory: "125/tmp", + }) + require.NoError(t, err) + + _, err = symlink.InstallTemporaryDirectory(ctx2, &tmp_installer.InstallTemporaryDirectoryRequest{ + TemporaryDirectory: "4857/tmp", + }) + require.NoError(t, err) + }) + }) + + t.Run("VirtualReadlink", func(t *testing.T) { + // The target returned by the symbolic link depends on + // the authentication metadata that is provided. 
+ + t.Run("UnknownUser", func(t *testing.T) { + _, s := symlink.VirtualReadlink(context.Background()) + require.Equal(t, virtual.StatusErrNoEnt, s) + }) + + t.Run("Success", func(t *testing.T) { + target1, s := symlink.VirtualReadlink(ctx1) + require.Equal(t, virtual.StatusOK, s) + require.Equal(t, []byte("/var/build/125/tmp"), target1) + + target2, s := symlink.VirtualReadlink(ctx2) + require.Equal(t, virtual.StatusOK, s) + require.Equal(t, []byte("/var/build/4857/tmp"), target2) + }) + }) + + t.Run("VirtualGetAttributes", func(t *testing.T) { + // The size of the symbolic link depends on the user + // that requests it. As VirtualGetAttributes() can't + // fail, we return zero in case an unknown user requests + // it. The change ID should be incremented every time it + // is requested. + + requestedAttributes := virtual.AttributesMaskChangeID | + virtual.AttributesMaskFileType | + virtual.AttributesMaskPermissions | + virtual.AttributesMaskSizeBytes + + t.Run("UnknownUser", func(t *testing.T) { + var attributes virtual.Attributes + symlink.VirtualGetAttributes(context.Background(), requestedAttributes, &attributes) + require.Equal(t, *(&virtual.Attributes{}). + SetChangeID(0). + SetFileType(filesystem.FileTypeSymlink). + SetPermissions(virtual.PermissionsRead | virtual.PermissionsWrite | virtual.PermissionsExecute). + SetSizeBytes(0), + attributes) + }) + + t.Run("Success", func(t *testing.T) { + var attributes1 virtual.Attributes + symlink.VirtualGetAttributes(ctx1, requestedAttributes, &attributes1) + require.Equal(t, *(&virtual.Attributes{}). + SetChangeID(1). + SetFileType(filesystem.FileTypeSymlink). + SetPermissions(virtual.PermissionsRead | virtual.PermissionsWrite | virtual.PermissionsExecute). + SetSizeBytes(18), + attributes1) + + var attributes2 virtual.Attributes + symlink.VirtualGetAttributes(ctx2, requestedAttributes, &attributes2) + require.Equal(t, *(&virtual.Attributes{}). + SetChangeID(2). + SetFileType(filesystem.FileTypeSymlink). 
+ SetPermissions(virtual.PermissionsRead | virtual.PermissionsWrite | virtual.PermissionsExecute). + SetSizeBytes(19), + attributes2) + }) + }) +} diff --git a/pkg/proto/buildqueuestate/BUILD.bazel b/pkg/proto/buildqueuestate/BUILD.bazel new file mode 100644 index 0000000..f755af8 --- /dev/null +++ b/pkg/proto/buildqueuestate/BUILD.bazel @@ -0,0 +1,36 @@ +load("@rules_proto//proto:defs.bzl", "proto_library") +load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") + +proto_library( + name = "buildqueuestate_proto", + srcs = ["buildqueuestate.proto"], + visibility = ["//visibility:public"], + deps = [ + "@com_github_bazelbuild_remote_apis//build/bazel/remote/execution/v2:remote_execution_proto", + "@com_google_protobuf//:any_proto", + "@com_google_protobuf//:duration_proto", + "@com_google_protobuf//:empty_proto", + "@com_google_protobuf//:timestamp_proto", + "@googleapis//google/rpc:status_proto", + ], +) + +go_proto_library( + name = "buildqueuestate_go_proto", + compilers = ["@io_bazel_rules_go//proto:go_grpc"], + importpath = "github.com/buildbarn/bb-remote-execution/pkg/proto/buildqueuestate", + proto = ":buildqueuestate_proto", + visibility = ["//visibility:public"], + deps = [ + "@com_github_bazelbuild_remote_apis//build/bazel/remote/execution/v2:execution", + "@org_golang_google_genproto_googleapis_rpc//status", + ], +) + +go_library( + name = "buildqueuestate", + embed = [":buildqueuestate_go_proto"], + importpath = "github.com/buildbarn/bb-remote-execution/pkg/proto/buildqueuestate", + visibility = ["//visibility:public"], +) diff --git a/pkg/proto/buildqueuestate/buildqueuestate.pb.go b/pkg/proto/buildqueuestate/buildqueuestate.pb.go new file mode 100644 index 0000000..4ece3c4 --- /dev/null +++ b/pkg/proto/buildqueuestate/buildqueuestate.pb.go @@ -0,0 +1,3662 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.31.0 +// protoc v4.23.4 +// source: pkg/proto/buildqueuestate/buildqueuestate.proto + +package buildqueuestate + +import ( + context "context" + v2 "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + status "google.golang.org/genproto/googleapis/rpc/status" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status1 "google.golang.org/grpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + durationpb "google.golang.org/protobuf/types/known/durationpb" + emptypb "google.golang.org/protobuf/types/known/emptypb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type ListInvocationChildrenRequest_Filter int32 + +const ( + ListInvocationChildrenRequest_ALL ListInvocationChildrenRequest_Filter = 0 + ListInvocationChildrenRequest_ACTIVE ListInvocationChildrenRequest_Filter = 1 + ListInvocationChildrenRequest_QUEUED ListInvocationChildrenRequest_Filter = 2 +) + +// Enum value maps for ListInvocationChildrenRequest_Filter. 
+var ( + ListInvocationChildrenRequest_Filter_name = map[int32]string{ + 0: "ALL", + 1: "ACTIVE", + 2: "QUEUED", + } + ListInvocationChildrenRequest_Filter_value = map[string]int32{ + "ALL": 0, + "ACTIVE": 1, + "QUEUED": 2, + } +) + +func (x ListInvocationChildrenRequest_Filter) Enum() *ListInvocationChildrenRequest_Filter { + p := new(ListInvocationChildrenRequest_Filter) + *p = x + return p +} + +func (x ListInvocationChildrenRequest_Filter) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ListInvocationChildrenRequest_Filter) Descriptor() protoreflect.EnumDescriptor { + return file_pkg_proto_buildqueuestate_buildqueuestate_proto_enumTypes[0].Descriptor() +} + +func (ListInvocationChildrenRequest_Filter) Type() protoreflect.EnumType { + return &file_pkg_proto_buildqueuestate_buildqueuestate_proto_enumTypes[0] +} + +func (x ListInvocationChildrenRequest_Filter) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ListInvocationChildrenRequest_Filter.Descriptor instead. 
+func (ListInvocationChildrenRequest_Filter) EnumDescriptor() ([]byte, []int) { + return file_pkg_proto_buildqueuestate_buildqueuestate_proto_rawDescGZIP(), []int{17, 0} +} + +type PaginationInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + StartIndex uint32 `protobuf:"varint,1,opt,name=start_index,json=startIndex,proto3" json:"start_index,omitempty"` + TotalEntries uint32 `protobuf:"varint,2,opt,name=total_entries,json=totalEntries,proto3" json:"total_entries,omitempty"` +} + +func (x *PaginationInfo) Reset() { + *x = PaginationInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PaginationInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PaginationInfo) ProtoMessage() {} + +func (x *PaginationInfo) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PaginationInfo.ProtoReflect.Descriptor instead. 
+func (*PaginationInfo) Descriptor() ([]byte, []int) { + return file_pkg_proto_buildqueuestate_buildqueuestate_proto_rawDescGZIP(), []int{0} +} + +func (x *PaginationInfo) GetStartIndex() uint32 { + if x != nil { + return x.StartIndex + } + return 0 +} + +func (x *PaginationInfo) GetTotalEntries() uint32 { + if x != nil { + return x.TotalEntries + } + return 0 +} + +type PlatformQueueName struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + InstanceNamePrefix string `protobuf:"bytes,1,opt,name=instance_name_prefix,json=instanceNamePrefix,proto3" json:"instance_name_prefix,omitempty"` + Platform *v2.Platform `protobuf:"bytes,2,opt,name=platform,proto3" json:"platform,omitempty"` +} + +func (x *PlatformQueueName) Reset() { + *x = PlatformQueueName{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PlatformQueueName) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PlatformQueueName) ProtoMessage() {} + +func (x *PlatformQueueName) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PlatformQueueName.ProtoReflect.Descriptor instead. 
+func (*PlatformQueueName) Descriptor() ([]byte, []int) { + return file_pkg_proto_buildqueuestate_buildqueuestate_proto_rawDescGZIP(), []int{1} +} + +func (x *PlatformQueueName) GetInstanceNamePrefix() string { + if x != nil { + return x.InstanceNamePrefix + } + return "" +} + +func (x *PlatformQueueName) GetPlatform() *v2.Platform { + if x != nil { + return x.Platform + } + return nil +} + +type SizeClassQueueName struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PlatformQueueName *PlatformQueueName `protobuf:"bytes,1,opt,name=platform_queue_name,json=platformQueueName,proto3" json:"platform_queue_name,omitempty"` + SizeClass uint32 `protobuf:"varint,2,opt,name=size_class,json=sizeClass,proto3" json:"size_class,omitempty"` +} + +func (x *SizeClassQueueName) Reset() { + *x = SizeClassQueueName{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SizeClassQueueName) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SizeClassQueueName) ProtoMessage() {} + +func (x *SizeClassQueueName) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SizeClassQueueName.ProtoReflect.Descriptor instead. 
+func (*SizeClassQueueName) Descriptor() ([]byte, []int) { + return file_pkg_proto_buildqueuestate_buildqueuestate_proto_rawDescGZIP(), []int{2} +} + +func (x *SizeClassQueueName) GetPlatformQueueName() *PlatformQueueName { + if x != nil { + return x.PlatformQueueName + } + return nil +} + +func (x *SizeClassQueueName) GetSizeClass() uint32 { + if x != nil { + return x.SizeClass + } + return 0 +} + +type InvocationName struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SizeClassQueueName *SizeClassQueueName `protobuf:"bytes,1,opt,name=size_class_queue_name,json=sizeClassQueueName,proto3" json:"size_class_queue_name,omitempty"` + Ids []*anypb.Any `protobuf:"bytes,2,rep,name=ids,proto3" json:"ids,omitempty"` +} + +func (x *InvocationName) Reset() { + *x = InvocationName{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *InvocationName) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*InvocationName) ProtoMessage() {} + +func (x *InvocationName) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use InvocationName.ProtoReflect.Descriptor instead. 
+func (*InvocationName) Descriptor() ([]byte, []int) { + return file_pkg_proto_buildqueuestate_buildqueuestate_proto_rawDescGZIP(), []int{3} +} + +func (x *InvocationName) GetSizeClassQueueName() *SizeClassQueueName { + if x != nil { + return x.SizeClassQueueName + } + return nil +} + +func (x *InvocationName) GetIds() []*anypb.Any { + if x != nil { + return x.Ids + } + return nil +} + +type OperationState struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + InvocationName *InvocationName `protobuf:"bytes,2,opt,name=invocation_name,json=invocationName,proto3" json:"invocation_name,omitempty"` + ExpectedDuration *durationpb.Duration `protobuf:"bytes,14,opt,name=expected_duration,json=expectedDuration,proto3" json:"expected_duration,omitempty"` + QueuedTimestamp *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=queued_timestamp,json=queuedTimestamp,proto3" json:"queued_timestamp,omitempty"` + ActionDigest *v2.Digest `protobuf:"bytes,5,opt,name=action_digest,json=actionDigest,proto3" json:"action_digest,omitempty"` + Timeout *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=timeout,proto3" json:"timeout,omitempty"` + // Types that are assignable to Stage: + // + // *OperationState_Queued + // *OperationState_Executing + // *OperationState_Completed + Stage isOperationState_Stage `protobuf_oneof:"stage"` + TargetId string `protobuf:"bytes,11,opt,name=target_id,json=targetId,proto3" json:"target_id,omitempty"` + Priority int32 `protobuf:"varint,12,opt,name=priority,proto3" json:"priority,omitempty"` + InstanceNameSuffix string `protobuf:"bytes,13,opt,name=instance_name_suffix,json=instanceNameSuffix,proto3" json:"instance_name_suffix,omitempty"` + DigestFunction v2.DigestFunction_Value `protobuf:"varint,15,opt,name=digest_function,json=digestFunction,proto3,enum=build.bazel.remote.execution.v2.DigestFunction_Value" 
json:"digest_function,omitempty"` +} + +func (x *OperationState) Reset() { + *x = OperationState{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OperationState) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OperationState) ProtoMessage() {} + +func (x *OperationState) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OperationState.ProtoReflect.Descriptor instead. +func (*OperationState) Descriptor() ([]byte, []int) { + return file_pkg_proto_buildqueuestate_buildqueuestate_proto_rawDescGZIP(), []int{4} +} + +func (x *OperationState) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *OperationState) GetInvocationName() *InvocationName { + if x != nil { + return x.InvocationName + } + return nil +} + +func (x *OperationState) GetExpectedDuration() *durationpb.Duration { + if x != nil { + return x.ExpectedDuration + } + return nil +} + +func (x *OperationState) GetQueuedTimestamp() *timestamppb.Timestamp { + if x != nil { + return x.QueuedTimestamp + } + return nil +} + +func (x *OperationState) GetActionDigest() *v2.Digest { + if x != nil { + return x.ActionDigest + } + return nil +} + +func (x *OperationState) GetTimeout() *timestamppb.Timestamp { + if x != nil { + return x.Timeout + } + return nil +} + +func (m *OperationState) GetStage() isOperationState_Stage { + if m != nil { + return m.Stage + } + return nil +} + +func (x *OperationState) GetQueued() *emptypb.Empty { + if x, ok := x.GetStage().(*OperationState_Queued); ok { + return x.Queued + } + return nil +} 
+ +func (x *OperationState) GetExecuting() *emptypb.Empty { + if x, ok := x.GetStage().(*OperationState_Executing); ok { + return x.Executing + } + return nil +} + +func (x *OperationState) GetCompleted() *v2.ExecuteResponse { + if x, ok := x.GetStage().(*OperationState_Completed); ok { + return x.Completed + } + return nil +} + +func (x *OperationState) GetTargetId() string { + if x != nil { + return x.TargetId + } + return "" +} + +func (x *OperationState) GetPriority() int32 { + if x != nil { + return x.Priority + } + return 0 +} + +func (x *OperationState) GetInstanceNameSuffix() string { + if x != nil { + return x.InstanceNameSuffix + } + return "" +} + +func (x *OperationState) GetDigestFunction() v2.DigestFunction_Value { + if x != nil { + return x.DigestFunction + } + return v2.DigestFunction_Value(0) +} + +type isOperationState_Stage interface { + isOperationState_Stage() +} + +type OperationState_Queued struct { + Queued *emptypb.Empty `protobuf:"bytes,8,opt,name=queued,proto3,oneof"` +} + +type OperationState_Executing struct { + Executing *emptypb.Empty `protobuf:"bytes,9,opt,name=executing,proto3,oneof"` +} + +type OperationState_Completed struct { + Completed *v2.ExecuteResponse `protobuf:"bytes,10,opt,name=completed,proto3,oneof"` +} + +func (*OperationState_Queued) isOperationState_Stage() {} + +func (*OperationState_Executing) isOperationState_Stage() {} + +func (*OperationState_Completed) isOperationState_Stage() {} + +type SizeClassQueueState struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SizeClass uint32 `protobuf:"varint,1,opt,name=size_class,json=sizeClass,proto3" json:"size_class,omitempty"` + Timeout *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=timeout,proto3" json:"timeout,omitempty"` + WorkersCount uint32 `protobuf:"varint,5,opt,name=workers_count,json=workersCount,proto3" json:"workers_count,omitempty"` + DrainsCount uint32 
`protobuf:"varint,7,opt,name=drains_count,json=drainsCount,proto3" json:"drains_count,omitempty"` + RootInvocation *InvocationState `protobuf:"bytes,9,opt,name=root_invocation,json=rootInvocation,proto3" json:"root_invocation,omitempty"` +} + +func (x *SizeClassQueueState) Reset() { + *x = SizeClassQueueState{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SizeClassQueueState) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SizeClassQueueState) ProtoMessage() {} + +func (x *SizeClassQueueState) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SizeClassQueueState.ProtoReflect.Descriptor instead. 
+func (*SizeClassQueueState) Descriptor() ([]byte, []int) { + return file_pkg_proto_buildqueuestate_buildqueuestate_proto_rawDescGZIP(), []int{5} +} + +func (x *SizeClassQueueState) GetSizeClass() uint32 { + if x != nil { + return x.SizeClass + } + return 0 +} + +func (x *SizeClassQueueState) GetTimeout() *timestamppb.Timestamp { + if x != nil { + return x.Timeout + } + return nil +} + +func (x *SizeClassQueueState) GetWorkersCount() uint32 { + if x != nil { + return x.WorkersCount + } + return 0 +} + +func (x *SizeClassQueueState) GetDrainsCount() uint32 { + if x != nil { + return x.DrainsCount + } + return 0 +} + +func (x *SizeClassQueueState) GetRootInvocation() *InvocationState { + if x != nil { + return x.RootInvocation + } + return nil +} + +type PlatformQueueState struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name *PlatformQueueName `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + SizeClassQueues []*SizeClassQueueState `protobuf:"bytes,2,rep,name=size_class_queues,json=sizeClassQueues,proto3" json:"size_class_queues,omitempty"` +} + +func (x *PlatformQueueState) Reset() { + *x = PlatformQueueState{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PlatformQueueState) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PlatformQueueState) ProtoMessage() {} + +func (x *PlatformQueueState) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PlatformQueueState.ProtoReflect.Descriptor instead. 
+func (*PlatformQueueState) Descriptor() ([]byte, []int) { + return file_pkg_proto_buildqueuestate_buildqueuestate_proto_rawDescGZIP(), []int{6} +} + +func (x *PlatformQueueState) GetName() *PlatformQueueName { + if x != nil { + return x.Name + } + return nil +} + +func (x *PlatformQueueState) GetSizeClassQueues() []*SizeClassQueueState { + if x != nil { + return x.SizeClassQueues + } + return nil +} + +type InvocationState struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + QueuedOperationsCount uint32 `protobuf:"varint,2,opt,name=queued_operations_count,json=queuedOperationsCount,proto3" json:"queued_operations_count,omitempty"` + ExecutingWorkersCount uint32 `protobuf:"varint,4,opt,name=executing_workers_count,json=executingWorkersCount,proto3" json:"executing_workers_count,omitempty"` + IdleWorkersCount uint32 `protobuf:"varint,5,opt,name=idle_workers_count,json=idleWorkersCount,proto3" json:"idle_workers_count,omitempty"` + IdleSynchronizingWorkersCount uint32 `protobuf:"varint,6,opt,name=idle_synchronizing_workers_count,json=idleSynchronizingWorkersCount,proto3" json:"idle_synchronizing_workers_count,omitempty"` + ChildrenCount uint32 `protobuf:"varint,7,opt,name=children_count,json=childrenCount,proto3" json:"children_count,omitempty"` + ActiveChildrenCount uint32 `protobuf:"varint,8,opt,name=active_children_count,json=activeChildrenCount,proto3" json:"active_children_count,omitempty"` + QueuedChildrenCount uint32 `protobuf:"varint,9,opt,name=queued_children_count,json=queuedChildrenCount,proto3" json:"queued_children_count,omitempty"` +} + +func (x *InvocationState) Reset() { + *x = InvocationState{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *InvocationState) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func 
(*InvocationState) ProtoMessage() {} + +func (x *InvocationState) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use InvocationState.ProtoReflect.Descriptor instead. +func (*InvocationState) Descriptor() ([]byte, []int) { + return file_pkg_proto_buildqueuestate_buildqueuestate_proto_rawDescGZIP(), []int{7} +} + +func (x *InvocationState) GetQueuedOperationsCount() uint32 { + if x != nil { + return x.QueuedOperationsCount + } + return 0 +} + +func (x *InvocationState) GetExecutingWorkersCount() uint32 { + if x != nil { + return x.ExecutingWorkersCount + } + return 0 +} + +func (x *InvocationState) GetIdleWorkersCount() uint32 { + if x != nil { + return x.IdleWorkersCount + } + return 0 +} + +func (x *InvocationState) GetIdleSynchronizingWorkersCount() uint32 { + if x != nil { + return x.IdleSynchronizingWorkersCount + } + return 0 +} + +func (x *InvocationState) GetChildrenCount() uint32 { + if x != nil { + return x.ChildrenCount + } + return 0 +} + +func (x *InvocationState) GetActiveChildrenCount() uint32 { + if x != nil { + return x.ActiveChildrenCount + } + return 0 +} + +func (x *InvocationState) GetQueuedChildrenCount() uint32 { + if x != nil { + return x.QueuedChildrenCount + } + return 0 +} + +type InvocationChildState struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id *anypb.Any `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + State *InvocationState `protobuf:"bytes,2,opt,name=state,proto3" json:"state,omitempty"` +} + +func (x *InvocationChildState) Reset() { + *x = InvocationChildState{} + if protoimpl.UnsafeEnabled { + mi := 
&file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *InvocationChildState) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*InvocationChildState) ProtoMessage() {} + +func (x *InvocationChildState) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use InvocationChildState.ProtoReflect.Descriptor instead. +func (*InvocationChildState) Descriptor() ([]byte, []int) { + return file_pkg_proto_buildqueuestate_buildqueuestate_proto_rawDescGZIP(), []int{8} +} + +func (x *InvocationChildState) GetId() *anypb.Any { + if x != nil { + return x.Id + } + return nil +} + +func (x *InvocationChildState) GetState() *InvocationState { + if x != nil { + return x.State + } + return nil +} + +type WorkerState struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id map[string]string `protobuf:"bytes,1,rep,name=id,proto3" json:"id,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Timeout *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=timeout,proto3" json:"timeout,omitempty"` + CurrentOperation *OperationState `protobuf:"bytes,3,opt,name=current_operation,json=currentOperation,proto3" json:"current_operation,omitempty"` + Drained bool `protobuf:"varint,4,opt,name=drained,proto3" json:"drained,omitempty"` +} + +func (x *WorkerState) Reset() { + *x = WorkerState{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} 
+ +func (x *WorkerState) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WorkerState) ProtoMessage() {} + +func (x *WorkerState) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WorkerState.ProtoReflect.Descriptor instead. +func (*WorkerState) Descriptor() ([]byte, []int) { + return file_pkg_proto_buildqueuestate_buildqueuestate_proto_rawDescGZIP(), []int{9} +} + +func (x *WorkerState) GetId() map[string]string { + if x != nil { + return x.Id + } + return nil +} + +func (x *WorkerState) GetTimeout() *timestamppb.Timestamp { + if x != nil { + return x.Timeout + } + return nil +} + +func (x *WorkerState) GetCurrentOperation() *OperationState { + if x != nil { + return x.CurrentOperation + } + return nil +} + +func (x *WorkerState) GetDrained() bool { + if x != nil { + return x.Drained + } + return false +} + +type DrainState struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + WorkerIdPattern map[string]string `protobuf:"bytes,1,rep,name=worker_id_pattern,json=workerIdPattern,proto3" json:"worker_id_pattern,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + CreatedTimestamp *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=created_timestamp,json=createdTimestamp,proto3" json:"created_timestamp,omitempty"` +} + +func (x *DrainState) Reset() { + *x = DrainState{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DrainState) String() string { + return protoimpl.X.MessageStringOf(x) +} + 
+func (*DrainState) ProtoMessage() {} + +func (x *DrainState) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DrainState.ProtoReflect.Descriptor instead. +func (*DrainState) Descriptor() ([]byte, []int) { + return file_pkg_proto_buildqueuestate_buildqueuestate_proto_rawDescGZIP(), []int{10} +} + +func (x *DrainState) GetWorkerIdPattern() map[string]string { + if x != nil { + return x.WorkerIdPattern + } + return nil +} + +func (x *DrainState) GetCreatedTimestamp() *timestamppb.Timestamp { + if x != nil { + return x.CreatedTimestamp + } + return nil +} + +type GetOperationRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + OperationName string `protobuf:"bytes,1,opt,name=operation_name,json=operationName,proto3" json:"operation_name,omitempty"` +} + +func (x *GetOperationRequest) Reset() { + *x = GetOperationRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetOperationRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetOperationRequest) ProtoMessage() {} + +func (x *GetOperationRequest) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetOperationRequest.ProtoReflect.Descriptor instead. 
+func (*GetOperationRequest) Descriptor() ([]byte, []int) { + return file_pkg_proto_buildqueuestate_buildqueuestate_proto_rawDescGZIP(), []int{11} +} + +func (x *GetOperationRequest) GetOperationName() string { + if x != nil { + return x.OperationName + } + return "" +} + +type GetOperationResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Operation *OperationState `protobuf:"bytes,1,opt,name=operation,proto3" json:"operation,omitempty"` +} + +func (x *GetOperationResponse) Reset() { + *x = GetOperationResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetOperationResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetOperationResponse) ProtoMessage() {} + +func (x *GetOperationResponse) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetOperationResponse.ProtoReflect.Descriptor instead. 
+func (*GetOperationResponse) Descriptor() ([]byte, []int) { + return file_pkg_proto_buildqueuestate_buildqueuestate_proto_rawDescGZIP(), []int{12} +} + +func (x *GetOperationResponse) GetOperation() *OperationState { + if x != nil { + return x.Operation + } + return nil +} + +type ListOperationsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PageSize uint32 `protobuf:"varint,1,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + StartAfter *ListOperationsRequest_StartAfter `protobuf:"bytes,2,opt,name=start_after,json=startAfter,proto3" json:"start_after,omitempty"` + FilterInvocationId *anypb.Any `protobuf:"bytes,3,opt,name=filter_invocation_id,json=filterInvocationId,proto3" json:"filter_invocation_id,omitempty"` + FilterStage v2.ExecutionStage_Value `protobuf:"varint,4,opt,name=filter_stage,json=filterStage,proto3,enum=build.bazel.remote.execution.v2.ExecutionStage_Value" json:"filter_stage,omitempty"` +} + +func (x *ListOperationsRequest) Reset() { + *x = ListOperationsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListOperationsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListOperationsRequest) ProtoMessage() {} + +func (x *ListOperationsRequest) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListOperationsRequest.ProtoReflect.Descriptor instead. 
+func (*ListOperationsRequest) Descriptor() ([]byte, []int) { + return file_pkg_proto_buildqueuestate_buildqueuestate_proto_rawDescGZIP(), []int{13} +} + +func (x *ListOperationsRequest) GetPageSize() uint32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListOperationsRequest) GetStartAfter() *ListOperationsRequest_StartAfter { + if x != nil { + return x.StartAfter + } + return nil +} + +func (x *ListOperationsRequest) GetFilterInvocationId() *anypb.Any { + if x != nil { + return x.FilterInvocationId + } + return nil +} + +func (x *ListOperationsRequest) GetFilterStage() v2.ExecutionStage_Value { + if x != nil { + return x.FilterStage + } + return v2.ExecutionStage_Value(0) +} + +type ListOperationsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Operations []*OperationState `protobuf:"bytes,1,rep,name=operations,proto3" json:"operations,omitempty"` + PaginationInfo *PaginationInfo `protobuf:"bytes,2,opt,name=pagination_info,json=paginationInfo,proto3" json:"pagination_info,omitempty"` +} + +func (x *ListOperationsResponse) Reset() { + *x = ListOperationsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListOperationsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListOperationsResponse) ProtoMessage() {} + +func (x *ListOperationsResponse) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListOperationsResponse.ProtoReflect.Descriptor instead. 
+func (*ListOperationsResponse) Descriptor() ([]byte, []int) { + return file_pkg_proto_buildqueuestate_buildqueuestate_proto_rawDescGZIP(), []int{14} +} + +func (x *ListOperationsResponse) GetOperations() []*OperationState { + if x != nil { + return x.Operations + } + return nil +} + +func (x *ListOperationsResponse) GetPaginationInfo() *PaginationInfo { + if x != nil { + return x.PaginationInfo + } + return nil +} + +type KillOperationsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Filter *KillOperationsRequest_Filter `protobuf:"bytes,1,opt,name=filter,proto3" json:"filter,omitempty"` + Status *status.Status `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"` +} + +func (x *KillOperationsRequest) Reset() { + *x = KillOperationsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *KillOperationsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*KillOperationsRequest) ProtoMessage() {} + +func (x *KillOperationsRequest) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use KillOperationsRequest.ProtoReflect.Descriptor instead. 
+func (*KillOperationsRequest) Descriptor() ([]byte, []int) { + return file_pkg_proto_buildqueuestate_buildqueuestate_proto_rawDescGZIP(), []int{15} +} + +func (x *KillOperationsRequest) GetFilter() *KillOperationsRequest_Filter { + if x != nil { + return x.Filter + } + return nil +} + +func (x *KillOperationsRequest) GetStatus() *status.Status { + if x != nil { + return x.Status + } + return nil +} + +type ListPlatformQueuesResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PlatformQueues []*PlatformQueueState `protobuf:"bytes,1,rep,name=platform_queues,json=platformQueues,proto3" json:"platform_queues,omitempty"` +} + +func (x *ListPlatformQueuesResponse) Reset() { + *x = ListPlatformQueuesResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListPlatformQueuesResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListPlatformQueuesResponse) ProtoMessage() {} + +func (x *ListPlatformQueuesResponse) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListPlatformQueuesResponse.ProtoReflect.Descriptor instead. 
+func (*ListPlatformQueuesResponse) Descriptor() ([]byte, []int) { + return file_pkg_proto_buildqueuestate_buildqueuestate_proto_rawDescGZIP(), []int{16} +} + +func (x *ListPlatformQueuesResponse) GetPlatformQueues() []*PlatformQueueState { + if x != nil { + return x.PlatformQueues + } + return nil +} + +type ListInvocationChildrenRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + InvocationName *InvocationName `protobuf:"bytes,1,opt,name=invocation_name,json=invocationName,proto3" json:"invocation_name,omitempty"` + Filter ListInvocationChildrenRequest_Filter `protobuf:"varint,2,opt,name=filter,proto3,enum=buildbarn.buildqueuestate.ListInvocationChildrenRequest_Filter" json:"filter,omitempty"` +} + +func (x *ListInvocationChildrenRequest) Reset() { + *x = ListInvocationChildrenRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListInvocationChildrenRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListInvocationChildrenRequest) ProtoMessage() {} + +func (x *ListInvocationChildrenRequest) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListInvocationChildrenRequest.ProtoReflect.Descriptor instead. 
+func (*ListInvocationChildrenRequest) Descriptor() ([]byte, []int) { + return file_pkg_proto_buildqueuestate_buildqueuestate_proto_rawDescGZIP(), []int{17} +} + +func (x *ListInvocationChildrenRequest) GetInvocationName() *InvocationName { + if x != nil { + return x.InvocationName + } + return nil +} + +func (x *ListInvocationChildrenRequest) GetFilter() ListInvocationChildrenRequest_Filter { + if x != nil { + return x.Filter + } + return ListInvocationChildrenRequest_ALL +} + +type ListInvocationChildrenResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Children []*InvocationChildState `protobuf:"bytes,1,rep,name=children,proto3" json:"children,omitempty"` +} + +func (x *ListInvocationChildrenResponse) Reset() { + *x = ListInvocationChildrenResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListInvocationChildrenResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListInvocationChildrenResponse) ProtoMessage() {} + +func (x *ListInvocationChildrenResponse) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListInvocationChildrenResponse.ProtoReflect.Descriptor instead. 
+func (*ListInvocationChildrenResponse) Descriptor() ([]byte, []int) { + return file_pkg_proto_buildqueuestate_buildqueuestate_proto_rawDescGZIP(), []int{18} +} + +func (x *ListInvocationChildrenResponse) GetChildren() []*InvocationChildState { + if x != nil { + return x.Children + } + return nil +} + +type ListQueuedOperationsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + InvocationName *InvocationName `protobuf:"bytes,1,opt,name=invocation_name,json=invocationName,proto3" json:"invocation_name,omitempty"` + PageSize uint32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + StartAfter *ListQueuedOperationsRequest_StartAfter `protobuf:"bytes,4,opt,name=start_after,json=startAfter,proto3" json:"start_after,omitempty"` +} + +func (x *ListQueuedOperationsRequest) Reset() { + *x = ListQueuedOperationsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListQueuedOperationsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListQueuedOperationsRequest) ProtoMessage() {} + +func (x *ListQueuedOperationsRequest) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListQueuedOperationsRequest.ProtoReflect.Descriptor instead. 
+func (*ListQueuedOperationsRequest) Descriptor() ([]byte, []int) { + return file_pkg_proto_buildqueuestate_buildqueuestate_proto_rawDescGZIP(), []int{19} +} + +func (x *ListQueuedOperationsRequest) GetInvocationName() *InvocationName { + if x != nil { + return x.InvocationName + } + return nil +} + +func (x *ListQueuedOperationsRequest) GetPageSize() uint32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListQueuedOperationsRequest) GetStartAfter() *ListQueuedOperationsRequest_StartAfter { + if x != nil { + return x.StartAfter + } + return nil +} + +type ListQueuedOperationsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + QueuedOperations []*OperationState `protobuf:"bytes,1,rep,name=queued_operations,json=queuedOperations,proto3" json:"queued_operations,omitempty"` + PaginationInfo *PaginationInfo `protobuf:"bytes,2,opt,name=pagination_info,json=paginationInfo,proto3" json:"pagination_info,omitempty"` +} + +func (x *ListQueuedOperationsResponse) Reset() { + *x = ListQueuedOperationsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListQueuedOperationsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListQueuedOperationsResponse) ProtoMessage() {} + +func (x *ListQueuedOperationsResponse) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListQueuedOperationsResponse.ProtoReflect.Descriptor instead. 
+func (*ListQueuedOperationsResponse) Descriptor() ([]byte, []int) { + return file_pkg_proto_buildqueuestate_buildqueuestate_proto_rawDescGZIP(), []int{20} +} + +func (x *ListQueuedOperationsResponse) GetQueuedOperations() []*OperationState { + if x != nil { + return x.QueuedOperations + } + return nil +} + +func (x *ListQueuedOperationsResponse) GetPaginationInfo() *PaginationInfo { + if x != nil { + return x.PaginationInfo + } + return nil +} + +type ListWorkersRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Filter *ListWorkersRequest_Filter `protobuf:"bytes,1,opt,name=filter,proto3" json:"filter,omitempty"` + PageSize uint32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + StartAfter *ListWorkersRequest_StartAfter `protobuf:"bytes,4,opt,name=start_after,json=startAfter,proto3" json:"start_after,omitempty"` +} + +func (x *ListWorkersRequest) Reset() { + *x = ListWorkersRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListWorkersRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListWorkersRequest) ProtoMessage() {} + +func (x *ListWorkersRequest) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListWorkersRequest.ProtoReflect.Descriptor instead. 
+func (*ListWorkersRequest) Descriptor() ([]byte, []int) { + return file_pkg_proto_buildqueuestate_buildqueuestate_proto_rawDescGZIP(), []int{21} +} + +func (x *ListWorkersRequest) GetFilter() *ListWorkersRequest_Filter { + if x != nil { + return x.Filter + } + return nil +} + +func (x *ListWorkersRequest) GetPageSize() uint32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListWorkersRequest) GetStartAfter() *ListWorkersRequest_StartAfter { + if x != nil { + return x.StartAfter + } + return nil +} + +type ListWorkersResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Workers []*WorkerState `protobuf:"bytes,1,rep,name=workers,proto3" json:"workers,omitempty"` + PaginationInfo *PaginationInfo `protobuf:"bytes,2,opt,name=pagination_info,json=paginationInfo,proto3" json:"pagination_info,omitempty"` +} + +func (x *ListWorkersResponse) Reset() { + *x = ListWorkersResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListWorkersResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListWorkersResponse) ProtoMessage() {} + +func (x *ListWorkersResponse) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListWorkersResponse.ProtoReflect.Descriptor instead. 
+func (*ListWorkersResponse) Descriptor() ([]byte, []int) { + return file_pkg_proto_buildqueuestate_buildqueuestate_proto_rawDescGZIP(), []int{22} +} + +func (x *ListWorkersResponse) GetWorkers() []*WorkerState { + if x != nil { + return x.Workers + } + return nil +} + +func (x *ListWorkersResponse) GetPaginationInfo() *PaginationInfo { + if x != nil { + return x.PaginationInfo + } + return nil +} + +type TerminateWorkersRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + WorkerIdPattern map[string]string `protobuf:"bytes,1,rep,name=worker_id_pattern,json=workerIdPattern,proto3" json:"worker_id_pattern,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *TerminateWorkersRequest) Reset() { + *x = TerminateWorkersRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TerminateWorkersRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TerminateWorkersRequest) ProtoMessage() {} + +func (x *TerminateWorkersRequest) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TerminateWorkersRequest.ProtoReflect.Descriptor instead. 
+func (*TerminateWorkersRequest) Descriptor() ([]byte, []int) { + return file_pkg_proto_buildqueuestate_buildqueuestate_proto_rawDescGZIP(), []int{23} +} + +func (x *TerminateWorkersRequest) GetWorkerIdPattern() map[string]string { + if x != nil { + return x.WorkerIdPattern + } + return nil +} + +type ListDrainsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SizeClassQueueName *SizeClassQueueName `protobuf:"bytes,1,opt,name=size_class_queue_name,json=sizeClassQueueName,proto3" json:"size_class_queue_name,omitempty"` +} + +func (x *ListDrainsRequest) Reset() { + *x = ListDrainsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListDrainsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListDrainsRequest) ProtoMessage() {} + +func (x *ListDrainsRequest) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListDrainsRequest.ProtoReflect.Descriptor instead. 
+func (*ListDrainsRequest) Descriptor() ([]byte, []int) { + return file_pkg_proto_buildqueuestate_buildqueuestate_proto_rawDescGZIP(), []int{24} +} + +func (x *ListDrainsRequest) GetSizeClassQueueName() *SizeClassQueueName { + if x != nil { + return x.SizeClassQueueName + } + return nil +} + +type ListDrainsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Drains []*DrainState `protobuf:"bytes,1,rep,name=drains,proto3" json:"drains,omitempty"` +} + +func (x *ListDrainsResponse) Reset() { + *x = ListDrainsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListDrainsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListDrainsResponse) ProtoMessage() {} + +func (x *ListDrainsResponse) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[25] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListDrainsResponse.ProtoReflect.Descriptor instead. 
+func (*ListDrainsResponse) Descriptor() ([]byte, []int) { + return file_pkg_proto_buildqueuestate_buildqueuestate_proto_rawDescGZIP(), []int{25} +} + +func (x *ListDrainsResponse) GetDrains() []*DrainState { + if x != nil { + return x.Drains + } + return nil +} + +type AddOrRemoveDrainRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SizeClassQueueName *SizeClassQueueName `protobuf:"bytes,1,opt,name=size_class_queue_name,json=sizeClassQueueName,proto3" json:"size_class_queue_name,omitempty"` + WorkerIdPattern map[string]string `protobuf:"bytes,2,rep,name=worker_id_pattern,json=workerIdPattern,proto3" json:"worker_id_pattern,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *AddOrRemoveDrainRequest) Reset() { + *x = AddOrRemoveDrainRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AddOrRemoveDrainRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AddOrRemoveDrainRequest) ProtoMessage() {} + +func (x *AddOrRemoveDrainRequest) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[26] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AddOrRemoveDrainRequest.ProtoReflect.Descriptor instead. 
+func (*AddOrRemoveDrainRequest) Descriptor() ([]byte, []int) { + return file_pkg_proto_buildqueuestate_buildqueuestate_proto_rawDescGZIP(), []int{26} +} + +func (x *AddOrRemoveDrainRequest) GetSizeClassQueueName() *SizeClassQueueName { + if x != nil { + return x.SizeClassQueueName + } + return nil +} + +func (x *AddOrRemoveDrainRequest) GetWorkerIdPattern() map[string]string { + if x != nil { + return x.WorkerIdPattern + } + return nil +} + +type BackgroundLearning struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *BackgroundLearning) Reset() { + *x = BackgroundLearning{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BackgroundLearning) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BackgroundLearning) ProtoMessage() {} + +func (x *BackgroundLearning) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[27] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BackgroundLearning.ProtoReflect.Descriptor instead. 
+func (*BackgroundLearning) Descriptor() ([]byte, []int) { + return file_pkg_proto_buildqueuestate_buildqueuestate_proto_rawDescGZIP(), []int{27} +} + +type ListOperationsRequest_StartAfter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + OperationName string `protobuf:"bytes,1,opt,name=operation_name,json=operationName,proto3" json:"operation_name,omitempty"` +} + +func (x *ListOperationsRequest_StartAfter) Reset() { + *x = ListOperationsRequest_StartAfter{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListOperationsRequest_StartAfter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListOperationsRequest_StartAfter) ProtoMessage() {} + +func (x *ListOperationsRequest_StartAfter) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[30] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListOperationsRequest_StartAfter.ProtoReflect.Descriptor instead. 
+func (*ListOperationsRequest_StartAfter) Descriptor() ([]byte, []int) { + return file_pkg_proto_buildqueuestate_buildqueuestate_proto_rawDescGZIP(), []int{13, 0} +} + +func (x *ListOperationsRequest_StartAfter) GetOperationName() string { + if x != nil { + return x.OperationName + } + return "" +} + +type KillOperationsRequest_Filter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Type: + // + // *KillOperationsRequest_Filter_OperationName + // *KillOperationsRequest_Filter_SizeClassQueueWithoutWorkers + Type isKillOperationsRequest_Filter_Type `protobuf_oneof:"type"` +} + +func (x *KillOperationsRequest_Filter) Reset() { + *x = KillOperationsRequest_Filter{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[31] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *KillOperationsRequest_Filter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*KillOperationsRequest_Filter) ProtoMessage() {} + +func (x *KillOperationsRequest_Filter) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[31] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use KillOperationsRequest_Filter.ProtoReflect.Descriptor instead. 
+func (*KillOperationsRequest_Filter) Descriptor() ([]byte, []int) { + return file_pkg_proto_buildqueuestate_buildqueuestate_proto_rawDescGZIP(), []int{15, 0} +} + +func (m *KillOperationsRequest_Filter) GetType() isKillOperationsRequest_Filter_Type { + if m != nil { + return m.Type + } + return nil +} + +func (x *KillOperationsRequest_Filter) GetOperationName() string { + if x, ok := x.GetType().(*KillOperationsRequest_Filter_OperationName); ok { + return x.OperationName + } + return "" +} + +func (x *KillOperationsRequest_Filter) GetSizeClassQueueWithoutWorkers() *SizeClassQueueName { + if x, ok := x.GetType().(*KillOperationsRequest_Filter_SizeClassQueueWithoutWorkers); ok { + return x.SizeClassQueueWithoutWorkers + } + return nil +} + +type isKillOperationsRequest_Filter_Type interface { + isKillOperationsRequest_Filter_Type() +} + +type KillOperationsRequest_Filter_OperationName struct { + OperationName string `protobuf:"bytes,1,opt,name=operation_name,json=operationName,proto3,oneof"` +} + +type KillOperationsRequest_Filter_SizeClassQueueWithoutWorkers struct { + SizeClassQueueWithoutWorkers *SizeClassQueueName `protobuf:"bytes,2,opt,name=size_class_queue_without_workers,json=sizeClassQueueWithoutWorkers,proto3,oneof"` +} + +func (*KillOperationsRequest_Filter_OperationName) isKillOperationsRequest_Filter_Type() {} + +func (*KillOperationsRequest_Filter_SizeClassQueueWithoutWorkers) isKillOperationsRequest_Filter_Type() { +} + +type ListQueuedOperationsRequest_StartAfter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Priority int32 `protobuf:"varint,1,opt,name=priority,proto3" json:"priority,omitempty"` + ExpectedDuration *durationpb.Duration `protobuf:"bytes,3,opt,name=expected_duration,json=expectedDuration,proto3" json:"expected_duration,omitempty"` + QueuedTimestamp *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=queued_timestamp,json=queuedTimestamp,proto3" 
json:"queued_timestamp,omitempty"` +} + +func (x *ListQueuedOperationsRequest_StartAfter) Reset() { + *x = ListQueuedOperationsRequest_StartAfter{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[32] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListQueuedOperationsRequest_StartAfter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListQueuedOperationsRequest_StartAfter) ProtoMessage() {} + +func (x *ListQueuedOperationsRequest_StartAfter) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[32] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListQueuedOperationsRequest_StartAfter.ProtoReflect.Descriptor instead. +func (*ListQueuedOperationsRequest_StartAfter) Descriptor() ([]byte, []int) { + return file_pkg_proto_buildqueuestate_buildqueuestate_proto_rawDescGZIP(), []int{19, 0} +} + +func (x *ListQueuedOperationsRequest_StartAfter) GetPriority() int32 { + if x != nil { + return x.Priority + } + return 0 +} + +func (x *ListQueuedOperationsRequest_StartAfter) GetExpectedDuration() *durationpb.Duration { + if x != nil { + return x.ExpectedDuration + } + return nil +} + +func (x *ListQueuedOperationsRequest_StartAfter) GetQueuedTimestamp() *timestamppb.Timestamp { + if x != nil { + return x.QueuedTimestamp + } + return nil +} + +type ListWorkersRequest_Filter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Type: + // + // *ListWorkersRequest_Filter_All + // *ListWorkersRequest_Filter_Executing + // *ListWorkersRequest_Filter_IdleSynchronizing + Type isListWorkersRequest_Filter_Type `protobuf_oneof:"type"` 
+} + +func (x *ListWorkersRequest_Filter) Reset() { + *x = ListWorkersRequest_Filter{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[33] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListWorkersRequest_Filter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListWorkersRequest_Filter) ProtoMessage() {} + +func (x *ListWorkersRequest_Filter) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[33] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListWorkersRequest_Filter.ProtoReflect.Descriptor instead. +func (*ListWorkersRequest_Filter) Descriptor() ([]byte, []int) { + return file_pkg_proto_buildqueuestate_buildqueuestate_proto_rawDescGZIP(), []int{21, 0} +} + +func (m *ListWorkersRequest_Filter) GetType() isListWorkersRequest_Filter_Type { + if m != nil { + return m.Type + } + return nil +} + +func (x *ListWorkersRequest_Filter) GetAll() *SizeClassQueueName { + if x, ok := x.GetType().(*ListWorkersRequest_Filter_All); ok { + return x.All + } + return nil +} + +func (x *ListWorkersRequest_Filter) GetExecuting() *InvocationName { + if x, ok := x.GetType().(*ListWorkersRequest_Filter_Executing); ok { + return x.Executing + } + return nil +} + +func (x *ListWorkersRequest_Filter) GetIdleSynchronizing() *InvocationName { + if x, ok := x.GetType().(*ListWorkersRequest_Filter_IdleSynchronizing); ok { + return x.IdleSynchronizing + } + return nil +} + +type isListWorkersRequest_Filter_Type interface { + isListWorkersRequest_Filter_Type() +} + +type ListWorkersRequest_Filter_All struct { + All *SizeClassQueueName `protobuf:"bytes,1,opt,name=all,proto3,oneof"` +} + +type ListWorkersRequest_Filter_Executing 
struct { + Executing *InvocationName `protobuf:"bytes,2,opt,name=executing,proto3,oneof"` +} + +type ListWorkersRequest_Filter_IdleSynchronizing struct { + IdleSynchronizing *InvocationName `protobuf:"bytes,3,opt,name=idle_synchronizing,json=idleSynchronizing,proto3,oneof"` +} + +func (*ListWorkersRequest_Filter_All) isListWorkersRequest_Filter_Type() {} + +func (*ListWorkersRequest_Filter_Executing) isListWorkersRequest_Filter_Type() {} + +func (*ListWorkersRequest_Filter_IdleSynchronizing) isListWorkersRequest_Filter_Type() {} + +type ListWorkersRequest_StartAfter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + WorkerId map[string]string `protobuf:"bytes,1,rep,name=worker_id,json=workerId,proto3" json:"worker_id,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *ListWorkersRequest_StartAfter) Reset() { + *x = ListWorkersRequest_StartAfter{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[34] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListWorkersRequest_StartAfter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListWorkersRequest_StartAfter) ProtoMessage() {} + +func (x *ListWorkersRequest_StartAfter) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[34] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListWorkersRequest_StartAfter.ProtoReflect.Descriptor instead. 
+func (*ListWorkersRequest_StartAfter) Descriptor() ([]byte, []int) { + return file_pkg_proto_buildqueuestate_buildqueuestate_proto_rawDescGZIP(), []int{21, 1} +} + +func (x *ListWorkersRequest_StartAfter) GetWorkerId() map[string]string { + if x != nil { + return x.WorkerId + } + return nil +} + +var File_pkg_proto_buildqueuestate_buildqueuestate_proto protoreflect.FileDescriptor + +var file_pkg_proto_buildqueuestate_buildqueuestate_proto_rawDesc = []byte{ + 0x0a, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x62, 0x75, 0x69, 0x6c, + 0x64, 0x71, 0x75, 0x65, 0x75, 0x65, 0x73, 0x74, 0x61, 0x74, 0x65, 0x2f, 0x62, 0x75, 0x69, 0x6c, + 0x64, 0x71, 0x75, 0x65, 0x75, 0x65, 0x73, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x19, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x62, 0x75, 0x69, + 0x6c, 0x64, 0x71, 0x75, 0x65, 0x75, 0x65, 0x73, 0x74, 0x61, 0x74, 0x65, 0x1a, 0x36, 0x62, 0x75, + 0x69, 0x6c, 0x64, 0x2f, 0x62, 0x61, 0x7a, 0x65, 0x6c, 0x2f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, + 0x2f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x76, 0x32, 0x2f, 0x72, 0x65, + 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 
0x74, 0x6f, 0x1a, 0x17, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x56, 0x0a, 0x0e, 0x50, 0x61, 0x67, 0x69, 0x6e, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x72, + 0x74, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x73, + 0x74, 0x61, 0x72, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x23, 0x0a, 0x0d, 0x74, 0x6f, 0x74, + 0x61, 0x6c, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x0c, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x22, 0x8c, + 0x01, 0x0a, 0x11, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x51, 0x75, 0x65, 0x75, 0x65, + 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x12, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, + 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x45, 0x0a, 0x08, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, + 0x72, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, + 0x2e, 0x62, 0x61, 0x7a, 0x65, 0x6c, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x65, 0x78, + 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x6c, 0x61, 0x74, 0x66, + 0x6f, 0x72, 0x6d, 0x52, 0x08, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x22, 0x91, 0x01, + 0x0a, 0x12, 0x53, 0x69, 0x7a, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x51, 0x75, 0x65, 0x75, 0x65, + 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x5c, 0x0a, 0x13, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, + 0x5f, 0x71, 0x75, 0x65, 0x75, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2c, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x62, 0x75, + 0x69, 
0x6c, 0x64, 0x71, 0x75, 0x65, 0x75, 0x65, 0x73, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x50, 0x6c, + 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x51, 0x75, 0x65, 0x75, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x52, + 0x11, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x51, 0x75, 0x65, 0x75, 0x65, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x73, 0x69, 0x7a, 0x65, 0x43, 0x6c, 0x61, 0x73, + 0x73, 0x22, 0x9a, 0x01, 0x0a, 0x0e, 0x49, 0x6e, 0x76, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x60, 0x0a, 0x15, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x63, 0x6c, 0x61, + 0x73, 0x73, 0x5f, 0x71, 0x75, 0x65, 0x75, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, + 0x62, 0x75, 0x69, 0x6c, 0x64, 0x71, 0x75, 0x65, 0x75, 0x65, 0x73, 0x74, 0x61, 0x74, 0x65, 0x2e, + 0x53, 0x69, 0x7a, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x51, 0x75, 0x65, 0x75, 0x65, 0x4e, 0x61, + 0x6d, 0x65, 0x52, 0x12, 0x73, 0x69, 0x7a, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x51, 0x75, 0x65, + 0x75, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x26, 0x0a, 0x03, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x03, 0x69, 0x64, 0x73, 0x22, 0xa7, + 0x06, 0x0a, 0x0e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x52, 0x0a, 0x0f, 0x69, 0x6e, 0x76, 0x6f, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, + 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, + 0x71, 0x75, 0x65, 0x75, 0x65, 0x73, 0x74, 
0x61, 0x74, 0x65, 0x2e, 0x49, 0x6e, 0x76, 0x6f, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x52, 0x0e, 0x69, 0x6e, 0x76, 0x6f, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x46, 0x0a, 0x11, 0x65, 0x78, 0x70, + 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0e, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x10, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x65, 0x64, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x45, 0x0a, 0x10, 0x71, 0x75, 0x65, 0x75, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0f, 0x71, 0x75, 0x65, 0x75, 0x65, 0x64, 0x54, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x4c, 0x0a, 0x0d, 0x61, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x27, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x2e, 0x62, 0x61, 0x7a, 0x65, 0x6c, 0x2e, 0x72, 0x65, + 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, + 0x32, 0x2e, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x52, 0x0c, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x34, 0x0a, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, + 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x30, 0x0a, 0x06, + 0x71, 0x75, 0x65, 0x75, 0x65, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 
0x16, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, + 0x6d, 0x70, 0x74, 0x79, 0x48, 0x00, 0x52, 0x06, 0x71, 0x75, 0x65, 0x75, 0x65, 0x64, 0x12, 0x36, + 0x0a, 0x09, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x18, 0x09, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x48, 0x00, 0x52, 0x09, 0x65, 0x78, 0x65, + 0x63, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x12, 0x50, 0x0a, 0x09, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, + 0x74, 0x65, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x62, 0x75, 0x69, 0x6c, + 0x64, 0x2e, 0x62, 0x61, 0x7a, 0x65, 0x6c, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x65, + 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x78, 0x65, 0x63, + 0x75, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x09, 0x63, + 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x61, 0x72, 0x67, + 0x65, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x61, 0x72, + 0x67, 0x65, 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, + 0x79, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, + 0x79, 0x12, 0x30, 0x0a, 0x14, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x5f, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x12, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x53, 0x75, 0x66, + 0x66, 0x69, 0x78, 0x12, 0x5e, 0x0a, 0x0f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x5f, 0x66, 0x75, + 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x35, 0x2e, 0x62, + 0x75, 0x69, 0x6c, 0x64, 0x2e, 0x62, 0x61, 0x7a, 0x65, 0x6c, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, + 0x65, 0x2e, 0x65, 
0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x44, + 0x69, 0x67, 0x65, 0x73, 0x74, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x52, 0x0e, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x46, 0x75, 0x6e, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x42, 0x07, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x67, 0x65, 0x4a, 0x04, 0x08, 0x03, + 0x10, 0x04, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x22, 0x9f, 0x02, 0x0a, 0x13, 0x53, 0x69, 0x7a, + 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x51, 0x75, 0x65, 0x75, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x73, 0x69, 0x7a, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12, + 0x34, 0x0a, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x07, 0x74, 0x69, + 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x73, + 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x77, 0x6f, + 0x72, 0x6b, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x72, + 0x61, 0x69, 0x6e, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x0b, 0x64, 0x72, 0x61, 0x69, 0x6e, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x53, 0x0a, + 0x0f, 0x72, 0x6f, 0x6f, 0x74, 0x5f, 0x69, 0x6e, 0x76, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, + 0x72, 0x6e, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x71, 0x75, 0x65, 0x75, 0x65, 0x73, 0x74, 0x61, + 0x74, 0x65, 0x2e, 0x49, 0x6e, 0x76, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x52, 0x0e, 0x72, 0x6f, 0x6f, 0x74, 0x49, 
0x6e, 0x76, 0x6f, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, + 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x22, 0xb2, 0x01, 0x0a, 0x12, 0x50, + 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x51, 0x75, 0x65, 0x75, 0x65, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x12, 0x40, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2c, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x62, 0x75, 0x69, 0x6c, + 0x64, 0x71, 0x75, 0x65, 0x75, 0x65, 0x73, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x50, 0x6c, 0x61, 0x74, + 0x66, 0x6f, 0x72, 0x6d, 0x51, 0x75, 0x65, 0x75, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x5a, 0x0a, 0x11, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, + 0x73, 0x5f, 0x71, 0x75, 0x65, 0x75, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, + 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, + 0x71, 0x75, 0x65, 0x75, 0x65, 0x73, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x53, 0x69, 0x7a, 0x65, 0x43, + 0x6c, 0x61, 0x73, 0x73, 0x51, 0x75, 0x65, 0x75, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0f, + 0x73, 0x69, 0x7a, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x51, 0x75, 0x65, 0x75, 0x65, 0x73, 0x22, + 0x93, 0x03, 0x0a, 0x0f, 0x49, 0x6e, 0x76, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x12, 0x36, 0x0a, 0x17, 0x71, 0x75, 0x65, 0x75, 0x65, 0x64, 0x5f, 0x6f, 0x70, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0d, 0x52, 0x15, 0x71, 0x75, 0x65, 0x75, 0x65, 0x64, 0x4f, 0x70, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x36, 0x0a, 0x17, 0x65, + 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x73, + 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x15, 0x65, 
0x78, + 0x65, 0x63, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x73, 0x43, 0x6f, + 0x75, 0x6e, 0x74, 0x12, 0x2c, 0x0a, 0x12, 0x69, 0x64, 0x6c, 0x65, 0x5f, 0x77, 0x6f, 0x72, 0x6b, + 0x65, 0x72, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x10, 0x69, 0x64, 0x6c, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x75, 0x6e, + 0x74, 0x12, 0x47, 0x0a, 0x20, 0x69, 0x64, 0x6c, 0x65, 0x5f, 0x73, 0x79, 0x6e, 0x63, 0x68, 0x72, + 0x6f, 0x6e, 0x69, 0x7a, 0x69, 0x6e, 0x67, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x73, 0x5f, + 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x1d, 0x69, 0x64, 0x6c, + 0x65, 0x53, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x69, 0x6e, 0x67, 0x57, 0x6f, + 0x72, 0x6b, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x63, 0x68, + 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x0d, 0x52, 0x0d, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x43, 0x6f, 0x75, 0x6e, + 0x74, 0x12, 0x32, 0x0a, 0x15, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x68, 0x69, 0x6c, + 0x64, 0x72, 0x65, 0x6e, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x13, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, + 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x32, 0x0a, 0x15, 0x71, 0x75, 0x65, 0x75, 0x65, 0x64, 0x5f, + 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x09, + 0x20, 0x01, 0x28, 0x0d, 0x52, 0x13, 0x71, 0x75, 0x65, 0x75, 0x65, 0x64, 0x43, 0x68, 0x69, 0x6c, + 0x64, 0x72, 0x65, 0x6e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x4a, + 0x04, 0x08, 0x03, 0x10, 0x04, 0x22, 0x7e, 0x0a, 0x14, 0x49, 0x6e, 0x76, 0x6f, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x24, 0x0a, + 0x02, 0x69, 0x64, 0x18, 0x01, 
0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, + 0x02, 0x69, 0x64, 0x12, 0x40, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x62, + 0x75, 0x69, 0x6c, 0x64, 0x71, 0x75, 0x65, 0x75, 0x65, 0x73, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x49, + 0x6e, 0x76, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, + 0x73, 0x74, 0x61, 0x74, 0x65, 0x22, 0xac, 0x02, 0x0a, 0x0b, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x3e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x2e, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x62, 0x75, + 0x69, 0x6c, 0x64, 0x71, 0x75, 0x65, 0x75, 0x65, 0x73, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x57, 0x6f, + 0x72, 0x6b, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x49, 0x64, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x02, 0x69, 0x64, 0x12, 0x34, 0x0a, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x56, 0x0a, 0x11, 0x63, + 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, + 0x72, 0x6e, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x71, 0x75, 0x65, 0x75, 0x65, 0x73, 0x74, 0x61, + 0x74, 0x65, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x52, 0x10, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x72, 0x61, 0x69, 
0x6e, 0x65, 0x64, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x64, 0x72, 0x61, 0x69, 0x6e, 0x65, 0x64, 0x1a, 0x35, 0x0a, + 0x07, 0x49, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3a, 0x02, 0x38, 0x01, 0x22, 0x81, 0x02, 0x0a, 0x0a, 0x44, 0x72, 0x61, 0x69, 0x6e, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x12, 0x66, 0x0a, 0x11, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x5f, 0x69, 0x64, + 0x5f, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, + 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, + 0x71, 0x75, 0x65, 0x75, 0x65, 0x73, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x44, 0x72, 0x61, 0x69, 0x6e, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x64, 0x50, 0x61, + 0x74, 0x74, 0x65, 0x72, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x77, 0x6f, 0x72, 0x6b, + 0x65, 0x72, 0x49, 0x64, 0x50, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, 0x47, 0x0a, 0x11, 0x63, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x52, 0x10, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x1a, 0x42, 0x0a, 0x14, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x64, + 0x50, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, + 0x61, 
0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x3c, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x4f, + 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x25, 0x0a, 0x0e, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x5f, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x4f, 0x70, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x47, + 0x0a, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x29, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x62, 0x75, + 0x69, 0x6c, 0x64, 0x71, 0x75, 0x65, 0x75, 0x65, 0x73, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x4f, 0x70, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x09, 0x6f, 0x70, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xe9, 0x02, 0x0a, 0x15, 0x4c, 0x69, 0x73, 0x74, + 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x5c, + 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x61, 0x66, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, + 0x62, 0x75, 0x69, 0x6c, 0x64, 0x71, 0x75, 0x65, 0x75, 0x65, 0x73, 0x74, 0x61, 0x74, 0x65, 0x2e, + 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x41, 0x66, 0x74, 0x65, 0x72, + 0x52, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x41, 0x66, 0x74, 0x65, 0x72, 0x12, 0x46, 0x0a, 0x14, + 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 
0x69, 0x6e, 0x76, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, + 0x52, 0x12, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x49, 0x6e, 0x76, 0x6f, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x58, 0x0a, 0x0c, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x73, + 0x74, 0x61, 0x67, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x35, 0x2e, 0x62, 0x75, 0x69, + 0x6c, 0x64, 0x2e, 0x62, 0x61, 0x7a, 0x65, 0x6c, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, + 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x78, 0x65, + 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x67, 0x65, 0x2e, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x52, 0x0b, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x67, 0x65, 0x1a, 0x33, + 0x0a, 0x0a, 0x53, 0x74, 0x61, 0x72, 0x74, 0x41, 0x66, 0x74, 0x65, 0x72, 0x12, 0x25, 0x0a, 0x0e, + 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, + 0x61, 0x6d, 0x65, 0x22, 0xb7, 0x01, 0x0a, 0x16, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x70, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x49, + 0x0a, 0x0a, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x62, + 0x75, 0x69, 0x6c, 0x64, 0x71, 0x75, 0x65, 0x75, 0x65, 0x73, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x4f, + 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0a, 0x6f, + 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x52, 0x0a, 0x0f, 0x70, 0x61, 0x67, + 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 
0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x62, + 0x75, 0x69, 0x6c, 0x64, 0x71, 0x75, 0x65, 0x75, 0x65, 0x73, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x50, + 0x61, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0e, 0x70, + 0x61, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0xc9, 0x02, + 0x0a, 0x15, 0x4b, 0x69, 0x6c, 0x6c, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4f, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, + 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, + 0x61, 0x72, 0x6e, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x71, 0x75, 0x65, 0x75, 0x65, 0x73, 0x74, + 0x61, 0x74, 0x65, 0x2e, 0x4b, 0x69, 0x6c, 0x6c, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x2a, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x1a, 0xb2, 0x01, 0x0a, 0x06, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, + 0x27, 0x0a, 0x0e, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0d, 0x6f, 0x70, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x77, 0x0a, 0x20, 0x73, 0x69, 0x7a, 0x65, + 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x71, 0x75, 0x65, 0x75, 0x65, 0x5f, 0x77, 0x69, 0x74, + 0x68, 0x6f, 0x75, 0x74, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x62, + 0x75, 0x69, 0x6c, 
0x64, 0x71, 0x75, 0x65, 0x75, 0x65, 0x73, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x53, + 0x69, 0x7a, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x51, 0x75, 0x65, 0x75, 0x65, 0x4e, 0x61, 0x6d, + 0x65, 0x48, 0x00, 0x52, 0x1c, 0x73, 0x69, 0x7a, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x51, 0x75, + 0x65, 0x75, 0x65, 0x57, 0x69, 0x74, 0x68, 0x6f, 0x75, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, + 0x73, 0x42, 0x06, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x22, 0x74, 0x0a, 0x1a, 0x4c, 0x69, 0x73, + 0x74, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x51, 0x75, 0x65, 0x75, 0x65, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x56, 0x0a, 0x0f, 0x70, 0x6c, 0x61, 0x74, 0x66, + 0x6f, 0x72, 0x6d, 0x5f, 0x71, 0x75, 0x65, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x2d, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x62, 0x75, 0x69, + 0x6c, 0x64, 0x71, 0x75, 0x65, 0x75, 0x65, 0x73, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x50, 0x6c, 0x61, + 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x51, 0x75, 0x65, 0x75, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, + 0x0e, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x51, 0x75, 0x65, 0x75, 0x65, 0x73, 0x22, + 0xf7, 0x01, 0x0a, 0x1d, 0x4c, 0x69, 0x73, 0x74, 0x49, 0x6e, 0x76, 0x6f, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x52, 0x0a, 0x0f, 0x69, 0x6e, 0x76, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x62, 0x75, 0x69, + 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x71, 0x75, 0x65, 0x75, + 0x65, 0x73, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x49, 0x6e, 0x76, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x52, 0x0e, 0x69, 0x6e, 0x76, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x57, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3f, 0x2e, 0x62, 
0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, + 0x6e, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x71, 0x75, 0x65, 0x75, 0x65, 0x73, 0x74, 0x61, 0x74, + 0x65, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x49, 0x6e, 0x76, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x43, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, + 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0x29, + 0x0a, 0x06, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x4c, 0x4c, 0x10, + 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x01, 0x12, 0x0a, 0x0a, + 0x06, 0x51, 0x55, 0x45, 0x55, 0x45, 0x44, 0x10, 0x02, 0x22, 0x6d, 0x0a, 0x1e, 0x4c, 0x69, 0x73, + 0x74, 0x49, 0x6e, 0x76, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x69, 0x6c, 0x64, + 0x72, 0x65, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4b, 0x0a, 0x08, 0x63, + 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, + 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x71, + 0x75, 0x65, 0x75, 0x65, 0x73, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x49, 0x6e, 0x76, 0x6f, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x08, + 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x22, 0xb2, 0x03, 0x0a, 0x1b, 0x4c, 0x69, 0x73, + 0x74, 0x51, 0x75, 0x65, 0x75, 0x65, 0x64, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x52, 0x0a, 0x0f, 0x69, 0x6e, 0x76, 0x6f, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x29, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x62, 0x75, + 0x69, 0x6c, 0x64, 0x71, 0x75, 0x65, 0x75, 0x65, 0x73, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x49, 0x6e, + 0x76, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x52, 0x0e, 0x69, 
0x6e, + 0x76, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, + 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x62, 0x0a, 0x0b, 0x73, 0x74, 0x61, + 0x72, 0x74, 0x5f, 0x61, 0x66, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x41, + 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, + 0x71, 0x75, 0x65, 0x75, 0x65, 0x73, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x51, + 0x75, 0x65, 0x75, 0x65, 0x64, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x41, 0x66, 0x74, 0x65, + 0x72, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x41, 0x66, 0x74, 0x65, 0x72, 0x1a, 0xb7, 0x01, + 0x0a, 0x0a, 0x53, 0x74, 0x61, 0x72, 0x74, 0x41, 0x66, 0x74, 0x65, 0x72, 0x12, 0x1a, 0x0a, 0x08, + 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, + 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x46, 0x0a, 0x11, 0x65, 0x78, 0x70, 0x65, + 0x63, 0x74, 0x65, 0x64, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, + 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x65, 0x64, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x45, 0x0a, 0x10, 0x71, 0x75, 0x65, 0x75, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0f, 0x71, 0x75, 0x65, 0x75, 0x65, 0x64, 0x54, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 
0x6d, 0x70, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x22, 0xca, 0x01, + 0x0a, 0x1c, 0x4c, 0x69, 0x73, 0x74, 0x51, 0x75, 0x65, 0x75, 0x65, 0x64, 0x4f, 0x70, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x56, + 0x0a, 0x11, 0x71, 0x75, 0x65, 0x75, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x62, 0x75, 0x69, 0x6c, + 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x71, 0x75, 0x65, 0x75, 0x65, + 0x73, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, + 0x74, 0x61, 0x74, 0x65, 0x52, 0x10, 0x71, 0x75, 0x65, 0x75, 0x65, 0x64, 0x4f, 0x70, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x52, 0x0a, 0x0f, 0x70, 0x61, 0x67, 0x69, 0x6e, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x29, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x62, 0x75, 0x69, 0x6c, + 0x64, 0x71, 0x75, 0x65, 0x75, 0x65, 0x73, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x50, 0x61, 0x67, 0x69, + 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0e, 0x70, 0x61, 0x67, 0x69, + 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x88, 0x05, 0x0a, 0x12, 0x4c, + 0x69, 0x73, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x4c, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x34, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x62, 0x75, + 0x69, 0x6c, 0x64, 0x71, 0x75, 0x65, 0x75, 0x65, 0x73, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x4c, 0x69, + 0x73, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, + 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 
0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0d, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x59, 0x0a, 0x0b, + 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x61, 0x66, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x38, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x62, 0x75, + 0x69, 0x6c, 0x64, 0x71, 0x75, 0x65, 0x75, 0x65, 0x73, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x4c, 0x69, + 0x73, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x41, 0x66, 0x74, 0x65, 0x72, 0x52, 0x0a, 0x73, 0x74, 0x61, + 0x72, 0x74, 0x41, 0x66, 0x74, 0x65, 0x72, 0x1a, 0xfa, 0x01, 0x0a, 0x06, 0x46, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x12, 0x41, 0x0a, 0x03, 0x61, 0x6c, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2d, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x62, 0x75, 0x69, 0x6c, + 0x64, 0x71, 0x75, 0x65, 0x75, 0x65, 0x73, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x53, 0x69, 0x7a, 0x65, + 0x43, 0x6c, 0x61, 0x73, 0x73, 0x51, 0x75, 0x65, 0x75, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x48, 0x00, + 0x52, 0x03, 0x61, 0x6c, 0x6c, 0x12, 0x49, 0x0a, 0x09, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, + 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, + 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x71, 0x75, 0x65, 0x75, 0x65, 0x73, + 0x74, 0x61, 0x74, 0x65, 0x2e, 0x49, 0x6e, 0x76, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, + 0x61, 0x6d, 0x65, 0x48, 0x00, 0x52, 0x09, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6e, 0x67, + 0x12, 0x5a, 0x0a, 0x12, 0x69, 0x64, 0x6c, 0x65, 0x5f, 0x73, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, + 0x6e, 0x69, 0x7a, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x62, + 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x71, 0x75, + 0x65, 0x75, 0x65, 0x73, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x49, 0x6e, 0x76, 0x6f, 0x63, 0x61, 0x74, + 0x69, 
0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x48, 0x00, 0x52, 0x11, 0x69, 0x64, 0x6c, 0x65, 0x53, + 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x69, 0x6e, 0x67, 0x42, 0x06, 0x0a, 0x04, + 0x74, 0x79, 0x70, 0x65, 0x1a, 0xae, 0x01, 0x0a, 0x0a, 0x53, 0x74, 0x61, 0x72, 0x74, 0x41, 0x66, + 0x74, 0x65, 0x72, 0x12, 0x63, 0x0a, 0x09, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x5f, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x46, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, + 0x72, 0x6e, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x71, 0x75, 0x65, 0x75, 0x65, 0x73, 0x74, 0x61, + 0x74, 0x65, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x41, 0x66, 0x74, 0x65, 0x72, + 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, + 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x64, 0x1a, 0x3b, 0x0a, 0x0d, 0x57, 0x6f, 0x72, 0x6b, + 0x65, 0x72, 0x49, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xab, 0x01, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x57, 0x6f, + 0x72, 0x6b, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x40, 0x0a, + 0x07, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, + 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, + 0x71, 0x75, 0x65, 0x75, 0x65, 0x73, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x65, + 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x07, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x73, 0x12, + 0x52, 0x0a, 0x0f, 0x70, 0x61, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, + 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 
0x0b, 0x32, 0x29, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, + 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x71, 0x75, 0x65, 0x75, 0x65, 0x73, + 0x74, 0x61, 0x74, 0x65, 0x2e, 0x50, 0x61, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, + 0x6e, 0x66, 0x6f, 0x52, 0x0e, 0x70, 0x61, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, + 0x6e, 0x66, 0x6f, 0x22, 0xd2, 0x01, 0x0a, 0x17, 0x54, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x74, + 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x73, 0x0a, 0x11, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x5f, 0x70, 0x61, 0x74, + 0x74, 0x65, 0x72, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x47, 0x2e, 0x62, 0x75, 0x69, + 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x71, 0x75, 0x65, 0x75, + 0x65, 0x73, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x54, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x65, + 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x57, + 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x64, 0x50, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x0f, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x64, 0x50, 0x61, 0x74, + 0x74, 0x65, 0x72, 0x6e, 0x1a, 0x42, 0x0a, 0x14, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x64, + 0x50, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x75, 0x0a, 0x11, 0x4c, 0x69, 0x73, 0x74, + 0x44, 0x72, 0x61, 0x69, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x60, 0x0a, + 0x15, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x71, 0x75, 0x65, 0x75, + 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 
0x2d, 0x2e, 0x62, + 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x71, 0x75, + 0x65, 0x75, 0x65, 0x73, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x53, 0x69, 0x7a, 0x65, 0x43, 0x6c, 0x61, + 0x73, 0x73, 0x51, 0x75, 0x65, 0x75, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x52, 0x12, 0x73, 0x69, 0x7a, + 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x51, 0x75, 0x65, 0x75, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x22, + 0x53, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x44, 0x72, 0x61, 0x69, 0x6e, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3d, 0x0a, 0x06, 0x64, 0x72, 0x61, 0x69, 0x6e, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, + 0x6e, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x71, 0x75, 0x65, 0x75, 0x65, 0x73, 0x74, 0x61, 0x74, + 0x65, 0x2e, 0x44, 0x72, 0x61, 0x69, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x06, 0x64, 0x72, + 0x61, 0x69, 0x6e, 0x73, 0x22, 0xb4, 0x02, 0x0a, 0x17, 0x41, 0x64, 0x64, 0x4f, 0x72, 0x52, 0x65, + 0x6d, 0x6f, 0x76, 0x65, 0x44, 0x72, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x60, 0x0a, 0x15, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x71, + 0x75, 0x65, 0x75, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2d, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x62, 0x75, 0x69, 0x6c, + 0x64, 0x71, 0x75, 0x65, 0x75, 0x65, 0x73, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x53, 0x69, 0x7a, 0x65, + 0x43, 0x6c, 0x61, 0x73, 0x73, 0x51, 0x75, 0x65, 0x75, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x52, 0x12, + 0x73, 0x69, 0x7a, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x51, 0x75, 0x65, 0x75, 0x65, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x73, 0x0a, 0x11, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x5f, + 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x47, 0x2e, + 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x71, + 0x75, 0x65, 0x75, 
0x65, 0x73, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x41, 0x64, 0x64, 0x4f, 0x72, 0x52, + 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x44, 0x72, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x64, 0x50, 0x61, 0x74, 0x74, 0x65, 0x72, + 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x64, + 0x50, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x1a, 0x42, 0x0a, 0x14, 0x57, 0x6f, 0x72, 0x6b, 0x65, + 0x72, 0x49, 0x64, 0x50, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x14, 0x0a, 0x12, 0x42, + 0x61, 0x63, 0x6b, 0x67, 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x4c, 0x65, 0x61, 0x72, 0x6e, 0x69, 0x6e, + 0x67, 0x32, 0xc0, 0x09, 0x0a, 0x0f, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x51, 0x75, 0x65, 0x75, 0x65, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x6f, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x4f, 0x70, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2e, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, + 0x6e, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x71, 0x75, 0x65, 0x75, 0x65, 0x73, 0x74, 0x61, 0x74, + 0x65, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, + 0x6e, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x71, 0x75, 0x65, 0x75, 0x65, 0x73, 0x74, 0x61, 0x74, + 0x65, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x75, 0x0a, 0x0e, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x70, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, + 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x62, 0x75, 0x69, 0x6c, 
0x64, 0x71, 0x75, 0x65, 0x75, 0x65, 0x73, + 0x74, 0x61, 0x74, 0x65, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x31, 0x2e, 0x62, 0x75, 0x69, + 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x71, 0x75, 0x65, 0x75, + 0x65, 0x73, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x70, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5a, 0x0a, + 0x0e, 0x4b, 0x69, 0x6c, 0x6c, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, + 0x30, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x62, 0x75, 0x69, 0x6c, + 0x64, 0x71, 0x75, 0x65, 0x75, 0x65, 0x73, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x4b, 0x69, 0x6c, 0x6c, + 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x63, 0x0a, 0x12, 0x4c, 0x69, 0x73, + 0x74, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x51, 0x75, 0x65, 0x75, 0x65, 0x73, 0x12, + 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x35, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, + 0x61, 0x72, 0x6e, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x71, 0x75, 0x65, 0x75, 0x65, 0x73, 0x74, + 0x61, 0x74, 0x65, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, + 0x51, 0x75, 0x65, 0x75, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x8d, + 0x01, 0x0a, 0x16, 0x4c, 0x69, 0x73, 0x74, 0x49, 0x6e, 0x76, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x12, 0x38, 0x2e, 0x62, 0x75, 0x69, 0x6c, + 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x71, 0x75, 0x65, 0x75, 
0x65, + 0x73, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x49, 0x6e, 0x76, 0x6f, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x39, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, + 0x62, 0x75, 0x69, 0x6c, 0x64, 0x71, 0x75, 0x65, 0x75, 0x65, 0x73, 0x74, 0x61, 0x74, 0x65, 0x2e, + 0x4c, 0x69, 0x73, 0x74, 0x49, 0x6e, 0x76, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, + 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x87, + 0x01, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x51, 0x75, 0x65, 0x75, 0x65, 0x64, 0x4f, 0x70, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x36, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, + 0x61, 0x72, 0x6e, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x71, 0x75, 0x65, 0x75, 0x65, 0x73, 0x74, + 0x61, 0x74, 0x65, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x51, 0x75, 0x65, 0x75, 0x65, 0x64, 0x4f, 0x70, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x37, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x62, 0x75, 0x69, 0x6c, + 0x64, 0x71, 0x75, 0x65, 0x75, 0x65, 0x73, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x4c, 0x69, 0x73, 0x74, + 0x51, 0x75, 0x65, 0x75, 0x65, 0x64, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6c, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, + 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x73, 0x12, 0x2d, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, + 0x61, 0x72, 0x6e, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x71, 0x75, 0x65, 0x75, 0x65, 0x73, 0x74, + 0x61, 0x74, 0x65, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x73, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, + 0x72, 0x6e, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x71, 0x75, 0x65, 0x75, 0x65, 0x73, 0x74, 0x61, + 0x74, 0x65, 0x2e, 0x4c, 0x69, 
0x73, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5e, 0x0a, 0x10, 0x54, 0x65, 0x72, 0x6d, 0x69, 0x6e, + 0x61, 0x74, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x73, 0x12, 0x32, 0x2e, 0x62, 0x75, 0x69, + 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x71, 0x75, 0x65, 0x75, + 0x65, 0x73, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x54, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x65, + 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x69, 0x0a, 0x0a, 0x4c, 0x69, 0x73, 0x74, 0x44, 0x72, + 0x61, 0x69, 0x6e, 0x73, 0x12, 0x2c, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, + 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x71, 0x75, 0x65, 0x75, 0x65, 0x73, 0x74, 0x61, 0x74, 0x65, + 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x44, 0x72, 0x61, 0x69, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x62, + 0x75, 0x69, 0x6c, 0x64, 0x71, 0x75, 0x65, 0x75, 0x65, 0x73, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x4c, + 0x69, 0x73, 0x74, 0x44, 0x72, 0x61, 0x69, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x56, 0x0a, 0x08, 0x41, 0x64, 0x64, 0x44, 0x72, 0x61, 0x69, 0x6e, 0x12, 0x32, 0x2e, + 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x71, + 0x75, 0x65, 0x75, 0x65, 0x73, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x41, 0x64, 0x64, 0x4f, 0x72, 0x52, + 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x44, 0x72, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x59, 0x0a, 0x0b, 0x52, 0x65, 0x6d, + 0x6f, 0x76, 0x65, 0x44, 0x72, 0x61, 0x69, 0x6e, 0x12, 0x32, 0x2e, 
0x62, 0x75, 0x69, 0x6c, 0x64, + 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x71, 0x75, 0x65, 0x75, 0x65, 0x73, + 0x74, 0x61, 0x74, 0x65, 0x2e, 0x41, 0x64, 0x64, 0x4f, 0x72, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, + 0x44, 0x72, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, + 0x6d, 0x70, 0x74, 0x79, 0x42, 0x44, 0x5a, 0x42, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2f, 0x62, 0x62, 0x2d, + 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2d, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, + 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x62, 0x75, 0x69, 0x6c, 0x64, + 0x71, 0x75, 0x65, 0x75, 0x65, 0x73, 0x74, 0x61, 0x74, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_pkg_proto_buildqueuestate_buildqueuestate_proto_rawDescOnce sync.Once + file_pkg_proto_buildqueuestate_buildqueuestate_proto_rawDescData = file_pkg_proto_buildqueuestate_buildqueuestate_proto_rawDesc +) + +func file_pkg_proto_buildqueuestate_buildqueuestate_proto_rawDescGZIP() []byte { + file_pkg_proto_buildqueuestate_buildqueuestate_proto_rawDescOnce.Do(func() { + file_pkg_proto_buildqueuestate_buildqueuestate_proto_rawDescData = protoimpl.X.CompressGZIP(file_pkg_proto_buildqueuestate_buildqueuestate_proto_rawDescData) + }) + return file_pkg_proto_buildqueuestate_buildqueuestate_proto_rawDescData +} + +var file_pkg_proto_buildqueuestate_buildqueuestate_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes = make([]protoimpl.MessageInfo, 38) +var file_pkg_proto_buildqueuestate_buildqueuestate_proto_goTypes = []interface{}{ + (ListInvocationChildrenRequest_Filter)(0), // 0: buildbarn.buildqueuestate.ListInvocationChildrenRequest.Filter + (*PaginationInfo)(nil), // 1: 
buildbarn.buildqueuestate.PaginationInfo + (*PlatformQueueName)(nil), // 2: buildbarn.buildqueuestate.PlatformQueueName + (*SizeClassQueueName)(nil), // 3: buildbarn.buildqueuestate.SizeClassQueueName + (*InvocationName)(nil), // 4: buildbarn.buildqueuestate.InvocationName + (*OperationState)(nil), // 5: buildbarn.buildqueuestate.OperationState + (*SizeClassQueueState)(nil), // 6: buildbarn.buildqueuestate.SizeClassQueueState + (*PlatformQueueState)(nil), // 7: buildbarn.buildqueuestate.PlatformQueueState + (*InvocationState)(nil), // 8: buildbarn.buildqueuestate.InvocationState + (*InvocationChildState)(nil), // 9: buildbarn.buildqueuestate.InvocationChildState + (*WorkerState)(nil), // 10: buildbarn.buildqueuestate.WorkerState + (*DrainState)(nil), // 11: buildbarn.buildqueuestate.DrainState + (*GetOperationRequest)(nil), // 12: buildbarn.buildqueuestate.GetOperationRequest + (*GetOperationResponse)(nil), // 13: buildbarn.buildqueuestate.GetOperationResponse + (*ListOperationsRequest)(nil), // 14: buildbarn.buildqueuestate.ListOperationsRequest + (*ListOperationsResponse)(nil), // 15: buildbarn.buildqueuestate.ListOperationsResponse + (*KillOperationsRequest)(nil), // 16: buildbarn.buildqueuestate.KillOperationsRequest + (*ListPlatformQueuesResponse)(nil), // 17: buildbarn.buildqueuestate.ListPlatformQueuesResponse + (*ListInvocationChildrenRequest)(nil), // 18: buildbarn.buildqueuestate.ListInvocationChildrenRequest + (*ListInvocationChildrenResponse)(nil), // 19: buildbarn.buildqueuestate.ListInvocationChildrenResponse + (*ListQueuedOperationsRequest)(nil), // 20: buildbarn.buildqueuestate.ListQueuedOperationsRequest + (*ListQueuedOperationsResponse)(nil), // 21: buildbarn.buildqueuestate.ListQueuedOperationsResponse + (*ListWorkersRequest)(nil), // 22: buildbarn.buildqueuestate.ListWorkersRequest + (*ListWorkersResponse)(nil), // 23: buildbarn.buildqueuestate.ListWorkersResponse + (*TerminateWorkersRequest)(nil), // 24: 
buildbarn.buildqueuestate.TerminateWorkersRequest + (*ListDrainsRequest)(nil), // 25: buildbarn.buildqueuestate.ListDrainsRequest + (*ListDrainsResponse)(nil), // 26: buildbarn.buildqueuestate.ListDrainsResponse + (*AddOrRemoveDrainRequest)(nil), // 27: buildbarn.buildqueuestate.AddOrRemoveDrainRequest + (*BackgroundLearning)(nil), // 28: buildbarn.buildqueuestate.BackgroundLearning + nil, // 29: buildbarn.buildqueuestate.WorkerState.IdEntry + nil, // 30: buildbarn.buildqueuestate.DrainState.WorkerIdPatternEntry + (*ListOperationsRequest_StartAfter)(nil), // 31: buildbarn.buildqueuestate.ListOperationsRequest.StartAfter + (*KillOperationsRequest_Filter)(nil), // 32: buildbarn.buildqueuestate.KillOperationsRequest.Filter + (*ListQueuedOperationsRequest_StartAfter)(nil), // 33: buildbarn.buildqueuestate.ListQueuedOperationsRequest.StartAfter + (*ListWorkersRequest_Filter)(nil), // 34: buildbarn.buildqueuestate.ListWorkersRequest.Filter + (*ListWorkersRequest_StartAfter)(nil), // 35: buildbarn.buildqueuestate.ListWorkersRequest.StartAfter + nil, // 36: buildbarn.buildqueuestate.ListWorkersRequest.StartAfter.WorkerIdEntry + nil, // 37: buildbarn.buildqueuestate.TerminateWorkersRequest.WorkerIdPatternEntry + nil, // 38: buildbarn.buildqueuestate.AddOrRemoveDrainRequest.WorkerIdPatternEntry + (*v2.Platform)(nil), // 39: build.bazel.remote.execution.v2.Platform + (*anypb.Any)(nil), // 40: google.protobuf.Any + (*durationpb.Duration)(nil), // 41: google.protobuf.Duration + (*timestamppb.Timestamp)(nil), // 42: google.protobuf.Timestamp + (*v2.Digest)(nil), // 43: build.bazel.remote.execution.v2.Digest + (*emptypb.Empty)(nil), // 44: google.protobuf.Empty + (*v2.ExecuteResponse)(nil), // 45: build.bazel.remote.execution.v2.ExecuteResponse + (v2.DigestFunction_Value)(0), // 46: build.bazel.remote.execution.v2.DigestFunction.Value + (v2.ExecutionStage_Value)(0), // 47: build.bazel.remote.execution.v2.ExecutionStage.Value + (*status.Status)(nil), // 48: google.rpc.Status +} 
+var file_pkg_proto_buildqueuestate_buildqueuestate_proto_depIdxs = []int32{ + 39, // 0: buildbarn.buildqueuestate.PlatformQueueName.platform:type_name -> build.bazel.remote.execution.v2.Platform + 2, // 1: buildbarn.buildqueuestate.SizeClassQueueName.platform_queue_name:type_name -> buildbarn.buildqueuestate.PlatformQueueName + 3, // 2: buildbarn.buildqueuestate.InvocationName.size_class_queue_name:type_name -> buildbarn.buildqueuestate.SizeClassQueueName + 40, // 3: buildbarn.buildqueuestate.InvocationName.ids:type_name -> google.protobuf.Any + 4, // 4: buildbarn.buildqueuestate.OperationState.invocation_name:type_name -> buildbarn.buildqueuestate.InvocationName + 41, // 5: buildbarn.buildqueuestate.OperationState.expected_duration:type_name -> google.protobuf.Duration + 42, // 6: buildbarn.buildqueuestate.OperationState.queued_timestamp:type_name -> google.protobuf.Timestamp + 43, // 7: buildbarn.buildqueuestate.OperationState.action_digest:type_name -> build.bazel.remote.execution.v2.Digest + 42, // 8: buildbarn.buildqueuestate.OperationState.timeout:type_name -> google.protobuf.Timestamp + 44, // 9: buildbarn.buildqueuestate.OperationState.queued:type_name -> google.protobuf.Empty + 44, // 10: buildbarn.buildqueuestate.OperationState.executing:type_name -> google.protobuf.Empty + 45, // 11: buildbarn.buildqueuestate.OperationState.completed:type_name -> build.bazel.remote.execution.v2.ExecuteResponse + 46, // 12: buildbarn.buildqueuestate.OperationState.digest_function:type_name -> build.bazel.remote.execution.v2.DigestFunction.Value + 42, // 13: buildbarn.buildqueuestate.SizeClassQueueState.timeout:type_name -> google.protobuf.Timestamp + 8, // 14: buildbarn.buildqueuestate.SizeClassQueueState.root_invocation:type_name -> buildbarn.buildqueuestate.InvocationState + 2, // 15: buildbarn.buildqueuestate.PlatformQueueState.name:type_name -> buildbarn.buildqueuestate.PlatformQueueName + 6, // 16: 
buildbarn.buildqueuestate.PlatformQueueState.size_class_queues:type_name -> buildbarn.buildqueuestate.SizeClassQueueState + 40, // 17: buildbarn.buildqueuestate.InvocationChildState.id:type_name -> google.protobuf.Any + 8, // 18: buildbarn.buildqueuestate.InvocationChildState.state:type_name -> buildbarn.buildqueuestate.InvocationState + 29, // 19: buildbarn.buildqueuestate.WorkerState.id:type_name -> buildbarn.buildqueuestate.WorkerState.IdEntry + 42, // 20: buildbarn.buildqueuestate.WorkerState.timeout:type_name -> google.protobuf.Timestamp + 5, // 21: buildbarn.buildqueuestate.WorkerState.current_operation:type_name -> buildbarn.buildqueuestate.OperationState + 30, // 22: buildbarn.buildqueuestate.DrainState.worker_id_pattern:type_name -> buildbarn.buildqueuestate.DrainState.WorkerIdPatternEntry + 42, // 23: buildbarn.buildqueuestate.DrainState.created_timestamp:type_name -> google.protobuf.Timestamp + 5, // 24: buildbarn.buildqueuestate.GetOperationResponse.operation:type_name -> buildbarn.buildqueuestate.OperationState + 31, // 25: buildbarn.buildqueuestate.ListOperationsRequest.start_after:type_name -> buildbarn.buildqueuestate.ListOperationsRequest.StartAfter + 40, // 26: buildbarn.buildqueuestate.ListOperationsRequest.filter_invocation_id:type_name -> google.protobuf.Any + 47, // 27: buildbarn.buildqueuestate.ListOperationsRequest.filter_stage:type_name -> build.bazel.remote.execution.v2.ExecutionStage.Value + 5, // 28: buildbarn.buildqueuestate.ListOperationsResponse.operations:type_name -> buildbarn.buildqueuestate.OperationState + 1, // 29: buildbarn.buildqueuestate.ListOperationsResponse.pagination_info:type_name -> buildbarn.buildqueuestate.PaginationInfo + 32, // 30: buildbarn.buildqueuestate.KillOperationsRequest.filter:type_name -> buildbarn.buildqueuestate.KillOperationsRequest.Filter + 48, // 31: buildbarn.buildqueuestate.KillOperationsRequest.status:type_name -> google.rpc.Status + 7, // 32: 
buildbarn.buildqueuestate.ListPlatformQueuesResponse.platform_queues:type_name -> buildbarn.buildqueuestate.PlatformQueueState + 4, // 33: buildbarn.buildqueuestate.ListInvocationChildrenRequest.invocation_name:type_name -> buildbarn.buildqueuestate.InvocationName + 0, // 34: buildbarn.buildqueuestate.ListInvocationChildrenRequest.filter:type_name -> buildbarn.buildqueuestate.ListInvocationChildrenRequest.Filter + 9, // 35: buildbarn.buildqueuestate.ListInvocationChildrenResponse.children:type_name -> buildbarn.buildqueuestate.InvocationChildState + 4, // 36: buildbarn.buildqueuestate.ListQueuedOperationsRequest.invocation_name:type_name -> buildbarn.buildqueuestate.InvocationName + 33, // 37: buildbarn.buildqueuestate.ListQueuedOperationsRequest.start_after:type_name -> buildbarn.buildqueuestate.ListQueuedOperationsRequest.StartAfter + 5, // 38: buildbarn.buildqueuestate.ListQueuedOperationsResponse.queued_operations:type_name -> buildbarn.buildqueuestate.OperationState + 1, // 39: buildbarn.buildqueuestate.ListQueuedOperationsResponse.pagination_info:type_name -> buildbarn.buildqueuestate.PaginationInfo + 34, // 40: buildbarn.buildqueuestate.ListWorkersRequest.filter:type_name -> buildbarn.buildqueuestate.ListWorkersRequest.Filter + 35, // 41: buildbarn.buildqueuestate.ListWorkersRequest.start_after:type_name -> buildbarn.buildqueuestate.ListWorkersRequest.StartAfter + 10, // 42: buildbarn.buildqueuestate.ListWorkersResponse.workers:type_name -> buildbarn.buildqueuestate.WorkerState + 1, // 43: buildbarn.buildqueuestate.ListWorkersResponse.pagination_info:type_name -> buildbarn.buildqueuestate.PaginationInfo + 37, // 44: buildbarn.buildqueuestate.TerminateWorkersRequest.worker_id_pattern:type_name -> buildbarn.buildqueuestate.TerminateWorkersRequest.WorkerIdPatternEntry + 3, // 45: buildbarn.buildqueuestate.ListDrainsRequest.size_class_queue_name:type_name -> buildbarn.buildqueuestate.SizeClassQueueName + 11, // 46: 
buildbarn.buildqueuestate.ListDrainsResponse.drains:type_name -> buildbarn.buildqueuestate.DrainState + 3, // 47: buildbarn.buildqueuestate.AddOrRemoveDrainRequest.size_class_queue_name:type_name -> buildbarn.buildqueuestate.SizeClassQueueName + 38, // 48: buildbarn.buildqueuestate.AddOrRemoveDrainRequest.worker_id_pattern:type_name -> buildbarn.buildqueuestate.AddOrRemoveDrainRequest.WorkerIdPatternEntry + 3, // 49: buildbarn.buildqueuestate.KillOperationsRequest.Filter.size_class_queue_without_workers:type_name -> buildbarn.buildqueuestate.SizeClassQueueName + 41, // 50: buildbarn.buildqueuestate.ListQueuedOperationsRequest.StartAfter.expected_duration:type_name -> google.protobuf.Duration + 42, // 51: buildbarn.buildqueuestate.ListQueuedOperationsRequest.StartAfter.queued_timestamp:type_name -> google.protobuf.Timestamp + 3, // 52: buildbarn.buildqueuestate.ListWorkersRequest.Filter.all:type_name -> buildbarn.buildqueuestate.SizeClassQueueName + 4, // 53: buildbarn.buildqueuestate.ListWorkersRequest.Filter.executing:type_name -> buildbarn.buildqueuestate.InvocationName + 4, // 54: buildbarn.buildqueuestate.ListWorkersRequest.Filter.idle_synchronizing:type_name -> buildbarn.buildqueuestate.InvocationName + 36, // 55: buildbarn.buildqueuestate.ListWorkersRequest.StartAfter.worker_id:type_name -> buildbarn.buildqueuestate.ListWorkersRequest.StartAfter.WorkerIdEntry + 12, // 56: buildbarn.buildqueuestate.BuildQueueState.GetOperation:input_type -> buildbarn.buildqueuestate.GetOperationRequest + 14, // 57: buildbarn.buildqueuestate.BuildQueueState.ListOperations:input_type -> buildbarn.buildqueuestate.ListOperationsRequest + 16, // 58: buildbarn.buildqueuestate.BuildQueueState.KillOperations:input_type -> buildbarn.buildqueuestate.KillOperationsRequest + 44, // 59: buildbarn.buildqueuestate.BuildQueueState.ListPlatformQueues:input_type -> google.protobuf.Empty + 18, // 60: buildbarn.buildqueuestate.BuildQueueState.ListInvocationChildren:input_type -> 
buildbarn.buildqueuestate.ListInvocationChildrenRequest + 20, // 61: buildbarn.buildqueuestate.BuildQueueState.ListQueuedOperations:input_type -> buildbarn.buildqueuestate.ListQueuedOperationsRequest + 22, // 62: buildbarn.buildqueuestate.BuildQueueState.ListWorkers:input_type -> buildbarn.buildqueuestate.ListWorkersRequest + 24, // 63: buildbarn.buildqueuestate.BuildQueueState.TerminateWorkers:input_type -> buildbarn.buildqueuestate.TerminateWorkersRequest + 25, // 64: buildbarn.buildqueuestate.BuildQueueState.ListDrains:input_type -> buildbarn.buildqueuestate.ListDrainsRequest + 27, // 65: buildbarn.buildqueuestate.BuildQueueState.AddDrain:input_type -> buildbarn.buildqueuestate.AddOrRemoveDrainRequest + 27, // 66: buildbarn.buildqueuestate.BuildQueueState.RemoveDrain:input_type -> buildbarn.buildqueuestate.AddOrRemoveDrainRequest + 13, // 67: buildbarn.buildqueuestate.BuildQueueState.GetOperation:output_type -> buildbarn.buildqueuestate.GetOperationResponse + 15, // 68: buildbarn.buildqueuestate.BuildQueueState.ListOperations:output_type -> buildbarn.buildqueuestate.ListOperationsResponse + 44, // 69: buildbarn.buildqueuestate.BuildQueueState.KillOperations:output_type -> google.protobuf.Empty + 17, // 70: buildbarn.buildqueuestate.BuildQueueState.ListPlatformQueues:output_type -> buildbarn.buildqueuestate.ListPlatformQueuesResponse + 19, // 71: buildbarn.buildqueuestate.BuildQueueState.ListInvocationChildren:output_type -> buildbarn.buildqueuestate.ListInvocationChildrenResponse + 21, // 72: buildbarn.buildqueuestate.BuildQueueState.ListQueuedOperations:output_type -> buildbarn.buildqueuestate.ListQueuedOperationsResponse + 23, // 73: buildbarn.buildqueuestate.BuildQueueState.ListWorkers:output_type -> buildbarn.buildqueuestate.ListWorkersResponse + 44, // 74: buildbarn.buildqueuestate.BuildQueueState.TerminateWorkers:output_type -> google.protobuf.Empty + 26, // 75: buildbarn.buildqueuestate.BuildQueueState.ListDrains:output_type -> 
buildbarn.buildqueuestate.ListDrainsResponse + 44, // 76: buildbarn.buildqueuestate.BuildQueueState.AddDrain:output_type -> google.protobuf.Empty + 44, // 77: buildbarn.buildqueuestate.BuildQueueState.RemoveDrain:output_type -> google.protobuf.Empty + 67, // [67:78] is the sub-list for method output_type + 56, // [56:67] is the sub-list for method input_type + 56, // [56:56] is the sub-list for extension type_name + 56, // [56:56] is the sub-list for extension extendee + 0, // [0:56] is the sub-list for field type_name +} + +func init() { file_pkg_proto_buildqueuestate_buildqueuestate_proto_init() } +func file_pkg_proto_buildqueuestate_buildqueuestate_proto_init() { + if File_pkg_proto_buildqueuestate_buildqueuestate_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PaginationInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PlatformQueueName); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SizeClassQueueName); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*InvocationName); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OperationState); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SizeClassQueueState); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PlatformQueueState); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*InvocationState); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*InvocationChildState); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*WorkerState); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DrainState); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return 
nil + } + } + file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetOperationRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetOperationResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListOperationsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListOperationsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*KillOperationsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListPlatformQueuesResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListInvocationChildrenRequest); i { + case 0: + return &v.state + case 1: + return 
&v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListInvocationChildrenResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListQueuedOperationsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListQueuedOperationsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListWorkersRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListWorkersResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TerminateWorkersRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*ListDrainsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListDrainsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AddOrRemoveDrainRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BackgroundLearning); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListOperationsRequest_StartAfter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*KillOperationsRequest_Filter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListQueuedOperationsRequest_StartAfter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListWorkersRequest_Filter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListWorkersRequest_StartAfter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[4].OneofWrappers = []interface{}{ + (*OperationState_Queued)(nil), + (*OperationState_Executing)(nil), + (*OperationState_Completed)(nil), + } + file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[31].OneofWrappers = []interface{}{ + (*KillOperationsRequest_Filter_OperationName)(nil), + (*KillOperationsRequest_Filter_SizeClassQueueWithoutWorkers)(nil), + } + file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes[33].OneofWrappers = []interface{}{ + (*ListWorkersRequest_Filter_All)(nil), + (*ListWorkersRequest_Filter_Executing)(nil), + (*ListWorkersRequest_Filter_IdleSynchronizing)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_pkg_proto_buildqueuestate_buildqueuestate_proto_rawDesc, + NumEnums: 1, + NumMessages: 38, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_pkg_proto_buildqueuestate_buildqueuestate_proto_goTypes, + DependencyIndexes: file_pkg_proto_buildqueuestate_buildqueuestate_proto_depIdxs, + EnumInfos: file_pkg_proto_buildqueuestate_buildqueuestate_proto_enumTypes, + MessageInfos: file_pkg_proto_buildqueuestate_buildqueuestate_proto_msgTypes, + }.Build() + File_pkg_proto_buildqueuestate_buildqueuestate_proto = out.File + 
file_pkg_proto_buildqueuestate_buildqueuestate_proto_rawDesc = nil + file_pkg_proto_buildqueuestate_buildqueuestate_proto_goTypes = nil + file_pkg_proto_buildqueuestate_buildqueuestate_proto_depIdxs = nil +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// BuildQueueStateClient is the client API for BuildQueueState service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type BuildQueueStateClient interface { + GetOperation(ctx context.Context, in *GetOperationRequest, opts ...grpc.CallOption) (*GetOperationResponse, error) + ListOperations(ctx context.Context, in *ListOperationsRequest, opts ...grpc.CallOption) (*ListOperationsResponse, error) + KillOperations(ctx context.Context, in *KillOperationsRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + ListPlatformQueues(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*ListPlatformQueuesResponse, error) + ListInvocationChildren(ctx context.Context, in *ListInvocationChildrenRequest, opts ...grpc.CallOption) (*ListInvocationChildrenResponse, error) + ListQueuedOperations(ctx context.Context, in *ListQueuedOperationsRequest, opts ...grpc.CallOption) (*ListQueuedOperationsResponse, error) + ListWorkers(ctx context.Context, in *ListWorkersRequest, opts ...grpc.CallOption) (*ListWorkersResponse, error) + TerminateWorkers(ctx context.Context, in *TerminateWorkersRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + ListDrains(ctx context.Context, in *ListDrainsRequest, opts ...grpc.CallOption) (*ListDrainsResponse, error) + AddDrain(ctx context.Context, in *AddOrRemoveDrainRequest, opts ...grpc.CallOption) (*emptypb.Empty, 
error) + RemoveDrain(ctx context.Context, in *AddOrRemoveDrainRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) +} + +type buildQueueStateClient struct { + cc grpc.ClientConnInterface +} + +func NewBuildQueueStateClient(cc grpc.ClientConnInterface) BuildQueueStateClient { + return &buildQueueStateClient{cc} +} + +func (c *buildQueueStateClient) GetOperation(ctx context.Context, in *GetOperationRequest, opts ...grpc.CallOption) (*GetOperationResponse, error) { + out := new(GetOperationResponse) + err := c.cc.Invoke(ctx, "/buildbarn.buildqueuestate.BuildQueueState/GetOperation", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *buildQueueStateClient) ListOperations(ctx context.Context, in *ListOperationsRequest, opts ...grpc.CallOption) (*ListOperationsResponse, error) { + out := new(ListOperationsResponse) + err := c.cc.Invoke(ctx, "/buildbarn.buildqueuestate.BuildQueueState/ListOperations", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *buildQueueStateClient) KillOperations(ctx context.Context, in *KillOperationsRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/buildbarn.buildqueuestate.BuildQueueState/KillOperations", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *buildQueueStateClient) ListPlatformQueues(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*ListPlatformQueuesResponse, error) { + out := new(ListPlatformQueuesResponse) + err := c.cc.Invoke(ctx, "/buildbarn.buildqueuestate.BuildQueueState/ListPlatformQueues", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *buildQueueStateClient) ListInvocationChildren(ctx context.Context, in *ListInvocationChildrenRequest, opts ...grpc.CallOption) (*ListInvocationChildrenResponse, error) { + out := new(ListInvocationChildrenResponse) + err := c.cc.Invoke(ctx, "/buildbarn.buildqueuestate.BuildQueueState/ListInvocationChildren", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *buildQueueStateClient) ListQueuedOperations(ctx context.Context, in *ListQueuedOperationsRequest, opts ...grpc.CallOption) (*ListQueuedOperationsResponse, error) { + out := new(ListQueuedOperationsResponse) + err := c.cc.Invoke(ctx, "/buildbarn.buildqueuestate.BuildQueueState/ListQueuedOperations", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *buildQueueStateClient) ListWorkers(ctx context.Context, in *ListWorkersRequest, opts ...grpc.CallOption) (*ListWorkersResponse, error) { + out := new(ListWorkersResponse) + err := c.cc.Invoke(ctx, "/buildbarn.buildqueuestate.BuildQueueState/ListWorkers", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *buildQueueStateClient) TerminateWorkers(ctx context.Context, in *TerminateWorkersRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/buildbarn.buildqueuestate.BuildQueueState/TerminateWorkers", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *buildQueueStateClient) ListDrains(ctx context.Context, in *ListDrainsRequest, opts ...grpc.CallOption) (*ListDrainsResponse, error) { + out := new(ListDrainsResponse) + err := c.cc.Invoke(ctx, "/buildbarn.buildqueuestate.BuildQueueState/ListDrains", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *buildQueueStateClient) AddDrain(ctx context.Context, in *AddOrRemoveDrainRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/buildbarn.buildqueuestate.BuildQueueState/AddDrain", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *buildQueueStateClient) RemoveDrain(ctx context.Context, in *AddOrRemoveDrainRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/buildbarn.buildqueuestate.BuildQueueState/RemoveDrain", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// BuildQueueStateServer is the server API for BuildQueueState service. +type BuildQueueStateServer interface { + GetOperation(context.Context, *GetOperationRequest) (*GetOperationResponse, error) + ListOperations(context.Context, *ListOperationsRequest) (*ListOperationsResponse, error) + KillOperations(context.Context, *KillOperationsRequest) (*emptypb.Empty, error) + ListPlatformQueues(context.Context, *emptypb.Empty) (*ListPlatformQueuesResponse, error) + ListInvocationChildren(context.Context, *ListInvocationChildrenRequest) (*ListInvocationChildrenResponse, error) + ListQueuedOperations(context.Context, *ListQueuedOperationsRequest) (*ListQueuedOperationsResponse, error) + ListWorkers(context.Context, *ListWorkersRequest) (*ListWorkersResponse, error) + TerminateWorkers(context.Context, *TerminateWorkersRequest) (*emptypb.Empty, error) + ListDrains(context.Context, *ListDrainsRequest) (*ListDrainsResponse, error) + AddDrain(context.Context, *AddOrRemoveDrainRequest) (*emptypb.Empty, error) + RemoveDrain(context.Context, *AddOrRemoveDrainRequest) (*emptypb.Empty, error) +} + +// UnimplementedBuildQueueStateServer can be embedded to have forward compatible implementations. 
+type UnimplementedBuildQueueStateServer struct { +} + +func (*UnimplementedBuildQueueStateServer) GetOperation(context.Context, *GetOperationRequest) (*GetOperationResponse, error) { + return nil, status1.Errorf(codes.Unimplemented, "method GetOperation not implemented") +} +func (*UnimplementedBuildQueueStateServer) ListOperations(context.Context, *ListOperationsRequest) (*ListOperationsResponse, error) { + return nil, status1.Errorf(codes.Unimplemented, "method ListOperations not implemented") +} +func (*UnimplementedBuildQueueStateServer) KillOperations(context.Context, *KillOperationsRequest) (*emptypb.Empty, error) { + return nil, status1.Errorf(codes.Unimplemented, "method KillOperations not implemented") +} +func (*UnimplementedBuildQueueStateServer) ListPlatformQueues(context.Context, *emptypb.Empty) (*ListPlatformQueuesResponse, error) { + return nil, status1.Errorf(codes.Unimplemented, "method ListPlatformQueues not implemented") +} +func (*UnimplementedBuildQueueStateServer) ListInvocationChildren(context.Context, *ListInvocationChildrenRequest) (*ListInvocationChildrenResponse, error) { + return nil, status1.Errorf(codes.Unimplemented, "method ListInvocationChildren not implemented") +} +func (*UnimplementedBuildQueueStateServer) ListQueuedOperations(context.Context, *ListQueuedOperationsRequest) (*ListQueuedOperationsResponse, error) { + return nil, status1.Errorf(codes.Unimplemented, "method ListQueuedOperations not implemented") +} +func (*UnimplementedBuildQueueStateServer) ListWorkers(context.Context, *ListWorkersRequest) (*ListWorkersResponse, error) { + return nil, status1.Errorf(codes.Unimplemented, "method ListWorkers not implemented") +} +func (*UnimplementedBuildQueueStateServer) TerminateWorkers(context.Context, *TerminateWorkersRequest) (*emptypb.Empty, error) { + return nil, status1.Errorf(codes.Unimplemented, "method TerminateWorkers not implemented") +} +func (*UnimplementedBuildQueueStateServer) ListDrains(context.Context, 
*ListDrainsRequest) (*ListDrainsResponse, error) { + return nil, status1.Errorf(codes.Unimplemented, "method ListDrains not implemented") +} +func (*UnimplementedBuildQueueStateServer) AddDrain(context.Context, *AddOrRemoveDrainRequest) (*emptypb.Empty, error) { + return nil, status1.Errorf(codes.Unimplemented, "method AddDrain not implemented") +} +func (*UnimplementedBuildQueueStateServer) RemoveDrain(context.Context, *AddOrRemoveDrainRequest) (*emptypb.Empty, error) { + return nil, status1.Errorf(codes.Unimplemented, "method RemoveDrain not implemented") +} + +func RegisterBuildQueueStateServer(s grpc.ServiceRegistrar, srv BuildQueueStateServer) { + s.RegisterService(&_BuildQueueState_serviceDesc, srv) +} + +func _BuildQueueState_GetOperation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetOperationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BuildQueueStateServer).GetOperation(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/buildbarn.buildqueuestate.BuildQueueState/GetOperation", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BuildQueueStateServer).GetOperation(ctx, req.(*GetOperationRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _BuildQueueState_ListOperations_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListOperationsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BuildQueueStateServer).ListOperations(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/buildbarn.buildqueuestate.BuildQueueState/ListOperations", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return 
srv.(BuildQueueStateServer).ListOperations(ctx, req.(*ListOperationsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _BuildQueueState_KillOperations_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(KillOperationsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BuildQueueStateServer).KillOperations(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/buildbarn.buildqueuestate.BuildQueueState/KillOperations", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BuildQueueStateServer).KillOperations(ctx, req.(*KillOperationsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _BuildQueueState_ListPlatformQueues_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(emptypb.Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BuildQueueStateServer).ListPlatformQueues(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/buildbarn.buildqueuestate.BuildQueueState/ListPlatformQueues", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BuildQueueStateServer).ListPlatformQueues(ctx, req.(*emptypb.Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func _BuildQueueState_ListInvocationChildren_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListInvocationChildrenRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BuildQueueStateServer).ListInvocationChildren(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
"/buildbarn.buildqueuestate.BuildQueueState/ListInvocationChildren", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BuildQueueStateServer).ListInvocationChildren(ctx, req.(*ListInvocationChildrenRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _BuildQueueState_ListQueuedOperations_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListQueuedOperationsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BuildQueueStateServer).ListQueuedOperations(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/buildbarn.buildqueuestate.BuildQueueState/ListQueuedOperations", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BuildQueueStateServer).ListQueuedOperations(ctx, req.(*ListQueuedOperationsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _BuildQueueState_ListWorkers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListWorkersRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BuildQueueStateServer).ListWorkers(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/buildbarn.buildqueuestate.BuildQueueState/ListWorkers", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BuildQueueStateServer).ListWorkers(ctx, req.(*ListWorkersRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _BuildQueueState_TerminateWorkers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(TerminateWorkersRequest) + if err := dec(in); err != nil { + return 
nil, err + } + if interceptor == nil { + return srv.(BuildQueueStateServer).TerminateWorkers(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/buildbarn.buildqueuestate.BuildQueueState/TerminateWorkers", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BuildQueueStateServer).TerminateWorkers(ctx, req.(*TerminateWorkersRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _BuildQueueState_ListDrains_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListDrainsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BuildQueueStateServer).ListDrains(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/buildbarn.buildqueuestate.BuildQueueState/ListDrains", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BuildQueueStateServer).ListDrains(ctx, req.(*ListDrainsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _BuildQueueState_AddDrain_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AddOrRemoveDrainRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BuildQueueStateServer).AddDrain(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/buildbarn.buildqueuestate.BuildQueueState/AddDrain", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BuildQueueStateServer).AddDrain(ctx, req.(*AddOrRemoveDrainRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _BuildQueueState_RemoveDrain_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) 
{ + in := new(AddOrRemoveDrainRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BuildQueueStateServer).RemoveDrain(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/buildbarn.buildqueuestate.BuildQueueState/RemoveDrain", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BuildQueueStateServer).RemoveDrain(ctx, req.(*AddOrRemoveDrainRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _BuildQueueState_serviceDesc = grpc.ServiceDesc{ + ServiceName: "buildbarn.buildqueuestate.BuildQueueState", + HandlerType: (*BuildQueueStateServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetOperation", + Handler: _BuildQueueState_GetOperation_Handler, + }, + { + MethodName: "ListOperations", + Handler: _BuildQueueState_ListOperations_Handler, + }, + { + MethodName: "KillOperations", + Handler: _BuildQueueState_KillOperations_Handler, + }, + { + MethodName: "ListPlatformQueues", + Handler: _BuildQueueState_ListPlatformQueues_Handler, + }, + { + MethodName: "ListInvocationChildren", + Handler: _BuildQueueState_ListInvocationChildren_Handler, + }, + { + MethodName: "ListQueuedOperations", + Handler: _BuildQueueState_ListQueuedOperations_Handler, + }, + { + MethodName: "ListWorkers", + Handler: _BuildQueueState_ListWorkers_Handler, + }, + { + MethodName: "TerminateWorkers", + Handler: _BuildQueueState_TerminateWorkers_Handler, + }, + { + MethodName: "ListDrains", + Handler: _BuildQueueState_ListDrains_Handler, + }, + { + MethodName: "AddDrain", + Handler: _BuildQueueState_AddDrain_Handler, + }, + { + MethodName: "RemoveDrain", + Handler: _BuildQueueState_RemoveDrain_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "pkg/proto/buildqueuestate/buildqueuestate.proto", +} diff --git a/pkg/proto/buildqueuestate/buildqueuestate.proto b/pkg/proto/buildqueuestate/buildqueuestate.proto new file mode 100644 index 
0000000..1684f9e --- /dev/null +++ b/pkg/proto/buildqueuestate/buildqueuestate.proto @@ -0,0 +1,586 @@ +syntax = "proto3"; + +package buildbarn.buildqueuestate; + +import "build/bazel/remote/execution/v2/remote_execution.proto"; +import "google/protobuf/any.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/empty.proto"; +import "google/protobuf/timestamp.proto"; +import "google/rpc/status.proto"; + +option go_package = "github.com/buildbarn/bb-remote-execution/pkg/proto/buildqueuestate"; + +// BuildQueueState can be used to inspect the internal state of +// InMemoryBuildQueue and perform administrative operations against it. +// It is used by bb_scheduler's web UI. +service BuildQueueState { + // Get information about an operation that is currently known by the + // scheduler. + rpc GetOperation(GetOperationRequest) returns (GetOperationResponse); + + // List information about all operations that are currently known by + // the scheduler. + rpc ListOperations(ListOperationsRequest) returns (ListOperationsResponse); + + // Forcefully terminate one or more operations that are either in the + // QUEUED or EXECUTING execution stage with a provided gRPC status. + rpc KillOperations(KillOperationsRequest) returns (google.protobuf.Empty); + + // List information about all platform queues that are currently + // registered by the scheduler. + // + // Platform queues are responsible for tracking all workers and + // operations for a single execution platform. Platform queues are + // identified by an REv2 instance name and Platform message. + // + // TODO: Should this call support pagination, or is the number of + // platform queues low enough that it's not needed? + rpc ListPlatformQueues(google.protobuf.Empty) + returns (ListPlatformQueuesResponse); + + // List information about all client invocations for which one or more + // operations exist that are in the QUEUED or EXECUTING execution + // stage for a given platform queue. 
+ // + // TODO: Should this call support pagination, or is the number of + // invocations low enough that it's not needed? + rpc ListInvocationChildren(ListInvocationChildrenRequest) + returns (ListInvocationChildrenResponse); + + // List information about all operations that are in the QUEUED + // execution stage for a given client invocation. + rpc ListQueuedOperations(ListQueuedOperationsRequest) + returns (ListQueuedOperationsResponse); + + // List information about all workers that are requesting tasks from a + // given platform queue. + rpc ListWorkers(ListWorkersRequest) returns (ListWorkersResponse); + + // Mark workers in a platform queue as about to be terminated. This + // causes the workers to no longer receive new tasks after completing + // the task they are currently assigned. + // + // This method blocks until all workers matched by the provided + // pattern are idle. Once completed, it is safe to terminate the + // matching workers without disrupting execution of tasks. + rpc TerminateWorkers(TerminateWorkersRequest) returns (google.protobuf.Empty); + + // List worker ID patterns that are used to prevent workers from + // receiving new tasks. + rpc ListDrains(ListDrainsRequest) returns (ListDrainsResponse); + + // Add a new worker ID pattern that prevents workers from receiving + // new tasks. The difference between AddDrain() and TerminateWorkers() + // is that entries created by the former can be removed with + // RemoveDrain(). Patterns are also retained, regardless of whether + // workers disappear and reappear in the meantime. + rpc AddDrain(AddOrRemoveDrainRequest) returns (google.protobuf.Empty); + + // Remove an existent worker ID pattern that prevents workers from + // receiving new tasks. This method is the inverse of AddDrain(). + rpc RemoveDrain(AddOrRemoveDrainRequest) returns (google.protobuf.Empty); +} + +// Message types shared by multiple RPCs. 
+ +message PaginationInfo { + // The zero-based index of the first element returned by this + // function, relative to the full list of data that may be returned by + // this method. + uint32 start_index = 1; + + // The total number of entries in the full list of data that may be + // returned by this method. + uint32 total_entries = 2; +} + +message PlatformQueueName { + // The REv2 instance name prefix that workers associated with this + // platform queue announced to the scheduler. + string instance_name_prefix = 1; + + // The REv2 platform properties that workers associated with this + // platform queue announced to the scheduler. + build.bazel.remote.execution.v2.Platform platform = 2; +} + +message SizeClassQueueName { + // The platform queue in which the size class queue is placed. + PlatformQueueName platform_queue_name = 1; + + // The size of the workers relative to other workers in the same + // platform queue. + uint32 size_class = 2; +} + +message InvocationName { + // The size class queue in which the invocation is placed. + SizeClassQueueName size_class_queue_name = 1; + + // Sequence of invocation IDs that is used to identify the invocation. + repeated google.protobuf.Any ids = 2; +} + +message OperationState { + // The name of the operation. In the case of bb_scheduler, the + // operation name will be a UUID. + string name = 1; + + // The invocation in which the operation is placed. + InvocationName invocation_name = 2; + + // Was 'invocation_ids'. Merged together with 'size_class_queue_name' + // into 'invocation_name'. + reserved 3; + + // The expected amount of time this operation takes to complete. + google.protobuf.Duration expected_duration = 14; + + // The time at which the client enqueued the operation by calling + // Execute(). + google.protobuf.Timestamp queued_timestamp = 4; + + // The digest of the Action message that the client instructed the + // worker to execute. 
+ build.bazel.remote.execution.v2.Digest action_digest = 5; + + // Was 'argv0'. + reserved 6; + + // When not set, it indicates that one or more Execute() or + // WaitExecution() calls are currently taking place for this + // operation. The scheduler will continue to maintain this operation. + // + // When set, it indicates that no Execute() or WaitExecution() calls + // are currently taking place for this operation. The client has + // abandoned this operation. This timestamp indicates when the + // scheduler will remove this operation. + // + // If this operation is in the EXECUTING execution stage at the time + // of removal (and this operation hasn't been in-flight deduplicated + // against an operation created by some other client), its worker will + // be instructed to stop execution. + google.protobuf.Timestamp timeout = 7; + + oneof stage { + // The operation is in the QUEUED stage. + google.protobuf.Empty queued = 8; + + // The operation is in the EXECUTING stage. + // TODO: Would we want this to use type WorkerState, so that we can + // get the ID of the worker on which an operation is executing? + google.protobuf.Empty executing = 9; + + // The operation is in the COMPLETED stage. + build.bazel.remote.execution.v2.ExecuteResponse completed = 10; + } + + // A client-provided identifier for the target which produced this + // operation. + string target_id = 11; + + // The priority of the operation, as provided by the client through + // REv2's ExecutionPolicy. + int32 priority = 12; + + // Additional components that the client provided as part of + // ExecuteRequest.instance_name that are not part of the + // PlatformQueueName's instance_name_prefix. + string instance_name_suffix = 13; + + // The digest function that was used to compute the action digest. + build.bazel.remote.execution.v2.DigestFunction.Value digest_function = 15; +} + +message SizeClassQueueState { + // The size of the workers relative to other workers in the same + // platform queue. 
+ uint32 size_class = 1; + + // When not set, it indicates that one or more workers exist in the + // platform queue. + // + // When set, it indicates that all workers have disappeared. + // Operations scheduled by clients using Execute() are still being + // queued, as this may be a transient condition. This timestamp + // indicates when the scheduler will remove the platform queue, + // thereby causing all Execute() requests for this instance name and + // platform properties to fail. + google.protobuf.Timestamp timeout = 2; + + // Was 'invocations_count', 'active_invocations_count', + // 'queued_invocations_count', and 'executing_workers_count'. These + // have moved into 'root_invocation'. + reserved 3, 4, 6, 8; + + // The total number of workers associated with this platform queue. + uint32 workers_count = 5; + + // The total number of drains associated with this platform queue that + // were created using AddDrain(). + uint32 drains_count = 7; + + // Information about all operations that are part of this size class + // queue, and utilization of workers. + InvocationState root_invocation = 9; +} + +message PlatformQueueState { + // The identifier of this platform queue. + PlatformQueueName name = 1; + + // The state of the platform queue per worker size class. + repeated SizeClassQueueState size_class_queues = 2; +} + +message InvocationState { + // Was 'id'. Moved into InvocationChildState. + reserved 1; + + // The total number of operations associated with this platform queue + // and invocation that are in the QUEUED execution stage. + uint32 queued_operations_count = 2; + + // Was 'first_queued_operation'. This information is no longer + // provided, as the scheduler web UI no longer displays it. + reserved 3; + + // The total number of workers for this size class queue that are + // currently executing an operation belonging to this invocation. 
This + // equals the number of operations associated with this platform queue + // and invocation that are in the EXECUTING execution stage. + uint32 executing_workers_count = 4; + + // The total number of workers for this size class queue that are + // currently idle and most recently completed an operation belonging + // to this invocation. + uint32 idle_workers_count = 5; + + // The total number of workers for this size class queue that are + // currently idle and most recently executed an action belonging to + // this invocation and synchronizing against the scheduler. + // + // These are the workers that will most preferably be used to execute + // successive operations scheduled as part of this invocation. + uint32 idle_synchronizing_workers_count = 6; + + // The total number of client invocations for which one or more + // operations in either the QUEUED or EXECUTING execution stage exist, + // or for which one or more workers exist that most recently executed + // a task belonging to this invocation. + uint32 children_count = 7; + + // The total number of client invocations for which one or more + // operations in either the QUEUED or EXECUTING execution stage exist. + uint32 active_children_count = 8; + + // The total number of client invocations for which one or more + // operations in the QUEUED execution stage exist. + uint32 queued_children_count = 9; +} + +message InvocationChildState { + // An identifier for a set of operations that should be scheduled + // collectively and fairly with respect to other sets. + // + // If operations are created by an REv2 client, this field typically + // contains an REv2 RequestMetadata message. Only the + // 'tool_invocation_id' field will be set, as that field is sufficient + // for identifying a single build. All other fields are cleared. 
+ // + // If the operation was created because the scheduler wanted to test + // the execution of an action on a size class for which there is a + // high probability of failure, this field contains a + // BackgroundLearning message. + // + // This field may also contain other kinds of messages in case the + // scheduler is configured to provide fairness not at the Bazel + // invocation ID level, but using some alternative heuristics. + google.protobuf.Any id = 1; + + // State of the child invocation. + InvocationState state = 2; +} + +message WorkerState { + // The labels that uniquely identifies the worker inside this size + // class queue. + map id = 1; + + // When not set, it indicates that the worker is currently calling + // Synchronize() to report its current status, potentially blocking on + // the scheduler to provide it a new task to execute. + // + // When set, it indicates that the worker is currently not calling + // Synchronize(). This may either be because it's executing a task and + // will only report its state periodically, or because the worker has + // disappeared. This timestamp indicates when the scheduler will + // remove this worker and terminate any operation it is running, + // assuming the worker does not call Synchronize() before this time. + google.protobuf.Timestamp timeout = 2; + + // The properties of the operation that is currently executed by this + // worker. This option is not set when the worker is idle. + // + // The 'size_class_queue' and 'stage' fields are omitted, as their + // values are implied. + OperationState current_operation = 3; + + // Set if one or more drains exist whose worker ID patterns match the + // ID of the worker. When set, the worker is permitted to complete the + // task it is currently execution, but will not receive any further + // tasks to execute. + bool drained = 4; +} + +message DrainState { + // The pattern of matching workers to drain. 
This pattern matches all + // workers for which the worker ID is a superset of the pattern. An + // empty worker ID pattern would match all workers. + map worker_id_pattern = 1; + + // The time at which this drain was created. + google.protobuf.Timestamp created_timestamp = 2; +} + +// Request and response messages. + +message GetOperationRequest { + // The name of the operation for which the state needs to be obtained. + string operation_name = 1; +} + +message GetOperationResponse { + // The state of the operation that needed to be obtain. + // + // The 'name' field of the operation is omitted, as its value is + // implied. + OperationState operation = 1; +} + +message ListOperationsRequest { + message StartAfter { + // The name of the operation. + string operation_name = 1; + } + + // The number of operations to be returned. The response may contain + // fewer operations, iff fewer than page_size operations exist after + // the starting position. + uint32 page_size = 1; + + // When not set, return the first operations known by the scheduler. + // When set, return the first operations after the provided starting + // point. + StartAfter start_after = 2; + + // If set, only return operations having an invocation ID that + // matches the provided value. + google.protobuf.Any filter_invocation_id = 3; + + // If set, only return operations whose execution stage matches the + // provided value. + build.bazel.remote.execution.v2.ExecutionStage.Value filter_stage = 4; +} + +message ListOperationsResponse { + // The state of operations known by the scheduler. + repeated OperationState operations = 1; + + // The position at which the resulting operations are stored in the + // list of all operations. + PaginationInfo pagination_info = 2; +} + +message KillOperationsRequest { + message Filter { + oneof type { + // Kill one operation, having a given name. 
+ string operation_name = 1; + + // Kill all operations within a given size class queue, only if the + // size class queue has zero workers. + // + // This filter can be used by autoscaler processes to report to + // clients that they are attempting to use a size class queue for + // which no workers can be created. + SizeClassQueueName size_class_queue_without_workers = 2; + } + } + + // Limit the operations to kill according to a filter. + Filter filter = 1; + + // The error to return to clients as part of the ExecuteResponse. + google.rpc.Status status = 2; +} + +message ListPlatformQueuesResponse { + // The state of all platform queued managed by the scheduler. + repeated PlatformQueueState platform_queues = 1; +} + +message ListInvocationChildrenRequest { + // The name of the invocation whose children need to be listed. + InvocationName invocation_name = 1; + + enum Filter { + // Return invocations for which one or more operations that are in + // the QUEUED or EXECUTING stages exist, or for which one or more + // workers exist that most recently executed a task belonging to + // this invocation. + // + // Invocations are sorted alphabetically by invocation ID. + ALL = 0; + + // Return invocations for which one or more operations that are in + // the QUEUED or EXECUTING stages exist. + // + // Invocations are sorted alphabetically by invocation ID. + ACTIVE = 1; + + // Return invocations for which one or more operations that are in + // the QUEUED stage exist. + // + // Invocations are sorted by the order in which the scheduler will + // prefer scheduling operations from these invocations, in + // descending order. + QUEUED = 2; + } + + // Limit the results according to a filter. + Filter filter = 2; +} + +message ListInvocationChildrenResponse { + // The state of all invocations known by the scheduler that are + // contained within the provided invocation. 
+ repeated InvocationChildState children = 1; +} + +message ListQueuedOperationsRequest { + message StartAfter { + // The priority of the queued operation. + int32 priority = 1; + + // The expected amount of time that this operation takes to + // complete. + google.protobuf.Duration expected_duration = 3; + + // The timestamp at which the operation was queued. + google.protobuf.Timestamp queued_timestamp = 2; + } + + // The invocation whose list of queued operations needs to be + // returned. + InvocationName invocation_name = 1; + + // Was 'invocation_ids'. Merged together with 'size_class_queue_name' + // into 'invocation_name'. + reserved 2; + + // The number of operations to be returned. The response may contain + // fewer operations, iff fewer than page_size operations exist after + // the starting position. + uint32 page_size = 3; + + // When not set, return the first queued operations known by the + // scheduler for the provided platform queue and invocation ID. When + // set, return the first operations after the provided starting point. + StartAfter start_after = 4; +} + +message ListQueuedOperationsResponse { + // The state of operations that are in the QUEUED stage that are part + // of the provided platform queue and invocation ID. + // + // The 'size_class_queue' and 'invocation_ids' fields are omitted, as + // their values are implied. + repeated OperationState queued_operations = 1; + + // The position at which the resulting operations are stored in the + // list of all queued operations for the provided platform queue and + // invocation ID. + PaginationInfo pagination_info = 2; +} + +message ListWorkersRequest { + message Filter { + oneof type { + // List all of the workers in a given size class queue. + SizeClassQueueName all = 1; + + // List all of the workers that are executing an operation + // belonging to a given invocation, or one of its transitive child + // invocations. 
+ InvocationName executing = 2; + + // List all of the workers that are idle and are synchronizing + // against the scheduler that most recently executed an operation + // belonging to a given invocation. + InvocationName idle_synchronizing = 3; + } + } + + message StartAfter { + // The labels that uniquely identifies the worker inside this size + // class queue. + map worker_id = 1; + } + + // Limit the results according to a filter. + Filter filter = 1; + + // The number of workers to be returned. The response may contain + // fewer workers, iff fewer than page_size workers exist after the + // starting position. + uint32 page_size = 3; + + // When not set, return the first workers known by the scheduler for + // the provided platform queue. When set, return the first workers + // after the provided starting point. + StartAfter start_after = 4; +} + +message ListWorkersResponse { + // The state of workers that are part of the provided platform queue. + repeated WorkerState workers = 1; + + // The position at which the resulting workers are stored in the list + // of all workers for the provided platform queue. + PaginationInfo pagination_info = 2; +} + +message TerminateWorkersRequest { + // The pattern of matching workers to terminate. This pattern matches + // all workers for which the worker ID is a superset of the pattern. + // An empty worker ID pattern would match all workers. + map worker_id_pattern = 1; +} + +message ListDrainsRequest { + // The platform queue whose list of drains needs to be returned. + SizeClassQueueName size_class_queue_name = 1; +} + +message ListDrainsResponse { + // The state of drains that are part of the provided platform queue. + repeated DrainState drains = 1; +} + +message AddOrRemoveDrainRequest { + // The platform queue whose list of drains needs to be altered. + SizeClassQueueName size_class_queue_name = 1; + + // The pattern of matching workers for which a drain needs to be added + // or removed. 
+ map worker_id_pattern = 2; +} + +// A special message type that is used as an invocation ID to indicate +// that an operation was created, because the scheduler wanted to test +// the execution of an action on a size class for which there is a high +// probability of failure. Background learning is performed to ensure +// that execution statistics remain calibrated. +message BackgroundLearning {} diff --git a/pkg/proto/cas/BUILD.bazel b/pkg/proto/cas/BUILD.bazel new file mode 100644 index 0000000..d7cc799 --- /dev/null +++ b/pkg/proto/cas/BUILD.bazel @@ -0,0 +1,25 @@ +load("@rules_proto//proto:defs.bzl", "proto_library") +load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") + +proto_library( + name = "cas_proto", + srcs = ["cas.proto"], + visibility = ["//visibility:public"], + deps = ["@com_github_bazelbuild_remote_apis//build/bazel/remote/execution/v2:remote_execution_proto"], +) + +go_proto_library( + name = "cas_go_proto", + importpath = "github.com/buildbarn/bb-remote-execution/pkg/proto/cas", + proto = ":cas_proto", + visibility = ["//visibility:public"], + deps = ["@com_github_bazelbuild_remote_apis//build/bazel/remote/execution/v2:execution"], +) + +go_library( + name = "cas", + embed = [":cas_go_proto"], + importpath = "github.com/buildbarn/bb-remote-execution/pkg/proto/cas", + visibility = ["//visibility:public"], +) diff --git a/pkg/proto/cas/cas.pb.go b/pkg/proto/cas/cas.pb.go new file mode 100644 index 0000000..6daf620 --- /dev/null +++ b/pkg/proto/cas/cas.pb.go @@ -0,0 +1,173 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.31.0 +// protoc v4.23.4 +// source: pkg/proto/cas/cas.proto + +package cas + +import ( + v2 "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type HistoricalExecuteResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ActionDigest *v2.Digest `protobuf:"bytes,1,opt,name=action_digest,json=actionDigest,proto3" json:"action_digest,omitempty"` + ExecuteResponse *v2.ExecuteResponse `protobuf:"bytes,3,opt,name=execute_response,json=executeResponse,proto3" json:"execute_response,omitempty"` +} + +func (x *HistoricalExecuteResponse) Reset() { + *x = HistoricalExecuteResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_cas_cas_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HistoricalExecuteResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HistoricalExecuteResponse) ProtoMessage() {} + +func (x *HistoricalExecuteResponse) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_cas_cas_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HistoricalExecuteResponse.ProtoReflect.Descriptor instead. 
+func (*HistoricalExecuteResponse) Descriptor() ([]byte, []int) { + return file_pkg_proto_cas_cas_proto_rawDescGZIP(), []int{0} +} + +func (x *HistoricalExecuteResponse) GetActionDigest() *v2.Digest { + if x != nil { + return x.ActionDigest + } + return nil +} + +func (x *HistoricalExecuteResponse) GetExecuteResponse() *v2.ExecuteResponse { + if x != nil { + return x.ExecuteResponse + } + return nil +} + +var File_pkg_proto_cas_cas_proto protoreflect.FileDescriptor + +var file_pkg_proto_cas_cas_proto_rawDesc = []byte{ + 0x0a, 0x17, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x61, 0x73, 0x2f, + 0x63, 0x61, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x62, 0x75, 0x69, 0x6c, 0x64, + 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x63, 0x61, 0x73, 0x1a, 0x36, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x2f, + 0x62, 0x61, 0x7a, 0x65, 0x6c, 0x2f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2f, 0x65, 0x78, 0x65, + 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x76, 0x32, 0x2f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, + 0x5f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0xcc, 0x01, 0x0a, 0x19, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x69, 0x63, 0x61, 0x6c, 0x45, + 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4c, + 0x0a, 0x0d, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x2e, 0x62, 0x61, + 0x7a, 0x65, 0x6c, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x52, 0x0c, + 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x5b, 0x0a, 0x10, + 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x2e, 0x62, + 
0x61, 0x7a, 0x65, 0x6c, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x65, 0x78, 0x65, 0x63, + 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x0f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x42, + 0x38, 0x5a, 0x36, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x62, 0x75, + 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2f, 0x62, 0x62, 0x2d, 0x72, 0x65, 0x6d, 0x6f, 0x74, + 0x65, 0x2d, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x70, 0x6b, 0x67, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x61, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +} + +var ( + file_pkg_proto_cas_cas_proto_rawDescOnce sync.Once + file_pkg_proto_cas_cas_proto_rawDescData = file_pkg_proto_cas_cas_proto_rawDesc +) + +func file_pkg_proto_cas_cas_proto_rawDescGZIP() []byte { + file_pkg_proto_cas_cas_proto_rawDescOnce.Do(func() { + file_pkg_proto_cas_cas_proto_rawDescData = protoimpl.X.CompressGZIP(file_pkg_proto_cas_cas_proto_rawDescData) + }) + return file_pkg_proto_cas_cas_proto_rawDescData +} + +var file_pkg_proto_cas_cas_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_pkg_proto_cas_cas_proto_goTypes = []interface{}{ + (*HistoricalExecuteResponse)(nil), // 0: buildbarn.cas.HistoricalExecuteResponse + (*v2.Digest)(nil), // 1: build.bazel.remote.execution.v2.Digest + (*v2.ExecuteResponse)(nil), // 2: build.bazel.remote.execution.v2.ExecuteResponse +} +var file_pkg_proto_cas_cas_proto_depIdxs = []int32{ + 1, // 0: buildbarn.cas.HistoricalExecuteResponse.action_digest:type_name -> build.bazel.remote.execution.v2.Digest + 2, // 1: buildbarn.cas.HistoricalExecuteResponse.execute_response:type_name -> build.bazel.remote.execution.v2.ExecuteResponse + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method 
input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_pkg_proto_cas_cas_proto_init() } +func file_pkg_proto_cas_cas_proto_init() { + if File_pkg_proto_cas_cas_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_pkg_proto_cas_cas_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HistoricalExecuteResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_pkg_proto_cas_cas_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_pkg_proto_cas_cas_proto_goTypes, + DependencyIndexes: file_pkg_proto_cas_cas_proto_depIdxs, + MessageInfos: file_pkg_proto_cas_cas_proto_msgTypes, + }.Build() + File_pkg_proto_cas_cas_proto = out.File + file_pkg_proto_cas_cas_proto_rawDesc = nil + file_pkg_proto_cas_cas_proto_goTypes = nil + file_pkg_proto_cas_cas_proto_depIdxs = nil +} diff --git a/pkg/proto/cas/cas.proto b/pkg/proto/cas/cas.proto new file mode 100644 index 0000000..9df1d37 --- /dev/null +++ b/pkg/proto/cas/cas.proto @@ -0,0 +1,28 @@ +syntax = "proto3"; + +package buildbarn.cas; + +import "build/bazel/remote/execution/v2/remote_execution.proto"; + +option go_package = "github.com/buildbarn/bb-remote-execution/pkg/proto/cas"; + +// HistoricalExecuteResponse is a custom message that is stored into the +// Content Addressable Storage. The Action Cache is only permitted to +// contain ActionResults of successful builds. In our case we also want +// to provide the user insight as to why their build fails by storing +// the ActionResult upon failure. 
+// +// This message is written into the ContentAddressableStorage by +// bb_worker by the CachingBuildExecutor. The digest is returned to the +// user by providing a URL to bb_browser as a message in the +// ExecuteResponse. +// +// Additionally, this message is attached to CompletedActions that are +// streamed through a CompletedActionLogger in order to provide metadata +// for uniquely identifying actions. +message HistoricalExecuteResponse { + reserved 2; + + build.bazel.remote.execution.v2.Digest action_digest = 1; + build.bazel.remote.execution.v2.ExecuteResponse execute_response = 3; +} diff --git a/pkg/proto/completedactionlogger/BUILD.bazel b/pkg/proto/completedactionlogger/BUILD.bazel new file mode 100644 index 0000000..1cce298 --- /dev/null +++ b/pkg/proto/completedactionlogger/BUILD.bazel @@ -0,0 +1,33 @@ +load("@rules_proto//proto:defs.bzl", "proto_library") +load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") + +proto_library( + name = "completedactionlogger_proto", + srcs = ["completed_action_logger.proto"], + visibility = ["//visibility:public"], + deps = [ + "//pkg/proto/cas:cas_proto", + "@com_github_bazelbuild_remote_apis//build/bazel/remote/execution/v2:remote_execution_proto", + "@com_google_protobuf//:empty_proto", + ], +) + +go_proto_library( + name = "completedactionlogger_go_proto", + compilers = ["@io_bazel_rules_go//proto:go_grpc"], + importpath = "github.com/buildbarn/bb-remote-execution/pkg/proto/completedactionlogger", + proto = ":completedactionlogger_proto", + visibility = ["//visibility:public"], + deps = [ + "//pkg/proto/cas", + "@com_github_bazelbuild_remote_apis//build/bazel/remote/execution/v2:execution", + ], +) + +go_library( + name = "completedactionlogger", + embed = [":completedactionlogger_go_proto"], + importpath = "github.com/buildbarn/bb-remote-execution/pkg/proto/completedactionlogger", + visibility = ["//visibility:public"], +) diff --git 
a/pkg/proto/completedactionlogger/completed_action_logger.pb.go b/pkg/proto/completedactionlogger/completed_action_logger.pb.go new file mode 100644 index 0000000..0b731e4 --- /dev/null +++ b/pkg/proto/completedactionlogger/completed_action_logger.pb.go @@ -0,0 +1,330 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.31.0 +// protoc v4.23.4 +// source: pkg/proto/completedactionlogger/completed_action_logger.proto + +package completedactionlogger + +import ( + context "context" + v2 "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + cas "github.com/buildbarn/bb-remote-execution/pkg/proto/cas" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type CompletedAction struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + HistoricalExecuteResponse *cas.HistoricalExecuteResponse `protobuf:"bytes,1,opt,name=historical_execute_response,json=historicalExecuteResponse,proto3" json:"historical_execute_response,omitempty"` + Uuid string `protobuf:"bytes,2,opt,name=uuid,proto3" json:"uuid,omitempty"` + InstanceName string `protobuf:"bytes,3,opt,name=instance_name,json=instanceName,proto3" json:"instance_name,omitempty"` + DigestFunction v2.DigestFunction_Value `protobuf:"varint,4,opt,name=digest_function,json=digestFunction,proto3,enum=build.bazel.remote.execution.v2.DigestFunction_Value" json:"digest_function,omitempty"` +} + +func (x *CompletedAction) Reset() { + *x = CompletedAction{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_completedactionlogger_completed_action_logger_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CompletedAction) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CompletedAction) ProtoMessage() {} + +func (x *CompletedAction) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_completedactionlogger_completed_action_logger_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CompletedAction.ProtoReflect.Descriptor instead. 
+func (*CompletedAction) Descriptor() ([]byte, []int) { + return file_pkg_proto_completedactionlogger_completed_action_logger_proto_rawDescGZIP(), []int{0} +} + +func (x *CompletedAction) GetHistoricalExecuteResponse() *cas.HistoricalExecuteResponse { + if x != nil { + return x.HistoricalExecuteResponse + } + return nil +} + +func (x *CompletedAction) GetUuid() string { + if x != nil { + return x.Uuid + } + return "" +} + +func (x *CompletedAction) GetInstanceName() string { + if x != nil { + return x.InstanceName + } + return "" +} + +func (x *CompletedAction) GetDigestFunction() v2.DigestFunction_Value { + if x != nil { + return x.DigestFunction + } + return v2.DigestFunction_Value(0) +} + +var File_pkg_proto_completedactionlogger_completed_action_logger_proto protoreflect.FileDescriptor + +var file_pkg_proto_completedactionlogger_completed_action_logger_proto_rawDesc = []byte{ + 0x0a, 0x3d, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6d, 0x70, + 0x6c, 0x65, 0x74, 0x65, 0x64, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x6c, 0x6f, 0x67, 0x67, 0x65, + 0x72, 0x2f, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x1f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x63, 0x6f, 0x6d, 0x70, 0x6c, + 0x65, 0x74, 0x65, 0x64, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x6c, 0x6f, 0x67, 0x67, 0x65, 0x72, + 0x1a, 0x36, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x2f, 0x62, 0x61, 0x7a, 0x65, 0x6c, 0x2f, 0x72, 0x65, + 0x6d, 0x6f, 0x74, 0x65, 0x2f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x76, + 0x32, 0x2f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 
0x6f, 0x1a, 0x17, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2f, 0x63, 0x61, 0x73, 0x2f, 0x63, 0x61, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x94, + 0x02, 0x0a, 0x0f, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x41, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x68, 0x0a, 0x1b, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x69, 0x63, 0x61, 0x6c, + 0x5f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, + 0x61, 0x72, 0x6e, 0x2e, 0x63, 0x61, 0x73, 0x2e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x69, 0x63, + 0x61, 0x6c, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x52, 0x19, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x69, 0x63, 0x61, 0x6c, 0x45, 0x78, 0x65, + 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, + 0x75, 0x75, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x75, 0x69, 0x64, + 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, + 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x5e, 0x0a, 0x0f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x5f, + 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x35, + 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x2e, 0x62, 0x61, 0x7a, 0x65, 0x6c, 0x2e, 0x72, 0x65, 0x6d, + 0x6f, 0x74, 0x65, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x32, + 0x2e, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0e, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x46, 0x75, 0x6e, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x32, 0x7c, 0x0a, 0x15, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, + 0x65, 0x64, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x6f, 
0x67, 0x67, 0x65, 0x72, 0x12, 0x63, + 0x0a, 0x13, 0x4c, 0x6f, 0x67, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x41, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, + 0x6e, 0x2e, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x61, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x6c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, + 0x64, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x28, + 0x01, 0x30, 0x01, 0x42, 0x4a, 0x5a, 0x48, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2f, 0x62, 0x62, 0x2d, 0x72, + 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2d, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2f, + 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, + 0x74, 0x65, 0x64, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x6c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_pkg_proto_completedactionlogger_completed_action_logger_proto_rawDescOnce sync.Once + file_pkg_proto_completedactionlogger_completed_action_logger_proto_rawDescData = file_pkg_proto_completedactionlogger_completed_action_logger_proto_rawDesc +) + +func file_pkg_proto_completedactionlogger_completed_action_logger_proto_rawDescGZIP() []byte { + file_pkg_proto_completedactionlogger_completed_action_logger_proto_rawDescOnce.Do(func() { + file_pkg_proto_completedactionlogger_completed_action_logger_proto_rawDescData = protoimpl.X.CompressGZIP(file_pkg_proto_completedactionlogger_completed_action_logger_proto_rawDescData) + }) + return file_pkg_proto_completedactionlogger_completed_action_logger_proto_rawDescData +} + +var file_pkg_proto_completedactionlogger_completed_action_logger_proto_msgTypes = 
make([]protoimpl.MessageInfo, 1) +var file_pkg_proto_completedactionlogger_completed_action_logger_proto_goTypes = []interface{}{ + (*CompletedAction)(nil), // 0: buildbarn.completedactionlogger.CompletedAction + (*cas.HistoricalExecuteResponse)(nil), // 1: buildbarn.cas.HistoricalExecuteResponse + (v2.DigestFunction_Value)(0), // 2: build.bazel.remote.execution.v2.DigestFunction.Value + (*emptypb.Empty)(nil), // 3: google.protobuf.Empty +} +var file_pkg_proto_completedactionlogger_completed_action_logger_proto_depIdxs = []int32{ + 1, // 0: buildbarn.completedactionlogger.CompletedAction.historical_execute_response:type_name -> buildbarn.cas.HistoricalExecuteResponse + 2, // 1: buildbarn.completedactionlogger.CompletedAction.digest_function:type_name -> build.bazel.remote.execution.v2.DigestFunction.Value + 0, // 2: buildbarn.completedactionlogger.CompletedActionLogger.LogCompletedActions:input_type -> buildbarn.completedactionlogger.CompletedAction + 3, // 3: buildbarn.completedactionlogger.CompletedActionLogger.LogCompletedActions:output_type -> google.protobuf.Empty + 3, // [3:4] is the sub-list for method output_type + 2, // [2:3] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_pkg_proto_completedactionlogger_completed_action_logger_proto_init() } +func file_pkg_proto_completedactionlogger_completed_action_logger_proto_init() { + if File_pkg_proto_completedactionlogger_completed_action_logger_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_pkg_proto_completedactionlogger_completed_action_logger_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CompletedAction); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := 
protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_pkg_proto_completedactionlogger_completed_action_logger_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_pkg_proto_completedactionlogger_completed_action_logger_proto_goTypes, + DependencyIndexes: file_pkg_proto_completedactionlogger_completed_action_logger_proto_depIdxs, + MessageInfos: file_pkg_proto_completedactionlogger_completed_action_logger_proto_msgTypes, + }.Build() + File_pkg_proto_completedactionlogger_completed_action_logger_proto = out.File + file_pkg_proto_completedactionlogger_completed_action_logger_proto_rawDesc = nil + file_pkg_proto_completedactionlogger_completed_action_logger_proto_goTypes = nil + file_pkg_proto_completedactionlogger_completed_action_logger_proto_depIdxs = nil +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// CompletedActionLoggerClient is the client API for CompletedActionLogger service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
+type CompletedActionLoggerClient interface { + LogCompletedActions(ctx context.Context, opts ...grpc.CallOption) (CompletedActionLogger_LogCompletedActionsClient, error) +} + +type completedActionLoggerClient struct { + cc grpc.ClientConnInterface +} + +func NewCompletedActionLoggerClient(cc grpc.ClientConnInterface) CompletedActionLoggerClient { + return &completedActionLoggerClient{cc} +} + +func (c *completedActionLoggerClient) LogCompletedActions(ctx context.Context, opts ...grpc.CallOption) (CompletedActionLogger_LogCompletedActionsClient, error) { + stream, err := c.cc.NewStream(ctx, &_CompletedActionLogger_serviceDesc.Streams[0], "/buildbarn.completedactionlogger.CompletedActionLogger/LogCompletedActions", opts...) + if err != nil { + return nil, err + } + x := &completedActionLoggerLogCompletedActionsClient{stream} + return x, nil +} + +type CompletedActionLogger_LogCompletedActionsClient interface { + Send(*CompletedAction) error + Recv() (*emptypb.Empty, error) + grpc.ClientStream +} + +type completedActionLoggerLogCompletedActionsClient struct { + grpc.ClientStream +} + +func (x *completedActionLoggerLogCompletedActionsClient) Send(m *CompletedAction) error { + return x.ClientStream.SendMsg(m) +} + +func (x *completedActionLoggerLogCompletedActionsClient) Recv() (*emptypb.Empty, error) { + m := new(emptypb.Empty) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// CompletedActionLoggerServer is the server API for CompletedActionLogger service. +type CompletedActionLoggerServer interface { + LogCompletedActions(CompletedActionLogger_LogCompletedActionsServer) error +} + +// UnimplementedCompletedActionLoggerServer can be embedded to have forward compatible implementations. 
+type UnimplementedCompletedActionLoggerServer struct { +} + +func (*UnimplementedCompletedActionLoggerServer) LogCompletedActions(CompletedActionLogger_LogCompletedActionsServer) error { + return status.Errorf(codes.Unimplemented, "method LogCompletedActions not implemented") +} + +func RegisterCompletedActionLoggerServer(s grpc.ServiceRegistrar, srv CompletedActionLoggerServer) { + s.RegisterService(&_CompletedActionLogger_serviceDesc, srv) +} + +func _CompletedActionLogger_LogCompletedActions_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(CompletedActionLoggerServer).LogCompletedActions(&completedActionLoggerLogCompletedActionsServer{stream}) +} + +type CompletedActionLogger_LogCompletedActionsServer interface { + Send(*emptypb.Empty) error + Recv() (*CompletedAction, error) + grpc.ServerStream +} + +type completedActionLoggerLogCompletedActionsServer struct { + grpc.ServerStream +} + +func (x *completedActionLoggerLogCompletedActionsServer) Send(m *emptypb.Empty) error { + return x.ServerStream.SendMsg(m) +} + +func (x *completedActionLoggerLogCompletedActionsServer) Recv() (*CompletedAction, error) { + m := new(CompletedAction) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _CompletedActionLogger_serviceDesc = grpc.ServiceDesc{ + ServiceName: "buildbarn.completedactionlogger.CompletedActionLogger", + HandlerType: (*CompletedActionLoggerServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "LogCompletedActions", + Handler: _CompletedActionLogger_LogCompletedActions_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "pkg/proto/completedactionlogger/completed_action_logger.proto", +} diff --git a/pkg/proto/completedactionlogger/completed_action_logger.proto b/pkg/proto/completedactionlogger/completed_action_logger.proto new file mode 100644 index 0000000..ea919ae --- /dev/null +++ 
b/pkg/proto/completedactionlogger/completed_action_logger.proto @@ -0,0 +1,43 @@ +syntax = "proto3"; + +package buildbarn.completedactionlogger; + +import "build/bazel/remote/execution/v2/remote_execution.proto"; +import "google/protobuf/empty.proto"; +import "pkg/proto/cas/cas.proto"; + +option go_package = "github.com/buildbarn/bb-remote-execution/pkg/proto/completedactionlogger"; + +// CompletedActionLogger provides a way for streaming completed build actions +// and their associated metadata to an external service in real time. This is +// primarily useful for analyzing or aggregating actions and determining how +// they may relate to each build invocation. +service CompletedActionLogger { + // Send a CompletedAction to another service as soon as a build action has + // completed. Receiving a message from the return stream indicates that the + // service successfully received the CompletedAction. + rpc LogCompletedActions(stream CompletedAction) + returns (stream google.protobuf.Empty); +} + +// CompletedAction wraps a finished build action in order to transmit to +// an external service. +message CompletedAction { + // A wrapper around the action's digest and REv2 ActionResult, which contains + // the action's associated metadata. + buildbarn.cas.HistoricalExecuteResponse historical_execute_response = 1; + + // A unique identifier associated with the CompletedAction, which is + // generated by the build executor. This provides a means by which the + // external logging service may be able to deduplicate incoming + // CompletedActions. The usage of this field is left to the external + // logging service to determine. + string uuid = 2; + + // The REv2 instance name of the remote cluster that workers are returning + // the action result from. + string instance_name = 3; + + // The digest function that was used to compute the action digest. 
+ build.bazel.remote.execution.v2.DigestFunction.Value digest_function = 4; +} diff --git a/pkg/proto/configuration/bb_noop_worker/BUILD.bazel b/pkg/proto/configuration/bb_noop_worker/BUILD.bazel new file mode 100644 index 0000000..120014e --- /dev/null +++ b/pkg/proto/configuration/bb_noop_worker/BUILD.bazel @@ -0,0 +1,35 @@ +load("@rules_proto//proto:defs.bzl", "proto_library") +load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") + +proto_library( + name = "bb_noop_worker_proto", + srcs = ["bb_noop_worker.proto"], + visibility = ["//visibility:public"], + deps = [ + "@com_github_bazelbuild_remote_apis//build/bazel/remote/execution/v2:remote_execution_proto", + "@com_github_buildbarn_bb_storage//pkg/proto/configuration/blobstore:blobstore_proto", + "@com_github_buildbarn_bb_storage//pkg/proto/configuration/global:global_proto", + "@com_github_buildbarn_bb_storage//pkg/proto/configuration/grpc:grpc_proto", + ], +) + +go_proto_library( + name = "bb_noop_worker_go_proto", + importpath = "github.com/buildbarn/bb-remote-execution/pkg/proto/configuration/bb_noop_worker", + proto = ":bb_noop_worker_proto", + visibility = ["//visibility:public"], + deps = [ + "@com_github_bazelbuild_remote_apis//build/bazel/remote/execution/v2:execution", + "@com_github_buildbarn_bb_storage//pkg/proto/configuration/blobstore", + "@com_github_buildbarn_bb_storage//pkg/proto/configuration/global", + "@com_github_buildbarn_bb_storage//pkg/proto/configuration/grpc", + ], +) + +go_library( + name = "bb_noop_worker", + embed = [":bb_noop_worker_go_proto"], + importpath = "github.com/buildbarn/bb-remote-execution/pkg/proto/configuration/bb_noop_worker", + visibility = ["//visibility:public"], +) diff --git a/pkg/proto/configuration/bb_noop_worker/bb_noop_worker.pb.go b/pkg/proto/configuration/bb_noop_worker/bb_noop_worker.pb.go new file mode 100644 index 0000000..c10ac52 --- /dev/null +++ 
b/pkg/proto/configuration/bb_noop_worker/bb_noop_worker.pb.go @@ -0,0 +1,274 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.31.0 +// protoc v4.23.4 +// source: pkg/proto/configuration/bb_noop_worker/bb_noop_worker.proto + +package bb_noop_worker + +import ( + v2 "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + blobstore "github.com/buildbarn/bb-storage/pkg/proto/configuration/blobstore" + global "github.com/buildbarn/bb-storage/pkg/proto/configuration/global" + grpc "github.com/buildbarn/bb-storage/pkg/proto/configuration/grpc" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type ApplicationConfiguration struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Global *global.Configuration `protobuf:"bytes,1,opt,name=global,proto3" json:"global,omitempty"` + BrowserUrl string `protobuf:"bytes,2,opt,name=browser_url,json=browserUrl,proto3" json:"browser_url,omitempty"` + Scheduler *grpc.ClientConfiguration `protobuf:"bytes,3,opt,name=scheduler,proto3" json:"scheduler,omitempty"` + InstanceNamePrefix string `protobuf:"bytes,4,opt,name=instance_name_prefix,json=instanceNamePrefix,proto3" json:"instance_name_prefix,omitempty"` + Platform *v2.Platform `protobuf:"bytes,5,opt,name=platform,proto3" json:"platform,omitempty"` + WorkerId map[string]string `protobuf:"bytes,6,rep,name=worker_id,json=workerId,proto3" json:"worker_id,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + ContentAddressableStorage *blobstore.BlobAccessConfiguration 
`protobuf:"bytes,7,opt,name=content_addressable_storage,json=contentAddressableStorage,proto3" json:"content_addressable_storage,omitempty"` + MaximumMessageSizeBytes int64 `protobuf:"varint,8,opt,name=maximum_message_size_bytes,json=maximumMessageSizeBytes,proto3" json:"maximum_message_size_bytes,omitempty"` +} + +func (x *ApplicationConfiguration) Reset() { + *x = ApplicationConfiguration{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_configuration_bb_noop_worker_bb_noop_worker_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ApplicationConfiguration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ApplicationConfiguration) ProtoMessage() {} + +func (x *ApplicationConfiguration) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_configuration_bb_noop_worker_bb_noop_worker_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ApplicationConfiguration.ProtoReflect.Descriptor instead. 
+func (*ApplicationConfiguration) Descriptor() ([]byte, []int) { + return file_pkg_proto_configuration_bb_noop_worker_bb_noop_worker_proto_rawDescGZIP(), []int{0} +} + +func (x *ApplicationConfiguration) GetGlobal() *global.Configuration { + if x != nil { + return x.Global + } + return nil +} + +func (x *ApplicationConfiguration) GetBrowserUrl() string { + if x != nil { + return x.BrowserUrl + } + return "" +} + +func (x *ApplicationConfiguration) GetScheduler() *grpc.ClientConfiguration { + if x != nil { + return x.Scheduler + } + return nil +} + +func (x *ApplicationConfiguration) GetInstanceNamePrefix() string { + if x != nil { + return x.InstanceNamePrefix + } + return "" +} + +func (x *ApplicationConfiguration) GetPlatform() *v2.Platform { + if x != nil { + return x.Platform + } + return nil +} + +func (x *ApplicationConfiguration) GetWorkerId() map[string]string { + if x != nil { + return x.WorkerId + } + return nil +} + +func (x *ApplicationConfiguration) GetContentAddressableStorage() *blobstore.BlobAccessConfiguration { + if x != nil { + return x.ContentAddressableStorage + } + return nil +} + +func (x *ApplicationConfiguration) GetMaximumMessageSizeBytes() int64 { + if x != nil { + return x.MaximumMessageSizeBytes + } + return 0 +} + +var File_pkg_proto_configuration_bb_noop_worker_bb_noop_worker_proto protoreflect.FileDescriptor + +var file_pkg_proto_configuration_bb_noop_worker_bb_noop_worker_proto_rawDesc = []byte{ + 0x0a, 0x3b, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x62, 0x62, 0x5f, 0x6e, 0x6f, 0x6f, + 0x70, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x2f, 0x62, 0x62, 0x5f, 0x6e, 0x6f, 0x6f, 0x70, + 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x26, 0x62, + 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x62, 0x62, 
0x5f, 0x6e, 0x6f, 0x6f, 0x70, 0x5f, 0x77, + 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x1a, 0x36, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x2f, 0x62, 0x61, 0x7a, + 0x65, 0x6c, 0x2f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, + 0x69, 0x6f, 0x6e, 0x2f, 0x76, 0x32, 0x2f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x65, 0x78, + 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x31, 0x70, + 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x62, 0x6c, 0x6f, 0x62, 0x73, 0x74, 0x6f, 0x72, 0x65, + 0x2f, 0x62, 0x6c, 0x6f, 0x62, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x2b, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, + 0x2f, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x27, 0x70, + 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xaf, 0x05, 0x0a, 0x18, 0x41, 0x70, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x45, 0x0a, 0x06, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x67, 0x6c, + 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x06, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x12, 0x1f, 0x0a, 0x0b, 0x62, 0x72, + 0x6f, 0x77, 0x73, 0x65, 0x72, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 
0x52, + 0x0a, 0x62, 0x72, 0x6f, 0x77, 0x73, 0x65, 0x72, 0x55, 0x72, 0x6c, 0x12, 0x4f, 0x0a, 0x09, 0x73, + 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, + 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x6c, + 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x09, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x12, 0x30, 0x0a, 0x14, + 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x72, + 0x65, 0x66, 0x69, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x69, 0x6e, 0x73, 0x74, + 0x61, 0x6e, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x45, + 0x0a, 0x08, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x29, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x2e, 0x62, 0x61, 0x7a, 0x65, 0x6c, 0x2e, 0x72, + 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x76, 0x32, 0x2e, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x52, 0x08, 0x70, 0x6c, 0x61, + 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x12, 0x6b, 0x0a, 0x09, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x5f, + 0x69, 0x64, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x4e, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, + 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x62, 0x62, 0x5f, 0x6e, 0x6f, 0x6f, 0x70, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x65, + 0x72, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x65, + 0x72, 0x49, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, + 0x49, 0x64, 0x12, 0x7a, 0x0a, 
0x1b, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x61, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, + 0x61, 0x72, 0x6e, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x2e, 0x62, 0x6c, 0x6f, 0x62, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x42, 0x6c, 0x6f, 0x62, + 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x19, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x41, 0x64, 0x64, 0x72, + 0x65, 0x73, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x3b, + 0x0a, 0x1a, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x08, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x17, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x1a, 0x3b, 0x0a, 0x0d, 0x57, + 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x51, 0x5a, 0x4f, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, + 0x2f, 0x62, 0x62, 0x2d, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2d, 0x65, 0x78, 0x65, 0x63, 0x75, + 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x62, 0x62, 0x5f, + 0x6e, 0x6f, 0x6f, 0x70, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 
0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, +} + +var ( + file_pkg_proto_configuration_bb_noop_worker_bb_noop_worker_proto_rawDescOnce sync.Once + file_pkg_proto_configuration_bb_noop_worker_bb_noop_worker_proto_rawDescData = file_pkg_proto_configuration_bb_noop_worker_bb_noop_worker_proto_rawDesc +) + +func file_pkg_proto_configuration_bb_noop_worker_bb_noop_worker_proto_rawDescGZIP() []byte { + file_pkg_proto_configuration_bb_noop_worker_bb_noop_worker_proto_rawDescOnce.Do(func() { + file_pkg_proto_configuration_bb_noop_worker_bb_noop_worker_proto_rawDescData = protoimpl.X.CompressGZIP(file_pkg_proto_configuration_bb_noop_worker_bb_noop_worker_proto_rawDescData) + }) + return file_pkg_proto_configuration_bb_noop_worker_bb_noop_worker_proto_rawDescData +} + +var file_pkg_proto_configuration_bb_noop_worker_bb_noop_worker_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_pkg_proto_configuration_bb_noop_worker_bb_noop_worker_proto_goTypes = []interface{}{ + (*ApplicationConfiguration)(nil), // 0: buildbarn.configuration.bb_noop_worker.ApplicationConfiguration + nil, // 1: buildbarn.configuration.bb_noop_worker.ApplicationConfiguration.WorkerIdEntry + (*global.Configuration)(nil), // 2: buildbarn.configuration.global.Configuration + (*grpc.ClientConfiguration)(nil), // 3: buildbarn.configuration.grpc.ClientConfiguration + (*v2.Platform)(nil), // 4: build.bazel.remote.execution.v2.Platform + (*blobstore.BlobAccessConfiguration)(nil), // 5: buildbarn.configuration.blobstore.BlobAccessConfiguration +} +var file_pkg_proto_configuration_bb_noop_worker_bb_noop_worker_proto_depIdxs = []int32{ + 2, // 0: buildbarn.configuration.bb_noop_worker.ApplicationConfiguration.global:type_name -> buildbarn.configuration.global.Configuration + 3, // 1: buildbarn.configuration.bb_noop_worker.ApplicationConfiguration.scheduler:type_name -> buildbarn.configuration.grpc.ClientConfiguration + 4, // 2: 
buildbarn.configuration.bb_noop_worker.ApplicationConfiguration.platform:type_name -> build.bazel.remote.execution.v2.Platform + 1, // 3: buildbarn.configuration.bb_noop_worker.ApplicationConfiguration.worker_id:type_name -> buildbarn.configuration.bb_noop_worker.ApplicationConfiguration.WorkerIdEntry + 5, // 4: buildbarn.configuration.bb_noop_worker.ApplicationConfiguration.content_addressable_storage:type_name -> buildbarn.configuration.blobstore.BlobAccessConfiguration + 5, // [5:5] is the sub-list for method output_type + 5, // [5:5] is the sub-list for method input_type + 5, // [5:5] is the sub-list for extension type_name + 5, // [5:5] is the sub-list for extension extendee + 0, // [0:5] is the sub-list for field type_name +} + +func init() { file_pkg_proto_configuration_bb_noop_worker_bb_noop_worker_proto_init() } +func file_pkg_proto_configuration_bb_noop_worker_bb_noop_worker_proto_init() { + if File_pkg_proto_configuration_bb_noop_worker_bb_noop_worker_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_pkg_proto_configuration_bb_noop_worker_bb_noop_worker_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ApplicationConfiguration); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_pkg_proto_configuration_bb_noop_worker_bb_noop_worker_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_pkg_proto_configuration_bb_noop_worker_bb_noop_worker_proto_goTypes, + DependencyIndexes: file_pkg_proto_configuration_bb_noop_worker_bb_noop_worker_proto_depIdxs, + MessageInfos: file_pkg_proto_configuration_bb_noop_worker_bb_noop_worker_proto_msgTypes, + }.Build() + File_pkg_proto_configuration_bb_noop_worker_bb_noop_worker_proto = 
out.File + file_pkg_proto_configuration_bb_noop_worker_bb_noop_worker_proto_rawDesc = nil + file_pkg_proto_configuration_bb_noop_worker_bb_noop_worker_proto_goTypes = nil + file_pkg_proto_configuration_bb_noop_worker_bb_noop_worker_proto_depIdxs = nil +} diff --git a/pkg/proto/configuration/bb_noop_worker/bb_noop_worker.proto b/pkg/proto/configuration/bb_noop_worker/bb_noop_worker.proto new file mode 100644 index 0000000..b5f3060 --- /dev/null +++ b/pkg/proto/configuration/bb_noop_worker/bb_noop_worker.proto @@ -0,0 +1,39 @@ +syntax = "proto3"; + +package buildbarn.configuration.bb_noop_worker; + +import "build/bazel/remote/execution/v2/remote_execution.proto"; +import "pkg/proto/configuration/blobstore/blobstore.proto"; +import "pkg/proto/configuration/global/global.proto"; +import "pkg/proto/configuration/grpc/grpc.proto"; + +option go_package = "github.com/buildbarn/bb-remote-execution/pkg/proto/configuration/bb_noop_worker"; + +message ApplicationConfiguration { + // Common configuration options that apply to all Buildbarn binaries. + buildbarn.configuration.global.Configuration global = 1; + + // URL of the Buildbarn Browser, shown to the user upon build completion. + string browser_url = 2; + + // Endpoint of the scheduler to which to connect. + buildbarn.configuration.grpc.ClientConfiguration scheduler = 3; + + // The prefix of the instance name for which requests from clients + // should be routed to this worker. + string instance_name_prefix = 4; + + // Platform properties that need to be reported to the scheduler. + build.bazel.remote.execution.v2.Platform platform = 5; + + // Additional fields that need to be attached to the ID of the worker, + // as announced to the scheduler. + map worker_id = 6; + + // Configuration for blob storage. + buildbarn.configuration.blobstore.BlobAccessConfiguration + content_addressable_storage = 7; + + // Maximum Protobuf message size to unmarshal. 
+ int64 maximum_message_size_bytes = 8; +} diff --git a/pkg/proto/configuration/bb_runner/BUILD.bazel b/pkg/proto/configuration/bb_runner/BUILD.bazel new file mode 100644 index 0000000..b95e0cd --- /dev/null +++ b/pkg/proto/configuration/bb_runner/BUILD.bazel @@ -0,0 +1,33 @@ +load("@rules_proto//proto:defs.bzl", "proto_library") +load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") + +proto_library( + name = "bb_runner_proto", + srcs = ["bb_runner.proto"], + visibility = ["//visibility:public"], + deps = [ + "//pkg/proto/configuration/credentials:credentials_proto", + "@com_github_buildbarn_bb_storage//pkg/proto/configuration/global:global_proto", + "@com_github_buildbarn_bb_storage//pkg/proto/configuration/grpc:grpc_proto", + ], +) + +go_proto_library( + name = "bb_runner_go_proto", + importpath = "github.com/buildbarn/bb-remote-execution/pkg/proto/configuration/bb_runner", + proto = ":bb_runner_proto", + visibility = ["//visibility:public"], + deps = [ + "//pkg/proto/configuration/credentials", + "@com_github_buildbarn_bb_storage//pkg/proto/configuration/global", + "@com_github_buildbarn_bb_storage//pkg/proto/configuration/grpc", + ], +) + +go_library( + name = "bb_runner", + embed = [":bb_runner_go_proto"], + importpath = "github.com/buildbarn/bb-remote-execution/pkg/proto/configuration/bb_runner", + visibility = ["//visibility:public"], +) diff --git a/pkg/proto/configuration/bb_runner/bb_runner.pb.go b/pkg/proto/configuration/bb_runner/bb_runner.pb.go new file mode 100644 index 0000000..4a1356d --- /dev/null +++ b/pkg/proto/configuration/bb_runner/bb_runner.pb.go @@ -0,0 +1,336 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.31.0 +// protoc v4.23.4 +// source: pkg/proto/configuration/bb_runner/bb_runner.proto + +package bb_runner + +import ( + credentials "github.com/buildbarn/bb-remote-execution/pkg/proto/configuration/credentials" + global "github.com/buildbarn/bb-storage/pkg/proto/configuration/global" + grpc "github.com/buildbarn/bb-storage/pkg/proto/configuration/grpc" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type ApplicationConfiguration struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + BuildDirectoryPath string `protobuf:"bytes,1,opt,name=build_directory_path,json=buildDirectoryPath,proto3" json:"build_directory_path,omitempty"` + GrpcServers []*grpc.ServerConfiguration `protobuf:"bytes,2,rep,name=grpc_servers,json=grpcServers,proto3" json:"grpc_servers,omitempty"` + CleanTemporaryDirectories []string `protobuf:"bytes,3,rep,name=clean_temporary_directories,json=cleanTemporaryDirectories,proto3" json:"clean_temporary_directories,omitempty"` + Global *global.Configuration `protobuf:"bytes,4,opt,name=global,proto3" json:"global,omitempty"` + SetTmpdirEnvironmentVariable bool `protobuf:"varint,5,opt,name=set_tmpdir_environment_variable,json=setTmpdirEnvironmentVariable,proto3" json:"set_tmpdir_environment_variable,omitempty"` + TemporaryDirectoryInstaller *grpc.ClientConfiguration `protobuf:"bytes,6,opt,name=temporary_directory_installer,json=temporaryDirectoryInstaller,proto3" json:"temporary_directory_installer,omitempty"` + ChrootIntoInputRoot bool 
`protobuf:"varint,7,opt,name=chroot_into_input_root,json=chrootIntoInputRoot,proto3" json:"chroot_into_input_root,omitempty"` + CleanProcessTable bool `protobuf:"varint,8,opt,name=clean_process_table,json=cleanProcessTable,proto3" json:"clean_process_table,omitempty"` + ReadinessCheckingPathnames []string `protobuf:"bytes,10,rep,name=readiness_checking_pathnames,json=readinessCheckingPathnames,proto3" json:"readiness_checking_pathnames,omitempty"` + RunCommandsAs *credentials.UNIXCredentialsConfiguration `protobuf:"bytes,11,opt,name=run_commands_as,json=runCommandsAs,proto3" json:"run_commands_as,omitempty"` + SymlinkTemporaryDirectories []string `protobuf:"bytes,12,rep,name=symlink_temporary_directories,json=symlinkTemporaryDirectories,proto3" json:"symlink_temporary_directories,omitempty"` + RunCommandCleaner []string `protobuf:"bytes,13,rep,name=run_command_cleaner,json=runCommandCleaner,proto3" json:"run_command_cleaner,omitempty"` + AppleXcodeDeveloperDirectories map[string]string `protobuf:"bytes,14,rep,name=apple_xcode_developer_directories,json=appleXcodeDeveloperDirectories,proto3" json:"apple_xcode_developer_directories,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *ApplicationConfiguration) Reset() { + *x = ApplicationConfiguration{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_configuration_bb_runner_bb_runner_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ApplicationConfiguration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ApplicationConfiguration) ProtoMessage() {} + +func (x *ApplicationConfiguration) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_configuration_bb_runner_bb_runner_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } 
+ return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ApplicationConfiguration.ProtoReflect.Descriptor instead. +func (*ApplicationConfiguration) Descriptor() ([]byte, []int) { + return file_pkg_proto_configuration_bb_runner_bb_runner_proto_rawDescGZIP(), []int{0} +} + +func (x *ApplicationConfiguration) GetBuildDirectoryPath() string { + if x != nil { + return x.BuildDirectoryPath + } + return "" +} + +func (x *ApplicationConfiguration) GetGrpcServers() []*grpc.ServerConfiguration { + if x != nil { + return x.GrpcServers + } + return nil +} + +func (x *ApplicationConfiguration) GetCleanTemporaryDirectories() []string { + if x != nil { + return x.CleanTemporaryDirectories + } + return nil +} + +func (x *ApplicationConfiguration) GetGlobal() *global.Configuration { + if x != nil { + return x.Global + } + return nil +} + +func (x *ApplicationConfiguration) GetSetTmpdirEnvironmentVariable() bool { + if x != nil { + return x.SetTmpdirEnvironmentVariable + } + return false +} + +func (x *ApplicationConfiguration) GetTemporaryDirectoryInstaller() *grpc.ClientConfiguration { + if x != nil { + return x.TemporaryDirectoryInstaller + } + return nil +} + +func (x *ApplicationConfiguration) GetChrootIntoInputRoot() bool { + if x != nil { + return x.ChrootIntoInputRoot + } + return false +} + +func (x *ApplicationConfiguration) GetCleanProcessTable() bool { + if x != nil { + return x.CleanProcessTable + } + return false +} + +func (x *ApplicationConfiguration) GetReadinessCheckingPathnames() []string { + if x != nil { + return x.ReadinessCheckingPathnames + } + return nil +} + +func (x *ApplicationConfiguration) GetRunCommandsAs() *credentials.UNIXCredentialsConfiguration { + if x != nil { + return x.RunCommandsAs + } + return nil +} + +func (x *ApplicationConfiguration) GetSymlinkTemporaryDirectories() []string { + if x != nil { + return x.SymlinkTemporaryDirectories + } + return nil +} + +func (x *ApplicationConfiguration) GetRunCommandCleaner() []string { + if x 
!= nil { + return x.RunCommandCleaner + } + return nil +} + +func (x *ApplicationConfiguration) GetAppleXcodeDeveloperDirectories() map[string]string { + if x != nil { + return x.AppleXcodeDeveloperDirectories + } + return nil +} + +var File_pkg_proto_configuration_bb_runner_bb_runner_proto protoreflect.FileDescriptor + +var file_pkg_proto_configuration_bb_runner_bb_runner_proto_rawDesc = []byte{ + 0x0a, 0x31, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x62, 0x62, 0x5f, 0x72, 0x75, 0x6e, + 0x6e, 0x65, 0x72, 0x2f, 0x62, 0x62, 0x5f, 0x72, 0x75, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x21, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x62, 0x62, 0x5f, + 0x72, 0x75, 0x6e, 0x6e, 0x65, 0x72, 0x1a, 0x35, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, + 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x2f, 0x63, 0x72, 0x65, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2b, 0x70, + 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2f, 0x67, 0x6c, + 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x27, 0x70, 0x6b, 0x67, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x22, 0xf3, 0x08, 0x0a, 0x18, 0x41, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x30, 0x0a, 0x14, 0x62, 
0x75, 0x69, 0x6c, 0x64, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, + 0x6f, 0x72, 0x79, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, + 0x62, 0x75, 0x69, 0x6c, 0x64, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x50, 0x61, + 0x74, 0x68, 0x12, 0x54, 0x0a, 0x0c, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, + 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x67, 0x72, 0x70, + 0x63, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x12, 0x3e, 0x0a, 0x1b, 0x63, 0x6c, 0x65, 0x61, + 0x6e, 0x5f, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x72, 0x79, 0x5f, 0x64, 0x69, 0x72, 0x65, + 0x63, 0x74, 0x6f, 0x72, 0x69, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x19, 0x63, + 0x6c, 0x65, 0x61, 0x6e, 0x54, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x72, 0x79, 0x44, 0x69, 0x72, + 0x65, 0x63, 0x74, 0x6f, 0x72, 0x69, 0x65, 0x73, 0x12, 0x45, 0x0a, 0x06, 0x67, 0x6c, 0x6f, 0x62, + 0x61, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, + 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x12, + 0x45, 0x0a, 0x1f, 0x73, 0x65, 0x74, 0x5f, 0x74, 0x6d, 0x70, 0x64, 0x69, 0x72, 0x5f, 0x65, 0x6e, + 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x72, 0x69, 0x61, 0x62, + 0x6c, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1c, 0x73, 0x65, 0x74, 0x54, 0x6d, 0x70, + 0x64, 0x69, 0x72, 0x45, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 
0x65, 0x6e, 0x74, 0x56, 0x61, + 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x75, 0x0a, 0x1d, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, + 0x61, 0x72, 0x79, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x5f, 0x69, 0x6e, + 0x73, 0x74, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, + 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x1b, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x72, 0x79, 0x44, 0x69, 0x72, 0x65, 0x63, + 0x74, 0x6f, 0x72, 0x79, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x12, 0x33, 0x0a, + 0x16, 0x63, 0x68, 0x72, 0x6f, 0x6f, 0x74, 0x5f, 0x69, 0x6e, 0x74, 0x6f, 0x5f, 0x69, 0x6e, 0x70, + 0x75, 0x74, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x63, + 0x68, 0x72, 0x6f, 0x6f, 0x74, 0x49, 0x6e, 0x74, 0x6f, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x52, 0x6f, + 0x6f, 0x74, 0x12, 0x2e, 0x0a, 0x13, 0x63, 0x6c, 0x65, 0x61, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x63, + 0x65, 0x73, 0x73, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x11, 0x63, 0x6c, 0x65, 0x61, 0x6e, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x54, 0x61, 0x62, + 0x6c, 0x65, 0x12, 0x40, 0x0a, 0x1c, 0x72, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x65, 0x73, 0x73, 0x5f, + 0x63, 0x68, 0x65, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x6e, 0x61, 0x6d, + 0x65, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x1a, 0x72, 0x65, 0x61, 0x64, 0x69, 0x6e, + 0x65, 0x73, 0x73, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x50, 0x61, 0x74, 0x68, 0x6e, + 0x61, 0x6d, 0x65, 0x73, 0x12, 0x69, 0x0a, 0x0f, 0x72, 0x75, 0x6e, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, + 0x61, 0x6e, 0x64, 0x73, 0x5f, 0x61, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x41, 0x2e, + 0x62, 
0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x61, 0x6c, 0x73, 0x2e, 0x55, 0x4e, 0x49, 0x58, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x61, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x0d, 0x72, 0x75, 0x6e, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x73, 0x41, 0x73, 0x12, + 0x42, 0x0a, 0x1d, 0x73, 0x79, 0x6d, 0x6c, 0x69, 0x6e, 0x6b, 0x5f, 0x74, 0x65, 0x6d, 0x70, 0x6f, + 0x72, 0x61, 0x72, 0x79, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x69, 0x65, 0x73, + 0x18, 0x0c, 0x20, 0x03, 0x28, 0x09, 0x52, 0x1b, 0x73, 0x79, 0x6d, 0x6c, 0x69, 0x6e, 0x6b, 0x54, + 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x72, 0x79, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, + 0x69, 0x65, 0x73, 0x12, 0x2e, 0x0a, 0x13, 0x72, 0x75, 0x6e, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x61, + 0x6e, 0x64, 0x5f, 0x63, 0x6c, 0x65, 0x61, 0x6e, 0x65, 0x72, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x11, 0x72, 0x75, 0x6e, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x43, 0x6c, 0x65, 0x61, + 0x6e, 0x65, 0x72, 0x12, 0xaa, 0x01, 0x0a, 0x21, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x5f, 0x78, 0x63, + 0x6f, 0x64, 0x65, 0x5f, 0x64, 0x65, 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x72, 0x5f, 0x64, 0x69, + 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x69, 0x65, 0x73, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x5f, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x62, 0x62, 0x5f, 0x72, 0x75, 0x6e, + 0x6e, 0x65, 0x72, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x41, 0x70, 0x70, + 0x6c, 0x65, 0x58, 0x63, 0x6f, 0x64, 0x65, 0x44, 0x65, 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x72, + 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 
0x72, 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x1e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x58, 0x63, 0x6f, 0x64, 0x65, 0x44, 0x65, 0x76, 0x65, + 0x6c, 0x6f, 0x70, 0x65, 0x72, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x69, 0x65, 0x73, + 0x1a, 0x51, 0x0a, 0x23, 0x41, 0x70, 0x70, 0x6c, 0x65, 0x58, 0x63, 0x6f, 0x64, 0x65, 0x44, 0x65, + 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x72, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x69, + 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, + 0x02, 0x38, 0x01, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x42, 0x4c, 0x5a, 0x4a, 0x67, 0x69, 0x74, + 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, + 0x6e, 0x2f, 0x62, 0x62, 0x2d, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2d, 0x65, 0x78, 0x65, 0x63, + 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x62, 0x62, + 0x5f, 0x72, 0x75, 0x6e, 0x6e, 0x65, 0x72, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_pkg_proto_configuration_bb_runner_bb_runner_proto_rawDescOnce sync.Once + file_pkg_proto_configuration_bb_runner_bb_runner_proto_rawDescData = file_pkg_proto_configuration_bb_runner_bb_runner_proto_rawDesc +) + +func file_pkg_proto_configuration_bb_runner_bb_runner_proto_rawDescGZIP() []byte { + file_pkg_proto_configuration_bb_runner_bb_runner_proto_rawDescOnce.Do(func() { + file_pkg_proto_configuration_bb_runner_bb_runner_proto_rawDescData = protoimpl.X.CompressGZIP(file_pkg_proto_configuration_bb_runner_bb_runner_proto_rawDescData) + }) + return file_pkg_proto_configuration_bb_runner_bb_runner_proto_rawDescData +} + +var 
file_pkg_proto_configuration_bb_runner_bb_runner_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_pkg_proto_configuration_bb_runner_bb_runner_proto_goTypes = []interface{}{ + (*ApplicationConfiguration)(nil), // 0: buildbarn.configuration.bb_runner.ApplicationConfiguration + nil, // 1: buildbarn.configuration.bb_runner.ApplicationConfiguration.AppleXcodeDeveloperDirectoriesEntry + (*grpc.ServerConfiguration)(nil), // 2: buildbarn.configuration.grpc.ServerConfiguration + (*global.Configuration)(nil), // 3: buildbarn.configuration.global.Configuration + (*grpc.ClientConfiguration)(nil), // 4: buildbarn.configuration.grpc.ClientConfiguration + (*credentials.UNIXCredentialsConfiguration)(nil), // 5: buildbarn.configuration.credentials.UNIXCredentialsConfiguration +} +var file_pkg_proto_configuration_bb_runner_bb_runner_proto_depIdxs = []int32{ + 2, // 0: buildbarn.configuration.bb_runner.ApplicationConfiguration.grpc_servers:type_name -> buildbarn.configuration.grpc.ServerConfiguration + 3, // 1: buildbarn.configuration.bb_runner.ApplicationConfiguration.global:type_name -> buildbarn.configuration.global.Configuration + 4, // 2: buildbarn.configuration.bb_runner.ApplicationConfiguration.temporary_directory_installer:type_name -> buildbarn.configuration.grpc.ClientConfiguration + 5, // 3: buildbarn.configuration.bb_runner.ApplicationConfiguration.run_commands_as:type_name -> buildbarn.configuration.credentials.UNIXCredentialsConfiguration + 1, // 4: buildbarn.configuration.bb_runner.ApplicationConfiguration.apple_xcode_developer_directories:type_name -> buildbarn.configuration.bb_runner.ApplicationConfiguration.AppleXcodeDeveloperDirectoriesEntry + 5, // [5:5] is the sub-list for method output_type + 5, // [5:5] is the sub-list for method input_type + 5, // [5:5] is the sub-list for extension type_name + 5, // [5:5] is the sub-list for extension extendee + 0, // [0:5] is the sub-list for field type_name +} + +func init() { 
file_pkg_proto_configuration_bb_runner_bb_runner_proto_init() } +func file_pkg_proto_configuration_bb_runner_bb_runner_proto_init() { + if File_pkg_proto_configuration_bb_runner_bb_runner_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_pkg_proto_configuration_bb_runner_bb_runner_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ApplicationConfiguration); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_pkg_proto_configuration_bb_runner_bb_runner_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_pkg_proto_configuration_bb_runner_bb_runner_proto_goTypes, + DependencyIndexes: file_pkg_proto_configuration_bb_runner_bb_runner_proto_depIdxs, + MessageInfos: file_pkg_proto_configuration_bb_runner_bb_runner_proto_msgTypes, + }.Build() + File_pkg_proto_configuration_bb_runner_bb_runner_proto = out.File + file_pkg_proto_configuration_bb_runner_bb_runner_proto_rawDesc = nil + file_pkg_proto_configuration_bb_runner_bb_runner_proto_goTypes = nil + file_pkg_proto_configuration_bb_runner_bb_runner_proto_depIdxs = nil +} diff --git a/pkg/proto/configuration/bb_runner/bb_runner.proto b/pkg/proto/configuration/bb_runner/bb_runner.proto new file mode 100644 index 0000000..94c5251 --- /dev/null +++ b/pkg/proto/configuration/bb_runner/bb_runner.proto @@ -0,0 +1,134 @@ +syntax = "proto3"; + +package buildbarn.configuration.bb_runner; + +import "pkg/proto/configuration/credentials/credentials.proto"; +import "pkg/proto/configuration/global/global.proto"; +import "pkg/proto/configuration/grpc/grpc.proto"; + +option go_package = "github.com/buildbarn/bb-remote-execution/pkg/proto/configuration/bb_runner"; + +message ApplicationConfiguration { + 
// Directory where builds take place. + string build_directory_path = 1; + + // gRPC servers to spawn to listen for requests from bb_worker. + repeated buildbarn.configuration.grpc.ServerConfiguration grpc_servers = 2; + + // Temporary directories that should be cleaned up after a build action + // (e.g. /tmp). + repeated string clean_temporary_directories = 3; + + // Common configuration options that apply to all Buildbarn binaries. + buildbarn.configuration.global.Configuration global = 4; + + // Run every build action with the TMPDIR environment variable set to + // point to a location inside the build directory. This causes + // temporary files to be cleaned up automatically on the build + // action's behalf, assuming it properly respects the environment + // variable. + bool set_tmpdir_environment_variable = 5; + + // Optional helper process for resolving /tmp. + buildbarn.configuration.grpc.ClientConfiguration + temporary_directory_installer = 6; + + // Chroot into the input root to run commands. This option can be used + // if the input root contains a full userland installation. This + // feature is used by the BuildStream build system. + bool chroot_into_input_root = 7; + + // Kill processes that are left behind by build actions that are + // running in the background. + // + // Enabling this feature causes all processes to be killed that run as + // the same user ID as bb_runner (or the one configured through + // 'run_commands_as') and were spawned after bb_runner. This feature + // must not be enabled when the same user ID is used for other + // purposes (e.g., running multiple bb_runners), as this may cause + // unrelated processes to be killed. + bool clean_process_table = 8; + + // Was filesystem_writability_checker which has been removed. + reserved 9; + + // List of paths that need to exist on the system for the runner to be + // considered healthy. 
This option may be set to let a runner stop + // accepting more work in case a remote file system becomes + // unavailable. + repeated string readiness_checking_pathnames = 10; + + // When set, run commands as another user. On most platforms, this + // requires bb_runner to run as root. + buildbarn.configuration.credentials.UNIXCredentialsConfiguration + run_commands_as = 11; + + // System temporary directories (e.g., /tmp) that should be removed + // and replaced with symbolic links pointing into the temporary + // directory that bb_worker allocates for an action. + // + // This option is similar to clean_temporary_directories, except that + // it ensures that temporary directories point to a location managed + // by bb_worker. The advantage of this approach is that it makes it + // easier to apply resource limits to disk consumption of build + // actions, especially when bb_worker is configured to use FUSE. + // + // It is only safe to enable this option when this runner executes + // only up to a single command concurrently. + repeated string symlink_temporary_directories = 12; + + // Run a clean command with arguments. This option allows a custom clean + // operation that can perform arbitrary cleaning desired on the system. + // This could be useful reset state that cannot be changed using the other + // built-in options (e.g. stopping docker containers left running by an + // action). + // + // This command runs when the runner transitions from being idle to running + // at least one action, and vice versa. It is also called periodically + // whenever the runner is idle. + // + // Caution: since this option executes a command in a separate process + // there is an inherent performance penalty. If possible, use the built-in + // cleaning options that are handled in-process for optimal performance. 
+ repeated string run_command_cleaner = 13; + + // If set, automatically inject DEVELOPER_DIR and SDKROOT environment + // variables based on the action's values of the APPLE_SDK_PLATFORM, + // APPLE_SDK_VERSION_OVERRIDE, and XCODE_VERSION_OVERRIDE environment + // variables. + // + // Tools such as /usr/bin/cc, /usr/bin/ld, etc. that ship with macOS + // are stubs that forward their calls to copies that are part of + // Xcode. As it is possible to install multiple versions of Xcode at + // different locations on the file system, these stubs will use the + // DEVELOPER_DIR and SDKROOT environment variables to determine which + // copy of Xcode to use. + // + // Because the location at which Xcode is installed may differ between + // local and remote execution environments, Bazel does not set the + // DEVELOPER_DIR and SDKROOT environment variables directly. Instead, + // it sets environment variables APPLE_SDK_PLATFORM, + // APPLE_SDK_VERSION_OVERRIDE and XCODE_VERSION_OVERRIDE. The remote + // worker is responsible for interpreting these environment variables + // and resolving those to the location of the intended copy of Xcode + // and the desired SDK. + // + // This option can be used to specify how values of + // XCODE_VERSION_OVERRIDE translate to values of DEVELOPER_DIR. The + // value of SDKROOT is obtained by invoking the xcrun utility with + // DEVELOPER_DIR set. 
+ // + // Example: + // + // { + // "12.5.1.12E507": "/Applications/Xcode12.app/Contents/Developer", + // "13.4.1.13F100": "/Applications/Xcode13.app/Contents/Developer", + // "14.2.0.14C18": "/Applications/Xcode14.app/Contents/Developer" + // } + // + // More details: + // https://blog.bazel.build/2020/02/26/xcode-selection.html + // https://github.com/bazelbuild/bazel/blob/master/src/main/java/com/google/devtools/build/lib/exec/local/XcodeLocalEnvProvider.java + // https://www.smileykeith.com/2021/03/08/locking-xcode-in-bazel/ + map apple_xcode_developer_directories = 14; +} diff --git a/pkg/proto/configuration/bb_scheduler/BUILD.bazel b/pkg/proto/configuration/bb_scheduler/BUILD.bazel new file mode 100644 index 0000000..4e0d23b --- /dev/null +++ b/pkg/proto/configuration/bb_scheduler/BUILD.bazel @@ -0,0 +1,42 @@ +load("@rules_proto//proto:defs.bzl", "proto_library") +load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") + +proto_library( + name = "bb_scheduler_proto", + srcs = ["bb_scheduler.proto"], + visibility = ["//visibility:public"], + deps = [ + "//pkg/proto/configuration/scheduler:scheduler_proto", + "@com_github_bazelbuild_remote_apis//build/bazel/remote/execution/v2:remote_execution_proto", + "@com_github_buildbarn_bb_storage//pkg/proto/configuration/auth:auth_proto", + "@com_github_buildbarn_bb_storage//pkg/proto/configuration/blobstore:blobstore_proto", + "@com_github_buildbarn_bb_storage//pkg/proto/configuration/global:global_proto", + "@com_github_buildbarn_bb_storage//pkg/proto/configuration/grpc:grpc_proto", + "@com_github_buildbarn_bb_storage//pkg/proto/configuration/http:http_proto", + "@com_google_protobuf//:duration_proto", + ], +) + +go_proto_library( + name = "bb_scheduler_go_proto", + importpath = "github.com/buildbarn/bb-remote-execution/pkg/proto/configuration/bb_scheduler", + proto = ":bb_scheduler_proto", + visibility = ["//visibility:public"], + deps = [ + 
"//pkg/proto/configuration/scheduler", + "@com_github_bazelbuild_remote_apis//build/bazel/remote/execution/v2:execution", + "@com_github_buildbarn_bb_storage//pkg/proto/configuration/auth", + "@com_github_buildbarn_bb_storage//pkg/proto/configuration/blobstore", + "@com_github_buildbarn_bb_storage//pkg/proto/configuration/global", + "@com_github_buildbarn_bb_storage//pkg/proto/configuration/grpc", + "@com_github_buildbarn_bb_storage//pkg/proto/configuration/http", + ], +) + +go_library( + name = "bb_scheduler", + embed = [":bb_scheduler_go_proto"], + importpath = "github.com/buildbarn/bb-remote-execution/pkg/proto/configuration/bb_scheduler", + visibility = ["//visibility:public"], +) diff --git a/pkg/proto/configuration/bb_scheduler/bb_scheduler.pb.go b/pkg/proto/configuration/bb_scheduler/bb_scheduler.pb.go new file mode 100644 index 0000000..1a4d86a --- /dev/null +++ b/pkg/proto/configuration/bb_scheduler/bb_scheduler.pb.go @@ -0,0 +1,553 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.31.0 +// protoc v4.23.4 +// source: pkg/proto/configuration/bb_scheduler/bb_scheduler.proto + +package bb_scheduler + +import ( + v2 "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + scheduler "github.com/buildbarn/bb-remote-execution/pkg/proto/configuration/scheduler" + auth "github.com/buildbarn/bb-storage/pkg/proto/configuration/auth" + blobstore "github.com/buildbarn/bb-storage/pkg/proto/configuration/blobstore" + global "github.com/buildbarn/bb-storage/pkg/proto/configuration/global" + grpc "github.com/buildbarn/bb-storage/pkg/proto/configuration/grpc" + http "github.com/buildbarn/bb-storage/pkg/proto/configuration/http" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type ApplicationConfiguration struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AdminHttpServers []*http.ServerConfiguration `protobuf:"bytes,19,rep,name=admin_http_servers,json=adminHttpServers,proto3" json:"admin_http_servers,omitempty"` + AdminRoutePrefix string `protobuf:"bytes,22,opt,name=admin_route_prefix,json=adminRoutePrefix,proto3" json:"admin_route_prefix,omitempty"` + ClientGrpcServers []*grpc.ServerConfiguration `protobuf:"bytes,3,rep,name=client_grpc_servers,json=clientGrpcServers,proto3" json:"client_grpc_servers,omitempty"` + WorkerGrpcServers []*grpc.ServerConfiguration `protobuf:"bytes,4,rep,name=worker_grpc_servers,json=workerGrpcServers,proto3" json:"worker_grpc_servers,omitempty"` + BrowserUrl string `protobuf:"bytes,5,opt,name=browser_url,json=browserUrl,proto3" json:"browser_url,omitempty"` + ContentAddressableStorage *blobstore.BlobAccessConfiguration `protobuf:"bytes,6,opt,name=content_addressable_storage,json=contentAddressableStorage,proto3" json:"content_addressable_storage,omitempty"` + MaximumMessageSizeBytes int64 `protobuf:"varint,7,opt,name=maximum_message_size_bytes,json=maximumMessageSizeBytes,proto3" json:"maximum_message_size_bytes,omitempty"` + Global *global.Configuration `protobuf:"bytes,8,opt,name=global,proto3" json:"global,omitempty"` + BuildQueueStateGrpcServers []*grpc.ServerConfiguration `protobuf:"bytes,11,rep,name=build_queue_state_grpc_servers,json=buildQueueStateGrpcServers,proto3" json:"build_queue_state_grpc_servers,omitempty"` + PredeclaredPlatformQueues []*PredeclaredPlatformQueueConfiguration `protobuf:"bytes,12,rep,name=predeclared_platform_queues,json=predeclaredPlatformQueues,proto3" json:"predeclared_platform_queues,omitempty"` + ExecuteAuthorizer 
*auth.AuthorizerConfiguration `protobuf:"bytes,15,opt,name=execute_authorizer,json=executeAuthorizer,proto3" json:"execute_authorizer,omitempty"` + ModifyDrainsAuthorizer *auth.AuthorizerConfiguration `protobuf:"bytes,20,opt,name=modify_drains_authorizer,json=modifyDrainsAuthorizer,proto3" json:"modify_drains_authorizer,omitempty"` + KillOperationsAuthorizer *auth.AuthorizerConfiguration `protobuf:"bytes,21,opt,name=kill_operations_authorizer,json=killOperationsAuthorizer,proto3" json:"kill_operations_authorizer,omitempty"` + ActionRouter *scheduler.ActionRouterConfiguration `protobuf:"bytes,16,opt,name=action_router,json=actionRouter,proto3" json:"action_router,omitempty"` + InitialSizeClassCache *blobstore.BlobAccessConfiguration `protobuf:"bytes,17,opt,name=initial_size_class_cache,json=initialSizeClassCache,proto3" json:"initial_size_class_cache,omitempty"` + PlatformQueueWithNoWorkersTimeout *durationpb.Duration `protobuf:"bytes,18,opt,name=platform_queue_with_no_workers_timeout,json=platformQueueWithNoWorkersTimeout,proto3" json:"platform_queue_with_no_workers_timeout,omitempty"` +} + +func (x *ApplicationConfiguration) Reset() { + *x = ApplicationConfiguration{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_configuration_bb_scheduler_bb_scheduler_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ApplicationConfiguration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ApplicationConfiguration) ProtoMessage() {} + +func (x *ApplicationConfiguration) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_configuration_bb_scheduler_bb_scheduler_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ApplicationConfiguration.ProtoReflect.Descriptor instead. 
+func (*ApplicationConfiguration) Descriptor() ([]byte, []int) { + return file_pkg_proto_configuration_bb_scheduler_bb_scheduler_proto_rawDescGZIP(), []int{0} +} + +func (x *ApplicationConfiguration) GetAdminHttpServers() []*http.ServerConfiguration { + if x != nil { + return x.AdminHttpServers + } + return nil +} + +func (x *ApplicationConfiguration) GetAdminRoutePrefix() string { + if x != nil { + return x.AdminRoutePrefix + } + return "" +} + +func (x *ApplicationConfiguration) GetClientGrpcServers() []*grpc.ServerConfiguration { + if x != nil { + return x.ClientGrpcServers + } + return nil +} + +func (x *ApplicationConfiguration) GetWorkerGrpcServers() []*grpc.ServerConfiguration { + if x != nil { + return x.WorkerGrpcServers + } + return nil +} + +func (x *ApplicationConfiguration) GetBrowserUrl() string { + if x != nil { + return x.BrowserUrl + } + return "" +} + +func (x *ApplicationConfiguration) GetContentAddressableStorage() *blobstore.BlobAccessConfiguration { + if x != nil { + return x.ContentAddressableStorage + } + return nil +} + +func (x *ApplicationConfiguration) GetMaximumMessageSizeBytes() int64 { + if x != nil { + return x.MaximumMessageSizeBytes + } + return 0 +} + +func (x *ApplicationConfiguration) GetGlobal() *global.Configuration { + if x != nil { + return x.Global + } + return nil +} + +func (x *ApplicationConfiguration) GetBuildQueueStateGrpcServers() []*grpc.ServerConfiguration { + if x != nil { + return x.BuildQueueStateGrpcServers + } + return nil +} + +func (x *ApplicationConfiguration) GetPredeclaredPlatformQueues() []*PredeclaredPlatformQueueConfiguration { + if x != nil { + return x.PredeclaredPlatformQueues + } + return nil +} + +func (x *ApplicationConfiguration) GetExecuteAuthorizer() *auth.AuthorizerConfiguration { + if x != nil { + return x.ExecuteAuthorizer + } + return nil +} + +func (x *ApplicationConfiguration) GetModifyDrainsAuthorizer() *auth.AuthorizerConfiguration { + if x != nil { + return x.ModifyDrainsAuthorizer + } 
+ return nil +} + +func (x *ApplicationConfiguration) GetKillOperationsAuthorizer() *auth.AuthorizerConfiguration { + if x != nil { + return x.KillOperationsAuthorizer + } + return nil +} + +func (x *ApplicationConfiguration) GetActionRouter() *scheduler.ActionRouterConfiguration { + if x != nil { + return x.ActionRouter + } + return nil +} + +func (x *ApplicationConfiguration) GetInitialSizeClassCache() *blobstore.BlobAccessConfiguration { + if x != nil { + return x.InitialSizeClassCache + } + return nil +} + +func (x *ApplicationConfiguration) GetPlatformQueueWithNoWorkersTimeout() *durationpb.Duration { + if x != nil { + return x.PlatformQueueWithNoWorkersTimeout + } + return nil +} + +type PredeclaredPlatformQueueConfiguration struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + InstanceNamePrefix string `protobuf:"bytes,1,opt,name=instance_name_prefix,json=instanceNamePrefix,proto3" json:"instance_name_prefix,omitempty"` + Platform *v2.Platform `protobuf:"bytes,2,opt,name=platform,proto3" json:"platform,omitempty"` + MaximumSizeClass uint32 `protobuf:"varint,3,opt,name=maximum_size_class,json=maximumSizeClass,proto3" json:"maximum_size_class,omitempty"` + WorkerInvocationStickinessLimits []*durationpb.Duration `protobuf:"bytes,5,rep,name=worker_invocation_stickiness_limits,json=workerInvocationStickinessLimits,proto3" json:"worker_invocation_stickiness_limits,omitempty"` + MaximumQueuedBackgroundLearningOperations int32 `protobuf:"varint,6,opt,name=maximum_queued_background_learning_operations,json=maximumQueuedBackgroundLearningOperations,proto3" json:"maximum_queued_background_learning_operations,omitempty"` + BackgroundLearningOperationPriority int32 `protobuf:"varint,7,opt,name=background_learning_operation_priority,json=backgroundLearningOperationPriority,proto3" json:"background_learning_operation_priority,omitempty"` +} + +func (x *PredeclaredPlatformQueueConfiguration) Reset() { + *x = 
PredeclaredPlatformQueueConfiguration{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_configuration_bb_scheduler_bb_scheduler_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PredeclaredPlatformQueueConfiguration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PredeclaredPlatformQueueConfiguration) ProtoMessage() {} + +func (x *PredeclaredPlatformQueueConfiguration) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_configuration_bb_scheduler_bb_scheduler_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PredeclaredPlatformQueueConfiguration.ProtoReflect.Descriptor instead. +func (*PredeclaredPlatformQueueConfiguration) Descriptor() ([]byte, []int) { + return file_pkg_proto_configuration_bb_scheduler_bb_scheduler_proto_rawDescGZIP(), []int{1} +} + +func (x *PredeclaredPlatformQueueConfiguration) GetInstanceNamePrefix() string { + if x != nil { + return x.InstanceNamePrefix + } + return "" +} + +func (x *PredeclaredPlatformQueueConfiguration) GetPlatform() *v2.Platform { + if x != nil { + return x.Platform + } + return nil +} + +func (x *PredeclaredPlatformQueueConfiguration) GetMaximumSizeClass() uint32 { + if x != nil { + return x.MaximumSizeClass + } + return 0 +} + +func (x *PredeclaredPlatformQueueConfiguration) GetWorkerInvocationStickinessLimits() []*durationpb.Duration { + if x != nil { + return x.WorkerInvocationStickinessLimits + } + return nil +} + +func (x *PredeclaredPlatformQueueConfiguration) GetMaximumQueuedBackgroundLearningOperations() int32 { + if x != nil { + return x.MaximumQueuedBackgroundLearningOperations + } + return 0 +} + +func (x *PredeclaredPlatformQueueConfiguration) GetBackgroundLearningOperationPriority() int32 { + if x != nil 
{ + return x.BackgroundLearningOperationPriority + } + return 0 +} + +var File_pkg_proto_configuration_bb_scheduler_bb_scheduler_proto protoreflect.FileDescriptor + +var file_pkg_proto_configuration_bb_scheduler_bb_scheduler_proto_rawDesc = []byte{ + 0x0a, 0x37, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x62, 0x62, 0x5f, 0x73, 0x63, 0x68, + 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x2f, 0x62, 0x62, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, + 0x6c, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x24, 0x62, 0x75, 0x69, 0x6c, 0x64, + 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x62, 0x62, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x1a, + 0x36, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x2f, 0x62, 0x61, 0x7a, 0x65, 0x6c, 0x2f, 0x72, 0x65, 0x6d, + 0x6f, 0x74, 0x65, 0x2f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x76, 0x32, + 0x2f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, + 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x27, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x31, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x62, 0x6c, 0x6f, 0x62, 0x73, 0x74, + 0x6f, 0x72, 0x65, 0x2f, 0x62, 0x6c, 0x6f, 0x62, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x2b, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 
0x6f, 0x2f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x67, 0x6c, 0x6f, + 0x62, 0x61, 0x6c, 0x2f, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x27, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x67, + 0x72, 0x70, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x27, 0x70, 0x6b, 0x67, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x31, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x73, 0x63, 0x68, 0x65, + 0x64, 0x75, 0x6c, 0x65, 0x72, 0x2f, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc5, 0x0c, 0x0a, 0x18, 0x41, 0x70, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x5f, 0x0a, 0x12, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x5f, 0x68, 0x74, 0x74, 0x70, + 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x18, 0x13, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x31, + 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x2e, 0x53, 0x65, + 0x72, 0x76, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x10, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x48, 0x74, 0x74, 0x70, 0x53, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x5f, 0x72, 0x6f, 0x75, + 0x74, 0x65, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x16, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x10, 0x61, 0x64, 
0x6d, 0x69, 0x6e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x50, 0x72, 0x65, 0x66, 0x69, + 0x78, 0x12, 0x61, 0x0a, 0x13, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x67, 0x72, 0x70, 0x63, + 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x31, + 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x65, + 0x72, 0x76, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x11, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x73, 0x12, 0x61, 0x0a, 0x13, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x5f, 0x67, + 0x72, 0x70, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x31, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x47, 0x72, 0x70, 0x63, + 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x62, 0x72, 0x6f, 0x77, 0x73, + 0x65, 0x72, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x62, 0x72, + 0x6f, 0x77, 0x73, 0x65, 0x72, 0x55, 0x72, 0x6c, 0x12, 0x7a, 0x0a, 0x1b, 0x63, 0x6f, 0x6e, 0x74, + 0x65, 0x6e, 0x74, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, + 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x62, 0x6c, 0x6f, 0x62, 0x73, 0x74, 0x6f, 0x72, + 0x65, 0x2e, 0x42, 0x6c, 0x6f, 0x62, 0x41, 0x63, 0x63, 
0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x19, 0x63, 0x6f, 0x6e, 0x74, 0x65, + 0x6e, 0x74, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x12, 0x3b, 0x0a, 0x1a, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x5f, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x62, 0x79, 0x74, + 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, 0x17, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, + 0x6d, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x42, 0x79, 0x74, 0x65, + 0x73, 0x12, 0x45, 0x0a, 0x06, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x18, 0x08, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2d, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x67, 0x6c, 0x6f, 0x62, + 0x61, 0x6c, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x06, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x12, 0x75, 0x0a, 0x1e, 0x62, 0x75, 0x69, 0x6c, + 0x64, 0x5f, 0x71, 0x75, 0x65, 0x75, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x67, 0x72, + 0x70, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x31, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, + 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x1a, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x51, 0x75, 0x65, 0x75, 0x65, 0x53, + 0x74, 0x61, 0x74, 0x65, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x12, + 0x8b, 0x01, 0x0a, 0x1b, 0x70, 0x72, 0x65, 0x64, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x65, 0x64, 0x5f, + 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x5f, 0x71, 0x75, 0x65, 0x75, 0x65, 0x73, 
0x18, + 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x4b, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, + 0x6e, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x62, 0x62, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x2e, 0x50, 0x72, 0x65, + 0x64, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x65, 0x64, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, + 0x51, 0x75, 0x65, 0x75, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x19, 0x70, 0x72, 0x65, 0x64, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x65, 0x64, 0x50, + 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x51, 0x75, 0x65, 0x75, 0x65, 0x73, 0x12, 0x64, 0x0a, + 0x12, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, + 0x7a, 0x65, 0x72, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x62, 0x75, 0x69, 0x6c, + 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x2e, 0x61, 0x75, 0x74, 0x68, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, + 0x7a, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x11, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, + 0x7a, 0x65, 0x72, 0x12, 0x6f, 0x0a, 0x18, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x5f, 0x64, 0x72, + 0x61, 0x69, 0x6e, 0x73, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x72, 0x18, + 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, + 0x6e, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x61, 0x75, 0x74, 0x68, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x72, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x16, 0x6d, 0x6f, + 0x64, 0x69, 0x66, 0x79, 0x44, 0x72, 0x61, 0x69, 0x6e, 0x73, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, + 0x69, 0x7a, 0x65, 0x72, 0x12, 
0x73, 0x0a, 0x1a, 0x6b, 0x69, 0x6c, 0x6c, 0x5f, 0x6f, 0x70, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, + 0x65, 0x72, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, + 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x61, 0x75, 0x74, 0x68, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, + 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x18, 0x6b, 0x69, 0x6c, 0x6c, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x41, + 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x72, 0x12, 0x61, 0x0a, 0x0d, 0x61, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x3c, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x73, 0x63, 0x68, 0x65, 0x64, + 0x75, 0x6c, 0x65, 0x72, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x6f, 0x75, 0x74, 0x65, + 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, + 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x12, 0x73, 0x0a, 0x18, + 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x63, 0x6c, 0x61, + 0x73, 0x73, 0x5f, 0x63, 0x61, 0x63, 0x68, 0x65, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, + 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x62, 0x6c, 0x6f, 0x62, 0x73, 0x74, 0x6f, + 0x72, 0x65, 0x2e, 0x42, 0x6c, 0x6f, 0x62, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x15, 0x69, 0x6e, 0x69, 0x74, + 0x69, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65, 0x43, 0x6c, 0x61, 0x73, 
0x73, 0x43, 0x61, 0x63, 0x68, + 0x65, 0x12, 0x6c, 0x0a, 0x26, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x5f, 0x71, 0x75, + 0x65, 0x75, 0x65, 0x5f, 0x77, 0x69, 0x74, 0x68, 0x5f, 0x6e, 0x6f, 0x5f, 0x77, 0x6f, 0x72, 0x6b, + 0x65, 0x72, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x12, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x21, 0x70, 0x6c, + 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x51, 0x75, 0x65, 0x75, 0x65, 0x57, 0x69, 0x74, 0x68, 0x4e, + 0x6f, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x4a, + 0x04, 0x08, 0x02, 0x10, 0x03, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x4a, 0x04, 0x08, 0x0a, 0x10, + 0x0b, 0x4a, 0x04, 0x08, 0x0d, 0x10, 0x0e, 0x4a, 0x04, 0x08, 0x0e, 0x10, 0x0f, 0x22, 0xf5, 0x03, + 0x0a, 0x25, 0x50, 0x72, 0x65, 0x64, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x65, 0x64, 0x50, 0x6c, 0x61, + 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x51, 0x75, 0x65, 0x75, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x30, 0x0a, 0x14, 0x69, 0x6e, 0x73, 0x74, 0x61, + 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x4e, + 0x61, 0x6d, 0x65, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x45, 0x0a, 0x08, 0x70, 0x6c, 0x61, + 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x62, 0x75, + 0x69, 0x6c, 0x64, 0x2e, 0x62, 0x61, 0x7a, 0x65, 0x6c, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, + 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x6c, + 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x52, 0x08, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, + 0x12, 0x2c, 0x0a, 0x12, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x73, 0x69, 0x7a, 0x65, + 0x5f, 
0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x10, 0x6d, 0x61, + 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x53, 0x69, 0x7a, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x68, + 0x0a, 0x23, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x5f, 0x69, 0x6e, 0x76, 0x6f, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x69, 0x63, 0x6b, 0x69, 0x6e, 0x65, 0x73, 0x73, 0x5f, 0x6c, + 0x69, 0x6d, 0x69, 0x74, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x20, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x6e, + 0x76, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x69, 0x63, 0x6b, 0x69, 0x6e, 0x65, + 0x73, 0x73, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x73, 0x12, 0x60, 0x0a, 0x2d, 0x6d, 0x61, 0x78, 0x69, + 0x6d, 0x75, 0x6d, 0x5f, 0x71, 0x75, 0x65, 0x75, 0x65, 0x64, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x67, + 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x6c, 0x65, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x6f, + 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x29, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x51, 0x75, 0x65, 0x75, 0x65, 0x64, 0x42, 0x61, + 0x63, 0x6b, 0x67, 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x4c, 0x65, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, + 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x53, 0x0a, 0x26, 0x62, 0x61, + 0x63, 0x6b, 0x67, 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x6c, 0x65, 0x61, 0x72, 0x6e, 0x69, 0x6e, + 0x67, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x69, 0x6f, + 0x72, 0x69, 0x74, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x52, 0x23, 0x62, 0x61, 0x63, 0x6b, + 0x67, 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x4c, 0x65, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x4f, 0x70, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x4a, + 0x04, 0x08, 0x04, 0x10, 0x05, 0x42, 0x4f, 
0x5a, 0x4d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2f, 0x62, 0x62, + 0x2d, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2d, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, + 0x6e, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x62, 0x62, 0x5f, 0x73, 0x63, 0x68, + 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_pkg_proto_configuration_bb_scheduler_bb_scheduler_proto_rawDescOnce sync.Once + file_pkg_proto_configuration_bb_scheduler_bb_scheduler_proto_rawDescData = file_pkg_proto_configuration_bb_scheduler_bb_scheduler_proto_rawDesc +) + +func file_pkg_proto_configuration_bb_scheduler_bb_scheduler_proto_rawDescGZIP() []byte { + file_pkg_proto_configuration_bb_scheduler_bb_scheduler_proto_rawDescOnce.Do(func() { + file_pkg_proto_configuration_bb_scheduler_bb_scheduler_proto_rawDescData = protoimpl.X.CompressGZIP(file_pkg_proto_configuration_bb_scheduler_bb_scheduler_proto_rawDescData) + }) + return file_pkg_proto_configuration_bb_scheduler_bb_scheduler_proto_rawDescData +} + +var file_pkg_proto_configuration_bb_scheduler_bb_scheduler_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_pkg_proto_configuration_bb_scheduler_bb_scheduler_proto_goTypes = []interface{}{ + (*ApplicationConfiguration)(nil), // 0: buildbarn.configuration.bb_scheduler.ApplicationConfiguration + (*PredeclaredPlatformQueueConfiguration)(nil), // 1: buildbarn.configuration.bb_scheduler.PredeclaredPlatformQueueConfiguration + (*http.ServerConfiguration)(nil), // 2: buildbarn.configuration.http.ServerConfiguration + (*grpc.ServerConfiguration)(nil), // 3: buildbarn.configuration.grpc.ServerConfiguration + (*blobstore.BlobAccessConfiguration)(nil), // 4: buildbarn.configuration.blobstore.BlobAccessConfiguration + (*global.Configuration)(nil), // 5: 
buildbarn.configuration.global.Configuration + (*auth.AuthorizerConfiguration)(nil), // 6: buildbarn.configuration.auth.AuthorizerConfiguration + (*scheduler.ActionRouterConfiguration)(nil), // 7: buildbarn.configuration.scheduler.ActionRouterConfiguration + (*durationpb.Duration)(nil), // 8: google.protobuf.Duration + (*v2.Platform)(nil), // 9: build.bazel.remote.execution.v2.Platform +} +var file_pkg_proto_configuration_bb_scheduler_bb_scheduler_proto_depIdxs = []int32{ + 2, // 0: buildbarn.configuration.bb_scheduler.ApplicationConfiguration.admin_http_servers:type_name -> buildbarn.configuration.http.ServerConfiguration + 3, // 1: buildbarn.configuration.bb_scheduler.ApplicationConfiguration.client_grpc_servers:type_name -> buildbarn.configuration.grpc.ServerConfiguration + 3, // 2: buildbarn.configuration.bb_scheduler.ApplicationConfiguration.worker_grpc_servers:type_name -> buildbarn.configuration.grpc.ServerConfiguration + 4, // 3: buildbarn.configuration.bb_scheduler.ApplicationConfiguration.content_addressable_storage:type_name -> buildbarn.configuration.blobstore.BlobAccessConfiguration + 5, // 4: buildbarn.configuration.bb_scheduler.ApplicationConfiguration.global:type_name -> buildbarn.configuration.global.Configuration + 3, // 5: buildbarn.configuration.bb_scheduler.ApplicationConfiguration.build_queue_state_grpc_servers:type_name -> buildbarn.configuration.grpc.ServerConfiguration + 1, // 6: buildbarn.configuration.bb_scheduler.ApplicationConfiguration.predeclared_platform_queues:type_name -> buildbarn.configuration.bb_scheduler.PredeclaredPlatformQueueConfiguration + 6, // 7: buildbarn.configuration.bb_scheduler.ApplicationConfiguration.execute_authorizer:type_name -> buildbarn.configuration.auth.AuthorizerConfiguration + 6, // 8: buildbarn.configuration.bb_scheduler.ApplicationConfiguration.modify_drains_authorizer:type_name -> buildbarn.configuration.auth.AuthorizerConfiguration + 6, // 9: 
buildbarn.configuration.bb_scheduler.ApplicationConfiguration.kill_operations_authorizer:type_name -> buildbarn.configuration.auth.AuthorizerConfiguration + 7, // 10: buildbarn.configuration.bb_scheduler.ApplicationConfiguration.action_router:type_name -> buildbarn.configuration.scheduler.ActionRouterConfiguration + 4, // 11: buildbarn.configuration.bb_scheduler.ApplicationConfiguration.initial_size_class_cache:type_name -> buildbarn.configuration.blobstore.BlobAccessConfiguration + 8, // 12: buildbarn.configuration.bb_scheduler.ApplicationConfiguration.platform_queue_with_no_workers_timeout:type_name -> google.protobuf.Duration + 9, // 13: buildbarn.configuration.bb_scheduler.PredeclaredPlatformQueueConfiguration.platform:type_name -> build.bazel.remote.execution.v2.Platform + 8, // 14: buildbarn.configuration.bb_scheduler.PredeclaredPlatformQueueConfiguration.worker_invocation_stickiness_limits:type_name -> google.protobuf.Duration + 15, // [15:15] is the sub-list for method output_type + 15, // [15:15] is the sub-list for method input_type + 15, // [15:15] is the sub-list for extension type_name + 15, // [15:15] is the sub-list for extension extendee + 0, // [0:15] is the sub-list for field type_name +} + +func init() { file_pkg_proto_configuration_bb_scheduler_bb_scheduler_proto_init() } +func file_pkg_proto_configuration_bb_scheduler_bb_scheduler_proto_init() { + if File_pkg_proto_configuration_bb_scheduler_bb_scheduler_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_pkg_proto_configuration_bb_scheduler_bb_scheduler_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ApplicationConfiguration); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_proto_configuration_bb_scheduler_bb_scheduler_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PredeclaredPlatformQueueConfiguration); i 
{ + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_pkg_proto_configuration_bb_scheduler_bb_scheduler_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_pkg_proto_configuration_bb_scheduler_bb_scheduler_proto_goTypes, + DependencyIndexes: file_pkg_proto_configuration_bb_scheduler_bb_scheduler_proto_depIdxs, + MessageInfos: file_pkg_proto_configuration_bb_scheduler_bb_scheduler_proto_msgTypes, + }.Build() + File_pkg_proto_configuration_bb_scheduler_bb_scheduler_proto = out.File + file_pkg_proto_configuration_bb_scheduler_bb_scheduler_proto_rawDesc = nil + file_pkg_proto_configuration_bb_scheduler_bb_scheduler_proto_goTypes = nil + file_pkg_proto_configuration_bb_scheduler_bb_scheduler_proto_depIdxs = nil +} diff --git a/pkg/proto/configuration/bb_scheduler/bb_scheduler.proto b/pkg/proto/configuration/bb_scheduler/bb_scheduler.proto new file mode 100644 index 0000000..67440c6 --- /dev/null +++ b/pkg/proto/configuration/bb_scheduler/bb_scheduler.proto @@ -0,0 +1,256 @@ +syntax = "proto3"; + +package buildbarn.configuration.bb_scheduler; + +import "build/bazel/remote/execution/v2/remote_execution.proto"; +import "google/protobuf/duration.proto"; +import "pkg/proto/configuration/auth/auth.proto"; +import "pkg/proto/configuration/blobstore/blobstore.proto"; +import "pkg/proto/configuration/global/global.proto"; +import "pkg/proto/configuration/grpc/grpc.proto"; +import "pkg/proto/configuration/http/http.proto"; +import "pkg/proto/configuration/scheduler/scheduler.proto"; + +option go_package = "github.com/buildbarn/bb-remote-execution/pkg/proto/configuration/bb_scheduler"; + +message ApplicationConfiguration { + // Was 'admin_http_listen_address'. 
The listen address of the HTTP + // server can now be configured through 'admin_http_servers'. + reserved 2; + + // Configuration for the HTTP servers that expose the web UI. + // + // TODO: This web UI no longer needs to be integrated into + // bb_scheduler, as the underlying information can be exposed over + // gRPC using the 'build_queue_state_grpc_servers' option. + repeated buildbarn.configuration.http.ServerConfiguration admin_http_servers = + 19; + + // The path under which the administrative web UI needs to be exposed. + // When left empty, the web UI will be exposed at "/". + string admin_route_prefix = 22; + + // gRPC servers to spawn to listen for requests from clients + // (bb_storage or Bazel). + repeated buildbarn.configuration.grpc.ServerConfiguration + client_grpc_servers = 3; + + // gRPC servers to spawn to listen for requests from bb_worker. + repeated buildbarn.configuration.grpc.ServerConfiguration + worker_grpc_servers = 4; + + // URL of the Buildbarn Browser, linked to by the web UI. + string browser_url = 5; + + // Configuration for blob storage. + buildbarn.configuration.blobstore.BlobAccessConfiguration + content_addressable_storage = 6; + + // Maximum Protobuf message size to unmarshal. + int64 maximum_message_size_bytes = 7; + + // Common configuration options that apply to all Buildbarn binaries. + buildbarn.configuration.global.Configuration global = 8; + + // Was 'aws_asg_lifecycle_hooks' and 'aws_session'. This functionality + // has now moved into bb_asg_lifecycle_hook, which is part of the + // bb-autoscaler project. + reserved 9, 10; + + // gRPC servers to spawn to expose the state of the build queue. This + // can be used to obtain access to the data shown in the web UI in a + // programmatic manner. 
+ repeated buildbarn.configuration.grpc.ServerConfiguration + build_queue_state_grpc_servers = 11; + + // Create platform queues that are always present, regardless of + // whether there are workers synchronizing against the scheduler. + // + // It is required to use this option to create platform queues that + // support multiple worker size classes. + repeated PredeclaredPlatformQueueConfiguration predeclared_platform_queues = + 12; + + // Was 'default_execution_timeout'. This setting is now configured + // through 'action_router'. + reserved 13; + + // Was 'maximum_execution_timeout'. This setting is now configured + // through 'action_router'. + reserved 14; + + // Authorization requirements to be enforced for Execute and WaitExecution + // requests. + buildbarn.configuration.auth.AuthorizerConfiguration execute_authorizer = 15; + + // Authorization requirements to be enforced for AddDrain and + // RemoveDrain requests issued through the BuildQueueState gRPC + // servers and web UI. + // + // The instance name to be matched is the instance name prefix of the + // size class queue to which drains are added, or from which drains + // are removed. + buildbarn.configuration.auth.AuthorizerConfiguration + modify_drains_authorizer = 20; + + // Authorization requirements to be enforced for KillOperations + // RemoveDrain requests issued through the BuildQueueState gRPC + // servers and web UI. + // + // The instance name to be matched is the instance name prefix of the + // size class queue containing the operation. Not the instance name + // used by the operation itself. + buildbarn.configuration.auth.AuthorizerConfiguration + kill_operations_authorizer = 21; + + // The policy for routing actions. + // + // Before the scheduler is capable of enqueueing an action, it must + // extract some properties from the incoming execution request: + // + // - The platform on which the action has to run. 
+ // - The invocation to which the execution request belongs, so that + // may be scheduled fairly with respect to actions queued by other + // users. + // - The execution timeout that needs to be applied. + // - If the platform has workers partitioned across multiple size + // classes, it must choose a size class on which execution is + // attempted initially. + // + // The action router is the subsystem that is responsible for + // extracting these properties. + // + // It is possible to write fairly complex action router configurations + // (e.g., ones that apply different policies between platforms, or + // reroute requests from one platform to another). However, for + // typical setups the configuration can remain fairly simple. It is + // recommended that a configuration like the one below is used as a + // starting point: + // + // { + // simple: { + // platformKeyExtractor: { actionAndCommand: {} }, + // invocationKeyExtractors: [{ toolInvocationId: {} }], + // initialSizeClassAnalyzer: { + // defaultExecutionTimeout: '1800s', + // maximumExecutionTimeout: '7200s', + // }, + // }, + // } + buildbarn.configuration.scheduler.ActionRouterConfiguration action_router = + 16; + + // Optional: The Initial Size Class Cache (ISCC) where execution times + // of actions are read and written. + // + // This option only needs to be set if one or feedback driven + // analyzers are configured through 'action_router'. + buildbarn.configuration.blobstore.BlobAccessConfiguration + initial_size_class_cache = 17; + + // Platform queues are removed when no workers have been present + // during this time period. + // + // Recommended value: 900s + google.protobuf.Duration platform_queue_with_no_workers_timeout = 18; +} + +message PredeclaredPlatformQueueConfiguration { + // The instance name prefix of the platform queue to create. + string instance_name_prefix = 1; + + // The platform properties of the platform queue to create. 
+ build.bazel.remote.execution.v2.Platform platform = 2; + + // The maximum size class for which workers exist for this platform + // queue. All actions that fail on smaller size classes will be + // retried on workers of this size class. + uint32 maximum_size_class = 3; + + // Was 'initial_size_class_feedback_driven_analyzer'. This option has + // moved into ApplicationConfiguration.action_router. + reserved 4; + + // Allow workers to continue to execute actions from the same + // invocation up to given amount of time, if doing so keeps the number + // of workers assigned to an invocation balanced. It is worth setting + // this option if there is an inherent overhead when switching between + // actions belonging to different invocations. + // + // For example, consider the case where workers are configured to run + // tests inside virtual machines, and that the virtual machine's boot + // image is provided as part of the input root. When actions that use + // the same boot image run right after each other, the existing + // virtual machine may be repurposed. If the boot image is different, + // a costly restart needs to be performed, so that a new image may be + // loaded. By placing actions that use different boot images in + // separate invocations and adding stickiness, the probability of + // needing to do virtual machine restarts decreases. + // + // Because invocations can be nested by using multiple invocation key + // extractors, this field contains a list of durations of stickiness + // to apply at each level. If the number of invocation keys of an + // operation exceeds the configured number of stickiness limits, the + // stickiness limit for the remaining invocation keys is assumed to be + // zero. + // + // This option may require custom implementations of + // invocation.KeyExtractor to be effective. 
+ // + // Recommended value: unset + repeated google.protobuf.Duration worker_invocation_stickiness_limits = 5; + + // NOTE: The option below only has effect when feedback driven initial + // size class analysis is enabled. + // + // There is a small probability that this implementation runs actions + // on size classes even if it is fairly certain that they are + // suboptimal (either too small or too large). This is necessary, as + // without it there is a chance that previous execution statistics + // stored in the ISCC remain permanently outdated. The downside of + // this strategy is that it may cause unnecessary delays, especially + // when attempted against long-running actions that are part of the + // critical path of a build. + // + // To mitigate this, this implementation uses an alternative execution + // strategy in case there is a >50% probability of failure on a + // smaller size class. Instead of first executing the action on the + // smaller size class, followed by falling back to the largest size + // class, it schedules it the other way around. The client is + // unblocked as soon as the execution on the largest size class + // succeeds, while the execution on the smallest size class is + // performed in the background. + // + // To make sure that operations that are merely created to perform + // learning in the background don't starve other builds that are + // taking place, all of them are placed in a single fictive + // invocation, so that fairness is respected. + // + // This option determines the maximum number of background learning + // operations that may be in the QUEUED execution stage, per size + // class. Excessive operations are discarded. Not only is it necessary + // to set this value to ensure that the scheduler doesn't run out of + // memory due to background actions piling up, it can also put a limit + // on how much the cluster is scaled up (in case autoscaling based on + // queue sizes is performed). 
+ // + // Recommended value: 1000 + int32 maximum_queued_background_learning_operations = 6; + + // NOTE: The option below only has effect when feedback driven initial + // size class analysis is enabled. + // + // The REv2 execution priority that needs to be used for background + // learning operations. + // + // bb_scheduler respects REv2 execution priorities by increasing the + // number of actions to run concurrently between invocations by a + // factor 2 for every 100 decrease in priority value (i.e., lower + // priority value means faster builds). + // + // This option determines how aggressively background learning + // operations should be preferred over operations enqueued by clients. + // + // Recommended value: 0 + int32 background_learning_operation_priority = 7; +} diff --git a/pkg/proto/configuration/bb_virtual_tmp/BUILD.bazel b/pkg/proto/configuration/bb_virtual_tmp/BUILD.bazel new file mode 100644 index 0000000..ff36176 --- /dev/null +++ b/pkg/proto/configuration/bb_virtual_tmp/BUILD.bazel @@ -0,0 +1,33 @@ +load("@rules_proto//proto:defs.bzl", "proto_library") +load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") + +proto_library( + name = "bb_virtual_tmp_proto", + srcs = ["bb_virtual_tmp.proto"], + visibility = ["//visibility:public"], + deps = [ + "//pkg/proto/configuration/filesystem/virtual:virtual_proto", + "@com_github_buildbarn_bb_storage//pkg/proto/configuration/global:global_proto", + "@com_github_buildbarn_bb_storage//pkg/proto/configuration/grpc:grpc_proto", + ], +) + +go_proto_library( + name = "bb_virtual_tmp_go_proto", + importpath = "github.com/buildbarn/bb-remote-execution/pkg/proto/configuration/bb_virtual_tmp", + proto = ":bb_virtual_tmp_proto", + visibility = ["//visibility:public"], + deps = [ + "//pkg/proto/configuration/filesystem/virtual", + "@com_github_buildbarn_bb_storage//pkg/proto/configuration/global", + 
"@com_github_buildbarn_bb_storage//pkg/proto/configuration/grpc", + ], +) + +go_library( + name = "bb_virtual_tmp", + embed = [":bb_virtual_tmp_go_proto"], + importpath = "github.com/buildbarn/bb-remote-execution/pkg/proto/configuration/bb_virtual_tmp", + visibility = ["//visibility:public"], +) diff --git a/pkg/proto/configuration/bb_virtual_tmp/bb_virtual_tmp.pb.go b/pkg/proto/configuration/bb_virtual_tmp/bb_virtual_tmp.pb.go new file mode 100644 index 0000000..5bda9c8 --- /dev/null +++ b/pkg/proto/configuration/bb_virtual_tmp/bb_virtual_tmp.pb.go @@ -0,0 +1,211 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.31.0 +// protoc v4.23.4 +// source: pkg/proto/configuration/bb_virtual_tmp/bb_virtual_tmp.proto + +package bb_virtual_tmp + +import ( + virtual "github.com/buildbarn/bb-remote-execution/pkg/proto/configuration/filesystem/virtual" + global "github.com/buildbarn/bb-storage/pkg/proto/configuration/global" + grpc "github.com/buildbarn/bb-storage/pkg/proto/configuration/grpc" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type ApplicationConfiguration struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Global *global.Configuration `protobuf:"bytes,1,opt,name=global,proto3" json:"global,omitempty"` + BuildDirectoryPath string `protobuf:"bytes,2,opt,name=build_directory_path,json=buildDirectoryPath,proto3" json:"build_directory_path,omitempty"` + Mount *virtual.MountConfiguration `protobuf:"bytes,3,opt,name=mount,proto3" json:"mount,omitempty"` + GrpcServers []*grpc.ServerConfiguration `protobuf:"bytes,4,rep,name=grpc_servers,json=grpcServers,proto3" json:"grpc_servers,omitempty"` +} + +func (x *ApplicationConfiguration) Reset() { + *x = ApplicationConfiguration{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_configuration_bb_virtual_tmp_bb_virtual_tmp_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ApplicationConfiguration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ApplicationConfiguration) ProtoMessage() {} + +func (x *ApplicationConfiguration) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_configuration_bb_virtual_tmp_bb_virtual_tmp_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ApplicationConfiguration.ProtoReflect.Descriptor instead. 
+func (*ApplicationConfiguration) Descriptor() ([]byte, []int) { + return file_pkg_proto_configuration_bb_virtual_tmp_bb_virtual_tmp_proto_rawDescGZIP(), []int{0} +} + +func (x *ApplicationConfiguration) GetGlobal() *global.Configuration { + if x != nil { + return x.Global + } + return nil +} + +func (x *ApplicationConfiguration) GetBuildDirectoryPath() string { + if x != nil { + return x.BuildDirectoryPath + } + return "" +} + +func (x *ApplicationConfiguration) GetMount() *virtual.MountConfiguration { + if x != nil { + return x.Mount + } + return nil +} + +func (x *ApplicationConfiguration) GetGrpcServers() []*grpc.ServerConfiguration { + if x != nil { + return x.GrpcServers + } + return nil +} + +var File_pkg_proto_configuration_bb_virtual_tmp_bb_virtual_tmp_proto protoreflect.FileDescriptor + +var file_pkg_proto_configuration_bb_virtual_tmp_bb_virtual_tmp_proto_rawDesc = []byte{ + 0x0a, 0x3b, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x62, 0x62, 0x5f, 0x76, 0x69, 0x72, + 0x74, 0x75, 0x61, 0x6c, 0x5f, 0x74, 0x6d, 0x70, 0x2f, 0x62, 0x62, 0x5f, 0x76, 0x69, 0x72, 0x74, + 0x75, 0x61, 0x6c, 0x5f, 0x74, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x26, 0x62, + 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x62, 0x62, 0x5f, 0x76, 0x69, 0x72, 0x74, 0x75, 0x61, + 0x6c, 0x5f, 0x74, 0x6d, 0x70, 0x1a, 0x38, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x66, + 0x69, 0x6c, 0x65, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x2f, 0x76, 0x69, 0x72, 0x74, 0x75, 0x61, + 0x6c, 0x2f, 0x76, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x2b, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x75, 0x72, 0x61, 
0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2f, + 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x27, 0x70, 0x6b, + 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xbf, 0x02, 0x0a, 0x18, 0x41, 0x70, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x45, 0x0a, 0x06, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x67, 0x6c, 0x6f, + 0x62, 0x61, 0x6c, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x06, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x12, 0x30, 0x0a, 0x14, 0x62, 0x75, 0x69, + 0x6c, 0x64, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x5f, 0x70, 0x61, 0x74, + 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x44, 0x69, + 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x50, 0x61, 0x74, 0x68, 0x12, 0x54, 0x0a, 0x05, 0x6d, + 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x62, 0x75, 0x69, + 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x2e, + 0x76, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x2e, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x05, 0x6d, 0x6f, 0x75, 0x6e, + 0x74, 0x12, 0x54, 0x0a, 0x0c, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x31, 0x2e, 
0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, + 0x61, 0x72, 0x6e, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x67, 0x72, 0x70, 0x63, + 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x42, 0x51, 0x5a, 0x4f, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2f, + 0x62, 0x62, 0x2d, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2d, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, + 0x69, 0x6f, 0x6e, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x62, 0x62, 0x5f, 0x76, + 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x5f, 0x74, 0x6d, 0x70, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_pkg_proto_configuration_bb_virtual_tmp_bb_virtual_tmp_proto_rawDescOnce sync.Once + file_pkg_proto_configuration_bb_virtual_tmp_bb_virtual_tmp_proto_rawDescData = file_pkg_proto_configuration_bb_virtual_tmp_bb_virtual_tmp_proto_rawDesc +) + +func file_pkg_proto_configuration_bb_virtual_tmp_bb_virtual_tmp_proto_rawDescGZIP() []byte { + file_pkg_proto_configuration_bb_virtual_tmp_bb_virtual_tmp_proto_rawDescOnce.Do(func() { + file_pkg_proto_configuration_bb_virtual_tmp_bb_virtual_tmp_proto_rawDescData = protoimpl.X.CompressGZIP(file_pkg_proto_configuration_bb_virtual_tmp_bb_virtual_tmp_proto_rawDescData) + }) + return file_pkg_proto_configuration_bb_virtual_tmp_bb_virtual_tmp_proto_rawDescData +} + +var file_pkg_proto_configuration_bb_virtual_tmp_bb_virtual_tmp_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_pkg_proto_configuration_bb_virtual_tmp_bb_virtual_tmp_proto_goTypes = []interface{}{ + (*ApplicationConfiguration)(nil), // 0: buildbarn.configuration.bb_virtual_tmp.ApplicationConfiguration + 
(*global.Configuration)(nil), // 1: buildbarn.configuration.global.Configuration + (*virtual.MountConfiguration)(nil), // 2: buildbarn.configuration.filesystem.virtual.MountConfiguration + (*grpc.ServerConfiguration)(nil), // 3: buildbarn.configuration.grpc.ServerConfiguration +} +var file_pkg_proto_configuration_bb_virtual_tmp_bb_virtual_tmp_proto_depIdxs = []int32{ + 1, // 0: buildbarn.configuration.bb_virtual_tmp.ApplicationConfiguration.global:type_name -> buildbarn.configuration.global.Configuration + 2, // 1: buildbarn.configuration.bb_virtual_tmp.ApplicationConfiguration.mount:type_name -> buildbarn.configuration.filesystem.virtual.MountConfiguration + 3, // 2: buildbarn.configuration.bb_virtual_tmp.ApplicationConfiguration.grpc_servers:type_name -> buildbarn.configuration.grpc.ServerConfiguration + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_pkg_proto_configuration_bb_virtual_tmp_bb_virtual_tmp_proto_init() } +func file_pkg_proto_configuration_bb_virtual_tmp_bb_virtual_tmp_proto_init() { + if File_pkg_proto_configuration_bb_virtual_tmp_bb_virtual_tmp_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_pkg_proto_configuration_bb_virtual_tmp_bb_virtual_tmp_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ApplicationConfiguration); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_pkg_proto_configuration_bb_virtual_tmp_bb_virtual_tmp_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: 
file_pkg_proto_configuration_bb_virtual_tmp_bb_virtual_tmp_proto_goTypes, + DependencyIndexes: file_pkg_proto_configuration_bb_virtual_tmp_bb_virtual_tmp_proto_depIdxs, + MessageInfos: file_pkg_proto_configuration_bb_virtual_tmp_bb_virtual_tmp_proto_msgTypes, + }.Build() + File_pkg_proto_configuration_bb_virtual_tmp_bb_virtual_tmp_proto = out.File + file_pkg_proto_configuration_bb_virtual_tmp_bb_virtual_tmp_proto_rawDesc = nil + file_pkg_proto_configuration_bb_virtual_tmp_bb_virtual_tmp_proto_goTypes = nil + file_pkg_proto_configuration_bb_virtual_tmp_bb_virtual_tmp_proto_depIdxs = nil +} diff --git a/pkg/proto/configuration/bb_virtual_tmp/bb_virtual_tmp.proto b/pkg/proto/configuration/bb_virtual_tmp/bb_virtual_tmp.proto new file mode 100644 index 0000000..7d697f4 --- /dev/null +++ b/pkg/proto/configuration/bb_virtual_tmp/bb_virtual_tmp.proto @@ -0,0 +1,25 @@ +syntax = "proto3"; + +package buildbarn.configuration.bb_virtual_tmp; + +import "pkg/proto/configuration/filesystem/virtual/virtual.proto"; +import "pkg/proto/configuration/global/global.proto"; +import "pkg/proto/configuration/grpc/grpc.proto"; + +option go_package = "github.com/buildbarn/bb-remote-execution/pkg/proto/configuration/bb_virtual_tmp"; + +message ApplicationConfiguration { + // Common configuration options that apply to all Buildbarn binaries. + buildbarn.configuration.global.Configuration global = 1; + + // The path of the build directory, as managed by bb_worker. + string build_directory_path = 2; + + // The virtual file system to expose, containing the "tmp" symbolic link. + buildbarn.configuration.filesystem.virtual.MountConfiguration mount = 3; + + // The gRPC servers that offer the tmp_installer service, allowing + // instances of bb_runner to configure the target location of the + // temporary directory. 
+ repeated buildbarn.configuration.grpc.ServerConfiguration grpc_servers = 4; +} diff --git a/pkg/proto/configuration/bb_worker/BUILD.bazel b/pkg/proto/configuration/bb_worker/BUILD.bazel new file mode 100644 index 0000000..9ea3f5a --- /dev/null +++ b/pkg/proto/configuration/bb_worker/BUILD.bazel @@ -0,0 +1,46 @@ +load("@rules_proto//proto:defs.bzl", "proto_library") +load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") + +proto_library( + name = "bb_worker_proto", + srcs = ["bb_worker.proto"], + visibility = ["//visibility:public"], + deps = [ + "//pkg/proto/configuration/cas:cas_proto", + "//pkg/proto/configuration/filesystem:filesystem_proto", + "//pkg/proto/configuration/filesystem/virtual:virtual_proto", + "//pkg/proto/resourceusage:resourceusage_proto", + "@com_github_bazelbuild_remote_apis//build/bazel/remote/execution/v2:remote_execution_proto", + "@com_github_buildbarn_bb_storage//pkg/proto/configuration/blobstore:blobstore_proto", + "@com_github_buildbarn_bb_storage//pkg/proto/configuration/eviction:eviction_proto", + "@com_github_buildbarn_bb_storage//pkg/proto/configuration/global:global_proto", + "@com_github_buildbarn_bb_storage//pkg/proto/configuration/grpc:grpc_proto", + "@com_google_protobuf//:duration_proto", + ], +) + +go_proto_library( + name = "bb_worker_go_proto", + importpath = "github.com/buildbarn/bb-remote-execution/pkg/proto/configuration/bb_worker", + proto = ":bb_worker_proto", + visibility = ["//visibility:public"], + deps = [ + "//pkg/proto/configuration/cas", + "//pkg/proto/configuration/filesystem", + "//pkg/proto/configuration/filesystem/virtual", + "//pkg/proto/resourceusage", + "@com_github_bazelbuild_remote_apis//build/bazel/remote/execution/v2:execution", + "@com_github_buildbarn_bb_storage//pkg/proto/configuration/blobstore", + "@com_github_buildbarn_bb_storage//pkg/proto/configuration/eviction", + "@com_github_buildbarn_bb_storage//pkg/proto/configuration/global", 
+ "@com_github_buildbarn_bb_storage//pkg/proto/configuration/grpc", + ], +) + +go_library( + name = "bb_worker", + embed = [":bb_worker_go_proto"], + importpath = "github.com/buildbarn/bb-remote-execution/pkg/proto/configuration/bb_worker", + visibility = ["//visibility:public"], +) diff --git a/pkg/proto/configuration/bb_worker/bb_worker.pb.go b/pkg/proto/configuration/bb_worker/bb_worker.pb.go new file mode 100644 index 0000000..2dd40b0 --- /dev/null +++ b/pkg/proto/configuration/bb_worker/bb_worker.pb.go @@ -0,0 +1,1130 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.31.0 +// protoc v4.23.4 +// source: pkg/proto/configuration/bb_worker/bb_worker.proto + +package bb_worker + +import ( + v2 "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + cas "github.com/buildbarn/bb-remote-execution/pkg/proto/configuration/cas" + filesystem "github.com/buildbarn/bb-remote-execution/pkg/proto/configuration/filesystem" + virtual "github.com/buildbarn/bb-remote-execution/pkg/proto/configuration/filesystem/virtual" + resourceusage "github.com/buildbarn/bb-remote-execution/pkg/proto/resourceusage" + blobstore "github.com/buildbarn/bb-storage/pkg/proto/configuration/blobstore" + eviction "github.com/buildbarn/bb-storage/pkg/proto/configuration/eviction" + global "github.com/buildbarn/bb-storage/pkg/proto/configuration/global" + grpc "github.com/buildbarn/bb-storage/pkg/proto/configuration/grpc" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type ApplicationConfiguration struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Blobstore *blobstore.BlobstoreConfiguration `protobuf:"bytes,1,opt,name=blobstore,proto3" json:"blobstore,omitempty"` + BrowserUrl string `protobuf:"bytes,2,opt,name=browser_url,json=browserUrl,proto3" json:"browser_url,omitempty"` + MaximumMessageSizeBytes int64 `protobuf:"varint,6,opt,name=maximum_message_size_bytes,json=maximumMessageSizeBytes,proto3" json:"maximum_message_size_bytes,omitempty"` + Scheduler *grpc.ClientConfiguration `protobuf:"bytes,8,opt,name=scheduler,proto3" json:"scheduler,omitempty"` + Global *global.Configuration `protobuf:"bytes,19,opt,name=global,proto3" json:"global,omitempty"` + BuildDirectories []*BuildDirectoryConfiguration `protobuf:"bytes,20,rep,name=build_directories,json=buildDirectories,proto3" json:"build_directories,omitempty"` + FilePool *filesystem.FilePoolConfiguration `protobuf:"bytes,22,opt,name=file_pool,json=filePool,proto3" json:"file_pool,omitempty"` + CompletedActionLoggers []*CompletedActionLoggingConfiguration `protobuf:"bytes,23,rep,name=completed_action_loggers,json=completedActionLoggers,proto3" json:"completed_action_loggers,omitempty"` + OutputUploadConcurrency int64 `protobuf:"varint,24,opt,name=output_upload_concurrency,json=outputUploadConcurrency,proto3" json:"output_upload_concurrency,omitempty"` + DirectoryCache *cas.CachingDirectoryFetcherConfiguration `protobuf:"bytes,25,opt,name=directory_cache,json=directoryCache,proto3" json:"directory_cache,omitempty"` + Prefetching *PrefetchingConfiguration `protobuf:"bytes,26,opt,name=prefetching,proto3" json:"prefetching,omitempty"` +} + +func (x *ApplicationConfiguration) Reset() { + *x = ApplicationConfiguration{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_configuration_bb_worker_bb_worker_proto_msgTypes[0] + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ApplicationConfiguration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ApplicationConfiguration) ProtoMessage() {} + +func (x *ApplicationConfiguration) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_configuration_bb_worker_bb_worker_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ApplicationConfiguration.ProtoReflect.Descriptor instead. +func (*ApplicationConfiguration) Descriptor() ([]byte, []int) { + return file_pkg_proto_configuration_bb_worker_bb_worker_proto_rawDescGZIP(), []int{0} +} + +func (x *ApplicationConfiguration) GetBlobstore() *blobstore.BlobstoreConfiguration { + if x != nil { + return x.Blobstore + } + return nil +} + +func (x *ApplicationConfiguration) GetBrowserUrl() string { + if x != nil { + return x.BrowserUrl + } + return "" +} + +func (x *ApplicationConfiguration) GetMaximumMessageSizeBytes() int64 { + if x != nil { + return x.MaximumMessageSizeBytes + } + return 0 +} + +func (x *ApplicationConfiguration) GetScheduler() *grpc.ClientConfiguration { + if x != nil { + return x.Scheduler + } + return nil +} + +func (x *ApplicationConfiguration) GetGlobal() *global.Configuration { + if x != nil { + return x.Global + } + return nil +} + +func (x *ApplicationConfiguration) GetBuildDirectories() []*BuildDirectoryConfiguration { + if x != nil { + return x.BuildDirectories + } + return nil +} + +func (x *ApplicationConfiguration) GetFilePool() *filesystem.FilePoolConfiguration { + if x != nil { + return x.FilePool + } + return nil +} + +func (x *ApplicationConfiguration) GetCompletedActionLoggers() []*CompletedActionLoggingConfiguration { + if x != nil { + return x.CompletedActionLoggers + } + return nil +} + +func (x 
*ApplicationConfiguration) GetOutputUploadConcurrency() int64 { + if x != nil { + return x.OutputUploadConcurrency + } + return 0 +} + +func (x *ApplicationConfiguration) GetDirectoryCache() *cas.CachingDirectoryFetcherConfiguration { + if x != nil { + return x.DirectoryCache + } + return nil +} + +func (x *ApplicationConfiguration) GetPrefetching() *PrefetchingConfiguration { + if x != nil { + return x.Prefetching + } + return nil +} + +type BuildDirectoryConfiguration struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Backend: + // + // *BuildDirectoryConfiguration_Native + // *BuildDirectoryConfiguration_Virtual + Backend isBuildDirectoryConfiguration_Backend `protobuf_oneof:"backend"` + Runners []*RunnerConfiguration `protobuf:"bytes,3,rep,name=runners,proto3" json:"runners,omitempty"` +} + +func (x *BuildDirectoryConfiguration) Reset() { + *x = BuildDirectoryConfiguration{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_configuration_bb_worker_bb_worker_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BuildDirectoryConfiguration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BuildDirectoryConfiguration) ProtoMessage() {} + +func (x *BuildDirectoryConfiguration) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_configuration_bb_worker_bb_worker_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BuildDirectoryConfiguration.ProtoReflect.Descriptor instead. 
+func (*BuildDirectoryConfiguration) Descriptor() ([]byte, []int) { + return file_pkg_proto_configuration_bb_worker_bb_worker_proto_rawDescGZIP(), []int{1} +} + +func (m *BuildDirectoryConfiguration) GetBackend() isBuildDirectoryConfiguration_Backend { + if m != nil { + return m.Backend + } + return nil +} + +func (x *BuildDirectoryConfiguration) GetNative() *NativeBuildDirectoryConfiguration { + if x, ok := x.GetBackend().(*BuildDirectoryConfiguration_Native); ok { + return x.Native + } + return nil +} + +func (x *BuildDirectoryConfiguration) GetVirtual() *VirtualBuildDirectoryConfiguration { + if x, ok := x.GetBackend().(*BuildDirectoryConfiguration_Virtual); ok { + return x.Virtual + } + return nil +} + +func (x *BuildDirectoryConfiguration) GetRunners() []*RunnerConfiguration { + if x != nil { + return x.Runners + } + return nil +} + +type isBuildDirectoryConfiguration_Backend interface { + isBuildDirectoryConfiguration_Backend() +} + +type BuildDirectoryConfiguration_Native struct { + Native *NativeBuildDirectoryConfiguration `protobuf:"bytes,1,opt,name=native,proto3,oneof"` +} + +type BuildDirectoryConfiguration_Virtual struct { + Virtual *VirtualBuildDirectoryConfiguration `protobuf:"bytes,2,opt,name=virtual,proto3,oneof"` +} + +func (*BuildDirectoryConfiguration_Native) isBuildDirectoryConfiguration_Backend() {} + +func (*BuildDirectoryConfiguration_Virtual) isBuildDirectoryConfiguration_Backend() {} + +type NativeBuildDirectoryConfiguration struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + BuildDirectoryPath string `protobuf:"bytes,1,opt,name=build_directory_path,json=buildDirectoryPath,proto3" json:"build_directory_path,omitempty"` + CacheDirectoryPath string `protobuf:"bytes,2,opt,name=cache_directory_path,json=cacheDirectoryPath,proto3" json:"cache_directory_path,omitempty"` + MaximumCacheFileCount uint64 
`protobuf:"varint,3,opt,name=maximum_cache_file_count,json=maximumCacheFileCount,proto3" json:"maximum_cache_file_count,omitempty"` + MaximumCacheSizeBytes int64 `protobuf:"varint,4,opt,name=maximum_cache_size_bytes,json=maximumCacheSizeBytes,proto3" json:"maximum_cache_size_bytes,omitempty"` + CacheReplacementPolicy eviction.CacheReplacementPolicy `protobuf:"varint,5,opt,name=cache_replacement_policy,json=cacheReplacementPolicy,proto3,enum=buildbarn.configuration.eviction.CacheReplacementPolicy" json:"cache_replacement_policy,omitempty"` +} + +func (x *NativeBuildDirectoryConfiguration) Reset() { + *x = NativeBuildDirectoryConfiguration{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_configuration_bb_worker_bb_worker_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NativeBuildDirectoryConfiguration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NativeBuildDirectoryConfiguration) ProtoMessage() {} + +func (x *NativeBuildDirectoryConfiguration) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_configuration_bb_worker_bb_worker_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NativeBuildDirectoryConfiguration.ProtoReflect.Descriptor instead. 
+func (*NativeBuildDirectoryConfiguration) Descriptor() ([]byte, []int) { + return file_pkg_proto_configuration_bb_worker_bb_worker_proto_rawDescGZIP(), []int{2} +} + +func (x *NativeBuildDirectoryConfiguration) GetBuildDirectoryPath() string { + if x != nil { + return x.BuildDirectoryPath + } + return "" +} + +func (x *NativeBuildDirectoryConfiguration) GetCacheDirectoryPath() string { + if x != nil { + return x.CacheDirectoryPath + } + return "" +} + +func (x *NativeBuildDirectoryConfiguration) GetMaximumCacheFileCount() uint64 { + if x != nil { + return x.MaximumCacheFileCount + } + return 0 +} + +func (x *NativeBuildDirectoryConfiguration) GetMaximumCacheSizeBytes() int64 { + if x != nil { + return x.MaximumCacheSizeBytes + } + return 0 +} + +func (x *NativeBuildDirectoryConfiguration) GetCacheReplacementPolicy() eviction.CacheReplacementPolicy { + if x != nil { + return x.CacheReplacementPolicy + } + return eviction.CacheReplacementPolicy(0) +} + +type VirtualBuildDirectoryConfiguration struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Mount *virtual.MountConfiguration `protobuf:"bytes,1,opt,name=mount,proto3" json:"mount,omitempty"` + MaximumExecutionTimeoutCompensation *durationpb.Duration `protobuf:"bytes,2,opt,name=maximum_execution_timeout_compensation,json=maximumExecutionTimeoutCompensation,proto3" json:"maximum_execution_timeout_compensation,omitempty"` + ShuffleDirectoryListings bool `protobuf:"varint,3,opt,name=shuffle_directory_listings,json=shuffleDirectoryListings,proto3" json:"shuffle_directory_listings,omitempty"` + HiddenFilesPattern string `protobuf:"bytes,4,opt,name=hidden_files_pattern,json=hiddenFilesPattern,proto3" json:"hidden_files_pattern,omitempty"` +} + +func (x *VirtualBuildDirectoryConfiguration) Reset() { + *x = VirtualBuildDirectoryConfiguration{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_configuration_bb_worker_bb_worker_proto_msgTypes[3] + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VirtualBuildDirectoryConfiguration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VirtualBuildDirectoryConfiguration) ProtoMessage() {} + +func (x *VirtualBuildDirectoryConfiguration) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_configuration_bb_worker_bb_worker_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VirtualBuildDirectoryConfiguration.ProtoReflect.Descriptor instead. +func (*VirtualBuildDirectoryConfiguration) Descriptor() ([]byte, []int) { + return file_pkg_proto_configuration_bb_worker_bb_worker_proto_rawDescGZIP(), []int{3} +} + +func (x *VirtualBuildDirectoryConfiguration) GetMount() *virtual.MountConfiguration { + if x != nil { + return x.Mount + } + return nil +} + +func (x *VirtualBuildDirectoryConfiguration) GetMaximumExecutionTimeoutCompensation() *durationpb.Duration { + if x != nil { + return x.MaximumExecutionTimeoutCompensation + } + return nil +} + +func (x *VirtualBuildDirectoryConfiguration) GetShuffleDirectoryListings() bool { + if x != nil { + return x.ShuffleDirectoryListings + } + return false +} + +func (x *VirtualBuildDirectoryConfiguration) GetHiddenFilesPattern() string { + if x != nil { + return x.HiddenFilesPattern + } + return "" +} + +type RunnerConfiguration struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Endpoint *grpc.ClientConfiguration `protobuf:"bytes,1,opt,name=endpoint,proto3" json:"endpoint,omitempty"` + Concurrency uint64 `protobuf:"varint,2,opt,name=concurrency,proto3" json:"concurrency,omitempty"` + InstanceNamePrefix string `protobuf:"bytes,13,opt,name=instance_name_prefix,json=instanceNamePrefix,proto3" 
json:"instance_name_prefix,omitempty"` + Platform *v2.Platform `protobuf:"bytes,3,opt,name=platform,proto3" json:"platform,omitempty"` + SizeClass uint32 `protobuf:"varint,12,opt,name=size_class,json=sizeClass,proto3" json:"size_class,omitempty"` + MaximumFilePoolFileCount int64 `protobuf:"varint,6,opt,name=maximum_file_pool_file_count,json=maximumFilePoolFileCount,proto3" json:"maximum_file_pool_file_count,omitempty"` + MaximumFilePoolSizeBytes int64 `protobuf:"varint,7,opt,name=maximum_file_pool_size_bytes,json=maximumFilePoolSizeBytes,proto3" json:"maximum_file_pool_size_bytes,omitempty"` + WorkerId map[string]string `protobuf:"bytes,8,rep,name=worker_id,json=workerId,proto3" json:"worker_id,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + InputRootCharacterDeviceNodes []string `protobuf:"bytes,9,rep,name=input_root_character_device_nodes,json=inputRootCharacterDeviceNodes,proto3" json:"input_root_character_device_nodes,omitempty"` + CostsPerSecond map[string]*resourceusage.MonetaryResourceUsage_Expense `protobuf:"bytes,10,rep,name=costs_per_second,json=costsPerSecond,proto3" json:"costs_per_second,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + EnvironmentVariables map[string]string `protobuf:"bytes,11,rep,name=environment_variables,json=environmentVariables,proto3" json:"environment_variables,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + MaximumConsecutiveTestInfrastructureFailures uint32 `protobuf:"varint,14,opt,name=maximum_consecutive_test_infrastructure_failures,json=maximumConsecutiveTestInfrastructureFailures,proto3" json:"maximum_consecutive_test_infrastructure_failures,omitempty"` +} + +func (x *RunnerConfiguration) Reset() { + *x = RunnerConfiguration{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_configuration_bb_worker_bb_worker_proto_msgTypes[4] + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RunnerConfiguration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RunnerConfiguration) ProtoMessage() {} + +func (x *RunnerConfiguration) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_configuration_bb_worker_bb_worker_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RunnerConfiguration.ProtoReflect.Descriptor instead. +func (*RunnerConfiguration) Descriptor() ([]byte, []int) { + return file_pkg_proto_configuration_bb_worker_bb_worker_proto_rawDescGZIP(), []int{4} +} + +func (x *RunnerConfiguration) GetEndpoint() *grpc.ClientConfiguration { + if x != nil { + return x.Endpoint + } + return nil +} + +func (x *RunnerConfiguration) GetConcurrency() uint64 { + if x != nil { + return x.Concurrency + } + return 0 +} + +func (x *RunnerConfiguration) GetInstanceNamePrefix() string { + if x != nil { + return x.InstanceNamePrefix + } + return "" +} + +func (x *RunnerConfiguration) GetPlatform() *v2.Platform { + if x != nil { + return x.Platform + } + return nil +} + +func (x *RunnerConfiguration) GetSizeClass() uint32 { + if x != nil { + return x.SizeClass + } + return 0 +} + +func (x *RunnerConfiguration) GetMaximumFilePoolFileCount() int64 { + if x != nil { + return x.MaximumFilePoolFileCount + } + return 0 +} + +func (x *RunnerConfiguration) GetMaximumFilePoolSizeBytes() int64 { + if x != nil { + return x.MaximumFilePoolSizeBytes + } + return 0 +} + +func (x *RunnerConfiguration) GetWorkerId() map[string]string { + if x != nil { + return x.WorkerId + } + return nil +} + +func (x *RunnerConfiguration) GetInputRootCharacterDeviceNodes() []string { + if x != nil { + return x.InputRootCharacterDeviceNodes + } + return nil +} + +func (x 
*RunnerConfiguration) GetCostsPerSecond() map[string]*resourceusage.MonetaryResourceUsage_Expense { + if x != nil { + return x.CostsPerSecond + } + return nil +} + +func (x *RunnerConfiguration) GetEnvironmentVariables() map[string]string { + if x != nil { + return x.EnvironmentVariables + } + return nil +} + +func (x *RunnerConfiguration) GetMaximumConsecutiveTestInfrastructureFailures() uint32 { + if x != nil { + return x.MaximumConsecutiveTestInfrastructureFailures + } + return 0 +} + +type CompletedActionLoggingConfiguration struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Client *grpc.ClientConfiguration `protobuf:"bytes,1,opt,name=client,proto3" json:"client,omitempty"` + MaximumSendQueueSize uint32 `protobuf:"varint,2,opt,name=maximum_send_queue_size,json=maximumSendQueueSize,proto3" json:"maximum_send_queue_size,omitempty"` + AddInstanceNamePrefix string `protobuf:"bytes,3,opt,name=add_instance_name_prefix,json=addInstanceNamePrefix,proto3" json:"add_instance_name_prefix,omitempty"` +} + +func (x *CompletedActionLoggingConfiguration) Reset() { + *x = CompletedActionLoggingConfiguration{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_configuration_bb_worker_bb_worker_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CompletedActionLoggingConfiguration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CompletedActionLoggingConfiguration) ProtoMessage() {} + +func (x *CompletedActionLoggingConfiguration) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_configuration_bb_worker_bb_worker_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use 
CompletedActionLoggingConfiguration.ProtoReflect.Descriptor instead. +func (*CompletedActionLoggingConfiguration) Descriptor() ([]byte, []int) { + return file_pkg_proto_configuration_bb_worker_bb_worker_proto_rawDescGZIP(), []int{5} +} + +func (x *CompletedActionLoggingConfiguration) GetClient() *grpc.ClientConfiguration { + if x != nil { + return x.Client + } + return nil +} + +func (x *CompletedActionLoggingConfiguration) GetMaximumSendQueueSize() uint32 { + if x != nil { + return x.MaximumSendQueueSize + } + return 0 +} + +func (x *CompletedActionLoggingConfiguration) GetAddInstanceNamePrefix() string { + if x != nil { + return x.AddInstanceNamePrefix + } + return "" +} + +type PrefetchingConfiguration struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + FileSystemAccessCache *blobstore.BlobAccessConfiguration `protobuf:"bytes,1,opt,name=file_system_access_cache,json=fileSystemAccessCache,proto3" json:"file_system_access_cache,omitempty"` + BloomFilterBitsPerPath uint32 `protobuf:"varint,2,opt,name=bloom_filter_bits_per_path,json=bloomFilterBitsPerPath,proto3" json:"bloom_filter_bits_per_path,omitempty"` + BloomFilterMaximumSizeBytes uint32 `protobuf:"varint,3,opt,name=bloom_filter_maximum_size_bytes,json=bloomFilterMaximumSizeBytes,proto3" json:"bloom_filter_maximum_size_bytes,omitempty"` + DownloadConcurrency int64 `protobuf:"varint,4,opt,name=download_concurrency,json=downloadConcurrency,proto3" json:"download_concurrency,omitempty"` +} + +func (x *PrefetchingConfiguration) Reset() { + *x = PrefetchingConfiguration{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_configuration_bb_worker_bb_worker_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PrefetchingConfiguration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PrefetchingConfiguration) ProtoMessage() {} + +func (x *PrefetchingConfiguration) 
ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_configuration_bb_worker_bb_worker_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PrefetchingConfiguration.ProtoReflect.Descriptor instead. +func (*PrefetchingConfiguration) Descriptor() ([]byte, []int) { + return file_pkg_proto_configuration_bb_worker_bb_worker_proto_rawDescGZIP(), []int{6} +} + +func (x *PrefetchingConfiguration) GetFileSystemAccessCache() *blobstore.BlobAccessConfiguration { + if x != nil { + return x.FileSystemAccessCache + } + return nil +} + +func (x *PrefetchingConfiguration) GetBloomFilterBitsPerPath() uint32 { + if x != nil { + return x.BloomFilterBitsPerPath + } + return 0 +} + +func (x *PrefetchingConfiguration) GetBloomFilterMaximumSizeBytes() uint32 { + if x != nil { + return x.BloomFilterMaximumSizeBytes + } + return 0 +} + +func (x *PrefetchingConfiguration) GetDownloadConcurrency() int64 { + if x != nil { + return x.DownloadConcurrency + } + return 0 +} + +var File_pkg_proto_configuration_bb_worker_bb_worker_proto protoreflect.FileDescriptor + +var file_pkg_proto_configuration_bb_worker_bb_worker_proto_rawDesc = []byte{ + 0x0a, 0x31, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x62, 0x62, 0x5f, 0x77, 0x6f, 0x72, + 0x6b, 0x65, 0x72, 0x2f, 0x62, 0x62, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x21, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x62, 0x62, 0x5f, + 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x1a, 0x36, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x2f, 0x62, 0x61, + 0x7a, 0x65, 0x6c, 0x2f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2f, 0x65, 0x78, 
0x65, 0x63, 0x75, + 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x76, 0x32, 0x2f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x65, + 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, + 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x31, + 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x62, 0x6c, 0x6f, 0x62, 0x73, 0x74, 0x6f, 0x72, + 0x65, 0x2f, 0x62, 0x6c, 0x6f, 0x62, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x25, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x63, 0x61, 0x73, 0x2f, 0x63, + 0x61, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x2f, 0x65, 0x76, 0x69, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x65, 0x76, 0x69, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x33, 0x70, 0x6b, 0x67, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x2f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x2f, 0x66, 0x69, + 0x6c, 0x65, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x38, + 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x79, 0x73, 0x74, + 0x65, 0x6d, 0x2f, 0x76, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x2f, 0x76, 0x69, 0x72, 0x74, 0x75, + 0x61, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2b, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 
0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x2f, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2f, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x27, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x67, + 0x72, 0x70, 0x63, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2b, + 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x75, 0x73, 0x61, 0x67, 0x65, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x75, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xd6, 0x07, 0x0a, 0x18, + 0x41, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x57, 0x0a, 0x09, 0x62, 0x6c, 0x6f, 0x62, + 0x73, 0x74, 0x6f, 0x72, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x62, 0x75, + 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x62, 0x6c, 0x6f, 0x62, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, + 0x42, 0x6c, 0x6f, 0x62, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x62, 0x6c, 0x6f, 0x62, 0x73, 0x74, 0x6f, 0x72, + 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x62, 0x72, 0x6f, 0x77, 0x73, 0x65, 0x72, 0x5f, 0x75, 0x72, 0x6c, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x62, 0x72, 0x6f, 0x77, 0x73, 0x65, 0x72, 0x55, + 0x72, 0x6c, 0x12, 0x3b, 0x0a, 0x1a, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x6d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x17, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x4d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 
0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, + 0x4f, 0x0a, 0x09, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x18, 0x08, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x67, 0x72, 0x70, + 0x63, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, + 0x12, 0x45, 0x0a, 0x06, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x2d, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, + 0x6c, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x06, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x12, 0x6b, 0x0a, 0x11, 0x62, 0x75, 0x69, 0x6c, 0x64, + 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x69, 0x65, 0x73, 0x18, 0x14, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x62, 0x62, 0x5f, + 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x2e, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x44, 0x69, 0x72, 0x65, + 0x63, 0x74, 0x6f, 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x10, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, + 0x72, 0x69, 0x65, 0x73, 0x12, 0x56, 0x0a, 0x09, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x70, 0x6f, 0x6f, + 0x6c, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, + 0x61, 0x72, 0x6e, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x2e, 0x46, 0x69, 
0x6c, + 0x65, 0x50, 0x6f, 0x6f, 0x6c, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x08, 0x66, 0x69, 0x6c, 0x65, 0x50, 0x6f, 0x6f, 0x6c, 0x12, 0x80, 0x01, 0x0a, + 0x18, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x6c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x73, 0x18, 0x17, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x46, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x62, 0x62, 0x5f, 0x77, 0x6f, 0x72, + 0x6b, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x41, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x4c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x16, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, + 0x65, 0x64, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x73, 0x12, + 0x3a, 0x0a, 0x19, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, + 0x5f, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x18, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x17, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, + 0x43, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x6a, 0x0a, 0x0f, 0x64, + 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x5f, 0x63, 0x61, 0x63, 0x68, 0x65, 0x18, 0x19, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x63, + 0x61, 0x73, 0x2e, 0x43, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, + 0x6f, 0x72, 0x79, 0x46, 0x65, 0x74, 0x63, 0x68, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, + 0x72, 0x79, 0x43, 0x61, 0x63, 
0x68, 0x65, 0x12, 0x5d, 0x0a, 0x0b, 0x70, 0x72, 0x65, 0x66, 0x65, + 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x62, + 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x62, 0x62, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, + 0x2e, 0x50, 0x72, 0x65, 0x66, 0x65, 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x70, 0x72, 0x65, 0x66, 0x65, + 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x4a, 0x04, 0x08, 0x0c, + 0x10, 0x0d, 0x4a, 0x04, 0x08, 0x10, 0x10, 0x11, 0x4a, 0x04, 0x08, 0x12, 0x10, 0x13, 0x4a, 0x04, + 0x08, 0x15, 0x10, 0x16, 0x22, 0xbd, 0x02, 0x0a, 0x1b, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x44, 0x69, + 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x5e, 0x0a, 0x06, 0x6e, 0x61, 0x74, 0x69, 0x76, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x44, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x62, + 0x62, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x2e, 0x4e, 0x61, 0x74, 0x69, 0x76, 0x65, 0x42, + 0x75, 0x69, 0x6c, 0x64, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x06, 0x6e, 0x61, + 0x74, 0x69, 0x76, 0x65, 0x12, 0x61, 0x0a, 0x07, 0x76, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x45, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, + 0x6e, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x62, 0x62, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x2e, 0x56, 0x69, 0x72, 0x74, 0x75, 0x61, + 0x6c, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x44, 0x69, 0x72, 0x65, 0x63, 
0x74, 0x6f, 0x72, 0x79, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x07, + 0x76, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x12, 0x50, 0x0a, 0x07, 0x72, 0x75, 0x6e, 0x6e, 0x65, + 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, + 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x62, 0x62, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x2e, 0x52, 0x75, 0x6e, + 0x6e, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x07, 0x72, 0x75, 0x6e, 0x6e, 0x65, 0x72, 0x73, 0x42, 0x09, 0x0a, 0x07, 0x62, 0x61, 0x63, + 0x6b, 0x65, 0x6e, 0x64, 0x22, 0xed, 0x02, 0x0a, 0x21, 0x4e, 0x61, 0x74, 0x69, 0x76, 0x65, 0x42, + 0x75, 0x69, 0x6c, 0x64, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x30, 0x0a, 0x14, 0x62, 0x75, + 0x69, 0x6c, 0x64, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x5f, 0x70, 0x61, + 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x44, + 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x50, 0x61, 0x74, 0x68, 0x12, 0x30, 0x0a, 0x14, + 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x5f, + 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x63, 0x61, 0x63, 0x68, + 0x65, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x50, 0x61, 0x74, 0x68, 0x12, 0x37, + 0x0a, 0x18, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, + 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x15, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x43, 0x61, 0x63, 0x68, 0x65, 0x46, 0x69, + 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x37, 0x0a, 0x18, 0x6d, 0x61, 0x78, 0x69, 0x6d, + 0x75, 
0x6d, 0x5f, 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x62, 0x79, + 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x15, 0x6d, 0x61, 0x78, 0x69, 0x6d, + 0x75, 0x6d, 0x43, 0x61, 0x63, 0x68, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, + 0x12, 0x72, 0x0a, 0x18, 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, + 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x38, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x65, 0x76, 0x69, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x61, + 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x16, 0x63, 0x61, + 0x63, 0x68, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x22, 0xda, 0x02, 0x0a, 0x22, 0x56, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, + 0x42, 0x75, 0x69, 0x6c, 0x64, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x54, 0x0a, 0x05, 0x6d, + 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x62, 0x75, 0x69, + 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x2e, + 0x76, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x2e, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x05, 0x6d, 0x6f, 0x75, 0x6e, + 0x74, 0x12, 0x6e, 0x0a, 0x26, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x78, 0x65, + 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x63, + 0x6f, 0x6d, 0x70, 0x65, 0x6e, 0x73, 0x61, 
0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x23, 0x6d, 0x61, + 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, + 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x43, 0x6f, 0x6d, 0x70, 0x65, 0x6e, 0x73, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x3c, 0x0a, 0x1a, 0x73, 0x68, 0x75, 0x66, 0x66, 0x6c, 0x65, 0x5f, 0x64, 0x69, 0x72, + 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x73, 0x68, 0x75, 0x66, 0x66, 0x6c, 0x65, 0x44, 0x69, + 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x4c, 0x69, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, + 0x30, 0x0a, 0x14, 0x68, 0x69, 0x64, 0x64, 0x65, 0x6e, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x5f, + 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x68, + 0x69, 0x64, 0x64, 0x65, 0x6e, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x50, 0x61, 0x74, 0x74, 0x65, 0x72, + 0x6e, 0x22, 0xbe, 0x09, 0x0a, 0x13, 0x52, 0x75, 0x6e, 0x6e, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4d, 0x0a, 0x08, 0x65, 0x6e, 0x64, + 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x62, 0x75, + 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, + 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x63, + 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x63, + 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x30, 0x0a, 
0x14, 0x69, 0x6e, + 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x72, 0x65, 0x66, + 0x69, 0x78, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, + 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x45, 0x0a, 0x08, + 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, + 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x2e, 0x62, 0x61, 0x7a, 0x65, 0x6c, 0x2e, 0x72, 0x65, 0x6d, + 0x6f, 0x74, 0x65, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x32, + 0x2e, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x52, 0x08, 0x70, 0x6c, 0x61, 0x74, 0x66, + 0x6f, 0x72, 0x6d, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, + 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x73, 0x69, 0x7a, 0x65, 0x43, 0x6c, 0x61, + 0x73, 0x73, 0x12, 0x3e, 0x0a, 0x1c, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x66, 0x69, + 0x6c, 0x65, 0x5f, 0x70, 0x6f, 0x6f, 0x6c, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, + 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x18, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, + 0x6d, 0x46, 0x69, 0x6c, 0x65, 0x50, 0x6f, 0x6f, 0x6c, 0x46, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x75, + 0x6e, 0x74, 0x12, 0x3e, 0x0a, 0x1c, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x66, 0x69, + 0x6c, 0x65, 0x5f, 0x70, 0x6f, 0x6f, 0x6c, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x62, 0x79, 0x74, + 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, 0x18, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, + 0x6d, 0x46, 0x69, 0x6c, 0x65, 0x50, 0x6f, 0x6f, 0x6c, 0x53, 0x69, 0x7a, 0x65, 0x42, 0x79, 0x74, + 0x65, 0x73, 0x12, 0x61, 0x0a, 0x09, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, + 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x44, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, + 0x6e, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x62, 0x62, 0x5f, 
0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x2e, 0x52, 0x75, 0x6e, 0x6e, 0x65, 0x72, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x57, 0x6f, + 0x72, 0x6b, 0x65, 0x72, 0x49, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x77, 0x6f, 0x72, + 0x6b, 0x65, 0x72, 0x49, 0x64, 0x12, 0x48, 0x0a, 0x21, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x72, + 0x6f, 0x6f, 0x74, 0x5f, 0x63, 0x68, 0x61, 0x72, 0x61, 0x63, 0x74, 0x65, 0x72, 0x5f, 0x64, 0x65, + 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x1d, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x52, 0x6f, 0x6f, 0x74, 0x43, 0x68, 0x61, 0x72, 0x61, + 0x63, 0x74, 0x65, 0x72, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x12, + 0x74, 0x0a, 0x10, 0x63, 0x6f, 0x73, 0x74, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x73, 0x65, 0x63, + 0x6f, 0x6e, 0x64, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x4a, 0x2e, 0x62, 0x75, 0x69, 0x6c, + 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x2e, 0x62, 0x62, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x2e, 0x52, 0x75, + 0x6e, 0x6e, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x2e, 0x43, 0x6f, 0x73, 0x74, 0x73, 0x50, 0x65, 0x72, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x63, 0x6f, 0x73, 0x74, 0x73, 0x50, 0x65, 0x72, 0x53, + 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x12, 0x85, 0x01, 0x0a, 0x15, 0x65, 0x6e, 0x76, 0x69, 0x72, 0x6f, + 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, + 0x0b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x50, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, + 0x6e, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x62, 0x62, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x2e, 0x52, 0x75, 0x6e, 0x6e, 0x65, 0x72, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 
0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x45, 0x6e, + 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, + 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x14, 0x65, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, + 0x6d, 0x65, 0x6e, 0x74, 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x66, 0x0a, + 0x30, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x63, 0x75, + 0x74, 0x69, 0x76, 0x65, 0x5f, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x6e, 0x66, 0x72, 0x61, 0x73, + 0x74, 0x72, 0x75, 0x63, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, + 0x73, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x2c, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, + 0x43, 0x6f, 0x6e, 0x73, 0x65, 0x63, 0x75, 0x74, 0x69, 0x76, 0x65, 0x54, 0x65, 0x73, 0x74, 0x49, + 0x6e, 0x66, 0x72, 0x61, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x75, 0x72, 0x65, 0x46, 0x61, 0x69, + 0x6c, 0x75, 0x72, 0x65, 0x73, 0x1a, 0x3b, 0x0a, 0x0d, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, + 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x1a, 0x79, 0x0a, 0x13, 0x43, 0x6f, 0x73, 0x74, 0x73, 0x50, 0x65, 0x72, 0x53, 0x65, + 0x63, 0x6f, 0x6e, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x4c, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x62, 0x75, 0x69, + 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x75, + 0x73, 0x61, 0x67, 0x65, 0x2e, 0x4d, 0x6f, 0x6e, 0x65, 0x74, 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x55, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x45, 0x78, 0x70, 0x65, 
0x6e, + 0x73, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x47, 0x0a, + 0x19, 0x45, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x56, 0x61, 0x72, 0x69, + 0x61, 0x62, 0x6c, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, + 0x10, 0x06, 0x22, 0xe0, 0x01, 0x0a, 0x23, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, + 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x49, 0x0a, 0x06, 0x63, 0x6c, + 0x69, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x62, 0x75, 0x69, + 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x63, + 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x12, 0x35, 0x0a, 0x17, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, + 0x5f, 0x73, 0x65, 0x6e, 0x64, 0x5f, 0x71, 0x75, 0x65, 0x75, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x14, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x53, + 0x65, 0x6e, 0x64, 0x51, 0x75, 0x65, 0x75, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x37, 0x0a, 0x18, + 0x61, 0x64, 0x64, 0x5f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x15, + 0x61, 0x64, 0x64, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x50, + 0x72, 0x65, 0x66, 0x69, 0x78, 
0x22, 0xc4, 0x02, 0x0a, 0x18, 0x50, 0x72, 0x65, 0x66, 0x65, 0x74, + 0x63, 0x68, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x73, 0x0a, 0x18, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x73, 0x79, 0x73, 0x74, 0x65, + 0x6d, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x63, 0x61, 0x63, 0x68, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x62, + 0x6c, 0x6f, 0x62, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x42, 0x6c, 0x6f, 0x62, 0x41, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x15, 0x66, 0x69, 0x6c, 0x65, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x41, 0x63, 0x63, 0x65, + 0x73, 0x73, 0x43, 0x61, 0x63, 0x68, 0x65, 0x12, 0x3a, 0x0a, 0x1a, 0x62, 0x6c, 0x6f, 0x6f, 0x6d, + 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x62, 0x69, 0x74, 0x73, 0x5f, 0x70, 0x65, 0x72, + 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x62, 0x6c, 0x6f, + 0x6f, 0x6d, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x42, 0x69, 0x74, 0x73, 0x50, 0x65, 0x72, 0x50, + 0x61, 0x74, 0x68, 0x12, 0x44, 0x0a, 0x1f, 0x62, 0x6c, 0x6f, 0x6f, 0x6d, 0x5f, 0x66, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x5f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x73, 0x69, 0x7a, 0x65, + 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x1b, 0x62, 0x6c, + 0x6f, 0x6f, 0x6d, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, + 0x53, 0x69, 0x7a, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x31, 0x0a, 0x14, 0x64, 0x6f, 0x77, + 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, + 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x13, 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, + 0x64, 0x43, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 
0x79, 0x42, 0x4c, 0x5a, 0x4a, + 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x62, 0x75, 0x69, 0x6c, 0x64, + 0x62, 0x61, 0x72, 0x6e, 0x2f, 0x62, 0x62, 0x2d, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2d, 0x65, + 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x2f, 0x62, 0x62, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_pkg_proto_configuration_bb_worker_bb_worker_proto_rawDescOnce sync.Once + file_pkg_proto_configuration_bb_worker_bb_worker_proto_rawDescData = file_pkg_proto_configuration_bb_worker_bb_worker_proto_rawDesc +) + +func file_pkg_proto_configuration_bb_worker_bb_worker_proto_rawDescGZIP() []byte { + file_pkg_proto_configuration_bb_worker_bb_worker_proto_rawDescOnce.Do(func() { + file_pkg_proto_configuration_bb_worker_bb_worker_proto_rawDescData = protoimpl.X.CompressGZIP(file_pkg_proto_configuration_bb_worker_bb_worker_proto_rawDescData) + }) + return file_pkg_proto_configuration_bb_worker_bb_worker_proto_rawDescData +} + +var file_pkg_proto_configuration_bb_worker_bb_worker_proto_msgTypes = make([]protoimpl.MessageInfo, 10) +var file_pkg_proto_configuration_bb_worker_bb_worker_proto_goTypes = []interface{}{ + (*ApplicationConfiguration)(nil), // 0: buildbarn.configuration.bb_worker.ApplicationConfiguration + (*BuildDirectoryConfiguration)(nil), // 1: buildbarn.configuration.bb_worker.BuildDirectoryConfiguration + (*NativeBuildDirectoryConfiguration)(nil), // 2: buildbarn.configuration.bb_worker.NativeBuildDirectoryConfiguration + (*VirtualBuildDirectoryConfiguration)(nil), // 3: buildbarn.configuration.bb_worker.VirtualBuildDirectoryConfiguration + (*RunnerConfiguration)(nil), // 4: buildbarn.configuration.bb_worker.RunnerConfiguration + (*CompletedActionLoggingConfiguration)(nil), // 5: 
buildbarn.configuration.bb_worker.CompletedActionLoggingConfiguration + (*PrefetchingConfiguration)(nil), // 6: buildbarn.configuration.bb_worker.PrefetchingConfiguration + nil, // 7: buildbarn.configuration.bb_worker.RunnerConfiguration.WorkerIdEntry + nil, // 8: buildbarn.configuration.bb_worker.RunnerConfiguration.CostsPerSecondEntry + nil, // 9: buildbarn.configuration.bb_worker.RunnerConfiguration.EnvironmentVariablesEntry + (*blobstore.BlobstoreConfiguration)(nil), // 10: buildbarn.configuration.blobstore.BlobstoreConfiguration + (*grpc.ClientConfiguration)(nil), // 11: buildbarn.configuration.grpc.ClientConfiguration + (*global.Configuration)(nil), // 12: buildbarn.configuration.global.Configuration + (*filesystem.FilePoolConfiguration)(nil), // 13: buildbarn.configuration.filesystem.FilePoolConfiguration + (*cas.CachingDirectoryFetcherConfiguration)(nil), // 14: buildbarn.configuration.cas.CachingDirectoryFetcherConfiguration + (eviction.CacheReplacementPolicy)(0), // 15: buildbarn.configuration.eviction.CacheReplacementPolicy + (*virtual.MountConfiguration)(nil), // 16: buildbarn.configuration.filesystem.virtual.MountConfiguration + (*durationpb.Duration)(nil), // 17: google.protobuf.Duration + (*v2.Platform)(nil), // 18: build.bazel.remote.execution.v2.Platform + (*blobstore.BlobAccessConfiguration)(nil), // 19: buildbarn.configuration.blobstore.BlobAccessConfiguration + (*resourceusage.MonetaryResourceUsage_Expense)(nil), // 20: buildbarn.resourceusage.MonetaryResourceUsage.Expense +} +var file_pkg_proto_configuration_bb_worker_bb_worker_proto_depIdxs = []int32{ + 10, // 0: buildbarn.configuration.bb_worker.ApplicationConfiguration.blobstore:type_name -> buildbarn.configuration.blobstore.BlobstoreConfiguration + 11, // 1: buildbarn.configuration.bb_worker.ApplicationConfiguration.scheduler:type_name -> buildbarn.configuration.grpc.ClientConfiguration + 12, // 2: buildbarn.configuration.bb_worker.ApplicationConfiguration.global:type_name -> 
buildbarn.configuration.global.Configuration + 1, // 3: buildbarn.configuration.bb_worker.ApplicationConfiguration.build_directories:type_name -> buildbarn.configuration.bb_worker.BuildDirectoryConfiguration + 13, // 4: buildbarn.configuration.bb_worker.ApplicationConfiguration.file_pool:type_name -> buildbarn.configuration.filesystem.FilePoolConfiguration + 5, // 5: buildbarn.configuration.bb_worker.ApplicationConfiguration.completed_action_loggers:type_name -> buildbarn.configuration.bb_worker.CompletedActionLoggingConfiguration + 14, // 6: buildbarn.configuration.bb_worker.ApplicationConfiguration.directory_cache:type_name -> buildbarn.configuration.cas.CachingDirectoryFetcherConfiguration + 6, // 7: buildbarn.configuration.bb_worker.ApplicationConfiguration.prefetching:type_name -> buildbarn.configuration.bb_worker.PrefetchingConfiguration + 2, // 8: buildbarn.configuration.bb_worker.BuildDirectoryConfiguration.native:type_name -> buildbarn.configuration.bb_worker.NativeBuildDirectoryConfiguration + 3, // 9: buildbarn.configuration.bb_worker.BuildDirectoryConfiguration.virtual:type_name -> buildbarn.configuration.bb_worker.VirtualBuildDirectoryConfiguration + 4, // 10: buildbarn.configuration.bb_worker.BuildDirectoryConfiguration.runners:type_name -> buildbarn.configuration.bb_worker.RunnerConfiguration + 15, // 11: buildbarn.configuration.bb_worker.NativeBuildDirectoryConfiguration.cache_replacement_policy:type_name -> buildbarn.configuration.eviction.CacheReplacementPolicy + 16, // 12: buildbarn.configuration.bb_worker.VirtualBuildDirectoryConfiguration.mount:type_name -> buildbarn.configuration.filesystem.virtual.MountConfiguration + 17, // 13: buildbarn.configuration.bb_worker.VirtualBuildDirectoryConfiguration.maximum_execution_timeout_compensation:type_name -> google.protobuf.Duration + 11, // 14: buildbarn.configuration.bb_worker.RunnerConfiguration.endpoint:type_name -> buildbarn.configuration.grpc.ClientConfiguration + 18, // 15: 
buildbarn.configuration.bb_worker.RunnerConfiguration.platform:type_name -> build.bazel.remote.execution.v2.Platform + 7, // 16: buildbarn.configuration.bb_worker.RunnerConfiguration.worker_id:type_name -> buildbarn.configuration.bb_worker.RunnerConfiguration.WorkerIdEntry + 8, // 17: buildbarn.configuration.bb_worker.RunnerConfiguration.costs_per_second:type_name -> buildbarn.configuration.bb_worker.RunnerConfiguration.CostsPerSecondEntry + 9, // 18: buildbarn.configuration.bb_worker.RunnerConfiguration.environment_variables:type_name -> buildbarn.configuration.bb_worker.RunnerConfiguration.EnvironmentVariablesEntry + 11, // 19: buildbarn.configuration.bb_worker.CompletedActionLoggingConfiguration.client:type_name -> buildbarn.configuration.grpc.ClientConfiguration + 19, // 20: buildbarn.configuration.bb_worker.PrefetchingConfiguration.file_system_access_cache:type_name -> buildbarn.configuration.blobstore.BlobAccessConfiguration + 20, // 21: buildbarn.configuration.bb_worker.RunnerConfiguration.CostsPerSecondEntry.value:type_name -> buildbarn.resourceusage.MonetaryResourceUsage.Expense + 22, // [22:22] is the sub-list for method output_type + 22, // [22:22] is the sub-list for method input_type + 22, // [22:22] is the sub-list for extension type_name + 22, // [22:22] is the sub-list for extension extendee + 0, // [0:22] is the sub-list for field type_name +} + +func init() { file_pkg_proto_configuration_bb_worker_bb_worker_proto_init() } +func file_pkg_proto_configuration_bb_worker_bb_worker_proto_init() { + if File_pkg_proto_configuration_bb_worker_bb_worker_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_pkg_proto_configuration_bb_worker_bb_worker_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ApplicationConfiguration); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_pkg_proto_configuration_bb_worker_bb_worker_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BuildDirectoryConfiguration); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_proto_configuration_bb_worker_bb_worker_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NativeBuildDirectoryConfiguration); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_proto_configuration_bb_worker_bb_worker_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VirtualBuildDirectoryConfiguration); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_proto_configuration_bb_worker_bb_worker_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RunnerConfiguration); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_proto_configuration_bb_worker_bb_worker_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CompletedActionLoggingConfiguration); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_proto_configuration_bb_worker_bb_worker_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PrefetchingConfiguration); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_pkg_proto_configuration_bb_worker_bb_worker_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*BuildDirectoryConfiguration_Native)(nil), + 
(*BuildDirectoryConfiguration_Virtual)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_pkg_proto_configuration_bb_worker_bb_worker_proto_rawDesc, + NumEnums: 0, + NumMessages: 10, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_pkg_proto_configuration_bb_worker_bb_worker_proto_goTypes, + DependencyIndexes: file_pkg_proto_configuration_bb_worker_bb_worker_proto_depIdxs, + MessageInfos: file_pkg_proto_configuration_bb_worker_bb_worker_proto_msgTypes, + }.Build() + File_pkg_proto_configuration_bb_worker_bb_worker_proto = out.File + file_pkg_proto_configuration_bb_worker_bb_worker_proto_rawDesc = nil + file_pkg_proto_configuration_bb_worker_bb_worker_proto_goTypes = nil + file_pkg_proto_configuration_bb_worker_bb_worker_proto_depIdxs = nil +} diff --git a/pkg/proto/configuration/bb_worker/bb_worker.proto b/pkg/proto/configuration/bb_worker/bb_worker.proto new file mode 100644 index 0000000..c0a7e94 --- /dev/null +++ b/pkg/proto/configuration/bb_worker/bb_worker.proto @@ -0,0 +1,369 @@ +syntax = "proto3"; + +package buildbarn.configuration.bb_worker; + +import "build/bazel/remote/execution/v2/remote_execution.proto"; +import "google/protobuf/duration.proto"; +import "pkg/proto/configuration/blobstore/blobstore.proto"; +import "pkg/proto/configuration/cas/cas.proto"; +import "pkg/proto/configuration/eviction/eviction.proto"; +import "pkg/proto/configuration/filesystem/filesystem.proto"; +import "pkg/proto/configuration/filesystem/virtual/virtual.proto"; +import "pkg/proto/configuration/global/global.proto"; +import "pkg/proto/configuration/grpc/grpc.proto"; +import "pkg/proto/resourceusage/resourceusage.proto"; + +option go_package = "github.com/buildbarn/bb-remote-execution/pkg/proto/configuration/bb_worker"; + +message ApplicationConfiguration { + // Configuration for blob storage. 
+ buildbarn.configuration.blobstore.BlobstoreConfiguration blobstore = 1; + + // URL of the Buildbarn Browser, shown to the user upon build completion. + string browser_url = 2; + + // Maximum Protobuf message size to unmarshal. + int64 maximum_message_size_bytes = 6; + + // Endpoint of the scheduler to which to connect. + buildbarn.configuration.grpc.ClientConfiguration scheduler = 8; + + // Was 'http_listen_address'. This option has been moved to + // 'global.diagnostics_http_listen_address'. + reserved 9; + + // Was 'maximum_memory_cached_directories'. Moved into 'directory_cache'. + reserved 12; + + // Was 'instance_name'. This option has been renamed to + // 'RunnerConfiguration.instance_name_prefix'. + reserved 16; + + // Was 'file_pool_directory_path'. This option has been renamed to + // 'file_pool.directory_path'. + reserved 18; + + // Common configuration options that apply to all Buildbarn binaries. + buildbarn.configuration.global.Configuration global = 19; + + // Directories on the system in which builds take place. + repeated BuildDirectoryConfiguration build_directories = 20; + + // Was 'file_pool_block_device'. This option has been renamed to + // 'file_pool.block_device'. + reserved 21; + + // Location where temporary files that are created by bb_worker are + // stored. Not all configurations of bb_worker require this to be set. + // When left unset, any attempt to allocate temporary files will fail. + buildbarn.configuration.filesystem.FilePoolConfiguration file_pool = 22; + + // Configurations for any CompletedActionLoggingServers that bb_worker will + // forward all CompletedActions to along with their REv2 build metadata. + // This provides a means for aggregating and analyzing the data from builds + // to potentially identify computationally expensive targets, estimate the + // approximate cost of builds, or uncover underlying patterns in build + // behavior. No CompletedActions will be streamed if this option is left + // empty. 
See + // https://github.com/buildbarn/bb-adrs/blob/master/0006-operation-logging-and-monetary-resource-usage.md + // for more details around the Completed Action Logger service. + repeated CompletedActionLoggingConfiguration completed_action_loggers = 23; + + // The maximum number of concurrent writes to issue against the + // Content Addressable Storage (CAS). This limit applies to the worker + // process as a whole; not individual worker threads. + int64 output_upload_concurrency = 24; + + // If set, cache REv2 Directory objects in memory. These objects need + // to be loaded from the Content Addressable Storage when populating + // input roots. + buildbarn.configuration.cas.CachingDirectoryFetcherConfiguration + directory_cache = 25; + + // Prefetch files from the Content Addressable Storage (CAS), using + // profiling data stored in the File System Access Cache (FSAC). + // + // When using build directories backed by the virtual file system + // (FUSE/NFSv4), files and directories stored in the input root are + // fetched on demand (i.e., when listing directory contents, or when + // calling read() against files). Even though this approach reduces + // reads against the CAS to a minimum, it has the downside that CAS + // objects are read at a low concurrency. This may lead to poor + // execution times for file system intensive operations (e.g., ones + // packaging a large number of files stored in the input root), or + // when latency of the CAS is high. + // + // Enabling this option reduces this overhead, by letting the workers + // store profiling data into the FSAC. These profiles are reloaded + // during future invocations of similar actions, so that files and + // directories that are expected to be used are prefetched. This + // process takes place in the background, while the action is running. 
+ PrefetchingConfiguration prefetching = 26; +} + +message BuildDirectoryConfiguration { + oneof backend { + // Perform builds in a native directory on the system. The advantage + // of this setup is that build actions can run with little overhead. + // The disadvantage is that build actions with large input roots may + // take long to initialize. + NativeBuildDirectoryConfiguration native = 1; + + // Perform builds in a virtual mount (e.g., FUSE) that is managed by + // this process. The mount instantiates build input roots lazily and + // loads input files on demand. This means that build actions can be + // started instantaneously, downloading data gradually. + // + // The virtual file system may impose a larger performance overhead + // for build actions that are I/O intensive. To provide adequate + // performance, it is important that a Content Addressable Storage + // backend is used that supports fast random access to blobs. It is + // suggested that workers use ReadCachingBlobAccess in combination + // with LocalBlobAccess to achieve this. When LocalBlobAccess is + // backed by a block device, 'data_integrity_validation_cache' must + // be set to ensure the file isn't checksum validated for every + // individual read. + VirtualBuildDirectoryConfiguration virtual = 2; + } + + // Runners to which to send requests to invoke build action commands. + repeated RunnerConfiguration runners = 3; +} + +message NativeBuildDirectoryConfiguration { + // Directory where builds take place. + string build_directory_path = 1; + + // Directory where build input files are cached. + string cache_directory_path = 2; + + // Maximum number of files in the cache. + uint64 maximum_cache_file_count = 3; + + // Maximum total size of the cache in bytes. + int64 maximum_cache_size_bytes = 4; + + // The cache replacement policy to use for the input file cache. 
For + // most setups, it is advised that RANDOM_REPLACEMENT is used, for the + // reason that it still performs adequately in worst-case scenarios. + // + // There are certain workloads that may require the use of + // LEAST_RECENTLY_USED. For example, Clang's implementation of + // '#pragma once' requires that identical source files exposed under + // different paths have identical inode numbers. The + // RANDOM_REPLACEMENT algorithm cannot guarantee this property, while + // LEAST_RECENTLY_USED can, assuming the cache size is sufficient. + buildbarn.configuration.eviction.CacheReplacementPolicy + cache_replacement_policy = 5; +} + +message VirtualBuildDirectoryConfiguration { + // Options for mounting the virtual file system at a given path. + buildbarn.configuration.filesystem.virtual.MountConfiguration mount = 1; + + // Because the virtual file system only loads input root contents from + // the Content Addressable Storage (CAS) lazily, build actions may + // block during execution. To prevent these delays from causing + // spurious test timeouts, this implementation is capable of + // compensating the execution timeout of actions based on storage + // latency observed. + // + // To prevent build actions from abusing this system (i.e., extending + // the execution timeout indefinitely by accessing files that are + // known to be slow), this option can be used to specify the maximum + // amount of time the build action is permitted to run in excess of + // the originally specified execution timeout. + // + // Recommended value: 3600s + google.protobuf.Duration maximum_execution_timeout_compensation = 2; + + // POSIX doesn't guarantee that readdir() returns elements in any + // particular order. This is a common source of irreproducibility in + // builds. Because most file systems behave deterministically, it is + // typically hard to discover these problems without using multiple + // operating systems or file system types. 
+ // + // Enabling this option causes the virtual file system to populate + // input root directories in random order, making it easier to detect + // irreproducible build actions. Disabling this option causes input + // root directories to be populated in alphabetic order, which reduces + // storage usage for build actions that are known to be + // irreproducible. + bool shuffle_directory_listings = 3; + + // When set, hide files from directory listings that match a given + // regular expression. The presence of hidden files will not cause + // ENOTEMPTY to be returned upon directory removal. There are various + // use cases for this option: + // + // - On macOS, this feature can be used to prevent capturing of ._* + // (AppleDouble) files. + // + // - NFSv4 clients may delay unlink() operations, ensuring that opened + // files remain accessible. To prevent these files from being in the + // way when creating a new file with the same name, clients will + // rename the old file to .nfs*. This is called a "silly rename": + // https://linux-nfs.org/wiki/index.php/Server-side_silly_rename. + // This option can be used to prevent ENOTEMPTY errors when removing + // directories containing one or more silly renamed files. + // + // Recommended value: + // - macOS: ^\._|^\.nfs\.[0-9a-f]{8}\.[0-9a-f]{4}$ + // - Other platforms: unset + string hidden_files_pattern = 4; +} + +message RunnerConfiguration { + // GRPC endpoint to which to connect. + buildbarn.configuration.grpc.ClientConfiguration endpoint = 1; + + // Number of actions to run concurrently on this runner. + uint64 concurrency = 2; + + // The prefix of the instance name for which requests from clients + // should be routed to this worker. + string instance_name_prefix = 13; + + // Platform properties that need to be reported to the scheduler. + build.bazel.remote.execution.v2.Platform platform = 3; + + // The size of this worker in terms of CPU count and memory size. 
When + // all workers of this platform have the same size, it is sufficient + // to leave this field set to zero. + uint32 size_class = 12; + + // Was 'default_execution_timeout'. This value now needs to be set in + // bb_scheduler's configuration. + reserved 4; + + // Was 'maximum_execution_timeout'. This value now needs to be set in + // bb_scheduler's configuration. + reserved 5; + + // Maximum number of temporary files that may be generated by build + // actions during execution. + int64 maximum_file_pool_file_count = 6; + + // Maximum total size of all temporary files that may be generated by + // build actions during execution. + int64 maximum_file_pool_size_bytes = 7; + + // Additional fields that need to be attached to the ID of the worker, + // as announced to the scheduler. In addition to the labels specified, + // one named "thread" is added automatically in case concurrency > 1. + map<string, string> worker_id = 8; + + // Device nodes to install in input root. + // e.g. ["null", "zero", "random"] + repeated string input_root_character_device_nodes = 9; + + // Expenses that should be associated with build actions for this worker + // type. These could be representative of any recurring expenses such as + // cloud provider costs or on-prem hardware maintenance. If specified, a + // MonetaryResourceUsage message will be appended to each ActionResult's + // auxiliary_metadata. + map<string, buildbarn.resourceusage.MonetaryResourceUsage.Expense> + costs_per_second = 10; + + // Additional environment variables to set inside the runner. These + // are overridden by environment variables specified in an action. + // + // For better hermeticity, it is preferable to have the environment + // controlled by the build client, such as Bazel's --action_env.
+ // --action_env, however, has limited scope that makes it not useful + // in some scenarios: https://github.com/bazelbuild/bazel/issues/3320 + map<string, string> environment_variables = 11; + + // If set to a positive value, only allow up to a maximum number of + // consecutively executed actions on a single worker thread to report + // an infrastructure failure. Once reached, all worker threads will + // stop synchronizing against the scheduler. + // + // Test actions executed by Bazel may report infrastructure failures + // by creating the output file referenced by the + // TEST_INFRASTRUCTURE_FAILURE_FILE environment variable. Setting this + // configuration option causes bb_worker to count the number of + // consecutive actions that create this file. + // + // Note that any action that does not create this output file causes + // the counter to be reset to zero. This means that this option is + // only usable on workers that exclusively receive actions running + // Bazel test actions (i.e., ones running the "test-setup.sh" script). + // You may need to implement a custom ActionRouter for bb_scheduler to + // enforce this. + uint32 maximum_consecutive_test_infrastructure_failures = 14; +} + +message CompletedActionLoggingConfiguration { + // The gRPC endpoint to send all CompletedActions to. + buildbarn.configuration.grpc.ClientConfiguration client = 1; + + // The maximum amount of queued CompletedActions that will be stored in the + // CompletedActionLogger's internal buffer. Once maximum_send_queue_size + // CompletedActions have been accumulated, new messages will be discarded + // to prevent the worker process from exhausting memory. + uint32 maximum_send_queue_size = 2; + + // Prefix to add to the instance name that is set as part of + // CompletedAction messages that are sent to the logging server.
+ string add_instance_name_prefix = 3; +} + +message PrefetchingConfiguration { + // The File System Access Cache (FSAC) storage backend from which + // profiles are read at the start of the action's execution, and to + // which profiles are written after the action completes successfully. + buildbarn.configuration.blobstore.BlobAccessConfiguration + file_system_access_cache = 1; + + // Profiles stored in the File System Access Cache (FSAC) use a Bloom + // filter to track which files and directories in the input root are + // accessed. As Bloom filters are a probabilistic data structure, they + // may return false positives, causing the worker to prefetch too much + // data. + // + // This option determines how many bits of space should be allocated + // for each path in the input root that is accessed. Increasing this + // value reduces the probability of false positives, at the cost of + // making the Bloom filter larger and requiring more iterations per + // lookup. Generally acceptable values include: + // + // - 5 bits per path: ~10% false positive rate + // - 10 bits per path: ~1% false positive rate + // - 14 bits per path: ~0.1% false positive rate + // - 19 bits per path: ~0.01% false positive rate + // - 24 bits per path: ~0.001% false positive rate + // + // There are many web pages that provide calculators that can assist + // you to pick the right value, such as: + // + // - https://hur.st/bloomfilter/ + // - https://krisives.github.io/bloom-calculator/ + // - https://www.di-mgt.com.au/bloom-calculator.html + // + // Recommended value: 14 + uint32 bloom_filter_bits_per_path = 2; + + // The maximum size in bytes of Bloom filters stored in the File + // System Access Cache (FSAC). + // + // The size of the Bloom filter is proportional to the number of paths + // in the input root that are accessed. 
As this may put a lot of + // pressure on the FSAC for actions that process a large number of + // input files, this option can be used to place a hard limit on the + // Bloom filter's size. Though this leads to an elevated false + // positive rate, this is generally acceptable. It is unlikely that + // such actions only process a small fraction of the input root. + // + // It is recommended to set this to a power of two, as the current + // implementation always generates Bloom filters having the largest + // prime below powers of two as a size. + // + // Recommended value: 65536 + uint32 bloom_filter_maximum_size_bytes = 3; + + // The maximum number of concurrent files to read from the Content + // Addressable Storage (CAS) while prefetching. This limit applies to + // the worker process as a whole; not individual worker threads. + int64 download_concurrency = 4; +} diff --git a/pkg/proto/configuration/cas/BUILD.bazel b/pkg/proto/configuration/cas/BUILD.bazel new file mode 100644 index 0000000..c046902 --- /dev/null +++ b/pkg/proto/configuration/cas/BUILD.bazel @@ -0,0 +1,25 @@ +load("@rules_proto//proto:defs.bzl", "proto_library") +load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") + +proto_library( + name = "cas_proto", + srcs = ["cas.proto"], + visibility = ["//visibility:public"], + deps = ["@com_github_buildbarn_bb_storage//pkg/proto/configuration/eviction:eviction_proto"], +) + +go_proto_library( + name = "cas_go_proto", + importpath = "github.com/buildbarn/bb-remote-execution/pkg/proto/configuration/cas", + proto = ":cas_proto", + visibility = ["//visibility:public"], + deps = ["@com_github_buildbarn_bb_storage//pkg/proto/configuration/eviction"], +) + +go_library( + name = "cas", + embed = [":cas_go_proto"], + importpath = "github.com/buildbarn/bb-remote-execution/pkg/proto/configuration/cas", + visibility = ["//visibility:public"], +) diff --git a/pkg/proto/configuration/cas/cas.pb.go
b/pkg/proto/configuration/cas/cas.pb.go new file mode 100644 index 0000000..f76e1a9 --- /dev/null +++ b/pkg/proto/configuration/cas/cas.pb.go @@ -0,0 +1,183 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.31.0 +// protoc v4.23.4 +// source: pkg/proto/configuration/cas/cas.proto + +package cas + +import ( + eviction "github.com/buildbarn/bb-storage/pkg/proto/configuration/eviction" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type CachingDirectoryFetcherConfiguration struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + MaximumCount int64 `protobuf:"varint,1,opt,name=maximum_count,json=maximumCount,proto3" json:"maximum_count,omitempty"` + MaximumSizeBytes int64 `protobuf:"varint,2,opt,name=maximum_size_bytes,json=maximumSizeBytes,proto3" json:"maximum_size_bytes,omitempty"` + CacheReplacementPolicy eviction.CacheReplacementPolicy `protobuf:"varint,3,opt,name=cache_replacement_policy,json=cacheReplacementPolicy,proto3,enum=buildbarn.configuration.eviction.CacheReplacementPolicy" json:"cache_replacement_policy,omitempty"` +} + +func (x *CachingDirectoryFetcherConfiguration) Reset() { + *x = CachingDirectoryFetcherConfiguration{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_configuration_cas_cas_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CachingDirectoryFetcherConfiguration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CachingDirectoryFetcherConfiguration) ProtoMessage() {} + +func (x 
*CachingDirectoryFetcherConfiguration) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_configuration_cas_cas_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CachingDirectoryFetcherConfiguration.ProtoReflect.Descriptor instead. +func (*CachingDirectoryFetcherConfiguration) Descriptor() ([]byte, []int) { + return file_pkg_proto_configuration_cas_cas_proto_rawDescGZIP(), []int{0} +} + +func (x *CachingDirectoryFetcherConfiguration) GetMaximumCount() int64 { + if x != nil { + return x.MaximumCount + } + return 0 +} + +func (x *CachingDirectoryFetcherConfiguration) GetMaximumSizeBytes() int64 { + if x != nil { + return x.MaximumSizeBytes + } + return 0 +} + +func (x *CachingDirectoryFetcherConfiguration) GetCacheReplacementPolicy() eviction.CacheReplacementPolicy { + if x != nil { + return x.CacheReplacementPolicy + } + return eviction.CacheReplacementPolicy(0) +} + +var File_pkg_proto_configuration_cas_cas_proto protoreflect.FileDescriptor + +var file_pkg_proto_configuration_cas_cas_proto_rawDesc = []byte{ + 0x0a, 0x25, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x63, 0x61, 0x73, 0x2f, 0x63, 0x61, + 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1b, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, + 0x72, 0x6e, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x63, 0x61, 0x73, 0x1a, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x65, 0x76, + 0x69, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x65, 0x76, 0x69, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xed, 0x01, 0x0a, 0x24, 0x43, 0x61, 0x63, 
0x68, 0x69, 0x6e, + 0x67, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x46, 0x65, 0x74, 0x63, 0x68, 0x65, + 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, + 0x0a, 0x0d, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x43, 0x6f, + 0x75, 0x6e, 0x74, 0x12, 0x2c, 0x0a, 0x12, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x73, + 0x69, 0x7a, 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x10, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x53, 0x69, 0x7a, 0x65, 0x42, 0x79, 0x74, 0x65, + 0x73, 0x12, 0x72, 0x0a, 0x18, 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x61, + 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x38, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x65, 0x76, + 0x69, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x70, 0x6c, + 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x16, 0x63, + 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x46, 0x5a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2f, 0x62, 0x62, + 0x2d, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2d, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, + 0x6e, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x63, 0x61, 0x73, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_pkg_proto_configuration_cas_cas_proto_rawDescOnce sync.Once + 
file_pkg_proto_configuration_cas_cas_proto_rawDescData = file_pkg_proto_configuration_cas_cas_proto_rawDesc +) + +func file_pkg_proto_configuration_cas_cas_proto_rawDescGZIP() []byte { + file_pkg_proto_configuration_cas_cas_proto_rawDescOnce.Do(func() { + file_pkg_proto_configuration_cas_cas_proto_rawDescData = protoimpl.X.CompressGZIP(file_pkg_proto_configuration_cas_cas_proto_rawDescData) + }) + return file_pkg_proto_configuration_cas_cas_proto_rawDescData +} + +var file_pkg_proto_configuration_cas_cas_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_pkg_proto_configuration_cas_cas_proto_goTypes = []interface{}{ + (*CachingDirectoryFetcherConfiguration)(nil), // 0: buildbarn.configuration.cas.CachingDirectoryFetcherConfiguration + (eviction.CacheReplacementPolicy)(0), // 1: buildbarn.configuration.eviction.CacheReplacementPolicy +} +var file_pkg_proto_configuration_cas_cas_proto_depIdxs = []int32{ + 1, // 0: buildbarn.configuration.cas.CachingDirectoryFetcherConfiguration.cache_replacement_policy:type_name -> buildbarn.configuration.eviction.CacheReplacementPolicy + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_pkg_proto_configuration_cas_cas_proto_init() } +func file_pkg_proto_configuration_cas_cas_proto_init() { + if File_pkg_proto_configuration_cas_cas_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_pkg_proto_configuration_cas_cas_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CachingDirectoryFetcherConfiguration); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: 
reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_pkg_proto_configuration_cas_cas_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_pkg_proto_configuration_cas_cas_proto_goTypes, + DependencyIndexes: file_pkg_proto_configuration_cas_cas_proto_depIdxs, + MessageInfos: file_pkg_proto_configuration_cas_cas_proto_msgTypes, + }.Build() + File_pkg_proto_configuration_cas_cas_proto = out.File + file_pkg_proto_configuration_cas_cas_proto_rawDesc = nil + file_pkg_proto_configuration_cas_cas_proto_goTypes = nil + file_pkg_proto_configuration_cas_cas_proto_depIdxs = nil +} diff --git a/pkg/proto/configuration/cas/cas.proto b/pkg/proto/configuration/cas/cas.proto new file mode 100644 index 0000000..5c40a05 --- /dev/null +++ b/pkg/proto/configuration/cas/cas.proto @@ -0,0 +1,27 @@ +syntax = "proto3"; + +package buildbarn.configuration.cas; + +import "pkg/proto/configuration/eviction/eviction.proto"; + +option go_package = "github.com/buildbarn/bb-remote-execution/pkg/proto/configuration/cas"; + +message CachingDirectoryFetcherConfiguration { + // Maximum number of REv2 Directory objects to keep in memory. + int64 maximum_count = 1; + + // Maximum combined size in bytes of the REv2 Directory objects. The + // size encoded in the digest is used; not the size of the in-memory + // representation of the object. + // + // As a starting point, set this field to the value of maximum_count, + // multiplied by the expected average object size. REv2 Directory + // objects tend to be about 1 KiB in size. + int64 maximum_size_bytes = 2; + + // The cache replacement policy to use for the cache. For most setups, + // it is advised that either LEAST_RECENTLY_USED or RANDOM_REPLACEMENT + // is used. 
+ buildbarn.configuration.eviction.CacheReplacementPolicy + cache_replacement_policy = 3; +} diff --git a/pkg/proto/configuration/credentials/BUILD.bazel b/pkg/proto/configuration/credentials/BUILD.bazel new file mode 100644 index 0000000..645a800 --- /dev/null +++ b/pkg/proto/configuration/credentials/BUILD.bazel @@ -0,0 +1,23 @@ +load("@rules_proto//proto:defs.bzl", "proto_library") +load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") + +proto_library( + name = "credentials_proto", + srcs = ["credentials.proto"], + visibility = ["//visibility:public"], +) + +go_proto_library( + name = "credentials_go_proto", + importpath = "github.com/buildbarn/bb-remote-execution/pkg/proto/configuration/credentials", + proto = ":credentials_proto", + visibility = ["//visibility:public"], +) + +go_library( + name = "credentials", + embed = [":credentials_go_proto"], + importpath = "github.com/buildbarn/bb-remote-execution/pkg/proto/configuration/credentials", + visibility = ["//visibility:public"], +) diff --git a/pkg/proto/configuration/credentials/credentials.pb.go b/pkg/proto/configuration/credentials/credentials.pb.go new file mode 100644 index 0000000..20330c0 --- /dev/null +++ b/pkg/proto/configuration/credentials/credentials.pb.go @@ -0,0 +1,172 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.31.0 +// protoc v4.23.4 +// source: pkg/proto/configuration/credentials/credentials.proto + +package credentials + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type UNIXCredentialsConfiguration struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + UserId uint32 `protobuf:"varint,1,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"` + GroupId uint32 `protobuf:"varint,2,opt,name=group_id,json=groupId,proto3" json:"group_id,omitempty"` + AdditionalGroupIds []uint32 `protobuf:"varint,3,rep,packed,name=additional_group_ids,json=additionalGroupIds,proto3" json:"additional_group_ids,omitempty"` +} + +func (x *UNIXCredentialsConfiguration) Reset() { + *x = UNIXCredentialsConfiguration{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_configuration_credentials_credentials_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UNIXCredentialsConfiguration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UNIXCredentialsConfiguration) ProtoMessage() {} + +func (x *UNIXCredentialsConfiguration) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_configuration_credentials_credentials_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UNIXCredentialsConfiguration.ProtoReflect.Descriptor instead. 
+func (*UNIXCredentialsConfiguration) Descriptor() ([]byte, []int) { + return file_pkg_proto_configuration_credentials_credentials_proto_rawDescGZIP(), []int{0} +} + +func (x *UNIXCredentialsConfiguration) GetUserId() uint32 { + if x != nil { + return x.UserId + } + return 0 +} + +func (x *UNIXCredentialsConfiguration) GetGroupId() uint32 { + if x != nil { + return x.GroupId + } + return 0 +} + +func (x *UNIXCredentialsConfiguration) GetAdditionalGroupIds() []uint32 { + if x != nil { + return x.AdditionalGroupIds + } + return nil +} + +var File_pkg_proto_configuration_credentials_credentials_proto protoreflect.FileDescriptor + +var file_pkg_proto_configuration_credentials_credentials_proto_rawDesc = []byte{ + 0x0a, 0x35, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x61, 0x6c, 0x73, 0x2f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, + 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x23, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, + 0x72, 0x6e, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x22, 0x84, 0x01, 0x0a, + 0x1c, 0x55, 0x4e, 0x49, 0x58, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x17, 0x0a, + 0x07, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, + 0x75, 0x73, 0x65, 0x72, 0x49, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, + 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x49, + 0x64, 0x12, 0x30, 0x0a, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, + 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0d, 0x52, + 0x12, 
0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x47, 0x72, 0x6f, 0x75, 0x70, + 0x49, 0x64, 0x73, 0x42, 0x4e, 0x5a, 0x4c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2f, 0x62, 0x62, 0x2d, 0x72, + 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2d, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2f, + 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x61, 0x6c, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_pkg_proto_configuration_credentials_credentials_proto_rawDescOnce sync.Once + file_pkg_proto_configuration_credentials_credentials_proto_rawDescData = file_pkg_proto_configuration_credentials_credentials_proto_rawDesc +) + +func file_pkg_proto_configuration_credentials_credentials_proto_rawDescGZIP() []byte { + file_pkg_proto_configuration_credentials_credentials_proto_rawDescOnce.Do(func() { + file_pkg_proto_configuration_credentials_credentials_proto_rawDescData = protoimpl.X.CompressGZIP(file_pkg_proto_configuration_credentials_credentials_proto_rawDescData) + }) + return file_pkg_proto_configuration_credentials_credentials_proto_rawDescData +} + +var file_pkg_proto_configuration_credentials_credentials_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_pkg_proto_configuration_credentials_credentials_proto_goTypes = []interface{}{ + (*UNIXCredentialsConfiguration)(nil), // 0: buildbarn.configuration.credentials.UNIXCredentialsConfiguration +} +var file_pkg_proto_configuration_credentials_credentials_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() 
{ file_pkg_proto_configuration_credentials_credentials_proto_init() } +func file_pkg_proto_configuration_credentials_credentials_proto_init() { + if File_pkg_proto_configuration_credentials_credentials_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_pkg_proto_configuration_credentials_credentials_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UNIXCredentialsConfiguration); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_pkg_proto_configuration_credentials_credentials_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_pkg_proto_configuration_credentials_credentials_proto_goTypes, + DependencyIndexes: file_pkg_proto_configuration_credentials_credentials_proto_depIdxs, + MessageInfos: file_pkg_proto_configuration_credentials_credentials_proto_msgTypes, + }.Build() + File_pkg_proto_configuration_credentials_credentials_proto = out.File + file_pkg_proto_configuration_credentials_credentials_proto_rawDesc = nil + file_pkg_proto_configuration_credentials_credentials_proto_goTypes = nil + file_pkg_proto_configuration_credentials_credentials_proto_depIdxs = nil +} diff --git a/pkg/proto/configuration/credentials/credentials.proto b/pkg/proto/configuration/credentials/credentials.proto new file mode 100644 index 0000000..4a0f6eb --- /dev/null +++ b/pkg/proto/configuration/credentials/credentials.proto @@ -0,0 +1,16 @@ +syntax = "proto3"; + +package buildbarn.configuration.credentials; + +option go_package = "github.com/buildbarn/bb-remote-execution/pkg/proto/configuration/credentials"; + +message UNIXCredentialsConfiguration { + // The user ID that should be used to run commands. 
+ uint32 user_id = 1; + + // The primary group ID that should be used to run commands. + uint32 group_id = 2; + + // Additional groups of which the process should be a member. + repeated uint32 additional_group_ids = 3; +} diff --git a/pkg/proto/configuration/filesystem/BUILD.bazel b/pkg/proto/configuration/filesystem/BUILD.bazel new file mode 100644 index 0000000..c046902 --- /dev/null +++ b/pkg/proto/configuration/filesystem/BUILD.bazel @@ -0,0 +1,28 @@ +load("@rules_proto//proto:defs.bzl", "proto_library") +load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") + +proto_library( + name = "filesystem_proto", + srcs = ["filesystem.proto"], + visibility = ["//visibility:public"], + deps = [ + "@com_github_buildbarn_bb_storage//pkg/proto/configuration/blockdevice:blockdevice_proto", + "@com_google_protobuf//:empty_proto", + ], +) + +go_proto_library( + name = "filesystem_go_proto", + importpath = "github.com/buildbarn/bb-remote-execution/pkg/proto/configuration/filesystem", + proto = ":filesystem_proto", + visibility = ["//visibility:public"], + deps = ["@com_github_buildbarn_bb_storage//pkg/proto/configuration/blockdevice"], +) + +go_library( + name = "filesystem", + embed = [":filesystem_go_proto"], + importpath = "github.com/buildbarn/bb-remote-execution/pkg/proto/configuration/filesystem", + visibility = ["//visibility:public"], +) diff --git a/pkg/proto/configuration/filesystem/filesystem.pb.go b/pkg/proto/configuration/filesystem/filesystem.pb.go new file mode 100644 index 0000000..df43801 --- /dev/null +++ b/pkg/proto/configuration/filesystem/filesystem.pb.go @@ -0,0 +1,226 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.31.0 +// protoc v4.23.4 +// source: pkg/proto/configuration/filesystem/filesystem.proto + +package filesystem + +import ( + blockdevice "github.com/buildbarn/bb-storage/pkg/proto/configuration/blockdevice" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type FilePoolConfiguration struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Backend: + // + // *FilePoolConfiguration_InMemory + // *FilePoolConfiguration_DirectoryPath + // *FilePoolConfiguration_BlockDevice + Backend isFilePoolConfiguration_Backend `protobuf_oneof:"backend"` +} + +func (x *FilePoolConfiguration) Reset() { + *x = FilePoolConfiguration{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_configuration_filesystem_filesystem_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FilePoolConfiguration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FilePoolConfiguration) ProtoMessage() {} + +func (x *FilePoolConfiguration) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_configuration_filesystem_filesystem_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FilePoolConfiguration.ProtoReflect.Descriptor instead. 
+func (*FilePoolConfiguration) Descriptor() ([]byte, []int) { + return file_pkg_proto_configuration_filesystem_filesystem_proto_rawDescGZIP(), []int{0} +} + +func (m *FilePoolConfiguration) GetBackend() isFilePoolConfiguration_Backend { + if m != nil { + return m.Backend + } + return nil +} + +func (x *FilePoolConfiguration) GetInMemory() *emptypb.Empty { + if x, ok := x.GetBackend().(*FilePoolConfiguration_InMemory); ok { + return x.InMemory + } + return nil +} + +func (x *FilePoolConfiguration) GetDirectoryPath() string { + if x, ok := x.GetBackend().(*FilePoolConfiguration_DirectoryPath); ok { + return x.DirectoryPath + } + return "" +} + +func (x *FilePoolConfiguration) GetBlockDevice() *blockdevice.Configuration { + if x, ok := x.GetBackend().(*FilePoolConfiguration_BlockDevice); ok { + return x.BlockDevice + } + return nil +} + +type isFilePoolConfiguration_Backend interface { + isFilePoolConfiguration_Backend() +} + +type FilePoolConfiguration_InMemory struct { + InMemory *emptypb.Empty `protobuf:"bytes,1,opt,name=in_memory,json=inMemory,proto3,oneof"` +} + +type FilePoolConfiguration_DirectoryPath struct { + DirectoryPath string `protobuf:"bytes,2,opt,name=directory_path,json=directoryPath,proto3,oneof"` +} + +type FilePoolConfiguration_BlockDevice struct { + BlockDevice *blockdevice.Configuration `protobuf:"bytes,3,opt,name=block_device,json=blockDevice,proto3,oneof"` +} + +func (*FilePoolConfiguration_InMemory) isFilePoolConfiguration_Backend() {} + +func (*FilePoolConfiguration_DirectoryPath) isFilePoolConfiguration_Backend() {} + +func (*FilePoolConfiguration_BlockDevice) isFilePoolConfiguration_Backend() {} + +var File_pkg_proto_configuration_filesystem_filesystem_proto protoreflect.FileDescriptor + +var file_pkg_proto_configuration_filesystem_filesystem_proto_rawDesc = []byte{ + 0x0a, 0x33, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x66, 0x69, 0x6c, 
0x65, 0x73, 0x79, + 0x73, 0x74, 0x65, 0x6d, 0x2f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x22, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x66, + 0x69, 0x6c, 0x65, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x35, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, + 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x64, 0x65, 0x76, 0x69, 0x63, 0x65, 0x2f, 0x62, 0x6c, 0x6f, 0x63, + 0x6b, 0x64, 0x65, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xdb, 0x01, + 0x0a, 0x15, 0x46, 0x69, 0x6c, 0x65, 0x50, 0x6f, 0x6f, 0x6c, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x35, 0x0a, 0x09, 0x69, 0x6e, 0x5f, 0x6d, 0x65, + 0x6d, 0x6f, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, + 0x74, 0x79, 0x48, 0x00, 0x52, 0x08, 0x69, 0x6e, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x12, 0x27, + 0x0a, 0x0e, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x5f, 0x70, 0x61, 0x74, 0x68, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0d, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, + 0x6f, 0x72, 0x79, 0x50, 0x61, 0x74, 0x68, 0x12, 0x57, 0x0a, 0x0c, 0x62, 0x6c, 0x6f, 0x63, 0x6b, + 0x5f, 0x64, 0x65, 0x76, 0x69, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, + 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x64, 0x65, 0x76, + 0x69, 0x63, 0x65, 
0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x48, 0x00, 0x52, 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, + 0x42, 0x09, 0x0a, 0x07, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x42, 0x4d, 0x5a, 0x4b, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, + 0x61, 0x72, 0x6e, 0x2f, 0x62, 0x62, 0x2d, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2d, 0x65, 0x78, + 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, + 0x66, 0x69, 0x6c, 0x65, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_pkg_proto_configuration_filesystem_filesystem_proto_rawDescOnce sync.Once + file_pkg_proto_configuration_filesystem_filesystem_proto_rawDescData = file_pkg_proto_configuration_filesystem_filesystem_proto_rawDesc +) + +func file_pkg_proto_configuration_filesystem_filesystem_proto_rawDescGZIP() []byte { + file_pkg_proto_configuration_filesystem_filesystem_proto_rawDescOnce.Do(func() { + file_pkg_proto_configuration_filesystem_filesystem_proto_rawDescData = protoimpl.X.CompressGZIP(file_pkg_proto_configuration_filesystem_filesystem_proto_rawDescData) + }) + return file_pkg_proto_configuration_filesystem_filesystem_proto_rawDescData +} + +var file_pkg_proto_configuration_filesystem_filesystem_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_pkg_proto_configuration_filesystem_filesystem_proto_goTypes = []interface{}{ + (*FilePoolConfiguration)(nil), // 0: buildbarn.configuration.filesystem.FilePoolConfiguration + (*emptypb.Empty)(nil), // 1: google.protobuf.Empty + (*blockdevice.Configuration)(nil), // 2: buildbarn.configuration.blockdevice.Configuration +} +var file_pkg_proto_configuration_filesystem_filesystem_proto_depIdxs = []int32{ + 1, // 0: 
buildbarn.configuration.filesystem.FilePoolConfiguration.in_memory:type_name -> google.protobuf.Empty + 2, // 1: buildbarn.configuration.filesystem.FilePoolConfiguration.block_device:type_name -> buildbarn.configuration.blockdevice.Configuration + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_pkg_proto_configuration_filesystem_filesystem_proto_init() } +func file_pkg_proto_configuration_filesystem_filesystem_proto_init() { + if File_pkg_proto_configuration_filesystem_filesystem_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_pkg_proto_configuration_filesystem_filesystem_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FilePoolConfiguration); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_pkg_proto_configuration_filesystem_filesystem_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*FilePoolConfiguration_InMemory)(nil), + (*FilePoolConfiguration_DirectoryPath)(nil), + (*FilePoolConfiguration_BlockDevice)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_pkg_proto_configuration_filesystem_filesystem_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_pkg_proto_configuration_filesystem_filesystem_proto_goTypes, + DependencyIndexes: file_pkg_proto_configuration_filesystem_filesystem_proto_depIdxs, + MessageInfos: file_pkg_proto_configuration_filesystem_filesystem_proto_msgTypes, + }.Build() + File_pkg_proto_configuration_filesystem_filesystem_proto = out.File + 
file_pkg_proto_configuration_filesystem_filesystem_proto_rawDesc = nil + file_pkg_proto_configuration_filesystem_filesystem_proto_goTypes = nil + file_pkg_proto_configuration_filesystem_filesystem_proto_depIdxs = nil +} diff --git a/pkg/proto/configuration/filesystem/filesystem.proto b/pkg/proto/configuration/filesystem/filesystem.proto new file mode 100644 index 0000000..e16c453 --- /dev/null +++ b/pkg/proto/configuration/filesystem/filesystem.proto @@ -0,0 +1,23 @@ +syntax = "proto3"; + +package buildbarn.configuration.filesystem; + +import "google/protobuf/empty.proto"; +import "pkg/proto/configuration/blockdevice/blockdevice.proto"; + +option go_package = "github.com/buildbarn/bb-remote-execution/pkg/proto/configuration/filesystem"; + +message FilePoolConfiguration { + oneof backend { + // Store all temporary files in memory. + google.protobuf.Empty in_memory = 1; + + // Store all temporary files in a single directory on a file system. + // This option denotes the path of this directory. + string directory_path = 2; + + // Store all temporary files in a single file on a file system or on + // a raw block device. 
+ buildbarn.configuration.blockdevice.Configuration block_device = 3; + } +} diff --git a/pkg/proto/configuration/filesystem/virtual/BUILD.bazel b/pkg/proto/configuration/filesystem/virtual/BUILD.bazel new file mode 100644 index 0000000..e26c84d --- /dev/null +++ b/pkg/proto/configuration/filesystem/virtual/BUILD.bazel @@ -0,0 +1,28 @@ +load("@rules_proto//proto:defs.bzl", "proto_library") +load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") + +proto_library( + name = "virtual_proto", + srcs = ["virtual.proto"], + visibility = ["//visibility:public"], + deps = [ + "@com_github_buildbarn_bb_storage//pkg/proto/configuration/eviction:eviction_proto", + "@com_google_protobuf//:duration_proto", + ], +) + +go_proto_library( + name = "virtual_go_proto", + importpath = "github.com/buildbarn/bb-remote-execution/pkg/proto/configuration/filesystem/virtual", + proto = ":virtual_proto", + visibility = ["//visibility:public"], + deps = ["@com_github_buildbarn_bb_storage//pkg/proto/configuration/eviction"], +) + +go_library( + name = "virtual", + embed = [":virtual_go_proto"], + importpath = "github.com/buildbarn/bb-remote-execution/pkg/proto/configuration/filesystem/virtual", + visibility = ["//visibility:public"], +) diff --git a/pkg/proto/configuration/filesystem/virtual/virtual.pb.go b/pkg/proto/configuration/filesystem/virtual/virtual.pb.go new file mode 100644 index 0000000..0dc13a4 --- /dev/null +++ b/pkg/proto/configuration/filesystem/virtual/virtual.pb.go @@ -0,0 +1,663 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.31.0 +// protoc v4.23.4 +// source: pkg/proto/configuration/filesystem/virtual/virtual.proto + +package virtual + +import ( + eviction "github.com/buildbarn/bb-storage/pkg/proto/configuration/eviction" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type MountConfiguration struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + MountPath string `protobuf:"bytes,1,opt,name=mount_path,json=mountPath,proto3" json:"mount_path,omitempty"` + // Types that are assignable to Backend: + // + // *MountConfiguration_Fuse + // *MountConfiguration_Nfsv4 + Backend isMountConfiguration_Backend `protobuf_oneof:"backend"` +} + +func (x *MountConfiguration) Reset() { + *x = MountConfiguration{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_configuration_filesystem_virtual_virtual_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MountConfiguration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MountConfiguration) ProtoMessage() {} + +func (x *MountConfiguration) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_configuration_filesystem_virtual_virtual_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MountConfiguration.ProtoReflect.Descriptor 
instead. +func (*MountConfiguration) Descriptor() ([]byte, []int) { + return file_pkg_proto_configuration_filesystem_virtual_virtual_proto_rawDescGZIP(), []int{0} +} + +func (x *MountConfiguration) GetMountPath() string { + if x != nil { + return x.MountPath + } + return "" +} + +func (m *MountConfiguration) GetBackend() isMountConfiguration_Backend { + if m != nil { + return m.Backend + } + return nil +} + +func (x *MountConfiguration) GetFuse() *FUSEMountConfiguration { + if x, ok := x.GetBackend().(*MountConfiguration_Fuse); ok { + return x.Fuse + } + return nil +} + +func (x *MountConfiguration) GetNfsv4() *NFSv4MountConfiguration { + if x, ok := x.GetBackend().(*MountConfiguration_Nfsv4); ok { + return x.Nfsv4 + } + return nil +} + +type isMountConfiguration_Backend interface { + isMountConfiguration_Backend() +} + +type MountConfiguration_Fuse struct { + Fuse *FUSEMountConfiguration `protobuf:"bytes,2,opt,name=fuse,proto3,oneof"` +} + +type MountConfiguration_Nfsv4 struct { + Nfsv4 *NFSv4MountConfiguration `protobuf:"bytes,3,opt,name=nfsv4,proto3,oneof"` +} + +func (*MountConfiguration_Fuse) isMountConfiguration_Backend() {} + +func (*MountConfiguration_Nfsv4) isMountConfiguration_Backend() {} + +type FUSEMountConfiguration struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + DirectoryEntryValidity *durationpb.Duration `protobuf:"bytes,2,opt,name=directory_entry_validity,json=directoryEntryValidity,proto3" json:"directory_entry_validity,omitempty"` + InodeAttributeValidity *durationpb.Duration `protobuf:"bytes,3,opt,name=inode_attribute_validity,json=inodeAttributeValidity,proto3" json:"inode_attribute_validity,omitempty"` + AllowOther bool `protobuf:"varint,6,opt,name=allow_other,json=allowOther,proto3" json:"allow_other,omitempty"` + DirectMount bool `protobuf:"varint,7,opt,name=direct_mount,json=directMount,proto3" json:"direct_mount,omitempty"` + 
InHeaderAuthenticationMetadataJmespathExpression string `protobuf:"bytes,8,opt,name=in_header_authentication_metadata_jmespath_expression,json=inHeaderAuthenticationMetadataJmespathExpression,proto3" json:"in_header_authentication_metadata_jmespath_expression,omitempty"` + LinuxBackingDevInfoTunables map[string]string `protobuf:"bytes,9,rep,name=linux_backing_dev_info_tunables,json=linuxBackingDevInfoTunables,proto3" json:"linux_backing_dev_info_tunables,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *FUSEMountConfiguration) Reset() { + *x = FUSEMountConfiguration{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_configuration_filesystem_virtual_virtual_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FUSEMountConfiguration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FUSEMountConfiguration) ProtoMessage() {} + +func (x *FUSEMountConfiguration) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_configuration_filesystem_virtual_virtual_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FUSEMountConfiguration.ProtoReflect.Descriptor instead. 
+func (*FUSEMountConfiguration) Descriptor() ([]byte, []int) { + return file_pkg_proto_configuration_filesystem_virtual_virtual_proto_rawDescGZIP(), []int{1} +} + +func (x *FUSEMountConfiguration) GetDirectoryEntryValidity() *durationpb.Duration { + if x != nil { + return x.DirectoryEntryValidity + } + return nil +} + +func (x *FUSEMountConfiguration) GetInodeAttributeValidity() *durationpb.Duration { + if x != nil { + return x.InodeAttributeValidity + } + return nil +} + +func (x *FUSEMountConfiguration) GetAllowOther() bool { + if x != nil { + return x.AllowOther + } + return false +} + +func (x *FUSEMountConfiguration) GetDirectMount() bool { + if x != nil { + return x.DirectMount + } + return false +} + +func (x *FUSEMountConfiguration) GetInHeaderAuthenticationMetadataJmespathExpression() string { + if x != nil { + return x.InHeaderAuthenticationMetadataJmespathExpression + } + return "" +} + +func (x *FUSEMountConfiguration) GetLinuxBackingDevInfoTunables() map[string]string { + if x != nil { + return x.LinuxBackingDevInfoTunables + } + return nil +} + +type NFSv4MountConfiguration struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to OperatingSystem: + // + // *NFSv4MountConfiguration_Darwin + OperatingSystem isNFSv4MountConfiguration_OperatingSystem `protobuf_oneof:"operating_system"` + EnforcedLeaseTime *durationpb.Duration `protobuf:"bytes,2,opt,name=enforced_lease_time,json=enforcedLeaseTime,proto3" json:"enforced_lease_time,omitempty"` + AnnouncedLeaseTime *durationpb.Duration `protobuf:"bytes,3,opt,name=announced_lease_time,json=announcedLeaseTime,proto3" json:"announced_lease_time,omitempty"` + SystemAuthentication *RPCv2SystemAuthenticationConfiguration `protobuf:"bytes,4,opt,name=system_authentication,json=systemAuthentication,proto3" json:"system_authentication,omitempty"` +} + +func (x *NFSv4MountConfiguration) Reset() { + *x = NFSv4MountConfiguration{} + 
if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_configuration_filesystem_virtual_virtual_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NFSv4MountConfiguration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NFSv4MountConfiguration) ProtoMessage() {} + +func (x *NFSv4MountConfiguration) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_configuration_filesystem_virtual_virtual_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NFSv4MountConfiguration.ProtoReflect.Descriptor instead. +func (*NFSv4MountConfiguration) Descriptor() ([]byte, []int) { + return file_pkg_proto_configuration_filesystem_virtual_virtual_proto_rawDescGZIP(), []int{2} +} + +func (m *NFSv4MountConfiguration) GetOperatingSystem() isNFSv4MountConfiguration_OperatingSystem { + if m != nil { + return m.OperatingSystem + } + return nil +} + +func (x *NFSv4MountConfiguration) GetDarwin() *NFSv4DarwinMountConfiguration { + if x, ok := x.GetOperatingSystem().(*NFSv4MountConfiguration_Darwin); ok { + return x.Darwin + } + return nil +} + +func (x *NFSv4MountConfiguration) GetEnforcedLeaseTime() *durationpb.Duration { + if x != nil { + return x.EnforcedLeaseTime + } + return nil +} + +func (x *NFSv4MountConfiguration) GetAnnouncedLeaseTime() *durationpb.Duration { + if x != nil { + return x.AnnouncedLeaseTime + } + return nil +} + +func (x *NFSv4MountConfiguration) GetSystemAuthentication() *RPCv2SystemAuthenticationConfiguration { + if x != nil { + return x.SystemAuthentication + } + return nil +} + +type isNFSv4MountConfiguration_OperatingSystem interface { + isNFSv4MountConfiguration_OperatingSystem() +} + +type NFSv4MountConfiguration_Darwin struct { + Darwin *NFSv4DarwinMountConfiguration 
`protobuf:"bytes,1,opt,name=darwin,proto3,oneof"` +} + +func (*NFSv4MountConfiguration_Darwin) isNFSv4MountConfiguration_OperatingSystem() {} + +type NFSv4DarwinMountConfiguration struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SocketPath string `protobuf:"bytes,1,opt,name=socket_path,json=socketPath,proto3" json:"socket_path,omitempty"` +} + +func (x *NFSv4DarwinMountConfiguration) Reset() { + *x = NFSv4DarwinMountConfiguration{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_configuration_filesystem_virtual_virtual_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NFSv4DarwinMountConfiguration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NFSv4DarwinMountConfiguration) ProtoMessage() {} + +func (x *NFSv4DarwinMountConfiguration) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_configuration_filesystem_virtual_virtual_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NFSv4DarwinMountConfiguration.ProtoReflect.Descriptor instead. 
+func (*NFSv4DarwinMountConfiguration) Descriptor() ([]byte, []int) { + return file_pkg_proto_configuration_filesystem_virtual_virtual_proto_rawDescGZIP(), []int{3} +} + +func (x *NFSv4DarwinMountConfiguration) GetSocketPath() string { + if x != nil { + return x.SocketPath + } + return "" +} + +type RPCv2SystemAuthenticationConfiguration struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + MetadataJmespathExpression string `protobuf:"bytes,1,opt,name=metadata_jmespath_expression,json=metadataJmespathExpression,proto3" json:"metadata_jmespath_expression,omitempty"` + MaximumCacheSize int32 `protobuf:"varint,2,opt,name=maximum_cache_size,json=maximumCacheSize,proto3" json:"maximum_cache_size,omitempty"` + CacheReplacementPolicy eviction.CacheReplacementPolicy `protobuf:"varint,3,opt,name=cache_replacement_policy,json=cacheReplacementPolicy,proto3,enum=buildbarn.configuration.eviction.CacheReplacementPolicy" json:"cache_replacement_policy,omitempty"` +} + +func (x *RPCv2SystemAuthenticationConfiguration) Reset() { + *x = RPCv2SystemAuthenticationConfiguration{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_configuration_filesystem_virtual_virtual_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RPCv2SystemAuthenticationConfiguration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RPCv2SystemAuthenticationConfiguration) ProtoMessage() {} + +func (x *RPCv2SystemAuthenticationConfiguration) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_configuration_filesystem_virtual_virtual_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RPCv2SystemAuthenticationConfiguration.ProtoReflect.Descriptor instead. 
+func (*RPCv2SystemAuthenticationConfiguration) Descriptor() ([]byte, []int) { + return file_pkg_proto_configuration_filesystem_virtual_virtual_proto_rawDescGZIP(), []int{4} +} + +func (x *RPCv2SystemAuthenticationConfiguration) GetMetadataJmespathExpression() string { + if x != nil { + return x.MetadataJmespathExpression + } + return "" +} + +func (x *RPCv2SystemAuthenticationConfiguration) GetMaximumCacheSize() int32 { + if x != nil { + return x.MaximumCacheSize + } + return 0 +} + +func (x *RPCv2SystemAuthenticationConfiguration) GetCacheReplacementPolicy() eviction.CacheReplacementPolicy { + if x != nil { + return x.CacheReplacementPolicy + } + return eviction.CacheReplacementPolicy(0) +} + +var File_pkg_proto_configuration_filesystem_virtual_virtual_proto protoreflect.FileDescriptor + +var file_pkg_proto_configuration_filesystem_virtual_virtual_proto_rawDesc = []byte{ + 0x0a, 0x38, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x79, + 0x73, 0x74, 0x65, 0x6d, 0x2f, 0x76, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x2f, 0x76, 0x69, 0x72, + 0x74, 0x75, 0x61, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x2a, 0x62, 0x75, 0x69, 0x6c, + 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x2e, 0x76, + 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, + 0x65, 0x76, 0x69, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x65, 0x76, 0x69, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x2e, 0x70, 0x72, 0x6f, 
0x74, 0x6f, 0x22, 0xf5, 0x01, 0x0a, 0x12, 0x4d, 0x6f, 0x75, 0x6e, + 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1d, + 0x0a, 0x0a, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x09, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x50, 0x61, 0x74, 0x68, 0x12, 0x58, 0x0a, + 0x04, 0x66, 0x75, 0x73, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x62, 0x75, + 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, + 0x2e, 0x76, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x2e, 0x46, 0x55, 0x53, 0x45, 0x4d, 0x6f, 0x75, + 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, + 0x00, 0x52, 0x04, 0x66, 0x75, 0x73, 0x65, 0x12, 0x5b, 0x0a, 0x05, 0x6e, 0x66, 0x73, 0x76, 0x34, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x43, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, + 0x72, 0x6e, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x2e, 0x76, 0x69, 0x72, 0x74, + 0x75, 0x61, 0x6c, 0x2e, 0x4e, 0x46, 0x53, 0x76, 0x34, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x05, 0x6e, + 0x66, 0x73, 0x76, 0x34, 0x42, 0x09, 0x0a, 0x07, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x22, + 0xff, 0x04, 0x0a, 0x16, 0x46, 0x55, 0x53, 0x45, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x53, 0x0a, 0x18, 0x64, 0x69, + 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x5f, 0x76, 0x61, + 0x6c, 0x69, 0x64, 0x69, 0x74, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 
0x62, 0x75, 0x66, 0x2e, 0x44, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x16, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, + 0x72, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x69, 0x74, 0x79, 0x12, + 0x53, 0x0a, 0x18, 0x69, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, + 0x74, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x16, 0x69, 0x6e, + 0x6f, 0x64, 0x65, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x69, + 0x64, 0x69, 0x74, 0x79, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x6f, 0x74, + 0x68, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x6c, 0x6c, 0x6f, 0x77, + 0x4f, 0x74, 0x68, 0x65, 0x72, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x5f, + 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x64, 0x69, 0x72, + 0x65, 0x63, 0x74, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x6f, 0x0a, 0x35, 0x69, 0x6e, 0x5f, 0x68, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6a, 0x6d, + 0x65, 0x73, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, + 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x30, 0x69, 0x6e, 0x48, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4a, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x74, 0x68, 0x45, + 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0xa9, 0x01, 0x0a, 0x1f, 0x6c, 0x69, + 0x6e, 0x75, 0x78, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x76, 0x5f, + 0x69, 
0x6e, 0x66, 0x6f, 0x5f, 0x74, 0x75, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x09, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x63, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x66, 0x69, + 0x6c, 0x65, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x2e, 0x76, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, + 0x2e, 0x46, 0x55, 0x53, 0x45, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x42, 0x61, 0x63, + 0x6b, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x76, 0x49, 0x6e, 0x66, 0x6f, 0x54, 0x75, 0x6e, 0x61, 0x62, + 0x6c, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x1b, 0x6c, 0x69, 0x6e, 0x75, 0x78, 0x42, + 0x61, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x76, 0x49, 0x6e, 0x66, 0x6f, 0x54, 0x75, 0x6e, + 0x61, 0x62, 0x6c, 0x65, 0x73, 0x1a, 0x4e, 0x0a, 0x20, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x42, 0x61, + 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x76, 0x49, 0x6e, 0x66, 0x6f, 0x54, 0x75, 0x6e, 0x61, + 0x62, 0x6c, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, + 0x06, 0x22, 0xb4, 0x03, 0x0a, 0x17, 0x4e, 0x46, 0x53, 0x76, 0x34, 0x4d, 0x6f, 0x75, 0x6e, 0x74, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x63, 0x0a, + 0x06, 0x64, 0x61, 0x72, 0x77, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x49, 0x2e, + 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x79, 0x73, 0x74, + 0x65, 0x6d, 0x2e, 0x76, 0x69, 0x72, 0x74, 
0x75, 0x61, 0x6c, 0x2e, 0x4e, 0x46, 0x53, 0x76, 0x34, + 0x44, 0x61, 0x72, 0x77, 0x69, 0x6e, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x06, 0x64, 0x61, 0x72, 0x77, + 0x69, 0x6e, 0x12, 0x49, 0x0a, 0x13, 0x65, 0x6e, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x5f, 0x6c, + 0x65, 0x61, 0x73, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x65, 0x6e, 0x66, 0x6f, + 0x72, 0x63, 0x65, 0x64, 0x4c, 0x65, 0x61, 0x73, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x4b, 0x0a, + 0x14, 0x61, 0x6e, 0x6e, 0x6f, 0x75, 0x6e, 0x63, 0x65, 0x64, 0x5f, 0x6c, 0x65, 0x61, 0x73, 0x65, + 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x12, 0x61, 0x6e, 0x6e, 0x6f, 0x75, 0x6e, 0x63, 0x65, + 0x64, 0x4c, 0x65, 0x61, 0x73, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x87, 0x01, 0x0a, 0x15, 0x73, + 0x79, 0x73, 0x74, 0x65, 0x6d, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x52, 0x2e, 0x62, 0x75, 0x69, + 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x2e, + 0x76, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x2e, 0x52, 0x50, 0x43, 0x76, 0x32, 0x53, 0x79, 0x73, + 0x74, 0x65, 0x6d, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x14, + 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 
0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x12, 0x0a, 0x10, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6e, + 0x67, 0x5f, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x22, 0x4c, 0x0a, 0x1d, 0x4e, 0x46, 0x53, 0x76, + 0x34, 0x44, 0x61, 0x72, 0x77, 0x69, 0x6e, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x63, + 0x6b, 0x65, 0x74, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, + 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x50, 0x61, 0x74, 0x68, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, + 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x22, 0x8c, 0x02, 0x0a, 0x26, 0x52, 0x50, 0x43, 0x76, 0x32, + 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x40, 0x0a, 0x1c, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6a, 0x6d, + 0x65, 0x73, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, + 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1a, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x4a, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x74, 0x68, 0x45, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, + 0x69, 0x6f, 0x6e, 0x12, 0x2c, 0x0a, 0x12, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x63, + 0x61, 0x63, 0x68, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x10, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x43, 0x61, 0x63, 0x68, 0x65, 0x53, 0x69, 0x7a, + 0x65, 0x12, 0x72, 0x0a, 0x18, 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x61, + 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x38, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x65, 0x76, + 0x69, 0x63, 0x74, 
0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x70, 0x6c, + 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x16, 0x63, + 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x55, 0x5a, 0x53, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2f, 0x62, 0x62, + 0x2d, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2d, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, + 0x6e, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x79, + 0x73, 0x74, 0x65, 0x6d, 0x2f, 0x76, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_pkg_proto_configuration_filesystem_virtual_virtual_proto_rawDescOnce sync.Once + file_pkg_proto_configuration_filesystem_virtual_virtual_proto_rawDescData = file_pkg_proto_configuration_filesystem_virtual_virtual_proto_rawDesc +) + +func file_pkg_proto_configuration_filesystem_virtual_virtual_proto_rawDescGZIP() []byte { + file_pkg_proto_configuration_filesystem_virtual_virtual_proto_rawDescOnce.Do(func() { + file_pkg_proto_configuration_filesystem_virtual_virtual_proto_rawDescData = protoimpl.X.CompressGZIP(file_pkg_proto_configuration_filesystem_virtual_virtual_proto_rawDescData) + }) + return file_pkg_proto_configuration_filesystem_virtual_virtual_proto_rawDescData +} + +var file_pkg_proto_configuration_filesystem_virtual_virtual_proto_msgTypes = make([]protoimpl.MessageInfo, 6) +var file_pkg_proto_configuration_filesystem_virtual_virtual_proto_goTypes = []interface{}{ + (*MountConfiguration)(nil), // 0: buildbarn.configuration.filesystem.virtual.MountConfiguration + (*FUSEMountConfiguration)(nil), // 1: buildbarn.configuration.filesystem.virtual.FUSEMountConfiguration + 
(*NFSv4MountConfiguration)(nil), // 2: buildbarn.configuration.filesystem.virtual.NFSv4MountConfiguration + (*NFSv4DarwinMountConfiguration)(nil), // 3: buildbarn.configuration.filesystem.virtual.NFSv4DarwinMountConfiguration + (*RPCv2SystemAuthenticationConfiguration)(nil), // 4: buildbarn.configuration.filesystem.virtual.RPCv2SystemAuthenticationConfiguration + nil, // 5: buildbarn.configuration.filesystem.virtual.FUSEMountConfiguration.LinuxBackingDevInfoTunablesEntry + (*durationpb.Duration)(nil), // 6: google.protobuf.Duration + (eviction.CacheReplacementPolicy)(0), // 7: buildbarn.configuration.eviction.CacheReplacementPolicy +} +var file_pkg_proto_configuration_filesystem_virtual_virtual_proto_depIdxs = []int32{ + 1, // 0: buildbarn.configuration.filesystem.virtual.MountConfiguration.fuse:type_name -> buildbarn.configuration.filesystem.virtual.FUSEMountConfiguration + 2, // 1: buildbarn.configuration.filesystem.virtual.MountConfiguration.nfsv4:type_name -> buildbarn.configuration.filesystem.virtual.NFSv4MountConfiguration + 6, // 2: buildbarn.configuration.filesystem.virtual.FUSEMountConfiguration.directory_entry_validity:type_name -> google.protobuf.Duration + 6, // 3: buildbarn.configuration.filesystem.virtual.FUSEMountConfiguration.inode_attribute_validity:type_name -> google.protobuf.Duration + 5, // 4: buildbarn.configuration.filesystem.virtual.FUSEMountConfiguration.linux_backing_dev_info_tunables:type_name -> buildbarn.configuration.filesystem.virtual.FUSEMountConfiguration.LinuxBackingDevInfoTunablesEntry + 3, // 5: buildbarn.configuration.filesystem.virtual.NFSv4MountConfiguration.darwin:type_name -> buildbarn.configuration.filesystem.virtual.NFSv4DarwinMountConfiguration + 6, // 6: buildbarn.configuration.filesystem.virtual.NFSv4MountConfiguration.enforced_lease_time:type_name -> google.protobuf.Duration + 6, // 7: buildbarn.configuration.filesystem.virtual.NFSv4MountConfiguration.announced_lease_time:type_name -> google.protobuf.Duration + 4, // 
8: buildbarn.configuration.filesystem.virtual.NFSv4MountConfiguration.system_authentication:type_name -> buildbarn.configuration.filesystem.virtual.RPCv2SystemAuthenticationConfiguration + 7, // 9: buildbarn.configuration.filesystem.virtual.RPCv2SystemAuthenticationConfiguration.cache_replacement_policy:type_name -> buildbarn.configuration.eviction.CacheReplacementPolicy + 10, // [10:10] is the sub-list for method output_type + 10, // [10:10] is the sub-list for method input_type + 10, // [10:10] is the sub-list for extension type_name + 10, // [10:10] is the sub-list for extension extendee + 0, // [0:10] is the sub-list for field type_name +} + +func init() { file_pkg_proto_configuration_filesystem_virtual_virtual_proto_init() } +func file_pkg_proto_configuration_filesystem_virtual_virtual_proto_init() { + if File_pkg_proto_configuration_filesystem_virtual_virtual_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_pkg_proto_configuration_filesystem_virtual_virtual_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MountConfiguration); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_proto_configuration_filesystem_virtual_virtual_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FUSEMountConfiguration); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_proto_configuration_filesystem_virtual_virtual_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NFSv4MountConfiguration); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_proto_configuration_filesystem_virtual_virtual_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*NFSv4DarwinMountConfiguration); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_proto_configuration_filesystem_virtual_virtual_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RPCv2SystemAuthenticationConfiguration); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_pkg_proto_configuration_filesystem_virtual_virtual_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*MountConfiguration_Fuse)(nil), + (*MountConfiguration_Nfsv4)(nil), + } + file_pkg_proto_configuration_filesystem_virtual_virtual_proto_msgTypes[2].OneofWrappers = []interface{}{ + (*NFSv4MountConfiguration_Darwin)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_pkg_proto_configuration_filesystem_virtual_virtual_proto_rawDesc, + NumEnums: 0, + NumMessages: 6, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_pkg_proto_configuration_filesystem_virtual_virtual_proto_goTypes, + DependencyIndexes: file_pkg_proto_configuration_filesystem_virtual_virtual_proto_depIdxs, + MessageInfos: file_pkg_proto_configuration_filesystem_virtual_virtual_proto_msgTypes, + }.Build() + File_pkg_proto_configuration_filesystem_virtual_virtual_proto = out.File + file_pkg_proto_configuration_filesystem_virtual_virtual_proto_rawDesc = nil + file_pkg_proto_configuration_filesystem_virtual_virtual_proto_goTypes = nil + file_pkg_proto_configuration_filesystem_virtual_virtual_proto_depIdxs = nil +} diff --git a/pkg/proto/configuration/filesystem/virtual/virtual.proto b/pkg/proto/configuration/filesystem/virtual/virtual.proto new file mode 100644 index 0000000..a5d440e --- /dev/null +++ b/pkg/proto/configuration/filesystem/virtual/virtual.proto @@ -0,0 +1,223 @@ +syntax = "proto3"; + 
+package buildbarn.configuration.filesystem.virtual; + +import "google/protobuf/duration.proto"; +import "pkg/proto/configuration/eviction/eviction.proto"; + +option go_package = "github.com/buildbarn/bb-remote-execution/pkg/proto/configuration/filesystem/virtual"; + +message MountConfiguration { + // Path where the mount needs to be created. + string mount_path = 1; + + oneof backend { + // Use the kernel's FUSE driver to expose the mount. + // + // This option is supported on Linux and macOS, though its use is + // only recommended on Linux. On macOS, this option requires the + // OSXFUSE or macFUSE kernel extension to be installed. This kernel + // extension tends to cause system lockups under high load. + FUSEMountConfiguration fuse = 2; + + // Run an in-process NFSv4 server and use the kernel's NFSv4 client + // to expose the mount. This option is currently only supported on + // macOS Ventura 13.3 (22E252) or later. + // + // The NFS server is expected to conform to NFSv4.0 (RFC 7530). + // Features provided by newer versions of the protocol, such as + // NFSv4.1 (RFC 8881) and NFSv4.2 (RFC 7862), are not supported at + // this time. macOS also does not support the latter. + NFSv4MountConfiguration nfsv4 = 3; + } +} + +message FUSEMountConfiguration { + // The amount of time the kernel is permitted to cache directory + // entries. When left unset, the kernel is not permitted to cache this + // data at all, causing it to issue more LOOKUP requests. + // + // Because bb_worker is capable of sending NOTIFY_ENTRY messages to + // the kernel to actively invalidate directory entries, it is + // generally safe to turn on directory entry caching. For bb_clientd + // it is also safe to enable this option, as directory contents are + // immutable. Not all implementations may respect this option. + // + // Recommended value: 300s + google.protobuf.Duration directory_entry_validity = 2; + + // The amount of time the kernel is permitted to cache inode + // attributes. 
When left unset, the kernel is not permitted to cache this + // data at all, causing it to issue more GETATTR requests. + // + // Because files placed in the FUSE file system by bb_worker itself + // are immutable, it is generally safe to turn on inode attribute + // caching. For bb_clientd it is also safe to enable this option, as + // files are immutable. Not all implementations may respect this + // option. + // + // Recommended value: 300s + google.protobuf.Duration inode_attribute_validity = 3; + + // Was 'maximum_dirty_pages_percentage'. Use + // 'linux_backing_dev_info_tunables' instead. + reserved 4; + + // Was 'shuffle_directory_listings'. This option has been promoted up to + // bb_worker's VirtualBuildDirectoryConfiguration. + reserved 5; + + // Provide the "allow_other" mount option, thereby allowing other + // users on the system to access the FUSE mount point as well. + // + // This option needs to be enabled if bb_worker is configured to use + // privilege separation, as build actions wouldn't be able to access + // their build directory otherwise. + // + // This option may be useful for bb_clientd in case files need to be + // accessible to debugging tools that require administrative + // privileges. Care should be taken that other access controls (e.g., + // permissions on parent directories) are in place to prevent + // unintended access to the mount point. + bool allow_other = 6; + + // If set, the FUSE mount is created by calling mount() directly, as + // opposed to invoking the fusermount utility. This option needs to be + // enabled in environments where the fusermount utility is not + // available, such as the bb_worker container images. + bool direct_mount = 7; + + // If set, extract authentication metadata from the "fuse_in_header" + // messages sent by the kernel. This field contains a JMESPath + // expression that is used to construct the authentication metadata. 
+ // The input data will have the following format: + // + // { + // "uid": number, + // "gid": number, + // "pid": number, + // } + // + // NOTE: This option is only used by bb_virtual_tmp. + string in_header_authentication_metadata_jmespath_expression = 8; + + // Tunables to apply to the Linux Backing Dev Info (BDI) after the + // FUSE mount has been created. Setting this option causes the + // tunables at /sys/class/bdi/${device}/${key} to be modified. These + // tunables are documented here: + // + // https://www.kernel.org/doc/Documentation/ABI/testing/sysfs-class-bdi + // + // By default, Linux only allows 1% of vm.dirty_ratio of memory to be + // dirty pages belonging to the FUSE mount. This may be insufficient + // for workloads that depend on random access writes to large files. + // Increasing this value may speed up writes against the FUSE file + // system, at the cost of slowing down writes against other volumes. + // For example, increasing the ratio from 1% to 10% can be + // accomplished by setting this option as follows: + // + // { + // "max_ratio": "10" + // } + // + // If random access writes against the FUSE file system are still + // slower than desired, performance may be increased further by + // removing the BDI_CAP_STRICTLIMIT flag from the FUSE mount. On Linux + // 6.2 and later, this can be achieved by setting this option as + // follows: + // + // { + // "max_ratio": "10", + // "strict_limit": "0" + // } + // + // Recommended value: unset + map linux_backing_dev_info_tunables = 9; +} + +message NFSv4MountConfiguration { + oneof operating_system { + // Configuration options specific to mounting the NFSv4 file system + // on macOS. + NFSv4DarwinMountConfiguration darwin = 1; + } + + // The amount of time that needs to pass for the server to close files + // and remove state belonging to a client that has not shown any + // activity. 
+ // + // This option also controls how long state associated with a single + // process on a client (an 'open-owner') is allowed to continue to + // exist on the server if no files are opened, or if left unconfirmed. + // + // Recommended value: 120s + google.protobuf.Duration enforced_lease_time = 2; + + // The lease time to announce to clients through the FATTR4_LEASE_TIME + // attribute. This option should be set lower than + // 'enforced_lease_time', as it needs to account for network delays + // and instability. + // + // Recommended value: 60s + google.protobuf.Duration announced_lease_time = 3; + + // If set, extract the system authentication (AUTH_SYS) data from + // RPCv2 requests and convert them to authentication metadata. The + // resulting metadata can be used for purposes such as authorization. + // + // Please refer to RFC 5531, appendix A for more details. + // + // NOTE: This option is only used by bb_virtual_tmp. + RPCv2SystemAuthenticationConfiguration system_authentication = 4; +} + +message NFSv4DarwinMountConfiguration { + // Path on which to bind the UNIX socket of the NFSv4 server. The + // kernel will connect to this socket when mounting. + // + // NOTE: No facilities are provided to set the ownership or + // permissions on the socket file. On most operating systems, the + // socket file will have mode 0777. How the mode is interpreted when + // changed is inconsistent between operating systems. Some require the + // socket to be writable in order to connect, while others ignore the + // permissions altogether. + // + // It is therefore strongly advised that socket files are placed + // inside directories that have access controls set up properly. + string socket_path = 1; + + // Was 'minimum_directories_attribute_cache_timeout' and + // 'maximum_directories_attribute_cache_timeout'. These options are + // now set automatically, based on the application that is used. 
+ reserved 2, 3; +} + +message RPCv2SystemAuthenticationConfiguration { + // The JMESPath expression to be used to construct authentication + // metadata. The expression receives the following input, which + // corresponds to the authsys_parms structure that is described in RFC + // 5531, appendix A. + // + // { + // "stamp": number, + // "machinename": string, + // "uid": number, + // "gid": number, + // "gids": list, + // } + string metadata_jmespath_expression = 1; + + // The number of authentication metadata objects to cache. This cache + // allows clients to use authentication flavor AUTH_SHORT in + // subsequent requests, meaning they don't need to transmit full + // credentials as part of every request. + // + // It is recommended that this cache is sized proportionally to the + // number of UNIX users accessing the server. + int32 maximum_cache_size = 2; + + // The cache replacement policy that should be applied. It is advised + // that this is set to LEAST_RECENTLY_USED. + buildbarn.configuration.eviction.CacheReplacementPolicy + cache_replacement_policy = 3; +} diff --git a/pkg/proto/configuration/scheduler/BUILD.bazel b/pkg/proto/configuration/scheduler/BUILD.bazel new file mode 100644 index 0000000..e295fa5 --- /dev/null +++ b/pkg/proto/configuration/scheduler/BUILD.bazel @@ -0,0 +1,29 @@ +load("@rules_proto//proto:defs.bzl", "proto_library") +load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") + +proto_library( + name = "scheduler_proto", + srcs = ["scheduler.proto"], + visibility = ["//visibility:public"], + deps = [ + "@com_github_bazelbuild_remote_apis//build/bazel/remote/execution/v2:remote_execution_proto", + "@com_google_protobuf//:duration_proto", + "@com_google_protobuf//:empty_proto", + ], +) + +go_proto_library( + name = "scheduler_go_proto", + importpath = "github.com/buildbarn/bb-remote-execution/pkg/proto/configuration/scheduler", + proto = ":scheduler_proto", + visibility = 
["//visibility:public"], + deps = ["@com_github_bazelbuild_remote_apis//build/bazel/remote/execution/v2:execution"], +) + +go_library( + name = "scheduler", + embed = [":scheduler_go_proto"], + importpath = "github.com/buildbarn/bb-remote-execution/pkg/proto/configuration/scheduler", + visibility = ["//visibility:public"], +) diff --git a/pkg/proto/configuration/scheduler/scheduler.pb.go b/pkg/proto/configuration/scheduler/scheduler.pb.go new file mode 100644 index 0000000..174dcbd --- /dev/null +++ b/pkg/proto/configuration/scheduler/scheduler.pb.go @@ -0,0 +1,1096 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.31.0 +// protoc v4.23.4 +// source: pkg/proto/configuration/scheduler/scheduler.proto + +package scheduler + +import ( + v2 "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + emptypb "google.golang.org/protobuf/types/known/emptypb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type ActionRouterConfiguration struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Kind: + // + // *ActionRouterConfiguration_Simple + // *ActionRouterConfiguration_Demultiplexing + Kind isActionRouterConfiguration_Kind `protobuf_oneof:"kind"` +} + +func (x *ActionRouterConfiguration) Reset() { + *x = ActionRouterConfiguration{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_configuration_scheduler_scheduler_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ActionRouterConfiguration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ActionRouterConfiguration) ProtoMessage() {} + +func (x *ActionRouterConfiguration) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_configuration_scheduler_scheduler_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ActionRouterConfiguration.ProtoReflect.Descriptor instead. 
+func (*ActionRouterConfiguration) Descriptor() ([]byte, []int) { + return file_pkg_proto_configuration_scheduler_scheduler_proto_rawDescGZIP(), []int{0} +} + +func (m *ActionRouterConfiguration) GetKind() isActionRouterConfiguration_Kind { + if m != nil { + return m.Kind + } + return nil +} + +func (x *ActionRouterConfiguration) GetSimple() *SimpleActionRouterConfiguration { + if x, ok := x.GetKind().(*ActionRouterConfiguration_Simple); ok { + return x.Simple + } + return nil +} + +func (x *ActionRouterConfiguration) GetDemultiplexing() *DemultiplexingActionRouterConfiguration { + if x, ok := x.GetKind().(*ActionRouterConfiguration_Demultiplexing); ok { + return x.Demultiplexing + } + return nil +} + +type isActionRouterConfiguration_Kind interface { + isActionRouterConfiguration_Kind() +} + +type ActionRouterConfiguration_Simple struct { + Simple *SimpleActionRouterConfiguration `protobuf:"bytes,1,opt,name=simple,proto3,oneof"` +} + +type ActionRouterConfiguration_Demultiplexing struct { + Demultiplexing *DemultiplexingActionRouterConfiguration `protobuf:"bytes,2,opt,name=demultiplexing,proto3,oneof"` +} + +func (*ActionRouterConfiguration_Simple) isActionRouterConfiguration_Kind() {} + +func (*ActionRouterConfiguration_Demultiplexing) isActionRouterConfiguration_Kind() {} + +type SimpleActionRouterConfiguration struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PlatformKeyExtractor *PlatformKeyExtractorConfiguration `protobuf:"bytes,1,opt,name=platform_key_extractor,json=platformKeyExtractor,proto3" json:"platform_key_extractor,omitempty"` + InvocationKeyExtractors []*InvocationKeyExtractorConfiguration `protobuf:"bytes,2,rep,name=invocation_key_extractors,json=invocationKeyExtractors,proto3" json:"invocation_key_extractors,omitempty"` + InitialSizeClassAnalyzer *InitialSizeClassAnalyzerConfiguration `protobuf:"bytes,3,opt,name=initial_size_class_analyzer,json=initialSizeClassAnalyzer,proto3" 
json:"initial_size_class_analyzer,omitempty"` +} + +func (x *SimpleActionRouterConfiguration) Reset() { + *x = SimpleActionRouterConfiguration{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_configuration_scheduler_scheduler_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SimpleActionRouterConfiguration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SimpleActionRouterConfiguration) ProtoMessage() {} + +func (x *SimpleActionRouterConfiguration) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_configuration_scheduler_scheduler_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SimpleActionRouterConfiguration.ProtoReflect.Descriptor instead. +func (*SimpleActionRouterConfiguration) Descriptor() ([]byte, []int) { + return file_pkg_proto_configuration_scheduler_scheduler_proto_rawDescGZIP(), []int{1} +} + +func (x *SimpleActionRouterConfiguration) GetPlatformKeyExtractor() *PlatformKeyExtractorConfiguration { + if x != nil { + return x.PlatformKeyExtractor + } + return nil +} + +func (x *SimpleActionRouterConfiguration) GetInvocationKeyExtractors() []*InvocationKeyExtractorConfiguration { + if x != nil { + return x.InvocationKeyExtractors + } + return nil +} + +func (x *SimpleActionRouterConfiguration) GetInitialSizeClassAnalyzer() *InitialSizeClassAnalyzerConfiguration { + if x != nil { + return x.InitialSizeClassAnalyzer + } + return nil +} + +type DemultiplexingActionRouterConfiguration struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PlatformKeyExtractor *PlatformKeyExtractorConfiguration `protobuf:"bytes,1,opt,name=platform_key_extractor,json=platformKeyExtractor,proto3" 
json:"platform_key_extractor,omitempty"` + Backends []*DemultiplexingActionRouterConfiguration_Backend `protobuf:"bytes,2,rep,name=backends,proto3" json:"backends,omitempty"` + DefaultActionRouter *ActionRouterConfiguration `protobuf:"bytes,3,opt,name=default_action_router,json=defaultActionRouter,proto3" json:"default_action_router,omitempty"` +} + +func (x *DemultiplexingActionRouterConfiguration) Reset() { + *x = DemultiplexingActionRouterConfiguration{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_configuration_scheduler_scheduler_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DemultiplexingActionRouterConfiguration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DemultiplexingActionRouterConfiguration) ProtoMessage() {} + +func (x *DemultiplexingActionRouterConfiguration) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_configuration_scheduler_scheduler_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DemultiplexingActionRouterConfiguration.ProtoReflect.Descriptor instead. 
+func (*DemultiplexingActionRouterConfiguration) Descriptor() ([]byte, []int) { + return file_pkg_proto_configuration_scheduler_scheduler_proto_rawDescGZIP(), []int{2} +} + +func (x *DemultiplexingActionRouterConfiguration) GetPlatformKeyExtractor() *PlatformKeyExtractorConfiguration { + if x != nil { + return x.PlatformKeyExtractor + } + return nil +} + +func (x *DemultiplexingActionRouterConfiguration) GetBackends() []*DemultiplexingActionRouterConfiguration_Backend { + if x != nil { + return x.Backends + } + return nil +} + +func (x *DemultiplexingActionRouterConfiguration) GetDefaultActionRouter() *ActionRouterConfiguration { + if x != nil { + return x.DefaultActionRouter + } + return nil +} + +type PlatformKeyExtractorConfiguration struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Kind: + // + // *PlatformKeyExtractorConfiguration_Action + // *PlatformKeyExtractorConfiguration_ActionAndCommand + // *PlatformKeyExtractorConfiguration_Static + Kind isPlatformKeyExtractorConfiguration_Kind `protobuf_oneof:"kind"` +} + +func (x *PlatformKeyExtractorConfiguration) Reset() { + *x = PlatformKeyExtractorConfiguration{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_configuration_scheduler_scheduler_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PlatformKeyExtractorConfiguration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PlatformKeyExtractorConfiguration) ProtoMessage() {} + +func (x *PlatformKeyExtractorConfiguration) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_configuration_scheduler_scheduler_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use 
PlatformKeyExtractorConfiguration.ProtoReflect.Descriptor instead. +func (*PlatformKeyExtractorConfiguration) Descriptor() ([]byte, []int) { + return file_pkg_proto_configuration_scheduler_scheduler_proto_rawDescGZIP(), []int{3} +} + +func (m *PlatformKeyExtractorConfiguration) GetKind() isPlatformKeyExtractorConfiguration_Kind { + if m != nil { + return m.Kind + } + return nil +} + +func (x *PlatformKeyExtractorConfiguration) GetAction() *emptypb.Empty { + if x, ok := x.GetKind().(*PlatformKeyExtractorConfiguration_Action); ok { + return x.Action + } + return nil +} + +func (x *PlatformKeyExtractorConfiguration) GetActionAndCommand() *emptypb.Empty { + if x, ok := x.GetKind().(*PlatformKeyExtractorConfiguration_ActionAndCommand); ok { + return x.ActionAndCommand + } + return nil +} + +func (x *PlatformKeyExtractorConfiguration) GetStatic() *v2.Platform { + if x, ok := x.GetKind().(*PlatformKeyExtractorConfiguration_Static); ok { + return x.Static + } + return nil +} + +type isPlatformKeyExtractorConfiguration_Kind interface { + isPlatformKeyExtractorConfiguration_Kind() +} + +type PlatformKeyExtractorConfiguration_Action struct { + Action *emptypb.Empty `protobuf:"bytes,1,opt,name=action,proto3,oneof"` +} + +type PlatformKeyExtractorConfiguration_ActionAndCommand struct { + ActionAndCommand *emptypb.Empty `protobuf:"bytes,2,opt,name=action_and_command,json=actionAndCommand,proto3,oneof"` +} + +type PlatformKeyExtractorConfiguration_Static struct { + Static *v2.Platform `protobuf:"bytes,3,opt,name=static,proto3,oneof"` +} + +func (*PlatformKeyExtractorConfiguration_Action) isPlatformKeyExtractorConfiguration_Kind() {} + +func (*PlatformKeyExtractorConfiguration_ActionAndCommand) isPlatformKeyExtractorConfiguration_Kind() { +} + +func (*PlatformKeyExtractorConfiguration_Static) isPlatformKeyExtractorConfiguration_Kind() {} + +type InvocationKeyExtractorConfiguration struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields 
protoimpl.UnknownFields + + // Types that are assignable to Kind: + // + // *InvocationKeyExtractorConfiguration_ToolInvocationId + // *InvocationKeyExtractorConfiguration_CorrelatedInvocationsId + // *InvocationKeyExtractorConfiguration_AuthenticationMetadata + Kind isInvocationKeyExtractorConfiguration_Kind `protobuf_oneof:"kind"` +} + +func (x *InvocationKeyExtractorConfiguration) Reset() { + *x = InvocationKeyExtractorConfiguration{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_configuration_scheduler_scheduler_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *InvocationKeyExtractorConfiguration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*InvocationKeyExtractorConfiguration) ProtoMessage() {} + +func (x *InvocationKeyExtractorConfiguration) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_configuration_scheduler_scheduler_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use InvocationKeyExtractorConfiguration.ProtoReflect.Descriptor instead. 
+func (*InvocationKeyExtractorConfiguration) Descriptor() ([]byte, []int) { + return file_pkg_proto_configuration_scheduler_scheduler_proto_rawDescGZIP(), []int{4} +} + +func (m *InvocationKeyExtractorConfiguration) GetKind() isInvocationKeyExtractorConfiguration_Kind { + if m != nil { + return m.Kind + } + return nil +} + +func (x *InvocationKeyExtractorConfiguration) GetToolInvocationId() *emptypb.Empty { + if x, ok := x.GetKind().(*InvocationKeyExtractorConfiguration_ToolInvocationId); ok { + return x.ToolInvocationId + } + return nil +} + +func (x *InvocationKeyExtractorConfiguration) GetCorrelatedInvocationsId() *emptypb.Empty { + if x, ok := x.GetKind().(*InvocationKeyExtractorConfiguration_CorrelatedInvocationsId); ok { + return x.CorrelatedInvocationsId + } + return nil +} + +func (x *InvocationKeyExtractorConfiguration) GetAuthenticationMetadata() *emptypb.Empty { + if x, ok := x.GetKind().(*InvocationKeyExtractorConfiguration_AuthenticationMetadata); ok { + return x.AuthenticationMetadata + } + return nil +} + +type isInvocationKeyExtractorConfiguration_Kind interface { + isInvocationKeyExtractorConfiguration_Kind() +} + +type InvocationKeyExtractorConfiguration_ToolInvocationId struct { + ToolInvocationId *emptypb.Empty `protobuf:"bytes,2,opt,name=tool_invocation_id,json=toolInvocationId,proto3,oneof"` +} + +type InvocationKeyExtractorConfiguration_CorrelatedInvocationsId struct { + CorrelatedInvocationsId *emptypb.Empty `protobuf:"bytes,3,opt,name=correlated_invocations_id,json=correlatedInvocationsId,proto3,oneof"` +} + +type InvocationKeyExtractorConfiguration_AuthenticationMetadata struct { + AuthenticationMetadata *emptypb.Empty `protobuf:"bytes,4,opt,name=authentication_metadata,json=authenticationMetadata,proto3,oneof"` +} + +func (*InvocationKeyExtractorConfiguration_ToolInvocationId) isInvocationKeyExtractorConfiguration_Kind() { +} + +func (*InvocationKeyExtractorConfiguration_CorrelatedInvocationsId) 
isInvocationKeyExtractorConfiguration_Kind() { +} + +func (*InvocationKeyExtractorConfiguration_AuthenticationMetadata) isInvocationKeyExtractorConfiguration_Kind() { +} + +type InitialSizeClassAnalyzerConfiguration struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + DefaultExecutionTimeout *durationpb.Duration `protobuf:"bytes,1,opt,name=default_execution_timeout,json=defaultExecutionTimeout,proto3" json:"default_execution_timeout,omitempty"` + MaximumExecutionTimeout *durationpb.Duration `protobuf:"bytes,2,opt,name=maximum_execution_timeout,json=maximumExecutionTimeout,proto3" json:"maximum_execution_timeout,omitempty"` + FeedbackDriven *InitialSizeClassFeedbackDrivenAnalyzerConfiguration `protobuf:"bytes,3,opt,name=feedback_driven,json=feedbackDriven,proto3" json:"feedback_driven,omitempty"` +} + +func (x *InitialSizeClassAnalyzerConfiguration) Reset() { + *x = InitialSizeClassAnalyzerConfiguration{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_configuration_scheduler_scheduler_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *InitialSizeClassAnalyzerConfiguration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*InitialSizeClassAnalyzerConfiguration) ProtoMessage() {} + +func (x *InitialSizeClassAnalyzerConfiguration) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_configuration_scheduler_scheduler_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use InitialSizeClassAnalyzerConfiguration.ProtoReflect.Descriptor instead. 
+func (*InitialSizeClassAnalyzerConfiguration) Descriptor() ([]byte, []int) { + return file_pkg_proto_configuration_scheduler_scheduler_proto_rawDescGZIP(), []int{5} +} + +func (x *InitialSizeClassAnalyzerConfiguration) GetDefaultExecutionTimeout() *durationpb.Duration { + if x != nil { + return x.DefaultExecutionTimeout + } + return nil +} + +func (x *InitialSizeClassAnalyzerConfiguration) GetMaximumExecutionTimeout() *durationpb.Duration { + if x != nil { + return x.MaximumExecutionTimeout + } + return nil +} + +func (x *InitialSizeClassAnalyzerConfiguration) GetFeedbackDriven() *InitialSizeClassFeedbackDrivenAnalyzerConfiguration { + if x != nil { + return x.FeedbackDriven + } + return nil +} + +type InitialSizeClassFeedbackDrivenAnalyzerConfiguration struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + FailureCacheDuration *durationpb.Duration `protobuf:"bytes,1,opt,name=failure_cache_duration,json=failureCacheDuration,proto3" json:"failure_cache_duration,omitempty"` + HistorySize int32 `protobuf:"varint,6,opt,name=history_size,json=historySize,proto3" json:"history_size,omitempty"` + PageRank *InitialSizeClassPageRankStrategyCalculatorConfiguration `protobuf:"bytes,7,opt,name=page_rank,json=pageRank,proto3" json:"page_rank,omitempty"` +} + +func (x *InitialSizeClassFeedbackDrivenAnalyzerConfiguration) Reset() { + *x = InitialSizeClassFeedbackDrivenAnalyzerConfiguration{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_configuration_scheduler_scheduler_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *InitialSizeClassFeedbackDrivenAnalyzerConfiguration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*InitialSizeClassFeedbackDrivenAnalyzerConfiguration) ProtoMessage() {} + +func (x *InitialSizeClassFeedbackDrivenAnalyzerConfiguration) ProtoReflect() protoreflect.Message { + mi := 
&file_pkg_proto_configuration_scheduler_scheduler_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use InitialSizeClassFeedbackDrivenAnalyzerConfiguration.ProtoReflect.Descriptor instead. +func (*InitialSizeClassFeedbackDrivenAnalyzerConfiguration) Descriptor() ([]byte, []int) { + return file_pkg_proto_configuration_scheduler_scheduler_proto_rawDescGZIP(), []int{6} +} + +func (x *InitialSizeClassFeedbackDrivenAnalyzerConfiguration) GetFailureCacheDuration() *durationpb.Duration { + if x != nil { + return x.FailureCacheDuration + } + return nil +} + +func (x *InitialSizeClassFeedbackDrivenAnalyzerConfiguration) GetHistorySize() int32 { + if x != nil { + return x.HistorySize + } + return 0 +} + +func (x *InitialSizeClassFeedbackDrivenAnalyzerConfiguration) GetPageRank() *InitialSizeClassPageRankStrategyCalculatorConfiguration { + if x != nil { + return x.PageRank + } + return nil +} + +type InitialSizeClassPageRankStrategyCalculatorConfiguration struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AcceptableExecutionTimeIncreaseExponent float64 `protobuf:"fixed64,1,opt,name=acceptable_execution_time_increase_exponent,json=acceptableExecutionTimeIncreaseExponent,proto3" json:"acceptable_execution_time_increase_exponent,omitempty"` + SmallerSizeClassExecutionTimeoutMultiplier float64 `protobuf:"fixed64,2,opt,name=smaller_size_class_execution_timeout_multiplier,json=smallerSizeClassExecutionTimeoutMultiplier,proto3" json:"smaller_size_class_execution_timeout_multiplier,omitempty"` + MinimumExecutionTimeout *durationpb.Duration `protobuf:"bytes,3,opt,name=minimum_execution_timeout,json=minimumExecutionTimeout,proto3" json:"minimum_execution_timeout,omitempty"` + MaximumConvergenceError float64 
`protobuf:"fixed64,4,opt,name=maximum_convergence_error,json=maximumConvergenceError,proto3" json:"maximum_convergence_error,omitempty"` +} + +func (x *InitialSizeClassPageRankStrategyCalculatorConfiguration) Reset() { + *x = InitialSizeClassPageRankStrategyCalculatorConfiguration{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_configuration_scheduler_scheduler_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *InitialSizeClassPageRankStrategyCalculatorConfiguration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*InitialSizeClassPageRankStrategyCalculatorConfiguration) ProtoMessage() {} + +func (x *InitialSizeClassPageRankStrategyCalculatorConfiguration) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_configuration_scheduler_scheduler_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use InitialSizeClassPageRankStrategyCalculatorConfiguration.ProtoReflect.Descriptor instead. 
+func (*InitialSizeClassPageRankStrategyCalculatorConfiguration) Descriptor() ([]byte, []int) { + return file_pkg_proto_configuration_scheduler_scheduler_proto_rawDescGZIP(), []int{7} +} + +func (x *InitialSizeClassPageRankStrategyCalculatorConfiguration) GetAcceptableExecutionTimeIncreaseExponent() float64 { + if x != nil { + return x.AcceptableExecutionTimeIncreaseExponent + } + return 0 +} + +func (x *InitialSizeClassPageRankStrategyCalculatorConfiguration) GetSmallerSizeClassExecutionTimeoutMultiplier() float64 { + if x != nil { + return x.SmallerSizeClassExecutionTimeoutMultiplier + } + return 0 +} + +func (x *InitialSizeClassPageRankStrategyCalculatorConfiguration) GetMinimumExecutionTimeout() *durationpb.Duration { + if x != nil { + return x.MinimumExecutionTimeout + } + return nil +} + +func (x *InitialSizeClassPageRankStrategyCalculatorConfiguration) GetMaximumConvergenceError() float64 { + if x != nil { + return x.MaximumConvergenceError + } + return 0 +} + +type DemultiplexingActionRouterConfiguration_Backend struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + InstanceNamePrefix string `protobuf:"bytes,1,opt,name=instance_name_prefix,json=instanceNamePrefix,proto3" json:"instance_name_prefix,omitempty"` + Platform *v2.Platform `protobuf:"bytes,2,opt,name=platform,proto3" json:"platform,omitempty"` + ActionRouter *ActionRouterConfiguration `protobuf:"bytes,3,opt,name=action_router,json=actionRouter,proto3" json:"action_router,omitempty"` +} + +func (x *DemultiplexingActionRouterConfiguration_Backend) Reset() { + *x = DemultiplexingActionRouterConfiguration_Backend{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_configuration_scheduler_scheduler_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DemultiplexingActionRouterConfiguration_Backend) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func 
(*DemultiplexingActionRouterConfiguration_Backend) ProtoMessage() {} + +func (x *DemultiplexingActionRouterConfiguration_Backend) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_configuration_scheduler_scheduler_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DemultiplexingActionRouterConfiguration_Backend.ProtoReflect.Descriptor instead. +func (*DemultiplexingActionRouterConfiguration_Backend) Descriptor() ([]byte, []int) { + return file_pkg_proto_configuration_scheduler_scheduler_proto_rawDescGZIP(), []int{2, 0} +} + +func (x *DemultiplexingActionRouterConfiguration_Backend) GetInstanceNamePrefix() string { + if x != nil { + return x.InstanceNamePrefix + } + return "" +} + +func (x *DemultiplexingActionRouterConfiguration_Backend) GetPlatform() *v2.Platform { + if x != nil { + return x.Platform + } + return nil +} + +func (x *DemultiplexingActionRouterConfiguration_Backend) GetActionRouter() *ActionRouterConfiguration { + if x != nil { + return x.ActionRouter + } + return nil +} + +var File_pkg_proto_configuration_scheduler_scheduler_proto protoreflect.FileDescriptor + +var file_pkg_proto_configuration_scheduler_scheduler_proto_rawDesc = []byte{ + 0x0a, 0x31, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, + 0x6c, 0x65, 0x72, 0x2f, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x21, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x73, 0x63, 0x68, + 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x1a, 0x36, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x2f, 0x62, 0x61, + 0x7a, 0x65, 0x6c, 0x2f, 
0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2f, 0x65, 0x78, 0x65, 0x63, 0x75, + 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x76, 0x32, 0x2f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x65, + 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, + 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, + 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xf7, 0x01, 0x0a, 0x19, + 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x5c, 0x0a, 0x06, 0x73, 0x69, 0x6d, + 0x70, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x62, 0x75, 0x69, 0x6c, + 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x2e, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x2e, 0x53, 0x69, + 0x6d, 0x70, 0x6c, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x72, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, + 0x06, 0x73, 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x12, 0x74, 0x0a, 0x0e, 0x64, 0x65, 0x6d, 0x75, 0x6c, + 0x74, 0x69, 0x70, 0x6c, 0x65, 0x78, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x4a, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, + 0x6c, 0x65, 0x72, 0x2e, 0x44, 0x65, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x78, 0x69, + 0x6e, 0x67, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 
0x6e, 0x48, 0x00, 0x52, 0x0e, 0x64, + 0x65, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x78, 0x69, 0x6e, 0x67, 0x42, 0x06, 0x0a, + 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0xac, 0x03, 0x0a, 0x1f, 0x53, 0x69, 0x6d, 0x70, 0x6c, 0x65, + 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x7a, 0x0a, 0x16, 0x70, 0x6c, 0x61, + 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x65, 0x78, 0x74, 0x72, 0x61, 0x63, + 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x44, 0x2e, 0x62, 0x75, 0x69, 0x6c, + 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x2e, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x2e, 0x50, 0x6c, + 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x4b, 0x65, 0x79, 0x45, 0x78, 0x74, 0x72, 0x61, 0x63, 0x74, + 0x6f, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x14, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x4b, 0x65, 0x79, 0x45, 0x78, 0x74, 0x72, + 0x61, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x82, 0x01, 0x0a, 0x19, 0x69, 0x6e, 0x76, 0x6f, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x65, 0x78, 0x74, 0x72, 0x61, 0x63, 0x74, + 0x6f, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x46, 0x2e, 0x62, 0x75, 0x69, 0x6c, + 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x2e, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x2e, 0x49, 0x6e, + 0x76, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x45, 0x78, 0x74, 0x72, 0x61, + 0x63, 0x74, 0x6f, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x17, 0x69, 0x6e, 0x76, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, + 0x45, 0x78, 0x74, 0x72, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x87, 0x01, 0x0a, 0x1b, 0x69, + 
0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, + 0x73, 0x5f, 0x61, 0x6e, 0x61, 0x6c, 0x79, 0x7a, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x48, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x73, 0x63, 0x68, 0x65, 0x64, + 0x75, 0x6c, 0x65, 0x72, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65, + 0x43, 0x6c, 0x61, 0x73, 0x73, 0x41, 0x6e, 0x61, 0x6c, 0x79, 0x7a, 0x65, 0x72, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x18, 0x69, 0x6e, 0x69, 0x74, + 0x69, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x41, 0x6e, 0x61, 0x6c, + 0x79, 0x7a, 0x65, 0x72, 0x22, 0xef, 0x04, 0x0a, 0x27, 0x44, 0x65, 0x6d, 0x75, 0x6c, 0x74, 0x69, + 0x70, 0x6c, 0x65, 0x78, 0x69, 0x6e, 0x67, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x6f, 0x75, + 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x7a, 0x0a, 0x16, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x5f, 0x6b, 0x65, 0x79, + 0x5f, 0x65, 0x78, 0x74, 0x72, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x44, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x73, 0x63, 0x68, 0x65, 0x64, + 0x75, 0x6c, 0x65, 0x72, 0x2e, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x4b, 0x65, 0x79, + 0x45, 0x78, 0x74, 0x72, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x14, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, + 0x4b, 0x65, 0x79, 0x45, 0x78, 0x74, 0x72, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x6e, 0x0a, 0x08, + 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x52, + 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 
0x62, 0x61, 0x72, 0x6e, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, + 0x65, 0x72, 0x2e, 0x44, 0x65, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x78, 0x69, 0x6e, + 0x67, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x65, + 0x6e, 0x64, 0x52, 0x08, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x73, 0x12, 0x70, 0x0a, 0x15, + 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, + 0x6f, 0x75, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x62, 0x75, + 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x2e, + 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x64, 0x65, 0x66, 0x61, 0x75, + 0x6c, 0x74, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x1a, 0xe5, + 0x01, 0x0a, 0x07, 0x42, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x12, 0x30, 0x0a, 0x14, 0x69, 0x6e, + 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x72, 0x65, 0x66, + 0x69, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, + 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x45, 0x0a, 0x08, + 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, + 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x2e, 0x62, 0x61, 0x7a, 0x65, 0x6c, 0x2e, 0x72, 0x65, 0x6d, + 0x6f, 0x74, 0x65, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x32, + 0x2e, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x52, 0x08, 0x70, 
0x6c, 0x61, 0x74, 0x66, + 0x6f, 0x72, 0x6d, 0x12, 0x61, 0x0a, 0x0d, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x6f, + 0x75, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x62, 0x75, 0x69, + 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x2e, 0x41, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x22, 0xea, 0x01, 0x0a, 0x21, 0x50, 0x6c, 0x61, 0x74, 0x66, + 0x6f, 0x72, 0x6d, 0x4b, 0x65, 0x79, 0x45, 0x78, 0x74, 0x72, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x30, 0x0a, 0x06, + 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, + 0x6d, 0x70, 0x74, 0x79, 0x48, 0x00, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x46, + 0x0a, 0x12, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x6e, 0x64, 0x5f, 0x63, 0x6f, 0x6d, + 0x6d, 0x61, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, + 0x74, 0x79, 0x48, 0x00, 0x52, 0x10, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6e, 0x64, 0x43, + 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x12, 0x43, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x2e, 0x62, + 0x61, 0x7a, 0x65, 0x6c, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x65, 0x78, 0x65, 0x63, + 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, + 0x6d, 0x48, 
0x00, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, 0x42, 0x06, 0x0a, 0x04, 0x6b, + 0x69, 0x6e, 0x64, 0x22, 0xa4, 0x02, 0x0a, 0x23, 0x49, 0x6e, 0x76, 0x6f, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x45, 0x78, 0x74, 0x72, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x46, 0x0a, 0x12, 0x74, + 0x6f, 0x6f, 0x6c, 0x5f, 0x69, 0x6e, 0x76, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, + 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x48, + 0x00, 0x52, 0x10, 0x74, 0x6f, 0x6f, 0x6c, 0x49, 0x6e, 0x76, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x49, 0x64, 0x12, 0x54, 0x0a, 0x19, 0x63, 0x6f, 0x72, 0x72, 0x65, 0x6c, 0x61, 0x74, 0x65, + 0x64, 0x5f, 0x69, 0x6e, 0x76, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x5f, 0x69, 0x64, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x48, 0x00, + 0x52, 0x17, 0x63, 0x6f, 0x72, 0x72, 0x65, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x49, 0x6e, 0x76, 0x6f, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x49, 0x64, 0x12, 0x51, 0x0a, 0x17, 0x61, 0x75, 0x74, + 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, + 0x74, 0x79, 0x48, 0x00, 0x52, 0x16, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x42, 0x06, 0x0a, 0x04, + 0x6b, 0x69, 0x6e, 0x64, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x22, 0xd6, 0x02, 0x0a, 0x25, 0x49, + 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x53, 0x69, 
0x7a, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x41, + 0x6e, 0x61, 0x6c, 0x79, 0x7a, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x55, 0x0a, 0x19, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, + 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x17, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x45, 0x78, 0x65, 0x63, 0x75, + 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x55, 0x0a, 0x19, 0x6d, + 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x17, 0x6d, 0x61, 0x78, 0x69, 0x6d, + 0x75, 0x6d, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x6f, + 0x75, 0x74, 0x12, 0x7f, 0x0a, 0x0f, 0x66, 0x65, 0x65, 0x64, 0x62, 0x61, 0x63, 0x6b, 0x5f, 0x64, + 0x72, 0x69, 0x76, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x56, 0x2e, 0x62, 0x75, + 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x2e, + 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, + 0x46, 0x65, 0x65, 0x64, 0x62, 0x61, 0x63, 0x6b, 0x44, 0x72, 0x69, 0x76, 0x65, 0x6e, 0x41, 0x6e, + 0x61, 0x6c, 0x79, 0x7a, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x66, 0x65, 0x65, 0x64, 0x62, 0x61, 0x63, 0x6b, 0x44, 
0x72, 0x69, + 0x76, 0x65, 0x6e, 0x22, 0xba, 0x02, 0x0a, 0x33, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x53, + 0x69, 0x7a, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x46, 0x65, 0x65, 0x64, 0x62, 0x61, 0x63, 0x6b, + 0x44, 0x72, 0x69, 0x76, 0x65, 0x6e, 0x41, 0x6e, 0x61, 0x6c, 0x79, 0x7a, 0x65, 0x72, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4f, 0x0a, 0x16, 0x66, + 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x5f, 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, 0x64, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x14, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x43, + 0x61, 0x63, 0x68, 0x65, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x21, 0x0a, 0x0c, + 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x0b, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x53, 0x69, 0x7a, 0x65, 0x12, + 0x77, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x72, 0x61, 0x6e, 0x6b, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x5a, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x73, 0x63, 0x68, + 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x53, 0x69, + 0x7a, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, 0x61, 0x67, 0x65, 0x52, 0x61, 0x6e, 0x6b, 0x53, + 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x43, 0x61, 0x6c, 0x63, 0x75, 0x6c, 0x61, 0x74, 0x6f, + 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, + 0x70, 0x61, 0x67, 0x65, 0x52, 0x61, 0x6e, 0x6b, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x4a, 0x04, + 0x08, 0x03, 0x10, 0x04, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, + 0x22, 0x8f, 0x03, 0x0a, 
0x37, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65, + 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, 0x61, 0x67, 0x65, 0x52, 0x61, 0x6e, 0x6b, 0x53, 0x74, 0x72, + 0x61, 0x74, 0x65, 0x67, 0x79, 0x43, 0x61, 0x6c, 0x63, 0x75, 0x6c, 0x61, 0x74, 0x6f, 0x72, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x5c, 0x0a, 0x2b, + 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x65, 0x78, 0x65, 0x63, 0x75, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x69, 0x6e, 0x63, 0x72, 0x65, 0x61, + 0x73, 0x65, 0x5f, 0x65, 0x78, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x01, 0x52, 0x27, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x45, 0x78, 0x65, + 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x49, 0x6e, 0x63, 0x72, 0x65, 0x61, + 0x73, 0x65, 0x45, 0x78, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x12, 0x63, 0x0a, 0x2f, 0x73, 0x6d, + 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, + 0x5f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, + 0x75, 0x74, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x01, 0x52, 0x2a, 0x73, 0x6d, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x53, 0x69, 0x7a, 0x65, + 0x43, 0x6c, 0x61, 0x73, 0x73, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, + 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x12, + 0x55, 0x0a, 0x19, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x78, 0x65, 0x63, 0x75, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x17, 0x6d, + 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x78, 0x65, 0x63, 
0x75, 0x74, 0x69, 0x6f, 0x6e, 0x54, + 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x3a, 0x0a, 0x19, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, + 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x76, 0x65, 0x72, 0x67, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x17, 0x6d, 0x61, 0x78, 0x69, 0x6d, + 0x75, 0x6d, 0x43, 0x6f, 0x6e, 0x76, 0x65, 0x72, 0x67, 0x65, 0x6e, 0x63, 0x65, 0x45, 0x72, 0x72, + 0x6f, 0x72, 0x42, 0x4c, 0x5a, 0x4a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2f, 0x62, 0x62, 0x2d, 0x72, 0x65, + 0x6d, 0x6f, 0x74, 0x65, 0x2d, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x70, + 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_pkg_proto_configuration_scheduler_scheduler_proto_rawDescOnce sync.Once + file_pkg_proto_configuration_scheduler_scheduler_proto_rawDescData = file_pkg_proto_configuration_scheduler_scheduler_proto_rawDesc +) + +func file_pkg_proto_configuration_scheduler_scheduler_proto_rawDescGZIP() []byte { + file_pkg_proto_configuration_scheduler_scheduler_proto_rawDescOnce.Do(func() { + file_pkg_proto_configuration_scheduler_scheduler_proto_rawDescData = protoimpl.X.CompressGZIP(file_pkg_proto_configuration_scheduler_scheduler_proto_rawDescData) + }) + return file_pkg_proto_configuration_scheduler_scheduler_proto_rawDescData +} + +var file_pkg_proto_configuration_scheduler_scheduler_proto_msgTypes = make([]protoimpl.MessageInfo, 9) +var file_pkg_proto_configuration_scheduler_scheduler_proto_goTypes = []interface{}{ + (*ActionRouterConfiguration)(nil), // 0: buildbarn.configuration.scheduler.ActionRouterConfiguration + (*SimpleActionRouterConfiguration)(nil), // 1: 
buildbarn.configuration.scheduler.SimpleActionRouterConfiguration + (*DemultiplexingActionRouterConfiguration)(nil), // 2: buildbarn.configuration.scheduler.DemultiplexingActionRouterConfiguration + (*PlatformKeyExtractorConfiguration)(nil), // 3: buildbarn.configuration.scheduler.PlatformKeyExtractorConfiguration + (*InvocationKeyExtractorConfiguration)(nil), // 4: buildbarn.configuration.scheduler.InvocationKeyExtractorConfiguration + (*InitialSizeClassAnalyzerConfiguration)(nil), // 5: buildbarn.configuration.scheduler.InitialSizeClassAnalyzerConfiguration + (*InitialSizeClassFeedbackDrivenAnalyzerConfiguration)(nil), // 6: buildbarn.configuration.scheduler.InitialSizeClassFeedbackDrivenAnalyzerConfiguration + (*InitialSizeClassPageRankStrategyCalculatorConfiguration)(nil), // 7: buildbarn.configuration.scheduler.InitialSizeClassPageRankStrategyCalculatorConfiguration + (*DemultiplexingActionRouterConfiguration_Backend)(nil), // 8: buildbarn.configuration.scheduler.DemultiplexingActionRouterConfiguration.Backend + (*emptypb.Empty)(nil), // 9: google.protobuf.Empty + (*v2.Platform)(nil), // 10: build.bazel.remote.execution.v2.Platform + (*durationpb.Duration)(nil), // 11: google.protobuf.Duration +} +var file_pkg_proto_configuration_scheduler_scheduler_proto_depIdxs = []int32{ + 1, // 0: buildbarn.configuration.scheduler.ActionRouterConfiguration.simple:type_name -> buildbarn.configuration.scheduler.SimpleActionRouterConfiguration + 2, // 1: buildbarn.configuration.scheduler.ActionRouterConfiguration.demultiplexing:type_name -> buildbarn.configuration.scheduler.DemultiplexingActionRouterConfiguration + 3, // 2: buildbarn.configuration.scheduler.SimpleActionRouterConfiguration.platform_key_extractor:type_name -> buildbarn.configuration.scheduler.PlatformKeyExtractorConfiguration + 4, // 3: buildbarn.configuration.scheduler.SimpleActionRouterConfiguration.invocation_key_extractors:type_name -> buildbarn.configuration.scheduler.InvocationKeyExtractorConfiguration + 
5, // 4: buildbarn.configuration.scheduler.SimpleActionRouterConfiguration.initial_size_class_analyzer:type_name -> buildbarn.configuration.scheduler.InitialSizeClassAnalyzerConfiguration + 3, // 5: buildbarn.configuration.scheduler.DemultiplexingActionRouterConfiguration.platform_key_extractor:type_name -> buildbarn.configuration.scheduler.PlatformKeyExtractorConfiguration + 8, // 6: buildbarn.configuration.scheduler.DemultiplexingActionRouterConfiguration.backends:type_name -> buildbarn.configuration.scheduler.DemultiplexingActionRouterConfiguration.Backend + 0, // 7: buildbarn.configuration.scheduler.DemultiplexingActionRouterConfiguration.default_action_router:type_name -> buildbarn.configuration.scheduler.ActionRouterConfiguration + 9, // 8: buildbarn.configuration.scheduler.PlatformKeyExtractorConfiguration.action:type_name -> google.protobuf.Empty + 9, // 9: buildbarn.configuration.scheduler.PlatformKeyExtractorConfiguration.action_and_command:type_name -> google.protobuf.Empty + 10, // 10: buildbarn.configuration.scheduler.PlatformKeyExtractorConfiguration.static:type_name -> build.bazel.remote.execution.v2.Platform + 9, // 11: buildbarn.configuration.scheduler.InvocationKeyExtractorConfiguration.tool_invocation_id:type_name -> google.protobuf.Empty + 9, // 12: buildbarn.configuration.scheduler.InvocationKeyExtractorConfiguration.correlated_invocations_id:type_name -> google.protobuf.Empty + 9, // 13: buildbarn.configuration.scheduler.InvocationKeyExtractorConfiguration.authentication_metadata:type_name -> google.protobuf.Empty + 11, // 14: buildbarn.configuration.scheduler.InitialSizeClassAnalyzerConfiguration.default_execution_timeout:type_name -> google.protobuf.Duration + 11, // 15: buildbarn.configuration.scheduler.InitialSizeClassAnalyzerConfiguration.maximum_execution_timeout:type_name -> google.protobuf.Duration + 6, // 16: buildbarn.configuration.scheduler.InitialSizeClassAnalyzerConfiguration.feedback_driven:type_name -> 
buildbarn.configuration.scheduler.InitialSizeClassFeedbackDrivenAnalyzerConfiguration + 11, // 17: buildbarn.configuration.scheduler.InitialSizeClassFeedbackDrivenAnalyzerConfiguration.failure_cache_duration:type_name -> google.protobuf.Duration + 7, // 18: buildbarn.configuration.scheduler.InitialSizeClassFeedbackDrivenAnalyzerConfiguration.page_rank:type_name -> buildbarn.configuration.scheduler.InitialSizeClassPageRankStrategyCalculatorConfiguration + 11, // 19: buildbarn.configuration.scheduler.InitialSizeClassPageRankStrategyCalculatorConfiguration.minimum_execution_timeout:type_name -> google.protobuf.Duration + 10, // 20: buildbarn.configuration.scheduler.DemultiplexingActionRouterConfiguration.Backend.platform:type_name -> build.bazel.remote.execution.v2.Platform + 0, // 21: buildbarn.configuration.scheduler.DemultiplexingActionRouterConfiguration.Backend.action_router:type_name -> buildbarn.configuration.scheduler.ActionRouterConfiguration + 22, // [22:22] is the sub-list for method output_type + 22, // [22:22] is the sub-list for method input_type + 22, // [22:22] is the sub-list for extension type_name + 22, // [22:22] is the sub-list for extension extendee + 0, // [0:22] is the sub-list for field type_name +} + +func init() { file_pkg_proto_configuration_scheduler_scheduler_proto_init() } +func file_pkg_proto_configuration_scheduler_scheduler_proto_init() { + if File_pkg_proto_configuration_scheduler_scheduler_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_pkg_proto_configuration_scheduler_scheduler_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ActionRouterConfiguration); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_proto_configuration_scheduler_scheduler_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SimpleActionRouterConfiguration); i { + case 0: + 
return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_proto_configuration_scheduler_scheduler_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DemultiplexingActionRouterConfiguration); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_proto_configuration_scheduler_scheduler_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PlatformKeyExtractorConfiguration); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_proto_configuration_scheduler_scheduler_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*InvocationKeyExtractorConfiguration); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_proto_configuration_scheduler_scheduler_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*InitialSizeClassAnalyzerConfiguration); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_proto_configuration_scheduler_scheduler_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*InitialSizeClassFeedbackDrivenAnalyzerConfiguration); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_proto_configuration_scheduler_scheduler_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*InitialSizeClassPageRankStrategyCalculatorConfiguration); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return 
nil + } + } + file_pkg_proto_configuration_scheduler_scheduler_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DemultiplexingActionRouterConfiguration_Backend); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_pkg_proto_configuration_scheduler_scheduler_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*ActionRouterConfiguration_Simple)(nil), + (*ActionRouterConfiguration_Demultiplexing)(nil), + } + file_pkg_proto_configuration_scheduler_scheduler_proto_msgTypes[3].OneofWrappers = []interface{}{ + (*PlatformKeyExtractorConfiguration_Action)(nil), + (*PlatformKeyExtractorConfiguration_ActionAndCommand)(nil), + (*PlatformKeyExtractorConfiguration_Static)(nil), + } + file_pkg_proto_configuration_scheduler_scheduler_proto_msgTypes[4].OneofWrappers = []interface{}{ + (*InvocationKeyExtractorConfiguration_ToolInvocationId)(nil), + (*InvocationKeyExtractorConfiguration_CorrelatedInvocationsId)(nil), + (*InvocationKeyExtractorConfiguration_AuthenticationMetadata)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_pkg_proto_configuration_scheduler_scheduler_proto_rawDesc, + NumEnums: 0, + NumMessages: 9, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_pkg_proto_configuration_scheduler_scheduler_proto_goTypes, + DependencyIndexes: file_pkg_proto_configuration_scheduler_scheduler_proto_depIdxs, + MessageInfos: file_pkg_proto_configuration_scheduler_scheduler_proto_msgTypes, + }.Build() + File_pkg_proto_configuration_scheduler_scheduler_proto = out.File + file_pkg_proto_configuration_scheduler_scheduler_proto_rawDesc = nil + file_pkg_proto_configuration_scheduler_scheduler_proto_goTypes = nil + file_pkg_proto_configuration_scheduler_scheduler_proto_depIdxs = nil +} diff --git 
a/pkg/proto/configuration/scheduler/scheduler.proto b/pkg/proto/configuration/scheduler/scheduler.proto new file mode 100644 index 0000000..ee7e6d9 --- /dev/null +++ b/pkg/proto/configuration/scheduler/scheduler.proto @@ -0,0 +1,312 @@ +syntax = "proto3"; + +package buildbarn.configuration.scheduler; + +import "build/bazel/remote/execution/v2/remote_execution.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/empty.proto"; + +option go_package = "github.com/buildbarn/bb-remote-execution/pkg/proto/configuration/scheduler"; + +message ActionRouterConfiguration { + oneof kind { + // Let all incoming execution requests use the same method for + // extracting platform properties and invocation key, and let them + // all use the same initial size class analyzer. + SimpleActionRouterConfiguration simple = 1; + + // Demultiplex incoming requests based on the client-provided REv2 + // instance name prefix and platform properties. + DemultiplexingActionRouterConfiguration demultiplexing = 2; + } +} + +message SimpleActionRouterConfiguration { + // The method that is used to extract platform properties from an + // incoming execution request. This can either read them from just the + // Action message (REv2.2 or later), both the Action and Command + // message (all versions of REv2), or ignore the client provided + // platform properties by overriding them. + PlatformKeyExtractorConfiguration platform_key_extractor = 1; + + // The method that is used to extract an invocation key from an + // incoming execution requests. The scheduler uses invocation keys to + // group queued actions, ensuring that actions are scheduled fairly + // with respect to other groups. + // + // The recommended method is to group actions by the + // 'tool_invocation_id' field that is part of the client provided + // RequestMetadata. This ensures that every running Bazel build + // receives an equal number of workers. 
+ // + // For more advanced setups it may be advised to write your own + // implementation that, for example, extracts usernames from + // authentication tokens. This ensures that every user receives an + // equal number of workers. + // + // By leaving this field empty, all actions are grouped together in a + // single queue, meaning there is no fairness. When multiple elements + // are provided, fairness is provided on multiple levels. For example, + // one can enforce fairness at the user level, followed by fairness at + // the Bazel invocation ID level. + repeated InvocationKeyExtractorConfiguration invocation_key_extractors = 2; + + // The initial size class analyzer that is used. The initial size + // class analyzer is responsible for determining the size of the + // worker on which an action should run, and what its execution + // timeout should be. + // + // For simpler setups that don't use multiple size classes (i.e., all + // workers have the same size), this field is still required, as a + // default and maximum execution timeout must be set. + InitialSizeClassAnalyzerConfiguration initial_size_class_analyzer = 3; +} + +message DemultiplexingActionRouterConfiguration { + message Backend { + // The instance name prefix to match. + string instance_name_prefix = 1; + + // The platform properties to match. + build.bazel.remote.execution.v2.Platform platform = 2; + + // The action router to which routing requests are forwarded. + ActionRouterConfiguration action_router = 3; + } + + // The method that is used to extract platform properties from the + // incoming execution request. + PlatformKeyExtractorConfiguration platform_key_extractor = 1; + + // List of backends to which to forward requests if the instance name + // prefix and platform properties match. + repeated Backend backends = 2; + + // The backend to which to forward requests if none of the declared + // backends have matching instance name prefixes and platform + // properties. 
+ ActionRouterConfiguration default_action_router = 3; +} + +message PlatformKeyExtractorConfiguration { + oneof kind { + // Attempt to extract platform properties from the REv2 Action + // message. + // + // This is sufficient when exclusively dealing with clients that + // implement REv2.2 or later, such as Bazel 4.1.0 and later. + google.protobuf.Empty action = 1; + + // Attempt to extract platform properties from the REv2 Action + // message, and fall back to the Command message in case the Action + // message contains no platform properties. + // + // This is less efficient than only considering the Action message, + // but allows requests from clients that implement REv2.1 or earlier + // to be respected as well. + google.protobuf.Empty action_and_command = 2; + + // Do not respect platform properties from the client's request, but + // use a static value provided in configuration. + // + // This option can be used in combination with + // DemultiplexingActionRouter to rewrite platform properties on + // incoming requests. This is useful when older platforms are phased + // out, and best effort support is provided by directing requests to + // workers of a newer similar platform. + build.bazel.remote.execution.v2.Platform static = 3; + } +} + +message InvocationKeyExtractorConfiguration { + // Was 'empty'. The same behavior can now be achieved by leaving the + // 'invocation_key_extractors' list empty. + reserved 1; + + oneof kind { + // Use the 'tool_invocation_id' that the client provided as part of + // the RequestMetadata message as the invocation key. In the case of + // Bazel, this causes all actions belonging to the same Bazel + // invocation to be grouped together. + google.protobuf.Empty tool_invocation_id = 2; + + // Use the 'correlated_invocations_id' that the client provided as + // part of the RequestMetadata message as the invocation key. 
In the + // case of Bazel, this causes all actions belonging to instances of + // Bazel that were invoked with the same --build_request_id to be + // grouped together. + google.protobuf.Empty correlated_invocations_id = 3; + + // Use the publicly displayable part of the authentication metadata + // as the invocation key. This causes all actions belonging to the + // same user to be grouped together. + google.protobuf.Empty authentication_metadata = 4; + } +} + +message InitialSizeClassAnalyzerConfiguration { + // Execution timeout that needs to be applied in case the build action + // contains no explicit timeout. + google.protobuf.Duration default_execution_timeout = 1; + + // Maximum permitted execution timeout. + google.protobuf.Duration maximum_execution_timeout = 2; + + // When set, persist statistics on execution times and outcomes into + // the Initial Size Class Cache (ISCC), so that future invocations of + // similar actions can be scheduled more intelligently. + // + // bb_browser is also capable of reading data from this data store, + // making it possible to view these statistics by visiting the page of + // an action. + InitialSizeClassFeedbackDrivenAnalyzerConfiguration feedback_driven = 3; +} + +message InitialSizeClassFeedbackDrivenAnalyzerConfiguration { + // Immediately schedule actions on the largest size class if they have + // failed at least once within the provided timeframe. + // + // Actions that fail on any size class other than the largest will + // always be retried on the largest size class to rule out failures + // caused by a lack of resources. This means that if an action is + // known to fail, attempting to run it on smaller size classes causes + // unnecessary delays in error reporting. + // + // During iterative development, it is likely that the same action + // is invoked repeatedly, each time having a high probability + // of failure. 
This option controls how long these kinds of actions + // should receive a boost, allowing them to be run on the largest size + // class and fail quickly. + // + // Recommended value: 86400s (1 day) + google.protobuf.Duration failure_cache_duration = 1; + + // Was 'acceptable_execution_time_increase_exponent', + // 'smaller_size_class_execution_timeout_multiplier', + // 'minimum_execution_timeout', and 'maximum_convergence_error'. + // + // These options have moved into 'page_rank'. + reserved 2, 3, 4, 5; + + // The number of action outcomes to store per size class. Increasing + // this improves the accuracy of timing information that is captured, + // but has the downside that the system responds to changes in + // behavior of actions less quickly. + // + // To ensure that the system does not end up in a steady state where + // actions are always run on the same size class, there is roughly a + // 1.0 / history_size probability that actions are run on size + // classes other than the optimum, regardless of historical outcomes. + // + // Recommended value: 32 + int32 history_size = 6; + + // When not set, run all actions on the smallest size class for which + // workers exist. Upon failure, retry actions on the largest size + // class. This mode is not recommended for setups with more than two + // size classes, or workloads where build times matter. + // + // When set, run all actions on the largest size class if not seen + // before. Future invocations of actions with the same command line + // arguments and environment variables will run on all size classes, + // using probabilities based on how their execution times compare to + // those of the largest size class. + InitialSizeClassPageRankStrategyCalculatorConfiguration page_rank = 7; +} + +message InitialSizeClassPageRankStrategyCalculatorConfiguration { + // An exponent to determine whether an increase in execution time when + // scheduling an action on a smaller size class is considered + // acceptable. 
+ // + // For example, consider the case where this exponent is set to 0.7, + // and a given action is known to have a 60s median execution time on + // the largest workers, having size class 16. For the execution time + // to be considered being acceptable on a smaller size class, this + // action must complete within: + // + // - 60s*(16/1)^0.7 = 417.8s on a worker with size class 1, + // - 60s*(16/2)^0.7 = 257.2s on a worker with size class 2, + // - 60s*(16/4)^0.7 = 158.3s on a worker with size class 4, + // - 60s*(16/8)^0.7 = 97.7s on a worker with size class 8. + // + // Whereas if this exponent is set to 0.3, the acceptable execution + // times would be significantly lower: + // + // - 60s*(16/1)^0.3 = 137.8s on a worker with size class 1, + // - 60s*(16/2)^0.3 = 112.0s on a worker with size class 2, + // - 60s*(16/4)^0.3 = 90.9s on a worker with size class 4, + // - 60s*(16/8)^0.3 = 73.9s on a worker with size class 8. + // + // In effect, this exponent determines how much speed you are willing + // to sacrifice for increased worker utilization. Setting this + // exponent to a higher value will increase worker utilization, but + // may cause actions that are only somewhat parallel to run slower. + // + // Recommended value: somewhere between 0.2 and 0.8. + double acceptable_execution_time_increase_exponent = 1; + + // Actions scheduled on smaller size classes are run with a reduced + // timeout value that is based on the acceptable execution time of the + // action for that size class (see above). This ensures that if a + // misprediction is made and an action is running unacceptably slow on + // a size class that is too small, it is terminated and quickly + // retried on the largest size class. + // + // This option configures a multiplier that needs to be applied when + // computing the action's timeout. Setting it to >1.0 gives an action + // a bit more time to finish its work, even if its execution time has + // become unacceptable. 
This has two advantages: + // + // - Less work is wasted, as the action may likely still complete. + // - If we still observe a timeout on the smaller size class, we + // insert a higher quality data point into the ISCC. + // + // Recommended value: 1.5 + double smaller_size_class_execution_timeout_multiplier = 2; + + // The execution timeout value that is used on smaller size classes is + // proportional to the median execution time observed on the largest + // size class. This means that if the median execution time on the + // largest size class is in the milliseconds, so will be the execution + // timeout on smaller size classes. + // + // Because this tends to introduce too much flakiness, this option can + // be used to set a lower bound on the execution timeout. + // + // Recommended value: 10s + google.protobuf.Duration minimum_execution_timeout = 3; + + // This implementation compares previous execution stats between every + // pair of size classes. The resulting scores are stored in a + // stochastic matrix, of which the resulting eigenvector contains the + // probabilities at which size classes should be chosen. This + // algorithm has a strong resemblance with PageRank. + // + // To compute the eigenvector, a process called "power iteration" is + // used, in which repeated matrix multiplications are performed. This + // method approximates the eigenvector, each iteration giving more + // accurate results. This option can be used to control how many + // iterations should be performed. Matrix multiplication will be + // terminated as soon as the maximum observed error drops below a + // certain value. + // + // It is generally possible to set this option to an aggressive (low) + // value, for a couple of reasons: + // + // - The number of size classes tends to be small (<10), meaning that + // the resulting probability matrix is also not too big (<10x10). 
+ // - The probability function used to populate the stochastic matrix + // is total and asymmetric, meaning that every iteration strongly + // contributes to convergence. + // - Probabilities of previous executions of the same action are + // cached in the Initial Size Class Cache (ISCC), meaning that + // the algorithm often has a good starting point. + // + // Still, it is recommended to inspect Prometheus metrics + // buildbarn_builder_page_rank_strategy_calculator_* in case changes + // are made to this option to assess the performance impact. + // + // Recommended value: 0.002 + double maximum_convergence_error = 4; +} diff --git a/pkg/proto/outputpathpersistency/BUILD.bazel b/pkg/proto/outputpathpersistency/BUILD.bazel new file mode 100644 index 0000000..663342b --- /dev/null +++ b/pkg/proto/outputpathpersistency/BUILD.bazel @@ -0,0 +1,28 @@ +load("@rules_proto//proto:defs.bzl", "proto_library") +load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") + +proto_library( + name = "outputpathpersistency_proto", + srcs = ["outputpathpersistency.proto"], + visibility = ["//visibility:public"], + deps = [ + "@com_github_bazelbuild_remote_apis//build/bazel/remote/execution/v2:remote_execution_proto", + "@com_google_protobuf//:timestamp_proto", + ], +) + +go_proto_library( + name = "outputpathpersistency_go_proto", + importpath = "github.com/buildbarn/bb-remote-execution/pkg/proto/outputpathpersistency", + proto = ":outputpathpersistency_proto", + visibility = ["//visibility:public"], + deps = ["@com_github_bazelbuild_remote_apis//build/bazel/remote/execution/v2:execution"], +) + +go_library( + name = "outputpathpersistency", + embed = [":outputpathpersistency_go_proto"], + importpath = "github.com/buildbarn/bb-remote-execution/pkg/proto/outputpathpersistency", + visibility = ["//visibility:public"], +) diff --git a/pkg/proto/outputpathpersistency/outputpathpersistency.pb.go 
b/pkg/proto/outputpathpersistency/outputpathpersistency.pb.go new file mode 100644 index 0000000..3f3440a --- /dev/null +++ b/pkg/proto/outputpathpersistency/outputpathpersistency.pb.go @@ -0,0 +1,422 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.31.0 +// protoc v4.23.4 +// source: pkg/proto/outputpathpersistency/outputpathpersistency.proto + +package outputpathpersistency + +import ( + v2 "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type RootDirectory struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + InitialCreationTime *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=initial_creation_time,json=initialCreationTime,proto3" json:"initial_creation_time,omitempty"` + Contents *Directory `protobuf:"bytes,2,opt,name=contents,proto3" json:"contents,omitempty"` +} + +func (x *RootDirectory) Reset() { + *x = RootDirectory{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_outputpathpersistency_outputpathpersistency_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RootDirectory) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RootDirectory) ProtoMessage() {} + +func (x *RootDirectory) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_outputpathpersistency_outputpathpersistency_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RootDirectory.ProtoReflect.Descriptor instead. +func (*RootDirectory) Descriptor() ([]byte, []int) { + return file_pkg_proto_outputpathpersistency_outputpathpersistency_proto_rawDescGZIP(), []int{0} +} + +func (x *RootDirectory) GetInitialCreationTime() *timestamppb.Timestamp { + if x != nil { + return x.InitialCreationTime + } + return nil +} + +func (x *RootDirectory) GetContents() *Directory { + if x != nil { + return x.Contents + } + return nil +} + +type Directory struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Files []*v2.FileNode `protobuf:"bytes,1,rep,name=files,proto3" json:"files,omitempty"` + Directories []*DirectoryNode `protobuf:"bytes,2,rep,name=directories,proto3" json:"directories,omitempty"` + Symlinks []*v2.SymlinkNode `protobuf:"bytes,3,rep,name=symlinks,proto3" json:"symlinks,omitempty"` +} + +func (x *Directory) Reset() { + *x = Directory{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_outputpathpersistency_outputpathpersistency_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Directory) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Directory) ProtoMessage() {} + +func (x *Directory) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_outputpathpersistency_outputpathpersistency_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Directory.ProtoReflect.Descriptor instead. 
+func (*Directory) Descriptor() ([]byte, []int) { + return file_pkg_proto_outputpathpersistency_outputpathpersistency_proto_rawDescGZIP(), []int{1} +} + +func (x *Directory) GetFiles() []*v2.FileNode { + if x != nil { + return x.Files + } + return nil +} + +func (x *Directory) GetDirectories() []*DirectoryNode { + if x != nil { + return x.Directories + } + return nil +} + +func (x *Directory) GetSymlinks() []*v2.SymlinkNode { + if x != nil { + return x.Symlinks + } + return nil +} + +type DirectoryNode struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + FileRegion *FileRegion `protobuf:"bytes,2,opt,name=file_region,json=fileRegion,proto3" json:"file_region,omitempty"` +} + +func (x *DirectoryNode) Reset() { + *x = DirectoryNode{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_outputpathpersistency_outputpathpersistency_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DirectoryNode) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DirectoryNode) ProtoMessage() {} + +func (x *DirectoryNode) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_outputpathpersistency_outputpathpersistency_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DirectoryNode.ProtoReflect.Descriptor instead. 
+func (*DirectoryNode) Descriptor() ([]byte, []int) { + return file_pkg_proto_outputpathpersistency_outputpathpersistency_proto_rawDescGZIP(), []int{2} +} + +func (x *DirectoryNode) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *DirectoryNode) GetFileRegion() *FileRegion { + if x != nil { + return x.FileRegion + } + return nil +} + +type FileRegion struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + OffsetBytes int64 `protobuf:"varint,1,opt,name=offset_bytes,json=offsetBytes,proto3" json:"offset_bytes,omitempty"` + SizeBytes int32 `protobuf:"varint,2,opt,name=size_bytes,json=sizeBytes,proto3" json:"size_bytes,omitempty"` +} + +func (x *FileRegion) Reset() { + *x = FileRegion{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_outputpathpersistency_outputpathpersistency_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FileRegion) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FileRegion) ProtoMessage() {} + +func (x *FileRegion) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_outputpathpersistency_outputpathpersistency_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FileRegion.ProtoReflect.Descriptor instead. 
+func (*FileRegion) Descriptor() ([]byte, []int) { + return file_pkg_proto_outputpathpersistency_outputpathpersistency_proto_rawDescGZIP(), []int{3} +} + +func (x *FileRegion) GetOffsetBytes() int64 { + if x != nil { + return x.OffsetBytes + } + return 0 +} + +func (x *FileRegion) GetSizeBytes() int32 { + if x != nil { + return x.SizeBytes + } + return 0 +} + +var File_pkg_proto_outputpathpersistency_outputpathpersistency_proto protoreflect.FileDescriptor + +var file_pkg_proto_outputpathpersistency_outputpathpersistency_proto_rawDesc = []byte{ + 0x0a, 0x3b, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6f, 0x75, 0x74, 0x70, + 0x75, 0x74, 0x70, 0x61, 0x74, 0x68, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, + 0x79, 0x2f, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x70, 0x61, 0x74, 0x68, 0x70, 0x65, 0x72, 0x73, + 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1f, 0x62, + 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x70, + 0x61, 0x74, 0x68, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x1a, 0x36, + 0x62, 0x75, 0x69, 0x6c, 0x64, 0x2f, 0x62, 0x61, 0x7a, 0x65, 0x6c, 0x2f, 0x72, 0x65, 0x6d, 0x6f, + 0x74, 0x65, 0x2f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x76, 0x32, 0x2f, + 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa7, 0x01, 0x0a, 0x0d, 0x52, 0x6f, 0x6f, 0x74, + 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x4e, 0x0a, 0x15, 0x69, 0x6e, 0x69, + 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 
0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x52, 0x13, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x46, 0x0a, 0x08, 0x63, 0x6f, 0x6e, + 0x74, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x62, 0x75, + 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x70, 0x61, + 0x74, 0x68, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x2e, 0x44, 0x69, + 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x08, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, + 0x73, 0x22, 0xe8, 0x01, 0x0a, 0x09, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, + 0x3f, 0x0a, 0x05, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, + 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x2e, 0x62, 0x61, 0x7a, 0x65, 0x6c, 0x2e, 0x72, 0x65, 0x6d, + 0x6f, 0x74, 0x65, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x32, + 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x05, 0x66, 0x69, 0x6c, 0x65, 0x73, + 0x12, 0x50, 0x0a, 0x0b, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x69, 0x65, 0x73, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, + 0x6e, 0x2e, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x70, 0x61, 0x74, 0x68, 0x70, 0x65, 0x72, 0x73, + 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x2e, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, + 0x79, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x0b, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x69, + 0x65, 0x73, 0x12, 0x48, 0x0a, 0x08, 0x73, 0x79, 0x6d, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x18, 0x03, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x2e, 0x62, 0x61, 0x7a, + 0x65, 0x6c, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, + 0x69, 0x6f, 
0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x79, 0x6d, 0x6c, 0x69, 0x6e, 0x6b, 0x4e, 0x6f, + 0x64, 0x65, 0x52, 0x08, 0x73, 0x79, 0x6d, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x22, 0x71, 0x0a, 0x0d, + 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x12, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x4c, 0x0a, 0x0b, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x72, 0x65, 0x67, 0x69, 0x6f, 0x6e, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, + 0x72, 0x6e, 0x2e, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x70, 0x61, 0x74, 0x68, 0x70, 0x65, 0x72, + 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x67, + 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x66, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x22, + 0x4e, 0x0a, 0x0a, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x12, 0x21, 0x0a, + 0x0c, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x0b, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x42, 0x79, 0x74, 0x65, 0x73, + 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x73, 0x69, 0x7a, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x42, + 0x4a, 0x5a, 0x48, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x62, 0x75, + 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2f, 0x62, 0x62, 0x2d, 0x72, 0x65, 0x6d, 0x6f, 0x74, + 0x65, 0x2d, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x70, 0x6b, 0x67, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x70, 0x61, 0x74, 0x68, + 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, +} + +var ( + file_pkg_proto_outputpathpersistency_outputpathpersistency_proto_rawDescOnce sync.Once + 
file_pkg_proto_outputpathpersistency_outputpathpersistency_proto_rawDescData = file_pkg_proto_outputpathpersistency_outputpathpersistency_proto_rawDesc +) + +func file_pkg_proto_outputpathpersistency_outputpathpersistency_proto_rawDescGZIP() []byte { + file_pkg_proto_outputpathpersistency_outputpathpersistency_proto_rawDescOnce.Do(func() { + file_pkg_proto_outputpathpersistency_outputpathpersistency_proto_rawDescData = protoimpl.X.CompressGZIP(file_pkg_proto_outputpathpersistency_outputpathpersistency_proto_rawDescData) + }) + return file_pkg_proto_outputpathpersistency_outputpathpersistency_proto_rawDescData +} + +var file_pkg_proto_outputpathpersistency_outputpathpersistency_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_pkg_proto_outputpathpersistency_outputpathpersistency_proto_goTypes = []interface{}{ + (*RootDirectory)(nil), // 0: buildbarn.outputpathpersistency.RootDirectory + (*Directory)(nil), // 1: buildbarn.outputpathpersistency.Directory + (*DirectoryNode)(nil), // 2: buildbarn.outputpathpersistency.DirectoryNode + (*FileRegion)(nil), // 3: buildbarn.outputpathpersistency.FileRegion + (*timestamppb.Timestamp)(nil), // 4: google.protobuf.Timestamp + (*v2.FileNode)(nil), // 5: build.bazel.remote.execution.v2.FileNode + (*v2.SymlinkNode)(nil), // 6: build.bazel.remote.execution.v2.SymlinkNode +} +var file_pkg_proto_outputpathpersistency_outputpathpersistency_proto_depIdxs = []int32{ + 4, // 0: buildbarn.outputpathpersistency.RootDirectory.initial_creation_time:type_name -> google.protobuf.Timestamp + 1, // 1: buildbarn.outputpathpersistency.RootDirectory.contents:type_name -> buildbarn.outputpathpersistency.Directory + 5, // 2: buildbarn.outputpathpersistency.Directory.files:type_name -> build.bazel.remote.execution.v2.FileNode + 2, // 3: buildbarn.outputpathpersistency.Directory.directories:type_name -> buildbarn.outputpathpersistency.DirectoryNode + 6, // 4: buildbarn.outputpathpersistency.Directory.symlinks:type_name -> 
build.bazel.remote.execution.v2.SymlinkNode + 3, // 5: buildbarn.outputpathpersistency.DirectoryNode.file_region:type_name -> buildbarn.outputpathpersistency.FileRegion + 6, // [6:6] is the sub-list for method output_type + 6, // [6:6] is the sub-list for method input_type + 6, // [6:6] is the sub-list for extension type_name + 6, // [6:6] is the sub-list for extension extendee + 0, // [0:6] is the sub-list for field type_name +} + +func init() { file_pkg_proto_outputpathpersistency_outputpathpersistency_proto_init() } +func file_pkg_proto_outputpathpersistency_outputpathpersistency_proto_init() { + if File_pkg_proto_outputpathpersistency_outputpathpersistency_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_pkg_proto_outputpathpersistency_outputpathpersistency_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RootDirectory); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_proto_outputpathpersistency_outputpathpersistency_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Directory); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_proto_outputpathpersistency_outputpathpersistency_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DirectoryNode); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_proto_outputpathpersistency_outputpathpersistency_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FileRegion); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + 
GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_pkg_proto_outputpathpersistency_outputpathpersistency_proto_rawDesc, + NumEnums: 0, + NumMessages: 4, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_pkg_proto_outputpathpersistency_outputpathpersistency_proto_goTypes, + DependencyIndexes: file_pkg_proto_outputpathpersistency_outputpathpersistency_proto_depIdxs, + MessageInfos: file_pkg_proto_outputpathpersistency_outputpathpersistency_proto_msgTypes, + }.Build() + File_pkg_proto_outputpathpersistency_outputpathpersistency_proto = out.File + file_pkg_proto_outputpathpersistency_outputpathpersistency_proto_rawDesc = nil + file_pkg_proto_outputpathpersistency_outputpathpersistency_proto_goTypes = nil + file_pkg_proto_outputpathpersistency_outputpathpersistency_proto_depIdxs = nil +} diff --git a/pkg/proto/outputpathpersistency/outputpathpersistency.proto b/pkg/proto/outputpathpersistency/outputpathpersistency.proto new file mode 100644 index 0000000..1cc3703 --- /dev/null +++ b/pkg/proto/outputpathpersistency/outputpathpersistency.proto @@ -0,0 +1,98 @@ +syntax = "proto3"; + +package buildbarn.outputpathpersistency; + +import "build/bazel/remote/execution/v2/remote_execution.proto"; +import "google/protobuf/timestamp.proto"; + +option go_package = "github.com/buildbarn/bb-remote-execution/pkg/proto/outputpathpersistency"; + +// bb_clientd has the option to persist the contents of output paths to +// disk after a build completes. This makes it possible to reload the +// contents of these directories after restarting bb_clientd. +// +// The state of output paths is stored by concatenating a Protobuf for +// every single directory into a file. Directories reference other +// directories by specifying the offset of the Protobuf message in the +// state file. To ensure the directory hierarchy is acyclic, directories +// can only reference other directories at offsets lower than their own. 
+// In other words, leaf directories are stored at the beginning of the +// file, while the root directory is stored at the very end. +// +// Every state file is prefixed with a 16-byte header. This header +// contains the following three fields: +// +// - A 4-byte magic: 0xfa 0x12 0xa4 0xa5. +// - An 8-byte offset of the RootDirectory message. +// - A 4-byte size of the RootDirectory message. +// +// There is no strong requirement that all Protobuf messages are +// referenced. It is valid for state files to contain "junk" data. This +// makes it possible to make incremental changes to directory contents +// by appending new Protobuf messages to the end of the file and +// updating the header to reference a new root directory. +// +// TODO: The state file has no facilities right now to reference entire +// directory hierarchies (Tree objects) remotely. Doing so would reduce +// the size of the state file significantly in case may actions are used +// that call Bazel's ctx.actions.declare_directory(). We can't add +// support for this right now, as we currently allow directories backed +// by Tree objects to be mutated. + +// The root directory of an output path. +message RootDirectory { + // The initial time at which this output path was created. This value + // is retained when performing incremental builds. + // + // This information may be used to prevent carrying over the results + // of ancient builds. Output paths that are used over a prolonged + // period of time may put excessive pressure on a remote execution + // service by continuing to reference files that are outdated. These + // files would continuously end up getting refreshed in the Content + // Addressable Storage (CAS). + google.protobuf.Timestamp initial_creation_time = 1; + + // The contents of the root directory. + Directory contents = 2; +} + +// The contents of an individual directory. 
This message is identical to +// build.bazel.remote.execution.v2.Directory, except that DirectoryNode +// is replaced with a version that references other parts of the state +// file, as opposed to being content addressed. +message Directory { + // Files present in the current directory that are present in the + // Content Addressable Storage (CAS) of the remote execution service. + // + // Files that were written into bb_clientd locally are not preserved. + // These would increase the size of the state file significantly. + // Bazel also has the expectation that writable files in the output + // path remain writable across builds, which means restoring them is + // inefficient. + repeated build.bazel.remote.execution.v2.FileNode files = 1; + + // Directories present in the current directory. + repeated DirectoryNode directories = 2; + + // Symbolic links present in the current directory. + repeated build.bazel.remote.execution.v2.SymlinkNode symlinks = 3; +} + +message DirectoryNode { + // The name of child directory. + string name = 1; + + // The location at which the directory is stored in the state file. + FileRegion file_region = 2; +} + +message FileRegion { + // The offset in bytes at which the object is stored in the state + // file. + int64 offset_bytes = 1; + + // The size in bytes of the object stored in the state file. The size + // must be greater than zero. Zero-sized objects may be referenced by + // leaving FileRegion unset. 
+ int32 size_bytes = 2; +} diff --git a/pkg/proto/remoteoutputservice/BUILD.bazel b/pkg/proto/remoteoutputservice/BUILD.bazel new file mode 100644 index 0000000..a9eea11 --- /dev/null +++ b/pkg/proto/remoteoutputservice/BUILD.bazel @@ -0,0 +1,30 @@ +load("@rules_proto//proto:defs.bzl", "proto_library") +load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") + +proto_library( + name = "remoteoutputservice_proto", + srcs = ["remote_output_service.proto"], + visibility = ["//visibility:public"], + deps = [ + "@com_github_bazelbuild_remote_apis//build/bazel/remote/execution/v2:remote_execution_proto", + "@com_google_protobuf//:empty_proto", + "@com_google_protobuf//:timestamp_proto", + ], +) + +go_proto_library( + name = "remoteoutputservice_go_proto", + compilers = ["@io_bazel_rules_go//proto:go_grpc"], + importpath = "github.com/buildbarn/bb-remote-execution/pkg/proto/remoteoutputservice", + proto = ":remoteoutputservice_proto", + visibility = ["//visibility:public"], + deps = ["@com_github_bazelbuild_remote_apis//build/bazel/remote/execution/v2:execution"], +) + +go_library( + name = "remoteoutputservice", + embed = [":remoteoutputservice_go_proto"], + importpath = "github.com/buildbarn/bb-remote-execution/pkg/proto/remoteoutputservice", + visibility = ["//visibility:public"], +) diff --git a/pkg/proto/remoteoutputservice/remote_output_service.pb.go b/pkg/proto/remoteoutputservice/remote_output_service.pb.go new file mode 100644 index 0000000..9d1485e --- /dev/null +++ b/pkg/proto/remoteoutputservice/remote_output_service.pb.go @@ -0,0 +1,1566 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.31.0 +// protoc v4.23.4 +// source: pkg/proto/remoteoutputservice/remote_output_service.proto + +package remoteoutputservice + +import ( + context "context" + v2 "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type CleanRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + OutputBaseId string `protobuf:"bytes,1,opt,name=output_base_id,json=outputBaseId,proto3" json:"output_base_id,omitempty"` +} + +func (x *CleanRequest) Reset() { + *x = CleanRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_remoteoutputservice_remote_output_service_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CleanRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CleanRequest) ProtoMessage() {} + +func (x *CleanRequest) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_remoteoutputservice_remote_output_service_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CleanRequest.ProtoReflect.Descriptor 
instead. +func (*CleanRequest) Descriptor() ([]byte, []int) { + return file_pkg_proto_remoteoutputservice_remote_output_service_proto_rawDescGZIP(), []int{0} +} + +func (x *CleanRequest) GetOutputBaseId() string { + if x != nil { + return x.OutputBaseId + } + return "" +} + +type StartBuildRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + OutputBaseId string `protobuf:"bytes,1,opt,name=output_base_id,json=outputBaseId,proto3" json:"output_base_id,omitempty"` + BuildId string `protobuf:"bytes,2,opt,name=build_id,json=buildId,proto3" json:"build_id,omitempty"` + InstanceName string `protobuf:"bytes,3,opt,name=instance_name,json=instanceName,proto3" json:"instance_name,omitempty"` + DigestFunction v2.DigestFunction_Value `protobuf:"varint,4,opt,name=digest_function,json=digestFunction,proto3,enum=build.bazel.remote.execution.v2.DigestFunction_Value" json:"digest_function,omitempty"` + OutputPathPrefix string `protobuf:"bytes,5,opt,name=output_path_prefix,json=outputPathPrefix,proto3" json:"output_path_prefix,omitempty"` + OutputPathAliases map[string]string `protobuf:"bytes,6,rep,name=output_path_aliases,json=outputPathAliases,proto3" json:"output_path_aliases,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *StartBuildRequest) Reset() { + *x = StartBuildRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_remoteoutputservice_remote_output_service_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StartBuildRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StartBuildRequest) ProtoMessage() {} + +func (x *StartBuildRequest) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_remoteoutputservice_remote_output_service_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StartBuildRequest.ProtoReflect.Descriptor instead. +func (*StartBuildRequest) Descriptor() ([]byte, []int) { + return file_pkg_proto_remoteoutputservice_remote_output_service_proto_rawDescGZIP(), []int{1} +} + +func (x *StartBuildRequest) GetOutputBaseId() string { + if x != nil { + return x.OutputBaseId + } + return "" +} + +func (x *StartBuildRequest) GetBuildId() string { + if x != nil { + return x.BuildId + } + return "" +} + +func (x *StartBuildRequest) GetInstanceName() string { + if x != nil { + return x.InstanceName + } + return "" +} + +func (x *StartBuildRequest) GetDigestFunction() v2.DigestFunction_Value { + if x != nil { + return x.DigestFunction + } + return v2.DigestFunction_Value(0) +} + +func (x *StartBuildRequest) GetOutputPathPrefix() string { + if x != nil { + return x.OutputPathPrefix + } + return "" +} + +func (x *StartBuildRequest) GetOutputPathAliases() map[string]string { + if x != nil { + return x.OutputPathAliases + } + return nil +} + +type InitialOutputPathContents struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + BuildId string `protobuf:"bytes,1,opt,name=build_id,json=buildId,proto3" json:"build_id,omitempty"` + ModifiedPaths []string `protobuf:"bytes,2,rep,name=modified_paths,json=modifiedPaths,proto3" json:"modified_paths,omitempty"` +} + +func (x *InitialOutputPathContents) Reset() { + *x = InitialOutputPathContents{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_remoteoutputservice_remote_output_service_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *InitialOutputPathContents) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*InitialOutputPathContents) ProtoMessage() {} + +func (x 
*InitialOutputPathContents) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_remoteoutputservice_remote_output_service_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use InitialOutputPathContents.ProtoReflect.Descriptor instead. +func (*InitialOutputPathContents) Descriptor() ([]byte, []int) { + return file_pkg_proto_remoteoutputservice_remote_output_service_proto_rawDescGZIP(), []int{2} +} + +func (x *InitialOutputPathContents) GetBuildId() string { + if x != nil { + return x.BuildId + } + return "" +} + +func (x *InitialOutputPathContents) GetModifiedPaths() []string { + if x != nil { + return x.ModifiedPaths + } + return nil +} + +type StartBuildResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + InitialOutputPathContents *InitialOutputPathContents `protobuf:"bytes,1,opt,name=initial_output_path_contents,json=initialOutputPathContents,proto3" json:"initial_output_path_contents,omitempty"` + OutputPathSuffix string `protobuf:"bytes,2,opt,name=output_path_suffix,json=outputPathSuffix,proto3" json:"output_path_suffix,omitempty"` +} + +func (x *StartBuildResponse) Reset() { + *x = StartBuildResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_remoteoutputservice_remote_output_service_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StartBuildResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StartBuildResponse) ProtoMessage() {} + +func (x *StartBuildResponse) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_remoteoutputservice_remote_output_service_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StartBuildResponse.ProtoReflect.Descriptor instead. +func (*StartBuildResponse) Descriptor() ([]byte, []int) { + return file_pkg_proto_remoteoutputservice_remote_output_service_proto_rawDescGZIP(), []int{3} +} + +func (x *StartBuildResponse) GetInitialOutputPathContents() *InitialOutputPathContents { + if x != nil { + return x.InitialOutputPathContents + } + return nil +} + +func (x *StartBuildResponse) GetOutputPathSuffix() string { + if x != nil { + return x.OutputPathSuffix + } + return "" +} + +type BatchCreateRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + BuildId string `protobuf:"bytes,1,opt,name=build_id,json=buildId,proto3" json:"build_id,omitempty"` + PathPrefix string `protobuf:"bytes,2,opt,name=path_prefix,json=pathPrefix,proto3" json:"path_prefix,omitempty"` + CleanPathPrefix bool `protobuf:"varint,3,opt,name=clean_path_prefix,json=cleanPathPrefix,proto3" json:"clean_path_prefix,omitempty"` + Files []*v2.OutputFile `protobuf:"bytes,4,rep,name=files,proto3" json:"files,omitempty"` + Symlinks []*v2.OutputSymlink `protobuf:"bytes,5,rep,name=symlinks,proto3" json:"symlinks,omitempty"` + Directories []*v2.OutputDirectory `protobuf:"bytes,6,rep,name=directories,proto3" json:"directories,omitempty"` +} + +func (x *BatchCreateRequest) Reset() { + *x = BatchCreateRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_remoteoutputservice_remote_output_service_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BatchCreateRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BatchCreateRequest) ProtoMessage() {} + +func (x *BatchCreateRequest) ProtoReflect() protoreflect.Message { + mi := 
&file_pkg_proto_remoteoutputservice_remote_output_service_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BatchCreateRequest.ProtoReflect.Descriptor instead. +func (*BatchCreateRequest) Descriptor() ([]byte, []int) { + return file_pkg_proto_remoteoutputservice_remote_output_service_proto_rawDescGZIP(), []int{4} +} + +func (x *BatchCreateRequest) GetBuildId() string { + if x != nil { + return x.BuildId + } + return "" +} + +func (x *BatchCreateRequest) GetPathPrefix() string { + if x != nil { + return x.PathPrefix + } + return "" +} + +func (x *BatchCreateRequest) GetCleanPathPrefix() bool { + if x != nil { + return x.CleanPathPrefix + } + return false +} + +func (x *BatchCreateRequest) GetFiles() []*v2.OutputFile { + if x != nil { + return x.Files + } + return nil +} + +func (x *BatchCreateRequest) GetSymlinks() []*v2.OutputSymlink { + if x != nil { + return x.Symlinks + } + return nil +} + +func (x *BatchCreateRequest) GetDirectories() []*v2.OutputDirectory { + if x != nil { + return x.Directories + } + return nil +} + +type BatchStatRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + BuildId string `protobuf:"bytes,1,opt,name=build_id,json=buildId,proto3" json:"build_id,omitempty"` + IncludeFileDigest bool `protobuf:"varint,2,opt,name=include_file_digest,json=includeFileDigest,proto3" json:"include_file_digest,omitempty"` + IncludeSymlinkTarget bool `protobuf:"varint,3,opt,name=include_symlink_target,json=includeSymlinkTarget,proto3" json:"include_symlink_target,omitempty"` + FollowSymlinks bool `protobuf:"varint,4,opt,name=follow_symlinks,json=followSymlinks,proto3" json:"follow_symlinks,omitempty"` + Paths []string `protobuf:"bytes,5,rep,name=paths,proto3" json:"paths,omitempty"` +} + +func (x 
*BatchStatRequest) Reset() { + *x = BatchStatRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_remoteoutputservice_remote_output_service_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BatchStatRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BatchStatRequest) ProtoMessage() {} + +func (x *BatchStatRequest) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_remoteoutputservice_remote_output_service_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BatchStatRequest.ProtoReflect.Descriptor instead. +func (*BatchStatRequest) Descriptor() ([]byte, []int) { + return file_pkg_proto_remoteoutputservice_remote_output_service_proto_rawDescGZIP(), []int{5} +} + +func (x *BatchStatRequest) GetBuildId() string { + if x != nil { + return x.BuildId + } + return "" +} + +func (x *BatchStatRequest) GetIncludeFileDigest() bool { + if x != nil { + return x.IncludeFileDigest + } + return false +} + +func (x *BatchStatRequest) GetIncludeSymlinkTarget() bool { + if x != nil { + return x.IncludeSymlinkTarget + } + return false +} + +func (x *BatchStatRequest) GetFollowSymlinks() bool { + if x != nil { + return x.FollowSymlinks + } + return false +} + +func (x *BatchStatRequest) GetPaths() []string { + if x != nil { + return x.Paths + } + return nil +} + +type BatchStatResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Responses []*StatResponse `protobuf:"bytes,1,rep,name=responses,proto3" json:"responses,omitempty"` +} + +func (x *BatchStatResponse) Reset() { + *x = BatchStatResponse{} + if protoimpl.UnsafeEnabled { + mi := 
&file_pkg_proto_remoteoutputservice_remote_output_service_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BatchStatResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BatchStatResponse) ProtoMessage() {} + +func (x *BatchStatResponse) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_remoteoutputservice_remote_output_service_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BatchStatResponse.ProtoReflect.Descriptor instead. +func (*BatchStatResponse) Descriptor() ([]byte, []int) { + return file_pkg_proto_remoteoutputservice_remote_output_service_proto_rawDescGZIP(), []int{6} +} + +func (x *BatchStatResponse) GetResponses() []*StatResponse { + if x != nil { + return x.Responses + } + return nil +} + +type StatResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + FileStatus *FileStatus `protobuf:"bytes,1,opt,name=file_status,json=fileStatus,proto3" json:"file_status,omitempty"` +} + +func (x *StatResponse) Reset() { + *x = StatResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_remoteoutputservice_remote_output_service_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StatResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatResponse) ProtoMessage() {} + +func (x *StatResponse) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_remoteoutputservice_remote_output_service_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + 
return mi.MessageOf(x) +} + +// Deprecated: Use StatResponse.ProtoReflect.Descriptor instead. +func (*StatResponse) Descriptor() ([]byte, []int) { + return file_pkg_proto_remoteoutputservice_remote_output_service_proto_rawDescGZIP(), []int{7} +} + +func (x *StatResponse) GetFileStatus() *FileStatus { + if x != nil { + return x.FileStatus + } + return nil +} + +type FileStatus struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to FileType: + // + // *FileStatus_File_ + // *FileStatus_Symlink_ + // *FileStatus_Directory_ + // *FileStatus_External_ + FileType isFileStatus_FileType `protobuf_oneof:"file_type"` +} + +func (x *FileStatus) Reset() { + *x = FileStatus{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_remoteoutputservice_remote_output_service_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FileStatus) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FileStatus) ProtoMessage() {} + +func (x *FileStatus) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_remoteoutputservice_remote_output_service_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FileStatus.ProtoReflect.Descriptor instead. 
+func (*FileStatus) Descriptor() ([]byte, []int) { + return file_pkg_proto_remoteoutputservice_remote_output_service_proto_rawDescGZIP(), []int{8} +} + +func (m *FileStatus) GetFileType() isFileStatus_FileType { + if m != nil { + return m.FileType + } + return nil +} + +func (x *FileStatus) GetFile() *FileStatus_File { + if x, ok := x.GetFileType().(*FileStatus_File_); ok { + return x.File + } + return nil +} + +func (x *FileStatus) GetSymlink() *FileStatus_Symlink { + if x, ok := x.GetFileType().(*FileStatus_Symlink_); ok { + return x.Symlink + } + return nil +} + +func (x *FileStatus) GetDirectory() *FileStatus_Directory { + if x, ok := x.GetFileType().(*FileStatus_Directory_); ok { + return x.Directory + } + return nil +} + +func (x *FileStatus) GetExternal() *FileStatus_External { + if x, ok := x.GetFileType().(*FileStatus_External_); ok { + return x.External + } + return nil +} + +type isFileStatus_FileType interface { + isFileStatus_FileType() +} + +type FileStatus_File_ struct { + File *FileStatus_File `protobuf:"bytes,1,opt,name=file,proto3,oneof"` +} + +type FileStatus_Symlink_ struct { + Symlink *FileStatus_Symlink `protobuf:"bytes,2,opt,name=symlink,proto3,oneof"` +} + +type FileStatus_Directory_ struct { + Directory *FileStatus_Directory `protobuf:"bytes,3,opt,name=directory,proto3,oneof"` +} + +type FileStatus_External_ struct { + External *FileStatus_External `protobuf:"bytes,4,opt,name=external,proto3,oneof"` +} + +func (*FileStatus_File_) isFileStatus_FileType() {} + +func (*FileStatus_Symlink_) isFileStatus_FileType() {} + +func (*FileStatus_Directory_) isFileStatus_FileType() {} + +func (*FileStatus_External_) isFileStatus_FileType() {} + +type FinalizeBuildRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + BuildId string `protobuf:"bytes,1,opt,name=build_id,json=buildId,proto3" json:"build_id,omitempty"` + BuildSuccessful bool 
`protobuf:"varint,2,opt,name=build_successful,json=buildSuccessful,proto3" json:"build_successful,omitempty"` +} + +func (x *FinalizeBuildRequest) Reset() { + *x = FinalizeBuildRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_remoteoutputservice_remote_output_service_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FinalizeBuildRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FinalizeBuildRequest) ProtoMessage() {} + +func (x *FinalizeBuildRequest) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_remoteoutputservice_remote_output_service_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FinalizeBuildRequest.ProtoReflect.Descriptor instead. +func (*FinalizeBuildRequest) Descriptor() ([]byte, []int) { + return file_pkg_proto_remoteoutputservice_remote_output_service_proto_rawDescGZIP(), []int{9} +} + +func (x *FinalizeBuildRequest) GetBuildId() string { + if x != nil { + return x.BuildId + } + return "" +} + +func (x *FinalizeBuildRequest) GetBuildSuccessful() bool { + if x != nil { + return x.BuildSuccessful + } + return false +} + +type FileStatus_File struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Digest *v2.Digest `protobuf:"bytes,1,opt,name=digest,proto3" json:"digest,omitempty"` +} + +func (x *FileStatus_File) Reset() { + *x = FileStatus_File{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_remoteoutputservice_remote_output_service_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FileStatus_File) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FileStatus_File) 
ProtoMessage() {} + +func (x *FileStatus_File) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_remoteoutputservice_remote_output_service_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FileStatus_File.ProtoReflect.Descriptor instead. +func (*FileStatus_File) Descriptor() ([]byte, []int) { + return file_pkg_proto_remoteoutputservice_remote_output_service_proto_rawDescGZIP(), []int{8, 0} +} + +func (x *FileStatus_File) GetDigest() *v2.Digest { + if x != nil { + return x.Digest + } + return nil +} + +type FileStatus_Symlink struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Target string `protobuf:"bytes,1,opt,name=target,proto3" json:"target,omitempty"` +} + +func (x *FileStatus_Symlink) Reset() { + *x = FileStatus_Symlink{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_remoteoutputservice_remote_output_service_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FileStatus_Symlink) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FileStatus_Symlink) ProtoMessage() {} + +func (x *FileStatus_Symlink) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_remoteoutputservice_remote_output_service_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FileStatus_Symlink.ProtoReflect.Descriptor instead. 
+func (*FileStatus_Symlink) Descriptor() ([]byte, []int) { + return file_pkg_proto_remoteoutputservice_remote_output_service_proto_rawDescGZIP(), []int{8, 1} +} + +func (x *FileStatus_Symlink) GetTarget() string { + if x != nil { + return x.Target + } + return "" +} + +type FileStatus_Directory struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + LastModifiedTime *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=last_modified_time,json=lastModifiedTime,proto3" json:"last_modified_time,omitempty"` +} + +func (x *FileStatus_Directory) Reset() { + *x = FileStatus_Directory{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_remoteoutputservice_remote_output_service_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FileStatus_Directory) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FileStatus_Directory) ProtoMessage() {} + +func (x *FileStatus_Directory) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_remoteoutputservice_remote_output_service_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FileStatus_Directory.ProtoReflect.Descriptor instead. 
+func (*FileStatus_Directory) Descriptor() ([]byte, []int) { + return file_pkg_proto_remoteoutputservice_remote_output_service_proto_rawDescGZIP(), []int{8, 2} +} + +func (x *FileStatus_Directory) GetLastModifiedTime() *timestamppb.Timestamp { + if x != nil { + return x.LastModifiedTime + } + return nil +} + +type FileStatus_External struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + NextPath string `protobuf:"bytes,1,opt,name=next_path,json=nextPath,proto3" json:"next_path,omitempty"` +} + +func (x *FileStatus_External) Reset() { + *x = FileStatus_External{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_remoteoutputservice_remote_output_service_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FileStatus_External) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FileStatus_External) ProtoMessage() {} + +func (x *FileStatus_External) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_remoteoutputservice_remote_output_service_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FileStatus_External.ProtoReflect.Descriptor instead. 
+func (*FileStatus_External) Descriptor() ([]byte, []int) { + return file_pkg_proto_remoteoutputservice_remote_output_service_proto_rawDescGZIP(), []int{8, 3} +} + +func (x *FileStatus_External) GetNextPath() string { + if x != nil { + return x.NextPath + } + return "" +} + +var File_pkg_proto_remoteoutputservice_remote_output_service_proto protoreflect.FileDescriptor + +var file_pkg_proto_remoteoutputservice_remote_output_service_proto_rawDesc = []byte{ + 0x0a, 0x39, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x72, 0x65, 0x6d, 0x6f, + 0x74, 0x65, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2f, + 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x72, 0x65, 0x6d, + 0x6f, 0x74, 0x65, 0x5f, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x1a, 0x36, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x2f, 0x62, 0x61, 0x7a, 0x65, 0x6c, 0x2f, + 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, + 0x2f, 0x76, 0x32, 0x2f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x65, 0x78, 0x65, 0x63, 0x75, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, + 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x34, 0x0a, 0x0c, 0x43, 0x6c, 0x65, 0x61, + 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x24, 0x0a, 0x0e, 0x6f, 0x75, 0x74, 0x70, + 0x75, 0x74, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0c, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x42, 0x61, 0x73, 
0x65, 0x49, 0x64, 0x22, 0xbe, + 0x03, 0x0a, 0x11, 0x53, 0x74, 0x61, 0x72, 0x74, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x24, 0x0a, 0x0e, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x62, + 0x61, 0x73, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6f, 0x75, + 0x74, 0x70, 0x75, 0x74, 0x42, 0x61, 0x73, 0x65, 0x49, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x62, 0x75, + 0x69, 0x6c, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x62, 0x75, + 0x69, 0x6c, 0x64, 0x49, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, + 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x69, 0x6e, + 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x5e, 0x0a, 0x0f, 0x64, 0x69, + 0x67, 0x65, 0x73, 0x74, 0x5f, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x35, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x2e, 0x62, 0x61, 0x7a, 0x65, + 0x6c, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x46, 0x75, 0x6e, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0e, 0x64, 0x69, 0x67, 0x65, + 0x73, 0x74, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2c, 0x0a, 0x12, 0x6f, 0x75, + 0x74, 0x70, 0x75, 0x74, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x50, 0x61, + 0x74, 0x68, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x6f, 0x0a, 0x13, 0x6f, 0x75, 0x74, 0x70, + 0x75, 0x74, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, + 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3f, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x6f, + 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x53, 0x74, + 0x61, 
0x72, 0x74, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, + 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x50, 0x61, 0x74, 0x68, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x50, 0x61, + 0x74, 0x68, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x1a, 0x44, 0x0a, 0x16, 0x4f, 0x75, 0x74, + 0x70, 0x75, 0x74, 0x50, 0x61, 0x74, 0x68, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, + 0x5d, 0x0a, 0x19, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, + 0x50, 0x61, 0x74, 0x68, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x19, 0x0a, 0x08, + 0x62, 0x75, 0x69, 0x6c, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, + 0x62, 0x75, 0x69, 0x6c, 0x64, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x6f, 0x64, 0x69, 0x66, + 0x69, 0x65, 0x64, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x0d, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x50, 0x61, 0x74, 0x68, 0x73, 0x22, 0xb5, + 0x01, 0x0a, 0x12, 0x53, 0x74, 0x61, 0x72, 0x74, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x71, 0x0a, 0x1c, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, + 0x5f, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x63, 0x6f, 0x6e, + 0x74, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x72, 0x65, + 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x4f, 0x75, 0x74, 0x70, 0x75, + 0x74, 0x50, 0x61, 0x74, 0x68, 0x43, 0x6f, 
0x6e, 0x74, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x19, 0x69, + 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x50, 0x61, 0x74, 0x68, + 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x6f, 0x75, 0x74, 0x70, + 0x75, 0x74, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x50, 0x61, 0x74, 0x68, + 0x53, 0x75, 0x66, 0x66, 0x69, 0x78, 0x22, 0xdf, 0x02, 0x0a, 0x12, 0x42, 0x61, 0x74, 0x63, 0x68, + 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, + 0x08, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x61, 0x74, 0x68, + 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x70, + 0x61, 0x74, 0x68, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x2a, 0x0a, 0x11, 0x63, 0x6c, 0x65, + 0x61, 0x6e, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x63, 0x6c, 0x65, 0x61, 0x6e, 0x50, 0x61, 0x74, 0x68, 0x50, + 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x41, 0x0a, 0x05, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x04, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x2e, 0x62, 0x61, 0x7a, + 0x65, 0x6c, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, + 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x46, 0x69, 0x6c, + 0x65, 0x52, 0x05, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x4a, 0x0a, 0x08, 0x73, 0x79, 0x6d, 0x6c, + 0x69, 0x6e, 0x6b, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x62, 0x75, 0x69, + 0x6c, 0x64, 0x2e, 0x62, 0x61, 0x7a, 0x65, 0x6c, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, + 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 
0x4f, 0x75, 0x74, + 0x70, 0x75, 0x74, 0x53, 0x79, 0x6d, 0x6c, 0x69, 0x6e, 0x6b, 0x52, 0x08, 0x73, 0x79, 0x6d, 0x6c, + 0x69, 0x6e, 0x6b, 0x73, 0x12, 0x52, 0x0a, 0x0b, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, + 0x69, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x62, 0x75, 0x69, 0x6c, + 0x64, 0x2e, 0x62, 0x61, 0x7a, 0x65, 0x6c, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x65, + 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x75, 0x74, 0x70, + 0x75, 0x74, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x0b, 0x64, 0x69, 0x72, + 0x65, 0x63, 0x74, 0x6f, 0x72, 0x69, 0x65, 0x73, 0x22, 0xd2, 0x01, 0x0a, 0x10, 0x42, 0x61, 0x74, + 0x63, 0x68, 0x53, 0x74, 0x61, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, + 0x08, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x49, 0x64, 0x12, 0x2e, 0x0a, 0x13, 0x69, 0x6e, 0x63, 0x6c, + 0x75, 0x64, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x46, 0x69, + 0x6c, 0x65, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x34, 0x0a, 0x16, 0x69, 0x6e, 0x63, 0x6c, + 0x75, 0x64, 0x65, 0x5f, 0x73, 0x79, 0x6d, 0x6c, 0x69, 0x6e, 0x6b, 0x5f, 0x74, 0x61, 0x72, 0x67, + 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, + 0x65, 0x53, 0x79, 0x6d, 0x6c, 0x69, 0x6e, 0x6b, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x27, + 0x0a, 0x0f, 0x66, 0x6f, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x73, 0x79, 0x6d, 0x6c, 0x69, 0x6e, 0x6b, + 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x66, 0x6f, 0x6c, 0x6c, 0x6f, 0x77, 0x53, + 0x79, 0x6d, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x70, 0x61, 0x74, 0x68, 0x73, + 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x70, 0x61, 0x74, 0x68, 0x73, 0x22, 0x56, 0x0a, + 0x11, 0x42, 0x61, 
0x74, 0x63, 0x68, 0x53, 0x74, 0x61, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x41, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x6f, + 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x53, 0x74, + 0x61, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x09, 0x72, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x22, 0x52, 0x0a, 0x0c, 0x53, 0x74, 0x61, 0x74, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x42, 0x0a, 0x0b, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x72, 0x65, 0x6d, + 0x6f, 0x74, 0x65, 0x5f, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0a, 0x66, + 0x69, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0xa1, 0x04, 0x0a, 0x0a, 0x46, 0x69, + 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x3c, 0x0a, 0x04, 0x66, 0x69, 0x6c, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, + 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x46, + 0x69, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x48, 0x00, + 0x52, 0x04, 0x66, 0x69, 0x6c, 0x65, 0x12, 0x45, 0x0a, 0x07, 0x73, 0x79, 0x6d, 0x6c, 0x69, 0x6e, + 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, + 0x5f, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, + 0x46, 0x69, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x53, 0x79, 0x6d, 0x6c, 0x69, + 0x6e, 0x6b, 0x48, 0x00, 0x52, 0x07, 0x73, 0x79, 0x6d, 0x6c, 0x69, 0x6e, 0x6b, 0x12, 0x4b, 0x0a, + 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 
0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x2b, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, + 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x2e, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x48, 0x00, 0x52, + 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x48, 0x0a, 0x08, 0x65, 0x78, + 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x72, + 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, + 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x48, 0x00, 0x52, 0x08, 0x65, 0x78, 0x74, 0x65, + 0x72, 0x6e, 0x61, 0x6c, 0x1a, 0x47, 0x0a, 0x04, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x3f, 0x0a, 0x06, + 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x62, + 0x75, 0x69, 0x6c, 0x64, 0x2e, 0x62, 0x61, 0x7a, 0x65, 0x6c, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, + 0x65, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x44, + 0x69, 0x67, 0x65, 0x73, 0x74, 0x52, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x0a, + 0x07, 0x53, 0x79, 0x6d, 0x6c, 0x69, 0x6e, 0x6b, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, + 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, + 0x1a, 0x55, 0x0a, 0x09, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x48, 0x0a, + 0x12, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x74, + 0x69, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x10, 0x6c, 0x61, 0x73, 0x74, 0x4d, 0x6f, 0x64, 0x69, 
0x66, + 0x69, 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x1a, 0x27, 0x0a, 0x08, 0x45, 0x78, 0x74, 0x65, 0x72, + 0x6e, 0x61, 0x6c, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x74, 0x68, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x74, 0x68, + 0x42, 0x0b, 0x0a, 0x09, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x22, 0x5c, 0x0a, + 0x14, 0x46, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x5f, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x49, 0x64, + 0x12, 0x29, 0x0a, 0x10, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x5f, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x66, 0x75, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x62, 0x75, 0x69, 0x6c, + 0x64, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x66, 0x75, 0x6c, 0x32, 0xc6, 0x03, 0x0a, 0x13, + 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x12, 0x44, 0x0a, 0x05, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x12, 0x23, 0x2e, 0x72, + 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x2e, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x61, 0x0a, 0x0a, 0x53, 0x74, 0x61, + 0x72, 0x74, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x12, 0x28, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, + 0x5f, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, + 0x53, 0x74, 0x61, 0x72, 0x74, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x29, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x6f, 0x75, 0x74, 0x70, 0x75, + 0x74, 0x5f, 0x73, 0x65, 0x72, 
0x76, 0x69, 0x63, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x42, + 0x75, 0x69, 0x6c, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x50, 0x0a, 0x0b, + 0x42, 0x61, 0x74, 0x63, 0x68, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x29, 0x2e, 0x72, 0x65, + 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x2e, 0x42, 0x61, 0x74, 0x63, 0x68, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x5e, + 0x0a, 0x09, 0x42, 0x61, 0x74, 0x63, 0x68, 0x53, 0x74, 0x61, 0x74, 0x12, 0x27, 0x2e, 0x72, 0x65, + 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x2e, 0x42, 0x61, 0x74, 0x63, 0x68, 0x53, 0x74, 0x61, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x6f, 0x75, + 0x74, 0x70, 0x75, 0x74, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x42, 0x61, 0x74, + 0x63, 0x68, 0x53, 0x74, 0x61, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x54, + 0x0a, 0x0d, 0x46, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x12, + 0x2b, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x46, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, + 0x42, 0x75, 0x69, 0x6c, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, + 0x6d, 0x70, 0x74, 0x79, 0x42, 0x55, 0x0a, 0x24, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x64, 0x65, 0x76, 0x74, 0x6f, 0x6f, 0x6c, 0x73, 0x2e, 0x62, 0x75, 0x69, 0x6c, + 0x64, 0x2e, 0x6c, 0x69, 0x62, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 
0x65, 0x42, 0x18, 0x52, 0x65, + 0x6d, 0x6f, 0x74, 0x65, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x5a, 0x13, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x6f, 0x75, + 0x74, 0x70, 0x75, 0x74, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, +} + +var ( + file_pkg_proto_remoteoutputservice_remote_output_service_proto_rawDescOnce sync.Once + file_pkg_proto_remoteoutputservice_remote_output_service_proto_rawDescData = file_pkg_proto_remoteoutputservice_remote_output_service_proto_rawDesc +) + +func file_pkg_proto_remoteoutputservice_remote_output_service_proto_rawDescGZIP() []byte { + file_pkg_proto_remoteoutputservice_remote_output_service_proto_rawDescOnce.Do(func() { + file_pkg_proto_remoteoutputservice_remote_output_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_pkg_proto_remoteoutputservice_remote_output_service_proto_rawDescData) + }) + return file_pkg_proto_remoteoutputservice_remote_output_service_proto_rawDescData +} + +var file_pkg_proto_remoteoutputservice_remote_output_service_proto_msgTypes = make([]protoimpl.MessageInfo, 15) +var file_pkg_proto_remoteoutputservice_remote_output_service_proto_goTypes = []interface{}{ + (*CleanRequest)(nil), // 0: remote_output_service.CleanRequest + (*StartBuildRequest)(nil), // 1: remote_output_service.StartBuildRequest + (*InitialOutputPathContents)(nil), // 2: remote_output_service.InitialOutputPathContents + (*StartBuildResponse)(nil), // 3: remote_output_service.StartBuildResponse + (*BatchCreateRequest)(nil), // 4: remote_output_service.BatchCreateRequest + (*BatchStatRequest)(nil), // 5: remote_output_service.BatchStatRequest + (*BatchStatResponse)(nil), // 6: remote_output_service.BatchStatResponse + (*StatResponse)(nil), // 7: remote_output_service.StatResponse + (*FileStatus)(nil), // 8: remote_output_service.FileStatus + (*FinalizeBuildRequest)(nil), // 9: remote_output_service.FinalizeBuildRequest 
+ nil, // 10: remote_output_service.StartBuildRequest.OutputPathAliasesEntry + (*FileStatus_File)(nil), // 11: remote_output_service.FileStatus.File + (*FileStatus_Symlink)(nil), // 12: remote_output_service.FileStatus.Symlink + (*FileStatus_Directory)(nil), // 13: remote_output_service.FileStatus.Directory + (*FileStatus_External)(nil), // 14: remote_output_service.FileStatus.External + (v2.DigestFunction_Value)(0), // 15: build.bazel.remote.execution.v2.DigestFunction.Value + (*v2.OutputFile)(nil), // 16: build.bazel.remote.execution.v2.OutputFile + (*v2.OutputSymlink)(nil), // 17: build.bazel.remote.execution.v2.OutputSymlink + (*v2.OutputDirectory)(nil), // 18: build.bazel.remote.execution.v2.OutputDirectory + (*v2.Digest)(nil), // 19: build.bazel.remote.execution.v2.Digest + (*timestamppb.Timestamp)(nil), // 20: google.protobuf.Timestamp + (*emptypb.Empty)(nil), // 21: google.protobuf.Empty +} +var file_pkg_proto_remoteoutputservice_remote_output_service_proto_depIdxs = []int32{ + 15, // 0: remote_output_service.StartBuildRequest.digest_function:type_name -> build.bazel.remote.execution.v2.DigestFunction.Value + 10, // 1: remote_output_service.StartBuildRequest.output_path_aliases:type_name -> remote_output_service.StartBuildRequest.OutputPathAliasesEntry + 2, // 2: remote_output_service.StartBuildResponse.initial_output_path_contents:type_name -> remote_output_service.InitialOutputPathContents + 16, // 3: remote_output_service.BatchCreateRequest.files:type_name -> build.bazel.remote.execution.v2.OutputFile + 17, // 4: remote_output_service.BatchCreateRequest.symlinks:type_name -> build.bazel.remote.execution.v2.OutputSymlink + 18, // 5: remote_output_service.BatchCreateRequest.directories:type_name -> build.bazel.remote.execution.v2.OutputDirectory + 7, // 6: remote_output_service.BatchStatResponse.responses:type_name -> remote_output_service.StatResponse + 8, // 7: remote_output_service.StatResponse.file_status:type_name -> remote_output_service.FileStatus + 
11, // 8: remote_output_service.FileStatus.file:type_name -> remote_output_service.FileStatus.File + 12, // 9: remote_output_service.FileStatus.symlink:type_name -> remote_output_service.FileStatus.Symlink + 13, // 10: remote_output_service.FileStatus.directory:type_name -> remote_output_service.FileStatus.Directory + 14, // 11: remote_output_service.FileStatus.external:type_name -> remote_output_service.FileStatus.External + 19, // 12: remote_output_service.FileStatus.File.digest:type_name -> build.bazel.remote.execution.v2.Digest + 20, // 13: remote_output_service.FileStatus.Directory.last_modified_time:type_name -> google.protobuf.Timestamp + 0, // 14: remote_output_service.RemoteOutputService.Clean:input_type -> remote_output_service.CleanRequest + 1, // 15: remote_output_service.RemoteOutputService.StartBuild:input_type -> remote_output_service.StartBuildRequest + 4, // 16: remote_output_service.RemoteOutputService.BatchCreate:input_type -> remote_output_service.BatchCreateRequest + 5, // 17: remote_output_service.RemoteOutputService.BatchStat:input_type -> remote_output_service.BatchStatRequest + 9, // 18: remote_output_service.RemoteOutputService.FinalizeBuild:input_type -> remote_output_service.FinalizeBuildRequest + 21, // 19: remote_output_service.RemoteOutputService.Clean:output_type -> google.protobuf.Empty + 3, // 20: remote_output_service.RemoteOutputService.StartBuild:output_type -> remote_output_service.StartBuildResponse + 21, // 21: remote_output_service.RemoteOutputService.BatchCreate:output_type -> google.protobuf.Empty + 6, // 22: remote_output_service.RemoteOutputService.BatchStat:output_type -> remote_output_service.BatchStatResponse + 21, // 23: remote_output_service.RemoteOutputService.FinalizeBuild:output_type -> google.protobuf.Empty + 19, // [19:24] is the sub-list for method output_type + 14, // [14:19] is the sub-list for method input_type + 14, // [14:14] is the sub-list for extension type_name + 14, // [14:14] is the sub-list for 
extension extendee + 0, // [0:14] is the sub-list for field type_name +} + +func init() { file_pkg_proto_remoteoutputservice_remote_output_service_proto_init() } +func file_pkg_proto_remoteoutputservice_remote_output_service_proto_init() { + if File_pkg_proto_remoteoutputservice_remote_output_service_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_pkg_proto_remoteoutputservice_remote_output_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CleanRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_proto_remoteoutputservice_remote_output_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StartBuildRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_proto_remoteoutputservice_remote_output_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*InitialOutputPathContents); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_proto_remoteoutputservice_remote_output_service_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StartBuildResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_proto_remoteoutputservice_remote_output_service_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BatchCreateRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_proto_remoteoutputservice_remote_output_service_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch 
v := v.(*BatchStatRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_proto_remoteoutputservice_remote_output_service_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BatchStatResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_proto_remoteoutputservice_remote_output_service_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StatResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_proto_remoteoutputservice_remote_output_service_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FileStatus); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_proto_remoteoutputservice_remote_output_service_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FinalizeBuildRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_proto_remoteoutputservice_remote_output_service_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FileStatus_File); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_proto_remoteoutputservice_remote_output_service_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FileStatus_Symlink); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_pkg_proto_remoteoutputservice_remote_output_service_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FileStatus_Directory); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_proto_remoteoutputservice_remote_output_service_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FileStatus_External); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_pkg_proto_remoteoutputservice_remote_output_service_proto_msgTypes[8].OneofWrappers = []interface{}{ + (*FileStatus_File_)(nil), + (*FileStatus_Symlink_)(nil), + (*FileStatus_Directory_)(nil), + (*FileStatus_External_)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_pkg_proto_remoteoutputservice_remote_output_service_proto_rawDesc, + NumEnums: 0, + NumMessages: 15, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_pkg_proto_remoteoutputservice_remote_output_service_proto_goTypes, + DependencyIndexes: file_pkg_proto_remoteoutputservice_remote_output_service_proto_depIdxs, + MessageInfos: file_pkg_proto_remoteoutputservice_remote_output_service_proto_msgTypes, + }.Build() + File_pkg_proto_remoteoutputservice_remote_output_service_proto = out.File + file_pkg_proto_remoteoutputservice_remote_output_service_proto_rawDesc = nil + file_pkg_proto_remoteoutputservice_remote_output_service_proto_goTypes = nil + file_pkg_proto_remoteoutputservice_remote_output_service_proto_depIdxs = nil +} + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// RemoteOutputServiceClient is the client API for RemoteOutputService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type RemoteOutputServiceClient interface { + Clean(ctx context.Context, in *CleanRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + StartBuild(ctx context.Context, in *StartBuildRequest, opts ...grpc.CallOption) (*StartBuildResponse, error) + BatchCreate(ctx context.Context, in *BatchCreateRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + BatchStat(ctx context.Context, in *BatchStatRequest, opts ...grpc.CallOption) (*BatchStatResponse, error) + FinalizeBuild(ctx context.Context, in *FinalizeBuildRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) +} + +type remoteOutputServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewRemoteOutputServiceClient(cc grpc.ClientConnInterface) RemoteOutputServiceClient { + return &remoteOutputServiceClient{cc} +} + +func (c *remoteOutputServiceClient) Clean(ctx context.Context, in *CleanRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/remote_output_service.RemoteOutputService/Clean", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *remoteOutputServiceClient) StartBuild(ctx context.Context, in *StartBuildRequest, opts ...grpc.CallOption) (*StartBuildResponse, error) { + out := new(StartBuildResponse) + err := c.cc.Invoke(ctx, "/remote_output_service.RemoteOutputService/StartBuild", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *remoteOutputServiceClient) BatchCreate(ctx context.Context, in *BatchCreateRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/remote_output_service.RemoteOutputService/BatchCreate", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *remoteOutputServiceClient) BatchStat(ctx context.Context, in *BatchStatRequest, opts ...grpc.CallOption) (*BatchStatResponse, error) { + out := new(BatchStatResponse) + err := c.cc.Invoke(ctx, "/remote_output_service.RemoteOutputService/BatchStat", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *remoteOutputServiceClient) FinalizeBuild(ctx context.Context, in *FinalizeBuildRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/remote_output_service.RemoteOutputService/FinalizeBuild", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// RemoteOutputServiceServer is the server API for RemoteOutputService service. +type RemoteOutputServiceServer interface { + Clean(context.Context, *CleanRequest) (*emptypb.Empty, error) + StartBuild(context.Context, *StartBuildRequest) (*StartBuildResponse, error) + BatchCreate(context.Context, *BatchCreateRequest) (*emptypb.Empty, error) + BatchStat(context.Context, *BatchStatRequest) (*BatchStatResponse, error) + FinalizeBuild(context.Context, *FinalizeBuildRequest) (*emptypb.Empty, error) +} + +// UnimplementedRemoteOutputServiceServer can be embedded to have forward compatible implementations. 
+type UnimplementedRemoteOutputServiceServer struct { +} + +func (*UnimplementedRemoteOutputServiceServer) Clean(context.Context, *CleanRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Clean not implemented") +} +func (*UnimplementedRemoteOutputServiceServer) StartBuild(context.Context, *StartBuildRequest) (*StartBuildResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method StartBuild not implemented") +} +func (*UnimplementedRemoteOutputServiceServer) BatchCreate(context.Context, *BatchCreateRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method BatchCreate not implemented") +} +func (*UnimplementedRemoteOutputServiceServer) BatchStat(context.Context, *BatchStatRequest) (*BatchStatResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method BatchStat not implemented") +} +func (*UnimplementedRemoteOutputServiceServer) FinalizeBuild(context.Context, *FinalizeBuildRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method FinalizeBuild not implemented") +} + +func RegisterRemoteOutputServiceServer(s grpc.ServiceRegistrar, srv RemoteOutputServiceServer) { + s.RegisterService(&_RemoteOutputService_serviceDesc, srv) +} + +func _RemoteOutputService_Clean_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CleanRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RemoteOutputServiceServer).Clean(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/remote_output_service.RemoteOutputService/Clean", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RemoteOutputServiceServer).Clean(ctx, req.(*CleanRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _RemoteOutputService_StartBuild_Handler(srv 
interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StartBuildRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RemoteOutputServiceServer).StartBuild(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/remote_output_service.RemoteOutputService/StartBuild", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RemoteOutputServiceServer).StartBuild(ctx, req.(*StartBuildRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _RemoteOutputService_BatchCreate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(BatchCreateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RemoteOutputServiceServer).BatchCreate(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/remote_output_service.RemoteOutputService/BatchCreate", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RemoteOutputServiceServer).BatchCreate(ctx, req.(*BatchCreateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _RemoteOutputService_BatchStat_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(BatchStatRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RemoteOutputServiceServer).BatchStat(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/remote_output_service.RemoteOutputService/BatchStat", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RemoteOutputServiceServer).BatchStat(ctx, req.(*BatchStatRequest)) + } + return 
interceptor(ctx, in, info, handler) +} + +func _RemoteOutputService_FinalizeBuild_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(FinalizeBuildRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RemoteOutputServiceServer).FinalizeBuild(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/remote_output_service.RemoteOutputService/FinalizeBuild", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RemoteOutputServiceServer).FinalizeBuild(ctx, req.(*FinalizeBuildRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _RemoteOutputService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "remote_output_service.RemoteOutputService", + HandlerType: (*RemoteOutputServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Clean", + Handler: _RemoteOutputService_Clean_Handler, + }, + { + MethodName: "StartBuild", + Handler: _RemoteOutputService_StartBuild_Handler, + }, + { + MethodName: "BatchCreate", + Handler: _RemoteOutputService_BatchCreate_Handler, + }, + { + MethodName: "BatchStat", + Handler: _RemoteOutputService_BatchStat_Handler, + }, + { + MethodName: "FinalizeBuild", + Handler: _RemoteOutputService_FinalizeBuild_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "pkg/proto/remoteoutputservice/remote_output_service.proto", +} diff --git a/pkg/proto/remoteoutputservice/remote_output_service.proto b/pkg/proto/remoteoutputservice/remote_output_service.proto new file mode 100644 index 0000000..43e3442 --- /dev/null +++ b/pkg/proto/remoteoutputservice/remote_output_service.proto @@ -0,0 +1,326 @@ +// Copyright 2021 The Bazel Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package remote_output_service; + +import "build/bazel/remote/execution/v2/remote_execution.proto"; +import "google/protobuf/empty.proto"; +import "google/protobuf/timestamp.proto"; + +option java_package = "com.google.devtools.build.lib.remote"; +option java_outer_classname = "RemoteOutputServiceProto"; +option go_package = "remoteoutputservice"; + +// The Remote Output Service may be used by users of the Remote +// Execution API to construct a directory on the local system that +// contains all output files of a build. +// +// Primitive implementations of this API may simply download files from +// the Content Addressable Storage (CAS) and store them at their +// designated location. Complex implementations may use a pseudo file +// system (e.g., FUSE) to support deduplication, lazy loading and +// snapshotting. +// +// Details: +// https://github.com/bazelbuild/proposals/blob/master/designs/2021-02-09-remote-output-service.md +// https://groups.google.com/g/remote-execution-apis/c/qOSWWwBLPzo +// https://groups.google.com/g/bazel-dev/c/lKzENsNd1Do +service RemoteOutputService { + // Methods that can be invoked at any point in time. + + // Clean all data associated with a single output path, so that the + // next invocation of StartBuild() yields an empty output path. This + // may be implemented in a way that's faster than removing all of the + // files from the file system manually. + rpc Clean(CleanRequest) returns (google.protobuf.Empty); + + // Signal that a new build is about to start. 
+ // + // The client uses this call to obtain a directory where outputs of + // the build may be stored, called the output path. Based on the + // parameters provided, the remote output service may provide an empty + // output path, or one that has contents from a previous build of the + // same workspace. + // + // In case the output path contains data from a previous build, the + // remote output service is responsible for calling + // ContentAddressableStorage.FindMissingBlobs() for all of the objects + // that are stored remotely. This ensures that these objects don't + // disappear from the Content Addressable Storage while the build is + // running. Any files that are absent must be removed from the output + // path and reported through InitialOutputPathContents.modified_paths. + rpc StartBuild(StartBuildRequest) returns (StartBuildResponse); + + // Methods that can only be invoked during a build. + + // Create one or more files, directories or symbolic links in the + // output path. + rpc BatchCreate(BatchCreateRequest) returns (google.protobuf.Empty); + + // Obtain the status of one or more files, directories or symbolic + // links that are stored in the input path. + rpc BatchStat(BatchStatRequest) returns (BatchStatResponse); + + // Signal that a build has been completed. + rpc FinalizeBuild(FinalizeBuildRequest) returns (google.protobuf.Empty); +} + +message CleanRequest { + // The output base identifier that was provided to + // StartBuildRequest.output_base_id whose data needs to be removed. + string output_base_id = 1; +} + +message StartBuildRequest { + // A client-chosen value that uniquely identifies the workspace for + // which the build is being started. This value must be set to ensure + // that the remote output service is capable of managing builds for + // distinct workspaces concurrently. + // + // This value must be a valid filename for the operating system on + // which the remote output service and client are being executed. 
This + // allows the remote output service to create one subdirectory per + // project that needs to be built. + // + // By default, Bazel sets this value to the MD5 sum of the absolute + // path of the workspace directory. This is generally sufficient, + // though a more complex scheme may necessary in case the file system + // namespace is virtualized. + // + // Starting a build finalizes any previous build with the same + // output_base_id that has not been finalized yet. + string output_base_id = 1; + + // A client-chosen value that uniquely identifies this build. This + // value must be provided to most other methods to ensure that + // operations are targeted against the right output path. + string build_id = 2; + + // The instance name that the client uses when communicating with the + // remote execution system. The remote output service uses this value + // when loading objects from the Content Addressable Storage. + string instance_name = 3; + + // The digest function that the client uses when communicating with + // the remote execution system. The remote output service uses this + // value to ensure that FileStatus responses contain digests that were + // computed with right digest function. + build.bazel.remote.execution.v2.DigestFunction.Value digest_function = 4; + + // The absolute path at which the remote output service exposes its + // output paths, as seen from the perspective of the client. + // + // This value needs to be provided by the client, because file system + // namespace virtualization may cause this directory to appear at a + // location that differs from the one used by the service. + // + // The purpose of this field is to ensure that the remote output + // service is capable of expanding symbolic links containing absolute + // paths. + string output_path_prefix = 5; + + // A map of paths on the system that will become symbolic links + // pointing to locations inside the output path. 
Similar to + // output_path_prefix, this option is used to ensure the remote output + // service is capable of expanding symbolic links. + // + // Map keys are absolute paths, while map values are paths that are + // relative to the output path. + map output_path_aliases = 6; +} + +message InitialOutputPathContents { + // The identifier of a previously finalized build whose results are + // stored in the output path. + string build_id = 1; + + // Paths that have been modified or removed since the build finalized. + // + // If the remote output service freezes the contents of the output + // path between builds, this field can be left empty. + repeated string modified_paths = 2; +} + +message StartBuildResponse { + // If set, the contents of the output path are almost entirely + // identical on the results of a previous build. This information may + // be used by the client to prevent unnecessary scanning of the file + // system. + // + // Servers can leave this field unset in case the contents of the + // output path are empty, not based on a previous build, if no + // tracking of this information is performed, or if the number of + // changes made to the output path is too large to be expressed. + InitialOutputPathContents initial_output_path_contents = 1; + + // A relative path that the client must append to + // StartBuildRequest.output_path_prefix to obtain the full path at + // which outputs of the build are stored. + // + // If the remote output service is incapable of storing the output of + // multiple builds, this string may be left empty. + string output_path_suffix = 2; +} + +message BatchCreateRequest { + // The identifier of the build. The remote output service uses this to + // determine which output path needs to be modified. + string build_id = 1; + + // A path relative to the root of the output path where files, + // symbolic links and directories need to be created. 
+ string path_prefix = 2; + + // Whether the contents of the path prefix should be removed prior to + // creating the specified files. + bool clean_path_prefix = 3; + + // Files that need to be downloaded from the Content Addressable + // Storage. + // + // Any missing parent directories, including those in path_prefix, are + // created as well. If any of the parents refer to a non-directory + // file, they are replaced by an empty directory. If a file or + // directory already exists at the provided path, it is replaced. + // + // This means that symbolic links are not followed when evaluating + // path_prefix and OutputFile.path. + repeated build.bazel.remote.execution.v2.OutputFile files = 4; + + // Symbolic links that need to be created. + // + // Any missing parent directories, including those in path_prefix, are + // created as well. If any of the parents refer to a non-directory + // file, they are replaced by an empty directory. If a file or + // directory already exists at the provided path, it is replaced. + // + // This means that symbolic links are not followed when evaluating + // path_prefix and OutputSymlink.path. + repeated build.bazel.remote.execution.v2.OutputSymlink symlinks = 5; + + // Directories that need to be downloaded from the Content Addressable + // Storage. + // + // Any missing parent directories, including those in path_prefix, are + // created as well. If any of the parents refer to a non-directory + // file, they are replaced by an empty directory. Any file or + // directory that already exists at the provided path is replaced. + // + // This means that symbolic links are not followed when evaluating + // path_prefix and OutputDirectory.path. + repeated build.bazel.remote.execution.v2.OutputDirectory directories = 6; +} + +message BatchStatRequest { + // The identifier of the build. The remote output service uses this to + // determine which output path needs to be inspected. 
+ string build_id = 1; + + // In case the path corresponds to a regular file, include the hash + // and size of the file in the response. + bool include_file_digest = 2; + + // In case the path corresponds to a symbolic link, include the target + // of the symbolic link in the response. + bool include_symlink_target = 3; + + // If the last component of the path corresponds to a symbolic link, + // return the status of the file at the target location. + // + // Symbolic links encountered before the last component of the path + // are always expanded, regardless of the value of this option. + bool follow_symlinks = 4; + + // Paths whose status needs to be obtained. + repeated string paths = 5; +} + +message BatchStatResponse { + // The status response for each of the requested paths, using the same + // order as requested. This means that this list has the same length + // as BatchStatRequest.paths. + repeated StatResponse responses = 1; +} + +message StatResponse { + // The status of the file. If the file corresponding with the + // requested path does not exist, this field will be null. + FileStatus file_status = 1; +} + +message FileStatus { + message File { + // The hash and size of the file. This field is only set when + // BatchStatRequest.include_file_digest is set. + // + // This field may also be omitted if the remote output service is + // unable to compute it accurately. For example, when a file is + // opened for writing, the kernel may buffer data to be written. + // When absent, the caller should fall back to computing the digest + // manually. + build.bazel.remote.execution.v2.Digest digest = 1; + } + + message Symlink { + // The target of the symbolic link. This field is only set when + // BatchStatRequest.include_symlink_target is set. + string target = 1; + } + + message Directory { + // The time at which the directory contents were last modified. 
+ google.protobuf.Timestamp last_modified_time = 1; + } + + message External { + // The path relative to the root of the output path where the file + // is located. This path is absolute, or it is relative, starting + // with "../". + // + // The client can use this field to obtain the file status manually. + string next_path = 1; + } + + oneof file_type { + // The path resolves to a regular file. + File file = 1; + + // The path resolves to a symbolic link. + // + // This field may not be set if BatchStatRequest.follow_symlinks is + // set to true. + Symlink symlink = 2; + + // The path resolves to a directory. + Directory directory = 3; + + // The path resolves to a location outside the output path. The + // remote output service is unable to determine whether any file + // exists at the resulting path, and can therefore not obtain its + // status. + External external = 4; + } +} + +message FinalizeBuildRequest { + // The identifier of the build that should be finalized. + string build_id = 1; + + // Whether the build completed successfully. The remote output service + // may, for example, use this option to apply different retention + // policies that take the outcome of the build into account. 
+ bool build_successful = 2; +} diff --git a/pkg/proto/remoteworker/BUILD.bazel b/pkg/proto/remoteworker/BUILD.bazel new file mode 100644 index 0000000..2c60900 --- /dev/null +++ b/pkg/proto/remoteworker/BUILD.bazel @@ -0,0 +1,31 @@ +load("@rules_proto//proto:defs.bzl", "proto_library") +load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") + +proto_library( + name = "remoteworker_proto", + srcs = ["remoteworker.proto"], + visibility = ["//visibility:public"], + deps = [ + "@com_github_bazelbuild_remote_apis//build/bazel/remote/execution/v2:remote_execution_proto", + "@com_google_protobuf//:any_proto", + "@com_google_protobuf//:empty_proto", + "@com_google_protobuf//:timestamp_proto", + ], +) + +go_proto_library( + name = "remoteworker_go_proto", + compilers = ["@io_bazel_rules_go//proto:go_grpc"], + importpath = "github.com/buildbarn/bb-remote-execution/pkg/proto/remoteworker", + proto = ":remoteworker_proto", + visibility = ["//visibility:public"], + deps = ["@com_github_bazelbuild_remote_apis//build/bazel/remote/execution/v2:execution"], +) + +go_library( + name = "remoteworker", + embed = [":remoteworker_go_proto"], + importpath = "github.com/buildbarn/bb-remote-execution/pkg/proto/remoteworker", + visibility = ["//visibility:public"], +) diff --git a/pkg/proto/remoteworker/remoteworker.pb.go b/pkg/proto/remoteworker/remoteworker.pb.go new file mode 100644 index 0000000..37e112d --- /dev/null +++ b/pkg/proto/remoteworker/remoteworker.pb.go @@ -0,0 +1,972 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.31.0 +// protoc v4.23.4 +// source: pkg/proto/remoteworker/remoteworker.proto + +package remoteworker + +import ( + context "context" + v2 "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + emptypb "google.golang.org/protobuf/types/known/emptypb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type SynchronizeRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + WorkerId map[string]string `protobuf:"bytes,1,rep,name=worker_id,json=workerId,proto3" json:"worker_id,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + InstanceNamePrefix string `protobuf:"bytes,2,opt,name=instance_name_prefix,json=instanceNamePrefix,proto3" json:"instance_name_prefix,omitempty"` + Platform *v2.Platform `protobuf:"bytes,3,opt,name=platform,proto3" json:"platform,omitempty"` + SizeClass uint32 `protobuf:"varint,5,opt,name=size_class,json=sizeClass,proto3" json:"size_class,omitempty"` + CurrentState *CurrentState `protobuf:"bytes,4,opt,name=current_state,json=currentState,proto3" json:"current_state,omitempty"` + PreferBeingIdle bool `protobuf:"varint,6,opt,name=prefer_being_idle,json=preferBeingIdle,proto3" json:"prefer_being_idle,omitempty"` +} + +func (x *SynchronizeRequest) Reset() { + *x = SynchronizeRequest{} + if 
protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_remoteworker_remoteworker_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SynchronizeRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SynchronizeRequest) ProtoMessage() {} + +func (x *SynchronizeRequest) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_remoteworker_remoteworker_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SynchronizeRequest.ProtoReflect.Descriptor instead. +func (*SynchronizeRequest) Descriptor() ([]byte, []int) { + return file_pkg_proto_remoteworker_remoteworker_proto_rawDescGZIP(), []int{0} +} + +func (x *SynchronizeRequest) GetWorkerId() map[string]string { + if x != nil { + return x.WorkerId + } + return nil +} + +func (x *SynchronizeRequest) GetInstanceNamePrefix() string { + if x != nil { + return x.InstanceNamePrefix + } + return "" +} + +func (x *SynchronizeRequest) GetPlatform() *v2.Platform { + if x != nil { + return x.Platform + } + return nil +} + +func (x *SynchronizeRequest) GetSizeClass() uint32 { + if x != nil { + return x.SizeClass + } + return 0 +} + +func (x *SynchronizeRequest) GetCurrentState() *CurrentState { + if x != nil { + return x.CurrentState + } + return nil +} + +func (x *SynchronizeRequest) GetPreferBeingIdle() bool { + if x != nil { + return x.PreferBeingIdle + } + return false +} + +type CurrentState struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to WorkerState: + // + // *CurrentState_Idle + // *CurrentState_Executing_ + WorkerState isCurrentState_WorkerState `protobuf_oneof:"worker_state"` +} + +func (x *CurrentState) Reset() { + *x = CurrentState{} + if 
protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_remoteworker_remoteworker_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CurrentState) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CurrentState) ProtoMessage() {} + +func (x *CurrentState) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_remoteworker_remoteworker_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CurrentState.ProtoReflect.Descriptor instead. +func (*CurrentState) Descriptor() ([]byte, []int) { + return file_pkg_proto_remoteworker_remoteworker_proto_rawDescGZIP(), []int{1} +} + +func (m *CurrentState) GetWorkerState() isCurrentState_WorkerState { + if m != nil { + return m.WorkerState + } + return nil +} + +func (x *CurrentState) GetIdle() *emptypb.Empty { + if x, ok := x.GetWorkerState().(*CurrentState_Idle); ok { + return x.Idle + } + return nil +} + +func (x *CurrentState) GetExecuting() *CurrentState_Executing { + if x, ok := x.GetWorkerState().(*CurrentState_Executing_); ok { + return x.Executing + } + return nil +} + +type isCurrentState_WorkerState interface { + isCurrentState_WorkerState() +} + +type CurrentState_Idle struct { + Idle *emptypb.Empty `protobuf:"bytes,1,opt,name=idle,proto3,oneof"` +} + +type CurrentState_Executing_ struct { + Executing *CurrentState_Executing `protobuf:"bytes,2,opt,name=executing,proto3,oneof"` +} + +func (*CurrentState_Idle) isCurrentState_WorkerState() {} + +func (*CurrentState_Executing_) isCurrentState_WorkerState() {} + +type SynchronizeResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + NextSynchronizationAt *timestamppb.Timestamp 
`protobuf:"bytes,1,opt,name=next_synchronization_at,json=nextSynchronizationAt,proto3" json:"next_synchronization_at,omitempty"` + DesiredState *DesiredState `protobuf:"bytes,2,opt,name=desired_state,json=desiredState,proto3" json:"desired_state,omitempty"` +} + +func (x *SynchronizeResponse) Reset() { + *x = SynchronizeResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_remoteworker_remoteworker_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SynchronizeResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SynchronizeResponse) ProtoMessage() {} + +func (x *SynchronizeResponse) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_remoteworker_remoteworker_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SynchronizeResponse.ProtoReflect.Descriptor instead. 
+func (*SynchronizeResponse) Descriptor() ([]byte, []int) { + return file_pkg_proto_remoteworker_remoteworker_proto_rawDescGZIP(), []int{2} +} + +func (x *SynchronizeResponse) GetNextSynchronizationAt() *timestamppb.Timestamp { + if x != nil { + return x.NextSynchronizationAt + } + return nil +} + +func (x *SynchronizeResponse) GetDesiredState() *DesiredState { + if x != nil { + return x.DesiredState + } + return nil +} + +type DesiredState struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to WorkerState: + // + // *DesiredState_Idle + // *DesiredState_Executing_ + WorkerState isDesiredState_WorkerState `protobuf_oneof:"worker_state"` +} + +func (x *DesiredState) Reset() { + *x = DesiredState{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_remoteworker_remoteworker_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DesiredState) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DesiredState) ProtoMessage() {} + +func (x *DesiredState) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_remoteworker_remoteworker_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DesiredState.ProtoReflect.Descriptor instead. 
+func (*DesiredState) Descriptor() ([]byte, []int) { + return file_pkg_proto_remoteworker_remoteworker_proto_rawDescGZIP(), []int{3} +} + +func (m *DesiredState) GetWorkerState() isDesiredState_WorkerState { + if m != nil { + return m.WorkerState + } + return nil +} + +func (x *DesiredState) GetIdle() *emptypb.Empty { + if x, ok := x.GetWorkerState().(*DesiredState_Idle); ok { + return x.Idle + } + return nil +} + +func (x *DesiredState) GetExecuting() *DesiredState_Executing { + if x, ok := x.GetWorkerState().(*DesiredState_Executing_); ok { + return x.Executing + } + return nil +} + +type isDesiredState_WorkerState interface { + isDesiredState_WorkerState() +} + +type DesiredState_Idle struct { + Idle *emptypb.Empty `protobuf:"bytes,1,opt,name=idle,proto3,oneof"` +} + +type DesiredState_Executing_ struct { + Executing *DesiredState_Executing `protobuf:"bytes,2,opt,name=executing,proto3,oneof"` +} + +func (*DesiredState_Idle) isDesiredState_WorkerState() {} + +func (*DesiredState_Executing_) isDesiredState_WorkerState() {} + +type CurrentState_Executing struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ActionDigest *v2.Digest `protobuf:"bytes,1,opt,name=action_digest,json=actionDigest,proto3" json:"action_digest,omitempty"` + // Types that are assignable to ExecutionState: + // + // *CurrentState_Executing_Started + // *CurrentState_Executing_FetchingInputs + // *CurrentState_Executing_Running + // *CurrentState_Executing_UploadingOutputs + // *CurrentState_Executing_Completed + ExecutionState isCurrentState_Executing_ExecutionState `protobuf_oneof:"execution_state"` +} + +func (x *CurrentState_Executing) Reset() { + *x = CurrentState_Executing{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_remoteworker_remoteworker_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CurrentState_Executing) String() string { + return 
protoimpl.X.MessageStringOf(x) +} + +func (*CurrentState_Executing) ProtoMessage() {} + +func (x *CurrentState_Executing) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_remoteworker_remoteworker_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CurrentState_Executing.ProtoReflect.Descriptor instead. +func (*CurrentState_Executing) Descriptor() ([]byte, []int) { + return file_pkg_proto_remoteworker_remoteworker_proto_rawDescGZIP(), []int{1, 0} +} + +func (x *CurrentState_Executing) GetActionDigest() *v2.Digest { + if x != nil { + return x.ActionDigest + } + return nil +} + +func (m *CurrentState_Executing) GetExecutionState() isCurrentState_Executing_ExecutionState { + if m != nil { + return m.ExecutionState + } + return nil +} + +func (x *CurrentState_Executing) GetStarted() *emptypb.Empty { + if x, ok := x.GetExecutionState().(*CurrentState_Executing_Started); ok { + return x.Started + } + return nil +} + +func (x *CurrentState_Executing) GetFetchingInputs() *emptypb.Empty { + if x, ok := x.GetExecutionState().(*CurrentState_Executing_FetchingInputs); ok { + return x.FetchingInputs + } + return nil +} + +func (x *CurrentState_Executing) GetRunning() *emptypb.Empty { + if x, ok := x.GetExecutionState().(*CurrentState_Executing_Running); ok { + return x.Running + } + return nil +} + +func (x *CurrentState_Executing) GetUploadingOutputs() *emptypb.Empty { + if x, ok := x.GetExecutionState().(*CurrentState_Executing_UploadingOutputs); ok { + return x.UploadingOutputs + } + return nil +} + +func (x *CurrentState_Executing) GetCompleted() *v2.ExecuteResponse { + if x, ok := x.GetExecutionState().(*CurrentState_Executing_Completed); ok { + return x.Completed + } + return nil +} + +type isCurrentState_Executing_ExecutionState interface { + 
isCurrentState_Executing_ExecutionState() +} + +type CurrentState_Executing_Started struct { + Started *emptypb.Empty `protobuf:"bytes,2,opt,name=started,proto3,oneof"` +} + +type CurrentState_Executing_FetchingInputs struct { + FetchingInputs *emptypb.Empty `protobuf:"bytes,3,opt,name=fetching_inputs,json=fetchingInputs,proto3,oneof"` +} + +type CurrentState_Executing_Running struct { + Running *emptypb.Empty `protobuf:"bytes,4,opt,name=running,proto3,oneof"` +} + +type CurrentState_Executing_UploadingOutputs struct { + UploadingOutputs *emptypb.Empty `protobuf:"bytes,5,opt,name=uploading_outputs,json=uploadingOutputs,proto3,oneof"` +} + +type CurrentState_Executing_Completed struct { + Completed *v2.ExecuteResponse `protobuf:"bytes,6,opt,name=completed,proto3,oneof"` +} + +func (*CurrentState_Executing_Started) isCurrentState_Executing_ExecutionState() {} + +func (*CurrentState_Executing_FetchingInputs) isCurrentState_Executing_ExecutionState() {} + +func (*CurrentState_Executing_Running) isCurrentState_Executing_ExecutionState() {} + +func (*CurrentState_Executing_UploadingOutputs) isCurrentState_Executing_ExecutionState() {} + +func (*CurrentState_Executing_Completed) isCurrentState_Executing_ExecutionState() {} + +type DesiredState_Executing struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ActionDigest *v2.Digest `protobuf:"bytes,1,opt,name=action_digest,json=actionDigest,proto3" json:"action_digest,omitempty"` + Action *v2.Action `protobuf:"bytes,2,opt,name=action,proto3" json:"action,omitempty"` + QueuedTimestamp *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=queued_timestamp,json=queuedTimestamp,proto3" json:"queued_timestamp,omitempty"` + AuxiliaryMetadata []*anypb.Any `protobuf:"bytes,6,rep,name=auxiliary_metadata,json=auxiliaryMetadata,proto3" json:"auxiliary_metadata,omitempty"` + InstanceNameSuffix string 
`protobuf:"bytes,7,opt,name=instance_name_suffix,json=instanceNameSuffix,proto3" json:"instance_name_suffix,omitempty"` + W3CTraceContext map[string]string `protobuf:"bytes,8,rep,name=w3c_trace_context,json=w3cTraceContext,proto3" json:"w3c_trace_context,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + DigestFunction v2.DigestFunction_Value `protobuf:"varint,9,opt,name=digest_function,json=digestFunction,proto3,enum=build.bazel.remote.execution.v2.DigestFunction_Value" json:"digest_function,omitempty"` +} + +func (x *DesiredState_Executing) Reset() { + *x = DesiredState_Executing{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_remoteworker_remoteworker_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DesiredState_Executing) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DesiredState_Executing) ProtoMessage() {} + +func (x *DesiredState_Executing) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_remoteworker_remoteworker_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DesiredState_Executing.ProtoReflect.Descriptor instead. 
+func (*DesiredState_Executing) Descriptor() ([]byte, []int) { + return file_pkg_proto_remoteworker_remoteworker_proto_rawDescGZIP(), []int{3, 0} +} + +func (x *DesiredState_Executing) GetActionDigest() *v2.Digest { + if x != nil { + return x.ActionDigest + } + return nil +} + +func (x *DesiredState_Executing) GetAction() *v2.Action { + if x != nil { + return x.Action + } + return nil +} + +func (x *DesiredState_Executing) GetQueuedTimestamp() *timestamppb.Timestamp { + if x != nil { + return x.QueuedTimestamp + } + return nil +} + +func (x *DesiredState_Executing) GetAuxiliaryMetadata() []*anypb.Any { + if x != nil { + return x.AuxiliaryMetadata + } + return nil +} + +func (x *DesiredState_Executing) GetInstanceNameSuffix() string { + if x != nil { + return x.InstanceNameSuffix + } + return "" +} + +func (x *DesiredState_Executing) GetW3CTraceContext() map[string]string { + if x != nil { + return x.W3CTraceContext + } + return nil +} + +func (x *DesiredState_Executing) GetDigestFunction() v2.DigestFunction_Value { + if x != nil { + return x.DigestFunction + } + return v2.DigestFunction_Value(0) +} + +var File_pkg_proto_remoteworker_remoteworker_proto protoreflect.FileDescriptor + +var file_pkg_proto_remoteworker_remoteworker_proto_rawDesc = []byte{ + 0x0a, 0x29, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x72, 0x65, 0x6d, 0x6f, + 0x74, 0x65, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x2f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x77, + 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x16, 0x62, 0x75, 0x69, + 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x77, 0x6f, 0x72, + 0x6b, 0x65, 0x72, 0x1a, 0x36, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x2f, 0x62, 0x61, 0x7a, 0x65, 0x6c, + 0x2f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, + 0x6e, 0x2f, 0x76, 0x32, 0x2f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x65, 0x78, 0x65, 0x63, + 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 
0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xb7, 0x03, 0x0a, 0x12, 0x53, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, + 0x6e, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x55, 0x0a, 0x09, 0x77, + 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x38, + 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, + 0x65, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, + 0x69, 0x7a, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x65, + 0x72, 0x49, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, + 0x49, 0x64, 0x12, 0x30, 0x0a, 0x14, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x12, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x72, + 0x65, 0x66, 0x69, 0x78, 0x12, 0x45, 0x0a, 0x08, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x2e, 0x62, + 0x61, 0x7a, 0x65, 0x6c, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x65, 0x78, 0x65, 0x63, + 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, + 0x6d, 0x52, 0x08, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x12, 0x1d, 
0x0a, 0x0a, 0x73, + 0x69, 0x7a, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x09, 0x73, 0x69, 0x7a, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x49, 0x0a, 0x0d, 0x63, 0x75, + 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x24, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x72, 0x65, + 0x6d, 0x6f, 0x74, 0x65, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x2e, 0x43, 0x75, 0x72, 0x72, 0x65, + 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0c, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2a, 0x0a, 0x11, 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, 0x5f, + 0x62, 0x65, 0x69, 0x6e, 0x67, 0x5f, 0x69, 0x64, 0x6c, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0f, 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, 0x42, 0x65, 0x69, 0x6e, 0x67, 0x49, 0x64, 0x6c, + 0x65, 0x1a, 0x3b, 0x0a, 0x0d, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x64, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xd5, + 0x04, 0x0a, 0x0c, 0x43, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, + 0x2c, 0x0a, 0x04, 0x69, 0x64, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x45, 0x6d, 0x70, 0x74, 0x79, 0x48, 0x00, 0x52, 0x04, 0x69, 0x64, 0x6c, 0x65, 0x12, 0x4e, 0x0a, + 0x09, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x2e, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x72, 0x65, 0x6d, + 0x6f, 0x74, 0x65, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x2e, 0x43, 0x75, 0x72, 0x72, 0x65, 0x6e, + 0x74, 0x53, 0x74, 
0x61, 0x74, 0x65, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6e, 0x67, + 0x48, 0x00, 0x52, 0x09, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x1a, 0xb6, 0x03, + 0x0a, 0x09, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x12, 0x4c, 0x0a, 0x0d, 0x61, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x2e, 0x62, 0x61, 0x7a, 0x65, 0x6c, + 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, + 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x52, 0x0c, 0x61, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x73, 0x74, 0x61, + 0x72, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, + 0x74, 0x79, 0x48, 0x00, 0x52, 0x07, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x12, 0x41, 0x0a, + 0x0f, 0x66, 0x65, 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x5f, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x48, 0x00, + 0x52, 0x0e, 0x66, 0x65, 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x73, + 0x12, 0x32, 0x0a, 0x07, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x48, 0x00, 0x52, 0x07, 0x72, 0x75, 0x6e, + 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x45, 0x0a, 0x11, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x69, 0x6e, + 0x67, 0x5f, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x48, 0x00, 0x52, 0x10, 0x75, 0x70, 0x6c, 0x6f, 0x61, + 0x64, 0x69, 0x6e, 0x67, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x73, 0x12, 0x50, 0x0a, 0x09, 0x63, + 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, + 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x2e, 0x62, 0x61, 0x7a, 0x65, 0x6c, 0x2e, 0x72, 0x65, 0x6d, + 0x6f, 0x74, 0x65, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x32, + 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x48, 0x00, 0x52, 0x09, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x42, 0x11, 0x0a, + 0x0f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, + 0x4a, 0x04, 0x08, 0x07, 0x10, 0x08, 0x42, 0x0e, 0x0a, 0x0c, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, + 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x22, 0xb4, 0x01, 0x0a, 0x13, 0x53, 0x79, 0x6e, 0x63, 0x68, + 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x52, + 0x0a, 0x17, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x73, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, + 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x15, 0x6e, 0x65, 0x78, + 0x74, 0x53, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x41, 0x74, 0x12, 0x49, 0x0a, 0x0d, 0x64, 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, 0x5f, 0x73, 0x74, + 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x62, 0x75, 0x69, 0x6c, + 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x77, 0x6f, 0x72, 0x6b, + 0x65, 0x72, 0x2e, 0x44, 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 
0x52, + 0x0c, 0x64, 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x22, 0x98, 0x06, + 0x0a, 0x0c, 0x44, 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2c, + 0x0a, 0x04, 0x69, 0x64, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, + 0x6d, 0x70, 0x74, 0x79, 0x48, 0x00, 0x52, 0x04, 0x69, 0x64, 0x6c, 0x65, 0x12, 0x4e, 0x0a, 0x09, + 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2e, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x72, 0x65, 0x6d, 0x6f, + 0x74, 0x65, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x2e, 0x44, 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x48, + 0x00, 0x52, 0x09, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x1a, 0xf9, 0x04, 0x0a, + 0x09, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x12, 0x4c, 0x0a, 0x0d, 0x61, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x27, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x2e, 0x62, 0x61, 0x7a, 0x65, 0x6c, 0x2e, + 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x52, 0x0c, 0x61, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, + 0x2e, 0x62, 0x61, 0x7a, 0x65, 0x6c, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x65, 0x78, + 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x45, 0x0a, 0x10, 0x71, 0x75, 0x65, + 0x75, 0x65, 0x64, 0x5f, 0x74, 
0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, + 0x0f, 0x71, 0x75, 0x65, 0x75, 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x12, 0x43, 0x0a, 0x12, 0x61, 0x75, 0x78, 0x69, 0x6c, 0x69, 0x61, 0x72, 0x79, 0x5f, 0x6d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, + 0x6e, 0x79, 0x52, 0x11, 0x61, 0x75, 0x78, 0x69, 0x6c, 0x69, 0x61, 0x72, 0x79, 0x4d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x30, 0x0a, 0x14, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, + 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x12, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x4e, 0x61, 0x6d, + 0x65, 0x53, 0x75, 0x66, 0x66, 0x69, 0x78, 0x12, 0x6f, 0x0a, 0x11, 0x77, 0x33, 0x63, 0x5f, 0x74, + 0x72, 0x61, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x08, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x43, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x72, + 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x2e, 0x44, 0x65, 0x73, 0x69, + 0x72, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, + 0x6e, 0x67, 0x2e, 0x57, 0x33, 0x63, 0x54, 0x72, 0x61, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, + 0x78, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x77, 0x33, 0x63, 0x54, 0x72, 0x61, 0x63, + 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x5e, 0x0a, 0x0f, 0x64, 0x69, 0x67, 0x65, + 0x73, 0x74, 0x5f, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x35, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x2e, 0x62, 
0x61, 0x7a, 0x65, 0x6c, 0x2e, + 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0e, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, + 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x42, 0x0a, 0x14, 0x57, 0x33, 0x63, 0x54, + 0x72, 0x61, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x4a, 0x04, 0x08, 0x03, + 0x10, 0x04, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x42, 0x0e, 0x0a, 0x0c, 0x77, 0x6f, 0x72, 0x6b, + 0x65, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x32, 0x78, 0x0a, 0x0e, 0x4f, 0x70, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x51, 0x75, 0x65, 0x75, 0x65, 0x12, 0x66, 0x0a, 0x0b, 0x53, 0x79, + 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x12, 0x2a, 0x2e, 0x62, 0x75, 0x69, 0x6c, + 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x77, 0x6f, 0x72, 0x6b, + 0x65, 0x72, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, + 0x6e, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x2e, 0x53, + 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x42, 0x41, 0x5a, 0x3f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2f, 0x62, 0x62, 0x2d, 0x72, 0x65, + 0x6d, 0x6f, 0x74, 0x65, 0x2d, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x70, + 0x6b, 
0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x77, + 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_pkg_proto_remoteworker_remoteworker_proto_rawDescOnce sync.Once + file_pkg_proto_remoteworker_remoteworker_proto_rawDescData = file_pkg_proto_remoteworker_remoteworker_proto_rawDesc +) + +func file_pkg_proto_remoteworker_remoteworker_proto_rawDescGZIP() []byte { + file_pkg_proto_remoteworker_remoteworker_proto_rawDescOnce.Do(func() { + file_pkg_proto_remoteworker_remoteworker_proto_rawDescData = protoimpl.X.CompressGZIP(file_pkg_proto_remoteworker_remoteworker_proto_rawDescData) + }) + return file_pkg_proto_remoteworker_remoteworker_proto_rawDescData +} + +var file_pkg_proto_remoteworker_remoteworker_proto_msgTypes = make([]protoimpl.MessageInfo, 8) +var file_pkg_proto_remoteworker_remoteworker_proto_goTypes = []interface{}{ + (*SynchronizeRequest)(nil), // 0: buildbarn.remoteworker.SynchronizeRequest + (*CurrentState)(nil), // 1: buildbarn.remoteworker.CurrentState + (*SynchronizeResponse)(nil), // 2: buildbarn.remoteworker.SynchronizeResponse + (*DesiredState)(nil), // 3: buildbarn.remoteworker.DesiredState + nil, // 4: buildbarn.remoteworker.SynchronizeRequest.WorkerIdEntry + (*CurrentState_Executing)(nil), // 5: buildbarn.remoteworker.CurrentState.Executing + (*DesiredState_Executing)(nil), // 6: buildbarn.remoteworker.DesiredState.Executing + nil, // 7: buildbarn.remoteworker.DesiredState.Executing.W3cTraceContextEntry + (*v2.Platform)(nil), // 8: build.bazel.remote.execution.v2.Platform + (*emptypb.Empty)(nil), // 9: google.protobuf.Empty + (*timestamppb.Timestamp)(nil), // 10: google.protobuf.Timestamp + (*v2.Digest)(nil), // 11: build.bazel.remote.execution.v2.Digest + (*v2.ExecuteResponse)(nil), // 12: build.bazel.remote.execution.v2.ExecuteResponse + (*v2.Action)(nil), // 13: build.bazel.remote.execution.v2.Action + (*anypb.Any)(nil), // 14: google.protobuf.Any + 
(v2.DigestFunction_Value)(0), // 15: build.bazel.remote.execution.v2.DigestFunction.Value +} +var file_pkg_proto_remoteworker_remoteworker_proto_depIdxs = []int32{ + 4, // 0: buildbarn.remoteworker.SynchronizeRequest.worker_id:type_name -> buildbarn.remoteworker.SynchronizeRequest.WorkerIdEntry + 8, // 1: buildbarn.remoteworker.SynchronizeRequest.platform:type_name -> build.bazel.remote.execution.v2.Platform + 1, // 2: buildbarn.remoteworker.SynchronizeRequest.current_state:type_name -> buildbarn.remoteworker.CurrentState + 9, // 3: buildbarn.remoteworker.CurrentState.idle:type_name -> google.protobuf.Empty + 5, // 4: buildbarn.remoteworker.CurrentState.executing:type_name -> buildbarn.remoteworker.CurrentState.Executing + 10, // 5: buildbarn.remoteworker.SynchronizeResponse.next_synchronization_at:type_name -> google.protobuf.Timestamp + 3, // 6: buildbarn.remoteworker.SynchronizeResponse.desired_state:type_name -> buildbarn.remoteworker.DesiredState + 9, // 7: buildbarn.remoteworker.DesiredState.idle:type_name -> google.protobuf.Empty + 6, // 8: buildbarn.remoteworker.DesiredState.executing:type_name -> buildbarn.remoteworker.DesiredState.Executing + 11, // 9: buildbarn.remoteworker.CurrentState.Executing.action_digest:type_name -> build.bazel.remote.execution.v2.Digest + 9, // 10: buildbarn.remoteworker.CurrentState.Executing.started:type_name -> google.protobuf.Empty + 9, // 11: buildbarn.remoteworker.CurrentState.Executing.fetching_inputs:type_name -> google.protobuf.Empty + 9, // 12: buildbarn.remoteworker.CurrentState.Executing.running:type_name -> google.protobuf.Empty + 9, // 13: buildbarn.remoteworker.CurrentState.Executing.uploading_outputs:type_name -> google.protobuf.Empty + 12, // 14: buildbarn.remoteworker.CurrentState.Executing.completed:type_name -> build.bazel.remote.execution.v2.ExecuteResponse + 11, // 15: buildbarn.remoteworker.DesiredState.Executing.action_digest:type_name -> build.bazel.remote.execution.v2.Digest + 13, // 16: 
buildbarn.remoteworker.DesiredState.Executing.action:type_name -> build.bazel.remote.execution.v2.Action + 10, // 17: buildbarn.remoteworker.DesiredState.Executing.queued_timestamp:type_name -> google.protobuf.Timestamp + 14, // 18: buildbarn.remoteworker.DesiredState.Executing.auxiliary_metadata:type_name -> google.protobuf.Any + 7, // 19: buildbarn.remoteworker.DesiredState.Executing.w3c_trace_context:type_name -> buildbarn.remoteworker.DesiredState.Executing.W3cTraceContextEntry + 15, // 20: buildbarn.remoteworker.DesiredState.Executing.digest_function:type_name -> build.bazel.remote.execution.v2.DigestFunction.Value + 0, // 21: buildbarn.remoteworker.OperationQueue.Synchronize:input_type -> buildbarn.remoteworker.SynchronizeRequest + 2, // 22: buildbarn.remoteworker.OperationQueue.Synchronize:output_type -> buildbarn.remoteworker.SynchronizeResponse + 22, // [22:23] is the sub-list for method output_type + 21, // [21:22] is the sub-list for method input_type + 21, // [21:21] is the sub-list for extension type_name + 21, // [21:21] is the sub-list for extension extendee + 0, // [0:21] is the sub-list for field type_name +} + +func init() { file_pkg_proto_remoteworker_remoteworker_proto_init() } +func file_pkg_proto_remoteworker_remoteworker_proto_init() { + if File_pkg_proto_remoteworker_remoteworker_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_pkg_proto_remoteworker_remoteworker_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SynchronizeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_proto_remoteworker_remoteworker_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CurrentState); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_pkg_proto_remoteworker_remoteworker_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SynchronizeResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_proto_remoteworker_remoteworker_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DesiredState); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_proto_remoteworker_remoteworker_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CurrentState_Executing); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_proto_remoteworker_remoteworker_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DesiredState_Executing); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_pkg_proto_remoteworker_remoteworker_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*CurrentState_Idle)(nil), + (*CurrentState_Executing_)(nil), + } + file_pkg_proto_remoteworker_remoteworker_proto_msgTypes[3].OneofWrappers = []interface{}{ + (*DesiredState_Idle)(nil), + (*DesiredState_Executing_)(nil), + } + file_pkg_proto_remoteworker_remoteworker_proto_msgTypes[5].OneofWrappers = []interface{}{ + (*CurrentState_Executing_Started)(nil), + (*CurrentState_Executing_FetchingInputs)(nil), + (*CurrentState_Executing_Running)(nil), + (*CurrentState_Executing_UploadingOutputs)(nil), + (*CurrentState_Executing_Completed)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_pkg_proto_remoteworker_remoteworker_proto_rawDesc, + NumEnums: 
0, + NumMessages: 8, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_pkg_proto_remoteworker_remoteworker_proto_goTypes, + DependencyIndexes: file_pkg_proto_remoteworker_remoteworker_proto_depIdxs, + MessageInfos: file_pkg_proto_remoteworker_remoteworker_proto_msgTypes, + }.Build() + File_pkg_proto_remoteworker_remoteworker_proto = out.File + file_pkg_proto_remoteworker_remoteworker_proto_rawDesc = nil + file_pkg_proto_remoteworker_remoteworker_proto_goTypes = nil + file_pkg_proto_remoteworker_remoteworker_proto_depIdxs = nil +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// OperationQueueClient is the client API for OperationQueue service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type OperationQueueClient interface { + Synchronize(ctx context.Context, in *SynchronizeRequest, opts ...grpc.CallOption) (*SynchronizeResponse, error) +} + +type operationQueueClient struct { + cc grpc.ClientConnInterface +} + +func NewOperationQueueClient(cc grpc.ClientConnInterface) OperationQueueClient { + return &operationQueueClient{cc} +} + +func (c *operationQueueClient) Synchronize(ctx context.Context, in *SynchronizeRequest, opts ...grpc.CallOption) (*SynchronizeResponse, error) { + out := new(SynchronizeResponse) + err := c.cc.Invoke(ctx, "/buildbarn.remoteworker.OperationQueue/Synchronize", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// OperationQueueServer is the server API for OperationQueue service. 
+type OperationQueueServer interface { + Synchronize(context.Context, *SynchronizeRequest) (*SynchronizeResponse, error) +} + +// UnimplementedOperationQueueServer can be embedded to have forward compatible implementations. +type UnimplementedOperationQueueServer struct { +} + +func (*UnimplementedOperationQueueServer) Synchronize(context.Context, *SynchronizeRequest) (*SynchronizeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Synchronize not implemented") +} + +func RegisterOperationQueueServer(s grpc.ServiceRegistrar, srv OperationQueueServer) { + s.RegisterService(&_OperationQueue_serviceDesc, srv) +} + +func _OperationQueue_Synchronize_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SynchronizeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(OperationQueueServer).Synchronize(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/buildbarn.remoteworker.OperationQueue/Synchronize", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(OperationQueueServer).Synchronize(ctx, req.(*SynchronizeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _OperationQueue_serviceDesc = grpc.ServiceDesc{ + ServiceName: "buildbarn.remoteworker.OperationQueue", + HandlerType: (*OperationQueueServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Synchronize", + Handler: _OperationQueue_Synchronize_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "pkg/proto/remoteworker/remoteworker.proto", +} diff --git a/pkg/proto/remoteworker/remoteworker.proto b/pkg/proto/remoteworker/remoteworker.proto new file mode 100644 index 0000000..434315c --- /dev/null +++ b/pkg/proto/remoteworker/remoteworker.proto @@ -0,0 +1,210 @@ +syntax = "proto3"; + +package buildbarn.remoteworker; + +import 
"build/bazel/remote/execution/v2/remote_execution.proto"; +import "google/protobuf/any.proto"; +import "google/protobuf/empty.proto"; +import "google/protobuf/timestamp.proto"; + +option go_package = "github.com/buildbarn/bb-remote-execution/pkg/proto/remoteworker"; + +// Buildbarn's workers connect to the scheduler to receive instructions +// on what they should be doing. They can either be instructed to be +// idle or to execute a build action. They can also report their state +// to the scheduler. The purpose of reporting state is twofold: +// +// - Upon completion of a build action, the worker reports the outcome +// of the build action, so that it may be communicated back to a +// client. +// - It allows for centralized management/insight in the functioning of +// the build cluster. +// +// All of this exchange of information takes place through a single type +// of RPC named Synchronize(), called by the worker against the +// scheduler. The worker provides information about its identity and its +// current state. The scheduler responds with instructions on whether to +// do something different or to continue. +// +// Every response contains a timestamp that instructs the worker when to +// resynchronize. Calls to Synchronize() are guaranteed to be +// non-blocking when it is executing a build action. They may be +// blocking in case the worker is idle or reporting the completion of a +// build action. In that case the scheduler may decide to let the call +// hang until more work is available. +service OperationQueue { + rpc Synchronize(SynchronizeRequest) returns (SynchronizeResponse); +} + +message SynchronizeRequest { + // Set of key-value properties that uniquely identify (a thread of) a + // worker capable of executing actions. These properties are not + // intended to describe the execution capabilities of the worker, nor + // should individual elements be interpreted by the scheduler. 
+ // + // These properties are present to permit the scheduler to associate + // internal state with a worker. They may also be displayed as part of + // logs, dashboards, etc. + map worker_id = 1; + + // The prefix of the instance name for which requests from clients + // should be routed to this worker. Any trailing components will be + // returned by the scheduler through + // DesiredState.Executing.instance_name_suffix. + string instance_name_prefix = 2; + + // The platform features available for the execution environment. The + // scheduler uses these features to determine which actions are + // eligible to be executed on this worker. + build.bazel.remote.execution.v2.Platform platform = 3; + + // The size of this worker in terms of CPU count and memory size. + uint32 size_class = 5; + + // The kind of activity the worker is currently performing. + CurrentState current_state = 4; + + // The worker prefers to be idle. This flag may be set by workers if + // they want to terminate gracefully. The scheduler should respond to + // such requests without blocking. + // + // Additionally, workers may set this flag in case they are in a + // degraded state (e.g., bb_runner not being up and running). This + // allows workers to temporarily suspend until the system recovers. + bool prefer_being_idle = 6; +} + +message CurrentState { + message Executing { + // The digest of the action currently being executed. This digest + // must be set the value obtained through + // DesiredState.Executing.action_digest. + build.bazel.remote.execution.v2.Digest action_digest = 1; + + oneof execution_state { + // The worker is initializing its build environment. + google.protobuf.Empty started = 2; + + // The worker is fetching inputs that are a prerequisite for + // execution. Prerequisites include the initial file system + // contents if instantiated explicitly. + google.protobuf.Empty fetching_inputs = 3; + + // The command associated with the action is currently being run. 
+ google.protobuf.Empty running = 4; + + // Output files of the action are currently being uploaded. + google.protobuf.Empty uploading_outputs = 5; + + // Execution of the action has completed. Results may be + // communicated to the client. + build.bazel.remote.execution.v2.ExecuteResponse completed = 6; + } + + // Was 'prefer_being_idle'. This field has been promoted to + // SynchronizeRequest. + reserved 7; + } + + oneof worker_state { + // The worker is currently idle. + google.protobuf.Empty idle = 1; + + // The worker is currently executing a Remote Execution action. It + // may also be finished executing an action, waiting to receive + // information about the next action to run. + Executing executing = 2; + } +} + +message SynchronizeResponse { + // The timestamp at which the worker is supposed to synchronize again. + // The worker is permitted to contact the scheduler earlier if any + // notable events occur (e.g., execution of an action completes). + // + // If the worker does not synchronize with the scheduler at around + // this time, the scheduler may assume the worker has gone offline. + // To compensate for clock inaccuracies, network latency and transient + // network failures, the scheduler should tolerate a certain amount of + // skew. Schedulers may vary the interval returned to tune the amount + // of load received. + google.protobuf.Timestamp next_synchronization_at = 1; + + // The state to which the scheduler desires the worker to transition. + // This value must be left unset in case the scheduler desires the + // worker to remain in its current state. For example, when it wants + // the worker to continue executing the currently running build + // action. + DesiredState desired_state = 2; +} + +message DesiredState { + message Executing { + // The digest of the action that should be executed. + build.bazel.remote.execution.v2.Digest action_digest = 1; + + // The action message corresponding with the action digest. 
Although + // the worker could also load the action from the Content + // Addressable Storage, the scheduler also needs to access this + // information. It is passed on to prevent duplicate requests + // against storage. + // + // Even though the Remote Execution protocol allows the execution + // timeout value to be null, this protocol requires it to be set. + // The scheduler is responsible for filling in the default value and + // enforcing a maximum value. The scheduler is permitted to override + // the timeout value specified by the client. + build.bazel.remote.execution.v2.Action action = 2; + + // Was 'command'. + reserved 3; + + // The point in time at which the operation associated with this + // execution request was created within the scheduler. The worker + // can attach this data to ExecutedActionMetadata's + // queued_timestamp. + google.protobuf.Timestamp queued_timestamp = 4; + + // Was 'trace_context'. Replaced by 'w3c_trace_context'. + reserved 5; + + // Auxiliary metadata that the client sent or the scheduler + // generated. The worker can attach this data to + // ExecutedActionMetadata's auxiliary_metadata. + repeated google.protobuf.Any auxiliary_metadata = 6; + + // Additional components of the instance name that the client + // provided as part of the ExecuteRequest, but were not announced by + // the worker as part of SynchronizeRequest.instance_name_prefix. + // The worker needs to append these components to its instance name + // prefix when making requests against storage. + string instance_name_suffix = 7; + + // Trace/Span context data for the trace that started this + // execution, formatted as W3C Trace Context headers. This is used + // to associate the worker's storage activity back to the trace that + // started the action. + map w3c_trace_context = 8; + + // The digest function that was used to compute the action digest. 
+ build.bazel.remote.execution.v2.DigestFunction.Value digest_function = 9; + } + + oneof worker_state { + // The worker is currently not idle. The scheduler desires it to be + // idle instead. This could for example occur in case the client + // providing the build action disappears. + // + // It is not needed to forcefully switch the worker to idle when the + // worker finishes executing a build action and no additional work + // is available. The scheduler can decide to let the Synchronize() + // call block until more work is available, switching the worker + // directly to the next 'Executing' state. + google.protobuf.Empty idle = 1; + + // The worker is currently idle, finished executing a build action, + // or is running a build action that should be cancelled. The + // scheduler desires it to execute another build action instead. + Executing executing = 2; + } +} diff --git a/pkg/proto/resourceusage/BUILD.bazel b/pkg/proto/resourceusage/BUILD.bazel new file mode 100644 index 0000000..45e5540 --- /dev/null +++ b/pkg/proto/resourceusage/BUILD.bazel @@ -0,0 +1,24 @@ +load("@rules_proto//proto:defs.bzl", "proto_library") +load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") + +proto_library( + name = "resourceusage_proto", + srcs = ["resourceusage.proto"], + visibility = ["//visibility:public"], + deps = ["@com_google_protobuf//:duration_proto"], +) + +go_proto_library( + name = "resourceusage_go_proto", + importpath = "github.com/buildbarn/bb-remote-execution/pkg/proto/resourceusage", + proto = ":resourceusage_proto", + visibility = ["//visibility:public"], +) + +go_library( + name = "resourceusage", + embed = [":resourceusage_go_proto"], + importpath = "github.com/buildbarn/bb-remote-execution/pkg/proto/resourceusage", + visibility = ["//visibility:public"], +) diff --git a/pkg/proto/resourceusage/resourceusage.pb.go b/pkg/proto/resourceusage/resourceusage.pb.go new file mode 100644 index 
0000000..09bde28 --- /dev/null +++ b/pkg/proto/resourceusage/resourceusage.pb.go @@ -0,0 +1,673 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.31.0 +// protoc v4.23.4 +// source: pkg/proto/resourceusage/resourceusage.proto + +package resourceusage + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type FilePoolResourceUsage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + FilesCreated uint64 `protobuf:"varint,1,opt,name=files_created,json=filesCreated,proto3" json:"files_created,omitempty"` + FilesCountPeak uint64 `protobuf:"varint,2,opt,name=files_count_peak,json=filesCountPeak,proto3" json:"files_count_peak,omitempty"` + FilesSizeBytesPeak uint64 `protobuf:"varint,3,opt,name=files_size_bytes_peak,json=filesSizeBytesPeak,proto3" json:"files_size_bytes_peak,omitempty"` + ReadsCount uint64 `protobuf:"varint,4,opt,name=reads_count,json=readsCount,proto3" json:"reads_count,omitempty"` + ReadsSizeBytes uint64 `protobuf:"varint,5,opt,name=reads_size_bytes,json=readsSizeBytes,proto3" json:"reads_size_bytes,omitempty"` + WritesCount uint64 `protobuf:"varint,6,opt,name=writes_count,json=writesCount,proto3" json:"writes_count,omitempty"` + WritesSizeBytes uint64 `protobuf:"varint,7,opt,name=writes_size_bytes,json=writesSizeBytes,proto3" json:"writes_size_bytes,omitempty"` + TruncatesCount uint64 `protobuf:"varint,8,opt,name=truncates_count,json=truncatesCount,proto3" json:"truncates_count,omitempty"` +} + +func (x 
*FilePoolResourceUsage) Reset() { + *x = FilePoolResourceUsage{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_resourceusage_resourceusage_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FilePoolResourceUsage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FilePoolResourceUsage) ProtoMessage() {} + +func (x *FilePoolResourceUsage) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_resourceusage_resourceusage_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FilePoolResourceUsage.ProtoReflect.Descriptor instead. +func (*FilePoolResourceUsage) Descriptor() ([]byte, []int) { + return file_pkg_proto_resourceusage_resourceusage_proto_rawDescGZIP(), []int{0} +} + +func (x *FilePoolResourceUsage) GetFilesCreated() uint64 { + if x != nil { + return x.FilesCreated + } + return 0 +} + +func (x *FilePoolResourceUsage) GetFilesCountPeak() uint64 { + if x != nil { + return x.FilesCountPeak + } + return 0 +} + +func (x *FilePoolResourceUsage) GetFilesSizeBytesPeak() uint64 { + if x != nil { + return x.FilesSizeBytesPeak + } + return 0 +} + +func (x *FilePoolResourceUsage) GetReadsCount() uint64 { + if x != nil { + return x.ReadsCount + } + return 0 +} + +func (x *FilePoolResourceUsage) GetReadsSizeBytes() uint64 { + if x != nil { + return x.ReadsSizeBytes + } + return 0 +} + +func (x *FilePoolResourceUsage) GetWritesCount() uint64 { + if x != nil { + return x.WritesCount + } + return 0 +} + +func (x *FilePoolResourceUsage) GetWritesSizeBytes() uint64 { + if x != nil { + return x.WritesSizeBytes + } + return 0 +} + +func (x *FilePoolResourceUsage) GetTruncatesCount() uint64 { + if x != nil { + return x.TruncatesCount + } + return 0 +} + +type POSIXResourceUsage struct { 
+ state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + UserTime *durationpb.Duration `protobuf:"bytes,1,opt,name=user_time,json=userTime,proto3" json:"user_time,omitempty"` + SystemTime *durationpb.Duration `protobuf:"bytes,2,opt,name=system_time,json=systemTime,proto3" json:"system_time,omitempty"` + MaximumResidentSetSize int64 `protobuf:"varint,3,opt,name=maximum_resident_set_size,json=maximumResidentSetSize,proto3" json:"maximum_resident_set_size,omitempty"` + PageReclaims int64 `protobuf:"varint,7,opt,name=page_reclaims,json=pageReclaims,proto3" json:"page_reclaims,omitempty"` + PageFaults int64 `protobuf:"varint,8,opt,name=page_faults,json=pageFaults,proto3" json:"page_faults,omitempty"` + Swaps int64 `protobuf:"varint,9,opt,name=swaps,proto3" json:"swaps,omitempty"` + BlockInputOperations int64 `protobuf:"varint,10,opt,name=block_input_operations,json=blockInputOperations,proto3" json:"block_input_operations,omitempty"` + BlockOutputOperations int64 `protobuf:"varint,11,opt,name=block_output_operations,json=blockOutputOperations,proto3" json:"block_output_operations,omitempty"` + MessagesSent int64 `protobuf:"varint,12,opt,name=messages_sent,json=messagesSent,proto3" json:"messages_sent,omitempty"` + MessagesReceived int64 `protobuf:"varint,13,opt,name=messages_received,json=messagesReceived,proto3" json:"messages_received,omitempty"` + SignalsReceived int64 `protobuf:"varint,14,opt,name=signals_received,json=signalsReceived,proto3" json:"signals_received,omitempty"` + VoluntaryContextSwitches int64 `protobuf:"varint,15,opt,name=voluntary_context_switches,json=voluntaryContextSwitches,proto3" json:"voluntary_context_switches,omitempty"` + InvoluntaryContextSwitches int64 `protobuf:"varint,16,opt,name=involuntary_context_switches,json=involuntaryContextSwitches,proto3" json:"involuntary_context_switches,omitempty"` + TerminationSignal string 
`protobuf:"bytes,17,opt,name=termination_signal,json=terminationSignal,proto3" json:"termination_signal,omitempty"` +} + +func (x *POSIXResourceUsage) Reset() { + *x = POSIXResourceUsage{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_resourceusage_resourceusage_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *POSIXResourceUsage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*POSIXResourceUsage) ProtoMessage() {} + +func (x *POSIXResourceUsage) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_resourceusage_resourceusage_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use POSIXResourceUsage.ProtoReflect.Descriptor instead. +func (*POSIXResourceUsage) Descriptor() ([]byte, []int) { + return file_pkg_proto_resourceusage_resourceusage_proto_rawDescGZIP(), []int{1} +} + +func (x *POSIXResourceUsage) GetUserTime() *durationpb.Duration { + if x != nil { + return x.UserTime + } + return nil +} + +func (x *POSIXResourceUsage) GetSystemTime() *durationpb.Duration { + if x != nil { + return x.SystemTime + } + return nil +} + +func (x *POSIXResourceUsage) GetMaximumResidentSetSize() int64 { + if x != nil { + return x.MaximumResidentSetSize + } + return 0 +} + +func (x *POSIXResourceUsage) GetPageReclaims() int64 { + if x != nil { + return x.PageReclaims + } + return 0 +} + +func (x *POSIXResourceUsage) GetPageFaults() int64 { + if x != nil { + return x.PageFaults + } + return 0 +} + +func (x *POSIXResourceUsage) GetSwaps() int64 { + if x != nil { + return x.Swaps + } + return 0 +} + +func (x *POSIXResourceUsage) GetBlockInputOperations() int64 { + if x != nil { + return x.BlockInputOperations + } + return 0 +} + +func (x *POSIXResourceUsage) GetBlockOutputOperations() 
int64 { + if x != nil { + return x.BlockOutputOperations + } + return 0 +} + +func (x *POSIXResourceUsage) GetMessagesSent() int64 { + if x != nil { + return x.MessagesSent + } + return 0 +} + +func (x *POSIXResourceUsage) GetMessagesReceived() int64 { + if x != nil { + return x.MessagesReceived + } + return 0 +} + +func (x *POSIXResourceUsage) GetSignalsReceived() int64 { + if x != nil { + return x.SignalsReceived + } + return 0 +} + +func (x *POSIXResourceUsage) GetVoluntaryContextSwitches() int64 { + if x != nil { + return x.VoluntaryContextSwitches + } + return 0 +} + +func (x *POSIXResourceUsage) GetInvoluntaryContextSwitches() int64 { + if x != nil { + return x.InvoluntaryContextSwitches + } + return 0 +} + +func (x *POSIXResourceUsage) GetTerminationSignal() string { + if x != nil { + return x.TerminationSignal + } + return "" +} + +type MonetaryResourceUsage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Expenses map[string]*MonetaryResourceUsage_Expense `protobuf:"bytes,1,rep,name=expenses,proto3" json:"expenses,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *MonetaryResourceUsage) Reset() { + *x = MonetaryResourceUsage{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_resourceusage_resourceusage_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MonetaryResourceUsage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MonetaryResourceUsage) ProtoMessage() {} + +func (x *MonetaryResourceUsage) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_resourceusage_resourceusage_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use 
MonetaryResourceUsage.ProtoReflect.Descriptor instead. +func (*MonetaryResourceUsage) Descriptor() ([]byte, []int) { + return file_pkg_proto_resourceusage_resourceusage_proto_rawDescGZIP(), []int{2} +} + +func (x *MonetaryResourceUsage) GetExpenses() map[string]*MonetaryResourceUsage_Expense { + if x != nil { + return x.Expenses + } + return nil +} + +type InputRootResourceUsage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + DirectoriesResolved uint64 `protobuf:"varint,1,opt,name=directories_resolved,json=directoriesResolved,proto3" json:"directories_resolved,omitempty"` + DirectoriesRead uint64 `protobuf:"varint,2,opt,name=directories_read,json=directoriesRead,proto3" json:"directories_read,omitempty"` + FilesRead uint64 `protobuf:"varint,3,opt,name=files_read,json=filesRead,proto3" json:"files_read,omitempty"` +} + +func (x *InputRootResourceUsage) Reset() { + *x = InputRootResourceUsage{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_resourceusage_resourceusage_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *InputRootResourceUsage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*InputRootResourceUsage) ProtoMessage() {} + +func (x *InputRootResourceUsage) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_resourceusage_resourceusage_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use InputRootResourceUsage.ProtoReflect.Descriptor instead. 
+func (*InputRootResourceUsage) Descriptor() ([]byte, []int) { + return file_pkg_proto_resourceusage_resourceusage_proto_rawDescGZIP(), []int{3} +} + +func (x *InputRootResourceUsage) GetDirectoriesResolved() uint64 { + if x != nil { + return x.DirectoriesResolved + } + return 0 +} + +func (x *InputRootResourceUsage) GetDirectoriesRead() uint64 { + if x != nil { + return x.DirectoriesRead + } + return 0 +} + +func (x *InputRootResourceUsage) GetFilesRead() uint64 { + if x != nil { + return x.FilesRead + } + return 0 +} + +type MonetaryResourceUsage_Expense struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Currency string `protobuf:"bytes,1,opt,name=currency,proto3" json:"currency,omitempty"` + Cost float64 `protobuf:"fixed64,2,opt,name=cost,proto3" json:"cost,omitempty"` +} + +func (x *MonetaryResourceUsage_Expense) Reset() { + *x = MonetaryResourceUsage_Expense{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_resourceusage_resourceusage_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MonetaryResourceUsage_Expense) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MonetaryResourceUsage_Expense) ProtoMessage() {} + +func (x *MonetaryResourceUsage_Expense) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_resourceusage_resourceusage_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MonetaryResourceUsage_Expense.ProtoReflect.Descriptor instead. 
+func (*MonetaryResourceUsage_Expense) Descriptor() ([]byte, []int) { + return file_pkg_proto_resourceusage_resourceusage_proto_rawDescGZIP(), []int{2, 0} +} + +func (x *MonetaryResourceUsage_Expense) GetCurrency() string { + if x != nil { + return x.Currency + } + return "" +} + +func (x *MonetaryResourceUsage_Expense) GetCost() float64 { + if x != nil { + return x.Cost + } + return 0 +} + +var File_pkg_proto_resourceusage_resourceusage_proto protoreflect.FileDescriptor + +var file_pkg_proto_resourceusage_resourceusage_proto_rawDesc = []byte{ + 0x0a, 0x2b, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x75, 0x73, 0x61, 0x67, 0x65, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x75, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x17, 0x62, + 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x75, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xdc, 0x02, 0x0a, 0x15, 0x46, 0x69, 0x6c, 0x65, 0x50, + 0x6f, 0x6f, 0x6c, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x55, 0x73, 0x61, 0x67, 0x65, + 0x12, 0x23, 0x0a, 0x0d, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x5f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x43, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x64, 0x12, 0x28, 0x0a, 0x10, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x5f, 0x63, + 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x70, 0x65, 0x61, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x0e, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x50, 0x65, 0x61, 0x6b, 0x12, + 0x31, 0x0a, 0x15, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x62, 0x79, + 0x74, 0x65, 0x73, 0x5f, 0x70, 0x65, 0x61, 0x6b, 0x18, 0x03, 0x20, 0x01, 
0x28, 0x04, 0x52, 0x12, + 0x66, 0x69, 0x6c, 0x65, 0x73, 0x53, 0x69, 0x7a, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x50, 0x65, + 0x61, 0x6b, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x61, 0x64, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x72, 0x65, 0x61, 0x64, 0x73, 0x43, 0x6f, + 0x75, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x72, 0x65, 0x61, 0x64, 0x73, 0x5f, 0x73, 0x69, 0x7a, + 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, 0x72, + 0x65, 0x61, 0x64, 0x73, 0x53, 0x69, 0x7a, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x21, 0x0a, + 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x0b, 0x77, 0x72, 0x69, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, + 0x12, 0x2a, 0x0a, 0x11, 0x77, 0x72, 0x69, 0x74, 0x65, 0x73, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, + 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, 0x77, 0x72, 0x69, + 0x74, 0x65, 0x73, 0x53, 0x69, 0x7a, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x27, 0x0a, 0x0f, + 0x74, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, + 0x08, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, 0x74, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x65, 0x73, + 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0xcb, 0x05, 0x0a, 0x12, 0x50, 0x4f, 0x53, 0x49, 0x58, 0x52, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x55, 0x73, 0x61, 0x67, 0x65, 0x12, 0x36, 0x0a, 0x09, + 0x75, 0x73, 0x65, 0x72, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, + 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3a, 0x0a, 0x0b, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x5f, 0x74, + 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x54, 0x69, 0x6d, 0x65, + 0x12, 0x39, 0x0a, 0x19, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x72, 0x65, 0x73, 0x69, + 0x64, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x65, 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x16, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x69, + 0x64, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x74, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x70, + 0x61, 0x67, 0x65, 0x5f, 0x72, 0x65, 0x63, 0x6c, 0x61, 0x69, 0x6d, 0x73, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x0c, 0x70, 0x61, 0x67, 0x65, 0x52, 0x65, 0x63, 0x6c, 0x61, 0x69, 0x6d, 0x73, + 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, + 0x08, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x46, 0x61, 0x75, 0x6c, 0x74, + 0x73, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x77, 0x61, 0x70, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x05, 0x73, 0x77, 0x61, 0x70, 0x73, 0x12, 0x34, 0x0a, 0x16, 0x62, 0x6c, 0x6f, 0x63, 0x6b, + 0x5f, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x03, 0x52, 0x14, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x49, 0x6e, + 0x70, 0x75, 0x74, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x36, 0x0a, + 0x17, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x6f, 0x70, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x03, 0x52, 0x15, + 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x4f, 0x70, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x73, 0x5f, 0x73, 0x65, 0x6e, 0x74, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0c, 0x6d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x53, 0x65, 
0x6e, 0x74, 0x12, 0x2b, 0x0a, 0x11, 0x6d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, 0x18, + 0x0d, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x52, + 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, 0x12, 0x29, 0x0a, 0x10, 0x73, 0x69, 0x67, 0x6e, 0x61, + 0x6c, 0x73, 0x5f, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, 0x18, 0x0e, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x0f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x73, 0x52, 0x65, 0x63, 0x65, 0x69, 0x76, + 0x65, 0x64, 0x12, 0x3c, 0x0a, 0x1a, 0x76, 0x6f, 0x6c, 0x75, 0x6e, 0x74, 0x61, 0x72, 0x79, 0x5f, + 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x73, 0x77, 0x69, 0x74, 0x63, 0x68, 0x65, 0x73, + 0x18, 0x0f, 0x20, 0x01, 0x28, 0x03, 0x52, 0x18, 0x76, 0x6f, 0x6c, 0x75, 0x6e, 0x74, 0x61, 0x72, + 0x79, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x53, 0x77, 0x69, 0x74, 0x63, 0x68, 0x65, 0x73, + 0x12, 0x40, 0x0a, 0x1c, 0x69, 0x6e, 0x76, 0x6f, 0x6c, 0x75, 0x6e, 0x74, 0x61, 0x72, 0x79, 0x5f, + 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x73, 0x77, 0x69, 0x74, 0x63, 0x68, 0x65, 0x73, + 0x18, 0x10, 0x20, 0x01, 0x28, 0x03, 0x52, 0x1a, 0x69, 0x6e, 0x76, 0x6f, 0x6c, 0x75, 0x6e, 0x74, + 0x61, 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x53, 0x77, 0x69, 0x74, 0x63, 0x68, + 0x65, 0x73, 0x12, 0x2d, 0x0a, 0x12, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x18, 0x11, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, + 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x69, 0x67, 0x6e, 0x61, + 0x6c, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, + 0x06, 0x10, 0x07, 0x22, 0xa1, 0x02, 0x0a, 0x15, 0x4d, 0x6f, 0x6e, 0x65, 0x74, 0x61, 0x72, 0x79, + 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x55, 0x73, 0x61, 0x67, 0x65, 0x12, 0x58, 0x0a, + 0x08, 0x65, 0x78, 0x70, 0x65, 0x6e, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 
0x0b, 0x32, + 0x3c, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x75, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x4d, 0x6f, 0x6e, 0x65, 0x74, 0x61, + 0x72, 0x79, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x55, 0x73, 0x61, 0x67, 0x65, 0x2e, + 0x45, 0x78, 0x70, 0x65, 0x6e, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x65, + 0x78, 0x70, 0x65, 0x6e, 0x73, 0x65, 0x73, 0x1a, 0x39, 0x0a, 0x07, 0x45, 0x78, 0x70, 0x65, 0x6e, + 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x12, + 0x0a, 0x04, 0x63, 0x6f, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x04, 0x63, 0x6f, + 0x73, 0x74, 0x1a, 0x73, 0x0a, 0x0d, 0x45, 0x78, 0x70, 0x65, 0x6e, 0x73, 0x65, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x4c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, + 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x75, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x4d, + 0x6f, 0x6e, 0x65, 0x74, 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x55, + 0x73, 0x61, 0x67, 0x65, 0x2e, 0x45, 0x78, 0x70, 0x65, 0x6e, 0x73, 0x65, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x95, 0x01, 0x0a, 0x16, 0x49, 0x6e, 0x70, 0x75, + 0x74, 0x52, 0x6f, 0x6f, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x55, 0x73, 0x61, + 0x67, 0x65, 0x12, 0x31, 0x0a, 0x14, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x69, 0x65, + 0x73, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x13, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x73, + 0x6f, 0x6c, 0x76, 0x65, 
0x64, 0x12, 0x29, 0x0a, 0x10, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, + 0x72, 0x69, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x0f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x61, 0x64, + 0x12, 0x1d, 0x0a, 0x0a, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x61, 0x64, 0x42, + 0x42, 0x5a, 0x40, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x62, 0x75, + 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2f, 0x62, 0x62, 0x2d, 0x72, 0x65, 0x6d, 0x6f, 0x74, + 0x65, 0x2d, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x70, 0x6b, 0x67, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x75, 0x73, + 0x61, 0x67, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_pkg_proto_resourceusage_resourceusage_proto_rawDescOnce sync.Once + file_pkg_proto_resourceusage_resourceusage_proto_rawDescData = file_pkg_proto_resourceusage_resourceusage_proto_rawDesc +) + +func file_pkg_proto_resourceusage_resourceusage_proto_rawDescGZIP() []byte { + file_pkg_proto_resourceusage_resourceusage_proto_rawDescOnce.Do(func() { + file_pkg_proto_resourceusage_resourceusage_proto_rawDescData = protoimpl.X.CompressGZIP(file_pkg_proto_resourceusage_resourceusage_proto_rawDescData) + }) + return file_pkg_proto_resourceusage_resourceusage_proto_rawDescData +} + +var file_pkg_proto_resourceusage_resourceusage_proto_msgTypes = make([]protoimpl.MessageInfo, 6) +var file_pkg_proto_resourceusage_resourceusage_proto_goTypes = []interface{}{ + (*FilePoolResourceUsage)(nil), // 0: buildbarn.resourceusage.FilePoolResourceUsage + (*POSIXResourceUsage)(nil), // 1: buildbarn.resourceusage.POSIXResourceUsage + (*MonetaryResourceUsage)(nil), // 2: buildbarn.resourceusage.MonetaryResourceUsage + (*InputRootResourceUsage)(nil), // 3: 
buildbarn.resourceusage.InputRootResourceUsage + (*MonetaryResourceUsage_Expense)(nil), // 4: buildbarn.resourceusage.MonetaryResourceUsage.Expense + nil, // 5: buildbarn.resourceusage.MonetaryResourceUsage.ExpensesEntry + (*durationpb.Duration)(nil), // 6: google.protobuf.Duration +} +var file_pkg_proto_resourceusage_resourceusage_proto_depIdxs = []int32{ + 6, // 0: buildbarn.resourceusage.POSIXResourceUsage.user_time:type_name -> google.protobuf.Duration + 6, // 1: buildbarn.resourceusage.POSIXResourceUsage.system_time:type_name -> google.protobuf.Duration + 5, // 2: buildbarn.resourceusage.MonetaryResourceUsage.expenses:type_name -> buildbarn.resourceusage.MonetaryResourceUsage.ExpensesEntry + 4, // 3: buildbarn.resourceusage.MonetaryResourceUsage.ExpensesEntry.value:type_name -> buildbarn.resourceusage.MonetaryResourceUsage.Expense + 4, // [4:4] is the sub-list for method output_type + 4, // [4:4] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name +} + +func init() { file_pkg_proto_resourceusage_resourceusage_proto_init() } +func file_pkg_proto_resourceusage_resourceusage_proto_init() { + if File_pkg_proto_resourceusage_resourceusage_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_pkg_proto_resourceusage_resourceusage_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FilePoolResourceUsage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_proto_resourceusage_resourceusage_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*POSIXResourceUsage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_pkg_proto_resourceusage_resourceusage_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MonetaryResourceUsage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_proto_resourceusage_resourceusage_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*InputRootResourceUsage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_proto_resourceusage_resourceusage_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MonetaryResourceUsage_Expense); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_pkg_proto_resourceusage_resourceusage_proto_rawDesc, + NumEnums: 0, + NumMessages: 6, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_pkg_proto_resourceusage_resourceusage_proto_goTypes, + DependencyIndexes: file_pkg_proto_resourceusage_resourceusage_proto_depIdxs, + MessageInfos: file_pkg_proto_resourceusage_resourceusage_proto_msgTypes, + }.Build() + File_pkg_proto_resourceusage_resourceusage_proto = out.File + file_pkg_proto_resourceusage_resourceusage_proto_rawDesc = nil + file_pkg_proto_resourceusage_resourceusage_proto_goTypes = nil + file_pkg_proto_resourceusage_resourceusage_proto_depIdxs = nil +} diff --git a/pkg/proto/resourceusage/resourceusage.proto b/pkg/proto/resourceusage/resourceusage.proto new file mode 100644 index 0000000..112b8f1 --- /dev/null +++ b/pkg/proto/resourceusage/resourceusage.proto @@ -0,0 +1,128 @@ +syntax = "proto3"; + +package buildbarn.resourceusage; + +import "google/protobuf/duration.proto"; + +option 
go_package = "github.com/buildbarn/bb-remote-execution/pkg/proto/resourceusage"; + +// File pool resource usage statistics. File pools are used by bb_worker +// to allocate temporary files that are created by build actions. +// Temporary files include output files created through the FUSE file +// system. +message FilePoolResourceUsage { + // Total number of files created. + uint64 files_created = 1; + + // Maximum number of files that existed at some point in time. + uint64 files_count_peak = 2; + + // Maximum total size of all files at some point in time. + uint64 files_size_bytes_peak = 3; + + // Total number of ReadAt() calls performed. + uint64 reads_count = 4; + + // Total amount of data returned by all ReadAt() calls. + uint64 reads_size_bytes = 5; + + // Total number of WriteAt() calls performed. + uint64 writes_count = 6; + + // Total amount of data processed by all WriteAt() calls. + uint64 writes_size_bytes = 7; + + // Total number of Truncate() calls performed. + uint64 truncates_count = 8; +} + +// The equivalent of 'struct rusage' in POSIX, generally returned by +// getrusage(2) or wait4(2). +message POSIXResourceUsage { + // ru_utime: Amount of CPU time in seconds spent in userspace. + google.protobuf.Duration user_time = 1; + + // ru_stime: Amount of CPU time in seconds spent in kernelspace. + google.protobuf.Duration system_time = 2; + + // ru_maxrss: Maximum amount of resident memory in bytes. + int64 maximum_resident_set_size = 3; + + // ru_ixrss, ru_idrss and ru_isrss are omitted, as there is no + // portable way to obtain the number of ticks used to compute these + // integrals. + reserved 4, 5, 6; + + // ru_minflt: Page reclaims. + int64 page_reclaims = 7; + + // ru_majflt: Page faults. + int64 page_faults = 8; + + // ru_nswap: Number of swaps. + int64 swaps = 9; + + // ru_inblock: Block input operations. + int64 block_input_operations = 10; + + // ru_oublock: Block output operations. 
+ int64 block_output_operations = 11; + + // ru_msgsnd: Messages sent. + int64 messages_sent = 12; + + // ru_msgrcv: Messages received. + int64 messages_received = 13; + + // ru_nsignals: Signals received. + int64 signals_received = 14; + + // ru_nvcsw: Voluntary context switches. + int64 voluntary_context_switches = 15; + + // ru_nivcsw: Involuntary context switches. + int64 involuntary_context_switches = 16; + + // If abnormal process termination occurred, the name of the signal + // that was delivered, without the "SIG" prefix (e.g., "BUS", "KILL", + // "SEGV"). + // + // Abnormal process termination can occur by calling abort(), or by + // receiving a signal for which no signal handler is installed. + string termination_signal = 17; +} + +// A representation of unique factors that may be aggregated to +// compute a given build action's total price. +message MonetaryResourceUsage { + message Expense { + // The type of currency the cost is measured in. Required to be in + // ISO 4217 format: https://en.wikipedia.org/wiki/ISO_4217#Active_codes + string currency = 1; + + // The value of a specific expense for a build action. + double cost = 2; + } + + // A mapping of expense categories to their respective costs. + map expenses = 1; +} + +// Input root resource usage statistics. These statistics indicate how +// many directories and files inside the virtual file system were +// accessed. These statistics are only reported if prefetching is +// enabled, as they are computed together with the Bloom filter. +message InputRootResourceUsage { + // The number of directories in the input root that have been + // resolved. This equates to the total number of directories that are + // present in all directories that have been read. + uint64 directories_resolved = 1; + + // The number of directories whose contents have been read from the + // Content Addressable Storage (CAS). 
+ uint64 directories_read = 2; + + // The number of files whose contents have been read from the Content + // Addressable Storage (CAS). + uint64 files_read = 3; +} diff --git a/pkg/proto/runner/BUILD.bazel b/pkg/proto/runner/BUILD.bazel new file mode 100644 index 0000000..b68fb24 --- /dev/null +++ b/pkg/proto/runner/BUILD.bazel @@ -0,0 +1,28 @@ +load("@rules_proto//proto:defs.bzl", "proto_library") +load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") + +proto_library( + name = "runner_proto", + srcs = ["runner.proto"], + visibility = ["//visibility:public"], + deps = [ + "@com_google_protobuf//:any_proto", + "@com_google_protobuf//:empty_proto", + ], +) + +go_proto_library( + name = "runner_go_proto", + compilers = ["@io_bazel_rules_go//proto:go_grpc"], + importpath = "github.com/buildbarn/bb-remote-execution/pkg/proto/runner", + proto = ":runner_proto", + visibility = ["//visibility:public"], +) + +go_library( + name = "runner", + embed = [":runner_go_proto"], + importpath = "github.com/buildbarn/bb-remote-execution/pkg/proto/runner", + visibility = ["//visibility:public"], +) diff --git a/pkg/proto/runner/runner.pb.go b/pkg/proto/runner/runner.pb.go new file mode 100644 index 0000000..1fe76c2 --- /dev/null +++ b/pkg/proto/runner/runner.pb.go @@ -0,0 +1,437 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.31.0 +// protoc v4.23.4 +// source: pkg/proto/runner/runner.proto + +package runner + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + emptypb "google.golang.org/protobuf/types/known/emptypb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type RunRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Arguments []string `protobuf:"bytes,1,rep,name=arguments,proto3" json:"arguments,omitempty"` + EnvironmentVariables map[string]string `protobuf:"bytes,2,rep,name=environment_variables,json=environmentVariables,proto3" json:"environment_variables,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + WorkingDirectory string `protobuf:"bytes,3,opt,name=working_directory,json=workingDirectory,proto3" json:"working_directory,omitempty"` + StdoutPath string `protobuf:"bytes,4,opt,name=stdout_path,json=stdoutPath,proto3" json:"stdout_path,omitempty"` + StderrPath string `protobuf:"bytes,5,opt,name=stderr_path,json=stderrPath,proto3" json:"stderr_path,omitempty"` + InputRootDirectory string `protobuf:"bytes,6,opt,name=input_root_directory,json=inputRootDirectory,proto3" json:"input_root_directory,omitempty"` + TemporaryDirectory string `protobuf:"bytes,7,opt,name=temporary_directory,json=temporaryDirectory,proto3" json:"temporary_directory,omitempty"` +} + +func (x *RunRequest) Reset() { + *x = RunRequest{} + if 
protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_runner_runner_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RunRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RunRequest) ProtoMessage() {} + +func (x *RunRequest) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_runner_runner_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RunRequest.ProtoReflect.Descriptor instead. +func (*RunRequest) Descriptor() ([]byte, []int) { + return file_pkg_proto_runner_runner_proto_rawDescGZIP(), []int{0} +} + +func (x *RunRequest) GetArguments() []string { + if x != nil { + return x.Arguments + } + return nil +} + +func (x *RunRequest) GetEnvironmentVariables() map[string]string { + if x != nil { + return x.EnvironmentVariables + } + return nil +} + +func (x *RunRequest) GetWorkingDirectory() string { + if x != nil { + return x.WorkingDirectory + } + return "" +} + +func (x *RunRequest) GetStdoutPath() string { + if x != nil { + return x.StdoutPath + } + return "" +} + +func (x *RunRequest) GetStderrPath() string { + if x != nil { + return x.StderrPath + } + return "" +} + +func (x *RunRequest) GetInputRootDirectory() string { + if x != nil { + return x.InputRootDirectory + } + return "" +} + +func (x *RunRequest) GetTemporaryDirectory() string { + if x != nil { + return x.TemporaryDirectory + } + return "" +} + +type RunResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ExitCode int32 `protobuf:"varint,1,opt,name=exit_code,json=exitCode,proto3" json:"exit_code,omitempty"` + ResourceUsage []*anypb.Any `protobuf:"bytes,2,rep,name=resource_usage,json=resourceUsage,proto3" 
json:"resource_usage,omitempty"` +} + +func (x *RunResponse) Reset() { + *x = RunResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_runner_runner_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RunResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RunResponse) ProtoMessage() {} + +func (x *RunResponse) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_runner_runner_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RunResponse.ProtoReflect.Descriptor instead. +func (*RunResponse) Descriptor() ([]byte, []int) { + return file_pkg_proto_runner_runner_proto_rawDescGZIP(), []int{1} +} + +func (x *RunResponse) GetExitCode() int32 { + if x != nil { + return x.ExitCode + } + return 0 +} + +func (x *RunResponse) GetResourceUsage() []*anypb.Any { + if x != nil { + return x.ResourceUsage + } + return nil +} + +var File_pkg_proto_runner_runner_proto protoreflect.FileDescriptor + +var file_pkg_proto_runner_runner_proto_rawDesc = []byte{ + 0x0a, 0x1d, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x72, 0x75, 0x6e, 0x6e, + 0x65, 0x72, 0x2f, 0x72, 0x75, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x10, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x72, 0x75, 0x6e, 0x6e, 0x65, + 0x72, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, + 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xb2, 0x03, 0x0a, 0x0a, 0x52, 0x75, + 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 
0x74, 0x12, 0x1c, 0x0a, 0x09, 0x61, 0x72, 0x67, 0x75, + 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x61, 0x72, 0x67, + 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x6b, 0x0a, 0x15, 0x65, 0x6e, 0x76, 0x69, 0x72, 0x6f, + 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, + 0x6e, 0x2e, 0x72, 0x75, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x52, 0x75, 0x6e, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x2e, 0x45, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x56, + 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x14, 0x65, + 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, + 0x6c, 0x65, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x77, 0x6f, 0x72, 0x6b, 0x69, 0x6e, 0x67, 0x5f, 0x64, + 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, + 0x77, 0x6f, 0x72, 0x6b, 0x69, 0x6e, 0x67, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, + 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x74, 0x64, 0x6f, 0x75, 0x74, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x74, 0x64, 0x6f, 0x75, 0x74, 0x50, 0x61, 0x74, + 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x74, 0x64, 0x65, 0x72, 0x72, 0x5f, 0x70, 0x61, 0x74, 0x68, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x74, 0x64, 0x65, 0x72, 0x72, 0x50, 0x61, + 0x74, 0x68, 0x12, 0x30, 0x0a, 0x14, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x72, 0x6f, 0x6f, 0x74, + 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x12, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x52, 0x6f, 0x6f, 0x74, 0x44, 0x69, 0x72, 0x65, 0x63, + 0x74, 0x6f, 0x72, 0x79, 0x12, 0x2f, 0x0a, 0x13, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x72, + 0x79, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x07, 
0x20, 0x01, 0x28, + 0x09, 0x52, 0x12, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x72, 0x79, 0x44, 0x69, 0x72, 0x65, + 0x63, 0x74, 0x6f, 0x72, 0x79, 0x1a, 0x47, 0x0a, 0x19, 0x45, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, + 0x6d, 0x65, 0x6e, 0x74, 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x67, + 0x0a, 0x0b, 0x52, 0x75, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, + 0x09, 0x65, 0x78, 0x69, 0x74, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x08, 0x65, 0x78, 0x69, 0x74, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x3b, 0x0a, 0x0e, 0x72, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x75, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x55, 0x73, 0x61, 0x67, 0x65, 0x32, 0x8e, 0x01, 0x0a, 0x06, 0x52, 0x75, 0x6e, 0x6e, + 0x65, 0x72, 0x12, 0x40, 0x0a, 0x0e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x61, 0x64, 0x69, + 0x6e, 0x65, 0x73, 0x73, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x16, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, + 0x6d, 0x70, 0x74, 0x79, 0x12, 0x42, 0x0a, 0x03, 0x52, 0x75, 0x6e, 0x12, 0x1c, 0x2e, 0x62, 0x75, + 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x72, 0x75, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x52, + 0x75, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x62, 0x75, 0x69, 0x6c, + 0x64, 0x62, 0x61, 
0x72, 0x6e, 0x2e, 0x72, 0x75, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x52, 0x75, 0x6e, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x3b, 0x5a, 0x39, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, + 0x2f, 0x62, 0x62, 0x2d, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2d, 0x65, 0x78, 0x65, 0x63, 0x75, + 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x72, + 0x75, 0x6e, 0x6e, 0x65, 0x72, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_pkg_proto_runner_runner_proto_rawDescOnce sync.Once + file_pkg_proto_runner_runner_proto_rawDescData = file_pkg_proto_runner_runner_proto_rawDesc +) + +func file_pkg_proto_runner_runner_proto_rawDescGZIP() []byte { + file_pkg_proto_runner_runner_proto_rawDescOnce.Do(func() { + file_pkg_proto_runner_runner_proto_rawDescData = protoimpl.X.CompressGZIP(file_pkg_proto_runner_runner_proto_rawDescData) + }) + return file_pkg_proto_runner_runner_proto_rawDescData +} + +var file_pkg_proto_runner_runner_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_pkg_proto_runner_runner_proto_goTypes = []interface{}{ + (*RunRequest)(nil), // 0: buildbarn.runner.RunRequest + (*RunResponse)(nil), // 1: buildbarn.runner.RunResponse + nil, // 2: buildbarn.runner.RunRequest.EnvironmentVariablesEntry + (*anypb.Any)(nil), // 3: google.protobuf.Any + (*emptypb.Empty)(nil), // 4: google.protobuf.Empty +} +var file_pkg_proto_runner_runner_proto_depIdxs = []int32{ + 2, // 0: buildbarn.runner.RunRequest.environment_variables:type_name -> buildbarn.runner.RunRequest.EnvironmentVariablesEntry + 3, // 1: buildbarn.runner.RunResponse.resource_usage:type_name -> google.protobuf.Any + 4, // 2: buildbarn.runner.Runner.CheckReadiness:input_type -> google.protobuf.Empty + 0, // 3: buildbarn.runner.Runner.Run:input_type -> buildbarn.runner.RunRequest + 4, // 4: buildbarn.runner.Runner.CheckReadiness:output_type -> google.protobuf.Empty + 
1, // 5: buildbarn.runner.Runner.Run:output_type -> buildbarn.runner.RunResponse + 4, // [4:6] is the sub-list for method output_type + 2, // [2:4] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_pkg_proto_runner_runner_proto_init() } +func file_pkg_proto_runner_runner_proto_init() { + if File_pkg_proto_runner_runner_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_pkg_proto_runner_runner_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RunRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_proto_runner_runner_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RunResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_pkg_proto_runner_runner_proto_rawDesc, + NumEnums: 0, + NumMessages: 3, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_pkg_proto_runner_runner_proto_goTypes, + DependencyIndexes: file_pkg_proto_runner_runner_proto_depIdxs, + MessageInfos: file_pkg_proto_runner_runner_proto_msgTypes, + }.Build() + File_pkg_proto_runner_runner_proto = out.File + file_pkg_proto_runner_runner_proto_rawDesc = nil + file_pkg_proto_runner_runner_proto_goTypes = nil + file_pkg_proto_runner_runner_proto_depIdxs = nil +} + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// RunnerClient is the client API for Runner service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type RunnerClient interface { + CheckReadiness(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*emptypb.Empty, error) + Run(ctx context.Context, in *RunRequest, opts ...grpc.CallOption) (*RunResponse, error) +} + +type runnerClient struct { + cc grpc.ClientConnInterface +} + +func NewRunnerClient(cc grpc.ClientConnInterface) RunnerClient { + return &runnerClient{cc} +} + +func (c *runnerClient) CheckReadiness(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/buildbarn.runner.Runner/CheckReadiness", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *runnerClient) Run(ctx context.Context, in *RunRequest, opts ...grpc.CallOption) (*RunResponse, error) { + out := new(RunResponse) + err := c.cc.Invoke(ctx, "/buildbarn.runner.Runner/Run", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// RunnerServer is the server API for Runner service. +type RunnerServer interface { + CheckReadiness(context.Context, *emptypb.Empty) (*emptypb.Empty, error) + Run(context.Context, *RunRequest) (*RunResponse, error) +} + +// UnimplementedRunnerServer can be embedded to have forward compatible implementations. 
+type UnimplementedRunnerServer struct { +} + +func (*UnimplementedRunnerServer) CheckReadiness(context.Context, *emptypb.Empty) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method CheckReadiness not implemented") +} +func (*UnimplementedRunnerServer) Run(context.Context, *RunRequest) (*RunResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Run not implemented") +} + +func RegisterRunnerServer(s grpc.ServiceRegistrar, srv RunnerServer) { + s.RegisterService(&_Runner_serviceDesc, srv) +} + +func _Runner_CheckReadiness_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(emptypb.Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RunnerServer).CheckReadiness(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/buildbarn.runner.Runner/CheckReadiness", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RunnerServer).CheckReadiness(ctx, req.(*emptypb.Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func _Runner_Run_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RunRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RunnerServer).Run(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/buildbarn.runner.Runner/Run", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RunnerServer).Run(ctx, req.(*RunRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Runner_serviceDesc = grpc.ServiceDesc{ + ServiceName: "buildbarn.runner.Runner", + HandlerType: (*RunnerServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CheckReadiness", + Handler: 
_Runner_CheckReadiness_Handler,
+		},
+		{
+			MethodName: "Run",
+			Handler:    _Runner_Run_Handler,
+		},
+	},
+	Streams:  []grpc.StreamDesc{},
+	Metadata: "pkg/proto/runner/runner.proto",
+}
diff --git a/pkg/proto/runner/runner.proto b/pkg/proto/runner/runner.proto
new file mode 100644
index 0000000..45169a4
--- /dev/null
+++ b/pkg/proto/runner/runner.proto
@@ -0,0 +1,60 @@
+syntax = "proto3";
+
+package buildbarn.runner;
+
+import "google/protobuf/any.proto";
+import "google/protobuf/empty.proto";
+
+option go_package = "github.com/buildbarn/bb-remote-execution/pkg/proto/runner";
+
+// In order to make the execution strategy of bb_worker pluggable and
+// capable of supporting privilege separation, bb_worker calls into a
+// runner service to invoke the desired command after setting up inputs
+// accordingly. This service needs to be implemented by the runner.
+//
+// Using separate runner processes also prevents bb_worker from
+// forking. This is good, as UNIX systems (without native support for
+// spawning processes without forking) have an inherent race condition
+// that effectively prevents bb_worker from both writing executables to
+// disk and executing them. More details:
+//
+// https://github.com/golang/go/issues/22315
+service Runner {
+  rpc CheckReadiness(google.protobuf.Empty) returns (google.protobuf.Empty);
+  rpc Run(RunRequest) returns (RunResponse);
+}
+
+message RunRequest {
+  // Command line arguments that need to be set.
+  repeated string arguments = 1;
+
+  // Environment variables that need to be set.
+  map<string, string> environment_variables = 2;
+
+  // Working directory, relative to the input root directory.
+  string working_directory = 3;
+
+  // Path where data written over stdout should be stored, relative to
+  // the build directory.
+  string stdout_path = 4;
+
+  // Path where data written over stderr should be stored, relative to
+  // the build directory.
+  string stderr_path = 5;
+
+  // Path of the input root, relative to the build directory.
+ string input_root_directory = 6; + + // Path of a scratch space directory that may be used by the build + // action, relative to the build directory. + string temporary_directory = 7; +} + +message RunResponse { + // Exit code generated by the process. + int32 exit_code = 1; + + // Runner-specific information on the amount of resources used during + // execution. + repeated google.protobuf.Any resource_usage = 2; +} diff --git a/pkg/proto/tmp_installer/BUILD.bazel b/pkg/proto/tmp_installer/BUILD.bazel new file mode 100644 index 0000000..e635143 --- /dev/null +++ b/pkg/proto/tmp_installer/BUILD.bazel @@ -0,0 +1,25 @@ +load("@rules_proto//proto:defs.bzl", "proto_library") +load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") + +proto_library( + name = "tmp_installer_proto", + srcs = ["tmp_installer.proto"], + visibility = ["//visibility:public"], + deps = ["@com_google_protobuf//:empty_proto"], +) + +go_proto_library( + name = "tmp_installer_go_proto", + compilers = ["@io_bazel_rules_go//proto:go_grpc"], + importpath = "github.com/buildbarn/bb-remote-execution/pkg/proto/tmp_installer", + proto = ":tmp_installer_proto", + visibility = ["//visibility:public"], +) + +go_library( + name = "tmp_installer", + embed = [":tmp_installer_go_proto"], + importpath = "github.com/buildbarn/bb-remote-execution/pkg/proto/tmp_installer", + visibility = ["//visibility:public"], +) diff --git a/pkg/proto/tmp_installer/tmp_installer.pb.go b/pkg/proto/tmp_installer/tmp_installer.pb.go new file mode 100644 index 0000000..baa8415 --- /dev/null +++ b/pkg/proto/tmp_installer/tmp_installer.pb.go @@ -0,0 +1,292 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.31.0 +// protoc v4.23.4 +// source: pkg/proto/tmp_installer/tmp_installer.proto + +package tmp_installer + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type InstallTemporaryDirectoryRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TemporaryDirectory string `protobuf:"bytes,1,opt,name=temporary_directory,json=temporaryDirectory,proto3" json:"temporary_directory,omitempty"` +} + +func (x *InstallTemporaryDirectoryRequest) Reset() { + *x = InstallTemporaryDirectoryRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_tmp_installer_tmp_installer_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *InstallTemporaryDirectoryRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*InstallTemporaryDirectoryRequest) ProtoMessage() {} + +func (x *InstallTemporaryDirectoryRequest) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_tmp_installer_tmp_installer_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use InstallTemporaryDirectoryRequest.ProtoReflect.Descriptor instead. 
+func (*InstallTemporaryDirectoryRequest) Descriptor() ([]byte, []int) { + return file_pkg_proto_tmp_installer_tmp_installer_proto_rawDescGZIP(), []int{0} +} + +func (x *InstallTemporaryDirectoryRequest) GetTemporaryDirectory() string { + if x != nil { + return x.TemporaryDirectory + } + return "" +} + +var File_pkg_proto_tmp_installer_tmp_installer_proto protoreflect.FileDescriptor + +var file_pkg_proto_tmp_installer_tmp_installer_proto_rawDesc = []byte{ + 0x0a, 0x2b, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x74, 0x6d, 0x70, 0x5f, + 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x2f, 0x74, 0x6d, 0x70, 0x5f, 0x69, 0x6e, + 0x73, 0x74, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x17, 0x62, + 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x74, 0x6d, 0x70, 0x5f, 0x69, 0x6e, 0x73, + 0x74, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x22, 0x53, 0x0a, 0x20, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6c, 0x6c, 0x54, 0x65, + 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x72, 0x79, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2f, 0x0a, 0x13, 0x74, 0x65, 0x6d, 0x70, 0x6f, + 0x72, 0x61, 0x72, 0x79, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x72, 0x79, 0x44, + 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x32, 0xcf, 0x01, 0x0a, 0x1b, 0x54, 0x65, 0x6d, + 0x70, 0x6f, 0x72, 0x61, 0x72, 0x79, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x49, + 0x6e, 0x73, 0x74, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x12, 0x40, 0x0a, 0x0e, 0x43, 0x68, 0x65, 0x63, + 0x6b, 0x52, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x65, 0x73, 0x73, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 
0x66, 0x2e, 0x45, 0x6d, 0x70, + 0x74, 0x79, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x6e, 0x0a, 0x19, 0x49, 0x6e, + 0x73, 0x74, 0x61, 0x6c, 0x6c, 0x54, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x72, 0x79, 0x44, 0x69, + 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x39, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, + 0x61, 0x72, 0x6e, 0x2e, 0x74, 0x6d, 0x70, 0x5f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6c, 0x6c, 0x65, + 0x72, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6c, 0x6c, 0x54, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, + 0x72, 0x79, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x42, 0x42, 0x5a, 0x40, 0x67, 0x69, + 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, + 0x72, 0x6e, 0x2f, 0x62, 0x62, 0x2d, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2d, 0x65, 0x78, 0x65, + 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2f, 0x74, 0x6d, 0x70, 0x5f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_pkg_proto_tmp_installer_tmp_installer_proto_rawDescOnce sync.Once + file_pkg_proto_tmp_installer_tmp_installer_proto_rawDescData = file_pkg_proto_tmp_installer_tmp_installer_proto_rawDesc +) + +func file_pkg_proto_tmp_installer_tmp_installer_proto_rawDescGZIP() []byte { + file_pkg_proto_tmp_installer_tmp_installer_proto_rawDescOnce.Do(func() { + file_pkg_proto_tmp_installer_tmp_installer_proto_rawDescData = protoimpl.X.CompressGZIP(file_pkg_proto_tmp_installer_tmp_installer_proto_rawDescData) + }) + return file_pkg_proto_tmp_installer_tmp_installer_proto_rawDescData +} + +var 
file_pkg_proto_tmp_installer_tmp_installer_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_pkg_proto_tmp_installer_tmp_installer_proto_goTypes = []interface{}{ + (*InstallTemporaryDirectoryRequest)(nil), // 0: buildbarn.tmp_installer.InstallTemporaryDirectoryRequest + (*emptypb.Empty)(nil), // 1: google.protobuf.Empty +} +var file_pkg_proto_tmp_installer_tmp_installer_proto_depIdxs = []int32{ + 1, // 0: buildbarn.tmp_installer.TemporaryDirectoryInstaller.CheckReadiness:input_type -> google.protobuf.Empty + 0, // 1: buildbarn.tmp_installer.TemporaryDirectoryInstaller.InstallTemporaryDirectory:input_type -> buildbarn.tmp_installer.InstallTemporaryDirectoryRequest + 1, // 2: buildbarn.tmp_installer.TemporaryDirectoryInstaller.CheckReadiness:output_type -> google.protobuf.Empty + 1, // 3: buildbarn.tmp_installer.TemporaryDirectoryInstaller.InstallTemporaryDirectory:output_type -> google.protobuf.Empty + 2, // [2:4] is the sub-list for method output_type + 0, // [0:2] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_pkg_proto_tmp_installer_tmp_installer_proto_init() } +func file_pkg_proto_tmp_installer_tmp_installer_proto_init() { + if File_pkg_proto_tmp_installer_tmp_installer_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_pkg_proto_tmp_installer_tmp_installer_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*InstallTemporaryDirectoryRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_pkg_proto_tmp_installer_tmp_installer_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + 
NumServices: 1, + }, + GoTypes: file_pkg_proto_tmp_installer_tmp_installer_proto_goTypes, + DependencyIndexes: file_pkg_proto_tmp_installer_tmp_installer_proto_depIdxs, + MessageInfos: file_pkg_proto_tmp_installer_tmp_installer_proto_msgTypes, + }.Build() + File_pkg_proto_tmp_installer_tmp_installer_proto = out.File + file_pkg_proto_tmp_installer_tmp_installer_proto_rawDesc = nil + file_pkg_proto_tmp_installer_tmp_installer_proto_goTypes = nil + file_pkg_proto_tmp_installer_tmp_installer_proto_depIdxs = nil +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// TemporaryDirectoryInstallerClient is the client API for TemporaryDirectoryInstaller service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type TemporaryDirectoryInstallerClient interface { + CheckReadiness(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*emptypb.Empty, error) + InstallTemporaryDirectory(ctx context.Context, in *InstallTemporaryDirectoryRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) +} + +type temporaryDirectoryInstallerClient struct { + cc grpc.ClientConnInterface +} + +func NewTemporaryDirectoryInstallerClient(cc grpc.ClientConnInterface) TemporaryDirectoryInstallerClient { + return &temporaryDirectoryInstallerClient{cc} +} + +func (c *temporaryDirectoryInstallerClient) CheckReadiness(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/buildbarn.tmp_installer.TemporaryDirectoryInstaller/CheckReadiness", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *temporaryDirectoryInstallerClient) InstallTemporaryDirectory(ctx context.Context, in *InstallTemporaryDirectoryRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/buildbarn.tmp_installer.TemporaryDirectoryInstaller/InstallTemporaryDirectory", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// TemporaryDirectoryInstallerServer is the server API for TemporaryDirectoryInstaller service. +type TemporaryDirectoryInstallerServer interface { + CheckReadiness(context.Context, *emptypb.Empty) (*emptypb.Empty, error) + InstallTemporaryDirectory(context.Context, *InstallTemporaryDirectoryRequest) (*emptypb.Empty, error) +} + +// UnimplementedTemporaryDirectoryInstallerServer can be embedded to have forward compatible implementations. +type UnimplementedTemporaryDirectoryInstallerServer struct { +} + +func (*UnimplementedTemporaryDirectoryInstallerServer) CheckReadiness(context.Context, *emptypb.Empty) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method CheckReadiness not implemented") +} +func (*UnimplementedTemporaryDirectoryInstallerServer) InstallTemporaryDirectory(context.Context, *InstallTemporaryDirectoryRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method InstallTemporaryDirectory not implemented") +} + +func RegisterTemporaryDirectoryInstallerServer(s grpc.ServiceRegistrar, srv TemporaryDirectoryInstallerServer) { + s.RegisterService(&_TemporaryDirectoryInstaller_serviceDesc, srv) +} + +func _TemporaryDirectoryInstaller_CheckReadiness_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(emptypb.Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return 
srv.(TemporaryDirectoryInstallerServer).CheckReadiness(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/buildbarn.tmp_installer.TemporaryDirectoryInstaller/CheckReadiness", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TemporaryDirectoryInstallerServer).CheckReadiness(ctx, req.(*emptypb.Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func _TemporaryDirectoryInstaller_InstallTemporaryDirectory_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(InstallTemporaryDirectoryRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TemporaryDirectoryInstallerServer).InstallTemporaryDirectory(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/buildbarn.tmp_installer.TemporaryDirectoryInstaller/InstallTemporaryDirectory", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TemporaryDirectoryInstallerServer).InstallTemporaryDirectory(ctx, req.(*InstallTemporaryDirectoryRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _TemporaryDirectoryInstaller_serviceDesc = grpc.ServiceDesc{ + ServiceName: "buildbarn.tmp_installer.TemporaryDirectoryInstaller", + HandlerType: (*TemporaryDirectoryInstallerServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CheckReadiness", + Handler: _TemporaryDirectoryInstaller_CheckReadiness_Handler, + }, + { + MethodName: "InstallTemporaryDirectory", + Handler: _TemporaryDirectoryInstaller_InstallTemporaryDirectory_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "pkg/proto/tmp_installer/tmp_installer.proto", +} diff --git a/pkg/proto/tmp_installer/tmp_installer.proto b/pkg/proto/tmp_installer/tmp_installer.proto new file mode 100644 index 0000000..e212f2d --- /dev/null +++ 
b/pkg/proto/tmp_installer/tmp_installer.proto
@@ -0,0 +1,45 @@
+syntax = "proto3";
+
+package buildbarn.tmp_installer;
+
+import "google/protobuf/empty.proto";
+
+option go_package = "github.com/buildbarn/bb-remote-execution/pkg/proto/tmp_installer";
+
+// For every build action executed, bb_worker creates a temporary
+// directory that may be used by bb_runner and the build action as it
+// needs. This temporary directory is automatically purged when the
+// build action completes.
+//
+// bb_runner is capable of passing on this directory to the build action
+// by setting the TMPDIR environment variable. In practice, there tend
+// to be many build actions that simply ignore this environment
+// variable. They create files in /tmp unconditionally.
+//
+// To work around this issue, the TemporaryDirectoryInstaller service
+// may be used to call into a potentially privileged helper process that
+// rewrites access to /tmp in a platform specific way:
+//
+// - If the runner is known not to provide any concurrency, the helper
+//   process may remove /tmp and replace it by a symbolic link.
+//
+// - Operating systems such as NetBSD provide a feature called 'magic
+//   symlinks'. When enabled, symbolic link targets may contain tags
+//   such as @uid that cause them to expand to different targets, based
+//   on the user ID of the calling process.
+//
+// - Operating systems that provide support for userspace file systems
+//   may virtualize /tmp into a symbolic link that dynamically resolves
+//   to the temporary directory corresponding to the build action to
+//   which the calling process belongs.
+service TemporaryDirectoryInstaller {
+  rpc CheckReadiness(google.protobuf.Empty) returns (google.protobuf.Empty);
+  rpc InstallTemporaryDirectory(InstallTemporaryDirectoryRequest)
+      returns (google.protobuf.Empty);
+}
+
+message InstallTemporaryDirectoryRequest {
+  // Path of a scratch space directory that may be used by the build
+  // action, relative to the build directory.
+ string temporary_directory = 1; +} diff --git a/pkg/runner/BUILD.bazel b/pkg/runner/BUILD.bazel new file mode 100644 index 0000000..de0c7fe --- /dev/null +++ b/pkg/runner/BUILD.bazel @@ -0,0 +1,91 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "runner", + srcs = [ + "apple_xcode_resolving_runner.go", + "clean_runner.go", + "local_runner.go", + "local_runner_darwin.go", + "local_runner_rss_bytes.go", + "local_runner_rss_kibibytes.go", + "local_runner_unix.go", + "local_runner_windows.go", + "path_existence_checking_runner.go", + "temporary_directory_installing_runner.go", + "temporary_directory_symlinking_runner.go", + ], + importpath = "github.com/buildbarn/bb-remote-execution/pkg/runner", + visibility = ["//visibility:public"], + deps = [ + "//pkg/cleaner", + "//pkg/proto/runner", + "//pkg/proto/tmp_installer", + "@com_github_buildbarn_bb_storage//pkg/filesystem", + "@com_github_buildbarn_bb_storage//pkg/filesystem/path", + "@com_github_buildbarn_bb_storage//pkg/util", + "@org_golang_google_grpc//codes", + "@org_golang_google_grpc//status", + "@org_golang_google_protobuf//proto", + "@org_golang_google_protobuf//types/known/anypb", + "@org_golang_google_protobuf//types/known/emptypb", + ] + select({ + "@io_bazel_rules_go//go/platform:android": [ + "//pkg/proto/resourceusage", + "@org_golang_google_protobuf//types/known/durationpb", + "@org_golang_x_sys//unix", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "//pkg/proto/resourceusage", + "@org_golang_google_protobuf//types/known/durationpb", + "@org_golang_x_sys//unix", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "//pkg/proto/resourceusage", + "@org_golang_google_protobuf//types/known/durationpb", + "@org_golang_x_sys//unix", + ], + "@io_bazel_rules_go//go/platform:ios": [ + "//pkg/proto/resourceusage", + "@org_golang_google_protobuf//types/known/durationpb", + "@org_golang_x_sys//unix", + ], + "@io_bazel_rules_go//go/platform:linux": [ + 
"//pkg/proto/resourceusage", + "@org_golang_google_protobuf//types/known/durationpb", + "@org_golang_x_sys//unix", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "//pkg/proto/resourceusage", + "@org_golang_google_protobuf//types/known/durationpb", + "@org_golang_x_sys//windows", + ], + "//conditions:default": [], + }), +) + +go_test( + name = "runner_test", + srcs = [ + "apple_xcode_resolving_runner_test.go", + "clean_runner_test.go", + "local_runner_test.go", + "path_existence_checking_runner_test.go", + "temporary_directory_symlinking_runner_test.go", + ], + deps = [ + ":runner", + "//internal/mock", + "//pkg/cleaner", + "//pkg/proto/resourceusage", + "//pkg/proto/runner", + "@com_github_buildbarn_bb_storage//pkg/filesystem", + "@com_github_buildbarn_bb_storage//pkg/filesystem/path", + "@com_github_buildbarn_bb_storage//pkg/testutil", + "@com_github_golang_mock//gomock", + "@com_github_stretchr_testify//require", + "@org_golang_google_grpc//codes", + "@org_golang_google_grpc//status", + "@org_golang_google_protobuf//types/known/emptypb", + ], +) diff --git a/pkg/runner/apple_xcode_resolving_runner.go b/pkg/runner/apple_xcode_resolving_runner.go new file mode 100644 index 0000000..924f007 --- /dev/null +++ b/pkg/runner/apple_xcode_resolving_runner.go @@ -0,0 +1,162 @@ +package runner + +import ( + "context" + "errors" + "fmt" + "os/exec" + "sort" + "strings" + "sync" + + runner_pb "github.com/buildbarn/bb-remote-execution/pkg/proto/runner" + "github.com/buildbarn/bb-storage/pkg/util" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" +) + +const ( + // Environment variables provided by Bazel. + environmentVariableXcodeVersion = "XCODE_VERSION_OVERRIDE" + environmentVariableSDKPlatform = "APPLE_SDK_PLATFORM" + environmentVariableSDKVersion = "APPLE_SDK_VERSION_OVERRIDE" + + // Environment variables provided to the build action. 
+ environmentVariableDeveloperDirectory = "DEVELOPER_DIR" + environmentVariableSDKRoot = "SDKROOT" +) + +// AppleXcodeSDKRootResolver is a callback function that is used to +// obtain the path of an Xcode SDK root directory (SDKROOT), given a +// developer directory (DEVELOPER_DIR) and an SDK name. +type AppleXcodeSDKRootResolver func(ctx context.Context, developerDirectory, sdkName string) (string, error) + +// LocalAppleXcodeSDKRootResolver resolves the SDK root directory +// (SDKROOT) by calling into the xcrun utility on the current system. +func LocalAppleXcodeSDKRootResolver(ctx context.Context, developerDirectory, sdkName string) (string, error) { + cmd := exec.CommandContext(ctx, "/usr/bin/xcrun", "--sdk", sdkName, "--show-sdk-path") + cmd.Env = []string{environmentVariableDeveloperDirectory + "=" + developerDirectory} + output, err := cmd.Output() + if err != nil { + var exitErr *exec.ExitError + if errors.As(err, &exitErr) { + return "", status.Errorf(codes.FailedPrecondition, "xcrun failed with output %#v", string(exitErr.Stderr)) + } + return "", err + } + return strings.TrimSpace(string(output)), nil +} + +// NewCachingAppleXcodeSDKRootResolver creates a decorator for +// AppleXcodeSDKRootResolver that caches successful results of +// successive calls. As it is assumed that the number of SDKs installed +// on the current system is small, no bounds are placed on the maximum +// cache size. 
+func NewCachingAppleXcodeSDKRootResolver(base AppleXcodeSDKRootResolver) AppleXcodeSDKRootResolver { + type cacheKey struct { + developerDirectory string + sdkName string + } + var lock sync.Mutex + cache := map[cacheKey]string{} + + return func(ctx context.Context, developerDirectory, sdkName string) (string, error) { + key := cacheKey{ + developerDirectory: developerDirectory, + sdkName: sdkName, + } + lock.Lock() + sdkRoot, ok := cache[key] + lock.Unlock() + if ok { + return sdkRoot, nil + } + + sdkRoot, err := base(ctx, developerDirectory, sdkName) + if err != nil { + return "", err + } + + lock.Lock() + cache[key] = sdkRoot + lock.Unlock() + return sdkRoot, nil + } +} + +type appleXcodeResolvingRunner struct { + runner_pb.RunnerServer + developerDirectories map[string]string + supportedVersions string + sdkRootResolver AppleXcodeSDKRootResolver +} + +// NewAppleXcodeResolvingRunner creates a decorator for RunnerServer +// that injects DEVELOPER_DIR and SDKROOT environment variables into +// actions, based on the presence of APPLE_SDK_PLATFORM, +// APPLE_SDK_VERSION_OVERRIDE, and XCODE_VERSION_OVERRIDE environment +// variables. +// +// This decorator can be used on macOS workers to let build actions +// choose between one of the copies of Xcode that is installed on the +// worker, without requiring that the client hardcodes the absolute path +// at which Xcode is installed. +// +// This decorator implements the convention that is used by Bazel. For +// local execution, Bazel implements similar logic as part of class +// com.google.devtools.build.lib.exec.local.XcodeLocalEnvProvider. +func NewAppleXcodeResolvingRunner(base runner_pb.RunnerServer, developerDirectories map[string]string, sdkRootResolver AppleXcodeSDKRootResolver) runner_pb.RunnerServer { + // Create a sorted list of all Xcode versions, to display as + // part of error messages. 
+ supportedVersions := make([]string, 0, len(developerDirectories)) + for supportedVersion := range developerDirectories { + supportedVersions = append(supportedVersions, supportedVersion) + } + sort.Strings(supportedVersions) + + return &appleXcodeResolvingRunner{ + RunnerServer: base, + developerDirectories: developerDirectories, + supportedVersions: fmt.Sprintf("%v", supportedVersions), + sdkRootResolver: sdkRootResolver, + } +} + +func (r *appleXcodeResolvingRunner) Run(ctx context.Context, oldRequest *runner_pb.RunRequest) (*runner_pb.RunResponse, error) { + // Check whether we need to infer DEVELOPER_DIR from + // XCODE_VERSION_OVERRIDE. + oldEnvironmentVariables := oldRequest.EnvironmentVariables + _, hasDeveloperDir := oldEnvironmentVariables[environmentVariableDeveloperDirectory] + xcodeVersion, hasXcodeVersion := oldEnvironmentVariables[environmentVariableXcodeVersion] + if hasDeveloperDir || !hasXcodeVersion { + return r.RunnerServer.Run(ctx, oldRequest) + } + + developerDir, ok := r.developerDirectories[xcodeVersion] + if !ok { + return nil, status.Errorf(codes.FailedPrecondition, "Attempted to use Xcode installation with version %#v, while only %s are supported", xcodeVersion, r.supportedVersions) + } + + var newRequest runner_pb.RunRequest + proto.Merge(&newRequest, oldRequest) + newEnvironment := newRequest.EnvironmentVariables + newEnvironment[environmentVariableDeveloperDirectory] = developerDir + + // Check whether we need to infer SDKROOT from + // APPLE_SDK_PLATFORM and APPLE_SDK_VERSION_OVERRIDE. 
+ _, hasSDKRoot := oldEnvironmentVariables[environmentVariableSDKRoot] + sdkPlatform, hasSDKPlatform := oldEnvironmentVariables[environmentVariableSDKPlatform] + sdkVersion, hasSDKVersion := oldEnvironmentVariables[environmentVariableSDKVersion] + if !hasSDKRoot && hasSDKPlatform && hasSDKVersion { + sdkName := strings.ToLower(sdkPlatform) + sdkVersion + sdkRoot, err := r.sdkRootResolver(ctx, developerDir, sdkName) + if err != nil { + return nil, util.StatusWrapf(err, "Cannot resolve root for SDK %#v in Xcode developer directory %#v", sdkName, developerDir) + } + newEnvironment[environmentVariableSDKRoot] = sdkRoot + } + + return r.RunnerServer.Run(ctx, &newRequest) +} diff --git a/pkg/runner/apple_xcode_resolving_runner_test.go b/pkg/runner/apple_xcode_resolving_runner_test.go new file mode 100644 index 0000000..d56c270 --- /dev/null +++ b/pkg/runner/apple_xcode_resolving_runner_test.go @@ -0,0 +1,171 @@ +package runner_test + +import ( + "context" + "testing" + + "github.com/buildbarn/bb-remote-execution/internal/mock" + runner_pb "github.com/buildbarn/bb-remote-execution/pkg/proto/runner" + "github.com/buildbarn/bb-remote-execution/pkg/runner" + "github.com/buildbarn/bb-storage/pkg/testutil" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func TestAppleXcodeResolvingRunner(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + baseRunner := mock.NewMockRunnerServer(ctrl) + sdkRootResolver := mock.NewMockAppleXcodeSDKRootResolver(ctrl) + runner := runner.NewAppleXcodeResolvingRunner( + baseRunner, + map[string]string{ + "13.4.1.13F100": "/Applications/Xcode13.app/Contents/Developer", + "14.2.0.14C18": "/Applications/Xcode14.app/Contents/Developer", + }, + sdkRootResolver.Call) + + response := &runner_pb.RunResponse{ + ExitCode: 123, + } + + t.Run("NoXcodeVersionSet", func(t *testing.T) { + // This decorator should not have any effect if no 
Xcode + // version is provided. + request := &runner_pb.RunRequest{ + Arguments: []string{"cc", "-o", "hello.o", "hello.c"}, + EnvironmentVariables: map[string]string{ + "PATH": "/bin:/usr/bin:/usr/local/bin", + }, + } + baseRunner.EXPECT().Run(ctx, testutil.EqProto(t, request)).Return(response, nil) + + observedResponse, err := runner.Run(ctx, request) + require.NoError(t, err) + testutil.RequireEqualProto(t, response, observedResponse) + }) + + t.Run("DeveloperDirAlreadySet", func(t *testing.T) { + // If DEVELOPER_DIR is already set, this decorator should have + // no effect. + request := &runner_pb.RunRequest{ + Arguments: []string{"cc", "-o", "hello.o", "hello.c"}, + EnvironmentVariables: map[string]string{ + "DEVELOPER_DIR": "/some/path", + "XCODE_VERSION_OVERRIDE": "13.4.1.13F100", + }, + } + baseRunner.EXPECT().Run(ctx, testutil.EqProto(t, request)).Return(response, nil) + + observedResponse, err := runner.Run(ctx, request) + require.NoError(t, err) + testutil.RequireEqualProto(t, response, observedResponse) + }) + + t.Run("InvalidXcodeVersion", func(t *testing.T) { + // Execution requests that use an unknown version of + // Xcode should be rejected. + _, err := runner.Run(ctx, &runner_pb.RunRequest{ + Arguments: []string{"cc", "-o", "hello.o", "hello.c"}, + EnvironmentVariables: map[string]string{ + "XCODE_VERSION_OVERRIDE": "12.5.1.12E507", + }, + }) + testutil.RequireEqualStatus(t, status.Error(codes.FailedPrecondition, "Attempted to use Xcode installation with version \"12.5.1.12E507\", while only [13.4.1.13F100 14.2.0.14C18] are supported"), err) + }) + + t.Run("OnlyDeveloperDir", func(t *testing.T) { + // If only XCODE_VERSION_OVERRIDE is set without providing + // APPLE_SDK_PLATFORM and APPLE_SDK_VERSION_OVERRIDE, we + // can only set DEVELOPER_DIR. SDKROOT cannot be set. 
+ baseRunner.EXPECT().Run(ctx, testutil.EqProto(t, &runner_pb.RunRequest{ + Arguments: []string{"cc", "-o", "hello.o", "hello.c"}, + EnvironmentVariables: map[string]string{ + "DEVELOPER_DIR": "/Applications/Xcode13.app/Contents/Developer", + "XCODE_VERSION_OVERRIDE": "13.4.1.13F100", + }, + })).Return(response, nil) + + observedResponse, err := runner.Run(ctx, &runner_pb.RunRequest{ + Arguments: []string{"cc", "-o", "hello.o", "hello.c"}, + EnvironmentVariables: map[string]string{ + "XCODE_VERSION_OVERRIDE": "13.4.1.13F100", + }, + }) + require.NoError(t, err) + testutil.RequireEqualProto(t, response, observedResponse) + }) + + t.Run("SDKRootAlreadySet", func(t *testing.T) { + // If SDKROOT is set, we should not overwrite it. + baseRunner.EXPECT().Run(ctx, testutil.EqProto(t, &runner_pb.RunRequest{ + Arguments: []string{"cc", "-o", "hello.o", "hello.c"}, + EnvironmentVariables: map[string]string{ + "APPLE_SDK_PLATFORM": "MacOSX", + "APPLE_SDK_VERSION_OVERRIDE": "13.1", + "DEVELOPER_DIR": "/Applications/Xcode13.app/Contents/Developer", + "SDKROOT": "/some/path", + "XCODE_VERSION_OVERRIDE": "13.4.1.13F100", + }, + })).Return(response, nil) + + observedResponse, err := runner.Run(ctx, &runner_pb.RunRequest{ + Arguments: []string{"cc", "-o", "hello.o", "hello.c"}, + EnvironmentVariables: map[string]string{ + "APPLE_SDK_PLATFORM": "MacOSX", + "APPLE_SDK_VERSION_OVERRIDE": "13.1", + "SDKROOT": "/some/path", + "XCODE_VERSION_OVERRIDE": "13.4.1.13F100", + }, + }) + require.NoError(t, err) + testutil.RequireEqualProto(t, response, observedResponse) + }) + + t.Run("UnknownSDK", func(t *testing.T) { + // Errors resolving the root of the SDK should be propagated. + sdkRootResolver.EXPECT().Call(ctx, "/Applications/Xcode13.app/Contents/Developer", "macosx13.1"). 
+ Return("", status.Error(codes.FailedPrecondition, "SDK not found")) + + _, err := runner.Run(ctx, &runner_pb.RunRequest{ + Arguments: []string{"cc", "-o", "hello.o", "hello.c"}, + EnvironmentVariables: map[string]string{ + "APPLE_SDK_PLATFORM": "MacOSX", + "APPLE_SDK_VERSION_OVERRIDE": "13.1", + "XCODE_VERSION_OVERRIDE": "13.4.1.13F100", + }, + }) + testutil.RequireEqualStatus(t, status.Error(codes.FailedPrecondition, "Cannot resolve root for SDK \"macosx13.1\" in Xcode developer directory \"/Applications/Xcode13.app/Contents/Developer\": SDK not found"), err) + }) + + t.Run("BothDeveloperDirAndSDKRoot", func(t *testing.T) { + // Example invocation where both DEVELOPER_DIR and + // SDKROOT end up getting set. + sdkRootResolver.EXPECT().Call(ctx, "/Applications/Xcode13.app/Contents/Developer", "macosx13.1"). + Return("/Applications/Xcode13.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX13.1.sdk", nil) + baseRunner.EXPECT().Run(ctx, testutil.EqProto(t, &runner_pb.RunRequest{ + Arguments: []string{"cc", "-o", "hello.o", "hello.c"}, + EnvironmentVariables: map[string]string{ + "APPLE_SDK_PLATFORM": "MacOSX", + "APPLE_SDK_VERSION_OVERRIDE": "13.1", + "DEVELOPER_DIR": "/Applications/Xcode13.app/Contents/Developer", + "SDKROOT": "/Applications/Xcode13.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX13.1.sdk", + "XCODE_VERSION_OVERRIDE": "13.4.1.13F100", + }, + })).Return(response, nil) + + observedResponse, err := runner.Run(ctx, &runner_pb.RunRequest{ + Arguments: []string{"cc", "-o", "hello.o", "hello.c"}, + EnvironmentVariables: map[string]string{ + "APPLE_SDK_PLATFORM": "MacOSX", + "APPLE_SDK_VERSION_OVERRIDE": "13.1", + "XCODE_VERSION_OVERRIDE": "13.4.1.13F100", + }, + }) + require.NoError(t, err) + testutil.RequireEqualProto(t, response, observedResponse) + }) +} diff --git a/pkg/runner/clean_runner.go b/pkg/runner/clean_runner.go new file mode 100644 index 0000000..0166dea --- /dev/null +++ b/pkg/runner/clean_runner.go 
@@ -0,0 +1,52 @@ +package runner + +import ( + "context" + + "github.com/buildbarn/bb-remote-execution/pkg/cleaner" + runner_pb "github.com/buildbarn/bb-remote-execution/pkg/proto/runner" + + "google.golang.org/protobuf/types/known/emptypb" +) + +type cleanRunner struct { + base runner_pb.RunnerServer + idleInvoker *cleaner.IdleInvoker +} + +// NewCleanRunner is a decorator for Runner that calls into an +// IdleInvoker before and after running a build action. +// +// This decorator can be used to run cleanup tasks that are needed to be +// performed to bring the execution environment in a consistent state +// (e.g., scrubbing the process table, removing stale temporary files). +func NewCleanRunner(base runner_pb.RunnerServer, idleInvoker *cleaner.IdleInvoker) runner_pb.RunnerServer { + return &cleanRunner{ + base: base, + idleInvoker: idleInvoker, + } +} + +func (r *cleanRunner) Run(ctx context.Context, request *runner_pb.RunRequest) (*runner_pb.RunResponse, error) { + if err := r.idleInvoker.Acquire(ctx); err != nil { + return nil, err + } + response, err1 := r.base.Run(ctx, request) + err2 := r.idleInvoker.Release(ctx) + if err1 != nil { + return nil, err1 + } + return response, err2 +} + +func (r *cleanRunner) CheckReadiness(ctx context.Context, request *emptypb.Empty) (*emptypb.Empty, error) { + if err := r.idleInvoker.Acquire(ctx); err != nil { + return nil, err + } + response, err1 := r.base.CheckReadiness(ctx, request) + err2 := r.idleInvoker.Release(ctx) + if err1 != nil { + return nil, err1 + } + return response, err2 +} diff --git a/pkg/runner/clean_runner_test.go b/pkg/runner/clean_runner_test.go new file mode 100644 index 0000000..6cac4e7 --- /dev/null +++ b/pkg/runner/clean_runner_test.go @@ -0,0 +1,85 @@ +package runner_test + +import ( + "context" + "testing" + + "github.com/buildbarn/bb-remote-execution/internal/mock" + "github.com/buildbarn/bb-remote-execution/pkg/cleaner" + runner_pb "github.com/buildbarn/bb-remote-execution/pkg/proto/runner" + 
"github.com/buildbarn/bb-remote-execution/pkg/runner" + "github.com/buildbarn/bb-storage/pkg/testutil" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func TestCleanRunner(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + baseRunner := mock.NewMockRunnerServer(ctrl) + baseCleaner := mock.NewMockCleaner(ctrl) + runner := runner.NewCleanRunner(baseRunner, cleaner.NewIdleInvoker(baseCleaner.Call)) + + request := &runner_pb.RunRequest{ + Arguments: []string{"cc", "-o", "hello.o", "hello.c"}, + WorkingDirectory: "a/root/subdir", + StdoutPath: "a/stdout", + StderrPath: "a/stderr", + InputRootDirectory: "a/root", + TemporaryDirectory: "a/tmp", + } + response := &runner_pb.RunResponse{ + ExitCode: 123, + } + + t.Run("AcquireFailure", func(t *testing.T) { + // No execution should take place if cleaning doesn't + // succeed, as the execution environment may not be in a + // valid state. + baseCleaner.EXPECT().Call(ctx). + Return(status.Error(codes.Internal, "Failed to clean temporary directory")) + + _, err := runner.Run(ctx, request) + require.Equal(t, status.Error(codes.Internal, "Failed to clean temporary directory"), err) + }) + + t.Run("RunFailure", func(t *testing.T) { + // Execution failures should be propagated. In this case + // we must still release the IdleInvoker, so that + // cleanups take place. + baseCleaner.EXPECT().Call(ctx) + baseRunner.EXPECT().Run(ctx, testutil.EqProto(t, request)). + Return(nil, status.Error(codes.InvalidArgument, "\"cc\" not found")) + baseCleaner.EXPECT().Call(ctx) + + _, err := runner.Run(ctx, request) + require.Equal(t, status.Error(codes.InvalidArgument, "\"cc\" not found"), err) + }) + + t.Run("ReleaseFailure", func(t *testing.T) { + // Failures to clean up the execution environment + // afterwards should also be propagated. 
+ baseCleaner.EXPECT().Call(ctx) + baseRunner.EXPECT().Run(ctx, testutil.EqProto(t, request)). + Return(response, nil) + baseCleaner.EXPECT().Call(ctx). + Return(status.Error(codes.Internal, "Failed to clean temporary directory")) + + _, err := runner.Run(ctx, request) + require.Equal(t, status.Error(codes.Internal, "Failed to clean temporary directory"), err) + }) + + t.Run("Success", func(t *testing.T) { + baseCleaner.EXPECT().Call(ctx) + baseRunner.EXPECT().Run(ctx, testutil.EqProto(t, request)). + Return(response, nil) + baseCleaner.EXPECT().Call(ctx) + + obtainedResponse, err := runner.Run(ctx, request) + require.NoError(t, err) + testutil.RequireEqualProto(t, response, obtainedResponse) + }) +} diff --git a/pkg/runner/local_runner.go b/pkg/runner/local_runner.go new file mode 100644 index 0000000..266450a --- /dev/null +++ b/pkg/runner/local_runner.go @@ -0,0 +1,182 @@ +package runner + +import ( + "context" + "errors" + "os/exec" + "path/filepath" + + "github.com/buildbarn/bb-remote-execution/pkg/proto/runner" + "github.com/buildbarn/bb-storage/pkg/filesystem" + "github.com/buildbarn/bb-storage/pkg/filesystem/path" + "github.com/buildbarn/bb-storage/pkg/util" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/anypb" + "google.golang.org/protobuf/types/known/emptypb" +) + +// logFileResolver is an implementation of path.ComponentWalker that is +// used by localRunner.Run() to traverse to the directory of stdout and +// stderr log files, so that they may be opened. +// +// TODO: This code seems fairly generic. Should move it to the +// filesystem package? 
+type logFileResolver struct { + path.TerminalNameTrackingComponentWalker + stack util.NonEmptyStack[filesystem.DirectoryCloser] +} + +func (r *logFileResolver) OnDirectory(name path.Component) (path.GotDirectoryOrSymlink, error) { + child, err := r.stack.Peek().EnterDirectory(name) + if err != nil { + return nil, err + } + r.stack.Push(child) + return path.GotDirectory{ + Child: r, + IsReversible: true, + }, nil +} + +func (r *logFileResolver) OnUp() (path.ComponentWalker, error) { + if d, ok := r.stack.PopSingle(); ok { + if err := d.Close(); err != nil { + r.stack.Push(d) + return nil, err + } + return r, nil + } + return nil, status.Error(codes.InvalidArgument, "Path resolves to a location outside the build directory") +} + +func (r *logFileResolver) closeAll() { + for { + d, ok := r.stack.PopSingle() + if !ok { + break + } + d.Close() + } +} + +type localRunner struct { + buildDirectory filesystem.Directory + buildDirectoryPath *path.Builder + commandCreator CommandCreator + setTmpdirEnvironmentVariable bool +} + +func (r *localRunner) openLog(logPath string) (filesystem.FileAppender, error) { + logFileResolver := logFileResolver{ + stack: util.NewNonEmptyStack(filesystem.NopDirectoryCloser(r.buildDirectory)), + } + defer logFileResolver.closeAll() + if err := path.Resolve(logPath, path.NewRelativeScopeWalker(&logFileResolver)); err != nil { + return nil, err + } + if logFileResolver.TerminalName == nil { + return nil, status.Error(codes.InvalidArgument, "Path resolves to a directory") + } + return logFileResolver.stack.Peek().OpenAppend(*logFileResolver.TerminalName, filesystem.CreateExcl(0o666)) +} + +// CommandCreator is a type alias for a function that creates the +// exec.Cmd in localRunner.Run(). It may use different strategies for +// resolving the paths of argv[0] and the working directory, depending +// on whether the action needs to be run in a chroot() or not. 
+type CommandCreator func(ctx context.Context, arguments []string, inputRootDirectory *path.Builder, workingDirectory, pathVariable string) (*exec.Cmd, error) + +// NewLocalRunner returns a Runner capable of running commands on the +// local system directly. +func NewLocalRunner(buildDirectory filesystem.Directory, buildDirectoryPath *path.Builder, commandCreator CommandCreator, setTmpdirEnvironmentVariable bool) runner.RunnerServer { + return &localRunner{ + buildDirectory: buildDirectory, + buildDirectoryPath: buildDirectoryPath, + commandCreator: commandCreator, + setTmpdirEnvironmentVariable: setTmpdirEnvironmentVariable, + } +} + +func (r *localRunner) Run(ctx context.Context, request *runner.RunRequest) (*runner.RunResponse, error) { + if len(request.Arguments) < 1 { + return nil, status.Error(codes.InvalidArgument, "Insufficient number of command arguments") + } + + inputRootDirectory, scopeWalker := r.buildDirectoryPath.Join(path.VoidScopeWalker) + if err := path.Resolve(request.InputRootDirectory, scopeWalker); err != nil { + return nil, util.StatusWrap(err, "Failed to resolve input root directory") + } + + cmd, err := r.commandCreator(ctx, request.Arguments, inputRootDirectory, request.WorkingDirectory, request.EnvironmentVariables["PATH"]) + if err != nil { + return nil, err + } + + // Set the environment variables. 
+ cmd.Env = make([]string, 0, len(request.EnvironmentVariables)+1) + if r.setTmpdirEnvironmentVariable && request.TemporaryDirectory != "" { + temporaryDirectory, scopeWalker := r.buildDirectoryPath.Join(path.VoidScopeWalker) + if err := path.Resolve(request.TemporaryDirectory, scopeWalker); err != nil { + return nil, util.StatusWrap(err, "Failed to resolve temporary directory") + } + for _, prefix := range temporaryDirectoryEnvironmentVariablePrefixes { + cmd.Env = append(cmd.Env, prefix+filepath.FromSlash(temporaryDirectory.String())) + } + } + for name, value := range request.EnvironmentVariables { + cmd.Env = append(cmd.Env, name+"="+value) + } + + // Open output files for logging. + stdout, err := r.openLog(request.StdoutPath) + if err != nil { + return nil, util.StatusWrapf(err, "Failed to open stdout path %q", request.StdoutPath) + } + cmd.Stdout = stdout + + stderr, err := r.openLog(request.StderrPath) + if err != nil { + stdout.Close() + return nil, util.StatusWrapf(err, "Failed to open stderr path %q", request.StderrPath) + } + cmd.Stderr = stderr + + // Start the subprocess. We can already close the output files + // while the process is running. + err = cmd.Start() + stdout.Close() + stderr.Close() + if err != nil { + code := codes.Internal + for _, invalidArgumentErr := range invalidArgumentErrs { + if errors.Is(err, invalidArgumentErr) { + code = codes.InvalidArgument + break + } + } + return nil, util.StatusWrapWithCode(err, code, "Failed to start process") + } + + // Wait for execution to complete. Permit non-zero exit codes. + if err := cmd.Wait(); err != nil { + if _, ok := err.(*exec.ExitError); !ok { + return nil, err + } + } + + // Attach rusage information to the response. 
+ posixResourceUsage, err := anypb.New(getPOSIXResourceUsage(cmd)) + if err != nil { + return nil, util.StatusWrap(err, "Failed to marshal POSIX resource usage") + } + return &runner.RunResponse{ + ExitCode: int32(cmd.ProcessState.ExitCode()), + ResourceUsage: []*anypb.Any{posixResourceUsage}, + }, nil +} + +func (r *localRunner) CheckReadiness(ctx context.Context, request *emptypb.Empty) (*emptypb.Empty, error) { + return &emptypb.Empty{}, nil +} diff --git a/pkg/runner/local_runner_darwin.go b/pkg/runner/local_runner_darwin.go new file mode 100644 index 0000000..0fd407d --- /dev/null +++ b/pkg/runner/local_runner_darwin.go @@ -0,0 +1,11 @@ +package runner + +import ( + "syscall" +) + +func init() { + // Running Mach-O binaries for the wrong architecture returns + // EBADARCH instead of ENOEXEC. + invalidArgumentErrs = append(invalidArgumentErrs, syscall.EBADARCH) +} diff --git a/pkg/runner/local_runner_rss_bytes.go b/pkg/runner/local_runner_rss_bytes.go new file mode 100644 index 0000000..1c70a53 --- /dev/null +++ b/pkg/runner/local_runner_rss_bytes.go @@ -0,0 +1,10 @@ +//go:build darwin +// +build darwin + +package runner + +const ( + // On macOS, the getrusage(2) man page documents that the + // resident set size is returned in bytes. + maximumResidentSetSizeUnit = 1 +) diff --git a/pkg/runner/local_runner_rss_kibibytes.go b/pkg/runner/local_runner_rss_kibibytes.go new file mode 100644 index 0000000..baa233a --- /dev/null +++ b/pkg/runner/local_runner_rss_kibibytes.go @@ -0,0 +1,11 @@ +//go:build freebsd || linux +// +build freebsd linux + +package runner + +const ( + // On Linux and FreeBSD, the getrusage(2) man pages document + // that the resident set size is returned in kilobytes, though + // kernel sources indicate kibibytes are used. 
+ maximumResidentSetSizeUnit = 1024 +) diff --git a/pkg/runner/local_runner_test.go b/pkg/runner/local_runner_test.go new file mode 100644 index 0000000..3a54d34 --- /dev/null +++ b/pkg/runner/local_runner_test.go @@ -0,0 +1,492 @@ +package runner_test + +import ( + "context" + "os" + "path/filepath" + "runtime" + "strings" + "syscall" + "testing" + + "github.com/buildbarn/bb-remote-execution/internal/mock" + "github.com/buildbarn/bb-remote-execution/pkg/proto/resourceusage" + runner_pb "github.com/buildbarn/bb-remote-execution/pkg/proto/runner" + "github.com/buildbarn/bb-remote-execution/pkg/runner" + "github.com/buildbarn/bb-storage/pkg/filesystem" + "github.com/buildbarn/bb-storage/pkg/filesystem/path" + "github.com/buildbarn/bb-storage/pkg/testutil" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func TestLocalRunner(t *testing.T) { + ctrl := gomock.NewController(t) + + buildDirectoryPath := t.TempDir() + buildDirectory, err := filesystem.NewLocalDirectory(buildDirectoryPath) + require.NoError(t, err) + defer buildDirectory.Close() + + buildDirectoryPathBuilder, scopeWalker := path.EmptyBuilder.Join(path.VoidScopeWalker) + require.NoError(t, path.Resolve(buildDirectoryPath, scopeWalker)) + + var cmdPath string + var getEnvCommand []string + if runtime.GOOS == "windows" { + cmdPath = filepath.Join(os.Getenv("SYSTEMROOT"), "system32\\cmd.exe") + getEnvCommand = []string{cmdPath, "/d", "/c", "set"} + } else { + getEnvCommand = []string{"/usr/bin/env"} + } + + t.Run("EmptyEnvironment", func(t *testing.T) { + if runtime.GOOS == "windows" { + return + } + + testPath := filepath.Join(buildDirectoryPath, "EmptyEnvironment") + require.NoError(t, os.Mkdir(testPath, 0o777)) + require.NoError(t, os.Mkdir(filepath.Join(testPath, "root"), 0o777)) + require.NoError(t, os.Mkdir(filepath.Join(testPath, "tmp"), 0o777)) + + // Running a command without specifying any environment + 
// variables should cause the process to be executed in + // an empty environment. It should not inherit the + // environment of the runner. + runner := runner.NewLocalRunner(buildDirectory, buildDirectoryPathBuilder, runner.NewPlainCommandCreator(&syscall.SysProcAttr{}), false) + response, err := runner.Run(context.Background(), &runner_pb.RunRequest{ + Arguments: getEnvCommand, + StdoutPath: "EmptyEnvironment/stdout", + StderrPath: "EmptyEnvironment/stderr", + InputRootDirectory: "EmptyEnvironment/root", + TemporaryDirectory: "EmptyEnvironment/tmp", + }) + require.NoError(t, err) + require.Equal(t, int32(0), response.ExitCode) + + stdout, err := os.ReadFile(filepath.Join(testPath, "stdout")) + require.NoError(t, err) + require.Empty(t, stdout) + + stderr, err := os.ReadFile(filepath.Join(testPath, "stderr")) + require.NoError(t, err) + require.Empty(t, stderr) + }) + + t.Run("NonEmptyEnvironment", func(t *testing.T) { + testPath := filepath.Join(buildDirectoryPath, "NonEmptyEnvironment") + require.NoError(t, os.Mkdir(testPath, 0o777)) + require.NoError(t, os.Mkdir(filepath.Join(testPath, "root"), 0o777)) + tmpPath := filepath.Join(testPath, "tmp") + require.NoError(t, os.Mkdir(tmpPath, 0o777)) + + // The environment variables provided in the RunRequest + // should be respected. If automatic injection of TMPDIR + // is enabled, that variable should also be added. 
+ runner := runner.NewLocalRunner(buildDirectory, buildDirectoryPathBuilder, runner.NewPlainCommandCreator(&syscall.SysProcAttr{}), true) + response, err := runner.Run(context.Background(), &runner_pb.RunRequest{ + Arguments: getEnvCommand, + EnvironmentVariables: map[string]string{ + "FOO": "bar", + "BAZ": "xyzzy", + }, + StdoutPath: "NonEmptyEnvironment/stdout", + StderrPath: "NonEmptyEnvironment/stderr", + InputRootDirectory: "NonEmptyEnvironment/root", + TemporaryDirectory: "NonEmptyEnvironment/tmp", + }) + require.NoError(t, err) + require.Equal(t, int32(0), response.ExitCode) + + stdout, err := os.ReadFile(filepath.Join(testPath, "stdout")) + require.NoError(t, err) + if runtime.GOOS == "windows" { + require.Subset(t, strings.Fields(string(stdout)), []string{ + "FOO=bar", + "BAZ=xyzzy", + "TMP=" + tmpPath, + "TEMP=" + tmpPath, + }) + } else { + require.ElementsMatch(t, []string{ + "FOO=bar", + "BAZ=xyzzy", + "TMPDIR=" + tmpPath, + }, strings.Fields(string(stdout))) + } + + stderr, err := os.ReadFile(filepath.Join(testPath, "stderr")) + require.NoError(t, err) + require.Empty(t, stderr) + }) + + t.Run("OverridingTmpdir", func(t *testing.T) { + testPath := filepath.Join(buildDirectoryPath, "OverridingTmpdir") + require.NoError(t, os.Mkdir(testPath, 0o777)) + require.NoError(t, os.Mkdir(filepath.Join(testPath, "root"), 0o777)) + tmpPath := filepath.Join(testPath, "tmp") + require.NoError(t, os.Mkdir(tmpPath, 0o777)) + + var envMap map[string]string + if runtime.GOOS == "windows" { + envMap = map[string]string{ + "TMP": "\\somewhere\\else", + "TEMP": "\\somewhere\\else", + } + } else { + envMap = map[string]string{ + "TMPDIR": "/somewhere/else", + } + } + + // Automatic injection of TMPDIR should have no effect + // if the command to be run provides its own TMPDIR. 
+ runner := runner.NewLocalRunner(buildDirectory, buildDirectoryPathBuilder, runner.NewPlainCommandCreator(&syscall.SysProcAttr{}), true) + response, err := runner.Run(context.Background(), &runner_pb.RunRequest{ + Arguments: getEnvCommand, + EnvironmentVariables: envMap, + StdoutPath: "OverridingTmpdir/stdout", + StderrPath: "OverridingTmpdir/stderr", + InputRootDirectory: "OverridingTmpdir/root", + TemporaryDirectory: "OverridingTmpdir/tmp", + }) + require.NoError(t, err) + require.Equal(t, int32(0), response.ExitCode) + + stdout, err := os.ReadFile(filepath.Join(testPath, "stdout")) + require.NoError(t, err) + if runtime.GOOS == "windows" { + require.Subset(t, strings.Fields(string(stdout)), []string{ + "TMP=\\somewhere\\else", + "TEMP=\\somewhere\\else", + }) + } else { + require.Equal(t, "TMPDIR=/somewhere/else\n", string(stdout)) + } + + stderr, err := os.ReadFile(filepath.Join(testPath, "stderr")) + require.NoError(t, err) + require.Empty(t, stderr) + }) + + t.Run("NonZeroExitCode", func(t *testing.T) { + testPath := filepath.Join(buildDirectoryPath, "NonZeroExitCode") + require.NoError(t, os.Mkdir(testPath, 0o777)) + require.NoError(t, os.Mkdir(filepath.Join(testPath, "root"), 0o777)) + require.NoError(t, os.Mkdir(filepath.Join(testPath, "tmp"), 0o777)) + + // Non-zero exit codes should be captured in the + // RunResponse. POSIX 2008 and later added support for + // 32-bit signed exit codes. Most implementations still + // truncate the exit code to 8 bits. 
+ var exit255Command []string + if runtime.GOOS == "windows" { + exit255Command = []string{cmdPath, "/d", "/c", "exit 255"} + } else { + exit255Command = []string{"/bin/sh", "-c", "exit 255"} + } + runner := runner.NewLocalRunner(buildDirectory, buildDirectoryPathBuilder, runner.NewPlainCommandCreator(&syscall.SysProcAttr{}), false) + response, err := runner.Run(context.Background(), &runner_pb.RunRequest{ + Arguments: exit255Command, + StdoutPath: "NonZeroExitCode/stdout", + StderrPath: "NonZeroExitCode/stderr", + InputRootDirectory: "NonZeroExitCode/root", + TemporaryDirectory: "NonZeroExitCode/tmp", + }) + require.NoError(t, err) + require.Equal(t, int32(255), response.ExitCode) + + stdout, err := os.ReadFile(filepath.Join(testPath, "stdout")) + require.NoError(t, err) + require.Empty(t, stdout) + + stderr, err := os.ReadFile(filepath.Join(testPath, "stderr")) + require.NoError(t, err) + require.Empty(t, stderr) + }) + + t.Run("SigKill", func(t *testing.T) { + if runtime.GOOS == "windows" { + return + } + + testPath := filepath.Join(buildDirectoryPath, "SigKill") + require.NoError(t, os.Mkdir(testPath, 0o777)) + require.NoError(t, os.Mkdir(filepath.Join(testPath, "root"), 0o777)) + require.NoError(t, os.Mkdir(filepath.Join(testPath, "tmp"), 0o777)) + + // If the process terminates due to a signal, the name + // of the signal should be set as part of the POSIX + // resource usage message. 
+ runner := runner.NewLocalRunner(buildDirectory, buildDirectoryPathBuilder, runner.NewPlainCommandCreator(&syscall.SysProcAttr{}), false) + response, err := runner.Run(context.Background(), &runner_pb.RunRequest{ + Arguments: []string{"/bin/sh", "-c", "kill -s KILL $$"}, + StdoutPath: "SigKill/stdout", + StderrPath: "SigKill/stderr", + InputRootDirectory: "SigKill/root", + TemporaryDirectory: "SigKill/tmp", + }) + require.NoError(t, err) + require.NotEqual(t, int32(0), response.ExitCode) + + require.Len(t, response.ResourceUsage, 1) + var posixResourceUsage resourceusage.POSIXResourceUsage + require.NoError(t, response.ResourceUsage[0].UnmarshalTo(&posixResourceUsage)) + require.Equal(t, "KILL", posixResourceUsage.TerminationSignal) + + stdout, err := os.ReadFile(filepath.Join(testPath, "stdout")) + require.NoError(t, err) + require.Empty(t, stdout) + + stderr, err := os.ReadFile(filepath.Join(testPath, "stderr")) + require.NoError(t, err) + require.Empty(t, stderr) + }) + + t.Run("UnknownCommandWithEmptyPath", func(t *testing.T) { + testPath := filepath.Join(buildDirectoryPath, "UnknownCommandWithEmptyPath") + require.NoError(t, os.Mkdir(testPath, 0o777)) + require.NoError(t, os.Mkdir(filepath.Join(testPath, "root"), 0o777)) + require.NoError(t, os.Mkdir(filepath.Join(testPath, "tmp"), 0o777)) + + // If argv[0] consists of a single filename, lookups + // against $PATH need to be performed. If PATH is not + // set, the action should fail with a non-retriable + // error. 
+ runner := runner.NewLocalRunner(buildDirectory, buildDirectoryPathBuilder, runner.NewPlainCommandCreator(&syscall.SysProcAttr{}), false) + _, err := runner.Run(context.Background(), &runner_pb.RunRequest{ + Arguments: []string{"nonexistent_command"}, + StdoutPath: "UnknownCommandWithEmptyPath/stdout", + StderrPath: "UnknownCommandWithEmptyPath/stderr", + InputRootDirectory: "UnknownCommandWithEmptyPath/root", + TemporaryDirectory: "UnknownCommandWithEmptyPath/tmp", + }) + testutil.RequirePrefixedStatus(t, status.Error(codes.InvalidArgument, "Cannot find executable \"nonexistent_command\" in search paths \"\""), err) + }) + + t.Run("UnknownCommandWithBadPath", func(t *testing.T) { + testPath := filepath.Join(buildDirectoryPath, "UnknownCommandWithBadPath") + require.NoError(t, os.Mkdir(testPath, 0o777)) + require.NoError(t, os.Mkdir(filepath.Join(testPath, "root"), 0o777)) + require.NoError(t, os.Mkdir(filepath.Join(testPath, "tmp"), 0o777)) + + // Even invoking known shell utilities shouldn't be + // permitted if PATH points to a nonexistent location. 
+ runner := runner.NewLocalRunner(buildDirectory, buildDirectoryPathBuilder, runner.NewPlainCommandCreator(&syscall.SysProcAttr{}), false) + _, err := runner.Run(context.Background(), &runner_pb.RunRequest{ + Arguments: []string{"sh", "-c", "exit 123"}, + EnvironmentVariables: map[string]string{"PATH": "/nonexistent"}, + StdoutPath: "UnknownCommandWithBadPath/stdout", + StderrPath: "UnknownCommandWithBadPath/stderr", + InputRootDirectory: "UnknownCommandWithBadPath/root", + TemporaryDirectory: "UnknownCommandWithBadPath/tmp", + }) + testutil.RequirePrefixedStatus(t, status.Error(codes.InvalidArgument, "Cannot find executable \"sh\" in search paths \"/nonexistent\""), err) + }) + + t.Run("RelativeSearchPath", func(t *testing.T) { + if runtime.GOOS == "windows" { + return + } + + testPath := filepath.Join(buildDirectoryPath, "RelativeSearchPath") + require.NoError(t, os.Mkdir(testPath, 0o777)) + require.NoError(t, os.MkdirAll(filepath.Join(testPath, "root", "subdirectory"), 0o777)) + require.NoError(t, os.Mkdir(filepath.Join(testPath, "tmp"), 0o777)) + require.NoError(t, os.WriteFile(filepath.Join(testPath, "root", "subdirectory", "hello.sh"), []byte("#!/bin/sh\necho $0\nexit 42\n"), 0o777)) + + // If the PATH environment variable contains a relative + // path, it should be treated as being relative to the + // working directory. Because the search path is + // relative, execve() should be called with a relative + // path as well. 
+ runner := runner.NewLocalRunner(buildDirectory, buildDirectoryPathBuilder, runner.NewPlainCommandCreator(&syscall.SysProcAttr{}), false) + response, err := runner.Run(context.Background(), &runner_pb.RunRequest{ + Arguments: []string{"hello.sh"}, + EnvironmentVariables: map[string]string{"PATH": "subdirectory"}, + StdoutPath: "RelativeSearchPath/stdout", + StderrPath: "RelativeSearchPath/stderr", + InputRootDirectory: "RelativeSearchPath/root", + TemporaryDirectory: "RelativeSearchPath/tmp", + }) + require.NoError(t, err) + require.Equal(t, int32(42), response.ExitCode) + + stdout, err := os.ReadFile(filepath.Join(testPath, "stdout")) + require.NoError(t, err) + require.Equal(t, "subdirectory/hello.sh\n", string(stdout)) + + stderr, err := os.ReadFile(filepath.Join(testPath, "stderr")) + require.NoError(t, err) + require.Empty(t, stderr) + }) + + t.Run("UnknownCommandRelative", func(t *testing.T) { + testPath := filepath.Join(buildDirectoryPath, "UnknownCommandRelative") + require.NoError(t, os.Mkdir(testPath, 0o777)) + require.NoError(t, os.Mkdir(filepath.Join(testPath, "root"), 0o777)) + require.NoError(t, os.Mkdir(filepath.Join(testPath, "tmp"), 0o777)) + + // If argv[0] is not an absolute path, but does consist + // of multiple components, no $PATH lookup is performed. + // If the path does not exist, the action should fail + // with a non-retriable error. 
+ runner := runner.NewLocalRunner(buildDirectory, buildDirectoryPathBuilder, runner.NewPlainCommandCreator(&syscall.SysProcAttr{}), false) + _, err := runner.Run(context.Background(), &runner_pb.RunRequest{ + Arguments: []string{"./nonexistent_command"}, + StdoutPath: "UnknownCommandRelative/stdout", + StderrPath: "UnknownCommandRelative/stderr", + InputRootDirectory: "UnknownCommandRelative/root", + TemporaryDirectory: "UnknownCommandRelative/tmp", + }) + testutil.RequirePrefixedStatus(t, status.Error(codes.InvalidArgument, "Failed to start process: "), err) + }) + + t.Run("UnknownCommandAbsolute", func(t *testing.T) { + testPath := filepath.Join(buildDirectoryPath, "UnknownCommandAbsolute") + require.NoError(t, os.Mkdir(testPath, 0o777)) + require.NoError(t, os.Mkdir(filepath.Join(testPath, "root"), 0o777)) + require.NoError(t, os.Mkdir(filepath.Join(testPath, "tmp"), 0o777)) + + // If argv[0] is an absolute path that does not exist, + // we should also return a non-retriable error. + runner := runner.NewLocalRunner(buildDirectory, buildDirectoryPathBuilder, runner.NewPlainCommandCreator(&syscall.SysProcAttr{}), false) + _, err := runner.Run(context.Background(), &runner_pb.RunRequest{ + Arguments: []string{"/nonexistent_command"}, + StdoutPath: "UnknownCommandAbsolute/stdout", + StderrPath: "UnknownCommandAbsolute/stderr", + InputRootDirectory: "UnknownCommandAbsolute/root", + TemporaryDirectory: "UnknownCommandAbsolute/tmp", + }) + testutil.RequirePrefixedStatus(t, status.Error(codes.InvalidArgument, "Failed to start process: "), err) + }) + + t.Run("ExecFormatErrorJPEG", func(t *testing.T) { + testPath := filepath.Join(buildDirectoryPath, "ExecFormatErrorJPEG") + require.NoError(t, os.Mkdir(testPath, 0o777)) + require.NoError(t, os.Mkdir(filepath.Join(testPath, "root"), 0o777)) + require.NoError(t, os.Mkdir(filepath.Join(testPath, "tmp"), 0o777)) + require.NoError(t, os.WriteFile(filepath.Join(testPath, "root", "not_a.binary"), []byte{ + 0x89, 0x50, 0x4e, 
0x47, 0x0d, 0x0a, 0x1a, 0x0a, + }, 0o777)) + + // If argv[0] is a binary that cannot be executed we + // should also return a non-retriable error. In this + // case it's a JPEG file. + runner := runner.NewLocalRunner(buildDirectory, buildDirectoryPathBuilder, runner.NewPlainCommandCreator(&syscall.SysProcAttr{}), false) + _, err := runner.Run(context.Background(), &runner_pb.RunRequest{ + Arguments: []string{"./not_a.binary"}, + StdoutPath: "ExecFormatErrorJPEG/stdout", + StderrPath: "ExecFormatErrorJPEG/stderr", + InputRootDirectory: "ExecFormatErrorJPEG/root", + TemporaryDirectory: "ExecFormatErrorJPEG/tmp", + }) + testutil.RequirePrefixedStatus(t, status.Error(codes.InvalidArgument, "Failed to start process: "), err) + }) + + t.Run("ExecFormatErrorMachOBadArch", func(t *testing.T) { + testPath := filepath.Join(buildDirectoryPath, "ExecFormatErrorMachOBadArch") + require.NoError(t, os.Mkdir(testPath, 0o777)) + require.NoError(t, os.Mkdir(filepath.Join(testPath, "root"), 0o777)) + require.NoError(t, os.Mkdir(filepath.Join(testPath, "tmp"), 0o777)) + require.NoError(t, os.WriteFile(filepath.Join(testPath, "root", "not_a.binary"), []byte{ + 0xcf, 0xfa, 0xed, 0xfe, 0x01, 0x00, 0x00, 0x00, 0x03, + 0x00, 0x00, 0x80, 0x02, 0x00, 0x00, 0x00, 0x02, 0x00, + 0x00, 0x00, 0xf1, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x19, 0x00, 0x00, 0x00, + 0x48, 0x00, 0x00, 0x00, 0x48, 0x65, 0x6c, 0x6c, 0x6f, + 0x2c, 0x20, 0x57, 0x6f, 0x72, 0x6c, 0x64, 0x21, 0x0a, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0x00, + 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, + 0xb8, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x2a, + 0x00, 0x00, 0x00, 0xba, 0x0e, 0x00, 0x00, 0x00, 0xb8, + 0x04, 0x00, 0x00, 0x02, 0x0f, 0x05, 0xeb, 0x28, 0x00, + 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x28, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x48, 0x31, 0xff, 0xb8, 0x01, 0x00, + 0x00, 0x02, 0x0f, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x78, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, + }, 0o777)) + + // On macOS, running a Mach-O executable that was + // compiled for a different CPU will return EBADARCH + // instead of ENOEXEC. This should still cause a + // non-retriable error to be returned. + // + // Test this by attempting to run a tiny Mach-O + // executable that uses CPU_TYPE_VAX. 
+ runner := runner.NewLocalRunner(buildDirectory, buildDirectoryPathBuilder, runner.NewPlainCommandCreator(&syscall.SysProcAttr{}), false) + _, err := runner.Run(context.Background(), &runner_pb.RunRequest{ + Arguments: []string{"./not_a.binary"}, + StdoutPath: "ExecFormatErrorMachOBadArch/stdout", + StderrPath: "ExecFormatErrorMachOBadArch/stderr", + InputRootDirectory: "ExecFormatErrorMachOBadArch/root", + TemporaryDirectory: "ExecFormatErrorMachOBadArch/tmp", + }) + testutil.RequirePrefixedStatus(t, status.Error(codes.InvalidArgument, "Failed to start process: "), err) + }) + + t.Run("UnknownCommandDirectory", func(t *testing.T) { + testPath := filepath.Join(buildDirectoryPath, "UnknownCommandDirectory") + require.NoError(t, os.Mkdir(testPath, 0o777)) + require.NoError(t, os.Mkdir(filepath.Join(testPath, "root"), 0o777)) + require.NoError(t, os.Mkdir(filepath.Join(testPath, "tmp"), 0o777)) + + // If argv[0] refers to a directory, we should also + // return a non-retriable error. + runner := runner.NewLocalRunner(buildDirectory, buildDirectoryPathBuilder, runner.NewPlainCommandCreator(&syscall.SysProcAttr{}), false) + _, err := runner.Run(context.Background(), &runner_pb.RunRequest{ + Arguments: []string{"/"}, + StdoutPath: "UnknownCommandDirectory/stdout", + StderrPath: "UnknownCommandDirectory/stderr", + InputRootDirectory: "UnknownCommandDirectory/root", + TemporaryDirectory: "UnknownCommandDirectory/tmp", + }) + testutil.RequirePrefixedStatus(t, status.Error(codes.InvalidArgument, "Failed to start process: "), err) + }) + + t.Run("BuildDirectoryEscape", func(t *testing.T) { + buildDirectory := mock.NewMockDirectory(ctrl) + helloDirectory := mock.NewMockDirectoryCloser(ctrl) + buildDirectory.EXPECT().EnterDirectory(path.MustNewComponent("hello")).Return(helloDirectory, nil) + helloDirectory.EXPECT().Close() + + // The runner process may need to run with elevated + // privileges. 
It shouldn't be possible to trick the + // runner into opening files outside the build + // directory. + runner := runner.NewLocalRunner(buildDirectory, &path.EmptyBuilder, runner.NewPlainCommandCreator(&syscall.SysProcAttr{}), false) + _, err := runner.Run(context.Background(), &runner_pb.RunRequest{ + Arguments: getEnvCommand, + StdoutPath: "hello/../../../../../../etc/passwd", + StderrPath: "stderr", + InputRootDirectory: ".", + TemporaryDirectory: ".", + }) + testutil.RequireEqualStatus( + t, + status.Error(codes.InvalidArgument, "Failed to open stdout path \"hello/../../../../../../etc/passwd\": Path resolves to a location outside the build directory"), + err) + }) + + // TODO: Improve testing coverage of LocalRunner. +} diff --git a/pkg/runner/local_runner_unix.go b/pkg/runner/local_runner_unix.go new file mode 100644 index 0000000..5b15021 --- /dev/null +++ b/pkg/runner/local_runner_unix.go @@ -0,0 +1,167 @@ +//go:build darwin || freebsd || linux +// +build darwin freebsd linux + +package runner + +import ( + "context" + "os" + "os/exec" + "path/filepath" + "strings" + "syscall" + + "github.com/buildbarn/bb-remote-execution/pkg/proto/resourceusage" + "github.com/buildbarn/bb-storage/pkg/filesystem/path" + "github.com/buildbarn/bb-storage/pkg/util" + + "golang.org/x/sys/unix" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/durationpb" +) + +// getExecutablePath returns the path of an executable within a given +// search path that is part of the PATH environment variable. 
+func getExecutablePath(baseDirectory *path.Builder, searchPathStr, argv0 string) (string, error) { + searchPath, scopeWalker := baseDirectory.Join(path.VoidScopeWalker) + if err := path.Resolve(searchPathStr, scopeWalker); err != nil { + return "", err + } + executablePath, scopeWalker := searchPath.Join(path.VoidScopeWalker) + if err := path.Resolve(argv0, scopeWalker); err != nil { + return "", err + } + return executablePath.String(), nil +} + +// lookupExecutable returns the path of an executable, taking the PATH +// environment variable into account. +func lookupExecutable(workingDirectory *path.Builder, pathVariable, argv0 string) (string, error) { + if strings.ContainsRune(argv0, os.PathSeparator) { + // No PATH processing needs to be performed. + return argv0, nil + } + + // Executable path does not contain any slashes. Perform PATH + // lookups. + // + // We cannot use exec.LookPath() directly, as that function + // disregards the working directory of the action. It also uses + // the PATH environment variable of the current process, as + // opposed to respecting the value that is provided as part of + // the action. Do call into this function to validate the + // existence of the executable. + for _, searchPathStr := range filepath.SplitList(pathVariable) { + executablePathAbs, err := getExecutablePath(workingDirectory, searchPathStr, argv0) + if err != nil { + return "", util.StatusWrapf(err, "Failed to resolve executable %#v in search path %#v", argv0, searchPathStr) + } + if _, err := exec.LookPath(executablePathAbs); err == nil { + // Regular compiled executables will receive the + // argv[0] that we provide, but scripts starting + // with '#!' will receive the literal executable + // path. + // + // Most shells seem to guarantee that if argv[0] + // is relative, the executable path is relative + // as well. Prevent these scripts from breaking + // by recomputing the executable path once more, + // but relative. 
+ executablePathRel, err := getExecutablePath(&path.EmptyBuilder, searchPathStr, argv0) + if err != nil { + return "", util.StatusWrapf(err, "Failed to resolve executable %#v in search path %#v", argv0, searchPathStr) + } + return executablePathRel, nil + } + } + return "", status.Errorf(codes.InvalidArgument, "Cannot find executable %#v in search paths %#v", argv0, pathVariable) +} + +// NewPlainCommandCreator returns a CommandCreator for cases where we don't +// need to chroot into the input root directory. +func NewPlainCommandCreator(sysProcAttr *syscall.SysProcAttr) CommandCreator { + return func(ctx context.Context, arguments []string, inputRootDirectory *path.Builder, workingDirectoryStr, pathVariable string) (*exec.Cmd, error) { + workingDirectory, scopeWalker := inputRootDirectory.Join(path.VoidScopeWalker) + if err := path.Resolve(workingDirectoryStr, scopeWalker); err != nil { + return nil, util.StatusWrap(err, "Failed to resolve working directory") + } + executablePath, err := lookupExecutable(workingDirectory, pathVariable, arguments[0]) + if err != nil { + return nil, err + } + + // exec.CommandContext() has some smartness to call + // exec.LookPath() under the hood, which we don't want. + // Call it with a placeholder path, followed by setting + // cmd.Path and cmd.Args manually. This ensures that our + // own values remain respected. + cmd := exec.CommandContext(ctx, "/nonexistent") + cmd.Args = arguments + cmd.Dir = workingDirectory.String() + cmd.Path = executablePath + cmd.SysProcAttr = sysProcAttr + return cmd, nil + } +} + +// NewChrootedCommandCreator returns a CommandCreator for cases where we +// need to chroot into the input root directory. 
+func NewChrootedCommandCreator(sysProcAttr *syscall.SysProcAttr) (CommandCreator, error) { + return func(ctx context.Context, arguments []string, inputRootDirectory *path.Builder, workingDirectoryStr, pathVariable string) (*exec.Cmd, error) { + // The addition of /usr/bin/env is necessary as the PATH resolution + // will take place prior to the chroot, so the executable may not be + // found by exec.LookPath() inside exec.CommandContext() and may + // cause cmd.Start() to fail when it shouldn't. + // https://github.com/golang/go/issues/39341 + cmd := exec.CommandContext(ctx, "/usr/bin/env", append([]string{"--"}, arguments...)...) + sysProcAttrCopy := *sysProcAttr + sysProcAttrCopy.Chroot = inputRootDirectory.String() + cmd.SysProcAttr = &sysProcAttrCopy + + // Set the working relative to be relative to the root + // directory of the chrooted environment. + workingDirectory, scopeWalker := path.RootBuilder.Join(path.VoidScopeWalker) + if err := path.Resolve(workingDirectoryStr, scopeWalker); err != nil { + return nil, util.StatusWrap(err, "Failed to resolve working directory") + } + cmd.Dir = workingDirectory.String() + return cmd, nil + }, nil +} + +var temporaryDirectoryEnvironmentVariablePrefixes = [...]string{"TMPDIR="} + +var invalidArgumentErrs = []error{exec.ErrNotFound, os.ErrPermission, syscall.EISDIR, syscall.ENOENT, syscall.ENOEXEC} + +func convertTimeval(t syscall.Timeval) *durationpb.Duration { + return &durationpb.Duration{ + Seconds: int64(t.Sec), + Nanos: int32(t.Usec) * 1000, + } +} + +func getPOSIXResourceUsage(cmd *exec.Cmd) *resourceusage.POSIXResourceUsage { + rusage := cmd.ProcessState.SysUsage().(*syscall.Rusage) + resourceUsage := &resourceusage.POSIXResourceUsage{ + UserTime: convertTimeval(rusage.Utime), + SystemTime: convertTimeval(rusage.Stime), + MaximumResidentSetSize: int64(rusage.Maxrss) * maximumResidentSetSizeUnit, + PageReclaims: int64(rusage.Minflt), + PageFaults: int64(rusage.Majflt), + Swaps: int64(rusage.Nswap), + 
BlockInputOperations: int64(rusage.Inblock), + BlockOutputOperations: int64(rusage.Oublock), + MessagesSent: int64(rusage.Msgsnd), + MessagesReceived: int64(rusage.Msgrcv), + SignalsReceived: int64(rusage.Nsignals), + VoluntaryContextSwitches: int64(rusage.Nvcsw), + InvoluntaryContextSwitches: int64(rusage.Nivcsw), + } + if waitStatus := cmd.ProcessState.Sys().(syscall.WaitStatus); waitStatus.Signaled() { + if s, ok := strings.CutPrefix(unix.SignalName(waitStatus.Signal()), "SIG"); ok { + resourceUsage.TerminationSignal = s + } + } + return resourceUsage +} diff --git a/pkg/runner/local_runner_windows.go b/pkg/runner/local_runner_windows.go new file mode 100644 index 0000000..aacc92e --- /dev/null +++ b/pkg/runner/local_runner_windows.go @@ -0,0 +1,59 @@ +//go:build windows +// +build windows + +package runner + +import ( + "context" + "os" + "os/exec" + "path/filepath" + "syscall" + + "github.com/buildbarn/bb-remote-execution/pkg/proto/resourceusage" + "github.com/buildbarn/bb-storage/pkg/filesystem/path" + "github.com/buildbarn/bb-storage/pkg/util" + + "golang.org/x/sys/windows" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/durationpb" +) + +// NewPlainCommandCreator returns a CommandCreator for cases where we don't +// need to chroot into the input root directory. +func NewPlainCommandCreator(sysProcAttr *syscall.SysProcAttr) CommandCreator { + return func(ctx context.Context, arguments []string, inputRootDirectory *path.Builder, workingDirectoryStr, pathVariable string) (*exec.Cmd, error) { + // TODO: This may not work correctly if the action sets + // the PATH environment variable explicitly. + cmd := exec.CommandContext(ctx, arguments[0], arguments[1:]...) + cmd.SysProcAttr = sysProcAttr + + // Set the working relative to be relative to the input + // root directory. 
+ workingDirectory, scopeWalker := inputRootDirectory.Join(path.VoidScopeWalker) + if err := path.Resolve(workingDirectoryStr, scopeWalker); err != nil { + return nil, util.StatusWrap(err, "Failed to resolve working directory") + } + cmd.Dir = filepath.FromSlash(workingDirectory.String()) + return cmd, nil + } +} + +// NewChrootedCommandCreator gives an error on Windows, as chroot is not +// supported on the platform. +func NewChrootedCommandCreator(sysProcAttr *syscall.SysProcAttr) (CommandCreator, error) { + return nil, status.Error(codes.InvalidArgument, "Chroot not supported on Windows") +} + +var temporaryDirectoryEnvironmentVariablePrefixes = [...]string{"TMP=", "TEMP="} + +var invalidArgumentErrs = [...]error{exec.ErrNotFound, os.ErrPermission, os.ErrNotExist, windows.ERROR_BAD_EXE_FORMAT} + +func getPOSIXResourceUsage(cmd *exec.Cmd) *resourceusage.POSIXResourceUsage { + processState := cmd.ProcessState + return &resourceusage.POSIXResourceUsage{ + UserTime: durationpb.New(processState.SystemTime()), + SystemTime: durationpb.New(processState.UserTime()), + } +} diff --git a/pkg/runner/path_existence_checking_runner.go b/pkg/runner/path_existence_checking_runner.go new file mode 100644 index 0000000..0edf798 --- /dev/null +++ b/pkg/runner/path_existence_checking_runner.go @@ -0,0 +1,58 @@ +package runner + +import ( + "context" + "os" + + runner_pb "github.com/buildbarn/bb-remote-execution/pkg/proto/runner" + "github.com/buildbarn/bb-storage/pkg/util" + + "google.golang.org/grpc/codes" + "google.golang.org/protobuf/types/known/emptypb" +) + +type pathExistenceCheckingRunner struct { + base runner_pb.RunnerServer + readinessCheckingPathnames []string +} + +// NewPathExistenceCheckingRunner creates a decorator of RunnerServer +// that is only healthy when certain paths on disk exist. 
+func NewPathExistenceCheckingRunner(base runner_pb.RunnerServer, readinessCheckingPathnames []string) runner_pb.RunnerServer { + return &pathExistenceCheckingRunner{ + base: base, + readinessCheckingPathnames: readinessCheckingPathnames, + } +} + +func (r *pathExistenceCheckingRunner) checkPathExistence(ctx context.Context) error { + for _, path := range r.readinessCheckingPathnames { + if _, err := os.Stat(path); err != nil { + return util.StatusWrapfWithCode(err, codes.Unavailable, "Path %#v", path) + } + } + return nil +} + +func (r *pathExistenceCheckingRunner) CheckReadiness(ctx context.Context, request *emptypb.Empty) (*emptypb.Empty, error) { + if err := r.checkPathExistence(ctx); err != nil { + return nil, err + } + return r.base.CheckReadiness(ctx, request) +} + +func (r *pathExistenceCheckingRunner) Run(ctx context.Context, request *runner_pb.RunRequest) (*runner_pb.RunResponse, error) { + response, err := r.base.Run(ctx, request) + if err != nil { + return nil, err + } + if response.ExitCode != 0 { + // Execution failues may be caused by files + // disappearing. Suppress the results in case the + // readiness check fails. 
+ if err := r.checkPathExistence(ctx); err != nil { + return nil, util.StatusWrap(err, "One or more required files disappeared during execution") + } + } + return response, nil +} diff --git a/pkg/runner/path_existence_checking_runner_test.go b/pkg/runner/path_existence_checking_runner_test.go new file mode 100644 index 0000000..442a552 --- /dev/null +++ b/pkg/runner/path_existence_checking_runner_test.go @@ -0,0 +1,84 @@ +package runner_test + +import ( + "context" + "os" + "path/filepath" + "testing" + + "github.com/buildbarn/bb-remote-execution/internal/mock" + runner_pb "github.com/buildbarn/bb-remote-execution/pkg/proto/runner" + "github.com/buildbarn/bb-remote-execution/pkg/runner" + "github.com/buildbarn/bb-storage/pkg/testutil" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/emptypb" +) + +func TestPathExistenceCheckingRunner(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + mockRunner := mock.NewMockRunnerServer(ctrl) + readinessCheckingFilename := filepath.Join(t.TempDir(), "ready") + runnerServer := runner.NewPathExistenceCheckingRunner(mockRunner, []string{ + readinessCheckingFilename, + }) + + runRequest := &runner_pb.RunRequest{ + Arguments: []string{"ls", "-l"}, + } + runResponse := &runner_pb.RunResponse{ + ExitCode: 42, + } + + t.Run("NotReadyCheckReadiness", func(t *testing.T) { + // When the file used for readiness checking is not + // present, CheckReadiness() should fail. + _, err := runnerServer.CheckReadiness(ctx, &emptypb.Empty{}) + testutil.RequirePrefixedStatus( + t, + status.Errorf(codes.Unavailable, "Path %#v: ", readinessCheckingFilename), + err) + }) + + t.Run("NotReadyRun", func(t *testing.T) { + // Similarly, the results of failing Run() calls should + // be suppressed, so that build failures are prevented. 
+ mockRunner.EXPECT().Run(ctx, runRequest).Return(&runner_pb.RunResponse{ + ExitCode: 42, + }, nil) + + _, err := runnerServer.Run(ctx, runRequest) + testutil.RequirePrefixedStatus( + t, + status.Errorf(codes.Unavailable, "One or more required files disappeared during execution: Path %#v: ", readinessCheckingFilename), + err) + }) + + // Create the file used for readiness checking and repeat the + // tests above. + f, err := os.OpenFile(readinessCheckingFilename, os.O_CREATE|os.O_WRONLY, 0o666) + require.NoError(t, err) + require.NoError(t, f.Close()) + + t.Run("ReadyCheckReadiness", func(t *testing.T) { + // Readiness checks should now succeed. + mockRunner.EXPECT().CheckReadiness(ctx, gomock.Any()).Return(&emptypb.Empty{}, nil) + + _, err := runnerServer.CheckReadiness(ctx, &emptypb.Empty{}) + require.NoError(t, err) + }) + + t.Run("ReadyRun", func(t *testing.T) { + // If readiness checks pass, non-zero exit codes of + // build actions should be returned as is. + mockRunner.EXPECT().Run(ctx, runRequest).Return(runResponse, nil) + + observedRunResponse, err := runnerServer.Run(ctx, runRequest) + require.NoError(t, err) + testutil.RequireEqualProto(t, runResponse, observedRunResponse) + }) +} diff --git a/pkg/runner/temporary_directory_installing_runner.go b/pkg/runner/temporary_directory_installing_runner.go new file mode 100644 index 0000000..d8158a5 --- /dev/null +++ b/pkg/runner/temporary_directory_installing_runner.go @@ -0,0 +1,47 @@ +package runner + +import ( + "context" + + runner_pb "github.com/buildbarn/bb-remote-execution/pkg/proto/runner" + "github.com/buildbarn/bb-remote-execution/pkg/proto/tmp_installer" + "github.com/buildbarn/bb-storage/pkg/util" + + "google.golang.org/protobuf/types/known/emptypb" +) + +type temporaryDirectoryInstallingRunner struct { + base runner_pb.RunnerServer + tmpInstaller tmp_installer.TemporaryDirectoryInstallerClient +} + +// NewTemporaryDirectoryInstallingRunner creates a Runner that calls +// into a separate gRPC 
service to announce the availability of a new +// temporary directory that may be used by build actions as a scratch +// space. +// +// This gRPC may, for example, remove the /tmp directory on the system +// and replace it by a symbolic link that points to the directory +// created by the worker. +func NewTemporaryDirectoryInstallingRunner(base runner_pb.RunnerServer, tmpInstaller tmp_installer.TemporaryDirectoryInstallerClient) runner_pb.RunnerServer { + return &temporaryDirectoryInstallingRunner{ + base: base, + tmpInstaller: tmpInstaller, + } +} + +func (r *temporaryDirectoryInstallingRunner) Run(ctx context.Context, request *runner_pb.RunRequest) (*runner_pb.RunResponse, error) { + if _, err := r.tmpInstaller.InstallTemporaryDirectory(ctx, &tmp_installer.InstallTemporaryDirectoryRequest{ + TemporaryDirectory: request.TemporaryDirectory, + }); err != nil { + return nil, util.StatusWrap(err, "Failed to install temporary directory") + } + return r.base.Run(ctx, request) +} + +func (r *temporaryDirectoryInstallingRunner) CheckReadiness(ctx context.Context, request *emptypb.Empty) (*emptypb.Empty, error) { + if _, err := r.tmpInstaller.CheckReadiness(ctx, &emptypb.Empty{}); err != nil { + return nil, util.StatusWrap(err, "Readiness check of temporary directory installer failed") + } + return r.base.CheckReadiness(ctx, request) +} diff --git a/pkg/runner/temporary_directory_symlinking_runner.go b/pkg/runner/temporary_directory_symlinking_runner.go new file mode 100644 index 0000000..bc31a96 --- /dev/null +++ b/pkg/runner/temporary_directory_symlinking_runner.go @@ -0,0 +1,88 @@ +package runner + +import ( + "context" + "os" + "sync" + + runner_pb "github.com/buildbarn/bb-remote-execution/pkg/proto/runner" + "github.com/buildbarn/bb-storage/pkg/filesystem/path" + "github.com/buildbarn/bb-storage/pkg/util" + + "google.golang.org/grpc/codes" + "google.golang.org/protobuf/types/known/emptypb" +) + +type temporaryDirectorySymlinkingRunner struct { + base 
runner_pb.RunnerServer + symlinkPath string + buildDirectoryPath *path.Builder + + lock sync.Mutex + runCount int +} + +// NewTemporaryDirectorySymlinkingRunner creates a decorator for Runner +// that removes a local path on the system and replaces it with a +// symbolic link pointing to the temporary directory that was created by +// bb_worker as part of the action's build directory. +func NewTemporaryDirectorySymlinkingRunner(base runner_pb.RunnerServer, symlinkPath string, buildDirectoryPath *path.Builder) runner_pb.RunnerServer { + return &temporaryDirectorySymlinkingRunner{ + base: base, + symlinkPath: symlinkPath, + buildDirectoryPath: buildDirectoryPath, + } +} + +func (r *temporaryDirectorySymlinkingRunner) updateSymlink(symlinkTarget string) error { + if err := os.Remove(r.symlinkPath); err != nil && !os.IsNotExist(err) { + return util.StatusWrapfWithCode(err, codes.Internal, "Failed to remove symbolic link %#v", r.symlinkPath) + } + if err := os.Symlink(symlinkTarget, r.symlinkPath); err != nil { + return util.StatusWrapfWithCode(err, codes.Internal, "Failed to create symbolic link %#v pointing to %#v", r.symlinkPath, symlinkTarget) + } + return nil +} + +func (r *temporaryDirectorySymlinkingRunner) Run(ctx context.Context, request *runner_pb.RunRequest) (*runner_pb.RunResponse, error) { + // Keep track of the number of concurrent Run() calls. When + // zero, CheckReadiness() can safely adjust the symbolic link + // for testing. + r.lock.Lock() + r.runCount++ + r.lock.Unlock() + defer func() { + r.lock.Lock() + r.runCount-- + r.lock.Unlock() + }() + + // Compute the absolute path of the temporary directory that is + // offered by bb_worker. + temporaryDirectoryPath, scopeWalker := r.buildDirectoryPath.Join(path.VoidScopeWalker) + if err := path.Resolve(request.TemporaryDirectory, scopeWalker); err != nil { + return nil, util.StatusWrap(err, "Failed to resolve temporary directory") + } + + // Install a symbolic link pointing to the temporary directory. 
+ if err := r.updateSymlink(temporaryDirectoryPath.String()); err != nil { + return nil, err + } + return r.base.Run(ctx, request) +} + +func (r *temporaryDirectorySymlinkingRunner) CheckReadiness(ctx context.Context, request *emptypb.Empty) (*emptypb.Empty, error) { + // When idle, test that symlink creation works properly. That + // way the worker won't pick up any actions from the scheduler + // in case of misconfigurations. + r.lock.Lock() + if r.runCount == 0 { + if err := r.updateSymlink("/nonexistent"); err != nil { + r.lock.Unlock() + return nil, err + } + } + r.lock.Unlock() + + return r.base.CheckReadiness(ctx, request) +} diff --git a/pkg/runner/temporary_directory_symlinking_runner_test.go b/pkg/runner/temporary_directory_symlinking_runner_test.go new file mode 100644 index 0000000..04847f3 --- /dev/null +++ b/pkg/runner/temporary_directory_symlinking_runner_test.go @@ -0,0 +1,177 @@ +package runner_test + +import ( + "context" + "os" + "path/filepath" + "testing" + + "github.com/buildbarn/bb-remote-execution/internal/mock" + runner_pb "github.com/buildbarn/bb-remote-execution/pkg/proto/runner" + "github.com/buildbarn/bb-remote-execution/pkg/runner" + "github.com/buildbarn/bb-storage/pkg/filesystem/path" + "github.com/buildbarn/bb-storage/pkg/testutil" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/emptypb" +) + +func TestTemporaryDirectorySymlinkingRunnerRun(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + buildDirectory, scopeWalker := path.EmptyBuilder.Join(path.VoidScopeWalker) + require.NoError(t, path.Resolve("/worker/build", scopeWalker)) + + t.Run("InvalidTemporaryDirectory", func(t *testing.T) { + // The temporary directory path provided by bb_worker is + // invalid. This should cause the symbolic link creation + // to fail. 
+ baseRunner := mock.NewMockRunnerServer(ctrl) + runner := runner.NewTemporaryDirectorySymlinkingRunner(baseRunner, "/hello", buildDirectory) + + _, err := runner.Run(ctx, &runner_pb.RunRequest{ + Arguments: []string{"cc", "-o", "hello.o", "hello.c"}, + WorkingDirectory: "a/root/subdir", + StdoutPath: "a/stdout", + StderrPath: "a/stderr", + InputRootDirectory: "a/root", + TemporaryDirectory: "a/\x00tmp", + }) + testutil.RequireEqualStatus(t, status.Error(codes.InvalidArgument, "Failed to resolve temporary directory: Path contains a null byte"), err) + }) + + t.Run("InvalidSymlinkPath", func(t *testing.T) { + // Failures to replace the provided path with a symbolic + // link should be propagated. + baseRunner := mock.NewMockRunnerServer(ctrl) + runner := runner.NewTemporaryDirectorySymlinkingRunner(baseRunner, "/", buildDirectory) + + _, err := runner.Run(ctx, &runner_pb.RunRequest{ + Arguments: []string{"cc", "-o", "hello.o", "hello.c"}, + WorkingDirectory: "a/root/subdir", + StdoutPath: "a/stdout", + StderrPath: "a/stderr", + InputRootDirectory: "a/root", + TemporaryDirectory: "a/tmp", + }) + testutil.RequirePrefixedStatus(t, status.Error(codes.Internal, "Failed to remove symbolic link \"/\": "), err) + }) + + t.Run("Success", func(t *testing.T) { + // Successfully replace the provided path with a + // symbolic link. The execution request should be + // forwarded to the underlying Runner. The symbolic link + // should have the right contents. 
+ request := &runner_pb.RunRequest{ + Arguments: []string{"cc", "-o", "hello.o", "hello.c"}, + WorkingDirectory: "a/root/subdir", + StdoutPath: "a/stdout", + StderrPath: "a/stderr", + InputRootDirectory: "a/root", + TemporaryDirectory: "a/tmp", + } + response := &runner_pb.RunResponse{ + ExitCode: 123, + } + + baseRunner := mock.NewMockRunnerServer(ctrl) + baseRunner.EXPECT().Run(ctx, testutil.EqProto(t, request)).Return(response, nil) + symlinkPath := filepath.Join(t.TempDir(), "symlink") + runner := runner.NewTemporaryDirectorySymlinkingRunner(baseRunner, symlinkPath, buildDirectory) + + observedResponse, err := runner.Run(ctx, request) + require.NoError(t, err) + testutil.RequireEqualProto(t, response, observedResponse) + + symlinkTarget, err := os.Readlink(symlinkPath) + require.NoError(t, err) + require.Equal(t, filepath.FromSlash("/worker/build/a/tmp"), symlinkTarget) + }) +} + +func TestTemporaryDirectorySymlinkingRunnerCheckReadiness(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + buildDirectory, scopeWalker := path.EmptyBuilder.Join(path.VoidScopeWalker) + require.NoError(t, path.Resolve("/worker/build", scopeWalker)) + + t.Run("InvalidSymlinkPath", func(t *testing.T) { + // Readiness checks should fail in case the path at + // which the symlink needs to be stored is invalid. + baseRunner := mock.NewMockRunnerServer(ctrl) + runner := runner.NewTemporaryDirectorySymlinkingRunner(baseRunner, "/", buildDirectory) + + _, err := runner.CheckReadiness(ctx, &emptypb.Empty{}) + testutil.RequirePrefixedStatus(t, status.Error(codes.Internal, "Failed to remove symbolic link \"/\": "), err) + }) + + t.Run("NoopWhenBusy", func(t *testing.T) { + // The readiness check should be a no-op in case there + // are one or more actions running. We don't want to + // change the symbolic link to point to a location for + // testing, as that would interfere with the action. 
+ request := &runner_pb.RunRequest{ + Arguments: []string{"cc", "-o", "hello.o", "hello.c"}, + WorkingDirectory: "a/root/subdir", + StdoutPath: "a/stdout", + StderrPath: "a/stderr", + InputRootDirectory: "a/root", + TemporaryDirectory: "a/tmp", + } + response := &runner_pb.RunResponse{ + ExitCode: 123, + } + + baseRunner := mock.NewMockRunnerServer(ctrl) + symlinkPath := filepath.Join(t.TempDir(), "symlink") + runner := runner.NewTemporaryDirectorySymlinkingRunner(baseRunner, symlinkPath, buildDirectory) + baseRunner.EXPECT().Run(ctx, testutil.EqProto(t, request)).DoAndReturn( + func(ctx context.Context, request *runner_pb.RunRequest) (*runner_pb.RunResponse, error) { + // At the start of the action, the + // symbolic link should point to the + // temporary directory provided by the + // worker. + symlinkTarget, err := os.Readlink(symlinkPath) + require.NoError(t, err) + require.Equal(t, filepath.FromSlash("/worker/build/a/tmp"), symlinkTarget) + + // Concurrent readiness check calls + // should still be forwarded. + baseRunner.EXPECT().CheckReadiness(ctx, testutil.EqProto(t, &emptypb.Empty{})).Return(&emptypb.Empty{}, nil) + + _, err = runner.CheckReadiness(ctx, &emptypb.Empty{}) + require.NoError(t, err) + + // The symlink should not get altered in + // the meantime. + symlinkTarget, err = os.Readlink(symlinkPath) + require.NoError(t, err) + require.Equal(t, filepath.FromSlash("/worker/build/a/tmp"), symlinkTarget) + return response, nil + }) + + observedResponse, err := runner.Run(ctx, request) + require.NoError(t, err) + testutil.RequireEqualProto(t, response, observedResponse) + }) + + t.Run("Success", func(t *testing.T) { + // In case no actions are running, readiness checks + // should cause the symbolic link to be created for + // testing purposes. 
+ baseRunner := mock.NewMockRunnerServer(ctrl) + baseRunner.EXPECT().CheckReadiness(ctx, testutil.EqProto(t, &emptypb.Empty{})).Return(&emptypb.Empty{}, nil) + symlinkPath := filepath.Join(t.TempDir(), "symlink") + runner := runner.NewTemporaryDirectorySymlinkingRunner(baseRunner, symlinkPath, buildDirectory) + + _, err := runner.CheckReadiness(ctx, &emptypb.Empty{}) + require.NoError(t, err) + + symlinkTarget, err := os.Readlink(symlinkPath) + require.NoError(t, err) + require.Equal(t, filepath.FromSlash("/nonexistent"), symlinkTarget) + }) +} diff --git a/pkg/scheduler/BUILD.bazel b/pkg/scheduler/BUILD.bazel new file mode 100644 index 0000000..bdebad3 --- /dev/null +++ b/pkg/scheduler/BUILD.bazel @@ -0,0 +1,72 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "scheduler", + srcs = ["in_memory_build_queue.go"], + importpath = "github.com/buildbarn/bb-remote-execution/pkg/scheduler", + visibility = ["//visibility:public"], + deps = [ + "//pkg/builder", + "//pkg/proto/buildqueuestate", + "//pkg/proto/remoteworker", + "//pkg/scheduler/initialsizeclass", + "//pkg/scheduler/invocation", + "//pkg/scheduler/platform", + "//pkg/scheduler/routing", + "@com_github_bazelbuild_remote_apis//build/bazel/remote/execution/v2:execution", + "@com_github_buildbarn_bb_storage//pkg/auth", + "@com_github_buildbarn_bb_storage//pkg/blobstore", + "@com_github_buildbarn_bb_storage//pkg/builder", + "@com_github_buildbarn_bb_storage//pkg/capabilities", + "@com_github_buildbarn_bb_storage//pkg/clock", + "@com_github_buildbarn_bb_storage//pkg/digest", + "@com_github_buildbarn_bb_storage//pkg/otel", + "@com_github_buildbarn_bb_storage//pkg/util", + "@com_github_google_uuid//:uuid", + "@com_github_prometheus_client_golang//prometheus", + "@com_google_cloud_go_longrunning//autogen/longrunningpb", + "@org_golang_google_genproto_googleapis_rpc//status", + "@org_golang_google_grpc//codes", + "@org_golang_google_grpc//metadata", + 
"@org_golang_google_grpc//status", + "@org_golang_google_protobuf//proto", + "@org_golang_google_protobuf//types/known/anypb", + "@org_golang_google_protobuf//types/known/durationpb", + "@org_golang_google_protobuf//types/known/emptypb", + "@org_golang_google_protobuf//types/known/timestamppb", + ], +) + +go_test( + name = "scheduler_test", + srcs = ["in_memory_build_queue_test.go"], + deps = [ + ":scheduler", + "//internal/mock", + "//pkg/proto/buildqueuestate", + "//pkg/proto/remoteworker", + "//pkg/scheduler/invocation", + "//pkg/scheduler/platform", + "@com_github_bazelbuild_remote_apis//build/bazel/remote/execution/v2:execution", + "@com_github_buildbarn_bb_storage//pkg/auth", + "@com_github_buildbarn_bb_storage//pkg/blobstore/buffer", + "@com_github_buildbarn_bb_storage//pkg/builder", + "@com_github_buildbarn_bb_storage//pkg/clock", + "@com_github_buildbarn_bb_storage//pkg/digest", + "@com_github_buildbarn_bb_storage//pkg/testutil", + "@com_github_golang_mock//gomock", + "@com_github_google_uuid//:uuid", + "@com_github_stretchr_testify//require", + "@com_google_cloud_go_longrunning//autogen/longrunningpb", + "@org_golang_google_grpc//:go_default_library", + "@org_golang_google_grpc//codes", + "@org_golang_google_grpc//metadata", + "@org_golang_google_grpc//status", + "@org_golang_google_grpc//test/bufconn", + "@org_golang_google_protobuf//proto", + "@org_golang_google_protobuf//types/known/anypb", + "@org_golang_google_protobuf//types/known/durationpb", + "@org_golang_google_protobuf//types/known/emptypb", + "@org_golang_google_protobuf//types/known/timestamppb", + ], +) diff --git a/pkg/scheduler/in_memory_build_queue.go b/pkg/scheduler/in_memory_build_queue.go new file mode 100644 index 0000000..f89eb7f --- /dev/null +++ b/pkg/scheduler/in_memory_build_queue.go @@ -0,0 +1,3179 @@ +package scheduler + +import ( + "container/heap" + "context" + "encoding/json" + "fmt" + "math" + "sort" + "strconv" + "sync" + "time" + + remoteexecution 
"github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + re_builder "github.com/buildbarn/bb-remote-execution/pkg/builder" + "github.com/buildbarn/bb-remote-execution/pkg/proto/buildqueuestate" + "github.com/buildbarn/bb-remote-execution/pkg/proto/remoteworker" + "github.com/buildbarn/bb-remote-execution/pkg/scheduler/initialsizeclass" + scheduler_invocation "github.com/buildbarn/bb-remote-execution/pkg/scheduler/invocation" + "github.com/buildbarn/bb-remote-execution/pkg/scheduler/platform" + "github.com/buildbarn/bb-remote-execution/pkg/scheduler/routing" + "github.com/buildbarn/bb-storage/pkg/auth" + "github.com/buildbarn/bb-storage/pkg/blobstore" + "github.com/buildbarn/bb-storage/pkg/builder" + "github.com/buildbarn/bb-storage/pkg/capabilities" + "github.com/buildbarn/bb-storage/pkg/clock" + "github.com/buildbarn/bb-storage/pkg/digest" + "github.com/buildbarn/bb-storage/pkg/otel" + "github.com/buildbarn/bb-storage/pkg/util" + "github.com/google/uuid" + "github.com/prometheus/client_golang/prometheus" + + status_pb "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/anypb" + "google.golang.org/protobuf/types/known/durationpb" + "google.golang.org/protobuf/types/known/emptypb" + "google.golang.org/protobuf/types/known/timestamppb" + + "cloud.google.com/go/longrunning/autogen/longrunningpb" +) + +var ( + inMemoryBuildQueuePrometheusMetrics sync.Once + + inMemoryBuildQueueInFlightDeduplicationsTotal = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "buildbarn", + Subsystem: "builder", + Name: "in_memory_build_queue_in_flight_deduplications_total", + Help: "Number of times an Execute() request of a cacheable action was performed, and whether it was in-flight deduplicated against an existing task.", + }, + []string{"instance_name_prefix", "platform", 
"size_class", "outcome"}) + + inMemoryBuildQueueInvocationsCreatedTotal = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "buildbarn", + Subsystem: "builder", + Name: "in_memory_build_queue_invocations_created_total", + Help: "Number of times an invocation object was created by creating a size class queue or scheduling a task through Execute().", + }, + []string{"instance_name_prefix", "platform", "size_class", "depth"}) + inMemoryBuildQueueInvocationsActivatedTotal = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "buildbarn", + Subsystem: "builder", + Name: "in_memory_build_queue_invocations_activated_total", + Help: "Number of times an invocation object transitioned from being idle to having queued or executing operations.", + }, + []string{"instance_name_prefix", "platform", "size_class", "depth"}) + inMemoryBuildQueueInvocationsDeactivatedTotal = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "buildbarn", + Subsystem: "builder", + Name: "in_memory_build_queue_invocations_deactivated_total", + Help: "Number of times an invocation object transitioned from having queued or executing operations to being idle.", + }, + []string{"instance_name_prefix", "platform", "size_class", "depth"}) + inMemoryBuildQueueInvocationsRemovedTotal = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "buildbarn", + Subsystem: "builder", + Name: "in_memory_build_queue_invocations_removed_total", + Help: "Number of times an invocation object was removed.", + }, + []string{"instance_name_prefix", "platform", "size_class", "depth"}) + + inMemoryBuildQueueTasksScheduledTotal = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "buildbarn", + Subsystem: "builder", + Name: "in_memory_build_queue_tasks_scheduled_total", + Help: "Number of times tasks were scheduled, either by calling Execute() or through initial size class selection retries.", + }, + []string{"instance_name_prefix", "platform", "size_class", 
"assignment", "do_not_cache"}) + inMemoryBuildQueueTasksQueuedDurationSeconds = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "buildbarn", + Subsystem: "builder", + Name: "in_memory_build_queue_tasks_queued_duration_seconds", + Help: "Time in seconds that tasks were queued before executing.", + Buckets: util.DecimalExponentialBuckets(-3, 6, 2), + }, + []string{"instance_name_prefix", "platform", "size_class"}) + inMemoryBuildQueueTasksExecutingDurationSeconds = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "buildbarn", + Subsystem: "builder", + Name: "in_memory_build_queue_tasks_executing_duration_seconds", + Help: "Time in seconds that tasks were executing before completing.", + Buckets: util.DecimalExponentialBuckets(-3, 6, 2), + }, + []string{"instance_name_prefix", "platform", "size_class", "result", "grpc_code"}) + inMemoryBuildQueueTasksExecutingRetries = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "buildbarn", + Subsystem: "builder", + Name: "in_memory_build_queue_tasks_executing_retries", + Help: "Number of times that tasks were retried before completing.", + Buckets: prometheus.LinearBuckets(0, 1, 11), + }, + []string{"instance_name_prefix", "platform", "size_class", "result", "grpc_code"}) + inMemoryBuildQueueTasksCompletedDurationSeconds = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "buildbarn", + Subsystem: "builder", + Name: "in_memory_build_queue_tasks_completed_duration_seconds", + Help: "Time in seconds that tasks were completed before being removed.", + Buckets: util.DecimalExponentialBuckets(-3, 6, 2), + }, + []string{"instance_name_prefix", "platform", "size_class"}) + + inMemoryBuildQueueWorkersCreatedTotal = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "buildbarn", + Subsystem: "builder", + Name: "in_memory_build_queue_workers_created_total", + Help: "Number of workers created by Synchronize().", + }, + 
[]string{"instance_name_prefix", "platform", "size_class"}) + inMemoryBuildQueueWorkersRemovedTotal = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "buildbarn", + Subsystem: "builder", + Name: "in_memory_build_queue_workers_removed_total", + Help: "Number of workers removed due to expiration.", + }, + []string{"instance_name_prefix", "platform", "size_class", "state"}) + + inMemoryBuildQueueWorkerInvocationStickinessRetained = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "buildbarn", + Subsystem: "builder", + Name: "in_memory_build_queue_worker_invocation_stickiness_retained", + Help: "How many levels of worker invocation stickiness were respected, as configured through worker_invocation_stickiness_limits.", + Buckets: prometheus.LinearBuckets(0, 1, 11), + }, + []string{"instance_name_prefix", "platform", "size_class"}) +) + +// InMemoryBuildQueueConfiguration contains all the tunable settings of +// the InMemoryBuildQueue. +type InMemoryBuildQueueConfiguration struct { + // ExecutionUpdateInterval specifies how frequently Execute() + // and WaitExecution() should stream updates for a task to the + // client. + ExecutionUpdateInterval time.Duration + + // OperationWithNoWaitersTimeout specifies how long an operation + // may remain registered without having a single client calling + // Execute() or WaitExecution() on it. + OperationWithNoWaitersTimeout time.Duration + + // PlatformQueueWithNoWorkersTimeout specifies how long a + // platform may remain registered by InMemoryBuildQueue when no + // Synchronize() calls are received for any workers. + PlatformQueueWithNoWorkersTimeout time.Duration + + // BusyWorkerSynchronizationInterval specifies how frequently + // busy workers should be instructed to report their state, even + // if no changes to their running state occurred. 
+ BusyWorkerSynchronizationInterval time.Duration + + // GetIdleWorkerSynchronizationInterval returns the maximum + // amount of time a synchronization performed by a worker + // against the scheduler may block. Once this amount of time is + // passed, the worker is instructed to resynchronize, as a form + // of health checking. + // + // Implementations may add jitter to this value to ensure + // synchronization requests get smeared out over time. + GetIdleWorkerSynchronizationInterval func() time.Duration + + // WorkerTaskRetryCount specifies how many times a worker may + // redundantly request that a single task is started. By + // limiting this, we can prevent a single task from + // crash-looping a worker indefinitely. + WorkerTaskRetryCount int + + // WorkerWithNoSynchronizationsTimeout specifies how long a + // worker may remain registered by InMemoryBuildQueue when no + // Synchronize() calls are received. + WorkerWithNoSynchronizationsTimeout time.Duration +} + +// InMemoryBuildQueue implements a BuildQueue that can distribute +// requests through the Remote Worker protocol to worker processes. All +// of the state of the build queue (i.e., list of queued execution +// requests and list of workers) is kept in memory. +type InMemoryBuildQueue struct { + capabilities.Provider + + contentAddressableStorage blobstore.BlobAccess + clock clock.Clock + uuidGenerator util.UUIDGenerator + configuration *InMemoryBuildQueueConfiguration + platformQueueAbsenceHardFailureTime time.Time + maximumMessageSizeBytes int + actionRouter routing.ActionRouter + + lock sync.Mutex + platformQueuesTrie *platform.Trie + platformQueues []*platformQueue + sizeClassQueues map[sizeClassKey]*sizeClassQueue + + // Bookkeeping for WaitExecution(). This call permits us to + // re-attach to operations by name. It also allows us to obtain + // results for historical actions, up to a certain degree. 
+ operationsNameMap map[string]*operation + + // Map of each task that does not have DoNotCache set by digest. + // This map is used to deduplicate concurrent requests for the + // same action. + inFlightDeduplicationMap map[digest.Digest]*task + + // Time value that is updated during every mutation of build + // queue state. This reduces the number of clock accesses, while + // also making it easier to test this code. + now time.Time + + // Binary heap containing closures that purge stale workers, + // platform queues and operations. + cleanupQueue cleanupQueue + + // Authorizer used to allow/deny access for certain users + // to perform Execute and WaitExecution calls. + executeAuthorizer auth.Authorizer + + // Authorizer used to allow/deny access for certain users to + // perform AddDrain and RemoveDrain calls. + modifyDrainsAuthorizer auth.Authorizer + + // Authorizer used to allow/deny access for certain users to + // perform KillOperations calls. + killOperationsAuthorizer auth.Authorizer +} + +var inMemoryBuildQueueCapabilitiesProvider = capabilities.NewStaticProvider(&remoteexecution.ServerCapabilities{ + ExecutionCapabilities: &remoteexecution.ExecutionCapabilities{ + DigestFunction: remoteexecution.DigestFunction_SHA256, + DigestFunctions: digest.SupportedDigestFunctions, + ExecEnabled: true, + ExecutionPriorityCapabilities: &remoteexecution.PriorityCapabilities{ + Priorities: []*remoteexecution.PriorityCapabilities_PriorityRange{ + {MinPriority: math.MinInt32, MaxPriority: math.MaxInt32}, + }, + }, + }, +}) + +// NewInMemoryBuildQueue creates a new InMemoryBuildQueue that is in the +// initial state. It does not have any queues, workers or queued +// execution requests. All of these are created by sending it RPCs. 
+func NewInMemoryBuildQueue(contentAddressableStorage blobstore.BlobAccess, clock clock.Clock, uuidGenerator util.UUIDGenerator, configuration *InMemoryBuildQueueConfiguration, maximumMessageSizeBytes int, actionRouter routing.ActionRouter, executeAuthorizer, modifyDrainsAuthorizer, killOperationsAuthorizer auth.Authorizer) *InMemoryBuildQueue { + inMemoryBuildQueuePrometheusMetrics.Do(func() { + prometheus.MustRegister(inMemoryBuildQueueInFlightDeduplicationsTotal) + + prometheus.MustRegister(inMemoryBuildQueueInvocationsCreatedTotal) + prometheus.MustRegister(inMemoryBuildQueueInvocationsActivatedTotal) + prometheus.MustRegister(inMemoryBuildQueueInvocationsDeactivatedTotal) + prometheus.MustRegister(inMemoryBuildQueueInvocationsRemovedTotal) + + prometheus.MustRegister(inMemoryBuildQueueTasksScheduledTotal) + prometheus.MustRegister(inMemoryBuildQueueTasksQueuedDurationSeconds) + prometheus.MustRegister(inMemoryBuildQueueTasksExecutingDurationSeconds) + prometheus.MustRegister(inMemoryBuildQueueTasksExecutingRetries) + prometheus.MustRegister(inMemoryBuildQueueTasksCompletedDurationSeconds) + + prometheus.MustRegister(inMemoryBuildQueueWorkersCreatedTotal) + prometheus.MustRegister(inMemoryBuildQueueWorkersRemovedTotal) + + prometheus.MustRegister(inMemoryBuildQueueWorkerInvocationStickinessRetained) + }) + + return &InMemoryBuildQueue{ + Provider: capabilities.NewAuthorizingProvider(inMemoryBuildQueueCapabilitiesProvider, executeAuthorizer), + + contentAddressableStorage: contentAddressableStorage, + clock: clock, + uuidGenerator: uuidGenerator, + configuration: configuration, + platformQueueAbsenceHardFailureTime: clock.Now().Add(configuration.PlatformQueueWithNoWorkersTimeout), + maximumMessageSizeBytes: maximumMessageSizeBytes, + actionRouter: actionRouter, + platformQueuesTrie: platform.NewTrie(), + sizeClassQueues: map[sizeClassKey]*sizeClassQueue{}, + operationsNameMap: map[string]*operation{}, + inFlightDeduplicationMap: map[digest.Digest]*task{}, + 
executeAuthorizer: executeAuthorizer, + modifyDrainsAuthorizer: modifyDrainsAuthorizer, + killOperationsAuthorizer: killOperationsAuthorizer, + } +} + +var ( + _ builder.BuildQueue = (*InMemoryBuildQueue)(nil) + _ remoteworker.OperationQueueServer = (*InMemoryBuildQueue)(nil) + _ buildqueuestate.BuildQueueStateServer = (*InMemoryBuildQueue)(nil) +) + +// RegisterPredeclaredPlatformQueue adds a platform queue to +// InMemoryBuildQueue that remains present, regardless of whether +// workers appear. +// +// The main purpose of this method is to create platform queues that are +// capable of using multiple size classes, as a maximum size class and +// initialsizeclass.Analyzer can be provided for specifying how +// operations are assigned to size classes. +func (bq *InMemoryBuildQueue) RegisterPredeclaredPlatformQueue(instanceNamePrefix digest.InstanceName, platformMessage *remoteexecution.Platform, workerInvocationStickinessLimits []time.Duration, maximumQueuedBackgroundLearningOperations int, backgroundLearningOperationPriority int32, maximumSizeClass uint32) error { + platformKey, err := platform.NewKey(instanceNamePrefix, platformMessage) + if err != nil { + return err + } + + bq.enter(bq.clock.Now()) + defer bq.leave() + + if bq.platformQueuesTrie.ContainsExact(platformKey) { + return status.Error(codes.AlreadyExists, "A queue with the same instance name prefix or platform already exists") + } + + pq := bq.addPlatformQueue(platformKey, workerInvocationStickinessLimits, maximumQueuedBackgroundLearningOperations, backgroundLearningOperationPriority) + pq.addSizeClassQueue(bq, maximumSizeClass, false) + return nil +} + +// getRequestMetadata extracts the RequestMetadata message stored in the +// gRPC request headers. This message contains the invocation ID that is +// used to group incoming requests by client, so that tasks can be +// scheduled across workers fairly. 
+func getRequestMetadata(ctx context.Context) *remoteexecution.RequestMetadata {
+	if md, ok := metadata.FromIncomingContext(ctx); ok {
+		for _, requestMetadataBin := range md.Get("build.bazel.remote.execution.v2.requestmetadata-bin") {
+			var requestMetadata remoteexecution.RequestMetadata
+			if err := proto.Unmarshal([]byte(requestMetadataBin), &requestMetadata); err == nil {
+				return &requestMetadata
+			}
+		}
+	}
+	return nil
+}
+
+// Execute an action by scheduling it in the build queue. This call
+// blocks until the action is completed.
+func (bq *InMemoryBuildQueue) Execute(in *remoteexecution.ExecuteRequest, out remoteexecution.Execution_ExecuteServer) error {
+	// Fetch the action corresponding to the execute request.
+	// Ideally, a scheduler is oblivious of what this message looks
+	// like, if it weren't for the fact that DoNotCache and Platform
+	// are used for scheduling decisions.
+	//
+	// To prevent loading this message from the Content Addressable
+	// Storage (CAS) multiple times, the scheduler holds on to it
+	// and passes it on to the workers.
+ ctx := out.Context() + instanceName, err := digest.NewInstanceName(in.InstanceName) + if err != nil { + return util.StatusWrapf(err, "Invalid instance name %#v", in.InstanceName) + } + + if err := auth.AuthorizeSingleInstanceName(ctx, bq.executeAuthorizer, instanceName); err != nil { + return util.StatusWrap(err, "Authorization") + } + + digestFunction, err := instanceName.GetDigestFunction(in.DigestFunction, len(in.ActionDigest.GetHash())) + if err != nil { + return err + } + actionDigest, err := digestFunction.NewDigestFromProto(in.ActionDigest) + if err != nil { + return util.StatusWrap(err, "Failed to extract digest for action") + } + actionMessage, err := bq.contentAddressableStorage.Get(ctx, actionDigest).ToProto(&remoteexecution.Action{}, bq.maximumMessageSizeBytes) + if err != nil { + return util.StatusWrap(err, "Failed to obtain action") + } + action := actionMessage.(*remoteexecution.Action) + platformKey, err := platform.NewKey(instanceName, action.Platform) + if err != nil { + return err + } + + // Forward the client-provided authentication and request + // metadata, so that the worker logs it. 
+ auxiliaryMetadata := make([]*anypb.Any, 0, 2) + if authenticationMetadata, shouldDisplay := auth.AuthenticationMetadataFromContext(ctx).GetPublicProto(); shouldDisplay { + authenticationMetadataAny, err := anypb.New(authenticationMetadata) + if err != nil { + return util.StatusWrapWithCode(err, codes.InvalidArgument, "Failed to marshal authentication metadata") + } + auxiliaryMetadata = append(auxiliaryMetadata, authenticationMetadataAny) + } + requestMetadata := getRequestMetadata(ctx) + if requestMetadata != nil { + requestMetadataAny, err := anypb.New(requestMetadata) + if err != nil { + return util.StatusWrapWithCode(err, codes.InvalidArgument, "Failed to marshal request metadata") + } + auxiliaryMetadata = append(auxiliaryMetadata, requestMetadataAny) + } + w3cTraceContext := otel.W3CTraceContextFromContext(ctx) + + platformKey, invocationKeys, initialSizeClassSelector, err := bq.actionRouter.RouteAction(ctx, actionDigest.GetDigestFunction(), action, requestMetadata) + if err != nil { + return util.StatusWrap(err, "Failed to route action") + } + + bq.enter(bq.clock.Now()) + defer bq.leave() + + if t, ok := bq.inFlightDeduplicationMap[actionDigest]; ok { + // A task for the same action digest already exists + // against which we may deduplicate. No need to create a + // task. + initialSizeClassSelector.Abandoned() + scq := t.getCurrentSizeClassQueue() + i := scq.getOrCreateInvocation(bq, invocationKeys) + if o, ok := t.operations[i]; ok { + // Task is already associated with the current + // invocation. Simply wait on the operation that + // already exists. + scq.inFlightDeduplicationsSameInvocation.Inc() + return o.waitExecution(bq, out) + } + + // Create an additional operation for this task. + o := t.newOperation(bq, in.ExecutionPolicy.GetPriority(), i, false) + switch t.getStage() { + case remoteexecution.ExecutionStage_QUEUED: + // The request has been deduplicated against a + // task that is still queued. 
+ o.enqueue() + case remoteexecution.ExecutionStage_EXECUTING: + // The request has been deduplicated against a + // task that is already in the executing stage. + i.incrementExecutingWorkersCount(bq, t.currentWorker) + default: + panic("Task in unexpected stage") + } + scq.inFlightDeduplicationsOtherInvocation.Inc() + return o.waitExecution(bq, out) + } + + // We need to create a new task. For that we first need to + // obtain the size class queue in which we're going to place it. + platformQueueIndex := bq.platformQueuesTrie.GetLongestPrefix(platformKey) + if platformQueueIndex < 0 { + code := codes.FailedPrecondition + if bq.now.Before(bq.platformQueueAbsenceHardFailureTime) { + // The scheduler process started not too long + // ago. It may be the case that clients ended up + // connecting to the scheduler before workers + // got a chance to synchronize. + // + // Prevent builds from failing unnecessarily by + // providing a brief window of time where + // soft errors are returned to the client, + // giving workers time to reconnect. + code = codes.Unavailable + } + initialSizeClassSelector.Abandoned() + return status.Errorf(code, "No workers exist for instance name prefix %#v platform %s", platformKey.GetInstanceNamePrefix().String(), platformKey.GetPlatformString()) + } + pq := bq.platformQueues[platformQueueIndex] + sizeClassIndex, expectedDuration, timeout, initialSizeClassLearner := initialSizeClassSelector.Select(pq.sizeClasses) + scq := pq.sizeClassQueues[sizeClassIndex] + + // Create the task. 
+ actionWithCustomTimeout := *action + actionWithCustomTimeout.Timeout = durationpb.New(timeout) + t := &task{ + operations: map[*invocation]*operation{}, + actionDigest: actionDigest, + desiredState: remoteworker.DesiredState_Executing{ + ActionDigest: in.ActionDigest, + Action: &actionWithCustomTimeout, + QueuedTimestamp: bq.getCurrentTime(), + AuxiliaryMetadata: auxiliaryMetadata, + InstanceNameSuffix: pq.instanceNamePatcher.PatchInstanceName(instanceName).String(), + DigestFunction: digestFunction.GetEnumValue(), + W3CTraceContext: w3cTraceContext, + }, + targetID: requestMetadata.GetTargetId(), + expectedDuration: expectedDuration, + initialSizeClassLearner: initialSizeClassLearner, + stageChangeWakeup: make(chan struct{}), + } + if !action.DoNotCache { + bq.inFlightDeduplicationMap[actionDigest] = t + scq.inFlightDeduplicationsNew.Inc() + } + i := scq.getOrCreateInvocation(bq, invocationKeys) + o := t.newOperation(bq, in.ExecutionPolicy.GetPriority(), i, false) + t.schedule(bq) + return o.waitExecution(bq, out) +} + +// WaitExecution attaches to an existing operation that was created by +// Execute(). This call can be used by the client to reattach to an +// operation in case of network failure. +func (bq *InMemoryBuildQueue) WaitExecution(in *remoteexecution.WaitExecutionRequest, out remoteexecution.Execution_WaitExecutionServer) error { + bq.enter(bq.clock.Now()) + for { + o, ok := bq.operationsNameMap[in.Name] + if !ok { + bq.leave() + return status.Errorf(codes.NotFound, "Operation with name %#v not found", in.Name) + } + instanceName := o.task.actionDigest.GetInstanceName() + + // Ensure that the caller is permitted to access this operation. + // This must be done without holding any locks, as the authorizer + // may block. 
+ bq.leave() + if err := auth.AuthorizeSingleInstanceName(out.Context(), bq.executeAuthorizer, instanceName); err != nil { + return util.StatusWrap(err, "Authorization") + } + + bq.enter(bq.clock.Now()) + if bq.operationsNameMap[in.Name] == o { + defer bq.leave() + return o.waitExecution(bq, out) + } + } +} + +// Synchronize the state of a worker with the scheduler. This call is +// used by a worker to report the completion of an operation and to +// request more work. +func (bq *InMemoryBuildQueue) Synchronize(ctx context.Context, request *remoteworker.SynchronizeRequest) (*remoteworker.SynchronizeResponse, error) { + instanceNamePrefix, err := digest.NewInstanceName(request.InstanceNamePrefix) + if err != nil { + return nil, util.StatusWrapf(err, "Invalid instance name %#v", request.InstanceNamePrefix) + } + platformKey, err := platform.NewKey(instanceNamePrefix, request.Platform) + if err != nil { + return nil, err + } + workerKey := newWorkerKey(request.WorkerId) + + bq.enter(bq.clock.Now()) + defer bq.leave() + + sizeClassKey := sizeClassKey{ + platformKey: platformKey, + sizeClass: request.SizeClass, + } + var pq *platformQueue + scq, ok := bq.sizeClassQueues[sizeClassKey] + if ok { + // Found an existing size class queue. Prevent the + // platform queue from being garbage collected, as it + // will now have an active worker. + pq = scq.platformQueue + if scq.cleanupKey.isActive() { + bq.cleanupQueue.remove(scq.cleanupKey) + } + } else { + if platformQueueIndex := bq.platformQueuesTrie.GetExact(platformKey); platformQueueIndex >= 0 { + // Worker for this type of instance/platform pair has + // been observed before, but not for this size class. + // Create a new size class queue. + // + // Only allow this to take place if the platform + // queue is predeclared, as the build results + // are non-deterministic otherwise. 
+ pq = bq.platformQueues[platformQueueIndex] + if maximumSizeClassQueue := pq.sizeClassQueues[len(pq.sizeClassQueues)-1]; maximumSizeClassQueue.mayBeRemoved { + return nil, status.Error(codes.InvalidArgument, "Cannot add multiple size classes to a platform queue that is not predeclared") + } else if maximumSizeClass := pq.sizeClasses[len(pq.sizeClasses)-1]; request.SizeClass > maximumSizeClass { + return nil, status.Errorf(codes.InvalidArgument, "Worker provided size class %d, which exceeds the predeclared maximum of %d", request.SizeClass, maximumSizeClass) + } else if maximumSizeClass > 0 && request.SizeClass < 1 { + return nil, status.Error(codes.InvalidArgument, "Worker did not provide a size class, even though this platform queue uses them") + } + } else { + // Worker for this type of instance/platform + // pair has not been observed before. Create a + // new platform queue containing a single size + // class queue. + pq = bq.addPlatformQueue(platformKey, nil, 0, 0) + } + scq = pq.addSizeClassQueue(bq, request.SizeClass, true) + } + + w, ok := scq.workers[workerKey] + if ok { + // Prevent the worker from being garbage collected while + // synchronization is happening. + if !w.cleanupKey.isActive() { + return nil, status.Error(codes.ResourceExhausted, "Worker is already synchronizing with the scheduler") + } + bq.cleanupQueue.remove(w.cleanupKey) + } else { + // First time we're seeing this worker. As this worker + // has never run an action before (that we know about), + // associate it with the root invocation. + i := &scq.rootInvocation + w = &worker{ + workerKey: workerKey, + lastInvocation: i, + listIndex: -1, + stickinessStartingTimes: make([]time.Time, len(pq.workerInvocationStickinessLimits)), + } + i.idleWorkersCount++ + scq.workers[workerKey] = w + scq.workersCreatedTotal.Inc() + } + + // Install cleanup handlers to ensure stale workers and queues + // are purged after sufficient amount of time. 
+ defer func() { + removalTime := bq.now.Add(bq.configuration.WorkerWithNoSynchronizationsTimeout) + bq.cleanupQueue.add(&w.cleanupKey, removalTime, func() { + scq.removeStaleWorker(bq, workerKey, removalTime) + }) + }() + + // Process the current state of the worker to determine what it + // should be doing next. + currentState := request.CurrentState + if currentState == nil { + return nil, status.Error(codes.InvalidArgument, "Worker did not provide its current state") + } + switch workerState := currentState.WorkerState.(type) { + case *remoteworker.CurrentState_Idle: + return w.getCurrentOrNextTask(ctx, bq, scq, request.WorkerId, request.PreferBeingIdle) + case *remoteworker.CurrentState_Executing_: + executing := workerState.Executing + if executing.ActionDigest == nil { + return nil, status.Error(codes.InvalidArgument, "Worker is executing, but provided no action digest") + } + switch executionState := executing.ExecutionState.(type) { + case *remoteworker.CurrentState_Executing_Completed: + return w.completeTask(ctx, bq, scq, request.WorkerId, executing.ActionDigest, executionState.Completed, request.PreferBeingIdle) + default: + return w.updateTask(bq, scq, request.WorkerId, executing.ActionDigest, request.PreferBeingIdle) + } + default: + return nil, status.Error(codes.InvalidArgument, "Worker provided an unknown current state") + } +} + +// ListPlatformQueues returns a list of all platform queues currently +// managed by the scheduler. +func (bq *InMemoryBuildQueue) ListPlatformQueues(ctx context.Context, request *emptypb.Empty) (*buildqueuestate.ListPlatformQueuesResponse, error) { + bq.enter(bq.clock.Now()) + defer bq.leave() + + // Obtain platform queue IDs in sorted order. + platformQueueList := append(platformQueueList(nil), bq.platformQueues...) + sort.Sort(platformQueueList) + + // Extract status. 
+ platformQueues := make([]*buildqueuestate.PlatformQueueState, 0, len(bq.platformQueues)) + for _, pq := range platformQueueList { + sizeClassQueues := make([]*buildqueuestate.SizeClassQueueState, 0, len(pq.sizeClassQueues)) + for i, scq := range pq.sizeClassQueues { + sizeClassQueues = append(sizeClassQueues, &buildqueuestate.SizeClassQueueState{ + SizeClass: pq.sizeClasses[i], + Timeout: bq.cleanupQueue.getTimestamp(scq.cleanupKey), + RootInvocation: scq.rootInvocation.getInvocationState(bq), + WorkersCount: uint32(len(scq.workers)), + DrainsCount: uint32(len(scq.drains)), + }) + } + platformQueues = append(platformQueues, &buildqueuestate.PlatformQueueState{ + Name: pq.platformKey.GetPlatformQueueName(), + SizeClassQueues: sizeClassQueues, + }) + } + return &buildqueuestate.ListPlatformQueuesResponse{ + PlatformQueues: platformQueues, + }, nil +} + +// GetOperation returns detailed information about a single operation +// identified by name. +func (bq *InMemoryBuildQueue) GetOperation(ctx context.Context, request *buildqueuestate.GetOperationRequest) (*buildqueuestate.GetOperationResponse, error) { + bq.enter(bq.clock.Now()) + defer bq.leave() + + o, ok := bq.operationsNameMap[request.OperationName] + if !ok { + return nil, status.Errorf(codes.NotFound, "Operation %#v not found", request.OperationName) + } + s := o.getOperationState(bq) + s.Name = "" + return &buildqueuestate.GetOperationResponse{ + Operation: s, + }, nil +} + +// getPaginationInfo uses binary searching to determine which +// information should be returned by InMemoryBuildQueue's List*() +// operations. 
+func getPaginationInfo(n int, pageSize uint32, f func(int) bool) (*buildqueuestate.PaginationInfo, int) { + startIndex := uint32(sort.Search(n, f)) + endIndex := uint32(n) + if endIndex-startIndex > pageSize { + endIndex = startIndex + pageSize + } + return &buildqueuestate.PaginationInfo{ + StartIndex: startIndex, + TotalEntries: uint32(n), + }, int(endIndex) +} + +// KillOperations requests that one or more operations that are +// currently QUEUED or EXECUTING are moved the COMPLETED stage +// immediately. The next time any worker associated with the operation +// contacts the scheduler, it is requested to stop executing the +// operation. +func (bq *InMemoryBuildQueue) KillOperations(ctx context.Context, request *buildqueuestate.KillOperationsRequest) (*emptypb.Empty, error) { + switch filter := request.Filter.GetType().(type) { + case *buildqueuestate.KillOperationsRequest_Filter_OperationName: + for { + // Extract the instance name prefix of the size + // class queue to which the operation belongs. + bq.enter(bq.clock.Now()) + o, ok := bq.operationsNameMap[filter.OperationName] + if !ok { + bq.leave() + return nil, status.Errorf(codes.NotFound, "Operation %#v not found", filter.OperationName) + } + instanceNamePrefix := o.task.getCurrentSizeClassQueue().getKey().platformKey.GetInstanceNamePrefix() + bq.leave() + + // Perform authorization checks without holding + // any locks. + if err := auth.AuthorizeSingleInstanceName(ctx, bq.killOperationsAuthorizer, instanceNamePrefix); err != nil { + return nil, util.StatusWrap(err, "Authorization") + } + + // Kill the operation if it still exists after + // reacquiring the lock. Otherwise we retry. 
+ bq.enter(bq.clock.Now()) + if o == bq.operationsNameMap[filter.OperationName] { + o.task.complete(bq, &remoteexecution.ExecuteResponse{Status: request.Status}, false) + bq.leave() + return &emptypb.Empty{}, nil + } + bq.leave() + } + case *buildqueuestate.KillOperationsRequest_Filter_SizeClassQueueWithoutWorkers: + sizeClassKey, err := newSizeClassKeyFromName(filter.SizeClassQueueWithoutWorkers) + if err != nil { + return nil, err + } + if err := auth.AuthorizeSingleInstanceName(ctx, bq.killOperationsAuthorizer, sizeClassKey.platformKey.GetInstanceNamePrefix()); err != nil { + return nil, util.StatusWrap(err, "Authorization") + } + + bq.enter(bq.clock.Now()) + defer bq.leave() + + scq, ok := bq.sizeClassQueues[sizeClassKey] + if !ok { + return nil, status.Error(codes.NotFound, "Size class queue not found") + } + if len(scq.workers) > 0 { + return nil, status.Error(codes.FailedPrecondition, "Cannot kill operations, as size class queue still has workers") + } + scq.rootInvocation.cancelAllQueuedOperations(bq, request.Status) + return &emptypb.Empty{}, nil + default: + return nil, status.Error(codes.InvalidArgument, "Unknown filter provided") + } +} + +// ListOperations returns detailed information about all of the +// operations tracked by the InMemoryBuildQueue. +func (bq *InMemoryBuildQueue) ListOperations(ctx context.Context, request *buildqueuestate.ListOperationsRequest) (*buildqueuestate.ListOperationsResponse, error) { + var invocationKey *scheduler_invocation.Key + if request.FilterInvocationId != nil { + key, err := scheduler_invocation.NewKey(request.FilterInvocationId) + if err != nil { + return nil, util.StatusWrap(err, "Invalid invocation key") + } + invocationKey = &key + } + + bq.enter(bq.clock.Now()) + defer bq.leave() + + // Obtain operation names in sorted order. 
+ nameList := make([]string, 0, len(bq.operationsNameMap)) + for name, o := range bq.operationsNameMap { + if (invocationKey == nil || o.invocation.hasInvocationKey(*invocationKey)) && + (request.FilterStage == remoteexecution.ExecutionStage_UNKNOWN || request.FilterStage == o.task.getStage()) { + nameList = append(nameList, name) + } + } + sort.Strings(nameList) + paginationInfo, endIndex := getPaginationInfo(len(nameList), request.PageSize, func(i int) bool { + return request.StartAfter == nil || nameList[i] > request.StartAfter.OperationName + }) + + // Extract status. + nameListRegion := nameList[paginationInfo.StartIndex:endIndex] + operations := make([]*buildqueuestate.OperationState, 0, len(nameListRegion)) + for _, name := range nameListRegion { + o := bq.operationsNameMap[name] + operations = append(operations, o.getOperationState(bq)) + } + return &buildqueuestate.ListOperationsResponse{ + Operations: operations, + PaginationInfo: paginationInfo, + }, nil +} + +func (bq *InMemoryBuildQueue) getSizeClassQueueByName(name *buildqueuestate.SizeClassQueueName) (*sizeClassQueue, error) { + sizeClassKey, err := newSizeClassKeyFromName(name) + if err != nil { + return nil, err + } + scq, ok := bq.sizeClassQueues[sizeClassKey] + if !ok { + return nil, status.Error(codes.NotFound, "Size class queue not found") + } + return scq, nil +} + +func (bq *InMemoryBuildQueue) getInvocationByName(name *buildqueuestate.InvocationName) (*invocation, *sizeClassQueue, error) { + if name == nil { + return nil, nil, status.Error(codes.InvalidArgument, "No invocation name provided") + } + scq, err := bq.getSizeClassQueueByName(name.SizeClassQueueName) + if err != nil { + return nil, nil, err + } + i := &scq.rootInvocation + for idx, id := range name.Ids { + key, err := scheduler_invocation.NewKey(id) + if err != nil { + return nil, nil, util.StatusWrapf(err, "Invalid invocation key at index %d", idx) + } + var ok bool + i, ok = i.children[key] + if !ok { + return nil, nil, 
status.Error(codes.NotFound, "Invocation not found") + } + } + return i, scq, nil +} + +// ListInvocationChildren returns properties of all client invocations +// for which one or more operations are either queued or executing +// within a given platform queue. +// +// When justQueuedInvocations is false, entries for invocations are +// returned even if they have no queued operations; only ones that are +// being executed right now. Entries will be sorted by invocation ID. +// +// When justQueuedInvocations is true, entries for invocations are +// returned only if they have queued operations. Entries will be sorted +// by priority at which operations are scheduled. +func (bq *InMemoryBuildQueue) ListInvocationChildren(ctx context.Context, request *buildqueuestate.ListInvocationChildrenRequest) (*buildqueuestate.ListInvocationChildrenResponse, error) { + bq.enter(bq.clock.Now()) + defer bq.leave() + + i, _, err := bq.getInvocationByName(request.InvocationName) + if err != nil { + return nil, err + } + + switch request.Filter { + case buildqueuestate.ListInvocationChildrenRequest_ALL, buildqueuestate.ListInvocationChildrenRequest_ACTIVE: + // Return all or active invocations in alphabetic order. 
+ keyList := make([]string, 0, len(i.children)) + for invocationKey, i := range i.children { + if request.Filter == buildqueuestate.ListInvocationChildrenRequest_ALL || i.isActive() { + keyList = append(keyList, string(invocationKey)) + } + } + sort.Strings(keyList) + + children := make([]*buildqueuestate.InvocationChildState, 0, len(i.children)) + for _, key := range keyList { + invocationKey := scheduler_invocation.Key(key) + i := i.children[invocationKey] + children = append(children, &buildqueuestate.InvocationChildState{ + Id: invocationKey.GetID(), + State: i.getInvocationState(bq), + }) + } + return &buildqueuestate.ListInvocationChildrenResponse{ + Children: children, + }, nil + case buildqueuestate.ListInvocationChildrenRequest_QUEUED: + // Return invocations with one or more queued + // operations, sorted by scheduling order. + children := make([]*buildqueuestate.InvocationChildState, 0, i.queuedChildren.Len()) + sort.Sort(&i.queuedChildren) + for _, i := range i.queuedChildren { + children = append(children, &buildqueuestate.InvocationChildState{ + Id: i.invocationKeys[len(i.invocationKeys)-1].GetID(), + State: i.getInvocationState(bq), + }) + } + return &buildqueuestate.ListInvocationChildrenResponse{ + Children: children, + }, nil + default: + return nil, status.Error(codes.InvalidArgument, "Unknown filter provided") + } +} + +// ListQueuedOperations returns properties of all queued operations +// contained for a given invocation within a platform queue. 
+func (bq *InMemoryBuildQueue) ListQueuedOperations(ctx context.Context, request *buildqueuestate.ListQueuedOperationsRequest) (*buildqueuestate.ListQueuedOperationsResponse, error) { + bq.enter(bq.clock.Now()) + defer bq.leave() + + i, _, err := bq.getInvocationByName(request.InvocationName) + if err != nil { + return nil, err + } + + startAfter := request.StartAfter + var startAfterExpectedDuration time.Duration + var startAfterQueuedTimestamp time.Time + if startAfter != nil { + if err := startAfter.ExpectedDuration.CheckValid(); err != nil { + return nil, util.StatusWrapWithCode(err, codes.InvalidArgument, "Invalid expected duration") + } + startAfterExpectedDuration = startAfter.ExpectedDuration.AsDuration() + + if err := startAfter.QueuedTimestamp.CheckValid(); err != nil { + return nil, util.StatusWrapWithCode(err, codes.InvalidArgument, "Invalid queued timestamp") + } + startAfterQueuedTimestamp = startAfter.QueuedTimestamp.AsTime() + } + + // As every sorted list is also a valid binary heap, simply sort + // the queued operations list prior to emitting it. 
+ sort.Sort(i.queuedOperations) + paginationInfo, endIndex := getPaginationInfo(i.queuedOperations.Len(), request.PageSize, func(idx int) bool { + o := i.queuedOperations[idx] + if startAfter == nil || o.priority > startAfter.Priority { + return true + } + if o.priority < startAfter.Priority { + return false + } + t := o.task + if t.expectedDuration < startAfterExpectedDuration { + return true + } + if t.expectedDuration > startAfterExpectedDuration { + return false + } + return t.desiredState.QueuedTimestamp.AsTime().After(startAfterQueuedTimestamp) + }) + + queuedOperationsRegion := i.queuedOperations[paginationInfo.StartIndex:endIndex] + queuedOperations := make([]*buildqueuestate.OperationState, 0, queuedOperationsRegion.Len()) + for _, o := range queuedOperationsRegion { + s := o.getOperationState(bq) + s.InvocationName = nil + queuedOperations = append(queuedOperations, s) + } + return &buildqueuestate.ListQueuedOperationsResponse{ + QueuedOperations: queuedOperations, + PaginationInfo: paginationInfo, + }, nil +} + +// ListWorkers returns basic properties of all workers for a given +// platform queue. +func (bq *InMemoryBuildQueue) ListWorkers(ctx context.Context, request *buildqueuestate.ListWorkersRequest) (*buildqueuestate.ListWorkersResponse, error) { + var startAfterWorkerKey *string + if startAfter := request.StartAfter; startAfter != nil { + workerKey := string(newWorkerKey(startAfter.WorkerId)) + startAfterWorkerKey = &workerKey + } + + bq.enter(bq.clock.Now()) + defer bq.leave() + + // Obtain IDs of all workers in sorted order. 
+ var scq *sizeClassQueue + var keyList []string + switch filter := request.Filter.GetType().(type) { + case *buildqueuestate.ListWorkersRequest_Filter_All: + var err error + scq, err = bq.getSizeClassQueueByName(filter.All) + if err != nil { + return nil, err + } + for workerKey := range scq.workers { + keyList = append(keyList, string(workerKey)) + } + case *buildqueuestate.ListWorkersRequest_Filter_Executing: + var i *invocation + var err error + i, scq, err = bq.getInvocationByName(filter.Executing) + if err != nil { + return nil, err + } + for w := range i.executingWorkers { + keyList = append(keyList, string(w.workerKey)) + } + case *buildqueuestate.ListWorkersRequest_Filter_IdleSynchronizing: + var i *invocation + var err error + i, scq, err = bq.getInvocationByName(filter.IdleSynchronizing) + if err != nil { + return nil, err + } + for _, entry := range i.idleSynchronizingWorkers { + keyList = append(keyList, string(entry.worker.workerKey)) + } + default: + return nil, status.Error(codes.InvalidArgument, "Unknown filter provided") + } + sort.Strings(keyList) + paginationInfo, endIndex := getPaginationInfo(len(keyList), request.PageSize, func(i int) bool { + return startAfterWorkerKey == nil || keyList[i] > *startAfterWorkerKey + }) + + // Extract status. + keyListRegion := keyList[paginationInfo.StartIndex:endIndex] + workers := make([]*buildqueuestate.WorkerState, 0, len(keyListRegion)) + for _, key := range keyListRegion { + workerKey := workerKey(key) + w := scq.workers[workerKey] + var currentOperation *buildqueuestate.OperationState + if t := w.currentTask; t != nil { + // A task may have more than one operation + // associated with it, in case deduplication of + // in-flight requests occurred. For the time + // being, let's not expose the concept of tasks + // through the web UI yet. Just show one of the + // operations. + // + // Do make this deterministic by picking the + // operation with the lowest name, + // alphabetically. 
+ var o *operation + for _, oCheck := range t.operations { + if o == nil || o.name > oCheck.name { + o = oCheck + } + } + currentOperation = o.getOperationState(bq) + currentOperation.InvocationName = nil + currentOperation.Stage = nil + } + workerID := workerKey.getWorkerID() + workers = append(workers, &buildqueuestate.WorkerState{ + Id: workerID, + Timeout: bq.cleanupQueue.getTimestamp(w.cleanupKey), + CurrentOperation: currentOperation, + Drained: w.isDrained(scq, workerID), + }) + } + return &buildqueuestate.ListWorkersResponse{ + Workers: workers, + PaginationInfo: paginationInfo, + }, nil +} + +// ListDrains returns a list of all the drains that are present within a +// given platform queue. +func (bq *InMemoryBuildQueue) ListDrains(ctx context.Context, request *buildqueuestate.ListDrainsRequest) (*buildqueuestate.ListDrainsResponse, error) { + bq.enter(bq.clock.Now()) + defer bq.leave() + + scq, err := bq.getSizeClassQueueByName(request.SizeClassQueueName) + if err != nil { + return nil, err + } + + // Obtain IDs of all drains in sorted order. + keyList := make([]string, 0, len(scq.drains)) + for drainKey := range scq.drains { + keyList = append(keyList, drainKey) + } + sort.Strings(keyList) + + // Extract drains. 
+ drains := make([]*buildqueuestate.DrainState, 0, len(keyList)) + for _, key := range keyList { + drains = append(drains, scq.drains[key]) + } + return &buildqueuestate.ListDrainsResponse{ + Drains: drains, + }, nil +} + +func (bq *InMemoryBuildQueue) modifyDrain(ctx context.Context, request *buildqueuestate.AddOrRemoveDrainRequest, modifyFunc func(scq *sizeClassQueue, drainKey string)) (*emptypb.Empty, error) { + sizeClassKey, err := newSizeClassKeyFromName(request.SizeClassQueueName) + if err != nil { + return nil, err + } + if err := auth.AuthorizeSingleInstanceName(ctx, bq.modifyDrainsAuthorizer, sizeClassKey.platformKey.GetInstanceNamePrefix()); err != nil { + return nil, util.StatusWrap(err, "Authorization") + } + + drainKey, err := json.Marshal(request.WorkerIdPattern) + if err != nil { + return nil, util.StatusWrapWithCode(err, codes.InvalidArgument, "Failed to marshal worker ID pattern") + } + + bq.enter(bq.clock.Now()) + defer bq.leave() + + scq, ok := bq.sizeClassQueues[sizeClassKey] + if !ok { + return nil, status.Error(codes.NotFound, "Size class queue not found") + } + modifyFunc(scq, string(drainKey)) + return &emptypb.Empty{}, nil +} + +// AddDrain inserts a new drain into the list of drains currently +// tracked by the platform queue. +func (bq *InMemoryBuildQueue) AddDrain(ctx context.Context, request *buildqueuestate.AddOrRemoveDrainRequest) (*emptypb.Empty, error) { + return bq.modifyDrain(ctx, request, func(scq *sizeClassQueue, drainKey string) { + scq.drains[drainKey] = &buildqueuestate.DrainState{ + WorkerIdPattern: request.WorkerIdPattern, + CreatedTimestamp: bq.getCurrentTime(), + } + + // Wake up all synchronizing workers that are queued, + // but are supposed to be drained. This ensures that + // they will stop picking up work immediately. 
+ for workerKey, w := range scq.workers { + if w.wakeup != nil && workerMatchesPattern(workerKey.getWorkerID(), request.WorkerIdPattern) { + w.wakeUp(scq) + } + } + }) +} + +// RemoveDrain removes a drain from the list of drains currently tracked +// by the platform queue. +func (bq *InMemoryBuildQueue) RemoveDrain(ctx context.Context, request *buildqueuestate.AddOrRemoveDrainRequest) (*emptypb.Empty, error) { + return bq.modifyDrain(ctx, request, func(scq *sizeClassQueue, drainKey string) { + delete(scq.drains, drainKey) + + // Wake up all synchronizing workers that are drained. + // This ensures that they pick up work immediately. + close(scq.undrainWakeup) + scq.undrainWakeup = make(chan struct{}) + }) +} + +// TerminateWorkers can be used to indicate that workers are going to be +// terminated in the nearby future. This function will block until any +// operations running on the workers complete, thereby allowing the +// workers to be terminated without interrupting operations. +func (bq *InMemoryBuildQueue) TerminateWorkers(ctx context.Context, request *buildqueuestate.TerminateWorkersRequest) (*emptypb.Empty, error) { + var completionWakeups []chan struct{} + bq.enter(bq.clock.Now()) + for _, scq := range bq.sizeClassQueues { + for workerKey, w := range scq.workers { + if workerMatchesPattern(workerKey.getWorkerID(), request.WorkerIdPattern) { + w.terminating = true + if t := w.currentTask; t != nil { + // The task will be at the + // EXECUTING stage, so it can + // only transition to COMPLETED. + completionWakeups = append(completionWakeups, t.stageChangeWakeup) + } else if w.wakeup != nil { + // Wake up the worker, so that + // it's dequeued. This prevents + // additional tasks to be + // assigned to it. + w.wakeUp(scq) + } + } + } + } + bq.leave() + + for _, completionWakeup := range completionWakeups { + select { + case <-completionWakeup: + // Worker has become idle. + case <-ctx.Done(): + // Client has canceled the request. 
+ return nil, util.StatusFromContext(ctx) + } + } + return &emptypb.Empty{}, nil +} + +// getNextSynchronizationAtDelay generates a timestamp that is attached +// to SynchronizeResponses, indicating that the worker is permitted to +// hold off sending updates for a limited amount of time. +func (bq *InMemoryBuildQueue) getNextSynchronizationAtDelay() *timestamppb.Timestamp { + return timestamppb.New(bq.now.Add(bq.configuration.BusyWorkerSynchronizationInterval)) +} + +// getCurrentTime generates a timestamp that corresponds to the current +// time. It is attached to SynchronizeResponses, indicating that the +// worker should resynchronize again as soon as possible. It is also +// used to compute QueuedTimestamps. +func (bq *InMemoryBuildQueue) getCurrentTime() *timestamppb.Timestamp { + return timestamppb.New(bq.now) +} + +// enter acquires the lock on the InMemoryBuildQueue and runs any +// cleanup tasks that should be executed prior mutating its state. +func (bq *InMemoryBuildQueue) enter(t time.Time) { + bq.lock.Lock() + if t.After(bq.now) { + bq.now = t + bq.cleanupQueue.run(bq.now) + } +} + +// leave releases the lock on the InMemoryBuildQueue. +func (bq *InMemoryBuildQueue) leave() { + bq.lock.Unlock() +} + +// getIdleSynchronizeResponse returns a synchronization response that +// explicitly instructs a worker to return to the idle state. +func (bq *InMemoryBuildQueue) getIdleSynchronizeResponse() *remoteworker.SynchronizeResponse { + return &remoteworker.SynchronizeResponse{ + NextSynchronizationAt: bq.getCurrentTime(), + DesiredState: &remoteworker.DesiredState{ + WorkerState: &remoteworker.DesiredState_Idle{ + Idle: &emptypb.Empty{}, + }, + }, + } +} + +// addPlatformQueue creates a new platform queue for a given platform. 
+func (bq *InMemoryBuildQueue) addPlatformQueue(platformKey platform.Key, workerInvocationStickinessLimits []time.Duration, maximumQueuedBackgroundLearningOperations int, backgroundLearningOperationPriority int32) *platformQueue { + pq := &platformQueue{ + platformKey: platformKey, + instanceNamePatcher: digest.NewInstanceNamePatcher(platformKey.GetInstanceNamePrefix(), digest.EmptyInstanceName), + workerInvocationStickinessLimits: workerInvocationStickinessLimits, + maximumQueuedBackgroundLearningOperations: maximumQueuedBackgroundLearningOperations, + backgroundLearningOperationPriority: backgroundLearningOperationPriority, + } + bq.platformQueuesTrie.Set(platformKey, len(bq.platformQueues)) + bq.platformQueues = append(bq.platformQueues, pq) + return pq +} + +// platformQueueList is a list of *platformQueue objects that is +// sortable. It is used by InMemoryBuildQueue.GetBuildQueueState() to +// emit all platform queues in sorted order. +type platformQueueList []*platformQueue + +func (h platformQueueList) Len() int { + return len(h) +} + +func (h platformQueueList) Less(i, j int) bool { + pi, pj := h[i].platformKey, h[j].platformKey + ii, ij := pi.GetInstanceNamePrefix().String(), pj.GetInstanceNamePrefix().String() + return ii < ij || (ii == ij && pi.GetPlatformString() < pj.GetPlatformString()) +} + +func (h platformQueueList) Swap(i, j int) { + h[i], h[j] = h[j], h[i] +} + +func newPlatformKeyFromName(name *buildqueuestate.PlatformQueueName) (platform.Key, error) { + var badPlatformKey platform.Key + if name == nil { + return badPlatformKey, status.Error(codes.InvalidArgument, "No platform queue name provided") + } + instanceName, err := digest.NewInstanceName(name.InstanceNamePrefix) + if err != nil { + return badPlatformKey, util.StatusWrapf(err, "Invalid instance name prefix %#v", name.InstanceNamePrefix) + } + return platform.NewKey(instanceName, name.Platform) +} + +// sizeClassKey can be used as a key for maps to uniquely identify a set +// of workers 
that are all for the same platform and have the same size. +type sizeClassKey struct { + platformKey platform.Key + sizeClass uint32 +} + +func newSizeClassKeyFromName(name *buildqueuestate.SizeClassQueueName) (sizeClassKey, error) { + if name == nil { + return sizeClassKey{}, status.Error(codes.InvalidArgument, "No size class queue name provided") + } + platformKey, err := newPlatformKeyFromName(name.PlatformQueueName) + if err != nil { + return sizeClassKey{}, err + } + return sizeClassKey{ + platformKey: platformKey, + sizeClass: name.SizeClass, + }, nil +} + +func (k *sizeClassKey) getSizeClassQueueName() *buildqueuestate.SizeClassQueueName { + return &buildqueuestate.SizeClassQueueName{ + PlatformQueueName: k.platformKey.GetPlatformQueueName(), + SizeClass: k.sizeClass, + } +} + +// platformQueue is an actual build operations queue that contains a +// list of associated workers and operations that are queued to be +// executed. An InMemoryBuildQueue contains a platformQueue for every +// instance/platform for which one or more workers exist. +type platformQueue struct { + platformKey platform.Key + instanceNamePatcher digest.InstanceNamePatcher + workerInvocationStickinessLimits []time.Duration + maximumQueuedBackgroundLearningOperations int + backgroundLearningOperationPriority int32 + + sizeClasses []uint32 + sizeClassQueues []*sizeClassQueue +} + +// getSizeClassQueueLabels returns the set of label values to attach to +// Prometheus metrics that pertain to a size class queue. 
+func (pq *platformQueue) getSizeClassQueueLabels(sizeClass uint32) (string, string, string) { + return pq.platformKey.GetInstanceNamePrefix().String(), + pq.platformKey.GetPlatformString(), + strconv.FormatUint(uint64(sizeClass), 10) +} + +func (pq *platformQueue) addSizeClassQueue(bq *InMemoryBuildQueue, sizeClass uint32, mayBeRemoved bool) *sizeClassQueue { + instanceNamePrefix, platformStr, sizeClassStr := pq.getSizeClassQueueLabels(sizeClass) + platformLabels := map[string]string{ + "instance_name_prefix": instanceNamePrefix, + "platform": platformStr, + "size_class": sizeClassStr, + } + tasksScheduledTotal := inMemoryBuildQueueTasksScheduledTotal.MustCurryWith(platformLabels) + scq := &sizeClassQueue{ + platformQueue: pq, + sizeClass: sizeClass, + mayBeRemoved: mayBeRemoved, + + rootInvocation: invocation{ + children: map[scheduler_invocation.Key]*invocation{}, + executingWorkers: map[*worker]int{}, + }, + workers: map[workerKey]*worker{}, + + drains: map[string]*buildqueuestate.DrainState{}, + undrainWakeup: make(chan struct{}), + + inFlightDeduplicationsSameInvocation: inMemoryBuildQueueInFlightDeduplicationsTotal.WithLabelValues(instanceNamePrefix, platformStr, sizeClassStr, "SameInvocation"), + inFlightDeduplicationsOtherInvocation: inMemoryBuildQueueInFlightDeduplicationsTotal.WithLabelValues(instanceNamePrefix, platformStr, sizeClassStr, "OtherInvocation"), + inFlightDeduplicationsNew: inMemoryBuildQueueInFlightDeduplicationsTotal.WithLabelValues(instanceNamePrefix, platformStr, sizeClassStr, "New"), + + tasksScheduledWorker: newTasksScheduledCounterVec(tasksScheduledTotal, "Worker"), + tasksScheduledQueue: newTasksScheduledCounterVec(tasksScheduledTotal, "Queue"), + tasksQueuedDurationSeconds: inMemoryBuildQueueTasksQueuedDurationSeconds.WithLabelValues(instanceNamePrefix, platformStr, sizeClassStr), + tasksExecutingDurationSeconds: inMemoryBuildQueueTasksExecutingDurationSeconds.MustCurryWith(platformLabels), + tasksExecutingRetries: 
inMemoryBuildQueueTasksExecutingRetries.MustCurryWith(platformLabels), + tasksCompletedDurationSeconds: inMemoryBuildQueueTasksCompletedDurationSeconds.WithLabelValues(instanceNamePrefix, platformStr, sizeClassStr), + + workersCreatedTotal: inMemoryBuildQueueWorkersCreatedTotal.WithLabelValues(instanceNamePrefix, platformStr, sizeClassStr), + workersRemovedIdleTotal: inMemoryBuildQueueWorkersRemovedTotal.WithLabelValues(instanceNamePrefix, platformStr, sizeClassStr, "Idle"), + workersRemovedExecutingTotal: inMemoryBuildQueueWorkersRemovedTotal.WithLabelValues(instanceNamePrefix, platformStr, sizeClassStr, "Executing"), + + workerInvocationStickinessRetained: inMemoryBuildQueueWorkerInvocationStickinessRetained.WithLabelValues(instanceNamePrefix, platformStr, sizeClassStr), + } + scq.rootInvocation.sizeClassQueue = scq + scq.incrementInvocationsCreatedTotal(0) + + // Force creation of all metrics associated with this platform + // queue to make recording rules work. + scq.tasksExecutingDurationSeconds.WithLabelValues("Success", "") + scq.tasksExecutingRetries.WithLabelValues("Success", "") + + // Insert the new size class queue into the platform queue. + // Keep the size class queues sorted, so that they are provided + // to initialsizeclass.Selector deterministically. + i := 0 + for i < len(pq.sizeClasses) && pq.sizeClasses[i] < sizeClass { + i++ + } + + pq.sizeClasses = append(pq.sizeClasses, 0) + copy(pq.sizeClasses[i+1:], pq.sizeClasses[i:]) + pq.sizeClasses[i] = sizeClass + + pq.sizeClassQueues = append(pq.sizeClassQueues, nil) + copy(pq.sizeClassQueues[i+1:], pq.sizeClassQueues[i:]) + pq.sizeClassQueues[i] = scq + + bq.sizeClassQueues[scq.getKey()] = scq + + return scq +} + +// tasksScheduledCounterVec is a pair of counters for both boolean +// values of "do_not_cache". It is used by the "tasks_scheduled_total" +// Prometheus metric. 
+type tasksScheduledCounterVec struct { + doNotCacheTrue prometheus.Counter + doNotCacheFalse prometheus.Counter +} + +func newTasksScheduledCounterVec(counterVec *prometheus.CounterVec, assignment string) tasksScheduledCounterVec { + return tasksScheduledCounterVec{ + doNotCacheTrue: counterVec.WithLabelValues(assignment, "true"), + doNotCacheFalse: counterVec.WithLabelValues(assignment, "false"), + } +} + +// invocationsMetrics contains Prometheus metrics that should be tracked +// for every depth of the tree of invocations inside a size class queue. +type invocationsMetrics struct { + createdTotal prometheus.Counter + activatedTotal prometheus.Counter + deactivatedTotal prometheus.Counter + removedTotal prometheus.Counter +} + +type sizeClassQueue struct { + platformQueue *platformQueue + sizeClass uint32 + mayBeRemoved bool + + // Data structure in which all queued and executing operations + // are placed, and which keeps track of all idle workers that + // are synchronizing against the scheduler. + rootInvocation invocation + workers map[workerKey]*worker + cleanupKey cleanupKey + + drains map[string]*buildqueuestate.DrainState + undrainWakeup chan struct{} + + // Prometheus metrics. 
+ inFlightDeduplicationsSameInvocation prometheus.Counter + inFlightDeduplicationsOtherInvocation prometheus.Counter + inFlightDeduplicationsNew prometheus.Counter + + invocationsMetrics []invocationsMetrics + + tasksScheduledWorker tasksScheduledCounterVec + tasksScheduledQueue tasksScheduledCounterVec + tasksQueuedDurationSeconds prometheus.Observer + tasksExecutingDurationSeconds prometheus.ObserverVec + tasksExecutingRetries prometheus.ObserverVec + tasksCompletedDurationSeconds prometheus.Observer + + workersCreatedTotal prometheus.Counter + workersRemovedIdleTotal prometheus.Counter + workersRemovedExecutingTotal prometheus.Counter + + workerInvocationStickinessRetained prometheus.Observer +} + +func (scq *sizeClassQueue) getKey() sizeClassKey { + return sizeClassKey{ + platformKey: scq.platformQueue.platformKey, + sizeClass: scq.sizeClass, + } +} + +// remove is invoked when Synchronize() isn't being invoked by any +// worker for a given platform quickly enough. It causes the platform +// queue and all associated queued operations to be removed from the +// InMemoryBuildQueue. +func (scq *sizeClassQueue) remove(bq *InMemoryBuildQueue) { + scq.rootInvocation.cancelAllQueuedOperations( + bq, + status.New( + codes.Unavailable, + "Workers for this instance name, platform and size class disappeared while task was queued", + ).Proto()) + scq.invocationsMetrics[0].removedTotal.Inc() + + delete(bq.sizeClassQueues, scq.getKey()) + pq := scq.platformQueue + i := 0 + for pq.sizeClassQueues[i] != scq { + i++ + } + pq.sizeClasses = append(pq.sizeClasses[:i], pq.sizeClasses[i+1:]...) + pq.sizeClassQueues = append(pq.sizeClassQueues[:i], pq.sizeClassQueues[i+1:]...) + + if len(pq.sizeClasses) == 0 { + // No size classes remain for this platform queue, + // meaning that we can remove it. We must make sure the + // list of platform queues remains contiguous. 
+ index := bq.platformQueuesTrie.GetExact(pq.platformKey) + newLength := len(bq.platformQueues) - 1 + lastPQ := bq.platformQueues[newLength] + bq.platformQueues[index] = lastPQ + bq.platformQueues = bq.platformQueues[:newLength] + bq.platformQueuesTrie.Set(lastPQ.platformKey, index) + bq.platformQueuesTrie.Remove(pq.platformKey) + } +} + +// removeStaleWorker is invoked when Synchronize() isn't being invoked +// by a worker quickly enough. It causes the worker to be removed from +// the InMemoryBuildQueue. +func (scq *sizeClassQueue) removeStaleWorker(bq *InMemoryBuildQueue, workerKey workerKey, removalTime time.Time) { + w := scq.workers[workerKey] + if t := w.currentTask; t == nil { + scq.workersRemovedIdleTotal.Inc() + } else { + scq.workersRemovedExecutingTotal.Inc() + t.complete(bq, &remoteexecution.ExecuteResponse{ + Status: status.Newf(codes.Unavailable, "Worker %s disappeared while task was executing", workerKey).Proto(), + }, false) + } + w.clearLastInvocation() + delete(scq.workers, workerKey) + + // Trigger platform queue removal if necessary. + if len(scq.workers) == 0 && scq.mayBeRemoved { + bq.cleanupQueue.add(&scq.cleanupKey, removalTime.Add(bq.configuration.PlatformQueueWithNoWorkersTimeout), func() { + scq.remove(bq) + }) + } +} + +// getOrCreateInvocation looks up the invocation key in the size class +// queue, returning the corresponding invocation. If no invocation under +// this key exists, a new invocation is created. +// +// As the invocation may be just created, the caller must either queue +// an operation, increase the executing operations count, place a worker +// in the invocation or call invocation.removeIfEmpty(). 
+func (scq *sizeClassQueue) getOrCreateInvocation(bq *InMemoryBuildQueue, invocationKeys []scheduler_invocation.Key) *invocation { + i := &scq.rootInvocation + for depth, invocationKey := range invocationKeys { + iChild, ok := i.children[invocationKey] + if !ok { + iChild = &invocation{ + sizeClassQueue: scq, + invocationKeys: invocationKeys[:depth+1], + parent: i, + children: map[scheduler_invocation.Key]*invocation{}, + queuedChildrenIndex: -1, + executingWorkers: map[*worker]int{}, + lastOperationStarted: bq.now, + lastOperationCompletion: bq.now, + idleSynchronizingWorkersChildrenIndex: -1, + } + i.children[invocationKey] = iChild + scq.incrementInvocationsCreatedTotal(len(iChild.invocationKeys)) + + } + i = iChild + } + return i +} + +// incrementInvocationsCreatedTotal increments the +// "invocations_created_total" counter for the provided depth. If no +// counters exist for the given depth, they are created and initialized +// with zero. +func (scq *sizeClassQueue) incrementInvocationsCreatedTotal(depth int) { + if len(scq.invocationsMetrics) == depth { + instanceNamePrefix, platformStr, sizeClassStr := scq.platformQueue.getSizeClassQueueLabels(scq.sizeClass) + depthStr := strconv.FormatInt(int64(depth), 10) + + scq.invocationsMetrics = append( + scq.invocationsMetrics, + invocationsMetrics{ + createdTotal: inMemoryBuildQueueInvocationsCreatedTotal.WithLabelValues(instanceNamePrefix, platformStr, sizeClassStr, depthStr), + activatedTotal: inMemoryBuildQueueInvocationsActivatedTotal.WithLabelValues(instanceNamePrefix, platformStr, sizeClassStr, depthStr), + deactivatedTotal: inMemoryBuildQueueInvocationsDeactivatedTotal.WithLabelValues(instanceNamePrefix, platformStr, sizeClassStr, depthStr), + removedTotal: inMemoryBuildQueueInvocationsRemovedTotal.WithLabelValues(instanceNamePrefix, platformStr, sizeClassStr, depthStr), + }) + } + + scq.invocationsMetrics[depth].createdTotal.Inc() +} + +// workerKey can be used as a key for maps to uniquely identify a 
worker +// within the domain of a certain platform. This key is used for looking +// up the state of a worker when synchronizing. +type workerKey string + +func newWorkerKey(workerID map[string]string) workerKey { + key, err := json.Marshal(workerID) + if err != nil { + panic(fmt.Sprintf("Failed to marshal worker ID: %s", err)) + } + return workerKey(key) +} + +// getWorkerID reobtains the worker ID map that was used to construct +// the platformKey. As this is only used infrequently, we don't bother +// keeping the unmarshalled map around to preserve memory usage. +func (k workerKey) getWorkerID() map[string]string { + var workerID map[string]string + if err := json.Unmarshal([]byte(k), &workerID); err != nil { + panic(fmt.Sprintf("Failed to unmarshal previously marshalled worker ID: %s", err)) + } + return workerID +} + +// queuedChildrenHeap is a binary heap that contains a list of all child +// invocations in an invocation that have one or more queued operations. +// It is used to determine which operation should be started in case a +// worker requests a new task. 
+type queuedChildrenHeap []*invocation
+
+func (h queuedChildrenHeap) Len() int {
+ return len(h)
+}
+
+func (h queuedChildrenHeap) Less(i, j int) bool {
+ return h[i].isPreferred(h[j], h[i].lastOperationStarted.Before(h[j].lastOperationStarted))
+}
+
+func (h queuedChildrenHeap) Swap(i, j int) {
+ if h[i].queuedChildrenIndex != i || h[j].queuedChildrenIndex != j {
+ panic("Invalid queue indices")
+ }
+ h[i], h[j] = h[j], h[i]
+ h[i].queuedChildrenIndex = i
+ h[j].queuedChildrenIndex = j
+}
+
+func (h *queuedChildrenHeap) Push(x interface{}) {
+ e := x.(*invocation)
+ if e.queuedChildrenIndex != -1 {
+ panic("Invalid queue index")
+ }
+ e.queuedChildrenIndex = len(*h)
+ *h = append(*h, e)
+}
+
+func (h *queuedChildrenHeap) Pop() interface{} {
+ old := *h
+ n := len(old)
+ e := old[n-1]
+ old[n-1] = nil
+ *h = old[:n-1]
+ if e.queuedChildrenIndex != n-1 {
+ panic("Invalid queue index")
+ }
+ e.queuedChildrenIndex = -1
+ return e
+}
+
+// idleSynchronizingWorkersChildrenHeap is a binary heap that
+// contains all invocations for which one or more workers exist that
+// most recently ran a task associated with that invocation, and are
+// currently synchronizing against the scheduler.
+//
+// This heap is used to determine which invocations should have their
+// workers rebalanced to other invocations.
+type idleSynchronizingWorkersChildrenHeap []*invocation
+
+func (h idleSynchronizingWorkersChildrenHeap) Len() int {
+ return len(h)
+}
+
+func (h idleSynchronizingWorkersChildrenHeap) Less(i, j int) bool {
+ // Sort invocations by worker utilization rate, so that
+ // invocations with an excessive number of workers have their
+ // workers taken first. Invocations can only be part of this
+ // heap if they have no queued operations, so we only need to
+ // consider executing and idle workers:
+ //
+ // utilization = executing / (executing + idle)
+ //
+ // Simplify the comparison by cross-multiplying and removing the
+ // common part. 
+ ui := uint64(len(h[i].executingWorkers)) * uint64(len(h[j].idleSynchronizingWorkers)) + uj := uint64(len(h[j].executingWorkers)) * uint64(len(h[i].idleSynchronizingWorkers)) + if ui < uj { + return true + } else if ui > uj { + return false + } + // Tie breaker, most likely because both invocations are no + // longer executing anything. Remove workers from the oldest + // invocation first, as those are the least likely to be needed. + return h[i].lastOperationCompletion.Before(h[j].lastOperationCompletion) +} + +func (h idleSynchronizingWorkersChildrenHeap) Swap(i, j int) { + if h[i].idleSynchronizingWorkersChildrenIndex != i || h[j].idleSynchronizingWorkersChildrenIndex != j { + panic("Invalid queue indices") + } + h[i], h[j] = h[j], h[i] + h[i].idleSynchronizingWorkersChildrenIndex = i + h[j].idleSynchronizingWorkersChildrenIndex = j +} + +func (h *idleSynchronizingWorkersChildrenHeap) Push(x interface{}) { + e := x.(*invocation) + if e.idleSynchronizingWorkersChildrenIndex != -1 { + panic("Invalid queue index") + } + e.idleSynchronizingWorkersChildrenIndex = len(*h) + *h = append(*h, e) +} + +func (h *idleSynchronizingWorkersChildrenHeap) Pop() interface{} { + old := *h + n := len(old) + e := old[n-1] + old[n-1] = nil + *h = old[:n-1] + if e.idleSynchronizingWorkersChildrenIndex != n-1 { + panic("Invalid queue index") + } + e.idleSynchronizingWorkersChildrenIndex = -1 + return e +} + +// invocation keeps track of operations that all need to be scheduled on +// a single size class queue, all having the same invocation ID. These +// operations will be scheduled fairly with respect to other +// invocations. +type invocation struct { + sizeClassQueue *sizeClassQueue + invocationKeys []scheduler_invocation.Key + parent *invocation + + // Heap of operations that are part of this invocation that are + // currently in the QUEUED stage. + queuedOperations queuedOperationsHeap + + // All nested invocations for which one or more operations or + // workers exist. 
+ children map[scheduler_invocation.Key]*invocation + // Heap of nested invocations for which one or more operations + // exist that are in the QUEUED stage. + queuedChildren queuedChildrenHeap + // All nested invocations for which one or more idle + // synchronizing workers exist. + idleSynchronizingWorkersChildren idleSynchronizingWorkersChildrenHeap + + // The index of this invocation inside the queuedChildren heap + // of the parent invocation. + queuedChildrenIndex int + // The index of this invocation inside the + // sizeClassQueue.idleSynchronizingWorkersChildren heap. + idleSynchronizingWorkersChildrenIndex int + + // The priority of the operation that is either queued in this + // invocation or one of its children that is expected to be + // executed next. This value needs to be tracked to ensure the + // queuedChildren heap of the parent invocation is ordered + // correctly. + firstQueuedOperationPriority int32 + + // Number of workers that are executing operations that belong + // to this invocation. This equals the number of operations that + // are part of this invocation that are currently in the + // EXECUTING stage. + executingWorkers map[*worker]int + lastOperationStarted time.Time + // The time at which the last executing operation was completed. + // This value is used to determine which invocations are the + // best candidates for rebalancing idle synchronizing workers. + lastOperationCompletion time.Time + // Number of workers that are idle and most recently completed + // an operation belonging to this invocation. + idleWorkersCount uint32 + // List of workers that are idle and most recently executed an + // operation belonging to this invocation and are currently + // synchronizing against the scheduler. + idleSynchronizingWorkers idleSynchronizingWorkersList +} + +// isQueued returns whether an invocation has one or more queued +// operations, or contains a child invocation that has one or more +// queued operations. 
+func (i *invocation) isQueued() bool { + return i.queuedOperations.Len() > 0 || i.queuedChildren.Len() > 0 +} + +// isActive returns whether an invocation has one or more queued or +// executing operations. These are generally the ones that users of the +// BuildQueueState service want to view. +func (i *invocation) isActive() bool { + return i.isQueued() || len(i.executingWorkers) > 0 +} + +// removeIfEmpty checks whether the invocation is empty (i.e., not +// containing any operations or workers). If so, it removes the +// invocation from the size class queue in which it is contained. +func (i *invocation) removeIfEmpty() bool { + if i.parent != nil && !i.isActive() && i.idleWorkersCount == 0 { + depth := len(i.invocationKeys) + invocationKey := i.invocationKeys[depth-1] + if i.parent.children[invocationKey] != i { + panic("Attempted to remove an invocation that was already removed") + } + delete(i.parent.children, invocationKey) + i.sizeClassQueue.invocationsMetrics[depth].removedTotal.Inc() + return true + } + return false +} + +func (i *invocation) getInvocationState(bq *InMemoryBuildQueue) *buildqueuestate.InvocationState { + activeInvocationsCount := uint32(0) + for _, iChild := range i.children { + if iChild.isActive() { + activeInvocationsCount++ + } + } + return &buildqueuestate.InvocationState{ + QueuedOperationsCount: uint32(i.queuedOperations.Len()), + ChildrenCount: uint32(len(i.children)), + QueuedChildrenCount: uint32(i.queuedChildren.Len()), + ActiveChildrenCount: uint32(activeInvocationsCount), + ExecutingWorkersCount: uint32(len(i.executingWorkers)), + IdleWorkersCount: i.idleWorkersCount, + IdleSynchronizingWorkersCount: uint32(len(i.idleSynchronizingWorkers)), + } +} + +// decrementExecutingWorkersCount decrements the number of operations in +// the EXECUTING stage that are part of this invocation. 
+// +// Because the number of operations in the EXECUTING stage is used to +// prioritize tasks, this function may need to adjust the position of +// this invocation in the queued invocations heap. It may also need to +// remove the invocation entirely in case it no longer contains any +// operations. +func (i *invocation) decrementExecutingWorkersCount(bq *InMemoryBuildQueue, w *worker) { + for { + if i.executingWorkers[w] <= 0 { + panic("Executing workers count invalid") + } + i.executingWorkers[w]-- + if i.executingWorkers[w] == 0 { + delete(i.executingWorkers, w) + i.maybeDeactivate() + } + i.lastOperationCompletion = bq.now + if i.parent == nil { + break + } + heapMaybeFix(&i.parent.queuedChildren, i.queuedChildrenIndex) + heapMaybeFix(&i.parent.idleSynchronizingWorkersChildren, i.idleSynchronizingWorkersChildrenIndex) + i.removeIfEmpty() + i = i.parent + } +} + +// incrementExecutingWorkersCount increments the number of operations in +// the EXECUTING stage that are part of this invocation. +func (i *invocation) incrementExecutingWorkersCount(bq *InMemoryBuildQueue, w *worker) { + for { + i.maybeActivate() + i.executingWorkers[w]++ + i.lastOperationStarted = bq.now + if i.parent == nil { + break + } + heapMaybeFix(&i.parent.queuedChildren, i.queuedChildrenIndex) + heapMaybeFix(&i.parent.idleSynchronizingWorkersChildren, i.idleSynchronizingWorkersChildrenIndex) + i = i.parent + } +} + +func (i *invocation) hasInvocationKey(filter scheduler_invocation.Key) bool { + for _, key := range i.invocationKeys { + if key == filter { + return true + } + } + return false +} + +// maybeActivate should be called before an invocation is transitioning +// from being possibly idle to having one or more queued or executing +// operations. 
+func (i *invocation) maybeActivate() { + if !i.isActive() { + i.sizeClassQueue.invocationsMetrics[len(i.invocationKeys)].activatedTotal.Inc() + } +} + +// maybeDectivate should be called after an invocation is transitioning +// from having one or more queued or executing operations to being +// possibly idle. +func (i *invocation) maybeDeactivate() { + if !i.isActive() { + i.sizeClassQueue.invocationsMetrics[len(i.invocationKeys)].deactivatedTotal.Inc() + } +} + +var priorityExponentiationBase = math.Pow(2.0, 0.01) + +// isPreferred returns whether the first queued operation of invocation +// i should be preferred over the first queued operation of invocation j. +func (i *invocation) isPreferred(j *invocation, tieBreaker bool) bool { + // To introduce fairness, we want to prefer scheduling + // operations belonging to invocations that have the fewest + // running operations. In addition to that, we still want to + // respect priorities at the global level. + // + // Combine these two properties into a single score value + // according to the following expression, where the invocation + // with the lowest score is most favourable. + // + // S = (executingWorkersCount + 1) * b^priority + // + // Note that REv2 priorities are inverted; the lower the integer + // value, the higher the priority. The '+ 1' part has been added + // to this expression to ensure that the priority is still taken + // into account when the number of executing operations is zero. + // + // The base value for the expontentiation is chosen to be + // 2^0.01 =~ 1.007. This means that if the difference in + // priority between two builds is 100, one build will be allowed + // to run twice as many operations as the other. + ei, ej := float64(len(i.executingWorkers)+1), float64(len(j.executingWorkers)+1) + var si, sj float64 + if pi, pj := float64(i.firstQueuedOperationPriority), float64(j.firstQueuedOperationPriority); pi < pj { + // Invocation i has a higher priority. 
Give invocation j + // a penalty based on the difference in priority. + si, sj = ei, ej*math.Pow(priorityExponentiationBase, pj-pi) + } else if pi > pj { + // Invocation j has a higher priority. Give invocation i + // a penalty based on the difference in priority. + si, sj = ei*math.Pow(priorityExponentiationBase, pi-pj), ej + } else { + // Both invocations have the same priority. + si, sj = ei, ej + } + return si < sj || (si == sj && tieBreaker) +} + +func (i *invocation) cancelAllQueuedOperations(bq *InMemoryBuildQueue, status *status_pb.Status) { + // Recursively cancel operations belonging to nested invocations. + for i.queuedChildren.Len() > 0 { + i.queuedChildren[len(i.queuedChildren)-1].cancelAllQueuedOperations(bq, status) + } + + // Cancel operations directly belonging to this invocation. + for i.queuedOperations.Len() > 0 { + i.queuedOperations[i.queuedOperations.Len()-1].task.complete( + bq, + &remoteexecution.ExecuteResponse{Status: status}, + /* completedByWorker = */ false) + } +} + +func (i *invocation) updateFirstOperationPriority() { + // worker.assignNextQueuedTask() prefers scheduling queued + // operations stored directly underneath an invocation over ones + // stored in child invocations, so we do the same thing here. + if i.queuedOperations.Len() > 0 { + i.firstQueuedOperationPriority = i.queuedOperations[0].priority + } else if len(i.queuedChildren) > 0 { + i.firstQueuedOperationPriority = i.queuedChildren[0].firstQueuedOperationPriority + } +} + +// queuedOperationsHeap is a binary heap that stores queued operations, +// sorted by order in which they need to be assigned to workers. +type queuedOperationsHeap []*operation + +func (h queuedOperationsHeap) Len() int { + return len(h) +} + +func (h queuedOperationsHeap) Less(i, j int) bool { + // Lexicographic order on priority, expected duration and queued + // timestamp. 
By executing operations with a higher expected + // duration first, we reduce the probability of having poor + // concurrency at the final stages of a build. + if h[i].priority < h[j].priority { + return true + } + if h[i].priority > h[j].priority { + return false + } + ti, tj := h[i].task, h[j].task + if ti.expectedDuration > tj.expectedDuration { + return true + } + if ti.expectedDuration < tj.expectedDuration { + return false + } + return ti.desiredState.QueuedTimestamp.AsTime().Before(tj.desiredState.QueuedTimestamp.AsTime()) +} + +func (h queuedOperationsHeap) Swap(i, j int) { + if h[i].queueIndex != i || h[j].queueIndex != j { + panic("Invalid queue indices") + } + h[i], h[j] = h[j], h[i] + h[i].queueIndex = i + h[j].queueIndex = j +} + +func (h *queuedOperationsHeap) Push(x interface{}) { + o := x.(*operation) + if o.queueIndex != -1 { + panic("Invalid queue index") + } + o.queueIndex = len(*h) + *h = append(*h, o) +} + +func (h *queuedOperationsHeap) Pop() interface{} { + old := *h + n := len(old) + o := old[n-1] + old[n-1] = nil + *h = old[:n-1] + if o.queueIndex != n-1 { + panic("Invalid queue index") + } + o.queueIndex = -1 + return o +} + +// operation that a client can use to reference a task. +// +// The difference between operations and tasks is that tasks manage the +// lifecycle of a piece of work in general, while operations manage it +// in the context of a client invocation. This means that if in-flight +// deduplication of requests occurs, a task may be associated with two +// or more operations. +// +// If a single client were to abandon an operation (e.g., by closing the +// gRPC channel), the task and other operations that task will remain +// unaffected. +type operation struct { + name string + task *task + priority int32 + + // The invocation of which this operation is a part. queueIndex + // contains the index at which the operation is stored in the + // invocation's queuedOperations heap. 
When negative, it means + // that the operation is no longer in the queued stage (and thus + // either in the executing or completed stage). + // + // Because invocations are managed per size class, an operation + // may move from one invocation to another one it is retried as + // part of a different size class. All invocations of which this + // operation is a part during its lifetime will have the same + // invocation ID. + invocation *invocation + queueIndex int + + // Number of clients that are calling Execute() or + // WaitExecution() on this operation. + waiters uint + mayExistWithoutWaiters bool + cleanupKey cleanupKey +} + +// waitExecution periodically streams a series of longrunningpb.Operation +// messages back to the client, containing the state of the current +// operation. Streaming is stopped after execution of the operation is +// completed. +func (o *operation) waitExecution(bq *InMemoryBuildQueue, out remoteexecution.Execution_ExecuteServer) error { + ctx := out.Context() + + // Bookkeeping for determining whether operations are abandoned + // by clients. Operations should be removed if there are no + // clients calling Execute() or WaitExecution() for a certain + // amount of time. + if o.cleanupKey.isActive() { + bq.cleanupQueue.remove(o.cleanupKey) + } + o.waiters++ + defer func() { + if o.waiters == 0 { + panic("Invalid waiters count on operation") + } + o.waiters-- + o.maybeStartCleanup(bq) + }() + + t := o.task + for { + // Construct the longrunningpb.Operation that needs to be + // sent back to the client. 
+ metadata, err := anypb.New(&remoteexecution.ExecuteOperationMetadata{ + Stage: t.getStage(), + ActionDigest: t.desiredState.ActionDigest, + }) + if err != nil { + return util.StatusWrap(err, "Failed to marshal execute operation metadata") + } + operation := &longrunningpb.Operation{ + Name: o.name, + Metadata: metadata, + } + if t.executeResponse != nil { + operation.Done = true + response, err := anypb.New(t.executeResponse) + if err != nil { + return util.StatusWrap(err, "Failed to marshal execute response") + } + operation.Result = &longrunningpb.Operation_Response{Response: response} + } + stageChangeWakeup := t.stageChangeWakeup + bq.leave() + + // Send the longrunningpb.Operation back to the client. + if err := out.Send(operation); operation.Done || err != nil { + bq.enter(bq.clock.Now()) + return err + } + + // Suspend until the client closes the connection, the + // action completes or a certain amount of time has + // passed without any updates. + timer, timerChannel := bq.clock.NewTimer(bq.configuration.ExecutionUpdateInterval) + select { + case <-ctx.Done(): + timer.Stop() + bq.enter(bq.clock.Now()) + return util.StatusFromContext(ctx) + case <-stageChangeWakeup: + timer.Stop() + bq.enter(bq.clock.Now()) + case t := <-timerChannel: + bq.enter(t) + } + } +} + +// removeQueuedFromInvocation removes an operation that is in the queued +// state from the invocation. If the invocation no longer has any queued +// operations, it will be removed from the queued invocations heap in +// the containing platform queue. 
+func (o *operation) removeQueuedFromInvocation() { + i := o.invocation + heap.Remove(&i.queuedOperations, o.queueIndex) + i.maybeDeactivate() + for i.parent != nil { + i.updateFirstOperationPriority() + heapRemoveOrFix(&i.parent.queuedChildren, i.queuedChildrenIndex, i.queuedChildren.Len()+i.queuedOperations.Len()) + i.parent.maybeDeactivate() + i = i.parent + } +} + +// enqueue a newly created operation in the heap of queued operations of +// an invocation. This method is called whenever an operation can't be +// assigned to a worker immediately, due to no idle synchronizing +// workers for this size class queue being available. +func (o *operation) enqueue() { + i := o.invocation + i.maybeActivate() + heap.Push(&i.queuedOperations, o) + for i.parent != nil { + i.updateFirstOperationPriority() + i.parent.maybeActivate() + heapPushOrFix(&i.parent.queuedChildren, i.queuedChildrenIndex, i) + i = i.parent + } +} + +func (o *operation) remove(bq *InMemoryBuildQueue) { + delete(bq.operationsNameMap, o.name) + + t := o.task + if len(t.operations) == 1 { + // Forcefully terminate the associated task if it won't + // have any other operations associated with it. + t.complete(bq, &remoteexecution.ExecuteResponse{ + Status: status.New(codes.Canceled, "Task no longer has any waiting clients").Proto(), + }, false) + t.registerCompletedStageFinished(bq) + } else { + // The underlying task is shared with other operations. + // Remove the operation, while leaving the task intact. 
+ i := o.invocation + switch t.getStage() { + case remoteexecution.ExecutionStage_QUEUED: + o.removeQueuedFromInvocation() + for i.removeIfEmpty() { + i = i.parent + } + case remoteexecution.ExecutionStage_EXECUTING: + i.decrementExecutingWorkersCount(bq, t.currentWorker) + } + } + delete(t.operations, o.invocation) +} + +func (o *operation) getOperationState(bq *InMemoryBuildQueue) *buildqueuestate.OperationState { + i := o.invocation + t := o.task + sizeClassKey := t.getCurrentSizeClassQueue().getKey() + invocationIDs := make([]*anypb.Any, 0, len(i.invocationKeys)) + for _, invocationKey := range i.invocationKeys { + invocationIDs = append(invocationIDs, invocationKey.GetID()) + } + s := &buildqueuestate.OperationState{ + Name: o.name, + InvocationName: &buildqueuestate.InvocationName{ + SizeClassQueueName: sizeClassKey.getSizeClassQueueName(), + Ids: invocationIDs, + }, + ExpectedDuration: durationpb.New(t.expectedDuration), + QueuedTimestamp: t.desiredState.QueuedTimestamp, + ActionDigest: t.desiredState.ActionDigest, + TargetId: t.targetID, + Timeout: bq.cleanupQueue.getTimestamp(o.cleanupKey), + Priority: o.priority, + InstanceNameSuffix: t.desiredState.InstanceNameSuffix, + DigestFunction: t.desiredState.DigestFunction, + } + switch t.getStage() { + case remoteexecution.ExecutionStage_QUEUED: + s.Stage = &buildqueuestate.OperationState_Queued{ + Queued: &emptypb.Empty{}, + } + case remoteexecution.ExecutionStage_EXECUTING: + s.Stage = &buildqueuestate.OperationState_Executing{ + Executing: &emptypb.Empty{}, + } + case remoteexecution.ExecutionStage_COMPLETED: + s.Stage = &buildqueuestate.OperationState_Completed{ + Completed: t.executeResponse, + } + } + return s +} + +func (o *operation) maybeStartCleanup(bq *InMemoryBuildQueue) { + if o.waiters == 0 && !o.mayExistWithoutWaiters { + bq.cleanupQueue.add(&o.cleanupKey, bq.now.Add(bq.configuration.OperationWithNoWaitersTimeout), func() { + o.remove(bq) + }) + } +} + +// task state that is created for every 
piece of work that needs to be +// executed by a worker. Tasks are associated with one or more +// operations. In the general case a task has one operation, but there +// may be multiple in case multiple clients request that the same action +// is built and deduplication is performed. +type task struct { + operations map[*invocation]*operation + actionDigest digest.Digest + desiredState remoteworker.DesiredState_Executing + + // The name of the target that triggered this operation. This + // field is not strictly necessary to implement the BuildQueue + // and OperationQueueServer interfaces. It needs to be present + // to implement BuildQueueState. + targetID string + + // currentStageStartTime is used by register*StageFinished() to + // obtain Prometheus metrics. + currentStageStartTime time.Time + + // The worker that is currently executing the task. The + // retryCount specifies how many additional times the operation + // was provided to the worker. This counter may be non-zero in + // case of network flakiness or worker crashes. + currentWorker *worker + retryCount int + + expectedDuration time.Duration + initialSizeClassLearner initialsizeclass.Learner + mayExistWithoutWaiters bool + + executeResponse *remoteexecution.ExecuteResponse + stageChangeWakeup chan struct{} +} + +// newOperation attaches a new operation to a task. This function must +// be called at least once after creating a task. It is most frequently +// called right after creating a task, but may also be used to attach +// additional operations to an existing task in case of in-flight +// deduplication. 
+func (t *task) newOperation(bq *InMemoryBuildQueue, priority int32, i *invocation, mayExistWithoutWaiters bool) *operation { + o := &operation{ + name: uuid.Must(bq.uuidGenerator()).String(), + task: t, + priority: priority, + invocation: i, + mayExistWithoutWaiters: mayExistWithoutWaiters, + queueIndex: -1, + } + if _, ok := t.operations[i]; ok { + panic("Task is already associated with this invocation") + } + t.operations[i] = o + bq.operationsNameMap[o.name] = o + return o +} + +// reportNonFinalStageChange can be used to wake up clients that are +// calling Execute() or WaitExecution(), causing them to receive another +// non-final stage change update. +func (t *task) reportNonFinalStageChange() { + close(t.stageChangeWakeup) + t.stageChangeWakeup = make(chan struct{}) +} + +// schedule a task. This function will first attempt to directly assign +// a task to an idle worker that is synchronizing against the scheduler. +// When no such worker exists, it will queue the operation, so that a +// worker may pick it up later. +func (t *task) schedule(bq *InMemoryBuildQueue) { + // Check whether there are idle workers that are synchronizing + // against the scheduler on which we can schedule the operation + // directly. + // + // We should prefer taking workers that are associated with + // invocations that have some similarity with those of the task, + // so that locality is improved. Scan the tree of invocations + // bottom up, breadth first to find an appropriate worker. + scq := t.getCurrentSizeClassQueue() + invocations := make([]*invocation, 0, len(t.operations)) + for i := range t.operations { + invocations = append(invocations, i) + } + for { + for idx, i := range invocations { + if len(i.idleSynchronizingWorkers) > 0 || i.idleSynchronizingWorkersChildren.Len() > 0 { + // This invocation either has idle + // workers available, or it contains an + // invocation that has idle workers. + // + // Schedule the task directly on the + // most preferable worker. 
+ for len(i.idleSynchronizingWorkers) == 0 { + i = i.idleSynchronizingWorkersChildren[0] + } + // TODO: Do we want to provide a histogram + // on how far the new invocation is removed + // from the original one? + t.registerQueuedStageStarted(bq, &scq.tasksScheduledWorker) + i.idleSynchronizingWorkers[0].worker.assignUnqueuedTaskAndWakeUp(bq, t, 0) + return + } + if i.parent == nil { + // Even the root invocation has no idle + // workers available that are + // synchronizing against the scheduler. + // + // Queue the operation, so that workers + // can pick it up when they become + // available. + t.registerQueuedStageStarted(bq, &scq.tasksScheduledQueue) + for _, o := range t.operations { + o.enqueue() + } + return + } + invocations[idx] = i.parent + } + } +} + +// getStage returns whether the task is in the queued, executing or +// completed stage. +func (t *task) getStage() remoteexecution.ExecutionStage_Value { + if t.executeResponse != nil { + return remoteexecution.ExecutionStage_COMPLETED + } + if t.currentWorker != nil { + return remoteexecution.ExecutionStage_EXECUTING + } + return remoteexecution.ExecutionStage_QUEUED +} + +// complete execution of the task by registering the execution response. +// This function wakes up any clients waiting on the task to complete. +func (t *task) complete(bq *InMemoryBuildQueue, executeResponse *remoteexecution.ExecuteResponse, completedByWorker bool) { + currentSCQ := t.getCurrentSizeClassQueue() + switch t.getStage() { + case remoteexecution.ExecutionStage_QUEUED: + // The task isn't executing. Create a temporary worker + // on which we start the task, so that we can go through + // the regular completion code below. + var w worker + w.assignQueuedTask(bq, t, 0) + case remoteexecution.ExecutionStage_EXECUTING: + // Task is executing on a worker. Make sure to preserve + // worker.lastInvocation. 
+ w := t.currentWorker
+ if completedByWorker {
+ // Due to in-flight deduplication, the task may
+ // be associated with multiple invocations.
+ // Compute the invocation that is the lowest
+ // common ancestor.
+ var iLowest *invocation
+ for i := range t.operations {
+ if iLowest == nil {
+ // First iteration.
+ iLowest = i
+ } else {
+ // Find lowest common ancestor
+ // of two invocations.
+ for len(iLowest.invocationKeys) > len(i.invocationKeys) {
+ iLowest = iLowest.parent
+ }
+ for len(i.invocationKeys) > len(iLowest.invocationKeys) {
+ i = i.parent
+ }
+ for iLowest != i {
+ iLowest = iLowest.parent
+ i = i.parent
+ }
+ }
+ }
+ w.setLastInvocation(iLowest)
+ } else {
+ // In case the worker didn't complete executing
+ // the task, move the worker back to the initial
+ // state where it's associated with the root
+ // invocation. There is no point in offering any
+ // locality/stickiness.
+ w.setLastInvocation(&currentSCQ.rootInvocation)
+ }
+ case remoteexecution.ExecutionStage_COMPLETED:
+ // Task is already completed. Nothing to do.
+ return
+ }
+
+ for i := range t.operations {
+ i.decrementExecutingWorkersCount(bq, t.currentWorker)
+ }
+ t.currentWorker.currentTask = nil
+ t.currentWorker = nil
+ result, grpcCode := re_builder.GetResultAndGRPCCodeFromExecuteResponse(executeResponse)
+ t.registerExecutingStageFinished(bq, result, grpcCode)
+
+ // Communicate the results to the initial size class learner,
+ // which may request that the task is re-executed.
+ pq := currentSCQ.platformQueue
+ var expectedDuration, timeout time.Duration
+ if code, actionResult := status.FromProto(executeResponse.Status).Code(), executeResponse.Result; code == codes.OK && actionResult.GetExitCode() == 0 {
+ // The task succeeded, but we're still getting
+ // instructed to run the task again for training
+ // purposes. If that happens, create a new task that
+ // runs in the background. The user does not need to be
+ // blocked on this. 
+		executionMetadata := actionResult.GetExecutionMetadata()
+		backgroundSizeClassIndex, backgroundExpectedDuration, backgroundTimeout, backgroundInitialSizeClassLearner := t.initialSizeClassLearner.Succeeded(
+			executionMetadata.GetVirtualExecutionDuration().AsDuration(),
+			pq.sizeClasses)
+		t.initialSizeClassLearner = nil
+		if backgroundInitialSizeClassLearner != nil {
+			if pq.maximumQueuedBackgroundLearningOperations == 0 {
+				// No background learning permitted.
+				backgroundInitialSizeClassLearner.Abandoned()
+			} else {
+				backgroundSCQ := pq.sizeClassQueues[backgroundSizeClassIndex]
+				backgroundInvocation := backgroundSCQ.getOrCreateInvocation(bq, scheduler_invocation.BackgroundLearningKeys)
+				if backgroundInvocation.queuedOperations.Len() >= pq.maximumQueuedBackgroundLearningOperations {
+					// Already running too many background tasks.
+					backgroundInitialSizeClassLearner.Abandoned()
+				} else {
+					backgroundAction := *t.desiredState.Action
+					backgroundAction.DoNotCache = true
+					backgroundAction.Timeout = durationpb.New(backgroundTimeout)
+					backgroundTask := &task{
+						operations:              map[*invocation]*operation{},
+						actionDigest:            t.actionDigest,
+						desiredState:            t.desiredState,
+						targetID:                t.targetID,
+						expectedDuration:        backgroundExpectedDuration,
+						initialSizeClassLearner: backgroundInitialSizeClassLearner,
+						stageChangeWakeup:       make(chan struct{}),
+					}
+					backgroundTask.desiredState.Action = &backgroundAction
+					backgroundTask.newOperation(bq, pq.backgroundLearningOperationPriority, backgroundInvocation, true)
+					backgroundTask.schedule(bq)
+				}
+			}
+		}
+	} else if completedByWorker {
+		// The worker communicated that the task failed. Attempt
+		// to run it on another size class.
+		expectedDuration, timeout, t.initialSizeClassLearner = t.initialSizeClassLearner.Failed(code == codes.DeadlineExceeded)
+	} else {
+		// The task was completed, but this was not done by the
+		// worker. Treat it as a regular failure.
+ t.initialSizeClassLearner.Abandoned() + t.initialSizeClassLearner = nil + } + + if t.initialSizeClassLearner != nil { + // Re-execution against the largest size class is + // requested, using the original timeout value. + // Transplant all operations to the other size class + // queue and reschedule. + t.expectedDuration = expectedDuration + t.desiredState.Action.Timeout = durationpb.New(timeout) + t.registerCompletedStageFinished(bq) + largestSCQ := pq.sizeClassQueues[len(pq.sizeClassQueues)-1] + operations := t.operations + t.operations = make(map[*invocation]*operation, len(operations)) + for oldI, o := range operations { + i := largestSCQ.getOrCreateInvocation(bq, oldI.invocationKeys) + t.operations[i] = o + o.invocation = i + } + t.schedule(bq) + t.reportNonFinalStageChange() + } else { + // The task succeeded or it failed on the largest size + // class. Let's just complete it. + // + // Scrub data from the task that are no longer needed + // after completion. This reduces memory usage + // significantly. Keep the Action digest, so that + // there's still a way to figure out what the task was. + delete(bq.inFlightDeduplicationMap, t.actionDigest) + t.executeResponse = executeResponse + t.desiredState.Action = nil + close(t.stageChangeWakeup) + t.stageChangeWakeup = nil + + // Background learning tasks may continue to exist, even + // if no clients wait for the results. Now that this + // task is completed, it must go through the regular + // cleanup process. + for _, o := range t.operations { + if o.mayExistWithoutWaiters { + o.mayExistWithoutWaiters = false + o.maybeStartCleanup(bq) + } + } + } +} + +// registerQueuedStageStarted updates Prometheus metrics related to the +// task entering the QUEUED stage. 
+func (t *task) registerQueuedStageStarted(bq *InMemoryBuildQueue, tasksScheduledCounterVec *tasksScheduledCounterVec) { + if t.desiredState.Action.DoNotCache { + tasksScheduledCounterVec.doNotCacheTrue.Inc() + } else { + tasksScheduledCounterVec.doNotCacheFalse.Inc() + } + t.currentStageStartTime = bq.now +} + +// registerQueuedStageFinished updates Prometheus metrics related to the +// task finishing the QUEUED stage. +func (t *task) registerQueuedStageFinished(bq *InMemoryBuildQueue) { + scq := t.getCurrentSizeClassQueue() + scq.tasksQueuedDurationSeconds.Observe(bq.now.Sub(t.currentStageStartTime).Seconds()) + t.currentStageStartTime = bq.now +} + +// registerExecutingStageFinished updates Prometheus metrics related to +// the task finishing the EXECUTING stage. +func (t *task) registerExecutingStageFinished(bq *InMemoryBuildQueue, result, grpcCode string) { + scq := t.getCurrentSizeClassQueue() + scq.tasksExecutingDurationSeconds.WithLabelValues(result, grpcCode).Observe(bq.now.Sub(t.currentStageStartTime).Seconds()) + scq.tasksExecutingRetries.WithLabelValues(result, grpcCode).Observe(float64(t.retryCount)) + t.currentStageStartTime = bq.now +} + +// registerCompletedStageFinished updates Prometheus metrics related to +// the task finishing the COMPLETED stage, meaning the task got removed. +func (t *task) registerCompletedStageFinished(bq *InMemoryBuildQueue) { + scq := t.getCurrentSizeClassQueue() + scq.tasksCompletedDurationSeconds.Observe(bq.now.Sub(t.currentStageStartTime).Seconds()) + t.currentStageStartTime = bq.now +} + +// getCurrentSizeClassQueue returns the size class queue that is +// currently associated with the task. The size class queue may change +// if execution fails, and execution is retried on the largest size +// class queue. 
+func (t *task) getCurrentSizeClassQueue() *sizeClassQueue { + for i := range t.operations { + return i.sizeClassQueue + } + panic("Task is not associated with any operations") +} + +// worker state for every node capable of executing operations. +type worker struct { + workerKey workerKey + + // The task that this worker is currently executing. This field + // must be kept in sync with task.currentWorker. + currentTask *task + // Used to garbage collect workers that have disappeared. + cleanupKey cleanupKey + // When true, this worker is going to terminate in the nearby + // future. This is effectively a drain that cannot be cleared + // through the BuildQueueState interface. + terminating bool + // The invocation that was associated with the task that this + // worker completed most recently. This is used to make sure + // successive tasks belonging to this invocation is more likely + // to end up on this worker. This improves locality. + // + // Because of in-flight deduplication, it may be the case that + // the worker was executing a task that belonged to multiple + // invocations. If this is the case, lastInvocation will point + // to the lowest common ancestor. + lastInvocation *invocation + // When set, the worker is idle and currently issuing a blocking + // Synchronize() call against the scheduler. This channel can be + // closed to wake up the worker, either after assigning a task + // to it or to force it becoming drained. + wakeup chan<- struct{} + // When 'wakeup' is set, this contains the index at which this + // worker is placed in lastInvocation's + // idleSynchronizingWorkers. This index is needed to efficiently + // dequeue the worker in case of wakeups or Synchronize() + // interruptions. + listIndex int + // For every level of worker invocation of stickiness, the time + // at which we started executing operations belonging to the + // current invocation. These values are used to determine + // whether the stickiness limit has been reached. 
+ stickinessStartingTimes []time.Time +} + +func workerMatchesPattern(workerID, workerIDPattern map[string]string) bool { + for key, value := range workerIDPattern { + if workerID[key] != value { + return false + } + } + return true +} + +func (w *worker) isDrained(scq *sizeClassQueue, workerID map[string]string) bool { + // Implicitly treat workers that are terminating as being + // drained. This prevents tasks from getting interrupted. + if w.terminating { + return true + } + for _, drain := range scq.drains { + if workerMatchesPattern(workerID, drain.WorkerIdPattern) { + return true + } + } + return false +} + +// dequeue a worker. This method is either called by the worker itself +// at the end of Synchronize(), or when a worker needs to be woken up. +func (w *worker) dequeue(scq *sizeClassQueue) { + i := w.lastInvocation + i.idleSynchronizingWorkers.dequeue(w.listIndex) + for i.parent != nil { + heapRemoveOrFix( + &i.parent.idleSynchronizingWorkersChildren, + i.idleSynchronizingWorkersChildrenIndex, + len(i.idleSynchronizingWorkers)+i.idleSynchronizingWorkersChildren.Len()) + i = i.parent + } + w.wakeup = nil +} + +// maybeDequeue is the same as dequeue(), except that it may also be +// called if the worker is not actually queued. This is used to dequeue +// a worker as part of interrupted Synchronize() calls. +func (w *worker) maybeDequeue(scq *sizeClassQueue) { + if w.wakeup != nil { + w.dequeue(scq) + } +} + +// wakeUp wakes up an idle worker that is currently synchronizing +// against the scheduler. +func (w *worker) wakeUp(scq *sizeClassQueue) { + close(w.wakeup) + w.dequeue(scq) +} + +// assignUnqueuedTask assigns a task that is not queued to a worker. 
+func (w *worker) assignUnqueuedTask(bq *InMemoryBuildQueue, t *task, stickinessRetained int) { + if w.currentTask != nil { + panic("Worker is already associated with a task") + } + if t.currentWorker != nil { + panic("Task is already associated with a worker") + } + + t.registerQueuedStageFinished(bq) + w.currentTask = t + t.currentWorker = w + t.retryCount = 0 + for i := range t.operations { + i.incrementExecutingWorkersCount(bq, w) + } + w.clearLastInvocation() + for i := stickinessRetained; i < len(w.stickinessStartingTimes); i++ { + w.stickinessStartingTimes[i] = bq.now + } +} + +// assignQueuedTask assigns a task that is queued to a worker. The task +// is unqueued in the process. +func (w *worker) assignQueuedTask(bq *InMemoryBuildQueue, t *task, stickinessRetained int) { + w.assignUnqueuedTask(bq, t, stickinessRetained) + + for _, o := range t.operations { + o.removeQueuedFromInvocation() + } + t.reportNonFinalStageChange() +} + +// assignNextQueuedTask determines which queued task is the best +// candidate for execution and assigns it to the current task. +func (w *worker) assignNextQueuedTask(bq *InMemoryBuildQueue, scq *sizeClassQueue, workerID map[string]string) bool { + lastInvocationKeys := w.lastInvocation.invocationKeys + pq := scq.platformQueue + workerInvocationStickinessLimits := pq.workerInvocationStickinessLimits + stickinessStartingTimes := w.stickinessStartingTimes + i := &scq.rootInvocation + stickinessRetained := 0 + for { + // Even though an invocation can both have directly + // queued operations and queued children, it is uncommon + // in practice. Don't bother making smart decisions + // which to pick; always prefer directly queued + // operations over queued children. + if len(i.queuedOperations) > 0 { + // One or more operations are enqueued in this + // invocation directly. Pick the most preferable + // operation. 
+ scq.workerInvocationStickinessRetained.Observe(float64(stickinessRetained)) + w.assignQueuedTask(bq, i.queuedOperations[0].task, stickinessRetained) + return true + } else if len(i.queuedChildren) > 0 { + // One or more operations are enqueued in a + // child invocation. + // + // Determine from which invocation we need to + // extract an operation. We always want to pick + // the one that has the fewest executing + // operations (corrected for the priority). In + // case of ties, we want to schedule the + // invocation that is least recently used, so + // that they are scheduled round robin. + // + // The exception to this rule is when worker + // invocation stickiness is enabled. In that + // case tie breaking gives a slight advantage to + // any of the invocations associated with the + // last executed task. This reduces the startup + // overhead of actions that leave state behind + // on the worker. + iBest := i.queuedChildren[0] + if len(lastInvocationKeys) > 0 && len(workerInvocationStickinessLimits) > 0 { + iSticky := i.children[lastInvocationKeys[0]] + if iSticky.isQueued() && iSticky.isPreferred(iBest, w.stickinessStartingTimes[0].Add(workerInvocationStickinessLimits[0]).After(bq.now)) { + iBest = iSticky + } + if iBest == iSticky { + // Continue to process stickiness + // for child invocations. + stickinessRetained++ + lastInvocationKeys = lastInvocationKeys[1:] + workerInvocationStickinessLimits = workerInvocationStickinessLimits[1:] + stickinessStartingTimes = stickinessStartingTimes[1:] + } else { + // Stop processing any further + // stickiness, as we're going to + // pick another invocation. + lastInvocationKeys = nil + } + } + i = iBest + } else { + // No queued operations available. + return false + } + } +} + +// clearLastInvocation clears the invocation of the last task to run on +// this worker. This method needs to be called when workers are removed +// to make sure that invocations don't leak. 
+func (w *worker) clearLastInvocation() { + if w.wakeup != nil { + panic("Clearing last invocations would make it impossible to dequeue the worker") + } + if iLast := w.lastInvocation; iLast != nil { + for i := iLast; i != nil; i = i.parent { + if i.idleWorkersCount == 0 { + panic("Invalid workers count") + } + i.idleWorkersCount-- + i.removeIfEmpty() + } + w.lastInvocation = nil + } +} + +// setLastInvocation sets the invocation of the last task to run on this +// worker. This method needs to be called when execution of a task +// completes. +func (w *worker) setLastInvocation(iLast *invocation) { + if w.lastInvocation != nil { + panic("Executing worker cannot have a last invocation associated with it") + } + w.lastInvocation = iLast + for i := iLast; i != nil; i = i.parent { + i.idleWorkersCount++ + } +} + +// assignUnqueuedTaskAndWakeUp is used to assign a task to an idle +// worker that is synchronizing against the scheduler and to wake it up. +// This method is used when clients directly assign a task to a worker, +// as opposed to letting the worker select a task itself. +func (w *worker) assignUnqueuedTaskAndWakeUp(bq *InMemoryBuildQueue, t *task, stickinessRetained int) { + // Wake up the worker prior to assigning the task. Assigning + // clears w.lastInvocations, which cannot be done while the + // worker is queued. + w.wakeUp(t.getCurrentSizeClassQueue()) + w.assignUnqueuedTask(bq, t, stickinessRetained) +} + +// getExecutingSynchronizeResponse returns a synchronization response +// that instructs a worker to start executing a task. 
+func (w *worker) getExecutingSynchronizeResponse(bq *InMemoryBuildQueue) *remoteworker.SynchronizeResponse { + t := w.currentTask + return &remoteworker.SynchronizeResponse{ + NextSynchronizationAt: bq.getNextSynchronizationAtDelay(), + DesiredState: &remoteworker.DesiredState{ + WorkerState: &remoteworker.DesiredState_Executing_{ + Executing: &t.desiredState, + }, + }, + } +} + +// getNextTask extracts the next queued task from the queue and assigns +// it to the current worker. Depending on whether a context object is +// provided, this function either blocks until work is available or +// returns immediately. When returning immediately, it instructs the +// worker to go idle. +func (w *worker) getNextTask(ctx context.Context, bq *InMemoryBuildQueue, scq *sizeClassQueue, workerID map[string]string, preferBeingIdle bool) (*remoteworker.SynchronizeResponse, error) { + if preferBeingIdle { + // The worker wants to terminate or is experiencing some + // issues. Explicitly instruct the worker to go idle, so + // that it knows it can hold off synchronizing. + return bq.getIdleSynchronizeResponse(), nil + } + + isDrained := w.isDrained(scq, workerID) + if !isDrained && w.assignNextQueuedTask(bq, scq, workerID) { + return w.getExecutingSynchronizeResponse(bq), nil + } + + if ctx == nil { + // We shouldn't block, as the worker is currently doing + // some work that it shouldn't be doing. Request that + // the worker goes idle immediately. It will + // resynchronize as soon as it's done terminating its + // current build action. + return bq.getIdleSynchronizeResponse(), nil + } + + timeoutTimer, timeoutChannel := bq.clock.NewTimer(bq.configuration.GetIdleWorkerSynchronizationInterval()) + defer timeoutTimer.Stop() + + for { + if isDrained { + // The worker is drained. Simply wait until + // undrain operations occur. + undrainWakeup := scq.undrainWakeup + bq.leave() + + select { + case t := <-timeoutChannel: + // Timeout has been reached. 
+ bq.enter(t) + return bq.getIdleSynchronizeResponse(), nil + case <-ctx.Done(): + // Worker has canceled the request. + bq.enter(bq.clock.Now()) + return nil, util.StatusFromContext(ctx) + case <-undrainWakeup: + // Worker might have been undrained. + bq.enter(bq.clock.Now()) + } + } else if w.assignNextQueuedTask(bq, scq, workerID) { + // One or more tasks were queued. We're able to + // synchronize without blocking by starting one + // of those tasks. + return w.getExecutingSynchronizeResponse(bq), nil + } else { + // No queued tasks available. Queue the worker, + // so that clients may assign a task to us. + // + // Place ourselves in the queue belonging to the + // invocation of the last task that ran on this + // worker, so that we're more likely to receive + // tasks that share the same invocation keys + // prefix. + if w.wakeup != nil || w.listIndex != -1 { + panic("Worker is already queued") + } + wakeup := make(chan struct{}) + w.wakeup = wakeup + i := w.lastInvocation + i.idleSynchronizingWorkers.enqueue(&idleSynchronizingWorker{ + worker: w, + listIndex: &w.listIndex, + }) + for i.parent != nil { + heapPushOrFix(&i.parent.idleSynchronizingWorkersChildren, i.idleSynchronizingWorkersChildrenIndex, i) + i = i.parent + } + bq.leave() + + select { + case t := <-timeoutChannel: + // Timeout has been reached. + bq.enter(t) + w.maybeDequeue(scq) + if w.currentTask != nil { + return w.getExecutingSynchronizeResponse(bq), nil + } + return bq.getIdleSynchronizeResponse(), nil + case <-ctx.Done(): + // Worker has canceled the request. + bq.enter(bq.clock.Now()) + w.maybeDequeue(scq) + return nil, util.StatusFromContext(ctx) + case <-wakeup: + // Worker got woken up, meaning a task + // got assigned to it, or it got drained. 
+ bq.enter(bq.clock.Now()) + if w.currentTask != nil { + return w.getExecutingSynchronizeResponse(bq), nil + } + } + } + isDrained = w.isDrained(scq, workerID) + } +} + +// getCurrentOrNextTask either returns a synchronization response that +// instructs the worker to run the task it should be running. When the +// worker has no task assigned to it, it attempts to request a task from +// the queue. +func (w *worker) getCurrentOrNextTask(ctx context.Context, bq *InMemoryBuildQueue, scq *sizeClassQueue, workerID map[string]string, preferBeingIdle bool) (*remoteworker.SynchronizeResponse, error) { + if t := w.currentTask; t != nil { + if t.retryCount < bq.configuration.WorkerTaskRetryCount { + t.retryCount++ + return &remoteworker.SynchronizeResponse{ + NextSynchronizationAt: bq.getNextSynchronizationAtDelay(), + DesiredState: &remoteworker.DesiredState{ + WorkerState: &remoteworker.DesiredState_Executing_{ + Executing: &t.desiredState, + }, + }, + }, nil + } + t.complete(bq, &remoteexecution.ExecuteResponse{ + Status: status.Newf( + codes.Internal, + "Attempted to execute task %d times, but it never completed. This task may cause worker %s to crash.", + t.retryCount+1, + newWorkerKey(workerID)).Proto(), + }, false) + } + return w.getNextTask(ctx, bq, scq, workerID, preferBeingIdle) +} + +// isRunningCorrectTask determines whether the worker is actually +// running the task the scheduler instructed it to run previously. +func (w *worker) isRunningCorrectTask(actionDigest *remoteexecution.Digest) bool { + t := w.currentTask + if t == nil { + return false + } + desiredDigest := t.desiredState.ActionDigest + return proto.Equal(actionDigest, desiredDigest) +} + +// updateTask processes execution status updates from the worker that do +// not equal the 'completed' state. 
+func (w *worker) updateTask(bq *InMemoryBuildQueue, scq *sizeClassQueue, workerID map[string]string, actionDigest *remoteexecution.Digest, preferBeingIdle bool) (*remoteworker.SynchronizeResponse, error) { + if !w.isRunningCorrectTask(actionDigest) { + return w.getCurrentOrNextTask(nil, bq, scq, workerID, preferBeingIdle) + } + // The worker is doing fine. Allow it to continue with what it's + // doing right now. + return &remoteworker.SynchronizeResponse{ + NextSynchronizationAt: bq.getNextSynchronizationAtDelay(), + }, nil +} + +// completeTask processes execution status updates from the worker that +// equal the 'completed' state. It causes the execute response to be +// preserved and communicated to clients that are waiting on the +// completion of the task. +func (w *worker) completeTask(ctx context.Context, bq *InMemoryBuildQueue, scq *sizeClassQueue, workerID map[string]string, actionDigest *remoteexecution.Digest, executeResponse *remoteexecution.ExecuteResponse, preferBeingIdle bool) (*remoteworker.SynchronizeResponse, error) { + if !w.isRunningCorrectTask(actionDigest) { + return w.getCurrentOrNextTask(ctx, bq, scq, workerID, preferBeingIdle) + } + w.currentTask.complete(bq, executeResponse, true) + return w.getNextTask(ctx, bq, scq, workerID, preferBeingIdle) +} + +type idleSynchronizingWorker struct { + worker *worker + listIndex *int +} + +type idleSynchronizingWorkersList []idleSynchronizingWorker + +func (l *idleSynchronizingWorkersList) enqueue(entry *idleSynchronizingWorker) { + *entry.listIndex = len(*l) + *l = append(*l, *entry) +} + +func (l *idleSynchronizingWorkersList) dequeue(listIndex int) { + w := (*l)[listIndex].worker + (*l)[listIndex] = (*l)[len(*l)-1] + *(*l)[listIndex].listIndex = listIndex + w.listIndex = -1 + (*l)[len(*l)-1] = idleSynchronizingWorker{} + (*l) = (*l)[:len(*l)-1] +} + +// heapRemoveOrFix either calls heap.Remove() or heap.Fix(), depending +// on a provided counter value. 
This function may be used to correct the +// position of an object that contains a counter or list inside a heap, +// after the counter or list is mutated. +func heapRemoveOrFix(h heap.Interface, i, count int) { + if count > 0 { + heap.Fix(h, i) + } else { + heap.Remove(h, i) + } +} + +// heapPushOrFix either calls heap.Push() or heap.Fix(), depending on an +// existing heap index. This function can be used to push an object into +// a heap, only if it's not part of the heap already. +func heapPushOrFix(h heap.Interface, i int, v interface{}) { + if i < 0 { + heap.Push(h, v) + } else { + heap.Fix(h, i) + } +} + +// heapMaybeFix calls heap.Fix() only when an object is actually stored +// in the heap. +func heapMaybeFix(h heap.Interface, i int) { + if i >= 0 { + heap.Fix(h, i) + } +} + +// cleanupKey is a handle that is used by cleanupQueue to refer to +// scheduled cleanups. It can be used to cancel a cleanup task. The key +// refers to the index of the corresponding entry in the cleanupHeap. +// Keys are offset by one, so that a zero value indicates the key is not +// associated with any cleanup entry. +type cleanupKey int + +func (k cleanupKey) isActive() bool { + return k != 0 +} + +// cleanupHeap is an implementation of container.Heap for cleanupEntry +// objects. It ensures that the cleanupKeys remain in sync with the +// indices of the cleanupEntries. 
+type cleanupHeap []cleanupEntry + +func (h cleanupHeap) Len() int { + return len(h) +} + +func (h cleanupHeap) Less(i, j int) bool { + return h[i].timestamp.Before(h[j].timestamp) +} + +func (h cleanupHeap) Swap(i, j int) { + if *h[i].key != cleanupKey(i+1) || *h[j].key != cleanupKey(j+1) { + panic("Invalid cleanup keys") + } + h[i], h[j] = h[j], h[i] + *h[i].key = cleanupKey(i + 1) + *h[j].key = cleanupKey(j + 1) +} + +func (h *cleanupHeap) Push(x interface{}) { + e := x.(cleanupEntry) + if *e.key != 0 { + panic("Cleanup key already in use") + } + *h = append(*h, e) + *e.key = cleanupKey(len(*h)) +} + +func (h *cleanupHeap) Pop() interface{} { + old := *h + n := len(old) + e := old[n-1] + old[n-1] = cleanupEntry{} + *h = old[:n-1] + if *e.key != cleanupKey(n) { + panic("Invalid cleanup key") + } + *e.key = 0 + return e +} + +// cleanupEntry stores at what point in time a certain cleanup function +// needs to be executed. +type cleanupEntry struct { + key *cleanupKey + timestamp time.Time + callback func() +} + +// cleanupQueue is an event queue that keeps track of closures that need +// to be executed at some point in the future. This data structure is +// used by InMemoryBuildQueue to keep track of workers, platform queues, +// operations, etc. that need to be garbage collected. +// +// Every entry in cleanupQueue is associated with one cleanupKey. The +// cleanupKey can be used to cancel the execution of a cleanup function. 
+type cleanupQueue struct { + heap cleanupHeap +} + +func (q *cleanupQueue) add(key *cleanupKey, timestamp time.Time, callback func()) { + if *key != 0 { + panic("Cleanup key is already in use") + } + heap.Push(&q.heap, cleanupEntry{ + key: key, + timestamp: timestamp, + callback: callback, + }) +} + +func (q *cleanupQueue) remove(key cleanupKey) { + heap.Remove(&q.heap, int(key)-1) +} + +func (q *cleanupQueue) run(now time.Time) { + for len(q.heap) > 0 && !q.heap[0].timestamp.After(now) { + heap.Pop(&q.heap).(cleanupEntry).callback() + } +} + +func (q *cleanupQueue) getTimestamp(key cleanupKey) *timestamppb.Timestamp { + if key == 0 { + return nil + } + return timestamppb.New(q.heap[key-1].timestamp) +} diff --git a/pkg/scheduler/in_memory_build_queue_test.go b/pkg/scheduler/in_memory_build_queue_test.go new file mode 100644 index 0000000..6443e5d --- /dev/null +++ b/pkg/scheduler/in_memory_build_queue_test.go @@ -0,0 +1,4501 @@ +package scheduler_test + +import ( + "context" + "io" + "net" + "strconv" + "testing" + "time" + + remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + "github.com/buildbarn/bb-remote-execution/internal/mock" + "github.com/buildbarn/bb-remote-execution/pkg/proto/buildqueuestate" + "github.com/buildbarn/bb-remote-execution/pkg/proto/remoteworker" + "github.com/buildbarn/bb-remote-execution/pkg/scheduler" + "github.com/buildbarn/bb-remote-execution/pkg/scheduler/invocation" + "github.com/buildbarn/bb-remote-execution/pkg/scheduler/platform" + "github.com/buildbarn/bb-storage/pkg/auth" + "github.com/buildbarn/bb-storage/pkg/blobstore/buffer" + "github.com/buildbarn/bb-storage/pkg/builder" + "github.com/buildbarn/bb-storage/pkg/clock" + "github.com/buildbarn/bb-storage/pkg/digest" + "github.com/buildbarn/bb-storage/pkg/testutil" + "github.com/golang/mock/gomock" + "github.com/google/uuid" + "github.com/stretchr/testify/require" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + 
"google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + "google.golang.org/grpc/test/bufconn" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/anypb" + "google.golang.org/protobuf/types/known/durationpb" + "google.golang.org/protobuf/types/known/emptypb" + "google.golang.org/protobuf/types/known/timestamppb" + + "cloud.google.com/go/longrunning/autogen/longrunningpb" +) + +var buildQueueConfigurationForTesting = scheduler.InMemoryBuildQueueConfiguration{ + ExecutionUpdateInterval: time.Minute, + OperationWithNoWaitersTimeout: time.Minute, + PlatformQueueWithNoWorkersTimeout: 15 * time.Minute, + BusyWorkerSynchronizationInterval: 10 * time.Second, + GetIdleWorkerSynchronizationInterval: func() time.Duration { return time.Minute }, + WorkerTaskRetryCount: 9, + WorkerWithNoSynchronizationsTimeout: time.Minute, +} + +var platformForTesting = &remoteexecution.Platform{ + Properties: []*remoteexecution.Platform_Property{ + {Name: "cpu", Value: "armv6"}, + {Name: "os", Value: "linux"}, + }, +} + +var allowAllAuthorizer = auth.NewStaticAuthorizer(func(digest.InstanceName) bool { return true }) + +// getExecutionClient creates a GRPC client for calling Execute() and +// WaitExecution() operations against a build queue. These operations +// use streaming RPCs, which prevents us from invoking these operations +// directly. +// +// By using the bufconn package, we can create a GRPC client and server +// that communicate with each other entirely in memory. 
+func getExecutionClient(t *testing.T, buildQueue builder.BuildQueue) remoteexecution.ExecutionClient { + conn := bufconn.Listen(1) + server := grpc.NewServer() + remoteexecution.RegisterExecutionServer(server, buildQueue) + go func() { + require.NoError(t, server.Serve(conn)) + }() + client, err := grpc.Dial( + "myself", + grpc.WithDialer(func(string, time.Duration) (net.Conn, error) { + return conn.Dial() + }), + grpc.WithInsecure()) + require.NoError(t, err) + return remoteexecution.NewExecutionClient(client) +} + +func TestInMemoryBuildQueueExecuteBadRequest(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + contentAddressableStorage := mock.NewMockBlobAccess(ctrl) + clock := mock.NewMockClock(ctrl) + clock.EXPECT().Now().Return(time.Unix(0, 0)) + uuidGenerator := mock.NewMockUUIDGenerator(ctrl) + actionRouter := mock.NewMockActionRouter(ctrl) + buildQueue := scheduler.NewInMemoryBuildQueue(contentAddressableStorage, clock, uuidGenerator.Call, &buildQueueConfigurationForTesting, 10000, actionRouter, allowAllAuthorizer, allowAllAuthorizer, allowAllAuthorizer) + executionClient := getExecutionClient(t, buildQueue) + + // ExecuteRequest contains an invalid action digest. + t.Run("InvalidActionDigest", func(t *testing.T) { + stream, err := executionClient.Execute(ctx, &remoteexecution.ExecuteRequest{ + ActionDigest: &remoteexecution.Digest{ + Hash: "This is not a valid hash", + SizeBytes: 123, + }, + }) + require.NoError(t, err) + _, err = stream.Recv() + testutil.RequireEqualStatus(t, status.Error(codes.InvalidArgument, "Unknown digest function"), err) + }) + + // Action cannot be found in the Content Addressable Storage (CAS). 
+	t.Run("MissingAction", func(t *testing.T) {
+		contentAddressableStorage.EXPECT().Get(
+			gomock.Any(),
+			digest.MustNewDigest("main", remoteexecution.DigestFunction_SHA1, "da39a3ee5e6b4b0d3255bfef95601890afd80709", 123),
+		).Return(buffer.NewBufferFromError(status.Error(codes.FailedPrecondition, "Blob not found")))
+
+		stream, err := executionClient.Execute(ctx, &remoteexecution.ExecuteRequest{
+			InstanceName: "main",
+			ActionDigest: &remoteexecution.Digest{
+				Hash:      "da39a3ee5e6b4b0d3255bfef95601890afd80709",
+				SizeBytes: 123,
+			},
+		})
+		require.NoError(t, err)
+		_, err = stream.Recv()
+		testutil.RequireEqualStatus(t, err, status.Error(codes.FailedPrecondition, "Failed to obtain action: Blob not found"))
+	})
+
+	// No workers have registered themselves against this queue,
+	// meaning calls to Execute() should fail unconditionally. A
+	// soft error code should be returned if this happens not long
+	// after startup, as workers may still appear.
+	t.Run("UnknownPlatformSoft", func(t *testing.T) {
+		contentAddressableStorage.EXPECT().Get(
+			gomock.Any(),
+			digest.MustNewDigest("main", remoteexecution.DigestFunction_SHA1, "da39a3ee5e6b4b0d3255bfef95601890afd80709", 123),
+		).Return(buffer.NewProtoBufferFromProto(&remoteexecution.Action{
+			CommandDigest: &remoteexecution.Digest{
+				Hash:      "61c585c297d00409bd477b6b80759c94ec545ab4",
+				SizeBytes: 456,
+			},
+		}, buffer.UserProvided))
+		initialSizeClassSelector := mock.NewMockSelector(ctrl)
+		actionRouter.EXPECT().RouteAction(gomock.Any(), gomock.Any(), testutil.EqProto(t, &remoteexecution.Action{
+			CommandDigest: &remoteexecution.Digest{
+				Hash:      "61c585c297d00409bd477b6b80759c94ec545ab4",
+				SizeBytes: 456,
+			},
+		}), nil).Return(platform.MustNewKey("main", platformForTesting), nil, initialSizeClassSelector, nil)
+		initialSizeClassSelector.EXPECT().Abandoned()
+		clock.EXPECT().Now().Return(time.Unix(899, 999999999))
+
+		stream, err := executionClient.Execute(ctx, &remoteexecution.ExecuteRequest{
+			InstanceName: "main",
+ ActionDigest: &remoteexecution.Digest{ + Hash: "da39a3ee5e6b4b0d3255bfef95601890afd80709", + SizeBytes: 123, + }, + }) + require.NoError(t, err) + _, err = stream.Recv() + testutil.RequireEqualStatus(t, status.Error(codes.Unavailable, "No workers exist for instance name prefix \"main\" platform {\"properties\":[{\"name\":\"cpu\",\"value\":\"armv6\"},{\"name\":\"os\",\"value\":\"linux\"}]}"), err) + }) + + // We can be certain that no workers will appear if a sufficient + // amount of time has passed. We may then start returning a hard + // error code. + t.Run("UnknownPlatformHard", func(t *testing.T) { + contentAddressableStorage.EXPECT().Get( + gomock.Any(), + digest.MustNewDigest("main", remoteexecution.DigestFunction_SHA1, "da39a3ee5e6b4b0d3255bfef95601890afd80709", 123), + ).Return(buffer.NewProtoBufferFromProto(&remoteexecution.Action{ + CommandDigest: &remoteexecution.Digest{ + Hash: "61c585c297d00409bd477b6b80759c94ec545ab4", + SizeBytes: 456, + }, + }, buffer.UserProvided)) + initialSizeClassSelector := mock.NewMockSelector(ctrl) + actionRouter.EXPECT().RouteAction(gomock.Any(), gomock.Any(), testutil.EqProto(t, &remoteexecution.Action{ + CommandDigest: &remoteexecution.Digest{ + Hash: "61c585c297d00409bd477b6b80759c94ec545ab4", + SizeBytes: 456, + }, + }), nil).Return(platform.MustNewKey("main", platformForTesting), nil, initialSizeClassSelector, nil) + initialSizeClassSelector.EXPECT().Abandoned() + clock.EXPECT().Now().Return(time.Unix(900, 0)) + + stream, err := executionClient.Execute(ctx, &remoteexecution.ExecuteRequest{ + InstanceName: "main", + ActionDigest: &remoteexecution.Digest{ + Hash: "da39a3ee5e6b4b0d3255bfef95601890afd80709", + SizeBytes: 123, + }, + }) + require.NoError(t, err) + _, err = stream.Recv() + testutil.RequireEqualStatus(t, status.Error(codes.FailedPrecondition, "No workers exist for instance name prefix \"main\" platform {\"properties\":[{\"name\":\"cpu\",\"value\":\"armv6\"},{\"name\":\"os\",\"value\":\"linux\"}]}"), err) + 
}) +} + +func TestInMemoryBuildQueuePurgeStaleWorkersAndQueues(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + contentAddressableStorage := mock.NewMockBlobAccess(ctrl) + actionRouter := mock.NewMockActionRouter(ctrl) + for i := 0; i < 10; i++ { + contentAddressableStorage.EXPECT().Get( + gomock.Any(), + digest.MustNewDigest("main", remoteexecution.DigestFunction_SHA1, "da39a3ee5e6b4b0d3255bfef95601890afd80709", 123), + ).Return(buffer.NewProtoBufferFromProto(&remoteexecution.Action{ + CommandDigest: &remoteexecution.Digest{ + Hash: "61c585c297d00409bd477b6b80759c94ec545ab4", + SizeBytes: 456, + }, + DoNotCache: true, + }, buffer.UserProvided)) + } + clock := mock.NewMockClock(ctrl) + clock.EXPECT().Now().Return(time.Unix(0, 0)) + uuidGenerator := mock.NewMockUUIDGenerator(ctrl) + buildQueue := scheduler.NewInMemoryBuildQueue(contentAddressableStorage, clock, uuidGenerator.Call, &buildQueueConfigurationForTesting, 10000, actionRouter, allowAllAuthorizer, allowAllAuthorizer, allowAllAuthorizer) + executionClient := getExecutionClient(t, buildQueue) + + // Announce a new worker, which creates a queue for operations. 
+ clock.EXPECT().Now().Return(time.Unix(1000, 0))
+ response, err := buildQueue.Synchronize(ctx, &remoteworker.SynchronizeRequest{
+ WorkerId: map[string]string{
+ "hostname": "worker123",
+ "thread": "42",
+ },
+ InstanceNamePrefix: "main",
+ CurrentState: &remoteworker.CurrentState{
+ WorkerState: &remoteworker.CurrentState_Executing_{
+ Executing: &remoteworker.CurrentState_Executing{
+ ActionDigest: &remoteexecution.Digest{
+ Hash: "099a3f6dc1e8e91dbcca4ea964cd2237d4b11733",
+ SizeBytes: 123,
+ },
+ ExecutionState: &remoteworker.CurrentState_Executing_FetchingInputs{
+ FetchingInputs: &emptypb.Empty{},
+ },
+ },
+ },
+ },
+ })
+ require.NoError(t, err)
+ testutil.RequireEqualProto(t, response, &remoteworker.SynchronizeResponse{
+ NextSynchronizationAt: &timestamppb.Timestamp{Seconds: 1000},
+ DesiredState: &remoteworker.DesiredState{
+ WorkerState: &remoteworker.DesiredState_Idle{
+ Idle: &emptypb.Empty{},
+ },
+ },
+ })
+
+ // Let a client enqueue a new operation.
+ initialSizeClassSelector := mock.NewMockSelector(ctrl)
+ actionRouter.EXPECT().RouteAction(gomock.Any(), gomock.Any(), testutil.EqProto(t, &remoteexecution.Action{
+ CommandDigest: &remoteexecution.Digest{
+ Hash: "61c585c297d00409bd477b6b80759c94ec545ab4",
+ SizeBytes: 456,
+ },
+ DoNotCache: true,
+ }), nil).Return(platform.MustNewKey("main", &remoteexecution.Platform{}), nil, initialSizeClassSelector, nil)
+ initialSizeClassLearner := mock.NewMockLearner(ctrl)
+ initialSizeClassSelector.EXPECT().Select([]uint32{0}).
+ Return(0, 15*time.Minute, 30*time.Minute, initialSizeClassLearner) + initialSizeClassLearner.EXPECT().Abandoned() + clock.EXPECT().Now().Return(time.Unix(1001, 0)) + timer1 := mock.NewMockTimer(ctrl) + clock.EXPECT().NewTimer(time.Minute).Return(timer1, nil) + uuidGenerator.EXPECT().Call().Return(uuid.Parse("36ebab65-3c4f-4faf-818b-2eabb4cd1b02")) + stream1, err := executionClient.Execute(ctx, &remoteexecution.ExecuteRequest{ + InstanceName: "main", + ActionDigest: &remoteexecution.Digest{ + Hash: "da39a3ee5e6b4b0d3255bfef95601890afd80709", + SizeBytes: 123, + }, + }) + require.NoError(t, err) + update, err := stream1.Recv() + require.NoError(t, err) + metadata, err := anypb.New(&remoteexecution.ExecuteOperationMetadata{ + Stage: remoteexecution.ExecutionStage_QUEUED, + ActionDigest: &remoteexecution.Digest{ + Hash: "da39a3ee5e6b4b0d3255bfef95601890afd80709", + SizeBytes: 123, + }, + }) + require.NoError(t, err) + testutil.RequireEqualProto(t, update, &longrunningpb.Operation{ + Name: "36ebab65-3c4f-4faf-818b-2eabb4cd1b02", + Metadata: metadata, + }) + + // Assign it to the worker. + clock.EXPECT().Now().Return(time.Unix(1002, 0)) + // The timer should stop and we should get an immediate + // update on the state of the operation. 
+ timer1.EXPECT().Stop().Return(true)
+ clock.EXPECT().Now().Return(time.Unix(1002, 0))
+ timer2 := mock.NewMockTimer(ctrl)
+ wakeup2 := make(chan time.Time, 1)
+ clock.EXPECT().NewTimer(time.Minute).Return(timer2, wakeup2)
+
+ response, err = buildQueue.Synchronize(ctx, &remoteworker.SynchronizeRequest{
+ WorkerId: map[string]string{
+ "hostname": "worker123",
+ "thread": "42",
+ },
+ InstanceNamePrefix: "main",
+ CurrentState: &remoteworker.CurrentState{
+ WorkerState: &remoteworker.CurrentState_Idle{
+ Idle: &emptypb.Empty{},
+ },
+ },
+ })
+ require.NoError(t, err)
+ testutil.RequireEqualProto(t, &remoteworker.SynchronizeResponse{
+ NextSynchronizationAt: &timestamppb.Timestamp{Seconds: 1012},
+ DesiredState: &remoteworker.DesiredState{
+ WorkerState: &remoteworker.DesiredState_Executing_{
+ Executing: &remoteworker.DesiredState_Executing{
+ DigestFunction: remoteexecution.DigestFunction_SHA1,
+ ActionDigest: &remoteexecution.Digest{
+ Hash: "da39a3ee5e6b4b0d3255bfef95601890afd80709",
+ SizeBytes: 123,
+ },
+ Action: &remoteexecution.Action{
+ CommandDigest: &remoteexecution.Digest{
+ Hash: "61c585c297d00409bd477b6b80759c94ec545ab4",
+ SizeBytes: 456,
+ },
+ DoNotCache: true,
+ Timeout: &durationpb.Duration{Seconds: 1800},
+ },
+ QueuedTimestamp: &timestamppb.Timestamp{Seconds: 1001},
+ },
+ },
+ },
+ }, response)
+ update, err = stream1.Recv()
+ require.NoError(t, err)
+ metadata, err = anypb.New(&remoteexecution.ExecuteOperationMetadata{
+ Stage: remoteexecution.ExecutionStage_EXECUTING,
+ ActionDigest: &remoteexecution.Digest{
+ Hash: "da39a3ee5e6b4b0d3255bfef95601890afd80709",
+ SizeBytes: 123,
+ },
+ })
+ require.NoError(t, err)
+ executingMessage := &longrunningpb.Operation{
+ Name: "36ebab65-3c4f-4faf-818b-2eabb4cd1b02",
+ Metadata: metadata,
+ }
+ testutil.RequireEqualProto(t, executingMessage, update)
+
+ // The next time the client receives an update on the operation,
+ // it should (still) be in the EXECUTING state.
+ timer3 := mock.NewMockTimer(ctrl) + wakeup3 := make(chan time.Time, 1) + clock.EXPECT().NewTimer(time.Minute).Return(timer3, wakeup3) + wakeup2 <- time.Unix(1061, 0) + update, err = stream1.Recv() + require.NoError(t, err) + testutil.RequireEqualProto(t, executingMessage, update) + + // Because the worker is not providing any updates, the + // operation should be terminated. + // TODO: This could already trigger as soon as 1062, but would + // require waitExecution() to do a short sleep, which may + // increase complexity/overhead. + clock.EXPECT().Now().Return(time.Unix(1121, 0)) + wakeup3 <- time.Unix(1121, 0) + update, err = stream1.Recv() + require.NoError(t, err) + metadata, err = anypb.New(&remoteexecution.ExecuteOperationMetadata{ + Stage: remoteexecution.ExecutionStage_COMPLETED, + ActionDigest: &remoteexecution.Digest{ + Hash: "da39a3ee5e6b4b0d3255bfef95601890afd80709", + SizeBytes: 123, + }, + }) + require.NoError(t, err) + executeResponse, err := anypb.New(&remoteexecution.ExecuteResponse{ + Status: status.New(codes.Unavailable, "Worker {\"hostname\":\"worker123\",\"thread\":\"42\"} disappeared while task was executing").Proto(), + }) + require.NoError(t, err) + testutil.RequireEqualProto(t, update, &longrunningpb.Operation{ + Name: "36ebab65-3c4f-4faf-818b-2eabb4cd1b02", + Metadata: metadata, + Done: true, + Result: &longrunningpb.Operation_Response{Response: executeResponse}, + }) + + // Even with the worker being gone, it's permitted to enqueue + // operations for a limited amount of time. These will only be + // executed if another worker would appear. Spawn eight + // operations. 
+ fakeUUIDs := []string{ + "0fb1dd7c-ef72-4a42-94c1-60d7cd587736", + "1ef11db1-7b06-44ec-b5d2-af3a4b9a249f", + "59fafbe5-6f5d-4cf9-9ff4-9d320fa11626", + "62f855dc-9106-44c5-937a-dd33977f92f4", + "7144c9c0-6684-4bf0-8ada-1b50c52878d0", + "b331f0b2-b852-476c-95cc-9888aa246a3d", + "c016e168-2f65-43e8-85d9-7340fc462eb6", + "eaabd51d-10e7-4b66-a42c-2e00be0daf3d", + } + streams := make([]remoteexecution.Execution_ExecuteClient, 0, len(fakeUUIDs)) + for _, fakeUUID := range fakeUUIDs { + initialSizeClassSelector := mock.NewMockSelector(ctrl) + actionRouter.EXPECT().RouteAction(gomock.Any(), gomock.Any(), testutil.EqProto(t, &remoteexecution.Action{ + CommandDigest: &remoteexecution.Digest{ + Hash: "61c585c297d00409bd477b6b80759c94ec545ab4", + SizeBytes: 456, + }, + DoNotCache: true, + }), nil).Return(platform.MustNewKey("main", &remoteexecution.Platform{}), nil, initialSizeClassSelector, nil) + initialSizeClassLearner := mock.NewMockLearner(ctrl) + initialSizeClassSelector.EXPECT().Select([]uint32{0}). 
+ Return(0, 15*time.Minute, 30*time.Minute, initialSizeClassLearner) + initialSizeClassLearner.EXPECT().Abandoned() + clock.EXPECT().Now().Return(time.Unix(1961, 999999999)) + timer := mock.NewMockTimer(ctrl) + clock.EXPECT().NewTimer(time.Minute).Return(timer, nil) + timer.EXPECT().Stop().Return(true) + uuidGenerator.EXPECT().Call().Return(uuid.Parse(fakeUUID)) + stream, err := executionClient.Execute(ctx, &remoteexecution.ExecuteRequest{ + InstanceName: "main", + ActionDigest: &remoteexecution.Digest{ + Hash: "da39a3ee5e6b4b0d3255bfef95601890afd80709", + SizeBytes: 123, + }, + }) + require.NoError(t, err) + update, err = stream.Recv() + require.NoError(t, err) + metadata, err = anypb.New(&remoteexecution.ExecuteOperationMetadata{ + Stage: remoteexecution.ExecutionStage_QUEUED, + ActionDigest: &remoteexecution.Digest{ + Hash: "da39a3ee5e6b4b0d3255bfef95601890afd80709", + SizeBytes: 123, + }, + }) + require.NoError(t, err) + testutil.RequireEqualProto(t, &longrunningpb.Operation{ + Name: fakeUUID, + Metadata: metadata, + }, update) + streams = append(streams, stream) + } + + // After workers are absent for long enough, the corresponding + // platform queue is also garbage collected. 
+ initialSizeClassSelector = mock.NewMockSelector(ctrl) + actionRouter.EXPECT().RouteAction(gomock.Any(), gomock.Any(), testutil.EqProto(t, &remoteexecution.Action{ + CommandDigest: &remoteexecution.Digest{ + Hash: "61c585c297d00409bd477b6b80759c94ec545ab4", + SizeBytes: 456, + }, + DoNotCache: true, + }), nil).Return(platform.MustNewKey("main", &remoteexecution.Platform{}), nil, initialSizeClassSelector, nil) + initialSizeClassSelector.EXPECT().Abandoned() + clock.EXPECT().Now().Return(time.Unix(1962, 0)).Times(17) + stream3, err := executionClient.Execute(ctx, &remoteexecution.ExecuteRequest{ + InstanceName: "main", + ActionDigest: &remoteexecution.Digest{ + Hash: "da39a3ee5e6b4b0d3255bfef95601890afd80709", + SizeBytes: 123, + }, + }) + require.NoError(t, err) + _, err = stream3.Recv() + testutil.RequireEqualStatus(t, err, status.Error(codes.FailedPrecondition, "No workers exist for instance name prefix \"main\" platform {}")) + + // Operations that were queued should have been cancelled when + // the platform queue was garbage collected. All eight should + // get woken up. 
+ for i, fakeUUID := range fakeUUIDs { + update, err = streams[i].Recv() + require.NoError(t, err) + metadata, err = anypb.New(&remoteexecution.ExecuteOperationMetadata{ + Stage: remoteexecution.ExecutionStage_COMPLETED, + ActionDigest: &remoteexecution.Digest{ + Hash: "da39a3ee5e6b4b0d3255bfef95601890afd80709", + SizeBytes: 123, + }, + }) + require.NoError(t, err) + executeResponse, err = anypb.New(&remoteexecution.ExecuteResponse{ + Status: status.New(codes.Unavailable, "Workers for this instance name, platform and size class disappeared while task was queued").Proto(), + }) + require.NoError(t, err) + testutil.RequireEqualProto(t, update, &longrunningpb.Operation{ + Name: fakeUUID, + Metadata: metadata, + Done: true, + Result: &longrunningpb.Operation_Response{Response: executeResponse}, + }) + } +} + +func TestInMemoryBuildQueuePurgeStaleOperations(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + action := &remoteexecution.Action{ + CommandDigest: &remoteexecution.Digest{ + Hash: "61c585c297d00409bd477b6b80759c94ec545ab4", + SizeBytes: 456, + }, + } + contentAddressableStorage := mock.NewMockBlobAccess(ctrl) + for i := 0; i < 2; i++ { + contentAddressableStorage.EXPECT().Get( + gomock.Any(), + digest.MustNewDigest("main", remoteexecution.DigestFunction_SHA1, "da39a3ee5e6b4b0d3255bfef95601890afd80709", 123), + ).Return(buffer.NewProtoBufferFromProto(action, buffer.UserProvided)) + } + clock := mock.NewMockClock(ctrl) + clock.EXPECT().Now().Return(time.Unix(0, 0)) + uuidGenerator := mock.NewMockUUIDGenerator(ctrl) + actionRouter := mock.NewMockActionRouter(ctrl) + buildQueue := scheduler.NewInMemoryBuildQueue(contentAddressableStorage, clock, uuidGenerator.Call, &buildQueueConfigurationForTesting, 10000, actionRouter, allowAllAuthorizer, allowAllAuthorizer, allowAllAuthorizer) + executionClient := getExecutionClient(t, buildQueue) + + // Announce a new worker, which creates a queue for operations. 
+ clock.EXPECT().Now().Return(time.Unix(1000, 0))
+ response, err := buildQueue.Synchronize(ctx, &remoteworker.SynchronizeRequest{
+ WorkerId: map[string]string{
+ "hostname": "worker123",
+ "thread": "42",
+ },
+ InstanceNamePrefix: "main",
+ Platform: platformForTesting,
+ CurrentState: &remoteworker.CurrentState{
+ WorkerState: &remoteworker.CurrentState_Executing_{
+ Executing: &remoteworker.CurrentState_Executing{
+ ActionDigest: &remoteexecution.Digest{
+ Hash: "099a3f6dc1e8e91dbcca4ea964cd2237d4b11733",
+ SizeBytes: 123,
+ },
+ ExecutionState: &remoteworker.CurrentState_Executing_FetchingInputs{
+ FetchingInputs: &emptypb.Empty{},
+ },
+ },
+ },
+ },
+ })
+ require.NoError(t, err)
+ testutil.RequireEqualProto(t, response, &remoteworker.SynchronizeResponse{
+ NextSynchronizationAt: &timestamppb.Timestamp{Seconds: 1000},
+ DesiredState: &remoteworker.DesiredState{
+ WorkerState: &remoteworker.DesiredState_Idle{
+ Idle: &emptypb.Empty{},
+ },
+ },
+ })
+
+ // Let one client enqueue an operation.
+ initialSizeClassSelector1 := mock.NewMockSelector(ctrl)
+ actionRouter.EXPECT().RouteAction(gomock.Any(), gomock.Any(), testutil.EqProto(t, action), nil).
+ Return(platform.MustNewKey("main", platformForTesting), nil, initialSizeClassSelector1, nil)
+ initialSizeClassLearner1 := mock.NewMockLearner(ctrl)
+ initialSizeClassSelector1.EXPECT().Select([]uint32{0}).
+ Return(0, 15*time.Minute, 30*time.Minute, initialSizeClassLearner1) + clock.EXPECT().Now().Return(time.Unix(1070, 0)) + timer1 := mock.NewMockTimer(ctrl) + clock.EXPECT().NewTimer(time.Minute).Return(timer1, nil) + timer1.EXPECT().Stop().Return(true) + uuidGenerator.EXPECT().Call().Return(uuid.Parse("36ebab65-3c4f-4faf-818b-2eabb4cd1b02")) + ctx1, cancel1 := context.WithCancel(ctx) + stream1, err := executionClient.Execute(ctx1, &remoteexecution.ExecuteRequest{ + InstanceName: "main", + ActionDigest: &remoteexecution.Digest{ + Hash: "da39a3ee5e6b4b0d3255bfef95601890afd80709", + SizeBytes: 123, + }, + }) + require.NoError(t, err) + update, err := stream1.Recv() + require.NoError(t, err) + metadata, err := anypb.New(&remoteexecution.ExecuteOperationMetadata{ + Stage: remoteexecution.ExecutionStage_QUEUED, + ActionDigest: &remoteexecution.Digest{ + Hash: "da39a3ee5e6b4b0d3255bfef95601890afd80709", + SizeBytes: 123, + }, + }) + require.NoError(t, err) + testutil.RequireEqualProto(t, update, &longrunningpb.Operation{ + Name: "36ebab65-3c4f-4faf-818b-2eabb4cd1b02", + Metadata: metadata, + }) + + // Let a second client enqueue the same action. Due to + // deduplication of in-flight actions, it will obtain the same + // operation. 
+ initialSizeClassSelector2 := mock.NewMockSelector(ctrl) + actionRouter.EXPECT().RouteAction(gomock.Any(), gomock.Any(), testutil.EqProto(t, &remoteexecution.Action{ + CommandDigest: &remoteexecution.Digest{ + Hash: "61c585c297d00409bd477b6b80759c94ec545ab4", + SizeBytes: 456, + }, + }), nil).Return(platform.MustNewKey("main", platformForTesting), nil, initialSizeClassSelector2, nil) + initialSizeClassSelector2.EXPECT().Abandoned() + clock.EXPECT().Now().Return(time.Unix(1075, 0)) + timer2 := mock.NewMockTimer(ctrl) + clock.EXPECT().NewTimer(time.Minute).Return(timer2, nil) + timer2.EXPECT().Stop().Return(true) + ctx2, cancel2 := context.WithCancel(ctx) + stream2, err := executionClient.Execute(ctx2, &remoteexecution.ExecuteRequest{ + InstanceName: "main", + ActionDigest: &remoteexecution.Digest{ + Hash: "da39a3ee5e6b4b0d3255bfef95601890afd80709", + SizeBytes: 123, + }, + }) + require.NoError(t, err) + update, err = stream2.Recv() + require.NoError(t, err) + metadata, err = anypb.New(&remoteexecution.ExecuteOperationMetadata{ + Stage: remoteexecution.ExecutionStage_QUEUED, + ActionDigest: &remoteexecution.Digest{ + Hash: "da39a3ee5e6b4b0d3255bfef95601890afd80709", + SizeBytes: 123, + }, + }) + require.NoError(t, err) + testutil.RequireEqualProto(t, update, &longrunningpb.Operation{ + Name: "36ebab65-3c4f-4faf-818b-2eabb4cd1b02", + Metadata: metadata, + }) + + // Let a third client use WaitExecution() to block on the same + // operation. 
+ clock.EXPECT().Now().Return(time.Unix(1080, 0)).Times(2)
+ timer3 := mock.NewMockTimer(ctrl)
+ clock.EXPECT().NewTimer(time.Minute).Return(timer3, nil)
+ timer3.EXPECT().Stop().Return(true)
+ ctx3, cancel3 := context.WithCancel(ctx)
+ stream3, err := executionClient.WaitExecution(ctx3, &remoteexecution.WaitExecutionRequest{
+ Name: "36ebab65-3c4f-4faf-818b-2eabb4cd1b02",
+ })
+ require.NoError(t, err)
+ update, err = stream3.Recv()
+ require.NoError(t, err)
+ metadata, err = anypb.New(&remoteexecution.ExecuteOperationMetadata{
+ Stage: remoteexecution.ExecutionStage_QUEUED,
+ ActionDigest: &remoteexecution.Digest{
+ Hash: "da39a3ee5e6b4b0d3255bfef95601890afd80709",
+ SizeBytes: 123,
+ },
+ })
+ require.NoError(t, err)
+ testutil.RequireEqualProto(t, update, &longrunningpb.Operation{
+ Name: "36ebab65-3c4f-4faf-818b-2eabb4cd1b02",
+ Metadata: metadata,
+ })
+
+ // The operation should be present without any timeout
+ // associated with it, as there are multiple waiters.
+ invocationName := &buildqueuestate.InvocationName{
+ SizeClassQueueName: &buildqueuestate.SizeClassQueueName{
+ PlatformQueueName: &buildqueuestate.PlatformQueueName{
+ InstanceNamePrefix: "main",
+ Platform: platformForTesting,
+ },
+ },
+ }
+ clock.EXPECT().Now().Return(time.Unix(1080, 0))
+ allOperations, err := buildQueue.ListOperations(ctx, &buildqueuestate.ListOperationsRequest{
+ PageSize: 10,
+ })
+ require.NoError(t, err)
+ testutil.RequireEqualProto(t, &buildqueuestate.ListOperationsResponse{
+ Operations: []*buildqueuestate.OperationState{
+ {
+ Name: "36ebab65-3c4f-4faf-818b-2eabb4cd1b02",
+ InvocationName: invocationName,
+ ExpectedDuration: &durationpb.Duration{Seconds: 900},
+ QueuedTimestamp: &timestamppb.Timestamp{Seconds: 1070},
+ DigestFunction: remoteexecution.DigestFunction_SHA1,
+ ActionDigest: &remoteexecution.Digest{
+ Hash: "da39a3ee5e6b4b0d3255bfef95601890afd80709",
+ SizeBytes: 123,
+ },
+ Stage: &buildqueuestate.OperationState_Queued{
+ Queued: &emptypb.Empty{},
+ },
+ },
+
}, +		PaginationInfo: &buildqueuestate.PaginationInfo{
+ StartIndex: 0,
+ TotalEntries: 1,
+ },
+ }, allOperations)
+
+ // Cancel all Execute() and WaitExecution() calls.
+ cancelWait := make(chan struct{})
+ clock.EXPECT().Now().Return(time.Unix(1090, 0)).Times(3).Do(func() {
+ cancelWait <- struct{}{}
+ })
+ cancel1()
+ <-cancelWait
+ cancel2()
+ <-cancelWait
+ cancel3()
+ <-cancelWait
+
+ // The operation should still be available up until the deadline.
+ clock.EXPECT().Now().Return(time.Unix(1149, 999999999))
+ allOperations, err = buildQueue.ListOperations(ctx, &buildqueuestate.ListOperationsRequest{
+ PageSize: 10,
+ })
+ require.NoError(t, err)
+ testutil.RequireEqualProto(t, &buildqueuestate.ListOperationsResponse{
+ Operations: []*buildqueuestate.OperationState{
+ {
+ Name: "36ebab65-3c4f-4faf-818b-2eabb4cd1b02",
+ InvocationName: invocationName,
+ ExpectedDuration: &durationpb.Duration{Seconds: 900},
+ QueuedTimestamp: &timestamppb.Timestamp{Seconds: 1070},
+ DigestFunction: remoteexecution.DigestFunction_SHA1,
+ ActionDigest: &remoteexecution.Digest{
+ Hash: "da39a3ee5e6b4b0d3255bfef95601890afd80709",
+ SizeBytes: 123,
+ },
+ Timeout: &timestamppb.Timestamp{Seconds: 1150},
+ Stage: &buildqueuestate.OperationState_Queued{
+ Queued: &emptypb.Empty{},
+ },
+ },
+ },
+ PaginationInfo: &buildqueuestate.PaginationInfo{
+ StartIndex: 0,
+ TotalEntries: 1,
+ },
+ }, allOperations)
+
+ // And it should be gone after it.
+ initialSizeClassLearner1.EXPECT().Abandoned() + clock.EXPECT().Now().Return(time.Unix(1150, 0)) + allOperations, err = buildQueue.ListOperations(ctx, &buildqueuestate.ListOperationsRequest{ + PageSize: 10, + }) + require.NoError(t, err) + require.True(t, proto.Equal(&buildqueuestate.ListOperationsResponse{ + PaginationInfo: &buildqueuestate.PaginationInfo{ + StartIndex: 0, + TotalEntries: 0, + }, + }, allOperations)) +} + +func TestInMemoryBuildQueueCrashLoopingWorker(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + contentAddressableStorage := mock.NewMockBlobAccess(ctrl) + contentAddressableStorage.EXPECT().Get( + gomock.Any(), + digest.MustNewDigest("main/suffix", remoteexecution.DigestFunction_SHA1, "da39a3ee5e6b4b0d3255bfef95601890afd80709", 123), + ).Return(buffer.NewProtoBufferFromProto(&remoteexecution.Action{ + CommandDigest: &remoteexecution.Digest{ + Hash: "61c585c297d00409bd477b6b80759c94ec545ab4", + SizeBytes: 456, + }, + }, buffer.UserProvided)) + clock := mock.NewMockClock(ctrl) + clock.EXPECT().Now().Return(time.Unix(0, 0)) + uuidGenerator := mock.NewMockUUIDGenerator(ctrl) + actionRouter := mock.NewMockActionRouter(ctrl) + buildQueue := scheduler.NewInMemoryBuildQueue(contentAddressableStorage, clock, uuidGenerator.Call, &buildQueueConfigurationForTesting, 10000, actionRouter, allowAllAuthorizer, allowAllAuthorizer, allowAllAuthorizer) + executionClient := getExecutionClient(t, buildQueue) + + // Announce a new worker, which creates a queue for operations. 
+ clock.EXPECT().Now().Return(time.Unix(1000, 0))
+ response, err := buildQueue.Synchronize(ctx, &remoteworker.SynchronizeRequest{
+ WorkerId: map[string]string{
+ "hostname": "worker123",
+ "thread": "42",
+ },
+ InstanceNamePrefix: "main",
+ Platform: platformForTesting,
+ CurrentState: &remoteworker.CurrentState{
+ WorkerState: &remoteworker.CurrentState_Executing_{
+ Executing: &remoteworker.CurrentState_Executing{
+ ActionDigest: &remoteexecution.Digest{
+ Hash: "099a3f6dc1e8e91dbcca4ea964cd2237d4b11733",
+ SizeBytes: 123,
+ },
+ ExecutionState: &remoteworker.CurrentState_Executing_FetchingInputs{
+ FetchingInputs: &emptypb.Empty{},
+ },
+ },
+ },
+ },
+ })
+ require.NoError(t, err)
+ testutil.RequireEqualProto(t, response, &remoteworker.SynchronizeResponse{
+ NextSynchronizationAt: &timestamppb.Timestamp{Seconds: 1000},
+ DesiredState: &remoteworker.DesiredState{
+ WorkerState: &remoteworker.DesiredState_Idle{
+ Idle: &emptypb.Empty{},
+ },
+ },
+ })
+
+ // Let one client enqueue an operation.
+ initialSizeClassSelector := mock.NewMockSelector(ctrl)
+ actionRouter.EXPECT().RouteAction(gomock.Any(), gomock.Any(), testutil.EqProto(t, &remoteexecution.Action{
+ CommandDigest: &remoteexecution.Digest{
+ Hash: "61c585c297d00409bd477b6b80759c94ec545ab4",
+ SizeBytes: 456,
+ },
+ }), nil).Return(platform.MustNewKey("main/suffix", platformForTesting), nil, initialSizeClassSelector, nil)
+ initialSizeClassLearner := mock.NewMockLearner(ctrl)
+ initialSizeClassSelector.EXPECT().Select([]uint32{0}).
+ Return(0, 15*time.Minute, 30*time.Minute, initialSizeClassLearner) + clock.EXPECT().Now().Return(time.Unix(1001, 0)) + timer := mock.NewMockTimer(ctrl) + clock.EXPECT().NewTimer(time.Minute).Return(timer, nil) + timer.EXPECT().Stop().Return(true) + uuidGenerator.EXPECT().Call().Return(uuid.Parse("36ebab65-3c4f-4faf-818b-2eabb4cd1b02")) + stream1, err := executionClient.Execute(ctx, &remoteexecution.ExecuteRequest{ + InstanceName: "main/suffix", + ActionDigest: &remoteexecution.Digest{ + Hash: "da39a3ee5e6b4b0d3255bfef95601890afd80709", + SizeBytes: 123, + }, + }) + require.NoError(t, err) + update, err := stream1.Recv() + require.NoError(t, err) + metadata, err := anypb.New(&remoteexecution.ExecuteOperationMetadata{ + Stage: remoteexecution.ExecutionStage_QUEUED, + ActionDigest: &remoteexecution.Digest{ + Hash: "da39a3ee5e6b4b0d3255bfef95601890afd80709", + SizeBytes: 123, + }, + }) + require.NoError(t, err) + testutil.RequireEqualProto(t, update, &longrunningpb.Operation{ + Name: "36ebab65-3c4f-4faf-818b-2eabb4cd1b02", + Metadata: metadata, + }) + + // Let the same worker repeatedly ask for work. It should + // constantly get the same operation assigned. This may happen + // when the network is flaky or the worker is crash-looping. + + // At the first iteration, the worker will report execution start. 
+ clock.EXPECT().Now().Return(time.Unix(1002, 0))
+ timer = mock.NewMockTimer(ctrl)
+ clock.EXPECT().NewTimer(time.Minute).Return(timer, nil)
+ timer.EXPECT().Stop().Return(true)
+
+ for i := int64(0); i < 10; i++ {
+ clock.EXPECT().Now().Return(time.Unix(1002+i, 0))
+ response, err := buildQueue.Synchronize(ctx, &remoteworker.SynchronizeRequest{
+ WorkerId: map[string]string{
+ "hostname": "worker123",
+ "thread": "42",
+ },
+ InstanceNamePrefix: "main",
+ Platform: platformForTesting,
+ CurrentState: &remoteworker.CurrentState{
+ WorkerState: &remoteworker.CurrentState_Idle{
+ Idle: &emptypb.Empty{},
+ },
+ },
+ })
+ require.NoError(t, err)
+ testutil.RequireEqualProto(t, &remoteworker.SynchronizeResponse{
+ NextSynchronizationAt: &timestamppb.Timestamp{Seconds: 1012 + i},
+ DesiredState: &remoteworker.DesiredState{
+ WorkerState: &remoteworker.DesiredState_Executing_{
+ Executing: &remoteworker.DesiredState_Executing{
+ DigestFunction: remoteexecution.DigestFunction_SHA1,
+ ActionDigest: &remoteexecution.Digest{
+ Hash: "da39a3ee5e6b4b0d3255bfef95601890afd80709",
+ SizeBytes: 123,
+ },
+ Action: &remoteexecution.Action{
+ CommandDigest: &remoteexecution.Digest{
+ Hash: "61c585c297d00409bd477b6b80759c94ec545ab4",
+ SizeBytes: 456,
+ },
+ Timeout: &durationpb.Duration{Seconds: 1800},
+ },
+ QueuedTimestamp: &timestamppb.Timestamp{Seconds: 1001},
+ InstanceNameSuffix: "suffix",
+ },
+ },
+ },
+ }, response)
+
+ // At the first iteration, we expect an execution-start message.
+ if i == 0 {
+ update, err = stream1.Recv()
+ require.NoError(t, err)
+ metadata, err = anypb.New(&remoteexecution.ExecuteOperationMetadata{
+ Stage: remoteexecution.ExecutionStage_EXECUTING,
+ ActionDigest: &remoteexecution.Digest{
+ Hash: "da39a3ee5e6b4b0d3255bfef95601890afd80709",
+ SizeBytes: 123,
+ },
+ })
+ testutil.RequireEqualProto(t, &longrunningpb.Operation{
+ Name: "36ebab65-3c4f-4faf-818b-2eabb4cd1b02",
+ Metadata: metadata,
+ }, update)
+ }
+ }
+
+ // Requesting the same operation too many times should cause the
+ // scheduler to give up on handing out the same operation. We
+ // don't want a single operation to crash-loop a worker
+ // indefinitely.
+ initialSizeClassLearner.EXPECT().Abandoned()
+ clock.EXPECT().Now().Return(time.Unix(1012, 0)).Times(3)
+ response, err = buildQueue.Synchronize(ctx, &remoteworker.SynchronizeRequest{
+ WorkerId: map[string]string{
+ "hostname": "worker123",
+ "thread": "42",
+ },
+ InstanceNamePrefix: "main",
+ Platform: platformForTesting,
+ CurrentState: &remoteworker.CurrentState{
+ WorkerState: &remoteworker.CurrentState_Executing_{
+ Executing: &remoteworker.CurrentState_Executing{
+ ActionDigest: &remoteexecution.Digest{
+ Hash: "099a3f6dc1e8e91dbcca4ea964cd2237d4b11733",
+ SizeBytes: 123,
+ },
+ ExecutionState: &remoteworker.CurrentState_Executing_FetchingInputs{
+ FetchingInputs: &emptypb.Empty{},
+ },
+ },
+ },
+ },
+ })
+ require.NoError(t, err)
+ testutil.RequireEqualProto(t, &remoteworker.SynchronizeResponse{
+ NextSynchronizationAt: &timestamppb.Timestamp{Seconds: 1012},
+ DesiredState: &remoteworker.DesiredState{
+ WorkerState: &remoteworker.DesiredState_Idle{
+ Idle: &emptypb.Empty{},
+ },
+ },
+ }, response)
+
+ // The client should be informed that the operation causes the
+ // worker to crash-loop.
+ update, err = stream1.Recv() + require.NoError(t, err) + metadata, err = anypb.New(&remoteexecution.ExecuteOperationMetadata{ + Stage: remoteexecution.ExecutionStage_COMPLETED, + ActionDigest: &remoteexecution.Digest{ + Hash: "da39a3ee5e6b4b0d3255bfef95601890afd80709", + SizeBytes: 123, + }, + }) + require.NoError(t, err) + executeResponse, err := anypb.New(&remoteexecution.ExecuteResponse{ + Status: status.New(codes.Internal, "Attempted to execute task 10 times, but it never completed. This task may cause worker {\"hostname\":\"worker123\",\"thread\":\"42\"} to crash.").Proto(), + }) + require.NoError(t, err) + testutil.RequireEqualProto(t, update, &longrunningpb.Operation{ + Name: "36ebab65-3c4f-4faf-818b-2eabb4cd1b02", + Metadata: metadata, + Done: true, + Result: &longrunningpb.Operation_Response{Response: executeResponse}, + }) +} + +func TestInMemoryBuildQueueKillOperationsOperationName(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + contentAddressableStorage := mock.NewMockBlobAccess(ctrl) + contentAddressableStorage.EXPECT().Get( + gomock.Any(), + digest.MustNewDigest("main", remoteexecution.DigestFunction_SHA1, "da39a3ee5e6b4b0d3255bfef95601890afd80709", 123), + ).Return(buffer.NewProtoBufferFromProto(&remoteexecution.Action{ + CommandDigest: &remoteexecution.Digest{ + Hash: "61c585c297d00409bd477b6b80759c94ec545ab4", + SizeBytes: 456, + }, + }, buffer.UserProvided)) + clock := mock.NewMockClock(ctrl) + clock.EXPECT().Now().Return(time.Unix(0, 0)) + uuidGenerator := mock.NewMockUUIDGenerator(ctrl) + actionRouter := mock.NewMockActionRouter(ctrl) + buildQueue := scheduler.NewInMemoryBuildQueue(contentAddressableStorage, clock, uuidGenerator.Call, &buildQueueConfigurationForTesting, 10000, actionRouter, allowAllAuthorizer, allowAllAuthorizer, allowAllAuthorizer) + executionClient := getExecutionClient(t, buildQueue) + + // Announce a new worker, which creates a queue for operations. 
+ clock.EXPECT().Now().Return(time.Unix(1000, 0))
+ response, err := buildQueue.Synchronize(ctx, &remoteworker.SynchronizeRequest{
+ WorkerId: map[string]string{
+ "hostname": "worker123",
+ "thread": "42",
+ },
+ InstanceNamePrefix: "main",
+ Platform: platformForTesting,
+ CurrentState: &remoteworker.CurrentState{
+ WorkerState: &remoteworker.CurrentState_Executing_{
+ Executing: &remoteworker.CurrentState_Executing{
+ ActionDigest: &remoteexecution.Digest{
+ Hash: "099a3f6dc1e8e91dbcca4ea964cd2237d4b11733",
+ SizeBytes: 123,
+ },
+ ExecutionState: &remoteworker.CurrentState_Executing_FetchingInputs{
+ FetchingInputs: &emptypb.Empty{},
+ },
+ },
+ },
+ },
+ })
+ require.NoError(t, err)
+ testutil.RequireEqualProto(t, response, &remoteworker.SynchronizeResponse{
+ NextSynchronizationAt: &timestamppb.Timestamp{Seconds: 1000},
+ DesiredState: &remoteworker.DesiredState{
+ WorkerState: &remoteworker.DesiredState_Idle{
+ Idle: &emptypb.Empty{},
+ },
+ },
+ })
+
+ // Let one client enqueue an operation.
+ initialSizeClassSelector := mock.NewMockSelector(ctrl)
+ actionRouter.EXPECT().RouteAction(gomock.Any(), gomock.Any(), testutil.EqProto(t, &remoteexecution.Action{
+ CommandDigest: &remoteexecution.Digest{
+ Hash: "61c585c297d00409bd477b6b80759c94ec545ab4",
+ SizeBytes: 456,
+ },
+ }), nil).Return(platform.MustNewKey("main", platformForTesting), nil, initialSizeClassSelector, nil)
+ initialSizeClassLearner := mock.NewMockLearner(ctrl)
+ initialSizeClassSelector.EXPECT().Select([]uint32{0}).
+ Return(0, 15*time.Minute, 30*time.Minute, initialSizeClassLearner) + clock.EXPECT().Now().Return(time.Unix(1001, 0)) + timer := mock.NewMockTimer(ctrl) + clock.EXPECT().NewTimer(time.Minute).Return(timer, nil) + timer.EXPECT().Stop().Return(true) + uuidGenerator.EXPECT().Call().Return(uuid.Parse("36ebab65-3c4f-4faf-818b-2eabb4cd1b02")) + stream1, err := executionClient.Execute(ctx, &remoteexecution.ExecuteRequest{ + InstanceName: "main", + ActionDigest: &remoteexecution.Digest{ + Hash: "da39a3ee5e6b4b0d3255bfef95601890afd80709", + SizeBytes: 123, + }, + }) + require.NoError(t, err) + update, err := stream1.Recv() + require.NoError(t, err) + metadata, err := anypb.New(&remoteexecution.ExecuteOperationMetadata{ + Stage: remoteexecution.ExecutionStage_QUEUED, + ActionDigest: &remoteexecution.Digest{ + Hash: "da39a3ee5e6b4b0d3255bfef95601890afd80709", + SizeBytes: 123, + }, + }) + require.NoError(t, err) + testutil.RequireEqualProto(t, update, &longrunningpb.Operation{ + Name: "36ebab65-3c4f-4faf-818b-2eabb4cd1b02", + Metadata: metadata, + }) + + // Let the worker extract the operation from the queue. 
+	clock.EXPECT().Now().Return(time.Unix(1002, 0)).Times(2)
+	timer = mock.NewMockTimer(ctrl)
+	clock.EXPECT().NewTimer(time.Minute).Return(timer, nil)
+	timer.EXPECT().Stop().Return(true)
+	response, err = buildQueue.Synchronize(ctx, &remoteworker.SynchronizeRequest{
+		WorkerId: map[string]string{
+			"hostname": "worker123",
+			"thread":   "42",
+		},
+		InstanceNamePrefix: "main",
+		Platform:           platformForTesting,
+		CurrentState: &remoteworker.CurrentState{
+			WorkerState: &remoteworker.CurrentState_Idle{
+				Idle: &emptypb.Empty{},
+			},
+		},
+	})
+	require.NoError(t, err)
+	testutil.RequireEqualProto(t, &remoteworker.SynchronizeResponse{
+		NextSynchronizationAt: &timestamppb.Timestamp{Seconds: 1012},
+		DesiredState: &remoteworker.DesiredState{
+			WorkerState: &remoteworker.DesiredState_Executing_{
+				Executing: &remoteworker.DesiredState_Executing{
+					DigestFunction: remoteexecution.DigestFunction_SHA1,
+					ActionDigest: &remoteexecution.Digest{
+						Hash:      "da39a3ee5e6b4b0d3255bfef95601890afd80709",
+						SizeBytes: 123,
+					},
+					Action: &remoteexecution.Action{
+						CommandDigest: &remoteexecution.Digest{
+							Hash:      "61c585c297d00409bd477b6b80759c94ec545ab4",
+							SizeBytes: 456,
+						},
+						Timeout: &durationpb.Duration{Seconds: 1800},
+					},
+					QueuedTimestamp: &timestamppb.Timestamp{Seconds: 1001},
+				},
+			},
+		},
+	}, response)
+
+	// The client should be notified that the operation has started executing.
+	update, err = stream1.Recv()
+	require.NoError(t, err)
+	metadata, err = anypb.New(&remoteexecution.ExecuteOperationMetadata{
+		Stage: remoteexecution.ExecutionStage_EXECUTING,
+		ActionDigest: &remoteexecution.Digest{
+			Hash:      "da39a3ee5e6b4b0d3255bfef95601890afd80709",
+			SizeBytes: 123,
+		},
+	})
+	require.NoError(t, err)
+	testutil.RequireEqualProto(t, &longrunningpb.Operation{
+		Name:     "36ebab65-3c4f-4faf-818b-2eabb4cd1b02",
+		Metadata: metadata,
+	}, update)
+
+	// Kill the operation.
+ initialSizeClassLearner.EXPECT().Abandoned() + clock.EXPECT().Now().Return(time.Unix(1007, 0)).Times(4) + _, err = buildQueue.KillOperations(ctx, &buildqueuestate.KillOperationsRequest{ + Filter: &buildqueuestate.KillOperationsRequest_Filter{ + Type: &buildqueuestate.KillOperationsRequest_Filter_OperationName{ + OperationName: "36ebab65-3c4f-4faf-818b-2eabb4cd1b02", + }, + }, + Status: status.New(codes.Unavailable, "Operation was killed administratively").Proto(), + }) + require.NoError(t, err) + + // The client should be informed that the operation was killed. + update, err = stream1.Recv() + require.NoError(t, err) + metadata, err = anypb.New(&remoteexecution.ExecuteOperationMetadata{ + Stage: remoteexecution.ExecutionStage_COMPLETED, + ActionDigest: &remoteexecution.Digest{ + Hash: "da39a3ee5e6b4b0d3255bfef95601890afd80709", + SizeBytes: 123, + }, + }) + require.NoError(t, err) + executeResponse, err := anypb.New(&remoteexecution.ExecuteResponse{ + Status: status.New(codes.Unavailable, "Operation was killed administratively").Proto(), + }) + require.NoError(t, err) + testutil.RequireEqualProto(t, update, &longrunningpb.Operation{ + Name: "36ebab65-3c4f-4faf-818b-2eabb4cd1b02", + Metadata: metadata, + Done: true, + Result: &longrunningpb.Operation_Response{Response: executeResponse}, + }) + + // The worker should be requested to switch back to idle the + // next time it contacts the scheduler. 
+	clock.EXPECT().Now().Return(time.Unix(1012, 0))
+	response, err = buildQueue.Synchronize(ctx, &remoteworker.SynchronizeRequest{
+		WorkerId: map[string]string{
+			"hostname": "worker123",
+			"thread":   "42",
+		},
+		InstanceNamePrefix: "main",
+		Platform:           platformForTesting,
+		CurrentState: &remoteworker.CurrentState{
+			WorkerState: &remoteworker.CurrentState_Executing_{
+				Executing: &remoteworker.CurrentState_Executing{
+					ActionDigest: &remoteexecution.Digest{
+						Hash:      "da39a3ee5e6b4b0d3255bfef95601890afd80709",
+						SizeBytes: 123,
+					},
+					ExecutionState: &remoteworker.CurrentState_Executing_FetchingInputs{
+						FetchingInputs: &emptypb.Empty{},
+					},
+				},
+			},
+		},
+	})
+	require.NoError(t, err)
+	testutil.RequireEqualProto(t, response, &remoteworker.SynchronizeResponse{
+		NextSynchronizationAt: &timestamppb.Timestamp{Seconds: 1012},
+		DesiredState: &remoteworker.DesiredState{
+			WorkerState: &remoteworker.DesiredState_Idle{
+				Idle: &emptypb.Empty{},
+			},
+		},
+	})
+}
+
+func TestInMemoryBuildQueueKillOperationsSizeClassQueueWithoutWorkers(t *testing.T) {
+	ctrl, ctx := gomock.WithContext(context.Background(), t)
+
+	contentAddressableStorage := mock.NewMockBlobAccess(ctrl)
+	contentAddressableStorage.EXPECT().Get(
+		gomock.Any(),
+		digest.MustNewDigest("main", remoteexecution.DigestFunction_SHA1, "da39a3ee5e6b4b0d3255bfef95601890afd80709", 123),
+	).Return(buffer.NewProtoBufferFromProto(&remoteexecution.Action{
+		CommandDigest: &remoteexecution.Digest{
+			Hash:      "61c585c297d00409bd477b6b80759c94ec545ab4",
+			SizeBytes: 456,
+		},
+	}, buffer.UserProvided))
+	clock := mock.NewMockClock(ctrl)
+	clock.EXPECT().Now().Return(time.Unix(0, 0))
+	uuidGenerator := mock.NewMockUUIDGenerator(ctrl)
+	actionRouter := mock.NewMockActionRouter(ctrl)
+	buildQueue := scheduler.NewInMemoryBuildQueue(contentAddressableStorage, clock, uuidGenerator.Call, &buildQueueConfigurationForTesting, 10000, actionRouter, allowAllAuthorizer, allowAllAuthorizer, allowAllAuthorizer)
+	executionClient := getExecutionClient(t, buildQueue)
+
+	// If the scheduler is in the initial state, the size class
+	// queue won't exist, meaning there are no operations to kill.
+	clock.EXPECT().Now().Return(time.Unix(1000, 0))
+	_, err := buildQueue.KillOperations(ctx, &buildqueuestate.KillOperationsRequest{
+		Filter: &buildqueuestate.KillOperationsRequest_Filter{
+			Type: &buildqueuestate.KillOperationsRequest_Filter_SizeClassQueueWithoutWorkers{
+				SizeClassQueueWithoutWorkers: &buildqueuestate.SizeClassQueueName{
+					PlatformQueueName: &buildqueuestate.PlatformQueueName{
+						InstanceNamePrefix: "main",
+						Platform:           platformForTesting,
+					},
+				},
+			},
+		},
+		Status: status.New(codes.Unavailable, "This should have no effect, as the size class queue does not exist").Proto(),
+	})
+	testutil.RequireEqualStatus(t, status.Error(codes.NotFound, "Size class queue not found"), err)
+
+	// Announce a new worker, which creates a queue for operations.
+	clock.EXPECT().Now().Return(time.Unix(1000, 0))
+	response, err := buildQueue.Synchronize(ctx, &remoteworker.SynchronizeRequest{
+		WorkerId: map[string]string{
+			"hostname": "worker123",
+			"thread":   "42",
+		},
+		InstanceNamePrefix: "main",
+		Platform:           platformForTesting,
+		CurrentState: &remoteworker.CurrentState{
+			WorkerState: &remoteworker.CurrentState_Executing_{
+				Executing: &remoteworker.CurrentState_Executing{
+					ActionDigest: &remoteexecution.Digest{
+						Hash:      "099a3f6dc1e8e91dbcca4ea964cd2237d4b11733",
+						SizeBytes: 123,
+					},
+					ExecutionState: &remoteworker.CurrentState_Executing_FetchingInputs{
+						FetchingInputs: &emptypb.Empty{},
+					},
+				},
+			},
+		},
+	})
+	require.NoError(t, err)
+	testutil.RequireEqualProto(t, response, &remoteworker.SynchronizeResponse{
+		NextSynchronizationAt: &timestamppb.Timestamp{Seconds: 1000},
+		DesiredState: &remoteworker.DesiredState{
+			WorkerState: &remoteworker.DesiredState_Idle{
+				Idle: &emptypb.Empty{},
+			},
+		},
+	})
+
+	// Let one client enqueue an operation.
+ initialSizeClassSelector := mock.NewMockSelector(ctrl) + actionRouter.EXPECT().RouteAction(gomock.Any(), gomock.Any(), testutil.EqProto(t, &remoteexecution.Action{ + CommandDigest: &remoteexecution.Digest{ + Hash: "61c585c297d00409bd477b6b80759c94ec545ab4", + SizeBytes: 456, + }, + }), nil).Return(platform.MustNewKey("main", platformForTesting), nil, initialSizeClassSelector, nil) + initialSizeClassLearner := mock.NewMockLearner(ctrl) + initialSizeClassSelector.EXPECT().Select([]uint32{0}). + Return(0, 15*time.Minute, 30*time.Minute, initialSizeClassLearner) + clock.EXPECT().Now().Return(time.Unix(1001, 0)) + timer := mock.NewMockTimer(ctrl) + clock.EXPECT().NewTimer(time.Minute).Return(timer, nil) + timer.EXPECT().Stop().Return(true) + uuidGenerator.EXPECT().Call().Return(uuid.Parse("36ebab65-3c4f-4faf-818b-2eabb4cd1b02")) + stream1, err := executionClient.Execute(ctx, &remoteexecution.ExecuteRequest{ + InstanceName: "main", + ActionDigest: &remoteexecution.Digest{ + Hash: "da39a3ee5e6b4b0d3255bfef95601890afd80709", + SizeBytes: 123, + }, + }) + require.NoError(t, err) + update, err := stream1.Recv() + require.NoError(t, err) + metadata, err := anypb.New(&remoteexecution.ExecuteOperationMetadata{ + Stage: remoteexecution.ExecutionStage_QUEUED, + ActionDigest: &remoteexecution.Digest{ + Hash: "da39a3ee5e6b4b0d3255bfef95601890afd80709", + SizeBytes: 123, + }, + }) + require.NoError(t, err) + testutil.RequireEqualProto(t, update, &longrunningpb.Operation{ + Name: "36ebab65-3c4f-4faf-818b-2eabb4cd1b02", + Metadata: metadata, + }) + + // Immediately killing the operation should fail, as the size + // class queue still has one worker at this point. 
+ clock.EXPECT().Now().Return(time.Unix(1002, 0)) + _, err = buildQueue.KillOperations(ctx, &buildqueuestate.KillOperationsRequest{ + Filter: &buildqueuestate.KillOperationsRequest_Filter{ + Type: &buildqueuestate.KillOperationsRequest_Filter_SizeClassQueueWithoutWorkers{ + SizeClassQueueWithoutWorkers: &buildqueuestate.SizeClassQueueName{ + PlatformQueueName: &buildqueuestate.PlatformQueueName{ + InstanceNamePrefix: "main", + Platform: platformForTesting, + }, + }, + }, + }, + Status: status.New(codes.Unavailable, "This should have no effect, as the size class queue still has workers").Proto(), + }) + testutil.RequireEqualStatus(t, status.Error(codes.FailedPrecondition, "Cannot kill operations, as size class queue still has workers"), err) + + // If a sufficient amount of time has passed, the worker should + // have disappeared. At that point KillOperations() should + // succeed. + initialSizeClassLearner.EXPECT().Abandoned() + clock.EXPECT().Now().Return(time.Unix(1060, 0)).Times(3) + _, err = buildQueue.KillOperations(ctx, &buildqueuestate.KillOperationsRequest{ + Filter: &buildqueuestate.KillOperationsRequest_Filter{ + Type: &buildqueuestate.KillOperationsRequest_Filter_SizeClassQueueWithoutWorkers{ + SizeClassQueueWithoutWorkers: &buildqueuestate.SizeClassQueueName{ + PlatformQueueName: &buildqueuestate.PlatformQueueName{ + InstanceNamePrefix: "main", + Platform: &remoteexecution.Platform{ + Properties: []*remoteexecution.Platform_Property{ + {Name: "cpu", Value: "armv6"}, + {Name: "os", Value: "linux"}, + }, + }, + }, + }, + }, + }, + Status: status.New(codes.Unavailable, "Operation was killed administratively").Proto(), + }) + require.NoError(t, err) + + // The client should be informed that the operation was killed. 
+ update, err = stream1.Recv() + require.NoError(t, err) + metadata, err = anypb.New(&remoteexecution.ExecuteOperationMetadata{ + Stage: remoteexecution.ExecutionStage_COMPLETED, + ActionDigest: &remoteexecution.Digest{ + Hash: "da39a3ee5e6b4b0d3255bfef95601890afd80709", + SizeBytes: 123, + }, + }) + require.NoError(t, err) + executeResponse, err := anypb.New(&remoteexecution.ExecuteResponse{ + Status: status.New(codes.Unavailable, "Operation was killed administratively").Proto(), + }) + require.NoError(t, err) + testutil.RequireEqualProto(t, update, &longrunningpb.Operation{ + Name: "36ebab65-3c4f-4faf-818b-2eabb4cd1b02", + Metadata: metadata, + Done: true, + Result: &longrunningpb.Operation_Response{Response: executeResponse}, + }) +} + +func TestInMemoryBuildQueueIdleWorkerSynchronizationTimeout(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + contentAddressableStorage := mock.NewMockBlobAccess(ctrl) + clock := mock.NewMockClock(ctrl) + clock.EXPECT().Now().Return(time.Unix(0, 0)) + uuidGenerator := mock.NewMockUUIDGenerator(ctrl) + actionRouter := mock.NewMockActionRouter(ctrl) + buildQueue := scheduler.NewInMemoryBuildQueue(contentAddressableStorage, clock, uuidGenerator.Call, &buildQueueConfigurationForTesting, 10000, actionRouter, allowAllAuthorizer, allowAllAuthorizer, allowAllAuthorizer) + + // When no work appears, workers should still be woken up + // periodically to resynchronize. This ensures that workers that + // disappear without closing their TCP connections are purged + // quickly. 
+	clock.EXPECT().Now().Return(time.Unix(1000, 0))
+	timer := mock.NewMockTimer(ctrl)
+	timerChannel := make(chan time.Time, 1)
+	timerChannel <- time.Unix(1060, 0)
+	timer.EXPECT().Stop()
+	clock.EXPECT().NewTimer(time.Minute).Return(timer, timerChannel)
+	response, err := buildQueue.Synchronize(ctx, &remoteworker.SynchronizeRequest{
+		WorkerId: map[string]string{
+			"hostname": "worker123",
+			"thread":   "42",
+		},
+		InstanceNamePrefix: "main",
+		Platform: &remoteexecution.Platform{
+			Properties: []*remoteexecution.Platform_Property{
+				{Name: "cpu", Value: "armv6"},
+				{Name: "os", Value: "linux"},
+			},
+		},
+		CurrentState: &remoteworker.CurrentState{
+			WorkerState: &remoteworker.CurrentState_Idle{
+				Idle: &emptypb.Empty{},
+			},
+		},
+	})
+	require.NoError(t, err)
+	testutil.RequireEqualProto(t, response, &remoteworker.SynchronizeResponse{
+		NextSynchronizationAt: &timestamppb.Timestamp{Seconds: 1060},
+		DesiredState: &remoteworker.DesiredState{
+			WorkerState: &remoteworker.DesiredState_Idle{
+				Idle: &emptypb.Empty{},
+			},
+		},
+	})
+}
+
+func TestInMemoryBuildQueueDrainedWorker(t *testing.T) {
+	ctrl, ctx := gomock.WithContext(context.Background(), t)
+
+	contentAddressableStorage := mock.NewMockBlobAccess(ctrl)
+	contentAddressableStorage.EXPECT().Get(
+		gomock.Any(),
+		digest.MustNewDigest("main", remoteexecution.DigestFunction_SHA1, "da39a3ee5e6b4b0d3255bfef95601890afd80709", 123),
+	).Return(buffer.NewProtoBufferFromProto(&remoteexecution.Action{
+		CommandDigest: &remoteexecution.Digest{
+			Hash:      "61c585c297d00409bd477b6b80759c94ec545ab4",
+			SizeBytes: 456,
+		},
+	}, buffer.UserProvided))
+	clock := mock.NewMockClock(ctrl)
+	clock.EXPECT().Now().Return(time.Unix(0, 0))
+	uuidGenerator := mock.NewMockUUIDGenerator(ctrl)
+	actionRouter := mock.NewMockActionRouter(ctrl)
+	buildQueue := scheduler.NewInMemoryBuildQueue(contentAddressableStorage, clock, uuidGenerator.Call, &buildQueueConfigurationForTesting, 10000, actionRouter, allowAllAuthorizer, allowAllAuthorizer, allowAllAuthorizer)
+	executionClient := getExecutionClient(t, buildQueue)
+
+	// Announce a new worker, which creates a queue for operations.
+	clock.EXPECT().Now().Return(time.Unix(1000, 0))
+	response, err := buildQueue.Synchronize(ctx, &remoteworker.SynchronizeRequest{
+		WorkerId: map[string]string{
+			"hostname": "worker123",
+			"thread":   "42",
+		},
+		InstanceNamePrefix: "main",
+		Platform:           platformForTesting,
+		CurrentState: &remoteworker.CurrentState{
+			WorkerState: &remoteworker.CurrentState_Executing_{
+				Executing: &remoteworker.CurrentState_Executing{
+					ActionDigest: &remoteexecution.Digest{
+						Hash:      "099a3f6dc1e8e91dbcca4ea964cd2237d4b11733",
+						SizeBytes: 123,
+					},
+					ExecutionState: &remoteworker.CurrentState_Executing_FetchingInputs{
+						FetchingInputs: &emptypb.Empty{},
+					},
+				},
+			},
+		},
+	})
+	require.NoError(t, err)
+	testutil.RequireEqualProto(t, response, &remoteworker.SynchronizeResponse{
+		NextSynchronizationAt: &timestamppb.Timestamp{Seconds: 1000},
+		DesiredState: &remoteworker.DesiredState{
+			WorkerState: &remoteworker.DesiredState_Idle{
+				Idle: &emptypb.Empty{},
+			},
+		},
+	})
+
+	// The worker should not be drained by default.
+	sizeClassQueueName := &buildqueuestate.SizeClassQueueName{
+		PlatformQueueName: &buildqueuestate.PlatformQueueName{
+			InstanceNamePrefix: "main",
+			Platform: &remoteexecution.Platform{
+				Properties: []*remoteexecution.Platform_Property{
+					{Name: "cpu", Value: "armv6"},
+					{Name: "os", Value: "linux"},
+				},
+			},
+		},
+	}
+	clock.EXPECT().Now().Return(time.Unix(1001, 0))
+	workerState, err := buildQueue.ListWorkers(ctx, &buildqueuestate.ListWorkersRequest{
+		Filter: &buildqueuestate.ListWorkersRequest_Filter{
+			Type: &buildqueuestate.ListWorkersRequest_Filter_All{
+				All: sizeClassQueueName,
+			},
+		},
+		PageSize: 1000,
+	})
+	require.NoError(t, err)
+	require.Equal(t, &buildqueuestate.ListWorkersResponse{
+		Workers: []*buildqueuestate.WorkerState{
+			{
+				Id: map[string]string{
+					"hostname": "worker123",
+					"thread":   "42",
+				},
+				Timeout: &timestamppb.Timestamp{Seconds: 1060},
+				Drained: false,
+			},
+		},
+		PaginationInfo: &buildqueuestate.PaginationInfo{
+			StartIndex:   0,
+			TotalEntries: 1,
+		},
+	}, workerState)
+
+	// Adding a drain that doesn't match the worker should cause no
+	// changes.
+	clock.EXPECT().Now().Return(time.Unix(1003, 0))
+	_, err = buildQueue.AddDrain(ctx, &buildqueuestate.AddOrRemoveDrainRequest{
+		SizeClassQueueName: sizeClassQueueName,
+		WorkerIdPattern: map[string]string{
+			"hostname": "worker124",
+		},
+	})
+	require.NoError(t, err)
+	clock.EXPECT().Now().Return(time.Unix(1004, 0))
+	workerState, err = buildQueue.ListWorkers(ctx, &buildqueuestate.ListWorkersRequest{
+		Filter: &buildqueuestate.ListWorkersRequest_Filter{
+			Type: &buildqueuestate.ListWorkersRequest_Filter_All{
+				All: sizeClassQueueName,
+			},
+		},
+		PageSize: 1000,
+	})
+	require.NoError(t, err)
+	require.Equal(t, &buildqueuestate.ListWorkersResponse{
+		Workers: []*buildqueuestate.WorkerState{
+			{
+				Id: map[string]string{
+					"hostname": "worker123",
+					"thread":   "42",
+				},
+				Timeout: &timestamppb.Timestamp{Seconds: 1060},
+				Drained: false,
+			},
+		},
+		PaginationInfo: &buildqueuestate.PaginationInfo{
+			StartIndex:   0,
+			TotalEntries: 1,
+		},
+	}, workerState)
+
+	// Adding a drain that does match the worker should cause it to
+	// be reported as if being drained.
+	clock.EXPECT().Now().Return(time.Unix(1005, 0))
+	_, err = buildQueue.AddDrain(ctx, &buildqueuestate.AddOrRemoveDrainRequest{
+		SizeClassQueueName: sizeClassQueueName,
+		WorkerIdPattern: map[string]string{
+			"hostname": "worker123",
+		},
+	})
+	require.NoError(t, err)
+	clock.EXPECT().Now().Return(time.Unix(1006, 0))
+	workerState, err = buildQueue.ListWorkers(ctx, &buildqueuestate.ListWorkersRequest{
+		Filter: &buildqueuestate.ListWorkersRequest_Filter{
+			Type: &buildqueuestate.ListWorkersRequest_Filter_All{
+				All: sizeClassQueueName,
+			},
+		},
+		PageSize: 1000,
+	})
+	require.NoError(t, err)
+	require.Equal(t, &buildqueuestate.ListWorkersResponse{
+		Workers: []*buildqueuestate.WorkerState{
+			{
+				Id: map[string]string{
+					"hostname": "worker123",
+					"thread":   "42",
+				},
+				Timeout: &timestamppb.Timestamp{Seconds: 1060},
+				Drained: true,
+			},
+		},
+		PaginationInfo: &buildqueuestate.PaginationInfo{
+			StartIndex:   0,
+			TotalEntries: 1,
+		},
+	}, workerState)
+
+	// Enqueue an operation.
+	initialSizeClassSelector := mock.NewMockSelector(ctrl)
+	actionRouter.EXPECT().RouteAction(gomock.Any(), gomock.Any(), testutil.EqProto(t, &remoteexecution.Action{
+		CommandDigest: &remoteexecution.Digest{
+			Hash:      "61c585c297d00409bd477b6b80759c94ec545ab4",
+			SizeBytes: 456,
+		},
+	}), nil).Return(platform.MustNewKey("main", platformForTesting), nil, initialSizeClassSelector, nil)
+	initialSizeClassLearner := mock.NewMockLearner(ctrl)
+	initialSizeClassSelector.EXPECT().Select([]uint32{0}).
+ Return(0, 15*time.Minute, 30*time.Minute, initialSizeClassLearner) + clock.EXPECT().Now().Return(time.Unix(1007, 0)) + timer1 := mock.NewMockTimer(ctrl) + clock.EXPECT().NewTimer(time.Minute).Return(timer1, nil) + uuidGenerator.EXPECT().Call().Return(uuid.Parse("36ebab65-3c4f-4faf-818b-2eabb4cd1b02")) + stream1, err := executionClient.Execute(ctx, &remoteexecution.ExecuteRequest{ + InstanceName: "main", + ActionDigest: &remoteexecution.Digest{ + Hash: "da39a3ee5e6b4b0d3255bfef95601890afd80709", + SizeBytes: 123, + }, + }) + require.NoError(t, err) + update, err := stream1.Recv() + require.NoError(t, err) + metadata, err := anypb.New(&remoteexecution.ExecuteOperationMetadata{ + Stage: remoteexecution.ExecutionStage_QUEUED, + ActionDigest: &remoteexecution.Digest{ + Hash: "da39a3ee5e6b4b0d3255bfef95601890afd80709", + SizeBytes: 123, + }, + }) + require.NoError(t, err) + testutil.RequireEqualProto(t, update, &longrunningpb.Operation{ + Name: "36ebab65-3c4f-4faf-818b-2eabb4cd1b02", + Metadata: metadata, + }) + + // Because the worker is drained, the scheduler should not be + // willing to return the operation. 
+	clock.EXPECT().Now().Return(time.Unix(1008, 0))
+	response, err = buildQueue.Synchronize(ctx, &remoteworker.SynchronizeRequest{
+		WorkerId: map[string]string{
+			"hostname": "worker123",
+			"thread":   "42",
+		},
+		InstanceNamePrefix: "main",
+		Platform:           platformForTesting,
+		CurrentState: &remoteworker.CurrentState{
+			WorkerState: &remoteworker.CurrentState_Executing_{
+				Executing: &remoteworker.CurrentState_Executing{
+					ActionDigest: &remoteexecution.Digest{
+						Hash:      "099a3f6dc1e8e91dbcca4ea964cd2237d4b11733",
+						SizeBytes: 123,
+					},
+					ExecutionState: &remoteworker.CurrentState_Executing_FetchingInputs{
+						FetchingInputs: &emptypb.Empty{},
+					},
+				},
+			},
+		},
+	})
+	require.NoError(t, err)
+	testutil.RequireEqualProto(t, response, &remoteworker.SynchronizeResponse{
+		NextSynchronizationAt: &timestamppb.Timestamp{Seconds: 1008},
+		DesiredState: &remoteworker.DesiredState{
+			WorkerState: &remoteworker.DesiredState_Idle{
+				Idle: &emptypb.Empty{},
+			},
+		},
+	})
+
+	// Remove the drain. The scheduler should now return the
+	// operation if requested.
+ clock.EXPECT().Now().Return(time.Unix(1009, 0)) + _, err = buildQueue.RemoveDrain(ctx, &buildqueuestate.AddOrRemoveDrainRequest{ + SizeClassQueueName: sizeClassQueueName, + WorkerIdPattern: map[string]string{ + "hostname": "worker123", + }, + }) + require.NoError(t, err) + clock.EXPECT().Now().Return(time.Unix(1010, 0)).Times(2) + timer1.EXPECT().Stop().Return(true) + clock.EXPECT().NewTimer(time.Minute).Return(nil, nil) + response, err = buildQueue.Synchronize(ctx, &remoteworker.SynchronizeRequest{ + WorkerId: map[string]string{ + "hostname": "worker123", + "thread": "42", + }, + InstanceNamePrefix: "main", + Platform: platformForTesting, + CurrentState: &remoteworker.CurrentState{ + WorkerState: &remoteworker.CurrentState_Executing_{ + Executing: &remoteworker.CurrentState_Executing{ + ActionDigest: &remoteexecution.Digest{ + Hash: "099a3f6dc1e8e91dbcca4ea964cd2237d4b11733", + SizeBytes: 123, + }, + ExecutionState: &remoteworker.CurrentState_Executing_FetchingInputs{ + FetchingInputs: &emptypb.Empty{}, + }, + }, + }, + }, + }) + require.NoError(t, err) + testutil.RequireEqualProto(t, &remoteworker.SynchronizeResponse{ + NextSynchronizationAt: ×tamppb.Timestamp{Seconds: 1020}, + DesiredState: &remoteworker.DesiredState{ + WorkerState: &remoteworker.DesiredState_Executing_{ + Executing: &remoteworker.DesiredState_Executing{ + DigestFunction: remoteexecution.DigestFunction_SHA1, + ActionDigest: &remoteexecution.Digest{ + Hash: "da39a3ee5e6b4b0d3255bfef95601890afd80709", + SizeBytes: 123, + }, + Action: &remoteexecution.Action{ + CommandDigest: &remoteexecution.Digest{ + Hash: "61c585c297d00409bd477b6b80759c94ec545ab4", + SizeBytes: 456, + }, + Timeout: &durationpb.Duration{Seconds: 1800}, + }, + QueuedTimestamp: ×tamppb.Timestamp{Seconds: 1007}, + }, + }, + }, + }, response) + update, err = stream1.Recv() + require.NoError(t, err) + metadata, err = anypb.New(&remoteexecution.ExecuteOperationMetadata{ + Stage: remoteexecution.ExecutionStage_EXECUTING, + 
ActionDigest: &remoteexecution.Digest{ + Hash: "da39a3ee5e6b4b0d3255bfef95601890afd80709", + SizeBytes: 123, + }, + }) + require.NoError(t, err) + testutil.RequireEqualProto(t, &longrunningpb.Operation{ + Name: "36ebab65-3c4f-4faf-818b-2eabb4cd1b02", + Metadata: metadata, + }, update) +} + +func TestInMemoryBuildQueueInvocationFairness(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + contentAddressableStorage := mock.NewMockBlobAccess(ctrl) + clock := mock.NewMockClock(ctrl) + clock.EXPECT().Now().Return(time.Unix(0, 0)) + uuidGenerator := mock.NewMockUUIDGenerator(ctrl) + actionRouter := mock.NewMockActionRouter(ctrl) + buildQueue := scheduler.NewInMemoryBuildQueue(contentAddressableStorage, clock, uuidGenerator.Call, &buildQueueConfigurationForTesting, 10000, actionRouter, allowAllAuthorizer, allowAllAuthorizer, allowAllAuthorizer) + executionClient := getExecutionClient(t, buildQueue) + + // Announce a new worker, which creates a queue for operations. + clock.EXPECT().Now().Return(time.Unix(1000, 0)) + response, err := buildQueue.Synchronize(ctx, &remoteworker.SynchronizeRequest{ + WorkerId: map[string]string{ + "hostname": "worker123", + "thread": "42", + }, + InstanceNamePrefix: "main", + Platform: platformForTesting, + CurrentState: &remoteworker.CurrentState{ + WorkerState: &remoteworker.CurrentState_Executing_{ + Executing: &remoteworker.CurrentState_Executing{ + ActionDigest: &remoteexecution.Digest{ + Hash: "099a3f6dc1e8e91dbcca4ea964cd2237d4b11733", + SizeBytes: 123, + }, + ExecutionState: &remoteworker.CurrentState_Executing_FetchingInputs{ + FetchingInputs: &emptypb.Empty{}, + }, + }, + }, + }, + }) + require.NoError(t, err) + testutil.RequireEqualProto(t, response, &remoteworker.SynchronizeResponse{ + NextSynchronizationAt: ×tamppb.Timestamp{Seconds: 1000}, + DesiredState: &remoteworker.DesiredState{ + WorkerState: &remoteworker.DesiredState_Idle{ + Idle: &emptypb.Empty{}, + }, + }, + }) + + operationParameters := 
[...]struct { + invocationID string + actionHash string + commandHash string + operationName string + }{ + {"dfe3ca8b-64bc-4efd-b8a9-2bdc3827f0ac", "f4d362da1f854e54984275aa78e2d9f8", "5fbd808d5cf24a219824664040a95c44", "93e2738a-837c-4524-9f0f-430b47caa889"}, + {"dfe3ca8b-64bc-4efd-b8a9-2bdc3827f0ac", "25b997dcfbe34f95bbbab10bc02e1a61", "2ac9ddc1a64442fabd819358a206909e", "fadbaf2f-669f-47ee-bce4-35f255d2ba16"}, + {"dfe3ca8b-64bc-4efd-b8a9-2bdc3827f0ac", "deb57a22d6b149b2b83d26dbc30d8f28", "5def73fca7c945d3998d984194879f5f", "1d320c02-399a-40bb-bb9a-031960c7e542"}, + {"dfe3ca8b-64bc-4efd-b8a9-2bdc3827f0ac", "13590054c8aa457d88a2abc8a0a76d32", "6f9f4cc9c831495bbee40d5a4c9dd183", "a5fb4aeb-e444-4851-b207-7c4da7407955"}, + {"dfe3ca8b-64bc-4efd-b8a9-2bdc3827f0ac", "697a50f669144463acb45cb23a195aee", "756a36b01ff148c88a43fcc240accb2b", "7359bd3c-32b0-40b9-9d8d-341420267d8e"}, + {"4712de52-4518-4840-91d5-c9e13a38cc5a", "d35515d9ebe349639957da438a21bd3c", "9f58a3e3feb044e2a31ffad73479185b", "e7922f00-fc9b-4390-97fe-8564a00d190a"}, + {"4712de52-4518-4840-91d5-c9e13a38cc5a", "f8362f9b02b04765b4873afce32aa7df", "6196409166614f5691f34b18affaa3a8", "76f7be3b-a685-4ac0-b770-d4305aabab5f"}, + {"4712de52-4518-4840-91d5-c9e13a38cc5a", "1ee144ab7e7c43cfaa46ca1ccecad26e", "391d52750e9c494cac10dc72e8f7e3db", "7d206d16-9e83-4f91-a515-0ee8132026ee"}, + {"4712de52-4518-4840-91d5-c9e13a38cc5a", "15f5459fc4004ab0b95f51a9b933648a", "352f5536cd304f24947019801b357ec1", "28ae0c5a-3b93-4ac0-b434-b2d104b43562"}, + {"4712de52-4518-4840-91d5-c9e13a38cc5a", "916792a1220a4d81b6135adbae25c75d", "500673772b944b9f8ee654ea0adb2961", "bfeedf7c-29ec-437a-8b61-dedb2a73a3e4"}, + {"351fdffe-04df-4fe0-98c7-b4f5a463fd52", "9a5d807902e048e896a0d7c437943598", "57efd9f34d664874a4bbba85e90a1515", "91a7f281-b9af-4f06-85f3-751a01baf7ba"}, + {"351fdffe-04df-4fe0-98c7-b4f5a463fd52", "449a88c7885f4dbb94383e496646cfd2", "64fb977929214df794b30ad1d896c212", "081cffbb-4e14-44ff-b4a0-bd7c1ceadd6c"}, + 
{"351fdffe-04df-4fe0-98c7-b4f5a463fd52", "6fcd0a2f3c0040f2902891d4557cddc0", "c3ad5148dd304a74b010d948f606784e", "489af73a-2c3a-429b-9fa0-22129fbddfd2"}, + {"351fdffe-04df-4fe0-98c7-b4f5a463fd52", "219cc379cdcc4e4fa2cae57b251c5582", "d95707fc59364e4593947262d2d64cf9", "61fdcf8f-5212-45eb-8e72-30b913e8cce9"}, + {"351fdffe-04df-4fe0-98c7-b4f5a463fd52", "97e481163453429a88cfdad7e8ce96f8", "1f12e1fb0ee9430b9a1b172b96dad01a", "a1384f92-1737-4d60-8714-f6e517fe4f5d"}, + {"4fd2080e-d805-4d68-ab51-460106c8f372", "82fdcadeca184486a3c6a3aa780c6fe9", "493ea806febb4cd1803e6ad9ea58746b", "13787980-1f0c-4ff3-912a-866d86039696"}, + {"4fd2080e-d805-4d68-ab51-460106c8f372", "d1384d0de35c4bab9f424a8be6283030", "9e14e453d9ef4abb9c6fc98edd248641", "8e20ef3a-0916-40b5-a733-8256257e05c8"}, + {"4fd2080e-d805-4d68-ab51-460106c8f372", "eff21bbd08904e8188e2946285ff0de3", "c170201058794b9bb97b20e8258080cb", "1c26e52f-fa8b-4d73-bba6-cd423a099244"}, + {"4fd2080e-d805-4d68-ab51-460106c8f372", "08183d1fcb694554b092cd629b5b9b47", "ef2de9a7bf5d4ed2bb08ead541c9f36c", "8f773c13-3d96-4024-b0a0-ca9502818366"}, + {"4fd2080e-d805-4d68-ab51-460106c8f372", "f81d21e375fc4dc6ad5b74fe1f966ecf", "99d95d845f2e4aadbeab1c7b6192d1c8", "15d2c9be-220b-4a58-9052-9a19b58e571a"}, + {"66275a66-8aad-498a-9b4a-26b4f5a66789", "69df79b84df94001bd261101f4e3b092", "ca42e214f92a4d6fab0d697b5f7f539a", "299bde44-6432-4ef6-a3fd-8349ada25a14"}, + {"66275a66-8aad-498a-9b4a-26b4f5a66789", "51a015f6ab8f495a9bcba9569932b8d4", "1f8d98a889234c8288c94c333744b2a7", "e67f2cfa-88b0-4d2c-88d0-a4b025eb63d5"}, + {"66275a66-8aad-498a-9b4a-26b4f5a66789", "94713786eca2417aa18f499a9d72a29b", "168696de2a0b408c933d316a55e52500", "63b3cf66-1401-4ae6-9177-aa2c7a1a2b7a"}, + {"66275a66-8aad-498a-9b4a-26b4f5a66789", "88ed817ebd1340aab4711750196ed8b1", "087b8173438348be9ed2cd6e7a04d49f", "f158a947-a4ed-4c5c-9a6e-053f73b4039f"}, + {"66275a66-8aad-498a-9b4a-26b4f5a66789", "e74171e0a6934f65b2895624a66680fc", "22af639fb2714f5f8b9beedb1fb519a6", 
"1a843f0e-f234-40c0-86c3-0dec2b2b9f21"}, + } + + // Let 5 clients (based on distinct invocation IDs) enqueue a + // total of 25 operations for different actions. No in-flight + // deduplication should take place. + streams := make([]remoteexecution.Execution_ExecuteClient, 0, 25) + for i, p := range operationParameters { + contentAddressableStorage.EXPECT().Get( + gomock.Any(), + digest.MustNewDigest("main", remoteexecution.DigestFunction_MD5, p.actionHash, 123), + ).Return(buffer.NewProtoBufferFromProto(&remoteexecution.Action{ + CommandDigest: &remoteexecution.Digest{ + Hash: p.commandHash, + SizeBytes: 456, + }, + }, buffer.UserProvided)) + + requestMetadata := &remoteexecution.RequestMetadata{ + ToolInvocationId: p.invocationID, + } + requestMetadataAny, err := anypb.New(requestMetadata) + require.NoError(t, err) + requestMetadataBin, err := proto.Marshal(requestMetadata) + require.NoError(t, err) + initialSizeClassSelector := mock.NewMockSelector(ctrl) + actionRouter.EXPECT().RouteAction(gomock.Any(), gomock.Any(), testutil.EqProto(t, &remoteexecution.Action{ + CommandDigest: &remoteexecution.Digest{ + Hash: p.commandHash, + SizeBytes: 456, + }, + }), testutil.EqProto(t, requestMetadata)).Return( + platform.MustNewKey("main", platformForTesting), + []invocation.Key{invocation.MustNewKey(requestMetadataAny)}, + initialSizeClassSelector, + nil, + ) + + initialSizeClassLearner := mock.NewMockLearner(ctrl) + initialSizeClassSelector.EXPECT().Select([]uint32{0}). 
+ Return(0, 15*time.Minute, 30*time.Minute, initialSizeClassLearner) + initialSizeClassLearner.EXPECT().Abandoned() + + clock.EXPECT().Now().Return(time.Unix(1010+int64(i), 0)) + timer := mock.NewMockTimer(ctrl) + clock.EXPECT().NewTimer(time.Minute).Return(timer, nil) + timer.EXPECT().Stop().Return(true) + uuidGenerator.EXPECT().Call().Return(uuid.Parse(p.operationName)) + stream, err := executionClient.Execute( + metadata.AppendToOutgoingContext( + ctx, + "build.bazel.remote.execution.v2.requestmetadata-bin", + string(requestMetadataBin)), + &remoteexecution.ExecuteRequest{ + InstanceName: "main", + ActionDigest: &remoteexecution.Digest{ + Hash: p.actionHash, + SizeBytes: 123, + }, + }) + require.NoError(t, err) + streams = append(streams, stream) + update, err := stream.Recv() + require.NoError(t, err) + metadata, err := anypb.New(&remoteexecution.ExecuteOperationMetadata{ + Stage: remoteexecution.ExecutionStage_QUEUED, + ActionDigest: &remoteexecution.Digest{ + Hash: p.actionHash, + SizeBytes: 123, + }, + }) + require.NoError(t, err) + testutil.RequireEqualProto(t, update, &longrunningpb.Operation{ + Name: p.operationName, + Metadata: metadata, + }) + } + + // Check that ListInvocationChildren() reports all five + // invocations, both when justQueuedInvocations is true and + // false. When true, the invocations should be returned in + // scheduling order. Otherwise, they should be returned + // alphabetically. 
+ invocationName := &buildqueuestate.InvocationName{ + SizeClassQueueName: &buildqueuestate.SizeClassQueueName{ + PlatformQueueName: &buildqueuestate.PlatformQueueName{ + InstanceNamePrefix: "main", + Platform: platformForTesting, + }, + }, + } + clock.EXPECT().Now().Return(time.Unix(1036, 0)) + invocationStates, err := buildQueue.ListInvocationChildren(ctx, &buildqueuestate.ListInvocationChildrenRequest{ + InvocationName: invocationName, + Filter: buildqueuestate.ListInvocationChildrenRequest_QUEUED, + }) + require.NoError(t, err) + require.Len(t, invocationStates.Children, 5) + for i, toolInvocationID := range []string{ + "dfe3ca8b-64bc-4efd-b8a9-2bdc3827f0ac", + "4712de52-4518-4840-91d5-c9e13a38cc5a", + "351fdffe-04df-4fe0-98c7-b4f5a463fd52", + "4fd2080e-d805-4d68-ab51-460106c8f372", + "66275a66-8aad-498a-9b4a-26b4f5a66789", + } { + invocationID, err := anypb.New(&remoteexecution.RequestMetadata{ + ToolInvocationId: toolInvocationID, + }) + require.NoError(t, err) + testutil.RequireEqualProto(t, invocationID, invocationStates.Children[i].Id) + require.Equal(t, uint32(5), invocationStates.Children[i].State.QueuedOperationsCount) + require.Equal(t, uint32(0), invocationStates.Children[i].State.ExecutingWorkersCount) + require.Equal(t, uint32(0), invocationStates.Children[i].State.IdleWorkersCount) + require.Equal(t, uint32(0), invocationStates.Children[i].State.IdleSynchronizingWorkersCount) + } + + clock.EXPECT().Now().Return(time.Unix(1036, 0)) + invocationStates, err = buildQueue.ListInvocationChildren(ctx, &buildqueuestate.ListInvocationChildrenRequest{ + InvocationName: invocationName, + Filter: buildqueuestate.ListInvocationChildrenRequest_ACTIVE, + }) + require.NoError(t, err) + require.Len(t, invocationStates.Children, 5) + for i, toolInvocationID := range []string{ + "351fdffe-04df-4fe0-98c7-b4f5a463fd52", + "4712de52-4518-4840-91d5-c9e13a38cc5a", + "4fd2080e-d805-4d68-ab51-460106c8f372", + "66275a66-8aad-498a-9b4a-26b4f5a66789", + 
"dfe3ca8b-64bc-4efd-b8a9-2bdc3827f0ac", + } { + invocationID, err := anypb.New(&remoteexecution.RequestMetadata{ + ToolInvocationId: toolInvocationID, + }) + require.NoError(t, err) + testutil.RequireEqualProto(t, invocationID, invocationStates.Children[i].Id) + require.Equal(t, uint32(5), invocationStates.Children[i].State.QueuedOperationsCount) + require.Equal(t, uint32(0), invocationStates.Children[i].State.ExecutingWorkersCount) + require.Equal(t, uint32(0), invocationStates.Children[i].State.IdleWorkersCount) + require.Equal(t, uint32(0), invocationStates.Children[i].State.IdleSynchronizingWorkersCount) + } + + // Let 25 workers execute the operations that were created + // previously. Because the operations originated from different + // client invocations, we should not execute them in the order + // in which they arrived. Instead, we should alternate between + // invocations, so that each of them gets their fair share. + for i, j := range []int{ + 0, 5, 10, 15, 20, + 1, 6, 11, 16, 21, + 2, 7, 12, 17, 22, + 3, 8, 13, 18, 23, + 4, 9, 14, 19, 24, + } { + p := operationParameters[j] + clock.EXPECT().Now().Return(time.Unix(1040+int64(i), 0)).Times(2) + timer := mock.NewMockTimer(ctrl) + clock.EXPECT().NewTimer(time.Minute).Return(timer, nil) + timer.EXPECT().Stop().Return(true) + response, err := buildQueue.Synchronize(ctx, &remoteworker.SynchronizeRequest{ + WorkerId: map[string]string{ + "hostname": "worker123", + "thread": strconv.FormatInt(int64(j), 10), + }, + InstanceNamePrefix: "main", + Platform: platformForTesting, + CurrentState: &remoteworker.CurrentState{ + WorkerState: &remoteworker.CurrentState_Idle{ + Idle: &emptypb.Empty{}, + }, + }, + }) + require.NoError(t, err) + requestMetadata, err := anypb.New(&remoteexecution.RequestMetadata{ + ToolInvocationId: p.invocationID, + }) + require.NoError(t, err) + testutil.RequireEqualProto(t, &remoteworker.SynchronizeResponse{ + NextSynchronizationAt: &timestamppb.Timestamp{Seconds: 1050 + int64(i)}, + 
DesiredState: &remoteworker.DesiredState{ + WorkerState: &remoteworker.DesiredState_Executing_{ + Executing: &remoteworker.DesiredState_Executing{ + DigestFunction: remoteexecution.DigestFunction_MD5, + ActionDigest: &remoteexecution.Digest{ + Hash: p.actionHash, + SizeBytes: 123, + }, + Action: &remoteexecution.Action{ + CommandDigest: &remoteexecution.Digest{ + Hash: p.commandHash, + SizeBytes: 456, + }, + Timeout: &durationpb.Duration{Seconds: 1800}, + }, + QueuedTimestamp: &timestamppb.Timestamp{Seconds: 1010 + int64(j)}, + AuxiliaryMetadata: []*anypb.Any{requestMetadata}, + }, + }, + }, + }, response) + + update, err := streams[j].Recv() + require.NoError(t, err) + metadata, err := anypb.New(&remoteexecution.ExecuteOperationMetadata{ + Stage: remoteexecution.ExecutionStage_EXECUTING, + ActionDigest: &remoteexecution.Digest{ + Hash: p.actionHash, + SizeBytes: 123, + }, + }) + require.NoError(t, err) + testutil.RequireEqualProto(t, &longrunningpb.Operation{ + Name: p.operationName, + Metadata: metadata, + }, update) + } + + // Call ListInvocationChildren() again. All operations should now be + // reported as executing, instead of being queued. 
+ clock.EXPECT().Now().Return(time.Unix(1070, 0)) + invocationStates, err = buildQueue.ListInvocationChildren(ctx, &buildqueuestate.ListInvocationChildrenRequest{ + InvocationName: invocationName, + Filter: buildqueuestate.ListInvocationChildrenRequest_QUEUED, + }) + require.NoError(t, err) + require.Empty(t, invocationStates.Children) + + clock.EXPECT().Now().Return(time.Unix(1071, 0)) + invocationStates, err = buildQueue.ListInvocationChildren(ctx, &buildqueuestate.ListInvocationChildrenRequest{ + InvocationName: invocationName, + Filter: buildqueuestate.ListInvocationChildrenRequest_ACTIVE, + }) + require.NoError(t, err) + require.Len(t, invocationStates.Children, 5) + for i, toolInvocationID := range []string{ + "351fdffe-04df-4fe0-98c7-b4f5a463fd52", + "4712de52-4518-4840-91d5-c9e13a38cc5a", + "4fd2080e-d805-4d68-ab51-460106c8f372", + "66275a66-8aad-498a-9b4a-26b4f5a66789", + "dfe3ca8b-64bc-4efd-b8a9-2bdc3827f0ac", + } { + invocationID, err := anypb.New(&remoteexecution.RequestMetadata{ + ToolInvocationId: toolInvocationID, + }) + require.NoError(t, err) + testutil.RequireEqualProto(t, invocationID, invocationStates.Children[i].Id) + require.Equal(t, uint32(0), invocationStates.Children[i].State.QueuedOperationsCount) + require.Equal(t, uint32(5), invocationStates.Children[i].State.ExecutingWorkersCount) + require.Equal(t, uint32(0), invocationStates.Children[i].State.IdleWorkersCount) + require.Equal(t, uint32(0), invocationStates.Children[i].State.IdleSynchronizingWorkersCount) + } + + clock.EXPECT().Now().Return(time.Unix(1072, 0)) + invocationStates, err = buildQueue.ListInvocationChildren(ctx, &buildqueuestate.ListInvocationChildrenRequest{ + InvocationName: invocationName, + Filter: buildqueuestate.ListInvocationChildrenRequest_ALL, + }) + require.NoError(t, err) + require.Len(t, invocationStates.Children, 5) + + // Call ListInvocationChildren() a final time after letting a + // sufficient amount of time pass. 
This should cause all workers + // to be removed from the scheduler, as they didn't provide any + // updates. All associated operations should be completed, + // meaning that no invocations will be reported. + clock.EXPECT().Now().Return(time.Unix(1200, 0)).Times(51) + invocationStates, err = buildQueue.ListInvocationChildren(ctx, &buildqueuestate.ListInvocationChildrenRequest{ + InvocationName: invocationName, + Filter: buildqueuestate.ListInvocationChildrenRequest_QUEUED, + }) + require.NoError(t, err) + require.Empty(t, invocationStates.Children) + + clock.EXPECT().Now().Return(time.Unix(1200, 0)) + invocationStates, err = buildQueue.ListInvocationChildren(ctx, &buildqueuestate.ListInvocationChildrenRequest{ + InvocationName: invocationName, + Filter: buildqueuestate.ListInvocationChildrenRequest_ACTIVE, + }) + require.NoError(t, err) + require.Empty(t, invocationStates.Children) + + clock.EXPECT().Now().Return(time.Unix(1200, 0)) + invocationStates, err = buildQueue.ListInvocationChildren(ctx, &buildqueuestate.ListInvocationChildrenRequest{ + InvocationName: invocationName, + Filter: buildqueuestate.ListInvocationChildrenRequest_ALL, + }) + require.NoError(t, err) + require.Empty(t, invocationStates.Children) + + // All clients should receive an error that their operations + // terminated due to the loss of workers. 
+ for i, p := range operationParameters { + update, err := streams[i].Recv() + require.NoError(t, err) + metadata, err := anypb.New(&remoteexecution.ExecuteOperationMetadata{ + Stage: remoteexecution.ExecutionStage_COMPLETED, + ActionDigest: &remoteexecution.Digest{ + Hash: p.actionHash, + SizeBytes: 123, + }, + }) + require.NoError(t, err) + executeResponse, err := anypb.New(&remoteexecution.ExecuteResponse{ + Status: status.Newf(codes.Unavailable, "Worker {\"hostname\":\"worker123\",\"thread\":\"%d\"} disappeared while task was executing", i).Proto(), + }) + require.NoError(t, err) + testutil.RequireEqualProto(t, update, &longrunningpb.Operation{ + Name: p.operationName, + Metadata: metadata, + Done: true, + Result: &longrunningpb.Operation_Response{Response: executeResponse}, + }) + + _, err = streams[i].Recv() + require.Equal(t, io.EOF, err) + } +} + +// Test what happens when multiple operations are in-flight deduplicated +// against the same underlying task, and are subsequently abandoned +// while being in the QUEUED stage. This should cause all associated +// operations and invocations to be removed eventually. +func TestInMemoryBuildQueueInFlightDeduplicationAbandonQueued(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + contentAddressableStorage := mock.NewMockBlobAccess(ctrl) + clock := mock.NewMockClock(ctrl) + clock.EXPECT().Now().Return(time.Unix(0, 0)) + uuidGenerator := mock.NewMockUUIDGenerator(ctrl) + actionRouter := mock.NewMockActionRouter(ctrl) + buildQueue := scheduler.NewInMemoryBuildQueue(contentAddressableStorage, clock, uuidGenerator.Call, &buildQueueConfigurationForTesting, 10000, actionRouter, allowAllAuthorizer, allowAllAuthorizer, allowAllAuthorizer) + executionClient := getExecutionClient(t, buildQueue) + + // Announce a new worker, which creates a queue for operations. 
+ clock.EXPECT().Now().Return(time.Unix(1000, 0)) + response, err := buildQueue.Synchronize(ctx, &remoteworker.SynchronizeRequest{ + WorkerId: map[string]string{ + "hostname": "worker123", + "thread": "42", + }, + InstanceNamePrefix: "main", + Platform: platformForTesting, + CurrentState: &remoteworker.CurrentState{ + WorkerState: &remoteworker.CurrentState_Executing_{ + Executing: &remoteworker.CurrentState_Executing{ + ActionDigest: &remoteexecution.Digest{ + Hash: "099a3f6dc1e8e91dbcca4ea964cd2237d4b11733", + SizeBytes: 123, + }, + ExecutionState: &remoteworker.CurrentState_Executing_FetchingInputs{ + FetchingInputs: &emptypb.Empty{}, + }, + }, + }, + }, + }) + require.NoError(t, err) + testutil.RequireEqualProto(t, &remoteworker.SynchronizeResponse{ + NextSynchronizationAt: &timestamppb.Timestamp{Seconds: 1000}, + DesiredState: &remoteworker.DesiredState{ + WorkerState: &remoteworker.DesiredState_Idle{ + Idle: &emptypb.Empty{}, + }, + }, + }, response) + + // Let ten clients create ten operations. Because they all refer + // to the same action, all requests should be deduplicated into + // the same task. This means that we create ten initial size + // class selectors, of which the last nine are abandoned + // immediately. 
+ operationParameters := [...]struct { + invocationID string + operationName string + }{ + {"0f0f22ec-908a-4ea7-8a78-b92ab4188e78", "b4667823-9f8e-451d-a3e4-4481ec67329f"}, + {"0f67bd82-2867-45ec-9412-f058f27d2686", "1b9e4aaf-b984-4ebc-9b51-0e31bf1b0edb"}, + {"3e3975fa-d723-42c6-bccb-a3358793f656", "e662fb47-f162-41b8-b29c-45b24fe9e273"}, + {"557cd041-1d24-423c-9733-f94c8d2916b2", "def137ac-7724-43ff-98f9-b16a3ba01dcd"}, + {"56a827ff-d0bb-4f90-839d-eb55d8060269", "64943e71-86c3-4153-a760-76c0ff30cd68"}, + {"849810af-2e0b-45ae-965d-28642d6c6453", "da009be0-93fe-40ad-9e03-a14e2bee2ff9"}, + {"9cadf0eb-1e28-49ea-b052-5d05cdc50303", "e0f4e177-369d-4412-a19c-b7b1969dd46e"}, + {"9ff4fd36-7123-4b59-90e2-7f49cd0af05e", "34f633ac-c418-4a1d-8a69-796990008e9c"}, + {"d0438436-cff3-45e1-9c0b-7e5af632c0a4", "46cdaa7c-6bfa-49e2-822e-31be760c51c5"}, + {"e4896008-d596-44c7-8df6-6ced53dff6b0", "88929b3e-f664-4f11-873d-40324d06378e"}, + } + initialSizeClassLearner := mock.NewMockLearner(ctrl) + for i, p := range operationParameters { + contentAddressableStorage.EXPECT().Get( + gomock.Any(), + digest.MustNewDigest("main", remoteexecution.DigestFunction_SHA256, "fc96ea0eee854b45950d3a7448332445730886691b992cb7917da0853664f7c2", 123), + ).Return(buffer.NewProtoBufferFromProto(&remoteexecution.Action{ + CommandDigest: &remoteexecution.Digest{ + Hash: "f7a3ac7c17e535bc9b54ab13dbbb95a52ca1f1edaf9503ce23ccb3eca331a4f5", + SizeBytes: 456, + }, + }, buffer.UserProvided)) + + initialSizeClassSelector := mock.NewMockSelector(ctrl) + requestMetadata := &remoteexecution.RequestMetadata{ + ToolInvocationId: p.invocationID, + } + requestMetadataAny, err := anypb.New(requestMetadata) + require.NoError(t, err) + requestMetadataBin, err := proto.Marshal(&remoteexecution.RequestMetadata{ + ToolInvocationId: p.invocationID, + }) + require.NoError(t, err) + actionRouter.EXPECT().RouteAction(gomock.Any(), gomock.Any(), gomock.Any(), testutil.EqProto(t, requestMetadata)).Return( + 
platform.MustNewKey("main", platformForTesting), + []invocation.Key{invocation.MustNewKey(requestMetadataAny)}, + initialSizeClassSelector, + nil, + ) + if i == 0 { + initialSizeClassSelector.EXPECT().Select([]uint32{0}). + Return(0, 15*time.Minute, 30*time.Minute, initialSizeClassLearner) + } else { + initialSizeClassSelector.EXPECT().Abandoned() + } + + ctxWithCancel, cancel := context.WithCancel(ctx) + + clock.EXPECT().Now().Return(time.Unix(1010+int64(i), 0)) + timer := mock.NewMockTimer(ctrl) + clock.EXPECT().NewTimer(time.Minute).Return(timer, nil) + timer.EXPECT().Stop().Return(true) + uuidGenerator.EXPECT().Call().Return(uuid.Parse(p.operationName)) + stream, err := executionClient.Execute( + metadata.AppendToOutgoingContext( + ctxWithCancel, + "build.bazel.remote.execution.v2.requestmetadata-bin", + string(requestMetadataBin)), + &remoteexecution.ExecuteRequest{ + InstanceName: "main", + ActionDigest: &remoteexecution.Digest{ + Hash: "fc96ea0eee854b45950d3a7448332445730886691b992cb7917da0853664f7c2", + SizeBytes: 123, + }, + }) + require.NoError(t, err) + update, err := stream.Recv() + require.NoError(t, err) + metadata, err := anypb.New(&remoteexecution.ExecuteOperationMetadata{ + Stage: remoteexecution.ExecutionStage_QUEUED, + ActionDigest: &remoteexecution.Digest{ + Hash: "fc96ea0eee854b45950d3a7448332445730886691b992cb7917da0853664f7c2", + SizeBytes: 123, + }, + }) + require.NoError(t, err) + testutil.RequireEqualProto(t, &longrunningpb.Operation{ + Name: p.operationName, + Metadata: metadata, + }, update) + + // Immediately cancel the request. The operation should + // still be valid for the next minute. + // + // Because cancelling the RPC happens asynchronously, we + // wait on clock.Now() to be called to ensure + // InMemoryBuildQueue has detected the cancelation. 
+ cancelWait := make(chan struct{}) + clock.EXPECT().Now().Return(time.Unix(1010+int64(i), 0)).Do(func() { + cancelWait <- struct{}{} + }) + cancel() + <-cancelWait + } + + // Get listings of invocations known by the scheduler. Because + // we're requesting this one minute after the operations were + // created, we should gradually see this list shrink. Eventually + // all invocations should be removed. + initialSizeClassLearner.EXPECT().Abandoned() + invocationName := &buildqueuestate.InvocationName{ + SizeClassQueueName: &buildqueuestate.SizeClassQueueName{ + PlatformQueueName: &buildqueuestate.PlatformQueueName{ + InstanceNamePrefix: "main", + Platform: platformForTesting, + }, + }, + } + for i := 0; i <= len(operationParameters); i++ { + clock.EXPECT().Now().Return(time.Unix(1069+int64(i), 0)).Times(3) + + invocationStates, err := buildQueue.ListInvocationChildren(ctx, &buildqueuestate.ListInvocationChildrenRequest{ + InvocationName: invocationName, + Filter: buildqueuestate.ListInvocationChildrenRequest_ALL, + }) + require.NoError(t, err) + require.Len(t, invocationStates.Children, len(operationParameters)-i) + + invocationStates, err = buildQueue.ListInvocationChildren(ctx, &buildqueuestate.ListInvocationChildrenRequest{ + InvocationName: invocationName, + Filter: buildqueuestate.ListInvocationChildrenRequest_ACTIVE, + }) + require.NoError(t, err) + require.Len(t, invocationStates.Children, len(operationParameters)-i) + + invocationStates, err = buildQueue.ListInvocationChildren(ctx, &buildqueuestate.ListInvocationChildrenRequest{ + InvocationName: invocationName, + Filter: buildqueuestate.ListInvocationChildrenRequest_QUEUED, + }) + require.NoError(t, err) + require.Len(t, invocationStates.Children, len(operationParameters)-i) + } +} + +// This test is identical to the previous one, except that the operation +// is placed in the EXECUTING stage when being abandoned. 
The logic for + // removing such operations is different from operations in the QUEUED + // stage. +func TestInMemoryBuildQueueInFlightDeduplicationAbandonExecuting(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + contentAddressableStorage := mock.NewMockBlobAccess(ctrl) + clock := mock.NewMockClock(ctrl) + clock.EXPECT().Now().Return(time.Unix(0, 0)) + uuidGenerator := mock.NewMockUUIDGenerator(ctrl) + actionRouter := mock.NewMockActionRouter(ctrl) + buildQueue := scheduler.NewInMemoryBuildQueue(contentAddressableStorage, clock, uuidGenerator.Call, &buildQueueConfigurationForTesting, 10000, actionRouter, allowAllAuthorizer, allowAllAuthorizer, allowAllAuthorizer) + executionClient := getExecutionClient(t, buildQueue) + + // Announce a new worker, which creates a queue for operations. + clock.EXPECT().Now().Return(time.Unix(1000, 0)) + response, err := buildQueue.Synchronize(ctx, &remoteworker.SynchronizeRequest{ + WorkerId: map[string]string{ + "hostname": "worker123", + "thread": "42", + }, + InstanceNamePrefix: "main", + Platform: platformForTesting, + CurrentState: &remoteworker.CurrentState{ + WorkerState: &remoteworker.CurrentState_Executing_{ + Executing: &remoteworker.CurrentState_Executing{ + ActionDigest: &remoteexecution.Digest{ + Hash: "099a3f6dc1e8e91dbcca4ea964cd2237d4b11733", + SizeBytes: 123, + }, + ExecutionState: &remoteworker.CurrentState_Executing_FetchingInputs{ + FetchingInputs: &emptypb.Empty{}, + }, + }, + }, + }, + }) + require.NoError(t, err) + testutil.RequireEqualProto(t, &remoteworker.SynchronizeResponse{ + NextSynchronizationAt: &timestamppb.Timestamp{Seconds: 1000}, + DesiredState: &remoteworker.DesiredState{ + WorkerState: &remoteworker.DesiredState_Idle{ + Idle: &emptypb.Empty{}, + }, + }, + }, response) + + // Let ten clients create ten operations. Because they all refer + // to the same action, all requests should be deduplicated into + // the same task. 
This means that we create ten initial size + // class selectors, of which the last nine are abandoned + // immediately. + operationParameters := [...]struct { + invocationID string + operationName string + }{ + {"0f0f22ec-908a-4ea7-8a78-b92ab4188e78", "b4667823-9f8e-451d-a3e4-4481ec67329f"}, + {"0f67bd82-2867-45ec-9412-f058f27d2686", "1b9e4aaf-b984-4ebc-9b51-0e31bf1b0edb"}, + {"3e3975fa-d723-42c6-bccb-a3358793f656", "e662fb47-f162-41b8-b29c-45b24fe9e273"}, + {"557cd041-1d24-423c-9733-f94c8d2916b2", "def137ac-7724-43ff-98f9-b16a3ba01dcd"}, + {"56a827ff-d0bb-4f90-839d-eb55d8060269", "64943e71-86c3-4153-a760-76c0ff30cd68"}, + {"849810af-2e0b-45ae-965d-28642d6c6453", "da009be0-93fe-40ad-9e03-a14e2bee2ff9"}, + {"9cadf0eb-1e28-49ea-b052-5d05cdc50303", "e0f4e177-369d-4412-a19c-b7b1969dd46e"}, + {"9ff4fd36-7123-4b59-90e2-7f49cd0af05e", "34f633ac-c418-4a1d-8a69-796990008e9c"}, + {"d0438436-cff3-45e1-9c0b-7e5af632c0a4", "46cdaa7c-6bfa-49e2-822e-31be760c51c5"}, + {"e4896008-d596-44c7-8df6-6ced53dff6b0", "88929b3e-f664-4f11-873d-40324d06378e"}, + } + initialSizeClassLearner := mock.NewMockLearner(ctrl) + for i, p := range operationParameters { + contentAddressableStorage.EXPECT().Get( + gomock.Any(), + digest.MustNewDigest("main", remoteexecution.DigestFunction_SHA256, "fc96ea0eee854b45950d3a7448332445730886691b992cb7917da0853664f7c2", 123), + ).Return(buffer.NewProtoBufferFromProto(&remoteexecution.Action{ + CommandDigest: &remoteexecution.Digest{ + Hash: "f7a3ac7c17e535bc9b54ab13dbbb95a52ca1f1edaf9503ce23ccb3eca331a4f5", + SizeBytes: 456, + }, + Platform: platformForTesting, + }, buffer.UserProvided)) + + initialSizeClassSelector := mock.NewMockSelector(ctrl) + requestMetadata := &remoteexecution.RequestMetadata{ + ToolInvocationId: p.invocationID, + TargetId: "//:hello_world", + } + requestMetadataAny, err := anypb.New(requestMetadata) + require.NoError(t, err) + requestMetadataBin, err := proto.Marshal(requestMetadata) + require.NoError(t, err) + 
actionRouter.EXPECT().RouteAction(gomock.Any(), gomock.Any(), gomock.Any(), testutil.EqProto(t, requestMetadata)).Return( + platform.MustNewKey("main", platformForTesting), + []invocation.Key{invocation.MustNewKey(requestMetadataAny)}, + initialSizeClassSelector, + nil, + ) + if i == 0 { + initialSizeClassSelector.EXPECT().Select([]uint32{0}). + Return(0, 15*time.Minute, 30*time.Minute, initialSizeClassLearner) + } else { + initialSizeClassSelector.EXPECT().Abandoned() + } + + ctxWithCancel, cancel := context.WithCancel(ctx) + + clock.EXPECT().Now().Return(time.Unix(1010+int64(i), 0)) + timer := mock.NewMockTimer(ctrl) + clock.EXPECT().NewTimer(time.Minute).Return(timer, nil) + timer.EXPECT().Stop().Return(true) + uuidGenerator.EXPECT().Call().Return(uuid.Parse(p.operationName)) + stream, err := executionClient.Execute( + metadata.AppendToOutgoingContext( + ctxWithCancel, + "build.bazel.remote.execution.v2.requestmetadata-bin", + string(requestMetadataBin)), + &remoteexecution.ExecuteRequest{ + InstanceName: "main", + ActionDigest: &remoteexecution.Digest{ + Hash: "fc96ea0eee854b45950d3a7448332445730886691b992cb7917da0853664f7c2", + SizeBytes: 123, + }, + }) + require.NoError(t, err) + update, err := stream.Recv() + require.NoError(t, err) + metadata, err := anypb.New(&remoteexecution.ExecuteOperationMetadata{ + Stage: remoteexecution.ExecutionStage_QUEUED, + ActionDigest: &remoteexecution.Digest{ + Hash: "fc96ea0eee854b45950d3a7448332445730886691b992cb7917da0853664f7c2", + SizeBytes: 123, + }, + }) + require.NoError(t, err) + testutil.RequireEqualProto(t, &longrunningpb.Operation{ + Name: p.operationName, + Metadata: metadata, + }, update) + + // Immediately cancel the request. The operation should + // still be valid for the next minute. + // + // Because cancelling the RPC happens asynchronously, we + // wait on clock.Now() to be called to ensure + // InMemoryBuildQueue has detected the cancelation. 
+ cancelWait := make(chan struct{}) + clock.EXPECT().Now().Return(time.Unix(1010+int64(i), 0)).Do(func() { + cancelWait <- struct{}{} + }) + cancel() + <-cancelWait + } + + // Let one worker execute the task. Because of in-flight + // deduplication, all ten operations should now be in the + // EXECUTING stage. + clock.EXPECT().Now().Return(time.Unix(1065, 0)) + response, err = buildQueue.Synchronize(ctx, &remoteworker.SynchronizeRequest{ + WorkerId: map[string]string{ + "hostname": "worker123", + "thread": "42", + }, + InstanceNamePrefix: "main", + Platform: platformForTesting, + CurrentState: &remoteworker.CurrentState{ + WorkerState: &remoteworker.CurrentState_Idle{ + Idle: &emptypb.Empty{}, + }, + }, + }) + require.NoError(t, err) + requestMetadata, err := anypb.New(&remoteexecution.RequestMetadata{ + ToolInvocationId: "0f0f22ec-908a-4ea7-8a78-b92ab4188e78", + TargetId: "//:hello_world", + }) + require.NoError(t, err) + testutil.RequireEqualProto(t, &remoteworker.SynchronizeResponse{ + NextSynchronizationAt: &timestamppb.Timestamp{Seconds: 1075}, + DesiredState: &remoteworker.DesiredState{ + WorkerState: &remoteworker.DesiredState_Executing_{ + Executing: &remoteworker.DesiredState_Executing{ + DigestFunction: remoteexecution.DigestFunction_SHA256, + ActionDigest: &remoteexecution.Digest{ + Hash: "fc96ea0eee854b45950d3a7448332445730886691b992cb7917da0853664f7c2", + SizeBytes: 123, + }, + Action: &remoteexecution.Action{ + CommandDigest: &remoteexecution.Digest{ + Hash: "f7a3ac7c17e535bc9b54ab13dbbb95a52ca1f1edaf9503ce23ccb3eca331a4f5", + SizeBytes: 456, + }, + Platform: platformForTesting, + Timeout: &durationpb.Duration{Seconds: 1800}, + }, + QueuedTimestamp: &timestamppb.Timestamp{Seconds: 1010}, + AuxiliaryMetadata: []*anypb.Any{requestMetadata}, + }, + }, + }, + }, response) + + // Get listings of invocations known by the scheduler. Because + // we're requesting this one minute after the operations were + // created, we should gradually see this list shrink. 
Eventually + // all invocations should be removed. + initialSizeClassLearner.EXPECT().Abandoned() + invocationName := &buildqueuestate.InvocationName{ + SizeClassQueueName: &buildqueuestate.SizeClassQueueName{ + PlatformQueueName: &buildqueuestate.PlatformQueueName{ + InstanceNamePrefix: "main", + Platform: platformForTesting, + }, + }, + } + for i := 0; i <= len(operationParameters); i++ { + clock.EXPECT().Now().Return(time.Unix(1069+int64(i), 0)).Times(3) + + invocationStates, err := buildQueue.ListInvocationChildren(ctx, &buildqueuestate.ListInvocationChildrenRequest{ + InvocationName: invocationName, + Filter: buildqueuestate.ListInvocationChildrenRequest_ALL, + }) + require.NoError(t, err) + require.Len(t, invocationStates.Children, len(operationParameters)-i) + + invocationStates, err = buildQueue.ListInvocationChildren(ctx, &buildqueuestate.ListInvocationChildrenRequest{ + InvocationName: invocationName, + Filter: buildqueuestate.ListInvocationChildrenRequest_ACTIVE, + }) + require.NoError(t, err) + require.Len(t, invocationStates.Children, len(operationParameters)-i) + + invocationStates, err = buildQueue.ListInvocationChildren(ctx, &buildqueuestate.ListInvocationChildrenRequest{ + InvocationName: invocationName, + Filter: buildqueuestate.ListInvocationChildrenRequest_QUEUED, + }) + require.NoError(t, err) + require.Empty(t, invocationStates.Children) + } +} + +func TestInMemoryBuildQueuePreferBeingIdle(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + contentAddressableStorage := mock.NewMockBlobAccess(ctrl) + clock := mock.NewMockClock(ctrl) + clock.EXPECT().Now().Return(time.Unix(0, 0)) + uuidGenerator := mock.NewMockUUIDGenerator(ctrl) + actionRouter := mock.NewMockActionRouter(ctrl) + buildQueue := scheduler.NewInMemoryBuildQueue(contentAddressableStorage, clock, uuidGenerator.Call, &buildQueueConfigurationForTesting, 10000, actionRouter, allowAllAuthorizer, allowAllAuthorizer, allowAllAuthorizer) + executionClient := 
getExecutionClient(t, buildQueue) + + // Announce a new worker, which creates a queue for operations. + clock.EXPECT().Now().Return(time.Unix(1000, 0)) + response, err := buildQueue.Synchronize(ctx, &remoteworker.SynchronizeRequest{ + WorkerId: map[string]string{ + "hostname": "worker123", + "thread": "42", + }, + InstanceNamePrefix: "main", + Platform: platformForTesting, + CurrentState: &remoteworker.CurrentState{ + WorkerState: &remoteworker.CurrentState_Idle{ + Idle: &emptypb.Empty{}, + }, + }, + PreferBeingIdle: true, + }) + require.NoError(t, err) + testutil.RequireEqualProto(t, &remoteworker.SynchronizeResponse{ + NextSynchronizationAt: &timestamppb.Timestamp{Seconds: 1000}, + DesiredState: &remoteworker.DesiredState{ + WorkerState: &remoteworker.DesiredState_Idle{ + Idle: &emptypb.Empty{}, + }, + }, + }, response) + + // Let a client enqueue an operation. + contentAddressableStorage.EXPECT().Get( + gomock.Any(), + digest.MustNewDigest("main", remoteexecution.DigestFunction_SHA1, "da39a3ee5e6b4b0d3255bfef95601890afd80709", 123), + ).Return(buffer.NewProtoBufferFromProto(&remoteexecution.Action{ + CommandDigest: &remoteexecution.Digest{ + Hash: "61c585c297d00409bd477b6b80759c94ec545ab4", + SizeBytes: 456, + }, + }, buffer.UserProvided)) + initialSizeClassSelector := mock.NewMockSelector(ctrl) + actionRouter.EXPECT().RouteAction(gomock.Any(), gomock.Any(), testutil.EqProto(t, &remoteexecution.Action{ + CommandDigest: &remoteexecution.Digest{ + Hash: "61c585c297d00409bd477b6b80759c94ec545ab4", + SizeBytes: 456, + }, + }), nil).Return(platform.MustNewKey("main", platformForTesting), nil, initialSizeClassSelector, nil) + initialSizeClassLearner := mock.NewMockLearner(ctrl) + initialSizeClassSelector.EXPECT().Select([]uint32{0}). 
+ Return(0, 15*time.Minute, 30*time.Minute, initialSizeClassLearner) + clock.EXPECT().Now().Return(time.Unix(1001, 0)) + timer := mock.NewMockTimer(ctrl) + clock.EXPECT().NewTimer(time.Minute).Return(timer, nil) + timer.EXPECT().Stop().Return(true) + uuidGenerator.EXPECT().Call().Return(uuid.Parse("b9bb6e2c-04ff-4fbd-802b-105be93a8fb7")) + stream, err := executionClient.Execute( + ctx, + &remoteexecution.ExecuteRequest{ + InstanceName: "main", + ActionDigest: &remoteexecution.Digest{ + Hash: "da39a3ee5e6b4b0d3255bfef95601890afd80709", + SizeBytes: 123, + }, + }) + require.NoError(t, err) + update, err := stream.Recv() + require.NoError(t, err) + metadata, err := anypb.New(&remoteexecution.ExecuteOperationMetadata{ + Stage: remoteexecution.ExecutionStage_QUEUED, + ActionDigest: &remoteexecution.Digest{ + Hash: "da39a3ee5e6b4b0d3255bfef95601890afd80709", + SizeBytes: 123, + }, + }) + require.NoError(t, err) + testutil.RequireEqualProto(t, &longrunningpb.Operation{ + Name: "b9bb6e2c-04ff-4fbd-802b-105be93a8fb7", + Metadata: metadata, + }, update) + + // Let a worker pick up the operation. + clock.EXPECT().Now().Return(time.Unix(1002, 0)).Times(2) + timer = mock.NewMockTimer(ctrl) + clock.EXPECT().NewTimer(time.Minute).Return(timer, nil) + timer.EXPECT().Stop().Return(true) + response, err = buildQueue.Synchronize(ctx, &remoteworker.SynchronizeRequest{ + WorkerId: map[string]string{ + "hostname": "worker123", + "thread": "42", + }, + InstanceNamePrefix: "main", + Platform: platformForTesting, + CurrentState: &remoteworker.CurrentState{ + WorkerState: &remoteworker.CurrentState_Idle{ + Idle: &emptypb.Empty{}, + }, + }, + }) + // The client should be informed the operation has started executing. 
+ update, err = stream.Recv() + require.NoError(t, err) + metadata, err = anypb.New(&remoteexecution.ExecuteOperationMetadata{ + Stage: remoteexecution.ExecutionStage_EXECUTING, + ActionDigest: &remoteexecution.Digest{ + Hash: "da39a3ee5e6b4b0d3255bfef95601890afd80709", + SizeBytes: 123, + }, + }) + require.NoError(t, err) + testutil.RequireEqualProto(t, &longrunningpb.Operation{ + Name: "b9bb6e2c-04ff-4fbd-802b-105be93a8fb7", + Metadata: metadata, + }, update) + + require.NoError(t, err) + testutil.RequireEqualProto(t, &remoteworker.SynchronizeResponse{ + NextSynchronizationAt: &timestamppb.Timestamp{Seconds: 1012}, + DesiredState: &remoteworker.DesiredState{ + WorkerState: &remoteworker.DesiredState_Executing_{ + Executing: &remoteworker.DesiredState_Executing{ + DigestFunction: remoteexecution.DigestFunction_SHA1, + ActionDigest: &remoteexecution.Digest{ + Hash: "da39a3ee5e6b4b0d3255bfef95601890afd80709", + SizeBytes: 123, + }, + Action: &remoteexecution.Action{ + CommandDigest: &remoteexecution.Digest{ + Hash: "61c585c297d00409bd477b6b80759c94ec545ab4", + SizeBytes: 456, + }, + Timeout: &durationpb.Duration{Seconds: 1800}, + }, + QueuedTimestamp: &timestamppb.Timestamp{Seconds: 1001}, + }, + }, + }, + }, response) + + // Let the worker complete the execution of the operation. + // Normally this would be a blocking call, as it would wait + // until more work is available. However, because + // PreferBeingIdle is set, the call will return immediately, + // explicitly forcing the worker to the idle state. This allows + // workers to terminate gracefully. 
+ initialSizeClassLearner.EXPECT().Succeeded(10*time.Second, []uint32{0}) + clock.EXPECT().Now().Return(time.Unix(1003, 0)).Times(3) + response, err = buildQueue.Synchronize(ctx, &remoteworker.SynchronizeRequest{ + WorkerId: map[string]string{ + "hostname": "worker123", + "thread": "42", + }, + InstanceNamePrefix: "main", + Platform: platformForTesting, + CurrentState: &remoteworker.CurrentState{ + WorkerState: &remoteworker.CurrentState_Executing_{ + Executing: &remoteworker.CurrentState_Executing{ + ActionDigest: &remoteexecution.Digest{ + Hash: "da39a3ee5e6b4b0d3255bfef95601890afd80709", + SizeBytes: 123, + }, + ExecutionState: &remoteworker.CurrentState_Executing_Completed{ + Completed: &remoteexecution.ExecuteResponse{ + Result: &remoteexecution.ActionResult{ + ExecutionMetadata: &remoteexecution.ExecutedActionMetadata{ + VirtualExecutionDuration: &durationpb.Duration{Seconds: 10}, + }, + }, + }, + }, + }, + }, + }, + PreferBeingIdle: true, + }) + require.NoError(t, err) + testutil.RequireEqualProto(t, &remoteworker.SynchronizeResponse{ + NextSynchronizationAt: ×tamppb.Timestamp{Seconds: 1003}, + DesiredState: &remoteworker.DesiredState{ + WorkerState: &remoteworker.DesiredState_Idle{ + Idle: &emptypb.Empty{}, + }, + }, + }, response) + + // The client should be informed the operation has completed. This + // should be the last message to be returned. 
+ update, err = stream.Recv() + require.NoError(t, err) + metadata, err = anypb.New(&remoteexecution.ExecuteOperationMetadata{ + Stage: remoteexecution.ExecutionStage_COMPLETED, + ActionDigest: &remoteexecution.Digest{ + Hash: "da39a3ee5e6b4b0d3255bfef95601890afd80709", + SizeBytes: 123, + }, + }) + require.NoError(t, err) + executeResponse, err := anypb.New(&remoteexecution.ExecuteResponse{ + Result: &remoteexecution.ActionResult{ + ExecutionMetadata: &remoteexecution.ExecutedActionMetadata{ + VirtualExecutionDuration: &durationpb.Duration{Seconds: 10}, + }, + }, + }) + require.NoError(t, err) + testutil.RequireEqualProto(t, update, &longrunningpb.Operation{ + Name: "b9bb6e2c-04ff-4fbd-802b-105be93a8fb7", + Metadata: metadata, + Done: true, + Result: &longrunningpb.Operation_Response{Response: executeResponse}, + }) + + _, err = stream.Recv() + require.Equal(t, io.EOF, err) +} + +func TestInMemoryBuildQueueMultipleSizeClasses(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + contentAddressableStorage := mock.NewMockBlobAccess(ctrl) + clock := mock.NewMockClock(ctrl) + clock.EXPECT().Now().Return(time.Unix(0, 0)) + uuidGenerator := mock.NewMockUUIDGenerator(ctrl) + actionRouter := mock.NewMockActionRouter(ctrl) + buildQueue := scheduler.NewInMemoryBuildQueue(contentAddressableStorage, clock, uuidGenerator.Call, &buildQueueConfigurationForTesting, 10000, actionRouter, allowAllAuthorizer, allowAllAuthorizer, allowAllAuthorizer) + executionClient := getExecutionClient(t, buildQueue) + + // Register a platform queue that allows workers up to size + // class 8. The maximum needs to be provided to ensure that the + // execution strategy remains deterministic. 
+ clock.EXPECT().Now().Return(time.Unix(1000, 0)) + require.NoError(t, buildQueue.RegisterPredeclaredPlatformQueue( + digest.MustNewInstanceName("main"), + platformForTesting, + /* workerInvocationStickinessLimits = */ nil, + /* maximumQueuedBackgroundLearningOperations = */ 0, + /* backgroundLearningOperationPriority = */ 0, + /* maximumSizeClass = */ 8)) + + // Workers with a higher size class should be rejected, as no + // requests will end up getting sent to them. + clock.EXPECT().Now().Return(time.Unix(1001, 0)) + _, err := buildQueue.Synchronize(ctx, &remoteworker.SynchronizeRequest{ + WorkerId: map[string]string{ + "hostname": "worker123", + "thread": "42", + }, + InstanceNamePrefix: "main", + Platform: platformForTesting, + SizeClass: 9, + CurrentState: &remoteworker.CurrentState{ + WorkerState: &remoteworker.CurrentState_Idle{ + Idle: &emptypb.Empty{}, + }, + }, + }) + testutil.RequireEqualStatus(t, status.Error(codes.InvalidArgument, "Worker provided size class 9, which exceeds the predeclared maximum of 8"), err) + + // Announce a worker with a smaller size class, which should be + // permitted. 
+ clock.EXPECT().Now().Return(time.Unix(1002, 0)) + response, err := buildQueue.Synchronize(ctx, &remoteworker.SynchronizeRequest{ + WorkerId: map[string]string{ + "hostname": "worker123", + "thread": "42", + }, + InstanceNamePrefix: "main", + Platform: platformForTesting, + SizeClass: 3, + CurrentState: &remoteworker.CurrentState{ + WorkerState: &remoteworker.CurrentState_Executing_{ + Executing: &remoteworker.CurrentState_Executing{ + ActionDigest: &remoteexecution.Digest{ + Hash: "099a3f6dc1e8e91dbcca4ea964cd2237d4b11733", + SizeBytes: 123, + }, + ExecutionState: &remoteworker.CurrentState_Executing_FetchingInputs{ + FetchingInputs: &emptypb.Empty{}, + }, + }, + }, + }, + }) + require.NoError(t, err) + testutil.RequireEqualProto(t, &remoteworker.SynchronizeResponse{ + NextSynchronizationAt: ×tamppb.Timestamp{Seconds: 1002}, + DesiredState: &remoteworker.DesiredState{ + WorkerState: &remoteworker.DesiredState_Idle{ + Idle: &emptypb.Empty{}, + }, + }, + }, response) + + // Let a client enqueue a new operation, which we'll schedule on + // the smaller size class. + contentAddressableStorage.EXPECT().Get( + gomock.Any(), + digest.MustNewDigest("main", remoteexecution.DigestFunction_SHA1, "da39a3ee5e6b4b0d3255bfef95601890afd80709", 123), + ).Return(buffer.NewProtoBufferFromProto(&remoteexecution.Action{ + CommandDigest: &remoteexecution.Digest{ + Hash: "61c585c297d00409bd477b6b80759c94ec545ab4", + SizeBytes: 456, + }, + }, buffer.UserProvided)) + initialSizeClassSelector := mock.NewMockSelector(ctrl) + actionRouter.EXPECT().RouteAction(gomock.Any(), gomock.Any(), testutil.EqProto(t, &remoteexecution.Action{ + CommandDigest: &remoteexecution.Digest{ + Hash: "61c585c297d00409bd477b6b80759c94ec545ab4", + SizeBytes: 456, + }, + }), nil).Return(platform.MustNewKey("main", platformForTesting), nil, initialSizeClassSelector, nil) + initialSizeClassLearner1 := mock.NewMockLearner(ctrl) + initialSizeClassSelector.EXPECT().Select([]uint32{3, 8}). 
+ Return(0, 3*time.Minute, 7*time.Minute, initialSizeClassLearner1) + clock.EXPECT().Now().Return(time.Unix(1003, 0)) + timer1 := mock.NewMockTimer(ctrl) + clock.EXPECT().NewTimer(time.Minute).Return(timer1, nil) + uuidGenerator.EXPECT().Call().Return(uuid.Parse("36ebab65-3c4f-4faf-818b-2eabb4cd1b02")) + + stream1, err := executionClient.Execute(ctx, &remoteexecution.ExecuteRequest{ + InstanceName: "main", + ActionDigest: &remoteexecution.Digest{ + Hash: "da39a3ee5e6b4b0d3255bfef95601890afd80709", + SizeBytes: 123, + }, + }) + require.NoError(t, err) + update, err := stream1.Recv() + require.NoError(t, err) + metadata, err := anypb.New(&remoteexecution.ExecuteOperationMetadata{ + Stage: remoteexecution.ExecutionStage_QUEUED, + ActionDigest: &remoteexecution.Digest{ + Hash: "da39a3ee5e6b4b0d3255bfef95601890afd80709", + SizeBytes: 123, + }, + }) + require.NoError(t, err) + testutil.RequireEqualProto(t, update, &longrunningpb.Operation{ + Name: "36ebab65-3c4f-4faf-818b-2eabb4cd1b02", + Metadata: metadata, + }) + + // Let the worker for the small size class pick it up. 
+ timer1.EXPECT().Stop().Return(true) + clock.EXPECT().Now().Return(time.Unix(1004, 0)).Times(2) + timer2 := mock.NewMockTimer(ctrl) + clock.EXPECT().NewTimer(time.Minute).Return(timer2, nil) + response, err = buildQueue.Synchronize(ctx, &remoteworker.SynchronizeRequest{ + WorkerId: map[string]string{ + "hostname": "worker123", + "thread": "42", + }, + InstanceNamePrefix: "main", + Platform: platformForTesting, + SizeClass: 3, + CurrentState: &remoteworker.CurrentState{ + WorkerState: &remoteworker.CurrentState_Idle{ + Idle: &emptypb.Empty{}, + }, + }, + }) + require.NoError(t, err) + testutil.RequireEqualProto(t, &remoteworker.SynchronizeResponse{ + NextSynchronizationAt: ×tamppb.Timestamp{Seconds: 1014}, + DesiredState: &remoteworker.DesiredState{ + WorkerState: &remoteworker.DesiredState_Executing_{ + Executing: &remoteworker.DesiredState_Executing{ + DigestFunction: remoteexecution.DigestFunction_SHA1, + ActionDigest: &remoteexecution.Digest{ + Hash: "da39a3ee5e6b4b0d3255bfef95601890afd80709", + SizeBytes: 123, + }, + Action: &remoteexecution.Action{ + CommandDigest: &remoteexecution.Digest{ + Hash: "61c585c297d00409bd477b6b80759c94ec545ab4", + SizeBytes: 456, + }, + Timeout: &durationpb.Duration{Seconds: 420}, + }, + QueuedTimestamp: ×tamppb.Timestamp{Seconds: 1003}, + }, + }, + }, + }, response) + update, err = stream1.Recv() + require.NoError(t, err) + metadata, err = anypb.New(&remoteexecution.ExecuteOperationMetadata{ + Stage: remoteexecution.ExecutionStage_EXECUTING, + ActionDigest: &remoteexecution.Digest{ + Hash: "da39a3ee5e6b4b0d3255bfef95601890afd80709", + SizeBytes: 123, + }, + }) + require.NoError(t, err) + testutil.RequireEqualProto(t, &longrunningpb.Operation{ + Name: "36ebab65-3c4f-4faf-818b-2eabb4cd1b02", + Metadata: metadata, + }, update) + + // The action fails on the worker of the small size class, + // meaning that we retry it on the largest size class. 
An update + // should be sent back to the client that the operation has + // moved back to the QUEUED stage. + initialSizeClassLearner2 := mock.NewMockLearner(ctrl) + initialSizeClassLearner1.EXPECT().Failed(false). + Return(2*time.Minute, 5*time.Minute, initialSizeClassLearner2) + timer2.EXPECT().Stop().Return(true) + clock.EXPECT().Now().Return(time.Unix(1005, 0)).Times(2) + timer3 := mock.NewMockTimer(ctrl) + clock.EXPECT().NewTimer(time.Minute).Return(timer3, nil) + response, err = buildQueue.Synchronize(ctx, &remoteworker.SynchronizeRequest{ + WorkerId: map[string]string{ + "hostname": "worker123", + "thread": "42", + }, + InstanceNamePrefix: "main", + Platform: platformForTesting, + SizeClass: 3, + CurrentState: &remoteworker.CurrentState{ + WorkerState: &remoteworker.CurrentState_Executing_{ + Executing: &remoteworker.CurrentState_Executing{ + ActionDigest: &remoteexecution.Digest{ + Hash: "da39a3ee5e6b4b0d3255bfef95601890afd80709", + SizeBytes: 123, + }, + ExecutionState: &remoteworker.CurrentState_Executing_Completed{ + Completed: &remoteexecution.ExecuteResponse{ + Result: &remoteexecution.ActionResult{ + ExitCode: 137, + }, + }, + }, + }, + }, + }, + PreferBeingIdle: true, + }) + require.NoError(t, err) + testutil.RequireEqualProto(t, &remoteworker.SynchronizeResponse{ + NextSynchronizationAt: ×tamppb.Timestamp{Seconds: 1005}, + DesiredState: &remoteworker.DesiredState{ + WorkerState: &remoteworker.DesiredState_Idle{ + Idle: &emptypb.Empty{}, + }, + }, + }, response) + update, err = stream1.Recv() + require.NoError(t, err) + metadata, err = anypb.New(&remoteexecution.ExecuteOperationMetadata{ + Stage: remoteexecution.ExecutionStage_QUEUED, + ActionDigest: &remoteexecution.Digest{ + Hash: "da39a3ee5e6b4b0d3255bfef95601890afd80709", + SizeBytes: 123, + }, + }) + require.NoError(t, err) + testutil.RequireEqualProto(t, update, &longrunningpb.Operation{ + Name: "36ebab65-3c4f-4faf-818b-2eabb4cd1b02", + Metadata: metadata, + }) + + // Let a worker for the 
largest size class pick it up once more. + // The client should get notified that the operation is in the + // EXECUTING stage once again. + timer3.EXPECT().Stop().Return(true) + clock.EXPECT().Now().Return(time.Unix(1006, 0)).Times(2) + timer4 := mock.NewMockTimer(ctrl) + clock.EXPECT().NewTimer(time.Minute).Return(timer4, nil) + response, err = buildQueue.Synchronize(ctx, &remoteworker.SynchronizeRequest{ + WorkerId: map[string]string{ + "hostname": "worker456", + "thread": "42", + }, + InstanceNamePrefix: "main", + Platform: platformForTesting, + SizeClass: 8, + CurrentState: &remoteworker.CurrentState{ + WorkerState: &remoteworker.CurrentState_Idle{ + Idle: &emptypb.Empty{}, + }, + }, + }) + require.NoError(t, err) + testutil.RequireEqualProto(t, &remoteworker.SynchronizeResponse{ + NextSynchronizationAt: ×tamppb.Timestamp{Seconds: 1016}, + DesiredState: &remoteworker.DesiredState{ + WorkerState: &remoteworker.DesiredState_Executing_{ + Executing: &remoteworker.DesiredState_Executing{ + DigestFunction: remoteexecution.DigestFunction_SHA1, + ActionDigest: &remoteexecution.Digest{ + Hash: "da39a3ee5e6b4b0d3255bfef95601890afd80709", + SizeBytes: 123, + }, + Action: &remoteexecution.Action{ + CommandDigest: &remoteexecution.Digest{ + Hash: "61c585c297d00409bd477b6b80759c94ec545ab4", + SizeBytes: 456, + }, + Timeout: &durationpb.Duration{Seconds: 300}, + }, + QueuedTimestamp: ×tamppb.Timestamp{Seconds: 1003}, + }, + }, + }, + }, response) + update, err = stream1.Recv() + require.NoError(t, err) + metadata, err = anypb.New(&remoteexecution.ExecuteOperationMetadata{ + Stage: remoteexecution.ExecutionStage_EXECUTING, + ActionDigest: &remoteexecution.Digest{ + Hash: "da39a3ee5e6b4b0d3255bfef95601890afd80709", + SizeBytes: 123, + }, + }) + require.NoError(t, err) + testutil.RequireEqualProto(t, &longrunningpb.Operation{ + Name: "36ebab65-3c4f-4faf-818b-2eabb4cd1b02", + Metadata: metadata, + }, update) + + // Let the action succeed on the largest size class. 
This should + // cause the executing time on the largest size class to be + // provided to the learner, and completion to be reported to the + // client. + initialSizeClassLearner2.EXPECT().Succeeded(3*time.Second, []uint32{3, 8}) + clock.EXPECT().Now().Return(time.Unix(1019, 0)).Times(3) + timer4.EXPECT().Stop().Return(true) + response, err = buildQueue.Synchronize(ctx, &remoteworker.SynchronizeRequest{ + WorkerId: map[string]string{ + "hostname": "worker456", + "thread": "42", + }, + InstanceNamePrefix: "main", + Platform: platformForTesting, + SizeClass: 8, + CurrentState: &remoteworker.CurrentState{ + WorkerState: &remoteworker.CurrentState_Executing_{ + Executing: &remoteworker.CurrentState_Executing{ + ActionDigest: &remoteexecution.Digest{ + Hash: "da39a3ee5e6b4b0d3255bfef95601890afd80709", + SizeBytes: 123, + }, + ExecutionState: &remoteworker.CurrentState_Executing_Completed{ + Completed: &remoteexecution.ExecuteResponse{ + Result: &remoteexecution.ActionResult{ + ExecutionMetadata: &remoteexecution.ExecutedActionMetadata{ + VirtualExecutionDuration: &durationpb.Duration{Seconds: 3}, + }, + }, + }, + }, + }, + }, + }, + PreferBeingIdle: true, + }) + require.NoError(t, err) + testutil.RequireEqualProto(t, &remoteworker.SynchronizeResponse{ + NextSynchronizationAt: ×tamppb.Timestamp{Seconds: 1019}, + DesiredState: &remoteworker.DesiredState{ + WorkerState: &remoteworker.DesiredState_Idle{ + Idle: &emptypb.Empty{}, + }, + }, + }, response) + update, err = stream1.Recv() + require.NoError(t, err) + metadata, err = anypb.New(&remoteexecution.ExecuteOperationMetadata{ + Stage: remoteexecution.ExecutionStage_COMPLETED, + ActionDigest: &remoteexecution.Digest{ + Hash: "da39a3ee5e6b4b0d3255bfef95601890afd80709", + SizeBytes: 123, + }, + }) + require.NoError(t, err) + executeResponse, err := anypb.New(&remoteexecution.ExecuteResponse{ + Result: &remoteexecution.ActionResult{ + ExecutionMetadata: &remoteexecution.ExecutedActionMetadata{ + VirtualExecutionDuration: 
&durationpb.Duration{Seconds: 3}, + }, + }, + }) + require.NoError(t, err) + testutil.RequireEqualProto(t, update, &longrunningpb.Operation{ + Name: "36ebab65-3c4f-4faf-818b-2eabb4cd1b02", + Metadata: metadata, + Done: true, + Result: &longrunningpb.Operation_Response{Response: executeResponse}, + }) +} + +func TestInMemoryBuildQueueBackgroundRun(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + contentAddressableStorage := mock.NewMockBlobAccess(ctrl) + clock := mock.NewMockClock(ctrl) + clock.EXPECT().Now().Return(time.Unix(0, 0)) + uuidGenerator := mock.NewMockUUIDGenerator(ctrl) + actionRouter := mock.NewMockActionRouter(ctrl) + buildQueue := scheduler.NewInMemoryBuildQueue(contentAddressableStorage, clock, uuidGenerator.Call, &buildQueueConfigurationForTesting, 10000, actionRouter, allowAllAuthorizer, allowAllAuthorizer, allowAllAuthorizer) + executionClient := getExecutionClient(t, buildQueue) + + // Register a platform queue that allows workers up to size + // class 8. The maximum needs to be provided to ensure that the + // execution strategy remains deterministic. 
+ clock.EXPECT().Now().Return(time.Unix(1000, 0)) + require.NoError(t, buildQueue.RegisterPredeclaredPlatformQueue( + digest.MustNewInstanceName("main"), + platformForTesting, + /* workerInvocationStickinessLimits = */ nil, + /* maximumQueuedBackgroundLearningOperations = */ 10, + /* backgroundLearningOperationPriority = */ 100, + /* maximumSizeClass = */ 8)) + + clock.EXPECT().Now().Return(time.Unix(1002, 0)) + response, err := buildQueue.Synchronize(ctx, &remoteworker.SynchronizeRequest{ + WorkerId: map[string]string{ + "hostname": "worker123", + "thread": "42", + }, + InstanceNamePrefix: "main", + Platform: platformForTesting, + SizeClass: 3, + CurrentState: &remoteworker.CurrentState{ + WorkerState: &remoteworker.CurrentState_Executing_{ + Executing: &remoteworker.CurrentState_Executing{ + ActionDigest: &remoteexecution.Digest{ + Hash: "099a3f6dc1e8e91dbcca4ea964cd2237d4b11733", + SizeBytes: 123, + }, + ExecutionState: &remoteworker.CurrentState_Executing_FetchingInputs{ + FetchingInputs: &emptypb.Empty{}, + }, + }, + }, + }, + }) + require.NoError(t, err) + testutil.RequireEqualProto(t, &remoteworker.SynchronizeResponse{ + NextSynchronizationAt: ×tamppb.Timestamp{Seconds: 1002}, + DesiredState: &remoteworker.DesiredState{ + WorkerState: &remoteworker.DesiredState_Idle{ + Idle: &emptypb.Empty{}, + }, + }, + }, response) + + // Let a client enqueue a new operation, which we'll initially + // schedule on the largest size class. 
+ contentAddressableStorage.EXPECT().Get( + gomock.Any(), + digest.MustNewDigest("main", remoteexecution.DigestFunction_SHA1, "da39a3ee5e6b4b0d3255bfef95601890afd80709", 123), + ).Return(buffer.NewProtoBufferFromProto(&remoteexecution.Action{ + CommandDigest: &remoteexecution.Digest{ + Hash: "61c585c297d00409bd477b6b80759c94ec545ab4", + SizeBytes: 456, + }, + }, buffer.UserProvided)) + initialSizeClassSelector := mock.NewMockSelector(ctrl) + actionRouter.EXPECT().RouteAction(gomock.Any(), gomock.Any(), testutil.EqProto(t, &remoteexecution.Action{ + CommandDigest: &remoteexecution.Digest{ + Hash: "61c585c297d00409bd477b6b80759c94ec545ab4", + SizeBytes: 456, + }, + }), nil).Return(platform.MustNewKey("main", platformForTesting), nil, initialSizeClassSelector, nil) + initialSizeClassLearner1 := mock.NewMockLearner(ctrl) + initialSizeClassSelector.EXPECT().Select([]uint32{3, 8}). + Return(1, 3*time.Minute, 7*time.Minute, initialSizeClassLearner1) + clock.EXPECT().Now().Return(time.Unix(1003, 0)) + timer1 := mock.NewMockTimer(ctrl) + clock.EXPECT().NewTimer(time.Minute).Return(timer1, nil) + uuidGenerator.EXPECT().Call().Return(uuid.Parse("36ebab65-3c4f-4faf-818b-2eabb4cd1b02")) + + stream1, err := executionClient.Execute(ctx, &remoteexecution.ExecuteRequest{ + InstanceName: "main", + ActionDigest: &remoteexecution.Digest{ + Hash: "da39a3ee5e6b4b0d3255bfef95601890afd80709", + SizeBytes: 123, + }, + }) + require.NoError(t, err) + update, err := stream1.Recv() + require.NoError(t, err) + metadata, err := anypb.New(&remoteexecution.ExecuteOperationMetadata{ + Stage: remoteexecution.ExecutionStage_QUEUED, + ActionDigest: &remoteexecution.Digest{ + Hash: "da39a3ee5e6b4b0d3255bfef95601890afd80709", + SizeBytes: 123, + }, + }) + require.NoError(t, err) + testutil.RequireEqualProto(t, update, &longrunningpb.Operation{ + Name: "36ebab65-3c4f-4faf-818b-2eabb4cd1b02", + Metadata: metadata, + }) + + // Let a worker for the largest size class pick it up. 
+ timer1.EXPECT().Stop().Return(true) + clock.EXPECT().Now().Return(time.Unix(1004, 0)).Times(2) + timer2 := mock.NewMockTimer(ctrl) + clock.EXPECT().NewTimer(time.Minute).Return(timer2, nil) + response, err = buildQueue.Synchronize(ctx, &remoteworker.SynchronizeRequest{ + WorkerId: map[string]string{ + "hostname": "worker456", + "thread": "42", + }, + InstanceNamePrefix: "main", + Platform: platformForTesting, + SizeClass: 8, + CurrentState: &remoteworker.CurrentState{ + WorkerState: &remoteworker.CurrentState_Idle{ + Idle: &emptypb.Empty{}, + }, + }, + }) + require.NoError(t, err) + testutil.RequireEqualProto(t, &remoteworker.SynchronizeResponse{ + NextSynchronizationAt: ×tamppb.Timestamp{Seconds: 1014}, + DesiredState: &remoteworker.DesiredState{ + WorkerState: &remoteworker.DesiredState_Executing_{ + Executing: &remoteworker.DesiredState_Executing{ + DigestFunction: remoteexecution.DigestFunction_SHA1, + ActionDigest: &remoteexecution.Digest{ + Hash: "da39a3ee5e6b4b0d3255bfef95601890afd80709", + SizeBytes: 123, + }, + Action: &remoteexecution.Action{ + CommandDigest: &remoteexecution.Digest{ + Hash: "61c585c297d00409bd477b6b80759c94ec545ab4", + SizeBytes: 456, + }, + Timeout: &durationpb.Duration{Seconds: 420}, + }, + QueuedTimestamp: ×tamppb.Timestamp{Seconds: 1003}, + }, + }, + }, + }, response) + update, err = stream1.Recv() + require.NoError(t, err) + metadata, err = anypb.New(&remoteexecution.ExecuteOperationMetadata{ + Stage: remoteexecution.ExecutionStage_EXECUTING, + ActionDigest: &remoteexecution.Digest{ + Hash: "da39a3ee5e6b4b0d3255bfef95601890afd80709", + SizeBytes: 123, + }, + }) + require.NoError(t, err) + testutil.RequireEqualProto(t, &longrunningpb.Operation{ + Name: "36ebab65-3c4f-4faf-818b-2eabb4cd1b02", + Metadata: metadata, + }, update) + + // The action succeeds on the worker of the largest size class. + // In response, request that the same action is rerun on the + // smaller size class. 
Because we don't want to leave the client + // blocked on that, this should be done as part of a separate + // task. + initialSizeClassLearner2 := mock.NewMockLearner(ctrl) + initialSizeClassLearner1.EXPECT().Succeeded(3*time.Second, []uint32{3, 8}). + Return(0, 30*time.Second, time.Minute, initialSizeClassLearner2) + uuidGenerator.EXPECT().Call().Return(uuid.Parse("30326ed7-101a-4bf2-93eb-fcb6e7672415")) + timer2.EXPECT().Stop().Return(true) + clock.EXPECT().Now().Return(time.Unix(1005, 0)).Times(3) + response, err = buildQueue.Synchronize(ctx, &remoteworker.SynchronizeRequest{ + WorkerId: map[string]string{ + "hostname": "worker456", + "thread": "42", + }, + InstanceNamePrefix: "main", + Platform: platformForTesting, + SizeClass: 8, + CurrentState: &remoteworker.CurrentState{ + WorkerState: &remoteworker.CurrentState_Executing_{ + Executing: &remoteworker.CurrentState_Executing{ + ActionDigest: &remoteexecution.Digest{ + Hash: "da39a3ee5e6b4b0d3255bfef95601890afd80709", + SizeBytes: 123, + }, + ExecutionState: &remoteworker.CurrentState_Executing_Completed{ + Completed: &remoteexecution.ExecuteResponse{ + Result: &remoteexecution.ActionResult{ + ExecutionMetadata: &remoteexecution.ExecutedActionMetadata{ + VirtualExecutionDuration: &durationpb.Duration{Seconds: 3}, + }, + }, + }, + }, + }, + }, + }, + PreferBeingIdle: true, + }) + require.NoError(t, err) + testutil.RequireEqualProto(t, &remoteworker.SynchronizeResponse{ + NextSynchronizationAt: ×tamppb.Timestamp{Seconds: 1005}, + DesiredState: &remoteworker.DesiredState{ + WorkerState: &remoteworker.DesiredState_Idle{ + Idle: &emptypb.Empty{}, + }, + }, + }, response) + update, err = stream1.Recv() + require.NoError(t, err) + metadata, err = anypb.New(&remoteexecution.ExecuteOperationMetadata{ + Stage: remoteexecution.ExecutionStage_COMPLETED, + ActionDigest: &remoteexecution.Digest{ + Hash: "da39a3ee5e6b4b0d3255bfef95601890afd80709", + SizeBytes: 123, + }, + }) + require.NoError(t, err) + executeResponse, err := 
anypb.New(&remoteexecution.ExecuteResponse{ + Result: &remoteexecution.ActionResult{ + ExecutionMetadata: &remoteexecution.ExecutedActionMetadata{ + VirtualExecutionDuration: &durationpb.Duration{Seconds: 3}, + }, + }, + }) + require.NoError(t, err) + testutil.RequireEqualProto(t, update, &longrunningpb.Operation{ + Name: "36ebab65-3c4f-4faf-818b-2eabb4cd1b02", + Metadata: metadata, + Done: true, + Result: &longrunningpb.Operation_Response{Response: executeResponse}, + }) + + // Let the worker for the smaller size class pick up the + // background task. The action should be identical to the + // original one, except that the timeout is altered and + // do_not_cache is set. + clock.EXPECT().Now().Return(time.Unix(1006, 0)) + response, err = buildQueue.Synchronize(ctx, &remoteworker.SynchronizeRequest{ + WorkerId: map[string]string{ + "hostname": "worker123", + "thread": "42", + }, + InstanceNamePrefix: "main", + Platform: platformForTesting, + SizeClass: 3, + CurrentState: &remoteworker.CurrentState{ + WorkerState: &remoteworker.CurrentState_Idle{ + Idle: &emptypb.Empty{}, + }, + }, + }) + require.NoError(t, err) + testutil.RequireEqualProto(t, &remoteworker.SynchronizeResponse{ + NextSynchronizationAt: ×tamppb.Timestamp{Seconds: 1016}, + DesiredState: &remoteworker.DesiredState{ + WorkerState: &remoteworker.DesiredState_Executing_{ + Executing: &remoteworker.DesiredState_Executing{ + DigestFunction: remoteexecution.DigestFunction_SHA1, + ActionDigest: &remoteexecution.Digest{ + Hash: "da39a3ee5e6b4b0d3255bfef95601890afd80709", + SizeBytes: 123, + }, + Action: &remoteexecution.Action{ + CommandDigest: &remoteexecution.Digest{ + Hash: "61c585c297d00409bd477b6b80759c94ec545ab4", + SizeBytes: 456, + }, + Timeout: &durationpb.Duration{Seconds: 60}, + DoNotCache: true, + }, + QueuedTimestamp: ×tamppb.Timestamp{Seconds: 1003}, + }, + }, + }, + }, response) + + // Let the action succeed on the smaller size class. 
This should + // cause the initial size class learner to be finalized. + initialSizeClassLearner2.EXPECT().Succeeded(3*time.Second, []uint32{3, 8}) + clock.EXPECT().Now().Return(time.Unix(1019, 0)) + response, err = buildQueue.Synchronize(ctx, &remoteworker.SynchronizeRequest{ + WorkerId: map[string]string{ + "hostname": "worker123", + "thread": "42", + }, + InstanceNamePrefix: "main", + Platform: platformForTesting, + SizeClass: 3, + CurrentState: &remoteworker.CurrentState{ + WorkerState: &remoteworker.CurrentState_Executing_{ + Executing: &remoteworker.CurrentState_Executing{ + ActionDigest: &remoteexecution.Digest{ + Hash: "da39a3ee5e6b4b0d3255bfef95601890afd80709", + SizeBytes: 123, + }, + ExecutionState: &remoteworker.CurrentState_Executing_Completed{ + Completed: &remoteexecution.ExecuteResponse{ + Result: &remoteexecution.ActionResult{ + ExecutionMetadata: &remoteexecution.ExecutedActionMetadata{ + VirtualExecutionDuration: &durationpb.Duration{Seconds: 3}, + }, + }, + }, + }, + }, + }, + }, + PreferBeingIdle: true, + }) + require.NoError(t, err) + testutil.RequireEqualProto(t, &remoteworker.SynchronizeResponse{ + NextSynchronizationAt: ×tamppb.Timestamp{Seconds: 1019}, + DesiredState: &remoteworker.DesiredState{ + WorkerState: &remoteworker.DesiredState_Idle{ + Idle: &emptypb.Empty{}, + }, + }, + }, response) +} + +func TestInMemoryBuildQueueIdleSynchronizingWorkers(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + contentAddressableStorage := mock.NewMockBlobAccess(ctrl) + mockClock := mock.NewMockClock(ctrl) + mockClock.EXPECT().Now().Return(time.Unix(0, 0)) + uuidGenerator := mock.NewMockUUIDGenerator(ctrl) + actionRouter := mock.NewMockActionRouter(ctrl) + buildQueue := scheduler.NewInMemoryBuildQueue(contentAddressableStorage, mockClock, uuidGenerator.Call, &buildQueueConfigurationForTesting, 10000, actionRouter, allowAllAuthorizer, allowAllAuthorizer, allowAllAuthorizer) + executionClient := getExecutionClient(t, 
buildQueue) + + // Common values used by steps below. + action := &remoteexecution.Action{ + CommandDigest: &remoteexecution.Digest{ + Hash: "61c585c297d00409bd477b6b80759c94ec545ab4", + SizeBytes: 456, + }, + DoNotCache: true, + Timeout: &durationpb.Duration{Seconds: 420}, + } + actionDigest := &remoteexecution.Digest{ + Hash: "da39a3ee5e6b4b0d3255bfef95601890afd80709", + SizeBytes: 123, + } + invocationID1, err := anypb.New(&remoteexecution.RequestMetadata{ + ToolInvocationId: "33b38903-d456-4417-951b-bd8a2681c136", + }) + require.NoError(t, err) + invocationID2, err := anypb.New(&remoteexecution.RequestMetadata{ + ToolInvocationId: "75b27319-2704-4fa0-84c9-6881ea5b93ad", + }) + require.NoError(t, err) + invocationID3, err := anypb.New(&remoteexecution.RequestMetadata{ + ToolInvocationId: "d6be714f-cef6-408f-b3c7-dfeeae48f63f", + }) + require.NoError(t, err) + workerID1 := map[string]string{ + "hostname": "worker123", + "thread": "42", + } + workerID2 := map[string]string{ + "hostname": "worker123", + "thread": "43", + } + sizeClassQueueName := &buildqueuestate.SizeClassQueueName{ + PlatformQueueName: &buildqueuestate.PlatformQueueName{ + Platform: platformForTesting, + }, + } + metadataExecuting, err := anypb.New(&remoteexecution.ExecuteOperationMetadata{ + Stage: remoteexecution.ExecutionStage_EXECUTING, + ActionDigest: actionDigest, + }) + require.NoError(t, err) + metadataCompleted, err := anypb.New(&remoteexecution.ExecuteOperationMetadata{ + Stage: remoteexecution.ExecutionStage_COMPLETED, + ActionDigest: actionDigest, + }) + require.NoError(t, err) + executeResponse, err := anypb.New(&remoteexecution.ExecuteResponse{ + Result: &remoteexecution.ActionResult{}, + }) + require.NoError(t, err) + + contentAddressableStorage.EXPECT().Get( + gomock.Any(), + digest.MustNewDigest("", remoteexecution.DigestFunction_SHA1, "da39a3ee5e6b4b0d3255bfef95601890afd80709", 123), + ).Return(buffer.NewProtoBufferFromProto(action, buffer.UserProvided)).AnyTimes() + + // Create 
a worker that does a blocking Synchronize() call + // against the scheduler. + mockClock.EXPECT().Now().Return(time.Unix(1000, 0)) + timer1 := mock.NewMockTimer(ctrl) + wait1 := make(chan struct{}, 1) + mockClock.EXPECT().NewTimer(time.Minute).DoAndReturn(func(d time.Duration) (clock.Timer, <-chan time.Time) { + wait1 <- struct{}{} + return timer1, nil + }) + var response1 *remoteworker.SynchronizeResponse + var err1 error + wait2 := make(chan struct{}, 1) + go func() { + response1, err1 = buildQueue.Synchronize(ctx, &remoteworker.SynchronizeRequest{ + WorkerId: workerID1, + Platform: platformForTesting, + CurrentState: &remoteworker.CurrentState{ + WorkerState: &remoteworker.CurrentState_Idle{ + Idle: &emptypb.Empty{}, + }, + }, + }) + wait2 <- struct{}{} + }() + <-wait1 + + // Assign a task to it. The worker should be woken up directly. + // The client should immediately receive an EXECUTING update. + // There is no need to return QUEUED. + initialSizeClassSelector1 := mock.NewMockSelector(ctrl) + actionRouter.EXPECT().RouteAction(gomock.Any(), gomock.Any(), testutil.EqProto(t, action), nil).Return( + platform.MustNewKey("", platformForTesting), + []invocation.Key{invocation.MustNewKey(invocationID1)}, + initialSizeClassSelector1, + nil, + ) + initialSizeClassLearner1 := mock.NewMockLearner(ctrl) + initialSizeClassSelector1.EXPECT().Select([]uint32{0}). 
+ Return(0, 3*time.Minute, 7*time.Minute, initialSizeClassLearner1) + mockClock.EXPECT().Now().Return(time.Unix(1001, 0)).Times(2) + timer2 := mock.NewMockTimer(ctrl) + mockClock.EXPECT().NewTimer(time.Minute).Return(timer2, nil) + uuidGenerator.EXPECT().Call().Return(uuid.Parse("36ebab65-3c4f-4faf-818b-2eabb4cd1b02")) + timer1.EXPECT().Stop() + + stream1, err := executionClient.Execute(ctx, &remoteexecution.ExecuteRequest{ + ActionDigest: actionDigest, + }) + require.NoError(t, err) + update, err := stream1.Recv() + require.NoError(t, err) + testutil.RequireEqualProto(t, update, &longrunningpb.Operation{ + Name: "36ebab65-3c4f-4faf-818b-2eabb4cd1b02", + Metadata: metadataExecuting, + }) + + // The worker should get unblocked. + <-wait2 + require.NoError(t, err1) + testutil.RequireEqualProto(t, &remoteworker.SynchronizeResponse{ + NextSynchronizationAt: ×tamppb.Timestamp{Seconds: 1011}, + DesiredState: &remoteworker.DesiredState{ + WorkerState: &remoteworker.DesiredState_Executing_{ + Executing: &remoteworker.DesiredState_Executing{ + DigestFunction: remoteexecution.DigestFunction_SHA1, + ActionDigest: actionDigest, + Action: action, + QueuedTimestamp: ×tamppb.Timestamp{Seconds: 1001}, + }, + }, + }, + }, response1) + + // Let the worker complete the operation. This should wake up + // the client. 
+ mockClock.EXPECT().Now().Return(time.Unix(1002, 0)).Times(3) + initialSizeClassLearner1.EXPECT().Succeeded(time.Duration(0), []uint32{0}) + timer2.EXPECT().Stop() + timer3 := mock.NewMockTimer(ctrl) + wait3 := make(chan struct{}, 1) + mockClock.EXPECT().NewTimer(time.Minute).DoAndReturn(func(d time.Duration) (clock.Timer, <-chan time.Time) { + wait3 <- struct{}{} + return timer3, nil + }) + var response2 *remoteworker.SynchronizeResponse + var err2 error + wait4 := make(chan struct{}, 1) + go func() { + response2, err2 = buildQueue.Synchronize(ctx, &remoteworker.SynchronizeRequest{ + WorkerId: workerID1, + Platform: platformForTesting, + CurrentState: &remoteworker.CurrentState{ + WorkerState: &remoteworker.CurrentState_Executing_{ + Executing: &remoteworker.CurrentState_Executing{ + ActionDigest: actionDigest, + ExecutionState: &remoteworker.CurrentState_Executing_Completed{ + Completed: &remoteexecution.ExecuteResponse{ + Result: &remoteexecution.ActionResult{}, + }, + }, + }, + }, + }, + }) + wait4 <- struct{}{} + }() + <-wait3 + + update, err = stream1.Recv() + require.NoError(t, err) + testutil.RequireEqualProto(t, update, &longrunningpb.Operation{ + Name: "36ebab65-3c4f-4faf-818b-2eabb4cd1b02", + Metadata: metadataCompleted, + Done: true, + Result: &longrunningpb.Operation_Response{Response: executeResponse}, + }) + _, err = stream1.Recv() + require.Equal(t, io.EOF, err) + + // Even though there are no longer any active or queued + // invocations, the scheduler should keep track of the + // invocation belonging to the previously completed action, + // keeping track of how which workers are associated with it. 
+ mockClock.EXPECT().Now().Return(time.Unix(1003, 0)).Times(3) + invocationName := &buildqueuestate.InvocationName{ + SizeClassQueueName: sizeClassQueueName, + } + invocationStates, err := buildQueue.ListInvocationChildren(ctx, &buildqueuestate.ListInvocationChildrenRequest{ + InvocationName: invocationName, + Filter: buildqueuestate.ListInvocationChildrenRequest_ALL, + }) + require.NoError(t, err) + testutil.RequireEqualProto(t, &buildqueuestate.ListInvocationChildrenResponse{ + Children: []*buildqueuestate.InvocationChildState{ + { + Id: invocationID1, + State: &buildqueuestate.InvocationState{ + IdleWorkersCount: 1, + IdleSynchronizingWorkersCount: 1, + }, + }, + }, + }, invocationStates) + + invocationStates, err = buildQueue.ListInvocationChildren(ctx, &buildqueuestate.ListInvocationChildrenRequest{ + InvocationName: invocationName, + Filter: buildqueuestate.ListInvocationChildrenRequest_ACTIVE, + }) + require.NoError(t, err) + testutil.RequireEqualProto(t, &buildqueuestate.ListInvocationChildrenResponse{}, invocationStates) + + invocationStates, err = buildQueue.ListInvocationChildren(ctx, &buildqueuestate.ListInvocationChildrenRequest{ + InvocationName: invocationName, + Filter: buildqueuestate.ListInvocationChildrenRequest_QUEUED, + }) + require.NoError(t, err) + testutil.RequireEqualProto(t, &buildqueuestate.ListInvocationChildrenResponse{}, invocationStates) + + // Create a second worker that issues a blocking Synchronize() + // call against the scheduler. 
+ mockClock.EXPECT().Now().Return(time.Unix(1004, 0)) + timer4 := mock.NewMockTimer(ctrl) + wait5 := make(chan struct{}, 1) + mockClock.EXPECT().NewTimer(time.Minute).DoAndReturn(func(d time.Duration) (clock.Timer, <-chan time.Time) { + wait5 <- struct{}{} + return timer4, nil + }) + var response3 *remoteworker.SynchronizeResponse + var err3 error + wait6 := make(chan struct{}, 1) + go func() { + response3, err3 = buildQueue.Synchronize(ctx, &remoteworker.SynchronizeRequest{ + WorkerId: workerID2, + Platform: platformForTesting, + CurrentState: &remoteworker.CurrentState{ + WorkerState: &remoteworker.CurrentState_Idle{ + Idle: &emptypb.Empty{}, + }, + }, + }) + wait6 <- struct{}{} + }() + <-wait5 + + // Schedule another operation. Because this operation uses a + // different invocation ID, it must be scheduled on the second + // worker. We want to keep the first worker available for + // actions for the same invocation ID. + initialSizeClassSelector2 := mock.NewMockSelector(ctrl) + actionRouter.EXPECT().RouteAction(gomock.Any(), gomock.Any(), testutil.EqProto(t, action), nil).Return( + platform.MustNewKey("", platformForTesting), + []invocation.Key{invocation.MustNewKey(invocationID2)}, + initialSizeClassSelector2, + nil, + ) + initialSizeClassLearner2 := mock.NewMockLearner(ctrl) + initialSizeClassSelector2.EXPECT().Select([]uint32{0}). 
+ Return(0, 3*time.Minute, 7*time.Minute, initialSizeClassLearner2)
+ mockClock.EXPECT().Now().Return(time.Unix(1005, 0)).Times(2)
+ timer5 := mock.NewMockTimer(ctrl)
+ mockClock.EXPECT().NewTimer(time.Minute).Return(timer5, nil)
+ uuidGenerator.EXPECT().Call().Return(uuid.Parse("e98bb734-0ec7-4cc5-bb98-bb3d5c0788c2"))
+ timer4.EXPECT().Stop()
+
+ stream2, err := executionClient.Execute(ctx, &remoteexecution.ExecuteRequest{
+ ActionDigest: actionDigest,
+ })
+ require.NoError(t, err)
+ update, err = stream2.Recv()
+ require.NoError(t, err)
+ testutil.RequireEqualProto(t, update, &longrunningpb.Operation{
+ Name: "e98bb734-0ec7-4cc5-bb98-bb3d5c0788c2",
+ Metadata: metadataExecuting,
+ })
+
+ <-wait6
+ require.NoError(t, err3)
+ testutil.RequireEqualProto(t, &remoteworker.SynchronizeResponse{
+ NextSynchronizationAt: &timestamppb.Timestamp{Seconds: 1015},
+ DesiredState: &remoteworker.DesiredState{
+ WorkerState: &remoteworker.DesiredState_Executing_{
+ Executing: &remoteworker.DesiredState_Executing{
+ DigestFunction: remoteexecution.DigestFunction_SHA1,
+ ActionDigest: actionDigest,
+ Action: action,
+ QueuedTimestamp: &timestamppb.Timestamp{Seconds: 1005},
+ },
+ },
+ },
+ }, response3)
+
+ // Let the second worker complete the operation.
+ mockClock.EXPECT().Now().Return(time.Unix(1006, 0)).Times(3)
+ initialSizeClassLearner2.EXPECT().Succeeded(time.Duration(0), []uint32{0})
+ timer5.EXPECT().Stop()
+ timer6 := mock.NewMockTimer(ctrl)
+ wait7 := make(chan struct{}, 1)
+ mockClock.EXPECT().NewTimer(time.Minute).DoAndReturn(func(d time.Duration) (clock.Timer, <-chan time.Time) {
+ wait7 <- struct{}{}
+ return timer6, nil
+ })
+ go func() {
+ buildQueue.Synchronize(ctx, &remoteworker.SynchronizeRequest{
+ WorkerId: workerID2,
+ Platform: platformForTesting,
+ CurrentState: &remoteworker.CurrentState{
+ WorkerState: &remoteworker.CurrentState_Executing_{
+ Executing: &remoteworker.CurrentState_Executing{
+ ActionDigest: actionDigest,
+ ExecutionState: &remoteworker.CurrentState_Executing_Completed{
+ Completed: &remoteexecution.ExecuteResponse{
+ Result: &remoteexecution.ActionResult{},
+ },
+ },
+ },
+ },
+ },
+ })
+ }()
+ <-wait7
+
+ update, err = stream2.Recv()
+ require.NoError(t, err)
+ testutil.RequireEqualProto(t, update, &longrunningpb.Operation{
+ Name: "e98bb734-0ec7-4cc5-bb98-bb3d5c0788c2",
+ Metadata: metadataCompleted,
+ Done: true,
+ Result: &longrunningpb.Operation_Response{Response: executeResponse},
+ })
+ _, err = stream2.Recv()
+ require.Equal(t, io.EOF, err)
+
+ // Both invocations should now have one worker.
+ mockClock.EXPECT().Now().Return(time.Unix(1007, 0)) + invocationStates, err = buildQueue.ListInvocationChildren(ctx, &buildqueuestate.ListInvocationChildrenRequest{ + InvocationName: invocationName, + Filter: buildqueuestate.ListInvocationChildrenRequest_ALL, + }) + require.NoError(t, err) + testutil.RequireEqualProto(t, &buildqueuestate.ListInvocationChildrenResponse{ + Children: []*buildqueuestate.InvocationChildState{ + { + Id: invocationID1, + State: &buildqueuestate.InvocationState{ + IdleWorkersCount: 1, + IdleSynchronizingWorkersCount: 1, + }, + }, + { + Id: invocationID2, + State: &buildqueuestate.InvocationState{ + IdleWorkersCount: 1, + IdleSynchronizingWorkersCount: 1, + }, + }, + }, + }, invocationStates) + + // When submitting a third operation that uses an unknown + // invocation, we should execute it on the first worker, as that + // worker is associated with an invocation that is least + // recently seen. + initialSizeClassSelector3 := mock.NewMockSelector(ctrl) + actionRouter.EXPECT().RouteAction(gomock.Any(), gomock.Any(), testutil.EqProto(t, action), nil).Return( + platform.MustNewKey("", platformForTesting), + []invocation.Key{invocation.MustNewKey(invocationID3)}, + initialSizeClassSelector3, + nil, + ) + initialSizeClassLearner3 := mock.NewMockLearner(ctrl) + initialSizeClassSelector3.EXPECT().Select([]uint32{0}). 
+ Return(0, 3*time.Minute, 7*time.Minute, initialSizeClassLearner3)
+ mockClock.EXPECT().Now().Return(time.Unix(1008, 0)).Times(2)
+ timer7 := mock.NewMockTimer(ctrl)
+ mockClock.EXPECT().NewTimer(time.Minute).Return(timer7, nil)
+ uuidGenerator.EXPECT().Call().Return(uuid.Parse("a0942b25-9c84-42da-93cb-cbd16cf61917"))
+ timer3.EXPECT().Stop()
+
+ stream3, err := executionClient.Execute(ctx, &remoteexecution.ExecuteRequest{
+ ActionDigest: actionDigest,
+ })
+ require.NoError(t, err)
+ update, err = stream3.Recv()
+ require.NoError(t, err)
+ testutil.RequireEqualProto(t, update, &longrunningpb.Operation{
+ Name: "a0942b25-9c84-42da-93cb-cbd16cf61917",
+ Metadata: metadataExecuting,
+ })
+
+ <-wait4
+ require.NoError(t, err2)
+ testutil.RequireEqualProto(t, &remoteworker.SynchronizeResponse{
+ NextSynchronizationAt: &timestamppb.Timestamp{Seconds: 1018},
+ DesiredState: &remoteworker.DesiredState{
+ WorkerState: &remoteworker.DesiredState_Executing_{
+ Executing: &remoteworker.DesiredState_Executing{
+ DigestFunction: remoteexecution.DigestFunction_SHA1,
+ ActionDigest: actionDigest,
+ Action: action,
+ QueuedTimestamp: &timestamppb.Timestamp{Seconds: 1008},
+ },
+ },
+ },
+ }, response2)
+}
+
+func TestInMemoryBuildQueueWorkerInvocationStickinessLimit(t *testing.T) {
+ ctrl, ctx := gomock.WithContext(context.Background(), t)
+
+ contentAddressableStorage := mock.NewMockBlobAccess(ctrl)
+ clock := mock.NewMockClock(ctrl)
+ clock.EXPECT().Now().Return(time.Unix(0, 0))
+ uuidGenerator := mock.NewMockUUIDGenerator(ctrl)
+ actionRouter := mock.NewMockActionRouter(ctrl)
+ buildQueue := scheduler.NewInMemoryBuildQueue(contentAddressableStorage, clock, uuidGenerator.Call, &buildQueueConfigurationForTesting, 10000, actionRouter, allowAllAuthorizer, allowAllAuthorizer, allowAllAuthorizer)
+ executionClient := getExecutionClient(t, buildQueue)
+
+ // Register a platform queue that has a small amount of worker
+ // invocation stickiness.
This should cause workers to prefer picking
+ // up operations belonging to the same invocation as the last
+ // executed task, if the difference in queueing time is small.
+ clock.EXPECT().Now().Return(time.Unix(1000, 0))
+ require.NoError(t, buildQueue.RegisterPredeclaredPlatformQueue(
+ digest.EmptyInstanceName,
+ platformForTesting,
+ /* workerInvocationStickinessLimits = */ []time.Duration{3 * time.Second},
+ /* maximumQueuedBackgroundLearningOperations = */ 10,
+ /* backgroundLearningOperationPriority = */ 100,
+ /* maximumSizeClass = */ 0))
+
+ operationParameters := []struct {
+ operationName string
+ toolInvocationID string
+ }{
+ {"716f2e32-d273-49e7-a842-82282e89d1a3", "76d8f589-8d22-4541-a612-39e4c670e531"},
+ {"0a656823-9f91-4220-9dcd-58503e62e6e8", "76d8f589-8d22-4541-a612-39e4c670e531"},
+ {"175ead9a-a095-43a5-9f2c-e3a293938ff3", "76d8f589-8d22-4541-a612-39e4c670e531"},
+ {"d354a1ea-0945-4c84-be17-ac01b8058d06", "0175ce91-1363-4cc9-8eb9-e67e82f9fdbb"},
+ {"5fe458ce-a28d-4478-9ced-7ee23a539cb6", "0175ce91-1363-4cc9-8eb9-e67e82f9fdbb"},
+ {"30e6d53c-a737-47bc-8b06-8e8f9cf7a3b3", "0175ce91-1363-4cc9-8eb9-e67e82f9fdbb"},
+ {"77d52264-940c-461b-b67f-e3e68cb5b7f0", "8414fe9e-67a9-4b0b-854a-e7f8f56cca50"},
+ {"201d6ff3-ffe3-4126-928b-cba38938ebb5", "8414fe9e-67a9-4b0b-854a-e7f8f56cca50"},
+ {"303fa653-8516-4a4f-9b6b-60c3f1f45f29", "8414fe9e-67a9-4b0b-854a-e7f8f56cca50"},
+ }
+ type streamHandle struct {
+ timer *mock.MockTimer
+ stream remoteexecution.Execution_ExecuteClient
+ initialSizeClassLearner *mock.MockLearner
+ }
+ var streamHandles []streamHandle
+
+ // Schedule some actions belonging to three different invocation IDs.
+ for i, p := range operationParameters { + action := &remoteexecution.Action{ + DoNotCache: true, + CommandDigest: &remoteexecution.Digest{ + Hash: "9b818e201c59f31954cb1e126cc67562ec545ab4", + SizeBytes: 456, + }, + } + contentAddressableStorage.EXPECT().Get( + gomock.Any(), + digest.MustNewDigest("", remoteexecution.DigestFunction_SHA1, "0474d2f48968a56da4de20718d8ac23aafd80709", 123), + ).Return(buffer.NewProtoBufferFromProto(action, buffer.UserProvided)) + requestMetadata := &remoteexecution.RequestMetadata{ + ToolInvocationId: p.toolInvocationID, + } + requestMetadataAny, err := anypb.New(requestMetadata) + require.NoError(t, err) + initialSizeClassSelector := mock.NewMockSelector(ctrl) + actionRouter.EXPECT().RouteAction(gomock.Any(), gomock.Any(), testutil.EqProto(t, action), nil).Return( + platform.MustNewKey("", platformForTesting), + []invocation.Key{invocation.MustNewKey(requestMetadataAny)}, + initialSizeClassSelector, + nil, + ) + initialSizeClassLearner := mock.NewMockLearner(ctrl) + initialSizeClassSelector.EXPECT().Select([]uint32{0}). 
+ Return(0, 30*time.Second, time.Minute, initialSizeClassLearner) + clock.EXPECT().Now().Return(time.Unix(1010+int64(i), 0)) + timer := mock.NewMockTimer(ctrl) + clock.EXPECT().NewTimer(time.Minute).Return(timer, nil) + uuidGenerator.EXPECT().Call().Return(uuid.Parse(p.operationName)) + + stream, err := executionClient.Execute(ctx, &remoteexecution.ExecuteRequest{ + ActionDigest: &remoteexecution.Digest{ + Hash: "0474d2f48968a56da4de20718d8ac23aafd80709", + SizeBytes: 123, + }, + }) + require.NoError(t, err) + update, err := stream.Recv() + require.NoError(t, err) + metadata, err := anypb.New(&remoteexecution.ExecuteOperationMetadata{ + Stage: remoteexecution.ExecutionStage_QUEUED, + ActionDigest: &remoteexecution.Digest{ + Hash: "0474d2f48968a56da4de20718d8ac23aafd80709", + SizeBytes: 123, + }, + }) + require.NoError(t, err) + testutil.RequireEqualProto(t, update, &longrunningpb.Operation{ + Name: p.operationName, + Metadata: metadata, + }) + + streamHandles = append(streamHandles, streamHandle{ + timer: timer, + stream: stream, + initialSizeClassLearner: initialSizeClassLearner, + }) + } + + // Let a worker run the actions sequentially. The order in which + // execution takes place differs from the queueing order, + // because of stickiness. Every invocation is permitted to run + // for at least three seconds before switching to tasks + // belonging to another invocation. + for i, operationIndex := range []int{0, 1, 3, 4, 6, 7, 2, 5, 8} { + // Starting execution should cause the client to receive + // an EXECUTING message. 
+ clock.EXPECT().Now().Return(time.Unix(1030+int64(i)*2, 0)).Times(2)
+ streamHandles[operationIndex].timer.EXPECT().Stop()
+ timer := mock.NewMockTimer(ctrl)
+ clock.EXPECT().NewTimer(time.Minute).Return(timer, nil)
+ response, err := buildQueue.Synchronize(ctx, &remoteworker.SynchronizeRequest{
+ WorkerId: map[string]string{
+ "hostname": "worker123",
+ "thread": "42",
+ },
+ Platform: platformForTesting,
+ CurrentState: &remoteworker.CurrentState{
+ WorkerState: &remoteworker.CurrentState_Idle{
+ Idle: &emptypb.Empty{},
+ },
+ },
+ })
+ require.NoError(t, err)
+ testutil.RequireEqualProto(t, &remoteworker.SynchronizeResponse{
+ NextSynchronizationAt: &timestamppb.Timestamp{Seconds: 1040 + int64(i)*2},
+ DesiredState: &remoteworker.DesiredState{
+ WorkerState: &remoteworker.DesiredState_Executing_{
+ Executing: &remoteworker.DesiredState_Executing{
+ DigestFunction: remoteexecution.DigestFunction_SHA1,
+ ActionDigest: &remoteexecution.Digest{
+ Hash: "0474d2f48968a56da4de20718d8ac23aafd80709",
+ SizeBytes: 123,
+ },
+ Action: &remoteexecution.Action{
+ CommandDigest: &remoteexecution.Digest{
+ Hash: "9b818e201c59f31954cb1e126cc67562ec545ab4",
+ SizeBytes: 456,
+ },
+ DoNotCache: true,
+ Timeout: &durationpb.Duration{Seconds: 60},
+ },
+ QueuedTimestamp: &timestamppb.Timestamp{Seconds: 1010 + int64(operationIndex)},
+ },
+ },
+ },
+ }, response)
+
+ stream := streamHandles[operationIndex].stream
+ update, err := stream.Recv()
+ require.NoError(t, err)
+ metadata, err := anypb.New(&remoteexecution.ExecuteOperationMetadata{
+ Stage: remoteexecution.ExecutionStage_EXECUTING,
+ ActionDigest: &remoteexecution.Digest{
+ Hash: "0474d2f48968a56da4de20718d8ac23aafd80709",
+ SizeBytes: 123,
+ },
+ })
+ require.NoError(t, err)
+ operationName := operationParameters[operationIndex].operationName
+ testutil.RequireEqualProto(t, update, &longrunningpb.Operation{
+ Name: operationName,
+ Metadata: metadata,
+ })
+
+ // Finishing execution should cause the client to
+ // receive a
COMPLETED message. + streamHandles[operationIndex].initialSizeClassLearner.EXPECT().Succeeded(time.Duration(0), []uint32{0}) + clock.EXPECT().Now().Return(time.Unix(1021+int64(i)*2, 0)).Times(3) + timer.EXPECT().Stop() + response, err = buildQueue.Synchronize(ctx, &remoteworker.SynchronizeRequest{ + WorkerId: map[string]string{ + "hostname": "worker123", + "thread": "42", + }, + Platform: platformForTesting, + CurrentState: &remoteworker.CurrentState{ + WorkerState: &remoteworker.CurrentState_Executing_{ + Executing: &remoteworker.CurrentState_Executing{ + ActionDigest: &remoteexecution.Digest{ + Hash: "0474d2f48968a56da4de20718d8ac23aafd80709", + SizeBytes: 123, + }, + ExecutionState: &remoteworker.CurrentState_Executing_Completed{ + Completed: &remoteexecution.ExecuteResponse{ + Result: &remoteexecution.ActionResult{}, + }, + }, + }, + }, + }, + PreferBeingIdle: true, + }) + + update, err = stream.Recv() + require.NoError(t, err) + metadata, err = anypb.New(&remoteexecution.ExecuteOperationMetadata{ + Stage: remoteexecution.ExecutionStage_COMPLETED, + ActionDigest: &remoteexecution.Digest{ + Hash: "0474d2f48968a56da4de20718d8ac23aafd80709", + SizeBytes: 123, + }, + }) + require.NoError(t, err) + executeResponse, err := anypb.New(&remoteexecution.ExecuteResponse{ + Result: &remoteexecution.ActionResult{}, + }) + require.NoError(t, err) + testutil.RequireEqualProto(t, update, &longrunningpb.Operation{ + Name: operationName, + Metadata: metadata, + Done: true, + Result: &longrunningpb.Operation_Response{Response: executeResponse}, + }) + } +} + +func TestInMemoryBuildQueueAuthorization(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + contentAddressableStorage := mock.NewMockBlobAccess(ctrl) + clock := mock.NewMockClock(ctrl) + clock.EXPECT().Now().Return(time.Unix(0, 0)).AnyTimes() + uuidGenerator := mock.NewMockUUIDGenerator(ctrl) + actionRouter := mock.NewMockActionRouter(ctrl) + authorizer := mock.NewMockAuthorizer(ctrl) + buildQueue 
:= scheduler.NewInMemoryBuildQueue(contentAddressableStorage, clock, uuidGenerator.Call, &buildQueueConfigurationForTesting, 10000, actionRouter, authorizer, authorizer, authorizer) + beepboop := digest.MustNewInstanceName("beepboop") + + t.Run("GetCapabilities-NotAuthorized", func(t *testing.T) { + authorizer.EXPECT().Authorize(gomock.Any(), []digest.InstanceName{beepboop}).Return([]error{status.Error(codes.PermissionDenied, "You shall not pass")}) + _, err := buildQueue.GetCapabilities(ctx, beepboop) + testutil.RequireEqualStatus(t, status.Error(codes.PermissionDenied, "Authorization: You shall not pass"), err) + }) + + t.Run("GetCapabilities-Error", func(t *testing.T) { + authorizer.EXPECT().Authorize(gomock.Any(), []digest.InstanceName{beepboop}).Return([]error{status.Error(codes.Internal, "I fell over")}) + _, err := buildQueue.GetCapabilities(ctx, beepboop) + testutil.RequireEqualStatus(t, status.Error(codes.Internal, "Authorization: I fell over"), err) + }) + + t.Run("Execute-NotAuthorized", func(t *testing.T) { + executionClient := getExecutionClient(t, buildQueue) + authorizer.EXPECT().Authorize(gomock.Any(), []digest.InstanceName{beepboop}).Return([]error{status.Error(codes.PermissionDenied, "You shall not pass")}) + stream, err := executionClient.Execute(ctx, &remoteexecution.ExecuteRequest{ + ActionDigest: &remoteexecution.Digest{ + Hash: "61c585c297d00409bd477b6b80759c94ec545ab4", + SizeBytes: 456, + }, + InstanceName: "beepboop", + }) + require.NoError(t, err) + _, err = stream.Recv() + testutil.RequireEqualStatus(t, status.Error(codes.PermissionDenied, "Authorization: You shall not pass"), err) + }) + + t.Run("WaitExecution", func(t *testing.T) { + buildQueue.RegisterPredeclaredPlatformQueue( + digest.MustNewInstanceName(""), + &remoteexecution.Platform{}, + /* workerInvocationStickinessLimits = */ nil, + /* maximumQueuedBackgroundLearningOperations = */ 0, + /* backgroundLearningOperationPriority = */ 0, + /* maximumSizeClass = */ 0) + + // Allow 
the Execute + authorizer.EXPECT().Authorize(gomock.Any(), []digest.InstanceName{beepboop}).Return([]error{nil}) + + action := &remoteexecution.Action{ + CommandDigest: &remoteexecution.Digest{ + Hash: "da39a3ee5e6b4b0d3255bfef95601890afd80709", + SizeBytes: 123, + }, + Platform: &remoteexecution.Platform{}, + } + + contentAddressableStorage.EXPECT().Get( + gomock.Any(), + digest.MustNewDigest("beepboop", remoteexecution.DigestFunction_SHA1, "61c585c297d00409bd477b6b80759c94ec545ab4", 456), + ).Return(buffer.NewProtoBufferFromProto(action, buffer.UserProvided)) + + initialSizeClassSelector := mock.NewMockSelector(ctrl) + actionRouter.EXPECT().RouteAction(gomock.Any(), gomock.Any(), testutil.EqProto(t, action), nil). + Return(platform.MustNewKey("beepboop", &remoteexecution.Platform{}), nil, initialSizeClassSelector, nil) + + initialSizeClassLearner := mock.NewMockLearner(ctrl) + initialSizeClassSelector.EXPECT().Select([]uint32{0}). + Return(0, 15*time.Minute, 30*time.Minute, initialSizeClassLearner) + + timer1 := mock.NewMockTimer(ctrl) + clock.EXPECT().NewTimer(time.Minute).Return(timer1, nil) + + uuidGenerator.EXPECT().Call().Return(uuid.Parse("36ebab65-3c4f-4faf-818b-2eabb4cd1b02")) + + executionClient := getExecutionClient(t, buildQueue) + + // Error on the WaitExecution + authorizer.EXPECT().Authorize(gomock.Any(), []digest.InstanceName{beepboop}).Return([]error{status.Error(codes.PermissionDenied, "You shall not pass")}) + stream, err := executionClient.Execute(ctx, &remoteexecution.ExecuteRequest{ + ActionDigest: &remoteexecution.Digest{ + Hash: "61c585c297d00409bd477b6b80759c94ec545ab4", + SizeBytes: 456, + }, + InstanceName: "beepboop", + }) + require.NoError(t, err) + update, err := stream.Recv() + require.NoError(t, err) + metadata, err := anypb.New(&remoteexecution.ExecuteOperationMetadata{ + Stage: remoteexecution.ExecutionStage_QUEUED, + ActionDigest: &remoteexecution.Digest{ + Hash: "61c585c297d00409bd477b6b80759c94ec545ab4", + SizeBytes: 456, + }, + 
}) + testutil.RequireEqualProto(t, update, &longrunningpb.Operation{ + Name: "36ebab65-3c4f-4faf-818b-2eabb4cd1b02", + Metadata: metadata, + }) + + stream2, err := executionClient.WaitExecution(ctx, &remoteexecution.WaitExecutionRequest{ + Name: "36ebab65-3c4f-4faf-818b-2eabb4cd1b02", + }) + require.NoError(t, err) + _, err = stream2.Recv() + testutil.RequireEqualStatus(t, status.Error(codes.PermissionDenied, "Authorization: You shall not pass"), err) + }) +} + +func TestInMemoryBuildQueueNestedInvocationsSynchronization(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + contentAddressableStorage := mock.NewMockBlobAccess(ctrl) + mockClock := mock.NewMockClock(ctrl) + mockClock.EXPECT().Now().Return(time.Unix(0, 0)) + uuidGenerator := mock.NewMockUUIDGenerator(ctrl) + actionRouter := mock.NewMockActionRouter(ctrl) + buildQueue := scheduler.NewInMemoryBuildQueue(contentAddressableStorage, mockClock, uuidGenerator.Call, &buildQueueConfigurationForTesting, 10000, actionRouter, allowAllAuthorizer, allowAllAuthorizer, allowAllAuthorizer) + executionClient := getExecutionClient(t, buildQueue) + + mockClock.EXPECT().Now().Return(time.Unix(1000, 0)) + require.NoError(t, buildQueue.RegisterPredeclaredPlatformQueue( + digest.EmptyInstanceName, + platformForTesting, + /* workerInvocationStickinessLimits = */ nil, + /* maximumQueuedBackgroundLearningOperations = */ 0, + /* backgroundLearningOperationPriority = */ 0, + /* maximumSizeClass = */ 0)) + + // Create ten workers. Let all of them complete a task that + // belonged to the same correlated invocations ID, but a + // different tool invocation ID. 
+ correlatedInvocationsID := &remoteexecution.RequestMetadata{ + CorrelatedInvocationsId: "7e5d4921-a6d2-480e-8e2d-a4afecd7ab33", + } + correlatedInvocationsIDAny, err := anypb.New(correlatedInvocationsID) + require.NoError(t, err) + for i, p := range []struct { + operationName string + toolInvocationID string + }{ + {"716f2e32-d273-49e7-a842-82282e89d1a3", "93b8f304-4a3d-4832-91b2-e596e8a08f42"}, + {"0a656823-9f91-4220-9dcd-58503e62e6e8", "e3658358-6325-4405-a43c-b3f81ca6e52a"}, + {"175ead9a-a095-43a5-9f2c-e3a293938ff3", "a76ecbfc-4a3a-4822-99c5-30c0ff7dbbb8"}, + {"d354a1ea-0945-4c84-be17-ac01b8058d06", "167fda12-0c84-4936-978f-2b54ebe3b600"}, + {"5fe458ce-a28d-4478-9ced-7ee23a539cb6", "02c0d7d2-2e0a-4664-8030-60ad5e9026bd"}, + {"30e6d53c-a737-47bc-8b06-8e8f9cf7a3b3", "892d2891-8344-4cb0-88b6-852106b2b329"}, + {"77d52264-940c-461b-b67f-e3e68cb5b7f0", "1ccd7a67-0b55-4242-a4a1-942064a79341"}, + {"201d6ff3-ffe3-4126-928b-cba38938ebb5", "cbb6455f-ede1-4314-ba63-a44f18188672"}, + {"303fa653-8516-4a4f-9b6b-60c3f1f45f29", "3c5c6475-5ce6-42ef-b8ba-aa0a67348174"}, + {"ca172285-ba23-42fd-a714-b582cad3bf32", "fd345278-02e8-48d8-8ff6-4f467c0cd942"}, + } { + action := &remoteexecution.Action{ + DoNotCache: true, + CommandDigest: &remoteexecution.Digest{ + Hash: "9b818e201c59f31954cb1e126cc67562ec545ab4", + SizeBytes: 456, + }, + } + contentAddressableStorage.EXPECT().Get( + gomock.Any(), + digest.MustNewDigest("", remoteexecution.DigestFunction_SHA1, "0474d2f48968a56da4de20718d8ac23aafd80709", 123), + ).Return(buffer.NewProtoBufferFromProto(action, buffer.UserProvided)) + toolInvocationID := &remoteexecution.RequestMetadata{ + ToolInvocationId: p.toolInvocationID, + } + toolInvocationIDAny, err := anypb.New(toolInvocationID) + require.NoError(t, err) + initialSizeClassSelector := mock.NewMockSelector(ctrl) + actionRouter.EXPECT().RouteAction(gomock.Any(), gomock.Any(), testutil.EqProto(t, action), nil).Return( + platform.MustNewKey("", platformForTesting), + []invocation.Key{ 
+ invocation.MustNewKey(correlatedInvocationsIDAny),
+ invocation.MustNewKey(toolInvocationIDAny),
+ },
+ initialSizeClassSelector,
+ nil,
+ )
+ initialSizeClassLearner := mock.NewMockLearner(ctrl)
+ initialSizeClassSelector.EXPECT().Select([]uint32{0}).
+ Return(0, 30*time.Second, time.Minute, initialSizeClassLearner)
+ mockClock.EXPECT().Now().Return(time.Unix(1010+int64(i), 0))
+ timer1 := mock.NewMockTimer(ctrl)
+ mockClock.EXPECT().NewTimer(time.Minute).Return(timer1, nil)
+ uuidGenerator.EXPECT().Call().Return(uuid.Parse(p.operationName))
+
+ actionDigest := &remoteexecution.Digest{
+ Hash: "0474d2f48968a56da4de20718d8ac23aafd80709",
+ SizeBytes: 123,
+ }
+ stream, err := executionClient.Execute(ctx, &remoteexecution.ExecuteRequest{
+ ActionDigest: actionDigest,
+ })
+ require.NoError(t, err)
+ update, err := stream.Recv()
+ require.NoError(t, err)
+ metadata, err := anypb.New(&remoteexecution.ExecuteOperationMetadata{
+ Stage: remoteexecution.ExecutionStage_QUEUED,
+ ActionDigest: actionDigest,
+ })
+ require.NoError(t, err)
+ testutil.RequireEqualProto(t, &longrunningpb.Operation{
+ Name: p.operationName,
+ Metadata: metadata,
+ }, update)
+
+ mockClock.EXPECT().Now().Return(time.Unix(1010+int64(i), 1)).Times(2)
+ timer1.EXPECT().Stop()
+ timer2 := mock.NewMockTimer(ctrl)
+ mockClock.EXPECT().NewTimer(time.Minute).Return(timer2, nil)
+ workerID := map[string]string{
+ "hostname": "worker123",
+ "thread": strconv.FormatInt(int64(i), 10),
+ }
+ response, err := buildQueue.Synchronize(ctx, &remoteworker.SynchronizeRequest{
+ WorkerId: workerID,
+ Platform: platformForTesting,
+ CurrentState: &remoteworker.CurrentState{
+ WorkerState: &remoteworker.CurrentState_Idle{
+ Idle: &emptypb.Empty{},
+ },
+ },
+ })
+ require.NoError(t, err)
+ testutil.RequireEqualProto(t, &remoteworker.SynchronizeResponse{
+ NextSynchronizationAt: &timestamppb.Timestamp{Seconds: 1020 + int64(i), Nanos: 1},
+ DesiredState: &remoteworker.DesiredState{
+ WorkerState: 
&remoteworker.DesiredState_Executing_{
+ Executing: &remoteworker.DesiredState_Executing{
+ DigestFunction: remoteexecution.DigestFunction_SHA1,
+ ActionDigest: actionDigest,
+ Action: &remoteexecution.Action{
+ DoNotCache: true,
+ CommandDigest: &remoteexecution.Digest{
+ Hash: "9b818e201c59f31954cb1e126cc67562ec545ab4",
+ SizeBytes: 456,
+ },
+ Timeout: &durationpb.Duration{Seconds: 60},
+ },
+ QueuedTimestamp: &timestamppb.Timestamp{Seconds: 1010 + int64(i)},
+ },
+ },
+ },
+ }, response)
+
+ update, err = stream.Recv()
+ require.NoError(t, err)
+ metadata, err = anypb.New(&remoteexecution.ExecuteOperationMetadata{
+ Stage: remoteexecution.ExecutionStage_EXECUTING,
+ ActionDigest: actionDigest,
+ })
+ require.NoError(t, err)
+ testutil.RequireEqualProto(t, update, &longrunningpb.Operation{
+ Name: p.operationName,
+ Metadata: metadata,
+ })
+
+ initialSizeClassLearner.EXPECT().Succeeded(time.Duration(0), []uint32{0})
+ mockClock.EXPECT().Now().Return(time.Unix(1010+int64(i), 2)).Times(3)
+ timer2.EXPECT().Stop()
+ response, err = buildQueue.Synchronize(ctx, &remoteworker.SynchronizeRequest{
+ WorkerId: workerID,
+ Platform: platformForTesting,
+ CurrentState: &remoteworker.CurrentState{
+ WorkerState: &remoteworker.CurrentState_Executing_{
+ Executing: &remoteworker.CurrentState_Executing{
+ ActionDigest: actionDigest,
+ ExecutionState: &remoteworker.CurrentState_Executing_Completed{
+ Completed: &remoteexecution.ExecuteResponse{
+ Result: &remoteexecution.ActionResult{},
+ },
+ },
+ },
+ },
+ },
+ PreferBeingIdle: true,
+ })
+ require.NoError(t, err)
+ testutil.RequireEqualProto(t, &remoteworker.SynchronizeResponse{
+ NextSynchronizationAt: &timestamppb.Timestamp{Seconds: 1010 + int64(i), Nanos: 2},
+ DesiredState: &remoteworker.DesiredState{
+ WorkerState: &remoteworker.DesiredState_Idle{
+ Idle: &emptypb.Empty{},
+ },
+ },
+ }, response)
+
+ update, err = stream.Recv()
+ require.NoError(t, err)
+ metadata, err = anypb.New(&remoteexecution.ExecuteOperationMetadata{
+ 
Stage: remoteexecution.ExecutionStage_COMPLETED, + ActionDigest: actionDigest, + }) + require.NoError(t, err) + executeResponse, err := anypb.New(&remoteexecution.ExecuteResponse{ + Result: &remoteexecution.ActionResult{}, + }) + require.NoError(t, err) + testutil.RequireEqualProto(t, &longrunningpb.Operation{ + Name: p.operationName, + Metadata: metadata, + Done: true, + Result: &longrunningpb.Operation_Response{Response: executeResponse}, + }, update) + } + + // At the top level, we should see an invocation with ten children. + invocationName := &buildqueuestate.InvocationName{ + SizeClassQueueName: &buildqueuestate.SizeClassQueueName{ + PlatformQueueName: &buildqueuestate.PlatformQueueName{ + Platform: platformForTesting, + }, + }, + } + mockClock.EXPECT().Now().Return(time.Unix(1030, 0)) + invocationStates, err := buildQueue.ListInvocationChildren(ctx, &buildqueuestate.ListInvocationChildrenRequest{ + InvocationName: invocationName, + Filter: buildqueuestate.ListInvocationChildrenRequest_ALL, + }) + require.NoError(t, err) + testutil.RequireEqualProto(t, &buildqueuestate.ListInvocationChildrenResponse{ + Children: []*buildqueuestate.InvocationChildState{ + { + Id: correlatedInvocationsIDAny, + State: &buildqueuestate.InvocationState{ + IdleWorkersCount: 10, + ChildrenCount: 10, + }, + }, + }, + }, invocationStates) + + // Let all ten workers perform a blocking Synchronize() call. 
+ allWorkersWait := make(chan struct{})
+ timers := make([]*mock.MockTimer, 0, 10)
+ wakeups := make([]chan<- time.Time, 0, 10)
+ for iIter := 0; iIter < 10; iIter++ {
+ i := iIter
+ mockClock.EXPECT().Now().Return(time.Unix(1040+int64(i), 0))
+ timer := mock.NewMockTimer(ctrl)
+ wakeup := make(chan time.Time, 1)
+ timerCreationWait := make(chan struct{})
+ mockClock.EXPECT().NewTimer(time.Minute).DoAndReturn(func(d time.Duration) (clock.Timer, <-chan time.Time) {
+ timerCreationWait <- struct{}{}
+ return timer, wakeup
+ })
+
+ go func() {
+ response, err := buildQueue.Synchronize(ctx, &remoteworker.SynchronizeRequest{
+ WorkerId: map[string]string{
+ "hostname": "worker123",
+ "thread": strconv.FormatInt(int64(i), 10),
+ },
+ Platform: platformForTesting,
+ CurrentState: &remoteworker.CurrentState{
+ WorkerState: &remoteworker.CurrentState_Idle{
+ Idle: &emptypb.Empty{},
+ },
+ },
+ })
+ require.NoError(t, err)
+ testutil.RequireEqualProto(t, &remoteworker.SynchronizeResponse{
+ NextSynchronizationAt: &timestamppb.Timestamp{Seconds: 1100 + int64(i)},
+ DesiredState: &remoteworker.DesiredState{
+ WorkerState: &remoteworker.DesiredState_Idle{
+ Idle: &emptypb.Empty{},
+ },
+ },
+ }, response)
+
+ allWorkersWait <- struct{}{}
+ }()
+
+ <-timerCreationWait
+ timers = append(timers, timer)
+ wakeups = append(wakeups, wakeup)
+ }
+
+ // Wake up all ten workers without receiving any new work. This
+ // should not cause any crashes.
+ for i := 0; i < 10; i++ { + timers[i].EXPECT().Stop() + wakeups[i] <- time.Unix(1100+int64(i), 0) + <-allWorkersWait + } +} diff --git a/pkg/scheduler/initialsizeclass/BUILD.bazel b/pkg/scheduler/initialsizeclass/BUILD.bazel new file mode 100644 index 0000000..d000f3e --- /dev/null +++ b/pkg/scheduler/initialsizeclass/BUILD.bazel @@ -0,0 +1,61 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "initialsizeclass", + srcs = [ + "action_timeout_extractor.go", + "analyzer.go", + "configuration.go", + "fallback_analyzer.go", + "feedback_driven_analyzer.go", + "outcomes.go", + "page_rank_strategy_calculator.go", + "smallest_size_class_strategy_calculator.go", + "strategy_calculator.go", + ], + importpath = "github.com/buildbarn/bb-remote-execution/pkg/scheduler/initialsizeclass", + visibility = ["//visibility:public"], + deps = [ + "//pkg/blobstore", + "//pkg/proto/configuration/scheduler", + "@com_github_bazelbuild_remote_apis//build/bazel/remote/execution/v2:execution", + "@com_github_buildbarn_bb_storage//pkg/blobstore", + "@com_github_buildbarn_bb_storage//pkg/clock", + "@com_github_buildbarn_bb_storage//pkg/digest", + "@com_github_buildbarn_bb_storage//pkg/proto/iscc", + "@com_github_buildbarn_bb_storage//pkg/random", + "@com_github_buildbarn_bb_storage//pkg/util", + "@com_github_prometheus_client_golang//prometheus", + "@org_golang_google_grpc//codes", + "@org_golang_google_grpc//status", + "@org_golang_google_protobuf//types/known/durationpb", + "@org_golang_google_protobuf//types/known/emptypb", + "@org_golang_google_protobuf//types/known/timestamppb", + ], +) + +go_test( + name = "initialsizeclass_test", + srcs = [ + "action_timeout_extractor_test.go", + "fallback_analyzer_test.go", + "feedback_driven_analyzer_test.go", + "outcomes_test.go", + "page_rank_strategy_calculator_test.go", + ], + deps = [ + ":initialsizeclass", + "//internal/mock", + 
"@com_github_bazelbuild_remote_apis//build/bazel/remote/execution/v2:execution", + "@com_github_buildbarn_bb_storage//pkg/digest", + "@com_github_buildbarn_bb_storage//pkg/proto/iscc", + "@com_github_buildbarn_bb_storage//pkg/testutil", + "@com_github_golang_mock//gomock", + "@com_github_stretchr_testify//require", + "@org_golang_google_grpc//codes", + "@org_golang_google_grpc//status", + "@org_golang_google_protobuf//types/known/durationpb", + "@org_golang_google_protobuf//types/known/emptypb", + "@org_golang_google_protobuf//types/known/timestamppb", + ], +) diff --git a/pkg/scheduler/initialsizeclass/action_timeout_extractor.go b/pkg/scheduler/initialsizeclass/action_timeout_extractor.go new file mode 100644 index 0000000..86448b8 --- /dev/null +++ b/pkg/scheduler/initialsizeclass/action_timeout_extractor.go @@ -0,0 +1,49 @@ +package initialsizeclass + +import ( + "time" + + remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + "github.com/buildbarn/bb-storage/pkg/util" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// ActionTimeoutExtractor is a helper type for extracting the execution +// timeout from an REv2 Action, taking configured default and maximum +// timeout values into account. +type ActionTimeoutExtractor struct { + defaultExecutionTimeout time.Duration + maximumExecutionTimeout time.Duration +} + +// NewActionTimeoutExtractor creates a new ActionTimeoutExtractor using +// the parameters provided. +func NewActionTimeoutExtractor(defaultExecutionTimeout, maximumExecutionTimeout time.Duration) *ActionTimeoutExtractor { + return &ActionTimeoutExtractor{ + defaultExecutionTimeout: defaultExecutionTimeout, + maximumExecutionTimeout: maximumExecutionTimeout, + } +} + +// ExtractTimeout extracts the execution timeout field from an REv2 +// Action, converting it to a time.Duration. It returns errors in case +// the provided execution timeout is invalid or out of bounds. 
+func (e ActionTimeoutExtractor) ExtractTimeout(action *remoteexecution.Action) (time.Duration, error) { + if action.Timeout == nil { + return e.defaultExecutionTimeout, nil + } + if err := action.Timeout.CheckValid(); err != nil { + return 0, util.StatusWrapWithCode(err, codes.InvalidArgument, "Invalid execution timeout") + } + executionTimeout := action.Timeout.AsDuration() + if executionTimeout < 0 || executionTimeout > e.maximumExecutionTimeout { + return 0, status.Errorf( + codes.InvalidArgument, + "Execution timeout of %s is outside permitted range [0s, %s]", + executionTimeout, + e.maximumExecutionTimeout) + } + return executionTimeout, nil +} diff --git a/pkg/scheduler/initialsizeclass/action_timeout_extractor_test.go b/pkg/scheduler/initialsizeclass/action_timeout_extractor_test.go new file mode 100644 index 0000000..5d9bfce --- /dev/null +++ b/pkg/scheduler/initialsizeclass/action_timeout_extractor_test.go @@ -0,0 +1,98 @@ +package initialsizeclass_test + +import ( + "testing" + "time" + + remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + "github.com/buildbarn/bb-remote-execution/pkg/scheduler/initialsizeclass" + "github.com/buildbarn/bb-storage/pkg/testutil" + "github.com/stretchr/testify/require" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/durationpb" +) + +func TestActionTimeoutExtractor(t *testing.T) { + actionTimeoutExtractor := initialsizeclass.NewActionTimeoutExtractor( + /* defaultExecutionTimeout = */ time.Hour, + /* maximumExecutionTimeout = */ 2*time.Hour) + + t.Run("DefaultExecutionTimeout", func(t *testing.T) { + // Specifying no timeout should yield the default. 
+ timeout, err := actionTimeoutExtractor.ExtractTimeout(&remoteexecution.Action{ + CommandDigest: &remoteexecution.Digest{ + Hash: "3f146c6bc1789053afb12db35f6e91ff", + SizeBytes: 104, + }, + InputRootDigest: &remoteexecution.Digest{ + Hash: "6fb86fab077bc2023cc1419cbc28998c", + SizeBytes: 493, + }, + }) + require.NoError(t, err) + require.Equal(t, time.Hour, timeout) + }) + + t.Run("InvalidTimeout", func(t *testing.T) { + _, err := actionTimeoutExtractor.ExtractTimeout(&remoteexecution.Action{ + CommandDigest: &remoteexecution.Digest{ + Hash: "3f146c6bc1789053afb12db35f6e91ff", + SizeBytes: 104, + }, + InputRootDigest: &remoteexecution.Digest{ + Hash: "6fb86fab077bc2023cc1419cbc28998c", + SizeBytes: 493, + }, + Timeout: &durationpb.Duration{Nanos: 1000000000}, + }) + testutil.RequirePrefixedStatus(t, status.Error(codes.InvalidArgument, "Invalid execution timeout: "), err) + }) + + t.Run("TimeoutTooLow", func(t *testing.T) { + _, err := actionTimeoutExtractor.ExtractTimeout(&remoteexecution.Action{ + CommandDigest: &remoteexecution.Digest{ + Hash: "3f146c6bc1789053afb12db35f6e91ff", + SizeBytes: 104, + }, + InputRootDigest: &remoteexecution.Digest{ + Hash: "6fb86fab077bc2023cc1419cbc28998c", + SizeBytes: 493, + }, + Timeout: &durationpb.Duration{Seconds: -1}, + }) + testutil.RequireEqualStatus(t, status.Error(codes.InvalidArgument, "Execution timeout of -1s is outside permitted range [0s, 2h0m0s]"), err) + }) + + t.Run("TimeoutTooHigh", func(t *testing.T) { + _, err := actionTimeoutExtractor.ExtractTimeout(&remoteexecution.Action{ + CommandDigest: &remoteexecution.Digest{ + Hash: "3f146c6bc1789053afb12db35f6e91ff", + SizeBytes: 104, + }, + InputRootDigest: &remoteexecution.Digest{ + Hash: "6fb86fab077bc2023cc1419cbc28998c", + SizeBytes: 493, + }, + Timeout: &durationpb.Duration{Seconds: 7201}, + }) + testutil.RequireEqualStatus(t, status.Error(codes.InvalidArgument, "Execution timeout of 2h0m1s is outside permitted range [0s, 2h0m0s]"), err) + }) + + 
t.Run("Success", func(t *testing.T) { + timeout, err := actionTimeoutExtractor.ExtractTimeout(&remoteexecution.Action{ + CommandDigest: &remoteexecution.Digest{ + Hash: "3f146c6bc1789053afb12db35f6e91ff", + SizeBytes: 104, + }, + InputRootDigest: &remoteexecution.Digest{ + Hash: "6fb86fab077bc2023cc1419cbc28998c", + SizeBytes: 493, + }, + Timeout: &durationpb.Duration{Seconds: 900}, + }) + require.NoError(t, err) + require.Equal(t, 15*time.Minute, timeout) + }) +} diff --git a/pkg/scheduler/initialsizeclass/analyzer.go b/pkg/scheduler/initialsizeclass/analyzer.go new file mode 100644 index 0000000..00221aa --- /dev/null +++ b/pkg/scheduler/initialsizeclass/analyzer.go @@ -0,0 +1,76 @@ +package initialsizeclass + +import ( + "context" + "time" + + remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + "github.com/buildbarn/bb-storage/pkg/digest" +) + +// Analyzer of REv2 Actions, determining which worker size class is the +// most suitable for running it, and what timeout to use. +type Analyzer interface { + // Analyze an REv2 Action. This operation should be called + // without holding any locks, as it may block. + // + // All methods called against the Selector that is returned and + // any Learners yielded from it should be called while holding a + // global lock. + Analyze(ctx context.Context, digestFunction digest.Function, action *remoteexecution.Action) (Selector, error) +} + +// Selector of a size class for an analyzed action. +type Selector interface { + // Given a list of size classes that are currently present, + // select a size class on which the action needs to be + // scheduled. + // + // The Selector can override the timeout of the action. When + // attempting to run an action on a smaller size class, it may + // be desirable to put a timeout in place that is based on + // previous execution times observed on larger size classes. 
+ // This ensures that the system remains responsive, even in the + // case of mispredictions. + // + // A Learner is returned that the scheduler must use to + // communicate the outcome of the execution, so that future + // executions have a lower probability of making mispredictions. + Select(sizeClasses []uint32) (sizeClass int, expectedDuration, timeout time.Duration, learner Learner) + + // Clients have abandoned the action, meaning that no size class + // selection decision needs to be made. This may, for example, + // happen if the action is deduplicated against one that is + // already running. + Abandoned() +} + +// Learner for size class selection. The information provided by the +// scheduler to this object may allow the Analyzer and Selector to make +// more accurate predictions in the future. +type Learner interface { + // The action completed successfully. The execution time is + // provided. + // + // If this method returns a nil Learner, the scheduler can + // finalize the operation entirely. If this method returns a new + // Learner, the scheduler is requested to run the action another + // time in the background, just for learning purposes. It is + // valid for the scheduler to already communicate completion to + // the client. The scheduler may limit the amount of work it's + // willing to run in the background. + Succeeded(duration time.Duration, sizeClasses []uint32) (sizeClass int, expectedDuration, timeout time.Duration, learner Learner) + + // The action completed with a failure. + // + // If this method returns a nil Learner, the execution failure + // is definitive and should be propagated to the client. If this + // method returns a new Learner, execution must be retried on + // the largest size class, using the timeout that is returned. + Failed(timedOut bool) (expectedDuration, timeout time.Duration, learner Learner) + + // Clients have abandoned the action, meaning that execution of + // the action was terminated. 
Nothing may be learned from this + // action. + Abandoned() +} diff --git a/pkg/scheduler/initialsizeclass/configuration.go b/pkg/scheduler/initialsizeclass/configuration.go new file mode 100644 index 0000000..7b2100e --- /dev/null +++ b/pkg/scheduler/initialsizeclass/configuration.go @@ -0,0 +1,64 @@ +package initialsizeclass + +import ( + pb "github.com/buildbarn/bb-remote-execution/pkg/proto/configuration/scheduler" + "github.com/buildbarn/bb-storage/pkg/clock" + "github.com/buildbarn/bb-storage/pkg/random" + "github.com/buildbarn/bb-storage/pkg/util" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// NewAnalyzerFromConfiguration creates a new initial size class +// analyzer based on options provided in a configuration file. +func NewAnalyzerFromConfiguration(configuration *pb.InitialSizeClassAnalyzerConfiguration, previousExecutionStatsStore PreviousExecutionStatsStore) (Analyzer, error) { + if configuration == nil { + return nil, status.Error(codes.InvalidArgument, "No initial size class analyzer configuration provided") + } + + defaultExecutionTimeout := configuration.DefaultExecutionTimeout + if err := defaultExecutionTimeout.CheckValid(); err != nil { + return nil, util.StatusWrap(err, "Invalid default execution timeout") + } + maximumExecutionTimeout := configuration.MaximumExecutionTimeout + if err := maximumExecutionTimeout.CheckValid(); err != nil { + return nil, util.StatusWrap(err, "Invalid maximum execution timeout") + } + actionTimeoutExtractor := NewActionTimeoutExtractor( + defaultExecutionTimeout.AsDuration(), + maximumExecutionTimeout.AsDuration()) + + if fdConfiguration := configuration.FeedbackDriven; fdConfiguration != nil { + if previousExecutionStatsStore == nil { + return nil, status.Error(codes.InvalidArgument, "Feedback driven analysis can only be enabled if an Initial Size Class Cache (ISCC) is configured") + } + failureCacheDuration := fdConfiguration.FailureCacheDuration + if err := 
failureCacheDuration.CheckValid(); err != nil { + return nil, util.StatusWrap(err, "Invalid failure cache duration") + } + + strategyCalculator := SmallestSizeClassStrategyCalculator + if pageRankConfiguration := fdConfiguration.PageRank; pageRankConfiguration != nil { + minimumExecutionTimeout := pageRankConfiguration.MinimumExecutionTimeout + if err := minimumExecutionTimeout.CheckValid(); err != nil { + return nil, util.StatusWrap(err, "Invalid minimum acceptable execution time") + } + strategyCalculator = NewPageRankStrategyCalculator( + minimumExecutionTimeout.AsDuration(), + pageRankConfiguration.AcceptableExecutionTimeIncreaseExponent, + pageRankConfiguration.SmallerSizeClassExecutionTimeoutMultiplier, + pageRankConfiguration.MaximumConvergenceError) + } + + return NewFeedbackDrivenAnalyzer( + previousExecutionStatsStore, + random.NewFastSingleThreadedGenerator(), + clock.SystemClock, + actionTimeoutExtractor, + failureCacheDuration.AsDuration(), + strategyCalculator, + int(fdConfiguration.HistorySize)), nil + } + return NewFallbackAnalyzer(actionTimeoutExtractor), nil +} diff --git a/pkg/scheduler/initialsizeclass/fallback_analyzer.go b/pkg/scheduler/initialsizeclass/fallback_analyzer.go new file mode 100644 index 0000000..6e4c423 --- /dev/null +++ b/pkg/scheduler/initialsizeclass/fallback_analyzer.go @@ -0,0 +1,84 @@ +package initialsizeclass + +import ( + "context" + "time" + + remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + "github.com/buildbarn/bb-storage/pkg/digest" +) + +type fallbackAnalyzer struct { + actionTimeoutExtractor *ActionTimeoutExtractor +} + +// NewFallbackAnalyzer creates a simple Analyzer that runs all actions +// on the smallest size class. Upon failure, the action is retried on +// the largest size class. +// +// This Analyzer is ideal for setups that only use a single size class, +// or if the number of actions that does not succeed on the smallest +// size cache is very low. 
Any more complex setup should use +// FeedbackDrivenAnalyzer. +func NewFallbackAnalyzer(actionTimeoutExtractor *ActionTimeoutExtractor) Analyzer { + return &fallbackAnalyzer{ + actionTimeoutExtractor: actionTimeoutExtractor, + } +} + +func (a *fallbackAnalyzer) Analyze(ctx context.Context, digestFunction digest.Function, action *remoteexecution.Action) (Selector, error) { + timeout, err := a.actionTimeoutExtractor.ExtractTimeout(action) + if err != nil { + return nil, err + } + return fallbackSelector{ + timeout: timeout, + }, nil +} + +type fallbackSelector struct { + timeout time.Duration +} + +func (s fallbackSelector) Select(sizeClasses []uint32) (int, time.Duration, time.Duration, Learner) { + if len(sizeClasses) > 1 { + // Multiple size classes available. Run all actions on + // the smallest size class, falling back to the largest. + return 0, s.timeout, s.timeout, smallerFallbackLearner{ + timeout: s.timeout, + } + } + return 0, s.timeout, s.timeout, largestFallbackLearner{} +} + +func (fallbackSelector) Abandoned() {} + +type fallbackLearner struct{} + +func (fallbackLearner) Succeeded(duration time.Duration, sizeClasses []uint32) (int, time.Duration, time.Duration, Learner) { + // There is no learning that needs to be performed in the + // background. + return 0, 0, 0, nil +} + +func (fallbackLearner) Abandoned() {} + +type smallerFallbackLearner struct { + fallbackLearner + timeout time.Duration +} + +func (l smallerFallbackLearner) Failed(timedOut bool) (time.Duration, time.Duration, Learner) { + // Action failed on a smaller size class. Retry on the largest + // size class. + return l.timeout, l.timeout, largestFallbackLearner{} +} + +type largestFallbackLearner struct { + fallbackLearner +} + +func (largestFallbackLearner) Failed(timedOut bool) (time.Duration, time.Duration, Learner) { + // Action failed on the largest size class. 
+ return 0, 0, nil +} diff --git a/pkg/scheduler/initialsizeclass/fallback_analyzer_test.go b/pkg/scheduler/initialsizeclass/fallback_analyzer_test.go new file mode 100644 index 0000000..d397042 --- /dev/null +++ b/pkg/scheduler/initialsizeclass/fallback_analyzer_test.go @@ -0,0 +1,98 @@ +package initialsizeclass_test + +import ( + "context" + "testing" + "time" + + remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + "github.com/buildbarn/bb-remote-execution/pkg/scheduler/initialsizeclass" + "github.com/buildbarn/bb-storage/pkg/digest" + "github.com/stretchr/testify/require" + + "google.golang.org/protobuf/types/known/durationpb" +) + +func TestFallbackAnalyzer(t *testing.T) { + ctx := context.Background() + analyzer := initialsizeclass.NewFallbackAnalyzer( + initialsizeclass.NewActionTimeoutExtractor( + 30*time.Minute, + 60*time.Minute)) + + exampleDigestFunction := digest.MustNewFunction("hello", remoteexecution.DigestFunction_MD5) + exampleAction := &remoteexecution.Action{ + CommandDigest: &remoteexecution.Digest{ + Hash: "bf4d2b56ee892c3fc862cdb863a6c2f4", + SizeBytes: 720, + }, + InputRootDigest: &remoteexecution.Digest{ + Hash: "9a0be105e682022830da33578b909521", + SizeBytes: 951, + }, + Timeout: &durationpb.Duration{Seconds: 300}, + } + + t.Run("SelectorAbandoned", func(t *testing.T) { + // Action that is analyzed, but that is immediately + // abandoned. This should have no effect. + selector, err := analyzer.Analyze(ctx, exampleDigestFunction, exampleAction) + require.NoError(t, err) + + selector.Abandoned() + }) + + t.Run("SingleSizeClassFailure", func(t *testing.T) { + // When we have a single size class, we shouldn't do any + // retried upon failure. 
+ selector, err := analyzer.Analyze(ctx, exampleDigestFunction, exampleAction) + require.NoError(t, err) + + sizeClassIndex, expectedDuration1, timeout1, learner1 := selector.Select([]uint32{4}) + require.Equal(t, 0, sizeClassIndex) + require.Equal(t, 300*time.Second, expectedDuration1) + require.Equal(t, 300*time.Second, timeout1) + + _, _, learner2 := learner1.Failed(true) + require.Nil(t, learner2) + }) + + t.Run("MultipleSizeClassFailure", func(t *testing.T) { + // When we have multiple size classes, we should first + // try executing on the smallest size class, followed by + // executing on the largest one. + selector, err := analyzer.Analyze(ctx, exampleDigestFunction, exampleAction) + require.NoError(t, err) + + sizeClassIndex, expectedDuration1, timeout1, learner1 := selector.Select([]uint32{1, 2, 4, 8}) + require.Equal(t, 0, sizeClassIndex) + require.Equal(t, 300*time.Second, expectedDuration1) + require.Equal(t, 300*time.Second, timeout1) + + expectedDuration2, timeout2, learner2 := learner1.Failed(true) + require.NotNil(t, learner2) + require.Equal(t, 300*time.Second, expectedDuration2) + require.Equal(t, 300*time.Second, timeout2) + + _, _, learner3 := learner2.Failed(false) + require.Nil(t, learner3) + }) + + t.Run("MultipleSizeClassFirstSuccess", func(t *testing.T) { + // When we have multiple size classes and successfully + // complete an action on the smallest size class, there + // is nothing else to do. There won't be any retried on + // a larger size class. 
+ selector, err := analyzer.Analyze(ctx, exampleDigestFunction, exampleAction) + require.NoError(t, err) + + sizeClassIndex1, expectedDuration1, timeout1, learner1 := selector.Select([]uint32{1, 2, 4, 8}) + require.NotNil(t, learner1) + require.Equal(t, 0, sizeClassIndex1) + require.Equal(t, 300*time.Second, expectedDuration1) + require.Equal(t, 300*time.Second, timeout1) + + _, _, _, learner2 := learner1.Succeeded(100*time.Second, []uint32{1, 2, 4, 8}) + require.Nil(t, learner2) + }) +} diff --git a/pkg/scheduler/initialsizeclass/feedback_driven_analyzer.go b/pkg/scheduler/initialsizeclass/feedback_driven_analyzer.go new file mode 100644 index 0000000..5bf9dca --- /dev/null +++ b/pkg/scheduler/initialsizeclass/feedback_driven_analyzer.go @@ -0,0 +1,431 @@ +package initialsizeclass + +import ( + "context" + "time" + + remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + re_blobstore "github.com/buildbarn/bb-remote-execution/pkg/blobstore" + "github.com/buildbarn/bb-storage/pkg/blobstore" + "github.com/buildbarn/bb-storage/pkg/clock" + "github.com/buildbarn/bb-storage/pkg/digest" + "github.com/buildbarn/bb-storage/pkg/proto/iscc" + "github.com/buildbarn/bb-storage/pkg/random" + "github.com/buildbarn/bb-storage/pkg/util" + + "google.golang.org/grpc/codes" + "google.golang.org/protobuf/types/known/durationpb" + "google.golang.org/protobuf/types/known/emptypb" + "google.golang.org/protobuf/types/known/timestamppb" +) + +// PreviousExecutionStatsStore is used by FeedbackDrivenAnalyzer +// to gain access to previous execution stats stored in the +// Initial Size Class Cache (ISCC). +type PreviousExecutionStatsStore re_blobstore.MutableProtoStore[*iscc.PreviousExecutionStats] + +// PreviousExecutionStatsHandle refers to a single previous execution +// stats message read from the Initial Size Class Cache (ISCC). 
+type PreviousExecutionStatsHandle re_blobstore.MutableProtoHandle[*iscc.PreviousExecutionStats] + +type feedbackDrivenAnalyzer struct { + store PreviousExecutionStatsStore + randomNumberGenerator random.SingleThreadedGenerator + clock clock.Clock + actionTimeoutExtractor *ActionTimeoutExtractor + failureCacheDuration time.Duration + strategyCalculator StrategyCalculator + historySize int +} + +// NewFeedbackDrivenAnalyzer creates an Analyzer that selects the +// initial size class on which actions are run by reading previous +// execution stats from the Initial Size Class Cache (ISCC) and +// analyzing these results. Upon completion, stats in the ISCC are +// updated. +func NewFeedbackDrivenAnalyzer(store PreviousExecutionStatsStore, randomNumberGenerator random.SingleThreadedGenerator, clock clock.Clock, actionTimeoutExtractor *ActionTimeoutExtractor, failureCacheDuration time.Duration, strategyCalculator StrategyCalculator, historySize int) Analyzer { + return &feedbackDrivenAnalyzer{ + store: store, + randomNumberGenerator: randomNumberGenerator, + clock: clock, + actionTimeoutExtractor: actionTimeoutExtractor, + failureCacheDuration: failureCacheDuration, + strategyCalculator: strategyCalculator, + historySize: historySize, + } +} + +func (a *feedbackDrivenAnalyzer) Analyze(ctx context.Context, digestFunction digest.Function, action *remoteexecution.Action) (Selector, error) { + timeout, err := a.actionTimeoutExtractor.ExtractTimeout(action) + if err != nil { + return nil, err + } + reducedActionDigest, err := blobstore.GetReducedActionDigest(digestFunction, action) + if err != nil { + return nil, util.StatusWrapWithCode(err, codes.InvalidArgument, "Failed to obtain reduced action digest") + } + handle, err := a.store.Get(ctx, reducedActionDigest) + if err != nil { + return nil, util.StatusWrapf(err, "Failed to read previous execution stats for reduced action digest %#v", reducedActionDigest.String()) + } + return &feedbackDrivenSelector{ + analyzer: a, + 
handle: handle, + originalTimeout: timeout, + }, nil +} + +type feedbackDrivenSelector struct { + analyzer *feedbackDrivenAnalyzer + handle PreviousExecutionStatsHandle + originalTimeout time.Duration +} + +func getExpectedExecutionDuration(perSizeClassStatsMap map[uint32]*iscc.PerSizeClassStats, sizeClass uint32, timeout time.Duration) time.Duration { + if perSizeClassStats, ok := perSizeClassStatsMap[sizeClass]; ok { + if medianExecutionTime := getOutcomesFromPreviousExecutions(perSizeClassStats.PreviousExecutions).GetMedianExecutionTime(); medianExecutionTime != nil && *medianExecutionTime < timeout { + return *medianExecutionTime + } + } + return timeout +} + +func (s *feedbackDrivenSelector) Select(sizeClasses []uint32) (int, time.Duration, time.Duration, Learner) { + a := s.analyzer + stats := s.handle.GetMutableProto() + if stats.SizeClasses == nil { + stats.SizeClasses = map[uint32]*iscc.PerSizeClassStats{} + } + perSizeClassStatsMap := stats.SizeClasses + largestSizeClass := sizeClasses[len(sizeClasses)-1] + if lastSeenFailure := stats.LastSeenFailure; lastSeenFailure.CheckValid() != nil || lastSeenFailure.AsTime().Before(a.clock.Now().Add(-a.failureCacheDuration)) { + strategies := a.strategyCalculator.GetStrategies(perSizeClassStatsMap, sizeClasses, s.originalTimeout) + + // Randomly pick a size class according to the probabilities + // that we computed above. + r := a.randomNumberGenerator.Float64() + for i, strategy := range strategies { + if r < strategy.Probability { + smallerSizeClass := sizeClasses[i] + if strategy.RunInBackground { + // The action is prone to failures. Run + // it on the largest size class first. + // Upon success, still run it on the + // smaller size class for training + // purposes. 
+ return len(sizeClasses) - 1, + getExpectedExecutionDuration(perSizeClassStatsMap, largestSizeClass, s.originalTimeout), + s.originalTimeout, + &largestBackgroundLearner{ + cleanLearner: cleanLearner{ + baseLearner: baseLearner{ + analyzer: s.analyzer, + handle: s.handle, + }, + }, + largestSizeClass: largestSizeClass, + largestTimeout: s.originalTimeout, + smallerSizeClass: smallerSizeClass, + } + } + // The action doesn't seem prone to + // failures. Just run it on the smaller + // size class, only falling back to the + // largest size class upon failure. + smallerTimeout := strategy.ForegroundExecutionTimeout + return i, + getExpectedExecutionDuration(perSizeClassStatsMap, smallerSizeClass, smallerTimeout), + smallerTimeout, + &smallerForegroundLearner{ + cleanLearner: cleanLearner{ + baseLearner: baseLearner{ + analyzer: s.analyzer, + handle: s.handle, + }, + }, + smallerSizeClass: smallerSizeClass, + smallerTimeout: smallerTimeout, + largestSizeClass: largestSizeClass, + largestTimeout: s.originalTimeout, + } + } + r -= strategy.Probability + } + } + + // Random selection ended up choosing the largest size class. We + // can use the original timeout value. There is never any need + // to retry. + return len(sizeClasses) - 1, + getExpectedExecutionDuration(perSizeClassStatsMap, largestSizeClass, s.originalTimeout), + s.originalTimeout, + &largestLearner{ + cleanLearner: cleanLearner{ + baseLearner: baseLearner{ + analyzer: s.analyzer, + handle: s.handle, + }, + }, + largestSizeClass: largestSizeClass, + } +} + +func (s *feedbackDrivenSelector) Abandoned() { + s.handle.Release(false) + s.handle = nil +} + +// baseLearner is the base type for all Learner objects returned by +// FeedbackDrivenAnalyzer. 
+type baseLearner struct { + analyzer *feedbackDrivenAnalyzer + handle PreviousExecutionStatsHandle +} + +func (l *baseLearner) addPreviousExecution(sizeClass uint32, previousExecution *iscc.PreviousExecution) { + perSizeClassStatsMap := l.handle.GetMutableProto().SizeClasses + perSizeClassStats, ok := perSizeClassStatsMap[sizeClass] + if !ok { + // Size class does not exist yet. Create it. + perSizeClassStats = &iscc.PerSizeClassStats{} + perSizeClassStatsMap[sizeClass] = perSizeClassStats + } + + // Append new outcome, potentially removing the oldest one present. + perSizeClassStats.PreviousExecutions = append(perSizeClassStats.PreviousExecutions, previousExecution) + if l, historySize := len(perSizeClassStats.PreviousExecutions), l.analyzer.historySize; l > historySize { + perSizeClassStats.PreviousExecutions = perSizeClassStats.PreviousExecutions[l-historySize:] + } +} + +func (l *baseLearner) updateLastSeenFailure() { + stats := l.handle.GetMutableProto() + stats.LastSeenFailure = timestamppb.New(l.analyzer.clock.Now()) +} + +// cleanLearner is a common type for all Learner objects returned by +// FeedbackDrivenAnalyzer that haven't made any modifications to the +// underlying PreviousExecutionStatsHandle yet. Abandoning learners of +// this type will not cause any writes into the Initial Size Class Cache +// (ISCC). +type cleanLearner struct { + baseLearner +} + +func (l *cleanLearner) Abandoned() { + l.handle.Release(false) + l.handle = nil +} + +// smallerForegroundLearner is the initial Learner that is returned by +// FeedbackDrivenAnalyzer when executing an action on a smaller size +// class under the assumption execution is going to succeed. 
+type smallerForegroundLearner struct { + cleanLearner + smallerSizeClass uint32 + smallerTimeout time.Duration + largestSizeClass uint32 + largestTimeout time.Duration +} + +func (l *smallerForegroundLearner) Succeeded(duration time.Duration, sizeClasses []uint32) (int, time.Duration, time.Duration, Learner) { + l.addPreviousExecution(l.smallerSizeClass, &iscc.PreviousExecution{ + Outcome: &iscc.PreviousExecution_Succeeded{ + Succeeded: durationpb.New(duration), + }, + }) + l.handle.Release(true) + l.handle = nil + return 0, 0, 0, nil +} + +func (l *smallerForegroundLearner) Failed(timedOut bool) (time.Duration, time.Duration, Learner) { + // Retry execution on the largest size class. Store the outcome + // of this invocation, so that we can write it into the ISCC in + // case the action does succeed on the largest size class. + newL := &largestForegroundLearner{ + cleanLearner: cleanLearner{ + baseLearner: baseLearner{ + analyzer: l.analyzer, + handle: l.handle, + }, + }, + smallerSizeClass: l.smallerSizeClass, + largestSizeClass: l.largestSizeClass, + } + if timedOut { + newL.smallerExecution.Outcome = &iscc.PreviousExecution_TimedOut{ + TimedOut: durationpb.New(l.smallerTimeout), + } + } else { + newL.smallerExecution.Outcome = &iscc.PreviousExecution_Failed{ + Failed: &emptypb.Empty{}, + } + } + perSizeClassStatsMap := l.handle.GetMutableProto().SizeClasses + return getExpectedExecutionDuration(perSizeClassStatsMap, l.largestSizeClass, l.largestTimeout), l.largestTimeout, newL +} + +// largestForegroundLearner is the final Learner that is returned by +// FeedbackDrivenAnalyzer when initially executing an action on a +// smaller size class under the assumption execution is going to +// succeed (which didn't end up being the case). 
type largestForegroundLearner struct {
	cleanLearner
	// Size class on which the action originally failed, and the
	// outcome observed there. That outcome is only written to the
	// ISCC if the retry on the largest size class succeeds.
	smallerSizeClass uint32
	smallerExecution iscc.PreviousExecution
	largestSizeClass uint32
}

// Succeeded is called when the retry on the largest size class
// completed successfully. Both the carried outcome from the smaller
// size class and the successful execution time on the largest size
// class are stored, and learning terminates.
func (l *largestForegroundLearner) Succeeded(duration time.Duration, sizeClasses []uint32) (int, time.Duration, time.Duration, Learner) {
	l.addPreviousExecution(l.smallerSizeClass, &l.smallerExecution)
	l.addPreviousExecution(l.largestSizeClass, &iscc.PreviousExecution{
		Outcome: &iscc.PreviousExecution_Succeeded{
			Succeeded: durationpb.New(duration),
		},
	})
	l.handle.Release(true)
	l.handle = nil
	return 0, 0, 0, nil
}

// Failed is called when the action failed on the largest size class as
// well. Only the time of failure is recorded; individual outcome
// samples are not stored.
func (l *largestForegroundLearner) Failed(timedOut bool) (time.Duration, time.Duration, Learner) {
	l.updateLastSeenFailure()
	l.handle.Release(true)
	l.handle = nil
	return 0, 0, nil
}

// largestBackgroundLearner is the initial Learner that is returned by
// FeedbackDrivenAnalyzer when executing an action on a smaller size
// class under the assumption that doing this is going to fail anyway.
// Before executing the action on the smaller size class, we run it on
// the largest size class. That way the user isn't blocked.
type largestBackgroundLearner struct {
	cleanLearner
	largestSizeClass uint32
	largestTimeout   time.Duration
	// Size class on which a background training run should be
	// scheduled after the largest size class succeeds.
	smallerSizeClass uint32
}

// Succeeded is called when execution on the largest size class
// completed. It stores that outcome and, if the smaller size class is
// still offered, returns a smallerBackgroundLearner that requests a
// background training run there, with a timeout derived from the
// strategy calculator.
func (l *largestBackgroundLearner) Succeeded(duration time.Duration, sizeClasses []uint32) (int, time.Duration, time.Duration, Learner) {
	l.addPreviousExecution(l.largestSizeClass, &iscc.PreviousExecution{
		Outcome: &iscc.PreviousExecution_Succeeded{
			Succeeded: durationpb.New(duration),
		},
	})
	for i, sizeClass := range sizeClasses {
		if sizeClass == l.smallerSizeClass {
			// The smaller size class on which we originally
			// wanted to run the action still exists.
			// Request that it's run on that size class once
			// again, for training purposes.
			perSizeClassStatsMap := l.handle.GetMutableProto().SizeClasses
			smallerTimeout := l.analyzer.strategyCalculator.GetBackgroundExecutionTimeout(
				perSizeClassStatsMap,
				sizeClasses,
				i,
				l.largestTimeout)
			return i,
				getExpectedExecutionDuration(perSizeClassStatsMap, l.smallerSizeClass, smallerTimeout),
				smallerTimeout,
				&smallerBackgroundLearner{
					baseLearner: baseLearner{
						analyzer: l.analyzer,
						handle:   l.handle,
					},
					smallerSizeClass: l.smallerSizeClass,
					smallerTimeout:   smallerTimeout,
				}
		}
	}
	// Corner case: the smaller size class disappeared before we got
	// a chance to schedule the action on it. Let's not do any
	// background learning.
	l.handle.Release(true)
	l.handle = nil
	return 0, 0, 0, nil
}

// Failed is called when the action failed on the largest size class.
// Only the time of failure is recorded, and no background run on the
// smaller size class is attempted.
func (l *largestBackgroundLearner) Failed(timedOut bool) (time.Duration, time.Duration, Learner) {
	l.updateLastSeenFailure()
	l.handle.Release(true)
	l.handle = nil
	return 0, 0, nil
}

// smallerBackgroundLearner is the final Learner that is returned by
// FeedbackDrivenAnalyzer when executing an action on a smaller size
// class under the assumption that doing this is going to fail anyway.
// The action has already run on the largest size class and succeeded.
// We can now run it on the smaller size class for training purposes.
type smallerBackgroundLearner struct {
	baseLearner
	smallerSizeClass uint32
	smallerTimeout   time.Duration
}

// Abandoned is called when the background run never takes place.
// Unlike abandonment during a foreground run, the handle is released
// with write=true here.
func (l *smallerBackgroundLearner) Abandoned() {
	// Still make sure the results of the execution on the largest
	// size class end up getting written.
	l.handle.Release(true)
	l.handle = nil
}

// Failed records the failure or timeout of the background training run
// on the smaller size class and terminates learning. The handle is
// still written back, preserving the earlier success on the largest
// size class.
func (l *smallerBackgroundLearner) Failed(timedOut bool) (time.Duration, time.Duration, Learner) {
	if timedOut {
		l.addPreviousExecution(l.smallerSizeClass, &iscc.PreviousExecution{
			Outcome: &iscc.PreviousExecution_TimedOut{
				TimedOut: durationpb.New(l.smallerTimeout),
			},
		})
	} else {
		l.addPreviousExecution(l.smallerSizeClass, &iscc.PreviousExecution{
			Outcome: &iscc.PreviousExecution_Failed{
				Failed: &emptypb.Empty{},
			},
		})
	}
	l.handle.Release(true)
	l.handle = nil
	return 0, 0, nil
}

// Succeeded records the successful execution time of the background
// training run on the smaller size class and terminates learning.
func (l *smallerBackgroundLearner) Succeeded(duration time.Duration, sizeClasses []uint32) (int, time.Duration, time.Duration, Learner) {
	l.addPreviousExecution(l.smallerSizeClass, &iscc.PreviousExecution{
		Outcome: &iscc.PreviousExecution_Succeeded{
			Succeeded: durationpb.New(duration),
		},
	})
	l.handle.Release(true)
	l.handle = nil
	return 0, 0, 0, nil
}

// largestLearner is returned by FeedbackDrivenAnalyzer when executing
// an action on the largest size class immediately. There is no need to
// do any fallback to different size classes. It's also not necessary to
// register failures, as those samples don't contribute to the analysis
// in any way.
type largestLearner struct {
	cleanLearner
	largestSizeClass uint32
}

// Succeeded records the successful execution time on the largest size
// class and terminates learning.
func (l *largestLearner) Succeeded(duration time.Duration, sizeClasses []uint32) (int, time.Duration, time.Duration, Learner) {
	l.addPreviousExecution(l.largestSizeClass, &iscc.PreviousExecution{
		Outcome: &iscc.PreviousExecution_Succeeded{
			Succeeded: durationpb.New(duration),
		},
	})
	l.handle.Release(true)
	l.handle = nil
	return 0, 0, 0, nil
}

// Failed only records the time of failure; no outcome sample is stored,
// as failures on the largest size class don't feed into the analysis.
func (l *largestLearner) Failed(timedOut bool) (time.Duration, time.Duration, Learner) {
	l.updateLastSeenFailure()
	l.handle.Release(true)
	l.handle = nil
	return 0, 0, nil
}
diff --git a/pkg/scheduler/initialsizeclass/feedback_driven_analyzer_test.go b/pkg/scheduler/initialsizeclass/feedback_driven_analyzer_test.go
new file mode 100644
index 0000000..d87df8a
--- /dev/null
+++ b/pkg/scheduler/initialsizeclass/feedback_driven_analyzer_test.go
@@ -0,0 +1,355 @@
package initialsizeclass_test

import (
	"context"
	"testing"
	"time"

	remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2"
	"github.com/buildbarn/bb-remote-execution/internal/mock"
	"github.com/buildbarn/bb-remote-execution/pkg/scheduler/initialsizeclass"
	"github.com/buildbarn/bb-storage/pkg/digest"
	"github.com/buildbarn/bb-storage/pkg/proto/iscc"
	"github.com/buildbarn/bb-storage/pkg/testutil"
	"github.com/golang/mock/gomock"
	"github.com/stretchr/testify/require"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
	"google.golang.org/protobuf/types/known/durationpb"
	"google.golang.org/protobuf/types/known/emptypb"
	"google.golang.org/protobuf/types/known/timestamppb"
)

func TestFeedbackDrivenAnalyzer(t *testing.T) {
	ctrl, ctx := gomock.WithContext(context.Background(), t)

	store := mock.NewMockPreviousExecutionStatsStore(ctrl)
	randomNumberGenerator := mock.NewMockSingleThreadedGenerator(ctrl)
	clock := mock.NewMockClock(ctrl)
	actionTimeoutExtractor :=
initialsizeclass.NewActionTimeoutExtractor( + 30*time.Minute, + 60*time.Minute) + strategyCalculator := mock.NewMockStrategyCalculator(ctrl) + analyzer := initialsizeclass.NewFeedbackDrivenAnalyzer( + store, + randomNumberGenerator, + clock, + actionTimeoutExtractor, + /* failureCacheDuration = */ 24*time.Hour, + strategyCalculator, + /* historySize = */ 5) + + exampleDigestFunction := digest.MustNewFunction("hello", remoteexecution.DigestFunction_MD5) + exampleAction := &remoteexecution.Action{ + CommandDigest: &remoteexecution.Digest{ + Hash: "bf4d2b56ee892c3fc862cdb863a6c2f4", + SizeBytes: 720, + }, + InputRootDigest: &remoteexecution.Digest{ + Hash: "9a0be105e682022830da33578b909521", + SizeBytes: 951, + }, + } + exampleReducedActionDigest := digest.MustNewDigest("hello", remoteexecution.DigestFunction_MD5, "5057a5db1b97ee73b2466f60e781d607", 39) + + t.Run("StorageFailure", func(t *testing.T) { + // Failures reading existing entries from the Initial + // Size Class Cache (ISCC) should be propagated. + store.EXPECT().Get(ctx, exampleReducedActionDigest). + Return(nil, status.Error(codes.Internal, "Network error")) + + _, err := analyzer.Analyze(ctx, exampleDigestFunction, exampleAction) + testutil.RequireEqualStatus(t, status.Error(codes.Internal, "Failed to read previous execution stats for reduced action digest \"3-5057a5db1b97ee73b2466f60e781d607-39-hello\": Network error"), err) + }) + + t.Run("InitialAbandoned", func(t *testing.T) { + handle := mock.NewMockPreviousExecutionStatsHandle(ctrl) + store.EXPECT().Get(ctx, exampleReducedActionDigest).Return(handle, nil) + + selector, err := analyzer.Analyze(ctx, exampleDigestFunction, exampleAction) + require.NoError(t, err) + + // Return an empty stats message. The strategy + // calculator will most likely just return a uniform + // distribution. Let's pick the smallest size class. 
+ var stats iscc.PreviousExecutionStats + handle.EXPECT().GetMutableProto().Return(&stats).AnyTimes() + strategyCalculator.EXPECT().GetStrategies(gomock.Not(gomock.Nil()), []uint32{1, 2, 4, 8}, 30*time.Minute). + Return([]initialsizeclass.Strategy{ + { + Probability: 0.25, + ForegroundExecutionTimeout: 15 * time.Second, + }, + { + Probability: 0.25, + ForegroundExecutionTimeout: 15 * time.Second, + }, + { + Probability: 0.25, + ForegroundExecutionTimeout: 15 * time.Second, + }, + }) + randomNumberGenerator.EXPECT().Float64().Return(0.1) + + sizeClassIndex, expectedDuration, timeout, learner := selector.Select([]uint32{1, 2, 4, 8}) + require.Equal(t, 0, sizeClassIndex) + require.Equal(t, 15*time.Second, expectedDuration) + require.Equal(t, 15*time.Second, timeout) + + // Action didn't get run after all. + handle.EXPECT().Release(false) + + learner.Abandoned() + testutil.RequireEqualProto(t, &iscc.PreviousExecutionStats{ + SizeClasses: map[uint32]*iscc.PerSizeClassStats{}, + }, &stats) + }) + + t.Run("InitialSuccess", func(t *testing.T) { + handle := mock.NewMockPreviousExecutionStatsHandle(ctrl) + store.EXPECT().Get(ctx, exampleReducedActionDigest).Return(handle, nil) + + selector, err := analyzer.Analyze(ctx, exampleDigestFunction, exampleAction) + require.NoError(t, err) + + // Same as before: empty stats message. Now pick the + // second smallest size class. + var stats iscc.PreviousExecutionStats + handle.EXPECT().GetMutableProto().Return(&stats).AnyTimes() + strategyCalculator.EXPECT().GetStrategies(gomock.Not(gomock.Nil()), []uint32{1, 2, 4, 8}, 30*time.Minute). 
+ Return([]initialsizeclass.Strategy{ + { + Probability: 0.25, + ForegroundExecutionTimeout: 15 * time.Second, + }, + { + Probability: 0.25, + ForegroundExecutionTimeout: 15 * time.Second, + }, + { + Probability: 0.25, + ForegroundExecutionTimeout: 15 * time.Second, + }, + }) + randomNumberGenerator.EXPECT().Float64().Return(0.4) + + sizeClassIndex, expectedDuration, timeout, learner1 := selector.Select([]uint32{1, 2, 4, 8}) + require.Equal(t, 1, sizeClassIndex) + require.Equal(t, 15*time.Second, expectedDuration) + require.Equal(t, 15*time.Second, timeout) + + // Report that execution succeeded. This should cause + // the execution time to be recorded. + handle.EXPECT().Release(true) + + _, _, _, learner2 := learner1.Succeeded(time.Minute, []uint32{1, 2, 4, 8}) + require.Nil(t, learner2) + testutil.RequireEqualProto(t, &iscc.PreviousExecutionStats{ + SizeClasses: map[uint32]*iscc.PerSizeClassStats{ + 2: { + PreviousExecutions: []*iscc.PreviousExecution{ + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Seconds: 60}}}, + }, + }, + }, + }, &stats) + }) + + t.Run("SuccessAfterFailure", func(t *testing.T) { + handle := mock.NewMockPreviousExecutionStatsHandle(ctrl) + store.EXPECT().Get(ctx, exampleReducedActionDigest).Return(handle, nil) + + selector, err := analyzer.Analyze(ctx, exampleDigestFunction, exampleAction) + require.NoError(t, err) + + // Let the action run on size class 1. + stats := iscc.PreviousExecutionStats{ + SizeClasses: map[uint32]*iscc.PerSizeClassStats{ + 8: { + PreviousExecutions: []*iscc.PreviousExecution{ + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Seconds: 10}}}, + }, + }, + }, + } + handle.EXPECT().GetMutableProto().Return(&stats).AnyTimes() + strategyCalculator.EXPECT().GetStrategies(gomock.Not(gomock.Nil()), []uint32{1, 2, 4, 8}, 30*time.Minute). 
+ Return([]initialsizeclass.Strategy{ + { + Probability: 0.6, + ForegroundExecutionTimeout: 40 * time.Second, + }, + { + Probability: 0.2, + ForegroundExecutionTimeout: 30 * time.Second, + }, + { + Probability: 0.1, + ForegroundExecutionTimeout: 20 * time.Second, + }, + }) + randomNumberGenerator.EXPECT().Float64().Return(0.55) + + sizeClassIndex, expectedDuration1, timeout1, learner1 := selector.Select([]uint32{1, 2, 4, 8}) + require.Equal(t, 0, sizeClassIndex) + require.Equal(t, 40*time.Second, expectedDuration1) + require.Equal(t, 40*time.Second, timeout1) + + // Let execution fail on size class 1. Because this is + // not the largest size class, a new learner for size + // class 8 is returned. + expectedDuration2, timeout2, learner2 := learner1.Failed(false) + require.NotNil(t, learner2) + require.Equal(t, 10*time.Second, expectedDuration2) + require.Equal(t, 30*time.Minute, timeout2) + + // Report success on size class 8. This should cause the + // result of both executions to be stored. 
+ handle.EXPECT().Release(true) + + _, _, _, learner3 := learner2.Succeeded(12*time.Second, []uint32{1, 2, 4, 8}) + require.Nil(t, learner3) + testutil.RequireEqualProto(t, &iscc.PreviousExecutionStats{ + SizeClasses: map[uint32]*iscc.PerSizeClassStats{ + 1: { + PreviousExecutions: []*iscc.PreviousExecution{ + {Outcome: &iscc.PreviousExecution_Failed{Failed: &emptypb.Empty{}}}, + }, + }, + 8: { + PreviousExecutions: []*iscc.PreviousExecution{ + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Seconds: 10}}}, + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Seconds: 12}}}, + }, + }, + }, + }, &stats) + }) + + t.Run("SkipSmallerAfterFailure", func(t *testing.T) { + handle := mock.NewMockPreviousExecutionStatsHandle(ctrl) + store.EXPECT().Get(ctx, exampleReducedActionDigest).Return(handle, nil) + + selector, err := analyzer.Analyze(ctx, exampleDigestFunction, exampleAction) + require.NoError(t, err) + + // Provide statistics for an action that failed + // recently. We should always schedule these on the + // largest size class, so that we don't introduce + // unnecessary delays. + stats := iscc.PreviousExecutionStats{ + SizeClasses: map[uint32]*iscc.PerSizeClassStats{ + 8: { + PreviousExecutions: []*iscc.PreviousExecution{ + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Seconds: 10}}}, + }, + }, + }, + LastSeenFailure: ×tamppb.Timestamp{Seconds: 1620218381}, + } + handle.EXPECT().GetMutableProto().Return(&stats).AnyTimes() + clock.EXPECT().Now().Return(time.Unix(1620242374, 0)) + + sizeClassIndex, expectedDuration, timeout, learner := selector.Select([]uint32{1, 2, 4, 8}) + require.Equal(t, 3, sizeClassIndex) + require.Equal(t, 10*time.Second, expectedDuration) + require.Equal(t, 30*time.Minute, timeout) + + // Abandoning it should not cause any changes to it. 
+ handle.EXPECT().Release(false) + + learner.Abandoned() + testutil.RequireEqualProto(t, &iscc.PreviousExecutionStats{ + SizeClasses: map[uint32]*iscc.PerSizeClassStats{ + 8: { + PreviousExecutions: []*iscc.PreviousExecution{ + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Seconds: 10}}}, + }, + }, + }, + LastSeenFailure: ×tamppb.Timestamp{Seconds: 1620218381}, + }, &stats) + }) + + t.Run("BackgroundRun", func(t *testing.T) { + handle := mock.NewMockPreviousExecutionStatsHandle(ctrl) + store.EXPECT().Get(ctx, exampleReducedActionDigest).Return(handle, nil) + + selector, err := analyzer.Analyze(ctx, exampleDigestFunction, exampleAction) + require.NoError(t, err) + + // Provide statistics for an action that has never been + // run before. It should be run on the largest size + // class, but we do want to perform a background run on + // the smallest size class. If both succeed, we have + // more freedom when scheduling this action the next + // time. + var stats iscc.PreviousExecutionStats + handle.EXPECT().GetMutableProto().Return(&stats).AnyTimes() + strategyCalculator.EXPECT().GetStrategies(gomock.Not(gomock.Nil()), []uint32{1, 2, 4, 8}, 30*time.Minute). + Return([]initialsizeclass.Strategy{ + { + Probability: 1.0, + RunInBackground: true, + }, + }) + randomNumberGenerator.EXPECT().Float64().Return(0.32) + + sizeClassIndex1, expectedDuration1, timeout1, learner1 := selector.Select([]uint32{1, 2, 4, 8}) + require.Equal(t, 3, sizeClassIndex1) + require.Equal(t, 30*time.Minute, expectedDuration1) + require.Equal(t, 30*time.Minute, timeout1) + + // Once execution on the largest size class has + // succeeded, we should obtain a new learner for running + // it on the smallest size class. + // + // Because the execution timeout to be used on the + // smallest size class depends on that of the largest + // size class, we should see a request to recompute the + // execution timeout. 
+ strategyCalculator.EXPECT().GetBackgroundExecutionTimeout(gomock.Not(gomock.Nil()), []uint32{1, 2, 4, 8}, 0, 30*time.Minute).DoAndReturn( + func(perSizeClassStatsMap map[uint32]*iscc.PerSizeClassStats, sizeClasses []uint32, sizeClassIndex int, originalTimeout time.Duration) time.Duration { + testutil.RequireEqualProto(t, &iscc.PreviousExecutionStats{ + SizeClasses: map[uint32]*iscc.PerSizeClassStats{ + 8: { + PreviousExecutions: []*iscc.PreviousExecution{ + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Seconds: 42}}}, + }, + }, + }, + }, &stats) + return 80 * time.Second + }) + + sizeClassIndex2, expectedDuration2, timeout2, learner2 := learner1.Succeeded(42*time.Second, []uint32{1, 2, 4, 8}) + require.NotNil(t, learner2) + require.Equal(t, 0, sizeClassIndex2) + require.Equal(t, 80*time.Second, expectedDuration2) + require.Equal(t, 80*time.Second, timeout2) + + // Once execution on the smallest size class completes, + // both outcomes are stored. + handle.EXPECT().Release(true) + + _, _, _, learner3 := learner2.Succeeded(72*time.Second, []uint32{1, 2, 4, 8}) + require.Nil(t, learner3) + testutil.RequireEqualProto(t, &iscc.PreviousExecutionStats{ + SizeClasses: map[uint32]*iscc.PerSizeClassStats{ + 1: { + PreviousExecutions: []*iscc.PreviousExecution{ + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Seconds: 72}}}, + }, + }, + 8: { + PreviousExecutions: []*iscc.PreviousExecution{ + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Seconds: 42}}}, + }, + }, + }, + }, &stats) + }) + + // TODO: Are there more test cases we want to cover? +} diff --git a/pkg/scheduler/initialsizeclass/outcomes.go b/pkg/scheduler/initialsizeclass/outcomes.go new file mode 100644 index 0000000..da60953 --- /dev/null +++ b/pkg/scheduler/initialsizeclass/outcomes.go @@ -0,0 +1,137 @@ +package initialsizeclass + +import ( + "sort" + "time" +) + +// Outcomes of previous executions of an action. 
For successful +// outcomes, the execution times are stored in ascending order. For +// failures, a count is stored. +type Outcomes struct { + successes durationsList + failures int +} + +// NewOutcomes creates a new Outcomes object that contains samples for +// successful and failed executions based on the arguments provided. +// This function takes ownership of the list of durations, sorting it in +// ascending order. +func NewOutcomes(successes []time.Duration, failures int) Outcomes { + sort.Sort(durationsList(successes)) + return Outcomes{ + successes: successes, + failures: failures, + } +} + +// GetMedianExecutionTime computes the median execution time of all of +// the successful outcomes. It may return nil in case no successful +// outcomes have been registered. +func (o Outcomes) GetMedianExecutionTime() *time.Duration { + if len(o.successes) == 0 { + return nil + } + middle := len(o.successes) / 2 + median := o.successes[middle] + if len(o.successes)%2 == 0 { + median = (o.successes[middle-1] + median) / 2 + } + return &median +} + +// IsFaster returns a probability in range (0.0, 1.0) of the current set +// of outcomes being faster than another one. The algorithm for this is +// to compute the average rank in B for every element in A, similar to +// the Mann-Whitney U test. This is done for two reasons: +// +// - Analysis on mean or median values is not always possible, as a set +// of outcomes may contain (or consist only of) failures of which the +// execution time is unknown. +// - When implemented properly, it is an asymmetric relation, in that +// x.IsFaster(x) == 0.5 and x.IsFaster(y) + y.IsFaster(x) == 1.0 for +// any sets of outcomes x and y. +// +// This function works by running a 2-way merge algorithm against both +// sets of outcomes, awarding scores between [0, 2*len(B)] based on the +// rank in B for each of the elements in A, meaning a total score of +// 2*len(A)*len(B) may be earned. 
Inequality between elements always +// yields an even score. Odd scores may need to be given in case of +// identical values. +// +// To ensure that the probability returned by this function doesn't +// become too extreme for small sample counts, we add 1+len(B) to A's +// score, and 1+len(A) to B's score. This also makes sure that empty +// sets don't cause divisions by zero, and that the probability never +// becomes exactly 0.0 or 1.0. The latter is important for PageRank +// computation, as eigenvalue computation wouldn't converge otherwise. +// It also causes smaller sets to get an advantage, which is important +// for ensuring that all size classes are tested sufficiently. This is +// similar in spirit to the "plus four" rule for computing confidence +// intervals. +func (o Outcomes) IsFaster(other Outcomes) float64 { + successesA, successesB := o.successes, other.successes + countA, countB := len(successesA)+o.failures, len(successesB)+other.failures + score := 1 + countB + remainingA, remainingB := countA, countB + for len(successesA) > 0 && len(successesB) > 0 { + if successesA[0] < successesB[0] { + // The first sample in A is faster than the + // first sample in B. Award full points. + score += 2 * remainingB + successesA = successesA[1:] + remainingA-- + } else if successesA[0] > successesB[0] { + // The first sample in A is slower than the + // first sample in B. Award no points. + successesB = successesB[1:] + remainingB-- + } else { + // First sample in A is identical to the first + // sample in B. Consume all identical values in + // A and B and award half points for the entire + // region. 
+ equalA, equalB := 1, 1 + current := successesA[0] + for { + successesA = successesA[1:] + if len(successesA) == 0 || successesA[0] != current { + break + } + equalA++ + } + for { + successesB = successesB[1:] + if len(successesB) == 0 || successesB[0] != current { + break + } + equalB++ + } + score += equalA * (2*remainingB - equalB) + remainingA -= equalA + remainingB -= equalB + } + } + // Add score for trailing elements and failures. All failures + // are effectively treated as having the same execution time, + // exceeding that of any of the successful outcomes. + score += 2 * len(successesA) * remainingB + score += o.failures * other.failures + return float64(score) / float64(2+countA+countB+2*countA*countB) +} + +// durationsList is a list of time.Duration values. It implements +// sort.Interface. +type durationsList []time.Duration + +func (l durationsList) Len() int { + return len(l) +} + +func (l durationsList) Less(i, j int) bool { + return l[i] < l[j] +} + +func (l durationsList) Swap(i, j int) { + l[i], l[j] = l[j], l[i] +} diff --git a/pkg/scheduler/initialsizeclass/outcomes_test.go b/pkg/scheduler/initialsizeclass/outcomes_test.go new file mode 100644 index 0000000..7ecc5ad --- /dev/null +++ b/pkg/scheduler/initialsizeclass/outcomes_test.go @@ -0,0 +1,141 @@ +package initialsizeclass_test + +import ( + "testing" + "time" + + "github.com/buildbarn/bb-remote-execution/pkg/scheduler/initialsizeclass" + "github.com/stretchr/testify/require" +) + +func TestOutcomesIsFasterIdentity(t *testing.T) { + t.Run("Identity", func(t *testing.T) { + // Calling IsFaster() against the same sets should + // always yield 0.5. 
+ for _, outcomes := range []initialsizeclass.Outcomes{ + initialsizeclass.NewOutcomes(nil, 0), + initialsizeclass.NewOutcomes([]time.Duration{ + time.Second, + }, 0), + initialsizeclass.NewOutcomes([]time.Duration{ + time.Second, + time.Second, + }, 0), + initialsizeclass.NewOutcomes([]time.Duration{ + 7 * time.Second, + 8 * time.Second, + 9 * time.Second, + 10 * time.Second, + 11 * time.Second, + 12 * time.Second, + }, 14), + } { + require.Equal(t, 0.5, outcomes.IsFaster(outcomes)) + } + }) + + t.Run("Asymmetry1", func(t *testing.T) { + // With one list containing 1 element and the other one + // being empty, IsFaster() should use a divisor of + // 2 + 1 + 0 + 1*0 = 3. + outcomesA := initialsizeclass.NewOutcomes([]time.Duration{ + time.Second, + }, 0) + outcomesB := initialsizeclass.NewOutcomes(nil, 0) + require.Equal(t, float64(1)/3, outcomesA.IsFaster(outcomesB)) + require.Equal(t, float64(2)/3, outcomesB.IsFaster(outcomesA)) + }) + + t.Run("Asymmetry2", func(t *testing.T) { + // With lists of 10 elements, IsFaster() should use a + // divisor of 2 + 10 + 10 + 2*10*10 = 222. + outcomesA := initialsizeclass.NewOutcomes([]time.Duration{ + time.Second, + time.Second, + time.Second, + time.Second, + time.Second, + time.Second, + time.Second, + time.Second, + time.Second, + time.Second, + }, 0) + outcomesB := initialsizeclass.NewOutcomes(nil, 10) + require.Equal(t, float64(211)/222, outcomesA.IsFaster(outcomesB)) + require.Equal(t, float64(11)/222, outcomesB.IsFaster(outcomesA)) + }) + + t.Run("Wider", func(t *testing.T) { + // Samples in both sets center around 10 seconds. It's + // just that the ones in set A spread a bit wider. 
+ outcomesA := initialsizeclass.NewOutcomes([]time.Duration{ + 6 * time.Second, + 8 * time.Second, + 10 * time.Second, + 10 * time.Second, + 12 * time.Second, + 14 * time.Second, + }, 1) + outcomesB := initialsizeclass.NewOutcomes([]time.Duration{ + 9 * time.Second, + 9 * time.Second, + 9 * time.Second, + 11 * time.Second, + 11 * time.Second, + 11 * time.Second, + }, 1) + require.Equal(t, 0.5, outcomesA.IsFaster(outcomesB)) + require.Equal(t, 0.5, outcomesB.IsFaster(outcomesA)) + }) + + t.Run("ZigZagFaster", func(t *testing.T) { + // The outcomes in sets A and B alternate. Because the + // outcomes in set A are all slightly smaller, the + // probability should be in favor of set A. + outcomesA := initialsizeclass.NewOutcomes([]time.Duration{ + 1 * time.Second, + 3 * time.Second, + 5 * time.Second, + 7 * time.Second, + 9 * time.Second, + 11 * time.Second, + }, 0) + outcomesB := initialsizeclass.NewOutcomes([]time.Duration{ + 2 * time.Second, + 4 * time.Second, + 6 * time.Second, + 8 * time.Second, + 10 * time.Second, + 12 * time.Second, + }, 0) + require.Equal(t, float64(49)/86, outcomesA.IsFaster(outcomesB)) + require.Equal(t, float64(37)/86, outcomesB.IsFaster(outcomesA)) + }) + + t.Run("ZigZagEqual", func(t *testing.T) { + // The same sets as before, except that we place another + // sample at the end of set A. This should bring the + // probability closer to 0.5. Set B is still preferred, + // because it has a smaller number of samples. 
+ outcomesA := initialsizeclass.NewOutcomes([]time.Duration{ + 1 * time.Second, + 3 * time.Second, + 5 * time.Second, + 7 * time.Second, + 9 * time.Second, + 11 * time.Second, + 13 * time.Second, + }, 0) + outcomesB := initialsizeclass.NewOutcomes([]time.Duration{ + 2 * time.Second, + 4 * time.Second, + 6 * time.Second, + 8 * time.Second, + 10 * time.Second, + 12 * time.Second, + }, 0) + require.Equal(t, float64(49)/99, outcomesA.IsFaster(outcomesB)) + require.Equal(t, float64(50)/99, outcomesB.IsFaster(outcomesA)) + }) +} diff --git a/pkg/scheduler/initialsizeclass/page_rank_strategy_calculator.go b/pkg/scheduler/initialsizeclass/page_rank_strategy_calculator.go new file mode 100644 index 0000000..3508dd2 --- /dev/null +++ b/pkg/scheduler/initialsizeclass/page_rank_strategy_calculator.go @@ -0,0 +1,309 @@ +package initialsizeclass + +import ( + "math" + "sync" + "time" + + "github.com/buildbarn/bb-storage/pkg/proto/iscc" + "github.com/prometheus/client_golang/prometheus" +) + +var ( + pageRankStrategyCalculatorMetrics sync.Once + + pageRankStrategyCalculatorConvergenceIterations = prometheus.NewHistogram( + prometheus.HistogramOpts{ + Namespace: "buildbarn", + Subsystem: "builder", + Name: "page_rank_strategy_calculator_convergence_iterations", + Help: "Number of iterations matrix multiplication was performed until convergence.", + Buckets: prometheus.ExponentialBuckets(1.0, 2.0, 11), + }) +) + +type pageRankStrategyCalculator struct { + minimumExecutionTimeout time.Duration + acceptableExecutionTimeIncreaseExponent float64 + timeoutMultiplier float64 + maximumConvergenceError float64 +} + +// NewPageRankStrategyCalculator creates a StrategyCalculator that uses +// outcomes of previous executions to determine probabilities for +// running actions on a given set of size classes. 
//
// The algorithm that it uses to compute probabilities is similar to
// PageRank, in that it constructs a stochastic matrix of which the
// resulting eigenvector contains the probabilities.
func NewPageRankStrategyCalculator(minimumExecutionTimeout time.Duration, acceptableExecutionTimeIncreaseExponent, timeoutMultiplier, maximumConvergenceError float64) StrategyCalculator {
	// Register the convergence-iterations histogram only once, even
	// if multiple instances of this calculator are created.
	pageRankStrategyCalculatorMetrics.Do(func() {
		prometheus.MustRegister(pageRankStrategyCalculatorConvergenceIterations)
	})

	return &pageRankStrategyCalculator{
		minimumExecutionTimeout:                 minimumExecutionTimeout,
		acceptableExecutionTimeIncreaseExponent: acceptableExecutionTimeIncreaseExponent,
		timeoutMultiplier:                       timeoutMultiplier,
		maximumConvergenceError:                 maximumConvergenceError,
	}
}

// getOutcomesFromPreviousExecutions returns an Outcomes object that
// stores all execution times observed on a given size class. The
// results are not normalized with respect to other size classes.
// Only successful executions contribute samples; failures and timeouts
// are dropped here.
func getOutcomesFromPreviousExecutions(previousExecutionsOnLargest []*iscc.PreviousExecution) Outcomes {
	executionTimesOnLargest := make([]time.Duration, 0, len(previousExecutionsOnLargest))
	for _, previousExecution := range previousExecutionsOnLargest {
		if outcome, ok := previousExecution.Outcome.(*iscc.PreviousExecution_Succeeded); ok {
			executionTimesOnLargest = append(executionTimesOnLargest, outcome.Succeeded.AsDuration())
		}
	}
	return NewOutcomes(executionTimesOnLargest, 0)
}

// smallerSizeClassExecutionParameters contains the acceptable execution
// time and the desirable execution timeout to use when executing an
// action on a smaller size class.
type smallerSizeClassExecutionParameters struct {
	// Multiplier applied to the median execution time on the
	// largest size class to obtain the acceptable execution time on
	// this smaller size class.
	acceptableExecutionTimeIncreaseFactor float64
	maximumAcceptableExecutionTime        time.Duration
	executionTimeout                      time.Duration
}

// getSmallerSizeClassExecutionParameters computes the acceptable
// execution time and desirable execution timeout for a given size
// class. The increase factor is (largest/smaller) raised to the
// configured exponent; the resulting timeout is clamped to
// [minimumExecutionTimeout, originalTimeout], and the acceptable
// execution time is lowered if it would otherwise come too close to
// the timeout.
func (sc *pageRankStrategyCalculator) getSmallerSizeClassExecutionParameters(smallerSizeClass, largestSizeClass uint32, medianExecutionTimeOnLargest, originalTimeout time.Duration) (p smallerSizeClassExecutionParameters) {
	p.acceptableExecutionTimeIncreaseFactor = math.Pow(float64(largestSizeClass)/float64(smallerSizeClass), sc.acceptableExecutionTimeIncreaseExponent)
	p.maximumAcceptableExecutionTime = time.Duration(float64(medianExecutionTimeOnLargest) * p.acceptableExecutionTimeIncreaseFactor)
	p.executionTimeout = time.Duration(float64(p.maximumAcceptableExecutionTime) * sc.timeoutMultiplier)
	if p.executionTimeout < sc.minimumExecutionTimeout {
		p.executionTimeout = sc.minimumExecutionTimeout
	}
	if p.executionTimeout > originalTimeout {
		p.executionTimeout = originalTimeout
	}
	if ceiling := time.Duration(float64(p.executionTimeout) / sc.timeoutMultiplier); p.maximumAcceptableExecutionTime > ceiling {
		// Make sure the maximum acceptable execution
		// time is not too close to the execution timeout.
		p.maximumAcceptableExecutionTime = ceiling
	}
	return
}

func (sc *pageRankStrategyCalculator) GetStrategies(perSizeClassStatsMap map[uint32]*iscc.PerSizeClassStats, sizeClasses []uint32, originalTimeout time.Duration) []Strategy {
	// No need to compute strategies in case there is only one size
	// class available.
	if len(sizeClasses) <= 1 {
		return nil
	}

	// Extract statistics for each of the size classes from the
	// existing stats message. Create a new map entry for each of
	// the size classes not seen before.
+ perSizeClassStatsList := make([]*iscc.PerSizeClassStats, 0, len(perSizeClassStatsMap)) + for _, sizeClass := range sizeClasses { + perSizeClassStats, ok := perSizeClassStatsMap[sizeClass] + if !ok { + perSizeClassStats = &iscc.PerSizeClassStats{} + perSizeClassStatsMap[sizeClass] = perSizeClassStats + } + perSizeClassStatsList = append(perSizeClassStatsList, perSizeClassStats) + } + + // Extract previous execution times on the largest size class. + // Compute the median, which we'll use as the baseline for + // determining what the execution timeout should be on smaller + // size classes. + n := len(sizeClasses) + outcomesOnLargest := getOutcomesFromPreviousExecutions(perSizeClassStatsList[n-1].PreviousExecutions) + medianExecutionTimeOnLargest := outcomesOnLargest.GetMedianExecutionTime() + if medianExecutionTimeOnLargest == nil { + // This action never succeeded on the largest size + // class. Force a run on both the largest and smallest + // size class. That way we both obtain a median + // execution time and learn whether the action can run + // on any size class. + return []Strategy{ + { + Probability: 1.0, + RunInBackground: true, + }, + } + } + + // Extract previous execution times on all other size classes. + largestSizeClass := sizeClasses[n-1] + outcomesList := make([]Outcomes, 0, n) + strategies := make([]Strategy, 0, n) + runInBackground := true + for i, sizeClass := range sizeClasses[:n-1] { + // Extract previous execution times on the smaller size + // class, normalized to the equivalent on the largest + // size class. Treat execution times that are not + // acceptable as failures, so that the probability of + // picking this size class is reduced. 
+ p := sc.getSmallerSizeClassExecutionParameters(sizeClass, largestSizeClass, *medianExecutionTimeOnLargest, originalTimeout) + previousExecutionsOnSmaller := perSizeClassStatsList[i].PreviousExecutions + normalizedExecutionTimes := make(durationsList, 0, len(previousExecutionsOnSmaller)) + failuresOrTimeouts := 0 + for _, previousExecution := range previousExecutionsOnSmaller { + switch outcome := previousExecution.Outcome.(type) { + case *iscc.PreviousExecution_Failed: + failuresOrTimeouts++ + case *iscc.PreviousExecution_TimedOut: + if duration := outcome.TimedOut.AsDuration(); duration >= p.maximumAcceptableExecutionTime { + failuresOrTimeouts++ + } + case *iscc.PreviousExecution_Succeeded: + if duration := outcome.Succeeded.AsDuration(); duration < p.maximumAcceptableExecutionTime { + normalizedExecutionTimes = append(normalizedExecutionTimes, time.Duration(float64(duration)/p.acceptableExecutionTimeIncreaseFactor)) + } else { + failuresOrTimeouts++ + } + } + } + outcomes := NewOutcomes(normalizedExecutionTimes, failuresOrTimeouts) + outcomesList = append(outcomesList, outcomes) + + if failuresOrTimeouts == 0 && len(normalizedExecutionTimes) == 0 { + if runInBackground { + // We have no outcomes for this size + // class, but we do know that it fails + // on the size class before it. + // + // Do a forced background run on this + // specific size class. If it succeeds, + // we know exactly where the tipping + // point is between success and failure. + // This reduces the need for background + // execution (and thus execution on the + // largest size class) later on. + return append(strategies, Strategy{ + Probability: 1.0, + RunInBackground: true, + }) + } + } else { + // We have outcomes for this size class. If + // there is a more than 50% of failure, run this + // action in the background. This ensures that + // the critical path duration of builds remains + // low. 
If no outcomes are available, we simply + // inherit the behaviour from smaller size + // classes. + runInBackground = failuresOrTimeouts > len(normalizedExecutionTimes) + } + if runInBackground { + strategies = append(strategies, Strategy{ + RunInBackground: runInBackground, + }) + } else { + strategies = append(strategies, Strategy{ + ForegroundExecutionTimeout: p.executionTimeout, + }) + } + } + outcomesList = append(outcomesList, outcomesOnLargest) + strategies = append(strategies, Strategy{}) + + // Create square matrix M with the size corresponding to + // the number of size classes. In each cell we store the + // probability of one size class being faster than the + // other. These values are normalized, so that it is a + // left stochastic matrix. + // + // Because Outcomes.IsFaster() is symmetric, we only + // need to call it once for every pair (i.e., + // (n-1)*(n-2) times). + mFields := make([]float64, n*n) + m := make([][]float64, 0, n) + for i := 0; i < n; i++ { + mFields[i] = 1.0 + m = append(m, mFields[:n]) + mFields = mFields[n:] + } + for i := 1; i < n; i++ { + for j := 0; j < i; j++ { + probability := outcomesList[i].IsFaster(outcomesList[j]) + p1 := probability / float64(n-1) + m[j][i] = p1 + m[j][j] -= p1 + p2 := (1.0 - probability) / float64(n-1) + m[i][j] = p2 + m[i][i] -= p2 + } + } + + // Restore previously computed probabilities from the + // ISCC entry. Using these as a starting point has the + // advantage that we need fewer rounds of the matrix + // multiplication below. + // + // Only restore probabilities that are in range. Also + // infer the first entry from the others, so that + // rounding errors don't accumulate over time. 
+ var probabilitiesSum float64 + for i := 1; i < n; i++ { + probability := 0.5 + if restoredProbability := perSizeClassStatsList[i].InitialPageRankProbability; restoredProbability > 0 && restoredProbability < 1 { + probability = restoredProbability + } + strategies[i].Probability = probability + probabilitiesSum += probability + } + strategies[0].Probability = 1.0 - probabilitiesSum + + // Perform power iteration to compute the eigenvector of + // M, continuing until the rate of convergence drops + // below a certain minimum. + newProbabilities := make([]float64, n) + convergenceIterations := 0 + for { + for i := 0; i < n; i++ { + newProbabilities[i] = 0 + } + for i, column := range m { + for j, v := range column { + newProbabilities[j] += strategies[i].Probability * v + } + } + convergenceIterations++ + + convergenceError := 0.0 + for i := 0; i < n; i++ { + convergenceError += math.Abs(strategies[i].Probability - newProbabilities[i]) + strategies[i].Probability = newProbabilities[i] + } + if convergenceError < sc.maximumConvergenceError { + break + } + } + pageRankStrategyCalculatorConvergenceIterations.Observe(float64(convergenceIterations)) + + // Save the probabilities that have been computed. + for _, perSizeClassStats := range perSizeClassStatsMap { + perSizeClassStats.InitialPageRankProbability = 0 + } + for i, perSizeClassStats := range perSizeClassStatsList { + perSizeClassStats.InitialPageRankProbability = strategies[i].Probability + } + return strategies[:n-1] +} + +func (sc *pageRankStrategyCalculator) GetBackgroundExecutionTimeout(perSizeClassStatsMap map[uint32]*iscc.PerSizeClassStats, sizeClasses []uint32, sizeClassIndex int, originalTimeout time.Duration) time.Duration { + // Trimmed down version of the algorithm above that is only + // capable of returning the execution timeout for a given size + // class. This is used to obtain the most up-to-date value of + // the execution timeout in case of background runs. 
+ largestSizeClass := sizeClasses[len(sizeClasses)-1] + return sc.getSmallerSizeClassExecutionParameters( + sizeClasses[sizeClassIndex], + largestSizeClass, + *getOutcomesFromPreviousExecutions( + perSizeClassStatsMap[largestSizeClass].PreviousExecutions, + ).GetMedianExecutionTime(), + originalTimeout, + ).executionTimeout +} diff --git a/pkg/scheduler/initialsizeclass/page_rank_strategy_calculator_test.go b/pkg/scheduler/initialsizeclass/page_rank_strategy_calculator_test.go new file mode 100644 index 0000000..5b7d1ab --- /dev/null +++ b/pkg/scheduler/initialsizeclass/page_rank_strategy_calculator_test.go @@ -0,0 +1,419 @@ +package initialsizeclass_test + +import ( + "fmt" + "testing" + "time" + + "github.com/buildbarn/bb-remote-execution/pkg/scheduler/initialsizeclass" + "github.com/buildbarn/bb-storage/pkg/proto/iscc" + "github.com/stretchr/testify/require" + + "google.golang.org/protobuf/types/known/durationpb" + "google.golang.org/protobuf/types/known/emptypb" +) + +// If only a single size class is available, there is no need to make +// any choices. We should always run on that size class. +func TestPageRankStrategyCalculatorSingleSizeClass(t *testing.T) { + strategyCalculator := initialsizeclass.NewPageRankStrategyCalculator(5*time.Second, 0.5, 1.5, 0.001) + require.Empty(t, strategyCalculator.GetStrategies(map[uint32]*iscc.PerSizeClassStats{}, []uint32{8}, 15*time.Minute)) +} + +// requireEqualStrategies compares two lists of Strategy objects for +// equality. Probabilities are compared with an error margin of 0.5%. 
+func requireEqualStrategies(t *testing.T, expected, actual []initialsizeclass.Strategy) { + require.Len(t, actual, len(expected)) + for i := range actual { + require.InDelta(t, expected[i].Probability, actual[i].Probability, 0.005, fmt.Sprintf("Index %d", i)) + expectedStrategy := expected[i] + expectedStrategy.Probability = 0 + actualStrategy := actual[i] + actualStrategy.Probability = 0 + require.Equal(t, expectedStrategy, actualStrategy, fmt.Sprintf("Index %d", i)) + } +} + +// The first time an action is executed, all of the smaller size classes +// should have an equal probability of running the action. +func TestPageRankStrategyCalculatorEmpty(t *testing.T) { + strategyCalculator := initialsizeclass.NewPageRankStrategyCalculator(5*time.Second, 0.5, 1.5, 0.001) + strategies := strategyCalculator.GetStrategies(map[uint32]*iscc.PerSizeClassStats{ + 1: {}, + 2: {}, + 4: {}, + 8: {}, + }, []uint32{1, 2, 4, 8}, 15*time.Minute) + requireEqualStrategies( + t, + []initialsizeclass.Strategy{ + { + Probability: 1.0, + RunInBackground: true, + }, + }, + strategies) +} + +// If the action has succeeded once on both the smallest and the largest +// size class, we can assume it's relatively safe to run the action on +// all size classes. We should propose foreground execution on any size +// class. The size classes without any outcomes should have a higher +// probability, so that those also get trained. 
+func TestPageRankStrategyCalculatorSingleRunSuccess(t *testing.T) { + strategyCalculator := initialsizeclass.NewPageRankStrategyCalculator(5*time.Second, 0.5, 1.5, 0.001) + strategies := strategyCalculator.GetStrategies(map[uint32]*iscc.PerSizeClassStats{ + 1: { + PreviousExecutions: []*iscc.PreviousExecution{ + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Seconds: 1}}}, + }, + }, + 2: {}, + 4: {}, + 8: { + PreviousExecutions: []*iscc.PreviousExecution{ + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Seconds: 1}}}, + }, + }, + }, []uint32{1, 2, 4, 8}, 15*time.Minute) + requireEqualStrategies( + t, + []initialsizeclass.Strategy{ + { + Probability: 0.19, + ForegroundExecutionTimeout: 5 * time.Second, + }, + { + Probability: 0.33, + ForegroundExecutionTimeout: 5 * time.Second, + }, + { + Probability: 0.33, + ForegroundExecutionTimeout: 5 * time.Second, + }, + }, + strategies) +} + +// If execution succeeded on the largest and failed on the smallest, the +// smartest thing to do is to schedule a single background run against +// size class 2. The reason being that if we know that that succeeds, we +// don't need to perform any background runs to train size class 4. 
+func TestPageRankStrategyCalculatorSingleRunFailure(t *testing.T) { + strategyCalculator := initialsizeclass.NewPageRankStrategyCalculator(5*time.Second, 0.5, 1.5, 0.001) + strategies := strategyCalculator.GetStrategies(map[uint32]*iscc.PerSizeClassStats{ + 1: { + PreviousExecutions: []*iscc.PreviousExecution{ + {Outcome: &iscc.PreviousExecution_Failed{Failed: &emptypb.Empty{}}}, + }, + }, + 2: {}, + 4: {}, + 8: { + PreviousExecutions: []*iscc.PreviousExecution{ + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Seconds: 1}}}, + }, + }, + }, []uint32{1, 2, 4, 8}, 15*time.Minute) + requireEqualStrategies( + t, + []initialsizeclass.Strategy{ + { + RunInBackground: true, + }, + { + Probability: 1.0, + RunInBackground: true, + }, + }, + strategies) +} + +// When timeoutMultiplier is set to 1.5, an action with a 900s timeout +// should preferably finish within 600s. It may be the case that this +// can't even be achieved on the largest size class, as the action's +// timeout is set to a very tight value. +// +// In this case the largest size class should be the one with the +// highest probability, so that we reduce the need for doing retries. 
+func TestPageRankStrategyCalculatorCloseToTimeout(t *testing.T) { + strategyCalculator := initialsizeclass.NewPageRankStrategyCalculator(5*time.Second, 0.5, 1.5, 0.001) + strategies := strategyCalculator.GetStrategies(map[uint32]*iscc.PerSizeClassStats{ + 1: { + PreviousExecutions: []*iscc.PreviousExecution{ + {Outcome: &iscc.PreviousExecution_TimedOut{TimedOut: &durationpb.Duration{Seconds: 7, Nanos: 500000000}}}, + {Outcome: &iscc.PreviousExecution_TimedOut{TimedOut: &durationpb.Duration{Seconds: 900}}}, + {Outcome: &iscc.PreviousExecution_TimedOut{TimedOut: &durationpb.Duration{Seconds: 900}}}, + {Outcome: &iscc.PreviousExecution_TimedOut{TimedOut: &durationpb.Duration{Seconds: 900}}}, + {Outcome: &iscc.PreviousExecution_TimedOut{TimedOut: &durationpb.Duration{Seconds: 900}}}, + {Outcome: &iscc.PreviousExecution_TimedOut{TimedOut: &durationpb.Duration{Seconds: 900}}}, + {Outcome: &iscc.PreviousExecution_TimedOut{TimedOut: &durationpb.Duration{Seconds: 900}}}, + }, + }, + 2: { + PreviousExecutions: []*iscc.PreviousExecution{ + {Outcome: &iscc.PreviousExecution_TimedOut{TimedOut: &durationpb.Duration{Seconds: 900}}}, + {Outcome: &iscc.PreviousExecution_TimedOut{TimedOut: &durationpb.Duration{Seconds: 900}}}, + {Outcome: &iscc.PreviousExecution_TimedOut{TimedOut: &durationpb.Duration{Seconds: 900}}}, + {Outcome: &iscc.PreviousExecution_TimedOut{TimedOut: &durationpb.Duration{Seconds: 900}}}, + {Outcome: &iscc.PreviousExecution_TimedOut{TimedOut: &durationpb.Duration{Seconds: 900}}}, + {Outcome: &iscc.PreviousExecution_TimedOut{TimedOut: &durationpb.Duration{Seconds: 900}}}, + {Outcome: &iscc.PreviousExecution_TimedOut{TimedOut: &durationpb.Duration{Seconds: 900}}}, + }, + }, + 4: { + PreviousExecutions: []*iscc.PreviousExecution{ + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Seconds: 744, Nanos: 745171748}}}, + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Seconds: 736, Nanos: 585305066}}}, + {Outcome: 
&iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Seconds: 786, Nanos: 526637558}}}, + {Outcome: &iscc.PreviousExecution_TimedOut{TimedOut: &durationpb.Duration{Seconds: 900}}}, + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Seconds: 773, Nanos: 860202581}}}, + }, + }, + 8: { + PreviousExecutions: []*iscc.PreviousExecution{ + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Seconds: 652, Nanos: 236376306}}}, + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Seconds: 624, Nanos: 11911117}}}, + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Seconds: 630, Nanos: 320095712}}}, + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Seconds: 627, Nanos: 102638899}}}, + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Seconds: 651, Nanos: 795797310}}}, + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Seconds: 655, Nanos: 97161482}}}, + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Seconds: 649, Nanos: 54963830}}}, + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Seconds: 653, Nanos: 183883239}}}, + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Seconds: 648, Nanos: 783209241}}}, + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Seconds: 666, Nanos: 485370182}}}, + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Seconds: 640, Nanos: 917318827}}}, + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Seconds: 636, Nanos: 910996040}}}, + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Seconds: 669, Nanos: 358977129}}}, + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Seconds: 638, Nanos: 876466482}}}, + {Outcome: 
&iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Seconds: 667, Nanos: 615625730}}}, + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Seconds: 639, Nanos: 109428595}}}, + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Seconds: 645, Nanos: 421212352}}}, + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Seconds: 659, Nanos: 724568628}}}, + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Seconds: 645, Nanos: 199012224}}}, + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Seconds: 623, Nanos: 819328226}}}, + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Seconds: 642, Nanos: 84340620}}}, + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Seconds: 633, Nanos: 645871363}}}, + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Seconds: 692, Nanos: 204251786}}}, + }, + }, + }, []uint32{1, 2, 4, 8}, 15*time.Minute) + requireEqualStrategies( + t, + []initialsizeclass.Strategy{ + { + Probability: 0.07, + RunInBackground: true, + }, + { + Probability: 0.06, + RunInBackground: true, + }, + { + Probability: 0.07, + RunInBackground: true, + }, + }, + strategies) +} + +// Size classes for which we don't have any outcomes should always +// receive a high probability. This ensures that we properly test all of +// them. 
+func TestPageRankStrategyCalculatorUntestedSizeClass(t *testing.T) { + strategyCalculator := initialsizeclass.NewPageRankStrategyCalculator(5*time.Second, 0.5, 1.5, 0.001) + strategies := strategyCalculator.GetStrategies(map[uint32]*iscc.PerSizeClassStats{ + 1: { + PreviousExecutions: []*iscc.PreviousExecution{ + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Nanos: 19941089}}}, + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Nanos: 20017118}}}, + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Nanos: 21509286}}}, + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Nanos: 31062553}}}, + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Nanos: 32028792}}}, + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Nanos: 56637488}}}, + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Nanos: 20011641}}}, + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Nanos: 32338320}}}, + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Nanos: 21190311}}}, + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Nanos: 19520433}}}, + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Nanos: 19496810}}}, + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Nanos: 34248944}}}, + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Nanos: 39543182}}}, + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Nanos: 21466694}}}, + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Nanos: 20287814}}}, + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Nanos: 20572146}}}, + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Nanos: 20582404}}}, + {Outcome: 
&iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Nanos: 21701414}}}, + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Nanos: 21688507}}}, + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Nanos: 20296545}}}, + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Nanos: 19621454}}}, + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Nanos: 41513823}}}, + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Nanos: 22492816}}}, + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Nanos: 20089137}}}, + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Nanos: 36233309}}}, + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Nanos: 21063001}}}, + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Nanos: 37055862}}}, + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Nanos: 18909835}}}, + }, + }, + 2: {}, + 4: { + PreviousExecutions: []*iscc.PreviousExecution{ + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Nanos: 19648577}}}, + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Nanos: 26058621}}}, + }, + }, + 8: { + PreviousExecutions: []*iscc.PreviousExecution{ + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Nanos: 21127338}}}, + }, + }, + }, []uint32{1, 2, 4, 8}, 15*time.Minute) + requireEqualStrategies( + t, + []initialsizeclass.Strategy{ + { + Probability: 0.14, + ForegroundExecutionTimeout: 5 * time.Second, + }, + { + Probability: 0.56, + ForegroundExecutionTimeout: 5 * time.Second, + }, + { + Probability: 0.15, + ForegroundExecutionTimeout: 5 * time.Second, + }, + }, + strategies) +} + +// Test the extreme case, where an action always fails on all size +// classes, except the largest. 
The resulting probability values should +// be very low. +func TestPageRankStrategyCalculatorExtremelyHighProbability(t *testing.T) { + strategyCalculator := initialsizeclass.NewPageRankStrategyCalculator(5*time.Second, 1.0, 1.5, 0.001) + thirtyFailures := iscc.PerSizeClassStats{ + PreviousExecutions: []*iscc.PreviousExecution{ + {Outcome: &iscc.PreviousExecution_Failed{Failed: &emptypb.Empty{}}}, + {Outcome: &iscc.PreviousExecution_Failed{Failed: &emptypb.Empty{}}}, + {Outcome: &iscc.PreviousExecution_Failed{Failed: &emptypb.Empty{}}}, + {Outcome: &iscc.PreviousExecution_Failed{Failed: &emptypb.Empty{}}}, + {Outcome: &iscc.PreviousExecution_Failed{Failed: &emptypb.Empty{}}}, + {Outcome: &iscc.PreviousExecution_Failed{Failed: &emptypb.Empty{}}}, + {Outcome: &iscc.PreviousExecution_Failed{Failed: &emptypb.Empty{}}}, + {Outcome: &iscc.PreviousExecution_Failed{Failed: &emptypb.Empty{}}}, + {Outcome: &iscc.PreviousExecution_Failed{Failed: &emptypb.Empty{}}}, + {Outcome: &iscc.PreviousExecution_Failed{Failed: &emptypb.Empty{}}}, + + {Outcome: &iscc.PreviousExecution_Failed{Failed: &emptypb.Empty{}}}, + {Outcome: &iscc.PreviousExecution_Failed{Failed: &emptypb.Empty{}}}, + {Outcome: &iscc.PreviousExecution_Failed{Failed: &emptypb.Empty{}}}, + {Outcome: &iscc.PreviousExecution_Failed{Failed: &emptypb.Empty{}}}, + {Outcome: &iscc.PreviousExecution_Failed{Failed: &emptypb.Empty{}}}, + {Outcome: &iscc.PreviousExecution_Failed{Failed: &emptypb.Empty{}}}, + {Outcome: &iscc.PreviousExecution_Failed{Failed: &emptypb.Empty{}}}, + {Outcome: &iscc.PreviousExecution_Failed{Failed: &emptypb.Empty{}}}, + {Outcome: &iscc.PreviousExecution_Failed{Failed: &emptypb.Empty{}}}, + {Outcome: &iscc.PreviousExecution_Failed{Failed: &emptypb.Empty{}}}, + + {Outcome: &iscc.PreviousExecution_Failed{Failed: &emptypb.Empty{}}}, + {Outcome: &iscc.PreviousExecution_Failed{Failed: &emptypb.Empty{}}}, + {Outcome: &iscc.PreviousExecution_Failed{Failed: &emptypb.Empty{}}}, + {Outcome: 
&iscc.PreviousExecution_Failed{Failed: &emptypb.Empty{}}}, + {Outcome: &iscc.PreviousExecution_Failed{Failed: &emptypb.Empty{}}}, + {Outcome: &iscc.PreviousExecution_Failed{Failed: &emptypb.Empty{}}}, + {Outcome: &iscc.PreviousExecution_Failed{Failed: &emptypb.Empty{}}}, + {Outcome: &iscc.PreviousExecution_Failed{Failed: &emptypb.Empty{}}}, + {Outcome: &iscc.PreviousExecution_Failed{Failed: &emptypb.Empty{}}}, + {Outcome: &iscc.PreviousExecution_Failed{Failed: &emptypb.Empty{}}}, + }, + } + strategies := strategyCalculator.GetStrategies(map[uint32]*iscc.PerSizeClassStats{ + 1: &thirtyFailures, + 2: &thirtyFailures, + 4: &thirtyFailures, + 8: { + PreviousExecutions: []*iscc.PreviousExecution{ + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Seconds: 14}}}, + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Seconds: 14}}}, + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Seconds: 14}}}, + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Seconds: 14}}}, + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Seconds: 14}}}, + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Seconds: 14}}}, + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Seconds: 14}}}, + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Seconds: 14}}}, + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Seconds: 14}}}, + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Seconds: 14}}}, + + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Seconds: 15}}}, + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Seconds: 15}}}, + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Seconds: 15}}}, + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: 
&durationpb.Duration{Seconds: 15}}}, + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Seconds: 15}}}, + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Seconds: 15}}}, + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Seconds: 15}}}, + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Seconds: 15}}}, + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Seconds: 15}}}, + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Seconds: 15}}}, + + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Seconds: 16}}}, + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Seconds: 16}}}, + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Seconds: 16}}}, + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Seconds: 16}}}, + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Seconds: 16}}}, + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Seconds: 16}}}, + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Seconds: 16}}}, + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Seconds: 16}}}, + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Seconds: 16}}}, + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Seconds: 16}}}, + }, + }, + }, []uint32{1, 2, 4, 8}, 15*time.Minute) + requireEqualStrategies( + t, + []initialsizeclass.Strategy{ + { + Probability: 0.02, + RunInBackground: true, + }, + { + Probability: 0.02, + RunInBackground: true, + }, + { + Probability: 0.02, + RunInBackground: true, + }, + }, + strategies) +} + +// Due to measurement inaccuracies of execution times on workers, it may +// be the case that stats messages contain durations that are slightly +// 
out of bounds. Even in those cases should GetStrategies() and +// GetBackgroundExecutionTimeout() behave correctly and return proper +// results. +func TestPageRankStrategyCalculatorExecutionTimesLargerThanTimeout(t *testing.T) { + strategyCalculator := initialsizeclass.NewPageRankStrategyCalculator(5*time.Second, 1.0, 1.5, 0.001) + stats := map[uint32]*iscc.PerSizeClassStats{ + 8: { + PreviousExecutions: []*iscc.PreviousExecution{ + {Outcome: &iscc.PreviousExecution_Succeeded{Succeeded: &durationpb.Duration{Seconds: 151}}}, + }, + }, + } + + requireEqualStrategies( + t, + []initialsizeclass.Strategy{ + { + Probability: 1.0, + RunInBackground: true, + }, + }, + strategyCalculator.GetStrategies(stats, []uint32{1, 2, 4, 8}, 150*time.Second)) + require.Equal( + t, + 150*time.Second, + strategyCalculator.GetBackgroundExecutionTimeout(stats, []uint32{1, 2, 4, 8}, 0, 150*time.Second)) +} diff --git a/pkg/scheduler/initialsizeclass/smallest_size_class_strategy_calculator.go b/pkg/scheduler/initialsizeclass/smallest_size_class_strategy_calculator.go new file mode 100644 index 0000000..b4e5835 --- /dev/null +++ b/pkg/scheduler/initialsizeclass/smallest_size_class_strategy_calculator.go @@ -0,0 +1,33 @@ +package initialsizeclass + +import ( + "time" + + "github.com/buildbarn/bb-storage/pkg/proto/iscc" +) + +type smallestSizeClassStrategyCalculator struct{} + +func (sc smallestSizeClassStrategyCalculator) GetStrategies(perSizeClassStatsMap map[uint32]*iscc.PerSizeClassStats, sizeClasses []uint32, originalTimeout time.Duration) []Strategy { + if len(sizeClasses) <= 1 { + return nil + } + return []Strategy{ + { + Probability: 1.0, + ForegroundExecutionTimeout: originalTimeout, + }, + } +} + +func (sc smallestSizeClassStrategyCalculator) GetBackgroundExecutionTimeout(perSizeClassStatsMap map[uint32]*iscc.PerSizeClassStats, sizeClasses []uint32, sizeClassIndex int, originalTimeout time.Duration) time.Duration { + panic("Background execution should not be performed") +} + +// 
SmallestSizeClassStrategyCalculator implements a StrategyCalculator
+// that always prefers running actions on the smallest size class.
+//
+// This StrategyCalculator behaves similarly to FallbackAnalyzer, with the
+// main difference that it still causes execution times and outcomes to
+// be tracked in the Initial Size Class Cache (ISCC).
+var SmallestSizeClassStrategyCalculator StrategyCalculator = smallestSizeClassStrategyCalculator{}
diff --git a/pkg/scheduler/initialsizeclass/strategy_calculator.go b/pkg/scheduler/initialsizeclass/strategy_calculator.go
new file mode 100644
index 0000000..36b2306
--- /dev/null
+++ b/pkg/scheduler/initialsizeclass/strategy_calculator.go
@@ -0,0 +1,47 @@
+package initialsizeclass
+
+import (
+	"time"
+
+	"github.com/buildbarn/bb-storage/pkg/proto/iscc"
+)
+
+// Strategy for running an action on a size class that is not the
+// largest size class.
+type Strategy struct {
+	// Probability between [0.0, 1.0] at which this strategy should
+	// be chosen. The sum of all probabilities returned by
+	// GetStrategies() should at most be 1.0. If the sum of all
+	// probabilities is less than 1.0, the remainder should be the
+	// probability of running the action on the largest size class.
+	Probability float64
+	// Whether the action has a high probability of failing. In that
+	// case it is preferable to run the action on the largest size
+	// class immediately, only running it on the smaller size class
+	// in the background afterwards.
+	RunInBackground bool
+	// The execution timeout to use when running this action in the
+	// foreground on this size class. For the largest size class,
+	// the original timeout value should be used.
+	//
+	// To obtain the execution timeout when running this action in
+	// the background, a separate call to
+	// GetBackgroundExecutionTimeout() needs to be made. 
This
+	// ensures that the latest obtained execution time of the
+	// foreground execution on the largest size class is taken into
+	// account when computing the timeout for the smaller size
+	// class.
+	ForegroundExecutionTimeout time.Duration
+}
+
+// StrategyCalculator is responsible for computing the probabilities for
+// choosing to run an action on size classes. Given a list of n size
+// classes, this function will return a list of n-1 strategies for
+// running the action on the smaller size classes.
+//
+// No strategy for the largest size class is returned, as both its
+// probability and options can be inferred.
+type StrategyCalculator interface {
+	GetStrategies(perSizeClassStatsMap map[uint32]*iscc.PerSizeClassStats, sizeClasses []uint32, originalTimeout time.Duration) []Strategy
+	GetBackgroundExecutionTimeout(perSizeClassStatsMap map[uint32]*iscc.PerSizeClassStats, sizeClasses []uint32, sizeClassIndex int, originalTimeout time.Duration) time.Duration
+}
diff --git a/pkg/scheduler/invocation/BUILD.bazel b/pkg/scheduler/invocation/BUILD.bazel
new file mode 100644
index 0000000..3ce5cbd
--- /dev/null
+++ b/pkg/scheduler/invocation/BUILD.bazel
@@ -0,0 +1,42 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+go_library(
+    name = "invocation",
+    srcs = [
+        "authentication_metadata_key_extractor.go",
+        "configuration.go",
+        "correlated_invocations_id_key_extractor.go",
+        "key.go",
+        "key_extractor.go",
+        "tool_invocation_id_key_extractor.go",
+    ],
+    importpath = "github.com/buildbarn/bb-remote-execution/pkg/scheduler/invocation",
+    visibility = ["//visibility:public"],
+    deps = [
+        "//pkg/proto/buildqueuestate",
+        "//pkg/proto/configuration/scheduler",
+        "@com_github_bazelbuild_remote_apis//build/bazel/remote/execution/v2:execution",
+        "@com_github_buildbarn_bb_storage//pkg/auth",
+        "@com_github_buildbarn_bb_storage//pkg/util",
+        "@org_golang_google_grpc//codes",
+        "@org_golang_google_grpc//status",
+        
"@org_golang_google_protobuf//encoding/protojson", + "@org_golang_google_protobuf//proto", + "@org_golang_google_protobuf//types/known/anypb", + ], +) + +go_test( + name = "invocation_test", + srcs = [ + "correlated_invocations_id_key_extractor_test.go", + "tool_invocation_id_key_extractor_test.go", + ], + deps = [ + ":invocation", + "@com_github_bazelbuild_remote_apis//build/bazel/remote/execution/v2:execution", + "@com_github_buildbarn_bb_storage//pkg/testutil", + "@com_github_stretchr_testify//require", + "@org_golang_google_protobuf//types/known/anypb", + ], +) diff --git a/pkg/scheduler/invocation/authentication_metadata_key_extractor.go b/pkg/scheduler/invocation/authentication_metadata_key_extractor.go new file mode 100644 index 0000000..a957076 --- /dev/null +++ b/pkg/scheduler/invocation/authentication_metadata_key_extractor.go @@ -0,0 +1,28 @@ +package invocation + +import ( + "context" + + remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + "github.com/buildbarn/bb-storage/pkg/auth" + + "google.golang.org/protobuf/types/known/anypb" +) + +type authenticationMetadataKeyExtractor struct{} + +func (ke authenticationMetadataKeyExtractor) ExtractKey(ctx context.Context, requestMetadata *remoteexecution.RequestMetadata) (Key, error) { + authenticationMetadata, _ := auth.AuthenticationMetadataFromContext(ctx).GetPublicProto() + any, err := anypb.New(authenticationMetadata) + if err != nil { + return "", err + } + return NewKey(any) +} + +// AuthenticationMetadataKeyExtractor is an implementation of +// KeyExtractor that returns a Key that is based on the publicly +// displayable part of the authentication metadata. This will cause +// InMemoryBuildQueue to group all operations created by the same user +// together, which ensures fair scheduling between users. 
+var AuthenticationMetadataKeyExtractor KeyExtractor = authenticationMetadataKeyExtractor{}
diff --git a/pkg/scheduler/invocation/configuration.go b/pkg/scheduler/invocation/configuration.go
new file mode 100644
index 0000000..d28cd71
--- /dev/null
+++ b/pkg/scheduler/invocation/configuration.go
@@ -0,0 +1,26 @@
+package invocation
+
+import (
+	pb "github.com/buildbarn/bb-remote-execution/pkg/proto/configuration/scheduler"
+
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+// NewKeyExtractorFromConfiguration creates a KeyExtractor based on
+// settings provided in a configuration file.
+func NewKeyExtractorFromConfiguration(configuration *pb.InvocationKeyExtractorConfiguration) (KeyExtractor, error) {
+	if configuration == nil {
+		return nil, status.Error(codes.InvalidArgument, "No invocation key extractor configuration provided")
+	}
+	switch configuration.Kind.(type) {
+	case *pb.InvocationKeyExtractorConfiguration_ToolInvocationId:
+		return ToolInvocationIDKeyExtractor, nil
+	case *pb.InvocationKeyExtractorConfiguration_CorrelatedInvocationsId:
+		return CorrelatedInvocationsIDKeyExtractor, nil
+	case *pb.InvocationKeyExtractorConfiguration_AuthenticationMetadata:
+		return AuthenticationMetadataKeyExtractor, nil
+	default:
+		return nil, status.Error(codes.InvalidArgument, "Configuration did not contain a supported invocation key extractor type")
+	}
+}
diff --git a/pkg/scheduler/invocation/correlated_invocations_id_key_extractor.go b/pkg/scheduler/invocation/correlated_invocations_id_key_extractor.go
new file mode 100644
index 0000000..4639b73
--- /dev/null
+++ b/pkg/scheduler/invocation/correlated_invocations_id_key_extractor.go
@@ -0,0 +1,29 @@
+package invocation
+
+import (
+	"context"
+
+	remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2"
+
+	"google.golang.org/protobuf/types/known/anypb"
+)
+
+type correlatedInvocationsIDKeyExtractor struct{}
+
+func (ke correlatedInvocationsIDKeyExtractor) 
ExtractKey(ctx context.Context, requestMetadata *remoteexecution.RequestMetadata) (Key, error) {
+	any, err := anypb.New(&remoteexecution.RequestMetadata{
+		CorrelatedInvocationsId: requestMetadata.GetCorrelatedInvocationsId(),
+	})
+	if err != nil {
+		return "", err
+	}
+	return NewKey(any)
+}
+
+// CorrelatedInvocationsIDKeyExtractor is an implementation of
+// KeyExtractor that returns a Key that is based on the
+// correlated_invocations_id field of the RequestMetadata provided by a
+// client. This will cause InMemoryBuildQueue to group all operations
+// created by all invocations of Bazel that use the same
+// --build_request_id together, which ensures scheduling fairness.
+var CorrelatedInvocationsIDKeyExtractor KeyExtractor = correlatedInvocationsIDKeyExtractor{}
diff --git a/pkg/scheduler/invocation/correlated_invocations_id_key_extractor_test.go b/pkg/scheduler/invocation/correlated_invocations_id_key_extractor_test.go
new file mode 100644
index 0000000..7f79eb8
--- /dev/null
+++ b/pkg/scheduler/invocation/correlated_invocations_id_key_extractor_test.go
@@ -0,0 +1,35 @@
+package invocation_test
+
+import (
+	"context"
+	"testing"
+
+	remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2"
+	"github.com/buildbarn/bb-remote-execution/pkg/scheduler/invocation"
+	"github.com/buildbarn/bb-storage/pkg/testutil"
+	"github.com/stretchr/testify/require"
+
+	"google.golang.org/protobuf/types/known/anypb"
+)
+
+func TestCorrelatedInvocationsIDInvocationKeyExtractor(t *testing.T) {
+	ctx := context.Background()
+
+	key, err := invocation.CorrelatedInvocationsIDKeyExtractor.ExtractKey(ctx, &remoteexecution.RequestMetadata{
+		ToolDetails: &remoteexecution.ToolDetails{
+			ToolName:    "bazel",
+			ToolVersion: "4.2.1",
+		},
+		ToolInvocationId:        "9c9e7705-d757-4e57-b0df-58bc69c1cb51",
+		CorrelatedInvocationsId: "92d71492-175e-40ba-9c8c-3b5d3b9a6808",
+		ActionMnemonic:          "CppLink",
+		TargetId:                "//:hello_world",
+		ConfigurationId:         
"cfdad5b3966911c7ca6cf551c4b64c1bbe3642f1d1f7ec373bc449671e1d5c02", + }) + require.NoError(t, err) + id, err := anypb.New(&remoteexecution.RequestMetadata{ + CorrelatedInvocationsId: "92d71492-175e-40ba-9c8c-3b5d3b9a6808", + }) + require.NoError(t, err) + testutil.RequireEqualProto(t, id, key.GetID()) +} diff --git a/pkg/scheduler/invocation/key.go b/pkg/scheduler/invocation/key.go new file mode 100644 index 0000000..e34527b --- /dev/null +++ b/pkg/scheduler/invocation/key.go @@ -0,0 +1,66 @@ +package invocation + +import ( + "fmt" + + "github.com/buildbarn/bb-remote-execution/pkg/proto/buildqueuestate" + "github.com/buildbarn/bb-storage/pkg/util" + + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/anypb" +) + +func mustNewAny(src proto.Message) *anypb.Any { + any, err := anypb.New(src) + if err != nil { + panic(err) + } + return any +} + +// Key for identifying client invocations. InMemoryBuildQueue uses this +// type to group operations within a given size class queue. This +// grouping is used to introduce fairness between builds. +// +// For most setups, it is sufficient to set the Key to the tool +// invocation ID that's part of the RequestMetadata header. For more +// advanced setups, it may be recommended to include information such as +// the username. +type Key string + +// NewKey creates a new key based on a freeform Protobuf message. +func NewKey(id *anypb.Any) (Key, error) { + data, err := protojson.Marshal(id) + if err != nil { + return "", util.StatusWrap(err, "Failed to marshal invocation ID") + } + return Key(data), nil +} + +// MustNewKey is identical to NewKey, except that it panics in case of +// failures. +func MustNewKey(id *anypb.Any) Key { + key, err := NewKey(id) + if err != nil { + panic(err) + } + return key +} + +// GetID reobtains the Protobuf message that was used to construct the +// key. 
+func (k Key) GetID() *anypb.Any {
+	var id anypb.Any
+	if err := protojson.Unmarshal([]byte(k), &id); err != nil {
+		panic(fmt.Sprintf("Failed to unmarshal previously marshalled invocation ID: %s", err))
+	}
+	return &id
+}
+
+// BackgroundLearningKeys is a predefined list of Keys that is used for
+// all operations that are created to perform background learning (see
+// initialsizeclass.FeedbackAnalyzer).
+var BackgroundLearningKeys = []Key{
+	MustNewKey(mustNewAny(&buildqueuestate.BackgroundLearning{})),
+}
diff --git a/pkg/scheduler/invocation/key_extractor.go b/pkg/scheduler/invocation/key_extractor.go
new file mode 100644
index 0000000..21bcf79
--- /dev/null
+++ b/pkg/scheduler/invocation/key_extractor.go
@@ -0,0 +1,17 @@
+package invocation
+
+import (
+	"context"
+
+	remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2"
+)
+
+// KeyExtractor is responsible for extracting an invocation key from an
+// incoming execution request. Operations will be grouped by invocation
+// key and scheduled fairly.
+//
+// Implementations of KeyExtractor may construct keys based on REv2
+// request metadata or user credentials.
+type KeyExtractor interface { + ExtractKey(ctx context.Context, requestMetadata *remoteexecution.RequestMetadata) (Key, error) +} diff --git a/pkg/scheduler/invocation/tool_invocation_id_key_extractor.go b/pkg/scheduler/invocation/tool_invocation_id_key_extractor.go new file mode 100644 index 0000000..2bcbea2 --- /dev/null +++ b/pkg/scheduler/invocation/tool_invocation_id_key_extractor.go @@ -0,0 +1,28 @@ +package invocation + +import ( + "context" + + remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + + "google.golang.org/protobuf/types/known/anypb" +) + +type toolInvocationIDKeyExtractor struct{} + +func (ke toolInvocationIDKeyExtractor) ExtractKey(ctx context.Context, requestMetadata *remoteexecution.RequestMetadata) (Key, error) { + any, err := anypb.New(&remoteexecution.RequestMetadata{ + ToolInvocationId: requestMetadata.GetToolInvocationId(), + }) + if err != nil { + return "", err + } + return NewKey(any) +} + +// ToolInvocationIDKeyExtractor is an implementation of KeyExtractor +// that returns a Key that is based on the tool_invocation_id field of +// the RequestMetadata provided by a client. This will cause +// InMemoryBuildQueue to group all operations created by a single +// invocation of Bazel together, which ensures scheduling fairness. 
+var ToolInvocationIDKeyExtractor KeyExtractor = toolInvocationIDKeyExtractor{} diff --git a/pkg/scheduler/invocation/tool_invocation_id_key_extractor_test.go b/pkg/scheduler/invocation/tool_invocation_id_key_extractor_test.go new file mode 100644 index 0000000..442e8de --- /dev/null +++ b/pkg/scheduler/invocation/tool_invocation_id_key_extractor_test.go @@ -0,0 +1,35 @@ +package invocation_test + +import ( + "context" + "testing" + + remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + "github.com/buildbarn/bb-remote-execution/pkg/scheduler/invocation" + "github.com/buildbarn/bb-storage/pkg/testutil" + "github.com/stretchr/testify/require" + + "google.golang.org/protobuf/types/known/anypb" +) + +func TestToolInvocationIDInvocationKeyExtractor(t *testing.T) { + ctx := context.Background() + + key, err := invocation.ToolInvocationIDKeyExtractor.ExtractKey(ctx, &remoteexecution.RequestMetadata{ + ToolDetails: &remoteexecution.ToolDetails{ + ToolName: "bazel", + ToolVersion: "4.2.1", + }, + ToolInvocationId: "9c9e7705-d757-4e57-b0df-58bc69c1cb51", + CorrelatedInvocationsId: "92d71492-175e-40ba-9c8c-3b5d3b9a6808", + ActionMnemonic: "CppLink", + TargetId: "//:hello_world", + ConfigurationId: "cfdad5b3966911c7ca6cf551c4b64c1bbe3642f1d1f7ec373bc449671e1d5c02", + }) + require.NoError(t, err) + id, err := anypb.New(&remoteexecution.RequestMetadata{ + ToolInvocationId: "9c9e7705-d757-4e57-b0df-58bc69c1cb51", + }) + require.NoError(t, err) + testutil.RequireEqualProto(t, id, key.GetID()) +} diff --git a/pkg/scheduler/platform/BUILD.bazel b/pkg/scheduler/platform/BUILD.bazel new file mode 100644 index 0000000..bfb4e2f --- /dev/null +++ b/pkg/scheduler/platform/BUILD.bazel @@ -0,0 +1,52 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "platform", + srcs = [ + "action_and_command_key_extractor.go", + "action_key_extractor.go", + "configuration.go", + "key.go", + "key_extractor.go", + 
"static_key_extractor.go", + "trie.go", + ], + importpath = "github.com/buildbarn/bb-remote-execution/pkg/scheduler/platform", + visibility = ["//visibility:public"], + deps = [ + "//pkg/proto/buildqueuestate", + "//pkg/proto/configuration/scheduler", + "@com_github_bazelbuild_remote_apis//build/bazel/remote/execution/v2:execution", + "@com_github_buildbarn_bb_storage//pkg/blobstore", + "@com_github_buildbarn_bb_storage//pkg/digest", + "@com_github_buildbarn_bb_storage//pkg/util", + "@com_github_golang_protobuf//jsonpb:go_default_library_gen", + "@com_github_prometheus_client_golang//prometheus", + "@org_golang_google_grpc//codes", + "@org_golang_google_grpc//status", + "@org_golang_google_protobuf//encoding/protojson", + ], +) + +go_test( + name = "platform_test", + srcs = [ + "action_and_command_key_extractor_test.go", + "action_key_extractor_test.go", + "key_test.go", + "static_key_extractor_test.go", + ], + deps = [ + ":platform", + "//internal/mock", + "//pkg/proto/buildqueuestate", + "@com_github_bazelbuild_remote_apis//build/bazel/remote/execution/v2:execution", + "@com_github_buildbarn_bb_storage//pkg/blobstore/buffer", + "@com_github_buildbarn_bb_storage//pkg/digest", + "@com_github_buildbarn_bb_storage//pkg/testutil", + "@com_github_golang_mock//gomock", + "@com_github_stretchr_testify//require", + "@org_golang_google_grpc//codes", + "@org_golang_google_grpc//status", + ], +) diff --git a/pkg/scheduler/platform/action_and_command_key_extractor.go b/pkg/scheduler/platform/action_and_command_key_extractor.go new file mode 100644 index 0000000..0fcd659 --- /dev/null +++ b/pkg/scheduler/platform/action_and_command_key_extractor.go @@ -0,0 +1,79 @@ +package platform + +import ( + "context" + "sync" + + remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + "github.com/buildbarn/bb-storage/pkg/blobstore" + "github.com/buildbarn/bb-storage/pkg/digest" + "github.com/buildbarn/bb-storage/pkg/util" + 
"github.com/prometheus/client_golang/prometheus"
+)
+
+var (
+	actionAndCommandKeyExtractorPrometheusMetrics sync.Once
+
+	actionAndCommandKeyExtractorCommandMessagesReadTotal = prometheus.NewCounter(
+		prometheus.CounterOpts{
+			Namespace: "buildbarn",
+			Subsystem: "builder",
+			Name:      "action_and_command_key_extractor_command_messages_read_total",
+			Help:      "Number of times REv2 Action messages did not contain platform properties, meaning Command messages had to be loaded instead.",
+		})
+)
+
+type actionAndCommandKeyExtractor struct {
+	contentAddressableStorage blobstore.BlobAccess
+	maximumMessageSizeBytes   int
+}
+
+// NewActionAndCommandKeyExtractor creates a new KeyExtractor that is capable
+// of extracting a platform key from an REv2 Action message. If no
+// platform properties are specified in the Action, it falls back to
+// reading them from the Command message.
+//
+// This platform key extractor needs to be used if requests from clients
+// that implement REv2.1 or older need to be processed, as platform
+// properties were only added to the Action message in REv2.2.
+func NewActionAndCommandKeyExtractor(contentAddressableStorage blobstore.BlobAccess, maximumMessageSizeBytes int) KeyExtractor {
+	actionAndCommandKeyExtractorPrometheusMetrics.Do(func() {
+		prometheus.MustRegister(actionAndCommandKeyExtractorCommandMessagesReadTotal)
+	})
+
+	return &actionAndCommandKeyExtractor{
+		contentAddressableStorage: contentAddressableStorage,
+		maximumMessageSizeBytes:   maximumMessageSizeBytes,
+	}
+}
+
+func (ke *actionAndCommandKeyExtractor) ExtractKey(ctx context.Context, digestFunction digest.Function, action *remoteexecution.Action) (Key, error) {
+	instanceName := digestFunction.GetInstanceName()
+	if action.Platform != nil {
+		// REv2.2 or newer: platform properties are stored in
+		// the Action message.
+ key, err := NewKey(instanceName, action.Platform) + if err != nil { + return Key{}, util.StatusWrap(err, "Failed to extract platform key from action") + } + return key, nil + } + + // REv2.1 or older: platform properties are stored in the + // Command message. + commandDigest, err := digestFunction.NewDigestFromProto(action.CommandDigest) + if err != nil { + return Key{}, util.StatusWrap(err, "Failed to extract digest for command") + } + commandMessage, err := ke.contentAddressableStorage.Get(ctx, commandDigest).ToProto(&remoteexecution.Command{}, ke.maximumMessageSizeBytes) + if err != nil { + return Key{}, util.StatusWrap(err, "Failed to obtain command") + } + command := commandMessage.(*remoteexecution.Command) + key, err := NewKey(instanceName, command.Platform) + if err != nil { + return Key{}, util.StatusWrap(err, "Failed to extract platform key from command") + } + actionAndCommandKeyExtractorCommandMessagesReadTotal.Inc() + return key, nil +} diff --git a/pkg/scheduler/platform/action_and_command_key_extractor_test.go b/pkg/scheduler/platform/action_and_command_key_extractor_test.go new file mode 100644 index 0000000..e0b116d --- /dev/null +++ b/pkg/scheduler/platform/action_and_command_key_extractor_test.go @@ -0,0 +1,152 @@ +package platform_test + +import ( + "context" + "testing" + + remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + "github.com/buildbarn/bb-remote-execution/internal/mock" + "github.com/buildbarn/bb-remote-execution/pkg/proto/buildqueuestate" + "github.com/buildbarn/bb-remote-execution/pkg/scheduler/platform" + "github.com/buildbarn/bb-storage/pkg/blobstore/buffer" + "github.com/buildbarn/bb-storage/pkg/digest" + "github.com/buildbarn/bb-storage/pkg/testutil" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func TestActionAndCommandKeyExtractor(t *testing.T) { + ctrl, ctx := 
gomock.WithContext(context.Background(), t) + + contentAddressableStorage := mock.NewMockBlobAccess(ctrl) + keyExtractor := platform.NewActionAndCommandKeyExtractor(contentAddressableStorage, 1024*1024) + digestFunction := digest.MustNewFunction("hello", remoteexecution.DigestFunction_MD5) + + t.Run("ActionInvalidProperties", func(t *testing.T) { + _, err := keyExtractor.ExtractKey(ctx, digestFunction, &remoteexecution.Action{ + Platform: &remoteexecution.Platform{ + Properties: []*remoteexecution.Platform_Property{ + {Name: "os", Value: "linux"}, + {Name: "arch", Value: "aarch64"}, + }, + }, + }) + testutil.RequirePrefixedStatus(t, status.Error(codes.InvalidArgument, "Failed to extract platform key from action: Platform properties are not lexicographically sorted, as property "), err) + }) + + t.Run("ActionSuccess", func(t *testing.T) { + key, err := keyExtractor.ExtractKey(ctx, digestFunction, &remoteexecution.Action{ + Platform: &remoteexecution.Platform{ + Properties: []*remoteexecution.Platform_Property{ + {Name: "arch", Value: "aarch64"}, + {Name: "os", Value: "linux"}, + }, + }, + }) + require.NoError(t, err) + testutil.RequireEqualProto(t, &buildqueuestate.PlatformQueueName{ + InstanceNamePrefix: "hello", + Platform: &remoteexecution.Platform{ + Properties: []*remoteexecution.Platform_Property{ + {Name: "arch", Value: "aarch64"}, + {Name: "os", Value: "linux"}, + }, + }, + }, key.GetPlatformQueueName()) + }) + + t.Run("CommandInvalidDigest", func(t *testing.T) { + _, err := keyExtractor.ExtractKey(ctx, digestFunction, &remoteexecution.Action{ + CommandDigest: &remoteexecution.Digest{ + Hash: "4216455ceebbc3038bd0550c85b6a3bf", + SizeBytes: -1, + }, + }) + testutil.RequireEqualStatus(t, status.Error(codes.InvalidArgument, "Failed to extract digest for command: Invalid digest size: -1 bytes"), err) + }) + + t.Run("CommandStorageFailure", func(t *testing.T) { + contentAddressableStorage.EXPECT().Get(ctx, digest.MustNewDigest("hello", 
remoteexecution.DigestFunction_MD5, "4216455ceebbc3038bd0550c85b6a3bf", 123)). + Return(buffer.NewBufferFromError(status.Error(codes.Internal, "Cannot establish network connection"))) + + _, err := keyExtractor.ExtractKey(ctx, digestFunction, &remoteexecution.Action{ + CommandDigest: &remoteexecution.Digest{ + Hash: "4216455ceebbc3038bd0550c85b6a3bf", + SizeBytes: 123, + }, + }) + testutil.RequireEqualStatus(t, status.Error(codes.Internal, "Failed to obtain command: Cannot establish network connection"), err) + }) + + t.Run("CommandInvalidProperties", func(t *testing.T) { + contentAddressableStorage.EXPECT().Get(ctx, digest.MustNewDigest("hello", remoteexecution.DigestFunction_MD5, "4216455ceebbc3038bd0550c85b6a3bf", 123)). + Return(buffer.NewProtoBufferFromProto(&remoteexecution.Command{ + Platform: &remoteexecution.Platform{ + Properties: []*remoteexecution.Platform_Property{ + {Name: "os", Value: "linux"}, + {Name: "arch", Value: "aarch64"}, + }, + }, + }, buffer.UserProvided)) + + _, err := keyExtractor.ExtractKey(ctx, digestFunction, &remoteexecution.Action{ + CommandDigest: &remoteexecution.Digest{ + Hash: "4216455ceebbc3038bd0550c85b6a3bf", + SizeBytes: 123, + }, + }) + testutil.RequirePrefixedStatus(t, status.Error(codes.InvalidArgument, "Failed to extract platform key from command: Platform properties are not lexicographically sorted, as property "), err) + }) + + t.Run("CommandSuccess", func(t *testing.T) { + contentAddressableStorage.EXPECT().Get(ctx, digest.MustNewDigest("hello", remoteexecution.DigestFunction_MD5, "4216455ceebbc3038bd0550c85b6a3bf", 123)). 
+ Return(buffer.NewProtoBufferFromProto(&remoteexecution.Command{ + Platform: &remoteexecution.Platform{ + Properties: []*remoteexecution.Platform_Property{ + {Name: "arch", Value: "aarch64"}, + {Name: "os", Value: "linux"}, + }, + }, + }, buffer.UserProvided)) + + key, err := keyExtractor.ExtractKey(ctx, digestFunction, &remoteexecution.Action{ + CommandDigest: &remoteexecution.Digest{ + Hash: "4216455ceebbc3038bd0550c85b6a3bf", + SizeBytes: 123, + }, + }) + require.NoError(t, err) + testutil.RequireEqualProto(t, &buildqueuestate.PlatformQueueName{ + InstanceNamePrefix: "hello", + Platform: &remoteexecution.Platform{ + Properties: []*remoteexecution.Platform_Property{ + {Name: "arch", Value: "aarch64"}, + {Name: "os", Value: "linux"}, + }, + }, + }, key.GetPlatformQueueName()) + }) + + t.Run("NoPlatformPresent", func(t *testing.T) { + // If no platform object is, assume the empty set of + // platform properties. Clients such as BuildStream are + // known for not providing them. + contentAddressableStorage.EXPECT().Get(ctx, digest.MustNewDigest("hello", remoteexecution.DigestFunction_MD5, "4216455ceebbc3038bd0550c85b6a3bf", 123)). 
+ Return(buffer.NewProtoBufferFromProto(&remoteexecution.Command{}, buffer.UserProvided)) + + key, err := keyExtractor.ExtractKey(ctx, digestFunction, &remoteexecution.Action{ + CommandDigest: &remoteexecution.Digest{ + Hash: "4216455ceebbc3038bd0550c85b6a3bf", + SizeBytes: 123, + }, + }) + require.NoError(t, err) + testutil.RequireEqualProto(t, &buildqueuestate.PlatformQueueName{ + InstanceNamePrefix: "hello", + Platform: &remoteexecution.Platform{}, + }, key.GetPlatformQueueName()) + }) +} diff --git a/pkg/scheduler/platform/action_key_extractor.go b/pkg/scheduler/platform/action_key_extractor.go new file mode 100644 index 0000000..0482d6b --- /dev/null +++ b/pkg/scheduler/platform/action_key_extractor.go @@ -0,0 +1,22 @@ +package platform + +import ( + "context" + + remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + "github.com/buildbarn/bb-storage/pkg/digest" +) + +type actionKeyExtractor struct{} + +func (ke actionKeyExtractor) ExtractKey(ctx context.Context, digestFunction digest.Function, action *remoteexecution.Action) (Key, error) { + return NewKey(digestFunction.GetInstanceName(), action.Platform) +} + +// ActionKeyExtractor is capable of extracting a platform key from an +// REv2 Action message. +// +// Because it does not fall back to reading platform properties from the +// Command message, it is only capable of processing requests sent by a +// client that implements REv2.2 or newer. 
+var ActionKeyExtractor KeyExtractor = actionKeyExtractor{} diff --git a/pkg/scheduler/platform/action_key_extractor_test.go b/pkg/scheduler/platform/action_key_extractor_test.go new file mode 100644 index 0000000..9ea7527 --- /dev/null +++ b/pkg/scheduler/platform/action_key_extractor_test.go @@ -0,0 +1,67 @@ +package platform_test + +import ( + "context" + "testing" + + remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + "github.com/buildbarn/bb-remote-execution/pkg/proto/buildqueuestate" + "github.com/buildbarn/bb-remote-execution/pkg/scheduler/platform" + "github.com/buildbarn/bb-storage/pkg/digest" + "github.com/buildbarn/bb-storage/pkg/testutil" + "github.com/stretchr/testify/require" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func TestActionKeyExtractor(t *testing.T) { + keyExtractor := platform.ActionKeyExtractor + ctx := context.Background() + digestFunction := digest.MustNewFunction("hello", remoteexecution.DigestFunction_SHA256) + + t.Run("InvalidProperties", func(t *testing.T) { + _, err := keyExtractor.ExtractKey(ctx, digestFunction, &remoteexecution.Action{ + Platform: &remoteexecution.Platform{ + Properties: []*remoteexecution.Platform_Property{ + {Name: "os", Value: "linux"}, + {Name: "arch", Value: "aarch64"}, + }, + }, + }) + testutil.RequirePrefixedStatus(t, status.Error(codes.InvalidArgument, "Platform properties are not lexicographically sorted, as property "), err) + }) + + t.Run("Success", func(t *testing.T) { + key, err := keyExtractor.ExtractKey(ctx, digestFunction, &remoteexecution.Action{ + Platform: &remoteexecution.Platform{ + Properties: []*remoteexecution.Platform_Property{ + {Name: "arch", Value: "aarch64"}, + {Name: "os", Value: "linux"}, + }, + }, + }) + require.NoError(t, err) + testutil.RequireEqualProto(t, &buildqueuestate.PlatformQueueName{ + InstanceNamePrefix: "hello", + Platform: &remoteexecution.Platform{ + Properties: []*remoteexecution.Platform_Property{ + 
{Name: "arch", Value: "aarch64"}, + {Name: "os", Value: "linux"}, + }, + }, + }, key.GetPlatformQueueName()) + }) + + t.Run("NoPlatformPresent", func(t *testing.T) { + // If no platform object is, assume the empty set of + // platform properties. Clients such as BuildStream are + // known for not providing them. + key, err := keyExtractor.ExtractKey(ctx, digestFunction, &remoteexecution.Action{}) + require.NoError(t, err) + testutil.RequireEqualProto(t, &buildqueuestate.PlatformQueueName{ + InstanceNamePrefix: "hello", + Platform: &remoteexecution.Platform{}, + }, key.GetPlatformQueueName()) + }) +} diff --git a/pkg/scheduler/platform/configuration.go b/pkg/scheduler/platform/configuration.go new file mode 100644 index 0000000..1b0a403 --- /dev/null +++ b/pkg/scheduler/platform/configuration.go @@ -0,0 +1,27 @@ +package platform + +import ( + pb "github.com/buildbarn/bb-remote-execution/pkg/proto/configuration/scheduler" + "github.com/buildbarn/bb-storage/pkg/blobstore" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// NewKeyExtractorFromConfiguration creates a new KeyExtractor based on +// options specified in a configuration file. 
+func NewKeyExtractorFromConfiguration(configuration *pb.PlatformKeyExtractorConfiguration, contentAddressableStorage blobstore.BlobAccess, maximumMessageSizeBytes int) (KeyExtractor, error) { + if configuration == nil { + return nil, status.Error(codes.InvalidArgument, "No platform key extractor configuration provided") + } + switch kind := configuration.Kind.(type) { + case *pb.PlatformKeyExtractorConfiguration_Action: + return ActionKeyExtractor, nil + case *pb.PlatformKeyExtractorConfiguration_ActionAndCommand: + return NewActionAndCommandKeyExtractor(contentAddressableStorage, maximumMessageSizeBytes), nil + case *pb.PlatformKeyExtractorConfiguration_Static: + return NewStaticKeyExtractor(kind.Static), nil + default: + return nil, status.Error(codes.InvalidArgument, "Configuration did not contain a supported platform key extractor type") + } +} diff --git a/pkg/scheduler/platform/key.go b/pkg/scheduler/platform/key.go new file mode 100644 index 0000000..843057d --- /dev/null +++ b/pkg/scheduler/platform/key.go @@ -0,0 +1,97 @@ +package platform + +import ( + "fmt" + + remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + "github.com/buildbarn/bb-remote-execution/pkg/proto/buildqueuestate" + "github.com/buildbarn/bb-storage/pkg/digest" + "github.com/buildbarn/bb-storage/pkg/util" + "github.com/golang/protobuf/jsonpb" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/encoding/protojson" +) + +// Key of a platform on which execution may take place. In addition to +// the REv2 platform properties, the key contains the instance name +// value. Each instance name may be associated with its own kinds of +// workers. +type Key struct { + instanceNamePrefix digest.InstanceName + platform string +} + +// NewKey creates a new Key of a platform, given an instance name and +// REv2 platform properties message. 
+func NewKey(instanceNamePrefix digest.InstanceName, platform *remoteexecution.Platform) (Key, error) { + // Ensure that the platform properties are in normal form. + if platform == nil { + platform = &remoteexecution.Platform{} + } + + // REv2 requires that platform properties are lexicographically + // sorted by name and value. + properties := platform.Properties + for i := 1; i < len(properties); i++ { + if properties[i-1].Name > properties[i].Name || + (properties[i-1].Name == properties[i].Name && + properties[i-1].Value >= properties[i].Value) { + marshaler := protojson.MarshalOptions{} + return Key{}, status.Errorf(codes.InvalidArgument, "Platform properties are not lexicographically sorted, as property %s should have been placed before property %s", marshaler.Format(properties[i]), marshaler.Format(properties[i-1])) + } + } + + // TODO: Switch to protojson.Marshal(). We don't want to use it + // right now, as that will cause Prometheus metrics labels to + // become non-deterministic. protojson.Marshal() injects random + // whitespace into its output. + marshaler := jsonpb.Marshaler{} + platformString, err := marshaler.MarshalToString(platform) + if err != nil { + return Key{}, util.StatusWrapWithCode(err, codes.InvalidArgument, "Failed to marshal platform message") + } + return Key{ + instanceNamePrefix: instanceNamePrefix, + platform: platformString, + }, nil +} + +// MustNewKey is identical to NewKey, except that it panics upon failure. +func MustNewKey(instanceNamePrefix string, platform *remoteexecution.Platform) Key { + key, err := NewKey(digest.MustNewInstanceName(instanceNamePrefix), platform) + if err != nil { + panic(err) + } + return key +} + +// GetInstanceNamePrefix returns the instance name that was provided +// when the Key was created. +func (k Key) GetInstanceNamePrefix() digest.InstanceName { + return k.instanceNamePrefix +} + +// GetPlatformString returns the Platform message that was used to +// construct the Key in JSON form. 
This string is generated +// deterministically, so it is safe to use for stable comparisons, map +// keys and Prometheus metric label values. +func (k Key) GetPlatformString() string { + return k.platform +} + +// GetPlatformQueueName reobtains the instance name prefix and Platform +// message that was used to construct the Key. As this is only used +// infrequently, we don't bother keeping the unmarshalled Platform +// message around to preserve memory usage. +func (k Key) GetPlatformQueueName() *buildqueuestate.PlatformQueueName { + var platform remoteexecution.Platform + if err := protojson.Unmarshal([]byte(k.platform), &platform); err != nil { + panic(fmt.Sprintf("Failed to unmarshal previously marshalled platform: %s", err)) + } + return &buildqueuestate.PlatformQueueName{ + InstanceNamePrefix: k.instanceNamePrefix.String(), + Platform: &platform, + } +} diff --git a/pkg/scheduler/platform/key_extractor.go b/pkg/scheduler/platform/key_extractor.go new file mode 100644 index 0000000..25ebf46 --- /dev/null +++ b/pkg/scheduler/platform/key_extractor.go @@ -0,0 +1,21 @@ +package platform + +import ( + "context" + + remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + "github.com/buildbarn/bb-storage/pkg/digest" +) + +// KeyExtractor is responsible for creating a platform key for an +// incoming action execution request, which InMemoryBuildQueue will use +// to determine in which queue an operation is placed. +// +// This interface is used to switch between REv2.1 semantics (reading +// platform properties from the Command message), REv2.2 semantics +// (reading platform properties from the Action message), or to let the +// scheduler rewrite/override platform properties provided by the +// client. 
+type KeyExtractor interface { + ExtractKey(ctx context.Context, digestFunction digest.Function, action *remoteexecution.Action) (Key, error) +} diff --git a/pkg/scheduler/platform/key_test.go b/pkg/scheduler/platform/key_test.go new file mode 100644 index 0000000..e88650a --- /dev/null +++ b/pkg/scheduler/platform/key_test.go @@ -0,0 +1,109 @@ +package platform_test + +import ( + "testing" + + remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + "github.com/buildbarn/bb-remote-execution/pkg/proto/buildqueuestate" + "github.com/buildbarn/bb-remote-execution/pkg/scheduler/platform" + "github.com/buildbarn/bb-storage/pkg/digest" + "github.com/buildbarn/bb-storage/pkg/testutil" + "github.com/stretchr/testify/require" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func TestKey(t *testing.T) { + t.Run("NilPlatform", func(t *testing.T) { + // Some clients (e.g., Buildstream) don't set the + // Platform message at all. Treat it as if the message + // was empty. 
+ k, err := platform.NewKey(digest.EmptyInstanceName, nil) + require.NoError(t, err) + + require.Equal(t, digest.EmptyInstanceName, k.GetInstanceNamePrefix()) + require.Equal(t, "{}", k.GetPlatformString()) + testutil.RequireEqualProto(t, &buildqueuestate.PlatformQueueName{ + InstanceNamePrefix: "", + Platform: &remoteexecution.Platform{}, + }, k.GetPlatformQueueName()) + }) + + t.Run("EmptyPlatform", func(t *testing.T) { + k, err := platform.NewKey(digest.MustNewInstanceName("a/b"), &remoteexecution.Platform{}) + require.NoError(t, err) + + require.Equal(t, digest.MustNewInstanceName("a/b"), k.GetInstanceNamePrefix()) + require.Equal(t, "{}", k.GetPlatformString()) + testutil.RequireEqualProto(t, &buildqueuestate.PlatformQueueName{ + InstanceNamePrefix: "a/b", + Platform: &remoteexecution.Platform{}, + }, k.GetPlatformQueueName()) + }) + + t.Run("InvalidPropertiesOrder", func(t *testing.T) { + // Platform properties must be provided in sorted order, + // as the encoding is ambiguous otherwise. + _, err := platform.NewKey(digest.MustNewInstanceName("a"), &remoteexecution.Platform{ + Properties: []*remoteexecution.Platform_Property{ + { + Name: "os", + Value: "linux", + }, + { + Name: "cpu", + Value: "x86", + }, + }, + }) + testutil.RequirePrefixedStatus(t, status.Error(codes.InvalidArgument, "Platform properties are not lexicographically sorted, as property "), err) + }) + + t.Run("MultipleValues", func(t *testing.T) { + // It is valid to provide multiple values for the same + // platform property, as long as the values are sorted. 
+ k, err := platform.NewKey(digest.MustNewInstanceName("a"), &remoteexecution.Platform{ + Properties: []*remoteexecution.Platform_Property{ + { + Name: "cpu", + Value: "i386", + }, + { + Name: "cpu", + Value: "x86_64", + }, + { + Name: "os", + Value: "linux", + }, + }, + }) + require.NoError(t, err) + + require.Equal(t, digest.MustNewInstanceName("a"), k.GetInstanceNamePrefix()) + require.Equal( + t, + "{\"properties\":[{\"name\":\"cpu\",\"value\":\"i386\"},{\"name\":\"cpu\",\"value\":\"x86_64\"},{\"name\":\"os\",\"value\":\"linux\"}]}", + k.GetPlatformString()) + testutil.RequireEqualProto(t, &buildqueuestate.PlatformQueueName{ + InstanceNamePrefix: "a", + Platform: &remoteexecution.Platform{ + Properties: []*remoteexecution.Platform_Property{ + { + Name: "cpu", + Value: "i386", + }, + { + Name: "cpu", + Value: "x86_64", + }, + { + Name: "os", + Value: "linux", + }, + }, + }, + }, k.GetPlatformQueueName()) + }) +} diff --git a/pkg/scheduler/platform/static_key_extractor.go b/pkg/scheduler/platform/static_key_extractor.go new file mode 100644 index 0000000..ab41d5c --- /dev/null +++ b/pkg/scheduler/platform/static_key_extractor.go @@ -0,0 +1,27 @@ +package platform + +import ( + "context" + + remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + "github.com/buildbarn/bb-storage/pkg/digest" +) + +type staticKeyExtractor struct { + platform *remoteexecution.Platform +} + +// NewStaticKeyExtractor creates a KeyExtractor that ignores the +// platform properties provided as part of the action and returns a +// constant value. This implementation is useful in combination with +// DemultiplexingActionRouter, in case rewriting of platform properties +// needs to be performed. 
+func NewStaticKeyExtractor(platform *remoteexecution.Platform) KeyExtractor { + return &staticKeyExtractor{ + platform: platform, + } +} + +func (ke *staticKeyExtractor) ExtractKey(ctx context.Context, digestFunction digest.Function, action *remoteexecution.Action) (Key, error) { + return NewKey(digestFunction.GetInstanceName(), ke.platform) +} diff --git a/pkg/scheduler/platform/static_key_extractor_test.go b/pkg/scheduler/platform/static_key_extractor_test.go new file mode 100644 index 0000000..809dde0 --- /dev/null +++ b/pkg/scheduler/platform/static_key_extractor_test.go @@ -0,0 +1,43 @@ +package platform_test + +import ( + "context" + "testing" + + remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + "github.com/buildbarn/bb-remote-execution/pkg/proto/buildqueuestate" + "github.com/buildbarn/bb-remote-execution/pkg/scheduler/platform" + "github.com/buildbarn/bb-storage/pkg/digest" + "github.com/buildbarn/bb-storage/pkg/testutil" + "github.com/stretchr/testify/require" +) + +func TestStaticKeyExtractor(t *testing.T) { + keyExtractor := platform.NewStaticKeyExtractor(&remoteexecution.Platform{ + Properties: []*remoteexecution.Platform_Property{ + {Name: "arch", Value: "aarch64"}, + {Name: "os", Value: "linux"}, + }, + }) + ctx := context.Background() + digestFunction := digest.MustNewFunction("hello", remoteexecution.DigestFunction_SHA256) + + key, err := keyExtractor.ExtractKey(ctx, digestFunction, &remoteexecution.Action{ + Platform: &remoteexecution.Platform{ + Properties: []*remoteexecution.Platform_Property{ + {Name: "arch", Value: "x86_64"}, + {Name: "os", Value: "freebsd"}, + }, + }, + }) + require.NoError(t, err) + testutil.RequireEqualProto(t, &buildqueuestate.PlatformQueueName{ + InstanceNamePrefix: "hello", + Platform: &remoteexecution.Platform{ + Properties: []*remoteexecution.Platform_Property{ + {Name: "arch", Value: "aarch64"}, + {Name: "os", Value: "linux"}, + }, + }, + }, key.GetPlatformQueueName()) +} diff 
--git a/pkg/scheduler/platform/trie.go b/pkg/scheduler/platform/trie.go
new file mode 100644
index 0000000..8c4af18
--- /dev/null
+++ b/pkg/scheduler/platform/trie.go
@@ -0,0 +1,73 @@
+package platform
+
+import (
+	"github.com/buildbarn/bb-storage/pkg/digest"
+)
+
+// Trie is a prefix tree for instance names and platform properties. It
+// can be used by implementations of BuildQueue to match instance names
+// and platform properties provided as part of execution requests
+// against individual platform queues.
+//
+// For every key stored in the trie, an integer value is tracked. This
+// can, for example, be used by the caller to look up a corresponding
+// value in a contiguous list.
+type Trie struct {
+	platforms map[string]*digest.InstanceNameTrie
+}
+
+// NewTrie creates a new Trie that is initialized with no elements.
+func NewTrie() *Trie {
+	return &Trie{
+		platforms: map[string]*digest.InstanceNameTrie{},
+	}
+}
+
+// ContainsExact returns whether the trie contains a key that is exactly
+// the same as the one provided.
+func (t *Trie) ContainsExact(key Key) bool {
+	if pt, ok := t.platforms[key.platform]; ok {
+		return pt.ContainsExact(key.instanceNamePrefix)
+	}
+	return false
+}
+
+// GetExact returns the value associated with the key. If none of the
+// keys provided to Set() are exactly the same as the key provided to
+// GetExact(), this function returns -1.
+func (t *Trie) GetExact(key Key) int {
+	if pt, ok := t.platforms[key.platform]; ok {
+		return pt.GetExact(key.instanceNamePrefix)
+	}
+	return -1
+}
+
+// GetLongestPrefix returns the value associated with the longest
+// matching instance name prefix, having the same platform properties.
+// If none of the keys provided to Set() have an instance name that is a
+// prefix, or have the same platform properties as that of the key
+// provided to GetLongestPrefix(), this function returns -1.
+func (t *Trie) GetLongestPrefix(key Key) int { + if pt, ok := t.platforms[key.platform]; ok { + return pt.GetLongestPrefix(key.instanceNamePrefix) + } + return -1 +} + +// Remove a value associated with a key. +func (t *Trie) Remove(key Key) { + if t.platforms[key.platform].Remove(key.instanceNamePrefix) { + // The trie is now empty. Remove it from the map. + delete(t.platforms, key.platform) + } +} + +// Set a key in the trie to a given integer value. +func (t *Trie) Set(key Key, value int) { + pt, ok := t.platforms[key.platform] + if !ok { + pt = digest.NewInstanceNameTrie() + t.platforms[key.platform] = pt + } + pt.Set(key.instanceNamePrefix, value) +} diff --git a/pkg/scheduler/routing/BUILD.bazel b/pkg/scheduler/routing/BUILD.bazel new file mode 100644 index 0000000..e0f42cc --- /dev/null +++ b/pkg/scheduler/routing/BUILD.bazel @@ -0,0 +1,43 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "routing", + srcs = [ + "action_router.go", + "configuration.go", + "demultiplexing_action_router.go", + "simple_action_router.go", + ], + importpath = "github.com/buildbarn/bb-remote-execution/pkg/scheduler/routing", + visibility = ["//visibility:public"], + deps = [ + "//pkg/proto/configuration/scheduler", + "//pkg/scheduler/initialsizeclass", + "//pkg/scheduler/invocation", + "//pkg/scheduler/platform", + "@com_github_bazelbuild_remote_apis//build/bazel/remote/execution/v2:execution", + "@com_github_buildbarn_bb_storage//pkg/blobstore", + "@com_github_buildbarn_bb_storage//pkg/digest", + "@com_github_buildbarn_bb_storage//pkg/util", + "@com_github_prometheus_client_golang//prometheus", + "@org_golang_google_grpc//codes", + "@org_golang_google_grpc//status", + ], +) + +go_test( + name = "routing_test", + srcs = ["demultiplexing_action_router_test.go"], + deps = [ + ":routing", + "//internal/mock", + "//pkg/scheduler/platform", + "@com_github_bazelbuild_remote_apis//build/bazel/remote/execution/v2:execution", + 
"@com_github_buildbarn_bb_storage//pkg/digest", + "@com_github_buildbarn_bb_storage//pkg/testutil", + "@com_github_golang_mock//gomock", + "@com_github_stretchr_testify//require", + "@org_golang_google_grpc//codes", + "@org_golang_google_grpc//status", + ], +) diff --git a/pkg/scheduler/routing/action_router.go b/pkg/scheduler/routing/action_router.go new file mode 100644 index 0000000..d2dc6f9 --- /dev/null +++ b/pkg/scheduler/routing/action_router.go @@ -0,0 +1,28 @@ +package routing + +import ( + "context" + + remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + "github.com/buildbarn/bb-remote-execution/pkg/scheduler/initialsizeclass" + "github.com/buildbarn/bb-remote-execution/pkg/scheduler/invocation" + "github.com/buildbarn/bb-remote-execution/pkg/scheduler/platform" + "github.com/buildbarn/bb-storage/pkg/digest" +) + +// ActionRouter is responsible for doing all forms of analysis on an +// incoming execution request up to the point where InMemoryBuildQueue +// acquires locks and enqueues an operation. ActionRouter is responsible +// for the following things: +// +// - To extract a platform key from the action, so that +// InMemoryBuildQueue knows on which workers the action needs to +// execute. +// - To extract invocation keys from the client's context, so that +// InMemoryBuildQueue can group operations belonging to the same +// client and schedule them fairly with respect to other clients. +// - To create an initial size class selector, which InMemoryBuildQueue +// can use to select the appropriate worker size. 
+type ActionRouter interface { + RouteAction(ctx context.Context, digestFunction digest.Function, action *remoteexecution.Action, requestMetadata *remoteexecution.RequestMetadata) (platform.Key, []invocation.Key, initialsizeclass.Selector, error) +} diff --git a/pkg/scheduler/routing/configuration.go b/pkg/scheduler/routing/configuration.go new file mode 100644 index 0000000..6579831 --- /dev/null +++ b/pkg/scheduler/routing/configuration.go @@ -0,0 +1,68 @@ +package routing + +import ( + pb "github.com/buildbarn/bb-remote-execution/pkg/proto/configuration/scheduler" + "github.com/buildbarn/bb-remote-execution/pkg/scheduler/initialsizeclass" + "github.com/buildbarn/bb-remote-execution/pkg/scheduler/invocation" + "github.com/buildbarn/bb-remote-execution/pkg/scheduler/platform" + "github.com/buildbarn/bb-storage/pkg/blobstore" + "github.com/buildbarn/bb-storage/pkg/digest" + "github.com/buildbarn/bb-storage/pkg/util" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// NewActionRouterFromConfiguration creates an ActionRouter based on +// options specified in a configuration file. 
+func NewActionRouterFromConfiguration(configuration *pb.ActionRouterConfiguration, contentAddressableStorage blobstore.BlobAccess, maximumMessageSizeBytes int, previousExecutionStatsStore initialsizeclass.PreviousExecutionStatsStore) (ActionRouter, error) { + if configuration == nil { + return nil, status.Error(codes.InvalidArgument, "No action router configuration provided") + } + switch kind := configuration.Kind.(type) { + case *pb.ActionRouterConfiguration_Simple: + platformKeyExtractor, err := platform.NewKeyExtractorFromConfiguration(kind.Simple.PlatformKeyExtractor, contentAddressableStorage, maximumMessageSizeBytes) + if err != nil { + return nil, util.StatusWrap(err, "Failed to create platform key extractor") + } + invocationKeyExtractors := make([]invocation.KeyExtractor, 0, len(kind.Simple.InvocationKeyExtractors)) + for i, entry := range kind.Simple.InvocationKeyExtractors { + invocationKeyExtractor, err := invocation.NewKeyExtractorFromConfiguration(entry) + if err != nil { + return nil, util.StatusWrapf(err, "Failed to create invocation key extractor at index %d", i) + } + invocationKeyExtractors = append(invocationKeyExtractors, invocationKeyExtractor) + } + initialSizeClassAnalyzer, err := initialsizeclass.NewAnalyzerFromConfiguration(kind.Simple.InitialSizeClassAnalyzer, previousExecutionStatsStore) + if err != nil { + return nil, util.StatusWrap(err, "Failed to create initial size class analyzer") + } + return NewSimpleActionRouter(platformKeyExtractor, invocationKeyExtractors, initialSizeClassAnalyzer), nil + case *pb.ActionRouterConfiguration_Demultiplexing: + platformKeyExtractor, err := platform.NewKeyExtractorFromConfiguration(kind.Demultiplexing.PlatformKeyExtractor, contentAddressableStorage, maximumMessageSizeBytes) + if err != nil { + return nil, util.StatusWrap(err, "Failed to create platform key extractor") + } + defaultActionRouter, err := NewActionRouterFromConfiguration(kind.Demultiplexing.DefaultActionRouter, 
contentAddressableStorage, maximumMessageSizeBytes, previousExecutionStatsStore) + if err != nil { + return nil, util.StatusWrap(err, "Failed to create default action router") + } + actionRouter := NewDemultiplexingActionRouter(platformKeyExtractor, defaultActionRouter) + for _, backend := range kind.Demultiplexing.Backends { + instanceNamePrefix, err := digest.NewInstanceName(backend.InstanceNamePrefix) + if err != nil { + return nil, util.StatusWrapf(err, "Invalid instance name prefix %#v", backend.InstanceNamePrefix) + } + backendActionRouter, err := NewActionRouterFromConfiguration(backend.ActionRouter, contentAddressableStorage, maximumMessageSizeBytes, previousExecutionStatsStore) + if err != nil { + return nil, util.StatusWrap(err, "Failed to create demultiplexing action router backend") + } + if err := actionRouter.RegisterActionRouter(instanceNamePrefix, backend.Platform, backendActionRouter); err != nil { + return nil, util.StatusWrap(err, "Failed to register demultiplexing action router backend") + } + } + return actionRouter, nil + default: + return nil, status.Error(codes.InvalidArgument, "Configuration did not contain a supported action router type") + } +} diff --git a/pkg/scheduler/routing/demultiplexing_action_router.go b/pkg/scheduler/routing/demultiplexing_action_router.go new file mode 100644 index 0000000..d4049ae --- /dev/null +++ b/pkg/scheduler/routing/demultiplexing_action_router.go @@ -0,0 +1,102 @@ +package routing + +import ( + "context" + "sync" + + remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + "github.com/buildbarn/bb-remote-execution/pkg/scheduler/initialsizeclass" + "github.com/buildbarn/bb-remote-execution/pkg/scheduler/invocation" + "github.com/buildbarn/bb-remote-execution/pkg/scheduler/platform" + "github.com/buildbarn/bb-storage/pkg/digest" + "github.com/buildbarn/bb-storage/pkg/util" + "github.com/prometheus/client_golang/prometheus" + + "google.golang.org/grpc/codes" + 
"google.golang.org/grpc/status" +) + +var ( + demultiplexingActionRouterPrometheusMetrics sync.Once + + demultiplexingActionRouterRequestsTotal = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "buildbarn", + Subsystem: "builder", + Name: "demultiplexing_action_router_requests_total", + Help: "Number of actions that were routed using the demultiplexing action router.", + }, + []string{"instance_name_prefix", "platform"}) +) + +type demultiplexingActionRouterEntry struct { + actionRouter ActionRouter + requestsTotal prometheus.Counter +} + +// DemultiplexingActionRouter is an implementation of ActionRouter that +// demultiplexes routing requests by REv2 instance name and platform +// properties, using a platform.Trie. This makes it possible to use +// different invocation key extractors or initial size class analyzers +// depending on the platform. When combined with +// platform.StaticKeyExtractor, it can be used to rewrite platform +// properties. +type DemultiplexingActionRouter struct { + platformKeyExtractor platform.KeyExtractor + trie *platform.Trie + entries []demultiplexingActionRouterEntry +} + +// NewDemultiplexingActionRouter creates a new +// DemultiplexingActionRouter that forwards all incoming requests to a +// single ActionRouter. 
+func NewDemultiplexingActionRouter(platformKeyExtractor platform.KeyExtractor, defaultActionRouter ActionRouter) *DemultiplexingActionRouter { + demultiplexingActionRouterPrometheusMetrics.Do(func() { + prometheus.MustRegister(demultiplexingActionRouterRequestsTotal) + }) + + return &DemultiplexingActionRouter{ + platformKeyExtractor: platformKeyExtractor, + trie: platform.NewTrie(), + entries: []demultiplexingActionRouterEntry{ + { + actionRouter: defaultActionRouter, + requestsTotal: demultiplexingActionRouterRequestsTotal.WithLabelValues("", ""), + }, + }, + } +} + +var _ ActionRouter = (*DemultiplexingActionRouter)(nil) + +// RegisterActionRouter registers a new ActionRouter by REv2 instance +// name prefix and platform properties, so that subsequent calls to +// RouteAction() may forward requests to it. +func (ar *DemultiplexingActionRouter) RegisterActionRouter(instanceNamePrefix digest.InstanceName, platformMessage *remoteexecution.Platform, actionRouter ActionRouter) error { + platformKey, err := platform.NewKey(instanceNamePrefix, platformMessage) + if err != nil { + return err + } + if ar.trie.ContainsExact(platformKey) { + return status.Error(codes.AlreadyExists, "An action router with the same instance name prefix and platform already exists") + } + ar.trie.Set(platformKey, len(ar.entries)-1) + ar.entries = append(ar.entries, demultiplexingActionRouterEntry{ + actionRouter: actionRouter, + requestsTotal: demultiplexingActionRouterRequestsTotal.WithLabelValues(platformKey.GetInstanceNamePrefix().String(), platformKey.GetPlatformString()), + }) + return nil +} + +// RouteAction forwards requests to one of the ActionRouters that was +// provided to NewDemultiplexingActionRouter() or +// RegisterActionRouter(). 
+func (ar *DemultiplexingActionRouter) RouteAction(ctx context.Context, digestFunction digest.Function, action *remoteexecution.Action, requestMetadata *remoteexecution.RequestMetadata) (platform.Key, []invocation.Key, initialsizeclass.Selector, error) { + key, err := ar.platformKeyExtractor.ExtractKey(ctx, digestFunction, action) + if err != nil { + return platform.Key{}, nil, nil, util.StatusWrap(err, "Failed to extract platform key") + } + entry := &ar.entries[ar.trie.GetLongestPrefix(key)+1] + entry.requestsTotal.Inc() + return entry.actionRouter.RouteAction(ctx, digestFunction, action, requestMetadata) +} diff --git a/pkg/scheduler/routing/demultiplexing_action_router_test.go b/pkg/scheduler/routing/demultiplexing_action_router_test.go new file mode 100644 index 0000000..78d3573 --- /dev/null +++ b/pkg/scheduler/routing/demultiplexing_action_router_test.go @@ -0,0 +1,87 @@ +package routing_test + +import ( + "context" + "testing" + + remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + "github.com/buildbarn/bb-remote-execution/internal/mock" + "github.com/buildbarn/bb-remote-execution/pkg/scheduler/platform" + "github.com/buildbarn/bb-remote-execution/pkg/scheduler/routing" + "github.com/buildbarn/bb-storage/pkg/digest" + "github.com/buildbarn/bb-storage/pkg/testutil" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func TestDemultiplexingActionRouter(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + platformKeyExtractor := mock.NewMockPlatformKeyExtractor(ctrl) + defaultActionRouter := mock.NewMockActionRouter(ctrl) + actionRouter := routing.NewDemultiplexingActionRouter(platformKeyExtractor, defaultActionRouter) + + // Register two action router backends. 
+ linuxPlatform := &remoteexecution.Platform{ + Properties: []*remoteexecution.Platform_Property{ + {Name: "os", Value: "linux"}, + }, + } + linuxActionRouter := mock.NewMockActionRouter(ctrl) + require.NoError(t, actionRouter.RegisterActionRouter(digest.MustNewInstanceName("a"), linuxPlatform, linuxActionRouter)) + + windowsPlatform := &remoteexecution.Platform{ + Properties: []*remoteexecution.Platform_Property{ + {Name: "os", Value: "windows"}, + }, + } + windowsActionRouter := mock.NewMockActionRouter(ctrl) + require.NoError(t, actionRouter.RegisterActionRouter(digest.MustNewInstanceName("a"), windowsPlatform, windowsActionRouter)) + + // Attempting to register another backend with the same instance + // name prefix and platform properties should fail. + unusedActionRouter := mock.NewMockActionRouter(ctrl) + testutil.RequireEqualStatus( + t, + status.Error(codes.AlreadyExists, "An action router with the same instance name prefix and platform already exists"), + actionRouter.RegisterActionRouter(digest.MustNewInstanceName("a"), linuxPlatform, unusedActionRouter)) + + t.Run("DefaultActionRouter", func(t *testing.T) { + // Even though the platform properties indicate Linux, + // the provided REv2 instance name is not a prefix of + // "a". This means that the request should be sent to + // the default action router. + digestFunction := digest.MustNewFunction("", remoteexecution.DigestFunction_SHA256) + platformKeyExtractor.EXPECT().ExtractKey(ctx, digestFunction, testutil.EqProto(t, &remoteexecution.Action{})). + Return(platform.MustNewKey("", linuxPlatform), nil) + defaultActionRouter.EXPECT().RouteAction(ctx, gomock.Any(), testutil.EqProto(t, &remoteexecution.Action{}), testutil.EqProto(t, &remoteexecution.RequestMetadata{})). 
+ Return(platform.Key{}, nil, nil, status.Error(codes.Internal, "Got routed to default")) + + _, _, _, err := actionRouter.RouteAction( + ctx, + digest.MustNewFunction("", remoteexecution.DigestFunction_SHA256), + &remoteexecution.Action{}, + &remoteexecution.RequestMetadata{}) + testutil.RequireEqualStatus(t, status.Error(codes.Internal, "Got routed to default"), err) + }) + + t.Run("LinuxActionRouter", func(t *testing.T) { + // By setting the REv2 instance name to "a", we should + // get directed to the Linux action router. + digestFunction := digest.MustNewFunction("a", remoteexecution.DigestFunction_SHA256) + platformKeyExtractor.EXPECT().ExtractKey(ctx, digestFunction, testutil.EqProto(t, &remoteexecution.Action{})). + Return(platform.MustNewKey("a", linuxPlatform), nil) + linuxActionRouter.EXPECT().RouteAction(ctx, gomock.Any(), testutil.EqProto(t, &remoteexecution.Action{}), testutil.EqProto(t, &remoteexecution.RequestMetadata{})). + Return(platform.Key{}, nil, nil, status.Error(codes.Internal, "Got routed to Linux")) + + _, _, _, err := actionRouter.RouteAction( + ctx, + digest.MustNewFunction("a", remoteexecution.DigestFunction_SHA256), + &remoteexecution.Action{}, + &remoteexecution.RequestMetadata{}) + testutil.RequireEqualStatus(t, status.Error(codes.Internal, "Got routed to Linux"), err) + }) +} diff --git a/pkg/scheduler/routing/simple_action_router.go b/pkg/scheduler/routing/simple_action_router.go new file mode 100644 index 0000000..f799a2b --- /dev/null +++ b/pkg/scheduler/routing/simple_action_router.go @@ -0,0 +1,53 @@ +package routing + +import ( + "context" + + remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + "github.com/buildbarn/bb-remote-execution/pkg/scheduler/initialsizeclass" + "github.com/buildbarn/bb-remote-execution/pkg/scheduler/invocation" + "github.com/buildbarn/bb-remote-execution/pkg/scheduler/platform" + "github.com/buildbarn/bb-storage/pkg/digest" + "github.com/buildbarn/bb-storage/pkg/util" 
+) + +type simpleActionRouter struct { + platformKeyExtractor platform.KeyExtractor + invocationKeyExtractors []invocation.KeyExtractor + initialSizeClassAnalyzer initialsizeclass.Analyzer +} + +// NewSimpleActionRouter creates an ActionRouter that creates a platform +// key, invocation key and initial size class selector by independently +// calling into separate extractors/analyzers. +// +// This implementation should be sufficient for most simple setups, +// where only a small number of execution platforms exist, or where +// scheduling decisions are identical for all platforms. +func NewSimpleActionRouter(platformKeyExtractor platform.KeyExtractor, invocationKeyExtractors []invocation.KeyExtractor, initialSizeClassAnalyzer initialsizeclass.Analyzer) ActionRouter { + return &simpleActionRouter{ + platformKeyExtractor: platformKeyExtractor, + invocationKeyExtractors: invocationKeyExtractors, + initialSizeClassAnalyzer: initialSizeClassAnalyzer, + } +} + +func (ar *simpleActionRouter) RouteAction(ctx context.Context, digestFunction digest.Function, action *remoteexecution.Action, requestMetadata *remoteexecution.RequestMetadata) (platform.Key, []invocation.Key, initialsizeclass.Selector, error) { + platformKey, err := ar.platformKeyExtractor.ExtractKey(ctx, digestFunction, action) + if err != nil { + return platform.Key{}, nil, nil, util.StatusWrap(err, "Failed to extract platform key") + } + invocationKeys := make([]invocation.Key, 0, len(ar.invocationKeyExtractors)) + for _, invocationKeyExtractor := range ar.invocationKeyExtractors { + invocationKey, err := invocationKeyExtractor.ExtractKey(ctx, requestMetadata) + if err != nil { + return platform.Key{}, nil, nil, util.StatusWrap(err, "Failed to extract invocation key") + } + invocationKeys = append(invocationKeys, invocationKey) + } + initialSizeClassSelector, err := ar.initialSizeClassAnalyzer.Analyze(ctx, digestFunction, action) + if err != nil { + return platform.Key{}, nil, nil, util.StatusWrap(err, 
"Failed to analyze initial size class") + } + return platformKey, invocationKeys, initialSizeClassSelector, nil +} diff --git a/pkg/sync/BUILD.bazel b/pkg/sync/BUILD.bazel new file mode 100644 index 0000000..5b72b02 --- /dev/null +++ b/pkg/sync/BUILD.bazel @@ -0,0 +1,19 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "sync", + srcs = ["lock_pile.go"], + importpath = "github.com/buildbarn/bb-remote-execution/pkg/sync", + visibility = ["//visibility:public"], +) + +go_test( + name = "sync_test", + srcs = ["lock_pile_test.go"], + deps = [ + ":sync", + "//internal/mock", + "@com_github_golang_mock//gomock", + "@com_github_stretchr_testify//require", + ], +) diff --git a/pkg/sync/lock_pile.go b/pkg/sync/lock_pile.go new file mode 100644 index 0000000..05468f7 --- /dev/null +++ b/pkg/sync/lock_pile.go @@ -0,0 +1,152 @@ +package sync + +import ( + "sync" +) + +// TryLocker represents a lock type that can both be acquired in a +// blocking and non-blocking fashion. +type TryLocker interface { + sync.Locker + TryLock() bool +} + +var ( + _ TryLocker = &sync.Mutex{} + _ TryLocker = &sync.RWMutex{} +) + +type lockHandle struct { + lock TryLocker + recursion int +} + +// LockPile is a list to keep track of locks held by a thread. For every +// lock, it keeps track of a recursion count, allowing locks that don't +// support recursion to be acquired multiple times. The underlying lock +// will only be unlocked if the recursion count reaches zero. +// +// LockPile implements a deadlock avoidance algorithm, ensuring that +// blocking on a lock is only performed when no other locks are held. 
+// What LockPile implements is equivalent to the "Smart" algorithm
+// described on Howard Hinnant's page titled "Dining Philosophers
+// Rebooted":
+//
+// https://howardhinnant.github.io/dining_philosophers.html
+//
+// As the set of locks held can be extended over time, there may be a
+// possibility LockPile has to backtrack and temporarily unlock one or
+// more locks it held prior to acquiring more. The caller is signalled
+// when this happens, so that it may revalidate its state. Depending on
+// the state's validity, the caller may either continue or retry.
+type LockPile []lockHandle
+
+func (lp *LockPile) insert(newLock TryLocker) {
+	for i := 0; i < len(*lp); i++ {
+		lh := &(*lp)[i]
+		if lh.lock == newLock {
+			// Lock already acquired. Increase recursion count.
+			lh.recursion++
+			return
+		}
+	}
+	*lp = append(*lp, lockHandle{lock: newLock})
+}
+
+// Lock one or more TryLocker objects, adding them to the LockPile. This
+// function returns true iff it was capable of acquiring all locks
+// without temporarily unlocking any of the locks it already owned.
+// Regardless of whether this function returns true or false, the same
+// set of locks is held by the calling thread afterwards.
+//
+// Example usage, of a function that computes the sum of two value nodes
+// in a tree atomically:
+//
+// func (node *Node) GetSumOfParentAndChild(name string) (int, bool) {
+//     lockPile := util.LockPile{}
+//     defer lockPile.UnlockAll()
+//     lockPile.Lock(&node.lock) // Always returns 'true'
+//     for {
+//         if child, ok := node.children[name]; !ok {
+//             return 0, false
+//         } else if lockPile.Lock(&child.lock) {
+//             // Successfully acquired child lock without unlocking
+//             // the parent.
+//             break
+//         } else if node.children[name] == child {
+//             // Even though the parent was temporarily unlocked,
+//             // the parent-child relationship did not change.
+//             break
+//         }
+//         // Race condition during unlock. Retry.
+//         lockPile.Unlock(&child.lock)
+//     }
+//     return node.value + child.value, true
+// }
+func (lp *LockPile) Lock(newLocks ...TryLocker) bool {
+	currentlyAcquired := len(*lp)
+	for _, newLock := range newLocks {
+		lp.insert(newLock)
+	}
+
+	completedWithoutUnlocking := true
+	lhFirst := &(*lp)[0]
+	for currentlyAcquired < len(*lp) {
+		if currentlyAcquired > 0 {
+			lhTry := &(*lp)[currentlyAcquired]
+			if lhTry.lock.TryLock() {
+				// Successfully acquired a subsequent lock.
+				currentlyAcquired++
+				continue
+			}
+
+			// Cannot acquire a subsequent lock. Temporarily
+			// release all other locks, so that we can
+			// attempt a blocking acquisition of the
+			// subsequent lock.
+			completedWithoutUnlocking = false
+			for i := 0; i < currentlyAcquired; i++ {
+				lhUnlock := &(*lp)[i]
+				lhUnlock.lock.Unlock()
+			}
+			*lhFirst, *lhTry = *lhTry, *lhFirst
+		}
+
+		// First lock to acquire. Perform blocking acquisition.
+		lhFirst.lock.Lock()
+		currentlyAcquired = 1
+	}
+	return completedWithoutUnlocking
+}
+
+// Unlock a TryLocker object, removing it from the LockPile.
+func (lp *LockPile) Unlock(oldLock TryLocker) {
+	// Find lock to unlock.
+	i := 0
+	for (*lp)[i].lock != oldLock {
+		i++
+	}
+
+	// When locked recursively, just decrement the recursion count.
+	if (*lp)[i].recursion > 0 {
+		(*lp)[i].recursion--
+		return
+	}
+
+	// Unlock and remove entry from lock pile.
+	(*lp)[i].lock.Unlock()
+	(*lp)[i] = (*lp)[len(*lp)-1]
+	(*lp)[len(*lp)-1].lock = nil
+	*lp = (*lp)[:len(*lp)-1]
+}
+
+// UnlockAll unlocks all locks associated with a LockPile. Calling this
+// function using 'defer' ensures that no locks remain acquired after
+// the calling function returns.
+func (lp *LockPile) UnlockAll() {
+	// Release all locks contained in the pile exactly once.
+ for _, lockHandle := range *lp { + lockHandle.lock.Unlock() + } + *lp = nil +} diff --git a/pkg/sync/lock_pile_test.go b/pkg/sync/lock_pile_test.go new file mode 100644 index 0000000..0b598df --- /dev/null +++ b/pkg/sync/lock_pile_test.go @@ -0,0 +1,54 @@ +package sync_test + +import ( + "testing" + + "github.com/buildbarn/bb-remote-execution/internal/mock" + "github.com/buildbarn/bb-remote-execution/pkg/sync" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" +) + +func TestLockPile(t *testing.T) { + ctrl := gomock.NewController(t) + + // Attempt to acquire an initial lock. Because it's the initial + // lock, it's fine to block. + l1 := mock.NewMockTryLocker(ctrl) + l1.EXPECT().Lock() + lockPile := sync.LockPile{} + require.True(t, lockPile.Lock(l1)) + + // Subsequent lock acquisition attempts should not be blocking, + // as that could cause deadlocks. + l2 := mock.NewMockTryLocker(ctrl) + l3 := mock.NewMockTryLocker(ctrl) + l2.EXPECT().TryLock().Return(true) + l3.EXPECT().TryLock().Return(true) + require.True(t, lockPile.Lock(l2, l3)) + + // If a subsequent lock acquisition attempt fails, we should + // temporarily release all locks and retry acquiring it through + // blocking. + l4 := mock.NewMockTryLocker(ctrl) + l4.EXPECT().TryLock().Return(false) + l1.EXPECT().Unlock() + l2.EXPECT().Unlock() + l3.EXPECT().Unlock() + l4.EXPECT().Lock() + l1.EXPECT().TryLock().Return(true) + l2.EXPECT().TryLock().Return(true) + l3.EXPECT().TryLock().Return(true) + require.False(t, lockPile.Lock(l4)) + + // Recursively acquiring a lock should have no effect. + require.True(t, lockPile.Lock(l1)) + lockPile.Unlock(l1) + + // Drop all locks. 
+ l1.EXPECT().Unlock() + l2.EXPECT().Unlock() + l3.EXPECT().Unlock() + l4.EXPECT().Unlock() + lockPile.UnlockAll() +} diff --git a/pkg/util/BUILD.bazel b/pkg/util/BUILD.bazel new file mode 100644 index 0000000..d399151 --- /dev/null +++ b/pkg/util/BUILD.bazel @@ -0,0 +1,9 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "util", + srcs = ["browser_url.go"], + importpath = "github.com/buildbarn/bb-remote-execution/pkg/util", + visibility = ["//visibility:public"], + deps = ["@com_github_buildbarn_bb_storage//pkg/digest"], +) diff --git a/pkg/util/browser_url.go b/pkg/util/browser_url.go new file mode 100644 index 0000000..e7d77a8 --- /dev/null +++ b/pkg/util/browser_url.go @@ -0,0 +1,24 @@ +package util + +import ( + "fmt" + "net/url" + "strconv" + "strings" + + "github.com/buildbarn/bb-storage/pkg/digest" +) + +// GetBrowserURL generates a URL that can be visited to obtain more +// information about an object stored in the Content Addressable Storage +// (CAS) or Action Cache (AC). 
+func GetBrowserURL(browserURL *url.URL, objectType string, digest digest.Digest) string { + return browserURL.JoinPath( + digest.GetInstanceName().String(), + "blobs", + strings.ToLower(digest.GetDigestFunction().GetEnumValue().String()), + objectType, + fmt.Sprintf("%s-%s", digest.GetHashString(), strconv.FormatInt(digest.GetSizeBytes(), 10)), + "/", + ).String() +} diff --git a/tools/BUILD.bazel b/tools/BUILD.bazel new file mode 100644 index 0000000..e69de29 diff --git a/tools/github_workflows/BUILD.bazel b/tools/github_workflows/BUILD.bazel new file mode 100644 index 0000000..5877fe7 --- /dev/null +++ b/tools/github_workflows/BUILD.bazel @@ -0,0 +1,11 @@ +load("@io_bazel_rules_jsonnet//jsonnet:jsonnet.bzl", "jsonnet_library", "jsonnet_to_json") + +jsonnet_to_json( + name = "github_workflows", + src = "github_workflows.jsonnet", + outs = [ + "master.yaml", + "pull-requests.yaml", + ], + deps = ["@com_github_buildbarn_bb_storage//tools/github_workflows:workflows_template"], +) diff --git a/tools/github_workflows/github_workflows.jsonnet b/tools/github_workflows/github_workflows.jsonnet new file mode 100644 index 0000000..150a224 --- /dev/null +++ b/tools/github_workflows/github_workflows.jsonnet @@ -0,0 +1,20 @@ +local workflows_template = import 'external/com_github_buildbarn_bb_storage/tools/github_workflows/workflows_template.libsonnet'; + +workflows_template.getWorkflows( + [ + 'bb_noop_worker', + 'bb_runner', + 'bb_scheduler', + 'bb_virtual_tmp', + 'bb_worker', + 'fake_python', + 'fake_xcrun', + ], + [ + 'bb_noop_worker:bb_noop_worker', + 'bb_runner:bb_runner_bare', + 'bb_runner:bb_runner_installer', + 'bb_scheduler:bb_scheduler', + 'bb_worker:bb_worker', + ], +) diff --git a/tools/workspace-status.sh b/tools/workspace-status.sh new file mode 100644 index 0000000..2525498 --- /dev/null +++ b/tools/workspace-status.sh @@ -0,0 +1,6 @@ +#!/bin/sh -e + +if test "${GITHUB_ACTIONS}" = "true"; then + echo "BUILD_SCM_REVISION $(git rev-parse --short HEAD)" + echo 
"BUILD_SCM_TIMESTAMP $(TZ=UTC date --date "@$(git show -s --format=%ct HEAD)" +%Y%m%dT%H%M%SZ)" +fi