From 6e3fa5f13b5b6c0c783e73adabb436822063dad8 Mon Sep 17 00:00:00 2001
From: Dominique Orban
Date: Sun, 25 Sep 2022 16:32:47 -0400
Subject: [PATCH 1/2] set up IPOPT benchmarks I

Add benchmark files for PkgBenchmark. The files live in benchmark/
(singular), which is the directory benchmarkpkg searches by default,
and the directory the workflows in the next patch reference.
---
 benchmark/Project.toml      | 13 +++++
 benchmark/benchmarks.jl     | 13 +++++
 benchmark/run_benchmarks.jl | 88 ++++++++++++++++++++++++++++++++++++
 3 files changed, 114 insertions(+)
 create mode 100644 benchmark/Project.toml
 create mode 100644 benchmark/benchmarks.jl
 create mode 100644 benchmark/run_benchmarks.jl

diff --git a/benchmark/Project.toml b/benchmark/Project.toml
new file mode 100644
index 0000000..2d231d9
--- /dev/null
+++ b/benchmark/Project.toml
@@ -0,0 +1,13 @@
+[deps]
+BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf"
+CUTEst = "1b53aba6-35b6-5f92-a507-53c67d53f819"
+DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0"
+Git = "d7ba0133-e1db-5d97-8f8c-041e4b3a1eb2"
+GitHub = "bc5e4493-9b4d-5f90-b8aa-2b2bcaad7a26"
+JLD2 = "033835bb-8acc-5ee8-8aae-3f567f8a3819"
+JSON = "682c06a0-de6a-54ab-a142-c8b1cf79cde6"
+LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
+Logging = "56ddb016-857b-54e1-b83d-db4d58db5568"
+PkgBenchmark = "32113eaa-f34f-5b0d-bd6c-c81e245fc73d"
+Plots = "91a5bcdd-55d7-5caf-9e0b-520d859cae80"
+SolverBenchmark = "581a75fa-a23a-52d0-a590-d6201de2218a"

diff --git a/benchmark/benchmarks.jl b/benchmark/benchmarks.jl
new file mode 100644
index 0000000..13ba657
--- /dev/null
+++ b/benchmark/benchmarks.jl
@@ -0,0 +1,13 @@
+using BenchmarkTools
+
+using CUTEst
+using NLPModelsIpopt
+
+const SUITE = BenchmarkGroup()
+
+# all_sif_problems = CUTEst.select()  # the full SIF collection
+all_sif_problems = ("ROSENBR", "WOODS", "PENALTY1")  # small subset for debugging
+for prob ∈ all_sif_problems
+  SUITE[prob] = @benchmarkable ipopt(model) setup = (model = CUTEstModel($prob)) teardown = (finalize(model))
+end
+

diff --git a/benchmark/run_benchmarks.jl b/benchmark/run_benchmarks.jl
new file mode 100644
index 0000000..3e6724f
--- /dev/null
+++ b/benchmark/run_benchmarks.jl
@@ -0,0 +1,88 @@
+using Pkg
+bmark_dir = @__DIR__
+Pkg.activate(bmark_dir)
+Pkg.instantiate()
+repo_name = string(split(ARGS[1], ".")[1])  # strip the ".jl" suffix from the repository name
+bmarkname = lowercase(repo_name)
+using Git
+
+const git = Git.git()
+
+# if we are running these benchmarks from the git repository,
+# we want to develop the package instead of using a release
+if isdir(joinpath(bmark_dir, "..", ".git"))
+  Pkg.develop(PackageSpec(url = joinpath(bmark_dir, "..")))
+  bmarkname = readchomp(`$git rev-parse HEAD`)  # sha of HEAD
+end
+
+using DataFrames
+using GitHub
+using JLD2
+using JSON
+using PkgBenchmark
+using Plots
+
+using SolverBenchmark
+
+# NB: benchmarkpkg will run benchmark/benchmarks.jl by default
+commit = benchmarkpkg(repo_name)  # current state of the repository
+main = benchmarkpkg(repo_name, "main")
+judgement = judge(commit, main)
+
+commit_stats = bmark_results_to_dataframes(commit)
+main_stats = bmark_results_to_dataframes(main)
+judgement_stats = judgement_results_to_dataframes(judgement)
+
+export_markdown("judgement_$(bmarkname).md", judgement)
+export_markdown("main.md", main)
+export_markdown("$(bmarkname).md", commit)
+
+function profile_solvers_from_pkgbmark(stats::Dict{Symbol, DataFrame})
+  # guard against zero gctimes: performance profiles require positive costs
+  costs =
+    [df -> df[!, :time], df -> df[!, :memory], df -> df[!, :gctime] .+ 1, df -> df[!, :allocations]]
+  profile_solvers(stats, costs, ["time", "memory", "gctime+1", "allocations"])
+end
+
+# extract stats for each benchmark to plot profiles
+# files_dict will be part of json_dict below
+files_dict = Dict{String, Any}()
+file_num = 1
+for k ∈ keys(judgement_stats)
+  global file_num
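+  # pair the commit and main dataframes for benchmark group k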
+  k_stats = Dict{Symbol, DataFrame}(:commit => commit_stats[k], :main => main_stats[k])
+  save_stats(k_stats, "$(bmarkname)_vs_main_$(k).jld2", force = true)
+
+  k_profile = profile_solvers_from_pkgbmark(k_stats)
+  savefig("profiles_commit_vs_main_$(k).svg")
+  # read contents of svg file to add to gist
+  k_svgfile = open("profiles_commit_vs_main_$(k).svg", "r") do fd
+    readlines(fd)
+  end
+  # file_num makes sure svg files appear before md files (added below)
+  files_dict["$(file_num)_$(k).svg"] = Dict{String, Any}("content" => join(k_svgfile))
+  file_num += 1
+end
+
+for mdfile ∈ [:judgement, :main, :commit]
+  global file_num
+  files_dict["$(file_num)_$(mdfile).md"] =
+    Dict{String, Any}("content" => sprint(export_markdown, eval(mdfile)))
+  file_num += 1
+end
+
+jldopen("$(bmarkname)_vs_main_judgement.jld2", "w") do file
+  file["jstats"] = judgement_stats
+end
+
+# json description of the gist
+json_dict = Dict{String, Any}(
+  "description" => "$(repo_name) repository benchmark",
+  "public" => true,
+  "files" => files_dict,
+)
+
+open("$(bmarkname).json", "w") do f
+  JSON.print(f, json_dict)
+end

From b2a41bb8d655f74f01097d0e4fe3129fafa7f836 Mon Sep 17 00:00:00 2001
From: Dominique Orban
Date: Sun, 25 Sep 2022 16:34:07 -0400
Subject: [PATCH 2/2] set up IPOPT benchmarks II

Add benchmark workflows.
---
 .github/workflows/bmark_M1.yml   | 36 ++++++++++++++++++++++++++++++++
 .github/workflows/pr-comment.yml | 20 ++++++++++++++++++
 2 files changed, 56 insertions(+)
 create mode 100644 .github/workflows/bmark_M1.yml
 create mode 100644 .github/workflows/pr-comment.yml

diff --git a/.github/workflows/bmark_M1.yml b/.github/workflows/bmark_M1.yml
new file mode 100644
index 0000000..bde5da1
--- /dev/null
+++ b/.github/workflows/bmark_M1.yml
@@ -0,0 +1,36 @@
+name: bmark_M1
+on:
+  pull_request_target:
+    types:
+      - labeled
+jobs:
+  bmark:
+    name: Julia ${{ matrix.version }} - macOS - ${{ matrix.arch }} - ${{ github.event_name }}
+    if: contains(github.event.pull_request.labels.*.name, 'benchmarks')
+    runs-on: self-hosted
+    strategy:
+      fail-fast: false
+      matrix:
+        version:
+          - 1
+        arch:
+          - aarch64
+    steps:
+      - uses: actions/checkout@v3
+      - uses: julia-actions/setup-julia@v1
+        with:
+          version: ${{ matrix.version }}
+          arch: ${{ matrix.arch }}
+      - uses: julia-actions/julia-buildpkg@v1
+      - name: Install benchmark dependencies
+        run: julia --project=benchmark -e 'using Pkg; Pkg.instantiate()'
+      - name: Run benchmarks
+        run: julia --project=benchmark benchmark/run_benchmarks.jl ${{ github.event.repository.name }}
+      - name: Upload artifacts
+        uses: actions/upload-artifact@v3
+        with:
+          name: benchmarks
+          path: |
+            *.svg
+            *.jld2
+            *.json

diff --git a/.github/workflows/pr-comment.yml b/.github/workflows/pr-comment.yml
new file mode 100644
index 0000000..713ae39
--- /dev/null
+++ b/.github/workflows/pr-comment.yml
@@ -0,0 +1,20 @@
+name: add artifact links to pr
+on:
+  workflow_run:
+    workflows: [bmark_M1]
+    types: [completed]
+
+jobs:
+  artifacts-url-comments:
+    name: add artifact links to pull request and related issues
+    runs-on: [ubuntu-latest]
+    if: ${{ github.event.workflow_run.conclusion == 'success' }}
+    steps:
+      - name: add artifact links to pull request and related issues
+        uses: veitbjarsch/artifacts-url-comments@v1.1.0
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        with:
+          prefix: "Here are the benchmark results"
+          format: "name"
+          addTo: "pull"
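
Note: run_benchmarks.jl writes the gist description to $(bmarkname).json and
the workflow uploads it as an artifact, but nothing in these patches posts
the gist itself. A minimal sketch of a follow-up step with GitHub.jl,
assuming a personal access token with the "gist" scope in the GITHUB_AUTH
environment variable; the script name post_gist.jl and its command line are
illustrative, not part of this patch:

    # post_gist.jl -- hypothetical companion script; invoke as, e.g.,
    #   julia --project=benchmark benchmark/post_gist.jl nlpmodelsipopt.json
    using GitHub, JSON
    json_dict = JSON.parsefile(ARGS[1])             # gist payload written by run_benchmarks.jl
    auth = GitHub.authenticate(ENV["GITHUB_AUTH"])  # assumed: token with gist scope
    posted_gist = GitHub.create_gist(params = json_dict, auth = auth)
    println(posted_gist.html_url)                   # link to the new gist

Because json_dict already has the shape the gists API expects
("description", "public", "files"), it can be passed to create_gist
unchanged.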