diff --git a/.github/workflows/Benchmark.yml b/.github/workflows/Benchmark.yml
new file mode 100644
index 0000000..1bb0cd7
--- /dev/null
+++ b/.github/workflows/Benchmark.yml
@@ -0,0 +1,59 @@
+name: Benchmarks
+on:
+  push:
+    branches:
+      - master
+  pull_request:
+    branches:
+      - master
+
+concurrency:
+  # Skip intermediate builds: always.
+  # Cancel intermediate builds: only if it is a pull request build.
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: ${{ startsWith(github.ref, 'refs/pull/') }}
+
+jobs:
+  benchmark:
+    permissions:
+      contents: write
+      pull-requests: write
+      repository-projects: write
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+      - uses: julia-actions/setup-julia@v1
+        with:
+          version: '1'
+          arch: x64
+      - uses: actions/cache@v4
+        env:
+          cache-name: cache-artifacts
+        with:
+          path: ~/.julia/artifacts
+          key: ${{ runner.os }}-test-${{ env.cache-name }}-${{ hashFiles('**/Project.toml') }}
+          restore-keys: |
+            ${{ runner.os }}-test-${{ env.cache-name }}-
+            ${{ runner.os }}-test-
+            ${{ runner.os }}-
+      - name: Run benchmark
+        run: |
+          cd bench
+          julia --project --color=yes -e '
+            using Pkg;
+            Pkg.develop(PackageSpec(path=joinpath(pwd(), "..")));
+            Pkg.instantiate();
+            include("runbenchmarks.jl")'
+      - name: Parse & Upload Benchmark Results
+        uses: benchmark-action/github-action-benchmark@v1
+        with:
+          name: Benchmark Results
+          tool: 'julia'
+          output-file-path: bench/benchmark_results.json
+          summary-always: true
+          github-token: ${{ secrets.GITHUB_TOKEN }}
+          comment-always: true
+          alert-threshold: "200%"
+          fail-on-alert: true
+          benchmark-data-dir-path: benchmarks
+          auto-push: ${{ github.event_name != 'pull_request' }}
\ No newline at end of file
diff --git a/bench/.JuliaFormatter.toml b/bench/.JuliaFormatter.toml
new file mode 100644
index 0000000..a1917d8
--- /dev/null
+++ b/bench/.JuliaFormatter.toml
@@ -0,0 +1,9 @@
+style = "sciml"
+whitespace_in_kwargs = false
+always_use_return = true
+margin = 92
+indent = 4
+format_docstrings = true
+separate_kwargs_with_semicolon = true
+always_for_in = true
+annotate_untyped_fields_with_any = false
\ No newline at end of file
diff --git a/bench/Project.toml b/bench/Project.toml
new file mode 100644
index 0000000..75b006e
--- /dev/null
+++ b/bench/Project.toml
@@ -0,0 +1,5 @@
+[deps]
+BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf"
+FractionalCalculus = "638fb199-4bb2-4014-80c8-6dc0d90f156b"
+Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
+Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
\ No newline at end of file
diff --git a/bench/README.md b/bench/README.md
new file mode 100644
index 0000000..9b183ac
--- /dev/null
+++ b/bench/README.md
@@ -0,0 +1,6 @@
+# FractionalCalculus.jl Continuous Benchmarking
+
+We use the BenchmarkTools.jl package to track the performance of FractionalCalculus.jl over time.
+
+This setup is built on https://github.com/benchmark-action/github-action-benchmark/, which
+renders the benchmark results as charts on GitHub Pages and posts a warning on PRs when a benchmark regresses.
\ No newline at end of file
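To try the suite locally before CI picks it up, something like the following should work
(a sketch, not part of the diff: it mirrors the "Run benchmark" CI step above, and assumes
Julia is started with --project=bench from the repository root):

    # started as: julia --project=bench, from the repository root
    using Pkg
    Pkg.develop(PackageSpec(path=pwd()))  # make FractionalCalculus.jl available to bench/
    Pkg.instantiate()
    include(joinpath("bench", "runbenchmarks.jl"))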
diff --git a/bench/runbenchmarks.jl b/bench/runbenchmarks.jl
new file mode 100644
index 0000000..68026eb
--- /dev/null
+++ b/bench/runbenchmarks.jl
@@ -0,0 +1,17 @@
+using BenchmarkTools: BenchmarkTools, BenchmarkGroup, @benchmarkable
+using FractionalCalculus
+using Statistics: median
+
+const SUITE = BenchmarkGroup()
+
+testf(x) = x^2
+
+SUITE["Caputo"] = BenchmarkGroup()
+SUITE["Caputo"]["CaputoDiethelm"] = @benchmarkable fracdiff(testf, 0.5, 0.01, CaputoDiethelm())
+SUITE["Caputo"]["CaputoTrap"] = @benchmarkable fracdiff(testf, 0.5, 0.01, CaputoTrap())
+SUITE["Caputo"]["CaputoL1"] = @benchmarkable fracdiff(testf, 0.5, 0.01, CaputoL1())
+
+BenchmarkTools.tune!(SUITE)
+results = BenchmarkTools.run(SUITE; verbose=true)
+
+BenchmarkTools.save(joinpath(@__DIR__, "benchmark_results.json"), median(results))
\ No newline at end of file
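The saved JSON can be loaded back for inspection with BenchmarkTools (a sketch; note that
BenchmarkTools.save stores a vector of values, so load returns one and we take its first
element):

    using BenchmarkTools
    # benchmark_results.json holds the median estimates saved by runbenchmarks.jl
    medians = BenchmarkTools.load(joinpath("bench", "benchmark_results.json"))[1]
    medians["Caputo"]["CaputoDiethelm"]  # median TrialEstimate for one algorithm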