Add pyCFML benchmark tests
AndrewSazonov committed Jul 26, 2024
1 parent 8a5ab66 commit 57f4df1
Showing 4 changed files with 48 additions and 15 deletions.
21 changes: 13 additions & 8 deletions .github/workflows/main.yml
@@ -128,19 +128,15 @@ jobs:
          scripts/build_cfml_test_programs.sh
          scripts/copy_cfml_test_programs_to_tests_dir.sh
          scripts/run_cfml_functional_tests_no_benchmarks.sh
-          # # scripts/run_cfml_functional_tests_with_benchmarks.sh
+          # scripts/run_cfml_functional_tests_with_benchmarks.sh

      #- name: Push benchmark results to repository
      #  uses: EndBug/add-and-commit@v9
      #  with:
-      #    add: '.benchmarks'
-      #    message: 'Auto push benchmark results by GitHub Action for ${{ runner.os }} + ${{ matrix.toolchain.exe }}'
+      #    add: '.benchmarks/CFML'
+      #    message: 'Auto push CFML benchmark results by GitHub Action for ${{ runner.os }} + ${{ matrix.toolchain.exe }}'
      #    pull: '--rebase --autostash'

-      #- name: Copy powder_mod from CFML to pyCFML
-      #  shell: bash
-      #  run: scripts/copy_powder_mod_to_pycfml_repo.sh
-
      - name: Create pyCFML source code
        shell: bash
        run: scripts/create_pycfml_src.sh
@@ -263,7 +259,16 @@ jobs:

      - name: Run pyCFML functional tests
        shell: bash
-        run: scripts/run_pycfml_functional_tests_no_benchmarks.sh
+        run: |
+          scripts/run_pycfml_functional_tests_no_benchmarks.sh
+          scripts/run_pycfml_functional_tests_with_benchmarks.sh
+
+      - name: Push benchmark results to repository
+        uses: EndBug/add-and-commit@v9
+        with:
+          add: '.benchmarks/pyCFML'
+          message: 'Auto push pyCFML benchmark results by GitHub Action for ${{ runner.os }} + ${{ matrix.toolchain.exe }}'
+          pull: '--rebase --autostash'

  ##############################################################################
  # JOB 3
29 changes: 28 additions & 1 deletion pybuild.py
@@ -831,6 +831,7 @@ def run_cfml_functional_tests_no_benchmarks():
    append_to_main_script(lines)

def run_cfml_functional_tests_with_benchmarks():
+    project_name = CONFIG['cfml']['log-name']
    relpath = os.path.join('tests', 'functional_tests', 'CFML')
    abspath = os.path.join(_project_path(), relpath)
    lines = []
@@ -842,6 +843,7 @@ def run_cfml_functional_tests_with_benchmarks():
    else:
        cmd += ' ' + CONFIG['template']['run-benchmarks']['non-master-branch']
    cmd = cmd.replace('{PATH}', abspath)
+    cmd = cmd.replace('{PROJECT}', project_name)
    if _github_actions():
        cmd = cmd.replace('{RUNNER}', 'github')
    else:
@@ -853,7 +855,6 @@ def run_cfml_functional_tests_with_benchmarks():
    _write_lines_to_file(lines, script_name)
    append_to_main_script(lines)

-
def copy_powder_mod_to_pycfml_repo():
    CFML = CONFIG['cfml']['log-name']
    cfml_repo_dir = CONFIG['cfml']['dir']['repo']
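Note: the generator above assembles the pytest call by plain string substitution into the run-benchmarks.base template defined in pybuild.toml. A minimal sketch of that expansion, with hypothetical stand-ins for abspath, _compiler_name() and _processor(), and assuming the CFML log-name is 'CFML':

```python
# Sketch of the placeholder expansion performed by the generator above;
# all concrete values below are illustrative stand-ins, not real-run output.
cmd = ('pytest {PATH} --color=yes --benchmark-only '
       '--benchmark-storage="file://./.benchmarks/{PROJECT}/{RUNNER}/{COMPILER}/{PROCESSOR}" '
       '--benchmark-warmup=on --benchmark-columns="median, iqr, ops"')
cmd = cmd.replace('{PATH}', '/work/repo/tests/functional_tests/CFML')  # hypothetical abspath
cmd = cmd.replace('{PROJECT}', 'CFML')       # assumes CONFIG['cfml']['log-name'] == 'CFML'
cmd = cmd.replace('{RUNNER}', 'github')      # 'github' on CI, 'local' otherwise
cmd = cmd.replace('{COMPILER}', 'gfortran')  # stand-in for _compiler_name()
cmd = cmd.replace('{PROCESSOR}', 'x86_64')   # stand-in for _processor()
print(cmd)  # results land under .benchmarks/CFML/github/gfortran/x86_64
```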
@@ -1384,6 +1385,31 @@ def run_pycfml_functional_tests_no_benchmarks():
    _write_lines_to_file(lines, script_name)
    append_to_main_script(lines)

+def run_pycfml_functional_tests_with_benchmarks():
+    project_name = CONFIG['pycfml']['log-name']
+    relpath = os.path.join('tests', 'functional_tests', 'pyCFML')
+    abspath = os.path.join(_project_path(), relpath)
+    lines = []
+    msg = _echo_msg(f"Running functional tests with benchmarks from '{relpath}'")
+    lines.append(msg)
+    cmd = CONFIG['template']['run-benchmarks']['base']
+    if _github_branch() == 'master':
+        cmd += ' ' + CONFIG['template']['run-benchmarks']['master-branch']
+    else:
+        cmd += ' ' + CONFIG['template']['run-benchmarks']['non-master-branch']
+    cmd = cmd.replace('{PATH}', abspath)
+    cmd = cmd.replace('{PROJECT}', project_name)
+    if _github_actions():
+        cmd = cmd.replace('{RUNNER}', 'github')
+    else:
+        cmd = cmd.replace('{RUNNER}', 'local')
+    cmd = cmd.replace('{COMPILER}', _compiler_name())
+    cmd = cmd.replace('{PROCESSOR}', _processor())
+    lines.append(cmd)
+    script_name = f'{sys._getframe().f_code.co_name}.sh'
+    _write_lines_to_file(lines, script_name)
+    append_to_main_script(lines)
+

if __name__ == '__main__':
    ARGS = parsed_args()
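Note: each generator names the shell script it writes after itself via sys._getframe(), a CPython-specific introspection call. A minimal standalone sketch of that naming trick:

```python
import sys

def run_pycfml_functional_tests_with_benchmarks():
    # f_code.co_name is the enclosing function's name, so each generator
    # names its output script after itself (CPython-specific API).
    return f'{sys._getframe().f_code.co_name}.sh'

print(run_pycfml_functional_tests_with_benchmarks())
# -> run_pycfml_functional_tests_with_benchmarks.sh
```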
@@ -1485,5 +1511,6 @@ def run_pycfml_functional_tests_no_benchmarks():
    add_main_script_header(f"Run {pyCFML} tests")
    run_pycfml_unit_tests()
    run_pycfml_functional_tests_no_benchmarks()
+    run_pycfml_functional_tests_with_benchmarks()

    _print_msg(f'All scripts were successfully created in {_scripts_path()}')
5 changes: 3 additions & 2 deletions pybuild.toml
@@ -70,9 +70,10 @@ rename-wheel = 'python3 -m wheel tags --python-tag {PYTHON_TAG} --platform-tag {
install-wheel = 'python3 -m pip install {PACKAGE} --force-reinstall --find-links={PATH}' # Dependencies "{PACKAGE}[test]" are installed via ci script
run-python = 'python3 {PATH} {OPTIONS}'
run-tests = 'pytest {PATH} --color=yes --benchmark-disable'
-run-benchmarks.base = 'pytest {PATH} --color=yes --benchmark-only --benchmark-storage="file://./.benchmarks/{RUNNER}/{COMPILER}/{PROCESSOR}" --benchmark-warmup=on --benchmark-columns="median, iqr, ops"'
+run-benchmarks.base = 'pytest {PATH} --color=yes --benchmark-only --benchmark-storage="file://./.benchmarks/{PROJECT}/{RUNNER}/{COMPILER}/{PROCESSOR}" --benchmark-warmup=on --benchmark-columns="median, iqr, ops"'
run-benchmarks.master-branch = '--benchmark-autosave'
-run-benchmarks.non-master-branch = '--benchmark-compare --benchmark-compare-fail=median:25%'
+run-benchmarks.non-master-branch = '--benchmark-autosave'
+#run-benchmarks.non-master-branch = '--benchmark-compare --benchmark-compare-fail=median:25%'

######################
# Build configurations
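Note: with this change, non-master branches autosave benchmark results just like master instead of failing on regressions. A sketch, using the template strings above, of how the fragments combine (branch selection itself happens in pybuild.py via _github_branch()):

```python
# How pybuild.py combines the run-benchmarks.* fragments above
# (strings copied from pybuild.toml; no real branch lookup here).
base = ('pytest {PATH} --color=yes --benchmark-only '
        '--benchmark-storage="file://./.benchmarks/{PROJECT}/{RUNNER}/{COMPILER}/{PROCESSOR}" '
        '--benchmark-warmup=on --benchmark-columns="median, iqr, ops"')

master_cmd = base + ' --benchmark-autosave'      # master: save a timestamped run
non_master_cmd = base + ' --benchmark-autosave'  # non-master: now also autosaves

# The stricter variant kept commented out in the config would instead
# compare against the last saved run and fail on a >25% median regression:
# non_master_cmd = base + ' --benchmark-compare --benchmark-compare-fail=median:25%'
```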
@@ -102,25 +102,25 @@ def test__crysfml_db_path():
    desired = os.environ['CRYSFML_DB']
    assert desired == actual

-def test__compute_pattern__SrTiO3_Pm3m():
+def test__compute_pattern__SrTiO3_Pm3m(benchmark):
    study_dict = copy.deepcopy(STUDY_DICT_PM3M)
    norm = 120
    _, desired = np.loadtxt(path_to_desired('srtio3-pm3m-pattern_Nebil-ifort.xy'), unpack=True)
    desired = desired - 20.0 # remove background
    desired = np.roll(desired, -1) # compensate for a 1-element horizontal shift in y data between old Nebil windows build and Andrew current gfortran build
    desired = desired / norm
-    actual = compute_pattern(study_dict)
+    actual = benchmark(compute_pattern, study_dict)
    actual = actual / norm
    assert_almost_equal(desired, actual, decimal=0, verbose=True)

-def test__compute_pattern__SrTiO3_Pnma():
+def test__compute_pattern__SrTiO3_Pnma(benchmark):
    study_dict = copy.deepcopy(STUDY_DICT_PM3M)
    study_dict['phases'][0]['SrTiO3']['_space_group_name_H-M_alt'] = 'P n m a'
    norm = 0.65
    desired = np.loadtxt(path_to_desired('srtio3-pnma-pattern_Andrew-ifort.y'), unpack=True)
    desired = desired - 20.0 # remove background
    desired = desired / norm
-    actual = compute_pattern(study_dict)
+    actual = benchmark(compute_pattern, study_dict)
    actual = actual / norm
    assert_almost_equal(desired, actual, decimal=2, verbose=True)
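Note: the tests now route compute_pattern through pytest-benchmark's benchmark fixture, which times repeated calls and passes through the return value, so the numeric assertions still check real output. A minimal sketch of the pattern, assuming the pytest-benchmark plugin is installed and using a hypothetical stand-in for compute_pattern:

```python
# Minimal, self-contained illustration of the pytest-benchmark fixture
# pattern adopted above; run with pytest and the pytest-benchmark plugin.
import numpy as np

def compute_pattern(study_dict):
    # hypothetical stand-in for the real pyCFML call
    return np.zeros(100) + len(study_dict.get('phases', []))

def test__compute_pattern(benchmark):
    # benchmark(fn, *args) times repeated calls of fn and returns fn's
    # result, so the test remains a correctness check as well.
    actual = benchmark(compute_pattern, {'phases': []})
    assert actual.shape == (100,)
```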
