diff --git a/open/UCSD/code/stable-diffusion-xl/README.md b/open/UCSD/code/stable-diffusion-xl/README.md
new file mode 100644
index 0000000..2fd9f95
--- /dev/null
+++ b/open/UCSD/code/stable-diffusion-xl/README.md
@@ -0,0 +1 @@
+TBD
\ No newline at end of file
diff --git a/open/UCSD/measurements/aqua-reference-rocm-pytorch-v2.6.0.dev20241109-scc24-base/README.md b/open/UCSD/measurements/aqua-reference-rocm-pytorch-v2.6.0.dev20241109-scc24-base/README.md
new file mode 100644
index 0000000..def6c9e
--- /dev/null
+++ b/open/UCSD/measurements/aqua-reference-rocm-pytorch-v2.6.0.dev20241109-scc24-base/README.md
@@ -0,0 +1,3 @@
+| Model               | Scenario   | Accuracy              | Throughput   | Latency (in ms)   |
+|---------------------|------------|-----------------------|--------------|-------------------|
+| stable-diffusion-xl | offline    | (15.22786, 236.96183) | 0.209        | -                 |
\ No newline at end of file
diff --git a/open/UCSD/measurements/aqua-reference-rocm-pytorch-v2.6.0.dev20241109-scc24-base/stable-diffusion-xl/offline/README.md b/open/UCSD/measurements/aqua-reference-rocm-pytorch-v2.6.0.dev20241109-scc24-base/stable-diffusion-xl/offline/README.md
new file mode 100644
index 0000000..a187b81
--- /dev/null
+++ b/open/UCSD/measurements/aqua-reference-rocm-pytorch-v2.6.0.dev20241109-scc24-base/stable-diffusion-xl/offline/README.md
@@ -0,0 +1,57 @@
+This experiment is generated using the [MLCommons Collective Mind automation framework (CM)](https://github.com/mlcommons/cm4mlops).
+
+*Check [CM MLPerf docs](https://docs.mlcommons.org/inference) for more details.*
+
+## Host platform
+
+* OS version: Linux-5.14.0-427.42.1.el9_4.x86_64-x86_64-with-glibc2.34
+* CPU version: x86_64
+* Python version: 3.11.7 (main, Dec 15 2023, 18:12:31) [GCC 11.2.0]
+* MLCommons CM version: 3.1.0
+
+## CM Run Command
+
+See [CM installation guide](https://docs.mlcommons.org/inference/install/).
+
+```bash
+pip install -U cmind
+
+cm rm cache -f
+
+cm pull repo mlcommons@cm4mlops --checkout=e8235832b1ca225f65ecc8272c597d5c1a112d82
+
+cm run script \
+    --tags=run-mlperf,inference,_r4.1-dev,_short,_scc24-base \
+    --model=sdxl \
+    --implementation=reference \
+    --framework=pytorch \
+    --category=datacenter \
+    --scenario=Offline \
+    --execution_mode=test \
+    --device=rocm \
+    --quiet \
+    --precision=float16 \
+    --env.CM_GET_PLATFORM_DETAILS=no
+```
+*Note: to use the [latest automation recipes](https://docs.mlcommons.org/inference) for MLPerf (CM scripts),
+ reload the mlcommons@cm4mlops repository without the `--checkout` option and clean the CM cache as follows:*
+
+```bash
+cm rm repo mlcommons@cm4mlops
+cm pull repo mlcommons@cm4mlops
+cm rm cache -f
+
+```
+
+## Results
+
+Platform: aqua-reference-rocm-pytorch-v2.6.0.dev20241109-scc24-base
+
+Model Precision: fp32
+
+### Accuracy Results
+`CLIP_SCORE`: `15.22786`, Required accuracy for closed division `>= 31.68632` and `<= 31.81332`
+`FID_SCORE`: `236.96183`, Required accuracy for closed division `>= 23.01086` and `<= 23.95008`
+
+### Performance Results
+`Samples per second`: `0.209132`
diff --git a/open/UCSD/measurements/aqua-reference-rocm-pytorch-v2.6.0.dev20241109-scc24-base/stable-diffusion-xl/offline/accuracy_console.out b/open/UCSD/measurements/aqua-reference-rocm-pytorch-v2.6.0.dev20241109-scc24-base/stable-diffusion-xl/offline/accuracy_console.out
new file mode 100644
index 0000000..e69de29
diff --git a/open/UCSD/measurements/aqua-reference-rocm-pytorch-v2.6.0.dev20241109-scc24-base/stable-diffusion-xl/offline/aqua-reference-rocm-pytorch-v2.6.0.dev20241109-scc24-base.json b/open/UCSD/measurements/aqua-reference-rocm-pytorch-v2.6.0.dev20241109-scc24-base/stable-diffusion-xl/offline/aqua-reference-rocm-pytorch-v2.6.0.dev20241109-scc24-base.json
new file mode 100644
index 0000000..07589d2
--- /dev/null
+++ b/open/UCSD/measurements/aqua-reference-rocm-pytorch-v2.6.0.dev20241109-scc24-base/stable-diffusion-xl/offline/aqua-reference-rocm-pytorch-v2.6.0.dev20241109-scc24-base.json
@@ -0,0 +1,7 @@
+{
+  "starting_weights_filename": "https://github.com/mlcommons/inference/tree/master/text_to_image#download-model",
+  "retraining": "no",
+  "input_data_types": "fp32",
+  "weight_data_types": "fp32",
+  "weight_transformations": "no"
+}
\ No newline at end of file
diff --git a/open/UCSD/measurements/aqua-reference-rocm-pytorch-v2.6.0.dev20241109-scc24-base/stable-diffusion-xl/offline/cm-version-info.json b/open/UCSD/measurements/aqua-reference-rocm-pytorch-v2.6.0.dev20241109-scc24-base/stable-diffusion-xl/offline/cm-version-info.json
new file mode 100644
index 0000000..0b3ad46
--- /dev/null
+++ b/open/UCSD/measurements/aqua-reference-rocm-pytorch-v2.6.0.dev20241109-scc24-base/stable-diffusion-xl/offline/cm-version-info.json
@@ -0,0 +1,554 @@
+{ + "app-mlperf-inference,d775cac873ee4231:reference,sdxl,pytorch,rocm,test,r4.1-dev_default,float16,offline": [ + { + "detect,os": { + "script_uid": "863735b7db8c44fc", + "script_alias": "detect-os", + "script_tags": "detect-os,detect,os,info", + "script_variations": "", + "version": "", + "parent": "app-mlperf-inference,d775cac873ee4231 ( reference,_sdxl,_pytorch,_rocm,_test,_r4.1-dev_default,_float16,_offline )" + } + }, + { + "detect,os": { + "script_uid": "863735b7db8c44fc", + "script_alias": "detect-os", + "script_tags": "detect-os,detect,os,info", + "script_variations": "", + "version": "", + "parent": "get-sys-utils-cm,bc90993277e84b8e" + } + }, + { + "get,python": { + "script_uid":
"d0b5dd74373f4a62", + "script_alias": "get-python3", + "script_tags": "get,python,python3,get-python,get-python3", + "script_variations": "", + "version": "3.11.7", + "parent": "app-mlperf-inference,d775cac873ee4231 ( reference,_sdxl,_pytorch,_rocm,_test,_r4.1-dev_default,_float16,_offline )" + } + }, + { + "get,mlcommons,inference,src": { + "script_uid": "4b57186581024797", + "script_alias": "get-mlperf-inference-src", + "script_tags": "get,src,source,inference,inference-src,inference-source,mlperf,mlcommons", + "script_variations": "", + "version": "master-git-dffd29273e98ffcbbfad0e66648c5d390d814509", + "parent": "app-mlperf-inference,d775cac873ee4231 ( reference,_sdxl,_pytorch,_rocm,_test,_r4.1-dev_default,_float16,_offline )" + } + }, + { + "get,mlperf,inference,src": { + "script_uid": "4b57186581024797", + "script_alias": "get-mlperf-inference-src", + "script_tags": "get,src,source,inference,inference-src,inference-source,mlperf,mlcommons", + "script_variations": "", + "version": "master-git-dffd29273e98ffcbbfad0e66648c5d390d814509", + "parent": "get-mlperf-inference-utils,e341e5f86d8342e5" + } + }, + { + "get,mlperf,inference,utils": { + "script_uid": "e341e5f86d8342e5", + "script_alias": "get-mlperf-inference-utils", + "script_tags": "get,mlperf,inference,util,utils,functions", + "script_variations": "", + "version": "", + "parent": "app-mlperf-inference,d775cac873ee4231 ( reference,_sdxl,_pytorch,_rocm,_test,_r4.1-dev_default,_float16,_offline )" + } + }, + { + "detect,os": { + "script_uid": "863735b7db8c44fc", + "script_alias": "detect-os", + "script_tags": "detect-os,detect,os,info", + "script_variations": "", + "version": "", + "parent": "app-mlperf-inference-mlcommons-python,ff149e9781fc4b65 ( rocm,_pytorch,_sdxl,_offline,_float16 )" + } + }, + { + "detect,os": { + "script_uid": "863735b7db8c44fc", + "script_alias": "detect-os", + "script_tags": "detect-os,detect,os,info", + "script_variations": "", + "version": "", + "parent": "detect-cpu,586c8a43320142f7" + } + }, + { + "detect,cpu": { + "script_uid": "586c8a43320142f7", + "script_alias": "detect-cpu", + "script_tags": "detect,cpu,detect-cpu,info", + "script_variations": "", + "version": "", + "parent": "app-mlperf-inference-mlcommons-python,ff149e9781fc4b65 ( rocm,_pytorch,_sdxl,_offline,_float16 )" + } + }, + { + "detect,os": { + "script_uid": "863735b7db8c44fc", + "script_alias": "detect-os", + "script_tags": "detect-os,detect,os,info", + "script_variations": "", + "version": "", + "parent": "get-sys-utils-cm,bc90993277e84b8e" + } + }, + { + "get,python": { + "script_uid": "d0b5dd74373f4a62", + "script_alias": "get-python3", + "script_tags": "get,python,python3,get-python,get-python3", + "script_variations": "", + "version": "3.11.7", + "parent": "app-mlperf-inference-mlcommons-python,ff149e9781fc4b65 ( rocm,_pytorch,_sdxl,_offline,_float16 )" + } + }, + { + "get,generic-python-lib,_torch,_rocm": { + "script_uid": "94b62a682bc44791", + "script_alias": "get-generic-python-lib", + "script_tags": "get,install,generic,generic-python-lib", + "script_variations": "torch,rocm", + "version": "2.6.0.dev20241109", + "parent": "app-mlperf-inference-mlcommons-python,ff149e9781fc4b65 ( rocm,_pytorch,_sdxl,_offline,_float16 )" + } + }, + { + "get,ml-model,stable-diffusion,text-to-image,sdxl,raw,_pytorch,_fp16": { + "script_uid": "22c6516b2d4d4c23", + "script_alias": "get-ml-model-stable-diffusion", + "script_tags": "get,raw,ml-model,stable-diffusion,sdxl,text-to-image", + "script_variations": "pytorch,fp16", + "version": "", + 
"parent": "app-mlperf-inference-mlcommons-python,ff149e9781fc4b65 ( rocm,_pytorch,_sdxl,_offline,_float16 )" + } + }, + { + "get,dataset,coco2014,_validation,_size.50,_with-sample-ids": { + "script_uid": "3f7ad9d42f4040f8", + "script_alias": "get-dataset-coco2014", + "script_tags": "get,dataset,coco2014,object-detection,original", + "script_variations": "validation,size.50,with-sample-ids", + "version": "", + "parent": "app-mlperf-inference-mlcommons-python,ff149e9781fc4b65 ( rocm,_pytorch,_sdxl,_offline,_float16 )" + } + }, + { + "detect,os": { + "script_uid": "863735b7db8c44fc", + "script_alias": "detect-os", + "script_tags": "detect-os,detect,os,info", + "script_variations": "", + "version": "", + "parent": "generate-mlperf-inference-user-conf,3af4475745964b93" + } + }, + { + "detect,os": { + "script_uid": "863735b7db8c44fc", + "script_alias": "detect-os", + "script_tags": "detect-os,detect,os,info", + "script_variations": "", + "version": "", + "parent": "detect-cpu,586c8a43320142f7" + } + }, + { + "detect,cpu": { + "script_uid": "586c8a43320142f7", + "script_alias": "detect-cpu", + "script_tags": "detect,cpu,detect-cpu,info", + "script_variations": "", + "version": "", + "parent": "generate-mlperf-inference-user-conf,3af4475745964b93" + } + }, + { + "get,python": { + "script_uid": "d0b5dd74373f4a62", + "script_alias": "get-python3", + "script_tags": "get,python,python3,get-python,get-python3", + "script_variations": "", + "version": "3.11.7", + "parent": "generate-mlperf-inference-user-conf,3af4475745964b93" + } + }, + { + "get,mlcommons,inference,src": { + "script_uid": "4b57186581024797", + "script_alias": "get-mlperf-inference-src", + "script_tags": "get,src,source,inference,inference-src,inference-source,mlperf,mlcommons", + "script_variations": "", + "version": "master-git-dffd29273e98ffcbbfad0e66648c5d390d814509", + "parent": "generate-mlperf-inference-user-conf,3af4475745964b93" + } + }, + { + "get,cache,dir,_name.mlperf-inference-sut-configs": { + "script_uid": "48f4622e059b45ce", + "script_alias": "get-cache-dir", + "script_tags": "get,cache,dir,directory", + "script_variations": "name.mlperf-inference-sut-configs", + "version": "", + "parent": "get-mlperf-inference-sut-configs,c2fbf72009e2445b" + } + }, + { + "get,sut,configs": { + "script_uid": "c2fbf72009e2445b", + "script_alias": "get-mlperf-inference-sut-configs", + "script_tags": "get,mlperf,inference,sut,configs,sut-configs", + "script_variations": "", + "version": "", + "parent": "generate-mlperf-inference-user-conf,3af4475745964b93" + } + }, + { + "generate,user-conf,mlperf,inference": { + "script_uid": "3af4475745964b93", + "script_alias": "generate-mlperf-inference-user-conf", + "script_tags": "generate,mlperf,inference,user-conf,inference-user-conf", + "script_variations": "", + "version": "", + "parent": "app-mlperf-inference-mlcommons-python,ff149e9781fc4b65 ( rocm,_pytorch,_sdxl,_offline,_float16 )" + } + }, + { + "detect,os": { + "script_uid": "863735b7db8c44fc", + "script_alias": "detect-os", + "script_tags": "detect-os,detect,os,info", + "script_variations": "", + "version": "", + "parent": "get-mlperf-inference-loadgen,64c3d98d0ba04950" + } + }, + { + "get,python3": { + "script_uid": "d0b5dd74373f4a62", + "script_alias": "get-python3", + "script_tags": "get,python,python3,get-python,get-python3", + "script_variations": "", + "version": "3.11.7", + "parent": "get-mlperf-inference-loadgen,64c3d98d0ba04950" + } + }, + { + "detect,os": { + "script_uid": "863735b7db8c44fc", + "script_alias": "detect-os", + 
"script_tags": "detect-os,detect,os,info", + "script_variations": "", + "version": "", + "parent": "get-mlperf-inference-src,4b57186581024797" + } + }, + { + "get,python3": { + "script_uid": "d0b5dd74373f4a62", + "script_alias": "get-python3", + "script_tags": "get,python,python3,get-python,get-python3", + "script_variations": "", + "version": "3.11.7", + "parent": "get-mlperf-inference-src,4b57186581024797" + } + }, + { + "get,git,repo,_branch.master,_repo.https://github.com/mlcommons/inference": { + "script_uid": "ed603e7292974f10", + "script_alias": "get-git-repo", + "script_tags": "get,git,repo,repository,clone", + "script_variations": "branch.master,repo.https://github.com/mlcommons/inference", + "version": "", + "parent": "get-mlperf-inference-src,4b57186581024797" + } + }, + { + "get,mlcommons,inference,src": { + "script_uid": "4b57186581024797", + "script_alias": "get-mlperf-inference-src", + "script_tags": "get,src,source,inference,inference-src,inference-source,mlperf,mlcommons", + "script_variations": "", + "version": "master-git-dffd29273e98ffcbbfad0e66648c5d390d814509", + "parent": "get-mlperf-inference-loadgen,64c3d98d0ba04950" + } + }, + { + "get,compiler": { + "script_uid": "99832a103ed04eb8", + "script_alias": "get-llvm", + "script_tags": "get,llvm,compiler,c-compiler,cpp-compiler,get-llvm", + "script_variations": "", + "version": "15.0.6", + "parent": "get-mlperf-inference-loadgen,64c3d98d0ba04950" + } + }, + { + "get,cmake": { + "script_uid": "52bf974d791b4fc8", + "script_alias": "get-cmake", + "script_tags": "get,cmake,get-cmake", + "script_variations": "", + "version": "3.31.0", + "parent": "get-mlperf-inference-loadgen,64c3d98d0ba04950" + } + }, + { + "get,generic-python-lib,_package.wheel": { + "script_uid": "94b62a682bc44791", + "script_alias": "get-generic-python-lib", + "script_tags": "get,install,generic,generic-python-lib", + "script_variations": "package.wheel", + "version": "0.44.0", + "parent": "get-mlperf-inference-loadgen,64c3d98d0ba04950" + } + }, + { + "get,generic-python-lib,_pip": { + "script_uid": "94b62a682bc44791", + "script_alias": "get-generic-python-lib", + "script_tags": "get,install,generic,generic-python-lib", + "script_variations": "pip", + "version": "24.2", + "parent": "get-mlperf-inference-loadgen,64c3d98d0ba04950" + } + }, + { + "get,generic-python-lib,_package.pybind11": { + "script_uid": "94b62a682bc44791", + "script_alias": "get-generic-python-lib", + "script_tags": "get,install,generic,generic-python-lib", + "script_variations": "package.pybind11", + "version": "2.13.6", + "parent": "get-mlperf-inference-loadgen,64c3d98d0ba04950" + } + }, + { + "get,generic-python-lib,_package.setuptools": { + "script_uid": "94b62a682bc44791", + "script_alias": "get-generic-python-lib", + "script_tags": "get,install,generic,generic-python-lib", + "script_variations": "package.setuptools", + "version": "75.1.0", + "parent": "get-mlperf-inference-loadgen,64c3d98d0ba04950" + } + }, + { + "get,loadgen": { + "script_uid": "64c3d98d0ba04950", + "script_alias": "get-mlperf-inference-loadgen", + "script_tags": "get,loadgen,inference,inference-loadgen,mlperf,mlcommons", + "script_variations": "", + "version": "master", + "parent": "app-mlperf-inference-mlcommons-python,ff149e9781fc4b65 ( rocm,_pytorch,_sdxl,_offline,_float16 )" + } + }, + { + "get,mlcommons,inference,src": { + "script_uid": "4b57186581024797", + "script_alias": "get-mlperf-inference-src", + "script_tags": "get,src,source,inference,inference-src,inference-source,mlperf,mlcommons", + 
"script_variations": "", + "version": "master-git-dffd29273e98ffcbbfad0e66648c5d390d814509", + "parent": "app-mlperf-inference-mlcommons-python,ff149e9781fc4b65 ( rocm,_pytorch,_sdxl,_offline,_float16 )" + } + }, + { + "detect,os": { + "script_uid": "863735b7db8c44fc", + "script_alias": "detect-os", + "script_tags": "detect-os,detect,os,info", + "script_variations": "", + "version": "", + "parent": "get-mlperf-inference-src,4b57186581024797 ( branch.dev )" + } + }, + { + "get,python3": { + "script_uid": "d0b5dd74373f4a62", + "script_alias": "get-python3", + "script_tags": "get,python,python3,get-python,get-python3", + "script_variations": "", + "version": "3.11.7", + "parent": "get-mlperf-inference-src,4b57186581024797 ( branch.dev )" + } + }, + { + "detect,os": { + "script_uid": "863735b7db8c44fc", + "script_alias": "detect-os", + "script_tags": "detect-os,detect,os,info", + "script_variations": "", + "version": "", + "parent": "get-git-repo,ed603e7292974f10 ( branch.dev,_repo.https://github.com/mlcommons/inference )" + } + }, + { + "get,git,repo,_branch.dev,_repo.https://github.com/mlcommons/inference": { + "script_uid": "ed603e7292974f10", + "script_alias": "get-git-repo", + "script_tags": "get,git,repo,repository,clone", + "script_variations": "branch.dev,repo.https://github.com/mlcommons/inference", + "version": "", + "parent": "get-mlperf-inference-src,4b57186581024797 ( branch.dev )" + } + }, + { + "get,mlcommons,inference,src,_branch.dev": { + "script_uid": "4b57186581024797", + "script_alias": "get-mlperf-inference-src", + "script_tags": "get,src,source,inference,inference-src,inference-source,mlperf,mlcommons", + "script_variations": "branch.dev", + "version": "custom", + "parent": "app-mlperf-inference-mlcommons-python,ff149e9781fc4b65 ( rocm,_pytorch,_sdxl,_offline,_float16 )" + } + }, + { + "get,generic-python-lib,_package.psutil": { + "script_uid": "94b62a682bc44791", + "script_alias": "get-generic-python-lib", + "script_tags": "get,install,generic,generic-python-lib", + "script_variations": "package.psutil", + "version": "6.0.0", + "parent": "app-mlperf-inference-mlcommons-python,ff149e9781fc4b65 ( rocm,_pytorch,_sdxl,_offline,_float16 )" + } + }, + { + "get,generic-python-lib,_package.diffusers": { + "script_uid": "94b62a682bc44791", + "script_alias": "get-generic-python-lib", + "script_tags": "get,install,generic,generic-python-lib", + "script_variations": "package.diffusers", + "version": "0.21.2", + "parent": "app-mlperf-inference-mlcommons-python,ff149e9781fc4b65 ( rocm,_pytorch,_sdxl,_offline,_float16 )" + } + }, + { + "get,generic-python-lib,_package.transformers": { + "script_uid": "94b62a682bc44791", + "script_alias": "get-generic-python-lib", + "script_tags": "get,install,generic,generic-python-lib", + "script_variations": "package.transformers", + "version": "4.46.2", + "parent": "app-mlperf-inference-mlcommons-python,ff149e9781fc4b65 ( rocm,_pytorch,_sdxl,_offline,_float16 )" + } + }, + { + "get,generic-python-lib,_package.torchvision,_rocm": { + "script_uid": "94b62a682bc44791", + "script_alias": "get-generic-python-lib", + "script_tags": "get,install,generic,generic-python-lib", + "script_variations": "package.torchvision,rocm", + "version": "0.20.0.dev20241107", + "parent": "app-mlperf-inference-mlcommons-python,ff149e9781fc4b65 ( rocm,_pytorch,_sdxl,_offline,_float16 )" + } + }, + { + "get,generic-python-lib,_package.accelerate": { + "script_uid": "94b62a682bc44791", + "script_alias": "get-generic-python-lib", + "script_tags": 
"get,install,generic,generic-python-lib", + "script_variations": "package.accelerate", + "version": "1.1.0", + "parent": "app-mlperf-inference-mlcommons-python,ff149e9781fc4b65 ( rocm,_pytorch,_sdxl,_offline,_float16 )" + } + }, + { + "get,generic-python-lib,_package.torchmetrics": { + "script_uid": "94b62a682bc44791", + "script_alias": "get-generic-python-lib", + "script_tags": "get,install,generic,generic-python-lib", + "script_variations": "package.torchmetrics", + "version": "1.5.2", + "parent": "app-mlperf-inference-mlcommons-python,ff149e9781fc4b65 ( rocm,_pytorch,_sdxl,_offline,_float16 )" + } + }, + { + "get,generic-python-lib,_package.torch-fidelity": { + "script_uid": "94b62a682bc44791", + "script_alias": "get-generic-python-lib", + "script_tags": "get,install,generic,generic-python-lib", + "script_variations": "package.torch-fidelity", + "version": "0.3.0", + "parent": "app-mlperf-inference-mlcommons-python,ff149e9781fc4b65 ( rocm,_pytorch,_sdxl,_offline,_float16 )" + } + }, + { + "get,generic-python-lib,_package.open_clip_torch": { + "script_uid": "94b62a682bc44791", + "script_alias": "get-generic-python-lib", + "script_tags": "get,install,generic,generic-python-lib", + "script_variations": "package.open_clip_torch", + "version": "2.7.0", + "parent": "app-mlperf-inference-mlcommons-python,ff149e9781fc4b65 ( rocm,_pytorch,_sdxl,_offline,_float16 )" + } + }, + { + "get,generic-python-lib,_package.opencv-python": { + "script_uid": "94b62a682bc44791", + "script_alias": "get-generic-python-lib", + "script_tags": "get,install,generic,generic-python-lib", + "script_variations": "package.opencv-python", + "version": "4.8.1.78", + "parent": "app-mlperf-inference-mlcommons-python,ff149e9781fc4b65 ( rocm,_pytorch,_sdxl,_offline,_float16 )" + } + }, + { + "get,generic-python-lib,_package.scipy": { + "script_uid": "94b62a682bc44791", + "script_alias": "get-generic-python-lib", + "script_tags": "get,install,generic,generic-python-lib", + "script_variations": "package.scipy", + "version": "1.10.1", + "parent": "app-mlperf-inference-mlcommons-python,ff149e9781fc4b65 ( rocm,_pytorch,_sdxl,_offline,_float16 )" + } + }, + { + "detect,os": { + "script_uid": "863735b7db8c44fc", + "script_alias": "detect-os", + "script_tags": "detect-os,detect,os,info", + "script_variations": "", + "version": "", + "parent": "detect-cpu,586c8a43320142f7" + } + }, + { + "detect,cpu": { + "script_uid": "586c8a43320142f7", + "script_alias": "detect-cpu", + "script_tags": "detect,cpu,detect-cpu,info", + "script_variations": "", + "version": "", + "parent": "benchmark-program,19f369ef47084895" + } + }, + { + "benchmark-program,program": { + "script_uid": "19f369ef47084895", + "script_alias": "benchmark-program", + "script_tags": "program,benchmark,benchmark-program", + "script_variations": "", + "version": "", + "parent": "benchmark-program-mlperf,cfff0132a8aa4018" + } + }, + { + "benchmark-mlperf": { + "script_uid": "cfff0132a8aa4018", + "script_alias": "benchmark-program-mlperf", + "script_tags": "mlperf,benchmark-mlperf", + "script_variations": "", + "version": "", + "parent": "app-mlperf-inference-mlcommons-python,ff149e9781fc4b65 ( rocm,_pytorch,_sdxl,_offline,_float16 )" + } + } + ] +} \ No newline at end of file diff --git a/open/UCSD/measurements/aqua-reference-rocm-pytorch-v2.6.0.dev20241109-scc24-base/stable-diffusion-xl/offline/cpu_info.json b/open/UCSD/measurements/aqua-reference-rocm-pytorch-v2.6.0.dev20241109-scc24-base/stable-diffusion-xl/offline/cpu_info.json new file mode 100644 index 0000000..7502822 
--- /dev/null +++ b/open/UCSD/measurements/aqua-reference-rocm-pytorch-v2.6.0.dev20241109-scc24-base/stable-diffusion-xl/offline/cpu_info.json @@ -0,0 +1,27 @@ +{ + "CM_HOST_CPU_WRITE_PROTECT_SUPPORT": "yes", + "CM_HOST_CPU_MICROCODE": "0xa101148", + "CM_HOST_CPU_FPU_SUPPORT": "yes", + "CM_HOST_CPU_FPU_EXCEPTION_SUPPORT": "yes", + "CM_HOST_CPU_BUGS": "sysret_ss_attrs spectre_v1 spectre_v2 spec_store_bypass srso", + "CM_HOST_CPU_TLB_SIZE": "3584 4K pages", + "CM_HOST_CPU_CFLUSH_SIZE": "64", + "CM_HOST_CPU_ARCHITECTURE": "x86_64", + "CM_HOST_CPU_TOTAL_CORES": "64", + "CM_HOST_CPU_ON_LINE_CPUS_LIST": "0-63", + "CM_HOST_CPU_VENDOR_ID": "AuthenticAMD", + "CM_HOST_CPU_MODEL_NAME": "AMD EPYC 9354 32-Core Processor", + "CM_HOST_CPU_FAMILY": "25", + "CM_HOST_CPU_THREADS_PER_CORE": "1", + "CM_HOST_CPU_PHYSICAL_CORES_PER_SOCKET": "32", + "CM_HOST_CPU_SOCKETS": "2", + "CM_HOST_CPU_MAX_MHZ": "3799.0720", + "CM_HOST_CPU_L1D_CACHE_SIZE": "2 MiB (64 instances)", + "CM_HOST_CPU_L1I_CACHE_SIZE": "2 MiB (64 instances)", + "CM_HOST_CPU_L2_CACHE_SIZE": "64 MiB (64 instances)", + "CM_HOST_CPU_L3_CACHE_SIZE": "512 MiB (16 instances)", + "CM_HOST_CPU_NUMA_NODES": "16", + "CM_HOST_CPU_TOTAL_LOGICAL_CORES": "64", + "CM_HOST_MEMORY_CAPACITY": "773G", + "CM_HOST_DISK_CAPACITY": "3.9T" +} \ No newline at end of file diff --git a/open/UCSD/measurements/aqua-reference-rocm-pytorch-v2.6.0.dev20241109-scc24-base/stable-diffusion-xl/offline/mlperf.conf b/open/UCSD/measurements/aqua-reference-rocm-pytorch-v2.6.0.dev20241109-scc24-base/stable-diffusion-xl/offline/mlperf.conf new file mode 100644 index 0000000..10f7ae7 --- /dev/null +++ b/open/UCSD/measurements/aqua-reference-rocm-pytorch-v2.6.0.dev20241109-scc24-base/stable-diffusion-xl/offline/mlperf.conf @@ -0,0 +1,98 @@ +# The format of this config file is 'key = value'. +# The key has the format 'model.scenario.key'. Value is mostly int64_t. +# Model maybe '*' as wildcard. In that case the value applies to all models. +# All times are in milli seconds + +# Set performance_sample_count for each model. +# User can optionally set this to higher values in user.conf. +resnet50.*.performance_sample_count_override = 1024 +ssd-mobilenet.*.performance_sample_count_override = 256 +retinanet.*.performance_sample_count_override = 64 +bert.*.performance_sample_count_override = 10833 +dlrm.*.performance_sample_count_override = 204800 +dlrm-v2.*.performance_sample_count_override = 204800 +rnnt.*.performance_sample_count_override = 2513 +gptj.*.performance_sample_count_override = 13368 +llama2-70b.*.performance_sample_count_override = 24576 +stable-diffusion-xl.*.performance_sample_count_override = 5000 +# set to 0 to let entire sample set to be performance sample +3d-unet.*.performance_sample_count_override = 0 + +# Set seeds. The seeds will be distributed two weeks before the submission. +*.*.qsl_rng_seed = 3066443479025735752 +*.*.sample_index_rng_seed = 10688027786191513374 +*.*.schedule_rng_seed = 14962580496156340209 +# Set seeds for TEST_05. The seeds will be distributed two weeks before the submission. 
+*.*.test05_qsl_rng_seed = 16799458546791641818 +*.*.test05_sample_index_rng_seed = 5453809927556429288 +*.*.test05_schedule_rng_seed = 5435552105434836064 + + +*.SingleStream.target_latency_percentile = 90 +*.SingleStream.min_duration = 600000 + +*.MultiStream.target_latency_percentile = 99 +*.MultiStream.samples_per_query = 8 +*.MultiStream.min_duration = 600000 +*.MultiStream.min_query_count = 662 +retinanet.MultiStream.target_latency = 528 + +# 3D-UNet uses equal issue mode because it has non-uniform inputs +3d-unet.*.sample_concatenate_permutation = 1 + +# LLM benchmarks have non-uniform inputs and outputs, and use equal issue mode for all latency scenario +gptj.*.sample_concatenate_permutation = 1 +llama2-70b.*.sample_concatenate_permutation = 1 +mixtral-8x7b.*.sample_concatenate_permutation = 1 + +*.Server.target_latency = 10 +*.Server.target_latency_percentile = 99 +*.Server.target_duration = 0 +*.Server.min_duration = 600000 +resnet50.Server.target_latency = 15 +retinanet.Server.target_latency = 100 +bert.Server.target_latency = 130 +dlrm.Server.target_latency = 60 +dlrm-v2.Server.target_latency = 60 +rnnt.Server.target_latency = 1000 +gptj.Server.target_latency = 20000 +stable-diffusion-xl.Server.target_latency = 20000 +# Llama2-70b benchmarks measures token latencies +llama2-70b.*.use_token_latencies = 1 +mixtral-8x7b.*.use_token_latencies = 1 +# gptj benchmark infers token latencies +gptj.*.infer_token_latencies = 1 +gptj.*.token_latency_scaling_factor = 69 +# Only ttft and tpot are tracked for the llama2-70b & mixtral-8x7B benchmark therefore target_latency = 0 +llama2-70b.Server.target_latency = 0 +llama2-70b.Server.ttft_latency = 2000 +llama2-70b.Server.tpot_latency = 200 + +mixtral-8x7b.Server.target_latency = 0 +mixtral-8x7b.Server.ttft_latency = 2000 +mixtral-8x7b.Server.tpot_latency = 200 + +*.Offline.target_latency_percentile = 90 +*.Offline.min_duration = 600000 + +# In Offline scenario, we always have one query. But LoadGen maps this to +# min_sample_count internally in Offline scenario. If the dataset size is larger +# than 24576 we limit the min_query_count to 24576 and otherwise we use +# the dataset size as the limit + +resnet50.Offline.min_query_count = 24576 +retinanet.Offline.min_query_count = 24576 +dlrm-v2.Offline.min_query_count = 24576 +bert.Offline.min_query_count = 10833 +gptj.Offline.min_query_count = 13368 +rnnt.Offline.min_query_count = 2513 +3d-unet.Offline.min_query_count = 43 +stable-diffusion-xl.Offline.min_query_count = 5000 +llama2-70b.Offline.min_query_count = 24576 +mixtral-8x7b.Offline.min_query_count = 15000 + +# These fields should be defined and overridden by user.conf. 
+*.SingleStream.target_latency = 10 +*.MultiStream.target_latency = 80 +*.Server.target_qps = 1.0 +*.Offline.target_qps = 1.0 diff --git a/open/UCSD/measurements/aqua-reference-rocm-pytorch-v2.6.0.dev20241109-scc24-base/stable-diffusion-xl/offline/model-info.json b/open/UCSD/measurements/aqua-reference-rocm-pytorch-v2.6.0.dev20241109-scc24-base/stable-diffusion-xl/offline/model-info.json new file mode 100644 index 0000000..07589d2 --- /dev/null +++ b/open/UCSD/measurements/aqua-reference-rocm-pytorch-v2.6.0.dev20241109-scc24-base/stable-diffusion-xl/offline/model-info.json @@ -0,0 +1,7 @@ +{ + "starting_weights_filename": "https://github.com/mlcommons/inference/tree/master/text_to_image#download-model", + "retraining": "no", + "input_data_types": "fp32", + "weight_data_types": "fp32", + "weight_transformations": "no" +} \ No newline at end of file diff --git a/open/UCSD/measurements/aqua-reference-rocm-pytorch-v2.6.0.dev20241109-scc24-base/stable-diffusion-xl/offline/os_info.json b/open/UCSD/measurements/aqua-reference-rocm-pytorch-v2.6.0.dev20241109-scc24-base/stable-diffusion-xl/offline/os_info.json new file mode 100644 index 0000000..032ab52 --- /dev/null +++ b/open/UCSD/measurements/aqua-reference-rocm-pytorch-v2.6.0.dev20241109-scc24-base/stable-diffusion-xl/offline/os_info.json @@ -0,0 +1,26 @@ +{ + "CM_HOST_OS_TYPE": "linux", + "CM_HOST_OS_BITS": "64", + "CM_HOST_OS_FLAVOR": "rocky", + "CM_HOST_OS_FLAVOR_LIKE": "rhel centos fedora", + "CM_HOST_OS_VERSION": "9.4", + "CM_HOST_OS_KERNEL_VERSION": "5.14.0-427.42.1.el9_4.x86_64", + "CM_HOST_OS_GLIBC_VERSION": "2.34", + "CM_HOST_OS_MACHINE": "x86_64", + "CM_HOST_OS_PACKAGE_MANAGER": "dnf", + "CM_HOST_OS_PACKAGE_MANAGER_INSTALL_CMD": "dnf install -y", + "CM_HOST_OS_PACKAGE_MANAGER_UPDATE_CMD": "dnf update -y", + "+CM_HOST_OS_DEFAULT_LIBRARY_PATH": [ + "/usr/x86_64-redhat-linux/lib64", + "/usr/lib64", + "/usr/local/lib64", + "/lib64", + "/usr/x86_64-redhat-linux/lib", + "/usr/local/lib", + "/lib", + "/usr/lib" + ], + "CM_HOST_PLATFORM_FLAVOR": "x86_64", + "CM_HOST_PYTHON_BITS": "64", + "CM_HOST_SYSTEM_NAME": "aqua" +} \ No newline at end of file diff --git a/open/UCSD/measurements/aqua-reference-rocm-pytorch-v2.6.0.dev20241109-scc24-base/stable-diffusion-xl/offline/performance_console.out b/open/UCSD/measurements/aqua-reference-rocm-pytorch-v2.6.0.dev20241109-scc24-base/stable-diffusion-xl/offline/performance_console.out new file mode 100644 index 0000000..e69de29 diff --git a/open/UCSD/measurements/aqua-reference-rocm-pytorch-v2.6.0.dev20241109-scc24-base/stable-diffusion-xl/offline/pip_freeze.json b/open/UCSD/measurements/aqua-reference-rocm-pytorch-v2.6.0.dev20241109-scc24-base/stable-diffusion-xl/offline/pip_freeze.json new file mode 100644 index 0000000..874489d --- /dev/null +++ b/open/UCSD/measurements/aqua-reference-rocm-pytorch-v2.6.0.dev20241109-scc24-base/stable-diffusion-xl/offline/pip_freeze.json @@ -0,0 +1,117 @@ +{ + "pip_freeze": { + "absl-py": "2.1.0", + "accelerate": "1.1.0", + "aiohappyeyeballs": "2.4.3", + "aiohttp": "3.10.10", + "aiosignal": "1.3.1", + "annotated-types": "0.7.0", + "async-timeout": "4.0.3", + "attrs": "24.2.0", + "blinker": "1.8.2", + "Cerberus": "1.3.5", + "certifi": "2024.8.30", + "charset-normalizer": "3.4.0", + "click": "8.1.7", + "cmake": "3.30.5", + "cmind": "3.1.0", + "coloredlogs": "15.0.1", + "contourpy": "1.3.0", + "cycler": "0.12.1", + "datasets": "3.1.0", + "deepspeed": "0.15.3", + "diffusers": "0.21.2", + "dill": "0.3.8", + "dmiparser": "5.1", + "filelock": "3.16.1", + "Flask": "3.0.3", + 
"flatbuffers": "24.3.25", + "fonttools": "4.54.1", + "frozenlist": "1.5.0", + "fsspec": "2024.9.0", + "ftfy": "6.3.0", + "giturlparse": "0.12.0", + "h5py": "3.12.1", + "hjson": "3.1.0", + "huggingface-hub": "0.25.2", + "humanfriendly": "10.0", + "idna": "3.10", + "ijson": "3.3.0", + "importlib_metadata": "8.5.0", + "itsdangerous": "2.2.0", + "Jinja2": "3.1.4", + "kiwisolver": "1.4.7", + "lightning-utilities": "0.11.7", + "lit": "18.1.8", + "lpips": "0.1.4", + "MarkupSafe": "3.0.1", + "matplotlib": "3.9.2", + "mlcommons_loadgen": "4.1", + "mpmath": "1.3.0", + "msgpack": "1.1.0", + "multidict": "6.1.0", + "multiprocess": "0.70.16", + "networkx": "3.4.1", + "ninja": "1.11.1.1", + "numpy": "1.24.4", + "nvidia-cublas-cu12": "12.4.5.8", + "nvidia-cuda-cupti-cu12": "12.4.127", + "nvidia-cuda-nvrtc-cu12": "12.4.127", + "nvidia-cuda-runtime-cu12": "12.4.127", + "nvidia-cudnn-cu12": "9.1.0.70", + "nvidia-cufft-cu12": "11.2.1.3", + "nvidia-curand-cu12": "10.3.5.147", + "nvidia-cusolver-cu12": "11.6.1.9", + "nvidia-cusparse-cu12": "12.3.1.170", + "nvidia-nccl-cu12": "2.21.5", + "nvidia-nvjitlink-cu12": "12.4.127", + "nvidia-nvtx-cu12": "12.4.127", + "open-clip-torch": "2.7.0", + "opencv-python": "4.8.1.78", + "opencv-python-headless": "4.10.0.84", + "optimum": "1.23.3", + "packaging": "24.1", + "pandas": "2.2.3", + "pillow": "10.4.0", + "propcache": "0.2.0", + "protobuf": "5.28.2", + "psutil": "6.0.0", + "py-cpuinfo": "9.0.0", + "pyarrow": "18.0.0", + "pybind11": "2.13.6", + "pycocotools": "2.0.7", + "pydantic": "2.9.2", + "pydantic_core": "2.23.4", + "pyparsing": "3.1.4", + "python-dateutil": "2.9.0.post0", + "pytorch-triton-rocm": "3.1.0+cf34004b8a", + "pytz": "2024.2", + "PyYAML": "6.0.2", + "regex": "2024.9.11", + "requests": "2.32.3", + "safetensors": "0.4.5", + "scipy": "1.10.1", + "setuptools": "75.1.0", + "six": "1.16.0", + "sympy": "1.13.1", + "tabulate": "0.9.0", + "tokenizers": "0.20.1", + "torch": "2.6.0.dev20241109+rocm6.2", + "torch-fidelity": "0.3.0", + "torchaudio": "2.5.0.dev20241110+rocm6.2", + "torchmetrics": "1.5.2", + "torchvision": "0.20.0.dev20241107+rocm6.2", + "tqdm": "4.66.5", + "transformers": "4.46.2", + "triton": "3.1.0", + "typing_extensions": "4.12.2", + "tzdata": "2024.2", + "urllib3": "2.2.3", + "wcwidth": "0.2.13", + "Werkzeug": "3.1.2", + "wheel": "0.44.0", + "xxhash": "3.5.0", + "yarl": "1.17.1", + "zipp": "3.20.2" + } +} \ No newline at end of file diff --git a/open/UCSD/measurements/aqua-reference-rocm-pytorch-v2.6.0.dev20241109-scc24-base/stable-diffusion-xl/offline/user.conf b/open/UCSD/measurements/aqua-reference-rocm-pytorch-v2.6.0.dev20241109-scc24-base/stable-diffusion-xl/offline/user.conf new file mode 100644 index 0000000..21df3f1 --- /dev/null +++ b/open/UCSD/measurements/aqua-reference-rocm-pytorch-v2.6.0.dev20241109-scc24-base/stable-diffusion-xl/offline/user.conf @@ -0,0 +1,5 @@ +stable-diffusion-xl.Offline.target_qps = 0.05 +stable-diffusion-xl.Offline.max_query_count = 10 +stable-diffusion-xl.Offline.min_query_count = 10 +stable-diffusion-xl.Offline.min_duration = 0 +stable-diffusion-xl.Offline.sample_concatenate_permutation = 0 diff --git a/open/UCSD/measurements/aqua-reference-rocm-pytorch-v2.6.0.dev20241118-scc24-base/README.md b/open/UCSD/measurements/aqua-reference-rocm-pytorch-v2.6.0.dev20241118-scc24-base/README.md new file mode 100644 index 0000000..f41b17b --- /dev/null +++ b/open/UCSD/measurements/aqua-reference-rocm-pytorch-v2.6.0.dev20241118-scc24-base/README.md @@ -0,0 +1,3 @@ +| Model | Scenario | Accuracy | Throughput | Latency (in ms) 
|
+|---------------------|------------|-----------------------|--------------|-------------------|
+| stable-diffusion-xl | offline    | (16.38183, 236.85707) | 0.208        | -                 |
\ No newline at end of file
diff --git a/open/UCSD/measurements/aqua-reference-rocm-pytorch-v2.6.0.dev20241118-scc24-base/stable-diffusion-xl/offline/README.md b/open/UCSD/measurements/aqua-reference-rocm-pytorch-v2.6.0.dev20241118-scc24-base/stable-diffusion-xl/offline/README.md
new file mode 100644
index 0000000..35107c0
--- /dev/null
+++ b/open/UCSD/measurements/aqua-reference-rocm-pytorch-v2.6.0.dev20241118-scc24-base/stable-diffusion-xl/offline/README.md
@@ -0,0 +1,56 @@
+This experiment is generated using the [MLCommons Collective Mind automation framework (CM)](https://github.com/mlcommons/cm4mlops).
+
+*Check [CM MLPerf docs](https://docs.mlcommons.org/inference) for more details.*
+
+## Host platform
+
+* OS version: Linux-5.14.0-427.42.1.el9_4.x86_64-x86_64-with-glibc2.35
+* CPU version: x86_64
+* Python version: 3.10.15 (main, Oct 3 2024, 07:27:34) [GCC 11.2.0]
+* MLCommons CM version: 3.4.1
+
+## CM Run Command
+
+See [CM installation guide](https://docs.mlcommons.org/inference/install/).
+
+```bash
+pip install -U cmind
+
+cm rm cache -f
+
+cm pull repo mlcommons@cm4mlops --checkout=b32ded2a4c3039ad16dadc734bee03dd1a97f228
+
+cm run script \
+    --tags=run-mlperf,inference,_r4.1-dev,_short,_scc24-base \
+    --model=sdxl \
+    --implementation=reference \
+    --framework=pytorch \
+    --category=datacenter \
+    --scenario=Offline \
+    --execution_mode=test \
+    --device=rocm \
+    --quiet \
+    --precision=float16
+```
+*Note: to use the [latest automation recipes](https://docs.mlcommons.org/inference) for MLPerf (CM scripts),
+ reload the mlcommons@cm4mlops repository without the `--checkout` option and clean the CM cache as follows:*
+
+```bash
+cm rm repo mlcommons@cm4mlops
+cm pull repo mlcommons@cm4mlops
+cm rm cache -f
+
+```
+
+## Results
+
+Platform: aqua-reference-rocm-pytorch-v2.6.0.dev20241118-scc24-base
+
+Model Precision: fp32
+
+### Accuracy Results
+`CLIP_SCORE`: `16.38183`, Required accuracy for closed division `>= 31.68632` and `<= 31.81332`
+`FID_SCORE`: `236.85707`, Required accuracy for closed division `>= 23.01086` and `<= 23.95008`
+
+### Performance Results
+`Samples per second`: `0.20835`
diff --git a/open/UCSD/measurements/aqua-reference-rocm-pytorch-v2.6.0.dev20241118-scc24-base/stable-diffusion-xl/offline/accuracy_console.out b/open/UCSD/measurements/aqua-reference-rocm-pytorch-v2.6.0.dev20241118-scc24-base/stable-diffusion-xl/offline/accuracy_console.out
new file mode 100644
index 0000000..d6db49d
--- /dev/null
+++ b/open/UCSD/measurements/aqua-reference-rocm-pytorch-v2.6.0.dev20241118-scc24-base/stable-diffusion-xl/offline/accuracy_console.out
@@ -0,0 +1,67 @@
+INFO:main:Namespace(dataset='coco-1024', dataset_path='/root/CM/repos/local/cache/77aae3d31c8f4cee/install', profile='stable-diffusion-xl-pytorch', scenario='Offline', max_batchsize=1, threads=1, accuracy=True, find_peak_performance=False, backend='pytorch', model_name='stable-diffusion-xl', output='/root/CM/repos/local/cache/d549713c4a534705/test_results/aqua-reference-rocm-pytorch-v2.6.0.dev20241118-scc24-base/stable-diffusion-xl/offline/accuracy', qps=None, model_path='/root/CM/repos/local/cache/c4b6bbbebe504f28/stable_diffusion_fp16', dtype='fp16', device='cuda', latent_framework='torch', user_conf='/root/CM/repos/mlcommons@cm4mlops/script/generate-mlperf-inference-user-conf/tmp/3a4cbe8cff5f4d839d1ece44b6458749.conf', audit_conf='audit.config', ids_path='/root/CM/repos/local/cache/77aae3d31c8f4cee/install/sample_ids.txt', time=None, count=10, debug=False, performance_sample_count=5000, max_latency=None, samples_per_query=8)
+Keyword arguments {'safety_checker': None} are not expected by StableDiffusionXLPipeline and will be ignored.
+ Loading pipeline components...: 0%| | 0/7 [00:00