#13368: Remove llama-specific packages from requirements-dev.txt
cglagovichTT committed Oct 28, 2024
1 parent 7eef19b commit bc99440
Showing 4 changed files with 15 additions and 2 deletions.
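The change drops the two llama-specific pins (pydantic and pydantic_core) from requirements-dev.txt and instead installs the reference llama_models requirements inside each T3000 vision test runner. A minimal sketch of that pattern, assembled from the added lines below (wh_arch_yaml is assumed to be set elsewhere in the script, as in the real run_t3000_* runners):

# Llama3.2-11B vision weights used by the multimodal tests
llama11b=/mnt/MLPerf/tt_dnn-models/llama/Llama3.2-11B-Vision-Instruct/

# Install Vision-specific packages right before the tests that need them,
# rather than pinning them globally in requirements-dev.txt
pip install -r models/demos/llama3/reference/llama_models/requirements.txt

# Run one of the multimodal unit tests against the vision weights
LLAMA_DIR=$llama11b WH_ARCH_YAML=$wh_arch_yaml pytest -n auto models/demos/llama3/tests/multimodal/test_llama_image_mlp.py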
3 changes: 3 additions & 0 deletions tests/scripts/t3000/run_t3000_demo_tests.sh
@@ -88,6 +88,9 @@ run_t3000_llama3_vision_tests() {
 n300=N300
 t3k=T3K
 
+# Install Vision-specific packages
+pip install -r models/demos/llama3/reference/llama_models/requirements.txt
+
 for fake_device in "$n300" "$t3k"; do
 FAKE_DEVICE=$fake_device LLAMA_DIR=$llama11b WH_ARCH_YAML=$wh_arch_yaml pytest -n auto models/demos/llama3/demo/multimodal_demo_chat.py -k "tt and 1" --timeout 600; fail+=$?
 echo "LOG_METAL: Llama3 vision tests for $fake_device completed"
6 changes: 6 additions & 0 deletions tests/scripts/t3000/run_t3000_frequent_tests.sh
@@ -86,6 +86,9 @@ run_t3000_llama3.2-11b-vision_freq_tests() {
 # Llama3.2-11B
 llama11b=/mnt/MLPerf/tt_dnn-models/llama/Llama3.2-11B-Vision-Instruct/
 
+# Install Vision-specific packages
+pip install -r models/demos/llama3/reference/llama_models/requirements.txt
+
 LLAMA_DIR=$llama11b WH_ARCH_YAML=$wh_arch_yaml pytest -n auto models/demos/llama3/tests/multimodal/test_llama_image_transformer.py ; fail+=$?
 LLAMA_DIR=$llama11b WH_ARCH_YAML=$wh_arch_yaml pytest -n auto models/demos/llama3/tests/multimodal/test_llama_vision_encoder.py ; fail+=$?
 LLAMA_DIR=$llama11b WH_ARCH_YAML=$wh_arch_yaml pytest -n auto models/demos/llama3/tests/multimodal/test_llama_cross_attention_transformer_text.py ; fail+=$?
@@ -113,6 +116,9 @@ run_t3000_spoof_n300_llama3.2-11b-vision_freq_tests() {
 # Use FAKE_DEVICE env variable to run on an N300 mesh
 fake_device=N300
 
+# Install Vision-specific packages
+pip install -r models/demos/llama3/reference/llama_models/requirements.txt
+
 FAKE_DEVICE=$fake_device LLAMA_DIR=$llama11b WH_ARCH_YAML=$wh_arch_yaml pytest -n auto models/demos/llama3/tests/multimodal/test_llama_image_transformer.py ; fail+=$?
 FAKE_DEVICE=$fake_device LLAMA_DIR=$llama11b WH_ARCH_YAML=$wh_arch_yaml pytest -n auto models/demos/llama3/tests/multimodal/test_llama_vision_encoder.py ; fail+=$?
 FAKE_DEVICE=$fake_device LLAMA_DIR=$llama11b WH_ARCH_YAML=$wh_arch_yaml pytest -n auto models/demos/llama3/tests/multimodal/test_llama_cross_attention_transformer_text.py ; fail+=$?
6 changes: 6 additions & 0 deletions tests/scripts/t3000/run_t3000_unit_tests.sh
@@ -163,6 +163,9 @@ run_t3000_llama3.2-11b-vision_unit_tests() {
 # Llama3.2-11B
 llama11b=/mnt/MLPerf/tt_dnn-models/llama/Llama3.2-11B-Vision-Instruct/
 
+# Install Vision-specific packages
+pip install -r models/demos/llama3/reference/llama_models/requirements.txt
+
 LLAMA_DIR=$llama11b WH_ARCH_YAML=$wh_arch_yaml pytest -n auto models/demos/llama3/tests/multimodal/test_llama_image_mlp.py ; fail+=$?
 LLAMA_DIR=$llama11b WH_ARCH_YAML=$wh_arch_yaml pytest -n auto models/demos/llama3/tests/multimodal/test_llama_image_attention.py ; fail+=$?
 LLAMA_DIR=$llama11b WH_ARCH_YAML=$wh_arch_yaml pytest -n auto models/demos/llama3/tests/multimodal/test_llama_image_block.py ; fail+=$?
@@ -195,6 +198,9 @@ run_t3000_spoof_n300_llama3.2-11b-vision_unit_tests() {
 # Use FAKE_DEVICE env variable to run on an N300 mesh
 fake_device=N300
 
+# Install Vision-specific packages
+pip install -r models/demos/llama3/reference/llama_models/requirements.txt
+
 FAKE_DEVICE=$fake_device LLAMA_DIR=$llama11b WH_ARCH_YAML=$wh_arch_yaml pytest -n auto models/demos/llama3/tests/multimodal/test_llama_image_mlp.py ; fail+=$?
 FAKE_DEVICE=$fake_device LLAMA_DIR=$llama11b WH_ARCH_YAML=$wh_arch_yaml pytest -n auto models/demos/llama3/tests/multimodal/test_llama_image_attention.py ; fail+=$?
 FAKE_DEVICE=$fake_device LLAMA_DIR=$llama11b WH_ARCH_YAML=$wh_arch_yaml pytest -n auto models/demos/llama3/tests/multimodal/test_llama_image_block.py ; fail+=$?
2 changes: 0 additions & 2 deletions tt_metal/python_env/requirements-dev.txt
@@ -57,7 +57,5 @@ fsspec==2023.9.2 # Temporary pin to 2023.9.2: https://github.com/tenstorrent/tt-
 docopt==0.6.2
 tabulate==0.9.0
 blobfile==2.1.1 # Required for llama3
-pydantic==2.9.2 # Required for llama3
-pydantic_core==2.23.4 # Required for llama3
 numpy>=1.24.4,<2
 huggingface-hub==0.25.2
