#13368: Remove llama_models as submodule. Move its install to llama3 requirements.txt.
cglagovichTT committed Oct 29, 2024
1 parent d59f132 commit df3e545
Showing 23 changed files with 30 additions and 33 deletions.
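In effect, downstream users no longer initialize a llama_models submodule; the reference implementation is installed with pip. A minimal before/after sketch of the setup flow, assuming a fresh checkout and using only paths that appear in this diff:

# Before this commit: fetch the reference code as a submodule and install its requirements
git submodule update --init models/demos/llama3/reference/llama_models
pip install -r models/demos/llama3/reference/llama_models/requirements.txt

# After this commit: install the reference package from the pinned requirements file
pip install -r models/demos/llama3/requirements.txt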
3 changes: 0 additions & 3 deletions .gitmodules
@@ -28,6 +28,3 @@
[submodule "tt_metal/third_party/tt_llk_blackhole"]
path = tt_metal/third_party/tt_llk_blackhole
url = https://github.com/tenstorrent/tt-llk-bh.git
[submodule "models/demos/llama3/reference/llama_models"]
path = models/demos/llama3/reference/llama_models
url = https://github.com/tenstorrent/llama-models.git
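For anyone reproducing this locally, a rough sketch of the usual way to drop a submodule follows; these commands illustrate the typical workflow and are not recorded in this commit:

# Deregister the submodule, remove its gitlink from the index, and clean up the cached clone
git submodule deinit -f models/demos/llama3/reference/llama_models
git rm models/demos/llama3/reference/llama_models
rm -rf .git/modules/models/demos/llama3/reference/llama_models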
4 changes: 2 additions & 2 deletions models/demos/llama3/demo/multimodal_demo_chat.py
@@ -9,9 +9,9 @@
from termcolor import cprint

from models.demos.llama3.demo.multimodal_demo_text import create_multimodal_model
-import models.demos.llama3.reference.llama_models.models.llama3.reference_impl.generation as llama_reference_generation
+import llama_models.llama3.reference_impl.generation as llama_reference_generation

-from models.demos.llama3.reference.llama_models.models.llama3.api.datatypes import ImageMedia, UserMessage
+from llama_models.llama3.api.datatypes import ImageMedia, UserMessage

THIS_DIR = Path(__file__).parent.parent.resolve() / "reference/llama_models/models/scripts/"

4 changes: 2 additions & 2 deletions models/demos/llama3/demo/multimodal_demo_text.py
@@ -8,9 +8,9 @@
from PIL import Image as PIL_Image
from termcolor import cprint

-import models.demos.llama3.reference.llama_models.models.llama3.reference_impl.generation as llama_reference_generation
+import llama_models.llama3.reference_impl.generation as llama_reference_generation

-from models.demos.llama3.reference.llama_models.models.llama3.api.datatypes import ImageMedia
+from llama_models.llama3.api.datatypes import ImageMedia

THIS_DIR = Path(__file__).parent.parent.resolve() / "reference/llama_models/models/scripts/"

1 change: 0 additions & 1 deletion models/demos/llama3/reference/llama_models
Submodule llama_models deleted from c217d3
1 change: 1 addition & 0 deletions models/demos/llama3/requirements.txt
@@ -0,0 +1 @@
+git+https://github.com/tenstorrent/llama-models.git@tt_metal_tag
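The new requirements entry pins tenstorrent/llama-models at the tt_metal_tag ref, so code now imports the installed llama_models package instead of the in-repo submodule. Installing the package directly is equivalent to installing via the requirements file, for example:

# Either install the pinned package directly...
pip install "git+https://github.com/tenstorrent/llama-models.git@tt_metal_tag"
# ...or, as the updated test scripts below do, install it via the requirements file
pip install -r models/demos/llama3/requirements.txt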
@@ -25,7 +25,7 @@
)

from models.demos.llama3.tt.model_config import TtModelArgs
-import models.demos.llama3.reference.llama_models.models.llama3.reference_impl.multimodal.model as llama_reference_mod
+import llama_models.llama3.reference_impl.multimodal.model as llama_reference_mod


@skip_for_grayskull("Requires wormhole_b0 to run")
@@ -7,7 +7,7 @@
import os
import ttnn

-import models.demos.llama3.reference.llama_models.models.llama3.reference_impl.multimodal.model as llama_reference_mod
+import llama_models.llama3.reference_impl.multimodal.model as llama_reference_mod
from models.demos.llama3.tt.multimodal.llama_cross_attention import TtLlamaCrossAttention
from models.demos.llama3.tt.model_config import TtModelArgs
from models.demos.llama3.tt.llama_common import (
@@ -7,7 +7,7 @@
import os
import ttnn

-import models.demos.llama3.reference.llama_models.models.llama3.reference_impl.multimodal.model as llama_reference_mod
+import llama_models.llama3.reference_impl.multimodal.model as llama_reference_mod
from models.demos.llama3.tt.multimodal.llama_cross_attention_transformer_text import (
TtLlamaCrossAttentionTransformerText,
)
@@ -7,7 +7,7 @@
import os
import ttnn

-import models.demos.llama3.reference.llama_models.models.llama3.reference_impl.multimodal.model as llama_reference_mod
+import llama_models.llama3.reference_impl.multimodal.model as llama_reference_mod
from models.demos.llama3.tt.multimodal.llama_cross_attention_transformer_vision import (
TtLlamaCrossAttentionTransformerVision,
)
@@ -7,7 +7,7 @@
import os
import ttnn

-import models.demos.llama3.reference.llama_models.models.llama3.reference_impl.multimodal.model as llama_reference_mod
+import llama_models.llama3.reference_impl.multimodal.model as llama_reference_mod
from models.demos.llama3.tt.multimodal.llama_cross_block import TtLlamaCrossAttentionTransformerBlock
from models.demos.llama3.tt.model_config import TtModelArgs
from models.demos.llama3.tt.llama_common import (
@@ -7,8 +7,8 @@
import os
import ttnn

-import models.demos.llama3.reference.llama_models.models.llama3.reference_impl.multimodal.model as llama_reference_mod
-from models.demos.llama3.reference.llama_models.models.llama3.reference_impl.multimodal import encoder_utils
+import llama_models.llama3.reference_impl.multimodal.model as llama_reference_mod
+from llama_models.llama3.reference_impl.multimodal import encoder_utils
from models.demos.llama3.tt.multimodal.llama_image_attention import TtLlamaImageAttention
from models.demos.llama3.tt.multimodal.llama_vision_encoder import pad_seq_one_tile, mask_tile_padding
from models.demos.llama3.tt.model_config import TtModelArgs
@@ -7,8 +7,8 @@
import os
import ttnn

-import models.demos.llama3.reference.llama_models.models.llama3.reference_impl.multimodal.model as llama_reference_mod
-from models.demos.llama3.reference.llama_models.models.llama3.reference_impl.multimodal import encoder_utils
+import llama_models.llama3.reference_impl.multimodal.model as llama_reference_mod
+from llama_models.llama3.reference_impl.multimodal import encoder_utils
from models.demos.llama3.tt.multimodal.llama_image_block import TtLlamaImageTransformerBlock
from models.demos.llama3.tt.multimodal.llama_vision_encoder import pad_seq_one_tile, mask_tile_padding
from models.demos.llama3.tt.model_config import TtModelArgs
@@ -8,7 +8,7 @@
import os
import ttnn

-import models.demos.llama3.reference.llama_models.models.llama3.reference_impl.multimodal.model as llama_reference_mod
+import llama_models.llama3.reference_impl.multimodal.model as llama_reference_mod
from models.demos.llama3.tt.multimodal.llama_image_mlp import TtLlamaImageFeedForward
from models.demos.llama3.tt.model_config import TtModelArgs
from models.utility_functions import (
@@ -7,8 +7,8 @@
import os
import ttnn

-import models.demos.llama3.reference.llama_models.models.llama3.reference_impl.multimodal.model as llama_reference_mod
-from models.demos.llama3.reference.llama_models.models.llama3.reference_impl.multimodal import encoder_utils
+import llama_models.llama3.reference_impl.multimodal.model as llama_reference_mod
+from llama_models.llama3.reference_impl.multimodal import encoder_utils
from models.demos.llama3.tt.multimodal.llama_image_transformer import TtLlamaImageTransformer
from models.demos.llama3.tt.model_config import TtModelArgs
from models.demos.llama3.tt.multimodal.llama_vision_encoder import pad_seq_one_tile, mask_tile_padding
@@ -8,7 +8,7 @@
import os
import ttnn

-import models.demos.llama3.reference.llama_models.models.llama3.reference_impl.multimodal.model as llama_reference_mod
+import llama_models.llama3.reference_impl.multimodal.model as llama_reference_mod
from models.demos.llama3.tt.multimodal.llama_layernorm import TtLayerNorm # Updated import for LayerNorm
from models.demos.llama3.tt.model_config import TtModelArgs
from models.utility_functions import (
@@ -31,7 +31,7 @@
)
from models.demos.llama3.tt.model_config import TtModelArgs

-import models.demos.llama3.reference.llama_models.models.llama3.reference_impl.multimodal.model as llama_reference_mod
+import llama_models.llama3.reference_impl.multimodal.model as llama_reference_mod


@skip_for_grayskull("Requires wormhole_b0 to run")
@@ -7,7 +7,7 @@
import os
import ttnn

-import models.demos.llama3.reference.llama_models.models.llama3.reference_impl.multimodal.model as llama_reference_mod
+import llama_models.llama3.reference_impl.multimodal.model as llama_reference_mod
from models.demos.llama3.tt.multimodal.llama_vision_encoder import TtLlamaVisionEncoder
from models.demos.llama3.tt.model_config import TtModelArgs
from models.demos.llama3.tt.llama_common import (
@@ -8,9 +8,9 @@
from PIL import Image as PIL_Image
from termcolor import cprint

-import models.demos.llama3.reference.llama_models.models.llama3.reference_impl.generation as llama_reference_generation
+import llama_models.llama3.reference_impl.generation as llama_reference_generation

-from models.demos.llama3.reference.llama_models.models.llama3.api.datatypes import ImageMedia
+from llama_models.llama3.api.datatypes import ImageMedia

from models.utility_functions import (
comp_pcc,
2 changes: 1 addition & 1 deletion models/demos/llama3/tt/multimodal/llama_vision_encoder.py
@@ -24,7 +24,7 @@
synchronize_devices,
)

-import models.demos.llama3.reference.llama_models.models.llama3.reference_impl.multimodal.encoder_utils as encoder_utils
+import llama_models.llama3.reference_impl.multimodal.encoder_utils as encoder_utils


def to_2tuple(x):
4 changes: 2 additions & 2 deletions models/demos/llama3/tt/multimodal/llama_vision_model.py
@@ -15,8 +15,8 @@

from torch import nn, Tensor

-import models.demos.llama3.reference.llama_models.models.llama3.reference_impl.multimodal.model as llama_reference_model
-import models.demos.llama3.reference.llama_models.models.llama3.reference_impl.multimodal.image_transform as llama_reference_image_transforms
+import llama_models.llama3.reference_impl.multimodal.model as llama_reference_model
+import llama_models.llama3.reference_impl.multimodal.image_transform as llama_reference_image_transforms

import ttnn
from models.demos.llama3.tt.multimodal.llama_cross_attention_transformer_vision import (
2 changes: 1 addition & 1 deletion tests/scripts/t3000/run_t3000_demo_tests.sh
@@ -89,7 +89,7 @@ run_t3000_llama3_vision_tests() {
t3k=T3K

# Install Vision-specific packages
-pip install -r models/demos/llama3/reference/llama_models/requirements.txt
+pip install -r models/demos/llama3/requirements.txt

for fake_device in "$n300" "$t3k"; do
FAKE_DEVICE=$fake_device LLAMA_DIR=$llama11b WH_ARCH_YAML=$wh_arch_yaml pytest -n auto models/demos/llama3/demo/multimodal_demo_chat.py -k "tt and 1" --timeout 600; fail+=$?
4 changes: 2 additions & 2 deletions tests/scripts/t3000/run_t3000_frequent_tests.sh
@@ -87,7 +87,7 @@ run_t3000_llama3.2-11b-vision_freq_tests() {
llama11b=/mnt/MLPerf/tt_dnn-models/llama/Llama3.2-11B-Vision-Instruct/

# Install Vision-specific packages
-pip install -r models/demos/llama3/reference/llama_models/requirements.txt
+pip install -r models/demos/llama3/requirements.txt

LLAMA_DIR=$llama11b WH_ARCH_YAML=$wh_arch_yaml pytest -n auto models/demos/llama3/tests/multimodal/test_llama_image_transformer.py ; fail+=$?
LLAMA_DIR=$llama11b WH_ARCH_YAML=$wh_arch_yaml pytest -n auto models/demos/llama3/tests/multimodal/test_llama_vision_encoder.py ; fail+=$?
@@ -117,7 +117,7 @@ run_t3000_spoof_n300_llama3.2-11b-vision_freq_tests() {
fake_device=N300

# Install Vision-specific packages
-pip install -r models/demos/llama3/reference/llama_models/requirements.txt
+pip install -r models/demos/llama3/requirements.txt

FAKE_DEVICE=$fake_device LLAMA_DIR=$llama11b WH_ARCH_YAML=$wh_arch_yaml pytest -n auto models/demos/llama3/tests/multimodal/test_llama_image_transformer.py ; fail+=$?
FAKE_DEVICE=$fake_device LLAMA_DIR=$llama11b WH_ARCH_YAML=$wh_arch_yaml pytest -n auto models/demos/llama3/tests/multimodal/test_llama_vision_encoder.py ; fail+=$?
4 changes: 2 additions & 2 deletions tests/scripts/t3000/run_t3000_unit_tests.sh
@@ -164,7 +164,7 @@ run_t3000_llama3.2-11b-vision_unit_tests() {
llama11b=/mnt/MLPerf/tt_dnn-models/llama/Llama3.2-11B-Vision-Instruct/

# Install Vision-specific packages
-pip install -r models/demos/llama3/reference/llama_models/requirements.txt
+pip install -r models/demos/llama3/requirements.txt

LLAMA_DIR=$llama11b WH_ARCH_YAML=$wh_arch_yaml pytest -n auto models/demos/llama3/tests/multimodal/test_llama_image_mlp.py ; fail+=$?
LLAMA_DIR=$llama11b WH_ARCH_YAML=$wh_arch_yaml pytest -n auto models/demos/llama3/tests/multimodal/test_llama_image_attention.py ; fail+=$?
@@ -199,7 +199,7 @@ run_t3000_spoof_n300_llama3.2-11b-vision_unit_tests() {
fake_device=N300

# Install Vision-specific packages
-pip install -r models/demos/llama3/reference/llama_models/requirements.txt
+pip install -r models/demos/llama3/requirements.txt

FAKE_DEVICE=$fake_device LLAMA_DIR=$llama11b WH_ARCH_YAML=$wh_arch_yaml pytest -n auto models/demos/llama3/tests/multimodal/test_llama_image_mlp.py ; fail+=$?
FAKE_DEVICE=$fake_device LLAMA_DIR=$llama11b WH_ARCH_YAML=$wh_arch_yaml pytest -n auto models/demos/llama3/tests/multimodal/test_llama_image_attention.py ; fail+=$?
