diff --git a/.ci/skipped_notebooks.yml b/.ci/skipped_notebooks.yml
index 2566487502d..c82710a0853 100644
--- a/.ci/skipped_notebooks.yml
+++ b/.ci/skipped_notebooks.yml
@@ -468,13 +468,6 @@
   skips:
     - python:
         - '3.9'
-- notebook: notebooks/minicpm-v-multimodal-chatbot/minicpm-v-multimodal-chatbot.ipynb
-  skips:
-    - os:
-        - macos-13
-        - ubuntu-20.04
-        - ubuntu-22.04
-        - windows-2019
 - notebook: notebooks/stable-audio/stable-audio.ipynb
   skips:
     - os:
diff --git a/notebooks/controlnet-stable-diffusion/controlnet-stable-diffusion.ipynb b/notebooks/controlnet-stable-diffusion/controlnet-stable-diffusion.ipynb
index a85b63c213a..6ef4e670c8a 100644
--- a/notebooks/controlnet-stable-diffusion/controlnet-stable-diffusion.ipynb
+++ b/notebooks/controlnet-stable-diffusion/controlnet-stable-diffusion.ipynb
@@ -138,10 +138,13 @@
     "    \"transformers>=4.30.2\",\n",
     "    \"controlnet-aux>=0.0.6\",\n",
     "    \"gradio>=3.36\",\n",
+    "    \"datasets>=2.14.6\",\n",
+    "    \"nncf>=2.7.0\",\n",
+    "    \"opencv-python\",\n",
     "    \"--extra-index-url\",\n",
     "    \"https://download.pytorch.org/whl/cpu\",\n",
     ")\n",
-    "pip_install(\"openvino>=2023.1.0\", \"datasets>=2.14.6\", \"nncf>=2.7.0\", \"opencv-python\")"
+    "pip_install(\"openvino>=2023.1.0\")"
    ]
   },
   {
diff --git a/notebooks/efficient-sam/efficient-sam.ipynb b/notebooks/efficient-sam/efficient-sam.ipynb
index b9332330afa..e2829f2cd86 100644
--- a/notebooks/efficient-sam/efficient-sam.ipynb
+++ b/notebooks/efficient-sam/efficient-sam.ipynb
@@ -67,7 +67,14 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "%pip install -q \"openvino>=2023.3.0\" \"nncf>=2.7.0\" opencv-python \"gradio>=4.13\" \"matplotlib>=3.4\" torch torchvision tqdm --extra-index-url https://download.pytorch.org/whl/cpu"
+    "import platform\n",
+    "\n",
+    "%pip install -q \"openvino>=2024.5.0\" \"nncf>=2.14.0\"\n",
+    "%pip install -q \"torch>=2.2.0\" \"torchaudio>=2.2.0\" \"torchvision>=0.17.0\" --extra-index-url https://download.pytorch.org/whl/cpu\n",
+    "%pip install -q opencv-python \"gradio>=4.13\" \"matplotlib>=3.4\" tqdm\n",
+    "\n",
+    "if platform.system() == \"Darwin\":\n",
+    "    %pip install -q \"numpy<2.0.0\""
    ]
   },
   {
diff --git a/notebooks/llava-multimodal-chatbot/llava-multimodal-chatbot-genai.ipynb b/notebooks/llava-multimodal-chatbot/llava-multimodal-chatbot-genai.ipynb
index cc3ec17194e..2d79ed09301 100644
--- a/notebooks/llava-multimodal-chatbot/llava-multimodal-chatbot-genai.ipynb
+++ b/notebooks/llava-multimodal-chatbot/llava-multimodal-chatbot-genai.ipynb
@@ -97,7 +97,7 @@
     "from pathlib import Path\n",
     "import requests\n",
     "\n",
-    "%pip install -q \"torch>=2.1.0\" \"torchvision\" \"torchaudio\" --index-url https://download.pytorch.org/whl/cpu\n",
+    "%pip install -q \"torch>=2.3.0\" \"torchvision\" \"torchaudio\" --index-url https://download.pytorch.org/whl/cpu\n",
     "%pip install -q \"git+https://github.com/huggingface/optimum-intel.git\" --index-url https://download.pytorch.org/whl/cpu\n",
     "%pip install -q \"nncf>=2.14.0\" \"sentencepiece\" \"tokenizers>=0.12.1\" \"transformers>=4.45.0\" \"gradio>=4.36\"\n",
     "%pip install -q -U \"openvino-tokenizers>=2024.5.0\" \"openvino>=2024.5.0\" \"openvino-genai>=2024.5.0\"\n",
diff --git a/notebooks/minicpm-v-multimodal-chatbot/minicpm-v-multimodal-chatbot.ipynb b/notebooks/minicpm-v-multimodal-chatbot/minicpm-v-multimodal-chatbot.ipynb
index b266b019b5b..13661075b3a 100644
--- a/notebooks/minicpm-v-multimodal-chatbot/minicpm-v-multimodal-chatbot.ipynb
+++ b/notebooks/minicpm-v-multimodal-chatbot/minicpm-v-multimodal-chatbot.ipynb
@@ -131,7 +131,11 @@
    "cell_type": "code",
"execution_count": 3, "id": "82e846bb", - "metadata": {}, + "metadata": { + "test_replace": { + "openbmb/MiniCPM-V-2_6": "katuni4ka/tiny-random-minicpmv-2_6" + } + }, "outputs": [ { "name": "stdout", @@ -169,7 +173,7 @@ "model_dir = Path(model_id.split(\"/\")[-1] + \"-ov\")\n", "\n", "if not model_dir.exists():\n", - " optimum_cli(model_id, model_dir, additional_args={\"trust-remote-code\": \"\", \"weight-format\": \"fp16\"})\n", + " optimum_cli(model_id, model_dir, additional_args={\"trust-remote-code\": \"\", \"weight-format\": \"fp16\", \"task\": \"image-text-to-text\"})\n", " compress_lm_weights(model_dir)" ] }, @@ -204,14 +208,14 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 1, "id": "626fef57", "metadata": {}, "outputs": [ { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "84bba5eaf8cc4b7e97a5e5d3768146e3", + "model_id": "2362638a795340e6b3effb0805848768", "version_major": 2, "version_minor": 0 }, @@ -219,7 +223,7 @@ "Dropdown(description='Device:', index=1, options=('CPU', 'AUTO'), value='AUTO')" ] }, - "execution_count": 4, + "execution_count": 1, "metadata": {}, "output_type": "execute_result" } @@ -434,21 +438,7 @@ "widgets": { "application/vnd.jupyter.widget-state+json": { "state": { - "41592555658f4eb69616c541894b88f0": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "2.0.0", - "model_name": "DescriptionStyleModel", - "state": { - "description_width": "" - } - }, - "658a0c15a9cb47078c9c8647bff53d1e": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "2.0.0", - "model_name": "LayoutModel", - "state": {} - }, - "84bba5eaf8cc4b7e97a5e5d3768146e3": { + "2362638a795340e6b3effb0805848768": { "model_module": "@jupyter-widgets/controls", "model_module_version": "2.0.0", "model_name": "DropdownModel", @@ -459,9 +449,23 @@ ], "description": "Device:", "index": 1, - "layout": "IPY_MODEL_658a0c15a9cb47078c9c8647bff53d1e", - "style": "IPY_MODEL_41592555658f4eb69616c541894b88f0" + "layout": "IPY_MODEL_d737bcde20ac4ba38ecf0902eec67998", + "style": "IPY_MODEL_49b230bc877e422788033f49884843a2" + } + }, + "49b230bc877e422788033f49884843a2": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "2.0.0", + "model_name": "DescriptionStyleModel", + "state": { + "description_width": "" } + }, + "d737bcde20ac4ba38ecf0902eec67998": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "2.0.0", + "model_name": "LayoutModel", + "state": {} } }, "version_major": 2, diff --git a/notebooks/parler-tts-text-to-speech/parler-tts-text-to-speech.ipynb b/notebooks/parler-tts-text-to-speech/parler-tts-text-to-speech.ipynb index 5f2591cbfc4..11ed9aa65a8 100644 --- a/notebooks/parler-tts-text-to-speech/parler-tts-text-to-speech.ipynb +++ b/notebooks/parler-tts-text-to-speech/parler-tts-text-to-speech.ipynb @@ -58,8 +58,9 @@ "\n", "os.environ[\"GIT_CLONE_PROTECTION_ACTIVE\"] = \"false\"\n", "\n", + "%pip uninstall -q -y torch torchvision torchaudio\n", "%pip install -q \"openvino>=2024.2.0\"\n", - "%pip install -q git+https://github.com/huggingface/parler-tts.git \"gradio>=4.19\" transformers \"torch>=2.2\" --extra-index-url https://download.pytorch.org/whl/cpu" + "%pip install -q git+https://github.com/huggingface/parler-tts.git \"gradio>=4.19\" transformers \"torch>=2.2\" \"torchaudio\" --extra-index-url https://download.pytorch.org/whl/cpu" ] }, { diff --git a/notebooks/whisper-subtitles-generation/whisper-subtitles-generation.ipynb 
b/notebooks/whisper-subtitles-generation/whisper-subtitles-generation.ipynb index d715ae4e044..e0945dccd39 100644 --- a/notebooks/whisper-subtitles-generation/whisper-subtitles-generation.ipynb +++ b/notebooks/whisper-subtitles-generation/whisper-subtitles-generation.ipynb @@ -83,15 +83,27 @@ "outputs": [], "source": [ "import platform\n", + "import importlib.metadata\n", + "import importlib.util\n", "\n", "%pip install -q \"nncf>=2.14.0\"\n", "%pip install -q -U \"openvino>=2024.5.0\" \"openvino-tokenizers>=2024.5.0\" \"openvino-genai>=2024.5.0\"\n", "%pip install -q \"python-ffmpeg<=1.0.16\" \"ffmpeg\" \"moviepy\" \"transformers>=4.45\" \"git+https://github.com/huggingface/optimum-intel.git\" \"torch>=2.1\" --extra-index-url https://download.pytorch.org/whl/cpu\n", - "%pip install -q -U \"yt_dlp>=2024.8.6\" soundfile librosa jiwer\n", + "%pip install -q -U \"yt_dlp>=2024.8.6\" soundfile librosa jiwer packaging\n", "%pip install -q \"gradio>=4.19\" \"typing_extensions>=4.9\"\n", "\n", "if platform.system() == \"Darwin\":\n", - " %pip install -q \"numpy<2.0\"" + " %pip install -q \"numpy<2.0\"\n", + "\n", + "\n", + "from packaging import version\n", + "\n", + "if (\n", + " importlib.util.find_spec(\"tensorflow\") is not None\n", + " and version.parse(importlib.metadata.version(\"tensorflow\")) < version.parse(\"2.18.0\")\n", + " and version.parse(importlib.metadata.version(\"numpy\")) >= version.parse(\"2.0.0\")\n", + "):\n", + " %pip uninstall -q -y tensorflow" ] }, { @@ -379,7 +391,10 @@ "metadata": {}, "outputs": [], "source": [ - "from moviepy.editor import VideoFileClip\n", + "try:\n", + " from moviepy import VideoFileClip\n", + "except ImportError:\n", + " from moviepy.editor import VideoFileClip\n", "from transformers.pipelines.audio_utils import ffmpeg_read\n", "\n", "\n",