
Merge branch 'main' into ui-tunning
ibelem authored Oct 8, 2024
2 parents 7a880b9 + 6853edb commit d1fca06
Showing 5 changed files with 22 additions and 51 deletions.
7 changes: 4 additions & 3 deletions config.js
@@ -80,9 +80,10 @@ export const ALL_NEEDED_MODEL_RESOURCES = {
  },

  // background-removal
- "RMBG-1.4": {
-   linkPathPrefix: "https://huggingface.co/briaai/RMBG-1.4/resolve/main/",
-   localFolderPathPrefix: "briaai/",
+ BiRefNet_T: {
+   linkPathPrefix:
+     "https://huggingface.co/onnx-community/BiRefNet_T/blob/main/",
+   localFolderPathPrefix: "onnx-community/",
    resources: ["onnx/model.onnx"]
  },

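For context, each entry in ALL_NEEDED_MODEL_RESOURCES maps a model name to a remote URL prefix, a local folder prefix, and the resource files to fetch. Below is a minimal sketch of how such an entry could be resolved into concrete paths; resolveModelResources is a hypothetical helper for illustration and is not how main.js necessarily consumes this table.

// Hypothetical helper, not part of this repo: turn a model entry of the
// shape shown above into concrete remote URLs or local paths.
const ALL_NEEDED_MODEL_RESOURCES = {
  BiRefNet_T: {
    linkPathPrefix: "https://huggingface.co/onnx-community/BiRefNet_T/blob/main/",
    localFolderPathPrefix: "onnx-community/",
    resources: ["onnx/model.onnx"]
  }
};

function resolveModelResources(modelName, useRemote) {
  const entry = ALL_NEEDED_MODEL_RESOURCES[modelName];
  if (!entry) throw new Error(`Unknown model: ${modelName}`);
  const prefix = useRemote ? entry.linkPathPrefix : entry.localFolderPathPrefix;
  // Join each resource file onto the chosen prefix.
  return entry.resources.map((file) => prefix + file);
}

// Example: the single remote ONNX file for BiRefNet_T.
console.log(resolveModelResources("BiRefNet_T", true));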
18 changes: 4 additions & 14 deletions js/main.js
@@ -65,18 +65,6 @@ let SAMPLES = [
  },

  // WebGPU
- {
-   id: "webgpu_background_removal",
-   title: "RMBG",
-   desc: "Remove the background of an image",
-   sampleUrl: "./samples/image_background_removal/index.html",
-   models: ["RMBG v1.4"],
-   tasks: "Image Segmentation",
-   webApis: [BACKENDS.WEBGPU],
-   framework: "Transformers.js",
-   devices: [DEVICES.GPU],
-   update: "2024-06-08"
- },
  {
    id: "webgpu_benchmark",
    title: "Benchmark",
@@ -118,7 +106,8 @@ let SAMPLES = [
id: "webgpu_gemini",
title: "Google Gemini Nano",
desc: "Google Gemini Nano integration with Chrome Canary",
sampleUrl: "https://huggingface.co/spaces/Xenova/experimental-built-in-ai-chat",
sampleUrl:
"https://huggingface.co/spaces/Xenova/experimental-built-in-ai-chat",
models: ["Gemini Nano"],
tasks: "Multimodal",
webApis: [BACKENDS.WEBGPU],
@@ -240,7 +229,8 @@ let SAMPLES = [
id: "webgpu_smollm",
title: "SmolLM",
desc: "A blazingly fast and powerful SLM",
sampleUrl: "https://huggingface.co/spaces/HuggingFaceTB/SmolLM-360M-Instruct-WebGPU",
sampleUrl:
"https://huggingface.co/spaces/HuggingFaceTB/SmolLM-360M-Instruct-WebGPU",
models: ["SmolLM"],
tasks: "SLM",
webApis: [BACKENDS.WEBGPU],
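Each SAMPLES entry describes one demo card (title, model, task, backend, device, last update). As a rough sketch of how such a list can be narrowed to a single backend, the snippet below filters by webApis and sorts by update date; BACKENDS and DEVICES are stubbed here, so their real values in main.js may differ.

// Stubbed constants so the sketch runs standalone; the real BACKENDS/DEVICES
// objects live elsewhere in this repo and may hold different values.
const BACKENDS = { WEBGPU: "WebGPU", WEBNN: "WebNN" };
const DEVICES = { GPU: "GPU" };

const SAMPLES = [
  {
    id: "webgpu_benchmark",
    title: "Benchmark",
    webApis: [BACKENDS.WEBGPU],
    devices: [DEVICES.GPU],
    update: "2024-06-08"
  }
];

// Cards that advertise a given backend, newest first.
function samplesForBackend(samples, backend) {
  return samples
    .filter((sample) => sample.webApis.includes(backend))
    .sort((a, b) => b.update.localeCompare(a.update));
}

console.log(samplesForBackend(SAMPLES, BACKENDS.WEBGPU).map((s) => s.title));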
12 changes: 6 additions & 6 deletions package-lock.json

Some generated files are not rendered by default.

30 changes: 5 additions & 25 deletions samples/image_background_removal/background_removal.js
@@ -43,7 +43,9 @@ if (!VITE_ENV_USE_REMOTE_MODELS) {

// Constants
const DEFAULT_CACHE_STORAGE_NAME = "transformers-cache";
- const MODEL_NAME = "RMBG-1.4";
+ // !FIXME: the model currently has a bug when running with the WebGPU execution provider; this example will be re-enabled once the upstream fix lands
+ const MODEL_NAME = "BiRefNet_T";
+ const MODEL_ID = "onnx-community/BiRefNet_T";

// Reference the elements that we will need
const fileUpload = document.getElementById("upload");
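DEFAULT_CACHE_STORAGE_NAME matches the Cache Storage bucket Transformers.js uses by default in the browser. A small sketch, assuming only the standard Cache API, for inspecting which model files are already cached; listCachedModelFiles is a hypothetical helper, not part of this sample.

// List URLs already stored in the Transformers.js browser cache bucket.
async function listCachedModelFiles(cacheName = "transformers-cache") {
  if (!("caches" in self)) return [];           // Cache Storage unavailable
  const cache = await caches.open(cacheName);   // opens (or creates) the bucket
  const requests = await cache.keys();          // one Request per cached file
  return requests.map((request) => request.url);
}

listCachedModelFiles().then((urls) => console.log("cached files:", urls));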
@@ -67,15 +69,6 @@ let model = null;
let processor = null;

function model_progress_cb_handler(message) {
-  /**
-   *
-   * file - "onnx/model_quantized.onnx"
-   * loaded - 3997696
-   * name - "briaai/RMBG-1.4"
-   * progress - 9.003165670890668
-   * status - "progress"
-   * total : 44403226
-   */
  const fileName = message.file;
  let statusBarElement = null;
  if (fileName) {
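The deleted block comment documented the fields of the messages Transformers.js passes to progress_callback (status, file, name, progress, loaded, total). A hedged sketch of a handler built on those fields; the "#status" element id is made up for illustration and is not taken from this sample.

// Sketch of a progress_callback handler using the documented message fields.
function onModelProgress(message) {
  if (message.status !== "progress" || !message.file) return;

  const percent = Math.round(message.progress);            // 0-100
  const mb = (message.loaded / (1024 * 1024)).toFixed(1);  // bytes downloaded so far

  document.querySelector("#status").textContent =
    `${message.name} - ${message.file}: ${percent}% (${mb} MB)`;
}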
@@ -333,7 +326,7 @@ async function predict(url) {
  try {
    // if model and processor are not ready, initialize them first
    if (!model) {
-     model = await AutoModel.from_pretrained("briaai/RMBG-1.4", {
+     model = await AutoModel.from_pretrained(MODEL_ID, {
        // Do not require config.json to be present in the repository
        progress_callback: model_progress_cb_handler,
        device: "webgpu",
@@ -342,20 +335,7 @@
    }

    if (!processor) {
-     processor = await AutoProcessor.from_pretrained("briaai/RMBG-1.4", {
-       // Do not require config.json to be present in the repository
-       config: {
-         do_normalize: true,
-         do_pad: false,
-         do_rescale: true,
-         do_resize: true,
-         image_mean: [0.5, 0.5, 0.5],
-         feature_extractor_type: "ImageFeatureExtractor",
-         image_std: [1, 1, 1],
-         resample: 2,
-         rescale_factor: 0.00392156862745098,
-         size: { width: 1024, height: 1024 }
-       },
+     processor = await AutoProcessor.from_pretrained(MODEL_ID, {
        device: "webgpu",
        dtype: "fp32"
      });
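Both hunks above replace the hard-coded "briaai/RMBG-1.4" identifier with the MODEL_ID constant. For orientation, here is a minimal end-to-end sketch of the Transformers.js flow this sample follows, adapted from the RMBG-1.4 pattern; the package name, the "input"/"output" tensor names, and the post-processing are assumptions and may differ for BiRefNet_T.

// Minimal sketch, not the sample's exact code. Package name and tensor names
// are assumptions carried over from the RMBG-1.4 example.
import { AutoModel, AutoProcessor, RawImage } from "@huggingface/transformers";

const MODEL_ID = "onnx-community/BiRefNet_T";

const model = await AutoModel.from_pretrained(MODEL_ID, { device: "webgpu", dtype: "fp32" });
const processor = await AutoProcessor.from_pretrained(MODEL_ID);

async function segment(url) {
  const image = await RawImage.fromURL(url);

  // Preprocess to the model's expected size/normalization.
  const { pixel_values } = await processor(image);

  // Run the segmentation model.
  const { output } = await model({ input: pixel_values });

  // Rescale the predicted matte to the original resolution; the sample then
  // applies it as an alpha channel when drawing the image onto a canvas.
  const mask = await RawImage.fromTensor(output[0].mul(255).to("uint8"))
    .resize(image.width, image.height);

  return { image, mask };
}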
6 changes: 3 additions & 3 deletions samples/phi3-webgpu/package-lock.json

Some generated files are not rendered by default.
