import { env, AutoProcessor, AutoModel, RawImage } from 'https://cdn.jsdelivr.net/npm/@xenova/[email protected]';
// Since we will download the model from the Hugging Face Hub, we can skip the local model check
env.allowLocalModels = false;
// Reference the elements that we will need
const status = document.getElementById('status');
const fileUpload = document.getElementById('upload');
const imageContainer = document.getElementById('container');
const example = document.getElementById('example');
const EXAMPLE_URL = 'https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/city-streets.jpg';
const IMAGE_SIZE = 256;
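// NOTE: detections come back in this IMAGE_SIZE x IMAGE_SIZE coordinate space (see renderBox below)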
// Create a new object detection pipeline
status.textContent = 'Loading model...';
const processor = await AutoProcessor.from_pretrained('Xenova/yolov9-c');
// For this demo, we resize the image to IMAGE_SIZE x IMAGE_SIZE
processor.feature_extractor.size = { width: IMAGE_SIZE, height: IMAGE_SIZE };
const model = await AutoModel.from_pretrained('Xenova/yolov9-c');
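// The first call downloads the ONNX weights from the Hub; transformers.js then
// serves repeat loads from the browser cache by default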
status.textContent = 'Ready';
example.addEventListener('click', (e) => {
    e.preventDefault();
    detect(EXAMPLE_URL);
});
fileUpload.addEventListener('change', function (e) {
    const file = e.target.files[0];
    if (!file) {
        return;
    }
    const reader = new FileReader();
    // Set up a callback when the file is loaded
    reader.onload = e2 => detect(e2.target.result);
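    // readAsDataURL yields a data: URL, which works both as the container's CSS
    // background image and as input to RawImage.fromURL in detect()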
    reader.readAsDataURL(file);
});
// Detect objects in the image
async function detect(url) {
    // Update UI
    imageContainer.innerHTML = '';
    imageContainer.style.backgroundImage = `url(${url})`;
    // Read image
    const image = await RawImage.fromURL(url);
    // Set container width and height depending on the image aspect ratio
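    // (the longer side is capped at 640px; the shorter side scales to match)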
    const ar = image.width / image.height;
    const [cw, ch] = (ar > 1) ? [640, 640 / ar] : [640 * ar, 640];
    imageContainer.style.width = `${cw}px`;
    imageContainer.style.height = `${ch}px`;
    status.textContent = 'Analysing...';
    // Preprocess image
    const { pixel_values } = await processor(image);
    // Predict bounding boxes
    const { outputs } = await model({ images: pixel_values });
    status.textContent = '';
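    // Optional tweak (not part of the original flow): drop low-confidence
    // detections before rendering, e.g. with an arbitrary 0.25 threshold:
    //   outputs.tolist().filter(([, , , , score]) => score > 0.25).forEach(renderBox);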
    outputs.tolist().forEach(renderBox);
}
// Render a bounding box and label on the image
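// Each detection is a [xmin, ymin, xmax, ymax, score, id] array, with coordinates
// in the IMAGE_SIZE x IMAGE_SIZE space of the resized model input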
function renderBox([xmin, ymin, xmax, ymax, score, id]) {
    // Generate a random color for the box
    const color = '#' + Math.floor(Math.random() * 0xFFFFFF).toString(16).padStart(6, '0');
    // Draw the box
    const boxElement = document.createElement('div');
    boxElement.className = 'bounding-box';
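    // Position the box as percentages of IMAGE_SIZE so it scales with the container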
    Object.assign(boxElement.style, {
        borderColor: color,
        left: 100 * xmin / IMAGE_SIZE + '%',
        top: 100 * ymin / IMAGE_SIZE + '%',
        width: 100 * (xmax - xmin) / IMAGE_SIZE + '%',
        height: 100 * (ymax - ymin) / IMAGE_SIZE + '%',
    });
    // Draw label
    const labelElement = document.createElement('span');
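    // Look up the human-readable class name from the model config
    // (for this checkpoint, presumably the COCO categories)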
    labelElement.textContent = model.config.id2label[id];
    labelElement.className = 'bounding-box-label';
    labelElement.style.backgroundColor = color;
    boxElement.appendChild(labelElement);
    imageContainer.appendChild(boxElement);
}