Ashot Vardanian committed
Commit 95669d7
Parent(s): 9f5ad91

Add: Dataset validation and docs

Files changed:
- .gitattributes +3 -0
- .gitignore +2 -0
- README.md +22 -0
- main.py +14 -2
.gitattributes
CHANGED

@@ -52,3 +52,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.jpg filter=lfs diff=lfs merge=lfs -text
 *.jpeg filter=lfs diff=lfs merge=lfs -text
 *.webp filter=lfs diff=lfs merge=lfs -text
+# Unum files - uncompressed
+*.usearch filter=lfs diff=lfs merge=lfs -text
+*.fbin filter=lfs diff=lfs merge=lfs -text
.gitignore
ADDED

@@ -0,0 +1,2 @@
+images/*
+.DS_Store
README.md
CHANGED

@@ -1,3 +1,25 @@
 ---
 license: apache-2.0
 ---
+
+# 25K Unsplash Images for Search
+
+This is a derivative work based on two existing datasets.
+
+- `images.csv` metadata from [Unsplash](https://github.com/unsplash/datasets), sorted and converted to CSV.
+- `images/` in 250x250 resolution by [kaggle/@jettchentt](https://www.kaggle.com/datasets/jettchentt/unsplash-dataset-images-downloaded-250x250).
+- `images.fbin` is a binary file with UForm image embeddings.
+- `images.usearch` is a binary file with a serialized USearch index.
+
+The original `images.tsv` from Unsplash has been filtered to avoid missing images.
+The embeddings and the index can be reconstructed with the `main.py` script.
+On the Apple M2 Pro CPU:
+
+- Image vectorization takes 100 ms/image, or 10 inferences/second.
+- Indexing vectors one by one runs at about 700 vectors/second.
+
+To rebuild the indexes:
+
+```sh
+./main.py
+```
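The README describes `images.fbin` and `images.usearch` as ready-to-use artifacts. Below is a minimal sketch of consuming them without rebuilding anything, assuming the common `.fbin` layout (two little-endian `int32` headers for vector count and dimensionality, followed by `float32` rows) and the `usearch` Python package; the variable names and the smoke-test query are illustrative, not taken from this commit.

```python
# Hedged sketch: load the shipped embeddings and the pre-built index.
import numpy as np
from usearch.index import Index

# images.fbin is assumed to store [count:int32][ndim:int32], then count*ndim float32 values
with open('images.fbin', 'rb') as f:
    count, ndim = np.fromfile(f, dtype=np.int32, count=2)
    vectors = np.fromfile(f, dtype=np.float32).reshape(count, ndim)

# images.usearch holds a serialized USearch index; restore it instead of re-indexing
index = Index.restore('images.usearch')

# Smoke test: search the index with the first stored embedding
matches = index.search(vectors[0], 10)
print(count, ndim, len(matches))
```

Restoring the serialized index avoids paying the roughly 700 vectors/second insertion cost quoted above.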
main.py
CHANGED

@@ -1,4 +1,5 @@
-
+#!/usr/bin/env python3
+from os import listdir, path, PathLike, remove
 from os.path import isfile, join
 
 import pandas as pd
@@ -24,9 +25,13 @@ def is_image(path: PathLike) -> bool:
     return False
 
 
+def trim_extension(filename: str) -> str:
+    return filename.rsplit('.', 1)[0]
+
+
 names = sorted(f for f in listdir('images') if is_image(join('images', f)))
-names = [filename.rsplit('.', 1)[0] for filename in names]
+names = [trim_extension(f) for f in names]
 
 table = pd.read_table('images.tsv') if path.exists(
     'images.tsv') else pd.read_table('images.csv')
 table = table[table['photo_id'].isin(names)]
@@ -35,6 +40,13 @@ table.reset_index()
 table.to_csv('images.csv', index=False)
 
 names = list(set(table['photo_id']).intersection(names))
+names_to_delete = [f for f in listdir(
+    'images') if trim_extension(f) not in names]
+
+if len(names_to_delete) > 0:
+    print(f'Plans to delete: {len(names_to_delete)} images without metadata')
+    for name in names_to_delete:
+        remove(join('images', name))
 
 model = get_model('unum-cloud/uform-vl-english')
 vectors = []
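The hunks above stop right after `vectors = []`, so the vectorization and indexing tail of `main.py` is not shown in this diff. For orientation, here is a hedged sketch of the one-by-one indexing step that the README times at roughly 700 vectors/second, assuming the `usearch` Python API; the cosine metric, the row-position keys, and the `build_index` helper name are assumptions, not taken from the script.

```python
# Hedged sketch: index precomputed image embeddings one by one and serialize the result.
import numpy as np
from usearch.index import Index


def build_index(vectors: np.ndarray, path: str = 'images.usearch') -> Index:
    """Add each embedding under its row position as key, then save to `path`."""
    vectors = np.asarray(vectors, dtype=np.float32)
    index = Index(ndim=vectors.shape[1], metric='cos')  # metric choice is an assumption
    for key, vector in enumerate(vectors):
        index.add(key, vector)  # the step benchmarked at ~700 vectors/second
    index.save(path)
    return index
```

USearch can also ingest whole batches in a single `add` call, which is usually faster than the per-vector loop benchmarked here.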