Commit ef81d78
1 Parent(s): ce68525

docs: update the example

Files changed (1):
  1. README.md +17 -6
README.md CHANGED
````diff
@@ -205,23 +205,31 @@ print(image_embeddings[0] @ text_embeddings[0].T) # image-text cross-modal simi
 or via sentence-transformers:
 
 ```python
-# !pip install sentence-transformers
+# !pip install sentence-transformers einops timm pillow
 from sentence_transformers import SentenceTransformer
 
 # Initialize the model
-model = SentenceTransformer('jinaai/jina-clip-v2', trust_remote_code=True)
+truncate_dim = 512
+model = SentenceTransformer(
+    "jinaai/jina-clip-v2", trust_remote_code=True, truncate_dim=truncate_dim
+)
 
 # Sentences
-sentences = ['A blue cat', 'A red cat']
+sentences = [
+    "A neural network walks into a bar and forgets why it came.",
+    "Why do programmers prefer dark mode? Because light attracts bugs.",
+]
 
 # Public image URLs
 image_urls = [
-    'https://i.pinimg.com/600x315/21/48/7e/21487e8e0970dd366dafaed6ab25d8d8.jpg',
-    'https://i.pinimg.com/736x/c9/f2/3e/c9f23e212529f13f19bad5602d84b78b.jpg'
+    "https://i.pinimg.com/600x315/21/48/7e/21487e8e0970dd366dafaed6ab25d8d8.jpg",
+    "https://i.pinimg.com/736x/c9/f2/3e/c9f23e212529f13f19bad5602d84b78b.jpg",
 ]
 
 text_embeddings = model.encode(sentences)
 image_embeddings = model.encode(image_urls)
+query = "tell me a joke about AI"
+text_query_embeddings = model.encode(query, prompt_name="retrieval.query")
 ```
 
 JavaScript developers can use Jina CLIP via the [transformers.js](https://huggingface.co/docs/transformers.js) library. Note that to use this model, you need to install transformers.js [v3](https://github.com/xenova/transformers.js/tree/v3) from source using `npm install xenova/transformers.js#v3`.
@@ -238,7 +246,10 @@ const processor = await AutoProcessor.from_pretrained('Xenova/clip-vit-base-patc
 const vision_model = await CLIPVisionModelWithProjection.from_pretrained('jinaai/jina-clip-v2');
 
 // Run tokenization
-const texts = ['A blue cat', 'A red cat'];
+const texts = [
+    'A neural network walks into a bar and forgets why it came.',
+    'Why do programmers prefer dark mode? Because light attracts bugs.',
+];
 const text_inputs = tokenizer(texts, { padding: true, truncation: true });
 
 // Compute text embeddings
````
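
As a quick sanity check of the updated example, the new embeddings can be compared the same way the README's earlier context line does (`image_embeddings[0] @ text_embeddings[0].T`). The sketch below is not part of this commit: it re-runs the updated Python snippet and adds a small cosine-similarity helper and ranking loop of my own; the embedding shapes noted in the comments are assumptions, not something the diff states.

```python
# Illustrative sketch only (not part of this commit): re-run the updated README
# example and score the retrieval query against the joke sentences and images.
import numpy as np
from sentence_transformers import SentenceTransformer

truncate_dim = 512  # truncated output dimension, as set in the diff
model = SentenceTransformer(
    "jinaai/jina-clip-v2", trust_remote_code=True, truncate_dim=truncate_dim
)

sentences = [
    "A neural network walks into a bar and forgets why it came.",
    "Why do programmers prefer dark mode? Because light attracts bugs.",
]
image_urls = [
    "https://i.pinimg.com/600x315/21/48/7e/21487e8e0970dd366dafaed6ab25d8d8.jpg",
    "https://i.pinimg.com/736x/c9/f2/3e/c9f23e212529f13f19bad5602d84b78b.jpg",
]

text_embeddings = model.encode(sentences)    # assumed shape: (2, truncate_dim)
image_embeddings = model.encode(image_urls)  # assumed shape: (2, truncate_dim)
query_embedding = model.encode(
    "tell me a joke about AI", prompt_name="retrieval.query"
)                                            # assumed shape: (truncate_dim,)


def cosine(a, b):
    # Cosine similarity; does not assume the encoder returns unit-norm vectors.
    return float(a @ b) / (np.linalg.norm(a) * np.linalg.norm(b))


# Query-to-text similarity: rank the joke sentences against the retrieval query.
for sentence, emb in zip(sentences, text_embeddings):
    print(f"{cosine(query_embedding, emb):.3f}  {sentence}")

# Cross-modal similarity, mirroring the hunk's context line.
print(cosine(query_embedding, image_embeddings[0]))
print(cosine(text_embeddings[0], image_embeddings[0]))
```

If the encoder already returns normalized vectors, the plain `@` product from the README's context line gives the same ranking; the explicit cosine here just avoids relying on that.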