Shitao committed on
Commit 1897ff2
1 Parent(s): e808c62

Upload README.md with huggingface_hub

Files changed (1)
  1. README.md +5 -3
README.md CHANGED
@@ -77,7 +77,8 @@ print(scores) # [0.00027803096387751553, 0.9948403768236574]
 
 ```python
 from FlagEmbedding import FlagLLMReranker
-reranker = FlagLLMReranker('BAAI/bge-reranker-v2-gemma', use_bf16=True) # Setting use_bf16 to True speeds up computation with a slight performance degradation
+reranker = FlagLLMReranker('BAAI/bge-reranker-v2-gemma', use_fp16=True) # Setting use_fp16 to True speeds up computation with a slight performance degradation
+# reranker = FlagLLMReranker('BAAI/bge-reranker-v2-gemma', use_bf16=True) # You can also set use_bf16=True to speed up computation with a slight performance degradation
 
 score = reranker.compute_score(['query', 'passage'])
 print(score)
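
This hunk switches the README's documented half-precision flag from use_bf16 to use_fp16, keeping bf16 as a commented-out alternative. A minimal usage sketch under the new flag; the query/passage strings and the ranking loop are illustrative additions, not part of the README:

```python
# Hedged sketch: scoring several passages against one query with the
# use_fp16 flag this commit documents. Sample strings are placeholders.
from FlagEmbedding import FlagLLMReranker

reranker = FlagLLMReranker('BAAI/bge-reranker-v2-gemma', use_fp16=True)

query = 'what is panda?'
passages = ['hi', 'The giant panda is a bear species endemic to China.']

# compute_score takes [query, passage] pairs and returns one score per pair.
scores = reranker.compute_score([[query, p] for p in passages])

# Higher score means more relevant; sort passages accordingly.
for passage, score in sorted(zip(passages, scores), key=lambda x: -x[1]):
    print(f'{score:.4f}\t{passage}')
```
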
@@ -90,7 +91,8 @@ print(scores)
 
 ```python
 from FlagEmbedding import LayerWiseFlagLLMReranker
-reranker = LayerWiseFlagLLMReranker('BAAI/bge-reranker-v2-minicpm-layerwise', use_bf16=True) # Setting use_bf16 to True speeds up computation with a slight performance degradation
+reranker = LayerWiseFlagLLMReranker('BAAI/bge-reranker-v2-minicpm-layerwise', use_fp16=True) # Setting use_fp16 to True speeds up computation with a slight performance degradation
+# reranker = LayerWiseFlagLLMReranker('BAAI/bge-reranker-v2-minicpm-layerwise', use_bf16=True) # You can also set use_bf16=True to speed up computation with a slight performance degradation
 
 score = reranker.compute_score(['query', 'passage'], cutoff_layers=[28]) # Adjusting 'cutoff_layers' to pick which layers are used for computing the score.
 print(score)
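
The second hunk makes the same fp16/bf16 swap for the layerwise reranker. Since cutoff_layers is the distinguishing knob in this snippet, a short sketch of the trade-off it controls; [28] is the README's value, and the shallower layer index below is an assumption for illustration only:

```python
# Hedged sketch: the layerwise reranker can score from an earlier decoder
# layer, trading some accuracy for speed. 28 is the README's cutoff; 20 is
# an assumed shallower cutoff used here purely for comparison.
from FlagEmbedding import LayerWiseFlagLLMReranker

reranker = LayerWiseFlagLLMReranker(
    'BAAI/bge-reranker-v2-minicpm-layerwise',
    use_fp16=True,  # the flag this commit switches the README to
)

pair = ['what is panda?', 'The giant panda is a bear species endemic to China.']

# Score the same pair at two different cutoff layers and compare.
for layer in (28, 20):
    score = reranker.compute_score(pair, cutoff_layers=[layer])
    print(f'cutoff layer {layer}: {score}')
```
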
@@ -230,7 +232,7 @@ def get_inputs(pairs, tokenizer, prompt=None, max_length=1024):
         return_tensors='pt',
     )
 
-tokenizer = AutoTokenizer.from_pretrained('BAAI/bge-reranker-v2-minicpm-layerwise', trust_remote_code=True, torch_dtype=torch.bfloat16)
+tokenizer = AutoTokenizer.from_pretrained('BAAI/bge-reranker-v2-minicpm-layerwise', trust_remote_code=True)
 model = AutoModelForCausalLM.from_pretrained('BAAI/bge-reranker-v2-minicpm-layerwise', trust_remote_code=True, torch_dtype=torch.bfloat16)
 model = model.to('cuda')
 model.eval()
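
The last hunk fixes the raw-transformers snippet: torch_dtype is a model-loading argument, not a tokenizer one, so it is dropped from the AutoTokenizer call while the model keeps bfloat16 weights. A self-contained sketch of the corrected loading path; the scoring step itself still relies on the README's get_inputs helper defined earlier in the file:

```python
# Corrected loading pattern from this commit: the dtype belongs on the
# model only; AutoTokenizer takes no torch_dtype argument.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(
    'BAAI/bge-reranker-v2-minicpm-layerwise', trust_remote_code=True
)
model = AutoModelForCausalLM.from_pretrained(
    'BAAI/bge-reranker-v2-minicpm-layerwise',
    trust_remote_code=True,
    torch_dtype=torch.bfloat16,  # weights stay in bfloat16 on the model side
)
model = model.to('cuda')
model.eval()

# Inference then proceeds as in the README: build batch inputs with its
# get_inputs helper and score pairs under torch.no_grad().
```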