ssmits committed
Commit decde55
1 Parent(s): b1638d5

Update README.md

Files changed (1)
  1. README.md +0 -2
README.md CHANGED
@@ -65,14 +65,12 @@ Without sentence-transformers, you can use the model like this: First, you pass
 from transformers import AutoTokenizer, AutoModel
 import torch
 
-
 #Mean Pooling - Take attention mask into account for correct averaging
 def mean_pooling(model_output, attention_mask):
     token_embeddings = model_output[0] #First element of model_output contains all token embeddings
     input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
     return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)
 
-
 # Sentences we want sentence embeddings for
 sentences = ['This is an example sentence', 'Each sentence is converted']
 
 
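For readers landing on this commit, the context lines above are only the top of the README's Transformers-only usage example: they define mean pooling (a masked average over token embeddings) and the input sentences. A minimal runnable sketch of how that snippet typically continues is below; the id 'your-model-id' is a hypothetical placeholder, since this commit excerpt does not show the repository's actual model name.

```python
from transformers import AutoTokenizer, AutoModel
import torch

# Mean Pooling - take the attention mask into account for correct averaging
def mean_pooling(model_output, attention_mask):
    token_embeddings = model_output[0]  # first element holds all token embeddings
    input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
    return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)

# Sentences we want sentence embeddings for
sentences = ['This is an example sentence', 'Each sentence is converted']

# 'your-model-id' is a placeholder: substitute the actual Hub repo id
tokenizer = AutoTokenizer.from_pretrained('your-model-id')
model = AutoModel.from_pretrained('your-model-id')

# Tokenize sentences (padding so the batch is rectangular; the attention
# mask records which positions are real tokens vs. padding)
encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt')

# Compute token embeddings without tracking gradients
with torch.no_grad():
    model_output = model(**encoded_input)

# Pool token embeddings into one vector per sentence, ignoring padding
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])
print(sentence_embeddings.shape)  # (2, hidden_size)
```

The attention-mask weighting is the point of the function: padded positions contribute zero to the sum, and the `clamp(min=1e-9)` guard avoids division by zero for a fully masked row.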