Ellbendls committed
Commit 4b1ac70
1 Parent(s): 5ffad35

Update README.md

Files changed (1)
  1. README.md +18 -3
README.md CHANGED
@@ -91,10 +91,25 @@ tokenizer = AutoTokenizer.from_pretrained(model_name)
 model = AutoModelForCausalLM.from_pretrained(model_name)
 
 # Example usage
-input_text = "What are the common symptoms of a urinary tract infection?"
-inputs = tokenizer(input_text, return_tensors="pt")
-outputs = model.generate(**inputs)
+input_text = "I had a surgery which ended up with some failures. What can I do to fix it?"
+
+# Prepare inputs with explicit padding and attention mask
+inputs = tokenizer(input_text, return_tensors="pt", padding=True, truncation=True)
+
+# Generate response with more explicit parameters
+outputs = model.generate(
+    input_ids=inputs['input_ids'],
+    attention_mask=inputs['attention_mask'],
+    max_new_tokens=150,       # Specify max new tokens to generate
+    do_sample=True,           # Enable sampling for more diverse responses
+    temperature=0.7,          # Control randomness of output
+    top_p=0.9,                # Nucleus sampling to maintain quality
+    num_return_sequences=1    # Number of generated sequences
+)
+
+# Decode the generated response
 response = tokenizer.decode(outputs[0], skip_special_tokens=True)
+
 print(response)
 ```
 
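For reference, the updated example assembles into a self-contained script along the lines of the sketch below. The imports and `model_name` live earlier in the README, outside this hunk, so the checkpoint id here is only a placeholder; the pad-token fallback is likewise an assumption, added because `padding=True` raises an error on tokenizers that ship without a pad token.

```python
# Minimal end-to-end sketch of the updated README example.
# NOTE: "your-org/your-model" is a placeholder; the real model_name is
# defined earlier in the README, outside this diff hunk.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "your-org/your-model"  # placeholder checkpoint id
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

input_text = "I had a surgery which ended up with some failures. What can I do to fix it?"

# Padding requires a pad token; causal-LM tokenizers often reuse EOS for this
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token

# Tokenize with explicit padding and truncation, as in the new example
inputs = tokenizer(input_text, return_tensors="pt", padding=True, truncation=True)

# Sample a response with the generation parameters from the diff
outputs = model.generate(
    input_ids=inputs["input_ids"],
    attention_mask=inputs["attention_mask"],
    max_new_tokens=150,
    do_sample=True,
    temperature=0.7,
    top_p=0.9,
    num_return_sequences=1,
)

response = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(response)
```

With `do_sample=True`, `temperature=0.7` and `top_p=0.9` trade diversity against coherence; lowering either value makes the output more deterministic, while `max_new_tokens=150` bounds the response length independently of the prompt length.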