sounar committed
Commit c0c54c0 · verified · 1 Parent(s): 121448b

Update app.py

Files changed (1):
  1. app.py +10 -5
app.py CHANGED
@@ -20,7 +20,11 @@ model = AutoModelForCausalLM.from_pretrained(
 )
 tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
 
-# Set the chat template
+# Set pad_token_id to eos_token_id if None
+if tokenizer.pad_token_id is None:
+    tokenizer.pad_token_id = tokenizer.eos_token_id
+
+# Define the chat template
 chat_template = """<|im_start|>system
 {system}
 <|im_end|>
@@ -31,7 +35,7 @@ chat_template = """<|im_start|>system
 """
 tokenizer.chat_template = chat_template
 
-# Define the askme function
+# Function to generate a response
 def askme(question):
     sys_message = """
     You are an AI Medical Assistant trained on a vast dataset of health information. Please be thorough and
@@ -47,10 +51,11 @@ def askme(question):
     # Generate response
     outputs = model.generate(**inputs, max_new_tokens=100, use_cache=True)
 
-    # Extract and return the generated text
+    # Decode and clean up the response
     response_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
-    answer = response_text.split("<|im_start|>assistant")[-1].strip()
-    return answer
+    if "<|im_start|>assistant" in response_text:
+        response_text = response_text.split("<|im_start|>assistant")[-1].strip()
+    return response_text
 
 # Example usage
 question = """