Spaces:
Sleeping
Sleeping
Ajay12345678980
committed on
Update app.py
Browse files
app.py
CHANGED
@@ -4,7 +4,7 @@ import gradio as gr
|
|
4 |
from transformers import GPT2LMHeadModel, GPT2Tokenizer
|
5 |
|
6 |
# Load the model and tokenizer from Hugging Face repository
|
7 |
-
model_repo_id = "Ajay12345678980/QA_bot" #
|
8 |
|
9 |
# Initialize the model and tokenizer
|
10 |
model = GPT2LMHeadModel.from_pretrained(model_repo_id)
|
@@ -23,15 +23,7 @@ def predict(text):
|
|
23 |
# Decode the generated output
|
24 |
prediction = tokenizer.decode(outputs[0], skip_special_tokens=True)
|
25 |
|
26 |
-
#
|
27 |
-
match = re.search(r'Answer\s*(.*?)\s*<ANS', prediction, re.DOTALL)
|
28 |
-
|
29 |
-
if match:
|
30 |
-
# Return the extracted content
|
31 |
-
return match.group(1).strip()
|
32 |
-
else:
|
33 |
-
# If no match is found, return a message indicating that
|
34 |
-
return "No answer found between 'Answer' and '<ANS>' markers."
|
35 |
except Exception as e:
|
36 |
# Handle and print any exceptions for debugging
|
37 |
return f"An error occurred: {str(e)}"
|
|
|
4 |
from transformers import GPT2LMHeadModel, GPT2Tokenizer
|
5 |
|
6 |
# Load the model and tokenizer from Hugging Face repository
|
7 |
+
model_repo_id = "Ajay12345678980/QA_bot" # Replace with your model repository ID
|
8 |
|
9 |
# Initialize the model and tokenizer
|
10 |
model = GPT2LMHeadModel.from_pretrained(model_repo_id)
|
|
|
23 |
# Decode the generated output
|
24 |
prediction = tokenizer.decode(outputs[0], skip_special_tokens=True)
|
25 |
|
26 |
+
return prediction.strip() # Return the clean output
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
27 |
except Exception as e:
|
28 |
# Handle and print any exceptions for debugging
|
29 |
return f"An error occurred: {str(e)}"
|