Ajay12345678980 committed
Commit 5c31fd7 · verified · 1 Parent(s): bea575c

Update app.py

Files changed (1): app.py +6 -18
app.py CHANGED
@@ -1,25 +1,13 @@
 import gradio as gr
-from transformers import GPT2LMHeadModel, GPT2Tokenizer
-import torch
-import os
+from transformers import pipeline
 
-# Load token from environment variable
-token = os.getenv('ACCESS_SECRET')
+# Load the model using the Hugging Face pipeline
+model = pipeline("text-generation", model="Ajay12345678980/QA_bot")
 
-# Specify the repository ID
-model_repo_id = "Ajay12345678980/QA_bot"
-
-# Load model and tokenizer
-model = GPT2LMHeadModel.from_pretrained(model_repo_id, use_auth_token=token)
-tokenizer = GPT2Tokenizer.from_pretrained(model_repo_id, use_auth_token=token)
-
-# Define prediction function
+# Define the prediction function
 def predict(text):
-    inputs = tokenizer.encode(text, return_tensors="pt")
-    with torch.no_grad():
-        outputs = model.generate(inputs, max_length=50, do_sample=True)
-    prediction = tokenizer.decode(outputs[0], skip_special_tokens=True)
-    return prediction
+    outputs = model(text, max_length=50, do_sample=True)
+    return outputs[0]["generated_text"]
 
 # Set up Gradio interface
 interface = gr.Interface(
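
For reference, a minimal sketch of how the full post-commit app.py might read. The hunk ends at the opening gr.Interface( call, so everything from the interface arguments onward (the fn/inputs/outputs keywords and the launch() call) is an assumption for illustration, not part of this diff:

    import gradio as gr
    from transformers import pipeline

    # Load the model using the Hugging Face pipeline
    model = pipeline("text-generation", model="Ajay12345678980/QA_bot")

    # Define the prediction function
    def predict(text):
        outputs = model(text, max_length=50, do_sample=True)
        return outputs[0]["generated_text"]

    # Set up Gradio interface
    # NOTE: the arguments below are assumed; the diff does not show them
    interface = gr.Interface(
        fn=predict,
        inputs="text",
        outputs="text",
    )

    interface.launch()

One practical effect of the change: pipeline(...) is now called without an auth token, whereas the old code passed use_auth_token=token read from the ACCESS_SECRET environment variable, so the new code only works if the Ajay12345678980/QA_bot repo is publicly readable.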