jophex committed on
Commit c43c8ba · verified · 1 Parent(s): 8651d8b

Update app.py

Files changed (1)
  1. app.py +38 -36
app.py CHANGED
@@ -1,43 +1,45 @@
 
- # gr.load("models/microsoft/Phi-3.5-mini-instruct").launch(share=True)
-
- import gradio as gr
- from transformers import AutoTokenizer, AutoModelForCausalLM
- import torch
-
- # Load the model and tokenizer
- tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-3.5-mini-instruct", trust_remote_code=True)
- model = AutoModelForCausalLM.from_pretrained("microsoft/Phi-3.5-mini-instruct", trust_remote_code=True)
-
- # Define the role prompt for advertisement assistance
- role_prompt = "You are an advertisement assistant. Respond professionally and helpfully to advertising-related questions.\n\n"
-
- # Function to generate responses
- def generate_response(user_input):
-     # Prepend role information to user input
-     input_text = role_prompt + user_input
-
-     # Tokenize and generate response
-     inputs = tokenizer(input_text, return_tensors="pt")
-     outputs = model.generate(
-         **inputs,
-         max_new_tokens=50,  # Increase this if you want longer responses
-         temperature=0.7,    # Adjust for creativity
-         top_p=0.9           # Nucleus sampling to control randomness
-     )
-
-     # Decode and return the response
-     response = tokenizer.decode(outputs[0], skip_special_tokens=True)
-     return response
-
- # Set up Gradio interface
- interface = gr.Interface(
-     fn=generate_response,
-     inputs="text",
-     outputs="text",
-     title="Advertisement Assistant Chatbot",
-     description="Ask me anything related to advertising. I'm here to help!"
- )
-
- # Launch the Gradio app with sharing enabled
- interface.launch(share=True)
+ import gradio as gr
+
+ gr.load("models/microsoft/Phi-3.5-mini-instruct").launch(share=True)
+
+ # from transformers import AutoTokenizer, AutoModelForCausalLM
+ # import torch
+
+ # # Load the model and tokenizer
+ # tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-3.5-mini-instruct", trust_remote_code=True)
+ # model = AutoModelForCausalLM.from_pretrained("microsoft/Phi-3.5-mini-instruct", trust_remote_code=True)
+
+ # # Define the role prompt for advertisement assistance
+ # role_prompt = "You are an advertisement assistant. Respond professionally and helpfully to advertising-related questions.\n\n"
+
+ # # Function to generate responses
+ # def generate_response(user_input):
+ #     # Prepend role information to user input
+ #     input_text = role_prompt + user_input
+
+ #     # Tokenize and generate response
+ #     inputs = tokenizer(input_text, return_tensors="pt")
+ #     outputs = model.generate(
+ #         **inputs,
+ #         max_new_tokens=50,  # Increase this if you want longer responses
+ #         temperature=0.7,    # Adjust for creativity
+ #         top_p=0.9           # Nucleus sampling to control randomness
+ #     )
+
+ #     # Decode and return the response
+ #     response = tokenizer.decode(outputs[0], skip_special_tokens=True)
+ #     return response
+
+ # # Set up Gradio interface
+ # interface = gr.Interface(
+ #     fn=generate_response,
+ #     inputs="text",
+ #     outputs="text",
+ #     title="Advertisement Assistant Chatbot",
+ #     description="Ask me anything related to advertising. I'm here to help!"
+ # )
+
+ # # Launch the Gradio app with sharing enabled
+ # interface.launch(share=True)
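
Note on the change: the new app.py hands inference off to the Hugging Face Hub via gr.load (for "models/…" names, Gradio builds the demo on top of the hosted Inference API) instead of running Phi-3.5-mini-instruct locally. If the commented-out local-generation path is ever restored, a minimal runnable sketch might look like the code below. One detail worth flagging: in transformers, temperature and top_p only take effect when do_sample=True, which the original generate() call did not set; the sketch assumes sampling is intended.

# Sketch: restoring local generation (assumes sampling is intended)
import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the model and tokenizer from the Hub
tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-3.5-mini-instruct", trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained("microsoft/Phi-3.5-mini-instruct", trust_remote_code=True)

role_prompt = (
    "You are an advertisement assistant. Respond professionally and helpfully "
    "to advertising-related questions.\n\n"
)

def generate_response(user_input):
    # Prepend the role prompt to the user's question
    input_text = role_prompt + user_input
    inputs = tokenizer(input_text, return_tensors="pt")
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=50,
            do_sample=True,   # needed for temperature/top_p to have any effect
            temperature=0.7,
            top_p=0.9,
        )
    # Decode and return the generated text
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

gr.Interface(
    fn=generate_response,
    inputs="text",
    outputs="text",
    title="Advertisement Assistant Chatbot",
    description="Ask me anything related to advertising. I'm here to help!",
).launch(share=True)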