jophex committed on
Commit
81c6112
·
verified ·
1 Parent(s): fe4e476

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +41 -39
app.py CHANGED
@@ -1,61 +1,63 @@
1
  import gradio as gr
2
 
3
 
4
- # gr.load("models/microsoft/Phi-3.5-mini-instruct", max_new_tokens=50).launch(share=True)
5
 
 
6
 
7
- # gradio_app = gr.Interface(
8
- # fn=model,
9
- # inputs="text",
10
- # outputs="text",
11
- # title="Advertisment companion",
12
- # )
 
 
13
 
14
 
15
 
16
- # gradio_app
17
 
18
 
19
 
20
 
21
- from transformers import AutoTokenizer, AutoModelForCausalLM
22
- import torch
23
 
24
- # Load the model and tokenizer
25
- tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-3.5-mini-instruct", trust_remote_code=True)
26
- model = AutoModelForCausalLM.from_pretrained("microsoft/Phi-3.5-mini-instruct", trust_remote_code=True)
27
 
28
- # Define the role prompt for advertisement assistance
29
- # role_prompt = "You are an advertisement assistant. Respond professionally and helpfully to advertising-related questions.\n\n"
30
 
31
- # Function to generate responses
32
- def generate_response(user_input):
33
- # Prepend role information to user input
34
- # input_text = user_input
35
 
36
- # Tokenize and generate response
37
- inputs = tokenizer(user_input, return_tensors="pt")
38
- outputs = model.generate(
39
- **inputs,
40
- max_new_tokens=100, # Increase this if you want longer responses
41
- # Nucleus sampling to control randomness
42
- )
43
 
44
- # Decode and return the response
45
- response = tokenizer.decode(outputs[0], skip_special_tokens=True)
46
- return response
47
 
48
- # Set up Gradio interface
49
- interface = gr.Interface(
50
- fn=generate_response,
51
- inputs="text",
52
- outputs="text",
53
- title="Advertisement Assistant Chatbot",
54
- description="Ask me anything related to advertising. I'm here to help!"
55
- )
56
 
57
- # Launch the Gradio app with sharing enabled
58
- interface.launch(share=True)
59
 
60
 
61
  # import gradio as gr
 
1
import gradio as gr

# Load the hosted Phi-3.5-mini-instruct model as a callable text-to-text
# predictor via Gradio's Hugging Face integration.
# FIX: the committed line ended with a stray trailing "." after the call,
# which is a SyntaxError and prevents the app from starting at all.
model = gr.load("models/microsoft/Phi-3.5-mini-instruct")

# Minimal text-in / text-out UI wrapped around the loaded model.
# NOTE(review): max_batch_size only takes effect when batch=True is also
# passed to gr.Interface; as written it is inert — confirm whether request
# batching was actually intended here.
gradio_app = gr.Interface(
    fn=model,
    inputs="text",
    outputs="text",
    max_batch_size=50,
    title="Advertisement companion",  # fixed user-visible typo "Advertisment"
)

# Launch with a public share link (required on hosted Spaces-style demos).
gradio_app.launch(share=True)
19
 
20
 
21
 
22
 
23
+ # from transformers import AutoTokenizer, AutoModelForCausalLM
24
+ # import torch
25
 
26
+ # # Load the model and tokenizer
27
+ # tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-3.5-mini-instruct", trust_remote_code=True)
28
+ # model = AutoModelForCausalLM.from_pretrained("microsoft/Phi-3.5-mini-instruct", trust_remote_code=True)
29
 
30
+ # # Define the role prompt for advertisement assistance
31
+ # # role_prompt = "You are an advertisement assistant. Respond professionally and helpfully to advertising-related questions.\n\n"
32
 
33
+ # # Function to generate responses
34
+ # def generate_response(user_input):
35
+ # # Prepend role information to user input
36
+ # # input_text = user_input
37
 
38
+ # # Tokenize and generate response
39
+ # inputs = tokenizer(user_input, return_tensors="pt")
40
+ # outputs = model.generate(
41
+ # **inputs,
42
+ # max_new_tokens=100, # Increase this if you want longer responses
43
+ # # Nucleus sampling to control randomness
44
+ # )
45
 
46
+ # # Decode and return the response
47
+ # response = tokenizer.decode(outputs[0], skip_special_tokens=True)
48
+ # return response
49
 
50
+ # # Set up Gradio interface
51
+ # interface = gr.Interface(
52
+ # fn=generate_response,
53
+ # inputs="text",
54
+ # outputs="text",
55
+ # title="Advertisement Assistant Chatbot",
56
+ # description="Ask me anything related to advertising. I'm here to help!"
57
+ # )
58
 
59
+ # # Launch the Gradio app with sharing enabled
60
+ # interface.launch(share=True)
61
 
62
 
63
  # import gradio as gr