jophex committed
Commit dfc296a · verified · 1 Parent(s): 8f6f64f

Update app.py

Files changed (1): app.py (+39 -52)
app.py CHANGED
@@ -2,11 +2,15 @@
 
 
 
-# model = gr.load("models/microsoft/Phi-3.5-mini-instruct")
+# def generate_responce(user_input):
+#     gr.load("models/microsoft/Phi-3.5-mini-instruct")
+
+#     inputs = tokenize(user_input, return_tensor="pt")
+#     outputs =
 
 
 # gradio_app = gr.Interface(
-#     fn=model,
+#     fn=generate_responce,
 #     inputs="text",
 #     outputs="text",
 #     max_batch_size=50,
@@ -15,61 +19,44 @@
 
 
 
-# gradio_app.launch(share=True)
-
-import gradio as gr
-
-# Load the model with gr.load()
-model_interface = gr.load("models/microsoft/Phi-3.5-mini-instruct")
-
-# Add customizations directly to the interface
-model_interface.title = "Advertisement Companion"
-model_interface.description = "Ask anything about advertising. Get professional advice and tips!"
-
-# Launch the model with batch processing
-model_interface.queue(max_size=50).launch(share=True)
-
-
+from transformers import AutoTokenizer, AutoModelForCausalLM
+import torch
 
+# Load the model and tokenizer
+tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-3.5-mini-instruct", trust_remote_code=True)
+model = AutoModelForCausalLM.from_pretrained("microsoft/Phi-3.5-mini-instruct", trust_remote_code=True)
 
-# from transformers import AutoTokenizer, AutoModelForCausalLM
-# import torch
-
-# # Load the model and tokenizer
-# tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-3.5-mini-instruct", trust_remote_code=True)
-# model = AutoModelForCausalLM.from_pretrained("microsoft/Phi-3.5-mini-instruct", trust_remote_code=True)
-
-# # Define the role prompt for advertisement assistance
-# # role_prompt = "You are an advertisement assistant. Respond professionally and helpfully to advertising-related questions.\n\n"
+# Define the role prompt for advertisement assistance
+# role_prompt = "You are an advertisement assistant. Respond professionally and helpfully to advertising-related questions.\n\n"
 
-# # Function to generate responses
-# def generate_response(user_input):
-#     # Prepend role information to user input
-#     # input_text = user_input
+# Function to generate responses
+def generate_response(user_input):
+    # Prepend role information to user input
+    # input_text = user_input
 
-#     # Tokenize and generate response
-#     inputs = tokenizer(user_input, return_tensors="pt")
-#     outputs = model.generate(
-#         **inputs,
-#         max_new_tokens=100,  # Increase this if you want longer responses
-#         # Nucleus sampling to control randomness
-#     )
+    # Tokenize and generate response
+    inputs = tokenizer(user_input, return_tensors="pt")
+    outputs = model.generate(
+        **inputs,
+        max_new_tokens=100,  # Increase this if you want longer responses
+        # Nucleus sampling to control randomness
+    )
 
-#     # Decode and return the response
-#     response = tokenizer.decode(outputs[0], skip_special_tokens=True)
-#     return response
-
-# # Set up Gradio interface
-# interface = gr.Interface(
-#     fn=generate_response,
-#     inputs="text",
-#     outputs="text",
-#     title="Advertisement Assistant Chatbot",
-#     description="Ask me anything related to advertising. I'm here to help!"
-# )
-
-# # Launch the Gradio app with sharing enabled
-# interface.launch(share=True)
+    # Decode and return the response
+    response = tokenizer.batch_decode(outputs, skip_special_tokens=True)
+    return response
+
+# Set up Gradio interface
+interface = gr.Interface(
+    fn=generate_response,
+    inputs="text",
+    outputs="text",
+    title="Advertisement Assistant Chatbot",
+    description="Ask me anything related to advertising. I'm here to help!"
+)
+
+# Launch the Gradio app with sharing enabled
+interface.launch(share=True)
 
 
 # import gradio as gr
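
As committed, the new app.py has two rough edges worth noting. The `gr.Interface` and `interface.launch` calls need `import gradio as gr`, but the only gradio import visible in the diff is the commented-out one left as context at the bottom; if line 1 of the file (which the diff does not show) does not already import gradio, the app will fail with a NameError. Also, `tokenizer.batch_decode(outputs, skip_special_tokens=True)` returns a list of strings that still contains the prompt, while a "text" output component expects a single string. Below is a minimal sketch of how the script could be tightened up; the `do_sample`/`top_p` values are illustrative choices added for the "nucleus sampling" comment and are not part of the commit.

import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer (same checkpoint as the commit)
tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-3.5-mini-instruct", trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained("microsoft/Phi-3.5-mini-instruct", trust_remote_code=True)

def generate_response(user_input):
    # Tokenize the prompt
    inputs = tokenizer(user_input, return_tensors="pt")
    outputs = model.generate(
        **inputs,
        max_new_tokens=100,  # increase for longer responses
        do_sample=True,      # nucleus sampling (illustrative values, not from the commit)
        top_p=0.9,
    )
    # Decode only the newly generated tokens and return a single string
    generated = outputs[0][inputs["input_ids"].shape[-1]:]
    return tokenizer.decode(generated, skip_special_tokens=True)

# Set up and launch the Gradio interface, as in the commit
interface = gr.Interface(
    fn=generate_response,
    inputs="text",
    outputs="text",
    title="Advertisement Assistant Chatbot",
    description="Ask me anything related to advertising. I'm here to help!",
)
interface.launch(share=True)

One further nit: the commented-out scaffold near the top of the file still spells its callback `generate_responce`, while the live function is `generate_response`; it is inert because it is commented out, but worth aligning in a follow-up commit.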