dingusagar committed · Commit bd1c71d · 1 Parent(s): f6807a1
Create app.py
app.py
ADDED
@@ -0,0 +1,95 @@
import gradio as gr
import requests
import subprocess
import time
import json

from ollama import chat
from ollama import ChatResponse

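# Note: this app assumes the `ollama` CLI binary is available on PATH (for a
# Space, typically installed via a custom Dockerfile) and that the `ollama`
# and `gradio` Python packages are installed.
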

def start_ollama_server():
    # Start the Ollama server in the background
    print("Starting Ollama server...")
    subprocess.Popen(["ollama", "serve"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    time.sleep(5)  # Give some time for the server to start

    # Pull the required model
    print("Pulling the required model...")
    subprocess.run(["ollama", "pull", "llama3.2:1b"], check=True)

    # Warm up the model so the first request does not pay the load cost
    print("Loading the model...")
    subprocess.Popen(["ollama", "run", "llama3.2:1b"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    print("Ollama model started.")

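# A fixed sleep can be flaky on slow starts. A minimal readiness-check sketch,
# assuming Ollama's default port 11434 (commented out, not wired into the app):
#
# def wait_for_ollama(timeout=30):
#     deadline = time.time() + timeout
#     while time.time() < deadline:
#         try:
#             # The Ollama server answers plain GET requests on its root URL once up
#             if requests.get("http://localhost:11434").ok:
#                 return
#         except requests.exceptions.ConnectionError:
#             pass
#         time.sleep(1)
#     raise RuntimeError("Ollama server did not become ready in time")
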

# Function to send a prompt to the Ollama server and return the response
def ask_ollama(question):
    # Earlier raw REST implementation, kept for reference:
    # url = "http://localhost:11434/api/generate"
    # headers = {"Content-Type": "application/json"}
    # data = {
    #     "model": "llama3.1",
    #     "prompt": prompt,
    #     "format": "json",
    #     "stream": False
    # }
    # try:
    #     # Send the POST request to the API
    #     response = requests.post(url, headers=headers, json=data)
    #     response.raise_for_status()  # Raise an exception for HTTP errors
    #     result = response.json()  # Parse the JSON response
    #
    #     # Extract and clean the "response" field
    #     actual_response = result.get("response", "").strip()
    #     print(actual_response)
    #     return actual_response if actual_response else "No response found"
    # except requests.exceptions.RequestException as e:
    #     return f"Error: {str(e)}"

    prompt_template = f"""
### You are an expert in the subreddit r/AmItheAsshole.

### Your task is to classify the given text as YTA or NTA and give an explanation for the label.

### The output format is as follows:
"YTA" or "NTA", explanation for the label.

### Input Text : {question}
"""

    response: ChatResponse = chat(model='llama3.2:1b', messages=[
        {
            'role': 'user',
            'content': prompt_template,
        },
    ])
    print(response['message']['content'])
    # or access fields directly from the response object
    return response['message']['content']

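# Note: the ollama client also exposes the same field as attributes,
# e.g. response.message.content is equivalent to response['message']['content'].
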

# Gradio Interface
def gradio_interface(prompt):
    return ask_ollama(prompt)


# Build the Gradio app
with gr.Blocks() as demo:
    gr.Markdown("# Ollama Server Interface")
    gr.Markdown("Ask questions and get responses from the Ollama server.")

    with gr.Row():
        input_prompt = gr.Textbox(label="Enter your question", placeholder="Type your question here...")

    with gr.Row():
        submit_button = gr.Button("Ask")

    with gr.Row():
        output_response = gr.Textbox(label="Response", lines=10)

    submit_button.click(gradio_interface, inputs=input_prompt, outputs=output_response)

# Launch the app
if __name__ == "__main__":
    start_ollama_server()
    demo.launch(server_name="0.0.0.0", server_port=7860, share=True)
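
# Note: on a Hugging Face Space the app is already served publicly on port 7860,
# so share=True is typically ignored there; the flag matters mainly for local runs.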