daniel-dona committed on
Commit
b34d81a
·
1 Parent(s): 3e1facf
Files changed (1) hide show
  1. app.py +41 -29
app.py CHANGED
@@ -1,10 +1,16 @@
1
- import gradio as gr
2
- import ollama
3
  import json
4
 
 
 
 
 
 
 
 
5
  json_code = "```json\n{\n \"classes\": [\n \"http://data.europa.eu/949/ContactLineSystem\"\n ],\n \"properties\": [\n \"http://data.europa.eu/949/contactLineSystemType\",\n \"http://data.europa.eu/949/energySupplySystemTSICompliant\",\n \"http://data.europa.eu/949/conditionsAppliedRegenerativeBraking\",\n \"http://data.europa.eu/949/conditionalRegenerativeBrake\"\n ]\n}\n```"
6
 
7
- client_gpu_local = ollama.Client(host='http://192.168.10.100:11434')
8
 
9
  model = "test_class_prop"
10
 
@@ -24,27 +30,33 @@ def clean_output():
24
  def submit_query(msg):
25
 
26
  if msg != "":
27
-
28
- prompt = f"Identify the classes and properties used in this natural language query: \"{msg}\""
29
-
30
- messages = [{'role': 'user', 'content': prompt}]
31
 
32
- response = client_gpu_local.chat(
33
- model,
34
- messages=messages,
35
- options=model_options
36
- )
 
 
 
 
 
 
37
 
38
- msg = response.message.content
39
 
40
- try:
41
 
42
- json_data_test = json.loads(msg.split("```json")[1].split("```")[0])
 
 
 
 
 
43
 
44
- return json.dumps(json_data_test, indent=2)
45
-
46
  except:
47
- return "{}"
 
48
 
49
  return "{}"
50
 
@@ -98,32 +110,32 @@ examples_data = [[e["instruction"].split(": ")[1], e["output"].split("```json")[
98
 
99
 
100
 
101
- with gr.Blocks() as demo:
102
 
103
- query = gr.Textbox(render=False, label="Query", placeholder="Write a query and press Enter.", submit_btn="Send query")
104
 
105
 
106
- code_llm = gr.Code(render=False, label="LLM output", interactive=False, language="json")
107
- code_ref = gr.Code(render=False, label="Expected output", interactive=False, language="json")
108
 
109
- #chat = gr.Chatbot(render=False, value=history, label="LLM output", type="messages")
110
 
111
- with gr.Row():
112
  query.render()
113
 
114
- with gr.Row():
115
 
116
- with gr.Accordion(label="Examples", open=False):
117
 
118
- gr.Examples(label="Query examples", examples=examples_data, example_labels=[e[0] for e in examples_data], cache_examples=False, inputs=[query, code_ref],examples_per_page=10)
119
 
120
  code_ref.render()
121
- #with gr.Row():
122
  #chat.render()
123
 
124
 
125
 
126
- with gr.Row():
127
  code_llm.render()
128
 
129
  query.submit(submit_query, inputs=[query], outputs=[code_llm])
 
1
import os
import json

import ollama
import gradio


# Ollama server address ("host:port"); overridable via the OLLAMA_HOST
# environment variable so deployments don't require a code change.
OLLAMA_HOST = os.getenv('OLLAMA_HOST', "192.168.10.100:11434")


# Sample fenced-```json answer (ERA ontology classes/properties).
# NOTE(review): not referenced by the code visible here — presumably used as a
# reference/example payload elsewhere in the file; confirm before removing.
json_code = "```json\n{\n \"classes\": [\n \"http://data.europa.eu/949/ContactLineSystem\"\n ],\n \"properties\": [\n \"http://data.europa.eu/949/contactLineSystemType\",\n \"http://data.europa.eu/949/energySupplySystemTSICompliant\",\n \"http://data.europa.eu/949/conditionsAppliedRegenerativeBraking\",\n \"http://data.europa.eu/949/conditionalRegenerativeBrake\"\n ]\n}\n```"

# Client for the Ollama instance; the 15 s timeout keeps the UI responsive
# when the backend is slow or unreachable (failures surface in submit_query).
client_gpu_local = ollama.Client(host=f"http://{OLLAMA_HOST}", timeout=15)

# Name of the fine-tuned model served by Ollama.
model = "test_class_prop"
 
 
30
def submit_query(msg):
    """Send a natural-language query to the LLM and return its JSON answer.

    Parameters
    ----------
    msg : str
        Query text from the Gradio textbox.

    Returns
    -------
    str
        Pretty-printed JSON extracted from the model's fenced ```json block,
        or "{}" when the input is empty or the answer contains no usable JSON.

    Raises
    ------
    gradio.Error
        When the LLM request itself fails (e.g. the client's 15 s timeout).
    """
    if msg != "":
        try:
            prompt = f"Identify the classes and properties used in this natural language query: \"{msg}\""

            messages = [{'role': 'user', 'content': prompt}]

            response = client_gpu_local.chat(
                model,
                messages=messages,
                options=model_options
            )

            answer = response.message.content

            try:
                # The model is expected to wrap its JSON in a ```json ... ``` fence;
                # re-serialize so the Code component shows normalized output.
                json_data = json.loads(answer.split("```json")[1].split("```")[0])
                return json.dumps(json_data, indent=2)
            except (IndexError, AttributeError, json.JSONDecodeError):
                # Missing fence / empty content / malformed JSON: show an empty
                # object rather than crashing the UI.
                return "{}"

        except Exception as exc:
            # NOTE(review): any chat() failure is surfaced as a timeout, matching
            # the original user-facing message; the root cause is chained so it
            # still appears in server logs.
            raise gradio.Error("Error: LLM request timed out!", duration=5) from exc

    return "{}"
62
 
 
110
 
111
 
112
 
113
# UI layout. NOTE(review): nesting below is reconstructed from a flattened
# diff rendering — confirm the exact row/accordion structure against the
# original file.
with gradio.Blocks() as demo:

    # Input textbox (rendered later inside its own row).
    query = gradio.Textbox(render=False, label="Query", placeholder="Write a query and press Enter.", submit_btn="Send query")

    # Read-only JSON panes: live model output vs. the expected answer for the
    # currently selected example.
    code_llm = gradio.Code(render=False, label="LLM output", interactive=False, language="json")
    code_ref = gradio.Code(render=False, label="Expected output", interactive=False, language="json")

    #chat = gradio.Chatbot(render=False, value=history, label="LLM output", type="messages")

    with gradio.Row():
        query.render()

    with gradio.Row():

        # Collapsible example picker; selecting an example fills both the query
        # box and the expected-output pane (inputs=[query, code_ref]).
        with gradio.Accordion(label="Examples", open=False):

            gradio.Examples(label="Query examples", examples=examples_data, example_labels=[e[0] for e in examples_data], cache_examples=False, inputs=[query, code_ref],examples_per_page=10)

        code_ref.render()
    #with gradio.Row():
    #chat.render()


    with gradio.Row():
        code_llm.render()

    # Enter / "Send query" routes the text through submit_query and shows the
    # result in the LLM-output pane.
    query.submit(submit_query, inputs=[query], outputs=[code_llm])