from langchain.llms import OpenAI
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.prompts.few_shot import FewShotPromptTemplate
import os
os.environ["OPENAI_API_KEY"] = os.environ.get("open_ai_key") #openai key
llm = OpenAI(temperature=0.7)
"""# Model Implementation"""
"""## Debugger Model v1"""
DMTemplate = """
STACKTRACE: {StackTrace}
CONTEXT: {Context}
USER RESPONSE: {UserResponse}
isFollowUpNeeded: {isFollowUpNeeded}
SYSTEM RESPONSE: {SystemResponse}
"""
DMPromptTemplate = PromptTemplate(input_variables=["StackTrace", "Context", "isFollowUpNeeded", "UserResponse", "SystemResponse"], template=DMTemplate)
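# Each few-shot example below is a dict keyed by the template's input variables;
# FewShotPromptTemplate renders every example through DMPromptTemplate, then
# appends the suffix holding the live inputs.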
DMExamples = [
{
"StackTrace": """$ sudo cog predict r8.im/allenhung1025/looptest@sha256:f5cd715e99046e0513fe2b4034e8f7d8c102525b02f49efb52b05f46fcb9ea83
Starting Docker image r8.im/allenhung1025/looptest@sha256:f5cd715e99046e0513fe2b4034e8f7d8c102525b02f49efb52b05f46fcb9ea83 and running setup()...
No CUDA runtime is found, using CUDA_HOME='/usr/local/cuda'
Traceback (most recent call last):
...
AssertionError:
Found no NVIDIA driver on your system. Please check that you
have an NVIDIA GPU and installed a driver from
http://www.nvidia.com/Download/index.aspx
ⅹ Failed to get container status: exit status 1""",
"Context": """$ cd /usr/local/cuda/samples/1_Utilities/deviceQuery
$ sudo make
$ ./deviceQuery
Detected 1 CUDA Capable device(s)
Device 0: "NVIDIA GeForce RTX 2080 Ti"
CUDA Driver Version / Runtime Version 11.4 / 11.2
CUDA Capability Major/Minor version number: 7.5
Total amount of global memory: 11016 MBytes (11551440896 bytes)
(68) Multiprocessors, ( 64) CUDA Cores/MP: 4352 CUDA Cores
...
Result = PASS""",
"UserResponse": "How do I fix this?",
"isFollowUpNeeded": "Yes",
"SystemResponse": """ Does it work if you run the model with Docker directly?
$ docker run -d -p 5000:5000 --gpus all r8.im/allenhung1025/looptest@sha256:f5cd715e99046e0513fe2b4034e8f7d8c102525b02f49efb52b05f46fcb9ea83
$ curl http://localhost:5000/predict -X POST -F seed=-1""",
},
{
"StackTrace":"""ERROR in ./node_modules/react-router-dom/server.mjs 18:15-21
Can't import the named export 'Action' from non EcmaScript module (only default export is available)""",
"Context": "I am using react-router version 6.0.0. This issue can be reproduced by installing react-router and react-router-dom on rails react app using webpacker.",
"UserResponse": "Why is this not working?",
"isFollowUpNeeded": "Yes",
"SystemResponse": "You probably have an older version of history installed. Install version 5.1.0 and you'll have the needed types."
},
{
"StackTrace":"MissingSchema Traceback (most recent call last)\nin ()\n3 fare = gr.inputs.Slider(minimum=0, maximum=1000, default=100, label=\"Fare (british pounds)\")\n4\n----> 5 gr.Interface(predict_survival, [sex, age, fare], \"label\", live=True).launch();\n\n7 frames\n/usr/local/lib/python3.6/dist-packages/requests/models.py in prepare_url(self, url, params)\n386 error = error.format(to_native_string(url, 'utf8'))\n387\n--> 388 raise MissingSchema(error)\n389\n390 if not host:\n\nMissingSchema: Invalid URL 'None': No schema supplied. Perhaps you meant http://None?`",
"Context": "The system response is alerting the user to ensure that the URL provided to the gr.Interface() is valid and includes the schema (e.g. http or https). The user must check for this in order to avoid the MissingSchema error.",
"UserResponse": "Can you show me how I could do this in code?",
"isFollowUpNeeded": "Yes",
"SystemResponse": """You can check that the URL is valid and includes the schema by using the Python requests library. You can use the "requests.utils.urlparse" method to check the URL format, and add the schema if needed."""
},
# {
# "StackTrace":r"""Error: Op payload too many bytes: [133060 > 131072]: {\"opId\":\"op-o4Px8Dt1qX\",\"sequenceNumber\":191,\"objectPath\":\"canvas-e7bqXMiHN4\",\"userId\":1011378,\"version\":64,\"type\":\"SIND\",\"schemaVersion\":170,\"basisOpId\":\"op-PlSlhFwNRQ\",\"diagnostics\":{\"docId\":\"PIhhZ8iQBG\",\"schemaVersion\":170,\"version\":64,\"basisOpId\":\"op-PlSlhFwNRQ\",\"sequenceNumber\":191,\"appVersionInfo\":{\"branch\":\"hemanth-hdoan-packs-crossdoc-fix-forward-20221117-adhoc\",\"hash\":\"3039edad3b42\"},\"appInstanceId\":\"d65642d2-4e6b-4d03-9328-61a39a11ab4f\",\"opGeneration\":1,\"creationMillis\":1668745463573,\"needsBlessing\":false}}\n at Ir.ensureOpNotTooBig (https://cdn.coda.io/assets/browser.723a8c13ee82fcfe8b99.entry.js:2:6132445)\n at Ir.applyLocalOperation (https://cdn.coda.io/assets/browser.723a8c13ee82fcfe8b99.entry.js:2:6132496)\n at gt._handleLocalOperation (https://cdn.coda.io/assets/browser.723a8c13ee82fcfe8b99.entry.js:2:5962142)\n at https://cdn.coda.io/assets/browser.723a8c13ee82fcfe8b99.entry.js:2:6237790\n at n.apply (https://cdn.coda.io/assets/browser.723a8c13ee82fcfe8b99.entry.js:2:6237896)\n at https://cdn.coda.io/assets/browser.723a8c13ee82fcfe8b99.entry.js:2:952844\n at Object.withoutNormalizing (https://cdn.coda.io/assets/browser.723a8c13ee82fcfe8b99.entry.js:2:933543)\n at Object.insertNodes (https://cdn.coda.io/assets/browser.723a8c13ee82fcfe8b99.entry.js:2:952043)\n at Yo (https://cdn.coda.io/assets/browser.723a8c13ee82fcfe8b99.entry.js:2:6268075)\n at Module.Ko (https://cdn.coda.io/assets/browser.723a8c13ee82fcfe8b99.entry.js:2:6267416)""",
# "Context": "The given stacktrace is related to an operation payload that is too large. The context suggests the user has tried to make changes to the Confluence document structure and the system response is to look into the slate API's insertFragment method in order to split the document into smaller chunks and reduce the payload size.",
# "UserResponse": "How could I code this?",
# "isFollowUpNeeded": "No",
# "SystemResponse": "You can use the slate API's insertFragment method to break the document into smaller chunks and reduce the payload size. This method takes a fragment to be inserted, and the path of the target node, and an optional index argument, and inserts the fragment at the given path and index."
# }
]
DMPrefix = """You are an expert {isLanguage} machine learning developer. Ask clarifying questions and debug the following error message. If 'isFollowUpNeeded' is 'Yes', gather context on the following stacktrace. Set 'isFollowUpNeeded' to 'No' if you feel like you have sufficient context to suggest approaches to solving the problem. If 'isFollowUpNeeded' is set to 'No', , suggest approaches to solving the problem or answering the 'User Response'. Do not repeat approaches, already suggested in 'Context'.:"""
DMSuffix = """
STACKTRACE: {StackTrace}
Context: {Context}
UserResponse: {UserResponse}
isFollowUpNeeded:
"""
DMPrompt = FewShotPromptTemplate(
examples=DMExamples,
example_prompt=DMPromptTemplate,
suffix=DMSuffix,
prefix=DMPrefix,
input_variables=["isLanguage","StackTrace", "UserResponse", "Context"]
)
DMChain = LLMChain(llm=llm, prompt=DMPrompt)
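# Illustrative direct call (placeholder values; the app invokes DMChain from
# chat() below with per-conversation state):
#   DMChain({"StackTrace": "<trace>", "Context": "", "UserResponse": "How do I fix this?", "isLanguage": "python"})['text']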
"""## Debugger Model v2
### isFurtherContextNeeded
"""
isFurtherContextNeededTemplate = """
STACKTRACE: {StackTrace}
CONTEXT: {Context}
USER RESPONSE: {UserResponse}
isFurtherContextNeeded: {isFurtherContextNeeded}
"""
isFurtherContextNeededPromptTemplate = PromptTemplate(input_variables=["StackTrace", "Context", "isFurtherContextNeeded", "UserResponse"], template=isFurtherContextNeededTemplate)
isFurtherContextNeededExamples = [
{
"StackTrace": """$ sudo cog predict r8.im/allenhung1025/looptest@sha256:f5cd715e99046e0513fe2b4034e8f7d8c102525b02f49efb52b05f46fcb9ea83
Starting Docker image r8.im/allenhung1025/looptest@sha256:f5cd715e99046e0513fe2b4034e8f7d8c102525b02f49efb52b05f46fcb9ea83 and running setup()...
No CUDA runtime is found, using CUDA_HOME='/usr/local/cuda'
Traceback (most recent call last):
...
AssertionError:
Found no NVIDIA driver on your system. Please check that you
have an NVIDIA GPU and installed a driver from
http://www.nvidia.com/Download/index.aspx
ⅹ Failed to get container status: exit status 1""",
"Context": """$ cd /usr/local/cuda/samples/1_Utilities/deviceQuery
$ sudo make
$ ./deviceQuery
Detected 1 CUDA Capable device(s)
Device 0: "NVIDIA GeForce RTX 2080 Ti"
CUDA Driver Version / Runtime Version 11.4 / 11.2
CUDA Capability Major/Minor version number: 7.5
Total amount of global memory: 11016 MBytes (11551440896 bytes)
(68) Multiprocessors, ( 64) CUDA Cores/MP: 4352 CUDA Cores
...
Result = PASS""",
"UserResponse": "How do I fix this?",
"isFurtherContextNeeded": "Yes"},
{
"StackTrace":"""ERROR in ./node_modules/react-router-dom/server.mjs 18:15-21
Can't import the named export 'Action' from non EcmaScript module (only default export is available)""",
"Context": "I am using react-router version 6.0.0. This issue can be reproduced by installing react-router and react-router-dom on rails react app using webpacker.",
"UserResponse": "Why is this not working?",
"isFurtherContextNeeded": "Yes"},
{
"StackTrace":"MissingSchema Traceback (most recent call last)\nin ()\n3 fare = gr.inputs.Slider(minimum=0, maximum=1000, default=100, label=\"Fare (british pounds)\")\n4\n----> 5 gr.Interface(predict_survival, [sex, age, fare], \"label\", live=True).launch();\n\n7 frames\n/usr/local/lib/python3.6/dist-packages/requests/models.py in prepare_url(self, url, params)\n386 error = error.format(to_native_string(url, 'utf8'))\n387\n--> 388 raise MissingSchema(error)\n389\n390 if not host:\n\nMissingSchema: Invalid URL 'None': No schema supplied. Perhaps you meant http://None?`",
"Context": "The system response is alerting the user to ensure that the URL provided to the gr.Interface() is valid and includes the schema (e.g. http or https). The user must check for this in order to avoid the MissingSchema error.",
"UserResponse": "Can you show me how I could do this in code?",
"isFurtherContextNeeded": "Yes"},
]
isFurtherContextNeededPrefix = """You are an expert {isLanguage} machine learning developer. Ask clarifying questions and debug the following error message. You can only set 'isFollowUpNeeded' to either 'Yes' or 'No'. Set 'isFollowUpNeeded' is 'Yes', if further context to answer the 'User Response' or to debug the 'StackTrace'. Set 'isFollowUpNeeded' to 'No' if you feel like you have sufficient context to suggest approaches to solving the problem. Do not repeat approaches, already suggested in 'Context'.:"""
isFurtherContextNeededSuffix = """
STACKTRACE: {StackTrace}
Context: {Context}
UserResponse: {UserResponse}
isFurtherContextNeeded:
"""
isFurtherContextNeededPrompt = FewShotPromptTemplate(
examples=isFurtherContextNeededExamples,
example_prompt=isFurtherContextNeededPromptTemplate,
suffix=isFurtherContextNeededSuffix,
prefix=isFurtherContextNeededPrefix,
input_variables=["isLanguage","StackTrace", "Context", "UserResponse"]
)
isFurtherContextNeededChain = LLMChain(llm=llm, prompt=isFurtherContextNeededPrompt)
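# Unlike DMChain, this chain acts as a pure Yes/No classifier on whether more
# context is needed; the actual reply comes from SystemResponseChain below.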
"""### SystemResponse"""
SystemResponseTemplate = """
STACKTRACE: {StackTrace}
CONTEXT: {Context}
USER RESPONSE: {UserResponse}
isFurtherContextNeeded: {isFurtherContextNeeded}
SYSTEM RESPONSE: {SystemResponse}
"""
SystemResponsePromptTemplate = PromptTemplate(input_variables=["StackTrace", "Context", "isFurtherContextNeeded", "UserResponse", "SystemResponse"], template=SystemResponseTemplate)
SystemResponseExamples = [
{
"StackTrace": """$ sudo cog predict r8.im/allenhung1025/looptest@sha256:f5cd715e99046e0513fe2b4034e8f7d8c102525b02f49efb52b05f46fcb9ea83
Starting Docker image r8.im/allenhung1025/looptest@sha256:f5cd715e99046e0513fe2b4034e8f7d8c102525b02f49efb52b05f46fcb9ea83 and running setup()...
No CUDA runtime is found, using CUDA_HOME='/usr/local/cuda'
Traceback (most recent call last):
...
AssertionError:
Found no NVIDIA driver on your system. Please check that you
have an NVIDIA GPU and installed a driver from
http://www.nvidia.com/Download/index.aspx
ⅹ Failed to get container status: exit status 1""",
"Context": """$ cd /usr/local/cuda/samples/1_Utilities/deviceQuery
$ sudo make
$ ./deviceQuery
Detected 1 CUDA Capable device(s)
Device 0: "NVIDIA GeForce RTX 2080 Ti"
CUDA Driver Version / Runtime Version 11.4 / 11.2
CUDA Capability Major/Minor version number: 7.5
Total amount of global memory: 11016 MBytes (11551440896 bytes)
(68) Multiprocessors, ( 64) CUDA Cores/MP: 4352 CUDA Cores
...
Result = PASS""",
"UserResponse": "How do I fix this?",
"isFurtherContextNeeded": "Yes",
"SystemResponse": """ Does it work if you run the model with Docker directly?
$ docker run -d -p 5000:5000 --gpus all r8.im/allenhung1025/looptest@sha256:f5cd715e99046e0513fe2b4034e8f7d8c102525b02f49efb52b05f46fcb9ea83
$ curl http://localhost:5000/predict -X POST -F seed=-1""",
},
{
"StackTrace":"""ERROR in ./node_modules/react-router-dom/server.mjs 18:15-21
Can't import the named export 'Action' from non EcmaScript module (only default export is available)""",
"Context": "I am using react-router version 6.0.0. This issue can be reproduced by installing react-router and react-router-dom on rails react app using webpacker.",
"UserResponse": "Why is this not working?",
"isFurtherContextNeeded": "No",
"SystemResponse": "You probably have an older version of history installed. Install version 5.1.0 and you'll have the needed types."
},
{
"StackTrace":"MissingSchema Traceback (most recent call last)\nin ()\n3 fare = gr.inputs.Slider(minimum=0, maximum=1000, default=100, label=\"Fare (british pounds)\")\n4\n----> 5 gr.Interface(predict_survival, [sex, age, fare], \"label\", live=True).launch();\n\n7 frames\n/usr/local/lib/python3.6/dist-packages/requests/models.py in prepare_url(self, url, params)\n386 error = error.format(to_native_string(url, 'utf8'))\n387\n--> 388 raise MissingSchema(error)\n389\n390 if not host:\n\nMissingSchema: Invalid URL 'None': No schema supplied. Perhaps you meant http://None?`",
"Context": "The system response is alerting the user to ensure that the URL provided to the gr.Interface() is valid and includes the schema (e.g. http or https). The user must check for this in order to avoid the MissingSchema error.",
"UserResponse": "Can you show me how I could do this in code?",
"isFurtherContextNeeded": "No",
"SystemResponse": """You can check that the URL is valid and includes the schema by using the Python requests library. You can use the "requests.utils.urlparse" method to check the URL format, and add the schema if needed."""
},
]
SystemResponsePrefix = """You are an expert {isLanguage} machine learning developer. Please write your responses in a warm and friendly tone - like a teacher helping a student learn. Ask clarifying questions and debug the following error message. If 'isFurtherContextNeeded' is 'Yes', gather context on the given 'StackTrace' or the 'USER RESPONSE'. If 'isFollowUpNeeded' is set to 'No', suggest approaches to solving the problem. Do not repeat approaches, already suggested in 'Context'.:"""
SystemResponseSuffix = """
STACKTRACE: {StackTrace}
CONTEXT: {Context}
USER RESPONSE: {UserResponse}
isFurtherContextNeeded: {isFurtherContextNeeded}
SYSTEM RESPONSE:
"""
SystemResponsePrompt = FewShotPromptTemplate(
examples=SystemResponseExamples,
example_prompt=SystemResponsePromptTemplate,
suffix=SystemResponseSuffix,
prefix=SystemResponsePrefix,
input_variables=["isLanguage","StackTrace", "Context", "UserResponse", "isFurtherContextNeeded"]
)
SystemResponseChain = LLMChain(llm=llm, prompt=SystemResponsePrompt)
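# v2 splits v1's single prompt into two stages: isFurtherContextNeededChain
# decides whether to keep probing, and SystemResponseChain writes the reply
# conditioned on that decision, keeping each prompt focused on one task.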
"""## Summarizer Model"""
SummarizerTemplate = """ You are an expert {isLanguage} machine learning developer. Summarize the given context, system response for the stacktrace, for somebody that is trying to debug this stacktrace:
STACKTRACE: {StackTrace}
CONTEXT: {Context}
SYSTEM RESPONSE: {SystemResponse}
SUMMARY:
"""
SummarizerPrompt = PromptTemplate(input_variables=["isLanguage", "StackTrace", "SystemResponse", "Context"], template=SummarizerTemplate)
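# Smoke test for the summarizer: a real gradio/fsspec ImportError trace,
# summarized once with an empty running context before the app starts.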
StackTrace = r"""ImportError Traceback (most recent call last)
<ipython-input-13-43eca54f7d45> in <module>
----> 1 import gradio as gr
~\anaconda3\lib\site-packages\gradio\__init__.py in <module>
1 import pkgutil
2
----> 3 import gradio.components as components
4 import gradio.inputs as inputs
5 import gradio.outputs as outputs
~\anaconda3\lib\site-packages\gradio\components.py in <module>
29 from markdown_it import MarkdownIt
30
---> 31 from gradio import media_data, processing_utils, utils
32 from gradio.blocks import Block
33 from gradio.documentation import document, set_documentation_group
~\anaconda3\lib\site-packages\gradio\processing_utils.py in <module>
18 from PIL import Image, ImageOps, PngImagePlugin
19
---> 20 from gradio import encryptor, utils
21
22 with warnings.catch_warnings():
~\anaconda3\lib\site-packages\gradio\utils.py in <module>
32
33 import aiohttp
---> 34 import fsspec.asyn
35 import httpx
36 import requests
~\anaconda3\lib\site-packages\fsspec\asyn.py in <module>
14 from .exceptions import FSTimeoutError
15 from .spec import AbstractBufferedFile, AbstractFileSystem
---> 16 from .utils import is_exception, other_paths
17
18 private = re.compile("_[^_]")
ImportError: cannot import name 'is_exception' from 'fsspec.utils' (C:\Users\tompe\anaconda3\lib\site-packages\fsspec\utils.py)
def is_cat(x): return x[0].isupper()"""
SystemResponse = "It looks like the function 'is_exception' is missing from the fsspec.utils module. You can try reinstalling fsspec and updating your dependencies, to make sure you have the latest version of the module."
isLanguage = "python"
Context = ""
SummarizerChain = LLMChain(llm=llm, prompt=SummarizerPrompt)
chainOutput = SummarizerChain({"StackTrace": StackTrace, "Context": Context, "isLanguage": isLanguage, "SystemResponse": SystemResponse})['text']
print(chainOutput)
"""# Gradio Implementation"""
clerkieExamples=['''---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
/var/folders/49/9g9lxm9d3f3br8zlg2l2fmz80000gn/T/ipykernel_1349/2634282627.py in <module>
----> 1 torch.onnx.export(model, x, "output.onnx")
/opt/anaconda3/lib/python3.9/site-packages/torch/onnx/utils.py in export(model, args, f, export_params, verbose, training, input_names, output_names, operator_export_type, opset_version, do_constant_folding, dynamic_axes, keep_initializers_as_inputs, custom_opsets, export_modules_as_functions)
502 """
503
--> 504 _export(
505 model,
506 args,
/opt/anaconda3/lib/python3.9/site-packages/torch/onnx/utils.py in _export(model, args, f, export_params, verbose, training, input_names, output_names, operator_export_type, export_type, opset_version, do_constant_folding, dynamic_axes, keep_initializers_as_inputs, fixed_batch_size, custom_opsets, add_node_names, onnx_shape_inference, export_modules_as_functions)
1527 _validate_dynamic_axes(dynamic_axes, model, input_names, output_names)
1528
-> 1529 graph, params_dict, torch_out = _model_to_graph(
1530 model,
1531 args,
/opt/anaconda3/lib/python3.9/site-packages/torch/onnx/utils.py in _model_to_graph(model, args, verbose, input_names, output_names, operator_export_type, do_constant_folding, _disable_torch_constant_prop, fixed_batch_size, training, dynamic_axes)
1113
1114 try:
-> 1115 graph = _optimize_graph(
1116 graph,
1117 operator_export_type,
/opt/anaconda3/lib/python3.9/site-packages/torch/onnx/utils.py in _optimize_graph(graph, operator_export_type, _disable_torch_constant_prop, fixed_batch_size, params_dict, dynamic_axes, input_names, module)
580 _C._jit_pass_lint(graph)
581 _C._jit_pass_onnx_autograd_function_process(graph)
--> 582 _C._jit_pass_lower_all_tuples(graph)
583
584 # we now record some ops like ones/zeros
RuntimeError: outerNode->outputs().size() == node->inputs().size() INTERNAL ASSERT FAILED at "/Users/runner/work/pytorch/pytorch/pytorch/torch/csrc/jit/passes/dead_code_elimination.cpp":140, please report a bug to PyTorch.''', '''error[E0382]: use of moved value: `primes`
--> src/main.rs:9:31
|
9 | if vectorIsPrime(num, primes) {
| ^^^^^^ value moved here, in previous iteration of loop
|
= note: move occurs because `primes` has type `std::vec::Vec<u64>`, which does not implement the `Copy` trait
''', "Uncaught Error: Invariant Violation: Element type is invalid: expected a string (for built-in components) or a class/function (for composite components) but got: object."]
import random
import gradio as gr
import openai
import re
openai.api_key = os.environ["OPENAI_API_KEY"]
chat_variables = {
"Context": "",
"StackTrace": "",
"isLanguage": "",
}
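# NOTE: chat_variables is module-level state shared by every visitor, so
# concurrent Gradio sessions can overwrite each other's stacktrace and context;
# moving these fields into the per-session gr.State would isolate users.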
def chat(message, history):
    print(type(message))
    history = history or []
    print("len(history): ", len(history))
    if len(history) == 0:  # just the stacktrace
        response = "which language is this in? (python, java, c++, kotlin, etc.)"
        history.append((message, response))
        return history, history
    elif len(history) == 1:  # stacktrace + just entered the language
        # get stacktrace
        chat_variables["StackTrace"] = history[0][0]
        # get language
        chat_variables["isLanguage"] = message
        # set question (hardcoded for v1)
        UserResponse = "Any idea how I can solve this problem?"
        # set initial context to empty
        chat_variables["Context"] = ""
    else:  # subsequent prompts
        UserResponse = message
    # run the v1 debugger model to see if that improves latency
    initDebuggerModelResponse = DMChain({"StackTrace": chat_variables["StackTrace"], "UserResponse": UserResponse, "isLanguage": chat_variables["isLanguage"], "Context": chat_variables["Context"]})['text']
    print("initDebuggerModelResponse: ", initDebuggerModelResponse)
    if initDebuggerModelResponse.strip() in ("Yes", "No"):  # model stopped at the flag, i.e. didn't output a System Response
        response = SystemResponseChain({"StackTrace": chat_variables["StackTrace"], "UserResponse": UserResponse, "isLanguage": chat_variables["isLanguage"], "Context": chat_variables["Context"], "isFurtherContextNeeded": initDebuggerModelResponse.strip()})['text']
    else:
        response = initDebuggerModelResponse.split("SYSTEM RESPONSE:")[1]
    # summarize the conversation
    SummarizerChain = LLMChain(llm=llm, prompt=SummarizerPrompt)
    chat_variables["Context"] = SummarizerChain({"StackTrace": chat_variables["StackTrace"], "Context": chat_variables["Context"], "isLanguage": chat_variables["isLanguage"], "SystemResponse": response})['text']
    print("response: ", response)
    # get a code example after stacktrace + user just entered language
    if len(history) == 1:
        response_2 = ""
        UserResponse = "give me an example of the code for this based on the suggested approach"  # hardcode this to always give a code example after seeing stacktrace
        initDebuggerModelResponse = DMChain({"StackTrace": chat_variables["StackTrace"], "UserResponse": UserResponse, "isLanguage": chat_variables["isLanguage"], "Context": chat_variables["Context"]})['text']
        response_2 = initDebuggerModelResponse.split("SYSTEM RESPONSE:")[1]
        response += "\n" + response_2
    history.append((message, response))
    return history, history
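# The '.split("SYSTEM RESPONSE:")[1]' calls above raise IndexError whenever the
# model omits the marker. A more defensive extraction could look like this
# (hypothetical helper, not wired into chat() above):
def extract_system_response(model_output):
    """Return the text after 'SYSTEM RESPONSE:', or the whole output if the marker is absent."""
    _, sep, tail = model_output.partition("SYSTEM RESPONSE:")
    return tail.strip() if sep else model_output.strip()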
def set_text(inp):
    return inp

def clear(arg):
    return ""
with gr.Blocks() as demo:
    user_state = gr.State([])
    gr.Markdown("""# Welcome to Clerkie 🤖""")
    gr.Markdown("""Use Clerkie to help you debug your complex programming errors (especially the multi-function / extremely verbose ones). Clerkie uses GPT-3 under-the-hood, and can therefore work across several languages / types of errors - including python, linux kernel output, rust, javascript, C, C++, etc. Please feel free to give it a try and let us know what you think!""")
    gr.Markdown("""### P.S. [Check out our GPT-3 based Chrome Extension that debugs your code](https://chrome.google.com/webstore/detail/clerkie-ai/oenpmifpfnikheaolfpabffojfjakfnn) 🔥🔥🔥""")
    with gr.Row():
        with gr.Column():
            output = gr.Chatbot().style(color_map=("green", "pink"))
            # allow_flagging="never"
            inp = gr.Textbox(placeholder="enter your stacktrace here")
            print(type(inp))
            btn = gr.Button("Enter message")
    inp.submit(chat, [inp, user_state], [output, user_state])
    inp.submit(clear, inp, inp)
    btn.click(chat, [inp, user_state], [output, user_state])
    btn.click(clear, inp, inp)
    gr.Markdown("""### need help? got feedback? have thoughts? etc. → Join the [Discord](https://discord.gg/KvG3azf39U)""")
    gr.Examples(clerkieExamples,
                inputs=inp,
                cache_examples=False,
    )
if __name__ == "__main__":
demo.launch(debug=True) |