Update app.py
app.py CHANGED
@@ -1,10 +1,35 @@
 import gradio as gr
-import os
-
-os.system("wget https://huggingface.co/TheBloke/Nous-Hermes-13B-GGML/resolve/main/nous-hermes-13b.ggmlv3.q4_0.bin")
-
-def
+from langchain import PromptTemplate, LLMChain
+from langchain.llms import GPT4All
+from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
+
+
+def func(prompt):
+    template = """Question: {question}
+
+    Answer: Let's think step by step."""
+
+    # Keep the user's text before the name `prompt` is reused for the template object
+    question = prompt
+    prompt = PromptTemplate(template=template, input_variables=["question"])
+
+    local_path = (
+        "https://tommy24-llm.hf.space/file=nous-hermes-13b.ggmlv3.q4_0.bin"  # replace with your desired local file path
+    )
+
+    # Callbacks support token-wise streaming
+    callbacks = [StreamingStdOutCallbackHandler()]
+
+    # Verbose is required to pass to the callback manager
+    llm = GPT4All(model=local_path, callbacks=callbacks, verbose=True)
+
+    # If you want to use a custom model add the backend parameter
+    # Check https://docs.gpt4all.io/gpt4all_python.html for supported backends
+    llm = GPT4All(model=local_path, backend="gptj", callbacks=callbacks, verbose=True)
+
+    llm_chain = LLMChain(prompt=prompt, llm=llm)
+    return llm_chain.run(question)
+
+
+iface = gr.Interface(fn=func, inputs="text", outputs="text")
 iface.launch()
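Side note: the langchain GPT4All wrapper loads weights from a local file, so the https://... string passed as model= above will not resolve by itself; the removed wget line is what used to put the GGML file on disk. Below is a minimal sketch of one way to fetch the file first. It assumes huggingface_hub is installed and reuses the repo and file names from the wget URL removed in this commit; it is not part of the committed code.

from huggingface_hub import hf_hub_download

# Download the quantized GGML weights once and get back a real local path.
# repo_id and filename are taken from the wget URL removed in this commit.
local_path = hf_hub_download(
    repo_id="TheBloke/Nous-Hermes-13B-GGML",
    filename="nous-hermes-13b.ggmlv3.q4_0.bin",
)

# local_path can then replace the https://... string passed to GPT4All(model=...).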