tommy24 committed on
Commit 666bc15 · 1 Parent(s): 899934c

Update app.py

Files changed (1)
  1. app.py +7 -8
app.py CHANGED
@@ -4,14 +4,14 @@ from langchain.llms import GPT4All
 from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler


-import requests
+# import requests

-url = "https://huggingface.co/TheBloke/Nous-Hermes-13B-GGML/resolve/main/nous-hermes-13b.ggmlv3.q4_0.bin"
+# url = "https://huggingface.co/TheBloke/Nous-Hermes-13B-GGML/resolve/main/nous-hermes-13b.ggmlv3.q4_0.bin"

-response = requests.get(url)
+# response = requests.get(url)

-with open("nous-hermes-13b.ggmlv3.q4_0.bin", "wb") as f:
-    f.write(response.content)
+# with open("nous-hermes-13b.ggmlv3.q4_0.bin", "wb") as f:
+#     f.write(response.content)


 print("DONE")
@@ -24,9 +24,8 @@ def func(prompt):

 prompt = PromptTemplate(template=template, input_variables=["question"])

-local_path = (
-    "nous-hermes-13b.ggmlv3.q4_0.bin" # replace with your desired local file path
-)
+local_path = "https://tommy24-llm.hf.space/file=nous-hermes-13b.ggmlv3.q4_0.bin"
+

 # Callbacks support token-wise streaming
 callbacks = [StreamingStdOutCallbackHandler()]
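
For context, the commit comments out the requests-based download and points local_path at a file URL served by the Space (https://tommy24-llm.hf.space/file=nous-hermes-13b.ggmlv3.q4_0.bin). The sketch below is not part of this commit; it shows one alternative way to obtain the same weights, assuming langchain's GPT4All wrapper is given a path to a local model file. The repo id and filename are taken from the URL in the diff; everything else is illustrative.

# Sketch only, not the commit's code: fetch the GGML weights once via the
# huggingface_hub cache and load them with langchain's GPT4All wrapper.
from huggingface_hub import hf_hub_download
from langchain.llms import GPT4All
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler

# Repo and filename come from the URL referenced in the diff.
local_path = hf_hub_download(
    repo_id="TheBloke/Nous-Hermes-13B-GGML",
    filename="nous-hermes-13b.ggmlv3.q4_0.bin",
)

# Token-wise streaming to stdout, mirroring the callback already used in app.py.
callbacks = [StreamingStdOutCallbackHandler()]
llm = GPT4All(model=local_path, callbacks=callbacks, verbose=True)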