AFischer1985 committed · Commit 251281a
1 Parent(s): 39043d6
Update app.py
app.py CHANGED
@@ -7,15 +7,45 @@ url="https://huggingface.co/TheBloke/WizardLM-13B-V1.2-GGUF/resolve/main/wizardl
 response = requests.get(url)
 with open("./model.gguf", mode="wb") as file:
     file.write(response.content)
-
-llm = Llama(model_path="./model.gguf")
-def response(input_text, history):
-    output = llm(f"Q: {input_text} A:", max_tokens=256, stop=["Q:", "\n"], echo=True)
-    return output['choices'][0]['text']
-
-gr.ChatInterface(response).queue().launch(share=True) #False, server_name="0.0.0.0", server_port=7864)
+print("Model downloaded")
 
 command = ["python3", "-m", "llama_cpp.server", "--model", "./model.gguf", "--host", "0.0.0.0", "--port", "2600"]
 subprocess.Popen(command)
+print("Model ready!")
 
-
+#llm = Llama(model_path="./model.gguf")
+#def response(input_text, history):
+#  output = llm(f"Q: {input_text} A:", max_tokens=256, stop=["Q:", "\n"], echo=True)
+#  return output['choices'][0]['text']
+
+def response(message, history):
+    #url="https://afischer1985-wizardlm-13b-v1-2-q4-0-gguf.hf.space/v1/completions"
+    url="http://localhost:2600/v1/completions"
+    body={"prompt":"Im Folgenden findest du eine Instruktion, die eine Aufgabe beschreibt. Schreibe eine Antwort, um die Aufgabe zu lösen.\n\n### Instruktion:\n"+message+"\n\n### Antwort:","max_tokens":500, "echo":"False","stream":"True"} #128
+    response=""
+    buffer=""
+    print("URL: "+url)
+    print("User: "+message+"\nAI: ")
+    for text in requests.post(url, json=body, stream=True): #-H 'accept: application/json' -H 'Content-Type: application/json'
+        print("*** Raw String: "+str(text)+"\n***\n")
+        text=text.decode('utf-8')
+        if(text.startswith(": ping -")==False): buffer=str(buffer)+str(text)
+        print("\n*** Buffer: "+str(buffer)+"\n***\n")
+        buffer=buffer.split('"finish_reason": null}]}')
+        if(len(buffer)==1):
+            buffer="".join(buffer)
+            pass
+        if(len(buffer)==2):
+            part=buffer[0]+'"finish_reason": null}]}'
+            if(part.lstrip('\n\r').startswith("data: ")): part=part.lstrip('\n\r').replace("data: ", "")
+            try:
+                part = str(json.loads(part)["choices"][0]["text"])
+                print(part, end="", flush=True)
+                response=response+part
+                buffer="" # reset buffer
+            except:
+                pass
+        yield response
+
+gr.ChatInterface(response).queue().launch(share=True) #False, server_name="0.0.0.0", server_port=7864)
+print("Interface up and running!")
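Note that subprocess.Popen returns as soon as the server process is spawned, so the Gradio interface can come up before llama_cpp.server is actually listening, and the first chat request may fail with a connection error. A minimal readiness poll could close that gap; the sketch below assumes the server exposes the usual OpenAI-compatible /v1/models route, and wait_for_server is a hypothetical helper, not part of this commit.

import time
import requests

def wait_for_server(url="http://localhost:2600/v1/models", timeout=60):
    # Poll the server until it answers or the timeout elapses. (Hypothetical
    # helper; the /v1/models route is an assumption about the server.)
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            if requests.get(url, timeout=2).ok:
                return True
        except requests.exceptions.ConnectionError:
            pass  # server process not accepting connections yet
        time.sleep(1)
    return False

Called between subprocess.Popen(command) and the gr.ChatInterface launch, this would replace the optimistic print("Model ready!").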
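The buffer juggling in response exists because iterating a requests Response yields fixed-size byte chunks, which can split a server-sent event (a "data: {...}" line) across iterations; the code therefore accumulates chunks and splits on the literal '"finish_reason": null}]}' suffix to detect a complete event. (The German Alpaca-style prompt translates roughly to: "Below is an instruction that describes a task. Write a response that solves the task.") A shorter route to the same parse is iter_lines(), which reassembles the stream into whole lines; this sketch assumes the server terminates the stream with the OpenAI-style "data: [DONE]" sentinel, and stream_completion is a hypothetical name, not part of the commit.

import json
import requests

def stream_completion(prompt, url="http://localhost:2600/v1/completions"):
    body = {"prompt": prompt, "max_tokens": 500, "stream": True}
    with requests.post(url, json=body, stream=True) as resp:
        text = ""
        for line in resp.iter_lines():  # whole SSE lines, not raw byte chunks
            if not line:
                continue  # skip keep-alive blank lines between events
            line = line.decode("utf-8")
            if line.startswith(": ping"):  # server heartbeat, carries no payload
                continue
            if line.startswith("data: "):
                payload = line[len("data: "):]
                if payload.strip() == "[DONE]":  # assumed end-of-stream marker
                    break
                text += json.loads(payload)["choices"][0]["text"]
                yield text

Because the handler is a generator that yields the growing string, gr.ChatInterface streams partial answers to the browser as they arrive; the rewritten loop keeps that contract.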