AFischer1985 committed
Commit 39043d6
1 Parent(s): a4611e8

Update app.py

Files changed (1)
app.py +5 -13
app.py CHANGED
@@ -1,6 +1,4 @@
-from llama_cpp.server.app import create_app, Settings
-from fastapi.responses import HTMLResponse
-import os
+import subprocess
 import requests
 from llama_cpp import Llama
 import gradio as gr
@@ -10,16 +8,6 @@ response = requests.get(url)
 with open("./model.gguf", mode="wb") as file:
     file.write(response.content)
 
-app = create_app(
-    Settings(
-        n_threads=2, # set to number of cpu cores
-        model="./model.gguf",
-        embedding=False
-    )
-)
-
-print(app)
-
 llm = Llama(model_path="./model.gguf")
 def response(input_text, history):
     output = llm(f"Q: {input_text} A:", max_tokens=256, stop=["Q:", "\n"], echo=True)
@@ -27,3 +15,7 @@ def response(input_text, history):
 
 gr.ChatInterface(response).queue().launch(share=True) #False, server_name="0.0.0.0", server_port=7864)
 
+command = ["python3", "-m", "llama_cpp.server", "--model", "./model.gguf", "--host", "0.0.0.0", "--port", "2600"]
+subprocess.Popen(command)
+
+print("Hello world")
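
With this change the app no longer builds the FastAPI app in-process via create_app(Settings(...)); instead it launches llama_cpp.server as a separate subprocess on port 2600, alongside the Gradio chat UI. Below is a minimal sketch of how that server could be queried, assuming it exposes llama-cpp-python's usual OpenAI-compatible /v1/completions endpoint; the prompt, max_tokens, and stop values are illustrative and not part of the commit.

import requests

# Query the llama_cpp.server subprocess started by app.py.
# Assumes llama-cpp-python's OpenAI-compatible completions endpoint;
# prompt, max_tokens, and stop values are illustrative only.
resp = requests.post(
    "http://0.0.0.0:2600/v1/completions",
    json={
        "prompt": "Q: What is a GGUF file? A:",
        "max_tokens": 256,
        "stop": ["Q:", "\n"],
    },
)
print(resp.json()["choices"][0]["text"])

Running the server in its own process keeps it independent of the Gradio event loop, at the cost of loading the model twice: once for the in-process Llama(model_path="./model.gguf") used by the chat UI, and once inside the subprocess.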