Update app.py
app.py
CHANGED
@@ -18,8 +18,9 @@ OLLAMA_SERVICE_THREAD.start()
 print("Giving ollama serve a moment")
 time.sleep(10)
 
-#
-model = "
+# Uncomment and modify the model to what you want locally
+# model = "moondream"
+model = os.environ.get("MODEL")
 
 subprocess.run(f"~/ollama pull {model}", shell=True)
 
@@ -30,13 +31,10 @@ from ollama import Client
 client = Client(host='http://localhost:11434', timeout=120)
 
 HF_TOKEN = os.environ.get("HF_TOKEN", None)
-MODEL_ID = os.environ.get("MODEL_ID", "google/gemma-2-9b-it")
-MODEL_NAME = MODEL_ID.split("/")[-1]
 
 TITLE = "<h1><center>ollama-Chat</center></h1>"
 
 DESCRIPTION = f"""
-<h3>MODEL: <a href="https://hf.co/{MODEL_ID}">{MODEL_NAME}</a></h3>
 <center>
 <p>Feel free to test models with ollama.
 <br>
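
With this change the model is selected entirely through the MODEL environment variable (or by uncommenting the hardcoded line). A minimal sketch of how the pulled model can then be queried with the same Client configuration; the "moondream" fallback, the prompt, and the streaming loop are illustrative assumptions, not part of this commit:

import os
import subprocess
from ollama import Client

# Pick the model from the environment, as in the updated app.py
# (the "moondream" default is only an assumption for local testing)
model = os.environ.get("MODEL", "moondream")

# Pull the model into the local ollama server (assumes `ollama serve` is already running)
subprocess.run(f"ollama pull {model}", shell=True)

client = Client(host="http://localhost:11434", timeout=120)

# Stream a reply chunk by chunk; the message content is illustrative only
for chunk in client.chat(
    model=model,
    messages=[{"role": "user", "content": "Hello!"}],
    stream=True,
):
    print(chunk["message"]["content"], end="", flush=True)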