unclemusclez committed on
Commit
0058e79
·
verified ·
1 Parent(s): c2ac3c4

add hash_oauth and add ollama@hash_oauth instance

Browse files
Files changed (1) hide show
  1. app.py +43 -5
app.py CHANGED
@@ -2,6 +2,7 @@ import os
2
  import shutil
3
  import subprocess
4
  import signal
 
5
 
6
  os.environ["GRADIO_ANALYTICS_ENABLED"] = "False"
7
  import gradio as gr
@@ -18,12 +19,45 @@ from textwrap import dedent
18
  HOME = os.environ.get("HOME")
19
  # token = os.environ.get("HF_TOKEN")
20
  library_username = os.environ.get("OLLAMA_USERNAME").lower()
21
- ollama_pubkey = open(f"{HOME}/.ollama/id_ed25519.pub", "r")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
22
 
23
  def ollamafy_model(login, account, model_id, ollama_library_username , ollama_q_method, latest, maintainer, oauth_token: gr.OAuthToken | None):
24
  ollama_library_username: library_username | None
25
  if oauth_token.token is None:
26
  raise ValueError("You must be logged in to use Ollamafy")
 
 
27
  # username = whoami(oauth_token.token)["name"]
28
  model_name = model_id.split('/')[-1]
29
  fp16 = f"{model_name}-fp16.gguf"
@@ -88,10 +122,10 @@ def ollamafy_model(login, account, model_id, ollama_library_username , ollama_q_
88
  print("Model converted to Ollama successfully!")
89
 
90
  if maintainer:
91
- ollama_push = f"ollama push {library_username}/{model_name}:{q_method.lower()}"
92
  ollama_rm = f"ollama rm {library_username}/{model_name}:{q_method.lower()}"
93
  else:
94
- ollama_push = f"ollama push {library_username}/{ollama_model_name}:{q_method.lower()}"
95
  ollama_rm = f"ollama rm {library_username}/{ollama_model_name}:{q_method.lower()}"
96
 
97
  ollama_push_result = subprocess.run(ollama_push, shell=True, capture_output=True)
@@ -118,10 +152,10 @@ def ollamafy_model(login, account, model_id, ollama_library_username , ollama_q_
118
  print("Model pushed to Ollama library successfully!")
119
 
120
  if maintainer:
121
- ollama_push_latest = f"ollama push {library_username}/{model_name}:latest"
122
  ollama_rm_latest = f"ollama rm {library_username}/{model_name}:latest"
123
  else:
124
- ollama_push_latest = f"ollama push {library_username}/{ollama_model_name}:latest"
125
  ollama_rm_latest = f"ollama rm {library_username}/{ollama_model_name}:latest"
126
 
127
  ollama_push_latest_result = subprocess.run(ollama_push_latest, shell=True, capture_output=True)
@@ -154,6 +188,10 @@ with gr.Blocks(css=css) as demo:
154
  login = gr.LoginButton(
155
  min_width=250,
156
  )
 
 
 
 
157
 
158
  model_id = HuggingfaceHubSearch(
159
  label="Hugging Face Hub Model ID",
 
2
  import shutil
3
  import subprocess
4
  import signal
5
+ import hashlib
6
 
7
  os.environ["GRADIO_ANALYTICS_ENABLED"] = "False"
8
  import gradio as gr
 
19
  HOME = os.environ.get("HOME")
20
  # token = os.environ.get("HF_TOKEN")
21
  library_username = os.environ.get("OLLAMA_USERNAME").lower()
22
+
23
# Shell commands to control the system-wide Ollama service.
# BUG FIX: systemctl takes "systemctl <verb> <unit>"; the original
# "systemctl ollama start" order is invalid and would always fail.
# Also dropped the pointless f-prefix on strings with no placeholders.
ollama_start = "systemctl start ollama"
ollama_stop = "systemctl stop ollama"
25
+
26
+
27
def regenerate_pubkey(oauth_token: gr.OAuthToken | None):
    """Reset the per-user Ollama state keyed by a hash of the OAuth token.

    Stops the Ollama service, removes the user's per-token HOME folder, and
    restarts the service so a fresh keypair is generated on next start.

    Args:
        oauth_token: Gradio OAuth token of the logged-in user, or None.

    Raises:
        ValueError: if the user is not logged in.
        Exception: if any of the shell commands exits non-zero.
    """
    # Guard both a missing token object and a token object with no token
    # string (the original dereferenced oauth_token.token unconditionally,
    # which raises AttributeError when oauth_token is None).
    if oauth_token is None or oauth_token.token is None:
        raise ValueError("You must be logged in to use Ollamafy")

    # BUG FIX: the original called `unicode_string.encode(oauth_token)` —
    # `unicode_string` is undefined (NameError). Hash the token string itself.
    hash_oauth = hashlib.sha256(oauth_token.token.encode()).hexdigest()

    # NOTE(review): the pubkey lives under {HOME}/.ollama/{hash_oauth}/ but
    # the delete below targets {HOME}/{hash_oauth} — confirm which path the
    # ollama@<hash> instance actually uses. The original also opened the
    # pubkey file here and leaked the handle without reading it; removed.
    delete_ihome = f"rm -Rf {HOME}/{hash_oauth}"
    # NOTE(review): per-user instance commands are built but never run here
    # (the code below stops/starts the system-wide service) — TODO confirm
    # whether the ollama@{hash_oauth} instance should be cycled instead.
    ollama_istart = f"systemctl ollama@{hash_oauth} start"
    ollama_istop = f"systemctl ollama@{hash_oauth} stop"

    result = subprocess.run(ollama_stop, shell=True, capture_output=True)
    print(result)
    if result.returncode != 0:
        raise Exception(f"Error stopping Ollama {result.stderr}")
    print("Ollama stopped successfully!")

    result = subprocess.run(delete_ihome, shell=True, capture_output=True)
    print(result)
    if result.returncode != 0:
        raise Exception(f"Error removing Ollama HOME folder {result.stderr}")
    print("Ollama HOME folder removed successfully!")

    result = subprocess.run(ollama_start, shell=True, capture_output=True)
    print(result)
    if result.returncode != 0:
        raise Exception(f"Error starting Ollama {result.stderr}")
    print("Ollama started successfully!")
54
 
55
  def ollamafy_model(login, account, model_id, ollama_library_username , ollama_q_method, latest, maintainer, oauth_token: gr.OAuthToken | None):
56
  ollama_library_username: library_username | None
57
  if oauth_token.token is None:
58
  raise ValueError("You must be logged in to use Ollamafy")
59
+ hash_oauth = hashlib.sha256(unicode_string.encode(oauth_token)).hexdigest()
60
+
61
  # username = whoami(oauth_token.token)["name"]
62
  model_name = model_id.split('/')[-1]
63
  fp16 = f"{model_name}-fp16.gguf"
 
122
  print("Model converted to Ollama successfully!")
123
 
124
  if maintainer:
125
+ ollama_push = f"ollama@{hash_oauth} push {library_username}/{model_name}:{q_method.lower()}"
126
  ollama_rm = f"ollama rm {library_username}/{model_name}:{q_method.lower()}"
127
  else:
128
+ ollama_push = f"ollama@{hash_oauth} push {library_username}/{ollama_model_name}:{q_method.lower()}"
129
  ollama_rm = f"ollama rm {library_username}/{ollama_model_name}:{q_method.lower()}"
130
 
131
  ollama_push_result = subprocess.run(ollama_push, shell=True, capture_output=True)
 
152
  print("Model pushed to Ollama library successfully!")
153
 
154
  if maintainer:
155
+ ollama_push_latest = f"ollama@{hash_oauth} push {library_username}/{model_name}:latest"
156
  ollama_rm_latest = f"ollama rm {library_username}/{model_name}:latest"
157
  else:
158
+ ollama_push_latest = f"ollama@{hash_oauth} push {library_username}/{ollama_model_name}:latest"
159
  ollama_rm_latest = f"ollama rm {library_username}/{ollama_model_name}:latest"
160
 
161
  ollama_push_latest_result = subprocess.run(ollama_push_latest, shell=True, capture_output=True)
 
188
  login = gr.LoginButton(
189
  min_width=250,
190
  )
191
+ generate_pubkey gr.Button (
192
+ value=regenerate_pubkey(),
193
+ min_width=250,
194
+ )
195
 
196
  model_id = HuggingfaceHubSearch(
197
  label="Hugging Face Hub Model ID",