unclemusclez committed
Commit f784967 · verified · 1 Parent(s): 09e1b1f

Update app.py

Files changed (1):
  1. app.py +53 -53
app.py CHANGED
@@ -75,65 +75,65 @@ def process_model(model_id, q_method, use_imatrix, imatrix_q_method, private_rep
 75           ollama_modelfile.close()
 76           print(quantized_gguf_path)
 77
 78 -         for ollama_q_method in ollama_q_methods:
 79 -             if ollama_q_method == "FP16":
 80 -                 ollama_conversion = f"ollama create -q {ollama_q_method} -f {model_file} {OLLAMA_USERNAME}/{ollama_model_name}:{ollama_q_method.lower()}"
 81 -             else:
 82 -                 ollama_conversion = f"ollama create -f {model_file} {OLLAMA_USERNAME}/{ollama_model_name}:{ollama_q_method.lower()}"
 83 -             ollama_conversion_result = subprocess.run(ollama_conversion, shell=True, capture_output=True)
 84 -             print(ollama_conversion_result)
 85 -             if ollama_conversion_result.returncode != 0:
 86 -                 raise Exception(f"Error converting to Ollama: {ollama_conversion_result.stderr}")
 87 -             print("Model converted to Ollama successfully!")
 88
 89 -             if maintainer:
 90 -                 ollama_push = f"ollama push {OLLAMA_USERNAME}/{model_name}:{q_method.lower()}"
 91 -                 ollama_rm = f"ollama rm {OLLAMA_USERNAME}/{model_name}:{q_method.lower()}"
 92 -             else:
 93 -                 ollama_push = f"ollama push {OLLAMA_USERNAME}/{ollama_model_name}:{q_method.lower()}"
 94 -                 ollama_rm = f"ollama rm {OLLAMA_USERNAME}/{ollama_model_name}:{q_method.lower()}"
 95
 96 -             ollama_push_result = subprocess.run(ollama_push, shell=True, capture_output=True)
 97 -             print(ollama_push_result)
 98 -             if ollama_push_result.returncode != 0:
 99                   raise Exception(f"Error pushing to Ollama: {ollama_push_result.stderr}")
100               print("Model pushed to Ollama library successfully!")
101 -
102 -             ollama_rm_result = subprocess.run(ollama_rm, shell=True, capture_output=True)
103 -             print(ollama_rm_result)
104 -             if ollama_rm_result.returncode != 0:
105 -                 raise Exception(f"Error removing to Ollama: {ollama_rm_result.stderr}")
106               print("Model pushed to Ollama library successfully!")
107 -
108
109 -             if latest:
110 -                 ollama_copy = f"ollama cp {OLLAMA_USERNAME}/{model_id.lower()}:{q_method.lower()} {OLLAMA_USERNAME}/{model_id.lower()}:latest"
111 -                 ollama_copy_result = subprocess.run(ollama_copy, shell=True, capture_output=True)
112 -                 print(ollama_copy_result)
113 -                 if ollama_copy_result.returncode != 0:
114 -                     raise Exception(f"Error converting to Ollama: {ollama_push_result.stderr}")
115 -                 print("Model pushed to Ollama library successfully!")
116 -
117 -                 if maintainer == True:
118 -                     ollama_push_latest = f"ollama push {OLLAMA_USERNAME}/{model_name}:latest"
119 -                     ollama_rm_latest = f"ollama rm {OLLAMA_USERNAME}/{model_name}:latest"
120 -                 else:
121 -                     ollama_push_latest = f"ollama push {OLLAMA_USERNAME}/{ollama_model_name}:latest"
122 -                     ollama_rm_latest = f"ollama rm {OLLAMA_USERNAME}/{ollama_model_name}:latest"
123 -
124 -                 ollama_push_latest_result = subprocess.run(ollama_push_latest, shell=True, capture_output=True)
125 -                 print(ollama_push_latest_result)
126 -                 if ollama_push_latest_result.returncode != 0:
127 -                     raise Exception(f"Error pushing to Ollama: {ollama_push_result.stderr}")
128 -                 print("Model pushed to Ollama library successfully!")
129 -
130 -                 ollama_rm_latest_result = subprocess.run(ollama_rm_latest, shell=True, capture_output=True)
131 -                 print(ollama_rm_latest_result)
132 -                 if ollama_rm_latest_result.returncode != 0:
133 -                     raise Exception(f"Error pushing to Ollama: {ollama_rm_latest.stderr}")
134 -                 print("Model pushed to Ollama library successfully!")
135 -
136 -
137       except Exception as e:
138           return (f"Error: {e}", "error.png")
139       finally:
 
 75           ollama_modelfile.close()
 76           print(quantized_gguf_path)
 77
 78 +         # for ollama_q_method in ollama_q_methods:
 79 +         if ollama_q_method == "FP16":
 80 +             ollama_conversion = f"ollama create -f {model_file} {OLLAMA_USERNAME}/{ollama_model_name}:{ollama_q_method.lower()}"
 81 +         else:
 82 +             ollama_conversion = f"ollama create -q {ollama_q_method} -f {model_file} {OLLAMA_USERNAME}/{ollama_model_name}:{ollama_q_method.lower()}"
 83 +         ollama_conversion_result = subprocess.run(ollama_conversion, shell=True, capture_output=True)
 84 +         print(ollama_conversion_result)
 85 +         if ollama_conversion_result.returncode != 0:
 86 +             raise Exception(f"Error converting to Ollama: {ollama_conversion_result.stderr}")
 87 +         print("Model converted to Ollama successfully!")
 88 +
 89 +         if maintainer:
 90 +             ollama_push = f"ollama push {OLLAMA_USERNAME}/{model_name}:{q_method.lower()}"
 91 +             ollama_rm = f"ollama rm {OLLAMA_USERNAME}/{model_name}:{q_method.lower()}"
 92 +         else:
 93 +             ollama_push = f"ollama push {OLLAMA_USERNAME}/{ollama_model_name}:{q_method.lower()}"
 94 +             ollama_rm = f"ollama rm {OLLAMA_USERNAME}/{ollama_model_name}:{q_method.lower()}"
 95 +
 96 +         ollama_push_result = subprocess.run(ollama_push, shell=True, capture_output=True)
 97 +         print(ollama_push_result)
 98 +         if ollama_push_result.returncode != 0:
 99 +             raise Exception(f"Error pushing to Ollama: {ollama_push_result.stderr}")
100 +         print("Model pushed to Ollama library successfully!")
101 +
102 +         ollama_rm_result = subprocess.run(ollama_rm, shell=True, capture_output=True)
103 +         print(ollama_rm_result)
104 +         if ollama_rm_result.returncode != 0:
105 +             raise Exception(f"Error removing to Ollama: {ollama_rm_result.stderr}")
106 +         print("Model pushed to Ollama library successfully!")
107 +
108 +
109 +         if latest:
110 +             ollama_copy = f"ollama cp {OLLAMA_USERNAME}/{model_id.lower()}:{q_method.lower()} {OLLAMA_USERNAME}/{model_id.lower()}:latest"
111 +             ollama_copy_result = subprocess.run(ollama_copy, shell=True, capture_output=True)
112 +             print(ollama_copy_result)
113 +             if ollama_copy_result.returncode != 0:
114 +                 raise Exception(f"Error converting to Ollama: {ollama_push_result.stderr}")
115 +             print("Model pushed to Ollama library successfully!")
116
117 +             if maintainer == True:
118 +                 ollama_push_latest = f"ollama push {OLLAMA_USERNAME}/{model_name}:latest"
119 +                 ollama_rm_latest = f"ollama rm {OLLAMA_USERNAME}/{model_name}:latest"
120 +             else:
121 +                 ollama_push_latest = f"ollama push {OLLAMA_USERNAME}/{ollama_model_name}:latest"
122 +                 ollama_rm_latest = f"ollama rm {OLLAMA_USERNAME}/{ollama_model_name}:latest"
123
124 +             ollama_push_latest_result = subprocess.run(ollama_push_latest, shell=True, capture_output=True)
125 +             print(ollama_push_latest_result)
126 +             if ollama_push_latest_result.returncode != 0:
127                   raise Exception(f"Error pushing to Ollama: {ollama_push_result.stderr}")
128               print("Model pushed to Ollama library successfully!")
129 +
130 +             ollama_rm_latest_result = subprocess.run(ollama_rm_latest, shell=True, capture_output=True)
131 +             print(ollama_rm_latest_result)
132 +             if ollama_rm_latest_result.returncode != 0:
133 +                 raise Exception(f"Error pushing to Ollama: {ollama_rm_latest.stderr}")
134               print("Model pushed to Ollama library successfully!")
135
136 +
137       except Exception as e:
138           return (f"Error: {e}", "error.png")
139       finally:
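
For context, the updated block shells out to the Ollama CLI with subprocess.run and checks each returncode by hand. The standalone sketch below restates that create → push → rm sequence (plus the optional :latest re-tag) under stated assumptions: the run_ollama helper, the repo variable, and the placeholder values (example-user, example-model, Modelfile, Q4_K_M) are illustrative and do not appear in app.py.

import subprocess

def run_ollama(cmd: str) -> None:
    # Hypothetical helper mirroring the pattern in app.py: run the CLI call,
    # echo its output, and fail loudly on a non-zero exit code.
    result = subprocess.run(cmd, shell=True, capture_output=True, text=True)
    print(result.stdout, result.stderr)
    if result.returncode != 0:
        raise RuntimeError(f"{cmd!r} failed: {result.stderr}")

# Placeholder values; in the Space these come from the Gradio inputs.
OLLAMA_USERNAME = "example-user"
ollama_model_name = "example-model"
model_file = "Modelfile"   # Modelfile pointing at the quantized GGUF
quant = "Q4_K_M"           # any llama.cpp quant type, or "FP16" to skip quantization
tag = quant.lower()
repo = f"{OLLAMA_USERNAME}/{ollama_model_name}"

# FP16 imports the GGUF as-is; any other method passes -q so that
# `ollama create` quantizes the weights while importing them.
if quant == "FP16":
    run_ollama(f"ollama create -f {model_file} {repo}:{tag}")
else:
    run_ollama(f"ollama create -q {quant} -f {model_file} {repo}:{tag}")

run_ollama(f"ollama push {repo}:{tag}")   # publish the tag to the Ollama library
run_ollama(f"ollama rm {repo}:{tag}")     # drop the local copy to reclaim disk space

# Optional: re-tag the same build as :latest and publish that tag too.
run_ollama(f"ollama cp {repo}:{tag} {repo}:latest")
run_ollama(f"ollama push {repo}:latest")
run_ollama(f"ollama rm {repo}:latest")

Raising on a non-zero exit code is what lets the surrounding try/except in process_model report a failed CLI call back to the UI as the ("Error: ...", "error.png") tuple seen at the end of the hunk.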