import gradio as gr
import subprocess
from huggingface_hub import HfApi, snapshot_download

api = HfApi()


def process_model(model_id: str, file_path: str, key: str, value: str, hf_token):
    MODEL_NAME = model_id.split("/")[-1]
    FILE_NAME = file_path.split("/")[-1]

    # Download only the requested file from the source repo
    snapshot_download(
        repo_id=model_id,
        allow_patterns=file_path,
        local_dir=f"{MODEL_NAME}",
    )
    print("Model downloaded successfully!")

    # Rewrite the given metadata key in the GGUF file using llama.cpp's gguf_set_metadata script
    metadata_update = f"python llama.cpp/gguf-py/scripts/gguf_set_metadata.py {MODEL_NAME}/{file_path} {key} {value}"
    subprocess.run(metadata_update, shell=True)
    print(f"Model metadata {key} updated to {value} successfully!")

    # Upload the updated GGUF files (plus any markdown, e.g. a README) back to the repo
    api.upload_folder(
        folder_path=MODEL_NAME,
        repo_id=model_id,
        allow_patterns=["*.gguf", "*.md"],
        token=hf_token,
    )
    print("Uploaded successfully!")
    return "Processing complete."

# Create Gradio interface
iface = gr.Interface(
    fn=process_model,
    inputs=[
        gr.Textbox(lines=1, label="Model ID"),
        gr.Textbox(lines=1, label="File path"),
        gr.Textbox(lines=1, label="Key"),
        gr.Textbox(lines=1, label="Value"),
        gr.Textbox(lines=1, label="Token"),
    ],
    outputs="text",
)

# Launch the interface
iface.launch(debug=True)