# Ollama-to-HF / app.py
import gradio as gr
import os
import requests
import json
import shutil
from huggingface_hub import HfApi, create_repo
from typing import Union
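
# What this Space does: given an Ollama library model reference such as "llama3:8b"
# (an illustrative example, not hard-coded anywhere below), it fetches the model's
# manifest and blobs from registry.ollama.ai and re-uploads them, unchanged, to a
# Hugging Face model repo owned by the logged-in user.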

def download_file(media_type, digest, image):
    url = f"https://registry.ollama.ai/v2/library/{image}/blobs/{digest}"
    f_tag = media_type.split('.')[-1]
    if f_tag == "model":
        f_tag = "model.gguf"
    file_name = f"blobs/{f_tag}"
    # Create the directory if it doesn't exist
    os.makedirs(os.path.dirname(file_name), exist_ok=True)
    # Download the file
    print(f"Downloading {url} to {file_name}")
    response = requests.get(url, allow_redirects=True)
    if response.status_code == 200:
        with open(file_name, 'wb') as f:
            f.write(response.content)
    else:
        print(f"Failed to download {url}")

def fetch_manifest(image, tag):
    manifest_url = f"https://registry.ollama.ai/v2/library/{image}/manifests/{tag}"
    response = requests.get(manifest_url)
    if response.status_code == 200:
        return response.json()
    else:
        return None
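
# The rest of the code only relies on the manifest having roughly this shape
# (field names as in the Docker/OCI v2 manifest schema; the values are placeholders):
#   {
#     "config": {"mediaType": "...", "digest": "sha256:..."},
#     "layers": [{"mediaType": "application/vnd.ollama.image.model", "digest": "sha256:..."}]
#   }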

def upload_to_huggingface(repo_id, folder_path, oauth_token: Union[gr.OAuthToken, None]):
    token = oauth_token.token if oauth_token else None
    api = HfApi(token=token)
    repo_path = api.create_repo(repo_id=repo_id, repo_type="model", exist_ok=True)
    print(f"Repo created {repo_path}")
    try:
        api.upload_folder(
            folder_path=folder_path,
            repo_id=repo_id,
            repo_type="model",
        )
        return "Upload successful", repo_path
    except Exception as e:
        # Return a (message, repo_path) pair here too, so the caller's tuple
        # unpacking doesn't raise and the real upload error is surfaced.
        return f"Upload failed: {str(e)}", repo_path

def process_image_tag(image_tag, repo_id, oauth_token: Union[gr.OAuthToken, None]):
    try:
        # Extract image and tag from the input
        image, tag = image_tag.split(':')

        # Fetch the manifest JSON
        manifest_json = fetch_manifest(image, tag)
        if not manifest_json or 'errors' in manifest_json:
            # Return two values to match the (Markdown, Image) outputs wired up below
            return (f"Failed to fetch the manifest for {image}:{tag}", "madllama.jpg")

        # Save the manifest JSON to the blobs folder
        manifest_file_path = "blobs/manifest"
        os.makedirs(os.path.dirname(manifest_file_path), exist_ok=True)
        with open(manifest_file_path, 'w') as f:
            json.dump(manifest_json, f)

        # Extract the mediaType and digest values from the JSON and download each layer
        layers = manifest_json.get('layers', [])
        for layer in layers:
            media_type = layer['mediaType']
            digest = layer['digest']
            download_file(media_type, digest, image)

        # Download the config file
        config_media_type = manifest_json.get('config', {}).get('mediaType')
        config_digest = manifest_json.get('config', {}).get('digest')
        if config_media_type and config_digest:
            download_file(config_media_type, config_digest, image)

        # Upload to Hugging Face Hub
        upload_result, repo_path = upload_to_huggingface(repo_id, 'blobs', oauth_token)

        # Delete the blobs folder
        shutil.rmtree('blobs')

        return (f'Find your repo <a href=\'{repo_path}\' target="_blank" style="text-decoration:underline">here</a>', "dramallama.jpg")
    except Exception as e:
        shutil.rmtree('blobs', ignore_errors=True)
        return (f"We got an error, my dude, here's what the error looks like: {str(e)}", "madllama.jpg")

# Create the Gradio interface using gr.Blocks
with gr.Blocks() as demo:
    gr.Markdown("# Ollama <> HF Hub 🤝")
    gr.Markdown("Enter the image and tag to download the corresponding files from the Ollama registry and upload them to the Hugging Face Hub.")
    gr.LoginButton()
    image_tag_input = gr.Textbox(placeholder="Enter Ollama ID", label="Image and Tag")
    repo_id_input = gr.Textbox(placeholder="Enter Hugging Face repo ID", label="Hugging Face Repo ID")
    result_output = gr.Markdown(label="Result")
    result_image = gr.Image(show_label=False)
    process_button = gr.Button("Process")
    process_button.click(fn=process_image_tag, inputs=[image_tag_input, repo_id_input], outputs=[result_output, result_image])

# Launch the Gradio app
demo.launch()
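
# Running this file directly (`python app.py`) serves the demo on Gradio's default
# local address; on a Hugging Face Space the same demo.launch() call is used as-is.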