import gradio as gr
from llama_cpp import Llama
# Load the model (use a light GGUF quantization so it fits CPU Spaces).
# NOTE(review): the original passed "TheBloke/MythoMax-L2-13B-GGUF/mythomax.gguf"
# as model_path, but that is a Hugging Face repo id + filename, not a local
# filesystem path — Llama(model_path=...) raises when the file does not exist.
# Llama.from_pretrained downloads the GGUF from the Hub and loads it.
llm = Llama.from_pretrained(
    repo_id="TheBloke/MythoMax-L2-13B-GGUF",
    # Q4_K_M is the usual quality/size trade-off for CPU inference —
    # TODO confirm this is the variant you want from the repo's file list.
    filename="mythomax-l2-13b.Q4_K_M.gguf",
    n_ctx=2048,  # context window, unchanged from the original
)
def chat(input_text, max_tokens=100):
    """Generate a completion for *input_text* with the loaded GGUF model.

    Args:
        input_text: Prompt string (supplied by the Gradio textbox).
        max_tokens: Upper bound on generated tokens. Defaults to 100,
            matching the previous hard-coded limit, but is now tunable.

    Returns:
        The generated text of the first completion choice, or an empty
        string for a blank prompt.
    """
    # Skip inference entirely on empty/whitespace-only prompts — calling
    # the model with no content just burns CPU time.
    if not input_text or not input_text.strip():
        return ""
    output = llm(input_text, max_tokens=max_tokens)
    # llama-cpp-python returns an OpenAI-style completion dict.
    return output["choices"][0]["text"]
# Wire the chat function into a minimal text-in / text-out Gradio UI.
_ui_config = {
    "fn": chat,
    "inputs": "text",
    "outputs": "text",
    "title": "Dika AI - MythoMax Lite",
    "description": "Chatbot AI berbasis MythoMax 13B GGUF, optimized for Hugging Face CPU!",
}
iface = gr.Interface(**_ui_config)

# Start the web server (blocks until the app is stopped).
iface.launch()