|
from transformers import AutoModelForCausalLM, AutoTokenizer |
|
|
|
# Checkpoint fine-tuned from StarCoder for Gradio-related Q&A.
CHECKPOINT = "ArmelR/starcoder-gradio-v0"

# Download (or load from the local cache) the causal-LM weights and the
# matching tokenizer for that checkpoint.
model = AutoModelForCausalLM.from_pretrained(CHECKPOINT)
tokenizer = AutoTokenizer.from_pretrained(CHECKPOINT)
|
def generate_text(inp):
    """Generate an answer for *inp* with the StarCoder-gradio model.

    Parameters
    ----------
    inp : str
        The user's question; it is embedded in a "Question/Answer" prompt
        template before generation.

    Returns
    -------
    str
        The decoded model continuation with the prompt tokens stripped.
    """
    # Bug fix: the original ignored `inp` and always used a hard-coded
    # temperature-conversion prompt, so the UI textbox had no effect.
    inputs = tokenizer(f"Question: {inp}\n\nAnswer: ", return_tensors="pt")

    outputs = model.generate(
        inputs["input_ids"],
        temperature=0.2,
        top_p=0.95,
        max_new_tokens=200,
    )

    # Bug fix: len(inputs["input_ids"]) is the batch size (1), not the
    # prompt length; slice on the sequence dimension to drop the prompt.
    prompt_len = inputs["input_ids"].shape[1]

    # Bug fix: Gradio displays the function's return value; the original
    # printed to stdout and implicitly returned None.
    return tokenizer.decode(outputs[0][prompt_len:])
|
|
|
# Bug fix: `gr` was used below but never imported anywhere in the file,
# which made the script crash with NameError at this point.
import gradio as gr

# Bug fix: `gr.outputs.Textbox` belongs to the removed pre-4.x Gradio API;
# the "textbox" string shortcut works across Gradio versions for both the
# input and the output component.
gr.Interface(
    generate_text,
    inputs="textbox",
    outputs="textbox",
    title="Text Generation machine ",
    description="Ask any question. Note: It can take 20-60 seconds to generate output based on your internet connection.",
).launch()