from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import HTMLResponse
from transformers import pipeline

app = FastAPI()

# Allow cross-origin requests so the page can call the API from any host.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Text-generation pipeline backed by GPT-2 Large (PyTorch backend).
pipe = pipeline(task="text-generation", model="gpt2-large", framework="pt")

@app.get(path="/", response_class=HTMLResponse)
def get_ui():
    """
    Returns the HTML page for the UI.
    """
    html_content = """
    <!DOCTYPE html>
    <html lang="en">
    <head>
        <meta charset="UTF-8">
        <meta name="viewport" content="width=device-width, initial-scale=1.0">
        <title>Text Generator</title>
        <style>
            body {
                font-family: Arial, sans-serif;
                display: flex;
                justify-content: center;
                align-items: center;
                min-height: 100vh;
                margin: 0;
                padding: 20px;
                background-color: #f5f5f5;
            }
            #container {
                width: 100%;
                max-width: 600px;
                background: white;
                padding: 20px;
                border-radius: 8px;
                box-shadow: 0px 0px 10px rgba(0, 0, 0, 0.1);
                box-sizing: border-box;
            }
            #prompt, #output {
                width: 100%;
                padding: 10px;
                margin-bottom: 20px;
                border: 1px solid #ddd;
                border-radius: 4px;
                box-sizing: border-box;
            }
            #output {
                min-height: 100px;
                white-space: pre-wrap;
                background-color: #f9f9f9;
                padding: 15px;
                border-radius: 4px;
            }
        </style>
    </head>
    <body>
        <div id="container">
            <h1>Text Generator</h1>
            <textarea id="prompt" rows="4" placeholder="Enter your prompt here..."></textarea>
            <div id="output"></div>
        </div>
        <script>
            // Submit the prompt on Enter; Shift+Enter still inserts a newline.
            document.getElementById("prompt").addEventListener("keydown", async function(event) {
                if (event.key === "Enter" && !event.shiftKey) {
                    event.preventDefault();
                    const prompt = this.value.trim();
                    if (!prompt) return;
                    const outputDiv = document.getElementById("output");
                    outputDiv.innerHTML = "Loading...";
                    this.disabled = true;
                    const response = await fetch(`/generate?text=${encodeURIComponent(prompt)}`);
                    const data = await response.json();
                    const text = data.output;
                    outputDiv.innerHTML = "";
                    let i = 0;
                    // Type the generated text out one character at a time.
                    function printText() {
                        if (i < text.length) {
                            outputDiv.innerHTML += text.charAt(i);
                            i++;
                            setTimeout(printText, 50);
                        } else {
                            document.getElementById("prompt").disabled = false;
                        }
                    }
                    printText();
                }
            });
        </script>
    </body>
    </html>
"""
return html_content
@app.get("/generate")
def generate(text: str):
"""
Using the text-generation pipeline from `transformers`, generate text
from the given input text. The model used is `openai-community/gpt2-large`, which
can be found [here](<https://huggingface.co/openai-community/gpt2-large>).
"""
output = pipe(text)
return {"output": output[0]["generated_text"]}