# --- Hugging Face web-UI scrape artifacts (commented out; not part of the source) ---
# Spaces: Sleeping / Sleeping
# File size: 1,551 Bytes
# abb1f73 8adacef 96d04d4 abb1f73 14275a9 abb1f73 96d04d4 abb1f73 96d04d4 abb1f73
# (line-number gutter removed)
# import streamlit as st
# x = st.slider('Select a value')
# st.write(x, 'squared is', x * x)
# The code below confirms there is no GPU available
# import torch
# st.write("torch version: " + torch.__version__)
# st.write("cuda available: " + str(torch.cuda.is_available()))
# st.write("No of GPUs: " + str(torch.cuda.device_count()))
# Hypothesis: Huggingface will host a static copy of a model so we can use that in a FastAPI endpoint to be called externally
# Requirement: ability to get anything back from a post request of an API hosted on a Huggingface server
# (separate to the requirement of a GPU, which will cost money)
# If so, the only thing that's in the huggingface space is opensource anyway, so no risk of losing code IPR that is outside the space (the code calling the model API)
from fastapi import FastAPI, Request, HTTPException
from payload import SomeText
# Application object for the Space.
# NOTE(review): the original passed swagger_ui_bundle_js / swagger_ui_standalone_preset_js
# here, but those are not FastAPI() constructor parameters — FastAPI silently collects
# unknown keyword arguments into `app.extra`, so they never affected the docs UI.
# Customising the Swagger UI assets is done via fastapi.openapi.docs.get_swagger_ui_html
# (swagger_js_url=...) on a custom /docs route instead; dropped here to avoid the
# misleading no-op configuration.
app = FastAPI(
    title="Hello Huggingworld",
    version="1.0",
    # debug=True returns tracebacks in responses — acceptable for a demo Space,
    # but should be disabled for anything production-facing.
    debug=True,
    summary="API to return the text belonging to a payload.",
)
@app.post("/api/test-io-capability")
async def test_io(request: Request, input: SomeText):
    """
    Echo endpoint: return the text carried in the posted payload unchanged.

    Args:
        request: Incoming request object (currently unused; kept in the
            signature for future header/client inspection).
        input: Request-body model; only its ``text`` field is read.
            NOTE: the name shadows the builtin ``input``, but FastAPI binds
            the body by the model type, not the parameter name, so it is
            kept for interface stability.

    Returns:
        The payload's text, unchanged.

    Raises:
        HTTPException: 400 when the payload text is empty.
    """
    # Guard clause + Pythonic truthiness: empty string is falsy,
    # replacing the original `len(input.text) > 0` check.
    if not input.text:
        raise HTTPException(status_code=400, detail="payload contains no text")
    return input.text