# NOTE(review): dead commented-out Streamlit/torch probe code removed; it only
# confirmed that no GPU is available on the free Huggingface tier.
#
# Hypothesis: Huggingface will host a static copy of a model so we can use that
# in a FastAPI endpoint to be called externally.
# Requirement: ability to get anything back from a POST request of an API hosted
# on a Huggingface server (separate to the requirement of a GPU, which will cost
# money). If so, the only thing that's in the Huggingface space is open source
# anyway, so no risk of losing code IPR that is outside the space (the code
# calling the model API).
from fastapi import FastAPI, HTTPException, Request

from payload import SomeText

app = FastAPI(
    title="Hello Huggingworld",
    version="1.0",
    debug=True,
    swagger_ui_bundle_js="//unpkg.com/swagger-ui-dist@3/swagger-ui-bundle.js",
    swagger_ui_standalone_preset_js="//unpkg.com/swagger-ui-dist@3/swagger-ui-standalone-preset.js",
    summary="API to return the text belonging to a payload.",
)


@app.post("/api/test-io-capability")
async def test_io(request: Request, input: SomeText):
    """Echo back the raw text carried in the request payload.

    Exists purely to prove round-trip I/O against an externally hosted
    Huggingface space (see module header notes).

    Args:
        request: The incoming HTTP request (currently unused; kept for
            signature stability).
        input: Payload model whose ``text`` field is echoed back.

    Returns:
        The payload's ``text`` string, unchanged.

    Raises:
        HTTPException: 400 when the payload's ``text`` field is empty.
    """
    # Docstring above was previously a commented-out string; restored as a
    # real docstring. Truthiness check replaces `len(input.text) > 0`.
    if input.text:
        return input.text
    raise HTTPException(status_code=400, detail="payload contains no text")