from fastapi import FastAPI, Query
from transformers import AutoModelForSequenceClassification, AutoTokenizer, pipeline

# Load the fine-tuned sentiment model and its tokenizer from the Hugging Face Hub.
model_name = "Sonny4Sonnix/twitter-roberta-base-sentimental-analysis-of-covid-tweets"
model = AutoModelForSequenceClassification.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)

# Wrap the model and tokenizer in a sentiment-analysis pipeline.
sentiment = pipeline("sentiment-analysis", model=model, tokenizer=tokenizer)

app = FastAPI()


@app.get("/")
async def read_root():
    return {"message": "Sentiment Analysis API using FastAPI"}


@app.get("/analyze-sentiment/")
async def analyze_sentiment(text: str = Query(..., description="Text for sentiment analysis")):
    # The pipeline returns a list such as [{"label": "LABEL_1", "score": 0.98}].
    result = sentiment(text)
    sentiment_label = result[0]["label"]
    sentiment_score = result[0]["score"]

    # Map the model's raw labels to human-readable classes.
    if sentiment_label == "LABEL_1":
        sentiment_label = "positive"
    elif sentiment_label == "LABEL_0":
        sentiment_label = "neutral"
    else:
        sentiment_label = "negative"

    return {
        "sentiment": sentiment_label.capitalize(),
        "score": sentiment_score,
    }


# Run the app with Uvicorn when this file is executed directly.
if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="127.0.0.1", port=7860)
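# Example request against the running service (a sketch; assumes the `requests`
# package is installed and that the app above is running on 127.0.0.1:7860):
#
#   import requests
#   resp = requests.get(
#       "http://127.0.0.1:7860/analyze-sentiment/",
#       params={"text": "The new update made everything so much easier"},
#   )
#   print(resp.json())  # e.g. {"sentiment": "Positive", "score": 0.97}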
# model_name = "Sonny4Sonnix/Movie_Sentiments_Analysis_with_FastAPI" # Replace with the name of the pre-trained model you want to use
# model = AutoModelForSequenceClassification.from_pretrained(model_name)
# tokenizer = AutoTokenizer.from_pretrained(model_name)
# app = FastAPI()
# @app.get("/")
# async def read_root():
# return {"message": "Welcome to the Sepsis Prediction using FastAPI"}
# def classify(prediction):
# if prediction == 0:
# return "Sentence is positive"
# else:
# return "Sentence is negative"
# @app.post("/predict/")
# async def predict_sepsis(
# request: Request,
# Text: float = Query(..., description="Please type a sentence"),
# ):
# input_data = [Text]
# input_df = pd.DataFrame([input_data], columns=[
# "Text"
# ])
# pred = model.predict(input_df)
# output = classify(pred[0])
# response = {
# "prediction": output
# }
# return response
# # Run the app using Uvicorn
# if __name__ == "__main__":
# import uvicorn
# uvicorn.run(app, host="127.0.0.1", port=7860)
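# A minimal sketch of how the POST endpoint in the draft above could be wired to
# the transformers pipeline instead of model.predict() (assumes the `sentiment`
# pipeline defined at the top of this file; the handler name is illustrative):
#
#   @app.post("/predict/")
#   async def predict_sentiment_post(text: str = Query(..., description="Please type a sentence")):
#       result = sentiment(text)
#       return {"prediction": result[0]["label"], "score": result[0]["score"]}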