File size: 2,413 Bytes
77370fc
59978cc
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
77370fc
 
59978cc
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
import gradio as gr
from transformers import AutoTokenizer, AutoModelForSequenceClassification, AutoModelForCausalLM
import torch
import os
from dotenv import load_dotenv

# Load environment variables from a local .env file so the Hugging Face
# token is never hard-coded in the source.
load_dotenv()
HUGGING_FACE_TOKEN = os.getenv("HUGGING_FACE_TOKEN")

# Load the sentiment analysis model (binary positive/negative SST-2 classifier).
sentiment_tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased-finetuned-sst-2-english")
sentiment_model = AutoModelForSequenceClassification.from_pretrained("distilbert-base-uncased-finetuned-sst-2-english")

# Load the gated LLaMA-3.2-1B-Instruct model for text generation,
# authenticating with the token from the environment.
# NOTE: `token=` replaces the deprecated `use_auth_token=` keyword,
# which has been removed in recent transformers releases.
llama_tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-3.2-1B-Instruct", token=HUGGING_FACE_TOKEN)
llama_model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-3.2-1B-Instruct", token=HUGGING_FACE_TOKEN)

def analyze_sentiment(text):
    """Classify *text* as positive or negative using the DistilBERT-SST2 model.

    Args:
        text: The user's input string.

    Returns:
        A ``(sentiment, confidence)`` tuple where ``sentiment`` is
        ``"positive"`` or ``"negative"`` and ``confidence`` is the softmax
        probability of the predicted class (float in (0, 1]).
    """
    # truncation=True guards against inputs longer than the model's
    # maximum sequence length, which would otherwise raise at runtime.
    inputs = sentiment_tokenizer(text, return_tensors="pt", truncation=True)
    # Inference only: disable autograd to skip graph construction.
    with torch.no_grad():
        outputs = sentiment_model(**inputs)
    probs = torch.nn.functional.softmax(outputs.logits, dim=-1)
    # Index 1 is the POSITIVE label for this SST-2 checkpoint.
    sentiment = "positive" if torch.argmax(probs) == 1 else "negative"
    confidence = probs.max().item()
    return sentiment, confidence

def generate_response(sentiment, text):
    """Generate supportive advice with the LLaMA model.

    Args:
        sentiment: Label from :func:`analyze_sentiment` ("positive"/"negative").
        text: The user's original input, embedded into the prompt.

    Returns:
        The decoded model output as a string. Note the decoded text
        includes the prompt, matching the original behavior.
    """
    prompt = f"The user feels {sentiment}. Respond with supportive advice based on: {text}"
    inputs = llama_tokenizer(prompt, return_tensors="pt")
    # max_new_tokens bounds only the *generated* continuation; the previous
    # max_length=150 counted prompt tokens too, so a long input could leave
    # little or no room for a reply.  pad_token_id is set explicitly because
    # LLaMA tokenizers define no pad token, which otherwise emits a warning.
    with torch.no_grad():
        output_ids = llama_model.generate(
            **inputs,
            max_new_tokens=150,
            pad_token_id=llama_tokenizer.eos_token_id,
        )
    return llama_tokenizer.decode(output_ids[0], skip_special_tokens=True)

def personal_psychologist(text):
    """Assess the user's mood and reply with supportive advice.

    Runs sentiment analysis on *text*, feeds the detected sentiment into
    the response generator, and returns a single formatted string with
    both the classification and the generated reply.
    """
    mood, score = analyze_sentiment(text)
    advice = generate_response(mood, text)
    return (
        f"Sentiment: {mood} (Confidence: {score:.2f})"
        f"\n\nResponse: {advice}"
    )

# Set up the Gradio UI: a single text box in, formatted text out.
iface = gr.Interface(
    fn=personal_psychologist,
    inputs="text",
    outputs="text",
    title="Personal Psychologist",
    description="A supportive AI that assesses your mood and provides comforting advice based on your input.",
    examples=[
        ["I'm feeling very anxious and stressed about my exams."],
        ["I had a great day with my friends!"],
        ["I feel like I'm not good enough and everything is going wrong."]
    ]
)

# Launch only when executed as a script, so importing this module
# (e.g. from tests or tooling) does not start the web server.
if __name__ == "__main__":
    iface.launch()