hamaadayubkhan committed
Commit 59978cc
1 Parent(s): 8c988d8

Update app.py

Files changed (1):
  1. app.py +53 -61
app.py CHANGED
@@ -1,64 +1,56 @@
 import gradio as gr
-from datasets import load_dataset
-import random
-import groq  # Assuming you are using the Groq library
-
-# Load dataset
-ds = load_dataset("alexandreteles/mental-health-conversational-data")
-
-# Extract columns
-context = ds["train"]["Context"]
-knowledge = ds["train"]["Knowledge"]
-response = ds["train"]["Response"]
-
-# Directly input the Groq API key (replace with your actual API key)
-api_key = "gsk_..."
-
-# Initialize Groq API client with the API key
-client = groq.Client(api_key=api_key)
-
-# Define a function to simulate a conversation
-def chatbot(user_input):
-    if not user_input.strip():
-        return "Please enter a question or concern to receive guidance."
-
-    try:
-        # Try to call Groq API to generate a response
-        brief_response = client.predict(user_input)  # Ensure that 'predict' or correct method is available
-    except Exception as e:
-        brief_response = None  # If Groq API fails, set brief_response to None
-
-    if not brief_response:
-        # If Groq API does not return a response, fall back to dataset
-        idx = random.randint(0, len(context) - 1)
-        reply = response[idx]
-        context_text = context[idx]
-        knowledge_text = knowledge[idx]
-
-        # Combine dataset info with fallback response
-        complete_response = (
-            f"**Contextual Information**\n{context_text}\n\n"
-            f"**Knowledge Base**\n{knowledge_text}\n\n"
-            f"**Fallback Response**\n{reply}"
-        )
-    else:
-        # If Groq API returns a response, use that as the final output
-        complete_response = f"**Personalized Response**\n{brief_response}"
-
-    return complete_response
-
-# Enhance the interface
-interface = gr.Interface(
-    fn=chatbot,
-    inputs=gr.Textbox(label="Ask your question:", placeholder="How are you feeling today?"),
-    outputs=gr.Markdown(label="Psychologist Assistant Response"),
-    title="Virtual Psychologist Assistant",
-    description=(
-        "This is a supportive assistant designed to provide compassionate guidance "
-        "for mental well-being. Type your thoughts or questions for tailored advice and insights."
-    ),
-    theme="huggingface",  # Optional: apply a theme if available
 )

-# Launch the app
-interface.launch()
 
 import gradio as gr
+from transformers import AutoTokenizer, AutoModelForSequenceClassification, AutoModelForCausalLM
+import torch
+import os
+from dotenv import load_dotenv
+
+# Load environment variables from .env file
+load_dotenv()
+HUGGING_FACE_TOKEN = os.getenv("HUGGING_FACE_TOKEN")
+
+# Load the sentiment analysis model
+sentiment_tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased-finetuned-sst-2-english")
+sentiment_model = AutoModelForSequenceClassification.from_pretrained("distilbert-base-uncased-finetuned-sst-2-english")
+
+# Load the LLaMA-1B model for text generation, using the token from the environment variable
+llama_tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-3.2-1B-Instruct", use_auth_token=HUGGING_FACE_TOKEN)
+llama_model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-3.2-1B-Instruct", use_auth_token=HUGGING_FACE_TOKEN)
+
+# Function for sentiment analysis
+def analyze_sentiment(text):
+    inputs = sentiment_tokenizer(text, return_tensors="pt")
+    outputs = sentiment_model(**inputs)
+    probs = torch.nn.functional.softmax(outputs.logits, dim=-1)
+    sentiment = "positive" if torch.argmax(probs) == 1 else "negative"
+    confidence = probs.max().item()
+    return sentiment, confidence
+
+# Function to generate a supportive response
+def generate_response(sentiment, text):
+    prompt = f"The user feels {sentiment}. Respond with supportive advice based on: {text}"
+    inputs = llama_tokenizer(prompt, return_tensors="pt")
+    response = llama_model.generate(**inputs, max_length=150)
+    return llama_tokenizer.decode(response[0], skip_special_tokens=True)
+
+# Combine both functions for the personal psychologist
+def personal_psychologist(text):
+    sentiment, confidence = analyze_sentiment(text)
+    response = generate_response(sentiment, text)
+    return f"Sentiment: {sentiment} (Confidence: {confidence:.2f})\n\nResponse: {response}"
+
+# Set up Gradio interface
+iface = gr.Interface(
+    fn=personal_psychologist,
+    inputs="text",
+    outputs="text",
+    title="Personal Psychologist",
+    description="A supportive AI that assesses your mood and provides comforting advice based on your input.",
+    examples=[
+        ["I'm feeling very anxious and stressed about my exams."],
+        ["I had a great day with my friends!"],
+        ["I feel like I'm not good enough and everything is going wrong."]
+    ]
 )

+# Launch Gradio app
+iface.launch()
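
A note on the removed code: the `groq` Python SDK has no `Client.predict` method, which is presumably why the call was wrapped in a bare `try/except` and the app almost always fell through to the dataset. A minimal sketch of what the equivalent call usually looks like with the SDK's OpenAI-style chat interface, assuming the key lives in an environment variable and using `llama-3.1-8b-instant` as a placeholder model name not taken from the commit:

```python
import os
from groq import Groq  # official Groq SDK, exposes an OpenAI-style client

# Read the key from the environment instead of hardcoding it in app.py
client = Groq(api_key=os.environ["GROQ_API_KEY"])

def ask_groq(user_input: str) -> str:
    # chat.completions.create is the SDK's text-generation entry point;
    # the model name below is an assumption, pick any model Groq hosts
    completion = client.chat.completions.create(
        model="llama-3.1-8b-instant",
        messages=[{"role": "user", "content": user_input}],
    )
    return completion.choices[0].message.content
```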
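
On the added `analyze_sentiment`: hardcoding `torch.argmax(probs) == 1` happens to be correct for this SST-2 checkpoint (label 1 is POSITIVE), but the mapping can be read from the model config rather than assumed. A small sketch of the same function with that change and a no-grad inference context:

```python
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

model_id = "distilbert-base-uncased-finetuned-sst-2-english"
sentiment_tokenizer = AutoTokenizer.from_pretrained(model_id)
sentiment_model = AutoModelForSequenceClassification.from_pretrained(model_id)

def analyze_sentiment(text: str) -> tuple[str, float]:
    inputs = sentiment_tokenizer(text, return_tensors="pt")
    with torch.no_grad():  # inference only, so skip gradient tracking
        logits = sentiment_model(**inputs).logits
    probs = torch.nn.functional.softmax(logits, dim=-1)
    idx = int(torch.argmax(probs))
    # id2label ships with the checkpoint config: {0: "NEGATIVE", 1: "POSITIVE"}
    return sentiment_model.config.id2label[idx].lower(), probs.max().item()
```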
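
Two details in the added generation code are worth flagging: `use_auth_token` is deprecated in recent `transformers` releases in favor of `token`, and `max_length=150` counts the prompt tokens too, so a long user message can leave almost no budget for the reply. A hedged sketch of the same step with those adjustments (the 150-token budget is just the original value carried over):

```python
import os
from transformers import AutoTokenizer, AutoModelForCausalLM

HUGGING_FACE_TOKEN = os.getenv("HUGGING_FACE_TOKEN")

# token= replaces the deprecated use_auth_token= keyword
llama_tokenizer = AutoTokenizer.from_pretrained(
    "meta-llama/Llama-3.2-1B-Instruct", token=HUGGING_FACE_TOKEN
)
llama_model = AutoModelForCausalLM.from_pretrained(
    "meta-llama/Llama-3.2-1B-Instruct", token=HUGGING_FACE_TOKEN
)

def generate_response(sentiment: str, text: str) -> str:
    prompt = f"The user feels {sentiment}. Respond with supportive advice based on: {text}"
    inputs = llama_tokenizer(prompt, return_tensors="pt")
    # max_new_tokens bounds only the reply, independent of prompt length;
    # pad_token_id silences a warning, since Llama defines no pad token
    output = llama_model.generate(
        **inputs,
        max_new_tokens=150,
        pad_token_id=llama_tokenizer.eos_token_id,
    )
    return llama_tokenizer.decode(output[0], skip_special_tokens=True)
```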