import os
import re

import requests
from dotenv import load_dotenv

from messages import krishna_blessings
from ayush_messages import ayush_surprises

# Load environment variables (Hugging Face Space secrets)
load_dotenv()
HUGGINGFACE_API_TOKEN = os.getenv("HUGGINGFACE_API_TOKEN")


def get_krishna_response(user_input):
    """
    Generate a response from Little Krishna based on user input.

    - If the input contains specific keywords, return predefined messages.
    - Otherwise, use Hugging Face's Inference API with the Gemma model.
    """
    user_text = user_input.lower()

    # Keyword shortcuts take priority over the model call.
    if "birthday" in user_text:
        return ayush_surprises["birthday"]
    if "shy" in user_text:
        return krishna_blessings["shy"]
    # Match "hello"/"hi" as whole words so inputs such as "this" or "child"
    # don't accidentally trigger the greeting.
    if re.search(r"\b(hello|hi)\b", user_text):
        return krishna_blessings["greeting"]

    headers = {
        "Authorization": f"Bearer {HUGGINGFACE_API_TOKEN}",
        "Content-Type": "application/json"
    }
    payload = {
        "inputs": (
            "You are Little Krishna, playful and wise. "
            f"Respond to this in a fun, Krishna-like way: '{user_input}'"
        ),
        "parameters": {
            # The text-generation task expects max_new_tokens rather than max_length;
            # return_full_text=False keeps the prompt out of the generated reply.
            "max_new_tokens": 60,
            "temperature": 0.9,
            "return_full_text": False
        }
    }
    try:
        response = requests.post(
            "https://api-inference.huggingface.co/models/google/gemma-2b",
            headers=headers,
            json=payload,
            timeout=30  # avoid hanging indefinitely if the API is slow
        )
        if response.status_code == 200:
            result = response.json()
            return result[0]["generated_text"].strip()
        else:
            print(f"Error with Gemma: {response.text}")
            return "Sorry, I couldn’t generate a response. Let’s try something else!"
    except Exception as e:
        print(f"Error connecting to Hugging Face Inference API: {str(e)}")
        return "Hare Krishna! I seem to be lost in Vrindavan’s magic—let’s try again!"
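

# A minimal local sanity check, assuming this module is run directly and the
# HUGGINGFACE_API_TOKEN secret is set (needed only for the model fallback).
# The prompts below are illustrative placeholders, not part of the app itself.
if __name__ == "__main__":
    for prompt in ("Hi Krishna!", "Tell me something wise"):
        print(f"> {prompt}")
        print(get_krishna_response(prompt))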