from flask import Flask, request
import telebot
import os
import requests
import google.generativeai as genai
import tempfile
import logging
import re
from telebot.apihelper import ApiTelegramException
import time
# Replace with your actual API keys and bot token, ideally via environment
# variables so secrets are not hard-coded in the source file.
GOOGLE_API_KEY = os.environ.get('GOOGLE_API_KEY', 'YOUR_GOOGLE_API_KEY')
BOT_TOKEN = os.environ.get('BOT_TOKEN', 'YOUR_TELEGRAM_BOT_TOKEN')
# Initialize the Telegram bot
bot = telebot.TeleBot(BOT_TOKEN)
# Configure logging
logging.basicConfig(
    level=logging.DEBUG,
    format='%(asctime)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)
# Configure Google Generative AI
genai.configure(api_key=GOOGLE_API_KEY)
# Create the model
generation_config = {
    "temperature": 1,
    "top_p": 0.95,
    "top_k": 64,
    "max_output_tokens": 8192,
    "response_mime_type": "text/plain",
}
model = genai.GenerativeModel(
    model_name="gemini-1.5-pro",
    generation_config=generation_config,
    system_instruction="Please respond to user input"
)
chat_session = model.start_chat(
    history=[
        {"role": "user", "parts": ["hi\n"]},
        {"role": "model", "parts": ["Hello! 😊 How can I help you today? 😊 \n"]},
        {"role": "user", "parts": ["I am looking for photo booth service?"]},
        {"role": "model", "parts": ["That's great! 😊 I can definitely help you with information about Aforative Media's photo booth services. \n\nTo give you the most relevant information, could you tell me a little more about what you're looking for? ..."]},
        {"role": "user", "parts": ["How much for photo booth services?"]},
        {"role": "model", "parts": ["You're smart to ask about pricing upfront! 😊 \n\nAforative Media's Mr. & Ms. Booth photo booth services start at **USD 390 for a minimum of 2 hours**. ..."]},
        {"role": "user", "parts": ["How about videography service?"]},
        {"role": "model", "parts": ["You're thinking about capturing the memories on film too? Excellent choice! Videography adds a whole other dimension to remembering special events. \n\nAforative Media offers excellent videography services, and just like their photo booths, their videography packages are competitively priced and flexible. ..."]},
    ]
)
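# NOTE (observation, not from the original file): this single module-level chat session
# is shared by every incoming Telegram chat, so all users contribute to one conversation
# history. Per-user sessions would require keeping a start_chat() result per message.chat.id.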
# Initialize Flask app
app = Flask(__name__)
@app.route(f'/bot{BOT_TOKEN}', methods=['POST'])
def webhook():
    """Handles incoming updates from Telegram."""
    json_str = request.get_data(as_text=True)
    update = telebot.types.Update.de_json(json_str)
    bot.process_new_updates([update])
    return 'ok', 200
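# NOTE (sketch, not part of the original file): Telegram only delivers updates to this
# route after the webhook has been registered once. Assuming the app is reachable at a
# public HTTPS host (placeholder WEBHOOK_HOST below), registration would look like:
#
#     WEBHOOK_HOST = 'https://your-public-host.example'  # hypothetical placeholder
#     bot.remove_webhook()
#     bot.set_webhook(url=f"{WEBHOOK_HOST}/bot{BOT_TOKEN}")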
@bot.message_handler(content_types=['text'])
def handle_text_messages(message):
    """Handles text messages and responds using the Gemini chat session."""
    try:
        prompt = f"Respond to the user: {message.text}"
        response = chat_session.send_message(prompt)  # Generate a reply from the shared chat session
        response_text = response.text
        # Reply in the originating chat (private or group) with Markdown formatting
        try:
            bot.send_message(message.chat.id, response_text, parse_mode='Markdown')
        except ApiTelegramException:
            # Model output is not guaranteed to be valid Telegram Markdown; fall back to plain text
            bot.send_message(message.chat.id, response_text)
        logger.info(f"Response sent to chat_id {message.chat.id}")
    except Exception as e:
        logger.error(f"Error during GenAI processing: {e}")
        error_message = "Sorry, I can't answer this query right now but I will improve from time to time."
        bot.send_message(message.chat.id, error_message)
        logger.error(f"Error message sent to chat_id {message.chat.id}")
if __name__ == "__main__":
    app.run(host='0.0.0.0', port=5000)