import os
from typing import Dict, Optional

import pandas as pd
from groq import Groq
from langchain_groq import ChatGroq
from langchain_core.prompts import (
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
    MessagesPlaceholder,
)
from langchain.chains import LLMChain  # deprecated in recent LangChain releases; kept for compatibility
from langchain_core.messages import SystemMessage
from langchain.chains.conversation.memory import ConversationBufferWindowMemory


class GROQ:
    def __init__(self, api_key: Optional[str] = None):
        # Read the key from the environment rather than hardcoding a secret in source.
        self.client: Groq = Groq(api_key=api_key or os.environ.get("GROQ_API_KEY"))

    def chat(self, prompt: str, model: str, response_format: Optional[Dict] = None) -> str:
        completion = self.client.chat.completions.create(
            model=model,
            messages=[{"role": "user", "content": prompt}],
            response_format=response_format,
        )
        return completion.choices[0].message.content

    def errorChat(self, user_question: str, sql_query: str, error: str, model: str) -> str:
        # Ask the model to repair the failing SQL query and decide whether user feedback is needed.
        prompt = """
        User question: {user_question}
        Error: {error}
        The error occurred in this SQL query: {sql_query}

        Update the SQL query to fix the error.
        If user feedback is needed, return a feedback prompt; otherwise return null.
        Respond in JSON:
        {{"sql": <updated SQL or null>,
          "feedback": <feedback prompt or null>,
          "summarization": <short summary or null>,
          "user_feedback": <true if user feedback is needed, false otherwise>}}
        """.format(user_question=user_question, sql_query=sql_query, error=error)
        return self.chat(prompt, model, None)

    def get_summarization(self, user_question: str, df: pd.DataFrame, model: str) -> str:
        """
        Build a summarization prompt from the user's question and the resulting data,
        send it to the Groq API, and return the AI's response.

        Parameters:
            user_question (str): The user's question.
            df (DataFrame): The DataFrame resulting from the SQL query.
            model (str): The AI model to use for the response.

        Returns:
            str: The content of the AI's response to the summarization prompt.
        """
        prompt = '''
        A user asked the following question pertaining to local database tables:

        {user_question}

        To answer the question, the following dataframe was returned:

        Dataframe:
        {df}

        * Render the dataframe as a clean, easy-to-read table.
        * Format all numeric values with appropriate precision.
        * Display any percentages with the % symbol.
        * Format any currency values with the appropriate currency symbol and decimal places.
        * Format any date columns as 'YYYY-MM-DD' for clarity.
        * If the dataframe has more than 10 rows, show only the first 10 rows and indicate there are more.
        * Include the total number of rows in the dataframe.

        In a few sentences, show the dataframe and summarize the data in the table as it
        pertains to the original user question.
        Avoid qualifiers like "based on the data" and do not comment on the structure or
        metadata of the table itself.
        '''.format(user_question=user_question, df=df.to_markdown(index=False))

        # Response format is left as None (plain text).
        return self.chat(prompt, model, None)

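# Illustrative usage sketch for GROQ (not part of the class itself). It assumes
# GROQ_API_KEY is exported in the environment and that "llama3-8b-8192" is a
# model name enabled for your Groq account; swap in whichever model you use.
#
#     groq = GROQ()
#     answer = groq.chat("List three uses of SQL window functions.",
#                        model="llama3-8b-8192")
#     print(answer)
#
#     df = pd.DataFrame({"region": ["EU", "US"], "revenue": [1200.5, 3400.0]})
#     summary = groq.get_summarization("What is revenue by region?", df,
#                                      model="llama3-8b-8192")
#     print(summary)
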
class ConversationGROQ:
    def __init__(self, conversational_memory_length: int = 10,
                 api_key: Optional[str] = None,
                 model: str = 'llama3-8b-8192'):
        # Read the key from the environment rather than hardcoding a secret in source.
        self.client: ChatGroq = ChatGroq(
            groq_api_key=api_key or os.environ.get("GROQ_API_KEY"),
            model=model
        )
        # Sliding-window memory: keep only the last k exchanges as context.
        self.memory: ConversationBufferWindowMemory = ConversationBufferWindowMemory(
            k=conversational_memory_length,
            memory_key="chat_history",
            return_messages=True
        )
        self.conversation: Optional[LLMChain] = None

    def create_template(self, base_prompt: str) -> ChatPromptTemplate:
        return ChatPromptTemplate.from_messages([
            # Persistent system prompt, always included at the start of the chat.
            SystemMessage(content=base_prompt),
            # Replaced by the actual chat history at run time; maintains context.
            MessagesPlaceholder(variable_name="chat_history"),
            # The user's current input is injected here.
            HumanMessagePromptTemplate.from_template("{human_input}"),
        ])

    def create_conversation(self, prompt: str) -> LLMChain:
        self.conversation = LLMChain(
            llm=self.client,
            memory=self.memory,
            prompt=self.create_template(prompt),
            verbose=True
        )
        return self.conversation

    def chat(self, user_input: str) -> str:
        if self.conversation is None:
            raise ValueError("Conversation not initialized. Call create_conversation() first.")
        return self.conversation.predict(human_input=user_input)

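# Minimal demo, guarded so it only runs when the module is executed directly.
# Assumes GROQ_API_KEY is set in the environment; "llama3-8b-8192" is an
# assumed model name, replace it with one enabled for your account.
if __name__ == "__main__":
    bot = ConversationGROQ(conversational_memory_length=5)
    bot.create_conversation("You are a concise assistant for SQL questions.")
    print(bot.chat("What does a LEFT JOIN return?"))
    # The window memory carries the previous turn, so follow-ups can be terse.
    print(bot.chat("And a FULL OUTER JOIN?"))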