aidevhund committed on
Commit
7873eac
·
verified ·
1 Parent(s): f71ab5e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +22 -17
app.py CHANGED
@@ -7,20 +7,20 @@ from llama_parse import LlamaParse
7
  from llama_index.embeddings.huggingface import HuggingFaceEmbedding
8
  from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI
9
 
10
- # LLM ve Parser Başlatma
11
  llm = HuggingFaceInferenceAPI(model_name="tiiuae/falcon-7b-instruct")
12
  parser = LlamaParse(api_key='llx-zKtsC5UBLs8DOApOsLluXMBdQhC75ea0Vs80SmPSjsmDzuhh', result_type='markdown')
13
 
14
- # PDF dosyasını yükleyip indexleme işlemi
15
  file_extractor = {'.pdf': parser}
16
  documents = SimpleDirectoryReader('data/', file_extractor=file_extractor).load_data()
17
 
18
- # Embedding Modeli ve Query Engine Başlatma
19
  embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5")
20
  vector_index = VectorStoreIndex.from_documents(documents, embed_model=embed_model)
21
  query_engine = vector_index.as_query_engine(llm=llm)
22
 
23
- # System Prompt: LLM'nin görevini belirlemek için
24
  system_prompt = """
25
  You are an AI assistant designed to answer questions about the Hund Ecosystem based on the uploaded PDF document.
26
  Your primary responsibility is to provide detailed, accurate, and clear answers to user queries related to the content of the document.
@@ -28,12 +28,12 @@ For any question that is not related to the content of the document, kindly ask
28
  Please ensure to be polite and professional in your responses. If the question cannot be answered based on the document, kindly guide the user accordingly.
29
  """
30
 
31
- # Sorgu işlemi için retry mekanizması
32
  def query_with_retry(query, max_retries=3, wait_time=5):
33
  for attempt in range(max_retries):
34
  try:
35
  start_time = datetime.now()
36
- response = query_engine.query(query) # System prompt ekleniyor
37
  end_time = datetime.now()
38
  duration = (end_time - start_time).total_seconds()
39
  print(f"Query completed in {duration:.2f} seconds.\n {response}")
@@ -48,37 +48,42 @@ def query_with_retry(query, max_retries=3, wait_time=5):
48
  print(f"An error occurred: {e}")
49
  break
50
 
51
- # Kullanıcı mesajlarını ve botun cevabını yönetmek
52
  def respond(message, history):
53
  try:
54
- # Sorguyu query_engine üzerinde çalıştır
55
  bot_message = query_engine.query(message)
 
 
56
 
57
- # Geçmişi güncelle ve doğru formatta döndür
58
  history.append((message, str(bot_message)))
59
- return history, history # Chatbot için doğru formatta döndürülüyor
 
 
 
60
  except Exception as e:
61
  if str(e) == "'NoneType' object has no attribute 'as_query_engine'":
62
  return "Please upload a file.", history
63
  return f"An error occurred: {e}", history
64
 
65
  # UI Setup
66
- with gr.Blocks(theme=gr.themes.Soft(font=[gr.themes.GoogleFont("Roboto Mono")])) as demo:
67
  gr.Markdown("# HundAI Chatbot🤖")
68
 
69
  with gr.Row():
70
  with gr.Column(scale=3):
71
- chatbot = gr.Chatbot(height=500)
72
- user_message = gr.Textbox(placeholder="Ask me questions anything about the Hund Ecosystem!", container=False)
73
- submit_btn = gr.Button("Ask")
74
  clear_btn = gr.Button("Clear Chat")
75
 
76
- # `respond` fonksiyonu, kullanıcı sorularını cevaplamak için kullanılacak
77
  submit_btn.click(fn=respond, inputs=[user_message, chatbot], outputs=[chatbot, user_message])
78
 
79
- # `clear` butonu, sohbeti temizleyecek
80
  clear_btn.click(lambda: [None, []], outputs=[chatbot, chatbot])
81
 
82
  # Launch the demo
83
  if __name__ == "__main__":
84
- demo.launch()
 
7
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI
import os

# LLM and Parser Initialization.
llm = HuggingFaceInferenceAPI(model_name="tiiuae/falcon-7b-instruct")

# SECURITY FIX: the LlamaParse API key was hard-coded here in a public repo
# and must be considered compromised — rotate it, then supply the new key
# through the environment instead of committing it to source.
parser = LlamaParse(api_key=os.getenv('LLAMA_CLOUD_API_KEY'), result_type='markdown')

# PDF document extraction and indexing: every .pdf under data/ is routed
# through LlamaParse (markdown output) before being loaded.
file_extractor = {'.pdf': parser}
documents = SimpleDirectoryReader('data/', file_extractor=file_extractor).load_data()

# Embedding Model and Query Engine Initialization.
embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5")
vector_index = VectorStoreIndex.from_documents(documents, embed_model=embed_model)
query_engine = vector_index.as_query_engine(llm=llm)
22
 
23
+ # System Prompt for LLM
24
  system_prompt = """
25
  You are an AI assistant designed to answer questions about the Hund Ecosystem based on the uploaded PDF document.
26
  Your primary responsibility is to provide detailed, accurate, and clear answers to user queries related to the content of the document.
 
28
  Please ensure to be polite and professional in your responses. If the question cannot be answered based on the document, kindly guide the user accordingly.
29
  """
30
 
31
+ # Query Retry Logic
32
  def query_with_retry(query, max_retries=3, wait_time=5):
33
  for attempt in range(max_retries):
34
  try:
35
  start_time = datetime.now()
36
+ response = query_engine.query(query) # System prompt is added here
37
  end_time = datetime.now()
38
  duration = (end_time - start_time).total_seconds()
39
  print(f"Query completed in {duration:.2f} seconds.\n {response}")
 
48
  print(f"An error occurred: {e}")
49
  break
50
 
51
+ # Manage user messages and bot responses
52
def respond(message, history):
    """Answer one user message via the query engine and update chat history.

    Args:
        message: The user's question (text from the Gradio Textbox).
        history: List of (user, bot) message pairs shown in the Chatbot.

    Returns:
        A ``(history, "")`` pair: the updated history for the Chatbot
        component, and an empty string so the Textbox is cleared for the
        next question.
    """
    try:
        # Run the query engine with the user message.
        bot_message = query_engine.query(message)

        # Lightweight server-side trace of each Q/A exchange.
        print(f"\n{datetime.now()}:{llm.model_name}:: {message} --> {str(bot_message)}\n")

        # Add user's message and bot's response to history.
        history.append((message, str(bot_message)))
        return history, ""
    except Exception as e:
        # BUG FIX: the error paths previously returned (str, history), which
        # fed a bare string to the Chatbot and the history list to the
        # Textbox. Surface errors inside the chat instead so both outputs
        # keep the types expected by outputs=[chatbot, user_message].
        # Substring match is also more robust than exact equality on the
        # exception text.
        if "'NoneType' object has no attribute 'as_query_engine'" in str(e):
            history.append((message, "Please upload a file."))
        else:
            history.append((message, f"An error occurred: {e}"))
        return history, ""
69
 
70
# UI Setup
with gr.Blocks(theme=gr.themes.Soft(font=[gr.themes.GoogleFont("Roboto Mono")]), css='footer {visibility: hidden}') as demo:
    gr.Markdown("# HundAI Chatbot🤖")

    with gr.Row():
        with gr.Column(scale=3):
            chatbot = gr.Chatbot(height=500)  # Displays chat history
            user_message = gr.Textbox(placeholder="Ask me questions about the Hund Ecosystem!", container=False)
            submit_btn = gr.Button("Send")
            clear_btn = gr.Button("Clear Chat")

    # Process the message on button click, and also on Enter in the textbox
    # so both input paths behave identically.
    submit_btn.click(fn=respond, inputs=[user_message, chatbot], outputs=[chatbot, user_message])
    user_message.submit(fn=respond, inputs=[user_message, chatbot], outputs=[chatbot, user_message])

    # BUG FIX: the clear handler previously listed `chatbot` twice in
    # outputs; reset the chat history and the input box instead.
    clear_btn.click(lambda: ([], ""), outputs=[chatbot, user_message])

# Launch the demo
if __name__ == "__main__":
    demo.launch()