sunilkumardash9 committed on
Commit 8fcee46 · 1 Parent(s): 5d635db

Changed a few things

Files changed (1)
  1. app.py +39 -38
app.py CHANGED
@@ -1,26 +1,29 @@
 
 import gradio as gr
 from langchain.embeddings.openai import OpenAIEmbeddings
-from langchain.text_splitter import CharacterTextSplitter
 from langchain.vectorstores import Chroma

 from langchain.chains import ConversationalRetrievalChain
 from langchain.chat_models import ChatOpenAI

 from langchain.document_loaders import PyPDFLoader
-import os

 import fitz
 from PIL import Image

-from functools import lru_cache
-import json
-import uuid
-import re
 import chromadb
-from chromadb.config import Settings
-



  def add_text(history, text):
@@ -30,19 +33,20 @@ def add_text(history, text):
     return history

 class my_app:
-    def __init__(self) -> None:
         self.chain = None
         self.chat_history = []
         self.N = 0
-        self.enable_box = gr.Textbox.update(value=None, placeholder='Upload your OpenAI API key', interactive=True)
-        self.disable_box = gr.Textbox.update(value='OpenAI API key is Set', interactive=False)
-
-    def set_apikey(self, api_key):
-        self.OPENAI_API_KEY = api_key
-        return self.disable_box
-
-    def enable_api_box(self):
-        return self.enable_box

     def chroma_client(self):
         #create a chroma client
@@ -70,34 +74,32 @@ class my_app:
         self.chain = ConversationalRetrievalChain.from_llm(ChatOpenAI(temperature=0.0, openai_api_key=self.OPENAI_API_KEY),
                                                            retriever=pdfsearch.as_retriever(search_kwargs={"k": 1}),
                                                            return_source_documents=True,)

-    def get_response(self, history, query, file):
-
         if not file:
             raise gr.Error(message='Upload a PDF')

-        if not self.chain:
-            self.build_chain(file)
-
-        result = self.chain({"question": query, 'chat_history': self.chat_history}, return_only_outputs=True)
-        self.chat_history += [(query, result["answer"])]
-        self.N = list(result['source_documents'][0])[1][1]['page']

         for char in result['answer']:
             history[-1][-1] += char
             yield history, ''

-    def render_file(self, file):
-
         doc = fitz.open(file.name)
-        page = doc[self.N]
         #Render the page as a PNG image with a resolution of 300 DPI
         pix = page.get_pixmap(matrix=fitz.Matrix(300/72, 300/72))
         image = Image.frombytes('RGB', [pix.width, pix.height], pix.samples)
         return image
-
 app = my_app()
-
 with gr.Blocks() as demo:

     with gr.Column():
@@ -120,14 +122,13 @@ with gr.Blocks() as demo:
     with gr.Column(scale=0.20):
         btn = gr.UploadButton("📁 upload a PDF", file_types=[".pdf"]).style()


-
-    api_key.submit(fn=app.set_apikey, inputs=[api_key], outputs=[api_key])
-    change_api_key.click(fn=app.enable_api_box, outputs=[api_key])
-    btn.upload(fn=app.render_file, inputs=[btn], outputs=[show_img],)
-
-    submit_btn.click(fn=add_text, inputs=[chatbot, txt], outputs=[chatbot, ], queue=False).success(fn=app.get_response, inputs=[chatbot, txt, btn],
-                     outputs=[chatbot, txt]).success(fn=app.render_file, inputs=[btn], outputs=[show_img])


 demo.queue()
 
+from typing import Any
 import gradio as gr
 from langchain.embeddings.openai import OpenAIEmbeddings
 from langchain.vectorstores import Chroma

 from langchain.chains import ConversationalRetrievalChain
 from langchain.chat_models import ChatOpenAI

 from langchain.document_loaders import PyPDFLoader

 import fitz
 from PIL import Image

 import chromadb
+import re

+enable_box = gr.Textbox.update(value=None, placeholder='Upload your OpenAI API key', interactive=True)
+disable_box = gr.Textbox.update(value='OpenAI API key is Set', interactive=False)

+def set_apikey(api_key):
+    global app
+    app.OPENAI_API_KEY = api_key
+    return disable_box
+
+def enable_api_box():
+    return enable_box
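
The set_apikey / enable_api_box pair moved out of the class and now toggles the same textbox between a locked and an editable state. A minimal sketch of that pattern in isolation, assuming Gradio 3.x (where gr.Textbox.update() returns an update for the component bound as the output); the KeyHolder class, the holder name, and the button label are illustrative placeholders, not code from this app:

import gradio as gr

class KeyHolder:
    # placeholder for the module-level `app` object that stores the key
    OPENAI_API_KEY = None

holder = KeyHolder()

def set_apikey(api_key):
    holder.OPENAI_API_KEY = api_key
    # lock the textbox once a key has been stored
    return gr.Textbox.update(value='OpenAI API key is Set', interactive=False)

def enable_api_box():
    # unlock the textbox so a new key can be entered
    return gr.Textbox.update(value=None, placeholder='Upload your OpenAI API key', interactive=True)

with gr.Blocks() as demo:
    api_key = gr.Textbox(placeholder='Upload your OpenAI API key', show_label=False)
    change_api_key = gr.Button('Change API key')
    api_key.submit(fn=set_apikey, inputs=[api_key], outputs=[api_key])
    change_api_key.click(fn=enable_api_box, outputs=[api_key])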


 def add_text(history, text):
 
     return history

 class my_app:
+    def __init__(self, OPENAI_API_KEY=None) -> None:
+        self.OPENAI_API_KEY = OPENAI_API_KEY
         self.chain = None
         self.chat_history = []
         self.N = 0
+        self.count = 0
+
+    def __call__(self, file) -> Any:
+        if self.count == 0:
+            print('This is here')
+            self.build_chain(file)
+            self.count += 1
+
+        return self.chain
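
my_app.__call__ now builds the retrieval chain on the first call only and hands back the cached chain afterwards; the count field is a one-shot guard. A sketch of the same idea, assuming the only goal is to avoid rebuilding the chain on every chat turn; ChainCache and its build_chain stub are illustrative stand-ins, not code from the app:

class ChainCache:
    def __init__(self):
        self.chain = None

    def __call__(self, file):
        # build once on the first call, then reuse the cached chain
        if self.chain is None:
            self.chain = self.build_chain(file)
        return self.chain

    def build_chain(self, file):
        # stand-in for the app's build_chain method
        raise NotImplementedError

Either guard shares the same caveat: once the chain exists, uploading a different PDF reuses the old index, because nothing resets the flag.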

     def chroma_client(self):
         #create a chroma client
 
         self.chain = ConversationalRetrievalChain.from_llm(ChatOpenAI(temperature=0.0, openai_api_key=self.OPENAI_API_KEY),
                                                            retriever=pdfsearch.as_retriever(search_kwargs={"k": 1}),
                                                            return_source_documents=True,)
+        return self.chain

+def get_response(history, query, file):
+    global app
     if not file:
         raise gr.Error(message='Upload a PDF')

+    chain = app(file)
+    result = chain({"question": query, 'chat_history': app.chat_history}, return_only_outputs=True)
+    app.chat_history += [(query, result["answer"])]
+    app.N = list(result['source_documents'][0])[1][1]['page']

     for char in result['answer']:
         history[-1][-1] += char
         yield history, ''
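
With return_source_documents=True and the retriever capped at k=1, the result dict carries the answer plus exactly one retrieved chunk, and list(result['source_documents'][0])[1][1]['page'] walks that Document's (field, value) pairs to reach its metadata. A more direct equivalent, assuming the classic LangChain Document API in which PyPDFLoader stores the zero-based page index under metadata['page']; source_page is a hypothetical helper, not part of the app:

def source_page(result) -> int:
    # k=1, so only one source chunk comes back with the answer
    doc = result['source_documents'][0]
    # same value the app reaches via list(doc)[1][1]['page']
    return doc.metadata['page']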

+def render_file(file):
+    global app
     doc = fitz.open(file.name)
+    page = doc[app.N]
     #Render the page as a PNG image with a resolution of 300 DPI
     pix = page.get_pixmap(matrix=fitz.Matrix(300/72, 300/72))
     image = Image.frombytes('RGB', [pix.width, pix.height], pix.samples)
     return image
+
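
render_file re-renders whichever page the last answer was drawn from, using PyMuPDF; fitz.Matrix(300/72, 300/72) scales from PDF points (72 per inch) up to roughly 300 DPI before handing the pixels to Pillow. A standalone sketch of that rendering step; render_page, the file path, and the page number are placeholders, not values from the app:

import fitz  # PyMuPDF
from PIL import Image

def render_page(path: str, page_number: int = 0) -> Image.Image:
    doc = fitz.open(path)
    page = doc[page_number]
    # 72 points per inch, so 300/72 renders the page at about 300 DPI
    pix = page.get_pixmap(matrix=fitz.Matrix(300/72, 300/72))
    return Image.frombytes('RGB', [pix.width, pix.height], pix.samples)
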
 app = my_app()
 with gr.Blocks() as demo:

     with gr.Column():
 
     with gr.Column(scale=0.20):
         btn = gr.UploadButton("📁 upload a PDF", file_types=[".pdf"]).style()

+
+    api_key.submit(fn=set_apikey, inputs=[api_key], outputs=[api_key])
+    change_api_key.click(fn=enable_api_box, outputs=[api_key])
+    btn.upload(fn=render_file, inputs=[btn], outputs=[show_img],)

+    submit_btn.click(fn=add_text, inputs=[chatbot, txt], outputs=[chatbot, ], queue=False).success(fn=get_response, inputs=[chatbot, txt, btn],
+                     outputs=[chatbot, txt]).success(fn=render_file, inputs=[btn], outputs=[show_img])


 demo.queue()
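
The wiring above chains three callbacks: add_text posts the user turn, get_response streams the answer character by character (each yield repaints the gr.Chatbot), and render_file refreshes the page image. The same chain spelled out with the steps annotated, assuming Gradio 3.x event chaining, where each .success() step fires only if the previous callback finished without raising, so the gr.Error raised when no PDF is uploaded stops the later steps:

submit_btn.click(                      # 1) append the user message to the chat
    fn=add_text, inputs=[chatbot, txt], outputs=[chatbot], queue=False
).success(                             # 2) stream the model answer into the chat
    fn=get_response, inputs=[chatbot, txt, btn], outputs=[chatbot, txt]
).success(                             # 3) show the page the answer came from
    fn=render_file, inputs=[btn], outputs=[show_img]
)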