Spaces:
Sleeping
Sleeping
Mr-Vicky-01
committed on
Commit
•
93a48f2
1
Parent(s):
f94f9d9
Update app.py
Browse files
app.py
CHANGED
@@ -3,6 +3,7 @@ from PyPDF2 import PdfReader
|
|
3 |
from langchain.text_splitter import RecursiveCharacterTextSplitter
|
4 |
import os
|
5 |
from langchain_google_genai import GoogleGenerativeAIEmbeddings
|
|
|
6 |
from langchain.embeddings import HuggingFaceInferenceAPIEmbeddings
|
7 |
import google.generativeai as genai
|
8 |
from langchain.vectorstores import FAISS
|
@@ -13,6 +14,7 @@ import time
|
|
13 |
from dotenv import load_dotenv
|
14 |
|
15 |
genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
|
|
|
16 |
|
17 |
def get_pdf_text(pdf_docs):
|
18 |
text = ""
|
@@ -42,7 +44,9 @@ def get_conversational_chain():
|
|
42 |
|
43 |
Answer:
|
44 |
"""
|
45 |
-
model = ChatGoogleGenerativeAI(model="gemini-pro", temperature=0.1)
|
|
|
|
|
46 |
prompt = PromptTemplate(template=prompt_template, input_variables=["context", "question"])
|
47 |
chain = load_qa_chain(model, chain_type="stuff", prompt=prompt)
|
48 |
return chain
|
|
|
3 |
from langchain.text_splitter import RecursiveCharacterTextSplitter
|
4 |
import os
|
5 |
from langchain_google_genai import GoogleGenerativeAIEmbeddings
|
6 |
+
from langchain.llms import HuggingFaceHub
|
7 |
from langchain.embeddings import HuggingFaceInferenceAPIEmbeddings
|
8 |
import google.generativeai as genai
|
9 |
from langchain.vectorstores import FAISS
|
|
|
14 |
from dotenv import load_dotenv
|
15 |
|
16 |
genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
|
17 |
+
os.environ["HUGGINGFACEHUB_API_TOKEN"] = os.getenv("HF_TOKEN")
|
18 |
|
19 |
def get_pdf_text(pdf_docs):
|
20 |
text = ""
|
|
|
44 |
|
45 |
Answer:
|
46 |
"""
|
47 |
+
# model = ChatGoogleGenerativeAI(model="gemini-pro", temperature=0.1)
|
48 |
+
model = HuggingFaceHub(repo_id="google/gemma-1.1-7b-it",
|
49 |
+
model_kwargs={"temperature": 0.2,"max_new_tokens":512})
|
50 |
prompt = PromptTemplate(template=prompt_template, input_variables=["context", "question"])
|
51 |
chain = load_qa_chain(model, chain_type="stuff", prompt=prompt)
|
52 |
return chain
|