Mr-Vicky-01 committed on
Commit
990d40d
1 Parent(s): f5ac43e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -2,7 +2,7 @@ import streamlit as st
2
  from PyPDF2 import PdfReader
3
  from langchain.text_splitter import RecursiveCharacterTextSplitter
4
  import os
5
- from langchain.chain import LLMChain
6
  from langchain_google_genai import GoogleGenerativeAIEmbeddings
7
  from langchain.llms import HuggingFaceHub
8
  from langchain.embeddings import HuggingFaceInferenceAPIEmbeddings
@@ -47,7 +47,7 @@ def get_conversational_chain():
47
  """
48
  # model = ChatGoogleGenerativeAI(model="gemini-pro", temperature=0.1)
49
  model = HuggingFaceHub(repo_id="google/gemma-1.1-7b-it",
50
- model_kwargs={"temperature": 0.2,"max_new_tokens":512})
51
  prompt = PromptTemplate(template=prompt_template, input_variables=["context", "question"])
52
  chain = LLMChain(llm=model, chain_type="stuff", prompt=prompt)
53
  return chain
 
2
  from PyPDF2 import PdfReader
3
  from langchain.text_splitter import RecursiveCharacterTextSplitter
4
  import os
5
+ from langchain import LLMChain
6
  from langchain_google_genai import GoogleGenerativeAIEmbeddings
7
  from langchain.llms import HuggingFaceHub
8
  from langchain.embeddings import HuggingFaceInferenceAPIEmbeddings
 
47
  """
48
  # model = ChatGoogleGenerativeAI(model="gemini-pro", temperature=0.1)
49
  model = HuggingFaceHub(repo_id="google/gemma-1.1-7b-it",
50
+ model_kwargs={"temperature": 0.2,"max_new_tokens":512}, return_only_answer=True)
51
  prompt = PromptTemplate(template=prompt_template, input_variables=["context", "question"])
52
  chain = LLMChain(llm=model, chain_type="stuff", prompt=prompt)
53
  return chain