quantamentalfinance committed
Commit 140e718
1 Parent(s): 288bd7f

Update app.py

Files changed (1)
  1. app.py +4 -1
app.py CHANGED
@@ -22,6 +22,7 @@ from langchain.chains.question_answering import load_qa_chain
 
 ### 4. For Gradio App UI
 import gradio as gr
+from huggingface_hub import InferenceClient
 
 fmp_api_key = os.environ['FMP_API_KEY']
 
@@ -40,10 +41,12 @@ chroma_db = Chroma(persist_directory='chromadb_earnings_transcripts_extracted/ch
 # Load the huggingface inference endpoint of an LLM model
 # Name of the LLM model we are using, feel free to try others!
 model = "mistralai/Mistral-7B-Instruct-v0.1"
+hf_client = InferenceClient(model_id=model)
 
 # This is an inference endpoint API from huggingface, the model is not run locally, it is run on huggingface
-hf_llm = HuggingFaceHub(repo_id=model,model_kwargs={'temperature':0.5,"max_new_tokens":300})
+hf_llm = HuggingFaceHub(repo_id=model,model_kwargs={'temperature':0.5,"max_new_tokens":200})
 
+print("### Chroma DB and LLM model loaded successfully...")
 
 def source_question_answer(query:str,vectorstore:Chroma=chroma_db,llm:HuggingFaceHub=hf_llm):
     """