userlocallm committed on
Commit
90bf81b
·
verified ·
1 Parent(s): 68b11c0

Upload app.py

Browse files
Files changed (1) hide show
  1. app.py +16 -3
app.py CHANGED
@@ -8,6 +8,7 @@ import requests
8
  import logging
9
  import subprocess
10
  from llama_cpp import Llama # Import Llama from llama_cpp
 
11
 
12
  # Configure logging
13
  logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
@@ -20,9 +21,21 @@ def install_requirements():
20
  except subprocess.CalledProcessError as e:
21
  logging.error(f"Failed to install requirements: {e}")
22
 
 
 
 
 
 
 
 
 
23
  # Install requirements
24
  install_requirements()
25
 
 
 
 
 
26
  # Create the directory if it doesn't exist
27
  local_dir = "models"
28
  os.makedirs(local_dir, exist_ok=True)
@@ -67,8 +80,8 @@ def respond(
67
  # Load the model with the maximum context length and control the maximum tokens in the response
68
  llm = Llama(
69
  model_path=model_path,
70
- n_ctx=5000, # Set the maximum context length
71
- max_tokens=512 # Control the maximum number of tokens generated in the response
72
  )
73
 
74
  agent = Agent(llm, db_path, system_prompt)
@@ -88,4 +101,4 @@ demo = gr.ChatInterface(
88
  )
89
 
90
  if __name__ == "__main__":
91
- demo.launch(server_name="0.0.0.0", server_port=7860)
 
8
  import logging
9
  import subprocess
10
  from llama_cpp import Llama # Import Llama from llama_cpp
11
+ import spacy
12
 
13
  # Configure logging
14
  logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
 
21
  except subprocess.CalledProcessError as e:
22
  logging.error(f"Failed to install requirements: {e}")
23
 
24
# Function to download the spaCy model
def download_spacy_model(model_name):
    """Download a spaCy model by running ``python -m spacy download <model_name>``.

    Uses the current interpreter so the model is installed into this
    environment. Never raises: a failed download is reported via
    ``logging.error`` and the function returns ``None`` either way.
    """
    # Function-scope import: the original used ``os.sys.executable``, which
    # depends on the non-public fact that ``os`` imports ``sys``; the
    # documented spelling is ``sys.executable``.
    import sys

    try:
        subprocess.check_call([sys.executable, '-m', 'spacy', 'download', model_name])
        logging.info(f"SpaCy model {model_name} downloaded successfully.")
    except subprocess.CalledProcessError as e:
        logging.error(f"Failed to download SpaCy model {model_name}: {e}")
31
+
32
  # Install requirements
33
  install_requirements()
34
 
35
+ # Download the spaCy model if it doesn't exist
36
+ if not spacy.util.is_package('en_core_web_lg'):
37
+ download_spacy_model('en_core_web_lg')
38
+
39
  # Create the directory if it doesn't exist
40
  local_dir = "models"
41
  os.makedirs(local_dir, exist_ok=True)
 
80
  # Load the model with the maximum context length and control the maximum tokens in the response
81
  llm = Llama(
82
  model_path=model_path,
83
+ n_ctx=500, # Set the maximum context length
84
+ max_tokens=500 # Control the maximum number of tokens generated in the response
85
  )
86
 
87
  agent = Agent(llm, db_path, system_prompt)
 
101
  )
102
 
103
  if __name__ == "__main__":
104
+ demo.launch()