Chris4K committed
Commit c299bfb · verified · Parent: 85f410a

Update services/llama_generator.py

Files changed (1)
  services/llama_generator.py +1 -0
services/llama_generator.py CHANGED
@@ -86,6 +86,7 @@ class LlamaGenerator(BaseGenerator):
             raise ValueError(f"Failed to load model: {llama_model_name}")
 
         self.prm_model = self.model_manager.models.get("prm")
+        #self.prm_tokenizer = self.model_manager.load_tokenizer(prm_model_path) # Add this line to initialize the tokenizer
 
 
         self.prompt_builder = LlamaPromptTemplate()
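
The new line is left commented out, most likely because prm_model_path is not in scope at this point in LlamaGenerator's __init__. Below is a minimal sketch of how the PRM model and tokenizer lookup could fit together; the ModelManager internals, the prm_model_path parameter, and the load_tokenizer signature are assumptions based on the names in the diff, not the repository's actual implementation.

from transformers import AutoModelForCausalLM, AutoTokenizer


class ModelManager:
    """Illustrative stand-in for the repo's model manager (assumed API)."""

    def __init__(self):
        self.models = {}      # logical model name -> loaded model
        self.tokenizers = {}  # model path -> loaded tokenizer

    def load_model(self, name, path):
        # Cache models by logical name ("llama", "prm", ...).
        self.models[name] = AutoModelForCausalLM.from_pretrained(path)
        return self.models[name]

    def load_tokenizer(self, path):
        # Cache tokenizers by path so repeated lookups are cheap.
        if path not in self.tokenizers:
            self.tokenizers[path] = AutoTokenizer.from_pretrained(path)
        return self.tokenizers[path]


class LlamaGenerator:
    """Sketch of the __init__ section touched by this commit."""

    def __init__(self, model_manager, llama_model_name, prm_model_path=None):
        self.model_manager = model_manager

        if llama_model_name not in self.model_manager.models:
            raise ValueError(f"Failed to load model: {llama_model_name}")

        # Process-reward model fetched from the shared cache (may be None).
        self.prm_model = self.model_manager.models.get("prm")

        # The commented-out line from the diff would become active once a
        # prm_model_path (hypothetical parameter here) is available.
        if prm_model_path is not None:
            self.prm_tokenizer = self.model_manager.load_tokenizer(prm_model_path)
        else:
            self.prm_tokenizer = None

        # ... prompt builder initialization follows in the real class.

Keeping the tokenizer lookup on the same ModelManager that caches the PRM model, as the commented-out line suggests, would keep all heavyweight loading behind one cache rather than re-instantiating tokenizers per generator.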