from langchain_community.llms.ctransformers import CTransformers

# Configuration for a locally stored, quantized Mistral GGUF model.
MODEL_TYPE = "mistral"
MODEL_BIN_PATH = "mistral-7b-instruct-v0.1.Q3_K_S.gguf"
MAX_NEW_TOKEN = 600
TEMPERATURE = 0.01
CONTEXT_LENGTH = 6000


class LLMWrapper:
    """Thin wrapper around a CTransformers-backed local Mistral model."""

    def __init__(self):
        # Load the quantized GGUF model through ctransformers with the
        # generation settings defined above.
        self.llm = CTransformers(
            model=MODEL_BIN_PATH,
            model_type=MODEL_TYPE,
            config={
                "max_new_tokens": MAX_NEW_TOKEN,
                "temperature": TEMPERATURE,
                "context_length": CONTEXT_LENGTH,
            },
        )

    def generate_text(self, prompt):
        # invoke() is the current LangChain call path; calling the LLM
        # object directly is deprecated.
        return self.llm.invoke(prompt)
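

# A minimal usage sketch, assuming the GGUF file named above has already been
# downloaded into the working directory; the prompt below is illustrative only.
if __name__ == "__main__":
    wrapper = LLMWrapper()
    answer = wrapper.generate_text(
        "Explain in two sentences what a quantized GGUF model is."
    )
    print(answer)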