""" This file contains all the code which defines architectures and architecture components. An architecture is modelled a pipeline of ArchitectureComponents through which an ArchitectureRequest flows. Architectures are configured in the file config/architectures.json """ import chromadb import json import logging import os import regex as re import requests import shutil import traceback from abc import ABC, abstractmethod from enum import Enum from huggingface_hub import Repository from queue import Queue from threading import Thread, Timer from time import time from typing import List, Optional, Dict, Callable from better_profanity import profanity from src.common import config_dir, data_dir, hf_api_token, escape_dollars class ArchitectureRequest: """ This class represents a request (chat query) from a user which can then be built up or modified through the pipeline process. It also holds the response to the request which again is a stack which can be modified through life. """ def __init__(self, query: str): self._request: List[str] = [query] # Stack for the request text as it evolves down the pipeline self._response: List[str] = [] # Stack for the response text as it evolves down the pipeline self.early_exit: bool = False self.early_exit_message: str = None @property def request(self): return self._request[-1] @request.setter def request(self, value: str): self._request.append(value) @property def response(self): if len(self._response) > 0: return self._response[-1] return None @response.setter def response(self, value: str): self._response.append(value) def as_markdown(self) -> str: """ Returns a markdown representation for display / testing :return: str - the markdown """ md = "- **Request evolution**" for r in self._request: md += f"\n - {r}" md += "\n- **Response evolution**" for r in self._response: md += f"\n - {r}" return escape_dollars(md) def as_dict(self) -> Dict: return {'request_evolution': self._request, 'response_evolution': self._response} class ArchitectureTraceOutcome(Enum): """ Class representing the outcome of a component step in an architecture """ NONE = 0 SUCCESS = 1 EARLY_EXIT = 2 EXCEPTION = 3 class ArchitectureTraceStep: """ Class to hold the trace details of a single step in an Architecture pipeline """ def __init__(self, name: str): self.name = name self.start_ms = int(time() * 1000) self.end_ms = None self.outcome = ArchitectureTraceOutcome.NONE self._exception: str = None self.early_exit_message: str = None def end(self, outcome: ArchitectureTraceOutcome): self.end_ms = int(time() * 1000) self.outcome = outcome @property def exception(self) -> str: return self._exception @exception.setter def exception(self, value: Exception): self._exception = f'{value}' # Hold any exception as a string in the trace def as_markdown(self) -> str: """ Converts the trace to markdown for simple display purposes :return: a string of markdown """ md = f"- **Step**: {self.name} \n" md += f" - **Start**: {self.start_ms}; **End**: {self.end_ms} \n" md += f" - **Elapsed time**: {self.end_ms - self.start_ms}ms \n" outcome = "None" if self.outcome == ArchitectureTraceOutcome.SUCCESS: outcome = "Success" elif self.outcome == ArchitectureTraceOutcome.EARLY_EXIT: outcome = f"Early Exit ({self.early_exit_message})" elif self.outcome == ArchitectureTraceOutcome.EXCEPTION: outcome = f"Exception ({self._exception})" md += f" - **Outcome**: {outcome}" return escape_dollars(md) def as_dict(self) -> Dict: return { 'name': self.name, 'start_ms': self.start_ms, 'end_ms': self.end_ms, 'outcome': str(self.outcome), 'exception': "" if self._exception is None else f"{self._exception}", 'early_exit_message': "" if self.early_exit_message is None else self.early_exit_message } class ArchitectureTrace: """ This class represents the system instrumentation / trace for a request. It holds the name for each component called, the start and end time of the component processing and the outcome of the step. """ def __init__(self): self.steps: List[ArchitectureTraceStep] = [] def start_trace(self, name: str): self.steps.append(ArchitectureTraceStep(name=name)) def end_trace(self, outcome: ArchitectureTraceOutcome, early_exit_message: str = None): assert len(self.steps) > 0 assert self.steps[-1].outcome == ArchitectureTraceOutcome.NONE self.steps[-1].end(outcome=outcome) if early_exit_message is not None: self.steps[-1].early_exit_message = early_exit_message def as_markdown(self) -> str: """ Converts the trace to markdown for simple display purposes :return: a string of markdown """ md = ' \n'.join([s.as_markdown() for s in self.steps]) return md def as_dict(self) -> Dict: return {'steps': [s.as_dict() for s in self.steps]} class ArchitectureComponent(ABC): """ This is the anbstract base class for all classes which want to be a concrete components available to be configured into an Architecture pipeline. Specifies the elements which need to be implemented to be a compliant architecture component. """ description = "Components should override a description" @abstractmethod def process_request(self, request: ArchitectureRequest) -> None: """ The principal method that concrete implementations of a component must implement. They should signal anything to the pipeline through direct modification of the provided request (i.e. amending the request text or response text, or setting the early_exit flag). :param request: The request which is flowing down the pipeline :return: None """ pass def config_description(self) -> str: """ Optional method to override for providing a string of description in markdown format for display purposes for the component :return: a markdwon string (defaulting to empty in the base class) """ return "" class LogWorker(Thread): """ The LogWorker implements a daemon thread which runs in the background to write the results of user queries through the system to a log file for analysis/reporting and offline saving. The LogWorker provides two functions to the system. 1) it moves this I/O operation out of the main architecture execution which allows for clearer understanding of the true performance of the architectures themselves. 2) it is designed to be run as a single thread to provide controlled shared access to a resource (the log file) with an in-memory queue for thread safety, which then allows us to multi-thread the architecture invocation itself. In addition to the LogWorker provides some basic batching capabilities for performance (e.g. batches up N requests before committing the IO operation to the file, or commits open activity after a set period of inactivity) """ instance = None architectures = None save_repo = None save_repo_load_error = False save_repo_url = "https://huggingface.co/datasets/alfraser/llm-arch-trace" trace_dir = "trace" trace_file_name = "trace.json" trace_file = os.path.join(trace_dir, trace_file_name) queue = Queue() commit_time = 5 # Number of seconds after which to commit with no activity commit_after = 20 # Number of records after which to commit irrespective of time commit_count = 0 # Current uncommitted records commit_timer = None # The actual commit timer - we will schedule the commit on this timeout_functions: List[Callable[[], None]] = [] # Callbacks which will be fired on timeout def run(self): while True: arch_name, request, trace, trace_tags, trace_comment = LogWorker.queue.get() if request is None: # There was a period of inactivity so run the timeout functions for func in LogWorker.timeout_functions: logging.info(f"LogWorker commit running {func.__name__}") try: func() except Exception as e: logging.error(f"Timeout func {func.__name__} had error {e}") else: if LogWorker.commit_timer is not None and LogWorker.commit_timer.is_alive(): # Cancel the inactivity timer LogWorker.commit_timer.cancel() LogWorker.commit_timer = None try: save_dict = { 'architecture': arch_name, 'request': request.as_dict(), 'trace': trace.as_dict(), 'test_tags': trace_tags, 'test_comment': trace_comment } LogWorker.append_and_save_data_as_json(save_dict) LogWorker.commit_count += 1 if LogWorker.commit_count >= LogWorker.commit_after: LogWorker.commit_repo() except Exception as err: logging.error(f"Request / trace save failed {err}") # Restart the inactivity timer LogWorker.commit_timer = Timer(LogWorker.commit_time, LogWorker.signal_commit) LogWorker.commit_timer.start() @classmethod def append_and_save_data_as_json(cls, data: Dict) -> None: """ If the working log file is not download, then get a local copy. Add the new record to the local file. """ logging.debug(f"LogWorker logging open record {LogWorker.commit_count + 1}") if cls.save_repo is None and not cls.save_repo_load_error: try: hf_write_token = hf_api_token(write=True) cls.save_repo = Repository(local_dir=cls.trace_dir, clone_from=cls.save_repo_url, token=hf_write_token) except Exception as err: cls.save_repo_load_error = True logging.error(f"Error connecting to the save repo {err} - persistence now disabled") if cls.save_repo is not None: with open(cls.trace_file, 'r') as f: test_json = json.load(f) test_json['tests'].append(data) with open(cls.trace_file, 'w') as f: json.dump(test_json, f, indent=2) @classmethod def commit_repo(cls): """ If there are any changes in the local file which are not committed to the repo then commit them. """ if cls.commit_count > 0: logging.info(f"LogWorker committing {LogWorker.commit_count} open records") cls.save_repo.push_to_hub() LogWorker.commit_count = 0 @classmethod def signal_commit(cls): # Signalling this back via the queue and not doing the work here as it would # be executed on the Timer thread and may conflict with resources if the main # LogWorker starts doing work concurrently. logging.debug("LogWorker signalling commit based on time elapsed") cls.queue.put((None, None, None, None, None)) @classmethod def write(cls, arch_name: str, request: ArchitectureRequest, trace: ArchitectureTrace, trace_tags: List[str] = None, trace_comment: str = None) -> None: """ Class method callable from across the system to put a logging request onto the queue so that the LogWorker will pick it up in turn and write it to the log """ trace_tags = [] if trace_tags is None else trace_tags trace_comment = "" if trace_comment is None else trace_comment cls.queue.put((arch_name, request, trace, trace_tags, trace_comment)) # Instantiate and run worker on import if LogWorker.instance is None: LogWorker.instance = LogWorker() LogWorker.daemon = True LogWorker.instance.start() LogWorker.timeout_functions.append(LogWorker.commit_repo) class Architecture: """ An architecture is built as a callable pipeline of steps. An ArchitectureRequest object is passed down the pipeline sequentially to each component. A component can modify the request if needed, update the response or signal an early exit. The Architecture framework also provides trace timing and logging, plus exception handling so an individual request cannot crash the system. """ architectures = None save_repo = None save_repo_load_error = False save_repo_url = "https://huggingface.co/datasets/alfraser/llm-arch-trace" trace_dir = "trace" trace_file_name = "trace.json" trace_file = os.path.join(trace_dir, trace_file_name) @classmethod def wipe_trace(cls, hf_write_token:str = None) -> None: """ Wipes the json trace file - note will not delete any records which have been saved offline to the database """ if os.path.exists(cls.trace_dir): shutil.rmtree(cls.trace_dir) try: if hf_write_token is None: hf_write_token = hf_api_token(write=True) cls.save_repo = Repository(local_dir=cls.trace_dir, clone_from=cls.save_repo_url, token=hf_write_token) test_json = {'tests': []} with open(cls.trace_file, 'w') as f: json.dump(test_json, f, indent=2) cls.save_repo.push_to_hub() except Exception as err: cls.save_repo_load_error = True logging.error(f"Error connecting to the save repo {err} - persistence now disabled") @classmethod def get_trace_records(cls) -> List[Dict]: """ Loads and returns all the trace records which are held in the trace file """ if not os.path.isfile(cls.trace_file): hf_write_token = hf_api_token(write=True) try: cls.save_repo = Repository(local_dir=cls.trace_dir, clone_from=cls.save_repo_url, token=hf_write_token) except Exception as err: cls.save_repo_load_error = True logging.error(f"Error connecting to the save repo {err} - persistence now disabled") return [] with open(cls.trace_file, 'r') as f: test_json = json.load(f) return test_json['tests'] @classmethod def load_architectures(cls, force_reload: bool = False) -> None: """ Class method to load the configuration file and try and set up architectures for each config entry (a named sequence of components with optional setup params). :param force_reload: A bool of whether to force a reload, defaults to False. """ if cls.architectures is None or force_reload: config_file = os.path.join(config_dir, "architectures.json") with open(config_file, "r") as f: configs = json.load(f)['architectures'] archs = [] for c in configs: arch_name = c['name'] arch_description = c['description'] arch_img = None if 'img' in c: arch_img = c['img'] arch_comps = [] for s in c['steps']: component_class_name = s['class'] component_init_params = {} if 'params' in s: component_init_params = s['params'] arch_comps.append(globals()[component_class_name](**component_init_params)) arch = Architecture(name=arch_name, description=arch_description, steps=arch_comps, img=arch_img) archs.append(arch) cls.architectures = archs @classmethod def get_architecture(cls, name: str): """ Lookup an architecture by name :param name: The name of the architecture to look up :return: The architecture object """ if cls.architectures is None: cls.load_architectures() for a in cls.architectures: if a.name == name: return a raise ValueError(f"Could not find an architecture named {name}") def __init__(self, name: str, description: str, steps: List[ArchitectureComponent], img: Optional[str] = None, exception_text: str = "Sorry an internal technical error occurred.", no_response_text: str = "Sorry I can't answer that."): self.name = name self.description = description self.steps = steps self.img = img self.exception_text = exception_text self.no_response_text = no_response_text def __call__(self, request: ArchitectureRequest, trace_tags: List[str] = None, trace_comment: str = None) -> ArchitectureTrace: """ The main entry point to call the pipeline. Passes the request through each pipeline step in sequence, allowing them to amend the request or early exit the processing. Also captures exceptions and generates the trace, plus saves the request/response and the trace to a store for analysis. :param request: The architecture request to pass down the pipeline :return: The trace record for this invocation of the architecture """ logging.info(f'{self.name} processing query "{request.request}"') trace = ArchitectureTrace() for component in self.steps: trace.start_trace(name=component.__class__.__name__) try: component.process_request(request) if request.early_exit: trace.end_trace(outcome=ArchitectureTraceOutcome.EARLY_EXIT, early_exit_message=request.early_exit_message) break else: trace.end_trace(outcome=ArchitectureTraceOutcome.SUCCESS) except Exception as err: trace.end_trace(outcome=ArchitectureTraceOutcome.EXCEPTION) trace.steps[-1].exception = err traceback.print_exc() break LogWorker.write(self.name, request, trace, trace_tags, trace_comment) return trace class InputRequestScreener(ArchitectureComponent): """ This is a concrete component which screens the input query for profanity using an off the shelf profanity search library (better_profanity) """ description = "Simplistic input screener for demonstration. Screens inputs for profanity." def process_request(self, request: ArchitectureRequest) -> None: if profanity.contains_profanity(request.request): request.response = "Sorry - I cannot answer this question. Please try and rephrase it." request.early_exit = True request.early_exit_message = "Profanity detected in request" class OutputResponseScreener(ArchitectureComponent): """ This is a concrete component designed to review the final response before showing it to the user. It is a simple exemplar component using a call to the baseline LLM just with the response text and asking the baseline LLM if it contains anything offensive. This is illustrative only and should not be considered a best in class or production usable safety implementation. """ description = "Screens outputs for offensive responses." def __init__(self): self.api_token = hf_api_token() self.endpoint_url = "https://yl89ru8gdr1wkbej.eu-west-1.aws.endpoints.huggingface.cloud" def process_request(self, request: ArchitectureRequest) -> None: system_prompt = "You are screening for offensive content. In a single word (yes or no), is the response offensive?" headers = { "Accept": "application/json", "Authorization": f"Bearer {self.api_token}", "Content-Type": "application/json" } query_input = f"[INST] <> {system_prompt} <> {request.response} [/INST] " payload = { "inputs": query_input, "parameters": { "temperature": 0.1, "max_new_tokens": 10 } } llm_response = requests.post(self.endpoint_url, headers=headers, json=payload) generated_text = json.loads(llm_response.text)[0]['generated_text'].strip() if len(generated_text) > 2 and generated_text[0:3].lower() == 'yes': # Too many false positives getting blocked so tweaked to lean relaxed for the demo request.response = "Sorry - I cannot answer this question. Please try and rephrase it." request.early_exit = True class RetrievalAugmentor(ArchitectureComponent): """ This is a concrete implementation of the RAG augmentation component of the RAG architecture. Takes the current input request, queries the vector store for documents and then appends these documents into the beginning of the LLM prompt, ready for inference. """ description = "Retrieves appropriate documents from the store and then augments the request." def __init__(self, vector_store: str, doc_count: int = 5): chroma_db = os.path.join(data_dir, 'vector_stores', f'{vector_store}_chroma') self.vector_store = chroma_db client = chromadb.PersistentClient(path=chroma_db) self.collection = client.get_collection(name='products') self.doc_count = doc_count def process_request(self, request: ArchitectureRequest) -> None: # Get the count nearest documents from the doc store input_query = request.request results = self.collection.query(query_texts=[input_query], n_results=self.doc_count) documents = results['documents'][0] # Index 0 as we are always asking one question # Update the request to include the retrieved documents new_query = '{"background": [' new_query += ', '.join([f'"{d}"' for d in documents]) new_query += ']}\n\nQUESTION: ' new_query += input_query # Put the request back into the architecture request request.request = new_query def config_description(self) -> str: """ Custom config details as markdown """ desc = f"Vector Store: {self.vector_store}; " desc += f"Max docs: {self.doc_count}" return desc class HFInferenceEndpoint(ArchitectureComponent): """ A concrete pipeline component which sends the current query to a given llama chat based inference endpoint on HuggingFace """ def __init__(self, endpoint_url: str, model_name: str, system_prompt: str, max_new_tokens: int, temperature: float = 1.0, prompt_style: str = "multi_line"): self.endpoint_url: str = endpoint_url self.prompt_style = prompt_style self.model_name: str = model_name self.system_prompt: str = system_prompt self.max_new_tokens = max_new_tokens self.api_token = hf_api_token() self.temperature = temperature def config_description(self) -> str: """ Custom config details as markdown """ desc = f"Model: {self.model_name}; " desc += f"Endpoint: {self.endpoint_url}; " desc += f"Max tokens: {self.max_new_tokens}; " desc += f"Temperature: {self.temperature}; " desc += f"System prompt: {self.system_prompt}" return desc def process_request(self, request: ArchitectureRequest) -> None: """ Main processing method for this function. Calls the HTTP service for the model by port if provided or attempting to lookup by name, and then adds this to the response element of the request. Support different prompt styles that were tested during testing to determine the best way to get a good response from the various LLM endpoints. """ headers = { "Accept": "application/json", "Authorization": f"Bearer {self.api_token}", "Content-Type": "application/json" } if self.prompt_style == "multi_line": query_input = f"[INST] <>\n{self.system_prompt}\n<>\n\n{request.request} [/INST] " elif self.prompt_style == "multi_line_no_sys": query_input = f"[INST]\n{request.request} [/INST] " elif self.prompt_style == "single_line_no_sys": query_input = f"[INST] {request.request} [/INST] " elif self.prompt_style == "single_line": query_input = f"[INST] <>\n{self.system_prompt}\n<> {request.request} [/INST] " elif self.prompt_style == "multi_line_with_roles": query_input = f"<>\n{self.system_prompt}\n<>\n[INST]\nUser:{request.request}\n[/INST]\n\nAssistant:" elif self.prompt_style == "raw": # No formatting - used to just send things straight through from the front end query_input = request.request else: raise ValueError(f"Config error - Unknown prompt style: {self.prompt_style}") payload = { "inputs": query_input, "parameters": { "temperature": self.temperature, "max_new_tokens": self.max_new_tokens } } llm_response = requests.post(self.endpoint_url, headers=headers, json=payload) if llm_response.status_code == 200: generated_text = llm_response.json()[0]['generated_text'].strip() request.response = generated_text elif llm_response.status_code == 502: request.response = "Received 502 error from LLM service - service initialising, try again shortly" else: request.response = f"Received {llm_response.status_code} - {llm_response.text}" class ResponseTrimmer(ArchitectureComponent): """ A concrete pipeline component which trims the response based on a regex match, then uppercases the first character of what is left. """ description = "Trims the response based on a regex" def __init__(self, regexes: List[str]): quoted_regexes = [f'"{r}"' for r in regexes] self.regex_display = f"[{', '.join(quoted_regexes)}]" self.regexes = [re.compile(r, re.IGNORECASE) for r in regexes] def process_request(self, request: ArchitectureRequest): new_response = request.response for regex in self.regexes: new_response = regex.sub('', new_response) new_response = new_response[:1].upper() + new_response[1:] request.response = new_response def config_description(self) -> str: return f"Regexes: {self.regex_display}"