"""
This file contains all the code which defines architectures and
architecture components.  An architecture is modelled as a pipeline of ArchitectureComponents
through which an ArchitectureRequest flows.  Architectures are configured in the file
config/architectures.json
"""

import chromadb
import json
import logging
import os
import regex as re
import requests
import shutil
import traceback

from abc import ABC, abstractmethod
from enum import Enum
from huggingface_hub import Repository
from queue import Queue
from threading import Thread, Timer
from time import time
from typing import List, Optional, Dict, Callable
from better_profanity import profanity

from src.common import config_dir, data_dir, hf_api_token, escape_dollars


class ArchitectureRequest:
    """
    This class represents a request (chat query) from a user which can then be built up or
    modified as it passes through the pipeline. It also holds the response to the request,
    which is likewise a stack that can be modified over the lifetime of the request.
    """
    def __init__(self, query: str):
        self._request: List[str] = [query]  # Stack for the request text as it evolves down the pipeline
        self._response: List[str] = []  # Stack for the response text as it evolves down the pipeline
        self.early_exit: bool = False
        self.early_exit_message: Optional[str] = None

    @property
    def request(self):
        return self._request[-1]

    @request.setter
    def request(self, value: str):
        self._request.append(value)

    @property
    def response(self):
        if len(self._response) > 0:
            return self._response[-1]
        return None

    @response.setter
    def response(self, value: str):
        self._response.append(value)

    def as_markdown(self) -> str:
        """
        Returns a markdown representation for display / testing
        :return: str - the markdown
        """
        md = "- **Request evolution**"
        for r in self._request:
            md += f"\n  - {r}"
        md += "\n- **Response evolution**"
        for r in self._response:
            md += f"\n  - {r}"
        return escape_dollars(md)

    def as_dict(self) -> Dict:
        return {'request_evolution': self._request, 'response_evolution': self._response}
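
# A minimal usage sketch (hypothetical values) of the request/response stacks above:
#
#   req = ArchitectureRequest("What speakers do you sell?")
#   req.request = "CONTEXT...\n\nQUESTION: What speakers do you sell?"  # push a revised request
#   req.response = "We sell the SoundMax range."                        # push a response
#   req.request   -> the latest request text (top of the stack)
#   req.response  -> the latest response text, or None if nothing has been pushed yet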


class ArchitectureTraceOutcome(Enum):
    """
    Class representing the outcome of a component step in an architecture
    """
    NONE = 0
    SUCCESS = 1
    EARLY_EXIT = 2
    EXCEPTION = 3


class ArchitectureTraceStep:
    """
    Class to hold the trace details of a single step in an Architecture pipeline
    """
    def __init__(self, name: str):
        self.name = name
        self.start_ms = int(time() * 1000)
        self.end_ms = None
        self.outcome = ArchitectureTraceOutcome.NONE
        self._exception: Optional[str] = None
        self.early_exit_message: Optional[str] = None

    def end(self, outcome: ArchitectureTraceOutcome):
        self.end_ms = int(time() * 1000)
        self.outcome = outcome

    @property
    def exception(self) -> str:
        return self._exception

    @exception.setter
    def exception(self, value: Exception):
        self._exception = f'{value}'  # Hold any exception as a string in the trace

    def as_markdown(self) -> str:
        """
        Converts the trace to markdown for simple display purposes
        :return: a string of markdown
        """
        md = f"- **Step**: {self.name}  \n"
        md += f"  - **Start**: {self.start_ms}; **End**: {self.end_ms}  \n"
        md += f"  - **Elapsed time**: {self.end_ms - self.start_ms}ms  \n"
        outcome = "None"
        if self.outcome == ArchitectureTraceOutcome.SUCCESS:
            outcome = "Success"
        elif self.outcome == ArchitectureTraceOutcome.EARLY_EXIT:
            outcome = f"Early Exit ({self.early_exit_message})"
        elif self.outcome == ArchitectureTraceOutcome.EXCEPTION:
            outcome = f"Exception ({self._exception})"
        md += f"  - **Outcome**: {outcome}"
        return escape_dollars(md)

    def as_dict(self) -> Dict:
        return {
            'name': self.name,
            'start_ms': self.start_ms,
            'end_ms': self.end_ms,
            'outcome': str(self.outcome),
            'exception': "" if self._exception is None else f"{self._exception}",
            'early_exit_message': "" if self.early_exit_message is None else self.early_exit_message
        }


class ArchitectureTrace:
    """
    This class represents the system instrumentation / trace for a request. It holds the name
    for each component called, the start and end time of the component processing and the outcome
    of the step.
    """
    def __init__(self):
        self.steps: List[ArchitectureTraceStep] = []

    def start_trace(self, name: str):
        self.steps.append(ArchitectureTraceStep(name=name))

    def end_trace(self, outcome: ArchitectureTraceOutcome, early_exit_message: str = None):
        assert len(self.steps) > 0
        assert self.steps[-1].outcome == ArchitectureTraceOutcome.NONE
        self.steps[-1].end(outcome=outcome)
        if early_exit_message is not None:
            self.steps[-1].early_exit_message = early_exit_message

    def as_markdown(self) -> str:
        """
        Converts the trace to markdown for simple display purposes
        :return: a string of markdown
        """
        md = '  \n'.join([s.as_markdown() for s in self.steps])
        return md

    def as_dict(self) -> Dict:
        return {'steps': [s.as_dict() for s in self.steps]}


class ArchitectureComponent(ABC):
    """
    This is the abstract base class for all classes which want to be concrete components available
    to be configured into an Architecture pipeline.  It specifies the elements which need to be
    implemented to be a compliant architecture component.
    """
    description = "Components should override a description"

    @abstractmethod
    def process_request(self, request: ArchitectureRequest) -> None:
        """
        The principal method that concrete implementations of a component must implement.
        They should signal anything to the pipeline through direct modification of the provided
        request (i.e. amending the request text or response text, or setting the early_exit flag).
        :param request: The request which is flowing down the pipeline
        :return: None
        """
        pass

    def config_description(self) -> str:
        """
        Optional method to override, providing a description of the component's configuration
        in markdown format for display purposes.
        :return: a markdown string (defaulting to empty in the base class)
        """
        return ""


class LogWorker(Thread):
    """
    The LogWorker implements a daemon thread which runs in the background to write the results
    of user queries through the system to a log file for analysis/reporting and offline saving.
    The LogWorker provides two functions to the system: 1) it moves this I/O operation out of the
    main architecture execution, which allows for a clearer understanding of the true performance
    of the architectures themselves; 2) it runs as a single thread to provide controlled shared
    access to a resource (the log file), with an in-memory queue for thread safety, which then
    allows us to multi-thread the architecture invocation itself.  In addition, the LogWorker
    provides some basic batching capabilities for performance (e.g. it batches up N requests
    before committing the I/O operation to the file, and commits open activity after a set
    period of inactivity).
    """
    instance = None
    architectures = None
    save_repo = None
    save_repo_load_error = False
    save_repo_url = "https://huggingface.co/datasets/alfraser/llm-arch-trace"
    trace_dir = "trace"
    trace_file_name = "trace.json"
    trace_file = os.path.join(trace_dir, trace_file_name)
    queue = Queue()
    commit_time = 5  # Number of seconds after which to commit with no activity
    commit_after = 20  # Number of records after which to commit irrespective of time
    commit_count = 0  # Current uncommitted records
    commit_timer = None  # The actual commit timer - we will schedule the commit on this
    timeout_functions: List[Callable[[], None]] = []  # Callbacks which will be fired on timeout

    def run(self):
        while True:
            arch_name, request, trace, trace_tags, trace_comment = LogWorker.queue.get()
            if request is None:
                # There was a period of inactivity so run the timeout functions
                for func in LogWorker.timeout_functions:
                    logging.info(f"LogWorker commit running {func.__name__}")
                    try:
                        func()
                    except Exception as e:
                        logging.error(f"Timeout func {func.__name__} had error {e}")
            else:
                if LogWorker.commit_timer is not None and LogWorker.commit_timer.is_alive():
                    # Cancel the inactivity timer
                    LogWorker.commit_timer.cancel()
                    LogWorker.commit_timer = None
                try:
                    save_dict = {
                        'architecture': arch_name,
                        'request': request.as_dict(),
                        'trace': trace.as_dict(),
                        'test_tags': trace_tags,
                        'test_comment': trace_comment
                    }
                    LogWorker.append_and_save_data_as_json(save_dict)
                    LogWorker.commit_count += 1
                    if LogWorker.commit_count >= LogWorker.commit_after:
                        LogWorker.commit_repo()
                except Exception as err:
                    logging.error(f"Request / trace save failed {err}")

                # Restart the inactivity timer
                LogWorker.commit_timer = Timer(LogWorker.commit_time, LogWorker.signal_commit)
                LogWorker.commit_timer.start()

    @classmethod
    def append_and_save_data_as_json(cls, data: Dict) -> None:
        """
        If the working log file has not been downloaded, first get a local copy,
        then add the new record to the local file.
        """
        logging.debug(f"LogWorker logging open record {LogWorker.commit_count + 1}")
        if cls.save_repo is None and not cls.save_repo_load_error:
            try:
                hf_write_token = hf_api_token(write=True)
                cls.save_repo = Repository(local_dir=cls.trace_dir, clone_from=cls.save_repo_url, token=hf_write_token)
            except Exception as err:
                cls.save_repo_load_error = True
                logging.error(f"Error connecting to the save repo {err} - persistence now disabled")

        if cls.save_repo is not None:
            with open(cls.trace_file, 'r') as f:
                test_json = json.load(f)
            test_json['tests'].append(data)
            with open(cls.trace_file, 'w') as f:
                json.dump(test_json, f, indent=2)

    @classmethod
    def commit_repo(cls):
        """
        If there are any changes in the local file which are not committed to the repo then commit them.
        """
        if cls.commit_count > 0:
            logging.info(f"LogWorker committing {LogWorker.commit_count} open records")
            cls.save_repo.push_to_hub()
            LogWorker.commit_count = 0

    @classmethod
    def signal_commit(cls):
        # Signalling this back via the queue and not doing the work here as it would
        # be executed on the Timer thread and may conflict with resources if the main
        # LogWorker starts doing work concurrently.
        logging.debug("LogWorker signalling commit based on time elapsed")
        cls.queue.put((None, None, None, None, None))

    @classmethod
    def write(cls, arch_name: str, request: ArchitectureRequest, trace: ArchitectureTrace,
              trace_tags: List[str] = None, trace_comment: str = None) -> None:
        """
        Class method callable from across the system to put a logging request onto the queue so that
        the LogWorker will pick it up in turn and write it to the log
        """
        trace_tags = [] if trace_tags is None else trace_tags
        trace_comment = "" if trace_comment is None else trace_comment
        cls.queue.put((arch_name, request, trace, trace_tags, trace_comment))


# Instantiate and run worker on import
if LogWorker.instance is None:
    LogWorker.instance = LogWorker()
    LogWorker.instance.daemon = True  # Daemonise the instance so it does not block interpreter shutdown
    LogWorker.instance.start()
    LogWorker.timeout_functions.append(LogWorker.commit_repo)
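
# For illustration: with commit_after = 20 and commit_time = 5 as configured above, a burst of
# 20 or more logged records triggers an immediate push to the hub, while a trailing partial
# batch is pushed roughly 5 seconds after the queue goes quiet.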


class Architecture:
    """
    An architecture is built as a callable pipeline of steps. An
    ArchitectureRequest object is passed down the pipeline sequentially
    to each component.  A component can modify the request if needed, update the response
    or signal an early exit.  The Architecture framework also provides trace timing
    and logging, plus exception handling so an individual request cannot
    crash the system.
    """
    architectures = None
    save_repo = None
    save_repo_load_error = False
    save_repo_url = "https://huggingface.co/datasets/alfraser/llm-arch-trace"
    trace_dir = "trace"
    trace_file_name = "trace.json"
    trace_file = os.path.join(trace_dir, trace_file_name)

    @classmethod
    def wipe_trace(cls, hf_write_token: str = None) -> None:
        """
        Wipes the JSON trace file. Note this will not delete any records which have been saved offline to the database.
        """
        if os.path.exists(cls.trace_dir):
            shutil.rmtree(cls.trace_dir)
        try:
            if hf_write_token is None:
                hf_write_token = hf_api_token(write=True)
            cls.save_repo = Repository(local_dir=cls.trace_dir, clone_from=cls.save_repo_url, token=hf_write_token)
            test_json = {'tests': []}
            with open(cls.trace_file, 'w') as f:
                json.dump(test_json, f, indent=2)
            cls.save_repo.push_to_hub()
        except Exception as err:
            cls.save_repo_load_error = True
            logging.error(f"Error connecting to the save repo {err} - persistence now disabled")

    @classmethod
    def get_trace_records(cls) -> List[Dict]:
        """
        Loads and returns all the trace records which are held in the trace file
        """
        if not os.path.isfile(cls.trace_file):
            hf_write_token = hf_api_token(write=True)
            try:
                cls.save_repo = Repository(local_dir=cls.trace_dir, clone_from=cls.save_repo_url, token=hf_write_token)
            except Exception as err:
                cls.save_repo_load_error = True
                logging.error(f"Error connecting to the save repo {err} - persistence now disabled")
                return []
        with open(cls.trace_file, 'r') as f:
            test_json = json.load(f)
            return test_json['tests']

    @classmethod
    def load_architectures(cls, force_reload: bool = False) -> None:
        """
        Class method to load the configuration file and try to set up an architecture for each
        config entry (a named sequence of components with optional setup params).
        :param force_reload: A bool of whether to force a reload; defaults to False.
        """
        if cls.architectures is None or force_reload:
            config_file = os.path.join(config_dir, "architectures.json")
            with open(config_file, "r") as f:
                configs = json.load(f)['architectures']
                archs = []
                for c in configs:
                    arch_name = c['name']
                    arch_description = c['description']
                    arch_img = None
                    if 'img' in c:
                        arch_img = c['img']
                    arch_comps = []
                    for s in c['steps']:
                        component_class_name = s['class']
                        component_init_params = {}
                        if 'params' in s:
                            component_init_params = s['params']
                        arch_comps.append(globals()[component_class_name](**component_init_params))
                    arch = Architecture(name=arch_name, description=arch_description, steps=arch_comps, img=arch_img)
                    archs.append(arch)
            cls.architectures = archs

    @classmethod
    def get_architecture(cls, name: str):
        """
        Lookup an architecture by name
        :param name: The name of the architecture to look up
        :return: The architecture object
        """
        if cls.architectures is None:
            cls.load_architectures()
        for a in cls.architectures:
            if a.name == name:
                return a
        raise ValueError(f"Could not find an architecture named {name}")


    def __init__(self,
                 name: str,
                 description: str,
                 steps: List[ArchitectureComponent],
                 img: Optional[str] = None,
                 exception_text: str = "Sorry an internal technical error occurred.",
                 no_response_text: str = "Sorry I can't answer that."):
        self.name = name
        self.description = description
        self.steps = steps
        self.img = img
        self.exception_text = exception_text
        self.no_response_text = no_response_text

    def __call__(self, request: ArchitectureRequest, trace_tags: List[str] = None, trace_comment: str = None) -> ArchitectureTrace:
        """
        The main entry point to call the pipeline. Passes the request through each pipeline step
        in sequence, allowing them to amend the request or early exit the processing. Also captures
        exceptions and generates the trace, plus saves the request/response and the trace to a store
        for analysis.
        :param request: The architecture request to pass down the pipeline
        :param trace_tags: Optional tags to record against the trace in the log
        :param trace_comment: An optional comment to record against the trace in the log
        :return: The trace record for this invocation of the architecture
        """
        logging.info(f'{self.name} processing query "{request.request}"')
        trace = ArchitectureTrace()
        for component in self.steps:
            trace.start_trace(name=component.__class__.__name__)
            try:
                component.process_request(request)
                if request.early_exit:
                    trace.end_trace(outcome=ArchitectureTraceOutcome.EARLY_EXIT,
                                    early_exit_message=request.early_exit_message)
                    break
                else:
                    trace.end_trace(outcome=ArchitectureTraceOutcome.SUCCESS)
            except Exception as err:
                trace.end_trace(outcome=ArchitectureTraceOutcome.EXCEPTION)
                trace.steps[-1].exception = err
                traceback.print_exc()
                break
        LogWorker.write(self.name, request, trace, trace_tags, trace_comment)
        return trace
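
# A minimal end-to-end sketch (hypothetical architecture name) of invoking a pipeline:
#
#   Architecture.load_architectures()
#   arch = Architecture.get_architecture("Example RAG")   # a name from config/architectures.json
#   request = ArchitectureRequest("What speakers do you sell?")
#   trace = arch(request, trace_tags=["demo"])
#   print(request.response)       # final response off the top of the stack
#   print(trace.as_markdown())    # per-step timings and outcomes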


class InputRequestScreener(ArchitectureComponent):
    """
    This is a concrete component which screens the input query for profanity using an off-the-shelf
    profanity detection library (better_profanity).
    """
    description = "Simplistic input screener for demonstration. Screens inputs for profanity."

    def process_request(self, request: ArchitectureRequest) -> None:
        if profanity.contains_profanity(request.request):
            request.response = "Sorry - I cannot answer this question. Please try and rephrase it."
            request.early_exit = True
            request.early_exit_message = "Profanity detected in request"


class OutputResponseScreener(ArchitectureComponent):
    """
    This is a concrete component designed to review the final response before showing it to the user.
    It is a simple exemplar component which calls the baseline LLM with just the response text and asks
    whether it contains anything offensive. This is illustrative only and should not be considered
    a best-in-class or production-usable safety implementation.
    """
    description = "Screens outputs for offensive responses."

    def __init__(self):
        self.api_token = hf_api_token()
        self.endpoint_url = "https://yl89ru8gdr1wkbej.eu-west-1.aws.endpoints.huggingface.cloud"

    def process_request(self, request: ArchitectureRequest) -> None:
        system_prompt = "You are screening for offensive content. In a single word (yes or no), is the response offensive?"
        headers = {
            "Accept": "application/json",
            "Authorization": f"Bearer {self.api_token}",
            "Content-Type": "application/json"
        }
        query_input = f"[INST] <<SYS>> {system_prompt} <</SYS>> {request.response} [/INST] "  # Closing <</SYS>> tag, matching the llama-2 chat format used elsewhere in this file
        payload = {
            "inputs": query_input,
            "parameters": {
                "temperature": 0.1,
                "max_new_tokens": 10
            }
        }
        llm_response = requests.post(self.endpoint_url, headers=headers, json=payload)
        generated_text = json.loads(llm_response.text)[0]['generated_text'].strip()
        if len(generated_text) > 2 and generated_text[0:3].lower() == 'yes':  # Too many false positives getting blocked so tweaked to lean relaxed for the demo
            request.response = "Sorry - I cannot answer this question. Please try and rephrase it."
            request.early_exit = True


class RetrievalAugmentor(ArchitectureComponent):
    """
    This is a concrete implementation of the augmentation component of the RAG architecture.  It takes
    the current input request, queries the vector store for relevant documents and then prepends these
    documents to the LLM prompt, ready for inference.
    """
    description = "Retrieves appropriate documents from the store and then augments the request."

    def __init__(self, vector_store: str, doc_count: int = 5):
        chroma_db = os.path.join(data_dir, 'vector_stores', f'{vector_store}_chroma')
        self.vector_store = chroma_db
        client = chromadb.PersistentClient(path=chroma_db)
        self.collection = client.get_collection(name='products')
        self.doc_count = doc_count

    def process_request(self, request: ArchitectureRequest) -> None:
        # Get the count nearest documents from the doc store
        input_query = request.request
        results = self.collection.query(query_texts=[input_query], n_results=self.doc_count)
        documents = results['documents'][0]  # Index 0 as we are always asking one question

        # Update the request to include the retrieved documents
        new_query = '{"background": ['
        new_query += ', '.join([f'"{d}"' for d in documents])
        new_query += ']}\n\nQUESTION: '
        new_query += input_query

        # Put the request back into the architecture request
        request.request = new_query

    def config_description(self) -> str:
        """
        Custom config details as markdown
        """
        desc = f"Vector Store: {self.vector_store};  "
        desc += f"Max docs: {self.doc_count}"
        return desc
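
# For illustration, process_request above rewrites a query "Which speakers are wireless?"
# with two retrieved documents (hypothetical document text) into:
#
#   {"background": ["The SoundMax 300 is a wireless speaker...", "The BassBoom 100 is wired..."]}
#
#   QUESTION: Which speakers are wireless?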


class HFInferenceEndpoint(ArchitectureComponent):
    """
    A concrete pipeline component which sends the current query to a given llama-chat-based
    inference endpoint on HuggingFace.
    """
    def __init__(self, endpoint_url: str, model_name: str, system_prompt: str, max_new_tokens: int,
                 temperature: float = 1.0, prompt_style: str = "multi_line"):
        self.endpoint_url: str = endpoint_url
        self.prompt_style = prompt_style
        self.model_name: str = model_name
        self.system_prompt: str = system_prompt
        self.max_new_tokens = max_new_tokens
        self.api_token = hf_api_token()
        self.temperature = temperature

    def config_description(self) -> str:
        """
        Custom config details as markdown
        """
        desc = f"Model: {self.model_name};  "
        desc += f"Endpoint: {self.endpoint_url};  "
        desc += f"Max tokens: {self.max_new_tokens};  "
        desc += f"Temperature: {self.temperature};  "
        desc += f"System prompt: {self.system_prompt}"
        return desc

    def process_request(self, request: ArchitectureRequest) -> None:
        """
        Main processing method for this component. Formats the query according to the configured
        prompt style, calls the HuggingFace inference endpoint over HTTP and then adds the generated
        text to the response element of the request.  Supports the different prompt styles that were
        trialled to determine the best way to get a good response from the various LLM endpoints.
        """
        headers = {
            "Accept": "application/json",
            "Authorization": f"Bearer {self.api_token}",
            "Content-Type": "application/json"
        }

        if self.prompt_style == "multi_line":
            query_input = f"<s>[INST] <<SYS>>\n{self.system_prompt}\n<</SYS>>\n\n{request.request} [/INST] "
        elif self.prompt_style == "multi_line_no_sys":
            query_input = f"<s>[INST]\n{request.request} [/INST] "
        elif self.prompt_style == "single_line_no_sys":
            query_input = f"<s>[INST] {request.request} [/INST] "
        elif self.prompt_style == "single_line":
            query_input = f"<s>[INST] <<SYS>>\n{self.system_prompt}\n<</SYS>> {request.request} [/INST] "
        elif self.prompt_style == "multi_line_with_roles":
            query_input = f"<<SYS>>\n{self.system_prompt}\n<</SYS>>\n[INST]\nUser:{request.request}\n[/INST]\n\nAssistant:"
        elif self.prompt_style == "raw":
            # No formatting - used to just send things straight through from the front end
            query_input = request.request
        else:
            raise ValueError(f"Config error - Unknown prompt style: {self.prompt_style}")
        payload = {
            "inputs": query_input,
            "parameters": {
                "temperature": self.temperature,
                "max_new_tokens": self.max_new_tokens
            }
        }
        llm_response = requests.post(self.endpoint_url, headers=headers, json=payload)
        if llm_response.status_code == 200:
            generated_text = llm_response.json()[0]['generated_text'].strip()
            request.response = generated_text
        elif llm_response.status_code == 502:
            request.response = "Received 502 error from LLM service - service initialising, try again shortly"
        else:
            request.response = f"Received {llm_response.status_code} - {llm_response.text}"
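
# For illustration, prompt_style "multi_line" above produces a llama-2 chat style prompt
# (hypothetical system prompt and question):
#
#   <s>[INST] <<SYS>>
#   You are a helpful assistant.
#   <</SYS>>
#
#   What speakers do you sell? [/INST]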


class ResponseTrimmer(ArchitectureComponent):
    """
    A concrete pipeline component which trims the response based on a regex match,
    then uppercases the first character of what is left.
    """
    description = "Trims the response based on a regex"

    def __init__(self, regexes: List[str]):
        quoted_regexes = [f'"{r}"' for r in regexes]
        self.regex_display = f"[{', '.join(quoted_regexes)}]"
        self.regexes = [re.compile(r, re.IGNORECASE) for r in regexes]

    def process_request(self, request: ArchitectureRequest):
        new_response = request.response
        for regex in self.regexes:
            new_response = regex.sub('', new_response)
        new_response = new_response[:1].upper() + new_response[1:]
        request.response = new_response

    def config_description(self) -> str:
        return f"Regexes: {self.regex_display}"
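
# Illustrative (hypothetical) usage of the trimmer:
#
#   trimmer = ResponseTrimmer(regexes=[r"^as an ai assistant,?\s*"])
#   # a response of "As an AI assistant, the SoundMax 300 is our best speaker."
#   # becomes "The SoundMax 300 is our best speaker."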