import re
import json
import os
import shutil
from threading import Thread  # used by predict_alt to run generation in the background

import requests
import numpy as np
import pandas as pd
import torch
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, AutoConfig, AutoModel, StoppingCriteria, StoppingCriteriaList, TextIteratorStreamer
from vllm import LLM, SamplingParams
from chromadb.config import Settings
from chromadb.utils import embedding_functions
from FlagEmbedding import BGEM3FlagModel
from sklearn.metrics.pairwise import cosine_similarity

embedding_model = BGEM3FlagModel('BAAI/bge-m3',
                                 use_fp16=True)  # use_fp16=True speeds up encoding with a slight quality trade-off

# Precomputed dense BGE-M3 vectors; rows must stay aligned with the records in embeddings_tchap.json
embeddings = np.load("embeddings_tchap.npy")
embeddings_data = pd.read_json("embeddings_tchap.json")
embeddings_text = embeddings_data["text_with_context"].tolist()

# Define the device
#device = "cuda" if torch.cuda.is_available() else "cpu"

# Default generation hyperparameters (not used by the active predict() path below)
temperature = 0.2
max_new_tokens = 1000
top_p = 0.92
repetition_penalty = 1.7

#model_name = "Pclanglais/Tchap"

#llm = LLM(model_name, max_model_len=4096)
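# NOTE: predict_alt() below expects a Hugging Face tokenizer and a causal generation model.
# A minimal sketch, assuming the commented-out model_name above is the intended checkpoint
# (the names tokenizer and generation_model are illustrative, not part of the original script):
#tokenizer = AutoTokenizer.from_pretrained(model_name)
#generation_model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16).to("cuda")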

#Vector search over the database
def vector_search(sentence_query):

    query_embedding = embedding_model.encode(sentence_query,
                                             batch_size=12,
                                             max_length=256,  # a smaller max_length speeds up encoding when queries are short
                                             )['dense_vecs']

    # Reshape the query embedding to fit the cosine_similarity function requirements
    query_embedding_reshaped = query_embedding.reshape(1, -1)

    # Compute cosine similarities against the precomputed document embeddings
    similarities = cosine_similarity(query_embedding_reshaped, embeddings)

    # Find the index of the closest document (highest similarity)
    closest_doc_index = np.argmax(similarities)

    # Return the text of the closest document
    closest_doc_text = embeddings_text[closest_doc_index]

    return closest_doc_text
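
# Example usage (illustrative): vector_search("Qui peut bénéficier de l'AIP?")
# returns the "text_with_context" string of the most similar indexed chunk.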


class StopOnTokens(StoppingCriteria):
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        # Stop generation as soon as the last generated token is one of these ids
        # (the ids are model-specific; 29 and 0 come from the original script)
        stop_ids = [29, 0]
        for stop_id in stop_ids:
            if input_ids[0][-1] == stop_id:
                return True
        return False

def predict(message, history):
    # Retrieve the closest document and append it to the user message as a source
    text = vector_search(message)
    message = message + "\n\n### Source ###\n" + text
    history_transformer_format = history + [[message, ""]]

    messages = "".join(["".join(["\n<human>:"+item[0], "\n<bot>:"+item[1]])
                for item in history_transformer_format])

    # Generation is currently disabled, so the constructed prompt (conversation
    # transcript plus retrieved source) is returned as the chatbot reply.
    return messages

# Alternative streaming path: requires the Hugging Face tokenizer and causal LM
# sketched above (tokenizer / generation_model), plus a CUDA device.
def predict_alt(message, history):
    history_transformer_format = history + [[message, ""]]
    stop = StopOnTokens()

    messages = "".join(["".join(["\n<human>:"+item[0], "\n<bot>:"+item[1]])
                for item in history_transformer_format])

    model_inputs = tokenizer([messages], return_tensors="pt").to("cuda")
    streamer = TextIteratorStreamer(tokenizer, timeout=10., skip_prompt=True, skip_special_tokens=True)
    generate_kwargs = dict(
        model_inputs,
        streamer=streamer,
        max_new_tokens=1024,
        do_sample=True,
        top_p=0.95,
        top_k=1000,
        temperature=1.0,
        num_beams=1,
        stopping_criteria=StoppingCriteriaList([stop])
        )
    # Run generation in a background thread so tokens can be streamed as they arrive
    t = Thread(target=generation_model.generate, kwargs=generate_kwargs)
    t.start()

    partial_message = ""
    for new_token in streamer:
        if new_token != '<':
            partial_message += new_token
            yield partial_message
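
# To try the streaming path, pass predict_alt instead of predict to gr.ChatInterface
# below (gr.ChatInterface accepts generator functions); this requires the generation
# model and tokenizer sketched earlier in this file.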

# Define the Gradio interface
title = "Tchap"
description = "Le chatbot du service public"
examples = [
    "Qui peut bénéficier de l'AIP?",
]


gr.ChatInterface(predict, title=title, description=description, examples=examples).launch()