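"""A Gradio app that answers questions about research published at ASME IDETC using retrieval-augmented generation."""
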
import json  # to work with JSON
import threading  # to allow streaming response
import time  # to pace the delivery of the message
import typing  # for type hints

import faiss  # to create a search index
import gradio  # for the interface
import numpy  # to work with vectors
import pandas  # to work with dataframes
import sentence_transformers  # to load an embedding model
import spaces  # for GPU
import transformers  # to load an LLM

# Constants
GREETING = (
    "Howdy! I'm an AI agent that uses a [retrieval-augmented generation]("
    "https://en.wikipedia.org/wiki/Retrieval-augmented_generation) pipeline to answer questions about research published at [ASME IDETC](https://asmedigitalcollection.asme.org/IDETC-CIE). And the best part is that I always cite my sources! What"
    " can I tell you about today?"
)

EXAMPLE_QUERIES = [
    "What's the difference between a Markov chain and a hidden Markov model?",
]

EMBEDDING_MODEL_NAME = "allenai-specter"
LLM_MODEL_NAME = "Qwen/Qwen2-7B-Instruct"

# Load the dataset and convert to pandas
data = pandas.read_parquet("hf://datasets/ccm/rag-idetc/data/train-00000-of-00001.parquet")

# Load the model for later use in embeddings
model = sentence_transformers.SentenceTransformer(EMBEDDING_MODEL_NAME)

# Create an LLM pipeline that we can send queries to
tokenizer = transformers.AutoTokenizer.from_pretrained(LLM_MODEL_NAME)
streamer = transformers.TextIteratorStreamer(
    tokenizer, skip_prompt=True, skip_special_tokens=True
)
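# The streamer yields decoded tokens as generate() produces them, enabling incremental display in the chat.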
chatmodel = transformers.AutoModelForCausalLM.from_pretrained(
    LLM_MODEL_NAME, torch_dtype="auto", device_map="auto"
)

# Create a FAISS index for fast similarity search
metric = faiss.METRIC_INNER_PRODUCT
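# With L2-normalized vectors, maximum inner product search is equivalent to cosine similarity.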
vectors = numpy.stack(data["embedding"].tolist(), axis=0).astype("float32")
index = faiss.IndexFlatL2(len(data["embedding"][0]))
index.metric_type = metric
faiss.normalize_L2(vectors)
index.train(vectors)
index.add(vectors)

def preprocess(query: str, k: int) -> tuple[str, str]:
    """
    Searches the dataset for the top k most relevant papers to the query and returns a prompt and references.

    Args:
        query (str): The user's query
        k (int): The number of results to return

    Returns:
        tuple[str, str]: A tuple containing the prompt and references
    """
    encoded_query = numpy.expand_dims(model.encode(query), axis=0)
    print(query, encoded_query)
    faiss.normalize_L2(encoded_query)
    D, I = index.search(encoded_query, k)
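    # I holds the row indices of the k nearest abstracts; D holds their similarity scores (unused here).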
    top_k = data.loc[I[0]]
    prompt = (
        "You are an AI assistant who delights in helping people learn about research from the IDETC Conference. Your main task is to provide an ANSWER to the USER_QUERY based on the RESEARCH_EXCERPTS.\n\n"
        "RESEARCH_EXCERPTS:\n{{ABSTRACTS_GO_HERE}}\n\n"
        "USER_QUERY:\n{{QUERY_GOES_HERE}}\n\n"
        "ANSWER:\n"
    )
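    # The {{...}} placeholders are filled in below with the retrieved excerpts and the user's query.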
    references = "\n\n## References\n\n"
    research_abstracts = ""
    for i in range(k):
        title = top_k["title"].values[i]
        paper_id = top_k["id"].values[i]
        url = "https://doi.org/10.1115/" + paper_id
        text = top_k["text"].values[i]
        research_abstracts += f"{i + 1}. This excerpt is from: '{title}':\n{text}\n"
        references += f"{i + 1}. [{title.title()}]({url}).\n"
    prompt = prompt.replace("{{ABSTRACTS_GO_HERE}}", research_abstracts)
    prompt = prompt.replace("{{QUERY_GOES_HERE}}", query)
    return prompt, references

def postprocess(response: str, bypass_from_preprocessing: str) -> str:
    """
    Applies a postprocessing step to the LLM's response before the user receives it.

    Args:
        response (str): The LLM's response
        bypass_from_preprocessing (str): The bypass variable from the preprocessing step

    Returns:
        str: The postprocessed response
    """
    return response + bypass_from_preprocessing

def reply(message: str, history: list[list[str | None]]) -> typing.Iterator[str]:
    """
    This function is responsible for crafting a response.

    Args:
        message (str): The user's message
        history (list[list[str | None]]): The conversation history

    Yields:
        str: The AI's response, streamed incrementally
    """
    # Apply preprocessing
    message, bypass = preprocess(message, 5)
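    # "bypass" carries the formatted reference list, which is appended to the reply after generation.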
    # Convert the Gradio history (a list of [user, assistant] pairs) into the chat-template message format
    history_transformer_format = [
        {"role": role, "content": message_pair[idx]}
        for message_pair in history
        for idx, role in enumerate(["user", "assistant"])
        if message_pair[idx] is not None
    ] + [{"role": "user", "content": message}]
    # Stream a response from the model
    text = tokenizer.apply_chat_template(
        history_transformer_format, tokenize=False, add_generation_prompt=True
    )
    model_inputs = tokenizer([text], return_tensors="pt").to(chatmodel.device)
    generate_kwargs = dict(model_inputs, streamer=streamer, max_new_tokens=512)
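    # Run generation in a background thread so tokens can be yielded as the streamer produces them.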
    t = threading.Thread(target=chatmodel.generate, kwargs=generate_kwargs)
    t.start()
    partial_message = ""
    for new_token in streamer:
        if new_token != "<":  # skip stray "<" tokens
            partial_message += new_token
            time.sleep(0.05)
            yield partial_message
    yield postprocess(partial_message, bypass)

# Create and run the gradio interface
gradio.ChatInterface(
    reply,
    examples=EXAMPLE_QUERIES,
    chatbot=gradio.Chatbot(
        show_label=False,
        show_share_button=False,
        show_copy_button=False,
        value=[[None, GREETING]],
        height="60vh",
        bubble_full_width=False,
    ),
    retry_btn=None,
    undo_btn=None,
    clear_btn=None,
).launch(debug=True)