herMaster committed on
Commit
0284c70
β€’
1 Parent(s): fdf2483

Upload first huggingface test file on space

Browse files

This is the first test file uploaded to a Hugging Face Space, created to try out Spaces for the first time.

Files changed (1) hide show
  1. grado-test-3-huggingface-space.py +147 -0
grado-test-3-huggingface-space.py ADDED
@@ -0,0 +1,147 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from qdrant_client import models, QdrantClient
3
+ from sentence_transformers import SentenceTransformer
4
+ from PyPDF2 import PdfReader
5
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
6
+ from langchain.callbacks.manager import CallbackManager
7
+ from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
8
+ from langchain.llms import LlamaCpp
9
+ from langchain.vectorstores import Qdrant
10
+ from qdrant_client.http import models
11
+
12
+
13
+
14
+ # loading the embedding model -
15
+
16
+ encoder = SentenceTransformer("all-MiniLM-L6-v2")
17
+
18
+ print("embedding model loaded.............................")
19
+ print("####################################################")
20
+
21
+ # loading the LLM
22
+
23
+ callback_manager = CallbackManager([StreamingStdOutCallbackHandler()])
24
+
25
+ print("loading the LLM......................................")
26
+
27
+ llm = LlamaCpp(
28
+ model_path="/home/devangpagare/llm/models/llama-2-7b-chat.Q3_K_S.gguf",
29
+ n_ctx=2048,
30
+ f16_kv=True, # MUST set to True, otherwise you will run into problem after a couple of calls
31
+ callback_manager=callback_manager,
32
+ verbose=True,
33
+ )
34
+ print("LLM loaded........................................")
35
+ print("################################################################")
36
+
37
def get_chunks(text, chunk_size=500, chunk_overlap=100):
    """Split *text* into overlapping chunks for embedding.

    Parameters
    ----------
    text : str
        Raw document text to split.
    chunk_size : int, optional
        Maximum number of characters per chunk (default 500, matching
        the original hard-coded value).
    chunk_overlap : int, optional
        Number of characters shared between consecutive chunks so
        sentences straddling a boundary stay searchable (default 100).

    Returns
    -------
    list[str]
        The chunked text, in document order.
    """
    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=chunk_size,
        chunk_overlap=chunk_overlap,
        length_function=len,  # measure chunk length in characters
    )
    return text_splitter.split_text(text)
47
+
48
+
49
# -----------------------------------------------------------------
# Load the source PDF and split its text into chunks.
# NOTE(review): absolute local path — it will not exist on a Hugging
# Face Space; ship the PDF with the repo and use a relative path.
# -----------------------------------------------------------------
pdf_path = '/home/devangpagare/llm/qdrant-cloud-rag-main/100 Weird Facts About the Human Body.pdf'

reader = PdfReader(pdf_path)
# extract_text() can return None for image-only pages; the `or ""`
# guard keeps the concatenation from raising TypeError. join() also
# avoids the quadratic cost of repeated string +=.
text = "".join((page.extract_text() or "") for page in reader.pages)

chunks = get_chunks(text)

print("Chunks are ready.....................................")
print("######################################################")
64
+
65
# Vector store: a local on-disk Qdrant database.
db_location = "/home/devangpagare/llm/qdrant-cloud-rag-main/gradio/db"
qdrant = QdrantClient(path=db_location)
print("db created................................................")
print("#####################################################################")

# (Re)create the collection; the vector size must match the output
# dimension of the embedding model, and cosine distance is used for
# similarity search.
vector_params = models.VectorParams(
    size=encoder.get_sentence_embedding_dimension(),
    distance=models.Distance.COSINE,
)
qdrant.recreate_collection(
    collection_name="my_facts",
    vectors_config=vector_params,
)

print("Collection created........................................")
print("#########################################################")
79
+
80
+
81
+
82
# -----------------------------------------------------------------
# Index every chunk in Qdrant: id = chunk index, vector = embedding,
# payload = {first 5 chars of the chunk: full chunk text}. The odd
# payload key is preserved because chat() reads only payload values.
# -----------------------------------------------------------------

# dict(enumerate(...)) replaces the manual index-list + zip dance.
dic = dict(enumerate(chunks))

qdrant.upload_records(
    collection_name="my_facts",
    records=[
        models.Record(
            id=idx,
            vector=encoder.encode(chunk).tolist(),
            payload={chunk[:5]: chunk},
        )
        for idx, chunk in dic.items()
    ],
)

print("Records uploaded........................................")
print("###########################################################")
102
+
103
def chat(question):
    """Answer *question* with RAG over the indexed PDF chunks.

    Retrieves the top-3 most similar chunks from the Qdrant
    collection, assembles a Llama-2 chat prompt with them as context,
    and returns the LLM's generated answer as a string.

    Parameters
    ----------
    question : str
        The user's question.

    Returns
    -------
    str
        The LLM's answer.
    """
    hits = qdrant.search(
        collection_name="my_facts",
        query_vector=encoder.encode(question).tolist(),
        limit=3,
    )
    # Each hit's payload maps {chunk[:5]: chunk}; only the value (the
    # full chunk text) matters. join() instead of the original
    # context[0] + context[1] + context[2] so the function no longer
    # raises IndexError when fewer than 3 hits come back.
    context = "".join(list(hit.payload.values())[0] for hit in hits)

    system_prompt = """You are a helpful assistant, you will use the provided context to answer user questions.
Read the given context before answering questions and think step by step. If you can not answer a user question based on
the provided context, inform the user. Do not use any other information for answering user. Provide a detailed answer to the question."""

    # Llama-2 chat prompt delimiters.
    B_INST, E_INST = "[INST]", "[/INST]"
    B_SYS, E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n"

    SYSTEM_PROMPT = B_SYS + system_prompt + E_SYS

    instruction = f"""
Context: {context}
User: {question}"""

    prompt_template = B_INST + SYSTEM_PROMPT + instruction + E_INST

    result = llm(prompt_template)
    return result
137
+
138
# -----------------------------------------------------------------
# Gradio UI. cache_examples=True pre-runs the example questions at
# startup so their answers render instantly.
# -----------------------------------------------------------------
gr.Interface(
    fn=chat,
    inputs=gr.Textbox(lines=10, placeholder="Enter your question here 👉"),
    outputs=gr.Textbox(lines=10, placeholder="Your answer will be here soon 🚀"),
    title="Q&A with PDF 👩🏻‍💻📓✍🏻💡",  # fixed typo: "Q&N" -> "Q&A"
    description="This app facilitates a conversation with PDFs available on https://www.delo.si/assets/media/other/20110728/100%20Weird%20Facts%20About%20the%20Human%20Body.pdf💡",
    theme="soft",
    examples=["Hello", "what is the speed of human nerve impulses?"],
    cache_examples=True,
    # SECURITY(review): hard-coded demo credentials — replace before any
    # real deployment. NOTE: share=True is ignored on Hugging Face Spaces.
).launch(share=True, auth=("username", "password"))