SS8297 committed
Commit 8469765 · verified · 1 parent: 2f55736

Update app.py

Files changed (1): app.py +110 -3
app.py CHANGED
@@ -1,6 +1,83 @@
 import streamlit as st
+import torch
+from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
+from transformers import StoppingCriteriaList, StoppingCriteria
+from sentence_transformers import SentenceTransformer
+from pinecone import Pinecone
+import warnings
 
-st.title("Echo Bot")
+
+warnings.filterwarnings("ignore", category=UserWarning)
+
+# model_name = "AI-Sweden-Models/gpt-sw3-126m-instruct"
+model_name = "AI-Sweden-Models/gpt-sw3-1.3b-instruct"
+
+
+device = "cuda:0" if torch.cuda.is_available() else "cpu"
+
+# Initialize the tokenizer
+tokenizer = AutoTokenizer.from_pretrained(model_name)
+
+
+def read_file(file_path: str) -> str:
+    """Read the contents of a file."""
+    with open(file_path, "r") as file:
+        return file.read()
+
+
+model = AutoModelForCausalLM.from_pretrained(model_name)
+model.eval()
+model.to(device)
+
+document_encoder_model = SentenceTransformer("KBLab/sentence-bert-swedish-cased")
+
+
+# Note: 'index1' has been pre-created in the Pinecone console.
+# Read the Pinecone API key from a file.
+pinecone_api_key = read_file("language_model/pinecone_api_key.txt")
+pc = Pinecone(api_key=pinecone_api_key)
+index = pc.Index("index1")
+
+
+def query_pinecone_namespace(
+    vector_database_index, q_embedding, namespace: str
+) -> str:
+    result = vector_database_index.query(
+        namespace=namespace,
+        vector=q_embedding.tolist(),
+        top_k=1,
+        include_values=True,
+        include_metadata=True,
+    )
+    results = []
+    for match in result.matches:
+        results.append(match.metadata["paragraph"])
+    return results[0]
+
+
+def generate_prompt(llmprompt: str) -> str:
+    """Format a prompt for the GPT-SW3 instruct model."""
+    start_token = "<|endoftext|><s>"
+    end_token = "<s>"
+    return f"{start_token}\nUser:\n{llmprompt}\n{end_token}\nBot:\n".strip()
+
+
+def encode_query(query: str):
+    """Embed the query with the sentence-transformer model."""
+    return document_encoder_model.encode(query)
+
+
+class StopOnTokenCriteria(StoppingCriteria):
+    def __init__(self, stop_token_id):
+        self.stop_token_id = stop_token_id
+
+    def __call__(self, input_ids, scores, **kwargs):
+        return input_ids[0, -1] == self.stop_token_id
+
+
+stop_on_token_criteria = StopOnTokenCriteria(stop_token_id=tokenizer.bos_token_id)  # "<s>" ends a turn
+
+st.title("Paralegal Assistant")
 
 # Initialize chat history
 if "messages" not in st.session_state:
@@ -12,13 +89,43 @@ for message in st.session_state.messages:
         st.markdown(message["content"])
 
 # React to user input
-if prompt := st.chat_input("What is up?"):
+if prompt := st.chat_input("Skriv din fråga..."):  # "Write your question..."
     # Display user message in chat message container
     st.chat_message("user").markdown(prompt)
     # Add user message to chat history
    st.session_state.messages.append({"role": "user", "content": prompt})
 
-    response = f"Echo: {prompt}"
+    query = query_pinecone_namespace(  # retrieve the closest reference paragraph
+        vector_database_index=index,
+        q_embedding=encode_query(query=prompt),
+        namespace="ns-parent-balk",
+    )
+    llmprompt = (
+        "Besvara följande fråga på ett sakligt, kortfattat och formellt vis: "
+        + prompt
+        + "\n"
+        + "Använd följande text som referens när du besvarar frågan och hänvisa till fakta i texten: \n"
+        + query
+    )
+    llmprompt = generate_prompt(llmprompt=llmprompt)
+
+    # Convert the prompt to token ids
+    input_ids = tokenizer(llmprompt, return_tensors="pt")["input_ids"].to(device)
+
+    # Generate tokens based on the prompt
+    generated_token_ids = model.generate(
+        inputs=input_ids,
+        max_new_tokens=128,
+        do_sample=True,
+        temperature=0.8,
+        top_p=1,
+        stopping_criteria=StoppingCriteriaList([stop_on_token_criteria]),
+    )[0]
+
+    # Decode the new tokens, dropping the prompt and the trailing stop token
+    generated_text = tokenizer.decode(generated_token_ids[len(input_ids[0]) : -1])
+
+    response = generated_text
     # Display assistant response in chat message container
     with st.chat_message("assistant"):
         st.markdown(response)
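
The retrieval step assumes that every vector in the ns-parent-balk namespace of index1 carries a "paragraph" metadata field holding the source text, embedded with the same KBLab/sentence-bert-swedish-cased model used for queries. A minimal ingestion sketch under that assumption follows; the paragraph texts and id scheme are hypothetical and not part of this commit:

from sentence_transformers import SentenceTransformer
from pinecone import Pinecone

encoder = SentenceTransformer("KBLab/sentence-bert-swedish-cased")
pc = Pinecone(api_key=open("language_model/pinecone_api_key.txt").read())
index = pc.Index("index1")  # pre-created in the Pinecone console

# Hypothetical source paragraphs; the app would use real statute text.
paragraphs = [
    "1 § Exempeltext ...",
    "2 § Exempeltext ...",
]

index.upsert(
    vectors=[
        {
            "id": f"parent-balk-{i}",  # hypothetical id scheme
            "values": encoder.encode(p).tolist(),
            "metadata": {"paragraph": p},  # read back by query_pinecone_namespace
        }
        for i, p in enumerate(paragraphs)
    ],
    namespace="ns-parent-balk",
)

Storing the full paragraph as metadata keeps the app's query path to a single Pinecone round trip, at the cost of duplicating the text in the index.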
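For reference, after generate_prompt wraps the retrieved paragraph and the question in GPT-SW3's instruct chat format, the model sees roughly the following (the question and paragraph are placeholders; English glosses in brackets):

<|endoftext|><s>
User:
Besvara följande fråga på ett sakligt, kortfattat och formellt vis: <user question>
[Answer the following question in a factual, concise and formal manner:]
Använd följande text som referens när du besvarar frågan och hänvisa till fakta i texten:
[Use the following text as a reference when answering the question and refer to facts in the text:]
<retrieved paragraph>
<s>
Bot:

Generation then runs until the model emits "<s>" again; StopOnTokenCriteria catches it via tokenizer.bos_token_id, and the slice in the decode step drops both the prompt tokens and that trailing "<s>".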