Joshua Sundance Bailey committed on
Commit
87d6984
1 Parent(s): b44a3fc

move defaults to new file

Browse files
.idea/langchain-streamlit-demo.iml CHANGED
@@ -1,7 +1,9 @@
1
  <?xml version="1.0" encoding="UTF-8"?>
2
  <module type="PYTHON_MODULE" version="4">
3
  <component name="NewModuleRootManager">
4
- <content url="file://$MODULE_DIR$" />
 
 
5
  <orderEntry type="jdk" jdkName="Remote Python 3.11.4 Docker (&lt;none&gt;:&lt;none&gt;) (5)" jdkType="Python SDK" />
6
  <orderEntry type="sourceFolder" forTests="false" />
7
  </component>
 
1
  <?xml version="1.0" encoding="UTF-8"?>
2
  <module type="PYTHON_MODULE" version="4">
3
  <component name="NewModuleRootManager">
4
+ <content url="file://$MODULE_DIR$">
5
+ <sourceFolder url="file://$MODULE_DIR$/langchain-streamlit-demo" isTestSource="false" />
6
+ </content>
7
  <orderEntry type="jdk" jdkName="Remote Python 3.11.4 Docker (&lt;none&gt;:&lt;none&gt;) (5)" jdkType="Python SDK" />
8
  <orderEntry type="sourceFolder" forTests="false" />
9
  </component>
langchain-streamlit-demo/app.py CHANGED
@@ -1,4 +1,3 @@
1
- import os
2
  from datetime import datetime
3
  from tempfile import NamedTemporaryFile
4
  from typing import Tuple, List, Dict, Any, Union
@@ -30,6 +29,29 @@ from langchain.vectorstores import FAISS
30
  from langsmith.client import Client
31
  from streamlit_feedback import streamlit_feedback
32
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
33
  from qagen import get_rag_qa_gen_chain
34
  from summarize import get_rag_summarization_chain
35
 
@@ -85,64 +107,6 @@ class StreamHandler(BaseCallbackHandler):
85
  RUN_COLLECTOR = RunCollectorCallbackHandler()
86
 
87
 
88
- # --- Model Selection Helpers ---
89
- MODEL_DICT = {
90
- "gpt-3.5-turbo": "OpenAI",
91
- "gpt-4": "OpenAI",
92
- "claude-instant-v1": "Anthropic",
93
- "claude-2": "Anthropic",
94
- "meta-llama/Llama-2-7b-chat-hf": "Anyscale Endpoints",
95
- "meta-llama/Llama-2-13b-chat-hf": "Anyscale Endpoints",
96
- "meta-llama/Llama-2-70b-chat-hf": "Anyscale Endpoints",
97
- "codellama/CodeLlama-34b-Instruct-hf": "Anyscale Endpoints",
98
- "Azure OpenAI": "Azure OpenAI",
99
- }
100
- SUPPORTED_MODELS = list(MODEL_DICT.keys())
101
-
102
-
103
- # --- Constants from Environment Variables ---
104
- DEFAULT_MODEL = os.environ.get("DEFAULT_MODEL", "gpt-3.5-turbo")
105
- DEFAULT_SYSTEM_PROMPT = os.environ.get(
106
- "DEFAULT_SYSTEM_PROMPT",
107
- "You are a helpful chatbot.",
108
- )
109
- MIN_TEMP = float(os.environ.get("MIN_TEMPERATURE", 0.0))
110
- MAX_TEMP = float(os.environ.get("MAX_TEMPERATURE", 1.0))
111
- DEFAULT_TEMP = float(os.environ.get("DEFAULT_TEMPERATURE", 0.7))
112
- MIN_MAX_TOKENS = int(os.environ.get("MIN_MAX_TOKENS", 1))
113
- MAX_MAX_TOKENS = int(os.environ.get("MAX_MAX_TOKENS", 100000))
114
- DEFAULT_MAX_TOKENS = int(os.environ.get("DEFAULT_MAX_TOKENS", 1000))
115
- DEFAULT_LANGSMITH_PROJECT = os.environ.get("LANGCHAIN_PROJECT")
116
-
117
- AZURE_VARS = [
118
- "AZURE_OPENAI_BASE_URL",
119
- "AZURE_OPENAI_API_VERSION",
120
- "AZURE_OPENAI_DEPLOYMENT_NAME",
121
- "AZURE_OPENAI_API_KEY",
122
- "AZURE_OPENAI_MODEL_VERSION",
123
- ]
124
-
125
- AZURE_DICT = {v: os.environ.get(v, "") for v in AZURE_VARS}
126
-
127
- PROVIDER_KEY_DICT = {
128
- "OpenAI": os.environ.get("OPENAI_API_KEY", ""),
129
- "Anthropic": os.environ.get("ANTHROPIC_API_KEY", ""),
130
- "Anyscale Endpoints": os.environ.get("ANYSCALE_API_KEY", ""),
131
- "LANGSMITH": os.environ.get("LANGCHAIN_API_KEY", ""),
132
- }
133
- OPENAI_API_KEY = PROVIDER_KEY_DICT["OpenAI"]
134
-
135
- MIN_CHUNK_SIZE = 1
136
- MAX_CHUNK_SIZE = 10000
137
- DEFAULT_CHUNK_SIZE = 1000
138
-
139
- MIN_CHUNK_OVERLAP = 0
140
- MAX_CHUNK_OVERLAP = 10000
141
- DEFAULT_CHUNK_OVERLAP = 0
142
-
143
- DEFAULT_RETRIEVER_K = 4
144
-
145
-
146
  @st.cache_data
147
  def get_texts_and_retriever(
148
  uploaded_file_bytes: bytes,
@@ -239,6 +203,7 @@ with sidebar:
239
  max_value=MAX_CHUNK_SIZE,
240
  value=DEFAULT_CHUNK_SIZE,
241
  )
 
242
  chunk_overlap = st.slider(
243
  label="Chunk Overlap",
244
  help="Number of characters to overlap between chunks",
@@ -250,6 +215,7 @@ with sidebar:
250
  chain_type_help_root = (
251
  "https://python.langchain.com/docs/modules/chains/document/"
252
  )
 
253
  chain_type_help = "\n".join(
254
  f"- [{chain_type_name}]({chain_type_help_root}/{chain_type_name})"
255
  for chain_type_name in (
@@ -259,6 +225,7 @@ with sidebar:
259
  "map_rerank",
260
  )
261
  )
 
262
  document_chat_chain_type = st.selectbox(
263
  label="Document Chat Chain Type",
264
  options=[
@@ -304,6 +271,7 @@ with sidebar:
304
  .replace("{", "{{")
305
  .replace("}", "}}")
306
  )
 
307
  temperature = st.slider(
308
  "Temperature",
309
  min_value=MIN_TEMP,
@@ -327,10 +295,12 @@ with sidebar:
327
  type="password",
328
  value=PROVIDER_KEY_DICT.get("LANGSMITH"),
329
  )
 
330
  LANGSMITH_PROJECT = st.text_input(
331
  "LangSmith Project Name",
332
  value=DEFAULT_LANGSMITH_PROJECT or "langchain-streamlit-demo",
333
  )
 
334
  if st.session_state.client is None and LANGSMITH_API_KEY:
335
  st.session_state.client = Client(
336
  api_url="https://api.smith.langchain.com",
@@ -347,19 +317,23 @@ with sidebar:
347
  "AZURE_OPENAI_BASE_URL",
348
  value=AZURE_DICT["AZURE_OPENAI_BASE_URL"],
349
  )
 
350
  AZURE_OPENAI_API_VERSION = st.text_input(
351
  "AZURE_OPENAI_API_VERSION",
352
  value=AZURE_DICT["AZURE_OPENAI_API_VERSION"],
353
  )
 
354
  AZURE_OPENAI_DEPLOYMENT_NAME = st.text_input(
355
  "AZURE_OPENAI_DEPLOYMENT_NAME",
356
  value=AZURE_DICT["AZURE_OPENAI_DEPLOYMENT_NAME"],
357
  )
 
358
  AZURE_OPENAI_API_KEY = st.text_input(
359
  "AZURE_OPENAI_API_KEY",
360
  value=AZURE_DICT["AZURE_OPENAI_API_KEY"],
361
  type="password",
362
  )
 
363
  AZURE_OPENAI_MODEL_VERSION = st.text_input(
364
  "AZURE_OPENAI_MODEL_VERSION",
365
  value=AZURE_DICT["AZURE_OPENAI_MODEL_VERSION"],
@@ -386,6 +360,7 @@ if provider_api_key:
386
  streaming=True,
387
  max_tokens=max_tokens,
388
  )
 
389
  elif st.session_state.provider == "Anthropic":
390
  st.session_state.llm = ChatAnthropic(
391
  model=model,
@@ -394,6 +369,7 @@ if provider_api_key:
394
  streaming=True,
395
  max_tokens_to_sample=max_tokens,
396
  )
 
397
  elif st.session_state.provider == "Anyscale Endpoints":
398
  st.session_state.llm = ChatAnyscale(
399
  model_name=model,
@@ -402,6 +378,7 @@ if provider_api_key:
402
  streaming=True,
403
  max_tokens=max_tokens,
404
  )
 
405
  elif AZURE_AVAILABLE and st.session_state.provider == "Azure OpenAI":
406
  st.session_state.llm = AzureChatOpenAI(
407
  openai_api_base=AZURE_OPENAI_BASE_URL,
 
 
1
  from datetime import datetime
2
  from tempfile import NamedTemporaryFile
3
  from typing import Tuple, List, Dict, Any, Union
 
29
  from langsmith.client import Client
30
  from streamlit_feedback import streamlit_feedback
31
 
32
+ from defaults import (
33
+ MODEL_DICT,
34
+ SUPPORTED_MODELS,
35
+ DEFAULT_MODEL,
36
+ DEFAULT_SYSTEM_PROMPT,
37
+ MIN_TEMP,
38
+ MAX_TEMP,
39
+ DEFAULT_TEMP,
40
+ MIN_MAX_TOKENS,
41
+ MAX_MAX_TOKENS,
42
+ DEFAULT_MAX_TOKENS,
43
+ DEFAULT_LANGSMITH_PROJECT,
44
+ AZURE_DICT,
45
+ PROVIDER_KEY_DICT,
46
+ OPENAI_API_KEY,
47
+ MIN_CHUNK_SIZE,
48
+ MAX_CHUNK_SIZE,
49
+ DEFAULT_CHUNK_SIZE,
50
+ MIN_CHUNK_OVERLAP,
51
+ MAX_CHUNK_OVERLAP,
52
+ DEFAULT_CHUNK_OVERLAP,
53
+ DEFAULT_RETRIEVER_K,
54
+ )
55
  from qagen import get_rag_qa_gen_chain
56
  from summarize import get_rag_summarization_chain
57
 
 
107
  RUN_COLLECTOR = RunCollectorCallbackHandler()
108
 
109
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
110
  @st.cache_data
111
  def get_texts_and_retriever(
112
  uploaded_file_bytes: bytes,
 
203
  max_value=MAX_CHUNK_SIZE,
204
  value=DEFAULT_CHUNK_SIZE,
205
  )
206
+
207
  chunk_overlap = st.slider(
208
  label="Chunk Overlap",
209
  help="Number of characters to overlap between chunks",
 
215
  chain_type_help_root = (
216
  "https://python.langchain.com/docs/modules/chains/document/"
217
  )
218
+
219
  chain_type_help = "\n".join(
220
  f"- [{chain_type_name}]({chain_type_help_root}/{chain_type_name})"
221
  for chain_type_name in (
 
225
  "map_rerank",
226
  )
227
  )
228
+
229
  document_chat_chain_type = st.selectbox(
230
  label="Document Chat Chain Type",
231
  options=[
 
271
  .replace("{", "{{")
272
  .replace("}", "}}")
273
  )
274
+
275
  temperature = st.slider(
276
  "Temperature",
277
  min_value=MIN_TEMP,
 
295
  type="password",
296
  value=PROVIDER_KEY_DICT.get("LANGSMITH"),
297
  )
298
+
299
  LANGSMITH_PROJECT = st.text_input(
300
  "LangSmith Project Name",
301
  value=DEFAULT_LANGSMITH_PROJECT or "langchain-streamlit-demo",
302
  )
303
+
304
  if st.session_state.client is None and LANGSMITH_API_KEY:
305
  st.session_state.client = Client(
306
  api_url="https://api.smith.langchain.com",
 
317
  "AZURE_OPENAI_BASE_URL",
318
  value=AZURE_DICT["AZURE_OPENAI_BASE_URL"],
319
  )
320
+
321
  AZURE_OPENAI_API_VERSION = st.text_input(
322
  "AZURE_OPENAI_API_VERSION",
323
  value=AZURE_DICT["AZURE_OPENAI_API_VERSION"],
324
  )
325
+
326
  AZURE_OPENAI_DEPLOYMENT_NAME = st.text_input(
327
  "AZURE_OPENAI_DEPLOYMENT_NAME",
328
  value=AZURE_DICT["AZURE_OPENAI_DEPLOYMENT_NAME"],
329
  )
330
+
331
  AZURE_OPENAI_API_KEY = st.text_input(
332
  "AZURE_OPENAI_API_KEY",
333
  value=AZURE_DICT["AZURE_OPENAI_API_KEY"],
334
  type="password",
335
  )
336
+
337
  AZURE_OPENAI_MODEL_VERSION = st.text_input(
338
  "AZURE_OPENAI_MODEL_VERSION",
339
  value=AZURE_DICT["AZURE_OPENAI_MODEL_VERSION"],
 
360
  streaming=True,
361
  max_tokens=max_tokens,
362
  )
363
+
364
  elif st.session_state.provider == "Anthropic":
365
  st.session_state.llm = ChatAnthropic(
366
  model=model,
 
369
  streaming=True,
370
  max_tokens_to_sample=max_tokens,
371
  )
372
+
373
  elif st.session_state.provider == "Anyscale Endpoints":
374
  st.session_state.llm = ChatAnyscale(
375
  model_name=model,
 
378
  streaming=True,
379
  max_tokens=max_tokens,
380
  )
381
+
382
  elif AZURE_AVAILABLE and st.session_state.provider == "Azure OpenAI":
383
  st.session_state.llm = AzureChatOpenAI(
384
  openai_api_base=AZURE_OPENAI_BASE_URL,
langchain-streamlit-demo/defaults.py ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+
3
+ MODEL_DICT = {
4
+ "gpt-3.5-turbo": "OpenAI",
5
+ "gpt-4": "OpenAI",
6
+ "claude-instant-v1": "Anthropic",
7
+ "claude-2": "Anthropic",
8
+ "meta-llama/Llama-2-7b-chat-hf": "Anyscale Endpoints",
9
+ "meta-llama/Llama-2-13b-chat-hf": "Anyscale Endpoints",
10
+ "meta-llama/Llama-2-70b-chat-hf": "Anyscale Endpoints",
11
+ "codellama/CodeLlama-34b-Instruct-hf": "Anyscale Endpoints",
12
+ "Azure OpenAI": "Azure OpenAI",
13
+ }
14
+
15
+ SUPPORTED_MODELS = list(MODEL_DICT.keys())
16
+
17
+ DEFAULT_MODEL = os.environ.get("DEFAULT_MODEL", "gpt-3.5-turbo")
18
+
19
+ DEFAULT_SYSTEM_PROMPT = os.environ.get(
20
+ "DEFAULT_SYSTEM_PROMPT",
21
+ "You are a helpful chatbot.",
22
+ )
23
+
24
+ MIN_TEMP = float(os.environ.get("MIN_TEMPERATURE", 0.0))
25
+ MAX_TEMP = float(os.environ.get("MAX_TEMPERATURE", 1.0))
26
+ DEFAULT_TEMP = float(os.environ.get("DEFAULT_TEMPERATURE", 0.7))
27
+
28
+ MIN_MAX_TOKENS = int(os.environ.get("MIN_MAX_TOKENS", 1))
29
+ MAX_MAX_TOKENS = int(os.environ.get("MAX_MAX_TOKENS", 100000))
30
+ DEFAULT_MAX_TOKENS = int(os.environ.get("DEFAULT_MAX_TOKENS", 1000))
31
+
32
+ DEFAULT_LANGSMITH_PROJECT = os.environ.get("LANGCHAIN_PROJECT")
33
+
34
+ AZURE_VARS = [
35
+ "AZURE_OPENAI_BASE_URL",
36
+ "AZURE_OPENAI_API_VERSION",
37
+ "AZURE_OPENAI_DEPLOYMENT_NAME",
38
+ "AZURE_OPENAI_API_KEY",
39
+ "AZURE_OPENAI_MODEL_VERSION",
40
+ ]
41
+
42
+ AZURE_DICT = {v: os.environ.get(v, "") for v in AZURE_VARS}
43
+
44
+ PROVIDER_KEY_DICT = {
45
+ "OpenAI": os.environ.get("OPENAI_API_KEY", ""),
46
+ "Anthropic": os.environ.get("ANTHROPIC_API_KEY", ""),
47
+ "Anyscale Endpoints": os.environ.get("ANYSCALE_API_KEY", ""),
48
+ "LANGSMITH": os.environ.get("LANGCHAIN_API_KEY", ""),
49
+ }
50
+
51
+ OPENAI_API_KEY = PROVIDER_KEY_DICT["OpenAI"]
52
+
53
+
54
+ MIN_CHUNK_SIZE = 1
55
+ MAX_CHUNK_SIZE = 10000
56
+ DEFAULT_CHUNK_SIZE = 1000
57
+
58
+ MIN_CHUNK_OVERLAP = 0
59
+ MAX_CHUNK_OVERLAP = 10000
60
+ DEFAULT_CHUNK_OVERLAP = 0
61
+
62
+ DEFAULT_RETRIEVER_K = 4