awacke1 committed on
Commit 34595f9 · verified · 1 Parent(s): 146e256

Update app.py

Files changed (1)
  1. app.py +79 -218
app.py CHANGED
@@ -15,12 +15,18 @@ from urllib.parse import quote
  import streamlit as st
  import streamlit.components.v1 as components
 
- # (Optional) If you use huggingface_hub:
+ # (Optional) huggingface_hub usage
  from huggingface_hub import InferenceClient
 
 
  # ----------------------------
- # Placeholder data structures
+ # Configurable BASE_URL
+ # ----------------------------
+ BASE_URL = "https://huggingface.co/spaces/awacke1/MermaidMarkdownDiagramEditor"
+
+
+ # ----------------------------
+ # Placeholder data
  # ----------------------------
  PromptPrefix = "AI-Search: "
  PromptPrefix2 = "AI-Refine: "
@@ -42,15 +48,9 @@ transhuman_glossary = {
  }
 
 
- # ------------------------------------------
- # Example stubs for placeholders
- # ------------------------------------------
  def process_text(text):
  st.write(f"process_text called with: {text}")
 
- def process_text2(text_input):
- return f"[process_text2 placeholder] Received: {text_input}"
-
  def search_arxiv(text):
  st.write(f"search_arxiv called with: {text}")
 
@@ -63,11 +63,7 @@ def process_image(image_file, prompt):
  def process_video(video_file, seconds_per_frame):
  st.write(f"[process_video placeholder] Video: {video_file}, seconds/frame: {seconds_per_frame}")
 
- def search_glossary(content):
- st.write(f"search_glossary called with: {content}")
-
-
- # If you have HF Inference endpoints, placeholders here
+ # Stub if you have an HF endpoint
  API_URL = "https://huggingface-inference-endpoint-placeholder"
  API_KEY = "hf_XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
 
@@ -76,14 +72,11 @@ def InferenceLLM(prompt):
  return f"[InferenceLLM placeholder response to prompt: {prompt}]"
 
 
- # --------------------------------------
- # Display Entities & Glossary Functions
- # --------------------------------------
+ # ------------------------------------------
+ # Example Glossary & File Utility Functions
+ # ------------------------------------------
  @st.cache_resource
  def display_glossary_entity(k):
- """
- Creates multiple links (emojis) for a single entity.
- """
  search_urls = {
  "🚀🌌ArXiv": lambda k: f"/?q={quote(k)}",
  "🃏Analyst": lambda k: f"/?q={quote(k)}-{quote(PromptPrefix)}",
@@ -98,38 +91,27 @@ def display_glossary_entity(k):
  links_md = ' '.join([f"[{emoji}]({url(k)})" for emoji, url in search_urls.items()])
  st.markdown(f"**{k}** <small>{links_md}</small>", unsafe_allow_html=True)
 
+ def display_content_or_image(query):
+ for category, term_list in transhuman_glossary.items():
+ for term in term_list:
+ if query.lower() in term.lower():
+ st.subheader(f"Found in {category}:")
+ st.write(term)
+ return True
+ image_path = f"images/{query}.png"
+ if os.path.exists(image_path):
+ st.image(image_path, caption=f"Image for {query}")
+ return True
+ st.warning("No matching content or image found.")
+ return False
 
- @st.cache_resource
- def display_glossary_grid(roleplaying_glossary):
- """
- Displays a glossary in columns with multiple link emojis.
- """
- search_urls = {
- "🚀🌌ArXiv": lambda k: f"/?q={quote(k)}",
- "🃏Analyst": lambda k: f"/?q={quote(k)}-{quote(PromptPrefix)}",
- "📚PyCoder": lambda k: f"/?q={quote(k)}-{quote(PromptPrefix2)}",
- "🔬JSCoder": lambda k: f"/?q={quote(k)}-{quote(PromptPrefix3)}",
- "📖": lambda k: f"https://en.wikipedia.org/wiki/{quote(k)}",
- "🔍": lambda k: f"https://www.google.com/search?q={quote(k)}",
- "🔎": lambda k: f"https://www.bing.com/search?q={quote(k)}",
- "🎥": lambda k: f"https://www.youtube.com/results?search_query={quote(k)}",
- "🐦": lambda k: f"https://twitter.com/search?q={quote(k)}",
- }
-
- for category, details in roleplaying_glossary.items():
- st.write(f"### {category}")
- cols = st.columns(len(details))
- for idx, (game, terms) in enumerate(details.items()):
- with cols[idx]:
- st.markdown(f"#### {game}")
- for term in terms:
- links_md = ' '.join([f"[{emoji}]({url(term)})" for emoji, url in search_urls.items()])
- st.markdown(f"**{term}** <small>{links_md}</small>", unsafe_allow_html=True)
+ def clear_query_params():
+ st.warning("Define a redirect or use a link without query params if you want to truly clear them.")
 
 
- # --------------------
- # File-Handling Logic
- # --------------------
+ # -----------------------
+ # File Handling
+ # -----------------------
  def load_file(file_path):
  try:
  with open(file_path, "r", encoding='utf-8') as f:
@@ -153,9 +135,6 @@ def get_zip_download_link(zip_file):
  return f'<a href="data:application/zip;base64,{b64}" download="{zip_file}">Download All</a>'
 
  def get_table_download_link(file_path):
- """
- Creates a download link for a single file from your snippet.
- """
  try:
  with open(file_path, 'r', encoding='utf-8') as file:
  data = file.read()
@@ -179,43 +158,17 @@ def get_table_download_link(file_path):
  def get_file_size(file_path):
  return os.path.getsize(file_path)
 
- def compare_and_delete_files(files):
- """
- Compare file sizes. If duplicates exist, keep only the latest.
- """
- if not files:
- st.warning("No files to compare.")
- return
- file_sizes = {}
- for file in files:
- size = os.path.getsize(file)
- file_sizes.setdefault(size, []).append(file)
- # Remove all but the latest file for each size group
- for size, paths in file_sizes.items():
- if len(paths) > 1:
- latest_file = max(paths, key=os.path.getmtime)
- for file in paths:
- if file != latest_file:
- os.remove(file)
- st.success(f"Deleted {file} as a duplicate.")
-
-
  def FileSidebar():
- """
- Renders the file sidebar with all the open/view/run/delete logic.
- """
  all_files = glob.glob("*.md")
- # Filter out short-named or undesired files, if needed:
  all_files = [f for f in all_files if len(os.path.splitext(f)[0]) >= 5]
  all_files.sort(key=lambda x: (os.path.splitext(x)[1], x), reverse=True)
 
- # "Delete All" and "Download" buttons
  Files1, Files2 = st.sidebar.columns(2)
  with Files1:
  if st.button("🗑 Delete All"):
  for file in all_files:
  os.remove(file)
- st.experimental_rerun() # or remove if you prefer no rerun
+ st.rerun()
  with Files2:
  if st.button("⬇️ Download"):
  zip_file = create_zip_of_files(all_files)
@@ -253,29 +206,8 @@ def FileSidebar():
  with col5:
  if st.button("🗑", key="delete_"+file):
  os.remove(file)
- st.experimental_rerun() # or remove if no rerun needed
-
- # Optional: show file sizes
- file_sizes = [get_file_size(file) for file in all_files]
- previous_size = None
- st.sidebar.title("File Operations")
- for file, size in zip(all_files, file_sizes):
- duplicate_flag = "🚩" if size == previous_size else ""
- with st.sidebar.expander(f"File: {file} {duplicate_flag}"):
- st.text(f"Size: {size} bytes")
- if st.button("View", key=f"view_{file}"):
- try:
- with open(file, "r", encoding='utf-8') as f:
- file_content = f.read()
- st.code(file_content, language="markdown")
- except UnicodeDecodeError:
- st.error("Failed to decode file with UTF-8.")
- if st.button("Delete", key=f"delete3_{file}"):
- os.remove(file)
- st.experimental_rerun()
- previous_size = size
+ st.rerun()
 
- # If we've loaded content from a file
  if file_contents:
  if next_action == 'open':
  open1, open2 = st.columns([0.8, 0.2])
@@ -302,9 +234,9 @@ def FileSidebar():
  st.write("Running GPT logic placeholder...")
 
 
- # -------------------------------------------
+ # ---------------------------
  # Basic Scoring / Glossaries
- # -------------------------------------------
+ # ---------------------------
  score_dir = "scores"
  os.makedirs(score_dir, exist_ok=True)
 
@@ -333,9 +265,6 @@ def load_score(key):
  return 0
 
  def display_buttons_with_scores(num_columns_text):
- """
- Show buttons that track a 'score' from your glossary data.
- """
  game_emojis = {
  "Dungeons and Dragons": "🐉",
  "Call of Cthulhu": "🐙",
@@ -371,13 +300,10 @@ def display_buttons_with_scores(num_columns_text):
  st.markdown(f"Scored **{category} - {game} - {term}** -> {newscore}")
 
 
- # --------------------------------------
- # Image & Video Grids
- # --------------------------------------
+ # -------------------------------
+ # Image & Video Display
+ # -------------------------------
  def display_images_and_wikipedia_summaries(num_columns=4):
- """
- Display .png images in a grid with text input prompts.
- """
  image_files = [f for f in os.listdir('.') if f.endswith('.png')]
  if not image_files:
  st.write("No PNG images found in the current directory.")
@@ -394,7 +320,6 @@ def display_images_and_wikipedia_summaries(num_columns=4):
  st.image(image, use_column_width=True)
  k = image_file.split('.')[0]
  display_glossary_entity(k)
- # Provide a text input for user interactions
  image_text_input = st.text_input(f"Prompt for {image_file}", key=f"image_prompt_{image_file}")
  if image_text_input:
  response = process_image(image_file, image_text_input)
@@ -404,9 +329,6 @@ def display_images_and_wikipedia_summaries(num_columns=4):
  col_index += 1
 
  def display_videos_and_links(num_columns=4):
- """
- Displays all .mp4/.webm videos in the directory in a grid, with text input prompts.
- """
  video_files = [f for f in os.listdir('.') if f.endswith(('.mp4', '.webm'))]
  if not video_files:
  st.write("No MP4 or WEBM videos found in the current directory.")
@@ -424,7 +346,6 @@ def display_videos_and_links(num_columns=4):
  video_text_input = st.text_input(f"Video Prompt for {video_file}", key=f"video_prompt_{video_file}")
  if video_text_input:
  try:
- # For demonstration
  seconds_per_frame = 10
  process_video(video_file, seconds_per_frame)
  except ValueError:
@@ -432,47 +353,10 @@
  col_index += 1
 
 
- # --------------------------------------
- # Query Param Helpers (No experimental)
- # --------------------------------------
- def clear_query_params():
- """
- In Streamlit, there's no direct method to "clear" query params
- without rewriting the URL. One workaround is to do:
-
- st.experimental_set_query_params() # with no arguments
-
- But if you want to avoid *all* experimental calls,
- you can provide a link or button that leads to a new URL without params.
- """
- st.warning("Use a redirect or link that excludes query parameters.")
-
-
- def display_content_or_image(query):
- """
- If a query matches transhuman_glossary or a local image, display it.
- """
- for category, term_list in transhuman_glossary.items():
- for term in term_list:
- if query.lower() in term.lower():
- st.subheader(f"Found in {category}:")
- st.write(term)
- return True
- image_path = f"images/{query}.png"
- if os.path.exists(image_path):
- st.image(image_path, caption=f"Image for {query}")
- return True
- st.warning("No matching content or image found.")
- return False
-
-
- # ------------------------------------
- # MERMAID DIAGRAM with Clickable Nodes
- # ------------------------------------
+ # -------------------------------
+ # Mermaid Diagram Logic
+ # -------------------------------
  def generate_mermaid_html(mermaid_code: str) -> str:
- """
- Embeds a mermaid diagram in HTML, centered.
- """
  return f"""
  <html>
  <head>
@@ -500,14 +384,19 @@ def generate_mermaid_html(mermaid_code: str) -> str:
  """
 
  def append_model_param(url: str, model_selected: bool) -> str:
- """
- If 'model=1' is desired, we append it to each URL in the diagram.
- """
  if not model_selected:
  return url
  delimiter = "&" if "?" in url else "?"
  return f"{url}{delimiter}model=1"
 
+ def inject_base_url(url: str) -> str:
+ """
+ If the URL doesn't start with http, we assume it's a relative
+ path like '/?q=...' and prepend the BASE_URL.
+ """
+ if url.startswith("http"):
+ return url
+ return f"{BASE_URL}{url}"
 
  DEFAULT_MERMAID = """
  flowchart LR
@@ -525,31 +414,21 @@ flowchart LR
  click KG "/?q=Knowledge%20Graph%20Ontology+GAR+RAG" _self
  """
 
-
- # ---------------------------
- # Main Streamlit App
- # ---------------------------
  def main():
- st.set_page_config(page_title="Mermaid + Clickable Links Demo", layout="wide")
+ st.set_page_config(page_title="Mermaid + Clickable Links Demo with Base URL", layout="wide")
 
- # ---------------------------------------------
- # Query Parameter Parsing (No experimental usage)
- # ---------------------------------------------
+ # 1) Query Param Parsing
  try:
  query_params = st.query_params
-
- # Look for 'q' or 'query'
  query_list = (query_params.get('q') or query_params.get('query') or [''])
- q_or_query = query_list[0] if len(query_list) > 0 else ''
+ q_or_query = query_list[0] if query_list else ''
  if q_or_query.strip():
  filesearch = PromptPrefix + q_or_query
  st.markdown(filesearch)
  process_text(filesearch)
+ except:
+ pass
 
- except Exception as e:
- st.markdown(" ") # do nothing if there's an error
-
- # If 'action' in st.query_params
  if 'action' in st.query_params:
  action_list = st.query_params['action']
  if action_list:
@@ -558,51 +437,45 @@ def main():
  st.success("Showing a message because 'action=show_message' was found in the URL.")
  elif action == 'clear':
  clear_query_params()
- # If you wanted a full rerun with no params, you'd do a redirect or
- # st.experimental_set_query_params() with no arguments (but that's experimental).
 
- # If 'query' param is present, show content or image
  if 'query' in st.query_params:
- query_list2 = st.query_params['query']
- if query_list2 and len(query_list2) > 0:
- query_val = query_list2[0]
- display_content_or_image(query_val)
-
- # ---------------------------------------------
- # Let user pick if they want to add ?model=1
- # ---------------------------------------------
+ query_val = st.query_params['query'][0]
+ display_content_or_image(query_val)
+
+ # 2) Model param
  st.sidebar.write("## Diagram Link Settings")
  model_selected = st.sidebar.checkbox("Append ?model=1 to each link?")
 
- # Rebuild the dynamic Mermaid code
+ # 3) Rebuild the clickable lines in the Mermaid code
  base_diagram = DEFAULT_MERMAID
  lines = base_diagram.strip().split("\n")
  new_lines = []
  for line in lines:
  if "click " in line and '"/?' in line:
- # e.g. click U "/?q=User" _self
+ # We want to parse the line, extract the URL, inject base, then maybe add &model=1
  parts = re.split(r'click\s+\S+\s+"([^"]+)"\s+("_self")', line)
  if len(parts) == 4:
- url = parts[1]
- updated_url = append_model_param(url, model_selected)
- new_line = f"{parts[0]}\"{updated_url}\" {parts[2]}"
+ old_url = parts[1] # e.g. '/?q=User%20😎'
+ # 1) Prepend base if needed
+ new_url = inject_base_url(old_url)
+ # 2) Possibly add &model=1
+ new_url = append_model_param(new_url, model_selected)
+
+ # Recombine
+ new_line = f"{parts[0]}\"{new_url}\" {parts[2]}"
  new_lines.append(new_line)
  else:
  new_lines.append(line)
  else:
  new_lines.append(line)
+
  mermaid_code = "\n".join(new_lines)
 
- # ---------------------------------------------
- # Render top-centered Mermaid diagram
- # ---------------------------------------------
- st.sidebar.markdown("Mermaid Diagram Editor with Clickable Links 🏺")
+ st.title("Mermaid Diagram with Base URL + Model Param")
  diagram_html = generate_mermaid_html(mermaid_code)
  components.html(diagram_html, height=400, scrolling=True)
 
- # ---------------------------------------------
- # Two-column layout: Markdown & Mermaid Editors
- # ---------------------------------------------
+ # 4) Two-column interface: Markdown & Mermaid editors
  left_col, right_col = st.columns(2)
 
  # --- Left: Markdown Editor
@@ -624,7 +497,7 @@ def main():
  with colB:
  if st.button("❌ Clear Markdown"):
  st.session_state["markdown_text"] = ""
- st.rerun() # non-experimental re-run if available in your Streamlit version
+ st.rerun()
 
  st.markdown("---")
  st.markdown("**Preview:**")
@@ -658,36 +531,25 @@ def main():
  st.markdown("**Mermaid Source:**")
  st.code(mermaid_input, language="python", line_numbers=True)
 
- # ---------------------------------------------
- # Media Galleries
- # ---------------------------------------------
+ # 5) Media Galleries
  st.markdown("---")
  st.header("Media Galleries")
-
  num_columns_images = st.slider("Choose Number of Image Columns", 1, 15, 5, key="num_columns_images")
  display_images_and_wikipedia_summaries(num_columns_images)
 
  num_columns_video = st.slider("Choose Number of Video Columns", 1, 15, 5, key="num_columns_video")
  display_videos_and_links(num_columns_video)
 
- # ---------------------------------------------
- # Optional Extended UI
- # ---------------------------------------------
+ # 6) Optional UI
  showExtendedTextInterface = False
  if showExtendedTextInterface:
- display_glossary_grid(roleplaying_glossary)
- num_columns_text = st.slider("Choose Number of Text Columns", 1, 15, 4, key="num_columns_text")
- display_buttons_with_scores(num_columns_text)
- st.markdown("Extended text interface is on...")
-
- # ---------------------------------------------
- # File Sidebar
- # ---------------------------------------------
+ st.write("Extended text interface is ON.")
+ # e.g. display_glossary_grid, display_buttons_with_scores, etc.
+
+ # 7) File Sidebar
  FileSidebar()
 
- # ---------------------------------------------
- # Random Title at the bottom
- # ---------------------------------------------
+ # 8) Random Title
  titles = [
  "🧠🎭 Semantic Symphonies & Episodic Encores",
  "🌌🎼 AI Rhythms of Memory Lane",
@@ -698,8 +560,7 @@ def main():
  "🏰 Semantic Soul & Episodic Essence",
  "🥁🎻 The Music Of AI's Mind"
  ]
- selected_title = random.choice(titles)
- st.markdown(f"**{selected_title}**")
+ st.markdown(f"**{random.choice(titles)}**")
 
 
  if __name__ == "__main__":
 
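For reference, a minimal sketch of how the two URL helpers touched by this commit compose. BASE_URL, inject_base_url, and append_model_param mirror the code in the diff above; the standalone __main__ harness, the sample click-target URL taken from DEFAULT_MERMAID, and the printed result are illustrative only.

# Sketch only: reproduces the URL helpers from the diff above to show
# how a relative Mermaid click target becomes an absolute link with model=1.
BASE_URL = "https://huggingface.co/spaces/awacke1/MermaidMarkdownDiagramEditor"

def inject_base_url(url: str) -> str:
    # Relative paths like '/?q=...' get the Space's base URL prepended.
    if url.startswith("http"):
        return url
    return f"{BASE_URL}{url}"

def append_model_param(url: str, model_selected: bool) -> str:
    # Optionally add model=1, using '&' when the URL already has a query string.
    if not model_selected:
        return url
    delimiter = "&" if "?" in url else "?"
    return f"{url}{delimiter}model=1"

if __name__ == "__main__":
    old_url = "/?q=Knowledge%20Graph%20Ontology+GAR+RAG"  # click target from DEFAULT_MERMAID
    new_url = append_model_param(inject_base_url(old_url), model_selected=True)
    print(new_url)
    # https://huggingface.co/spaces/awacke1/MermaidMarkdownDiagramEditor/?q=Knowledge%20Graph%20Ontology+GAR+RAG&model=1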