awacke1 committed
Commit 5b97dfd · verified · 1 parent: 34595f9

Update app.py

Files changed (1): app.py +45 -61
app.py CHANGED
@@ -15,7 +15,7 @@ from urllib.parse import quote
import streamlit as st
import streamlit.components.v1 as components

-# (Optional) huggingface_hub usage
+# (Optional) huggingface_hub usage if you do model inference
from huggingface_hub import InferenceClient


@@ -24,10 +24,7 @@ from huggingface_hub import InferenceClient
# ----------------------------
BASE_URL = "https://huggingface.co/spaces/awacke1/MermaidMarkdownDiagramEditor"

-
-# ----------------------------
-# Placeholder data
-# ----------------------------
+# Example placeholders
PromptPrefix = "AI-Search: "
PromptPrefix2 = "AI-Refine: "
PromptPrefix3 = "AI-JS: "
@@ -47,7 +44,6 @@ transhuman_glossary = {
    "Cybernetics": ["Robotic Limbs", "Augmented Eyes"],
}

-
def process_text(text):
    st.write(f"process_text called with: {text}")

@@ -63,7 +59,7 @@ def process_image(image_file, prompt):
def process_video(video_file, seconds_per_frame):
    st.write(f"[process_video placeholder] Video: {video_file}, seconds/frame: {seconds_per_frame}")

-# Stub if you have an HF endpoint
+# Stub if you have a HF endpoint
API_URL = "https://huggingface-inference-endpoint-placeholder"
API_KEY = "hf_XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"

@@ -71,22 +67,21 @@ API_KEY = "hf_XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
def InferenceLLM(prompt):
    return f"[InferenceLLM placeholder response to prompt: {prompt}]"

-
# ------------------------------------------
-# Example Glossary & File Utility Functions
+# Glossary & File Utility
# ------------------------------------------
@st.cache_resource
def display_glossary_entity(k):
    search_urls = {
-        "🚀🌌ArXiv": lambda k: f"/?q={quote(k)}",
-        "🃏Analyst": lambda k: f"/?q={quote(k)}-{quote(PromptPrefix)}",
-        "📚PyCoder": lambda k: f"/?q={quote(k)}-{quote(PromptPrefix2)}",
-        "🔬JSCoder": lambda k: f"/?q={quote(k)}-{quote(PromptPrefix3)}",
-        "📖": lambda k: f"https://en.wikipedia.org/wiki/{quote(k)}",
-        "🔍": lambda k: f"https://www.google.com/search?q={quote(k)}",
-        "🔎": lambda k: f"https://www.bing.com/search?q={quote(k)}",
-        "🎥": lambda k: f"https://www.youtube.com/results?search_query={quote(k)}",
-        "🐦": lambda k: f"https://twitter.com/search?q={quote(k)}",
+        "🚀🌌ArXiv": lambda x: f"/?q={quote(x)}",
+        "🃏Analyst": lambda x: f"/?q={quote(x)}-{quote(PromptPrefix)}",
+        "📚PyCoder": lambda x: f"/?q={quote(x)}-{quote(PromptPrefix2)}",
+        "🔬JSCoder": lambda x: f"/?q={quote(x)}-{quote(PromptPrefix3)}",
+        "📖": lambda x: f"https://en.wikipedia.org/wiki/{quote(x)}",
+        "🔍": lambda x: f"https://www.google.com/search?q={quote(x)}",
+        "🔎": lambda x: f"https://www.bing.com/search?q={quote(x)}",
+        "🎥": lambda x: f"https://www.youtube.com/results?search_query={quote(x)}",
+        "🐦": lambda x: f"https://twitter.com/search?q={quote(x)}",
    }
    links_md = ' '.join([f"[{emoji}]({url(k)})" for emoji, url in search_urls.items()])
    st.markdown(f"**{k}** <small>{links_md}</small>", unsafe_allow_html=True)
@@ -106,7 +101,7 @@ def display_content_or_image(query):
    return False

def clear_query_params():
-    st.warning("Define a redirect or use a link without query params if you want to truly clear them.")
+    st.warning("Define a redirect or link without query params if you want to truly clear them.")


# -----------------------
@@ -233,9 +228,8 @@ def FileSidebar():
    if st.button('🔍Run'):
        st.write("Running GPT logic placeholder...")

-
# ---------------------------
-# Basic Scoring / Glossaries
+# Scoring / Glossaries
# ---------------------------
score_dir = "scores"
os.makedirs(score_dir, exist_ok=True)
@@ -257,9 +251,9 @@ def update_score(key, increment=1):
    return score_data["score"]

def load_score(key):
-    score_file = os.path.join(score_dir, f"{key}.json")
-    if os.path.exists(score_file):
-        with open(score_file, "r") as file:
+    file_path = os.path.join(score_dir, f"{key}.json")
+    if os.path.exists(file_path):
+        with open(file_path, "r") as file:
            score_data = json.load(file)
            return score_data["score"]
    return 0
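Note: the score_file → file_path change is a pure rename. For reference, a small sketch of the round trip load_score expects, assuming the {"score": ...} file layout implied by score_data["score"]; the key below is hypothetical:

import json, os

score_dir = "scores"
os.makedirs(score_dir, exist_ok=True)

key = "Cybernetics-demo"  # hypothetical key
file_path = os.path.join(score_dir, f"{key}.json")
with open(file_path, "w") as f:
    json.dump({"score": 3}, f)      # shape read back by load_score

with open(file_path, "r") as f:
    print(json.load(f)["score"])    # 3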
@@ -299,9 +293,8 @@ def display_buttons_with_scores(num_columns_text):
            newscore = update_score(key.replace('?',''))
            st.markdown(f"Scored **{category} - {game} - {term}** -> {newscore}")

-
# -------------------------------
-# Image & Video Display
+# Image & Video
# -------------------------------
def display_images_and_wikipedia_summaries(num_columns=4):
    image_files = [f for f in os.listdir('.') if f.endswith('.png')]
@@ -352,10 +345,9 @@ def display_videos_and_links(num_columns=4):
            st.error("Invalid input for seconds per frame!")
        col_index += 1

-
-# -------------------------------
-# Mermaid Diagram Logic
-# -------------------------------
+# --------------------------------
+# MERMAID DIAGRAM
+# --------------------------------
def generate_mermaid_html(mermaid_code: str) -> str:
    return f"""
    <html>
@@ -390,10 +382,6 @@ def append_model_param(url: str, model_selected: bool) -> str:
    return f"{url}{delimiter}model=1"

def inject_base_url(url: str) -> str:
-    """
-    If the URL doesn't start with http, we assume it's a relative
-    path like '/?q=...' and prepend the BASE_URL.
-    """
    if url.startswith("http"):
        return url
    return f"{BASE_URL}{url}"
@@ -415,22 +403,19 @@ flowchart LR
"""

def main():
-    st.set_page_config(page_title="Mermaid + Clickable Links Demo with Base URL", layout="wide")
+    st.set_page_config(page_title="Mermaid + Clickable Links with Base URL", layout="wide")

    # 1) Query Param Parsing
-    try:
-        query_params = st.query_params
-        query_list = (query_params.get('q') or query_params.get('query') or [''])
-        q_or_query = query_list[0] if query_list else ''
-        if q_or_query.strip():
-            filesearch = PromptPrefix + q_or_query
-            st.markdown(filesearch)
-            process_text(filesearch)
-    except:
-        pass
-
-    if 'action' in st.query_params:
-        action_list = st.query_params['action']
+    query_params = st.query_params
+    query_list = (query_params.get('q') or query_params.get('query') or [''])
+    q_or_query = query_list[0] if query_list else ''
+    if q_or_query.strip():
+        search_payload = PromptPrefix + q_or_query
+        st.markdown(search_payload)
+        process_text(search_payload)
+
+    if 'action' in query_params:
+        action_list = query_params['action']
        if action_list:
            action = action_list[0]
            if action == 'show_message':
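Note: dropping the bare try/except makes parsing failures visible instead of silently swallowed; the q/query handling itself is unchanged. A minimal sketch of that flow, using a hypothetical dict in place of st.query_params and assuming list-valued params as the older experimental API returned; if st.query_params yields plain strings instead, query_list[0] would pick up only the first character:

PromptPrefix = "AI-Search: "

query_params = {"q": ["Neural Interfaces"]}  # hypothetical stand-in for st.query_params

query_list = (query_params.get('q') or query_params.get('query') or [''])
q_or_query = query_list[0] if query_list else ''
if q_or_query.strip():
    search_payload = PromptPrefix + q_or_query
    print(search_payload)  # AI-Search: Neural Interfaces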
@@ -438,11 +423,11 @@ def main():
            elif action == 'clear':
                clear_query_params()

-    if 'query' in st.query_params:
-        query_val = st.query_params['query'][0]
+    if 'query' in query_params:
+        query_val = query_params['query'][0]
        display_content_or_image(query_val)

-    # 2) Model param
+    # 2) Let user pick if we want ?model=1
    st.sidebar.write("## Diagram Link Settings")
    model_selected = st.sidebar.checkbox("Append ?model=1 to each link?")

@@ -452,12 +437,11 @@ def main():
    new_lines = []
    for line in lines:
        if "click " in line and '"/?' in line:
-            # We want to parse the line, extract the URL, inject base, then maybe add &model=1
            parts = re.split(r'click\s+\S+\s+"([^"]+)"\s+("_self")', line)
            if len(parts) == 4:
-                old_url = parts[1]  # e.g. '/?q=User%20😎'
+                old_url = parts[1]  # e.g. '/?q=User%20😎'
                # 1) Prepend base if needed
-                new_url = inject_base_url(old_url)
+                new_url = inject_base_url(old_url)
                # 2) Possibly add &model=1
                new_url = append_model_param(new_url, model_selected)
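Note: a quick sanity check of the regex above on a hypothetical Mermaid click line (not taken from app.py); with one match and two capture groups, re.split returns four parts and parts[1] is the URL that receives the base and the optional model flag:

import re

line = '    click User "/?q=User%20😎" "_self"'
parts = re.split(r'click\s+\S+\s+"([^"]+)"\s+("_self")', line)
print(len(parts))  # 4 -> ['    ', '/?q=User%20😎', '"_self"', '']
print(parts[1])    # /?q=User%20😎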
 
@@ -471,14 +455,15 @@ def main():

    mermaid_code = "\n".join(new_lines)

-    st.title("Mermaid Diagram with Base URL + Model Param")
+    # 4) Render the top-centered Mermaid diagram
+    st.title("Mermaid Diagram with Base URL Injection")
    diagram_html = generate_mermaid_html(mermaid_code)
    components.html(diagram_html, height=400, scrolling=True)

-    # 4) Two-column interface: Markdown & Mermaid editors
+    # 5) Two-column interface: Markdown & Mermaid
    left_col, right_col = st.columns(2)

-    # --- Left: Markdown Editor
+    # Left: Markdown Editor
    with left_col:
        st.subheader("Markdown Side 📝")
        if "markdown_text" not in st.session_state:
@@ -503,7 +488,7 @@ def main():
        st.markdown("**Preview:**")
        st.markdown(markdown_text)

-    # --- Right: Mermaid Editor
+    # Right: Mermaid Editor
    with right_col:
        st.subheader("Mermaid Side 🧜‍♂️")

@@ -531,7 +516,7 @@ def main():
        st.markdown("**Mermaid Source:**")
        st.code(mermaid_input, language="python", line_numbers=True)

-    # 5) Media Galleries
+    # 6) Media Galleries
    st.markdown("---")
    st.header("Media Galleries")
    num_columns_images = st.slider("Choose Number of Image Columns", 1, 15, 5, key="num_columns_images")
@@ -540,11 +525,10 @@ def main():
    num_columns_video = st.slider("Choose Number of Video Columns", 1, 15, 5, key="num_columns_video")
    display_videos_and_links(num_columns_video)

-    # 6) Optional UI
    showExtendedTextInterface = False
    if showExtendedTextInterface:
-        st.write("Extended text interface is ON.")
        # e.g. display_glossary_grid, display_buttons_with_scores, etc.
+        pass

    # 7) File Sidebar
    FileSidebar()