import streamlit as st
import numpy as np
import pandas as pd
import streamlit.components.v1 as components

from millify import millify

from utils.utils_display import get_current_date, get_json_from_date, get_available_dates, render_st_from_chapter_number, get_current_global_step
from utils.constants import preface_disclaimer

st.set_page_config(page_title="Bloom Book", layout='wide')

# Training batch size (in sequences) and sequence length (in tokens),
# used below to estimate how many tokens the model has seen at a given global step
BATCH_SIZE = 2048
SEQ_LENGTH = 2048

curr_date = get_current_date()  # current date from utils_display (not used further on this page)

# set_png_as_page_bg("data/image/bloom-book-bg.png") #
st.markdown("<h1 style='text-align: center; color: grey;'>πŸ“– BLOOM Book πŸ“– </h1>", unsafe_allow_html=True)

available_dates = get_available_dates()
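# Chapters: the Preface plus one chapter per date with published generations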
available_chapters = ("Preface", ) + tuple(available_dates)

st.sidebar.image(
    "https://assets.website-files.com/6139f3cdcbbff3a68486761d/613cd8997b270da063e230c5_Tekengebied%201-p-2000.png",
    use_column_width=True
)

st.sidebar.title(
    "Chapters browser"
)

st.sidebar.markdown(
    "You can freely browse the different chapters, i.e. example prompts submitted by different people, and see the results."
)

selected_date = st.sidebar.selectbox(
    "Please select the chapter you want to read:",
    available_chapters
)

if selected_date != "Preface":
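    # A dated chapter was selected: show its header, training progress, and the generations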
    current_global_step = get_current_global_step(selected_date)
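    # Approximate number of tokens seen so far = sequences per batch * tokens per sequence * global steps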
    seen_tokens = BATCH_SIZE * SEQ_LENGTH * current_global_step
    st.markdown("<h2 style='text-align: center; color: grey;'> Chapter {} </h2>".format(selected_date), unsafe_allow_html=True)
    st.markdown("<h3 style='text-align: center; color: grey;'> Global step: {} - Seen tokens: {} </h3>".format(current_global_step, millify(seen_tokens)), unsafe_allow_html=True)
    st.markdown("<h5 style='text-align: center; color: grey;'> Click on the text cards to visualize the answers </h5>", unsafe_allow_html=True)

    selected_format = st.sidebar.selectbox('Visualize as:', ["HTML","JSON"])
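    # Generations exist for two decoding strategies: greedy decoding and nucleus sampling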
    suffixes = ["greedy", "nucleus"]

    if selected_format == "HTML":
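        # HTML view: render the prompt cards, optionally filtered by a search string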
        user_input = st.sidebar.text_input("Search for a specific prompt: ", "")
        render_st_from_chapter_number(selected_date, suffixes, user_input)
    elif selected_format == "JSON":
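        # JSON view: dump the raw generations for the selected decoding strategy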
        suffix = st.sidebar.selectbox('Decoding strategy:', ["greedy","nucleus"])
        json_output = get_json_from_date(selected_date, suffix)
        st.json(json_output)
else:
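    # Preface: welcome text, link to the training logs, and the prompt-submission form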
    st.markdown("<h3 style='text-align: center; color: grey;'> Welcome to the <i> BLOOM Book </i>. Here you can read generations from the main model based on prompts provided by the community. </h3> ", unsafe_allow_html=True)
    st.markdown("""<h3 style='text-align: center; color: grey;'> Follow the main model's training <a href='https://huggingface.co/bigscience/tr11-176B-ml-logs' target="_blank"> here </a> </h3> """, unsafe_allow_html=True)
    st.markdown("""<h3 style='text-align: center; color: grey;'> Want to try your own prompts? Submit them via the <a href='https://forms.gle/2L7jkZt8MS8VDy2ZA' target="_blank"> Google Form </a> </h3> """, unsafe_allow_html=True)
    st.markdown("{}".format(preface_disclaimer), unsafe_allow_html=True)
    final_html = """ """  # TODO: add preface
    chapter = components.html(
        final_html,
        height=600,
    )