import json
import os
import pandas as pd
import requests
import threading
import streamlit as st
from datasets import load_dataset, load_metric

# All model names the app knows about. NOTE(review): MODELS is not referenced
# anywhere in this chunk — presumably used by parts of the file not shown here;
# verify before removing.
MODELS = ["CodeParrot", "InCoder", "CodeGen", "PolyCoder"]
# Subset of models that can actually be queried for generation via their
# Hugging Face Space endpoint (see generate_code below).
GENERATION_MODELS = ["CodeParrot", "InCoder", "CodeGen"]


@st.cache()
def load_examples():
    """Load the prompt examples shown in the UI from utils/examples.json.

    Returns:
        The parsed JSON content; downstream code reads each example's
        "name", "value" and "length" keys.
    """
    # Explicit UTF-8 so the JSON parses identically on every platform
    # (the default encoding of open() is locale-dependent, e.g. cp1252
    # on Windows, which would break on non-ASCII example text).
    with open("utils/examples.json", "r", encoding="utf-8") as f:
        examples = json.load(f)
    return examples
    
    
def load_evaluation():
    """Load HumanEval task 2 and the `code_eval` metric for pass@k scoring.

    Returns:
        A ``(code_eval, test_func)`` tuple: the ``code_eval`` metric object
        and the test source for HumanEval task 2 — the task's unit tests
        followed by a ``check(<entry_point>)`` call, ready to be executed
        against a candidate completion.
    """
    # load task 2 of HumanEval and code_eval_metric
    # code_eval executes model-generated code, so the metric requires this
    # explicit opt-in flag before it will run anything.
    os.environ["HF_ALLOW_CODE_EVAL"] = "1"
    human_eval = load_dataset("openai_humaneval")
    # Build the call that runs the task's test suite on the candidate
    # function, e.g. "check(separate_paren_groups)".
    entry_point = f"check({human_eval['test'][2]['entry_point']})"
    test_func = "\n" + human_eval["test"][2]["test"] + "\n" + entry_point
    # NOTE(review): datasets.load_metric is deprecated in recent `datasets`
    # releases (metrics moved to the `evaluate` package) — confirm the
    # pinned `datasets` version still provides it.
    code_eval = load_metric("code_eval")
    return code_eval, test_func


def read_markdown(path):
    """Read a markdown file and render it in the Streamlit app.

    Args:
        path: Path of the markdown file to render (HTML inside it is allowed).
    """
    # Explicit UTF-8 so non-ASCII markdown renders correctly regardless of
    # the platform's default locale encoding.
    with open(path, "r", encoding="utf-8") as f:
        output = f.read()
    st.markdown(output, unsafe_allow_html=True)


def generate_code(
    generations, model_name, gen_prompt, max_new_tokens, temperature, seed, timeout=60
):
    """Request a completion from the model's Hugging Face Space and collect it.

    On success, appends ``{model_name: generated_text}`` to ``generations``
    (a shared list — this function is run from worker threads). On a network
    error, timeout, or malformed response, nothing is appended; the caller
    detects missing entries and reports the model as timed out.

    Args:
        generations: Shared list the result dict is appended to.
        model_name: Display name of the model; lowercased to build the URL.
        gen_prompt: Prompt text sent to the model.
        max_new_tokens: Number of tokens to generate.
        temperature: Sampling temperature.
        seed: Random seed forwarded to the Space.
        timeout: Seconds to wait for the HTTP request (default 60). Without
            a timeout, requests.post can hang forever and block the UI.
    """
    # call space using its API endpoint
    url = (
        f"https://hf.space/embed/codeparrot/{model_name.lower()}-subspace/+/api/predict/"
    )
    try:
        r = requests.post(
            url=url,
            json={"data": [gen_prompt, max_new_tokens, temperature, seed]},
            timeout=timeout,
        )
        r.raise_for_status()
        generated_text = r.json()["data"][0]
    except (requests.RequestException, ValueError, KeyError, IndexError):
        # Failed or timed-out request: leave `generations` untouched so the
        # UI can tell this model produced no answer. (ValueError covers JSON
        # decode errors; KeyError/IndexError cover unexpected payloads.)
        return
    generations.append({model_name: generated_text})


def generate_code_threads(
    generations, models, gen_prompt, max_new_tokens, temperature, seed
):
    """Launch one `generate_code` request per model, each on its own thread.

    Every worker appends its result to the shared `generations` list; this
    function blocks until all workers have finished, so the caller can read
    `generations` safely afterwards.
    """
    workers = []
    for name in models:
        # Start each request as soon as its thread is created so all model
        # endpoints are queried concurrently.
        worker = threading.Thread(
            target=generate_code,
            args=(generations, name, gen_prompt, max_new_tokens, temperature, seed),
        )
        worker.start()
        workers.append(worker)

    # Wait for every request to complete before returning control to the UI.
    for worker in workers:
        worker.join()

@st.cache(show_spinner=False)
def generate_teaser(gen_prompt):
    """Generate a short CodeParrot completion (8 tokens, temp 0.2, seed 42).

    Cached by Streamlit so repeated reruns with the same prompt reuse the
    same teaser instead of re-hitting the endpoint.
    """
    results = []
    generate_code(results, "CodeParrot", gen_prompt, 8, 0.2, 42)
    return results[0]["CodeParrot"]
    
# Page-wide Streamlit configuration: wide layout with a laptop favicon.
st.set_page_config(page_icon=":laptop:", layout="wide")


# Introduction
# Title repaired: "šŸ¤—" was UTF-8 for the šŸ¤— emoji mis-decoded as cp1252.
st.title("Genera codice onlinešŸ¤—")

# Code generation
# UI labels are in Italian. Left column: model and example selection;
# right column: sampling settings; below: prompt box and results.

col1, col2, col3 = st.columns([7, 1, 6])
with col1:
    st.markdown("**Modelli disponibili**")
    selected_models = st.multiselect(
        "Seleziona uno o più modelli per generare del codice:",
        GENERATION_MODELS,
        default=GENERATION_MODELS,
        key=3,
    )
    st.markdown(" ")
    st.markdown("**Esempi**")
    examples = load_examples()
    example_names = [example["name"] for example in examples]
    # Map example name -> index so the selectbox choice can be resolved back
    # to the full example record.
    name2id = {name: i for i, name in enumerate(example_names)}
    selected_example = st.selectbox(
        "Seleziona un esempio per prendere spunto:", example_names
    )
    example_text = examples[name2id[selected_example]]["value"]
    default_length = examples[name2id[selected_example]]["length"]
with col3:
    st.markdown("**Impostazioni**")
    temperature = st.slider(
        "Temperature:", value=0.2, min_value=0.1, step=0.1, max_value=2.0
    )
    max_new_tokens = st.slider(
        "Token da generare:",
        value=default_length,
        min_value=8,
        step=4,
        max_value=256,
    )
    seed = st.slider("Random seed:", value=42, min_value=0, step=1, max_value=1000)
gen_prompt = st.text_area(
    "Istruzioni per generare il codice:",
    value=example_text,
    height=200,
).strip()
if st.button("Genera il codice e risparmi tempo", key=4):
    with st.spinner("Dammi un minuto, sto rubando un programmatore..."):
        # Query all selected model endpoints concurrently; each worker thread
        # appends its own result dict to `generations`. A model that fails or
        # times out contributes no entry.
        generations = []
        generate_code_threads(
            generations,
            selected_models,
            gen_prompt=gen_prompt,
            max_new_tokens=max_new_tokens,
            temperature=temperature,
            seed=seed,
        )
        # Display results in the order the user selected the models. Threads
        # finish in arbitrary order, so look each model up by name rather
        # than by index: the old index-based loop could print a header with
        # no code under it and skip successful models whenever some model
        # timed out.
        for model_name in selected_models:
            for generation in generations:
                if model_name in generation:
                    st.markdown(f"**{model_name}**")
                    st.code(generation[model_name])
        if len(generations) < len(selected_models):
            st.markdown(
                "<span style='color:red'>Avviso: alcuni modelli vanno in timeout, "
                "prova un’altra volta o riduci il numero di token da generare.</span>",
                unsafe_allow_html=True,
            )