File size: 2,911 Bytes
71d010d
0ab46f1
f590120
71d010d
f590120
 
 
 
 
 
71d010d
 
dfae8f1
71d010d
f590120
 
 
 
 
71d010d
f590120
0459f8a
f590120
0459f8a
f590120
0ab46f1
 
 
 
 
 
 
 
71d010d
0ab46f1
71d010d
 
 
 
 
 
 
 
f590120
0ab46f1
fb520fb
f590120
71d010d
 
5c6b5ae
71d010d
 
f590120
 
 
 
71d010d
f590120
 
71d010d
f590120
0ab46f1
16b61e6
71d010d
f590120
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline, BitsAndBytesConfig
from threading import Thread

# HTML blurb rendered at the top of the demo page (typo fix: "abd" -> "and").
description = """
<p style="text-align: center; font-size: 24px; color: #292b47;">
    <strong>🚀 <span style='color: #3264ff;'>DeciCoder-6B: Bigger, Faster, Stronger </span></strong>
</p>
<span style='color: #292b47;'>Welcome to the <a href="https://huggingface.co/Deci/DeciCoder-6B" style="color: #3264ff;">DeciCoder-6B playground</a>! DeciCoder-6B was trained on the Python, Java, Javascript, Rust, C++, C, and C# subset of the Starcoder Training Dataset, and it's released under the Apache 2.0 license. This model is capable of code-completion and instruction following. It surpasses CodeGen 2.5 7B, CodeLlama 7B, and StarCoder 7B in its supported languages on HumanEval, and leads by 3 points in Python over StarCoderBase 15.5B.</span>
"""


checkpoint = "Deci/DeciCoder-6B"

# Load the model in 4-bit. The bare `load_in_4bit=True` kwarg on from_pretrained
# is deprecated in recent transformers releases; pass an explicit
# BitsAndBytesConfig (already imported above) instead — same behavior.
model = AutoModelForCausalLM.from_pretrained(
    checkpoint,
    trust_remote_code=True,   # DeciCoder ships custom modeling code on the Hub
    device_map="auto",        # place layers on available devices automatically
    low_cpu_mem_usage=True,
    quantization_config=BitsAndBytesConfig(load_in_4bit=True),
)

tokenizer = AutoTokenizer.from_pretrained(checkpoint, trust_remote_code=True)

# No dedicated pad token in this tokenizer: reuse EOS, and left-pad so that
# generation continues directly after the prompt.
tokenizer.pad_token = tokenizer.eos_token
tokenizer.padding_side = "left"

pipe = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    device_map="auto",
    max_length=2048,
    # NOTE(review): temperature only takes effect when sampling is enabled
    # (do_sample=True); with default greedy decoding it is ignored — confirm
    # whether near-greedy sampling was the intent here.
    temperature=1e-3,
)

def code_generation(prompt: str) -> str:
    """
    Complete the given code prompt with the module-level text-generation pipeline.

    The prompt is forwarded verbatim, so plain completion prompts work directly;
    FIM (fill-in-the-middle) style prompts would also pass through unchanged,
    assuming the model recognizes the FIM special tokens — this function does no
    FIM-specific formatting itself.

    Args:
        prompt (str): The input code prompt.

    Returns:
        str: The generated code with all "<|endoftext|>" markers stripped.
    """
    completion = pipe(prompt)[0]['generated_text']
    # str.replace removes every occurrence of the end-of-text token, not just one.
    return completion.replace("<|endoftext|>", "")


# Assemble the Gradio UI: description banner, input/output code panes, buttons.
demo = gr.Blocks(
    css=".gradio-container {background-color: #FAFBFF; color: #292b47}"
)
with demo:
    gr.Markdown(value=description)
    with gr.Row():
        code = gr.Code(lines=10, language="python", label="👨🏽‍💻 Input", value="def nth_element_in_fibonnaci(element):\n    \"\"\"Returns the nth element of the Fibonnaci sequence.\"\"\"")
        output = gr.Code(label="💻 Generated code")
    with gr.Row():
        run = gr.Button(value="👨🏽‍💻 Generate code")
        clear = gr.Button("🗑️ Clear")

    # Reset both panes; queue=False so the clear action is immediate.
    clear.click(lambda: (None, None), None, [code, output], queue=False)
    # The event handle returned by .click() was assigned but never used; drop it.
    run.click(code_generation, [code], output)
    gr.HTML(label="Keep in touch", value="<img src='https://huggingface.co/spaces/Deci/DeciCoder-Demo/resolve/main/deci-coder-banner.png' alt='Keep in touch' style='display: block; color: #292b47; margin: auto; max-width: 800px;'>")

demo.launch(debug=True)