import os
from threading import Thread
from typing import Iterator

import gradio as gr
# from gradio import MultimodalTextbox
# from gradio.data_classes import FileData
import spaces
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
from typing_extensions import NotRequired, TypedDict

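# Generation limits: cap the number of new tokens a user can request and trim
# prompts longer than MAX_INPUT_TOKEN_LENGTH tokens before generation.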
MAX_MAX_NEW_TOKENS = 1024
DEFAULT_MAX_NEW_TOKENS = 256
MAX_INPUT_TOKEN_LENGTH = 50000

DESCRIPTION = """\
# Yam-Peleg's Hebrew-Mistral-7B-200K

Hebrew-Mistral-7B-200K was introduced in [this Facebook post](https://www.facebook.com/groups/MDLI1/posts/2708679492629415/).

Please check the [original model card](https://huggingface.co/yam-peleg/Hebrew-Mistral-7B-200K) for more details.
You can see Yam's other Hebrew models [here](https://huggingface.co/collections/yam-peleg/hebrew-models-65e957875324e2b9a4b68f08).

# Note: Use this model only for completing sentences.
## While the user interface is that of a chatbot for convenience, this is a base model and is not fine-tuned for chatbot or instruction-following tasks.
"""

LICENSE = """
<p/>

---
A derivative work of [mistral-7b](https://mistral.ai/news/announcing-mistral-7b/) by Mistral-AI.
The model and this Space are released under the Apache 2.0 license.

This demo Space was created by [Doron Adler](https://linktr.ee/Norod78)
"""

if not torch.cuda.is_available():
    DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"


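# Load the model and tokenizer only when a GPU is present; weights are loaded in
# bfloat16 and spread across the available devices via device_map="auto".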
if torch.cuda.is_available():
    model_id = "yam-peleg/Hebrew-Mistral-7B-200K"
    model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, low_cpu_mem_usage=True)
    tokenizer = AutoTokenizer.from_pretrained(model_id)


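# On Hugging Face ZeroGPU Spaces, the @spaces.GPU decorator requests a GPU for
# the duration of each call to generate().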
@spaces.GPU
def generate(
    message: str,
    chat_history: list[tuple[str, str]],
    max_new_tokens: int = 1024,
    temperature: float = 0.2,
    top_p: float = 0.7,
    top_k: int = 30,
    repetition_penalty: float = 1.0,
) -> Iterator[str]:
    historical_text = ""
    # Prepend the entire chat history to the message, with a newline between each turn
    for user, assistant in chat_history:
        historical_text += f"\n{user}\n{assistant}"

    if len(historical_text) > 0:
        message = historical_text + f"\n{message}"
    input_ids = tokenizer([message], return_tensors="pt").input_ids
    if input_ids.shape[1] > MAX_INPUT_TOKEN_LENGTH:
        input_ids = input_ids[:, -MAX_INPUT_TOKEN_LENGTH:]
        gr.Warning(f"Trimmed input from conversation as it was longer than {MAX_INPUT_TOKEN_LENGTH} tokens.")
    input_ids = input_ids.to(model.device)

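    # Stream the completion: model.generate runs in a background thread and
    # pushes decoded text chunks into the streamer as they are produced.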
    streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)
    generate_kwargs = dict(
        input_ids=input_ids,
        streamer=streamer,
        max_new_tokens=max_new_tokens,
        do_sample=True,
        top_p=top_p,
        top_k=top_k,
        temperature=temperature,
        num_beams=1,
        pad_token_id=tokenizer.eos_token_id,
        repetition_penalty=repetition_penalty,
        no_repeat_ngram_size=5,
        early_stopping=False,
    )
    t = Thread(target=model.generate, kwargs=generate_kwargs)
    t.start()

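    # Yield the accumulated text so the Gradio UI updates incrementally.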
    outputs = []
    for text in streamer:
        outputs.append(text)
        yield "".join(outputs)


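# Chat-style UI; Hebrew is written right-to-left, so both the chatbot and the
# input textbox are rendered RTL.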
chat_interface = gr.ChatInterface(
    fn=generate,
    chatbot=gr.Chatbot(rtl=True, show_copy_button=True),
    textbox=gr.Textbox(text_align="right", rtl=True),
    additional_inputs=[
        gr.Slider(
            label="Max new tokens",
            minimum=1,
            maximum=MAX_MAX_NEW_TOKENS,
            step=1,
            value=DEFAULT_MAX_NEW_TOKENS,
        ),
        gr.Slider(
            label="Temperature",
            minimum=0.1,
            maximum=4.0,
            step=0.1,
            value=0.9,
        ),
        gr.Slider(
            label="Top-p (nucleus sampling)",
            minimum=0.05,
            maximum=1.0,
            step=0.05,
            value=0.7,
        ),
        gr.Slider(
            label="Top-k",
            minimum=1,
            maximum=1000,
            step=1,
            value=40,
        ),
    ],
    stop_btn=None,
    examples=[
        ["ืžืชื›ื•ืŸ ืœืขื•ื’ืช ืฉื•ืงื•ืœื“:"],
        ["ืฉืคืช ื”ืชื›ื ื•ืช ืคื™ื™ื˜ื•ืŸ ื”ื™ื"],
        ["ื”ืื™ืฉ ื”ืื—ืจื•ืŸ ื‘ืขื•ืœื ื™ืฉื‘ ืœื‘ื“ ื‘ื—ื“ืจื•, ื›ืฉืœืคืชืข"],
        ["ืฉืืœื”: ืžื”ื™ ืขื™ืจ ื”ื‘ื™ืจื” ืฉืœ ืžื“ื™ื ืช ื™ืฉืจืืœ?\nืชืฉื•ื‘ื”:"],
        ["ืฉืืœื”: ืื ื™ ืžืžืฉ ืขื™ื™ืฃ, ืžื” ื›ื“ืื™ ืœื™ ืœืขืฉื•ืช?\nืชืฉื•ื‘ื”:"],
    ],
)

with gr.Blocks(css="style.css") as demo:
    gr.Markdown(DESCRIPTION)
    gr.DuplicateButton(value="Duplicate Space for private use", elem_id="duplicate-button")
    chat_interface.render()
    gr.Markdown(LICENSE)

if __name__ == "__main__":
    demo.queue(max_size=20).launch()