# import os
# os.environ["CUDA_VISIBLE_DEVICES"] = "0,2,3"
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_path = "CubeAI/Zhuji-Internet-Literature-Intelligent-Writing-Model-V1.0"
tokenizer = AutoTokenizer.from_pretrained(model_path, encode_special_tokens=True)
model = AutoModelForCausalLM.from_pretrained(
    model_path,
    torch_dtype=torch.bfloat16,
    low_cpu_mem_usage=True,
    attn_implementation="flash_attention_2",
    device_map="auto",
)
model = torch.compile(model)
model = model.eval()
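# Note: attn_implementation="flash_attention_2" assumes the separate flash-attn
# package and a supported GPU; "sdpa" (or omitting the argument) is a reasonable
# fallback. torch.compile adds one-time compilation overhead to the first
# generate() call.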
import gradio as gr
from threading import Thread
from transformers import TextIteratorStreamer
DESCRIPTION = '''
<div>
<h1 style="text-align: center;">In-house model demo: long-form novel writing</h1>
<p>This space showcases our self-developed model's capabilities in the long-form novel domain. The model has been specially optimized for novel-length generation and comprehension tasks, and comes in two sizes: a base edition and an advanced edition.</p>
<p>📚 If you are interested in applying the model to novel writing and analysis, feel free to start exploring with our base edition.</p>
<p>🚀 For users seeking more advanced features and deeper analysis, the advanced edition offers stronger generation capabilities and finer-grained text understanding.</p>
</div>
'''
LICENSE = """
<p/>
---
Built with NovelGen
"""
PLACEHOLDER = """
<div style="padding: 30px; text-align: center; display: flex; flex-direction: column; align-items: center;">
   <h1 style="font-size: 28px; margin-bottom: 2px; opacity: 0.55;">AI-powered writing</h1>
   <p style="font-size: 18px; margin-bottom: 2px; opacity: 0.65;">AI-assisted writing</p>
</div>
"""
css = """
h1 {
text-align: center;
display: block;
}
#duplicate-button {
margin: auto;
color: white;
background: #1565c0;
border-radius: 100vh;
}
"""
# One-line Jinja template built from adjacent string literals, so the rendered
# prompt contains no stray newlines or indentation. (Stripping spaces from the
# template string after the fact would also mangle the Jinja tags themselves,
# e.g. "{% if message" would become "{%ifmessage".)
tokenizer.chat_template = (
    "{% for message in messages %}"
    "{% if message['role'] == 'user' %}"
    "{{ '<|user|>' + message['content'].strip() + '<|observation|>' + '<|assistant|>' }}"
    "{% elif message['role'] == 'system' %}"
    "{{ '<|system|>' + message['content'].strip() + '<|observation|>' }}"
    "{% elif message['role'] == 'assistant' %}"
    "{{ message['content'] + '<|observation|>' }}"
    "{% endif %}"
    "{% endfor %}"
)
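# Optional sanity check (commented out so the app starts unchanged): render a
# two-message conversation and confirm the layout is
# <|system|>...<|observation|><|user|>...<|observation|><|assistant|>
# print(tokenizer.apply_chat_template(
#     [{"role": "system", "content": "You are a web-novel writing assistant."},
#      {"role": "user", "content": "Hello"}],
#     tokenize=False,
# ))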
def chat_zhuji(
    message: str,
    history: list,
    temperature: float,
    max_new_tokens: int
):
    """
    Stream a response from the Zhuji writing model.
    Args:
        message (str): The input message.
        history (list): The conversation history used by ChatInterface.
        temperature (float): The sampling temperature.
        max_new_tokens (int): The maximum number of new tokens to generate.
    Yields:
        str: The response generated so far.
    """
    conversation = []
    # Each history turn renders as <|system|><|observation|><|user|>...<|assistant|>
    for user, assistant in history:
        conversation.extend([
            {"role": "system", "content": ""},
            {"role": "user", "content": user},
            # The template matches the literal role name "assistant".
            {"role": "assistant", "content": assistant},
        ])
    conversation.append({"role": "user", "content": message})
input_ids = tokenizer.apply_chat_template(conversation, return_tensors="pt").to(model.device)
streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)
    generate_kwargs = dict(
        input_ids=input_ids,
        streamer=streamer,
        max_new_tokens=max_new_tokens,
        do_sample=True,
        # penalty_alpha only takes effect in contrastive search (do_sample=False);
        # it is ignored while sampling.
        penalty_alpha=0.65,
        top_p=0.90,
        top_k=35,
        use_cache=True,
        # Stop on <|observation|>; encode() returns a list of token ids, which
        # generate() accepts for eos_token_id.
        eos_token_id=tokenizer.encode("<|observation|>", add_special_tokens=False),
        temperature=temperature,
    )
    # Fall back to greedy decoding when temperature is 0; sampling with a zero
    # temperature would crash.
    if temperature == 0:
        generate_kwargs['do_sample'] = False
t = Thread(target=model.generate, kwargs=generate_kwargs)
t.start()
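    # generate() runs on the background thread; the loop below yields partial
    # text on this thread as tokens arrive. The streamer was built with a 10s
    # timeout, so a stalled generation raises instead of blocking forever.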
outputs = []
for text in streamer:
outputs.append(text)
yield "".join(outputs)
# Gradio block
chatbot = gr.Chatbot(height=450, placeholder=PLACEHOLDER, label='Gradio ChatInterface')
text_box = gr.Textbox(show_copy_button=True)
with gr.Blocks(fill_height=True, css=css) as demo:
#gr.Markdown(DESCRIPTION)
gr.ChatInterface(
fn=chat_zhuji,
chatbot=chatbot,
        textbox=text_box,
fill_height=True,
additional_inputs_accordion=gr.Accordion(label="⚙️ Parameters", open=False, render=False),
additional_inputs=[
gr.Slider(minimum=0,
maximum=1,
step=0.1,
value=0.95,
label="Temperature",
render=False),
gr.Slider(minimum=2048,
maximum=8192*2,
step=1,
value=8192*2,
label="Max new tokens",
render=False ),
],
        examples=[
            ['请给一个古代美女的外貌来一段描写'],    # "Describe the appearance of a classical-era beauty"
            ['请生成4个东方神功的招式名称'],         # "Generate 4 move names for an Eastern martial art"
            ['生成一段官军和倭寇打斗的场面描写'],    # "Write a fight scene between government troops and wokou pirates"
            ['生成一个都市大女主的角色档案'],        # "Generate a character profile for an urban strong female lead"
        ],
cache_examples=False,
)
gr.Markdown(LICENSE)
if __name__ == "__main__":
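    # Note: share=True asks Gradio for a temporary public *.gradio.live URL;
    # for local-only serving, drop it and set server_name/server_port instead.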
demo.launch(
#server_name='0.0.0.0',
#server_port=config.webui_config.port,
#inbrowser=True,
share=True
    )