import os

# Make CUDA kernel launches synchronous so device-side errors surface at the
# offending call (useful for debugging; slows inference).
os.environ['CUDA_LAUNCH_BLOCKING'] = '1'

import torch
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer


def init_model():
    """Load the Linly Chinese-LLaMA-2-7B checkpoint and its tokenizer onto GPU 0."""
    model = AutoModelForCausalLM.from_pretrained(
        "Linly-AI/Chinese-LLaMA-2-7B-hf",
        device_map="cuda:0",
        torch_dtype=torch.bfloat16,
        trust_remote_code=True,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        "Linly-AI/Chinese-LLaMA-2-7B-hf", use_fast=False, trust_remote_code=True
    )
    return model, tokenizer
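
# A lower-memory loading variant (a sketch, not part of the original script;
# it assumes the optional `bitsandbytes` package is installed):
#
#     model = AutoModelForCausalLM.from_pretrained(
#         "Linly-AI/Chinese-LLaMA-2-7B-hf",
#         device_map="auto",
#         load_in_8bit=True,
#         trust_remote_code=True,
#     )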


def process(message, history):
    """Rebuild the prompt from the chat history plus the new message, then generate a reply."""
    input_prompt = ""
    for user_turn, bot_turn in history:
        input_prompt = f"{input_prompt} User: {str(user_turn).strip()} Bot: {str(bot_turn).strip()}"
    input_prompt = f"{input_prompt} ### Instruction:{message.strip()}  ### Response:"
    inputs = tokenizer(input_prompt, return_tensors="pt").to("cuda:0")
    try:
        generate_ids = model.generate(inputs.input_ids, max_new_tokens=2048, do_sample=True, top_k=30,
                                      top_p=0.84, temperature=1.0, repetition_penalty=1.15,
                                      eos_token_id=2, bos_token_id=1, pad_token_id=0)
        response = tokenizer.batch_decode(generate_ids, skip_special_tokens=True,
                                          clean_up_tokenization_spaces=False)[0]
        print('log:', input_prompt, response)
        # Keep only the text generated after the final "### Response:" marker.
        response = response.split("### Response:")[-1]
        return response
    except Exception:
        # Most commonly the accumulated prompt has outgrown the context window.
        return "Error: conversation too long, please try again!"
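

# A streaming variant (a minimal sketch, not part of the original script).
# gr.ChatInterface treats a generator handler as streaming, and transformers'
# TextIteratorStreamer yields decoded text as tokens are produced; this
# reuses the same global `model` and `tokenizer`.
def process_stream(message, history):
    from threading import Thread
    from transformers import TextIteratorStreamer

    input_prompt = ""
    for user_turn, bot_turn in history:
        input_prompt = f"{input_prompt} User: {str(user_turn).strip()} Bot: {str(bot_turn).strip()}"
    input_prompt = f"{input_prompt} ### Instruction:{message.strip()}  ### Response:"
    inputs = tokenizer(input_prompt, return_tensors="pt").to("cuda:0")
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
    generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=2048, do_sample=True,
                             top_k=30, top_p=0.84, temperature=1.0, repetition_penalty=1.15,
                             eos_token_id=2, bos_token_id=1, pad_token_id=0)
    # generate() blocks, so run it in a thread and drain the streamer here.
    Thread(target=model.generate, kwargs=generation_kwargs).start()
    partial = ""
    for chunk in streamer:
        partial += chunk
        yield partial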


if __name__ == '__main__':
    examples = [
        "What are the main differences between the Python and JavaScript programming languages?",
        "What are the main factors that influence consumer behavior?",
        "Please write PyTorch code for a fully connected layer with a ReLU activation function.",
        "In C++, implement: given two strings haystack and needle, return the 0-based index of the "
        "first occurrence of needle in haystack, or -1 if needle is not part of haystack.",
        "How do I use ssh -L? Please explain with a concrete example.",
        "What is the most effective way to cope with stress?",
    ]
    model, tokenizer = init_model()
    # Gradio 3.x chat UI; cache_examples=True runs each example through the
    # model once at startup and serves the cached responses afterwards.
    demo = gr.ChatInterface(
        process,
        chatbot=gr.Chatbot(height=600),
        textbox=gr.Textbox(placeholder="Input", container=False, scale=7),
        title="Linly ChatFlow",
        description="",
        theme="soft",
        examples=examples,
        cache_examples=True,
        retry_btn="Retry",
        undo_btn="Delete Previous",
        clear_btn="Clear",
    )
    demo.queue(concurrency_count=75).launch()
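    # Note: queue(concurrency_count=...) is the Gradio 3.x API; Gradio 4+
    # renamed it to queue(default_concurrency_limit=...). To serve beyond
    # localhost, pass standard launch() options, e.g.:
    #
    #     demo.queue(concurrency_count=75).launch(server_name="0.0.0.0", server_port=7860)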