<?xml version="1.0"?>
<net name="detokenizer" version="11">
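	<!-- OpenVINO IR (version 11) detokenizer graph: maps batches of token IDs back to strings via a SentencePiece-based pipeline -->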
	<layers>
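		<!-- Layer 0: Parameter - input token IDs, dynamic shape [batch, sequence_length], i64 -->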
		<layer id="0" name="Parameter_237586" type="Parameter" version="opset1">
			<data shape="?,?" element_type="i64" />
			<output>
				<port id="0" precision="I64" names="Parameter_237586">
					<dim>-1</dim>
					<dim>-1</dim>
				</port>
			</output>
		</layer>
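		<!-- Layer 1: Const - 499,991-byte u8 blob read from the companion .bin at offset 0; the serialized SentencePiece model consumed by the detokenizer below -->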
		<layer id="1" name="Constant_237562" type="Const" version="opset1">
			<data element_type="u8" shape="499991" offset="0" size="499991" />
			<output>
				<port id="0" precision="U8">
					<dim>499991</dim>
				</port>
			</output>
		</layer>
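		<!-- Layer 2: Convert - casts the i64 token IDs to i32, as expected by SentencepieceDetokenizer -->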
		<layer id="2" name="Convert_237596" type="Convert" version="opset1">
			<data destination_type="i32" />
			<input>
				<port id="0" precision="I64">
					<dim>-1</dim>
					<dim>-1</dim>
				</port>
			</input>
			<output>
				<port id="1" precision="I32">
					<dim>-1</dim>
					<dim>-1</dim>
				</port>
			</output>
		</layer>
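		<!-- Layer 3: SentencepieceDetokenizer (extension op) - decodes the i32 token IDs into a decomposed string tensor: begin offsets (i32), end offsets (i32) and a flat u8 byte buffer -->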
		<layer id="3" name="SentencepieceDetokenizer_237587" type="SentencepieceDetokenizer" version="extension">
			<input>
				<port id="0" precision="U8">
					<dim>499991</dim>
				</port>
				<port id="1" precision="I32">
					<dim>-1</dim>
					<dim>-1</dim>
				</port>
			</input>
			<output>
				<port id="2" precision="I32">
					<dim>-1</dim>
				</port>
				<port id="3" precision="I32">
					<dim>-1</dim>
				</port>
				<port id="4" precision="U8">
					<dim>-1</dim>
				</port>
			</output>
		</layer>
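		<!-- Layer 4: StringTensorPack (extension op, begins_ends mode) - packs begins/ends/bytes into a single STRING tensor named "string_output" -->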
		<layer id="4" name="StringTensorPack_237588" type="StringTensorPack" version="extension">
			<data mode="begins_ends" />
			<input>
				<port id="0" precision="I32">
					<dim>-1</dim>
				</port>
				<port id="1" precision="I32">
					<dim>-1</dim>
				</port>
				<port id="2" precision="U8">
					<dim>-1</dim>
				</port>
			</input>
			<output>
				<port id="3" precision="STRING" names="string_output">
					<dim>-1</dim>
				</port>
			</output>
		</layer>
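		<!-- Layer 5: Result - exposes the packed string tensor as the network output -->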
		<layer id="5" name="Result_237589" type="Result" version="opset1">
			<input>
				<port id="0" precision="STRING">
					<dim>-1</dim>
				</port>
			</input>
		</layer>
	</layers>
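	<!-- Data flow: Parameter -> Convert -> SentencepieceDetokenizer (port 1); Const model blob -> SentencepieceDetokenizer (port 0);
	     detokenizer outputs (begins, ends, bytes) -> StringTensorPack -> Result -->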
	<edges>
		<edge from-layer="0" from-port="0" to-layer="2" to-port="0" />
		<edge from-layer="1" from-port="0" to-layer="3" to-port="0" />
		<edge from-layer="2" from-port="1" to-layer="3" to-port="1" />
		<edge from-layer="3" from-port="2" to-layer="4" to-port="0" />
		<edge from-layer="3" from-port="3" to-layer="4" to-port="1" />
		<edge from-layer="3" from-port="4" to-layer="4" to-port="2" />
		<edge from-layer="4" from-port="3" to-layer="5" to-port="0" />
	</edges>
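	<!-- Runtime metadata: special-token IDs (bos/eos/pad) and the chat template taken from the original tokenizer (transformers LlamaTokenizerFast) -->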
	<rt_info>
		<bos_token_id value="1" />
		<chat_template value="{% for message in messages %}{% if message['role'] == 'system' and message['content'] %}{{'&lt;|system|>&#10;' + message['content'] + '&lt;|end|>&#10;'}}{% elif message['role'] == 'user' %}{{'&lt;|user|>&#10;' + message['content'] + '&lt;|end|>&#10;'}}{% elif message['role'] == 'assistant' %}{{'&lt;|assistant|>&#10;' + message['content'] + '&lt;|end|>&#10;'}}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '&lt;|assistant|>&#10;' }}{% else %}{{ eos_token }}{% endif %}" />
		<eos_token_id value="32000" />
		<original_tokenizer_class value="&lt;class 'transformers.models.llama.tokenization_llama_fast.LlamaTokenizerFast'>" />
		<pad_token_id value="32000" />
	</rt_info>
</net>