<?xml version="1.0"?>
<net name="detokenizer" version="11">
<layers>
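<!-- Detokenization pipeline: i64 token IDs (Parameter_33) are converted to i32, decoded by SentencepieceDetokenizer into a begins/ends/chars string representation, post-processed by RegexNormalization, and packed into a single string tensor (string_output). -->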
<layer id="0" name="Parameter_33" type="Parameter" version="opset1">
<data shape="?,?" element_type="i64" />
<output>
<port id="0" precision="I64" names="Parameter_33">
<dim>-1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
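<!-- 587,448 bytes of raw data read from the companion .bin file at offset 0; for a SentencePiece-based tokenizer this blob is presumably the serialized SentencePiece model consumed by SentencepieceDetokenizer below. -->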
<layer id="1" name="Constant_2" type="Const" version="opset1">
<data element_type="u8" shape="587448" offset="0" size="587448" />
<output>
<port id="0" precision="U8">
<dim>587448</dim>
</port>
</output>
</layer>
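<!-- Token IDs arrive as i64 and are narrowed to i32, the element type SentencepieceDetokenizer takes on its second input. -->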
<layer id="2" name="Convert_48" type="Convert" version="opset1">
<data destination_type="i32" />
<input>
<port id="0" precision="I64">
<dim>-1</dim>
<dim>-1</dim>
</port>
</input>
<output>
<port id="1" precision="I32">
<dim>-1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
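<!-- Decodes each row of token IDs back to text. Port 0 takes the model blob, port 1 the i32 IDs; the result is a decomposed string tensor: begin offsets (port 2), end offsets (port 3), and a flat u8 buffer of UTF-8 bytes (port 4). -->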
<layer id="3" name="SentencepieceDetokenizer_34" type="SentencepieceDetokenizer" version="extension">
<input>
<port id="0" precision="U8">
<dim>587448</dim>
</port>
<port id="1" precision="I32">
<dim>-1</dim>
<dim>-1</dim>
</port>
</input>
<output>
<port id="2" precision="I32">
<dim>-1</dim>
</port>
<port id="3" precision="I32">
<dim>-1</dim>
</port>
<port id="4" precision="U8">
<dim>-1</dim>
</port>
</output>
</layer>
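<!-- Two small constants from the .bin (offsets 587448 and 587458) hold the search pattern (10 bytes) and the replacement (2 bytes) used by the RegexNormalization step; their actual contents are only visible in the .bin file. -->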
<layer id="4" name="Constant_36" type="Const" version="opset1">
<data element_type="u8" shape="10" offset="587448" size="10" />
<output>
<port id="0" precision="U8">
<dim>10</dim>
</port>
</output>
</layer>
<layer id="5" name="Constant_38" type="Const" version="opset1">
<data element_type="u8" shape="2" offset="587458" size="2" />
<output>
<port id="0" precision="U8">
<dim>2</dim>
</port>
</output>
</layer>
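<!-- Applies the regex substitution to every match in each decoded string (global_replace="true"). Ports 0..2 are the begins/ends/chars from the detokenizer, port 3 the search pattern, port 4 the replacement. -->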
<layer id="6" name="RegexNormalization_39" type="RegexNormalization" version="extension">
<data global_replace="true" />
<input>
<port id="0" precision="I32">
<dim>-1</dim>
</port>
<port id="1" precision="I32">
<dim>-1</dim>
</port>
<port id="2" precision="U8">
<dim>-1</dim>
</port>
<port id="3" precision="U8">
<dim>10</dim>
</port>
<port id="4" precision="U8">
<dim>2</dim>
</port>
</input>
<output>
<port id="5" precision="I32">
<dim>-1</dim>
</port>
<port id="6" precision="I32">
<dim>-1</dim>
</port>
<port id="7" precision="U8">
<dim>-1</dim>
</port>
</output>
</layer>
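<!-- Reassembles the begins/ends/chars triple into a single string tensor, exposed as "string_output". -->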
<layer id="7" name="StringTensorPack_40" type="StringTensorPack" version="extension">
<data mode="begins_ends" />
<input>
<port id="0" precision="I32">
<dim>-1</dim>
</port>
<port id="1" precision="I32">
<dim>-1</dim>
</port>
<port id="2" precision="U8">
<dim>-1</dim>
</port>
</input>
<output>
<port id="3" precision="STRING" names="string_output">
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="8" name="Result_41" type="Result" version="opset1">
<input>
<port id="0" precision="STRING">
<dim>-1</dim>
</port>
</input>
</layer>
</layers>
<edges>
<edge from-layer="0" from-port="0" to-layer="2" to-port="0" />
<edge from-layer="1" from-port="0" to-layer="3" to-port="0" />
<edge from-layer="2" from-port="1" to-layer="3" to-port="1" />
<edge from-layer="3" from-port="2" to-layer="6" to-port="0" />
<edge from-layer="3" from-port="3" to-layer="6" to-port="1" />
<edge from-layer="3" from-port="4" to-layer="6" to-port="2" />
<edge from-layer="4" from-port="0" to-layer="6" to-port="3" />
<edge from-layer="5" from-port="0" to-layer="6" to-port="4" />
<edge from-layer="6" from-port="5" to-layer="7" to-port="0" />
<edge from-layer="6" from-port="6" to-layer="7" to-port="1" />
<edge from-layer="6" from-port="7" to-layer="7" to-port="2" />
<edge from-layer="7" from-port="3" to-layer="8" to-port="0" />
</edges>
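<!-- Tokenizer metadata carried over from the source tokenizer: special token IDs and the Jinja chat template. -->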
<rt_info>
<bos_token_id value="1" />
<chat_template value="{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}{{'<|im_start|>' + message['role'] + '&#10;' + message['content'] + '<|im_end|>' + '&#10;'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant&#10;' }}{% endif %}" />
<eos_token_id value="32768" />
<original_tokenizer_class value="<class 'transformers.models.llama.tokenization_llama_fast.LlamaTokenizerFast'>" />
<pad_token_id value="2" />
</rt_info>
</net>