open-orca-mistral-7b-ov / openvino_detokenizer.xml
<?xml version="1.0"?>
<net name="detokenizer" version="11">
	<layers>
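		<!-- Dataflow: i64 token IDs (layer 0) are converted to i32 (layer 2), decoded by the
		     SentencepieceDetokenizer extension op (layer 3) against the 493487-byte SentencePiece
		     model blob held in Const layer 1, and packed into the "string_output" string tensor by
		     StringTensorPack (layer 4) feeding the Result (layer 5). -->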
<layer id="0" name="Parameter_224304" type="Parameter" version="opset1">
<data shape="?,?" element_type="i64" />
<output>
<port id="0" precision="I64" names="Parameter_224304">
<dim>-1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="1" name="Constant_224280" type="Const" version="opset1">
<data element_type="u8" shape="493487" offset="0" size="493487" />
<output>
<port id="0" precision="U8">
<dim>493487</dim>
</port>
</output>
</layer>
<layer id="2" name="Convert_224314" type="Convert" version="opset1">
<data destination_type="i32" />
<input>
<port id="0" precision="I64">
<dim>-1</dim>
<dim>-1</dim>
</port>
</input>
<output>
<port id="1" precision="I32">
<dim>-1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="3" name="SentencepieceDetokenizer_224305" type="SentencepieceDetokenizer" version="extension">
<input>
<port id="0" precision="U8">
<dim>493487</dim>
</port>
<port id="1" precision="I32">
<dim>-1</dim>
<dim>-1</dim>
</port>
</input>
<output>
<port id="2" precision="I32">
<dim>-1</dim>
</port>
<port id="3" precision="I32">
<dim>-1</dim>
</port>
<port id="4" precision="U8">
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="4" name="StringTensorPack_224306" type="StringTensorPack" version="extension">
<data mode="begins_ends" />
<input>
<port id="0" precision="I32">
<dim>-1</dim>
</port>
<port id="1" precision="I32">
<dim>-1</dim>
</port>
<port id="2" precision="U8">
<dim>-1</dim>
</port>
</input>
<output>
<port id="3" precision="STRING" names="string_output">
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="5" name="Result_224307" type="Result" version="opset1">
<input>
<port id="0" precision="STRING">
<dim>-1</dim>
</port>
</input>
</layer>
</layers>
	<edges>
		<edge from-layer="0" from-port="0" to-layer="2" to-port="0" />
		<edge from-layer="1" from-port="0" to-layer="3" to-port="0" />
		<edge from-layer="2" from-port="1" to-layer="3" to-port="1" />
		<edge from-layer="3" from-port="2" to-layer="4" to-port="0" />
		<edge from-layer="3" from-port="3" to-layer="4" to-port="1" />
		<edge from-layer="3" from-port="4" to-layer="4" to-port="2" />
		<edge from-layer="4" from-port="3" to-layer="5" to-port="0" />
	</edges>
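	<!-- rt_info carries tokenizer metadata only (BOS/EOS token ids, the ChatML-style chat template,
	     and the original Hugging Face tokenizer class); it does not change the graph above. -->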
	<rt_info>
		<bos_token_id value="1" />
		<chat_template value="{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}{{'&lt;|im_start|>' + message['role'] + '&#10;' + message['content'] + '&lt;|im_end|>' + '&#10;'}}{% endfor %}{% if add_generation_prompt %}{{ '&lt;|im_start|>assistant&#10;' }}{% endif %}" />
		<eos_token_id value="32000" />
		<original_tokenizer_class value="&lt;class 'transformers.models.llama.tokenization_llama_fast.LlamaTokenizerFast'>" />
	</rt_info>
</net>
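
Minimal usage sketch (not part of the IR itself): with the openvino and openvino_tokenizers packages installed, importing openvino_tokenizers is expected to register the "extension" ops used above (SentencepieceDetokenizer, StringTensorPack) so the IR can be compiled and called directly. The file path, device, and token IDs below are placeholders, and the exact registration behaviour may differ across package versions.

import numpy as np
import openvino as ov
import openvino_tokenizers  # noqa: F401 -- assumed to register the tokenizer extension ops on import

core = ov.Core()
# Hypothetical path; the IR expects its matching .bin file (holding the SentencePiece blob) alongside it.
detokenizer = core.compile_model("openvino_detokenizer.xml", "CPU")

# Hypothetical batch of token IDs, shaped [batch, sequence] to match the i64 "?,?" Parameter above.
token_ids = np.array([[1, 330, 1369, 2245]], dtype=np.int64)

result = detokenizer([token_ids])
# "string_output" is the tensor name declared on the StringTensorPack output port.
print(result["string_output"])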