<?xml version="1.0"?>
<net name="detokenizer" version="11">
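<!-- Detokenizer graph produced by openvino_tokenizers (see rt_info at the bottom of this file).
     Data flow, as wired in the <edges> section: Parameter (i64 token IDs, shape [batch, sequence])
     -> Convert to i32 -> VocabDecoder (vocabulary unpacked from the u8 constant by StringTensorUnpack,
     special tokens skipped) -> FuzeRagged -> StringTensorPack -> Result ("string_output"). -->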
<layers>
<layer id="0" name="Parameter_408297" type="Parameter" version="opset1">
<data shape="?,?" element_type="i64" />
<output>
<port id="0" precision="I64" names="Parameter_408297">
<dim>-1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="1" name="Convert_408308" type="Convert" version="opset1">
<data destination_type="i32" />
<input>
<port id="0" precision="I64">
<dim>-1</dim>
<dim>-1</dim>
</port>
</input>
<output>
<port id="1" precision="I32">
<dim>-1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
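<!-- Constant holding the packed vocabulary as a 1,582,931-byte u8 blob; the StringTensorUnpack
     layer below splits it into per-token begin offsets, end offsets, and the raw UTF-8 bytes. -->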
<layer id="2" name="Constant_408272" type="Const" version="opset1">
<data element_type="u8" shape="1582931" offset="0" size="1582931" />
<output>
<port id="0" precision="U8">
<dim>1582931</dim>
</port>
</output>
</layer>
<layer id="3" name="StringTensorUnpack_408273" type="StringTensorUnpack" version="extension">
<data mode="begins_ends" />
<input>
<port id="0" precision="U8">
<dim>1582931</dim>
</port>
</input>
<output>
<port id="1" precision="I32">
<dim>-1</dim>
</port>
<port id="2" precision="I32">
<dim>-1</dim>
</port>
<port id="3" precision="U8">
<dim>-1</dim>
</port>
</output>
</layer>
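<!-- VocabDecoder maps each input token ID to its vocabulary byte string, dropping the 14 IDs in
     skip_tokens (the special tokens; rt_info below marks 151643 as the pad token and 151645 as EOS).
     Its outputs form a ragged layout: row begin/end indices, per-token byte ranges, and the shared
     byte buffer. -->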
<layer id="4" name="VocabDecoder_408298" type="VocabDecoder" version="extension">
<data skip_tokens="151643, 151644, 151645, 151646, 151647, 151648, 151649, 151650, 151651, 151652, 151653, 151654, 151655, 151656" />
<input>
<port id="0" precision="I32">
<dim>-1</dim>
<dim>-1</dim>
</port>
<port id="1" precision="I32">
<dim>-1</dim>
</port>
<port id="2" precision="I32">
<dim>-1</dim>
</port>
<port id="3" precision="U8">
<dim>-1</dim>
</port>
</input>
<output>
<port id="4" precision="I32">
<dim>-1</dim>
</port>
<port id="5" precision="I32">
<dim>-1</dim>
</port>
<port id="6" precision="I32">
<dim>-1</dim>
</port>
<port id="7" precision="I32">
<dim>-1</dim>
</port>
<port id="8" precision="U8">
<dim>-1</dim>
</port>
</output>
</layer>
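<!-- FuzeRagged collapses the ragged per-token ranges produced by VocabDecoder into a single
     begin/end offset pair per output row, ready for packing into a string tensor. -->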
<layer id="5" name="FuzeRagged_408299" type="FuzeRagged" version="extension">
<input>
<port id="0" precision="I32">
<dim>-1</dim>
</port>
<port id="1" precision="I32">
<dim>-1</dim>
</port>
<port id="2" precision="I32">
<dim>-1</dim>
</port>
<port id="3" precision="I32">
<dim>-1</dim>
</port>
</input>
<output>
<port id="4" precision="I32">
<dim>-1</dim>
</port>
<port id="5" precision="I32">
<dim>-1</dim>
</port>
</output>
</layer>
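<!-- StringTensorPack combines the fused begin/end offsets with the decoded byte buffer into a
     string tensor with one entry per batch row, exposed to the caller as "string_output". -->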
<layer id="6" name="StringTensorPack_408300" type="StringTensorPack" version="extension">
<data mode="begins_ends" />
<input>
<port id="0" precision="I32">
<dim>-1</dim>
</port>
<port id="1" precision="I32">
<dim>-1</dim>
</port>
<port id="2" precision="U8">
<dim>-1</dim>
</port>
</input>
<output>
<port id="3" precision="STRING" names="string_output">
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="7" name="Result_408301" type="Result" version="opset1">
<input>
<port id="0" precision="STRING">
<dim>-1</dim>
</port>
</input>
</layer>
</layers>
<edges>
<edge from-layer="0" from-port="0" to-layer="1" to-port="0" />
<edge from-layer="1" from-port="1" to-layer="4" to-port="0" />
<edge from-layer="2" from-port="0" to-layer="3" to-port="0" />
<edge from-layer="3" from-port="1" to-layer="4" to-port="1" />
<edge from-layer="3" from-port="2" to-layer="4" to-port="2" />
<edge from-layer="3" from-port="3" to-layer="4" to-port="3" />
<edge from-layer="4" from-port="4" to-layer="5" to-port="0" />
<edge from-layer="4" from-port="5" to-layer="5" to-port="1" />
<edge from-layer="4" from-port="6" to-layer="5" to-port="2" />
<edge from-layer="4" from-port="7" to-layer="5" to-port="3" />
<edge from-layer="4" from-port="8" to-layer="6" to-port="2" />
<edge from-layer="5" from-port="4" to-layer="6" to-port="0" />
<edge from-layer="5" from-port="5" to-layer="6" to-port="1" />
<edge from-layer="6" from-port="3" to-layer="7" to-port="0" />
</edges>
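<!-- rt_info carries tokenizer metadata consumed by downstream tooling at runtime: the Jinja chat
     template, special token IDs (EOS 151645, pad 151643), and the versions of the libraries used
     for conversion. -->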
<rt_info>
<add_attention_mask value="True" />
<add_prefix_space />
<add_special_tokens value="True" />
<chat_template value="{%- if tools %}&#10; {{- '&lt;|im_start|>system\n' }}&#10; {%- if messages[0]['role'] == 'system' %}&#10; {{- messages[0]['content'] }}&#10; {%- else %}&#10; {{- 'You are a helpful and harmless assistant. You are Qwen developed by Alibaba. You should think step-by-step.' }}&#10; {%- endif %}&#10; {{- &quot;\n\n# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within &lt;tools>&lt;/tools> XML tags:\n&lt;tools>&quot; }}&#10; {%- for tool in tools %}&#10; {{- &quot;\n&quot; }}&#10; {{- tool | tojson }}&#10; {%- endfor %}&#10; {{- &quot;\n&lt;/tools>\n\nFor each function call, return a json object with function name and arguments within &lt;tool_call>&lt;/tool_call> XML tags:\n&lt;tool_call>\n{\&quot;name\&quot;: &lt;function-name>, \&quot;arguments\&quot;: &lt;args-json-object>}\n&lt;/tool_call>&lt;|im_end|>\n&quot; }}&#10;{%- else %}&#10; {%- if messages[0]['role'] == 'system' %}&#10; {{- '&lt;|im_start|>system\n' + messages[0]['content'] + '&lt;|im_end|>\n' }}&#10; {%- else %}&#10; {{- '&lt;|im_start|>system\nYou are a helpful and harmless assistant. You are Qwen developed by Alibaba. You should think step-by-step.&lt;|im_end|>\n' }}&#10; {%- endif %}&#10;{%- endif %}&#10;{%- for message in messages %}&#10; {%- if (message.role == &quot;user&quot;) or (message.role == &quot;system&quot; and not loop.first) or (message.role == &quot;assistant&quot; and not message.tool_calls) %}&#10; {{- '&lt;|im_start|>' + message.role + '\n' + message.content + '&lt;|im_end|>' + '\n' }}&#10; {%- elif message.role == &quot;assistant&quot; %}&#10; {{- '&lt;|im_start|>' + message.role }}&#10; {%- if message.content %}&#10; {{- '\n' + message.content }}&#10; {%- endif %}&#10; {%- for tool_call in message.tool_calls %}&#10; {%- if tool_call.function is defined %}&#10; {%- set tool_call = tool_call.function %}&#10; {%- endif %}&#10; {{- '\n&lt;tool_call>\n{&quot;name&quot;: &quot;' }}&#10; {{- tool_call.name }}&#10; {{- '&quot;, &quot;arguments&quot;: ' }}&#10; {{- tool_call.arguments | tojson }}&#10; {{- '}\n&lt;/tool_call>' }}&#10; {%- endfor %}&#10; {{- '&lt;|im_end|>\n' }}&#10; {%- elif message.role == &quot;tool&quot; %}&#10; {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != &quot;tool&quot;) %}&#10; {{- '&lt;|im_start|>user' }}&#10; {%- endif %}&#10; {{- '\n&lt;tool_response>\n' }}&#10; {{- message.content }}&#10; {{- '\n&lt;/tool_response>' }}&#10; {%- if loop.last or (messages[loop.index0 + 1].role != &quot;tool&quot;) %}&#10; {{- '&lt;|im_end|>\n' }}&#10; {%- endif %}&#10; {%- endif %}&#10;{%- endfor %}&#10;{%- if add_generation_prompt %}&#10; {{- '&lt;|im_start|>assistant\n' }}&#10;{%- endif %}&#10;" />
<clean_up_tokenization_spaces />
<detokenizer_input_type value="i64" />
<eos_token_id value="151645" />
<handle_special_tokens_with_re />
<number_of_inputs value="1" />
<openvino_tokenizers_version value="2024.5.0.0" />
<openvino_version value="2024.5.0" />
<original_tokenizer_class value="&lt;class 'transformers.models.qwen2.tokenization_qwen2_fast.Qwen2TokenizerFast'>" />
<pad_token_id value="151643" />
<sentencepiece_version value="0.2.0" />
<skip_special_tokens value="True" />
<streaming_detokenizer value="False" />
<tiktoken_version value="0.8.0" />
<tokenizer_output_type value="i64" />
<tokenizers_version value="0.20.1" />
<transformers_version value="4.46.3" />
<use_max_padding value="False" />
<use_sentencepiece_backend value="False" />
<utf8_replace_mode />
<with_detokenizer value="True" />
</rt_info>
</net>