diff --git "a/original/compiled/TextEncoder2.mlmodelc/model.mil" "b/original/compiled/TextEncoder2.mlmodelc/model.mil" --- "a/original/compiled/TextEncoder2.mlmodelc/model.mil" +++ "b/original/compiled/TextEncoder2.mlmodelc/model.mil" @@ -2,2268 +2,2272 @@ program(1.0) [buildInfo = dict, tensor>({{"coremlc-component-MIL", "5.33.4"}, {"coremlc-version", "1436.100.10"}, {"coremltools-component-torch", "2.1.0.dev20230718"}, {"coremltools-version", "7.0b1"}})] { func main(tensor input_ids) { - tensor text_encoder_text_model_embeddings_token_embedding_weight = const()[name = tensor("text_encoder_text_model_embeddings_token_embedding_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(64)))]; - tensor text_encoder_text_model_encoder_layers_0_layer_norm1_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_0_layer_norm1_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(252969088)))]; - tensor text_encoder_text_model_encoder_layers_0_layer_norm1_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_0_layer_norm1_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(252974272)))]; - tensor text_encoder_text_model_encoder_layers_0_self_attn_q_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_0_self_attn_q_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(252979456)))]; - tensor text_encoder_text_model_encoder_layers_0_self_attn_q_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_0_self_attn_q_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(252984640)))]; - tensor text_encoder_text_model_encoder_layers_0_self_attn_k_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_0_self_attn_k_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(259538304)))]; - tensor text_encoder_text_model_encoder_layers_0_self_attn_k_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_0_self_attn_k_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(259543488)))]; - tensor text_encoder_text_model_encoder_layers_0_self_attn_v_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_0_self_attn_v_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(266097152)))]; - tensor text_encoder_text_model_encoder_layers_0_self_attn_v_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_0_self_attn_v_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(266102336)))]; - tensor text_encoder_text_model_encoder_layers_0_self_attn_out_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_0_self_attn_out_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(272656000)))]; - tensor text_encoder_text_model_encoder_layers_0_self_attn_out_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_0_self_attn_out_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(272661184)))]; - tensor text_encoder_text_model_encoder_layers_0_layer_norm2_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_0_layer_norm2_bias"), 
val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(279214848)))]; - tensor text_encoder_text_model_encoder_layers_0_layer_norm2_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_0_layer_norm2_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(279220032)))]; - tensor text_encoder_text_model_encoder_layers_0_mlp_fc1_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_0_mlp_fc1_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(279225216)))]; - tensor text_encoder_text_model_encoder_layers_0_mlp_fc1_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_0_mlp_fc1_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(279245760)))]; - tensor text_encoder_text_model_encoder_layers_0_mlp_fc2_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_0_mlp_fc2_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(305460224)))]; - tensor text_encoder_text_model_encoder_layers_0_mlp_fc2_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_0_mlp_fc2_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(305465408)))]; - tensor text_encoder_text_model_encoder_layers_1_layer_norm1_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_1_layer_norm1_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(331679872)))]; - tensor text_encoder_text_model_encoder_layers_1_layer_norm1_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_1_layer_norm1_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(331685056)))]; - tensor text_encoder_text_model_encoder_layers_1_self_attn_q_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_1_self_attn_q_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(331690240)))]; - tensor text_encoder_text_model_encoder_layers_1_self_attn_q_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_1_self_attn_q_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(331695424)))]; - tensor text_encoder_text_model_encoder_layers_1_self_attn_k_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_1_self_attn_k_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(338249088)))]; - tensor text_encoder_text_model_encoder_layers_1_self_attn_k_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_1_self_attn_k_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(338254272)))]; - tensor text_encoder_text_model_encoder_layers_1_self_attn_v_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_1_self_attn_v_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(344807936)))]; - tensor text_encoder_text_model_encoder_layers_1_self_attn_v_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_1_self_attn_v_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(344813120)))]; - tensor text_encoder_text_model_encoder_layers_1_self_attn_out_proj_bias = 
const()[name = tensor("text_encoder_text_model_encoder_layers_1_self_attn_out_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(351366784)))]; - tensor text_encoder_text_model_encoder_layers_1_self_attn_out_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_1_self_attn_out_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(351371968)))]; - tensor text_encoder_text_model_encoder_layers_1_layer_norm2_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_1_layer_norm2_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(357925632)))]; - tensor text_encoder_text_model_encoder_layers_1_layer_norm2_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_1_layer_norm2_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(357930816)))]; - tensor text_encoder_text_model_encoder_layers_1_mlp_fc1_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_1_mlp_fc1_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(357936000)))]; - tensor text_encoder_text_model_encoder_layers_1_mlp_fc1_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_1_mlp_fc1_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(357956544)))]; - tensor text_encoder_text_model_encoder_layers_1_mlp_fc2_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_1_mlp_fc2_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(384171008)))]; - tensor text_encoder_text_model_encoder_layers_1_mlp_fc2_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_1_mlp_fc2_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(384176192)))]; - tensor text_encoder_text_model_encoder_layers_2_layer_norm1_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_2_layer_norm1_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(410390656)))]; - tensor text_encoder_text_model_encoder_layers_2_layer_norm1_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_2_layer_norm1_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(410395840)))]; - tensor text_encoder_text_model_encoder_layers_2_self_attn_q_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_2_self_attn_q_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(410401024)))]; - tensor text_encoder_text_model_encoder_layers_2_self_attn_q_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_2_self_attn_q_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(410406208)))]; - tensor text_encoder_text_model_encoder_layers_2_self_attn_k_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_2_self_attn_k_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(416959872)))]; - tensor text_encoder_text_model_encoder_layers_2_self_attn_k_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_2_self_attn_k_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(416965056)))]; - tensor text_encoder_text_model_encoder_layers_2_self_attn_v_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_2_self_attn_v_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(423518720)))]; - tensor text_encoder_text_model_encoder_layers_2_self_attn_v_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_2_self_attn_v_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(423523904)))]; - tensor text_encoder_text_model_encoder_layers_2_self_attn_out_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_2_self_attn_out_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(430077568)))]; - tensor text_encoder_text_model_encoder_layers_2_self_attn_out_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_2_self_attn_out_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(430082752)))]; - tensor text_encoder_text_model_encoder_layers_2_layer_norm2_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_2_layer_norm2_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(436636416)))]; - tensor text_encoder_text_model_encoder_layers_2_layer_norm2_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_2_layer_norm2_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(436641600)))]; - tensor text_encoder_text_model_encoder_layers_2_mlp_fc1_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_2_mlp_fc1_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(436646784)))]; - tensor text_encoder_text_model_encoder_layers_2_mlp_fc1_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_2_mlp_fc1_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(436667328)))]; - tensor text_encoder_text_model_encoder_layers_2_mlp_fc2_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_2_mlp_fc2_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(462881792)))]; - tensor text_encoder_text_model_encoder_layers_2_mlp_fc2_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_2_mlp_fc2_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(462886976)))]; - tensor text_encoder_text_model_encoder_layers_3_layer_norm1_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_3_layer_norm1_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(489101440)))]; - tensor text_encoder_text_model_encoder_layers_3_layer_norm1_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_3_layer_norm1_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(489106624)))]; - tensor text_encoder_text_model_encoder_layers_3_self_attn_q_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_3_self_attn_q_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(489111808)))]; - tensor text_encoder_text_model_encoder_layers_3_self_attn_q_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_3_self_attn_q_proj_weight"), 
val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(489116992)))]; - tensor text_encoder_text_model_encoder_layers_3_self_attn_k_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_3_self_attn_k_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(495670656)))]; - tensor text_encoder_text_model_encoder_layers_3_self_attn_k_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_3_self_attn_k_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(495675840)))]; - tensor text_encoder_text_model_encoder_layers_3_self_attn_v_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_3_self_attn_v_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(502229504)))]; - tensor text_encoder_text_model_encoder_layers_3_self_attn_v_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_3_self_attn_v_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(502234688)))]; - tensor text_encoder_text_model_encoder_layers_3_self_attn_out_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_3_self_attn_out_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(508788352)))]; - tensor text_encoder_text_model_encoder_layers_3_self_attn_out_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_3_self_attn_out_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(508793536)))]; - tensor text_encoder_text_model_encoder_layers_3_layer_norm2_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_3_layer_norm2_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(515347200)))]; - tensor text_encoder_text_model_encoder_layers_3_layer_norm2_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_3_layer_norm2_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(515352384)))]; - tensor text_encoder_text_model_encoder_layers_3_mlp_fc1_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_3_mlp_fc1_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(515357568)))]; - tensor text_encoder_text_model_encoder_layers_3_mlp_fc1_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_3_mlp_fc1_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(515378112)))]; - tensor text_encoder_text_model_encoder_layers_3_mlp_fc2_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_3_mlp_fc2_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(541592576)))]; - tensor text_encoder_text_model_encoder_layers_3_mlp_fc2_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_3_mlp_fc2_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(541597760)))]; - tensor text_encoder_text_model_encoder_layers_4_layer_norm1_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_4_layer_norm1_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(567812224)))]; - tensor text_encoder_text_model_encoder_layers_4_layer_norm1_weight = 
const()[name = tensor("text_encoder_text_model_encoder_layers_4_layer_norm1_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(567817408)))]; - tensor text_encoder_text_model_encoder_layers_4_self_attn_q_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_4_self_attn_q_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(567822592)))]; - tensor text_encoder_text_model_encoder_layers_4_self_attn_q_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_4_self_attn_q_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(567827776)))]; - tensor text_encoder_text_model_encoder_layers_4_self_attn_k_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_4_self_attn_k_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(574381440)))]; - tensor text_encoder_text_model_encoder_layers_4_self_attn_k_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_4_self_attn_k_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(574386624)))]; - tensor text_encoder_text_model_encoder_layers_4_self_attn_v_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_4_self_attn_v_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(580940288)))]; - tensor text_encoder_text_model_encoder_layers_4_self_attn_v_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_4_self_attn_v_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(580945472)))]; - tensor text_encoder_text_model_encoder_layers_4_self_attn_out_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_4_self_attn_out_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(587499136)))]; - tensor text_encoder_text_model_encoder_layers_4_self_attn_out_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_4_self_attn_out_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(587504320)))]; - tensor text_encoder_text_model_encoder_layers_4_layer_norm2_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_4_layer_norm2_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(594057984)))]; - tensor text_encoder_text_model_encoder_layers_4_layer_norm2_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_4_layer_norm2_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(594063168)))]; - tensor text_encoder_text_model_encoder_layers_4_mlp_fc1_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_4_mlp_fc1_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(594068352)))]; - tensor text_encoder_text_model_encoder_layers_4_mlp_fc1_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_4_mlp_fc1_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(594088896)))]; - tensor text_encoder_text_model_encoder_layers_4_mlp_fc2_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_4_mlp_fc2_bias"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(620303360)))]; - tensor text_encoder_text_model_encoder_layers_4_mlp_fc2_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_4_mlp_fc2_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(620308544)))]; - tensor text_encoder_text_model_encoder_layers_5_layer_norm1_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_5_layer_norm1_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(646523008)))]; - tensor text_encoder_text_model_encoder_layers_5_layer_norm1_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_5_layer_norm1_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(646528192)))]; - tensor text_encoder_text_model_encoder_layers_5_self_attn_q_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_5_self_attn_q_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(646533376)))]; - tensor text_encoder_text_model_encoder_layers_5_self_attn_q_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_5_self_attn_q_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(646538560)))]; - tensor text_encoder_text_model_encoder_layers_5_self_attn_k_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_5_self_attn_k_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(653092224)))]; - tensor text_encoder_text_model_encoder_layers_5_self_attn_k_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_5_self_attn_k_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(653097408)))]; - tensor text_encoder_text_model_encoder_layers_5_self_attn_v_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_5_self_attn_v_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(659651072)))]; - tensor text_encoder_text_model_encoder_layers_5_self_attn_v_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_5_self_attn_v_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(659656256)))]; - tensor text_encoder_text_model_encoder_layers_5_self_attn_out_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_5_self_attn_out_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(666209920)))]; - tensor text_encoder_text_model_encoder_layers_5_self_attn_out_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_5_self_attn_out_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(666215104)))]; - tensor text_encoder_text_model_encoder_layers_5_layer_norm2_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_5_layer_norm2_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(672768768)))]; - tensor text_encoder_text_model_encoder_layers_5_layer_norm2_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_5_layer_norm2_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(672773952)))]; - tensor 
text_encoder_text_model_encoder_layers_5_mlp_fc1_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_5_mlp_fc1_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(672779136)))]; - tensor text_encoder_text_model_encoder_layers_5_mlp_fc1_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_5_mlp_fc1_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(672799680)))]; - tensor text_encoder_text_model_encoder_layers_5_mlp_fc2_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_5_mlp_fc2_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(699014144)))]; - tensor text_encoder_text_model_encoder_layers_5_mlp_fc2_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_5_mlp_fc2_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(699019328)))]; - tensor text_encoder_text_model_encoder_layers_6_layer_norm1_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_6_layer_norm1_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(725233792)))]; - tensor text_encoder_text_model_encoder_layers_6_layer_norm1_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_6_layer_norm1_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(725238976)))]; - tensor text_encoder_text_model_encoder_layers_6_self_attn_q_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_6_self_attn_q_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(725244160)))]; - tensor text_encoder_text_model_encoder_layers_6_self_attn_q_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_6_self_attn_q_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(725249344)))]; - tensor text_encoder_text_model_encoder_layers_6_self_attn_k_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_6_self_attn_k_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(731803008)))]; - tensor text_encoder_text_model_encoder_layers_6_self_attn_k_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_6_self_attn_k_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(731808192)))]; - tensor text_encoder_text_model_encoder_layers_6_self_attn_v_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_6_self_attn_v_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(738361856)))]; - tensor text_encoder_text_model_encoder_layers_6_self_attn_v_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_6_self_attn_v_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(738367040)))]; - tensor text_encoder_text_model_encoder_layers_6_self_attn_out_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_6_self_attn_out_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(744920704)))]; - tensor text_encoder_text_model_encoder_layers_6_self_attn_out_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_6_self_attn_out_proj_weight"), val = 
tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(744925888)))]; - tensor text_encoder_text_model_encoder_layers_6_layer_norm2_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_6_layer_norm2_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(751479552)))]; - tensor text_encoder_text_model_encoder_layers_6_layer_norm2_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_6_layer_norm2_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(751484736)))]; - tensor text_encoder_text_model_encoder_layers_6_mlp_fc1_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_6_mlp_fc1_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(751489920)))]; - tensor text_encoder_text_model_encoder_layers_6_mlp_fc1_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_6_mlp_fc1_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(751510464)))]; - tensor text_encoder_text_model_encoder_layers_6_mlp_fc2_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_6_mlp_fc2_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(777724928)))]; - tensor text_encoder_text_model_encoder_layers_6_mlp_fc2_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_6_mlp_fc2_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(777730112)))]; - tensor text_encoder_text_model_encoder_layers_7_layer_norm1_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_7_layer_norm1_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(803944576)))]; - tensor text_encoder_text_model_encoder_layers_7_layer_norm1_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_7_layer_norm1_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(803949760)))]; - tensor text_encoder_text_model_encoder_layers_7_self_attn_q_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_7_self_attn_q_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(803954944)))]; - tensor text_encoder_text_model_encoder_layers_7_self_attn_q_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_7_self_attn_q_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(803960128)))]; - tensor text_encoder_text_model_encoder_layers_7_self_attn_k_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_7_self_attn_k_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(810513792)))]; - tensor text_encoder_text_model_encoder_layers_7_self_attn_k_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_7_self_attn_k_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(810518976)))]; - tensor text_encoder_text_model_encoder_layers_7_self_attn_v_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_7_self_attn_v_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(817072640)))]; - tensor text_encoder_text_model_encoder_layers_7_self_attn_v_proj_weight = const()[name = 
tensor("text_encoder_text_model_encoder_layers_7_self_attn_v_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(817077824)))]; - tensor text_encoder_text_model_encoder_layers_7_self_attn_out_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_7_self_attn_out_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(823631488)))]; - tensor text_encoder_text_model_encoder_layers_7_self_attn_out_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_7_self_attn_out_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(823636672)))]; - tensor text_encoder_text_model_encoder_layers_7_layer_norm2_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_7_layer_norm2_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(830190336)))]; - tensor text_encoder_text_model_encoder_layers_7_layer_norm2_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_7_layer_norm2_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(830195520)))]; - tensor text_encoder_text_model_encoder_layers_7_mlp_fc1_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_7_mlp_fc1_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(830200704)))]; - tensor text_encoder_text_model_encoder_layers_7_mlp_fc1_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_7_mlp_fc1_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(830221248)))]; - tensor text_encoder_text_model_encoder_layers_7_mlp_fc2_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_7_mlp_fc2_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(856435712)))]; - tensor text_encoder_text_model_encoder_layers_7_mlp_fc2_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_7_mlp_fc2_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(856440896)))]; - tensor text_encoder_text_model_encoder_layers_8_layer_norm1_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_8_layer_norm1_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(882655360)))]; - tensor text_encoder_text_model_encoder_layers_8_layer_norm1_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_8_layer_norm1_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(882660544)))]; - tensor text_encoder_text_model_encoder_layers_8_self_attn_q_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_8_self_attn_q_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(882665728)))]; - tensor text_encoder_text_model_encoder_layers_8_self_attn_q_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_8_self_attn_q_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(882670912)))]; - tensor text_encoder_text_model_encoder_layers_8_self_attn_k_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_8_self_attn_k_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(889224576)))]; - tensor 
text_encoder_text_model_encoder_layers_8_self_attn_k_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_8_self_attn_k_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(889229760)))]; - tensor text_encoder_text_model_encoder_layers_8_self_attn_v_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_8_self_attn_v_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(895783424)))]; - tensor text_encoder_text_model_encoder_layers_8_self_attn_v_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_8_self_attn_v_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(895788608)))]; - tensor text_encoder_text_model_encoder_layers_8_self_attn_out_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_8_self_attn_out_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(902342272)))]; - tensor text_encoder_text_model_encoder_layers_8_self_attn_out_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_8_self_attn_out_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(902347456)))]; - tensor text_encoder_text_model_encoder_layers_8_layer_norm2_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_8_layer_norm2_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(908901120)))]; - tensor text_encoder_text_model_encoder_layers_8_layer_norm2_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_8_layer_norm2_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(908906304)))]; - tensor text_encoder_text_model_encoder_layers_8_mlp_fc1_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_8_mlp_fc1_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(908911488)))]; - tensor text_encoder_text_model_encoder_layers_8_mlp_fc1_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_8_mlp_fc1_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(908932032)))]; - tensor text_encoder_text_model_encoder_layers_8_mlp_fc2_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_8_mlp_fc2_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(935146496)))]; - tensor text_encoder_text_model_encoder_layers_8_mlp_fc2_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_8_mlp_fc2_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(935151680)))]; - tensor text_encoder_text_model_encoder_layers_9_layer_norm1_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_9_layer_norm1_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(961366144)))]; - tensor text_encoder_text_model_encoder_layers_9_layer_norm1_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_9_layer_norm1_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(961371328)))]; - tensor text_encoder_text_model_encoder_layers_9_self_attn_q_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_9_self_attn_q_proj_bias"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(961376512)))]; - tensor text_encoder_text_model_encoder_layers_9_self_attn_q_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_9_self_attn_q_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(961381696)))]; - tensor text_encoder_text_model_encoder_layers_9_self_attn_k_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_9_self_attn_k_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(967935360)))]; - tensor text_encoder_text_model_encoder_layers_9_self_attn_k_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_9_self_attn_k_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(967940544)))]; - tensor text_encoder_text_model_encoder_layers_9_self_attn_v_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_9_self_attn_v_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(974494208)))]; - tensor text_encoder_text_model_encoder_layers_9_self_attn_v_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_9_self_attn_v_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(974499392)))]; - tensor text_encoder_text_model_encoder_layers_9_self_attn_out_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_9_self_attn_out_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(981053056)))]; - tensor text_encoder_text_model_encoder_layers_9_self_attn_out_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_9_self_attn_out_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(981058240)))]; - tensor text_encoder_text_model_encoder_layers_9_layer_norm2_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_9_layer_norm2_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(987611904)))]; - tensor text_encoder_text_model_encoder_layers_9_layer_norm2_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_9_layer_norm2_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(987617088)))]; - tensor text_encoder_text_model_encoder_layers_9_mlp_fc1_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_9_mlp_fc1_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(987622272)))]; - tensor text_encoder_text_model_encoder_layers_9_mlp_fc1_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_9_mlp_fc1_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(987642816)))]; - tensor text_encoder_text_model_encoder_layers_9_mlp_fc2_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_9_mlp_fc2_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1013857280)))]; - tensor text_encoder_text_model_encoder_layers_9_mlp_fc2_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_9_mlp_fc2_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1013862464)))]; - tensor text_encoder_text_model_encoder_layers_10_layer_norm1_bias = const()[name = 
tensor("text_encoder_text_model_encoder_layers_10_layer_norm1_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1040076928)))]; - tensor text_encoder_text_model_encoder_layers_10_layer_norm1_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_10_layer_norm1_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1040082112)))]; - tensor text_encoder_text_model_encoder_layers_10_self_attn_q_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_10_self_attn_q_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1040087296)))]; - tensor text_encoder_text_model_encoder_layers_10_self_attn_q_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_10_self_attn_q_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1040092480)))]; - tensor text_encoder_text_model_encoder_layers_10_self_attn_k_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_10_self_attn_k_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1046646144)))]; - tensor text_encoder_text_model_encoder_layers_10_self_attn_k_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_10_self_attn_k_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1046651328)))]; - tensor text_encoder_text_model_encoder_layers_10_self_attn_v_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_10_self_attn_v_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1053204992)))]; - tensor text_encoder_text_model_encoder_layers_10_self_attn_v_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_10_self_attn_v_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1053210176)))]; - tensor text_encoder_text_model_encoder_layers_10_self_attn_out_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_10_self_attn_out_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1059763840)))]; - tensor text_encoder_text_model_encoder_layers_10_self_attn_out_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_10_self_attn_out_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1059769024)))]; - tensor text_encoder_text_model_encoder_layers_10_layer_norm2_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_10_layer_norm2_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1066322688)))]; - tensor text_encoder_text_model_encoder_layers_10_layer_norm2_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_10_layer_norm2_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1066327872)))]; - tensor text_encoder_text_model_encoder_layers_10_mlp_fc1_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_10_mlp_fc1_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1066333056)))]; - tensor text_encoder_text_model_encoder_layers_10_mlp_fc1_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_10_mlp_fc1_weight"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(1066353600)))]; - tensor text_encoder_text_model_encoder_layers_10_mlp_fc2_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_10_mlp_fc2_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1092568064)))]; - tensor text_encoder_text_model_encoder_layers_10_mlp_fc2_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_10_mlp_fc2_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1092573248)))]; - tensor text_encoder_text_model_encoder_layers_11_layer_norm1_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_11_layer_norm1_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1118787712)))]; - tensor text_encoder_text_model_encoder_layers_11_layer_norm1_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_11_layer_norm1_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1118792896)))]; - tensor text_encoder_text_model_encoder_layers_11_self_attn_q_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_11_self_attn_q_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1118798080)))]; - tensor text_encoder_text_model_encoder_layers_11_self_attn_q_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_11_self_attn_q_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1118803264)))]; - tensor text_encoder_text_model_encoder_layers_11_self_attn_k_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_11_self_attn_k_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1125356928)))]; - tensor text_encoder_text_model_encoder_layers_11_self_attn_k_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_11_self_attn_k_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1125362112)))]; - tensor text_encoder_text_model_encoder_layers_11_self_attn_v_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_11_self_attn_v_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1131915776)))]; - tensor text_encoder_text_model_encoder_layers_11_self_attn_v_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_11_self_attn_v_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1131920960)))]; - tensor text_encoder_text_model_encoder_layers_11_self_attn_out_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_11_self_attn_out_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1138474624)))]; - tensor text_encoder_text_model_encoder_layers_11_self_attn_out_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_11_self_attn_out_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1138479808)))]; - tensor text_encoder_text_model_encoder_layers_11_layer_norm2_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_11_layer_norm2_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1145033472)))]; - tensor 
text_encoder_text_model_encoder_layers_11_layer_norm2_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_11_layer_norm2_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1145038656)))]; - tensor text_encoder_text_model_encoder_layers_11_mlp_fc1_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_11_mlp_fc1_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1145043840)))]; - tensor text_encoder_text_model_encoder_layers_11_mlp_fc1_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_11_mlp_fc1_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1145064384)))]; - tensor text_encoder_text_model_encoder_layers_11_mlp_fc2_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_11_mlp_fc2_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1171278848)))]; - tensor text_encoder_text_model_encoder_layers_11_mlp_fc2_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_11_mlp_fc2_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1171284032)))]; - tensor text_encoder_text_model_encoder_layers_12_layer_norm1_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_12_layer_norm1_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1197498496)))]; - tensor text_encoder_text_model_encoder_layers_12_layer_norm1_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_12_layer_norm1_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1197503680)))]; - tensor text_encoder_text_model_encoder_layers_12_self_attn_q_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_12_self_attn_q_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1197508864)))]; - tensor text_encoder_text_model_encoder_layers_12_self_attn_q_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_12_self_attn_q_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1197514048)))]; - tensor text_encoder_text_model_encoder_layers_12_self_attn_k_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_12_self_attn_k_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1204067712)))]; - tensor text_encoder_text_model_encoder_layers_12_self_attn_k_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_12_self_attn_k_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1204072896)))]; - tensor text_encoder_text_model_encoder_layers_12_self_attn_v_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_12_self_attn_v_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1210626560)))]; - tensor text_encoder_text_model_encoder_layers_12_self_attn_v_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_12_self_attn_v_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1210631744)))]; - tensor text_encoder_text_model_encoder_layers_12_self_attn_out_proj_bias = const()[name = 
tensor("text_encoder_text_model_encoder_layers_12_self_attn_out_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1217185408)))]; - tensor text_encoder_text_model_encoder_layers_12_self_attn_out_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_12_self_attn_out_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1217190592)))]; - tensor text_encoder_text_model_encoder_layers_12_layer_norm2_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_12_layer_norm2_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1223744256)))]; - tensor text_encoder_text_model_encoder_layers_12_layer_norm2_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_12_layer_norm2_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1223749440)))]; - tensor text_encoder_text_model_encoder_layers_12_mlp_fc1_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_12_mlp_fc1_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1223754624)))]; - tensor text_encoder_text_model_encoder_layers_12_mlp_fc1_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_12_mlp_fc1_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1223775168)))]; - tensor text_encoder_text_model_encoder_layers_12_mlp_fc2_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_12_mlp_fc2_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1249989632)))]; - tensor text_encoder_text_model_encoder_layers_12_mlp_fc2_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_12_mlp_fc2_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1249994816)))]; - tensor text_encoder_text_model_encoder_layers_13_layer_norm1_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_13_layer_norm1_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1276209280)))]; - tensor text_encoder_text_model_encoder_layers_13_layer_norm1_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_13_layer_norm1_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1276214464)))]; - tensor text_encoder_text_model_encoder_layers_13_self_attn_q_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_13_self_attn_q_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1276219648)))]; - tensor text_encoder_text_model_encoder_layers_13_self_attn_q_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_13_self_attn_q_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1276224832)))]; - tensor text_encoder_text_model_encoder_layers_13_self_attn_k_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_13_self_attn_k_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1282778496)))]; - tensor text_encoder_text_model_encoder_layers_13_self_attn_k_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_13_self_attn_k_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), 
offset = tensor(1282783680)))]; - tensor text_encoder_text_model_encoder_layers_13_self_attn_v_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_13_self_attn_v_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1289337344)))]; - tensor text_encoder_text_model_encoder_layers_13_self_attn_v_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_13_self_attn_v_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1289342528)))]; - tensor text_encoder_text_model_encoder_layers_13_self_attn_out_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_13_self_attn_out_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1295896192)))]; - tensor text_encoder_text_model_encoder_layers_13_self_attn_out_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_13_self_attn_out_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1295901376)))]; - tensor text_encoder_text_model_encoder_layers_13_layer_norm2_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_13_layer_norm2_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1302455040)))]; - tensor text_encoder_text_model_encoder_layers_13_layer_norm2_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_13_layer_norm2_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1302460224)))]; - tensor text_encoder_text_model_encoder_layers_13_mlp_fc1_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_13_mlp_fc1_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1302465408)))]; - tensor text_encoder_text_model_encoder_layers_13_mlp_fc1_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_13_mlp_fc1_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1302485952)))]; - tensor text_encoder_text_model_encoder_layers_13_mlp_fc2_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_13_mlp_fc2_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1328700416)))]; - tensor text_encoder_text_model_encoder_layers_13_mlp_fc2_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_13_mlp_fc2_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1328705600)))]; - tensor text_encoder_text_model_encoder_layers_14_layer_norm1_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_14_layer_norm1_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1354920064)))]; - tensor text_encoder_text_model_encoder_layers_14_layer_norm1_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_14_layer_norm1_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1354925248)))]; - tensor text_encoder_text_model_encoder_layers_14_self_attn_q_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_14_self_attn_q_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1354930432)))]; - tensor text_encoder_text_model_encoder_layers_14_self_attn_q_proj_weight = const()[name = 
tensor("text_encoder_text_model_encoder_layers_14_self_attn_q_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1354935616)))]; - tensor text_encoder_text_model_encoder_layers_14_self_attn_k_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_14_self_attn_k_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1361489280)))]; - tensor text_encoder_text_model_encoder_layers_14_self_attn_k_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_14_self_attn_k_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1361494464)))]; - tensor text_encoder_text_model_encoder_layers_14_self_attn_v_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_14_self_attn_v_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1368048128)))]; - tensor text_encoder_text_model_encoder_layers_14_self_attn_v_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_14_self_attn_v_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1368053312)))]; - tensor text_encoder_text_model_encoder_layers_14_self_attn_out_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_14_self_attn_out_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1374606976)))]; - tensor text_encoder_text_model_encoder_layers_14_self_attn_out_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_14_self_attn_out_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1374612160)))]; - tensor text_encoder_text_model_encoder_layers_14_layer_norm2_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_14_layer_norm2_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1381165824)))]; - tensor text_encoder_text_model_encoder_layers_14_layer_norm2_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_14_layer_norm2_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1381171008)))]; - tensor text_encoder_text_model_encoder_layers_14_mlp_fc1_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_14_mlp_fc1_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1381176192)))]; - tensor text_encoder_text_model_encoder_layers_14_mlp_fc1_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_14_mlp_fc1_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1381196736)))]; - tensor text_encoder_text_model_encoder_layers_14_mlp_fc2_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_14_mlp_fc2_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1407411200)))]; - tensor text_encoder_text_model_encoder_layers_14_mlp_fc2_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_14_mlp_fc2_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1407416384)))]; - tensor text_encoder_text_model_encoder_layers_15_layer_norm1_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_15_layer_norm1_bias"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(1433630848)))]; - tensor text_encoder_text_model_encoder_layers_15_layer_norm1_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_15_layer_norm1_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1433636032)))]; - tensor text_encoder_text_model_encoder_layers_15_self_attn_q_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_15_self_attn_q_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1433641216)))]; - tensor text_encoder_text_model_encoder_layers_15_self_attn_q_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_15_self_attn_q_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1433646400)))]; - tensor text_encoder_text_model_encoder_layers_15_self_attn_k_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_15_self_attn_k_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1440200064)))]; - tensor text_encoder_text_model_encoder_layers_15_self_attn_k_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_15_self_attn_k_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1440205248)))]; - tensor text_encoder_text_model_encoder_layers_15_self_attn_v_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_15_self_attn_v_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1446758912)))]; - tensor text_encoder_text_model_encoder_layers_15_self_attn_v_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_15_self_attn_v_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1446764096)))]; - tensor text_encoder_text_model_encoder_layers_15_self_attn_out_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_15_self_attn_out_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1453317760)))]; - tensor text_encoder_text_model_encoder_layers_15_self_attn_out_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_15_self_attn_out_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1453322944)))]; - tensor text_encoder_text_model_encoder_layers_15_layer_norm2_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_15_layer_norm2_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1459876608)))]; - tensor text_encoder_text_model_encoder_layers_15_layer_norm2_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_15_layer_norm2_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1459881792)))]; - tensor text_encoder_text_model_encoder_layers_15_mlp_fc1_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_15_mlp_fc1_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1459886976)))]; - tensor text_encoder_text_model_encoder_layers_15_mlp_fc1_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_15_mlp_fc1_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1459907520)))]; - tensor 
text_encoder_text_model_encoder_layers_15_mlp_fc2_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_15_mlp_fc2_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1486121984)))]; - tensor text_encoder_text_model_encoder_layers_15_mlp_fc2_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_15_mlp_fc2_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1486127168)))]; - tensor text_encoder_text_model_encoder_layers_16_layer_norm1_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_16_layer_norm1_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1512341632)))]; - tensor text_encoder_text_model_encoder_layers_16_layer_norm1_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_16_layer_norm1_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1512346816)))]; - tensor text_encoder_text_model_encoder_layers_16_self_attn_q_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_16_self_attn_q_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1512352000)))]; - tensor text_encoder_text_model_encoder_layers_16_self_attn_q_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_16_self_attn_q_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1512357184)))]; - tensor text_encoder_text_model_encoder_layers_16_self_attn_k_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_16_self_attn_k_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1518910848)))]; - tensor text_encoder_text_model_encoder_layers_16_self_attn_k_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_16_self_attn_k_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1518916032)))]; - tensor text_encoder_text_model_encoder_layers_16_self_attn_v_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_16_self_attn_v_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1525469696)))]; - tensor text_encoder_text_model_encoder_layers_16_self_attn_v_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_16_self_attn_v_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1525474880)))]; - tensor text_encoder_text_model_encoder_layers_16_self_attn_out_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_16_self_attn_out_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1532028544)))]; - tensor text_encoder_text_model_encoder_layers_16_self_attn_out_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_16_self_attn_out_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1532033728)))]; - tensor text_encoder_text_model_encoder_layers_16_layer_norm2_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_16_layer_norm2_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1538587392)))]; - tensor text_encoder_text_model_encoder_layers_16_layer_norm2_weight = const()[name = 
tensor("text_encoder_text_model_encoder_layers_16_layer_norm2_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1538592576)))]; - tensor text_encoder_text_model_encoder_layers_16_mlp_fc1_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_16_mlp_fc1_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1538597760)))]; - tensor text_encoder_text_model_encoder_layers_16_mlp_fc1_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_16_mlp_fc1_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1538618304)))]; - tensor text_encoder_text_model_encoder_layers_16_mlp_fc2_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_16_mlp_fc2_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1564832768)))]; - tensor text_encoder_text_model_encoder_layers_16_mlp_fc2_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_16_mlp_fc2_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1564837952)))]; - tensor text_encoder_text_model_encoder_layers_17_layer_norm1_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_17_layer_norm1_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1591052416)))]; - tensor text_encoder_text_model_encoder_layers_17_layer_norm1_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_17_layer_norm1_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1591057600)))]; - tensor text_encoder_text_model_encoder_layers_17_self_attn_q_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_17_self_attn_q_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1591062784)))]; - tensor text_encoder_text_model_encoder_layers_17_self_attn_q_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_17_self_attn_q_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1591067968)))]; - tensor text_encoder_text_model_encoder_layers_17_self_attn_k_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_17_self_attn_k_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1597621632)))]; - tensor text_encoder_text_model_encoder_layers_17_self_attn_k_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_17_self_attn_k_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1597626816)))]; - tensor text_encoder_text_model_encoder_layers_17_self_attn_v_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_17_self_attn_v_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1604180480)))]; - tensor text_encoder_text_model_encoder_layers_17_self_attn_v_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_17_self_attn_v_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1604185664)))]; - tensor text_encoder_text_model_encoder_layers_17_self_attn_out_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_17_self_attn_out_proj_bias"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(1610739328)))]; - tensor text_encoder_text_model_encoder_layers_17_self_attn_out_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_17_self_attn_out_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1610744512)))]; - tensor text_encoder_text_model_encoder_layers_17_layer_norm2_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_17_layer_norm2_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1617298176)))]; - tensor text_encoder_text_model_encoder_layers_17_layer_norm2_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_17_layer_norm2_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1617303360)))]; - tensor text_encoder_text_model_encoder_layers_17_mlp_fc1_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_17_mlp_fc1_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1617308544)))]; - tensor text_encoder_text_model_encoder_layers_17_mlp_fc1_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_17_mlp_fc1_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1617329088)))]; - tensor text_encoder_text_model_encoder_layers_17_mlp_fc2_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_17_mlp_fc2_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1643543552)))]; - tensor text_encoder_text_model_encoder_layers_17_mlp_fc2_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_17_mlp_fc2_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1643548736)))]; - tensor text_encoder_text_model_encoder_layers_18_layer_norm1_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_18_layer_norm1_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1669763200)))]; - tensor text_encoder_text_model_encoder_layers_18_layer_norm1_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_18_layer_norm1_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1669768384)))]; - tensor text_encoder_text_model_encoder_layers_18_self_attn_q_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_18_self_attn_q_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1669773568)))]; - tensor text_encoder_text_model_encoder_layers_18_self_attn_q_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_18_self_attn_q_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1669778752)))]; - tensor text_encoder_text_model_encoder_layers_18_self_attn_k_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_18_self_attn_k_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1676332416)))]; - tensor text_encoder_text_model_encoder_layers_18_self_attn_k_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_18_self_attn_k_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1676337600)))]; - tensor text_encoder_text_model_encoder_layers_18_self_attn_v_proj_bias = 
const()[name = tensor("text_encoder_text_model_encoder_layers_18_self_attn_v_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1682891264)))]; - tensor text_encoder_text_model_encoder_layers_18_self_attn_v_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_18_self_attn_v_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1682896448)))]; - tensor text_encoder_text_model_encoder_layers_18_self_attn_out_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_18_self_attn_out_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1689450112)))]; - tensor text_encoder_text_model_encoder_layers_18_self_attn_out_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_18_self_attn_out_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1689455296)))]; - tensor text_encoder_text_model_encoder_layers_18_layer_norm2_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_18_layer_norm2_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1696008960)))]; - tensor text_encoder_text_model_encoder_layers_18_layer_norm2_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_18_layer_norm2_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1696014144)))]; - tensor text_encoder_text_model_encoder_layers_18_mlp_fc1_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_18_mlp_fc1_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1696019328)))]; - tensor text_encoder_text_model_encoder_layers_18_mlp_fc1_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_18_mlp_fc1_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1696039872)))]; - tensor text_encoder_text_model_encoder_layers_18_mlp_fc2_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_18_mlp_fc2_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1722254336)))]; - tensor text_encoder_text_model_encoder_layers_18_mlp_fc2_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_18_mlp_fc2_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1722259520)))]; - tensor text_encoder_text_model_encoder_layers_19_layer_norm1_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_19_layer_norm1_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1748473984)))]; - tensor text_encoder_text_model_encoder_layers_19_layer_norm1_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_19_layer_norm1_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1748479168)))]; - tensor text_encoder_text_model_encoder_layers_19_self_attn_q_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_19_self_attn_q_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1748484352)))]; - tensor text_encoder_text_model_encoder_layers_19_self_attn_q_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_19_self_attn_q_proj_weight"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(1748489536)))]; - tensor text_encoder_text_model_encoder_layers_19_self_attn_k_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_19_self_attn_k_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1755043200)))]; - tensor text_encoder_text_model_encoder_layers_19_self_attn_k_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_19_self_attn_k_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1755048384)))]; - tensor text_encoder_text_model_encoder_layers_19_self_attn_v_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_19_self_attn_v_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1761602048)))]; - tensor text_encoder_text_model_encoder_layers_19_self_attn_v_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_19_self_attn_v_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1761607232)))]; - tensor text_encoder_text_model_encoder_layers_19_self_attn_out_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_19_self_attn_out_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1768160896)))]; - tensor text_encoder_text_model_encoder_layers_19_self_attn_out_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_19_self_attn_out_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1768166080)))]; - tensor text_encoder_text_model_encoder_layers_19_layer_norm2_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_19_layer_norm2_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1774719744)))]; - tensor text_encoder_text_model_encoder_layers_19_layer_norm2_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_19_layer_norm2_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1774724928)))]; - tensor text_encoder_text_model_encoder_layers_19_mlp_fc1_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_19_mlp_fc1_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1774730112)))]; - tensor text_encoder_text_model_encoder_layers_19_mlp_fc1_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_19_mlp_fc1_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1774750656)))]; - tensor text_encoder_text_model_encoder_layers_19_mlp_fc2_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_19_mlp_fc2_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1800965120)))]; - tensor text_encoder_text_model_encoder_layers_19_mlp_fc2_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_19_mlp_fc2_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1800970304)))]; - tensor text_encoder_text_model_encoder_layers_20_layer_norm1_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_20_layer_norm1_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1827184768)))]; - tensor 
text_encoder_text_model_encoder_layers_20_layer_norm1_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_20_layer_norm1_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1827189952)))]; - tensor text_encoder_text_model_encoder_layers_20_self_attn_q_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_20_self_attn_q_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1827195136)))]; - tensor text_encoder_text_model_encoder_layers_20_self_attn_q_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_20_self_attn_q_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1827200320)))]; - tensor text_encoder_text_model_encoder_layers_20_self_attn_k_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_20_self_attn_k_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1833753984)))]; - tensor text_encoder_text_model_encoder_layers_20_self_attn_k_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_20_self_attn_k_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1833759168)))]; - tensor text_encoder_text_model_encoder_layers_20_self_attn_v_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_20_self_attn_v_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1840312832)))]; - tensor text_encoder_text_model_encoder_layers_20_self_attn_v_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_20_self_attn_v_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1840318016)))]; - tensor text_encoder_text_model_encoder_layers_20_self_attn_out_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_20_self_attn_out_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1846871680)))]; - tensor text_encoder_text_model_encoder_layers_20_self_attn_out_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_20_self_attn_out_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1846876864)))]; - tensor text_encoder_text_model_encoder_layers_20_layer_norm2_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_20_layer_norm2_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1853430528)))]; - tensor text_encoder_text_model_encoder_layers_20_layer_norm2_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_20_layer_norm2_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1853435712)))]; - tensor text_encoder_text_model_encoder_layers_20_mlp_fc1_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_20_mlp_fc1_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1853440896)))]; - tensor text_encoder_text_model_encoder_layers_20_mlp_fc1_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_20_mlp_fc1_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1853461440)))]; - tensor text_encoder_text_model_encoder_layers_20_mlp_fc2_bias = const()[name = 
tensor("text_encoder_text_model_encoder_layers_20_mlp_fc2_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1879675904)))]; - tensor text_encoder_text_model_encoder_layers_20_mlp_fc2_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_20_mlp_fc2_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1879681088)))]; - tensor text_encoder_text_model_encoder_layers_21_layer_norm1_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_21_layer_norm1_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1905895552)))]; - tensor text_encoder_text_model_encoder_layers_21_layer_norm1_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_21_layer_norm1_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1905900736)))]; - tensor text_encoder_text_model_encoder_layers_21_self_attn_q_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_21_self_attn_q_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1905905920)))]; - tensor text_encoder_text_model_encoder_layers_21_self_attn_q_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_21_self_attn_q_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1905911104)))]; - tensor text_encoder_text_model_encoder_layers_21_self_attn_k_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_21_self_attn_k_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1912464768)))]; - tensor text_encoder_text_model_encoder_layers_21_self_attn_k_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_21_self_attn_k_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1912469952)))]; - tensor text_encoder_text_model_encoder_layers_21_self_attn_v_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_21_self_attn_v_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1919023616)))]; - tensor text_encoder_text_model_encoder_layers_21_self_attn_v_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_21_self_attn_v_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1919028800)))]; - tensor text_encoder_text_model_encoder_layers_21_self_attn_out_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_21_self_attn_out_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1925582464)))]; - tensor text_encoder_text_model_encoder_layers_21_self_attn_out_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_21_self_attn_out_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1925587648)))]; - tensor text_encoder_text_model_encoder_layers_21_layer_norm2_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_21_layer_norm2_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1932141312)))]; - tensor text_encoder_text_model_encoder_layers_21_layer_norm2_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_21_layer_norm2_weight"), val = tensor(BLOBFILE(path 
= tensor("@model_path/weights/weight.bin"), offset = tensor(1932146496)))]; - tensor text_encoder_text_model_encoder_layers_21_mlp_fc1_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_21_mlp_fc1_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1932151680)))]; - tensor text_encoder_text_model_encoder_layers_21_mlp_fc1_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_21_mlp_fc1_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1932172224)))]; - tensor text_encoder_text_model_encoder_layers_21_mlp_fc2_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_21_mlp_fc2_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1958386688)))]; - tensor text_encoder_text_model_encoder_layers_21_mlp_fc2_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_21_mlp_fc2_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1958391872)))]; - tensor text_encoder_text_model_encoder_layers_22_layer_norm1_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_22_layer_norm1_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1984606336)))]; - tensor text_encoder_text_model_encoder_layers_22_layer_norm1_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_22_layer_norm1_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1984611520)))]; - tensor text_encoder_text_model_encoder_layers_22_self_attn_q_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_22_self_attn_q_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1984616704)))]; - tensor text_encoder_text_model_encoder_layers_22_self_attn_q_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_22_self_attn_q_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1984621888)))]; - tensor text_encoder_text_model_encoder_layers_22_self_attn_k_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_22_self_attn_k_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1991175552)))]; - tensor text_encoder_text_model_encoder_layers_22_self_attn_k_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_22_self_attn_k_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1991180736)))]; - tensor text_encoder_text_model_encoder_layers_22_self_attn_v_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_22_self_attn_v_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1997734400)))]; - tensor text_encoder_text_model_encoder_layers_22_self_attn_v_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_22_self_attn_v_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1997739584)))]; - tensor text_encoder_text_model_encoder_layers_22_self_attn_out_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_22_self_attn_out_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2004293248)))]; - tensor 
text_encoder_text_model_encoder_layers_22_self_attn_out_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_22_self_attn_out_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2004298432)))]; - tensor text_encoder_text_model_encoder_layers_22_layer_norm2_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_22_layer_norm2_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2010852096)))]; - tensor text_encoder_text_model_encoder_layers_22_layer_norm2_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_22_layer_norm2_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2010857280)))]; - tensor text_encoder_text_model_encoder_layers_22_mlp_fc1_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_22_mlp_fc1_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2010862464)))]; - tensor text_encoder_text_model_encoder_layers_22_mlp_fc1_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_22_mlp_fc1_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2010883008)))]; - tensor text_encoder_text_model_encoder_layers_22_mlp_fc2_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_22_mlp_fc2_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2037097472)))]; - tensor text_encoder_text_model_encoder_layers_22_mlp_fc2_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_22_mlp_fc2_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2037102656)))]; - tensor text_encoder_text_model_encoder_layers_23_layer_norm1_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_23_layer_norm1_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2063317120)))]; - tensor text_encoder_text_model_encoder_layers_23_layer_norm1_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_23_layer_norm1_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2063322304)))]; - tensor text_encoder_text_model_encoder_layers_23_self_attn_q_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_23_self_attn_q_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2063327488)))]; - tensor text_encoder_text_model_encoder_layers_23_self_attn_q_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_23_self_attn_q_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2063332672)))]; - tensor text_encoder_text_model_encoder_layers_23_self_attn_k_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_23_self_attn_k_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2069886336)))]; - tensor text_encoder_text_model_encoder_layers_23_self_attn_k_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_23_self_attn_k_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2069891520)))]; - tensor text_encoder_text_model_encoder_layers_23_self_attn_v_proj_bias = const()[name = 
tensor("text_encoder_text_model_encoder_layers_23_self_attn_v_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2076445184)))]; - tensor text_encoder_text_model_encoder_layers_23_self_attn_v_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_23_self_attn_v_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2076450368)))]; - tensor text_encoder_text_model_encoder_layers_23_self_attn_out_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_23_self_attn_out_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2083004032)))]; - tensor text_encoder_text_model_encoder_layers_23_self_attn_out_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_23_self_attn_out_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2083009216)))]; - tensor text_encoder_text_model_encoder_layers_23_layer_norm2_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_23_layer_norm2_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2089562880)))]; - tensor text_encoder_text_model_encoder_layers_23_layer_norm2_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_23_layer_norm2_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2089568064)))]; - tensor text_encoder_text_model_encoder_layers_23_mlp_fc1_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_23_mlp_fc1_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2089573248)))]; - tensor text_encoder_text_model_encoder_layers_23_mlp_fc1_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_23_mlp_fc1_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2089593792)))]; - tensor text_encoder_text_model_encoder_layers_23_mlp_fc2_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_23_mlp_fc2_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2115808256)))]; - tensor text_encoder_text_model_encoder_layers_23_mlp_fc2_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_23_mlp_fc2_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2115813440)))]; - tensor text_encoder_text_model_encoder_layers_24_layer_norm1_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_24_layer_norm1_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2142027904)))]; - tensor text_encoder_text_model_encoder_layers_24_layer_norm1_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_24_layer_norm1_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2142033088)))]; - tensor text_encoder_text_model_encoder_layers_24_self_attn_q_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_24_self_attn_q_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2142038272)))]; - tensor text_encoder_text_model_encoder_layers_24_self_attn_q_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_24_self_attn_q_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), 
offset = tensor(2142043456)))]; - tensor text_encoder_text_model_encoder_layers_24_self_attn_k_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_24_self_attn_k_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2148597120)))]; - tensor text_encoder_text_model_encoder_layers_24_self_attn_k_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_24_self_attn_k_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2148602304)))]; - tensor text_encoder_text_model_encoder_layers_24_self_attn_v_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_24_self_attn_v_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2155155968)))]; - tensor text_encoder_text_model_encoder_layers_24_self_attn_v_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_24_self_attn_v_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2155161152)))]; - tensor text_encoder_text_model_encoder_layers_24_self_attn_out_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_24_self_attn_out_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2161714816)))]; - tensor text_encoder_text_model_encoder_layers_24_self_attn_out_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_24_self_attn_out_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2161720000)))]; - tensor text_encoder_text_model_encoder_layers_24_layer_norm2_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_24_layer_norm2_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2168273664)))]; - tensor text_encoder_text_model_encoder_layers_24_layer_norm2_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_24_layer_norm2_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2168278848)))]; - tensor text_encoder_text_model_encoder_layers_24_mlp_fc1_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_24_mlp_fc1_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2168284032)))]; - tensor text_encoder_text_model_encoder_layers_24_mlp_fc1_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_24_mlp_fc1_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2168304576)))]; - tensor text_encoder_text_model_encoder_layers_24_mlp_fc2_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_24_mlp_fc2_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2194519040)))]; - tensor text_encoder_text_model_encoder_layers_24_mlp_fc2_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_24_mlp_fc2_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2194524224)))]; - tensor text_encoder_text_model_encoder_layers_25_layer_norm1_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_25_layer_norm1_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2220738688)))]; - tensor text_encoder_text_model_encoder_layers_25_layer_norm1_weight = const()[name = 
tensor("text_encoder_text_model_encoder_layers_25_layer_norm1_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2220743872)))]; - tensor text_encoder_text_model_encoder_layers_25_self_attn_q_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_25_self_attn_q_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2220749056)))]; - tensor text_encoder_text_model_encoder_layers_25_self_attn_q_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_25_self_attn_q_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2220754240)))]; - tensor text_encoder_text_model_encoder_layers_25_self_attn_k_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_25_self_attn_k_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2227307904)))]; - tensor text_encoder_text_model_encoder_layers_25_self_attn_k_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_25_self_attn_k_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2227313088)))]; - tensor text_encoder_text_model_encoder_layers_25_self_attn_v_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_25_self_attn_v_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2233866752)))]; - tensor text_encoder_text_model_encoder_layers_25_self_attn_v_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_25_self_attn_v_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2233871936)))]; - tensor text_encoder_text_model_encoder_layers_25_self_attn_out_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_25_self_attn_out_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2240425600)))]; - tensor text_encoder_text_model_encoder_layers_25_self_attn_out_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_25_self_attn_out_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2240430784)))]; - tensor text_encoder_text_model_encoder_layers_25_layer_norm2_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_25_layer_norm2_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2246984448)))]; - tensor text_encoder_text_model_encoder_layers_25_layer_norm2_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_25_layer_norm2_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2246989632)))]; - tensor text_encoder_text_model_encoder_layers_25_mlp_fc1_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_25_mlp_fc1_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2246994816)))]; - tensor text_encoder_text_model_encoder_layers_25_mlp_fc1_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_25_mlp_fc1_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2247015360)))]; - tensor text_encoder_text_model_encoder_layers_25_mlp_fc2_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_25_mlp_fc2_bias"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(2273229824)))]; - tensor text_encoder_text_model_encoder_layers_25_mlp_fc2_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_25_mlp_fc2_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2273235008)))]; - tensor text_encoder_text_model_encoder_layers_26_layer_norm1_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_26_layer_norm1_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2299449472)))]; - tensor text_encoder_text_model_encoder_layers_26_layer_norm1_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_26_layer_norm1_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2299454656)))]; - tensor text_encoder_text_model_encoder_layers_26_self_attn_q_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_26_self_attn_q_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2299459840)))]; - tensor text_encoder_text_model_encoder_layers_26_self_attn_q_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_26_self_attn_q_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2299465024)))]; - tensor text_encoder_text_model_encoder_layers_26_self_attn_k_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_26_self_attn_k_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2306018688)))]; - tensor text_encoder_text_model_encoder_layers_26_self_attn_k_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_26_self_attn_k_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2306023872)))]; - tensor text_encoder_text_model_encoder_layers_26_self_attn_v_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_26_self_attn_v_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2312577536)))]; - tensor text_encoder_text_model_encoder_layers_26_self_attn_v_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_26_self_attn_v_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2312582720)))]; - tensor text_encoder_text_model_encoder_layers_26_self_attn_out_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_26_self_attn_out_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2319136384)))]; - tensor text_encoder_text_model_encoder_layers_26_self_attn_out_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_26_self_attn_out_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2319141568)))]; - tensor text_encoder_text_model_encoder_layers_26_layer_norm2_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_26_layer_norm2_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2325695232)))]; - tensor text_encoder_text_model_encoder_layers_26_layer_norm2_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_26_layer_norm2_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2325700416)))]; - tensor 
text_encoder_text_model_encoder_layers_26_mlp_fc1_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_26_mlp_fc1_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2325705600)))]; - tensor text_encoder_text_model_encoder_layers_26_mlp_fc1_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_26_mlp_fc1_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2325726144)))]; - tensor text_encoder_text_model_encoder_layers_26_mlp_fc2_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_26_mlp_fc2_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2351940608)))]; - tensor text_encoder_text_model_encoder_layers_26_mlp_fc2_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_26_mlp_fc2_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2351945792)))]; - tensor text_encoder_text_model_encoder_layers_27_layer_norm1_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_27_layer_norm1_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2378160256)))]; - tensor text_encoder_text_model_encoder_layers_27_layer_norm1_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_27_layer_norm1_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2378165440)))]; - tensor text_encoder_text_model_encoder_layers_27_self_attn_q_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_27_self_attn_q_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2378170624)))]; - tensor text_encoder_text_model_encoder_layers_27_self_attn_q_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_27_self_attn_q_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2378175808)))]; - tensor text_encoder_text_model_encoder_layers_27_self_attn_k_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_27_self_attn_k_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2384729472)))]; - tensor text_encoder_text_model_encoder_layers_27_self_attn_k_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_27_self_attn_k_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2384734656)))]; - tensor text_encoder_text_model_encoder_layers_27_self_attn_v_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_27_self_attn_v_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2391288320)))]; - tensor text_encoder_text_model_encoder_layers_27_self_attn_v_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_27_self_attn_v_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2391293504)))]; - tensor text_encoder_text_model_encoder_layers_27_self_attn_out_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_27_self_attn_out_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2397847168)))]; - tensor text_encoder_text_model_encoder_layers_27_self_attn_out_proj_weight = const()[name = 
tensor("text_encoder_text_model_encoder_layers_27_self_attn_out_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2397852352)))]; - tensor text_encoder_text_model_encoder_layers_27_layer_norm2_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_27_layer_norm2_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2404406016)))]; - tensor text_encoder_text_model_encoder_layers_27_layer_norm2_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_27_layer_norm2_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2404411200)))]; - tensor text_encoder_text_model_encoder_layers_27_mlp_fc1_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_27_mlp_fc1_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2404416384)))]; - tensor text_encoder_text_model_encoder_layers_27_mlp_fc1_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_27_mlp_fc1_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2404436928)))]; - tensor text_encoder_text_model_encoder_layers_27_mlp_fc2_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_27_mlp_fc2_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2430651392)))]; - tensor text_encoder_text_model_encoder_layers_27_mlp_fc2_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_27_mlp_fc2_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2430656576)))]; - tensor text_encoder_text_model_encoder_layers_28_layer_norm1_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_28_layer_norm1_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2456871040)))]; - tensor text_encoder_text_model_encoder_layers_28_layer_norm1_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_28_layer_norm1_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2456876224)))]; - tensor text_encoder_text_model_encoder_layers_28_self_attn_q_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_28_self_attn_q_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2456881408)))]; - tensor text_encoder_text_model_encoder_layers_28_self_attn_q_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_28_self_attn_q_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2456886592)))]; - tensor text_encoder_text_model_encoder_layers_28_self_attn_k_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_28_self_attn_k_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2463440256)))]; - tensor text_encoder_text_model_encoder_layers_28_self_attn_k_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_28_self_attn_k_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2463445440)))]; - tensor text_encoder_text_model_encoder_layers_28_self_attn_v_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_28_self_attn_v_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(2469999104)))]; - tensor text_encoder_text_model_encoder_layers_28_self_attn_v_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_28_self_attn_v_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2470004288)))]; - tensor text_encoder_text_model_encoder_layers_28_self_attn_out_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_28_self_attn_out_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2476557952)))]; - tensor text_encoder_text_model_encoder_layers_28_self_attn_out_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_28_self_attn_out_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2476563136)))]; - tensor text_encoder_text_model_encoder_layers_28_layer_norm2_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_28_layer_norm2_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2483116800)))]; - tensor text_encoder_text_model_encoder_layers_28_layer_norm2_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_28_layer_norm2_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2483121984)))]; - tensor text_encoder_text_model_encoder_layers_28_mlp_fc1_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_28_mlp_fc1_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2483127168)))]; - tensor text_encoder_text_model_encoder_layers_28_mlp_fc1_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_28_mlp_fc1_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2483147712)))]; - tensor text_encoder_text_model_encoder_layers_28_mlp_fc2_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_28_mlp_fc2_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2509362176)))]; - tensor text_encoder_text_model_encoder_layers_28_mlp_fc2_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_28_mlp_fc2_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2509367360)))]; - tensor text_encoder_text_model_encoder_layers_29_layer_norm1_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_29_layer_norm1_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2535581824)))]; - tensor text_encoder_text_model_encoder_layers_29_layer_norm1_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_29_layer_norm1_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2535587008)))]; - tensor text_encoder_text_model_encoder_layers_29_self_attn_q_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_29_self_attn_q_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2535592192)))]; - tensor text_encoder_text_model_encoder_layers_29_self_attn_q_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_29_self_attn_q_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2535597376)))]; - tensor text_encoder_text_model_encoder_layers_29_self_attn_k_proj_bias = const()[name = 
tensor("text_encoder_text_model_encoder_layers_29_self_attn_k_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2542151040)))]; - tensor text_encoder_text_model_encoder_layers_29_self_attn_k_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_29_self_attn_k_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2542156224)))]; - tensor text_encoder_text_model_encoder_layers_29_self_attn_v_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_29_self_attn_v_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2548709888)))]; - tensor text_encoder_text_model_encoder_layers_29_self_attn_v_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_29_self_attn_v_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2548715072)))]; - tensor text_encoder_text_model_encoder_layers_29_self_attn_out_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_29_self_attn_out_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2555268736)))]; - tensor text_encoder_text_model_encoder_layers_29_self_attn_out_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_29_self_attn_out_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2555273920)))]; - tensor text_encoder_text_model_encoder_layers_29_layer_norm2_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_29_layer_norm2_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2561827584)))]; - tensor text_encoder_text_model_encoder_layers_29_layer_norm2_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_29_layer_norm2_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2561832768)))]; - tensor text_encoder_text_model_encoder_layers_29_mlp_fc1_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_29_mlp_fc1_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2561837952)))]; - tensor text_encoder_text_model_encoder_layers_29_mlp_fc1_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_29_mlp_fc1_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2561858496)))]; - tensor text_encoder_text_model_encoder_layers_29_mlp_fc2_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_29_mlp_fc2_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2588072960)))]; - tensor text_encoder_text_model_encoder_layers_29_mlp_fc2_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_29_mlp_fc2_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2588078144)))]; - tensor text_encoder_text_model_encoder_layers_30_layer_norm1_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_30_layer_norm1_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2614292608)))]; - tensor text_encoder_text_model_encoder_layers_30_layer_norm1_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_30_layer_norm1_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), 
offset = tensor(2614297792)))]; - tensor text_encoder_text_model_encoder_layers_30_self_attn_q_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_30_self_attn_q_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2614302976)))]; - tensor text_encoder_text_model_encoder_layers_30_self_attn_q_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_30_self_attn_q_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2614308160)))]; - tensor text_encoder_text_model_encoder_layers_30_self_attn_k_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_30_self_attn_k_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2620861824)))]; - tensor text_encoder_text_model_encoder_layers_30_self_attn_k_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_30_self_attn_k_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2620867008)))]; - tensor text_encoder_text_model_encoder_layers_30_self_attn_v_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_30_self_attn_v_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2627420672)))]; - tensor text_encoder_text_model_encoder_layers_30_self_attn_v_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_30_self_attn_v_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2627425856)))]; - tensor text_encoder_text_model_encoder_layers_30_self_attn_out_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_30_self_attn_out_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2633979520)))]; - tensor text_encoder_text_model_encoder_layers_30_self_attn_out_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_30_self_attn_out_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2633984704)))]; - tensor text_encoder_text_model_encoder_layers_30_layer_norm2_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_30_layer_norm2_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2640538368)))]; - tensor text_encoder_text_model_encoder_layers_30_layer_norm2_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_30_layer_norm2_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2640543552)))]; - tensor text_encoder_text_model_encoder_layers_30_mlp_fc1_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_30_mlp_fc1_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2640548736)))]; - tensor text_encoder_text_model_encoder_layers_30_mlp_fc1_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_30_mlp_fc1_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2640569280)))]; - tensor text_encoder_text_model_encoder_layers_30_mlp_fc2_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_30_mlp_fc2_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2666783744)))]; - tensor text_encoder_text_model_encoder_layers_30_mlp_fc2_weight = const()[name 
= tensor("text_encoder_text_model_encoder_layers_30_mlp_fc2_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2666788928)))]; - tensor text_encoder_text_model_encoder_layers_31_layer_norm1_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_31_layer_norm1_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2693003392)))]; - tensor text_encoder_text_model_encoder_layers_31_layer_norm1_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_31_layer_norm1_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2693008576)))]; - tensor text_encoder_text_model_encoder_layers_31_self_attn_q_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_31_self_attn_q_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2693013760)))]; - tensor text_encoder_text_model_encoder_layers_31_self_attn_q_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_31_self_attn_q_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2693018944)))]; - tensor text_encoder_text_model_encoder_layers_31_self_attn_k_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_31_self_attn_k_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2699572608)))]; - tensor text_encoder_text_model_encoder_layers_31_self_attn_k_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_31_self_attn_k_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2699577792)))]; - tensor text_encoder_text_model_encoder_layers_31_self_attn_v_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_31_self_attn_v_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2706131456)))]; - tensor text_encoder_text_model_encoder_layers_31_self_attn_v_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_31_self_attn_v_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2706136640)))]; - tensor text_encoder_text_model_encoder_layers_31_self_attn_out_proj_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_31_self_attn_out_proj_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2712690304)))]; - tensor text_encoder_text_model_encoder_layers_31_self_attn_out_proj_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_31_self_attn_out_proj_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2712695488)))]; - tensor text_encoder_text_model_encoder_layers_31_layer_norm2_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_31_layer_norm2_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2719249152)))]; - tensor text_encoder_text_model_encoder_layers_31_layer_norm2_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_31_layer_norm2_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2719254336)))]; - tensor text_encoder_text_model_encoder_layers_31_mlp_fc1_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_31_mlp_fc1_bias"), val = tensor(BLOBFILE(path 
= tensor("@model_path/weights/weight.bin"), offset = tensor(2719259520)))]; - tensor text_encoder_text_model_encoder_layers_31_mlp_fc1_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_31_mlp_fc1_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2719280064)))]; - tensor text_encoder_text_model_encoder_layers_31_mlp_fc2_bias = const()[name = tensor("text_encoder_text_model_encoder_layers_31_mlp_fc2_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2745494528)))]; - tensor text_encoder_text_model_encoder_layers_31_mlp_fc2_weight = const()[name = tensor("text_encoder_text_model_encoder_layers_31_mlp_fc2_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2745499712)))]; - tensor text_encoder_text_model_final_layer_norm_bias = const()[name = tensor("text_encoder_text_model_final_layer_norm_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2771714176)))]; - tensor text_encoder_text_model_final_layer_norm_weight = const()[name = tensor("text_encoder_text_model_final_layer_norm_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2771719360)))]; - tensor text_encoder_text_projection_weight = const()[name = tensor("text_encoder_text_projection_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2771724544)))]; tensor var_5 = const()[name = tensor("op_5"), val = tensor(-1)]; tensor var_6 = const()[name = tensor("op_6"), val = tensor(false)]; - tensor var_12 = const()[name = tensor("op_12"), val = tensor(0x1.4f8b58p-17)]; tensor inputs_embeds_axis_0 = const()[name = tensor("inputs_embeds_axis_0"), val = tensor(0)]; tensor inputs_embeds_batch_dims_0 = const()[name = tensor("inputs_embeds_batch_dims_0"), val = tensor(0)]; - tensor inputs_embeds = gather(axis = inputs_embeds_axis_0, batch_dims = inputs_embeds_batch_dims_0, indices = input_ids, x = text_encoder_text_model_embeddings_token_embedding_weight)[name = tensor("inputs_embeds")]; - tensor position_embeddings = const()[name = tensor("position_embeddings"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2778278208)))]; - tensor input_3 = add(x = inputs_embeds, y = position_embeddings)[name = tensor("input_3")]; - tensor causal_attention_mask = const()[name = tensor("causal_attention_mask"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2778672512)))]; + tensor text_encoder_text_model_embeddings_token_embedding_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_embeddings_token_embedding_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(64)))]; + tensor inputs_embeds_cast = gather(axis = inputs_embeds_axis_0, batch_dims = inputs_embeds_batch_dims_0, indices = input_ids, x = text_encoder_text_model_embeddings_token_embedding_weight_to_fp16)[name = tensor("inputs_embeds_cast")]; + tensor position_embeddings_to_fp16 = const()[name = tensor("position_embeddings_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(126484608)))]; + tensor input_3_cast = add(x = inputs_embeds_cast, y = position_embeddings_to_fp16)[name = tensor("input_3_cast")]; tensor hidden_states_1_axes_0 = const()[name = tensor("hidden_states_1_axes_0"), val = tensor([-1])]; - tensor hidden_states_1 = 
layer_norm(axes = hidden_states_1_axes_0, beta = text_encoder_text_model_encoder_layers_0_layer_norm1_bias, epsilon = var_12, gamma = text_encoder_text_model_encoder_layers_0_layer_norm1_weight, x = input_3)[name = tensor("hidden_states_1")]; - tensor var_128 = linear(bias = text_encoder_text_model_encoder_layers_0_self_attn_q_proj_bias, weight = text_encoder_text_model_encoder_layers_0_self_attn_q_proj_weight, x = hidden_states_1)[name = tensor("op_128")]; - tensor var_129 = const()[name = tensor("op_129"), val = tensor(0x1p-3)]; - tensor tensor_5 = mul(x = var_128, y = var_129)[name = tensor("tensor_5")]; - tensor tensor_1 = linear(bias = text_encoder_text_model_encoder_layers_0_self_attn_k_proj_bias, weight = text_encoder_text_model_encoder_layers_0_self_attn_k_proj_weight, x = hidden_states_1)[name = tensor("tensor_1")]; - tensor var_134 = const()[name = tensor("op_134"), val = tensor([1, -1, 20, 64])]; - tensor var_135 = reshape(shape = var_134, x = tensor_1)[name = tensor("op_135")]; - tensor var_136_perm_0 = const()[name = tensor("op_136_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor tensor_3 = linear(bias = text_encoder_text_model_encoder_layers_0_self_attn_v_proj_bias, weight = text_encoder_text_model_encoder_layers_0_self_attn_v_proj_weight, x = hidden_states_1)[name = tensor("tensor_3")]; - tensor var_141 = const()[name = tensor("op_141"), val = tensor([1, -1, 20, 64])]; - tensor var_142 = reshape(shape = var_141, x = tensor_3)[name = tensor("op_142")]; - tensor var_143_perm_0 = const()[name = tensor("op_143_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_150 = const()[name = tensor("op_150"), val = tensor([1, 77, 20, 64])]; - tensor var_151 = reshape(shape = var_150, x = tensor_5)[name = tensor("op_151")]; - tensor var_152_perm_0 = const()[name = tensor("op_152_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_154 = const()[name = tensor("op_154"), val = tensor([20, -1, 64])]; - tensor transpose_158 = transpose(perm = var_152_perm_0, x = var_151)[name = tensor("transpose_158")]; - tensor query_states_1 = reshape(shape = var_154, x = transpose_158)[name = tensor("query_states_1")]; + tensor text_encoder_text_model_encoder_layers_0_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_0_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(126681792)))]; + tensor text_encoder_text_model_encoder_layers_0_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_0_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(126684416)))]; + tensor var_13_to_fp16 = const()[name = tensor("op_13_to_fp16"), val = tensor(0x1.5p-17)]; + tensor hidden_states_1_cast = layer_norm(axes = hidden_states_1_axes_0, beta = text_encoder_text_model_encoder_layers_0_layer_norm1_bias_to_fp16, epsilon = var_13_to_fp16, gamma = text_encoder_text_model_encoder_layers_0_layer_norm1_weight_to_fp16, x = input_3_cast)[name = tensor("hidden_states_1_cast")]; + tensor text_encoder_text_model_encoder_layers_0_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_0_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(126687040)))]; + tensor text_encoder_text_model_encoder_layers_0_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_0_self_attn_q_proj_bias_to_fp16"), val = 
tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(129963904)))]; + tensor var_130_cast = linear(bias = text_encoder_text_model_encoder_layers_0_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_0_self_attn_q_proj_weight_to_fp16, x = hidden_states_1_cast)[name = tensor("op_130_cast")]; + tensor var_131_to_fp16 = const()[name = tensor("op_131_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_5_cast = mul(x = var_130_cast, y = var_131_to_fp16)[name = tensor("tensor_5_cast")]; + tensor text_encoder_text_model_encoder_layers_0_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_0_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(129966528)))]; + tensor text_encoder_text_model_encoder_layers_0_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_0_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(133243392)))]; + tensor tensor_1_cast = linear(bias = text_encoder_text_model_encoder_layers_0_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_0_self_attn_k_proj_weight_to_fp16, x = hidden_states_1_cast)[name = tensor("tensor_1_cast")]; + tensor var_136 = const()[name = tensor("op_136"), val = tensor([1, -1, 20, 64])]; + tensor var_137_cast = reshape(shape = var_136, x = tensor_1_cast)[name = tensor("op_137_cast")]; + tensor var_138_perm_0 = const()[name = tensor("op_138_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_0_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_0_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(133246016)))]; + tensor text_encoder_text_model_encoder_layers_0_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_0_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(136522880)))]; + tensor tensor_3_cast = linear(bias = text_encoder_text_model_encoder_layers_0_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_0_self_attn_v_proj_weight_to_fp16, x = hidden_states_1_cast)[name = tensor("tensor_3_cast")]; + tensor var_143 = const()[name = tensor("op_143"), val = tensor([1, -1, 20, 64])]; + tensor var_144_cast = reshape(shape = var_143, x = tensor_3_cast)[name = tensor("op_144_cast")]; + tensor var_145_perm_0 = const()[name = tensor("op_145_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_152 = const()[name = tensor("op_152"), val = tensor([1, 77, 20, 64])]; + tensor var_153_cast = reshape(shape = var_152, x = tensor_5_cast)[name = tensor("op_153_cast")]; + tensor var_154_perm_0 = const()[name = tensor("op_154_perm_0"), val = tensor([0, 2, 1, 3])]; tensor var_156 = const()[name = tensor("op_156"), val = tensor([20, -1, 64])]; - tensor transpose_160 = transpose(perm = var_136_perm_0, x = var_135)[name = tensor("transpose_160")]; - tensor key_states_3 = reshape(shape = var_156, x = transpose_160)[name = tensor("key_states_3")]; + tensor transpose_158 = transpose(perm = var_154_perm_0, x = var_153_cast)[name = tensor("transpose_158")]; + tensor query_states_1_cast = reshape(shape = var_156, x = transpose_158)[name = tensor("query_states_1_cast")]; tensor var_158 = const()[name = 
tensor("op_158"), val = tensor([20, -1, 64])]; - tensor transpose_159 = transpose(perm = var_143_perm_0, x = var_142)[name = tensor("transpose_159")]; - tensor value_states_3 = reshape(shape = var_158, x = transpose_159)[name = tensor("value_states_3")]; - tensor var_161_perm_0 = const()[name = tensor("op_161_perm_0"), val = tensor([0, 2, 1])]; + tensor transpose_160 = transpose(perm = var_138_perm_0, x = var_137_cast)[name = tensor("transpose_160")]; + tensor key_states_3_cast = reshape(shape = var_158, x = transpose_160)[name = tensor("key_states_3_cast")]; + tensor var_160 = const()[name = tensor("op_160"), val = tensor([20, -1, 64])]; + tensor transpose_159 = transpose(perm = var_145_perm_0, x = var_144_cast)[name = tensor("transpose_159")]; + tensor value_states_3_cast = reshape(shape = var_160, x = transpose_159)[name = tensor("value_states_3_cast")]; + tensor var_163_perm_0 = const()[name = tensor("op_163_perm_0"), val = tensor([0, 2, 1])]; tensor attn_weights_1_transpose_x_0 = const()[name = tensor("attn_weights_1_transpose_x_0"), val = tensor(false)]; tensor attn_weights_1_transpose_y_0 = const()[name = tensor("attn_weights_1_transpose_y_0"), val = tensor(false)]; - tensor transpose_157 = transpose(perm = var_161_perm_0, x = key_states_3)[name = tensor("transpose_157")]; - tensor attn_weights_1 = matmul(transpose_x = attn_weights_1_transpose_x_0, transpose_y = attn_weights_1_transpose_y_0, x = query_states_1, y = transpose_157)[name = tensor("attn_weights_1")]; - tensor var_163 = const()[name = tensor("op_163"), val = tensor([1, 20, 77, 77])]; - tensor var_164 = reshape(shape = var_163, x = attn_weights_1)[name = tensor("op_164")]; - tensor attn_weights_3 = add(x = var_164, y = causal_attention_mask)[name = tensor("attn_weights_3")]; - tensor var_169 = const()[name = tensor("op_169"), val = tensor([20, 77, 77])]; - tensor input_5 = reshape(shape = var_169, x = attn_weights_3)[name = tensor("input_5")]; - tensor input_7 = softmax(axis = var_5, x = input_5)[name = tensor("input_7")]; + tensor transpose_157 = transpose(perm = var_163_perm_0, x = key_states_3_cast)[name = tensor("transpose_157")]; + tensor attn_weights_1_cast = matmul(transpose_x = attn_weights_1_transpose_x_0, transpose_y = attn_weights_1_transpose_y_0, x = query_states_1_cast, y = transpose_157)[name = tensor("attn_weights_1_cast")]; + tensor var_165 = const()[name = tensor("op_165"), val = tensor([1, 20, 77, 77])]; + tensor var_166_cast = reshape(shape = var_165, x = attn_weights_1_cast)[name = tensor("op_166_cast")]; + tensor causal_attention_mask_to_fp16 = const()[name = tensor("causal_attention_mask_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(136525504)))]; + tensor attn_weights_3_cast = add(x = var_166_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_3_cast")]; + tensor var_171 = const()[name = tensor("op_171"), val = tensor([20, 77, 77])]; + tensor input_5_cast = reshape(shape = var_171, x = attn_weights_3_cast)[name = tensor("input_5_cast")]; + tensor input_7_cast = softmax(axis = var_5, x = input_5_cast)[name = tensor("input_7_cast")]; tensor attn_output_1_transpose_x_0 = const()[name = tensor("attn_output_1_transpose_x_0"), val = tensor(false)]; tensor attn_output_1_transpose_y_0 = const()[name = tensor("attn_output_1_transpose_y_0"), val = tensor(false)]; - tensor attn_output_1 = matmul(transpose_x = attn_output_1_transpose_x_0, transpose_y = attn_output_1_transpose_y_0, x = input_7, y = value_states_3)[name = 
tensor("attn_output_1")]; - tensor var_174 = const()[name = tensor("op_174"), val = tensor([1, 20, 77, 64])]; - tensor attn_output_3 = reshape(shape = var_174, x = attn_output_1)[name = tensor("attn_output_3")]; + tensor attn_output_1_cast = matmul(transpose_x = attn_output_1_transpose_x_0, transpose_y = attn_output_1_transpose_y_0, x = input_7_cast, y = value_states_3_cast)[name = tensor("attn_output_1_cast")]; + tensor var_176 = const()[name = tensor("op_176"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_3_cast = reshape(shape = var_176, x = attn_output_1_cast)[name = tensor("attn_output_3_cast")]; tensor attn_output_5_perm_0 = const()[name = tensor("attn_output_5_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_177 = const()[name = tensor("op_177"), val = tensor([1, 77, 1280])]; - tensor transpose_156 = transpose(perm = attn_output_5_perm_0, x = attn_output_3)[name = tensor("transpose_156")]; - tensor input_9 = reshape(shape = var_177, x = transpose_156)[name = tensor("input_9")]; - tensor hidden_states_3 = linear(bias = text_encoder_text_model_encoder_layers_0_self_attn_out_proj_bias, weight = text_encoder_text_model_encoder_layers_0_self_attn_out_proj_weight, x = input_9)[name = tensor("hidden_states_3")]; - tensor input_11 = add(x = input_3, y = hidden_states_3)[name = tensor("input_11")]; + tensor var_179 = const()[name = tensor("op_179"), val = tensor([1, 77, 1280])]; + tensor transpose_156 = transpose(perm = attn_output_5_perm_0, x = attn_output_3_cast)[name = tensor("transpose_156")]; + tensor input_9_cast = reshape(shape = var_179, x = transpose_156)[name = tensor("input_9_cast")]; + tensor text_encoder_text_model_encoder_layers_0_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_0_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(136537472)))]; + tensor text_encoder_text_model_encoder_layers_0_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_0_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(139814336)))]; + tensor hidden_states_3_cast = linear(bias = text_encoder_text_model_encoder_layers_0_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_0_self_attn_out_proj_weight_to_fp16, x = input_9_cast)[name = tensor("hidden_states_3_cast")]; + tensor input_11_cast = add(x = input_3_cast, y = hidden_states_3_cast)[name = tensor("input_11_cast")]; tensor input_13_axes_0 = const()[name = tensor("input_13_axes_0"), val = tensor([-1])]; - tensor input_13 = layer_norm(axes = input_13_axes_0, beta = text_encoder_text_model_encoder_layers_0_layer_norm2_bias, epsilon = var_12, gamma = text_encoder_text_model_encoder_layers_0_layer_norm2_weight, x = input_11)[name = tensor("input_13")]; - tensor input_15 = linear(bias = text_encoder_text_model_encoder_layers_0_mlp_fc1_bias, weight = text_encoder_text_model_encoder_layers_0_mlp_fc1_weight, x = input_13)[name = tensor("input_15")]; + tensor text_encoder_text_model_encoder_layers_0_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_0_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(139816960)))]; + tensor text_encoder_text_model_encoder_layers_0_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_0_layer_norm2_bias_to_fp16"), 
val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(139819584)))]; + tensor input_13_cast = layer_norm(axes = input_13_axes_0, beta = text_encoder_text_model_encoder_layers_0_layer_norm2_bias_to_fp16, epsilon = var_13_to_fp16, gamma = text_encoder_text_model_encoder_layers_0_layer_norm2_weight_to_fp16, x = input_11_cast)[name = tensor("input_13_cast")]; + tensor text_encoder_text_model_encoder_layers_0_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_0_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(139822208)))]; + tensor text_encoder_text_model_encoder_layers_0_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_0_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(152929472)))]; + tensor input_15_cast = linear(bias = text_encoder_text_model_encoder_layers_0_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_0_mlp_fc1_weight_to_fp16, x = input_13_cast)[name = tensor("input_15_cast")]; tensor input_17_mode_0 = const()[name = tensor("input_17_mode_0"), val = tensor("EXACT")]; - tensor input_17 = gelu(mode = input_17_mode_0, x = input_15)[name = tensor("input_17")]; - tensor hidden_states_5 = linear(bias = text_encoder_text_model_encoder_layers_0_mlp_fc2_bias, weight = text_encoder_text_model_encoder_layers_0_mlp_fc2_weight, x = input_17)[name = tensor("hidden_states_5")]; - tensor input_19 = add(x = input_11, y = hidden_states_5)[name = tensor("input_19")]; + tensor input_17_cast = gelu(mode = input_17_mode_0, x = input_15_cast)[name = tensor("input_17_cast")]; + tensor text_encoder_text_model_encoder_layers_0_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_0_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(152939776)))]; + tensor text_encoder_text_model_encoder_layers_0_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_0_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(166047040)))]; + tensor hidden_states_5_cast = linear(bias = text_encoder_text_model_encoder_layers_0_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_0_mlp_fc2_weight_to_fp16, x = input_17_cast)[name = tensor("hidden_states_5_cast")]; + tensor input_19_cast = add(x = input_11_cast, y = hidden_states_5_cast)[name = tensor("input_19_cast")]; tensor hidden_states_7_axes_0 = const()[name = tensor("hidden_states_7_axes_0"), val = tensor([-1])]; - tensor hidden_states_7 = layer_norm(axes = hidden_states_7_axes_0, beta = text_encoder_text_model_encoder_layers_1_layer_norm1_bias, epsilon = var_12, gamma = text_encoder_text_model_encoder_layers_1_layer_norm1_weight, x = input_19)[name = tensor("hidden_states_7")]; - tensor var_215 = linear(bias = text_encoder_text_model_encoder_layers_1_self_attn_q_proj_bias, weight = text_encoder_text_model_encoder_layers_1_self_attn_q_proj_weight, x = hidden_states_7)[name = tensor("op_215")]; - tensor var_216 = const()[name = tensor("op_216"), val = tensor(0x1p-3)]; - tensor tensor_11 = mul(x = var_215, y = var_216)[name = tensor("tensor_11")]; - tensor tensor_7 = linear(bias = text_encoder_text_model_encoder_layers_1_self_attn_k_proj_bias, weight = text_encoder_text_model_encoder_layers_1_self_attn_k_proj_weight, x = 
hidden_states_7)[name = tensor("tensor_7")]; - tensor var_221 = const()[name = tensor("op_221"), val = tensor([1, -1, 20, 64])]; - tensor var_222 = reshape(shape = var_221, x = tensor_7)[name = tensor("op_222")]; - tensor var_223_perm_0 = const()[name = tensor("op_223_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor tensor_9 = linear(bias = text_encoder_text_model_encoder_layers_1_self_attn_v_proj_bias, weight = text_encoder_text_model_encoder_layers_1_self_attn_v_proj_weight, x = hidden_states_7)[name = tensor("tensor_9")]; - tensor var_228 = const()[name = tensor("op_228"), val = tensor([1, -1, 20, 64])]; - tensor var_229 = reshape(shape = var_228, x = tensor_9)[name = tensor("op_229")]; - tensor var_230_perm_0 = const()[name = tensor("op_230_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_237 = const()[name = tensor("op_237"), val = tensor([1, 77, 20, 64])]; - tensor var_238 = reshape(shape = var_237, x = tensor_11)[name = tensor("op_238")]; - tensor var_239_perm_0 = const()[name = tensor("op_239_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_241 = const()[name = tensor("op_241"), val = tensor([20, -1, 64])]; - tensor transpose_153 = transpose(perm = var_239_perm_0, x = var_238)[name = tensor("transpose_153")]; - tensor query_states_3 = reshape(shape = var_241, x = transpose_153)[name = tensor("query_states_3")]; + tensor text_encoder_text_model_encoder_layers_1_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_1_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(166049664)))]; + tensor text_encoder_text_model_encoder_layers_1_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_1_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(166052288)))]; + tensor hidden_states_7_cast = layer_norm(axes = hidden_states_7_axes_0, beta = text_encoder_text_model_encoder_layers_1_layer_norm1_bias_to_fp16, epsilon = var_13_to_fp16, gamma = text_encoder_text_model_encoder_layers_1_layer_norm1_weight_to_fp16, x = input_19_cast)[name = tensor("hidden_states_7_cast")]; + tensor text_encoder_text_model_encoder_layers_1_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_1_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(166054912)))]; + tensor text_encoder_text_model_encoder_layers_1_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_1_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(169331776)))]; + tensor var_217_cast = linear(bias = text_encoder_text_model_encoder_layers_1_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_1_self_attn_q_proj_weight_to_fp16, x = hidden_states_7_cast)[name = tensor("op_217_cast")]; + tensor var_218_to_fp16 = const()[name = tensor("op_218_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_11_cast = mul(x = var_217_cast, y = var_218_to_fp16)[name = tensor("tensor_11_cast")]; + tensor text_encoder_text_model_encoder_layers_1_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_1_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(169334400)))]; + tensor 
text_encoder_text_model_encoder_layers_1_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_1_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(172611264)))]; + tensor tensor_7_cast = linear(bias = text_encoder_text_model_encoder_layers_1_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_1_self_attn_k_proj_weight_to_fp16, x = hidden_states_7_cast)[name = tensor("tensor_7_cast")]; + tensor var_223 = const()[name = tensor("op_223"), val = tensor([1, -1, 20, 64])]; + tensor var_224_cast = reshape(shape = var_223, x = tensor_7_cast)[name = tensor("op_224_cast")]; + tensor var_225_perm_0 = const()[name = tensor("op_225_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_1_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_1_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(172613888)))]; + tensor text_encoder_text_model_encoder_layers_1_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_1_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(175890752)))]; + tensor tensor_9_cast = linear(bias = text_encoder_text_model_encoder_layers_1_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_1_self_attn_v_proj_weight_to_fp16, x = hidden_states_7_cast)[name = tensor("tensor_9_cast")]; + tensor var_230 = const()[name = tensor("op_230"), val = tensor([1, -1, 20, 64])]; + tensor var_231_cast = reshape(shape = var_230, x = tensor_9_cast)[name = tensor("op_231_cast")]; + tensor var_232_perm_0 = const()[name = tensor("op_232_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_239 = const()[name = tensor("op_239"), val = tensor([1, 77, 20, 64])]; + tensor var_240_cast = reshape(shape = var_239, x = tensor_11_cast)[name = tensor("op_240_cast")]; + tensor var_241_perm_0 = const()[name = tensor("op_241_perm_0"), val = tensor([0, 2, 1, 3])]; tensor var_243 = const()[name = tensor("op_243"), val = tensor([20, -1, 64])]; - tensor transpose_155 = transpose(perm = var_223_perm_0, x = var_222)[name = tensor("transpose_155")]; - tensor key_states_7 = reshape(shape = var_243, x = transpose_155)[name = tensor("key_states_7")]; + tensor transpose_153 = transpose(perm = var_241_perm_0, x = var_240_cast)[name = tensor("transpose_153")]; + tensor query_states_3_cast = reshape(shape = var_243, x = transpose_153)[name = tensor("query_states_3_cast")]; tensor var_245 = const()[name = tensor("op_245"), val = tensor([20, -1, 64])]; - tensor transpose_154 = transpose(perm = var_230_perm_0, x = var_229)[name = tensor("transpose_154")]; - tensor value_states_7 = reshape(shape = var_245, x = transpose_154)[name = tensor("value_states_7")]; - tensor var_248_perm_0 = const()[name = tensor("op_248_perm_0"), val = tensor([0, 2, 1])]; + tensor transpose_155 = transpose(perm = var_225_perm_0, x = var_224_cast)[name = tensor("transpose_155")]; + tensor key_states_7_cast = reshape(shape = var_245, x = transpose_155)[name = tensor("key_states_7_cast")]; + tensor var_247 = const()[name = tensor("op_247"), val = tensor([20, -1, 64])]; + tensor transpose_154 = transpose(perm = var_232_perm_0, x = var_231_cast)[name = tensor("transpose_154")]; + tensor value_states_7_cast = reshape(shape = var_247, x = transpose_154)[name = 
tensor("value_states_7_cast")]; + tensor var_250_perm_0 = const()[name = tensor("op_250_perm_0"), val = tensor([0, 2, 1])]; tensor attn_weights_7_transpose_x_0 = const()[name = tensor("attn_weights_7_transpose_x_0"), val = tensor(false)]; tensor attn_weights_7_transpose_y_0 = const()[name = tensor("attn_weights_7_transpose_y_0"), val = tensor(false)]; - tensor transpose_152 = transpose(perm = var_248_perm_0, x = key_states_7)[name = tensor("transpose_152")]; - tensor attn_weights_7 = matmul(transpose_x = attn_weights_7_transpose_x_0, transpose_y = attn_weights_7_transpose_y_0, x = query_states_3, y = transpose_152)[name = tensor("attn_weights_7")]; - tensor var_250 = const()[name = tensor("op_250"), val = tensor([1, 20, 77, 77])]; - tensor var_251 = reshape(shape = var_250, x = attn_weights_7)[name = tensor("op_251")]; - tensor attn_weights_9 = add(x = var_251, y = causal_attention_mask)[name = tensor("attn_weights_9")]; - tensor var_256 = const()[name = tensor("op_256"), val = tensor([20, 77, 77])]; - tensor input_21 = reshape(shape = var_256, x = attn_weights_9)[name = tensor("input_21")]; - tensor input_23 = softmax(axis = var_5, x = input_21)[name = tensor("input_23")]; + tensor transpose_152 = transpose(perm = var_250_perm_0, x = key_states_7_cast)[name = tensor("transpose_152")]; + tensor attn_weights_7_cast = matmul(transpose_x = attn_weights_7_transpose_x_0, transpose_y = attn_weights_7_transpose_y_0, x = query_states_3_cast, y = transpose_152)[name = tensor("attn_weights_7_cast")]; + tensor var_252 = const()[name = tensor("op_252"), val = tensor([1, 20, 77, 77])]; + tensor var_253_cast = reshape(shape = var_252, x = attn_weights_7_cast)[name = tensor("op_253_cast")]; + tensor attn_weights_9_cast = add(x = var_253_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_9_cast")]; + tensor var_258 = const()[name = tensor("op_258"), val = tensor([20, 77, 77])]; + tensor input_21_cast = reshape(shape = var_258, x = attn_weights_9_cast)[name = tensor("input_21_cast")]; + tensor input_23_cast = softmax(axis = var_5, x = input_21_cast)[name = tensor("input_23_cast")]; tensor attn_output_7_transpose_x_0 = const()[name = tensor("attn_output_7_transpose_x_0"), val = tensor(false)]; tensor attn_output_7_transpose_y_0 = const()[name = tensor("attn_output_7_transpose_y_0"), val = tensor(false)]; - tensor attn_output_7 = matmul(transpose_x = attn_output_7_transpose_x_0, transpose_y = attn_output_7_transpose_y_0, x = input_23, y = value_states_7)[name = tensor("attn_output_7")]; - tensor var_261 = const()[name = tensor("op_261"), val = tensor([1, 20, 77, 64])]; - tensor attn_output_9 = reshape(shape = var_261, x = attn_output_7)[name = tensor("attn_output_9")]; + tensor attn_output_7_cast = matmul(transpose_x = attn_output_7_transpose_x_0, transpose_y = attn_output_7_transpose_y_0, x = input_23_cast, y = value_states_7_cast)[name = tensor("attn_output_7_cast")]; + tensor var_263 = const()[name = tensor("op_263"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_9_cast = reshape(shape = var_263, x = attn_output_7_cast)[name = tensor("attn_output_9_cast")]; tensor attn_output_11_perm_0 = const()[name = tensor("attn_output_11_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_264 = const()[name = tensor("op_264"), val = tensor([1, 77, 1280])]; - tensor transpose_151 = transpose(perm = attn_output_11_perm_0, x = attn_output_9)[name = tensor("transpose_151")]; - tensor input_25 = reshape(shape = var_264, x = transpose_151)[name = tensor("input_25")]; - tensor hidden_states_9 = 
linear(bias = text_encoder_text_model_encoder_layers_1_self_attn_out_proj_bias, weight = text_encoder_text_model_encoder_layers_1_self_attn_out_proj_weight, x = input_25)[name = tensor("hidden_states_9")]; - tensor input_27 = add(x = input_19, y = hidden_states_9)[name = tensor("input_27")]; + tensor var_266 = const()[name = tensor("op_266"), val = tensor([1, 77, 1280])]; + tensor transpose_151 = transpose(perm = attn_output_11_perm_0, x = attn_output_9_cast)[name = tensor("transpose_151")]; + tensor input_25_cast = reshape(shape = var_266, x = transpose_151)[name = tensor("input_25_cast")]; + tensor text_encoder_text_model_encoder_layers_1_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_1_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(175893376)))]; + tensor text_encoder_text_model_encoder_layers_1_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_1_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(179170240)))]; + tensor hidden_states_9_cast = linear(bias = text_encoder_text_model_encoder_layers_1_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_1_self_attn_out_proj_weight_to_fp16, x = input_25_cast)[name = tensor("hidden_states_9_cast")]; + tensor input_27_cast = add(x = input_19_cast, y = hidden_states_9_cast)[name = tensor("input_27_cast")]; tensor input_29_axes_0 = const()[name = tensor("input_29_axes_0"), val = tensor([-1])]; - tensor input_29 = layer_norm(axes = input_29_axes_0, beta = text_encoder_text_model_encoder_layers_1_layer_norm2_bias, epsilon = var_12, gamma = text_encoder_text_model_encoder_layers_1_layer_norm2_weight, x = input_27)[name = tensor("input_29")]; - tensor input_31 = linear(bias = text_encoder_text_model_encoder_layers_1_mlp_fc1_bias, weight = text_encoder_text_model_encoder_layers_1_mlp_fc1_weight, x = input_29)[name = tensor("input_31")]; + tensor text_encoder_text_model_encoder_layers_1_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_1_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(179172864)))]; + tensor text_encoder_text_model_encoder_layers_1_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_1_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(179175488)))]; + tensor input_29_cast = layer_norm(axes = input_29_axes_0, beta = text_encoder_text_model_encoder_layers_1_layer_norm2_bias_to_fp16, epsilon = var_13_to_fp16, gamma = text_encoder_text_model_encoder_layers_1_layer_norm2_weight_to_fp16, x = input_27_cast)[name = tensor("input_29_cast")]; + tensor text_encoder_text_model_encoder_layers_1_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_1_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(179178112)))]; + tensor text_encoder_text_model_encoder_layers_1_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_1_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(192285376)))]; + tensor input_31_cast = linear(bias = text_encoder_text_model_encoder_layers_1_mlp_fc1_bias_to_fp16, weight = 
text_encoder_text_model_encoder_layers_1_mlp_fc1_weight_to_fp16, x = input_29_cast)[name = tensor("input_31_cast")]; tensor input_33_mode_0 = const()[name = tensor("input_33_mode_0"), val = tensor("EXACT")]; - tensor input_33 = gelu(mode = input_33_mode_0, x = input_31)[name = tensor("input_33")]; - tensor hidden_states_11 = linear(bias = text_encoder_text_model_encoder_layers_1_mlp_fc2_bias, weight = text_encoder_text_model_encoder_layers_1_mlp_fc2_weight, x = input_33)[name = tensor("hidden_states_11")]; - tensor input_35 = add(x = input_27, y = hidden_states_11)[name = tensor("input_35")]; + tensor input_33_cast = gelu(mode = input_33_mode_0, x = input_31_cast)[name = tensor("input_33_cast")]; + tensor text_encoder_text_model_encoder_layers_1_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_1_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(192295680)))]; + tensor text_encoder_text_model_encoder_layers_1_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_1_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(205402944)))]; + tensor hidden_states_11_cast = linear(bias = text_encoder_text_model_encoder_layers_1_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_1_mlp_fc2_weight_to_fp16, x = input_33_cast)[name = tensor("hidden_states_11_cast")]; + tensor input_35_cast = add(x = input_27_cast, y = hidden_states_11_cast)[name = tensor("input_35_cast")]; tensor hidden_states_13_axes_0 = const()[name = tensor("hidden_states_13_axes_0"), val = tensor([-1])]; - tensor hidden_states_13 = layer_norm(axes = hidden_states_13_axes_0, beta = text_encoder_text_model_encoder_layers_2_layer_norm1_bias, epsilon = var_12, gamma = text_encoder_text_model_encoder_layers_2_layer_norm1_weight, x = input_35)[name = tensor("hidden_states_13")]; - tensor var_302 = linear(bias = text_encoder_text_model_encoder_layers_2_self_attn_q_proj_bias, weight = text_encoder_text_model_encoder_layers_2_self_attn_q_proj_weight, x = hidden_states_13)[name = tensor("op_302")]; - tensor var_303 = const()[name = tensor("op_303"), val = tensor(0x1p-3)]; - tensor tensor_17 = mul(x = var_302, y = var_303)[name = tensor("tensor_17")]; - tensor tensor_13 = linear(bias = text_encoder_text_model_encoder_layers_2_self_attn_k_proj_bias, weight = text_encoder_text_model_encoder_layers_2_self_attn_k_proj_weight, x = hidden_states_13)[name = tensor("tensor_13")]; - tensor var_308 = const()[name = tensor("op_308"), val = tensor([1, -1, 20, 64])]; - tensor var_309 = reshape(shape = var_308, x = tensor_13)[name = tensor("op_309")]; - tensor var_310_perm_0 = const()[name = tensor("op_310_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor tensor_15 = linear(bias = text_encoder_text_model_encoder_layers_2_self_attn_v_proj_bias, weight = text_encoder_text_model_encoder_layers_2_self_attn_v_proj_weight, x = hidden_states_13)[name = tensor("tensor_15")]; - tensor var_315 = const()[name = tensor("op_315"), val = tensor([1, -1, 20, 64])]; - tensor var_316 = reshape(shape = var_315, x = tensor_15)[name = tensor("op_316")]; - tensor var_317_perm_0 = const()[name = tensor("op_317_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_324 = const()[name = tensor("op_324"), val = tensor([1, 77, 20, 64])]; - tensor var_325 = reshape(shape = var_324, x = tensor_17)[name = tensor("op_325")]; - tensor var_326_perm_0 = const()[name = tensor("op_326_perm_0"), 
val = tensor([0, 2, 1, 3])]; - tensor var_328 = const()[name = tensor("op_328"), val = tensor([20, -1, 64])]; - tensor transpose_148 = transpose(perm = var_326_perm_0, x = var_325)[name = tensor("transpose_148")]; - tensor query_states_5 = reshape(shape = var_328, x = transpose_148)[name = tensor("query_states_5")]; + tensor text_encoder_text_model_encoder_layers_2_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_2_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(205405568)))]; + tensor text_encoder_text_model_encoder_layers_2_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_2_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(205408192)))]; + tensor hidden_states_13_cast = layer_norm(axes = hidden_states_13_axes_0, beta = text_encoder_text_model_encoder_layers_2_layer_norm1_bias_to_fp16, epsilon = var_13_to_fp16, gamma = text_encoder_text_model_encoder_layers_2_layer_norm1_weight_to_fp16, x = input_35_cast)[name = tensor("hidden_states_13_cast")]; + tensor text_encoder_text_model_encoder_layers_2_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_2_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(205410816)))]; + tensor text_encoder_text_model_encoder_layers_2_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_2_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(208687680)))]; + tensor var_304_cast = linear(bias = text_encoder_text_model_encoder_layers_2_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_2_self_attn_q_proj_weight_to_fp16, x = hidden_states_13_cast)[name = tensor("op_304_cast")]; + tensor var_305_to_fp16 = const()[name = tensor("op_305_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_17_cast = mul(x = var_304_cast, y = var_305_to_fp16)[name = tensor("tensor_17_cast")]; + tensor text_encoder_text_model_encoder_layers_2_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_2_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(208690304)))]; + tensor text_encoder_text_model_encoder_layers_2_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_2_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(211967168)))]; + tensor tensor_13_cast = linear(bias = text_encoder_text_model_encoder_layers_2_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_2_self_attn_k_proj_weight_to_fp16, x = hidden_states_13_cast)[name = tensor("tensor_13_cast")]; + tensor var_310 = const()[name = tensor("op_310"), val = tensor([1, -1, 20, 64])]; + tensor var_311_cast = reshape(shape = var_310, x = tensor_13_cast)[name = tensor("op_311_cast")]; + tensor var_312_perm_0 = const()[name = tensor("op_312_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_2_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_2_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(211969792)))]; + tensor text_encoder_text_model_encoder_layers_2_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_2_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(215246656)))]; + tensor tensor_15_cast = linear(bias = text_encoder_text_model_encoder_layers_2_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_2_self_attn_v_proj_weight_to_fp16, x = hidden_states_13_cast)[name = tensor("tensor_15_cast")]; + tensor var_317 = const()[name = tensor("op_317"), val = tensor([1, -1, 20, 64])]; + tensor var_318_cast = reshape(shape = var_317, x = tensor_15_cast)[name = tensor("op_318_cast")]; + tensor var_319_perm_0 = const()[name = tensor("op_319_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_326 = const()[name = tensor("op_326"), val = tensor([1, 77, 20, 64])]; + tensor var_327_cast = reshape(shape = var_326, x = tensor_17_cast)[name = tensor("op_327_cast")]; + tensor var_328_perm_0 = const()[name = tensor("op_328_perm_0"), val = tensor([0, 2, 1, 3])]; tensor var_330 = const()[name = tensor("op_330"), val = tensor([20, -1, 64])]; - tensor transpose_150 = transpose(perm = var_310_perm_0, x = var_309)[name = tensor("transpose_150")]; - tensor key_states_11 = reshape(shape = var_330, x = transpose_150)[name = tensor("key_states_11")]; + tensor transpose_148 = transpose(perm = var_328_perm_0, x = var_327_cast)[name = tensor("transpose_148")]; + tensor query_states_5_cast = reshape(shape = var_330, x = transpose_148)[name = tensor("query_states_5_cast")]; tensor var_332 = const()[name = tensor("op_332"), val = tensor([20, -1, 64])]; - tensor transpose_149 = transpose(perm = var_317_perm_0, x = var_316)[name = tensor("transpose_149")]; - tensor value_states_11 = reshape(shape = var_332, x = transpose_149)[name = tensor("value_states_11")]; - tensor var_335_perm_0 = const()[name = tensor("op_335_perm_0"), val = tensor([0, 2, 1])]; + tensor transpose_150 = transpose(perm = var_312_perm_0, x = var_311_cast)[name = tensor("transpose_150")]; + tensor key_states_11_cast = reshape(shape = var_332, x = transpose_150)[name = tensor("key_states_11_cast")]; + tensor var_334 = const()[name = tensor("op_334"), val = tensor([20, -1, 64])]; + tensor transpose_149 = transpose(perm = var_319_perm_0, x = var_318_cast)[name = tensor("transpose_149")]; + tensor value_states_11_cast = reshape(shape = var_334, x = transpose_149)[name = tensor("value_states_11_cast")]; + tensor var_337_perm_0 = const()[name = tensor("op_337_perm_0"), val = tensor([0, 2, 1])]; tensor attn_weights_13_transpose_x_0 = const()[name = tensor("attn_weights_13_transpose_x_0"), val = tensor(false)]; tensor attn_weights_13_transpose_y_0 = const()[name = tensor("attn_weights_13_transpose_y_0"), val = tensor(false)]; - tensor transpose_147 = transpose(perm = var_335_perm_0, x = key_states_11)[name = tensor("transpose_147")]; - tensor attn_weights_13 = matmul(transpose_x = attn_weights_13_transpose_x_0, transpose_y = attn_weights_13_transpose_y_0, x = query_states_5, y = transpose_147)[name = tensor("attn_weights_13")]; - tensor var_337 = const()[name = tensor("op_337"), val = tensor([1, 20, 77, 77])]; - tensor var_338 = reshape(shape = var_337, x = attn_weights_13)[name = tensor("op_338")]; - tensor attn_weights_15 = add(x = var_338, y = causal_attention_mask)[name = tensor("attn_weights_15")]; - tensor var_343 = const()[name = tensor("op_343"), val = tensor([20, 77, 77])]; - tensor input_37 = 
reshape(shape = var_343, x = attn_weights_15)[name = tensor("input_37")]; - tensor input_39 = softmax(axis = var_5, x = input_37)[name = tensor("input_39")]; + tensor transpose_147 = transpose(perm = var_337_perm_0, x = key_states_11_cast)[name = tensor("transpose_147")]; + tensor attn_weights_13_cast = matmul(transpose_x = attn_weights_13_transpose_x_0, transpose_y = attn_weights_13_transpose_y_0, x = query_states_5_cast, y = transpose_147)[name = tensor("attn_weights_13_cast")]; + tensor var_339 = const()[name = tensor("op_339"), val = tensor([1, 20, 77, 77])]; + tensor var_340_cast = reshape(shape = var_339, x = attn_weights_13_cast)[name = tensor("op_340_cast")]; + tensor attn_weights_15_cast = add(x = var_340_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_15_cast")]; + tensor var_345 = const()[name = tensor("op_345"), val = tensor([20, 77, 77])]; + tensor input_37_cast = reshape(shape = var_345, x = attn_weights_15_cast)[name = tensor("input_37_cast")]; + tensor input_39_cast = softmax(axis = var_5, x = input_37_cast)[name = tensor("input_39_cast")]; tensor attn_output_13_transpose_x_0 = const()[name = tensor("attn_output_13_transpose_x_0"), val = tensor(false)]; tensor attn_output_13_transpose_y_0 = const()[name = tensor("attn_output_13_transpose_y_0"), val = tensor(false)]; - tensor attn_output_13 = matmul(transpose_x = attn_output_13_transpose_x_0, transpose_y = attn_output_13_transpose_y_0, x = input_39, y = value_states_11)[name = tensor("attn_output_13")]; - tensor var_348 = const()[name = tensor("op_348"), val = tensor([1, 20, 77, 64])]; - tensor attn_output_15 = reshape(shape = var_348, x = attn_output_13)[name = tensor("attn_output_15")]; + tensor attn_output_13_cast = matmul(transpose_x = attn_output_13_transpose_x_0, transpose_y = attn_output_13_transpose_y_0, x = input_39_cast, y = value_states_11_cast)[name = tensor("attn_output_13_cast")]; + tensor var_350 = const()[name = tensor("op_350"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_15_cast = reshape(shape = var_350, x = attn_output_13_cast)[name = tensor("attn_output_15_cast")]; tensor attn_output_17_perm_0 = const()[name = tensor("attn_output_17_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_351 = const()[name = tensor("op_351"), val = tensor([1, 77, 1280])]; - tensor transpose_146 = transpose(perm = attn_output_17_perm_0, x = attn_output_15)[name = tensor("transpose_146")]; - tensor input_41 = reshape(shape = var_351, x = transpose_146)[name = tensor("input_41")]; - tensor hidden_states_15 = linear(bias = text_encoder_text_model_encoder_layers_2_self_attn_out_proj_bias, weight = text_encoder_text_model_encoder_layers_2_self_attn_out_proj_weight, x = input_41)[name = tensor("hidden_states_15")]; - tensor input_43 = add(x = input_35, y = hidden_states_15)[name = tensor("input_43")]; + tensor var_353 = const()[name = tensor("op_353"), val = tensor([1, 77, 1280])]; + tensor transpose_146 = transpose(perm = attn_output_17_perm_0, x = attn_output_15_cast)[name = tensor("transpose_146")]; + tensor input_41_cast = reshape(shape = var_353, x = transpose_146)[name = tensor("input_41_cast")]; + tensor text_encoder_text_model_encoder_layers_2_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_2_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(215249280)))]; + tensor text_encoder_text_model_encoder_layers_2_self_attn_out_proj_bias_to_fp16 = const()[name = 
tensor("text_encoder_text_model_encoder_layers_2_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(218526144)))]; + tensor hidden_states_15_cast = linear(bias = text_encoder_text_model_encoder_layers_2_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_2_self_attn_out_proj_weight_to_fp16, x = input_41_cast)[name = tensor("hidden_states_15_cast")]; + tensor input_43_cast = add(x = input_35_cast, y = hidden_states_15_cast)[name = tensor("input_43_cast")]; tensor input_45_axes_0 = const()[name = tensor("input_45_axes_0"), val = tensor([-1])]; - tensor input_45 = layer_norm(axes = input_45_axes_0, beta = text_encoder_text_model_encoder_layers_2_layer_norm2_bias, epsilon = var_12, gamma = text_encoder_text_model_encoder_layers_2_layer_norm2_weight, x = input_43)[name = tensor("input_45")]; - tensor input_47 = linear(bias = text_encoder_text_model_encoder_layers_2_mlp_fc1_bias, weight = text_encoder_text_model_encoder_layers_2_mlp_fc1_weight, x = input_45)[name = tensor("input_47")]; + tensor text_encoder_text_model_encoder_layers_2_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_2_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(218528768)))]; + tensor text_encoder_text_model_encoder_layers_2_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_2_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(218531392)))]; + tensor input_45_cast = layer_norm(axes = input_45_axes_0, beta = text_encoder_text_model_encoder_layers_2_layer_norm2_bias_to_fp16, epsilon = var_13_to_fp16, gamma = text_encoder_text_model_encoder_layers_2_layer_norm2_weight_to_fp16, x = input_43_cast)[name = tensor("input_45_cast")]; + tensor text_encoder_text_model_encoder_layers_2_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_2_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(218534016)))]; + tensor text_encoder_text_model_encoder_layers_2_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_2_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(231641280)))]; + tensor input_47_cast = linear(bias = text_encoder_text_model_encoder_layers_2_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_2_mlp_fc1_weight_to_fp16, x = input_45_cast)[name = tensor("input_47_cast")]; tensor input_49_mode_0 = const()[name = tensor("input_49_mode_0"), val = tensor("EXACT")]; - tensor input_49 = gelu(mode = input_49_mode_0, x = input_47)[name = tensor("input_49")]; - tensor hidden_states_17 = linear(bias = text_encoder_text_model_encoder_layers_2_mlp_fc2_bias, weight = text_encoder_text_model_encoder_layers_2_mlp_fc2_weight, x = input_49)[name = tensor("hidden_states_17")]; - tensor input_51 = add(x = input_43, y = hidden_states_17)[name = tensor("input_51")]; + tensor input_49_cast = gelu(mode = input_49_mode_0, x = input_47_cast)[name = tensor("input_49_cast")]; + tensor text_encoder_text_model_encoder_layers_2_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_2_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(231651584)))]; + tensor 
text_encoder_text_model_encoder_layers_2_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_2_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(244758848)))]; + tensor hidden_states_17_cast = linear(bias = text_encoder_text_model_encoder_layers_2_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_2_mlp_fc2_weight_to_fp16, x = input_49_cast)[name = tensor("hidden_states_17_cast")]; + tensor input_51_cast = add(x = input_43_cast, y = hidden_states_17_cast)[name = tensor("input_51_cast")]; tensor hidden_states_19_axes_0 = const()[name = tensor("hidden_states_19_axes_0"), val = tensor([-1])]; - tensor hidden_states_19 = layer_norm(axes = hidden_states_19_axes_0, beta = text_encoder_text_model_encoder_layers_3_layer_norm1_bias, epsilon = var_12, gamma = text_encoder_text_model_encoder_layers_3_layer_norm1_weight, x = input_51)[name = tensor("hidden_states_19")]; - tensor var_389 = linear(bias = text_encoder_text_model_encoder_layers_3_self_attn_q_proj_bias, weight = text_encoder_text_model_encoder_layers_3_self_attn_q_proj_weight, x = hidden_states_19)[name = tensor("op_389")]; - tensor var_390 = const()[name = tensor("op_390"), val = tensor(0x1p-3)]; - tensor tensor_23 = mul(x = var_389, y = var_390)[name = tensor("tensor_23")]; - tensor tensor_19 = linear(bias = text_encoder_text_model_encoder_layers_3_self_attn_k_proj_bias, weight = text_encoder_text_model_encoder_layers_3_self_attn_k_proj_weight, x = hidden_states_19)[name = tensor("tensor_19")]; - tensor var_395 = const()[name = tensor("op_395"), val = tensor([1, -1, 20, 64])]; - tensor var_396 = reshape(shape = var_395, x = tensor_19)[name = tensor("op_396")]; - tensor var_397_perm_0 = const()[name = tensor("op_397_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor tensor_21 = linear(bias = text_encoder_text_model_encoder_layers_3_self_attn_v_proj_bias, weight = text_encoder_text_model_encoder_layers_3_self_attn_v_proj_weight, x = hidden_states_19)[name = tensor("tensor_21")]; - tensor var_402 = const()[name = tensor("op_402"), val = tensor([1, -1, 20, 64])]; - tensor var_403 = reshape(shape = var_402, x = tensor_21)[name = tensor("op_403")]; - tensor var_404_perm_0 = const()[name = tensor("op_404_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_411 = const()[name = tensor("op_411"), val = tensor([1, 77, 20, 64])]; - tensor var_412 = reshape(shape = var_411, x = tensor_23)[name = tensor("op_412")]; - tensor var_413_perm_0 = const()[name = tensor("op_413_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_415 = const()[name = tensor("op_415"), val = tensor([20, -1, 64])]; - tensor transpose_143 = transpose(perm = var_413_perm_0, x = var_412)[name = tensor("transpose_143")]; - tensor query_states_7 = reshape(shape = var_415, x = transpose_143)[name = tensor("query_states_7")]; + tensor text_encoder_text_model_encoder_layers_3_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_3_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(244761472)))]; + tensor text_encoder_text_model_encoder_layers_3_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_3_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(244764096)))]; + tensor hidden_states_19_cast = layer_norm(axes = hidden_states_19_axes_0, beta = 
text_encoder_text_model_encoder_layers_3_layer_norm1_bias_to_fp16, epsilon = var_13_to_fp16, gamma = text_encoder_text_model_encoder_layers_3_layer_norm1_weight_to_fp16, x = input_51_cast)[name = tensor("hidden_states_19_cast")]; + tensor text_encoder_text_model_encoder_layers_3_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_3_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(244766720)))]; + tensor text_encoder_text_model_encoder_layers_3_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_3_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(248043584)))]; + tensor var_391_cast = linear(bias = text_encoder_text_model_encoder_layers_3_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_3_self_attn_q_proj_weight_to_fp16, x = hidden_states_19_cast)[name = tensor("op_391_cast")]; + tensor var_392_to_fp16 = const()[name = tensor("op_392_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_23_cast = mul(x = var_391_cast, y = var_392_to_fp16)[name = tensor("tensor_23_cast")]; + tensor text_encoder_text_model_encoder_layers_3_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_3_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(248046208)))]; + tensor text_encoder_text_model_encoder_layers_3_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_3_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(251323072)))]; + tensor tensor_19_cast = linear(bias = text_encoder_text_model_encoder_layers_3_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_3_self_attn_k_proj_weight_to_fp16, x = hidden_states_19_cast)[name = tensor("tensor_19_cast")]; + tensor var_397 = const()[name = tensor("op_397"), val = tensor([1, -1, 20, 64])]; + tensor var_398_cast = reshape(shape = var_397, x = tensor_19_cast)[name = tensor("op_398_cast")]; + tensor var_399_perm_0 = const()[name = tensor("op_399_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_3_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_3_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(251325696)))]; + tensor text_encoder_text_model_encoder_layers_3_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_3_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(254602560)))]; + tensor tensor_21_cast = linear(bias = text_encoder_text_model_encoder_layers_3_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_3_self_attn_v_proj_weight_to_fp16, x = hidden_states_19_cast)[name = tensor("tensor_21_cast")]; + tensor var_404 = const()[name = tensor("op_404"), val = tensor([1, -1, 20, 64])]; + tensor var_405_cast = reshape(shape = var_404, x = tensor_21_cast)[name = tensor("op_405_cast")]; + tensor var_406_perm_0 = const()[name = tensor("op_406_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_413 = const()[name = tensor("op_413"), val = tensor([1, 77, 20, 64])]; + tensor var_414_cast = 
reshape(shape = var_413, x = tensor_23_cast)[name = tensor("op_414_cast")]; + tensor var_415_perm_0 = const()[name = tensor("op_415_perm_0"), val = tensor([0, 2, 1, 3])]; tensor var_417 = const()[name = tensor("op_417"), val = tensor([20, -1, 64])]; - tensor transpose_145 = transpose(perm = var_397_perm_0, x = var_396)[name = tensor("transpose_145")]; - tensor key_states_15 = reshape(shape = var_417, x = transpose_145)[name = tensor("key_states_15")]; + tensor transpose_143 = transpose(perm = var_415_perm_0, x = var_414_cast)[name = tensor("transpose_143")]; + tensor query_states_7_cast = reshape(shape = var_417, x = transpose_143)[name = tensor("query_states_7_cast")]; tensor var_419 = const()[name = tensor("op_419"), val = tensor([20, -1, 64])]; - tensor transpose_144 = transpose(perm = var_404_perm_0, x = var_403)[name = tensor("transpose_144")]; - tensor value_states_15 = reshape(shape = var_419, x = transpose_144)[name = tensor("value_states_15")]; - tensor var_422_perm_0 = const()[name = tensor("op_422_perm_0"), val = tensor([0, 2, 1])]; + tensor transpose_145 = transpose(perm = var_399_perm_0, x = var_398_cast)[name = tensor("transpose_145")]; + tensor key_states_15_cast = reshape(shape = var_419, x = transpose_145)[name = tensor("key_states_15_cast")]; + tensor var_421 = const()[name = tensor("op_421"), val = tensor([20, -1, 64])]; + tensor transpose_144 = transpose(perm = var_406_perm_0, x = var_405_cast)[name = tensor("transpose_144")]; + tensor value_states_15_cast = reshape(shape = var_421, x = transpose_144)[name = tensor("value_states_15_cast")]; + tensor var_424_perm_0 = const()[name = tensor("op_424_perm_0"), val = tensor([0, 2, 1])]; tensor attn_weights_19_transpose_x_0 = const()[name = tensor("attn_weights_19_transpose_x_0"), val = tensor(false)]; tensor attn_weights_19_transpose_y_0 = const()[name = tensor("attn_weights_19_transpose_y_0"), val = tensor(false)]; - tensor transpose_142 = transpose(perm = var_422_perm_0, x = key_states_15)[name = tensor("transpose_142")]; - tensor attn_weights_19 = matmul(transpose_x = attn_weights_19_transpose_x_0, transpose_y = attn_weights_19_transpose_y_0, x = query_states_7, y = transpose_142)[name = tensor("attn_weights_19")]; - tensor var_424 = const()[name = tensor("op_424"), val = tensor([1, 20, 77, 77])]; - tensor var_425 = reshape(shape = var_424, x = attn_weights_19)[name = tensor("op_425")]; - tensor attn_weights_21 = add(x = var_425, y = causal_attention_mask)[name = tensor("attn_weights_21")]; - tensor var_430 = const()[name = tensor("op_430"), val = tensor([20, 77, 77])]; - tensor input_53 = reshape(shape = var_430, x = attn_weights_21)[name = tensor("input_53")]; - tensor input_55 = softmax(axis = var_5, x = input_53)[name = tensor("input_55")]; + tensor transpose_142 = transpose(perm = var_424_perm_0, x = key_states_15_cast)[name = tensor("transpose_142")]; + tensor attn_weights_19_cast = matmul(transpose_x = attn_weights_19_transpose_x_0, transpose_y = attn_weights_19_transpose_y_0, x = query_states_7_cast, y = transpose_142)[name = tensor("attn_weights_19_cast")]; + tensor var_426 = const()[name = tensor("op_426"), val = tensor([1, 20, 77, 77])]; + tensor var_427_cast = reshape(shape = var_426, x = attn_weights_19_cast)[name = tensor("op_427_cast")]; + tensor attn_weights_21_cast = add(x = var_427_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_21_cast")]; + tensor var_432 = const()[name = tensor("op_432"), val = tensor([20, 77, 77])]; + tensor input_53_cast = reshape(shape = var_432, x = 
attn_weights_21_cast)[name = tensor("input_53_cast")]; + tensor input_55_cast = softmax(axis = var_5, x = input_53_cast)[name = tensor("input_55_cast")]; tensor attn_output_19_transpose_x_0 = const()[name = tensor("attn_output_19_transpose_x_0"), val = tensor(false)]; tensor attn_output_19_transpose_y_0 = const()[name = tensor("attn_output_19_transpose_y_0"), val = tensor(false)]; - tensor attn_output_19 = matmul(transpose_x = attn_output_19_transpose_x_0, transpose_y = attn_output_19_transpose_y_0, x = input_55, y = value_states_15)[name = tensor("attn_output_19")]; - tensor var_435 = const()[name = tensor("op_435"), val = tensor([1, 20, 77, 64])]; - tensor attn_output_21 = reshape(shape = var_435, x = attn_output_19)[name = tensor("attn_output_21")]; + tensor attn_output_19_cast = matmul(transpose_x = attn_output_19_transpose_x_0, transpose_y = attn_output_19_transpose_y_0, x = input_55_cast, y = value_states_15_cast)[name = tensor("attn_output_19_cast")]; + tensor var_437 = const()[name = tensor("op_437"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_21_cast = reshape(shape = var_437, x = attn_output_19_cast)[name = tensor("attn_output_21_cast")]; tensor attn_output_23_perm_0 = const()[name = tensor("attn_output_23_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_438 = const()[name = tensor("op_438"), val = tensor([1, 77, 1280])]; - tensor transpose_141 = transpose(perm = attn_output_23_perm_0, x = attn_output_21)[name = tensor("transpose_141")]; - tensor input_57 = reshape(shape = var_438, x = transpose_141)[name = tensor("input_57")]; - tensor hidden_states_21 = linear(bias = text_encoder_text_model_encoder_layers_3_self_attn_out_proj_bias, weight = text_encoder_text_model_encoder_layers_3_self_attn_out_proj_weight, x = input_57)[name = tensor("hidden_states_21")]; - tensor input_59 = add(x = input_51, y = hidden_states_21)[name = tensor("input_59")]; + tensor var_440 = const()[name = tensor("op_440"), val = tensor([1, 77, 1280])]; + tensor transpose_141 = transpose(perm = attn_output_23_perm_0, x = attn_output_21_cast)[name = tensor("transpose_141")]; + tensor input_57_cast = reshape(shape = var_440, x = transpose_141)[name = tensor("input_57_cast")]; + tensor text_encoder_text_model_encoder_layers_3_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_3_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(254605184)))]; + tensor text_encoder_text_model_encoder_layers_3_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_3_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(257882048)))]; + tensor hidden_states_21_cast = linear(bias = text_encoder_text_model_encoder_layers_3_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_3_self_attn_out_proj_weight_to_fp16, x = input_57_cast)[name = tensor("hidden_states_21_cast")]; + tensor input_59_cast = add(x = input_51_cast, y = hidden_states_21_cast)[name = tensor("input_59_cast")]; tensor input_61_axes_0 = const()[name = tensor("input_61_axes_0"), val = tensor([-1])]; - tensor input_61 = layer_norm(axes = input_61_axes_0, beta = text_encoder_text_model_encoder_layers_3_layer_norm2_bias, epsilon = var_12, gamma = text_encoder_text_model_encoder_layers_3_layer_norm2_weight, x = input_59)[name = tensor("input_61")]; - tensor input_63 = linear(bias = 
text_encoder_text_model_encoder_layers_3_mlp_fc1_bias, weight = text_encoder_text_model_encoder_layers_3_mlp_fc1_weight, x = input_61)[name = tensor("input_63")]; + tensor text_encoder_text_model_encoder_layers_3_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_3_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(257884672)))]; + tensor text_encoder_text_model_encoder_layers_3_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_3_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(257887296)))]; + tensor input_61_cast = layer_norm(axes = input_61_axes_0, beta = text_encoder_text_model_encoder_layers_3_layer_norm2_bias_to_fp16, epsilon = var_13_to_fp16, gamma = text_encoder_text_model_encoder_layers_3_layer_norm2_weight_to_fp16, x = input_59_cast)[name = tensor("input_61_cast")]; + tensor text_encoder_text_model_encoder_layers_3_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_3_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(257889920)))]; + tensor text_encoder_text_model_encoder_layers_3_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_3_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(270997184)))]; + tensor input_63_cast = linear(bias = text_encoder_text_model_encoder_layers_3_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_3_mlp_fc1_weight_to_fp16, x = input_61_cast)[name = tensor("input_63_cast")]; tensor input_65_mode_0 = const()[name = tensor("input_65_mode_0"), val = tensor("EXACT")]; - tensor input_65 = gelu(mode = input_65_mode_0, x = input_63)[name = tensor("input_65")]; - tensor hidden_states_23 = linear(bias = text_encoder_text_model_encoder_layers_3_mlp_fc2_bias, weight = text_encoder_text_model_encoder_layers_3_mlp_fc2_weight, x = input_65)[name = tensor("hidden_states_23")]; - tensor input_67 = add(x = input_59, y = hidden_states_23)[name = tensor("input_67")]; + tensor input_65_cast = gelu(mode = input_65_mode_0, x = input_63_cast)[name = tensor("input_65_cast")]; + tensor text_encoder_text_model_encoder_layers_3_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_3_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(271007488)))]; + tensor text_encoder_text_model_encoder_layers_3_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_3_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(284114752)))]; + tensor hidden_states_23_cast = linear(bias = text_encoder_text_model_encoder_layers_3_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_3_mlp_fc2_weight_to_fp16, x = input_65_cast)[name = tensor("hidden_states_23_cast")]; + tensor input_67_cast = add(x = input_59_cast, y = hidden_states_23_cast)[name = tensor("input_67_cast")]; tensor hidden_states_25_axes_0 = const()[name = tensor("hidden_states_25_axes_0"), val = tensor([-1])]; - tensor hidden_states_25 = layer_norm(axes = hidden_states_25_axes_0, beta = text_encoder_text_model_encoder_layers_4_layer_norm1_bias, epsilon = var_12, gamma = text_encoder_text_model_encoder_layers_4_layer_norm1_weight, 
x = input_67)[name = tensor("hidden_states_25")]; - tensor var_476 = linear(bias = text_encoder_text_model_encoder_layers_4_self_attn_q_proj_bias, weight = text_encoder_text_model_encoder_layers_4_self_attn_q_proj_weight, x = hidden_states_25)[name = tensor("op_476")]; - tensor var_477 = const()[name = tensor("op_477"), val = tensor(0x1p-3)]; - tensor tensor_29 = mul(x = var_476, y = var_477)[name = tensor("tensor_29")]; - tensor tensor_25 = linear(bias = text_encoder_text_model_encoder_layers_4_self_attn_k_proj_bias, weight = text_encoder_text_model_encoder_layers_4_self_attn_k_proj_weight, x = hidden_states_25)[name = tensor("tensor_25")]; - tensor var_482 = const()[name = tensor("op_482"), val = tensor([1, -1, 20, 64])]; - tensor var_483 = reshape(shape = var_482, x = tensor_25)[name = tensor("op_483")]; - tensor var_484_perm_0 = const()[name = tensor("op_484_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor tensor_27 = linear(bias = text_encoder_text_model_encoder_layers_4_self_attn_v_proj_bias, weight = text_encoder_text_model_encoder_layers_4_self_attn_v_proj_weight, x = hidden_states_25)[name = tensor("tensor_27")]; - tensor var_489 = const()[name = tensor("op_489"), val = tensor([1, -1, 20, 64])]; - tensor var_490 = reshape(shape = var_489, x = tensor_27)[name = tensor("op_490")]; - tensor var_491_perm_0 = const()[name = tensor("op_491_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_498 = const()[name = tensor("op_498"), val = tensor([1, 77, 20, 64])]; - tensor var_499 = reshape(shape = var_498, x = tensor_29)[name = tensor("op_499")]; - tensor var_500_perm_0 = const()[name = tensor("op_500_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_502 = const()[name = tensor("op_502"), val = tensor([20, -1, 64])]; - tensor transpose_138 = transpose(perm = var_500_perm_0, x = var_499)[name = tensor("transpose_138")]; - tensor query_states_9 = reshape(shape = var_502, x = transpose_138)[name = tensor("query_states_9")]; + tensor text_encoder_text_model_encoder_layers_4_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_4_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(284117376)))]; + tensor text_encoder_text_model_encoder_layers_4_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_4_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(284120000)))]; + tensor hidden_states_25_cast = layer_norm(axes = hidden_states_25_axes_0, beta = text_encoder_text_model_encoder_layers_4_layer_norm1_bias_to_fp16, epsilon = var_13_to_fp16, gamma = text_encoder_text_model_encoder_layers_4_layer_norm1_weight_to_fp16, x = input_67_cast)[name = tensor("hidden_states_25_cast")]; + tensor text_encoder_text_model_encoder_layers_4_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_4_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(284122624)))]; + tensor text_encoder_text_model_encoder_layers_4_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_4_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(287399488)))]; + tensor var_478_cast = linear(bias = text_encoder_text_model_encoder_layers_4_self_attn_q_proj_bias_to_fp16, weight = 
text_encoder_text_model_encoder_layers_4_self_attn_q_proj_weight_to_fp16, x = hidden_states_25_cast)[name = tensor("op_478_cast")]; + tensor var_479_to_fp16 = const()[name = tensor("op_479_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_29_cast = mul(x = var_478_cast, y = var_479_to_fp16)[name = tensor("tensor_29_cast")]; + tensor text_encoder_text_model_encoder_layers_4_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_4_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(287402112)))]; + tensor text_encoder_text_model_encoder_layers_4_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_4_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(290678976)))]; + tensor tensor_25_cast = linear(bias = text_encoder_text_model_encoder_layers_4_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_4_self_attn_k_proj_weight_to_fp16, x = hidden_states_25_cast)[name = tensor("tensor_25_cast")]; + tensor var_484 = const()[name = tensor("op_484"), val = tensor([1, -1, 20, 64])]; + tensor var_485_cast = reshape(shape = var_484, x = tensor_25_cast)[name = tensor("op_485_cast")]; + tensor var_486_perm_0 = const()[name = tensor("op_486_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_4_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_4_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(290681600)))]; + tensor text_encoder_text_model_encoder_layers_4_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_4_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(293958464)))]; + tensor tensor_27_cast = linear(bias = text_encoder_text_model_encoder_layers_4_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_4_self_attn_v_proj_weight_to_fp16, x = hidden_states_25_cast)[name = tensor("tensor_27_cast")]; + tensor var_491 = const()[name = tensor("op_491"), val = tensor([1, -1, 20, 64])]; + tensor var_492_cast = reshape(shape = var_491, x = tensor_27_cast)[name = tensor("op_492_cast")]; + tensor var_493_perm_0 = const()[name = tensor("op_493_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_500 = const()[name = tensor("op_500"), val = tensor([1, 77, 20, 64])]; + tensor var_501_cast = reshape(shape = var_500, x = tensor_29_cast)[name = tensor("op_501_cast")]; + tensor var_502_perm_0 = const()[name = tensor("op_502_perm_0"), val = tensor([0, 2, 1, 3])]; tensor var_504 = const()[name = tensor("op_504"), val = tensor([20, -1, 64])]; - tensor transpose_140 = transpose(perm = var_484_perm_0, x = var_483)[name = tensor("transpose_140")]; - tensor key_states_19 = reshape(shape = var_504, x = transpose_140)[name = tensor("key_states_19")]; + tensor transpose_138 = transpose(perm = var_502_perm_0, x = var_501_cast)[name = tensor("transpose_138")]; + tensor query_states_9_cast = reshape(shape = var_504, x = transpose_138)[name = tensor("query_states_9_cast")]; tensor var_506 = const()[name = tensor("op_506"), val = tensor([20, -1, 64])]; - tensor transpose_139 = transpose(perm = var_491_perm_0, x = var_490)[name = tensor("transpose_139")]; - tensor value_states_19 = reshape(shape = var_506, x = 
transpose_139)[name = tensor("value_states_19")]; - tensor var_509_perm_0 = const()[name = tensor("op_509_perm_0"), val = tensor([0, 2, 1])]; + tensor transpose_140 = transpose(perm = var_486_perm_0, x = var_485_cast)[name = tensor("transpose_140")]; + tensor key_states_19_cast = reshape(shape = var_506, x = transpose_140)[name = tensor("key_states_19_cast")]; + tensor var_508 = const()[name = tensor("op_508"), val = tensor([20, -1, 64])]; + tensor transpose_139 = transpose(perm = var_493_perm_0, x = var_492_cast)[name = tensor("transpose_139")]; + tensor value_states_19_cast = reshape(shape = var_508, x = transpose_139)[name = tensor("value_states_19_cast")]; + tensor var_511_perm_0 = const()[name = tensor("op_511_perm_0"), val = tensor([0, 2, 1])]; tensor attn_weights_25_transpose_x_0 = const()[name = tensor("attn_weights_25_transpose_x_0"), val = tensor(false)]; tensor attn_weights_25_transpose_y_0 = const()[name = tensor("attn_weights_25_transpose_y_0"), val = tensor(false)]; - tensor transpose_137 = transpose(perm = var_509_perm_0, x = key_states_19)[name = tensor("transpose_137")]; - tensor attn_weights_25 = matmul(transpose_x = attn_weights_25_transpose_x_0, transpose_y = attn_weights_25_transpose_y_0, x = query_states_9, y = transpose_137)[name = tensor("attn_weights_25")]; - tensor var_511 = const()[name = tensor("op_511"), val = tensor([1, 20, 77, 77])]; - tensor var_512 = reshape(shape = var_511, x = attn_weights_25)[name = tensor("op_512")]; - tensor attn_weights_27 = add(x = var_512, y = causal_attention_mask)[name = tensor("attn_weights_27")]; - tensor var_517 = const()[name = tensor("op_517"), val = tensor([20, 77, 77])]; - tensor input_69 = reshape(shape = var_517, x = attn_weights_27)[name = tensor("input_69")]; - tensor input_71 = softmax(axis = var_5, x = input_69)[name = tensor("input_71")]; + tensor transpose_137 = transpose(perm = var_511_perm_0, x = key_states_19_cast)[name = tensor("transpose_137")]; + tensor attn_weights_25_cast = matmul(transpose_x = attn_weights_25_transpose_x_0, transpose_y = attn_weights_25_transpose_y_0, x = query_states_9_cast, y = transpose_137)[name = tensor("attn_weights_25_cast")]; + tensor var_513 = const()[name = tensor("op_513"), val = tensor([1, 20, 77, 77])]; + tensor var_514_cast = reshape(shape = var_513, x = attn_weights_25_cast)[name = tensor("op_514_cast")]; + tensor attn_weights_27_cast = add(x = var_514_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_27_cast")]; + tensor var_519 = const()[name = tensor("op_519"), val = tensor([20, 77, 77])]; + tensor input_69_cast = reshape(shape = var_519, x = attn_weights_27_cast)[name = tensor("input_69_cast")]; + tensor input_71_cast = softmax(axis = var_5, x = input_69_cast)[name = tensor("input_71_cast")]; tensor attn_output_25_transpose_x_0 = const()[name = tensor("attn_output_25_transpose_x_0"), val = tensor(false)]; tensor attn_output_25_transpose_y_0 = const()[name = tensor("attn_output_25_transpose_y_0"), val = tensor(false)]; - tensor attn_output_25 = matmul(transpose_x = attn_output_25_transpose_x_0, transpose_y = attn_output_25_transpose_y_0, x = input_71, y = value_states_19)[name = tensor("attn_output_25")]; - tensor var_522 = const()[name = tensor("op_522"), val = tensor([1, 20, 77, 64])]; - tensor attn_output_27 = reshape(shape = var_522, x = attn_output_25)[name = tensor("attn_output_27")]; + tensor attn_output_25_cast = matmul(transpose_x = attn_output_25_transpose_x_0, transpose_y = attn_output_25_transpose_y_0, x = input_71_cast, y = 
value_states_19_cast)[name = tensor("attn_output_25_cast")]; + tensor var_524 = const()[name = tensor("op_524"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_27_cast = reshape(shape = var_524, x = attn_output_25_cast)[name = tensor("attn_output_27_cast")]; tensor attn_output_29_perm_0 = const()[name = tensor("attn_output_29_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_525 = const()[name = tensor("op_525"), val = tensor([1, 77, 1280])]; - tensor transpose_136 = transpose(perm = attn_output_29_perm_0, x = attn_output_27)[name = tensor("transpose_136")]; - tensor input_73 = reshape(shape = var_525, x = transpose_136)[name = tensor("input_73")]; - tensor hidden_states_27 = linear(bias = text_encoder_text_model_encoder_layers_4_self_attn_out_proj_bias, weight = text_encoder_text_model_encoder_layers_4_self_attn_out_proj_weight, x = input_73)[name = tensor("hidden_states_27")]; - tensor input_75 = add(x = input_67, y = hidden_states_27)[name = tensor("input_75")]; + tensor var_527 = const()[name = tensor("op_527"), val = tensor([1, 77, 1280])]; + tensor transpose_136 = transpose(perm = attn_output_29_perm_0, x = attn_output_27_cast)[name = tensor("transpose_136")]; + tensor input_73_cast = reshape(shape = var_527, x = transpose_136)[name = tensor("input_73_cast")]; + tensor text_encoder_text_model_encoder_layers_4_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_4_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(293961088)))]; + tensor text_encoder_text_model_encoder_layers_4_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_4_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(297237952)))]; + tensor hidden_states_27_cast = linear(bias = text_encoder_text_model_encoder_layers_4_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_4_self_attn_out_proj_weight_to_fp16, x = input_73_cast)[name = tensor("hidden_states_27_cast")]; + tensor input_75_cast = add(x = input_67_cast, y = hidden_states_27_cast)[name = tensor("input_75_cast")]; tensor input_77_axes_0 = const()[name = tensor("input_77_axes_0"), val = tensor([-1])]; - tensor input_77 = layer_norm(axes = input_77_axes_0, beta = text_encoder_text_model_encoder_layers_4_layer_norm2_bias, epsilon = var_12, gamma = text_encoder_text_model_encoder_layers_4_layer_norm2_weight, x = input_75)[name = tensor("input_77")]; - tensor input_79 = linear(bias = text_encoder_text_model_encoder_layers_4_mlp_fc1_bias, weight = text_encoder_text_model_encoder_layers_4_mlp_fc1_weight, x = input_77)[name = tensor("input_79")]; + tensor text_encoder_text_model_encoder_layers_4_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_4_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(297240576)))]; + tensor text_encoder_text_model_encoder_layers_4_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_4_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(297243200)))]; + tensor input_77_cast = layer_norm(axes = input_77_axes_0, beta = text_encoder_text_model_encoder_layers_4_layer_norm2_bias_to_fp16, epsilon = var_13_to_fp16, gamma = 
text_encoder_text_model_encoder_layers_4_layer_norm2_weight_to_fp16, x = input_75_cast)[name = tensor("input_77_cast")]; + tensor text_encoder_text_model_encoder_layers_4_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_4_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(297245824)))]; + tensor text_encoder_text_model_encoder_layers_4_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_4_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(310353088)))]; + tensor input_79_cast = linear(bias = text_encoder_text_model_encoder_layers_4_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_4_mlp_fc1_weight_to_fp16, x = input_77_cast)[name = tensor("input_79_cast")]; tensor input_81_mode_0 = const()[name = tensor("input_81_mode_0"), val = tensor("EXACT")]; - tensor input_81 = gelu(mode = input_81_mode_0, x = input_79)[name = tensor("input_81")]; - tensor hidden_states_29 = linear(bias = text_encoder_text_model_encoder_layers_4_mlp_fc2_bias, weight = text_encoder_text_model_encoder_layers_4_mlp_fc2_weight, x = input_81)[name = tensor("hidden_states_29")]; - tensor input_83 = add(x = input_75, y = hidden_states_29)[name = tensor("input_83")]; + tensor input_81_cast = gelu(mode = input_81_mode_0, x = input_79_cast)[name = tensor("input_81_cast")]; + tensor text_encoder_text_model_encoder_layers_4_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_4_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(310363392)))]; + tensor text_encoder_text_model_encoder_layers_4_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_4_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(323470656)))]; + tensor hidden_states_29_cast = linear(bias = text_encoder_text_model_encoder_layers_4_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_4_mlp_fc2_weight_to_fp16, x = input_81_cast)[name = tensor("hidden_states_29_cast")]; + tensor input_83_cast = add(x = input_75_cast, y = hidden_states_29_cast)[name = tensor("input_83_cast")]; tensor hidden_states_31_axes_0 = const()[name = tensor("hidden_states_31_axes_0"), val = tensor([-1])]; - tensor hidden_states_31 = layer_norm(axes = hidden_states_31_axes_0, beta = text_encoder_text_model_encoder_layers_5_layer_norm1_bias, epsilon = var_12, gamma = text_encoder_text_model_encoder_layers_5_layer_norm1_weight, x = input_83)[name = tensor("hidden_states_31")]; - tensor var_563 = linear(bias = text_encoder_text_model_encoder_layers_5_self_attn_q_proj_bias, weight = text_encoder_text_model_encoder_layers_5_self_attn_q_proj_weight, x = hidden_states_31)[name = tensor("op_563")]; - tensor var_564 = const()[name = tensor("op_564"), val = tensor(0x1p-3)]; - tensor tensor_35 = mul(x = var_563, y = var_564)[name = tensor("tensor_35")]; - tensor tensor_31 = linear(bias = text_encoder_text_model_encoder_layers_5_self_attn_k_proj_bias, weight = text_encoder_text_model_encoder_layers_5_self_attn_k_proj_weight, x = hidden_states_31)[name = tensor("tensor_31")]; - tensor var_569 = const()[name = tensor("op_569"), val = tensor([1, -1, 20, 64])]; - tensor var_570 = reshape(shape = var_569, x = tensor_31)[name = tensor("op_570")]; - tensor var_571_perm_0 = const()[name = 
tensor("op_571_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor tensor_33 = linear(bias = text_encoder_text_model_encoder_layers_5_self_attn_v_proj_bias, weight = text_encoder_text_model_encoder_layers_5_self_attn_v_proj_weight, x = hidden_states_31)[name = tensor("tensor_33")]; - tensor var_576 = const()[name = tensor("op_576"), val = tensor([1, -1, 20, 64])]; - tensor var_577 = reshape(shape = var_576, x = tensor_33)[name = tensor("op_577")]; - tensor var_578_perm_0 = const()[name = tensor("op_578_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_585 = const()[name = tensor("op_585"), val = tensor([1, 77, 20, 64])]; - tensor var_586 = reshape(shape = var_585, x = tensor_35)[name = tensor("op_586")]; - tensor var_587_perm_0 = const()[name = tensor("op_587_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_589 = const()[name = tensor("op_589"), val = tensor([20, -1, 64])]; - tensor transpose_133 = transpose(perm = var_587_perm_0, x = var_586)[name = tensor("transpose_133")]; - tensor query_states_11 = reshape(shape = var_589, x = transpose_133)[name = tensor("query_states_11")]; + tensor text_encoder_text_model_encoder_layers_5_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_5_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(323473280)))]; + tensor text_encoder_text_model_encoder_layers_5_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_5_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(323475904)))]; + tensor hidden_states_31_cast = layer_norm(axes = hidden_states_31_axes_0, beta = text_encoder_text_model_encoder_layers_5_layer_norm1_bias_to_fp16, epsilon = var_13_to_fp16, gamma = text_encoder_text_model_encoder_layers_5_layer_norm1_weight_to_fp16, x = input_83_cast)[name = tensor("hidden_states_31_cast")]; + tensor text_encoder_text_model_encoder_layers_5_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_5_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(323478528)))]; + tensor text_encoder_text_model_encoder_layers_5_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_5_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(326755392)))]; + tensor var_565_cast = linear(bias = text_encoder_text_model_encoder_layers_5_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_5_self_attn_q_proj_weight_to_fp16, x = hidden_states_31_cast)[name = tensor("op_565_cast")]; + tensor var_566_to_fp16 = const()[name = tensor("op_566_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_35_cast = mul(x = var_565_cast, y = var_566_to_fp16)[name = tensor("tensor_35_cast")]; + tensor text_encoder_text_model_encoder_layers_5_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_5_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(326758016)))]; + tensor text_encoder_text_model_encoder_layers_5_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_5_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(330034880)))]; + tensor 
tensor_31_cast = linear(bias = text_encoder_text_model_encoder_layers_5_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_5_self_attn_k_proj_weight_to_fp16, x = hidden_states_31_cast)[name = tensor("tensor_31_cast")]; + tensor var_571 = const()[name = tensor("op_571"), val = tensor([1, -1, 20, 64])]; + tensor var_572_cast = reshape(shape = var_571, x = tensor_31_cast)[name = tensor("op_572_cast")]; + tensor var_573_perm_0 = const()[name = tensor("op_573_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_5_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_5_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(330037504)))]; + tensor text_encoder_text_model_encoder_layers_5_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_5_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(333314368)))]; + tensor tensor_33_cast = linear(bias = text_encoder_text_model_encoder_layers_5_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_5_self_attn_v_proj_weight_to_fp16, x = hidden_states_31_cast)[name = tensor("tensor_33_cast")]; + tensor var_578 = const()[name = tensor("op_578"), val = tensor([1, -1, 20, 64])]; + tensor var_579_cast = reshape(shape = var_578, x = tensor_33_cast)[name = tensor("op_579_cast")]; + tensor var_580_perm_0 = const()[name = tensor("op_580_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_587 = const()[name = tensor("op_587"), val = tensor([1, 77, 20, 64])]; + tensor var_588_cast = reshape(shape = var_587, x = tensor_35_cast)[name = tensor("op_588_cast")]; + tensor var_589_perm_0 = const()[name = tensor("op_589_perm_0"), val = tensor([0, 2, 1, 3])]; tensor var_591 = const()[name = tensor("op_591"), val = tensor([20, -1, 64])]; - tensor transpose_135 = transpose(perm = var_571_perm_0, x = var_570)[name = tensor("transpose_135")]; - tensor key_states_23 = reshape(shape = var_591, x = transpose_135)[name = tensor("key_states_23")]; + tensor transpose_133 = transpose(perm = var_589_perm_0, x = var_588_cast)[name = tensor("transpose_133")]; + tensor query_states_11_cast = reshape(shape = var_591, x = transpose_133)[name = tensor("query_states_11_cast")]; tensor var_593 = const()[name = tensor("op_593"), val = tensor([20, -1, 64])]; - tensor transpose_134 = transpose(perm = var_578_perm_0, x = var_577)[name = tensor("transpose_134")]; - tensor value_states_23 = reshape(shape = var_593, x = transpose_134)[name = tensor("value_states_23")]; - tensor var_596_perm_0 = const()[name = tensor("op_596_perm_0"), val = tensor([0, 2, 1])]; + tensor transpose_135 = transpose(perm = var_573_perm_0, x = var_572_cast)[name = tensor("transpose_135")]; + tensor key_states_23_cast = reshape(shape = var_593, x = transpose_135)[name = tensor("key_states_23_cast")]; + tensor var_595 = const()[name = tensor("op_595"), val = tensor([20, -1, 64])]; + tensor transpose_134 = transpose(perm = var_580_perm_0, x = var_579_cast)[name = tensor("transpose_134")]; + tensor value_states_23_cast = reshape(shape = var_595, x = transpose_134)[name = tensor("value_states_23_cast")]; + tensor var_598_perm_0 = const()[name = tensor("op_598_perm_0"), val = tensor([0, 2, 1])]; tensor attn_weights_31_transpose_x_0 = const()[name = tensor("attn_weights_31_transpose_x_0"), val = tensor(false)]; tensor 
attn_weights_31_transpose_y_0 = const()[name = tensor("attn_weights_31_transpose_y_0"), val = tensor(false)]; - tensor transpose_132 = transpose(perm = var_596_perm_0, x = key_states_23)[name = tensor("transpose_132")]; - tensor attn_weights_31 = matmul(transpose_x = attn_weights_31_transpose_x_0, transpose_y = attn_weights_31_transpose_y_0, x = query_states_11, y = transpose_132)[name = tensor("attn_weights_31")]; - tensor var_598 = const()[name = tensor("op_598"), val = tensor([1, 20, 77, 77])]; - tensor var_599 = reshape(shape = var_598, x = attn_weights_31)[name = tensor("op_599")]; - tensor attn_weights_33 = add(x = var_599, y = causal_attention_mask)[name = tensor("attn_weights_33")]; - tensor var_604 = const()[name = tensor("op_604"), val = tensor([20, 77, 77])]; - tensor input_85 = reshape(shape = var_604, x = attn_weights_33)[name = tensor("input_85")]; - tensor input_87 = softmax(axis = var_5, x = input_85)[name = tensor("input_87")]; + tensor transpose_132 = transpose(perm = var_598_perm_0, x = key_states_23_cast)[name = tensor("transpose_132")]; + tensor attn_weights_31_cast = matmul(transpose_x = attn_weights_31_transpose_x_0, transpose_y = attn_weights_31_transpose_y_0, x = query_states_11_cast, y = transpose_132)[name = tensor("attn_weights_31_cast")]; + tensor var_600 = const()[name = tensor("op_600"), val = tensor([1, 20, 77, 77])]; + tensor var_601_cast = reshape(shape = var_600, x = attn_weights_31_cast)[name = tensor("op_601_cast")]; + tensor attn_weights_33_cast = add(x = var_601_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_33_cast")]; + tensor var_606 = const()[name = tensor("op_606"), val = tensor([20, 77, 77])]; + tensor input_85_cast = reshape(shape = var_606, x = attn_weights_33_cast)[name = tensor("input_85_cast")]; + tensor input_87_cast = softmax(axis = var_5, x = input_85_cast)[name = tensor("input_87_cast")]; tensor attn_output_31_transpose_x_0 = const()[name = tensor("attn_output_31_transpose_x_0"), val = tensor(false)]; tensor attn_output_31_transpose_y_0 = const()[name = tensor("attn_output_31_transpose_y_0"), val = tensor(false)]; - tensor attn_output_31 = matmul(transpose_x = attn_output_31_transpose_x_0, transpose_y = attn_output_31_transpose_y_0, x = input_87, y = value_states_23)[name = tensor("attn_output_31")]; - tensor var_609 = const()[name = tensor("op_609"), val = tensor([1, 20, 77, 64])]; - tensor attn_output_33 = reshape(shape = var_609, x = attn_output_31)[name = tensor("attn_output_33")]; + tensor attn_output_31_cast = matmul(transpose_x = attn_output_31_transpose_x_0, transpose_y = attn_output_31_transpose_y_0, x = input_87_cast, y = value_states_23_cast)[name = tensor("attn_output_31_cast")]; + tensor var_611 = const()[name = tensor("op_611"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_33_cast = reshape(shape = var_611, x = attn_output_31_cast)[name = tensor("attn_output_33_cast")]; tensor attn_output_35_perm_0 = const()[name = tensor("attn_output_35_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_612 = const()[name = tensor("op_612"), val = tensor([1, 77, 1280])]; - tensor transpose_131 = transpose(perm = attn_output_35_perm_0, x = attn_output_33)[name = tensor("transpose_131")]; - tensor input_89 = reshape(shape = var_612, x = transpose_131)[name = tensor("input_89")]; - tensor hidden_states_33 = linear(bias = text_encoder_text_model_encoder_layers_5_self_attn_out_proj_bias, weight = text_encoder_text_model_encoder_layers_5_self_attn_out_proj_weight, x = input_89)[name = tensor("hidden_states_33")]; 
- tensor input_91 = add(x = input_83, y = hidden_states_33)[name = tensor("input_91")]; + tensor var_614 = const()[name = tensor("op_614"), val = tensor([1, 77, 1280])]; + tensor transpose_131 = transpose(perm = attn_output_35_perm_0, x = attn_output_33_cast)[name = tensor("transpose_131")]; + tensor input_89_cast = reshape(shape = var_614, x = transpose_131)[name = tensor("input_89_cast")]; + tensor text_encoder_text_model_encoder_layers_5_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_5_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(333316992)))]; + tensor text_encoder_text_model_encoder_layers_5_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_5_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(336593856)))]; + tensor hidden_states_33_cast = linear(bias = text_encoder_text_model_encoder_layers_5_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_5_self_attn_out_proj_weight_to_fp16, x = input_89_cast)[name = tensor("hidden_states_33_cast")]; + tensor input_91_cast = add(x = input_83_cast, y = hidden_states_33_cast)[name = tensor("input_91_cast")]; tensor input_93_axes_0 = const()[name = tensor("input_93_axes_0"), val = tensor([-1])]; - tensor input_93 = layer_norm(axes = input_93_axes_0, beta = text_encoder_text_model_encoder_layers_5_layer_norm2_bias, epsilon = var_12, gamma = text_encoder_text_model_encoder_layers_5_layer_norm2_weight, x = input_91)[name = tensor("input_93")]; - tensor input_95 = linear(bias = text_encoder_text_model_encoder_layers_5_mlp_fc1_bias, weight = text_encoder_text_model_encoder_layers_5_mlp_fc1_weight, x = input_93)[name = tensor("input_95")]; + tensor text_encoder_text_model_encoder_layers_5_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_5_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(336596480)))]; + tensor text_encoder_text_model_encoder_layers_5_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_5_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(336599104)))]; + tensor input_93_cast = layer_norm(axes = input_93_axes_0, beta = text_encoder_text_model_encoder_layers_5_layer_norm2_bias_to_fp16, epsilon = var_13_to_fp16, gamma = text_encoder_text_model_encoder_layers_5_layer_norm2_weight_to_fp16, x = input_91_cast)[name = tensor("input_93_cast")]; + tensor text_encoder_text_model_encoder_layers_5_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_5_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(336601728)))]; + tensor text_encoder_text_model_encoder_layers_5_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_5_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(349708992)))]; + tensor input_95_cast = linear(bias = text_encoder_text_model_encoder_layers_5_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_5_mlp_fc1_weight_to_fp16, x = input_93_cast)[name = tensor("input_95_cast")]; tensor input_97_mode_0 = const()[name = tensor("input_97_mode_0"), val = 
tensor("EXACT")]; - tensor input_97 = gelu(mode = input_97_mode_0, x = input_95)[name = tensor("input_97")]; - tensor hidden_states_35 = linear(bias = text_encoder_text_model_encoder_layers_5_mlp_fc2_bias, weight = text_encoder_text_model_encoder_layers_5_mlp_fc2_weight, x = input_97)[name = tensor("hidden_states_35")]; - tensor input_99 = add(x = input_91, y = hidden_states_35)[name = tensor("input_99")]; + tensor input_97_cast = gelu(mode = input_97_mode_0, x = input_95_cast)[name = tensor("input_97_cast")]; + tensor text_encoder_text_model_encoder_layers_5_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_5_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(349719296)))]; + tensor text_encoder_text_model_encoder_layers_5_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_5_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(362826560)))]; + tensor hidden_states_35_cast = linear(bias = text_encoder_text_model_encoder_layers_5_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_5_mlp_fc2_weight_to_fp16, x = input_97_cast)[name = tensor("hidden_states_35_cast")]; + tensor input_99_cast = add(x = input_91_cast, y = hidden_states_35_cast)[name = tensor("input_99_cast")]; tensor hidden_states_37_axes_0 = const()[name = tensor("hidden_states_37_axes_0"), val = tensor([-1])]; - tensor hidden_states_37 = layer_norm(axes = hidden_states_37_axes_0, beta = text_encoder_text_model_encoder_layers_6_layer_norm1_bias, epsilon = var_12, gamma = text_encoder_text_model_encoder_layers_6_layer_norm1_weight, x = input_99)[name = tensor("hidden_states_37")]; - tensor var_650 = linear(bias = text_encoder_text_model_encoder_layers_6_self_attn_q_proj_bias, weight = text_encoder_text_model_encoder_layers_6_self_attn_q_proj_weight, x = hidden_states_37)[name = tensor("op_650")]; - tensor var_651 = const()[name = tensor("op_651"), val = tensor(0x1p-3)]; - tensor tensor_41 = mul(x = var_650, y = var_651)[name = tensor("tensor_41")]; - tensor tensor_37 = linear(bias = text_encoder_text_model_encoder_layers_6_self_attn_k_proj_bias, weight = text_encoder_text_model_encoder_layers_6_self_attn_k_proj_weight, x = hidden_states_37)[name = tensor("tensor_37")]; - tensor var_656 = const()[name = tensor("op_656"), val = tensor([1, -1, 20, 64])]; - tensor var_657 = reshape(shape = var_656, x = tensor_37)[name = tensor("op_657")]; - tensor var_658_perm_0 = const()[name = tensor("op_658_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor tensor_39 = linear(bias = text_encoder_text_model_encoder_layers_6_self_attn_v_proj_bias, weight = text_encoder_text_model_encoder_layers_6_self_attn_v_proj_weight, x = hidden_states_37)[name = tensor("tensor_39")]; - tensor var_663 = const()[name = tensor("op_663"), val = tensor([1, -1, 20, 64])]; - tensor var_664 = reshape(shape = var_663, x = tensor_39)[name = tensor("op_664")]; - tensor var_665_perm_0 = const()[name = tensor("op_665_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_672 = const()[name = tensor("op_672"), val = tensor([1, 77, 20, 64])]; - tensor var_673 = reshape(shape = var_672, x = tensor_41)[name = tensor("op_673")]; - tensor var_674_perm_0 = const()[name = tensor("op_674_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_676 = const()[name = tensor("op_676"), val = tensor([20, -1, 64])]; - tensor transpose_128 = transpose(perm = var_674_perm_0, x = var_673)[name = 
tensor("transpose_128")]; - tensor query_states_13 = reshape(shape = var_676, x = transpose_128)[name = tensor("query_states_13")]; + tensor text_encoder_text_model_encoder_layers_6_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_6_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(362829184)))]; + tensor text_encoder_text_model_encoder_layers_6_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_6_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(362831808)))]; + tensor hidden_states_37_cast = layer_norm(axes = hidden_states_37_axes_0, beta = text_encoder_text_model_encoder_layers_6_layer_norm1_bias_to_fp16, epsilon = var_13_to_fp16, gamma = text_encoder_text_model_encoder_layers_6_layer_norm1_weight_to_fp16, x = input_99_cast)[name = tensor("hidden_states_37_cast")]; + tensor text_encoder_text_model_encoder_layers_6_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_6_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(362834432)))]; + tensor text_encoder_text_model_encoder_layers_6_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_6_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(366111296)))]; + tensor var_652_cast = linear(bias = text_encoder_text_model_encoder_layers_6_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_6_self_attn_q_proj_weight_to_fp16, x = hidden_states_37_cast)[name = tensor("op_652_cast")]; + tensor var_653_to_fp16 = const()[name = tensor("op_653_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_41_cast = mul(x = var_652_cast, y = var_653_to_fp16)[name = tensor("tensor_41_cast")]; + tensor text_encoder_text_model_encoder_layers_6_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_6_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(366113920)))]; + tensor text_encoder_text_model_encoder_layers_6_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_6_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(369390784)))]; + tensor tensor_37_cast = linear(bias = text_encoder_text_model_encoder_layers_6_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_6_self_attn_k_proj_weight_to_fp16, x = hidden_states_37_cast)[name = tensor("tensor_37_cast")]; + tensor var_658 = const()[name = tensor("op_658"), val = tensor([1, -1, 20, 64])]; + tensor var_659_cast = reshape(shape = var_658, x = tensor_37_cast)[name = tensor("op_659_cast")]; + tensor var_660_perm_0 = const()[name = tensor("op_660_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_6_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_6_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(369393408)))]; + tensor text_encoder_text_model_encoder_layers_6_self_attn_v_proj_bias_to_fp16 = const()[name = 
tensor("text_encoder_text_model_encoder_layers_6_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(372670272)))]; + tensor tensor_39_cast = linear(bias = text_encoder_text_model_encoder_layers_6_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_6_self_attn_v_proj_weight_to_fp16, x = hidden_states_37_cast)[name = tensor("tensor_39_cast")]; + tensor var_665 = const()[name = tensor("op_665"), val = tensor([1, -1, 20, 64])]; + tensor var_666_cast = reshape(shape = var_665, x = tensor_39_cast)[name = tensor("op_666_cast")]; + tensor var_667_perm_0 = const()[name = tensor("op_667_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_674 = const()[name = tensor("op_674"), val = tensor([1, 77, 20, 64])]; + tensor var_675_cast = reshape(shape = var_674, x = tensor_41_cast)[name = tensor("op_675_cast")]; + tensor var_676_perm_0 = const()[name = tensor("op_676_perm_0"), val = tensor([0, 2, 1, 3])]; tensor var_678 = const()[name = tensor("op_678"), val = tensor([20, -1, 64])]; - tensor transpose_130 = transpose(perm = var_658_perm_0, x = var_657)[name = tensor("transpose_130")]; - tensor key_states_27 = reshape(shape = var_678, x = transpose_130)[name = tensor("key_states_27")]; + tensor transpose_128 = transpose(perm = var_676_perm_0, x = var_675_cast)[name = tensor("transpose_128")]; + tensor query_states_13_cast = reshape(shape = var_678, x = transpose_128)[name = tensor("query_states_13_cast")]; tensor var_680 = const()[name = tensor("op_680"), val = tensor([20, -1, 64])]; - tensor transpose_129 = transpose(perm = var_665_perm_0, x = var_664)[name = tensor("transpose_129")]; - tensor value_states_27 = reshape(shape = var_680, x = transpose_129)[name = tensor("value_states_27")]; - tensor var_683_perm_0 = const()[name = tensor("op_683_perm_0"), val = tensor([0, 2, 1])]; + tensor transpose_130 = transpose(perm = var_660_perm_0, x = var_659_cast)[name = tensor("transpose_130")]; + tensor key_states_27_cast = reshape(shape = var_680, x = transpose_130)[name = tensor("key_states_27_cast")]; + tensor var_682 = const()[name = tensor("op_682"), val = tensor([20, -1, 64])]; + tensor transpose_129 = transpose(perm = var_667_perm_0, x = var_666_cast)[name = tensor("transpose_129")]; + tensor value_states_27_cast = reshape(shape = var_682, x = transpose_129)[name = tensor("value_states_27_cast")]; + tensor var_685_perm_0 = const()[name = tensor("op_685_perm_0"), val = tensor([0, 2, 1])]; tensor attn_weights_37_transpose_x_0 = const()[name = tensor("attn_weights_37_transpose_x_0"), val = tensor(false)]; tensor attn_weights_37_transpose_y_0 = const()[name = tensor("attn_weights_37_transpose_y_0"), val = tensor(false)]; - tensor transpose_127 = transpose(perm = var_683_perm_0, x = key_states_27)[name = tensor("transpose_127")]; - tensor attn_weights_37 = matmul(transpose_x = attn_weights_37_transpose_x_0, transpose_y = attn_weights_37_transpose_y_0, x = query_states_13, y = transpose_127)[name = tensor("attn_weights_37")]; - tensor var_685 = const()[name = tensor("op_685"), val = tensor([1, 20, 77, 77])]; - tensor var_686 = reshape(shape = var_685, x = attn_weights_37)[name = tensor("op_686")]; - tensor attn_weights_39 = add(x = var_686, y = causal_attention_mask)[name = tensor("attn_weights_39")]; - tensor var_691 = const()[name = tensor("op_691"), val = tensor([20, 77, 77])]; - tensor input_101 = reshape(shape = var_691, x = attn_weights_39)[name = tensor("input_101")]; - tensor input_103 = softmax(axis = var_5, x 
= input_101)[name = tensor("input_103")]; + tensor transpose_127 = transpose(perm = var_685_perm_0, x = key_states_27_cast)[name = tensor("transpose_127")]; + tensor attn_weights_37_cast = matmul(transpose_x = attn_weights_37_transpose_x_0, transpose_y = attn_weights_37_transpose_y_0, x = query_states_13_cast, y = transpose_127)[name = tensor("attn_weights_37_cast")]; + tensor var_687 = const()[name = tensor("op_687"), val = tensor([1, 20, 77, 77])]; + tensor var_688_cast = reshape(shape = var_687, x = attn_weights_37_cast)[name = tensor("op_688_cast")]; + tensor attn_weights_39_cast = add(x = var_688_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_39_cast")]; + tensor var_693 = const()[name = tensor("op_693"), val = tensor([20, 77, 77])]; + tensor input_101_cast = reshape(shape = var_693, x = attn_weights_39_cast)[name = tensor("input_101_cast")]; + tensor input_103_cast = softmax(axis = var_5, x = input_101_cast)[name = tensor("input_103_cast")]; tensor attn_output_37_transpose_x_0 = const()[name = tensor("attn_output_37_transpose_x_0"), val = tensor(false)]; tensor attn_output_37_transpose_y_0 = const()[name = tensor("attn_output_37_transpose_y_0"), val = tensor(false)]; - tensor attn_output_37 = matmul(transpose_x = attn_output_37_transpose_x_0, transpose_y = attn_output_37_transpose_y_0, x = input_103, y = value_states_27)[name = tensor("attn_output_37")]; - tensor var_696 = const()[name = tensor("op_696"), val = tensor([1, 20, 77, 64])]; - tensor attn_output_39 = reshape(shape = var_696, x = attn_output_37)[name = tensor("attn_output_39")]; + tensor attn_output_37_cast = matmul(transpose_x = attn_output_37_transpose_x_0, transpose_y = attn_output_37_transpose_y_0, x = input_103_cast, y = value_states_27_cast)[name = tensor("attn_output_37_cast")]; + tensor var_698 = const()[name = tensor("op_698"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_39_cast = reshape(shape = var_698, x = attn_output_37_cast)[name = tensor("attn_output_39_cast")]; tensor attn_output_41_perm_0 = const()[name = tensor("attn_output_41_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_699 = const()[name = tensor("op_699"), val = tensor([1, 77, 1280])]; - tensor transpose_126 = transpose(perm = attn_output_41_perm_0, x = attn_output_39)[name = tensor("transpose_126")]; - tensor input_105 = reshape(shape = var_699, x = transpose_126)[name = tensor("input_105")]; - tensor hidden_states_39 = linear(bias = text_encoder_text_model_encoder_layers_6_self_attn_out_proj_bias, weight = text_encoder_text_model_encoder_layers_6_self_attn_out_proj_weight, x = input_105)[name = tensor("hidden_states_39")]; - tensor input_107 = add(x = input_99, y = hidden_states_39)[name = tensor("input_107")]; + tensor var_701 = const()[name = tensor("op_701"), val = tensor([1, 77, 1280])]; + tensor transpose_126 = transpose(perm = attn_output_41_perm_0, x = attn_output_39_cast)[name = tensor("transpose_126")]; + tensor input_105_cast = reshape(shape = var_701, x = transpose_126)[name = tensor("input_105_cast")]; + tensor text_encoder_text_model_encoder_layers_6_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_6_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(372672896)))]; + tensor text_encoder_text_model_encoder_layers_6_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_6_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(375949760)))]; + tensor hidden_states_39_cast = linear(bias = text_encoder_text_model_encoder_layers_6_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_6_self_attn_out_proj_weight_to_fp16, x = input_105_cast)[name = tensor("hidden_states_39_cast")]; + tensor input_107_cast = add(x = input_99_cast, y = hidden_states_39_cast)[name = tensor("input_107_cast")]; tensor input_109_axes_0 = const()[name = tensor("input_109_axes_0"), val = tensor([-1])]; - tensor input_109 = layer_norm(axes = input_109_axes_0, beta = text_encoder_text_model_encoder_layers_6_layer_norm2_bias, epsilon = var_12, gamma = text_encoder_text_model_encoder_layers_6_layer_norm2_weight, x = input_107)[name = tensor("input_109")]; - tensor input_111 = linear(bias = text_encoder_text_model_encoder_layers_6_mlp_fc1_bias, weight = text_encoder_text_model_encoder_layers_6_mlp_fc1_weight, x = input_109)[name = tensor("input_111")]; + tensor text_encoder_text_model_encoder_layers_6_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_6_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(375952384)))]; + tensor text_encoder_text_model_encoder_layers_6_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_6_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(375955008)))]; + tensor input_109_cast = layer_norm(axes = input_109_axes_0, beta = text_encoder_text_model_encoder_layers_6_layer_norm2_bias_to_fp16, epsilon = var_13_to_fp16, gamma = text_encoder_text_model_encoder_layers_6_layer_norm2_weight_to_fp16, x = input_107_cast)[name = tensor("input_109_cast")]; + tensor text_encoder_text_model_encoder_layers_6_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_6_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(375957632)))]; + tensor text_encoder_text_model_encoder_layers_6_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_6_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(389064896)))]; + tensor input_111_cast = linear(bias = text_encoder_text_model_encoder_layers_6_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_6_mlp_fc1_weight_to_fp16, x = input_109_cast)[name = tensor("input_111_cast")]; tensor input_113_mode_0 = const()[name = tensor("input_113_mode_0"), val = tensor("EXACT")]; - tensor input_113 = gelu(mode = input_113_mode_0, x = input_111)[name = tensor("input_113")]; - tensor hidden_states_41 = linear(bias = text_encoder_text_model_encoder_layers_6_mlp_fc2_bias, weight = text_encoder_text_model_encoder_layers_6_mlp_fc2_weight, x = input_113)[name = tensor("hidden_states_41")]; - tensor input_115 = add(x = input_107, y = hidden_states_41)[name = tensor("input_115")]; + tensor input_113_cast = gelu(mode = input_113_mode_0, x = input_111_cast)[name = tensor("input_113_cast")]; + tensor text_encoder_text_model_encoder_layers_6_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_6_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(389075200)))]; + tensor text_encoder_text_model_encoder_layers_6_mlp_fc2_bias_to_fp16 = const()[name = 
tensor("text_encoder_text_model_encoder_layers_6_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(402182464)))]; + tensor hidden_states_41_cast = linear(bias = text_encoder_text_model_encoder_layers_6_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_6_mlp_fc2_weight_to_fp16, x = input_113_cast)[name = tensor("hidden_states_41_cast")]; + tensor input_115_cast = add(x = input_107_cast, y = hidden_states_41_cast)[name = tensor("input_115_cast")]; tensor hidden_states_43_axes_0 = const()[name = tensor("hidden_states_43_axes_0"), val = tensor([-1])]; - tensor hidden_states_43 = layer_norm(axes = hidden_states_43_axes_0, beta = text_encoder_text_model_encoder_layers_7_layer_norm1_bias, epsilon = var_12, gamma = text_encoder_text_model_encoder_layers_7_layer_norm1_weight, x = input_115)[name = tensor("hidden_states_43")]; - tensor var_737 = linear(bias = text_encoder_text_model_encoder_layers_7_self_attn_q_proj_bias, weight = text_encoder_text_model_encoder_layers_7_self_attn_q_proj_weight, x = hidden_states_43)[name = tensor("op_737")]; - tensor var_738 = const()[name = tensor("op_738"), val = tensor(0x1p-3)]; - tensor tensor_47 = mul(x = var_737, y = var_738)[name = tensor("tensor_47")]; - tensor tensor_43 = linear(bias = text_encoder_text_model_encoder_layers_7_self_attn_k_proj_bias, weight = text_encoder_text_model_encoder_layers_7_self_attn_k_proj_weight, x = hidden_states_43)[name = tensor("tensor_43")]; - tensor var_743 = const()[name = tensor("op_743"), val = tensor([1, -1, 20, 64])]; - tensor var_744 = reshape(shape = var_743, x = tensor_43)[name = tensor("op_744")]; - tensor var_745_perm_0 = const()[name = tensor("op_745_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor tensor_45 = linear(bias = text_encoder_text_model_encoder_layers_7_self_attn_v_proj_bias, weight = text_encoder_text_model_encoder_layers_7_self_attn_v_proj_weight, x = hidden_states_43)[name = tensor("tensor_45")]; - tensor var_750 = const()[name = tensor("op_750"), val = tensor([1, -1, 20, 64])]; - tensor var_751 = reshape(shape = var_750, x = tensor_45)[name = tensor("op_751")]; - tensor var_752_perm_0 = const()[name = tensor("op_752_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_759 = const()[name = tensor("op_759"), val = tensor([1, 77, 20, 64])]; - tensor var_760 = reshape(shape = var_759, x = tensor_47)[name = tensor("op_760")]; - tensor var_761_perm_0 = const()[name = tensor("op_761_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_763 = const()[name = tensor("op_763"), val = tensor([20, -1, 64])]; - tensor transpose_123 = transpose(perm = var_761_perm_0, x = var_760)[name = tensor("transpose_123")]; - tensor query_states_15 = reshape(shape = var_763, x = transpose_123)[name = tensor("query_states_15")]; + tensor text_encoder_text_model_encoder_layers_7_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_7_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(402185088)))]; + tensor text_encoder_text_model_encoder_layers_7_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_7_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(402187712)))]; + tensor hidden_states_43_cast = layer_norm(axes = hidden_states_43_axes_0, beta = text_encoder_text_model_encoder_layers_7_layer_norm1_bias_to_fp16, epsilon = var_13_to_fp16, gamma = 
text_encoder_text_model_encoder_layers_7_layer_norm1_weight_to_fp16, x = input_115_cast)[name = tensor("hidden_states_43_cast")]; + tensor text_encoder_text_model_encoder_layers_7_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_7_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(402190336)))]; + tensor text_encoder_text_model_encoder_layers_7_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_7_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(405467200)))]; + tensor var_739_cast = linear(bias = text_encoder_text_model_encoder_layers_7_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_7_self_attn_q_proj_weight_to_fp16, x = hidden_states_43_cast)[name = tensor("op_739_cast")]; + tensor var_740_to_fp16 = const()[name = tensor("op_740_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_47_cast = mul(x = var_739_cast, y = var_740_to_fp16)[name = tensor("tensor_47_cast")]; + tensor text_encoder_text_model_encoder_layers_7_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_7_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(405469824)))]; + tensor text_encoder_text_model_encoder_layers_7_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_7_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(408746688)))]; + tensor tensor_43_cast = linear(bias = text_encoder_text_model_encoder_layers_7_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_7_self_attn_k_proj_weight_to_fp16, x = hidden_states_43_cast)[name = tensor("tensor_43_cast")]; + tensor var_745 = const()[name = tensor("op_745"), val = tensor([1, -1, 20, 64])]; + tensor var_746_cast = reshape(shape = var_745, x = tensor_43_cast)[name = tensor("op_746_cast")]; + tensor var_747_perm_0 = const()[name = tensor("op_747_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_7_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_7_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(408749312)))]; + tensor text_encoder_text_model_encoder_layers_7_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_7_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(412026176)))]; + tensor tensor_45_cast = linear(bias = text_encoder_text_model_encoder_layers_7_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_7_self_attn_v_proj_weight_to_fp16, x = hidden_states_43_cast)[name = tensor("tensor_45_cast")]; + tensor var_752 = const()[name = tensor("op_752"), val = tensor([1, -1, 20, 64])]; + tensor var_753_cast = reshape(shape = var_752, x = tensor_45_cast)[name = tensor("op_753_cast")]; + tensor var_754_perm_0 = const()[name = tensor("op_754_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_761 = const()[name = tensor("op_761"), val = tensor([1, 77, 20, 64])]; + tensor var_762_cast = reshape(shape = var_761, x = tensor_47_cast)[name = tensor("op_762_cast")]; + tensor var_763_perm_0 = 
const()[name = tensor("op_763_perm_0"), val = tensor([0, 2, 1, 3])]; tensor var_765 = const()[name = tensor("op_765"), val = tensor([20, -1, 64])]; - tensor transpose_125 = transpose(perm = var_745_perm_0, x = var_744)[name = tensor("transpose_125")]; - tensor key_states_31 = reshape(shape = var_765, x = transpose_125)[name = tensor("key_states_31")]; + tensor transpose_123 = transpose(perm = var_763_perm_0, x = var_762_cast)[name = tensor("transpose_123")]; + tensor query_states_15_cast = reshape(shape = var_765, x = transpose_123)[name = tensor("query_states_15_cast")]; tensor var_767 = const()[name = tensor("op_767"), val = tensor([20, -1, 64])]; - tensor transpose_124 = transpose(perm = var_752_perm_0, x = var_751)[name = tensor("transpose_124")]; - tensor value_states_31 = reshape(shape = var_767, x = transpose_124)[name = tensor("value_states_31")]; - tensor var_770_perm_0 = const()[name = tensor("op_770_perm_0"), val = tensor([0, 2, 1])]; + tensor transpose_125 = transpose(perm = var_747_perm_0, x = var_746_cast)[name = tensor("transpose_125")]; + tensor key_states_31_cast = reshape(shape = var_767, x = transpose_125)[name = tensor("key_states_31_cast")]; + tensor var_769 = const()[name = tensor("op_769"), val = tensor([20, -1, 64])]; + tensor transpose_124 = transpose(perm = var_754_perm_0, x = var_753_cast)[name = tensor("transpose_124")]; + tensor value_states_31_cast = reshape(shape = var_769, x = transpose_124)[name = tensor("value_states_31_cast")]; + tensor var_772_perm_0 = const()[name = tensor("op_772_perm_0"), val = tensor([0, 2, 1])]; tensor attn_weights_43_transpose_x_0 = const()[name = tensor("attn_weights_43_transpose_x_0"), val = tensor(false)]; tensor attn_weights_43_transpose_y_0 = const()[name = tensor("attn_weights_43_transpose_y_0"), val = tensor(false)]; - tensor transpose_122 = transpose(perm = var_770_perm_0, x = key_states_31)[name = tensor("transpose_122")]; - tensor attn_weights_43 = matmul(transpose_x = attn_weights_43_transpose_x_0, transpose_y = attn_weights_43_transpose_y_0, x = query_states_15, y = transpose_122)[name = tensor("attn_weights_43")]; - tensor var_772 = const()[name = tensor("op_772"), val = tensor([1, 20, 77, 77])]; - tensor var_773 = reshape(shape = var_772, x = attn_weights_43)[name = tensor("op_773")]; - tensor attn_weights_45 = add(x = var_773, y = causal_attention_mask)[name = tensor("attn_weights_45")]; - tensor var_778 = const()[name = tensor("op_778"), val = tensor([20, 77, 77])]; - tensor input_117 = reshape(shape = var_778, x = attn_weights_45)[name = tensor("input_117")]; - tensor input_119 = softmax(axis = var_5, x = input_117)[name = tensor("input_119")]; + tensor transpose_122 = transpose(perm = var_772_perm_0, x = key_states_31_cast)[name = tensor("transpose_122")]; + tensor attn_weights_43_cast = matmul(transpose_x = attn_weights_43_transpose_x_0, transpose_y = attn_weights_43_transpose_y_0, x = query_states_15_cast, y = transpose_122)[name = tensor("attn_weights_43_cast")]; + tensor var_774 = const()[name = tensor("op_774"), val = tensor([1, 20, 77, 77])]; + tensor var_775_cast = reshape(shape = var_774, x = attn_weights_43_cast)[name = tensor("op_775_cast")]; + tensor attn_weights_45_cast = add(x = var_775_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_45_cast")]; + tensor var_780 = const()[name = tensor("op_780"), val = tensor([20, 77, 77])]; + tensor input_117_cast = reshape(shape = var_780, x = attn_weights_45_cast)[name = tensor("input_117_cast")]; + tensor input_119_cast = softmax(axis = var_5, 
x = input_117_cast)[name = tensor("input_119_cast")]; tensor attn_output_43_transpose_x_0 = const()[name = tensor("attn_output_43_transpose_x_0"), val = tensor(false)]; tensor attn_output_43_transpose_y_0 = const()[name = tensor("attn_output_43_transpose_y_0"), val = tensor(false)]; - tensor attn_output_43 = matmul(transpose_x = attn_output_43_transpose_x_0, transpose_y = attn_output_43_transpose_y_0, x = input_119, y = value_states_31)[name = tensor("attn_output_43")]; - tensor var_783 = const()[name = tensor("op_783"), val = tensor([1, 20, 77, 64])]; - tensor attn_output_45 = reshape(shape = var_783, x = attn_output_43)[name = tensor("attn_output_45")]; + tensor attn_output_43_cast = matmul(transpose_x = attn_output_43_transpose_x_0, transpose_y = attn_output_43_transpose_y_0, x = input_119_cast, y = value_states_31_cast)[name = tensor("attn_output_43_cast")]; + tensor var_785 = const()[name = tensor("op_785"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_45_cast = reshape(shape = var_785, x = attn_output_43_cast)[name = tensor("attn_output_45_cast")]; tensor attn_output_47_perm_0 = const()[name = tensor("attn_output_47_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_786 = const()[name = tensor("op_786"), val = tensor([1, 77, 1280])]; - tensor transpose_121 = transpose(perm = attn_output_47_perm_0, x = attn_output_45)[name = tensor("transpose_121")]; - tensor input_121 = reshape(shape = var_786, x = transpose_121)[name = tensor("input_121")]; - tensor hidden_states_45 = linear(bias = text_encoder_text_model_encoder_layers_7_self_attn_out_proj_bias, weight = text_encoder_text_model_encoder_layers_7_self_attn_out_proj_weight, x = input_121)[name = tensor("hidden_states_45")]; - tensor input_123 = add(x = input_115, y = hidden_states_45)[name = tensor("input_123")]; + tensor var_788 = const()[name = tensor("op_788"), val = tensor([1, 77, 1280])]; + tensor transpose_121 = transpose(perm = attn_output_47_perm_0, x = attn_output_45_cast)[name = tensor("transpose_121")]; + tensor input_121_cast = reshape(shape = var_788, x = transpose_121)[name = tensor("input_121_cast")]; + tensor text_encoder_text_model_encoder_layers_7_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_7_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(412028800)))]; + tensor text_encoder_text_model_encoder_layers_7_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_7_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(415305664)))]; + tensor hidden_states_45_cast = linear(bias = text_encoder_text_model_encoder_layers_7_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_7_self_attn_out_proj_weight_to_fp16, x = input_121_cast)[name = tensor("hidden_states_45_cast")]; + tensor input_123_cast = add(x = input_115_cast, y = hidden_states_45_cast)[name = tensor("input_123_cast")]; tensor input_125_axes_0 = const()[name = tensor("input_125_axes_0"), val = tensor([-1])]; - tensor input_125 = layer_norm(axes = input_125_axes_0, beta = text_encoder_text_model_encoder_layers_7_layer_norm2_bias, epsilon = var_12, gamma = text_encoder_text_model_encoder_layers_7_layer_norm2_weight, x = input_123)[name = tensor("input_125")]; - tensor input_127 = linear(bias = text_encoder_text_model_encoder_layers_7_mlp_fc1_bias, weight = 
text_encoder_text_model_encoder_layers_7_mlp_fc1_weight, x = input_125)[name = tensor("input_127")]; + tensor text_encoder_text_model_encoder_layers_7_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_7_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(415308288)))]; + tensor text_encoder_text_model_encoder_layers_7_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_7_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(415310912)))]; + tensor input_125_cast = layer_norm(axes = input_125_axes_0, beta = text_encoder_text_model_encoder_layers_7_layer_norm2_bias_to_fp16, epsilon = var_13_to_fp16, gamma = text_encoder_text_model_encoder_layers_7_layer_norm2_weight_to_fp16, x = input_123_cast)[name = tensor("input_125_cast")]; + tensor text_encoder_text_model_encoder_layers_7_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_7_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(415313536)))]; + tensor text_encoder_text_model_encoder_layers_7_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_7_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(428420800)))]; + tensor input_127_cast = linear(bias = text_encoder_text_model_encoder_layers_7_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_7_mlp_fc1_weight_to_fp16, x = input_125_cast)[name = tensor("input_127_cast")]; tensor input_129_mode_0 = const()[name = tensor("input_129_mode_0"), val = tensor("EXACT")]; - tensor input_129 = gelu(mode = input_129_mode_0, x = input_127)[name = tensor("input_129")]; - tensor hidden_states_47 = linear(bias = text_encoder_text_model_encoder_layers_7_mlp_fc2_bias, weight = text_encoder_text_model_encoder_layers_7_mlp_fc2_weight, x = input_129)[name = tensor("hidden_states_47")]; - tensor input_131 = add(x = input_123, y = hidden_states_47)[name = tensor("input_131")]; + tensor input_129_cast = gelu(mode = input_129_mode_0, x = input_127_cast)[name = tensor("input_129_cast")]; + tensor text_encoder_text_model_encoder_layers_7_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_7_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(428431104)))]; + tensor text_encoder_text_model_encoder_layers_7_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_7_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(441538368)))]; + tensor hidden_states_47_cast = linear(bias = text_encoder_text_model_encoder_layers_7_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_7_mlp_fc2_weight_to_fp16, x = input_129_cast)[name = tensor("hidden_states_47_cast")]; + tensor input_131_cast = add(x = input_123_cast, y = hidden_states_47_cast)[name = tensor("input_131_cast")]; tensor hidden_states_49_axes_0 = const()[name = tensor("hidden_states_49_axes_0"), val = tensor([-1])]; - tensor hidden_states_49 = layer_norm(axes = hidden_states_49_axes_0, beta = text_encoder_text_model_encoder_layers_8_layer_norm1_bias, epsilon = var_12, gamma = text_encoder_text_model_encoder_layers_8_layer_norm1_weight, x = input_131)[name = 
tensor("hidden_states_49")]; - tensor var_824 = linear(bias = text_encoder_text_model_encoder_layers_8_self_attn_q_proj_bias, weight = text_encoder_text_model_encoder_layers_8_self_attn_q_proj_weight, x = hidden_states_49)[name = tensor("op_824")]; - tensor var_825 = const()[name = tensor("op_825"), val = tensor(0x1p-3)]; - tensor tensor_53 = mul(x = var_824, y = var_825)[name = tensor("tensor_53")]; - tensor tensor_49 = linear(bias = text_encoder_text_model_encoder_layers_8_self_attn_k_proj_bias, weight = text_encoder_text_model_encoder_layers_8_self_attn_k_proj_weight, x = hidden_states_49)[name = tensor("tensor_49")]; - tensor var_830 = const()[name = tensor("op_830"), val = tensor([1, -1, 20, 64])]; - tensor var_831 = reshape(shape = var_830, x = tensor_49)[name = tensor("op_831")]; - tensor var_832_perm_0 = const()[name = tensor("op_832_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor tensor_51 = linear(bias = text_encoder_text_model_encoder_layers_8_self_attn_v_proj_bias, weight = text_encoder_text_model_encoder_layers_8_self_attn_v_proj_weight, x = hidden_states_49)[name = tensor("tensor_51")]; - tensor var_837 = const()[name = tensor("op_837"), val = tensor([1, -1, 20, 64])]; - tensor var_838 = reshape(shape = var_837, x = tensor_51)[name = tensor("op_838")]; - tensor var_839_perm_0 = const()[name = tensor("op_839_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_846 = const()[name = tensor("op_846"), val = tensor([1, 77, 20, 64])]; - tensor var_847 = reshape(shape = var_846, x = tensor_53)[name = tensor("op_847")]; - tensor var_848_perm_0 = const()[name = tensor("op_848_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_850 = const()[name = tensor("op_850"), val = tensor([20, -1, 64])]; - tensor transpose_118 = transpose(perm = var_848_perm_0, x = var_847)[name = tensor("transpose_118")]; - tensor query_states_17 = reshape(shape = var_850, x = transpose_118)[name = tensor("query_states_17")]; + tensor text_encoder_text_model_encoder_layers_8_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_8_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(441540992)))]; + tensor text_encoder_text_model_encoder_layers_8_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_8_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(441543616)))]; + tensor hidden_states_49_cast = layer_norm(axes = hidden_states_49_axes_0, beta = text_encoder_text_model_encoder_layers_8_layer_norm1_bias_to_fp16, epsilon = var_13_to_fp16, gamma = text_encoder_text_model_encoder_layers_8_layer_norm1_weight_to_fp16, x = input_131_cast)[name = tensor("hidden_states_49_cast")]; + tensor text_encoder_text_model_encoder_layers_8_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_8_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(441546240)))]; + tensor text_encoder_text_model_encoder_layers_8_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_8_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(444823104)))]; + tensor var_826_cast = linear(bias = text_encoder_text_model_encoder_layers_8_self_attn_q_proj_bias_to_fp16, weight = 
text_encoder_text_model_encoder_layers_8_self_attn_q_proj_weight_to_fp16, x = hidden_states_49_cast)[name = tensor("op_826_cast")]; + tensor var_827_to_fp16 = const()[name = tensor("op_827_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_53_cast = mul(x = var_826_cast, y = var_827_to_fp16)[name = tensor("tensor_53_cast")]; + tensor text_encoder_text_model_encoder_layers_8_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_8_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(444825728)))]; + tensor text_encoder_text_model_encoder_layers_8_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_8_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(448102592)))]; + tensor tensor_49_cast = linear(bias = text_encoder_text_model_encoder_layers_8_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_8_self_attn_k_proj_weight_to_fp16, x = hidden_states_49_cast)[name = tensor("tensor_49_cast")]; + tensor var_832 = const()[name = tensor("op_832"), val = tensor([1, -1, 20, 64])]; + tensor var_833_cast = reshape(shape = var_832, x = tensor_49_cast)[name = tensor("op_833_cast")]; + tensor var_834_perm_0 = const()[name = tensor("op_834_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_8_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_8_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(448105216)))]; + tensor text_encoder_text_model_encoder_layers_8_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_8_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(451382080)))]; + tensor tensor_51_cast = linear(bias = text_encoder_text_model_encoder_layers_8_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_8_self_attn_v_proj_weight_to_fp16, x = hidden_states_49_cast)[name = tensor("tensor_51_cast")]; + tensor var_839 = const()[name = tensor("op_839"), val = tensor([1, -1, 20, 64])]; + tensor var_840_cast = reshape(shape = var_839, x = tensor_51_cast)[name = tensor("op_840_cast")]; + tensor var_841_perm_0 = const()[name = tensor("op_841_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_848 = const()[name = tensor("op_848"), val = tensor([1, 77, 20, 64])]; + tensor var_849_cast = reshape(shape = var_848, x = tensor_53_cast)[name = tensor("op_849_cast")]; + tensor var_850_perm_0 = const()[name = tensor("op_850_perm_0"), val = tensor([0, 2, 1, 3])]; tensor var_852 = const()[name = tensor("op_852"), val = tensor([20, -1, 64])]; - tensor transpose_120 = transpose(perm = var_832_perm_0, x = var_831)[name = tensor("transpose_120")]; - tensor key_states_35 = reshape(shape = var_852, x = transpose_120)[name = tensor("key_states_35")]; + tensor transpose_118 = transpose(perm = var_850_perm_0, x = var_849_cast)[name = tensor("transpose_118")]; + tensor query_states_17_cast = reshape(shape = var_852, x = transpose_118)[name = tensor("query_states_17_cast")]; tensor var_854 = const()[name = tensor("op_854"), val = tensor([20, -1, 64])]; - tensor transpose_119 = transpose(perm = var_839_perm_0, x = var_838)[name = tensor("transpose_119")]; - tensor value_states_35 = reshape(shape = var_854, x 
= transpose_119)[name = tensor("value_states_35")]; - tensor var_857_perm_0 = const()[name = tensor("op_857_perm_0"), val = tensor([0, 2, 1])]; + tensor transpose_120 = transpose(perm = var_834_perm_0, x = var_833_cast)[name = tensor("transpose_120")]; + tensor key_states_35_cast = reshape(shape = var_854, x = transpose_120)[name = tensor("key_states_35_cast")]; + tensor var_856 = const()[name = tensor("op_856"), val = tensor([20, -1, 64])]; + tensor transpose_119 = transpose(perm = var_841_perm_0, x = var_840_cast)[name = tensor("transpose_119")]; + tensor value_states_35_cast = reshape(shape = var_856, x = transpose_119)[name = tensor("value_states_35_cast")]; + tensor var_859_perm_0 = const()[name = tensor("op_859_perm_0"), val = tensor([0, 2, 1])]; tensor attn_weights_49_transpose_x_0 = const()[name = tensor("attn_weights_49_transpose_x_0"), val = tensor(false)]; tensor attn_weights_49_transpose_y_0 = const()[name = tensor("attn_weights_49_transpose_y_0"), val = tensor(false)]; - tensor transpose_117 = transpose(perm = var_857_perm_0, x = key_states_35)[name = tensor("transpose_117")]; - tensor attn_weights_49 = matmul(transpose_x = attn_weights_49_transpose_x_0, transpose_y = attn_weights_49_transpose_y_0, x = query_states_17, y = transpose_117)[name = tensor("attn_weights_49")]; - tensor var_859 = const()[name = tensor("op_859"), val = tensor([1, 20, 77, 77])]; - tensor var_860 = reshape(shape = var_859, x = attn_weights_49)[name = tensor("op_860")]; - tensor attn_weights_51 = add(x = var_860, y = causal_attention_mask)[name = tensor("attn_weights_51")]; - tensor var_865 = const()[name = tensor("op_865"), val = tensor([20, 77, 77])]; - tensor input_133 = reshape(shape = var_865, x = attn_weights_51)[name = tensor("input_133")]; - tensor input_135 = softmax(axis = var_5, x = input_133)[name = tensor("input_135")]; + tensor transpose_117 = transpose(perm = var_859_perm_0, x = key_states_35_cast)[name = tensor("transpose_117")]; + tensor attn_weights_49_cast = matmul(transpose_x = attn_weights_49_transpose_x_0, transpose_y = attn_weights_49_transpose_y_0, x = query_states_17_cast, y = transpose_117)[name = tensor("attn_weights_49_cast")]; + tensor var_861 = const()[name = tensor("op_861"), val = tensor([1, 20, 77, 77])]; + tensor var_862_cast = reshape(shape = var_861, x = attn_weights_49_cast)[name = tensor("op_862_cast")]; + tensor attn_weights_51_cast = add(x = var_862_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_51_cast")]; + tensor var_867 = const()[name = tensor("op_867"), val = tensor([20, 77, 77])]; + tensor input_133_cast = reshape(shape = var_867, x = attn_weights_51_cast)[name = tensor("input_133_cast")]; + tensor input_135_cast = softmax(axis = var_5, x = input_133_cast)[name = tensor("input_135_cast")]; tensor attn_output_49_transpose_x_0 = const()[name = tensor("attn_output_49_transpose_x_0"), val = tensor(false)]; tensor attn_output_49_transpose_y_0 = const()[name = tensor("attn_output_49_transpose_y_0"), val = tensor(false)]; - tensor attn_output_49 = matmul(transpose_x = attn_output_49_transpose_x_0, transpose_y = attn_output_49_transpose_y_0, x = input_135, y = value_states_35)[name = tensor("attn_output_49")]; - tensor var_870 = const()[name = tensor("op_870"), val = tensor([1, 20, 77, 64])]; - tensor attn_output_51 = reshape(shape = var_870, x = attn_output_49)[name = tensor("attn_output_51")]; + tensor attn_output_49_cast = matmul(transpose_x = attn_output_49_transpose_x_0, transpose_y = attn_output_49_transpose_y_0, x = input_135_cast, y = 
value_states_35_cast)[name = tensor("attn_output_49_cast")]; + tensor var_872 = const()[name = tensor("op_872"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_51_cast = reshape(shape = var_872, x = attn_output_49_cast)[name = tensor("attn_output_51_cast")]; tensor attn_output_53_perm_0 = const()[name = tensor("attn_output_53_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_873 = const()[name = tensor("op_873"), val = tensor([1, 77, 1280])]; - tensor transpose_116 = transpose(perm = attn_output_53_perm_0, x = attn_output_51)[name = tensor("transpose_116")]; - tensor input_137 = reshape(shape = var_873, x = transpose_116)[name = tensor("input_137")]; - tensor hidden_states_51 = linear(bias = text_encoder_text_model_encoder_layers_8_self_attn_out_proj_bias, weight = text_encoder_text_model_encoder_layers_8_self_attn_out_proj_weight, x = input_137)[name = tensor("hidden_states_51")]; - tensor input_139 = add(x = input_131, y = hidden_states_51)[name = tensor("input_139")]; + tensor var_875 = const()[name = tensor("op_875"), val = tensor([1, 77, 1280])]; + tensor transpose_116 = transpose(perm = attn_output_53_perm_0, x = attn_output_51_cast)[name = tensor("transpose_116")]; + tensor input_137_cast = reshape(shape = var_875, x = transpose_116)[name = tensor("input_137_cast")]; + tensor text_encoder_text_model_encoder_layers_8_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_8_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(451384704)))]; + tensor text_encoder_text_model_encoder_layers_8_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_8_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(454661568)))]; + tensor hidden_states_51_cast = linear(bias = text_encoder_text_model_encoder_layers_8_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_8_self_attn_out_proj_weight_to_fp16, x = input_137_cast)[name = tensor("hidden_states_51_cast")]; + tensor input_139_cast = add(x = input_131_cast, y = hidden_states_51_cast)[name = tensor("input_139_cast")]; tensor input_141_axes_0 = const()[name = tensor("input_141_axes_0"), val = tensor([-1])]; - tensor input_141 = layer_norm(axes = input_141_axes_0, beta = text_encoder_text_model_encoder_layers_8_layer_norm2_bias, epsilon = var_12, gamma = text_encoder_text_model_encoder_layers_8_layer_norm2_weight, x = input_139)[name = tensor("input_141")]; - tensor input_143 = linear(bias = text_encoder_text_model_encoder_layers_8_mlp_fc1_bias, weight = text_encoder_text_model_encoder_layers_8_mlp_fc1_weight, x = input_141)[name = tensor("input_143")]; + tensor text_encoder_text_model_encoder_layers_8_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_8_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(454664192)))]; + tensor text_encoder_text_model_encoder_layers_8_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_8_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(454666816)))]; + tensor input_141_cast = layer_norm(axes = input_141_axes_0, beta = text_encoder_text_model_encoder_layers_8_layer_norm2_bias_to_fp16, epsilon = var_13_to_fp16, gamma = 
text_encoder_text_model_encoder_layers_8_layer_norm2_weight_to_fp16, x = input_139_cast)[name = tensor("input_141_cast")]; + tensor text_encoder_text_model_encoder_layers_8_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_8_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(454669440)))]; + tensor text_encoder_text_model_encoder_layers_8_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_8_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(467776704)))]; + tensor input_143_cast = linear(bias = text_encoder_text_model_encoder_layers_8_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_8_mlp_fc1_weight_to_fp16, x = input_141_cast)[name = tensor("input_143_cast")]; tensor input_145_mode_0 = const()[name = tensor("input_145_mode_0"), val = tensor("EXACT")]; - tensor input_145 = gelu(mode = input_145_mode_0, x = input_143)[name = tensor("input_145")]; - tensor hidden_states_53 = linear(bias = text_encoder_text_model_encoder_layers_8_mlp_fc2_bias, weight = text_encoder_text_model_encoder_layers_8_mlp_fc2_weight, x = input_145)[name = tensor("hidden_states_53")]; - tensor input_147 = add(x = input_139, y = hidden_states_53)[name = tensor("input_147")]; + tensor input_145_cast = gelu(mode = input_145_mode_0, x = input_143_cast)[name = tensor("input_145_cast")]; + tensor text_encoder_text_model_encoder_layers_8_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_8_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(467787008)))]; + tensor text_encoder_text_model_encoder_layers_8_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_8_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(480894272)))]; + tensor hidden_states_53_cast = linear(bias = text_encoder_text_model_encoder_layers_8_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_8_mlp_fc2_weight_to_fp16, x = input_145_cast)[name = tensor("hidden_states_53_cast")]; + tensor input_147_cast = add(x = input_139_cast, y = hidden_states_53_cast)[name = tensor("input_147_cast")]; tensor hidden_states_55_axes_0 = const()[name = tensor("hidden_states_55_axes_0"), val = tensor([-1])]; - tensor hidden_states_55 = layer_norm(axes = hidden_states_55_axes_0, beta = text_encoder_text_model_encoder_layers_9_layer_norm1_bias, epsilon = var_12, gamma = text_encoder_text_model_encoder_layers_9_layer_norm1_weight, x = input_147)[name = tensor("hidden_states_55")]; - tensor var_911 = linear(bias = text_encoder_text_model_encoder_layers_9_self_attn_q_proj_bias, weight = text_encoder_text_model_encoder_layers_9_self_attn_q_proj_weight, x = hidden_states_55)[name = tensor("op_911")]; - tensor var_912 = const()[name = tensor("op_912"), val = tensor(0x1p-3)]; - tensor tensor_59 = mul(x = var_911, y = var_912)[name = tensor("tensor_59")]; - tensor tensor_55 = linear(bias = text_encoder_text_model_encoder_layers_9_self_attn_k_proj_bias, weight = text_encoder_text_model_encoder_layers_9_self_attn_k_proj_weight, x = hidden_states_55)[name = tensor("tensor_55")]; - tensor var_917 = const()[name = tensor("op_917"), val = tensor([1, -1, 20, 64])]; - tensor var_918 = reshape(shape = var_917, x = tensor_55)[name = tensor("op_918")]; - tensor var_919_perm_0 = 
const()[name = tensor("op_919_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor tensor_57 = linear(bias = text_encoder_text_model_encoder_layers_9_self_attn_v_proj_bias, weight = text_encoder_text_model_encoder_layers_9_self_attn_v_proj_weight, x = hidden_states_55)[name = tensor("tensor_57")]; - tensor var_924 = const()[name = tensor("op_924"), val = tensor([1, -1, 20, 64])]; - tensor var_925 = reshape(shape = var_924, x = tensor_57)[name = tensor("op_925")]; - tensor var_926_perm_0 = const()[name = tensor("op_926_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_933 = const()[name = tensor("op_933"), val = tensor([1, 77, 20, 64])]; - tensor var_934 = reshape(shape = var_933, x = tensor_59)[name = tensor("op_934")]; - tensor var_935_perm_0 = const()[name = tensor("op_935_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_937 = const()[name = tensor("op_937"), val = tensor([20, -1, 64])]; - tensor transpose_113 = transpose(perm = var_935_perm_0, x = var_934)[name = tensor("transpose_113")]; - tensor query_states_19 = reshape(shape = var_937, x = transpose_113)[name = tensor("query_states_19")]; + tensor text_encoder_text_model_encoder_layers_9_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_9_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(480896896)))]; + tensor text_encoder_text_model_encoder_layers_9_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_9_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(480899520)))]; + tensor hidden_states_55_cast = layer_norm(axes = hidden_states_55_axes_0, beta = text_encoder_text_model_encoder_layers_9_layer_norm1_bias_to_fp16, epsilon = var_13_to_fp16, gamma = text_encoder_text_model_encoder_layers_9_layer_norm1_weight_to_fp16, x = input_147_cast)[name = tensor("hidden_states_55_cast")]; + tensor text_encoder_text_model_encoder_layers_9_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_9_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(480902144)))]; + tensor text_encoder_text_model_encoder_layers_9_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_9_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(484179008)))]; + tensor var_913_cast = linear(bias = text_encoder_text_model_encoder_layers_9_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_9_self_attn_q_proj_weight_to_fp16, x = hidden_states_55_cast)[name = tensor("op_913_cast")]; + tensor var_914_to_fp16 = const()[name = tensor("op_914_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_59_cast = mul(x = var_913_cast, y = var_914_to_fp16)[name = tensor("tensor_59_cast")]; + tensor text_encoder_text_model_encoder_layers_9_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_9_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(484181632)))]; + tensor text_encoder_text_model_encoder_layers_9_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_9_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(487458496)))]; + 
tensor tensor_55_cast = linear(bias = text_encoder_text_model_encoder_layers_9_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_9_self_attn_k_proj_weight_to_fp16, x = hidden_states_55_cast)[name = tensor("tensor_55_cast")]; + tensor var_919 = const()[name = tensor("op_919"), val = tensor([1, -1, 20, 64])]; + tensor var_920_cast = reshape(shape = var_919, x = tensor_55_cast)[name = tensor("op_920_cast")]; + tensor var_921_perm_0 = const()[name = tensor("op_921_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_9_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_9_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(487461120)))]; + tensor text_encoder_text_model_encoder_layers_9_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_9_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(490737984)))]; + tensor tensor_57_cast = linear(bias = text_encoder_text_model_encoder_layers_9_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_9_self_attn_v_proj_weight_to_fp16, x = hidden_states_55_cast)[name = tensor("tensor_57_cast")]; + tensor var_926 = const()[name = tensor("op_926"), val = tensor([1, -1, 20, 64])]; + tensor var_927_cast = reshape(shape = var_926, x = tensor_57_cast)[name = tensor("op_927_cast")]; + tensor var_928_perm_0 = const()[name = tensor("op_928_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_935 = const()[name = tensor("op_935"), val = tensor([1, 77, 20, 64])]; + tensor var_936_cast = reshape(shape = var_935, x = tensor_59_cast)[name = tensor("op_936_cast")]; + tensor var_937_perm_0 = const()[name = tensor("op_937_perm_0"), val = tensor([0, 2, 1, 3])]; tensor var_939 = const()[name = tensor("op_939"), val = tensor([20, -1, 64])]; - tensor transpose_115 = transpose(perm = var_919_perm_0, x = var_918)[name = tensor("transpose_115")]; - tensor key_states_39 = reshape(shape = var_939, x = transpose_115)[name = tensor("key_states_39")]; + tensor transpose_113 = transpose(perm = var_937_perm_0, x = var_936_cast)[name = tensor("transpose_113")]; + tensor query_states_19_cast = reshape(shape = var_939, x = transpose_113)[name = tensor("query_states_19_cast")]; tensor var_941 = const()[name = tensor("op_941"), val = tensor([20, -1, 64])]; - tensor transpose_114 = transpose(perm = var_926_perm_0, x = var_925)[name = tensor("transpose_114")]; - tensor value_states_39 = reshape(shape = var_941, x = transpose_114)[name = tensor("value_states_39")]; - tensor var_944_perm_0 = const()[name = tensor("op_944_perm_0"), val = tensor([0, 2, 1])]; + tensor transpose_115 = transpose(perm = var_921_perm_0, x = var_920_cast)[name = tensor("transpose_115")]; + tensor key_states_39_cast = reshape(shape = var_941, x = transpose_115)[name = tensor("key_states_39_cast")]; + tensor var_943 = const()[name = tensor("op_943"), val = tensor([20, -1, 64])]; + tensor transpose_114 = transpose(perm = var_928_perm_0, x = var_927_cast)[name = tensor("transpose_114")]; + tensor value_states_39_cast = reshape(shape = var_943, x = transpose_114)[name = tensor("value_states_39_cast")]; + tensor var_946_perm_0 = const()[name = tensor("op_946_perm_0"), val = tensor([0, 2, 1])]; tensor attn_weights_55_transpose_x_0 = const()[name = tensor("attn_weights_55_transpose_x_0"), val = tensor(false)]; tensor 
attn_weights_55_transpose_y_0 = const()[name = tensor("attn_weights_55_transpose_y_0"), val = tensor(false)]; - tensor transpose_112 = transpose(perm = var_944_perm_0, x = key_states_39)[name = tensor("transpose_112")]; - tensor attn_weights_55 = matmul(transpose_x = attn_weights_55_transpose_x_0, transpose_y = attn_weights_55_transpose_y_0, x = query_states_19, y = transpose_112)[name = tensor("attn_weights_55")]; - tensor var_946 = const()[name = tensor("op_946"), val = tensor([1, 20, 77, 77])]; - tensor var_947 = reshape(shape = var_946, x = attn_weights_55)[name = tensor("op_947")]; - tensor attn_weights_57 = add(x = var_947, y = causal_attention_mask)[name = tensor("attn_weights_57")]; - tensor var_952 = const()[name = tensor("op_952"), val = tensor([20, 77, 77])]; - tensor input_149 = reshape(shape = var_952, x = attn_weights_57)[name = tensor("input_149")]; - tensor input_151 = softmax(axis = var_5, x = input_149)[name = tensor("input_151")]; + tensor transpose_112 = transpose(perm = var_946_perm_0, x = key_states_39_cast)[name = tensor("transpose_112")]; + tensor attn_weights_55_cast = matmul(transpose_x = attn_weights_55_transpose_x_0, transpose_y = attn_weights_55_transpose_y_0, x = query_states_19_cast, y = transpose_112)[name = tensor("attn_weights_55_cast")]; + tensor var_948 = const()[name = tensor("op_948"), val = tensor([1, 20, 77, 77])]; + tensor var_949_cast = reshape(shape = var_948, x = attn_weights_55_cast)[name = tensor("op_949_cast")]; + tensor attn_weights_57_cast = add(x = var_949_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_57_cast")]; + tensor var_954 = const()[name = tensor("op_954"), val = tensor([20, 77, 77])]; + tensor input_149_cast = reshape(shape = var_954, x = attn_weights_57_cast)[name = tensor("input_149_cast")]; + tensor input_151_cast = softmax(axis = var_5, x = input_149_cast)[name = tensor("input_151_cast")]; tensor attn_output_55_transpose_x_0 = const()[name = tensor("attn_output_55_transpose_x_0"), val = tensor(false)]; tensor attn_output_55_transpose_y_0 = const()[name = tensor("attn_output_55_transpose_y_0"), val = tensor(false)]; - tensor attn_output_55 = matmul(transpose_x = attn_output_55_transpose_x_0, transpose_y = attn_output_55_transpose_y_0, x = input_151, y = value_states_39)[name = tensor("attn_output_55")]; - tensor var_957 = const()[name = tensor("op_957"), val = tensor([1, 20, 77, 64])]; - tensor attn_output_57 = reshape(shape = var_957, x = attn_output_55)[name = tensor("attn_output_57")]; + tensor attn_output_55_cast = matmul(transpose_x = attn_output_55_transpose_x_0, transpose_y = attn_output_55_transpose_y_0, x = input_151_cast, y = value_states_39_cast)[name = tensor("attn_output_55_cast")]; + tensor var_959 = const()[name = tensor("op_959"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_57_cast = reshape(shape = var_959, x = attn_output_55_cast)[name = tensor("attn_output_57_cast")]; tensor attn_output_59_perm_0 = const()[name = tensor("attn_output_59_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_960 = const()[name = tensor("op_960"), val = tensor([1, 77, 1280])]; - tensor transpose_111 = transpose(perm = attn_output_59_perm_0, x = attn_output_57)[name = tensor("transpose_111")]; - tensor input_153 = reshape(shape = var_960, x = transpose_111)[name = tensor("input_153")]; - tensor hidden_states_57 = linear(bias = text_encoder_text_model_encoder_layers_9_self_attn_out_proj_bias, weight = text_encoder_text_model_encoder_layers_9_self_attn_out_proj_weight, x = input_153)[name = 
tensor("hidden_states_57")]; - tensor input_155 = add(x = input_147, y = hidden_states_57)[name = tensor("input_155")]; + tensor var_962 = const()[name = tensor("op_962"), val = tensor([1, 77, 1280])]; + tensor transpose_111 = transpose(perm = attn_output_59_perm_0, x = attn_output_57_cast)[name = tensor("transpose_111")]; + tensor input_153_cast = reshape(shape = var_962, x = transpose_111)[name = tensor("input_153_cast")]; + tensor text_encoder_text_model_encoder_layers_9_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_9_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(490740608)))]; + tensor text_encoder_text_model_encoder_layers_9_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_9_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(494017472)))]; + tensor hidden_states_57_cast = linear(bias = text_encoder_text_model_encoder_layers_9_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_9_self_attn_out_proj_weight_to_fp16, x = input_153_cast)[name = tensor("hidden_states_57_cast")]; + tensor input_155_cast = add(x = input_147_cast, y = hidden_states_57_cast)[name = tensor("input_155_cast")]; tensor input_157_axes_0 = const()[name = tensor("input_157_axes_0"), val = tensor([-1])]; - tensor input_157 = layer_norm(axes = input_157_axes_0, beta = text_encoder_text_model_encoder_layers_9_layer_norm2_bias, epsilon = var_12, gamma = text_encoder_text_model_encoder_layers_9_layer_norm2_weight, x = input_155)[name = tensor("input_157")]; - tensor input_159 = linear(bias = text_encoder_text_model_encoder_layers_9_mlp_fc1_bias, weight = text_encoder_text_model_encoder_layers_9_mlp_fc1_weight, x = input_157)[name = tensor("input_159")]; + tensor text_encoder_text_model_encoder_layers_9_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_9_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(494020096)))]; + tensor text_encoder_text_model_encoder_layers_9_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_9_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(494022720)))]; + tensor input_157_cast = layer_norm(axes = input_157_axes_0, beta = text_encoder_text_model_encoder_layers_9_layer_norm2_bias_to_fp16, epsilon = var_13_to_fp16, gamma = text_encoder_text_model_encoder_layers_9_layer_norm2_weight_to_fp16, x = input_155_cast)[name = tensor("input_157_cast")]; + tensor text_encoder_text_model_encoder_layers_9_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_9_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(494025344)))]; + tensor text_encoder_text_model_encoder_layers_9_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_9_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(507132608)))]; + tensor input_159_cast = linear(bias = text_encoder_text_model_encoder_layers_9_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_9_mlp_fc1_weight_to_fp16, x = input_157_cast)[name = tensor("input_159_cast")]; tensor input_161_mode_0 = 
const()[name = tensor("input_161_mode_0"), val = tensor("EXACT")]; - tensor input_161 = gelu(mode = input_161_mode_0, x = input_159)[name = tensor("input_161")]; - tensor hidden_states_59 = linear(bias = text_encoder_text_model_encoder_layers_9_mlp_fc2_bias, weight = text_encoder_text_model_encoder_layers_9_mlp_fc2_weight, x = input_161)[name = tensor("hidden_states_59")]; - tensor input_163 = add(x = input_155, y = hidden_states_59)[name = tensor("input_163")]; + tensor input_161_cast = gelu(mode = input_161_mode_0, x = input_159_cast)[name = tensor("input_161_cast")]; + tensor text_encoder_text_model_encoder_layers_9_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_9_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(507142912)))]; + tensor text_encoder_text_model_encoder_layers_9_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_9_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(520250176)))]; + tensor hidden_states_59_cast = linear(bias = text_encoder_text_model_encoder_layers_9_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_9_mlp_fc2_weight_to_fp16, x = input_161_cast)[name = tensor("hidden_states_59_cast")]; + tensor input_163_cast = add(x = input_155_cast, y = hidden_states_59_cast)[name = tensor("input_163_cast")]; tensor hidden_states_61_axes_0 = const()[name = tensor("hidden_states_61_axes_0"), val = tensor([-1])]; - tensor hidden_states_61 = layer_norm(axes = hidden_states_61_axes_0, beta = text_encoder_text_model_encoder_layers_10_layer_norm1_bias, epsilon = var_12, gamma = text_encoder_text_model_encoder_layers_10_layer_norm1_weight, x = input_163)[name = tensor("hidden_states_61")]; - tensor var_998 = linear(bias = text_encoder_text_model_encoder_layers_10_self_attn_q_proj_bias, weight = text_encoder_text_model_encoder_layers_10_self_attn_q_proj_weight, x = hidden_states_61)[name = tensor("op_998")]; - tensor var_999 = const()[name = tensor("op_999"), val = tensor(0x1p-3)]; - tensor tensor_65 = mul(x = var_998, y = var_999)[name = tensor("tensor_65")]; - tensor tensor_61 = linear(bias = text_encoder_text_model_encoder_layers_10_self_attn_k_proj_bias, weight = text_encoder_text_model_encoder_layers_10_self_attn_k_proj_weight, x = hidden_states_61)[name = tensor("tensor_61")]; - tensor var_1004 = const()[name = tensor("op_1004"), val = tensor([1, -1, 20, 64])]; - tensor var_1005 = reshape(shape = var_1004, x = tensor_61)[name = tensor("op_1005")]; - tensor var_1006_perm_0 = const()[name = tensor("op_1006_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor tensor_63 = linear(bias = text_encoder_text_model_encoder_layers_10_self_attn_v_proj_bias, weight = text_encoder_text_model_encoder_layers_10_self_attn_v_proj_weight, x = hidden_states_61)[name = tensor("tensor_63")]; - tensor var_1011 = const()[name = tensor("op_1011"), val = tensor([1, -1, 20, 64])]; - tensor var_1012 = reshape(shape = var_1011, x = tensor_63)[name = tensor("op_1012")]; - tensor var_1013_perm_0 = const()[name = tensor("op_1013_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_1020 = const()[name = tensor("op_1020"), val = tensor([1, 77, 20, 64])]; - tensor var_1021 = reshape(shape = var_1020, x = tensor_65)[name = tensor("op_1021")]; - tensor var_1022_perm_0 = const()[name = tensor("op_1022_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_1024 = const()[name = tensor("op_1024"), val = 
tensor([20, -1, 64])]; - tensor transpose_108 = transpose(perm = var_1022_perm_0, x = var_1021)[name = tensor("transpose_108")]; - tensor query_states_21 = reshape(shape = var_1024, x = transpose_108)[name = tensor("query_states_21")]; + tensor text_encoder_text_model_encoder_layers_10_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_10_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(520252800)))]; + tensor text_encoder_text_model_encoder_layers_10_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_10_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(520255424)))]; + tensor hidden_states_61_cast = layer_norm(axes = hidden_states_61_axes_0, beta = text_encoder_text_model_encoder_layers_10_layer_norm1_bias_to_fp16, epsilon = var_13_to_fp16, gamma = text_encoder_text_model_encoder_layers_10_layer_norm1_weight_to_fp16, x = input_163_cast)[name = tensor("hidden_states_61_cast")]; + tensor text_encoder_text_model_encoder_layers_10_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_10_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(520258048)))]; + tensor text_encoder_text_model_encoder_layers_10_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_10_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(523534912)))]; + tensor var_1000_cast = linear(bias = text_encoder_text_model_encoder_layers_10_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_10_self_attn_q_proj_weight_to_fp16, x = hidden_states_61_cast)[name = tensor("op_1000_cast")]; + tensor var_1001_to_fp16 = const()[name = tensor("op_1001_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_65_cast = mul(x = var_1000_cast, y = var_1001_to_fp16)[name = tensor("tensor_65_cast")]; + tensor text_encoder_text_model_encoder_layers_10_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_10_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(523537536)))]; + tensor text_encoder_text_model_encoder_layers_10_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_10_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(526814400)))]; + tensor tensor_61_cast = linear(bias = text_encoder_text_model_encoder_layers_10_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_10_self_attn_k_proj_weight_to_fp16, x = hidden_states_61_cast)[name = tensor("tensor_61_cast")]; + tensor var_1006 = const()[name = tensor("op_1006"), val = tensor([1, -1, 20, 64])]; + tensor var_1007_cast = reshape(shape = var_1006, x = tensor_61_cast)[name = tensor("op_1007_cast")]; + tensor var_1008_perm_0 = const()[name = tensor("op_1008_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_10_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_10_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(526817024)))]; + tensor 
text_encoder_text_model_encoder_layers_10_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_10_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(530093888)))]; + tensor tensor_63_cast = linear(bias = text_encoder_text_model_encoder_layers_10_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_10_self_attn_v_proj_weight_to_fp16, x = hidden_states_61_cast)[name = tensor("tensor_63_cast")]; + tensor var_1013 = const()[name = tensor("op_1013"), val = tensor([1, -1, 20, 64])]; + tensor var_1014_cast = reshape(shape = var_1013, x = tensor_63_cast)[name = tensor("op_1014_cast")]; + tensor var_1015_perm_0 = const()[name = tensor("op_1015_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1022 = const()[name = tensor("op_1022"), val = tensor([1, 77, 20, 64])]; + tensor var_1023_cast = reshape(shape = var_1022, x = tensor_65_cast)[name = tensor("op_1023_cast")]; + tensor var_1024_perm_0 = const()[name = tensor("op_1024_perm_0"), val = tensor([0, 2, 1, 3])]; tensor var_1026 = const()[name = tensor("op_1026"), val = tensor([20, -1, 64])]; - tensor transpose_110 = transpose(perm = var_1006_perm_0, x = var_1005)[name = tensor("transpose_110")]; - tensor key_states_43 = reshape(shape = var_1026, x = transpose_110)[name = tensor("key_states_43")]; + tensor transpose_108 = transpose(perm = var_1024_perm_0, x = var_1023_cast)[name = tensor("transpose_108")]; + tensor query_states_21_cast = reshape(shape = var_1026, x = transpose_108)[name = tensor("query_states_21_cast")]; tensor var_1028 = const()[name = tensor("op_1028"), val = tensor([20, -1, 64])]; - tensor transpose_109 = transpose(perm = var_1013_perm_0, x = var_1012)[name = tensor("transpose_109")]; - tensor value_states_43 = reshape(shape = var_1028, x = transpose_109)[name = tensor("value_states_43")]; - tensor var_1031_perm_0 = const()[name = tensor("op_1031_perm_0"), val = tensor([0, 2, 1])]; + tensor transpose_110 = transpose(perm = var_1008_perm_0, x = var_1007_cast)[name = tensor("transpose_110")]; + tensor key_states_43_cast = reshape(shape = var_1028, x = transpose_110)[name = tensor("key_states_43_cast")]; + tensor var_1030 = const()[name = tensor("op_1030"), val = tensor([20, -1, 64])]; + tensor transpose_109 = transpose(perm = var_1015_perm_0, x = var_1014_cast)[name = tensor("transpose_109")]; + tensor value_states_43_cast = reshape(shape = var_1030, x = transpose_109)[name = tensor("value_states_43_cast")]; + tensor var_1033_perm_0 = const()[name = tensor("op_1033_perm_0"), val = tensor([0, 2, 1])]; tensor attn_weights_61_transpose_x_0 = const()[name = tensor("attn_weights_61_transpose_x_0"), val = tensor(false)]; tensor attn_weights_61_transpose_y_0 = const()[name = tensor("attn_weights_61_transpose_y_0"), val = tensor(false)]; - tensor transpose_107 = transpose(perm = var_1031_perm_0, x = key_states_43)[name = tensor("transpose_107")]; - tensor attn_weights_61 = matmul(transpose_x = attn_weights_61_transpose_x_0, transpose_y = attn_weights_61_transpose_y_0, x = query_states_21, y = transpose_107)[name = tensor("attn_weights_61")]; - tensor var_1033 = const()[name = tensor("op_1033"), val = tensor([1, 20, 77, 77])]; - tensor var_1034 = reshape(shape = var_1033, x = attn_weights_61)[name = tensor("op_1034")]; - tensor attn_weights_63 = add(x = var_1034, y = causal_attention_mask)[name = tensor("attn_weights_63")]; - tensor var_1039 = const()[name = tensor("op_1039"), val = tensor([20, 77, 77])]; - 
tensor input_165 = reshape(shape = var_1039, x = attn_weights_63)[name = tensor("input_165")]; - tensor input_167 = softmax(axis = var_5, x = input_165)[name = tensor("input_167")]; + tensor transpose_107 = transpose(perm = var_1033_perm_0, x = key_states_43_cast)[name = tensor("transpose_107")]; + tensor attn_weights_61_cast = matmul(transpose_x = attn_weights_61_transpose_x_0, transpose_y = attn_weights_61_transpose_y_0, x = query_states_21_cast, y = transpose_107)[name = tensor("attn_weights_61_cast")]; + tensor var_1035 = const()[name = tensor("op_1035"), val = tensor([1, 20, 77, 77])]; + tensor var_1036_cast = reshape(shape = var_1035, x = attn_weights_61_cast)[name = tensor("op_1036_cast")]; + tensor attn_weights_63_cast = add(x = var_1036_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_63_cast")]; + tensor var_1041 = const()[name = tensor("op_1041"), val = tensor([20, 77, 77])]; + tensor input_165_cast = reshape(shape = var_1041, x = attn_weights_63_cast)[name = tensor("input_165_cast")]; + tensor input_167_cast = softmax(axis = var_5, x = input_165_cast)[name = tensor("input_167_cast")]; tensor attn_output_61_transpose_x_0 = const()[name = tensor("attn_output_61_transpose_x_0"), val = tensor(false)]; tensor attn_output_61_transpose_y_0 = const()[name = tensor("attn_output_61_transpose_y_0"), val = tensor(false)]; - tensor attn_output_61 = matmul(transpose_x = attn_output_61_transpose_x_0, transpose_y = attn_output_61_transpose_y_0, x = input_167, y = value_states_43)[name = tensor("attn_output_61")]; - tensor var_1044 = const()[name = tensor("op_1044"), val = tensor([1, 20, 77, 64])]; - tensor attn_output_63 = reshape(shape = var_1044, x = attn_output_61)[name = tensor("attn_output_63")]; + tensor attn_output_61_cast = matmul(transpose_x = attn_output_61_transpose_x_0, transpose_y = attn_output_61_transpose_y_0, x = input_167_cast, y = value_states_43_cast)[name = tensor("attn_output_61_cast")]; + tensor var_1046 = const()[name = tensor("op_1046"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_63_cast = reshape(shape = var_1046, x = attn_output_61_cast)[name = tensor("attn_output_63_cast")]; tensor attn_output_65_perm_0 = const()[name = tensor("attn_output_65_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_1047 = const()[name = tensor("op_1047"), val = tensor([1, 77, 1280])]; - tensor transpose_106 = transpose(perm = attn_output_65_perm_0, x = attn_output_63)[name = tensor("transpose_106")]; - tensor input_169 = reshape(shape = var_1047, x = transpose_106)[name = tensor("input_169")]; - tensor hidden_states_63 = linear(bias = text_encoder_text_model_encoder_layers_10_self_attn_out_proj_bias, weight = text_encoder_text_model_encoder_layers_10_self_attn_out_proj_weight, x = input_169)[name = tensor("hidden_states_63")]; - tensor input_171 = add(x = input_163, y = hidden_states_63)[name = tensor("input_171")]; + tensor var_1049 = const()[name = tensor("op_1049"), val = tensor([1, 77, 1280])]; + tensor transpose_106 = transpose(perm = attn_output_65_perm_0, x = attn_output_63_cast)[name = tensor("transpose_106")]; + tensor input_169_cast = reshape(shape = var_1049, x = transpose_106)[name = tensor("input_169_cast")]; + tensor text_encoder_text_model_encoder_layers_10_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_10_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(530096512)))]; + tensor 
text_encoder_text_model_encoder_layers_10_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_10_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(533373376)))]; + tensor hidden_states_63_cast = linear(bias = text_encoder_text_model_encoder_layers_10_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_10_self_attn_out_proj_weight_to_fp16, x = input_169_cast)[name = tensor("hidden_states_63_cast")]; + tensor input_171_cast = add(x = input_163_cast, y = hidden_states_63_cast)[name = tensor("input_171_cast")]; tensor input_173_axes_0 = const()[name = tensor("input_173_axes_0"), val = tensor([-1])]; - tensor input_173 = layer_norm(axes = input_173_axes_0, beta = text_encoder_text_model_encoder_layers_10_layer_norm2_bias, epsilon = var_12, gamma = text_encoder_text_model_encoder_layers_10_layer_norm2_weight, x = input_171)[name = tensor("input_173")]; - tensor input_175 = linear(bias = text_encoder_text_model_encoder_layers_10_mlp_fc1_bias, weight = text_encoder_text_model_encoder_layers_10_mlp_fc1_weight, x = input_173)[name = tensor("input_175")]; + tensor text_encoder_text_model_encoder_layers_10_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_10_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(533376000)))]; + tensor text_encoder_text_model_encoder_layers_10_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_10_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(533378624)))]; + tensor input_173_cast = layer_norm(axes = input_173_axes_0, beta = text_encoder_text_model_encoder_layers_10_layer_norm2_bias_to_fp16, epsilon = var_13_to_fp16, gamma = text_encoder_text_model_encoder_layers_10_layer_norm2_weight_to_fp16, x = input_171_cast)[name = tensor("input_173_cast")]; + tensor text_encoder_text_model_encoder_layers_10_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_10_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(533381248)))]; + tensor text_encoder_text_model_encoder_layers_10_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_10_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(546488512)))]; + tensor input_175_cast = linear(bias = text_encoder_text_model_encoder_layers_10_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_10_mlp_fc1_weight_to_fp16, x = input_173_cast)[name = tensor("input_175_cast")]; tensor input_177_mode_0 = const()[name = tensor("input_177_mode_0"), val = tensor("EXACT")]; - tensor input_177 = gelu(mode = input_177_mode_0, x = input_175)[name = tensor("input_177")]; - tensor hidden_states_65 = linear(bias = text_encoder_text_model_encoder_layers_10_mlp_fc2_bias, weight = text_encoder_text_model_encoder_layers_10_mlp_fc2_weight, x = input_177)[name = tensor("hidden_states_65")]; - tensor input_179 = add(x = input_171, y = hidden_states_65)[name = tensor("input_179")]; + tensor input_177_cast = gelu(mode = input_177_mode_0, x = input_175_cast)[name = tensor("input_177_cast")]; + tensor text_encoder_text_model_encoder_layers_10_mlp_fc2_weight_to_fp16 = const()[name = 
tensor("text_encoder_text_model_encoder_layers_10_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(546498816)))]; + tensor text_encoder_text_model_encoder_layers_10_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_10_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(559606080)))]; + tensor hidden_states_65_cast = linear(bias = text_encoder_text_model_encoder_layers_10_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_10_mlp_fc2_weight_to_fp16, x = input_177_cast)[name = tensor("hidden_states_65_cast")]; + tensor input_179_cast = add(x = input_171_cast, y = hidden_states_65_cast)[name = tensor("input_179_cast")]; tensor hidden_states_67_axes_0 = const()[name = tensor("hidden_states_67_axes_0"), val = tensor([-1])]; - tensor hidden_states_67 = layer_norm(axes = hidden_states_67_axes_0, beta = text_encoder_text_model_encoder_layers_11_layer_norm1_bias, epsilon = var_12, gamma = text_encoder_text_model_encoder_layers_11_layer_norm1_weight, x = input_179)[name = tensor("hidden_states_67")]; - tensor var_1085 = linear(bias = text_encoder_text_model_encoder_layers_11_self_attn_q_proj_bias, weight = text_encoder_text_model_encoder_layers_11_self_attn_q_proj_weight, x = hidden_states_67)[name = tensor("op_1085")]; - tensor var_1086 = const()[name = tensor("op_1086"), val = tensor(0x1p-3)]; - tensor tensor_71 = mul(x = var_1085, y = var_1086)[name = tensor("tensor_71")]; - tensor tensor_67 = linear(bias = text_encoder_text_model_encoder_layers_11_self_attn_k_proj_bias, weight = text_encoder_text_model_encoder_layers_11_self_attn_k_proj_weight, x = hidden_states_67)[name = tensor("tensor_67")]; - tensor var_1091 = const()[name = tensor("op_1091"), val = tensor([1, -1, 20, 64])]; - tensor var_1092 = reshape(shape = var_1091, x = tensor_67)[name = tensor("op_1092")]; - tensor var_1093_perm_0 = const()[name = tensor("op_1093_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor tensor_69 = linear(bias = text_encoder_text_model_encoder_layers_11_self_attn_v_proj_bias, weight = text_encoder_text_model_encoder_layers_11_self_attn_v_proj_weight, x = hidden_states_67)[name = tensor("tensor_69")]; - tensor var_1098 = const()[name = tensor("op_1098"), val = tensor([1, -1, 20, 64])]; - tensor var_1099 = reshape(shape = var_1098, x = tensor_69)[name = tensor("op_1099")]; - tensor var_1100_perm_0 = const()[name = tensor("op_1100_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_1107 = const()[name = tensor("op_1107"), val = tensor([1, 77, 20, 64])]; - tensor var_1108 = reshape(shape = var_1107, x = tensor_71)[name = tensor("op_1108")]; - tensor var_1109_perm_0 = const()[name = tensor("op_1109_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_1111 = const()[name = tensor("op_1111"), val = tensor([20, -1, 64])]; - tensor transpose_103 = transpose(perm = var_1109_perm_0, x = var_1108)[name = tensor("transpose_103")]; - tensor query_states_23 = reshape(shape = var_1111, x = transpose_103)[name = tensor("query_states_23")]; + tensor text_encoder_text_model_encoder_layers_11_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_11_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(559608704)))]; + tensor text_encoder_text_model_encoder_layers_11_layer_norm1_bias_to_fp16 = const()[name = 
tensor("text_encoder_text_model_encoder_layers_11_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(559611328)))]; + tensor hidden_states_67_cast = layer_norm(axes = hidden_states_67_axes_0, beta = text_encoder_text_model_encoder_layers_11_layer_norm1_bias_to_fp16, epsilon = var_13_to_fp16, gamma = text_encoder_text_model_encoder_layers_11_layer_norm1_weight_to_fp16, x = input_179_cast)[name = tensor("hidden_states_67_cast")]; + tensor text_encoder_text_model_encoder_layers_11_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_11_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(559613952)))]; + tensor text_encoder_text_model_encoder_layers_11_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_11_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(562890816)))]; + tensor var_1087_cast = linear(bias = text_encoder_text_model_encoder_layers_11_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_11_self_attn_q_proj_weight_to_fp16, x = hidden_states_67_cast)[name = tensor("op_1087_cast")]; + tensor var_1088_to_fp16 = const()[name = tensor("op_1088_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_71_cast = mul(x = var_1087_cast, y = var_1088_to_fp16)[name = tensor("tensor_71_cast")]; + tensor text_encoder_text_model_encoder_layers_11_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_11_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(562893440)))]; + tensor text_encoder_text_model_encoder_layers_11_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_11_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(566170304)))]; + tensor tensor_67_cast = linear(bias = text_encoder_text_model_encoder_layers_11_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_11_self_attn_k_proj_weight_to_fp16, x = hidden_states_67_cast)[name = tensor("tensor_67_cast")]; + tensor var_1093 = const()[name = tensor("op_1093"), val = tensor([1, -1, 20, 64])]; + tensor var_1094_cast = reshape(shape = var_1093, x = tensor_67_cast)[name = tensor("op_1094_cast")]; + tensor var_1095_perm_0 = const()[name = tensor("op_1095_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_11_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_11_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(566172928)))]; + tensor text_encoder_text_model_encoder_layers_11_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_11_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(569449792)))]; + tensor tensor_69_cast = linear(bias = text_encoder_text_model_encoder_layers_11_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_11_self_attn_v_proj_weight_to_fp16, x = hidden_states_67_cast)[name = tensor("tensor_69_cast")]; + tensor var_1100 = const()[name = tensor("op_1100"), val = tensor([1, -1, 20, 64])]; + 
tensor var_1101_cast = reshape(shape = var_1100, x = tensor_69_cast)[name = tensor("op_1101_cast")]; + tensor var_1102_perm_0 = const()[name = tensor("op_1102_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1109 = const()[name = tensor("op_1109"), val = tensor([1, 77, 20, 64])]; + tensor var_1110_cast = reshape(shape = var_1109, x = tensor_71_cast)[name = tensor("op_1110_cast")]; + tensor var_1111_perm_0 = const()[name = tensor("op_1111_perm_0"), val = tensor([0, 2, 1, 3])]; tensor var_1113 = const()[name = tensor("op_1113"), val = tensor([20, -1, 64])]; - tensor transpose_105 = transpose(perm = var_1093_perm_0, x = var_1092)[name = tensor("transpose_105")]; - tensor key_states_47 = reshape(shape = var_1113, x = transpose_105)[name = tensor("key_states_47")]; + tensor transpose_103 = transpose(perm = var_1111_perm_0, x = var_1110_cast)[name = tensor("transpose_103")]; + tensor query_states_23_cast = reshape(shape = var_1113, x = transpose_103)[name = tensor("query_states_23_cast")]; tensor var_1115 = const()[name = tensor("op_1115"), val = tensor([20, -1, 64])]; - tensor transpose_104 = transpose(perm = var_1100_perm_0, x = var_1099)[name = tensor("transpose_104")]; - tensor value_states_47 = reshape(shape = var_1115, x = transpose_104)[name = tensor("value_states_47")]; - tensor var_1118_perm_0 = const()[name = tensor("op_1118_perm_0"), val = tensor([0, 2, 1])]; + tensor transpose_105 = transpose(perm = var_1095_perm_0, x = var_1094_cast)[name = tensor("transpose_105")]; + tensor key_states_47_cast = reshape(shape = var_1115, x = transpose_105)[name = tensor("key_states_47_cast")]; + tensor var_1117 = const()[name = tensor("op_1117"), val = tensor([20, -1, 64])]; + tensor transpose_104 = transpose(perm = var_1102_perm_0, x = var_1101_cast)[name = tensor("transpose_104")]; + tensor value_states_47_cast = reshape(shape = var_1117, x = transpose_104)[name = tensor("value_states_47_cast")]; + tensor var_1120_perm_0 = const()[name = tensor("op_1120_perm_0"), val = tensor([0, 2, 1])]; tensor attn_weights_67_transpose_x_0 = const()[name = tensor("attn_weights_67_transpose_x_0"), val = tensor(false)]; tensor attn_weights_67_transpose_y_0 = const()[name = tensor("attn_weights_67_transpose_y_0"), val = tensor(false)]; - tensor transpose_102 = transpose(perm = var_1118_perm_0, x = key_states_47)[name = tensor("transpose_102")]; - tensor attn_weights_67 = matmul(transpose_x = attn_weights_67_transpose_x_0, transpose_y = attn_weights_67_transpose_y_0, x = query_states_23, y = transpose_102)[name = tensor("attn_weights_67")]; - tensor var_1120 = const()[name = tensor("op_1120"), val = tensor([1, 20, 77, 77])]; - tensor var_1121 = reshape(shape = var_1120, x = attn_weights_67)[name = tensor("op_1121")]; - tensor attn_weights_69 = add(x = var_1121, y = causal_attention_mask)[name = tensor("attn_weights_69")]; - tensor var_1126 = const()[name = tensor("op_1126"), val = tensor([20, 77, 77])]; - tensor input_181 = reshape(shape = var_1126, x = attn_weights_69)[name = tensor("input_181")]; - tensor input_183 = softmax(axis = var_5, x = input_181)[name = tensor("input_183")]; + tensor transpose_102 = transpose(perm = var_1120_perm_0, x = key_states_47_cast)[name = tensor("transpose_102")]; + tensor attn_weights_67_cast = matmul(transpose_x = attn_weights_67_transpose_x_0, transpose_y = attn_weights_67_transpose_y_0, x = query_states_23_cast, y = transpose_102)[name = tensor("attn_weights_67_cast")]; + tensor var_1122 = const()[name = tensor("op_1122"), val = tensor([1, 20, 77, 77])]; + tensor 
var_1123_cast = reshape(shape = var_1122, x = attn_weights_67_cast)[name = tensor("op_1123_cast")]; + tensor attn_weights_69_cast = add(x = var_1123_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_69_cast")]; + tensor var_1128 = const()[name = tensor("op_1128"), val = tensor([20, 77, 77])]; + tensor input_181_cast = reshape(shape = var_1128, x = attn_weights_69_cast)[name = tensor("input_181_cast")]; + tensor input_183_cast = softmax(axis = var_5, x = input_181_cast)[name = tensor("input_183_cast")]; tensor attn_output_67_transpose_x_0 = const()[name = tensor("attn_output_67_transpose_x_0"), val = tensor(false)]; tensor attn_output_67_transpose_y_0 = const()[name = tensor("attn_output_67_transpose_y_0"), val = tensor(false)]; - tensor attn_output_67 = matmul(transpose_x = attn_output_67_transpose_x_0, transpose_y = attn_output_67_transpose_y_0, x = input_183, y = value_states_47)[name = tensor("attn_output_67")]; - tensor var_1131 = const()[name = tensor("op_1131"), val = tensor([1, 20, 77, 64])]; - tensor attn_output_69 = reshape(shape = var_1131, x = attn_output_67)[name = tensor("attn_output_69")]; + tensor attn_output_67_cast = matmul(transpose_x = attn_output_67_transpose_x_0, transpose_y = attn_output_67_transpose_y_0, x = input_183_cast, y = value_states_47_cast)[name = tensor("attn_output_67_cast")]; + tensor var_1133 = const()[name = tensor("op_1133"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_69_cast = reshape(shape = var_1133, x = attn_output_67_cast)[name = tensor("attn_output_69_cast")]; tensor attn_output_71_perm_0 = const()[name = tensor("attn_output_71_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_1134 = const()[name = tensor("op_1134"), val = tensor([1, 77, 1280])]; - tensor transpose_101 = transpose(perm = attn_output_71_perm_0, x = attn_output_69)[name = tensor("transpose_101")]; - tensor input_185 = reshape(shape = var_1134, x = transpose_101)[name = tensor("input_185")]; - tensor hidden_states_69 = linear(bias = text_encoder_text_model_encoder_layers_11_self_attn_out_proj_bias, weight = text_encoder_text_model_encoder_layers_11_self_attn_out_proj_weight, x = input_185)[name = tensor("hidden_states_69")]; - tensor input_187 = add(x = input_179, y = hidden_states_69)[name = tensor("input_187")]; + tensor var_1136 = const()[name = tensor("op_1136"), val = tensor([1, 77, 1280])]; + tensor transpose_101 = transpose(perm = attn_output_71_perm_0, x = attn_output_69_cast)[name = tensor("transpose_101")]; + tensor input_185_cast = reshape(shape = var_1136, x = transpose_101)[name = tensor("input_185_cast")]; + tensor text_encoder_text_model_encoder_layers_11_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_11_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(569452416)))]; + tensor text_encoder_text_model_encoder_layers_11_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_11_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(572729280)))]; + tensor hidden_states_69_cast = linear(bias = text_encoder_text_model_encoder_layers_11_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_11_self_attn_out_proj_weight_to_fp16, x = input_185_cast)[name = tensor("hidden_states_69_cast")]; + tensor input_187_cast = add(x = input_179_cast, y = hidden_states_69_cast)[name = 
tensor("input_187_cast")]; tensor input_189_axes_0 = const()[name = tensor("input_189_axes_0"), val = tensor([-1])]; - tensor input_189 = layer_norm(axes = input_189_axes_0, beta = text_encoder_text_model_encoder_layers_11_layer_norm2_bias, epsilon = var_12, gamma = text_encoder_text_model_encoder_layers_11_layer_norm2_weight, x = input_187)[name = tensor("input_189")]; - tensor input_191 = linear(bias = text_encoder_text_model_encoder_layers_11_mlp_fc1_bias, weight = text_encoder_text_model_encoder_layers_11_mlp_fc1_weight, x = input_189)[name = tensor("input_191")]; + tensor text_encoder_text_model_encoder_layers_11_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_11_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(572731904)))]; + tensor text_encoder_text_model_encoder_layers_11_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_11_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(572734528)))]; + tensor input_189_cast = layer_norm(axes = input_189_axes_0, beta = text_encoder_text_model_encoder_layers_11_layer_norm2_bias_to_fp16, epsilon = var_13_to_fp16, gamma = text_encoder_text_model_encoder_layers_11_layer_norm2_weight_to_fp16, x = input_187_cast)[name = tensor("input_189_cast")]; + tensor text_encoder_text_model_encoder_layers_11_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_11_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(572737152)))]; + tensor text_encoder_text_model_encoder_layers_11_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_11_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(585844416)))]; + tensor input_191_cast = linear(bias = text_encoder_text_model_encoder_layers_11_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_11_mlp_fc1_weight_to_fp16, x = input_189_cast)[name = tensor("input_191_cast")]; tensor input_193_mode_0 = const()[name = tensor("input_193_mode_0"), val = tensor("EXACT")]; - tensor input_193 = gelu(mode = input_193_mode_0, x = input_191)[name = tensor("input_193")]; - tensor hidden_states_71 = linear(bias = text_encoder_text_model_encoder_layers_11_mlp_fc2_bias, weight = text_encoder_text_model_encoder_layers_11_mlp_fc2_weight, x = input_193)[name = tensor("hidden_states_71")]; - tensor input_195 = add(x = input_187, y = hidden_states_71)[name = tensor("input_195")]; + tensor input_193_cast = gelu(mode = input_193_mode_0, x = input_191_cast)[name = tensor("input_193_cast")]; + tensor text_encoder_text_model_encoder_layers_11_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_11_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(585854720)))]; + tensor text_encoder_text_model_encoder_layers_11_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_11_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(598961984)))]; + tensor hidden_states_71_cast = linear(bias = text_encoder_text_model_encoder_layers_11_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_11_mlp_fc2_weight_to_fp16, x = input_193_cast)[name = 
tensor("hidden_states_71_cast")]; + tensor input_195_cast = add(x = input_187_cast, y = hidden_states_71_cast)[name = tensor("input_195_cast")]; tensor hidden_states_73_axes_0 = const()[name = tensor("hidden_states_73_axes_0"), val = tensor([-1])]; - tensor hidden_states_73 = layer_norm(axes = hidden_states_73_axes_0, beta = text_encoder_text_model_encoder_layers_12_layer_norm1_bias, epsilon = var_12, gamma = text_encoder_text_model_encoder_layers_12_layer_norm1_weight, x = input_195)[name = tensor("hidden_states_73")]; - tensor var_1172 = linear(bias = text_encoder_text_model_encoder_layers_12_self_attn_q_proj_bias, weight = text_encoder_text_model_encoder_layers_12_self_attn_q_proj_weight, x = hidden_states_73)[name = tensor("op_1172")]; - tensor var_1173 = const()[name = tensor("op_1173"), val = tensor(0x1p-3)]; - tensor tensor_77 = mul(x = var_1172, y = var_1173)[name = tensor("tensor_77")]; - tensor tensor_73 = linear(bias = text_encoder_text_model_encoder_layers_12_self_attn_k_proj_bias, weight = text_encoder_text_model_encoder_layers_12_self_attn_k_proj_weight, x = hidden_states_73)[name = tensor("tensor_73")]; - tensor var_1178 = const()[name = tensor("op_1178"), val = tensor([1, -1, 20, 64])]; - tensor var_1179 = reshape(shape = var_1178, x = tensor_73)[name = tensor("op_1179")]; - tensor var_1180_perm_0 = const()[name = tensor("op_1180_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor tensor_75 = linear(bias = text_encoder_text_model_encoder_layers_12_self_attn_v_proj_bias, weight = text_encoder_text_model_encoder_layers_12_self_attn_v_proj_weight, x = hidden_states_73)[name = tensor("tensor_75")]; - tensor var_1185 = const()[name = tensor("op_1185"), val = tensor([1, -1, 20, 64])]; - tensor var_1186 = reshape(shape = var_1185, x = tensor_75)[name = tensor("op_1186")]; - tensor var_1187_perm_0 = const()[name = tensor("op_1187_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_1194 = const()[name = tensor("op_1194"), val = tensor([1, 77, 20, 64])]; - tensor var_1195 = reshape(shape = var_1194, x = tensor_77)[name = tensor("op_1195")]; - tensor var_1196_perm_0 = const()[name = tensor("op_1196_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_1198 = const()[name = tensor("op_1198"), val = tensor([20, -1, 64])]; - tensor transpose_98 = transpose(perm = var_1196_perm_0, x = var_1195)[name = tensor("transpose_98")]; - tensor query_states_25 = reshape(shape = var_1198, x = transpose_98)[name = tensor("query_states_25")]; + tensor text_encoder_text_model_encoder_layers_12_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_12_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(598964608)))]; + tensor text_encoder_text_model_encoder_layers_12_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_12_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(598967232)))]; + tensor hidden_states_73_cast = layer_norm(axes = hidden_states_73_axes_0, beta = text_encoder_text_model_encoder_layers_12_layer_norm1_bias_to_fp16, epsilon = var_13_to_fp16, gamma = text_encoder_text_model_encoder_layers_12_layer_norm1_weight_to_fp16, x = input_195_cast)[name = tensor("hidden_states_73_cast")]; + tensor text_encoder_text_model_encoder_layers_12_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_12_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(598969856)))]; + tensor text_encoder_text_model_encoder_layers_12_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_12_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(602246720)))]; + tensor var_1174_cast = linear(bias = text_encoder_text_model_encoder_layers_12_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_12_self_attn_q_proj_weight_to_fp16, x = hidden_states_73_cast)[name = tensor("op_1174_cast")]; + tensor var_1175_to_fp16 = const()[name = tensor("op_1175_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_77_cast = mul(x = var_1174_cast, y = var_1175_to_fp16)[name = tensor("tensor_77_cast")]; + tensor text_encoder_text_model_encoder_layers_12_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_12_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(602249344)))]; + tensor text_encoder_text_model_encoder_layers_12_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_12_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(605526208)))]; + tensor tensor_73_cast = linear(bias = text_encoder_text_model_encoder_layers_12_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_12_self_attn_k_proj_weight_to_fp16, x = hidden_states_73_cast)[name = tensor("tensor_73_cast")]; + tensor var_1180 = const()[name = tensor("op_1180"), val = tensor([1, -1, 20, 64])]; + tensor var_1181_cast = reshape(shape = var_1180, x = tensor_73_cast)[name = tensor("op_1181_cast")]; + tensor var_1182_perm_0 = const()[name = tensor("op_1182_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_12_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_12_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(605528832)))]; + tensor text_encoder_text_model_encoder_layers_12_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_12_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(608805696)))]; + tensor tensor_75_cast = linear(bias = text_encoder_text_model_encoder_layers_12_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_12_self_attn_v_proj_weight_to_fp16, x = hidden_states_73_cast)[name = tensor("tensor_75_cast")]; + tensor var_1187 = const()[name = tensor("op_1187"), val = tensor([1, -1, 20, 64])]; + tensor var_1188_cast = reshape(shape = var_1187, x = tensor_75_cast)[name = tensor("op_1188_cast")]; + tensor var_1189_perm_0 = const()[name = tensor("op_1189_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1196 = const()[name = tensor("op_1196"), val = tensor([1, 77, 20, 64])]; + tensor var_1197_cast = reshape(shape = var_1196, x = tensor_77_cast)[name = tensor("op_1197_cast")]; + tensor var_1198_perm_0 = const()[name = tensor("op_1198_perm_0"), val = tensor([0, 2, 1, 3])]; tensor var_1200 = const()[name = tensor("op_1200"), val = tensor([20, -1, 64])]; - tensor transpose_100 = transpose(perm = var_1180_perm_0, x = var_1179)[name = tensor("transpose_100")]; - tensor key_states_51 = reshape(shape = 
var_1200, x = transpose_100)[name = tensor("key_states_51")]; + tensor transpose_98 = transpose(perm = var_1198_perm_0, x = var_1197_cast)[name = tensor("transpose_98")]; + tensor query_states_25_cast = reshape(shape = var_1200, x = transpose_98)[name = tensor("query_states_25_cast")]; tensor var_1202 = const()[name = tensor("op_1202"), val = tensor([20, -1, 64])]; - tensor transpose_99 = transpose(perm = var_1187_perm_0, x = var_1186)[name = tensor("transpose_99")]; - tensor value_states_51 = reshape(shape = var_1202, x = transpose_99)[name = tensor("value_states_51")]; - tensor var_1205_perm_0 = const()[name = tensor("op_1205_perm_0"), val = tensor([0, 2, 1])]; + tensor transpose_100 = transpose(perm = var_1182_perm_0, x = var_1181_cast)[name = tensor("transpose_100")]; + tensor key_states_51_cast = reshape(shape = var_1202, x = transpose_100)[name = tensor("key_states_51_cast")]; + tensor var_1204 = const()[name = tensor("op_1204"), val = tensor([20, -1, 64])]; + tensor transpose_99 = transpose(perm = var_1189_perm_0, x = var_1188_cast)[name = tensor("transpose_99")]; + tensor value_states_51_cast = reshape(shape = var_1204, x = transpose_99)[name = tensor("value_states_51_cast")]; + tensor var_1207_perm_0 = const()[name = tensor("op_1207_perm_0"), val = tensor([0, 2, 1])]; tensor attn_weights_73_transpose_x_0 = const()[name = tensor("attn_weights_73_transpose_x_0"), val = tensor(false)]; tensor attn_weights_73_transpose_y_0 = const()[name = tensor("attn_weights_73_transpose_y_0"), val = tensor(false)]; - tensor transpose_97 = transpose(perm = var_1205_perm_0, x = key_states_51)[name = tensor("transpose_97")]; - tensor attn_weights_73 = matmul(transpose_x = attn_weights_73_transpose_x_0, transpose_y = attn_weights_73_transpose_y_0, x = query_states_25, y = transpose_97)[name = tensor("attn_weights_73")]; - tensor var_1207 = const()[name = tensor("op_1207"), val = tensor([1, 20, 77, 77])]; - tensor var_1208 = reshape(shape = var_1207, x = attn_weights_73)[name = tensor("op_1208")]; - tensor attn_weights_75 = add(x = var_1208, y = causal_attention_mask)[name = tensor("attn_weights_75")]; - tensor var_1213 = const()[name = tensor("op_1213"), val = tensor([20, 77, 77])]; - tensor input_197 = reshape(shape = var_1213, x = attn_weights_75)[name = tensor("input_197")]; - tensor input_199 = softmax(axis = var_5, x = input_197)[name = tensor("input_199")]; + tensor transpose_97 = transpose(perm = var_1207_perm_0, x = key_states_51_cast)[name = tensor("transpose_97")]; + tensor attn_weights_73_cast = matmul(transpose_x = attn_weights_73_transpose_x_0, transpose_y = attn_weights_73_transpose_y_0, x = query_states_25_cast, y = transpose_97)[name = tensor("attn_weights_73_cast")]; + tensor var_1209 = const()[name = tensor("op_1209"), val = tensor([1, 20, 77, 77])]; + tensor var_1210_cast = reshape(shape = var_1209, x = attn_weights_73_cast)[name = tensor("op_1210_cast")]; + tensor attn_weights_75_cast = add(x = var_1210_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_75_cast")]; + tensor var_1215 = const()[name = tensor("op_1215"), val = tensor([20, 77, 77])]; + tensor input_197_cast = reshape(shape = var_1215, x = attn_weights_75_cast)[name = tensor("input_197_cast")]; + tensor input_199_cast = softmax(axis = var_5, x = input_197_cast)[name = tensor("input_199_cast")]; tensor attn_output_73_transpose_x_0 = const()[name = tensor("attn_output_73_transpose_x_0"), val = tensor(false)]; tensor attn_output_73_transpose_y_0 = const()[name = tensor("attn_output_73_transpose_y_0"), val 
= tensor(false)]; - tensor attn_output_73 = matmul(transpose_x = attn_output_73_transpose_x_0, transpose_y = attn_output_73_transpose_y_0, x = input_199, y = value_states_51)[name = tensor("attn_output_73")]; - tensor var_1218 = const()[name = tensor("op_1218"), val = tensor([1, 20, 77, 64])]; - tensor attn_output_75 = reshape(shape = var_1218, x = attn_output_73)[name = tensor("attn_output_75")]; + tensor attn_output_73_cast = matmul(transpose_x = attn_output_73_transpose_x_0, transpose_y = attn_output_73_transpose_y_0, x = input_199_cast, y = value_states_51_cast)[name = tensor("attn_output_73_cast")]; + tensor var_1220 = const()[name = tensor("op_1220"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_75_cast = reshape(shape = var_1220, x = attn_output_73_cast)[name = tensor("attn_output_75_cast")]; tensor attn_output_77_perm_0 = const()[name = tensor("attn_output_77_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_1221 = const()[name = tensor("op_1221"), val = tensor([1, 77, 1280])]; - tensor transpose_96 = transpose(perm = attn_output_77_perm_0, x = attn_output_75)[name = tensor("transpose_96")]; - tensor input_201 = reshape(shape = var_1221, x = transpose_96)[name = tensor("input_201")]; - tensor hidden_states_75 = linear(bias = text_encoder_text_model_encoder_layers_12_self_attn_out_proj_bias, weight = text_encoder_text_model_encoder_layers_12_self_attn_out_proj_weight, x = input_201)[name = tensor("hidden_states_75")]; - tensor input_203 = add(x = input_195, y = hidden_states_75)[name = tensor("input_203")]; + tensor var_1223 = const()[name = tensor("op_1223"), val = tensor([1, 77, 1280])]; + tensor transpose_96 = transpose(perm = attn_output_77_perm_0, x = attn_output_75_cast)[name = tensor("transpose_96")]; + tensor input_201_cast = reshape(shape = var_1223, x = transpose_96)[name = tensor("input_201_cast")]; + tensor text_encoder_text_model_encoder_layers_12_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_12_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(608808320)))]; + tensor text_encoder_text_model_encoder_layers_12_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_12_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(612085184)))]; + tensor hidden_states_75_cast = linear(bias = text_encoder_text_model_encoder_layers_12_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_12_self_attn_out_proj_weight_to_fp16, x = input_201_cast)[name = tensor("hidden_states_75_cast")]; + tensor input_203_cast = add(x = input_195_cast, y = hidden_states_75_cast)[name = tensor("input_203_cast")]; tensor input_205_axes_0 = const()[name = tensor("input_205_axes_0"), val = tensor([-1])]; - tensor input_205 = layer_norm(axes = input_205_axes_0, beta = text_encoder_text_model_encoder_layers_12_layer_norm2_bias, epsilon = var_12, gamma = text_encoder_text_model_encoder_layers_12_layer_norm2_weight, x = input_203)[name = tensor("input_205")]; - tensor input_207 = linear(bias = text_encoder_text_model_encoder_layers_12_mlp_fc1_bias, weight = text_encoder_text_model_encoder_layers_12_mlp_fc1_weight, x = input_205)[name = tensor("input_207")]; + tensor text_encoder_text_model_encoder_layers_12_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_12_layer_norm2_weight_to_fp16"), val = 
tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(612087808)))]; + tensor text_encoder_text_model_encoder_layers_12_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_12_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(612090432)))]; + tensor input_205_cast = layer_norm(axes = input_205_axes_0, beta = text_encoder_text_model_encoder_layers_12_layer_norm2_bias_to_fp16, epsilon = var_13_to_fp16, gamma = text_encoder_text_model_encoder_layers_12_layer_norm2_weight_to_fp16, x = input_203_cast)[name = tensor("input_205_cast")]; + tensor text_encoder_text_model_encoder_layers_12_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_12_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(612093056)))]; + tensor text_encoder_text_model_encoder_layers_12_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_12_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(625200320)))]; + tensor input_207_cast = linear(bias = text_encoder_text_model_encoder_layers_12_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_12_mlp_fc1_weight_to_fp16, x = input_205_cast)[name = tensor("input_207_cast")]; tensor input_209_mode_0 = const()[name = tensor("input_209_mode_0"), val = tensor("EXACT")]; - tensor input_209 = gelu(mode = input_209_mode_0, x = input_207)[name = tensor("input_209")]; - tensor hidden_states_77 = linear(bias = text_encoder_text_model_encoder_layers_12_mlp_fc2_bias, weight = text_encoder_text_model_encoder_layers_12_mlp_fc2_weight, x = input_209)[name = tensor("hidden_states_77")]; - tensor input_211 = add(x = input_203, y = hidden_states_77)[name = tensor("input_211")]; + tensor input_209_cast = gelu(mode = input_209_mode_0, x = input_207_cast)[name = tensor("input_209_cast")]; + tensor text_encoder_text_model_encoder_layers_12_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_12_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(625210624)))]; + tensor text_encoder_text_model_encoder_layers_12_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_12_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(638317888)))]; + tensor hidden_states_77_cast = linear(bias = text_encoder_text_model_encoder_layers_12_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_12_mlp_fc2_weight_to_fp16, x = input_209_cast)[name = tensor("hidden_states_77_cast")]; + tensor input_211_cast = add(x = input_203_cast, y = hidden_states_77_cast)[name = tensor("input_211_cast")]; tensor hidden_states_79_axes_0 = const()[name = tensor("hidden_states_79_axes_0"), val = tensor([-1])]; - tensor hidden_states_79 = layer_norm(axes = hidden_states_79_axes_0, beta = text_encoder_text_model_encoder_layers_13_layer_norm1_bias, epsilon = var_12, gamma = text_encoder_text_model_encoder_layers_13_layer_norm1_weight, x = input_211)[name = tensor("hidden_states_79")]; - tensor var_1259 = linear(bias = text_encoder_text_model_encoder_layers_13_self_attn_q_proj_bias, weight = text_encoder_text_model_encoder_layers_13_self_attn_q_proj_weight, x = hidden_states_79)[name = tensor("op_1259")]; - tensor var_1260 = 
const()[name = tensor("op_1260"), val = tensor(0x1p-3)]; - tensor tensor_83 = mul(x = var_1259, y = var_1260)[name = tensor("tensor_83")]; - tensor tensor_79 = linear(bias = text_encoder_text_model_encoder_layers_13_self_attn_k_proj_bias, weight = text_encoder_text_model_encoder_layers_13_self_attn_k_proj_weight, x = hidden_states_79)[name = tensor("tensor_79")]; - tensor var_1265 = const()[name = tensor("op_1265"), val = tensor([1, -1, 20, 64])]; - tensor var_1266 = reshape(shape = var_1265, x = tensor_79)[name = tensor("op_1266")]; - tensor var_1267_perm_0 = const()[name = tensor("op_1267_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor tensor_81 = linear(bias = text_encoder_text_model_encoder_layers_13_self_attn_v_proj_bias, weight = text_encoder_text_model_encoder_layers_13_self_attn_v_proj_weight, x = hidden_states_79)[name = tensor("tensor_81")]; - tensor var_1272 = const()[name = tensor("op_1272"), val = tensor([1, -1, 20, 64])]; - tensor var_1273 = reshape(shape = var_1272, x = tensor_81)[name = tensor("op_1273")]; - tensor var_1274_perm_0 = const()[name = tensor("op_1274_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_1281 = const()[name = tensor("op_1281"), val = tensor([1, 77, 20, 64])]; - tensor var_1282 = reshape(shape = var_1281, x = tensor_83)[name = tensor("op_1282")]; - tensor var_1283_perm_0 = const()[name = tensor("op_1283_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_1285 = const()[name = tensor("op_1285"), val = tensor([20, -1, 64])]; - tensor transpose_93 = transpose(perm = var_1283_perm_0, x = var_1282)[name = tensor("transpose_93")]; - tensor query_states_27 = reshape(shape = var_1285, x = transpose_93)[name = tensor("query_states_27")]; + tensor text_encoder_text_model_encoder_layers_13_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_13_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(638320512)))]; + tensor text_encoder_text_model_encoder_layers_13_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_13_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(638323136)))]; + tensor hidden_states_79_cast = layer_norm(axes = hidden_states_79_axes_0, beta = text_encoder_text_model_encoder_layers_13_layer_norm1_bias_to_fp16, epsilon = var_13_to_fp16, gamma = text_encoder_text_model_encoder_layers_13_layer_norm1_weight_to_fp16, x = input_211_cast)[name = tensor("hidden_states_79_cast")]; + tensor text_encoder_text_model_encoder_layers_13_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_13_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(638325760)))]; + tensor text_encoder_text_model_encoder_layers_13_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_13_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(641602624)))]; + tensor var_1261_cast = linear(bias = text_encoder_text_model_encoder_layers_13_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_13_self_attn_q_proj_weight_to_fp16, x = hidden_states_79_cast)[name = tensor("op_1261_cast")]; + tensor var_1262_to_fp16 = const()[name = tensor("op_1262_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_83_cast = mul(x = var_1261_cast, y = 
var_1262_to_fp16)[name = tensor("tensor_83_cast")]; + tensor text_encoder_text_model_encoder_layers_13_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_13_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(641605248)))]; + tensor text_encoder_text_model_encoder_layers_13_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_13_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(644882112)))]; + tensor tensor_79_cast = linear(bias = text_encoder_text_model_encoder_layers_13_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_13_self_attn_k_proj_weight_to_fp16, x = hidden_states_79_cast)[name = tensor("tensor_79_cast")]; + tensor var_1267 = const()[name = tensor("op_1267"), val = tensor([1, -1, 20, 64])]; + tensor var_1268_cast = reshape(shape = var_1267, x = tensor_79_cast)[name = tensor("op_1268_cast")]; + tensor var_1269_perm_0 = const()[name = tensor("op_1269_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_13_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_13_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(644884736)))]; + tensor text_encoder_text_model_encoder_layers_13_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_13_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(648161600)))]; + tensor tensor_81_cast = linear(bias = text_encoder_text_model_encoder_layers_13_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_13_self_attn_v_proj_weight_to_fp16, x = hidden_states_79_cast)[name = tensor("tensor_81_cast")]; + tensor var_1274 = const()[name = tensor("op_1274"), val = tensor([1, -1, 20, 64])]; + tensor var_1275_cast = reshape(shape = var_1274, x = tensor_81_cast)[name = tensor("op_1275_cast")]; + tensor var_1276_perm_0 = const()[name = tensor("op_1276_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1283 = const()[name = tensor("op_1283"), val = tensor([1, 77, 20, 64])]; + tensor var_1284_cast = reshape(shape = var_1283, x = tensor_83_cast)[name = tensor("op_1284_cast")]; + tensor var_1285_perm_0 = const()[name = tensor("op_1285_perm_0"), val = tensor([0, 2, 1, 3])]; tensor var_1287 = const()[name = tensor("op_1287"), val = tensor([20, -1, 64])]; - tensor transpose_95 = transpose(perm = var_1267_perm_0, x = var_1266)[name = tensor("transpose_95")]; - tensor key_states_55 = reshape(shape = var_1287, x = transpose_95)[name = tensor("key_states_55")]; + tensor transpose_93 = transpose(perm = var_1285_perm_0, x = var_1284_cast)[name = tensor("transpose_93")]; + tensor query_states_27_cast = reshape(shape = var_1287, x = transpose_93)[name = tensor("query_states_27_cast")]; tensor var_1289 = const()[name = tensor("op_1289"), val = tensor([20, -1, 64])]; - tensor transpose_94 = transpose(perm = var_1274_perm_0, x = var_1273)[name = tensor("transpose_94")]; - tensor value_states_55 = reshape(shape = var_1289, x = transpose_94)[name = tensor("value_states_55")]; - tensor var_1292_perm_0 = const()[name = tensor("op_1292_perm_0"), val = tensor([0, 2, 1])]; + tensor transpose_95 = transpose(perm = var_1269_perm_0, x = var_1268_cast)[name = 
tensor("transpose_95")]; + tensor key_states_55_cast = reshape(shape = var_1289, x = transpose_95)[name = tensor("key_states_55_cast")]; + tensor var_1291 = const()[name = tensor("op_1291"), val = tensor([20, -1, 64])]; + tensor transpose_94 = transpose(perm = var_1276_perm_0, x = var_1275_cast)[name = tensor("transpose_94")]; + tensor value_states_55_cast = reshape(shape = var_1291, x = transpose_94)[name = tensor("value_states_55_cast")]; + tensor var_1294_perm_0 = const()[name = tensor("op_1294_perm_0"), val = tensor([0, 2, 1])]; tensor attn_weights_79_transpose_x_0 = const()[name = tensor("attn_weights_79_transpose_x_0"), val = tensor(false)]; tensor attn_weights_79_transpose_y_0 = const()[name = tensor("attn_weights_79_transpose_y_0"), val = tensor(false)]; - tensor transpose_92 = transpose(perm = var_1292_perm_0, x = key_states_55)[name = tensor("transpose_92")]; - tensor attn_weights_79 = matmul(transpose_x = attn_weights_79_transpose_x_0, transpose_y = attn_weights_79_transpose_y_0, x = query_states_27, y = transpose_92)[name = tensor("attn_weights_79")]; - tensor var_1294 = const()[name = tensor("op_1294"), val = tensor([1, 20, 77, 77])]; - tensor var_1295 = reshape(shape = var_1294, x = attn_weights_79)[name = tensor("op_1295")]; - tensor attn_weights_81 = add(x = var_1295, y = causal_attention_mask)[name = tensor("attn_weights_81")]; - tensor var_1300 = const()[name = tensor("op_1300"), val = tensor([20, 77, 77])]; - tensor input_213 = reshape(shape = var_1300, x = attn_weights_81)[name = tensor("input_213")]; - tensor input_215 = softmax(axis = var_5, x = input_213)[name = tensor("input_215")]; + tensor transpose_92 = transpose(perm = var_1294_perm_0, x = key_states_55_cast)[name = tensor("transpose_92")]; + tensor attn_weights_79_cast = matmul(transpose_x = attn_weights_79_transpose_x_0, transpose_y = attn_weights_79_transpose_y_0, x = query_states_27_cast, y = transpose_92)[name = tensor("attn_weights_79_cast")]; + tensor var_1296 = const()[name = tensor("op_1296"), val = tensor([1, 20, 77, 77])]; + tensor var_1297_cast = reshape(shape = var_1296, x = attn_weights_79_cast)[name = tensor("op_1297_cast")]; + tensor attn_weights_81_cast = add(x = var_1297_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_81_cast")]; + tensor var_1302 = const()[name = tensor("op_1302"), val = tensor([20, 77, 77])]; + tensor input_213_cast = reshape(shape = var_1302, x = attn_weights_81_cast)[name = tensor("input_213_cast")]; + tensor input_215_cast = softmax(axis = var_5, x = input_213_cast)[name = tensor("input_215_cast")]; tensor attn_output_79_transpose_x_0 = const()[name = tensor("attn_output_79_transpose_x_0"), val = tensor(false)]; tensor attn_output_79_transpose_y_0 = const()[name = tensor("attn_output_79_transpose_y_0"), val = tensor(false)]; - tensor attn_output_79 = matmul(transpose_x = attn_output_79_transpose_x_0, transpose_y = attn_output_79_transpose_y_0, x = input_215, y = value_states_55)[name = tensor("attn_output_79")]; - tensor var_1305 = const()[name = tensor("op_1305"), val = tensor([1, 20, 77, 64])]; - tensor attn_output_81 = reshape(shape = var_1305, x = attn_output_79)[name = tensor("attn_output_81")]; + tensor attn_output_79_cast = matmul(transpose_x = attn_output_79_transpose_x_0, transpose_y = attn_output_79_transpose_y_0, x = input_215_cast, y = value_states_55_cast)[name = tensor("attn_output_79_cast")]; + tensor var_1307 = const()[name = tensor("op_1307"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_81_cast = reshape(shape = var_1307, x = 
attn_output_79_cast)[name = tensor("attn_output_81_cast")]; tensor attn_output_83_perm_0 = const()[name = tensor("attn_output_83_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_1308 = const()[name = tensor("op_1308"), val = tensor([1, 77, 1280])]; - tensor transpose_91 = transpose(perm = attn_output_83_perm_0, x = attn_output_81)[name = tensor("transpose_91")]; - tensor input_217 = reshape(shape = var_1308, x = transpose_91)[name = tensor("input_217")]; - tensor hidden_states_81 = linear(bias = text_encoder_text_model_encoder_layers_13_self_attn_out_proj_bias, weight = text_encoder_text_model_encoder_layers_13_self_attn_out_proj_weight, x = input_217)[name = tensor("hidden_states_81")]; - tensor input_219 = add(x = input_211, y = hidden_states_81)[name = tensor("input_219")]; + tensor var_1310 = const()[name = tensor("op_1310"), val = tensor([1, 77, 1280])]; + tensor transpose_91 = transpose(perm = attn_output_83_perm_0, x = attn_output_81_cast)[name = tensor("transpose_91")]; + tensor input_217_cast = reshape(shape = var_1310, x = transpose_91)[name = tensor("input_217_cast")]; + tensor text_encoder_text_model_encoder_layers_13_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_13_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(648164224)))]; + tensor text_encoder_text_model_encoder_layers_13_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_13_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(651441088)))]; + tensor hidden_states_81_cast = linear(bias = text_encoder_text_model_encoder_layers_13_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_13_self_attn_out_proj_weight_to_fp16, x = input_217_cast)[name = tensor("hidden_states_81_cast")]; + tensor input_219_cast = add(x = input_211_cast, y = hidden_states_81_cast)[name = tensor("input_219_cast")]; tensor input_221_axes_0 = const()[name = tensor("input_221_axes_0"), val = tensor([-1])]; - tensor input_221 = layer_norm(axes = input_221_axes_0, beta = text_encoder_text_model_encoder_layers_13_layer_norm2_bias, epsilon = var_12, gamma = text_encoder_text_model_encoder_layers_13_layer_norm2_weight, x = input_219)[name = tensor("input_221")]; - tensor input_223 = linear(bias = text_encoder_text_model_encoder_layers_13_mlp_fc1_bias, weight = text_encoder_text_model_encoder_layers_13_mlp_fc1_weight, x = input_221)[name = tensor("input_223")]; + tensor text_encoder_text_model_encoder_layers_13_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_13_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(651443712)))]; + tensor text_encoder_text_model_encoder_layers_13_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_13_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(651446336)))]; + tensor input_221_cast = layer_norm(axes = input_221_axes_0, beta = text_encoder_text_model_encoder_layers_13_layer_norm2_bias_to_fp16, epsilon = var_13_to_fp16, gamma = text_encoder_text_model_encoder_layers_13_layer_norm2_weight_to_fp16, x = input_219_cast)[name = tensor("input_221_cast")]; + tensor text_encoder_text_model_encoder_layers_13_mlp_fc1_weight_to_fp16 = const()[name = 
tensor("text_encoder_text_model_encoder_layers_13_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(651448960)))]; + tensor text_encoder_text_model_encoder_layers_13_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_13_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(664556224)))]; + tensor input_223_cast = linear(bias = text_encoder_text_model_encoder_layers_13_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_13_mlp_fc1_weight_to_fp16, x = input_221_cast)[name = tensor("input_223_cast")]; tensor input_225_mode_0 = const()[name = tensor("input_225_mode_0"), val = tensor("EXACT")]; - tensor input_225 = gelu(mode = input_225_mode_0, x = input_223)[name = tensor("input_225")]; - tensor hidden_states_83 = linear(bias = text_encoder_text_model_encoder_layers_13_mlp_fc2_bias, weight = text_encoder_text_model_encoder_layers_13_mlp_fc2_weight, x = input_225)[name = tensor("hidden_states_83")]; - tensor input_227 = add(x = input_219, y = hidden_states_83)[name = tensor("input_227")]; + tensor input_225_cast = gelu(mode = input_225_mode_0, x = input_223_cast)[name = tensor("input_225_cast")]; + tensor text_encoder_text_model_encoder_layers_13_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_13_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(664566528)))]; + tensor text_encoder_text_model_encoder_layers_13_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_13_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(677673792)))]; + tensor hidden_states_83_cast = linear(bias = text_encoder_text_model_encoder_layers_13_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_13_mlp_fc2_weight_to_fp16, x = input_225_cast)[name = tensor("hidden_states_83_cast")]; + tensor input_227_cast = add(x = input_219_cast, y = hidden_states_83_cast)[name = tensor("input_227_cast")]; tensor hidden_states_85_axes_0 = const()[name = tensor("hidden_states_85_axes_0"), val = tensor([-1])]; - tensor hidden_states_85 = layer_norm(axes = hidden_states_85_axes_0, beta = text_encoder_text_model_encoder_layers_14_layer_norm1_bias, epsilon = var_12, gamma = text_encoder_text_model_encoder_layers_14_layer_norm1_weight, x = input_227)[name = tensor("hidden_states_85")]; - tensor var_1346 = linear(bias = text_encoder_text_model_encoder_layers_14_self_attn_q_proj_bias, weight = text_encoder_text_model_encoder_layers_14_self_attn_q_proj_weight, x = hidden_states_85)[name = tensor("op_1346")]; - tensor var_1347 = const()[name = tensor("op_1347"), val = tensor(0x1p-3)]; - tensor tensor_89 = mul(x = var_1346, y = var_1347)[name = tensor("tensor_89")]; - tensor tensor_85 = linear(bias = text_encoder_text_model_encoder_layers_14_self_attn_k_proj_bias, weight = text_encoder_text_model_encoder_layers_14_self_attn_k_proj_weight, x = hidden_states_85)[name = tensor("tensor_85")]; - tensor var_1352 = const()[name = tensor("op_1352"), val = tensor([1, -1, 20, 64])]; - tensor var_1353 = reshape(shape = var_1352, x = tensor_85)[name = tensor("op_1353")]; - tensor var_1354_perm_0 = const()[name = tensor("op_1354_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor tensor_87 = linear(bias = text_encoder_text_model_encoder_layers_14_self_attn_v_proj_bias, weight = 
text_encoder_text_model_encoder_layers_14_self_attn_v_proj_weight, x = hidden_states_85)[name = tensor("tensor_87")]; - tensor var_1359 = const()[name = tensor("op_1359"), val = tensor([1, -1, 20, 64])]; - tensor var_1360 = reshape(shape = var_1359, x = tensor_87)[name = tensor("op_1360")]; - tensor var_1361_perm_0 = const()[name = tensor("op_1361_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_1368 = const()[name = tensor("op_1368"), val = tensor([1, 77, 20, 64])]; - tensor var_1369 = reshape(shape = var_1368, x = tensor_89)[name = tensor("op_1369")]; - tensor var_1370_perm_0 = const()[name = tensor("op_1370_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_1372 = const()[name = tensor("op_1372"), val = tensor([20, -1, 64])]; - tensor transpose_88 = transpose(perm = var_1370_perm_0, x = var_1369)[name = tensor("transpose_88")]; - tensor query_states_29 = reshape(shape = var_1372, x = transpose_88)[name = tensor("query_states_29")]; + tensor text_encoder_text_model_encoder_layers_14_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_14_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(677676416)))]; + tensor text_encoder_text_model_encoder_layers_14_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_14_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(677679040)))]; + tensor hidden_states_85_cast = layer_norm(axes = hidden_states_85_axes_0, beta = text_encoder_text_model_encoder_layers_14_layer_norm1_bias_to_fp16, epsilon = var_13_to_fp16, gamma = text_encoder_text_model_encoder_layers_14_layer_norm1_weight_to_fp16, x = input_227_cast)[name = tensor("hidden_states_85_cast")]; + tensor text_encoder_text_model_encoder_layers_14_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_14_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(677681664)))]; + tensor text_encoder_text_model_encoder_layers_14_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_14_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(680958528)))]; + tensor var_1348_cast = linear(bias = text_encoder_text_model_encoder_layers_14_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_14_self_attn_q_proj_weight_to_fp16, x = hidden_states_85_cast)[name = tensor("op_1348_cast")]; + tensor var_1349_to_fp16 = const()[name = tensor("op_1349_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_89_cast = mul(x = var_1348_cast, y = var_1349_to_fp16)[name = tensor("tensor_89_cast")]; + tensor text_encoder_text_model_encoder_layers_14_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_14_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(680961152)))]; + tensor text_encoder_text_model_encoder_layers_14_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_14_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(684238016)))]; + tensor tensor_85_cast = linear(bias = text_encoder_text_model_encoder_layers_14_self_attn_k_proj_bias_to_fp16, weight = 
text_encoder_text_model_encoder_layers_14_self_attn_k_proj_weight_to_fp16, x = hidden_states_85_cast)[name = tensor("tensor_85_cast")]; + tensor var_1354 = const()[name = tensor("op_1354"), val = tensor([1, -1, 20, 64])]; + tensor var_1355_cast = reshape(shape = var_1354, x = tensor_85_cast)[name = tensor("op_1355_cast")]; + tensor var_1356_perm_0 = const()[name = tensor("op_1356_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_14_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_14_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(684240640)))]; + tensor text_encoder_text_model_encoder_layers_14_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_14_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(687517504)))]; + tensor tensor_87_cast = linear(bias = text_encoder_text_model_encoder_layers_14_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_14_self_attn_v_proj_weight_to_fp16, x = hidden_states_85_cast)[name = tensor("tensor_87_cast")]; + tensor var_1361 = const()[name = tensor("op_1361"), val = tensor([1, -1, 20, 64])]; + tensor var_1362_cast = reshape(shape = var_1361, x = tensor_87_cast)[name = tensor("op_1362_cast")]; + tensor var_1363_perm_0 = const()[name = tensor("op_1363_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1370 = const()[name = tensor("op_1370"), val = tensor([1, 77, 20, 64])]; + tensor var_1371_cast = reshape(shape = var_1370, x = tensor_89_cast)[name = tensor("op_1371_cast")]; + tensor var_1372_perm_0 = const()[name = tensor("op_1372_perm_0"), val = tensor([0, 2, 1, 3])]; tensor var_1374 = const()[name = tensor("op_1374"), val = tensor([20, -1, 64])]; - tensor transpose_90 = transpose(perm = var_1354_perm_0, x = var_1353)[name = tensor("transpose_90")]; - tensor key_states_59 = reshape(shape = var_1374, x = transpose_90)[name = tensor("key_states_59")]; + tensor transpose_88 = transpose(perm = var_1372_perm_0, x = var_1371_cast)[name = tensor("transpose_88")]; + tensor query_states_29_cast = reshape(shape = var_1374, x = transpose_88)[name = tensor("query_states_29_cast")]; tensor var_1376 = const()[name = tensor("op_1376"), val = tensor([20, -1, 64])]; - tensor transpose_89 = transpose(perm = var_1361_perm_0, x = var_1360)[name = tensor("transpose_89")]; - tensor value_states_59 = reshape(shape = var_1376, x = transpose_89)[name = tensor("value_states_59")]; - tensor var_1379_perm_0 = const()[name = tensor("op_1379_perm_0"), val = tensor([0, 2, 1])]; + tensor transpose_90 = transpose(perm = var_1356_perm_0, x = var_1355_cast)[name = tensor("transpose_90")]; + tensor key_states_59_cast = reshape(shape = var_1376, x = transpose_90)[name = tensor("key_states_59_cast")]; + tensor var_1378 = const()[name = tensor("op_1378"), val = tensor([20, -1, 64])]; + tensor transpose_89 = transpose(perm = var_1363_perm_0, x = var_1362_cast)[name = tensor("transpose_89")]; + tensor value_states_59_cast = reshape(shape = var_1378, x = transpose_89)[name = tensor("value_states_59_cast")]; + tensor var_1381_perm_0 = const()[name = tensor("op_1381_perm_0"), val = tensor([0, 2, 1])]; tensor attn_weights_85_transpose_x_0 = const()[name = tensor("attn_weights_85_transpose_x_0"), val = tensor(false)]; tensor attn_weights_85_transpose_y_0 = const()[name = tensor("attn_weights_85_transpose_y_0"), 
val = tensor(false)]; - tensor transpose_87 = transpose(perm = var_1379_perm_0, x = key_states_59)[name = tensor("transpose_87")]; - tensor attn_weights_85 = matmul(transpose_x = attn_weights_85_transpose_x_0, transpose_y = attn_weights_85_transpose_y_0, x = query_states_29, y = transpose_87)[name = tensor("attn_weights_85")]; - tensor var_1381 = const()[name = tensor("op_1381"), val = tensor([1, 20, 77, 77])]; - tensor var_1382 = reshape(shape = var_1381, x = attn_weights_85)[name = tensor("op_1382")]; - tensor attn_weights_87 = add(x = var_1382, y = causal_attention_mask)[name = tensor("attn_weights_87")]; - tensor var_1387 = const()[name = tensor("op_1387"), val = tensor([20, 77, 77])]; - tensor input_229 = reshape(shape = var_1387, x = attn_weights_87)[name = tensor("input_229")]; - tensor input_231 = softmax(axis = var_5, x = input_229)[name = tensor("input_231")]; + tensor transpose_87 = transpose(perm = var_1381_perm_0, x = key_states_59_cast)[name = tensor("transpose_87")]; + tensor attn_weights_85_cast = matmul(transpose_x = attn_weights_85_transpose_x_0, transpose_y = attn_weights_85_transpose_y_0, x = query_states_29_cast, y = transpose_87)[name = tensor("attn_weights_85_cast")]; + tensor var_1383 = const()[name = tensor("op_1383"), val = tensor([1, 20, 77, 77])]; + tensor var_1384_cast = reshape(shape = var_1383, x = attn_weights_85_cast)[name = tensor("op_1384_cast")]; + tensor attn_weights_87_cast = add(x = var_1384_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_87_cast")]; + tensor var_1389 = const()[name = tensor("op_1389"), val = tensor([20, 77, 77])]; + tensor input_229_cast = reshape(shape = var_1389, x = attn_weights_87_cast)[name = tensor("input_229_cast")]; + tensor input_231_cast = softmax(axis = var_5, x = input_229_cast)[name = tensor("input_231_cast")]; tensor attn_output_85_transpose_x_0 = const()[name = tensor("attn_output_85_transpose_x_0"), val = tensor(false)]; tensor attn_output_85_transpose_y_0 = const()[name = tensor("attn_output_85_transpose_y_0"), val = tensor(false)]; - tensor attn_output_85 = matmul(transpose_x = attn_output_85_transpose_x_0, transpose_y = attn_output_85_transpose_y_0, x = input_231, y = value_states_59)[name = tensor("attn_output_85")]; - tensor var_1392 = const()[name = tensor("op_1392"), val = tensor([1, 20, 77, 64])]; - tensor attn_output_87 = reshape(shape = var_1392, x = attn_output_85)[name = tensor("attn_output_87")]; + tensor attn_output_85_cast = matmul(transpose_x = attn_output_85_transpose_x_0, transpose_y = attn_output_85_transpose_y_0, x = input_231_cast, y = value_states_59_cast)[name = tensor("attn_output_85_cast")]; + tensor var_1394 = const()[name = tensor("op_1394"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_87_cast = reshape(shape = var_1394, x = attn_output_85_cast)[name = tensor("attn_output_87_cast")]; tensor attn_output_89_perm_0 = const()[name = tensor("attn_output_89_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_1395 = const()[name = tensor("op_1395"), val = tensor([1, 77, 1280])]; - tensor transpose_86 = transpose(perm = attn_output_89_perm_0, x = attn_output_87)[name = tensor("transpose_86")]; - tensor input_233 = reshape(shape = var_1395, x = transpose_86)[name = tensor("input_233")]; - tensor hidden_states_87 = linear(bias = text_encoder_text_model_encoder_layers_14_self_attn_out_proj_bias, weight = text_encoder_text_model_encoder_layers_14_self_attn_out_proj_weight, x = input_233)[name = tensor("hidden_states_87")]; - tensor input_235 = add(x = input_227, y = 
hidden_states_87)[name = tensor("input_235")]; + tensor var_1397 = const()[name = tensor("op_1397"), val = tensor([1, 77, 1280])]; + tensor transpose_86 = transpose(perm = attn_output_89_perm_0, x = attn_output_87_cast)[name = tensor("transpose_86")]; + tensor input_233_cast = reshape(shape = var_1397, x = transpose_86)[name = tensor("input_233_cast")]; + tensor text_encoder_text_model_encoder_layers_14_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_14_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(687520128)))]; + tensor text_encoder_text_model_encoder_layers_14_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_14_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(690796992)))]; + tensor hidden_states_87_cast = linear(bias = text_encoder_text_model_encoder_layers_14_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_14_self_attn_out_proj_weight_to_fp16, x = input_233_cast)[name = tensor("hidden_states_87_cast")]; + tensor input_235_cast = add(x = input_227_cast, y = hidden_states_87_cast)[name = tensor("input_235_cast")]; tensor input_237_axes_0 = const()[name = tensor("input_237_axes_0"), val = tensor([-1])]; - tensor input_237 = layer_norm(axes = input_237_axes_0, beta = text_encoder_text_model_encoder_layers_14_layer_norm2_bias, epsilon = var_12, gamma = text_encoder_text_model_encoder_layers_14_layer_norm2_weight, x = input_235)[name = tensor("input_237")]; - tensor input_239 = linear(bias = text_encoder_text_model_encoder_layers_14_mlp_fc1_bias, weight = text_encoder_text_model_encoder_layers_14_mlp_fc1_weight, x = input_237)[name = tensor("input_239")]; + tensor text_encoder_text_model_encoder_layers_14_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_14_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(690799616)))]; + tensor text_encoder_text_model_encoder_layers_14_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_14_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(690802240)))]; + tensor input_237_cast = layer_norm(axes = input_237_axes_0, beta = text_encoder_text_model_encoder_layers_14_layer_norm2_bias_to_fp16, epsilon = var_13_to_fp16, gamma = text_encoder_text_model_encoder_layers_14_layer_norm2_weight_to_fp16, x = input_235_cast)[name = tensor("input_237_cast")]; + tensor text_encoder_text_model_encoder_layers_14_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_14_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(690804864)))]; + tensor text_encoder_text_model_encoder_layers_14_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_14_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(703912128)))]; + tensor input_239_cast = linear(bias = text_encoder_text_model_encoder_layers_14_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_14_mlp_fc1_weight_to_fp16, x = input_237_cast)[name = tensor("input_239_cast")]; tensor input_241_mode_0 = const()[name = tensor("input_241_mode_0"), val = 
tensor("EXACT")]; - tensor input_241 = gelu(mode = input_241_mode_0, x = input_239)[name = tensor("input_241")]; - tensor hidden_states_89 = linear(bias = text_encoder_text_model_encoder_layers_14_mlp_fc2_bias, weight = text_encoder_text_model_encoder_layers_14_mlp_fc2_weight, x = input_241)[name = tensor("hidden_states_89")]; - tensor input_243 = add(x = input_235, y = hidden_states_89)[name = tensor("input_243")]; + tensor input_241_cast = gelu(mode = input_241_mode_0, x = input_239_cast)[name = tensor("input_241_cast")]; + tensor text_encoder_text_model_encoder_layers_14_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_14_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(703922432)))]; + tensor text_encoder_text_model_encoder_layers_14_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_14_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(717029696)))]; + tensor hidden_states_89_cast = linear(bias = text_encoder_text_model_encoder_layers_14_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_14_mlp_fc2_weight_to_fp16, x = input_241_cast)[name = tensor("hidden_states_89_cast")]; + tensor input_243_cast = add(x = input_235_cast, y = hidden_states_89_cast)[name = tensor("input_243_cast")]; tensor hidden_states_91_axes_0 = const()[name = tensor("hidden_states_91_axes_0"), val = tensor([-1])]; - tensor hidden_states_91 = layer_norm(axes = hidden_states_91_axes_0, beta = text_encoder_text_model_encoder_layers_15_layer_norm1_bias, epsilon = var_12, gamma = text_encoder_text_model_encoder_layers_15_layer_norm1_weight, x = input_243)[name = tensor("hidden_states_91")]; - tensor var_1433 = linear(bias = text_encoder_text_model_encoder_layers_15_self_attn_q_proj_bias, weight = text_encoder_text_model_encoder_layers_15_self_attn_q_proj_weight, x = hidden_states_91)[name = tensor("op_1433")]; - tensor var_1434 = const()[name = tensor("op_1434"), val = tensor(0x1p-3)]; - tensor tensor_95 = mul(x = var_1433, y = var_1434)[name = tensor("tensor_95")]; - tensor tensor_91 = linear(bias = text_encoder_text_model_encoder_layers_15_self_attn_k_proj_bias, weight = text_encoder_text_model_encoder_layers_15_self_attn_k_proj_weight, x = hidden_states_91)[name = tensor("tensor_91")]; - tensor var_1439 = const()[name = tensor("op_1439"), val = tensor([1, -1, 20, 64])]; - tensor var_1440 = reshape(shape = var_1439, x = tensor_91)[name = tensor("op_1440")]; - tensor var_1441_perm_0 = const()[name = tensor("op_1441_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor tensor_93 = linear(bias = text_encoder_text_model_encoder_layers_15_self_attn_v_proj_bias, weight = text_encoder_text_model_encoder_layers_15_self_attn_v_proj_weight, x = hidden_states_91)[name = tensor("tensor_93")]; - tensor var_1446 = const()[name = tensor("op_1446"), val = tensor([1, -1, 20, 64])]; - tensor var_1447 = reshape(shape = var_1446, x = tensor_93)[name = tensor("op_1447")]; - tensor var_1448_perm_0 = const()[name = tensor("op_1448_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_1455 = const()[name = tensor("op_1455"), val = tensor([1, 77, 20, 64])]; - tensor var_1456 = reshape(shape = var_1455, x = tensor_95)[name = tensor("op_1456")]; - tensor var_1457_perm_0 = const()[name = tensor("op_1457_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_1459 = const()[name = tensor("op_1459"), val = tensor([20, -1, 64])]; - tensor 
transpose_83 = transpose(perm = var_1457_perm_0, x = var_1456)[name = tensor("transpose_83")]; - tensor query_states_31 = reshape(shape = var_1459, x = transpose_83)[name = tensor("query_states_31")]; + tensor text_encoder_text_model_encoder_layers_15_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_15_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(717032320)))]; + tensor text_encoder_text_model_encoder_layers_15_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_15_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(717034944)))]; + tensor hidden_states_91_cast = layer_norm(axes = hidden_states_91_axes_0, beta = text_encoder_text_model_encoder_layers_15_layer_norm1_bias_to_fp16, epsilon = var_13_to_fp16, gamma = text_encoder_text_model_encoder_layers_15_layer_norm1_weight_to_fp16, x = input_243_cast)[name = tensor("hidden_states_91_cast")]; + tensor text_encoder_text_model_encoder_layers_15_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_15_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(717037568)))]; + tensor text_encoder_text_model_encoder_layers_15_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_15_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(720314432)))]; + tensor var_1435_cast = linear(bias = text_encoder_text_model_encoder_layers_15_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_15_self_attn_q_proj_weight_to_fp16, x = hidden_states_91_cast)[name = tensor("op_1435_cast")]; + tensor var_1436_to_fp16 = const()[name = tensor("op_1436_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_95_cast = mul(x = var_1435_cast, y = var_1436_to_fp16)[name = tensor("tensor_95_cast")]; + tensor text_encoder_text_model_encoder_layers_15_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_15_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(720317056)))]; + tensor text_encoder_text_model_encoder_layers_15_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_15_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(723593920)))]; + tensor tensor_91_cast = linear(bias = text_encoder_text_model_encoder_layers_15_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_15_self_attn_k_proj_weight_to_fp16, x = hidden_states_91_cast)[name = tensor("tensor_91_cast")]; + tensor var_1441 = const()[name = tensor("op_1441"), val = tensor([1, -1, 20, 64])]; + tensor var_1442_cast = reshape(shape = var_1441, x = tensor_91_cast)[name = tensor("op_1442_cast")]; + tensor var_1443_perm_0 = const()[name = tensor("op_1443_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_15_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_15_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(723596544)))]; + tensor 
text_encoder_text_model_encoder_layers_15_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_15_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(726873408)))]; + tensor tensor_93_cast = linear(bias = text_encoder_text_model_encoder_layers_15_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_15_self_attn_v_proj_weight_to_fp16, x = hidden_states_91_cast)[name = tensor("tensor_93_cast")]; + tensor var_1448 = const()[name = tensor("op_1448"), val = tensor([1, -1, 20, 64])]; + tensor var_1449_cast = reshape(shape = var_1448, x = tensor_93_cast)[name = tensor("op_1449_cast")]; + tensor var_1450_perm_0 = const()[name = tensor("op_1450_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1457 = const()[name = tensor("op_1457"), val = tensor([1, 77, 20, 64])]; + tensor var_1458_cast = reshape(shape = var_1457, x = tensor_95_cast)[name = tensor("op_1458_cast")]; + tensor var_1459_perm_0 = const()[name = tensor("op_1459_perm_0"), val = tensor([0, 2, 1, 3])]; tensor var_1461 = const()[name = tensor("op_1461"), val = tensor([20, -1, 64])]; - tensor transpose_85 = transpose(perm = var_1441_perm_0, x = var_1440)[name = tensor("transpose_85")]; - tensor key_states_63 = reshape(shape = var_1461, x = transpose_85)[name = tensor("key_states_63")]; + tensor transpose_83 = transpose(perm = var_1459_perm_0, x = var_1458_cast)[name = tensor("transpose_83")]; + tensor query_states_31_cast = reshape(shape = var_1461, x = transpose_83)[name = tensor("query_states_31_cast")]; tensor var_1463 = const()[name = tensor("op_1463"), val = tensor([20, -1, 64])]; - tensor transpose_84 = transpose(perm = var_1448_perm_0, x = var_1447)[name = tensor("transpose_84")]; - tensor value_states_63 = reshape(shape = var_1463, x = transpose_84)[name = tensor("value_states_63")]; - tensor var_1466_perm_0 = const()[name = tensor("op_1466_perm_0"), val = tensor([0, 2, 1])]; + tensor transpose_85 = transpose(perm = var_1443_perm_0, x = var_1442_cast)[name = tensor("transpose_85")]; + tensor key_states_63_cast = reshape(shape = var_1463, x = transpose_85)[name = tensor("key_states_63_cast")]; + tensor var_1465 = const()[name = tensor("op_1465"), val = tensor([20, -1, 64])]; + tensor transpose_84 = transpose(perm = var_1450_perm_0, x = var_1449_cast)[name = tensor("transpose_84")]; + tensor value_states_63_cast = reshape(shape = var_1465, x = transpose_84)[name = tensor("value_states_63_cast")]; + tensor var_1468_perm_0 = const()[name = tensor("op_1468_perm_0"), val = tensor([0, 2, 1])]; tensor attn_weights_91_transpose_x_0 = const()[name = tensor("attn_weights_91_transpose_x_0"), val = tensor(false)]; tensor attn_weights_91_transpose_y_0 = const()[name = tensor("attn_weights_91_transpose_y_0"), val = tensor(false)]; - tensor transpose_82 = transpose(perm = var_1466_perm_0, x = key_states_63)[name = tensor("transpose_82")]; - tensor attn_weights_91 = matmul(transpose_x = attn_weights_91_transpose_x_0, transpose_y = attn_weights_91_transpose_y_0, x = query_states_31, y = transpose_82)[name = tensor("attn_weights_91")]; - tensor var_1468 = const()[name = tensor("op_1468"), val = tensor([1, 20, 77, 77])]; - tensor var_1469 = reshape(shape = var_1468, x = attn_weights_91)[name = tensor("op_1469")]; - tensor attn_weights_93 = add(x = var_1469, y = causal_attention_mask)[name = tensor("attn_weights_93")]; - tensor var_1474 = const()[name = tensor("op_1474"), val = tensor([20, 77, 77])]; - tensor input_245 
= reshape(shape = var_1474, x = attn_weights_93)[name = tensor("input_245")]; - tensor input_247 = softmax(axis = var_5, x = input_245)[name = tensor("input_247")]; + tensor transpose_82 = transpose(perm = var_1468_perm_0, x = key_states_63_cast)[name = tensor("transpose_82")]; + tensor attn_weights_91_cast = matmul(transpose_x = attn_weights_91_transpose_x_0, transpose_y = attn_weights_91_transpose_y_0, x = query_states_31_cast, y = transpose_82)[name = tensor("attn_weights_91_cast")]; + tensor var_1470 = const()[name = tensor("op_1470"), val = tensor([1, 20, 77, 77])]; + tensor var_1471_cast = reshape(shape = var_1470, x = attn_weights_91_cast)[name = tensor("op_1471_cast")]; + tensor attn_weights_93_cast = add(x = var_1471_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_93_cast")]; + tensor var_1476 = const()[name = tensor("op_1476"), val = tensor([20, 77, 77])]; + tensor input_245_cast = reshape(shape = var_1476, x = attn_weights_93_cast)[name = tensor("input_245_cast")]; + tensor input_247_cast = softmax(axis = var_5, x = input_245_cast)[name = tensor("input_247_cast")]; tensor attn_output_91_transpose_x_0 = const()[name = tensor("attn_output_91_transpose_x_0"), val = tensor(false)]; tensor attn_output_91_transpose_y_0 = const()[name = tensor("attn_output_91_transpose_y_0"), val = tensor(false)]; - tensor attn_output_91 = matmul(transpose_x = attn_output_91_transpose_x_0, transpose_y = attn_output_91_transpose_y_0, x = input_247, y = value_states_63)[name = tensor("attn_output_91")]; - tensor var_1479 = const()[name = tensor("op_1479"), val = tensor([1, 20, 77, 64])]; - tensor attn_output_93 = reshape(shape = var_1479, x = attn_output_91)[name = tensor("attn_output_93")]; + tensor attn_output_91_cast = matmul(transpose_x = attn_output_91_transpose_x_0, transpose_y = attn_output_91_transpose_y_0, x = input_247_cast, y = value_states_63_cast)[name = tensor("attn_output_91_cast")]; + tensor var_1481 = const()[name = tensor("op_1481"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_93_cast = reshape(shape = var_1481, x = attn_output_91_cast)[name = tensor("attn_output_93_cast")]; tensor attn_output_95_perm_0 = const()[name = tensor("attn_output_95_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_1482 = const()[name = tensor("op_1482"), val = tensor([1, 77, 1280])]; - tensor transpose_81 = transpose(perm = attn_output_95_perm_0, x = attn_output_93)[name = tensor("transpose_81")]; - tensor input_249 = reshape(shape = var_1482, x = transpose_81)[name = tensor("input_249")]; - tensor hidden_states_93 = linear(bias = text_encoder_text_model_encoder_layers_15_self_attn_out_proj_bias, weight = text_encoder_text_model_encoder_layers_15_self_attn_out_proj_weight, x = input_249)[name = tensor("hidden_states_93")]; - tensor input_251 = add(x = input_243, y = hidden_states_93)[name = tensor("input_251")]; + tensor var_1484 = const()[name = tensor("op_1484"), val = tensor([1, 77, 1280])]; + tensor transpose_81 = transpose(perm = attn_output_95_perm_0, x = attn_output_93_cast)[name = tensor("transpose_81")]; + tensor input_249_cast = reshape(shape = var_1484, x = transpose_81)[name = tensor("input_249_cast")]; + tensor text_encoder_text_model_encoder_layers_15_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_15_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(726876032)))]; + tensor 
text_encoder_text_model_encoder_layers_15_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_15_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(730152896)))]; + tensor hidden_states_93_cast = linear(bias = text_encoder_text_model_encoder_layers_15_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_15_self_attn_out_proj_weight_to_fp16, x = input_249_cast)[name = tensor("hidden_states_93_cast")]; + tensor input_251_cast = add(x = input_243_cast, y = hidden_states_93_cast)[name = tensor("input_251_cast")]; tensor input_253_axes_0 = const()[name = tensor("input_253_axes_0"), val = tensor([-1])]; - tensor input_253 = layer_norm(axes = input_253_axes_0, beta = text_encoder_text_model_encoder_layers_15_layer_norm2_bias, epsilon = var_12, gamma = text_encoder_text_model_encoder_layers_15_layer_norm2_weight, x = input_251)[name = tensor("input_253")]; - tensor input_255 = linear(bias = text_encoder_text_model_encoder_layers_15_mlp_fc1_bias, weight = text_encoder_text_model_encoder_layers_15_mlp_fc1_weight, x = input_253)[name = tensor("input_255")]; + tensor text_encoder_text_model_encoder_layers_15_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_15_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(730155520)))]; + tensor text_encoder_text_model_encoder_layers_15_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_15_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(730158144)))]; + tensor input_253_cast = layer_norm(axes = input_253_axes_0, beta = text_encoder_text_model_encoder_layers_15_layer_norm2_bias_to_fp16, epsilon = var_13_to_fp16, gamma = text_encoder_text_model_encoder_layers_15_layer_norm2_weight_to_fp16, x = input_251_cast)[name = tensor("input_253_cast")]; + tensor text_encoder_text_model_encoder_layers_15_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_15_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(730160768)))]; + tensor text_encoder_text_model_encoder_layers_15_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_15_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(743268032)))]; + tensor input_255_cast = linear(bias = text_encoder_text_model_encoder_layers_15_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_15_mlp_fc1_weight_to_fp16, x = input_253_cast)[name = tensor("input_255_cast")]; tensor input_257_mode_0 = const()[name = tensor("input_257_mode_0"), val = tensor("EXACT")]; - tensor input_257 = gelu(mode = input_257_mode_0, x = input_255)[name = tensor("input_257")]; - tensor hidden_states_95 = linear(bias = text_encoder_text_model_encoder_layers_15_mlp_fc2_bias, weight = text_encoder_text_model_encoder_layers_15_mlp_fc2_weight, x = input_257)[name = tensor("hidden_states_95")]; - tensor input_259 = add(x = input_251, y = hidden_states_95)[name = tensor("input_259")]; + tensor input_257_cast = gelu(mode = input_257_mode_0, x = input_255_cast)[name = tensor("input_257_cast")]; + tensor text_encoder_text_model_encoder_layers_15_mlp_fc2_weight_to_fp16 = const()[name = 
tensor("text_encoder_text_model_encoder_layers_15_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(743278336)))]; + tensor text_encoder_text_model_encoder_layers_15_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_15_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(756385600)))]; + tensor hidden_states_95_cast = linear(bias = text_encoder_text_model_encoder_layers_15_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_15_mlp_fc2_weight_to_fp16, x = input_257_cast)[name = tensor("hidden_states_95_cast")]; + tensor input_259_cast = add(x = input_251_cast, y = hidden_states_95_cast)[name = tensor("input_259_cast")]; tensor hidden_states_97_axes_0 = const()[name = tensor("hidden_states_97_axes_0"), val = tensor([-1])]; - tensor hidden_states_97 = layer_norm(axes = hidden_states_97_axes_0, beta = text_encoder_text_model_encoder_layers_16_layer_norm1_bias, epsilon = var_12, gamma = text_encoder_text_model_encoder_layers_16_layer_norm1_weight, x = input_259)[name = tensor("hidden_states_97")]; - tensor var_1520 = linear(bias = text_encoder_text_model_encoder_layers_16_self_attn_q_proj_bias, weight = text_encoder_text_model_encoder_layers_16_self_attn_q_proj_weight, x = hidden_states_97)[name = tensor("op_1520")]; - tensor var_1521 = const()[name = tensor("op_1521"), val = tensor(0x1p-3)]; - tensor tensor_101 = mul(x = var_1520, y = var_1521)[name = tensor("tensor_101")]; - tensor tensor_97 = linear(bias = text_encoder_text_model_encoder_layers_16_self_attn_k_proj_bias, weight = text_encoder_text_model_encoder_layers_16_self_attn_k_proj_weight, x = hidden_states_97)[name = tensor("tensor_97")]; - tensor var_1526 = const()[name = tensor("op_1526"), val = tensor([1, -1, 20, 64])]; - tensor var_1527 = reshape(shape = var_1526, x = tensor_97)[name = tensor("op_1527")]; - tensor var_1528_perm_0 = const()[name = tensor("op_1528_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor tensor_99 = linear(bias = text_encoder_text_model_encoder_layers_16_self_attn_v_proj_bias, weight = text_encoder_text_model_encoder_layers_16_self_attn_v_proj_weight, x = hidden_states_97)[name = tensor("tensor_99")]; - tensor var_1533 = const()[name = tensor("op_1533"), val = tensor([1, -1, 20, 64])]; - tensor var_1534 = reshape(shape = var_1533, x = tensor_99)[name = tensor("op_1534")]; - tensor var_1535_perm_0 = const()[name = tensor("op_1535_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_1542 = const()[name = tensor("op_1542"), val = tensor([1, 77, 20, 64])]; - tensor var_1543 = reshape(shape = var_1542, x = tensor_101)[name = tensor("op_1543")]; - tensor var_1544_perm_0 = const()[name = tensor("op_1544_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_1546 = const()[name = tensor("op_1546"), val = tensor([20, -1, 64])]; - tensor transpose_78 = transpose(perm = var_1544_perm_0, x = var_1543)[name = tensor("transpose_78")]; - tensor query_states_33 = reshape(shape = var_1546, x = transpose_78)[name = tensor("query_states_33")]; + tensor text_encoder_text_model_encoder_layers_16_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_16_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(756388224)))]; + tensor text_encoder_text_model_encoder_layers_16_layer_norm1_bias_to_fp16 = const()[name = 
tensor("text_encoder_text_model_encoder_layers_16_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(756390848)))]; + tensor hidden_states_97_cast = layer_norm(axes = hidden_states_97_axes_0, beta = text_encoder_text_model_encoder_layers_16_layer_norm1_bias_to_fp16, epsilon = var_13_to_fp16, gamma = text_encoder_text_model_encoder_layers_16_layer_norm1_weight_to_fp16, x = input_259_cast)[name = tensor("hidden_states_97_cast")]; + tensor text_encoder_text_model_encoder_layers_16_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_16_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(756393472)))]; + tensor text_encoder_text_model_encoder_layers_16_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_16_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(759670336)))]; + tensor var_1522_cast = linear(bias = text_encoder_text_model_encoder_layers_16_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_16_self_attn_q_proj_weight_to_fp16, x = hidden_states_97_cast)[name = tensor("op_1522_cast")]; + tensor var_1523_to_fp16 = const()[name = tensor("op_1523_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_101_cast = mul(x = var_1522_cast, y = var_1523_to_fp16)[name = tensor("tensor_101_cast")]; + tensor text_encoder_text_model_encoder_layers_16_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_16_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(759672960)))]; + tensor text_encoder_text_model_encoder_layers_16_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_16_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(762949824)))]; + tensor tensor_97_cast = linear(bias = text_encoder_text_model_encoder_layers_16_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_16_self_attn_k_proj_weight_to_fp16, x = hidden_states_97_cast)[name = tensor("tensor_97_cast")]; + tensor var_1528 = const()[name = tensor("op_1528"), val = tensor([1, -1, 20, 64])]; + tensor var_1529_cast = reshape(shape = var_1528, x = tensor_97_cast)[name = tensor("op_1529_cast")]; + tensor var_1530_perm_0 = const()[name = tensor("op_1530_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_16_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_16_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(762952448)))]; + tensor text_encoder_text_model_encoder_layers_16_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_16_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(766229312)))]; + tensor tensor_99_cast = linear(bias = text_encoder_text_model_encoder_layers_16_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_16_self_attn_v_proj_weight_to_fp16, x = hidden_states_97_cast)[name = tensor("tensor_99_cast")]; + tensor var_1535 = const()[name = tensor("op_1535"), val = tensor([1, -1, 20, 64])]; + 
tensor var_1536_cast = reshape(shape = var_1535, x = tensor_99_cast)[name = tensor("op_1536_cast")]; + tensor var_1537_perm_0 = const()[name = tensor("op_1537_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1544 = const()[name = tensor("op_1544"), val = tensor([1, 77, 20, 64])]; + tensor var_1545_cast = reshape(shape = var_1544, x = tensor_101_cast)[name = tensor("op_1545_cast")]; + tensor var_1546_perm_0 = const()[name = tensor("op_1546_perm_0"), val = tensor([0, 2, 1, 3])]; tensor var_1548 = const()[name = tensor("op_1548"), val = tensor([20, -1, 64])]; - tensor transpose_80 = transpose(perm = var_1528_perm_0, x = var_1527)[name = tensor("transpose_80")]; - tensor key_states_67 = reshape(shape = var_1548, x = transpose_80)[name = tensor("key_states_67")]; + tensor transpose_78 = transpose(perm = var_1546_perm_0, x = var_1545_cast)[name = tensor("transpose_78")]; + tensor query_states_33_cast = reshape(shape = var_1548, x = transpose_78)[name = tensor("query_states_33_cast")]; tensor var_1550 = const()[name = tensor("op_1550"), val = tensor([20, -1, 64])]; - tensor transpose_79 = transpose(perm = var_1535_perm_0, x = var_1534)[name = tensor("transpose_79")]; - tensor value_states_67 = reshape(shape = var_1550, x = transpose_79)[name = tensor("value_states_67")]; - tensor var_1553_perm_0 = const()[name = tensor("op_1553_perm_0"), val = tensor([0, 2, 1])]; + tensor transpose_80 = transpose(perm = var_1530_perm_0, x = var_1529_cast)[name = tensor("transpose_80")]; + tensor key_states_67_cast = reshape(shape = var_1550, x = transpose_80)[name = tensor("key_states_67_cast")]; + tensor var_1552 = const()[name = tensor("op_1552"), val = tensor([20, -1, 64])]; + tensor transpose_79 = transpose(perm = var_1537_perm_0, x = var_1536_cast)[name = tensor("transpose_79")]; + tensor value_states_67_cast = reshape(shape = var_1552, x = transpose_79)[name = tensor("value_states_67_cast")]; + tensor var_1555_perm_0 = const()[name = tensor("op_1555_perm_0"), val = tensor([0, 2, 1])]; tensor attn_weights_97_transpose_x_0 = const()[name = tensor("attn_weights_97_transpose_x_0"), val = tensor(false)]; tensor attn_weights_97_transpose_y_0 = const()[name = tensor("attn_weights_97_transpose_y_0"), val = tensor(false)]; - tensor transpose_77 = transpose(perm = var_1553_perm_0, x = key_states_67)[name = tensor("transpose_77")]; - tensor attn_weights_97 = matmul(transpose_x = attn_weights_97_transpose_x_0, transpose_y = attn_weights_97_transpose_y_0, x = query_states_33, y = transpose_77)[name = tensor("attn_weights_97")]; - tensor var_1555 = const()[name = tensor("op_1555"), val = tensor([1, 20, 77, 77])]; - tensor var_1556 = reshape(shape = var_1555, x = attn_weights_97)[name = tensor("op_1556")]; - tensor attn_weights_99 = add(x = var_1556, y = causal_attention_mask)[name = tensor("attn_weights_99")]; - tensor var_1561 = const()[name = tensor("op_1561"), val = tensor([20, 77, 77])]; - tensor input_261 = reshape(shape = var_1561, x = attn_weights_99)[name = tensor("input_261")]; - tensor input_263 = softmax(axis = var_5, x = input_261)[name = tensor("input_263")]; + tensor transpose_77 = transpose(perm = var_1555_perm_0, x = key_states_67_cast)[name = tensor("transpose_77")]; + tensor attn_weights_97_cast = matmul(transpose_x = attn_weights_97_transpose_x_0, transpose_y = attn_weights_97_transpose_y_0, x = query_states_33_cast, y = transpose_77)[name = tensor("attn_weights_97_cast")]; + tensor var_1557 = const()[name = tensor("op_1557"), val = tensor([1, 20, 77, 77])]; + tensor var_1558_cast = reshape(shape = 
var_1557, x = attn_weights_97_cast)[name = tensor("op_1558_cast")]; + tensor attn_weights_99_cast = add(x = var_1558_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_99_cast")]; + tensor var_1563 = const()[name = tensor("op_1563"), val = tensor([20, 77, 77])]; + tensor input_261_cast = reshape(shape = var_1563, x = attn_weights_99_cast)[name = tensor("input_261_cast")]; + tensor input_263_cast = softmax(axis = var_5, x = input_261_cast)[name = tensor("input_263_cast")]; tensor attn_output_97_transpose_x_0 = const()[name = tensor("attn_output_97_transpose_x_0"), val = tensor(false)]; tensor attn_output_97_transpose_y_0 = const()[name = tensor("attn_output_97_transpose_y_0"), val = tensor(false)]; - tensor attn_output_97 = matmul(transpose_x = attn_output_97_transpose_x_0, transpose_y = attn_output_97_transpose_y_0, x = input_263, y = value_states_67)[name = tensor("attn_output_97")]; - tensor var_1566 = const()[name = tensor("op_1566"), val = tensor([1, 20, 77, 64])]; - tensor attn_output_99 = reshape(shape = var_1566, x = attn_output_97)[name = tensor("attn_output_99")]; + tensor attn_output_97_cast = matmul(transpose_x = attn_output_97_transpose_x_0, transpose_y = attn_output_97_transpose_y_0, x = input_263_cast, y = value_states_67_cast)[name = tensor("attn_output_97_cast")]; + tensor var_1568 = const()[name = tensor("op_1568"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_99_cast = reshape(shape = var_1568, x = attn_output_97_cast)[name = tensor("attn_output_99_cast")]; tensor attn_output_101_perm_0 = const()[name = tensor("attn_output_101_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_1569 = const()[name = tensor("op_1569"), val = tensor([1, 77, 1280])]; - tensor transpose_76 = transpose(perm = attn_output_101_perm_0, x = attn_output_99)[name = tensor("transpose_76")]; - tensor input_265 = reshape(shape = var_1569, x = transpose_76)[name = tensor("input_265")]; - tensor hidden_states_99 = linear(bias = text_encoder_text_model_encoder_layers_16_self_attn_out_proj_bias, weight = text_encoder_text_model_encoder_layers_16_self_attn_out_proj_weight, x = input_265)[name = tensor("hidden_states_99")]; - tensor input_267 = add(x = input_259, y = hidden_states_99)[name = tensor("input_267")]; + tensor var_1571 = const()[name = tensor("op_1571"), val = tensor([1, 77, 1280])]; + tensor transpose_76 = transpose(perm = attn_output_101_perm_0, x = attn_output_99_cast)[name = tensor("transpose_76")]; + tensor input_265_cast = reshape(shape = var_1571, x = transpose_76)[name = tensor("input_265_cast")]; + tensor text_encoder_text_model_encoder_layers_16_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_16_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(766231936)))]; + tensor text_encoder_text_model_encoder_layers_16_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_16_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(769508800)))]; + tensor hidden_states_99_cast = linear(bias = text_encoder_text_model_encoder_layers_16_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_16_self_attn_out_proj_weight_to_fp16, x = input_265_cast)[name = tensor("hidden_states_99_cast")]; + tensor input_267_cast = add(x = input_259_cast, y = hidden_states_99_cast)[name = tensor("input_267_cast")]; tensor input_269_axes_0 = 
const()[name = tensor("input_269_axes_0"), val = tensor([-1])]; - tensor input_269 = layer_norm(axes = input_269_axes_0, beta = text_encoder_text_model_encoder_layers_16_layer_norm2_bias, epsilon = var_12, gamma = text_encoder_text_model_encoder_layers_16_layer_norm2_weight, x = input_267)[name = tensor("input_269")]; - tensor input_271 = linear(bias = text_encoder_text_model_encoder_layers_16_mlp_fc1_bias, weight = text_encoder_text_model_encoder_layers_16_mlp_fc1_weight, x = input_269)[name = tensor("input_271")]; + tensor text_encoder_text_model_encoder_layers_16_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_16_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(769511424)))]; + tensor text_encoder_text_model_encoder_layers_16_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_16_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(769514048)))]; + tensor input_269_cast = layer_norm(axes = input_269_axes_0, beta = text_encoder_text_model_encoder_layers_16_layer_norm2_bias_to_fp16, epsilon = var_13_to_fp16, gamma = text_encoder_text_model_encoder_layers_16_layer_norm2_weight_to_fp16, x = input_267_cast)[name = tensor("input_269_cast")]; + tensor text_encoder_text_model_encoder_layers_16_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_16_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(769516672)))]; + tensor text_encoder_text_model_encoder_layers_16_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_16_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(782623936)))]; + tensor input_271_cast = linear(bias = text_encoder_text_model_encoder_layers_16_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_16_mlp_fc1_weight_to_fp16, x = input_269_cast)[name = tensor("input_271_cast")]; tensor input_273_mode_0 = const()[name = tensor("input_273_mode_0"), val = tensor("EXACT")]; - tensor input_273 = gelu(mode = input_273_mode_0, x = input_271)[name = tensor("input_273")]; - tensor hidden_states_101 = linear(bias = text_encoder_text_model_encoder_layers_16_mlp_fc2_bias, weight = text_encoder_text_model_encoder_layers_16_mlp_fc2_weight, x = input_273)[name = tensor("hidden_states_101")]; - tensor input_275 = add(x = input_267, y = hidden_states_101)[name = tensor("input_275")]; + tensor input_273_cast = gelu(mode = input_273_mode_0, x = input_271_cast)[name = tensor("input_273_cast")]; + tensor text_encoder_text_model_encoder_layers_16_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_16_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(782634240)))]; + tensor text_encoder_text_model_encoder_layers_16_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_16_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(795741504)))]; + tensor hidden_states_101_cast = linear(bias = text_encoder_text_model_encoder_layers_16_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_16_mlp_fc2_weight_to_fp16, x = input_273_cast)[name = tensor("hidden_states_101_cast")]; + tensor input_275_cast = 
add(x = input_267_cast, y = hidden_states_101_cast)[name = tensor("input_275_cast")]; tensor hidden_states_103_axes_0 = const()[name = tensor("hidden_states_103_axes_0"), val = tensor([-1])]; - tensor hidden_states_103 = layer_norm(axes = hidden_states_103_axes_0, beta = text_encoder_text_model_encoder_layers_17_layer_norm1_bias, epsilon = var_12, gamma = text_encoder_text_model_encoder_layers_17_layer_norm1_weight, x = input_275)[name = tensor("hidden_states_103")]; - tensor var_1607 = linear(bias = text_encoder_text_model_encoder_layers_17_self_attn_q_proj_bias, weight = text_encoder_text_model_encoder_layers_17_self_attn_q_proj_weight, x = hidden_states_103)[name = tensor("op_1607")]; - tensor var_1608 = const()[name = tensor("op_1608"), val = tensor(0x1p-3)]; - tensor tensor_107 = mul(x = var_1607, y = var_1608)[name = tensor("tensor_107")]; - tensor tensor_103 = linear(bias = text_encoder_text_model_encoder_layers_17_self_attn_k_proj_bias, weight = text_encoder_text_model_encoder_layers_17_self_attn_k_proj_weight, x = hidden_states_103)[name = tensor("tensor_103")]; - tensor var_1613 = const()[name = tensor("op_1613"), val = tensor([1, -1, 20, 64])]; - tensor var_1614 = reshape(shape = var_1613, x = tensor_103)[name = tensor("op_1614")]; - tensor var_1615_perm_0 = const()[name = tensor("op_1615_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor tensor_105 = linear(bias = text_encoder_text_model_encoder_layers_17_self_attn_v_proj_bias, weight = text_encoder_text_model_encoder_layers_17_self_attn_v_proj_weight, x = hidden_states_103)[name = tensor("tensor_105")]; - tensor var_1620 = const()[name = tensor("op_1620"), val = tensor([1, -1, 20, 64])]; - tensor var_1621 = reshape(shape = var_1620, x = tensor_105)[name = tensor("op_1621")]; - tensor var_1622_perm_0 = const()[name = tensor("op_1622_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_1629 = const()[name = tensor("op_1629"), val = tensor([1, 77, 20, 64])]; - tensor var_1630 = reshape(shape = var_1629, x = tensor_107)[name = tensor("op_1630")]; - tensor var_1631_perm_0 = const()[name = tensor("op_1631_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_1633 = const()[name = tensor("op_1633"), val = tensor([20, -1, 64])]; - tensor transpose_73 = transpose(perm = var_1631_perm_0, x = var_1630)[name = tensor("transpose_73")]; - tensor query_states_35 = reshape(shape = var_1633, x = transpose_73)[name = tensor("query_states_35")]; + tensor text_encoder_text_model_encoder_layers_17_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_17_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(795744128)))]; + tensor text_encoder_text_model_encoder_layers_17_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_17_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(795746752)))]; + tensor hidden_states_103_cast = layer_norm(axes = hidden_states_103_axes_0, beta = text_encoder_text_model_encoder_layers_17_layer_norm1_bias_to_fp16, epsilon = var_13_to_fp16, gamma = text_encoder_text_model_encoder_layers_17_layer_norm1_weight_to_fp16, x = input_275_cast)[name = tensor("hidden_states_103_cast")]; + tensor text_encoder_text_model_encoder_layers_17_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_17_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), 
offset = tensor(795749376)))]; + tensor text_encoder_text_model_encoder_layers_17_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_17_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(799026240)))]; + tensor var_1609_cast = linear(bias = text_encoder_text_model_encoder_layers_17_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_17_self_attn_q_proj_weight_to_fp16, x = hidden_states_103_cast)[name = tensor("op_1609_cast")]; + tensor var_1610_to_fp16 = const()[name = tensor("op_1610_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_107_cast = mul(x = var_1609_cast, y = var_1610_to_fp16)[name = tensor("tensor_107_cast")]; + tensor text_encoder_text_model_encoder_layers_17_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_17_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(799028864)))]; + tensor text_encoder_text_model_encoder_layers_17_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_17_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(802305728)))]; + tensor tensor_103_cast = linear(bias = text_encoder_text_model_encoder_layers_17_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_17_self_attn_k_proj_weight_to_fp16, x = hidden_states_103_cast)[name = tensor("tensor_103_cast")]; + tensor var_1615 = const()[name = tensor("op_1615"), val = tensor([1, -1, 20, 64])]; + tensor var_1616_cast = reshape(shape = var_1615, x = tensor_103_cast)[name = tensor("op_1616_cast")]; + tensor var_1617_perm_0 = const()[name = tensor("op_1617_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_17_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_17_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(802308352)))]; + tensor text_encoder_text_model_encoder_layers_17_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_17_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(805585216)))]; + tensor tensor_105_cast = linear(bias = text_encoder_text_model_encoder_layers_17_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_17_self_attn_v_proj_weight_to_fp16, x = hidden_states_103_cast)[name = tensor("tensor_105_cast")]; + tensor var_1622 = const()[name = tensor("op_1622"), val = tensor([1, -1, 20, 64])]; + tensor var_1623_cast = reshape(shape = var_1622, x = tensor_105_cast)[name = tensor("op_1623_cast")]; + tensor var_1624_perm_0 = const()[name = tensor("op_1624_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1631 = const()[name = tensor("op_1631"), val = tensor([1, 77, 20, 64])]; + tensor var_1632_cast = reshape(shape = var_1631, x = tensor_107_cast)[name = tensor("op_1632_cast")]; + tensor var_1633_perm_0 = const()[name = tensor("op_1633_perm_0"), val = tensor([0, 2, 1, 3])]; tensor var_1635 = const()[name = tensor("op_1635"), val = tensor([20, -1, 64])]; - tensor transpose_75 = transpose(perm = var_1615_perm_0, x = var_1614)[name = tensor("transpose_75")]; - tensor key_states_71 = reshape(shape = var_1635, x = transpose_75)[name = 
tensor("key_states_71")]; + tensor transpose_73 = transpose(perm = var_1633_perm_0, x = var_1632_cast)[name = tensor("transpose_73")]; + tensor query_states_35_cast = reshape(shape = var_1635, x = transpose_73)[name = tensor("query_states_35_cast")]; tensor var_1637 = const()[name = tensor("op_1637"), val = tensor([20, -1, 64])]; - tensor transpose_74 = transpose(perm = var_1622_perm_0, x = var_1621)[name = tensor("transpose_74")]; - tensor value_states_71 = reshape(shape = var_1637, x = transpose_74)[name = tensor("value_states_71")]; - tensor var_1640_perm_0 = const()[name = tensor("op_1640_perm_0"), val = tensor([0, 2, 1])]; + tensor transpose_75 = transpose(perm = var_1617_perm_0, x = var_1616_cast)[name = tensor("transpose_75")]; + tensor key_states_71_cast = reshape(shape = var_1637, x = transpose_75)[name = tensor("key_states_71_cast")]; + tensor var_1639 = const()[name = tensor("op_1639"), val = tensor([20, -1, 64])]; + tensor transpose_74 = transpose(perm = var_1624_perm_0, x = var_1623_cast)[name = tensor("transpose_74")]; + tensor value_states_71_cast = reshape(shape = var_1639, x = transpose_74)[name = tensor("value_states_71_cast")]; + tensor var_1642_perm_0 = const()[name = tensor("op_1642_perm_0"), val = tensor([0, 2, 1])]; tensor attn_weights_103_transpose_x_0 = const()[name = tensor("attn_weights_103_transpose_x_0"), val = tensor(false)]; tensor attn_weights_103_transpose_y_0 = const()[name = tensor("attn_weights_103_transpose_y_0"), val = tensor(false)]; - tensor transpose_72 = transpose(perm = var_1640_perm_0, x = key_states_71)[name = tensor("transpose_72")]; - tensor attn_weights_103 = matmul(transpose_x = attn_weights_103_transpose_x_0, transpose_y = attn_weights_103_transpose_y_0, x = query_states_35, y = transpose_72)[name = tensor("attn_weights_103")]; - tensor var_1642 = const()[name = tensor("op_1642"), val = tensor([1, 20, 77, 77])]; - tensor var_1643 = reshape(shape = var_1642, x = attn_weights_103)[name = tensor("op_1643")]; - tensor attn_weights_105 = add(x = var_1643, y = causal_attention_mask)[name = tensor("attn_weights_105")]; - tensor var_1648 = const()[name = tensor("op_1648"), val = tensor([20, 77, 77])]; - tensor input_277 = reshape(shape = var_1648, x = attn_weights_105)[name = tensor("input_277")]; - tensor input_279 = softmax(axis = var_5, x = input_277)[name = tensor("input_279")]; + tensor transpose_72 = transpose(perm = var_1642_perm_0, x = key_states_71_cast)[name = tensor("transpose_72")]; + tensor attn_weights_103_cast = matmul(transpose_x = attn_weights_103_transpose_x_0, transpose_y = attn_weights_103_transpose_y_0, x = query_states_35_cast, y = transpose_72)[name = tensor("attn_weights_103_cast")]; + tensor var_1644 = const()[name = tensor("op_1644"), val = tensor([1, 20, 77, 77])]; + tensor var_1645_cast = reshape(shape = var_1644, x = attn_weights_103_cast)[name = tensor("op_1645_cast")]; + tensor attn_weights_105_cast = add(x = var_1645_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_105_cast")]; + tensor var_1650 = const()[name = tensor("op_1650"), val = tensor([20, 77, 77])]; + tensor input_277_cast = reshape(shape = var_1650, x = attn_weights_105_cast)[name = tensor("input_277_cast")]; + tensor input_279_cast = softmax(axis = var_5, x = input_277_cast)[name = tensor("input_279_cast")]; tensor attn_output_103_transpose_x_0 = const()[name = tensor("attn_output_103_transpose_x_0"), val = tensor(false)]; tensor attn_output_103_transpose_y_0 = const()[name = tensor("attn_output_103_transpose_y_0"), val = 
tensor(false)]; - tensor attn_output_103 = matmul(transpose_x = attn_output_103_transpose_x_0, transpose_y = attn_output_103_transpose_y_0, x = input_279, y = value_states_71)[name = tensor("attn_output_103")]; - tensor var_1653 = const()[name = tensor("op_1653"), val = tensor([1, 20, 77, 64])]; - tensor attn_output_105 = reshape(shape = var_1653, x = attn_output_103)[name = tensor("attn_output_105")]; + tensor attn_output_103_cast = matmul(transpose_x = attn_output_103_transpose_x_0, transpose_y = attn_output_103_transpose_y_0, x = input_279_cast, y = value_states_71_cast)[name = tensor("attn_output_103_cast")]; + tensor var_1655 = const()[name = tensor("op_1655"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_105_cast = reshape(shape = var_1655, x = attn_output_103_cast)[name = tensor("attn_output_105_cast")]; tensor attn_output_107_perm_0 = const()[name = tensor("attn_output_107_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_1656 = const()[name = tensor("op_1656"), val = tensor([1, 77, 1280])]; - tensor transpose_71 = transpose(perm = attn_output_107_perm_0, x = attn_output_105)[name = tensor("transpose_71")]; - tensor input_281 = reshape(shape = var_1656, x = transpose_71)[name = tensor("input_281")]; - tensor hidden_states_105 = linear(bias = text_encoder_text_model_encoder_layers_17_self_attn_out_proj_bias, weight = text_encoder_text_model_encoder_layers_17_self_attn_out_proj_weight, x = input_281)[name = tensor("hidden_states_105")]; - tensor input_283 = add(x = input_275, y = hidden_states_105)[name = tensor("input_283")]; + tensor var_1658 = const()[name = tensor("op_1658"), val = tensor([1, 77, 1280])]; + tensor transpose_71 = transpose(perm = attn_output_107_perm_0, x = attn_output_105_cast)[name = tensor("transpose_71")]; + tensor input_281_cast = reshape(shape = var_1658, x = transpose_71)[name = tensor("input_281_cast")]; + tensor text_encoder_text_model_encoder_layers_17_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_17_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(805587840)))]; + tensor text_encoder_text_model_encoder_layers_17_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_17_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(808864704)))]; + tensor hidden_states_105_cast = linear(bias = text_encoder_text_model_encoder_layers_17_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_17_self_attn_out_proj_weight_to_fp16, x = input_281_cast)[name = tensor("hidden_states_105_cast")]; + tensor input_283_cast = add(x = input_275_cast, y = hidden_states_105_cast)[name = tensor("input_283_cast")]; tensor input_285_axes_0 = const()[name = tensor("input_285_axes_0"), val = tensor([-1])]; - tensor input_285 = layer_norm(axes = input_285_axes_0, beta = text_encoder_text_model_encoder_layers_17_layer_norm2_bias, epsilon = var_12, gamma = text_encoder_text_model_encoder_layers_17_layer_norm2_weight, x = input_283)[name = tensor("input_285")]; - tensor input_287 = linear(bias = text_encoder_text_model_encoder_layers_17_mlp_fc1_bias, weight = text_encoder_text_model_encoder_layers_17_mlp_fc1_weight, x = input_285)[name = tensor("input_287")]; + tensor text_encoder_text_model_encoder_layers_17_layer_norm2_weight_to_fp16 = const()[name = 
tensor("text_encoder_text_model_encoder_layers_17_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(808867328)))]; + tensor text_encoder_text_model_encoder_layers_17_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_17_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(808869952)))]; + tensor input_285_cast = layer_norm(axes = input_285_axes_0, beta = text_encoder_text_model_encoder_layers_17_layer_norm2_bias_to_fp16, epsilon = var_13_to_fp16, gamma = text_encoder_text_model_encoder_layers_17_layer_norm2_weight_to_fp16, x = input_283_cast)[name = tensor("input_285_cast")]; + tensor text_encoder_text_model_encoder_layers_17_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_17_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(808872576)))]; + tensor text_encoder_text_model_encoder_layers_17_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_17_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(821979840)))]; + tensor input_287_cast = linear(bias = text_encoder_text_model_encoder_layers_17_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_17_mlp_fc1_weight_to_fp16, x = input_285_cast)[name = tensor("input_287_cast")]; tensor input_289_mode_0 = const()[name = tensor("input_289_mode_0"), val = tensor("EXACT")]; - tensor input_289 = gelu(mode = input_289_mode_0, x = input_287)[name = tensor("input_289")]; - tensor hidden_states_107 = linear(bias = text_encoder_text_model_encoder_layers_17_mlp_fc2_bias, weight = text_encoder_text_model_encoder_layers_17_mlp_fc2_weight, x = input_289)[name = tensor("hidden_states_107")]; - tensor input_291 = add(x = input_283, y = hidden_states_107)[name = tensor("input_291")]; + tensor input_289_cast = gelu(mode = input_289_mode_0, x = input_287_cast)[name = tensor("input_289_cast")]; + tensor text_encoder_text_model_encoder_layers_17_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_17_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(821990144)))]; + tensor text_encoder_text_model_encoder_layers_17_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_17_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(835097408)))]; + tensor hidden_states_107_cast = linear(bias = text_encoder_text_model_encoder_layers_17_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_17_mlp_fc2_weight_to_fp16, x = input_289_cast)[name = tensor("hidden_states_107_cast")]; + tensor input_291_cast = add(x = input_283_cast, y = hidden_states_107_cast)[name = tensor("input_291_cast")]; tensor hidden_states_109_axes_0 = const()[name = tensor("hidden_states_109_axes_0"), val = tensor([-1])]; - tensor hidden_states_109 = layer_norm(axes = hidden_states_109_axes_0, beta = text_encoder_text_model_encoder_layers_18_layer_norm1_bias, epsilon = var_12, gamma = text_encoder_text_model_encoder_layers_18_layer_norm1_weight, x = input_291)[name = tensor("hidden_states_109")]; - tensor var_1694 = linear(bias = text_encoder_text_model_encoder_layers_18_self_attn_q_proj_bias, weight = 
text_encoder_text_model_encoder_layers_18_self_attn_q_proj_weight, x = hidden_states_109)[name = tensor("op_1694")]; - tensor var_1695 = const()[name = tensor("op_1695"), val = tensor(0x1p-3)]; - tensor tensor_113 = mul(x = var_1694, y = var_1695)[name = tensor("tensor_113")]; - tensor tensor_109 = linear(bias = text_encoder_text_model_encoder_layers_18_self_attn_k_proj_bias, weight = text_encoder_text_model_encoder_layers_18_self_attn_k_proj_weight, x = hidden_states_109)[name = tensor("tensor_109")]; - tensor var_1700 = const()[name = tensor("op_1700"), val = tensor([1, -1, 20, 64])]; - tensor var_1701 = reshape(shape = var_1700, x = tensor_109)[name = tensor("op_1701")]; - tensor var_1702_perm_0 = const()[name = tensor("op_1702_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor tensor_111 = linear(bias = text_encoder_text_model_encoder_layers_18_self_attn_v_proj_bias, weight = text_encoder_text_model_encoder_layers_18_self_attn_v_proj_weight, x = hidden_states_109)[name = tensor("tensor_111")]; - tensor var_1707 = const()[name = tensor("op_1707"), val = tensor([1, -1, 20, 64])]; - tensor var_1708 = reshape(shape = var_1707, x = tensor_111)[name = tensor("op_1708")]; - tensor var_1709_perm_0 = const()[name = tensor("op_1709_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_1716 = const()[name = tensor("op_1716"), val = tensor([1, 77, 20, 64])]; - tensor var_1717 = reshape(shape = var_1716, x = tensor_113)[name = tensor("op_1717")]; - tensor var_1718_perm_0 = const()[name = tensor("op_1718_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_1720 = const()[name = tensor("op_1720"), val = tensor([20, -1, 64])]; - tensor transpose_68 = transpose(perm = var_1718_perm_0, x = var_1717)[name = tensor("transpose_68")]; - tensor query_states_37 = reshape(shape = var_1720, x = transpose_68)[name = tensor("query_states_37")]; + tensor text_encoder_text_model_encoder_layers_18_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_18_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(835100032)))]; + tensor text_encoder_text_model_encoder_layers_18_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_18_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(835102656)))]; + tensor hidden_states_109_cast = layer_norm(axes = hidden_states_109_axes_0, beta = text_encoder_text_model_encoder_layers_18_layer_norm1_bias_to_fp16, epsilon = var_13_to_fp16, gamma = text_encoder_text_model_encoder_layers_18_layer_norm1_weight_to_fp16, x = input_291_cast)[name = tensor("hidden_states_109_cast")]; + tensor text_encoder_text_model_encoder_layers_18_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_18_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(835105280)))]; + tensor text_encoder_text_model_encoder_layers_18_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_18_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(838382144)))]; + tensor var_1696_cast = linear(bias = text_encoder_text_model_encoder_layers_18_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_18_self_attn_q_proj_weight_to_fp16, x = hidden_states_109_cast)[name = tensor("op_1696_cast")]; + tensor 
var_1697_to_fp16 = const()[name = tensor("op_1697_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_113_cast = mul(x = var_1696_cast, y = var_1697_to_fp16)[name = tensor("tensor_113_cast")]; + tensor text_encoder_text_model_encoder_layers_18_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_18_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(838384768)))]; + tensor text_encoder_text_model_encoder_layers_18_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_18_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(841661632)))]; + tensor tensor_109_cast = linear(bias = text_encoder_text_model_encoder_layers_18_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_18_self_attn_k_proj_weight_to_fp16, x = hidden_states_109_cast)[name = tensor("tensor_109_cast")]; + tensor var_1702 = const()[name = tensor("op_1702"), val = tensor([1, -1, 20, 64])]; + tensor var_1703_cast = reshape(shape = var_1702, x = tensor_109_cast)[name = tensor("op_1703_cast")]; + tensor var_1704_perm_0 = const()[name = tensor("op_1704_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_18_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_18_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(841664256)))]; + tensor text_encoder_text_model_encoder_layers_18_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_18_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(844941120)))]; + tensor tensor_111_cast = linear(bias = text_encoder_text_model_encoder_layers_18_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_18_self_attn_v_proj_weight_to_fp16, x = hidden_states_109_cast)[name = tensor("tensor_111_cast")]; + tensor var_1709 = const()[name = tensor("op_1709"), val = tensor([1, -1, 20, 64])]; + tensor var_1710_cast = reshape(shape = var_1709, x = tensor_111_cast)[name = tensor("op_1710_cast")]; + tensor var_1711_perm_0 = const()[name = tensor("op_1711_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1718 = const()[name = tensor("op_1718"), val = tensor([1, 77, 20, 64])]; + tensor var_1719_cast = reshape(shape = var_1718, x = tensor_113_cast)[name = tensor("op_1719_cast")]; + tensor var_1720_perm_0 = const()[name = tensor("op_1720_perm_0"), val = tensor([0, 2, 1, 3])]; tensor var_1722 = const()[name = tensor("op_1722"), val = tensor([20, -1, 64])]; - tensor transpose_70 = transpose(perm = var_1702_perm_0, x = var_1701)[name = tensor("transpose_70")]; - tensor key_states_75 = reshape(shape = var_1722, x = transpose_70)[name = tensor("key_states_75")]; + tensor transpose_68 = transpose(perm = var_1720_perm_0, x = var_1719_cast)[name = tensor("transpose_68")]; + tensor query_states_37_cast = reshape(shape = var_1722, x = transpose_68)[name = tensor("query_states_37_cast")]; tensor var_1724 = const()[name = tensor("op_1724"), val = tensor([20, -1, 64])]; - tensor transpose_69 = transpose(perm = var_1709_perm_0, x = var_1708)[name = tensor("transpose_69")]; - tensor value_states_75 = reshape(shape = var_1724, x = transpose_69)[name = tensor("value_states_75")]; - tensor var_1727_perm_0 = 
const()[name = tensor("op_1727_perm_0"), val = tensor([0, 2, 1])]; + tensor transpose_70 = transpose(perm = var_1704_perm_0, x = var_1703_cast)[name = tensor("transpose_70")]; + tensor key_states_75_cast = reshape(shape = var_1724, x = transpose_70)[name = tensor("key_states_75_cast")]; + tensor var_1726 = const()[name = tensor("op_1726"), val = tensor([20, -1, 64])]; + tensor transpose_69 = transpose(perm = var_1711_perm_0, x = var_1710_cast)[name = tensor("transpose_69")]; + tensor value_states_75_cast = reshape(shape = var_1726, x = transpose_69)[name = tensor("value_states_75_cast")]; + tensor var_1729_perm_0 = const()[name = tensor("op_1729_perm_0"), val = tensor([0, 2, 1])]; tensor attn_weights_109_transpose_x_0 = const()[name = tensor("attn_weights_109_transpose_x_0"), val = tensor(false)]; tensor attn_weights_109_transpose_y_0 = const()[name = tensor("attn_weights_109_transpose_y_0"), val = tensor(false)]; - tensor transpose_67 = transpose(perm = var_1727_perm_0, x = key_states_75)[name = tensor("transpose_67")]; - tensor attn_weights_109 = matmul(transpose_x = attn_weights_109_transpose_x_0, transpose_y = attn_weights_109_transpose_y_0, x = query_states_37, y = transpose_67)[name = tensor("attn_weights_109")]; - tensor var_1729 = const()[name = tensor("op_1729"), val = tensor([1, 20, 77, 77])]; - tensor var_1730 = reshape(shape = var_1729, x = attn_weights_109)[name = tensor("op_1730")]; - tensor attn_weights_111 = add(x = var_1730, y = causal_attention_mask)[name = tensor("attn_weights_111")]; - tensor var_1735 = const()[name = tensor("op_1735"), val = tensor([20, 77, 77])]; - tensor input_293 = reshape(shape = var_1735, x = attn_weights_111)[name = tensor("input_293")]; - tensor input_295 = softmax(axis = var_5, x = input_293)[name = tensor("input_295")]; + tensor transpose_67 = transpose(perm = var_1729_perm_0, x = key_states_75_cast)[name = tensor("transpose_67")]; + tensor attn_weights_109_cast = matmul(transpose_x = attn_weights_109_transpose_x_0, transpose_y = attn_weights_109_transpose_y_0, x = query_states_37_cast, y = transpose_67)[name = tensor("attn_weights_109_cast")]; + tensor var_1731 = const()[name = tensor("op_1731"), val = tensor([1, 20, 77, 77])]; + tensor var_1732_cast = reshape(shape = var_1731, x = attn_weights_109_cast)[name = tensor("op_1732_cast")]; + tensor attn_weights_111_cast = add(x = var_1732_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_111_cast")]; + tensor var_1737 = const()[name = tensor("op_1737"), val = tensor([20, 77, 77])]; + tensor input_293_cast = reshape(shape = var_1737, x = attn_weights_111_cast)[name = tensor("input_293_cast")]; + tensor input_295_cast = softmax(axis = var_5, x = input_293_cast)[name = tensor("input_295_cast")]; tensor attn_output_109_transpose_x_0 = const()[name = tensor("attn_output_109_transpose_x_0"), val = tensor(false)]; tensor attn_output_109_transpose_y_0 = const()[name = tensor("attn_output_109_transpose_y_0"), val = tensor(false)]; - tensor attn_output_109 = matmul(transpose_x = attn_output_109_transpose_x_0, transpose_y = attn_output_109_transpose_y_0, x = input_295, y = value_states_75)[name = tensor("attn_output_109")]; - tensor var_1740 = const()[name = tensor("op_1740"), val = tensor([1, 20, 77, 64])]; - tensor attn_output_111 = reshape(shape = var_1740, x = attn_output_109)[name = tensor("attn_output_111")]; + tensor attn_output_109_cast = matmul(transpose_x = attn_output_109_transpose_x_0, transpose_y = attn_output_109_transpose_y_0, x = input_295_cast, y = 
value_states_75_cast)[name = tensor("attn_output_109_cast")]; + tensor var_1742 = const()[name = tensor("op_1742"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_111_cast = reshape(shape = var_1742, x = attn_output_109_cast)[name = tensor("attn_output_111_cast")]; tensor attn_output_113_perm_0 = const()[name = tensor("attn_output_113_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_1743 = const()[name = tensor("op_1743"), val = tensor([1, 77, 1280])]; - tensor transpose_66 = transpose(perm = attn_output_113_perm_0, x = attn_output_111)[name = tensor("transpose_66")]; - tensor input_297 = reshape(shape = var_1743, x = transpose_66)[name = tensor("input_297")]; - tensor hidden_states_111 = linear(bias = text_encoder_text_model_encoder_layers_18_self_attn_out_proj_bias, weight = text_encoder_text_model_encoder_layers_18_self_attn_out_proj_weight, x = input_297)[name = tensor("hidden_states_111")]; - tensor input_299 = add(x = input_291, y = hidden_states_111)[name = tensor("input_299")]; + tensor var_1745 = const()[name = tensor("op_1745"), val = tensor([1, 77, 1280])]; + tensor transpose_66 = transpose(perm = attn_output_113_perm_0, x = attn_output_111_cast)[name = tensor("transpose_66")]; + tensor input_297_cast = reshape(shape = var_1745, x = transpose_66)[name = tensor("input_297_cast")]; + tensor text_encoder_text_model_encoder_layers_18_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_18_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(844943744)))]; + tensor text_encoder_text_model_encoder_layers_18_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_18_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(848220608)))]; + tensor hidden_states_111_cast = linear(bias = text_encoder_text_model_encoder_layers_18_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_18_self_attn_out_proj_weight_to_fp16, x = input_297_cast)[name = tensor("hidden_states_111_cast")]; + tensor input_299_cast = add(x = input_291_cast, y = hidden_states_111_cast)[name = tensor("input_299_cast")]; tensor input_301_axes_0 = const()[name = tensor("input_301_axes_0"), val = tensor([-1])]; - tensor input_301 = layer_norm(axes = input_301_axes_0, beta = text_encoder_text_model_encoder_layers_18_layer_norm2_bias, epsilon = var_12, gamma = text_encoder_text_model_encoder_layers_18_layer_norm2_weight, x = input_299)[name = tensor("input_301")]; - tensor input_303 = linear(bias = text_encoder_text_model_encoder_layers_18_mlp_fc1_bias, weight = text_encoder_text_model_encoder_layers_18_mlp_fc1_weight, x = input_301)[name = tensor("input_303")]; + tensor text_encoder_text_model_encoder_layers_18_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_18_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(848223232)))]; + tensor text_encoder_text_model_encoder_layers_18_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_18_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(848225856)))]; + tensor input_301_cast = layer_norm(axes = input_301_axes_0, beta = text_encoder_text_model_encoder_layers_18_layer_norm2_bias_to_fp16, epsilon = var_13_to_fp16, gamma = 
text_encoder_text_model_encoder_layers_18_layer_norm2_weight_to_fp16, x = input_299_cast)[name = tensor("input_301_cast")]; + tensor text_encoder_text_model_encoder_layers_18_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_18_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(848228480)))]; + tensor text_encoder_text_model_encoder_layers_18_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_18_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(861335744)))]; + tensor input_303_cast = linear(bias = text_encoder_text_model_encoder_layers_18_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_18_mlp_fc1_weight_to_fp16, x = input_301_cast)[name = tensor("input_303_cast")]; tensor input_305_mode_0 = const()[name = tensor("input_305_mode_0"), val = tensor("EXACT")]; - tensor input_305 = gelu(mode = input_305_mode_0, x = input_303)[name = tensor("input_305")]; - tensor hidden_states_113 = linear(bias = text_encoder_text_model_encoder_layers_18_mlp_fc2_bias, weight = text_encoder_text_model_encoder_layers_18_mlp_fc2_weight, x = input_305)[name = tensor("hidden_states_113")]; - tensor input_307 = add(x = input_299, y = hidden_states_113)[name = tensor("input_307")]; + tensor input_305_cast = gelu(mode = input_305_mode_0, x = input_303_cast)[name = tensor("input_305_cast")]; + tensor text_encoder_text_model_encoder_layers_18_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_18_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(861346048)))]; + tensor text_encoder_text_model_encoder_layers_18_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_18_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(874453312)))]; + tensor hidden_states_113_cast = linear(bias = text_encoder_text_model_encoder_layers_18_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_18_mlp_fc2_weight_to_fp16, x = input_305_cast)[name = tensor("hidden_states_113_cast")]; + tensor input_307_cast = add(x = input_299_cast, y = hidden_states_113_cast)[name = tensor("input_307_cast")]; tensor hidden_states_115_axes_0 = const()[name = tensor("hidden_states_115_axes_0"), val = tensor([-1])]; - tensor hidden_states_115 = layer_norm(axes = hidden_states_115_axes_0, beta = text_encoder_text_model_encoder_layers_19_layer_norm1_bias, epsilon = var_12, gamma = text_encoder_text_model_encoder_layers_19_layer_norm1_weight, x = input_307)[name = tensor("hidden_states_115")]; - tensor var_1781 = linear(bias = text_encoder_text_model_encoder_layers_19_self_attn_q_proj_bias, weight = text_encoder_text_model_encoder_layers_19_self_attn_q_proj_weight, x = hidden_states_115)[name = tensor("op_1781")]; - tensor var_1782 = const()[name = tensor("op_1782"), val = tensor(0x1p-3)]; - tensor tensor_119 = mul(x = var_1781, y = var_1782)[name = tensor("tensor_119")]; - tensor tensor_115 = linear(bias = text_encoder_text_model_encoder_layers_19_self_attn_k_proj_bias, weight = text_encoder_text_model_encoder_layers_19_self_attn_k_proj_weight, x = hidden_states_115)[name = tensor("tensor_115")]; - tensor var_1787 = const()[name = tensor("op_1787"), val = tensor([1, -1, 20, 64])]; - tensor var_1788 = reshape(shape = var_1787, x = tensor_115)[name = 
tensor("op_1788")]; - tensor var_1789_perm_0 = const()[name = tensor("op_1789_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor tensor_117 = linear(bias = text_encoder_text_model_encoder_layers_19_self_attn_v_proj_bias, weight = text_encoder_text_model_encoder_layers_19_self_attn_v_proj_weight, x = hidden_states_115)[name = tensor("tensor_117")]; - tensor var_1794 = const()[name = tensor("op_1794"), val = tensor([1, -1, 20, 64])]; - tensor var_1795 = reshape(shape = var_1794, x = tensor_117)[name = tensor("op_1795")]; - tensor var_1796_perm_0 = const()[name = tensor("op_1796_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_1803 = const()[name = tensor("op_1803"), val = tensor([1, 77, 20, 64])]; - tensor var_1804 = reshape(shape = var_1803, x = tensor_119)[name = tensor("op_1804")]; - tensor var_1805_perm_0 = const()[name = tensor("op_1805_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_1807 = const()[name = tensor("op_1807"), val = tensor([20, -1, 64])]; - tensor transpose_63 = transpose(perm = var_1805_perm_0, x = var_1804)[name = tensor("transpose_63")]; - tensor query_states_39 = reshape(shape = var_1807, x = transpose_63)[name = tensor("query_states_39")]; + tensor text_encoder_text_model_encoder_layers_19_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_19_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(874455936)))]; + tensor text_encoder_text_model_encoder_layers_19_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_19_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(874458560)))]; + tensor hidden_states_115_cast = layer_norm(axes = hidden_states_115_axes_0, beta = text_encoder_text_model_encoder_layers_19_layer_norm1_bias_to_fp16, epsilon = var_13_to_fp16, gamma = text_encoder_text_model_encoder_layers_19_layer_norm1_weight_to_fp16, x = input_307_cast)[name = tensor("hidden_states_115_cast")]; + tensor text_encoder_text_model_encoder_layers_19_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_19_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(874461184)))]; + tensor text_encoder_text_model_encoder_layers_19_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_19_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(877738048)))]; + tensor var_1783_cast = linear(bias = text_encoder_text_model_encoder_layers_19_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_19_self_attn_q_proj_weight_to_fp16, x = hidden_states_115_cast)[name = tensor("op_1783_cast")]; + tensor var_1784_to_fp16 = const()[name = tensor("op_1784_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_119_cast = mul(x = var_1783_cast, y = var_1784_to_fp16)[name = tensor("tensor_119_cast")]; + tensor text_encoder_text_model_encoder_layers_19_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_19_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(877740672)))]; + tensor text_encoder_text_model_encoder_layers_19_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_19_self_attn_k_proj_bias_to_fp16"), val = 
tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(881017536)))]; + tensor tensor_115_cast = linear(bias = text_encoder_text_model_encoder_layers_19_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_19_self_attn_k_proj_weight_to_fp16, x = hidden_states_115_cast)[name = tensor("tensor_115_cast")]; + tensor var_1789 = const()[name = tensor("op_1789"), val = tensor([1, -1, 20, 64])]; + tensor var_1790_cast = reshape(shape = var_1789, x = tensor_115_cast)[name = tensor("op_1790_cast")]; + tensor var_1791_perm_0 = const()[name = tensor("op_1791_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_19_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_19_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(881020160)))]; + tensor text_encoder_text_model_encoder_layers_19_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_19_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(884297024)))]; + tensor tensor_117_cast = linear(bias = text_encoder_text_model_encoder_layers_19_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_19_self_attn_v_proj_weight_to_fp16, x = hidden_states_115_cast)[name = tensor("tensor_117_cast")]; + tensor var_1796 = const()[name = tensor("op_1796"), val = tensor([1, -1, 20, 64])]; + tensor var_1797_cast = reshape(shape = var_1796, x = tensor_117_cast)[name = tensor("op_1797_cast")]; + tensor var_1798_perm_0 = const()[name = tensor("op_1798_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1805 = const()[name = tensor("op_1805"), val = tensor([1, 77, 20, 64])]; + tensor var_1806_cast = reshape(shape = var_1805, x = tensor_119_cast)[name = tensor("op_1806_cast")]; + tensor var_1807_perm_0 = const()[name = tensor("op_1807_perm_0"), val = tensor([0, 2, 1, 3])]; tensor var_1809 = const()[name = tensor("op_1809"), val = tensor([20, -1, 64])]; - tensor transpose_65 = transpose(perm = var_1789_perm_0, x = var_1788)[name = tensor("transpose_65")]; - tensor key_states_79 = reshape(shape = var_1809, x = transpose_65)[name = tensor("key_states_79")]; + tensor transpose_63 = transpose(perm = var_1807_perm_0, x = var_1806_cast)[name = tensor("transpose_63")]; + tensor query_states_39_cast = reshape(shape = var_1809, x = transpose_63)[name = tensor("query_states_39_cast")]; tensor var_1811 = const()[name = tensor("op_1811"), val = tensor([20, -1, 64])]; - tensor transpose_64 = transpose(perm = var_1796_perm_0, x = var_1795)[name = tensor("transpose_64")]; - tensor value_states_79 = reshape(shape = var_1811, x = transpose_64)[name = tensor("value_states_79")]; - tensor var_1814_perm_0 = const()[name = tensor("op_1814_perm_0"), val = tensor([0, 2, 1])]; + tensor transpose_65 = transpose(perm = var_1791_perm_0, x = var_1790_cast)[name = tensor("transpose_65")]; + tensor key_states_79_cast = reshape(shape = var_1811, x = transpose_65)[name = tensor("key_states_79_cast")]; + tensor var_1813 = const()[name = tensor("op_1813"), val = tensor([20, -1, 64])]; + tensor transpose_64 = transpose(perm = var_1798_perm_0, x = var_1797_cast)[name = tensor("transpose_64")]; + tensor value_states_79_cast = reshape(shape = var_1813, x = transpose_64)[name = tensor("value_states_79_cast")]; + tensor var_1816_perm_0 = const()[name = tensor("op_1816_perm_0"), val = 
tensor([0, 2, 1])]; tensor attn_weights_115_transpose_x_0 = const()[name = tensor("attn_weights_115_transpose_x_0"), val = tensor(false)]; tensor attn_weights_115_transpose_y_0 = const()[name = tensor("attn_weights_115_transpose_y_0"), val = tensor(false)]; - tensor transpose_62 = transpose(perm = var_1814_perm_0, x = key_states_79)[name = tensor("transpose_62")]; - tensor attn_weights_115 = matmul(transpose_x = attn_weights_115_transpose_x_0, transpose_y = attn_weights_115_transpose_y_0, x = query_states_39, y = transpose_62)[name = tensor("attn_weights_115")]; - tensor var_1816 = const()[name = tensor("op_1816"), val = tensor([1, 20, 77, 77])]; - tensor var_1817 = reshape(shape = var_1816, x = attn_weights_115)[name = tensor("op_1817")]; - tensor attn_weights_117 = add(x = var_1817, y = causal_attention_mask)[name = tensor("attn_weights_117")]; - tensor var_1822 = const()[name = tensor("op_1822"), val = tensor([20, 77, 77])]; - tensor input_309 = reshape(shape = var_1822, x = attn_weights_117)[name = tensor("input_309")]; - tensor input_311 = softmax(axis = var_5, x = input_309)[name = tensor("input_311")]; + tensor transpose_62 = transpose(perm = var_1816_perm_0, x = key_states_79_cast)[name = tensor("transpose_62")]; + tensor attn_weights_115_cast = matmul(transpose_x = attn_weights_115_transpose_x_0, transpose_y = attn_weights_115_transpose_y_0, x = query_states_39_cast, y = transpose_62)[name = tensor("attn_weights_115_cast")]; + tensor var_1818 = const()[name = tensor("op_1818"), val = tensor([1, 20, 77, 77])]; + tensor var_1819_cast = reshape(shape = var_1818, x = attn_weights_115_cast)[name = tensor("op_1819_cast")]; + tensor attn_weights_117_cast = add(x = var_1819_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_117_cast")]; + tensor var_1824 = const()[name = tensor("op_1824"), val = tensor([20, 77, 77])]; + tensor input_309_cast = reshape(shape = var_1824, x = attn_weights_117_cast)[name = tensor("input_309_cast")]; + tensor input_311_cast = softmax(axis = var_5, x = input_309_cast)[name = tensor("input_311_cast")]; tensor attn_output_115_transpose_x_0 = const()[name = tensor("attn_output_115_transpose_x_0"), val = tensor(false)]; tensor attn_output_115_transpose_y_0 = const()[name = tensor("attn_output_115_transpose_y_0"), val = tensor(false)]; - tensor attn_output_115 = matmul(transpose_x = attn_output_115_transpose_x_0, transpose_y = attn_output_115_transpose_y_0, x = input_311, y = value_states_79)[name = tensor("attn_output_115")]; - tensor var_1827 = const()[name = tensor("op_1827"), val = tensor([1, 20, 77, 64])]; - tensor attn_output_117 = reshape(shape = var_1827, x = attn_output_115)[name = tensor("attn_output_117")]; + tensor attn_output_115_cast = matmul(transpose_x = attn_output_115_transpose_x_0, transpose_y = attn_output_115_transpose_y_0, x = input_311_cast, y = value_states_79_cast)[name = tensor("attn_output_115_cast")]; + tensor var_1829 = const()[name = tensor("op_1829"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_117_cast = reshape(shape = var_1829, x = attn_output_115_cast)[name = tensor("attn_output_117_cast")]; tensor attn_output_119_perm_0 = const()[name = tensor("attn_output_119_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_1830 = const()[name = tensor("op_1830"), val = tensor([1, 77, 1280])]; - tensor transpose_61 = transpose(perm = attn_output_119_perm_0, x = attn_output_117)[name = tensor("transpose_61")]; - tensor input_313 = reshape(shape = var_1830, x = transpose_61)[name = tensor("input_313")]; - tensor 
hidden_states_117 = linear(bias = text_encoder_text_model_encoder_layers_19_self_attn_out_proj_bias, weight = text_encoder_text_model_encoder_layers_19_self_attn_out_proj_weight, x = input_313)[name = tensor("hidden_states_117")]; - tensor input_315 = add(x = input_307, y = hidden_states_117)[name = tensor("input_315")]; + tensor var_1832 = const()[name = tensor("op_1832"), val = tensor([1, 77, 1280])]; + tensor transpose_61 = transpose(perm = attn_output_119_perm_0, x = attn_output_117_cast)[name = tensor("transpose_61")]; + tensor input_313_cast = reshape(shape = var_1832, x = transpose_61)[name = tensor("input_313_cast")]; + tensor text_encoder_text_model_encoder_layers_19_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_19_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(884299648)))]; + tensor text_encoder_text_model_encoder_layers_19_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_19_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(887576512)))]; + tensor hidden_states_117_cast = linear(bias = text_encoder_text_model_encoder_layers_19_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_19_self_attn_out_proj_weight_to_fp16, x = input_313_cast)[name = tensor("hidden_states_117_cast")]; + tensor input_315_cast = add(x = input_307_cast, y = hidden_states_117_cast)[name = tensor("input_315_cast")]; tensor input_317_axes_0 = const()[name = tensor("input_317_axes_0"), val = tensor([-1])]; - tensor input_317 = layer_norm(axes = input_317_axes_0, beta = text_encoder_text_model_encoder_layers_19_layer_norm2_bias, epsilon = var_12, gamma = text_encoder_text_model_encoder_layers_19_layer_norm2_weight, x = input_315)[name = tensor("input_317")]; - tensor input_319 = linear(bias = text_encoder_text_model_encoder_layers_19_mlp_fc1_bias, weight = text_encoder_text_model_encoder_layers_19_mlp_fc1_weight, x = input_317)[name = tensor("input_319")]; + tensor text_encoder_text_model_encoder_layers_19_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_19_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(887579136)))]; + tensor text_encoder_text_model_encoder_layers_19_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_19_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(887581760)))]; + tensor input_317_cast = layer_norm(axes = input_317_axes_0, beta = text_encoder_text_model_encoder_layers_19_layer_norm2_bias_to_fp16, epsilon = var_13_to_fp16, gamma = text_encoder_text_model_encoder_layers_19_layer_norm2_weight_to_fp16, x = input_315_cast)[name = tensor("input_317_cast")]; + tensor text_encoder_text_model_encoder_layers_19_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_19_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(887584384)))]; + tensor text_encoder_text_model_encoder_layers_19_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_19_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(900691648)))]; + tensor input_319_cast = 
linear(bias = text_encoder_text_model_encoder_layers_19_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_19_mlp_fc1_weight_to_fp16, x = input_317_cast)[name = tensor("input_319_cast")]; tensor input_321_mode_0 = const()[name = tensor("input_321_mode_0"), val = tensor("EXACT")]; - tensor input_321 = gelu(mode = input_321_mode_0, x = input_319)[name = tensor("input_321")]; - tensor hidden_states_119 = linear(bias = text_encoder_text_model_encoder_layers_19_mlp_fc2_bias, weight = text_encoder_text_model_encoder_layers_19_mlp_fc2_weight, x = input_321)[name = tensor("hidden_states_119")]; - tensor input_323 = add(x = input_315, y = hidden_states_119)[name = tensor("input_323")]; + tensor input_321_cast = gelu(mode = input_321_mode_0, x = input_319_cast)[name = tensor("input_321_cast")]; + tensor text_encoder_text_model_encoder_layers_19_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_19_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(900701952)))]; + tensor text_encoder_text_model_encoder_layers_19_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_19_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(913809216)))]; + tensor hidden_states_119_cast = linear(bias = text_encoder_text_model_encoder_layers_19_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_19_mlp_fc2_weight_to_fp16, x = input_321_cast)[name = tensor("hidden_states_119_cast")]; + tensor input_323_cast = add(x = input_315_cast, y = hidden_states_119_cast)[name = tensor("input_323_cast")]; tensor hidden_states_121_axes_0 = const()[name = tensor("hidden_states_121_axes_0"), val = tensor([-1])]; - tensor hidden_states_121 = layer_norm(axes = hidden_states_121_axes_0, beta = text_encoder_text_model_encoder_layers_20_layer_norm1_bias, epsilon = var_12, gamma = text_encoder_text_model_encoder_layers_20_layer_norm1_weight, x = input_323)[name = tensor("hidden_states_121")]; - tensor var_1868 = linear(bias = text_encoder_text_model_encoder_layers_20_self_attn_q_proj_bias, weight = text_encoder_text_model_encoder_layers_20_self_attn_q_proj_weight, x = hidden_states_121)[name = tensor("op_1868")]; - tensor var_1869 = const()[name = tensor("op_1869"), val = tensor(0x1p-3)]; - tensor tensor_125 = mul(x = var_1868, y = var_1869)[name = tensor("tensor_125")]; - tensor tensor_121 = linear(bias = text_encoder_text_model_encoder_layers_20_self_attn_k_proj_bias, weight = text_encoder_text_model_encoder_layers_20_self_attn_k_proj_weight, x = hidden_states_121)[name = tensor("tensor_121")]; - tensor var_1874 = const()[name = tensor("op_1874"), val = tensor([1, -1, 20, 64])]; - tensor var_1875 = reshape(shape = var_1874, x = tensor_121)[name = tensor("op_1875")]; - tensor var_1876_perm_0 = const()[name = tensor("op_1876_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor tensor_123 = linear(bias = text_encoder_text_model_encoder_layers_20_self_attn_v_proj_bias, weight = text_encoder_text_model_encoder_layers_20_self_attn_v_proj_weight, x = hidden_states_121)[name = tensor("tensor_123")]; - tensor var_1881 = const()[name = tensor("op_1881"), val = tensor([1, -1, 20, 64])]; - tensor var_1882 = reshape(shape = var_1881, x = tensor_123)[name = tensor("op_1882")]; - tensor var_1883_perm_0 = const()[name = tensor("op_1883_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_1890 = const()[name = tensor("op_1890"), val = 
tensor([1, 77, 20, 64])]; - tensor var_1891 = reshape(shape = var_1890, x = tensor_125)[name = tensor("op_1891")]; - tensor var_1892_perm_0 = const()[name = tensor("op_1892_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_1894 = const()[name = tensor("op_1894"), val = tensor([20, -1, 64])]; - tensor transpose_58 = transpose(perm = var_1892_perm_0, x = var_1891)[name = tensor("transpose_58")]; - tensor query_states_41 = reshape(shape = var_1894, x = transpose_58)[name = tensor("query_states_41")]; + tensor text_encoder_text_model_encoder_layers_20_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_20_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(913811840)))]; + tensor text_encoder_text_model_encoder_layers_20_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_20_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(913814464)))]; + tensor hidden_states_121_cast = layer_norm(axes = hidden_states_121_axes_0, beta = text_encoder_text_model_encoder_layers_20_layer_norm1_bias_to_fp16, epsilon = var_13_to_fp16, gamma = text_encoder_text_model_encoder_layers_20_layer_norm1_weight_to_fp16, x = input_323_cast)[name = tensor("hidden_states_121_cast")]; + tensor text_encoder_text_model_encoder_layers_20_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_20_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(913817088)))]; + tensor text_encoder_text_model_encoder_layers_20_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_20_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(917093952)))]; + tensor var_1870_cast = linear(bias = text_encoder_text_model_encoder_layers_20_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_20_self_attn_q_proj_weight_to_fp16, x = hidden_states_121_cast)[name = tensor("op_1870_cast")]; + tensor var_1871_to_fp16 = const()[name = tensor("op_1871_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_125_cast = mul(x = var_1870_cast, y = var_1871_to_fp16)[name = tensor("tensor_125_cast")]; + tensor text_encoder_text_model_encoder_layers_20_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_20_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(917096576)))]; + tensor text_encoder_text_model_encoder_layers_20_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_20_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(920373440)))]; + tensor tensor_121_cast = linear(bias = text_encoder_text_model_encoder_layers_20_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_20_self_attn_k_proj_weight_to_fp16, x = hidden_states_121_cast)[name = tensor("tensor_121_cast")]; + tensor var_1876 = const()[name = tensor("op_1876"), val = tensor([1, -1, 20, 64])]; + tensor var_1877_cast = reshape(shape = var_1876, x = tensor_121_cast)[name = tensor("op_1877_cast")]; + tensor var_1878_perm_0 = const()[name = tensor("op_1878_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor 
text_encoder_text_model_encoder_layers_20_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_20_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(920376064)))]; + tensor text_encoder_text_model_encoder_layers_20_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_20_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(923652928)))]; + tensor tensor_123_cast = linear(bias = text_encoder_text_model_encoder_layers_20_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_20_self_attn_v_proj_weight_to_fp16, x = hidden_states_121_cast)[name = tensor("tensor_123_cast")]; + tensor var_1883 = const()[name = tensor("op_1883"), val = tensor([1, -1, 20, 64])]; + tensor var_1884_cast = reshape(shape = var_1883, x = tensor_123_cast)[name = tensor("op_1884_cast")]; + tensor var_1885_perm_0 = const()[name = tensor("op_1885_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1892 = const()[name = tensor("op_1892"), val = tensor([1, 77, 20, 64])]; + tensor var_1893_cast = reshape(shape = var_1892, x = tensor_125_cast)[name = tensor("op_1893_cast")]; + tensor var_1894_perm_0 = const()[name = tensor("op_1894_perm_0"), val = tensor([0, 2, 1, 3])]; tensor var_1896 = const()[name = tensor("op_1896"), val = tensor([20, -1, 64])]; - tensor transpose_60 = transpose(perm = var_1876_perm_0, x = var_1875)[name = tensor("transpose_60")]; - tensor key_states_83 = reshape(shape = var_1896, x = transpose_60)[name = tensor("key_states_83")]; + tensor transpose_58 = transpose(perm = var_1894_perm_0, x = var_1893_cast)[name = tensor("transpose_58")]; + tensor query_states_41_cast = reshape(shape = var_1896, x = transpose_58)[name = tensor("query_states_41_cast")]; tensor var_1898 = const()[name = tensor("op_1898"), val = tensor([20, -1, 64])]; - tensor transpose_59 = transpose(perm = var_1883_perm_0, x = var_1882)[name = tensor("transpose_59")]; - tensor value_states_83 = reshape(shape = var_1898, x = transpose_59)[name = tensor("value_states_83")]; - tensor var_1901_perm_0 = const()[name = tensor("op_1901_perm_0"), val = tensor([0, 2, 1])]; + tensor transpose_60 = transpose(perm = var_1878_perm_0, x = var_1877_cast)[name = tensor("transpose_60")]; + tensor key_states_83_cast = reshape(shape = var_1898, x = transpose_60)[name = tensor("key_states_83_cast")]; + tensor var_1900 = const()[name = tensor("op_1900"), val = tensor([20, -1, 64])]; + tensor transpose_59 = transpose(perm = var_1885_perm_0, x = var_1884_cast)[name = tensor("transpose_59")]; + tensor value_states_83_cast = reshape(shape = var_1900, x = transpose_59)[name = tensor("value_states_83_cast")]; + tensor var_1903_perm_0 = const()[name = tensor("op_1903_perm_0"), val = tensor([0, 2, 1])]; tensor attn_weights_121_transpose_x_0 = const()[name = tensor("attn_weights_121_transpose_x_0"), val = tensor(false)]; tensor attn_weights_121_transpose_y_0 = const()[name = tensor("attn_weights_121_transpose_y_0"), val = tensor(false)]; - tensor transpose_57 = transpose(perm = var_1901_perm_0, x = key_states_83)[name = tensor("transpose_57")]; - tensor attn_weights_121 = matmul(transpose_x = attn_weights_121_transpose_x_0, transpose_y = attn_weights_121_transpose_y_0, x = query_states_41, y = transpose_57)[name = tensor("attn_weights_121")]; - tensor var_1903 = const()[name = tensor("op_1903"), val = tensor([1, 20, 77, 77])]; - 
tensor var_1904 = reshape(shape = var_1903, x = attn_weights_121)[name = tensor("op_1904")]; - tensor attn_weights_123 = add(x = var_1904, y = causal_attention_mask)[name = tensor("attn_weights_123")]; - tensor var_1909 = const()[name = tensor("op_1909"), val = tensor([20, 77, 77])]; - tensor input_325 = reshape(shape = var_1909, x = attn_weights_123)[name = tensor("input_325")]; - tensor input_327 = softmax(axis = var_5, x = input_325)[name = tensor("input_327")]; + tensor transpose_57 = transpose(perm = var_1903_perm_0, x = key_states_83_cast)[name = tensor("transpose_57")]; + tensor attn_weights_121_cast = matmul(transpose_x = attn_weights_121_transpose_x_0, transpose_y = attn_weights_121_transpose_y_0, x = query_states_41_cast, y = transpose_57)[name = tensor("attn_weights_121_cast")]; + tensor var_1905 = const()[name = tensor("op_1905"), val = tensor([1, 20, 77, 77])]; + tensor var_1906_cast = reshape(shape = var_1905, x = attn_weights_121_cast)[name = tensor("op_1906_cast")]; + tensor attn_weights_123_cast = add(x = var_1906_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_123_cast")]; + tensor var_1911 = const()[name = tensor("op_1911"), val = tensor([20, 77, 77])]; + tensor input_325_cast = reshape(shape = var_1911, x = attn_weights_123_cast)[name = tensor("input_325_cast")]; + tensor input_327_cast = softmax(axis = var_5, x = input_325_cast)[name = tensor("input_327_cast")]; tensor attn_output_121_transpose_x_0 = const()[name = tensor("attn_output_121_transpose_x_0"), val = tensor(false)]; tensor attn_output_121_transpose_y_0 = const()[name = tensor("attn_output_121_transpose_y_0"), val = tensor(false)]; - tensor attn_output_121 = matmul(transpose_x = attn_output_121_transpose_x_0, transpose_y = attn_output_121_transpose_y_0, x = input_327, y = value_states_83)[name = tensor("attn_output_121")]; - tensor var_1914 = const()[name = tensor("op_1914"), val = tensor([1, 20, 77, 64])]; - tensor attn_output_123 = reshape(shape = var_1914, x = attn_output_121)[name = tensor("attn_output_123")]; + tensor attn_output_121_cast = matmul(transpose_x = attn_output_121_transpose_x_0, transpose_y = attn_output_121_transpose_y_0, x = input_327_cast, y = value_states_83_cast)[name = tensor("attn_output_121_cast")]; + tensor var_1916 = const()[name = tensor("op_1916"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_123_cast = reshape(shape = var_1916, x = attn_output_121_cast)[name = tensor("attn_output_123_cast")]; tensor attn_output_125_perm_0 = const()[name = tensor("attn_output_125_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_1917 = const()[name = tensor("op_1917"), val = tensor([1, 77, 1280])]; - tensor transpose_56 = transpose(perm = attn_output_125_perm_0, x = attn_output_123)[name = tensor("transpose_56")]; - tensor input_329 = reshape(shape = var_1917, x = transpose_56)[name = tensor("input_329")]; - tensor hidden_states_123 = linear(bias = text_encoder_text_model_encoder_layers_20_self_attn_out_proj_bias, weight = text_encoder_text_model_encoder_layers_20_self_attn_out_proj_weight, x = input_329)[name = tensor("hidden_states_123")]; - tensor input_331 = add(x = input_323, y = hidden_states_123)[name = tensor("input_331")]; + tensor var_1919 = const()[name = tensor("op_1919"), val = tensor([1, 77, 1280])]; + tensor transpose_56 = transpose(perm = attn_output_125_perm_0, x = attn_output_123_cast)[name = tensor("transpose_56")]; + tensor input_329_cast = reshape(shape = var_1919, x = transpose_56)[name = tensor("input_329_cast")]; + tensor 
text_encoder_text_model_encoder_layers_20_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_20_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(923655552)))]; + tensor text_encoder_text_model_encoder_layers_20_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_20_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(926932416)))]; + tensor hidden_states_123_cast = linear(bias = text_encoder_text_model_encoder_layers_20_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_20_self_attn_out_proj_weight_to_fp16, x = input_329_cast)[name = tensor("hidden_states_123_cast")]; + tensor input_331_cast = add(x = input_323_cast, y = hidden_states_123_cast)[name = tensor("input_331_cast")]; tensor input_333_axes_0 = const()[name = tensor("input_333_axes_0"), val = tensor([-1])]; - tensor input_333 = layer_norm(axes = input_333_axes_0, beta = text_encoder_text_model_encoder_layers_20_layer_norm2_bias, epsilon = var_12, gamma = text_encoder_text_model_encoder_layers_20_layer_norm2_weight, x = input_331)[name = tensor("input_333")]; - tensor input_335 = linear(bias = text_encoder_text_model_encoder_layers_20_mlp_fc1_bias, weight = text_encoder_text_model_encoder_layers_20_mlp_fc1_weight, x = input_333)[name = tensor("input_335")]; + tensor text_encoder_text_model_encoder_layers_20_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_20_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(926935040)))]; + tensor text_encoder_text_model_encoder_layers_20_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_20_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(926937664)))]; + tensor input_333_cast = layer_norm(axes = input_333_axes_0, beta = text_encoder_text_model_encoder_layers_20_layer_norm2_bias_to_fp16, epsilon = var_13_to_fp16, gamma = text_encoder_text_model_encoder_layers_20_layer_norm2_weight_to_fp16, x = input_331_cast)[name = tensor("input_333_cast")]; + tensor text_encoder_text_model_encoder_layers_20_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_20_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(926940288)))]; + tensor text_encoder_text_model_encoder_layers_20_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_20_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(940047552)))]; + tensor input_335_cast = linear(bias = text_encoder_text_model_encoder_layers_20_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_20_mlp_fc1_weight_to_fp16, x = input_333_cast)[name = tensor("input_335_cast")]; tensor input_337_mode_0 = const()[name = tensor("input_337_mode_0"), val = tensor("EXACT")]; - tensor input_337 = gelu(mode = input_337_mode_0, x = input_335)[name = tensor("input_337")]; - tensor hidden_states_125 = linear(bias = text_encoder_text_model_encoder_layers_20_mlp_fc2_bias, weight = text_encoder_text_model_encoder_layers_20_mlp_fc2_weight, x = input_337)[name = tensor("hidden_states_125")]; - tensor input_339 = add(x = 
input_331, y = hidden_states_125)[name = tensor("input_339")]; + tensor input_337_cast = gelu(mode = input_337_mode_0, x = input_335_cast)[name = tensor("input_337_cast")]; + tensor text_encoder_text_model_encoder_layers_20_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_20_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(940057856)))]; + tensor text_encoder_text_model_encoder_layers_20_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_20_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(953165120)))]; + tensor hidden_states_125_cast = linear(bias = text_encoder_text_model_encoder_layers_20_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_20_mlp_fc2_weight_to_fp16, x = input_337_cast)[name = tensor("hidden_states_125_cast")]; + tensor input_339_cast = add(x = input_331_cast, y = hidden_states_125_cast)[name = tensor("input_339_cast")]; tensor hidden_states_127_axes_0 = const()[name = tensor("hidden_states_127_axes_0"), val = tensor([-1])]; - tensor hidden_states_127 = layer_norm(axes = hidden_states_127_axes_0, beta = text_encoder_text_model_encoder_layers_21_layer_norm1_bias, epsilon = var_12, gamma = text_encoder_text_model_encoder_layers_21_layer_norm1_weight, x = input_339)[name = tensor("hidden_states_127")]; - tensor var_1955 = linear(bias = text_encoder_text_model_encoder_layers_21_self_attn_q_proj_bias, weight = text_encoder_text_model_encoder_layers_21_self_attn_q_proj_weight, x = hidden_states_127)[name = tensor("op_1955")]; - tensor var_1956 = const()[name = tensor("op_1956"), val = tensor(0x1p-3)]; - tensor tensor_131 = mul(x = var_1955, y = var_1956)[name = tensor("tensor_131")]; - tensor tensor_127 = linear(bias = text_encoder_text_model_encoder_layers_21_self_attn_k_proj_bias, weight = text_encoder_text_model_encoder_layers_21_self_attn_k_proj_weight, x = hidden_states_127)[name = tensor("tensor_127")]; - tensor var_1961 = const()[name = tensor("op_1961"), val = tensor([1, -1, 20, 64])]; - tensor var_1962 = reshape(shape = var_1961, x = tensor_127)[name = tensor("op_1962")]; - tensor var_1963_perm_0 = const()[name = tensor("op_1963_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor tensor_129 = linear(bias = text_encoder_text_model_encoder_layers_21_self_attn_v_proj_bias, weight = text_encoder_text_model_encoder_layers_21_self_attn_v_proj_weight, x = hidden_states_127)[name = tensor("tensor_129")]; - tensor var_1968 = const()[name = tensor("op_1968"), val = tensor([1, -1, 20, 64])]; - tensor var_1969 = reshape(shape = var_1968, x = tensor_129)[name = tensor("op_1969")]; - tensor var_1970_perm_0 = const()[name = tensor("op_1970_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_1977 = const()[name = tensor("op_1977"), val = tensor([1, 77, 20, 64])]; - tensor var_1978 = reshape(shape = var_1977, x = tensor_131)[name = tensor("op_1978")]; - tensor var_1979_perm_0 = const()[name = tensor("op_1979_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_1981 = const()[name = tensor("op_1981"), val = tensor([20, -1, 64])]; - tensor transpose_53 = transpose(perm = var_1979_perm_0, x = var_1978)[name = tensor("transpose_53")]; - tensor query_states_43 = reshape(shape = var_1981, x = transpose_53)[name = tensor("query_states_43")]; + tensor text_encoder_text_model_encoder_layers_21_layer_norm1_weight_to_fp16 = const()[name = 
tensor("text_encoder_text_model_encoder_layers_21_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(953167744)))]; + tensor text_encoder_text_model_encoder_layers_21_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_21_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(953170368)))]; + tensor hidden_states_127_cast = layer_norm(axes = hidden_states_127_axes_0, beta = text_encoder_text_model_encoder_layers_21_layer_norm1_bias_to_fp16, epsilon = var_13_to_fp16, gamma = text_encoder_text_model_encoder_layers_21_layer_norm1_weight_to_fp16, x = input_339_cast)[name = tensor("hidden_states_127_cast")]; + tensor text_encoder_text_model_encoder_layers_21_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_21_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(953172992)))]; + tensor text_encoder_text_model_encoder_layers_21_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_21_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(956449856)))]; + tensor var_1957_cast = linear(bias = text_encoder_text_model_encoder_layers_21_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_21_self_attn_q_proj_weight_to_fp16, x = hidden_states_127_cast)[name = tensor("op_1957_cast")]; + tensor var_1958_to_fp16 = const()[name = tensor("op_1958_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_131_cast = mul(x = var_1957_cast, y = var_1958_to_fp16)[name = tensor("tensor_131_cast")]; + tensor text_encoder_text_model_encoder_layers_21_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_21_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(956452480)))]; + tensor text_encoder_text_model_encoder_layers_21_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_21_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(959729344)))]; + tensor tensor_127_cast = linear(bias = text_encoder_text_model_encoder_layers_21_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_21_self_attn_k_proj_weight_to_fp16, x = hidden_states_127_cast)[name = tensor("tensor_127_cast")]; + tensor var_1963 = const()[name = tensor("op_1963"), val = tensor([1, -1, 20, 64])]; + tensor var_1964_cast = reshape(shape = var_1963, x = tensor_127_cast)[name = tensor("op_1964_cast")]; + tensor var_1965_perm_0 = const()[name = tensor("op_1965_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_21_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_21_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(959731968)))]; + tensor text_encoder_text_model_encoder_layers_21_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_21_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(963008832)))]; + tensor tensor_129_cast = linear(bias = 
text_encoder_text_model_encoder_layers_21_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_21_self_attn_v_proj_weight_to_fp16, x = hidden_states_127_cast)[name = tensor("tensor_129_cast")]; + tensor var_1970 = const()[name = tensor("op_1970"), val = tensor([1, -1, 20, 64])]; + tensor var_1971_cast = reshape(shape = var_1970, x = tensor_129_cast)[name = tensor("op_1971_cast")]; + tensor var_1972_perm_0 = const()[name = tensor("op_1972_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1979 = const()[name = tensor("op_1979"), val = tensor([1, 77, 20, 64])]; + tensor var_1980_cast = reshape(shape = var_1979, x = tensor_131_cast)[name = tensor("op_1980_cast")]; + tensor var_1981_perm_0 = const()[name = tensor("op_1981_perm_0"), val = tensor([0, 2, 1, 3])]; tensor var_1983 = const()[name = tensor("op_1983"), val = tensor([20, -1, 64])]; - tensor transpose_55 = transpose(perm = var_1963_perm_0, x = var_1962)[name = tensor("transpose_55")]; - tensor key_states_87 = reshape(shape = var_1983, x = transpose_55)[name = tensor("key_states_87")]; + tensor transpose_53 = transpose(perm = var_1981_perm_0, x = var_1980_cast)[name = tensor("transpose_53")]; + tensor query_states_43_cast = reshape(shape = var_1983, x = transpose_53)[name = tensor("query_states_43_cast")]; tensor var_1985 = const()[name = tensor("op_1985"), val = tensor([20, -1, 64])]; - tensor transpose_54 = transpose(perm = var_1970_perm_0, x = var_1969)[name = tensor("transpose_54")]; - tensor value_states_87 = reshape(shape = var_1985, x = transpose_54)[name = tensor("value_states_87")]; - tensor var_1988_perm_0 = const()[name = tensor("op_1988_perm_0"), val = tensor([0, 2, 1])]; + tensor transpose_55 = transpose(perm = var_1965_perm_0, x = var_1964_cast)[name = tensor("transpose_55")]; + tensor key_states_87_cast = reshape(shape = var_1985, x = transpose_55)[name = tensor("key_states_87_cast")]; + tensor var_1987 = const()[name = tensor("op_1987"), val = tensor([20, -1, 64])]; + tensor transpose_54 = transpose(perm = var_1972_perm_0, x = var_1971_cast)[name = tensor("transpose_54")]; + tensor value_states_87_cast = reshape(shape = var_1987, x = transpose_54)[name = tensor("value_states_87_cast")]; + tensor var_1990_perm_0 = const()[name = tensor("op_1990_perm_0"), val = tensor([0, 2, 1])]; tensor attn_weights_127_transpose_x_0 = const()[name = tensor("attn_weights_127_transpose_x_0"), val = tensor(false)]; tensor attn_weights_127_transpose_y_0 = const()[name = tensor("attn_weights_127_transpose_y_0"), val = tensor(false)]; - tensor transpose_52 = transpose(perm = var_1988_perm_0, x = key_states_87)[name = tensor("transpose_52")]; - tensor attn_weights_127 = matmul(transpose_x = attn_weights_127_transpose_x_0, transpose_y = attn_weights_127_transpose_y_0, x = query_states_43, y = transpose_52)[name = tensor("attn_weights_127")]; - tensor var_1990 = const()[name = tensor("op_1990"), val = tensor([1, 20, 77, 77])]; - tensor var_1991 = reshape(shape = var_1990, x = attn_weights_127)[name = tensor("op_1991")]; - tensor attn_weights_129 = add(x = var_1991, y = causal_attention_mask)[name = tensor("attn_weights_129")]; - tensor var_1996 = const()[name = tensor("op_1996"), val = tensor([20, 77, 77])]; - tensor input_341 = reshape(shape = var_1996, x = attn_weights_129)[name = tensor("input_341")]; - tensor input_343 = softmax(axis = var_5, x = input_341)[name = tensor("input_343")]; + tensor transpose_52 = transpose(perm = var_1990_perm_0, x = key_states_87_cast)[name = tensor("transpose_52")]; + tensor 
attn_weights_127_cast = matmul(transpose_x = attn_weights_127_transpose_x_0, transpose_y = attn_weights_127_transpose_y_0, x = query_states_43_cast, y = transpose_52)[name = tensor("attn_weights_127_cast")]; + tensor var_1992 = const()[name = tensor("op_1992"), val = tensor([1, 20, 77, 77])]; + tensor var_1993_cast = reshape(shape = var_1992, x = attn_weights_127_cast)[name = tensor("op_1993_cast")]; + tensor attn_weights_129_cast = add(x = var_1993_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_129_cast")]; + tensor var_1998 = const()[name = tensor("op_1998"), val = tensor([20, 77, 77])]; + tensor input_341_cast = reshape(shape = var_1998, x = attn_weights_129_cast)[name = tensor("input_341_cast")]; + tensor input_343_cast = softmax(axis = var_5, x = input_341_cast)[name = tensor("input_343_cast")]; tensor attn_output_127_transpose_x_0 = const()[name = tensor("attn_output_127_transpose_x_0"), val = tensor(false)]; tensor attn_output_127_transpose_y_0 = const()[name = tensor("attn_output_127_transpose_y_0"), val = tensor(false)]; - tensor attn_output_127 = matmul(transpose_x = attn_output_127_transpose_x_0, transpose_y = attn_output_127_transpose_y_0, x = input_343, y = value_states_87)[name = tensor("attn_output_127")]; - tensor var_2001 = const()[name = tensor("op_2001"), val = tensor([1, 20, 77, 64])]; - tensor attn_output_129 = reshape(shape = var_2001, x = attn_output_127)[name = tensor("attn_output_129")]; + tensor attn_output_127_cast = matmul(transpose_x = attn_output_127_transpose_x_0, transpose_y = attn_output_127_transpose_y_0, x = input_343_cast, y = value_states_87_cast)[name = tensor("attn_output_127_cast")]; + tensor var_2003 = const()[name = tensor("op_2003"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_129_cast = reshape(shape = var_2003, x = attn_output_127_cast)[name = tensor("attn_output_129_cast")]; tensor attn_output_131_perm_0 = const()[name = tensor("attn_output_131_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_2004 = const()[name = tensor("op_2004"), val = tensor([1, 77, 1280])]; - tensor transpose_51 = transpose(perm = attn_output_131_perm_0, x = attn_output_129)[name = tensor("transpose_51")]; - tensor input_345 = reshape(shape = var_2004, x = transpose_51)[name = tensor("input_345")]; - tensor hidden_states_129 = linear(bias = text_encoder_text_model_encoder_layers_21_self_attn_out_proj_bias, weight = text_encoder_text_model_encoder_layers_21_self_attn_out_proj_weight, x = input_345)[name = tensor("hidden_states_129")]; - tensor input_347 = add(x = input_339, y = hidden_states_129)[name = tensor("input_347")]; + tensor var_2006 = const()[name = tensor("op_2006"), val = tensor([1, 77, 1280])]; + tensor transpose_51 = transpose(perm = attn_output_131_perm_0, x = attn_output_129_cast)[name = tensor("transpose_51")]; + tensor input_345_cast = reshape(shape = var_2006, x = transpose_51)[name = tensor("input_345_cast")]; + tensor text_encoder_text_model_encoder_layers_21_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_21_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(963011456)))]; + tensor text_encoder_text_model_encoder_layers_21_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_21_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(966288320)))]; + tensor hidden_states_129_cast = 
linear(bias = text_encoder_text_model_encoder_layers_21_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_21_self_attn_out_proj_weight_to_fp16, x = input_345_cast)[name = tensor("hidden_states_129_cast")]; + tensor input_347_cast = add(x = input_339_cast, y = hidden_states_129_cast)[name = tensor("input_347_cast")]; tensor input_349_axes_0 = const()[name = tensor("input_349_axes_0"), val = tensor([-1])]; - tensor input_349 = layer_norm(axes = input_349_axes_0, beta = text_encoder_text_model_encoder_layers_21_layer_norm2_bias, epsilon = var_12, gamma = text_encoder_text_model_encoder_layers_21_layer_norm2_weight, x = input_347)[name = tensor("input_349")]; - tensor input_351 = linear(bias = text_encoder_text_model_encoder_layers_21_mlp_fc1_bias, weight = text_encoder_text_model_encoder_layers_21_mlp_fc1_weight, x = input_349)[name = tensor("input_351")]; + tensor text_encoder_text_model_encoder_layers_21_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_21_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(966290944)))]; + tensor text_encoder_text_model_encoder_layers_21_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_21_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(966293568)))]; + tensor input_349_cast = layer_norm(axes = input_349_axes_0, beta = text_encoder_text_model_encoder_layers_21_layer_norm2_bias_to_fp16, epsilon = var_13_to_fp16, gamma = text_encoder_text_model_encoder_layers_21_layer_norm2_weight_to_fp16, x = input_347_cast)[name = tensor("input_349_cast")]; + tensor text_encoder_text_model_encoder_layers_21_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_21_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(966296192)))]; + tensor text_encoder_text_model_encoder_layers_21_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_21_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(979403456)))]; + tensor input_351_cast = linear(bias = text_encoder_text_model_encoder_layers_21_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_21_mlp_fc1_weight_to_fp16, x = input_349_cast)[name = tensor("input_351_cast")]; tensor input_353_mode_0 = const()[name = tensor("input_353_mode_0"), val = tensor("EXACT")]; - tensor input_353 = gelu(mode = input_353_mode_0, x = input_351)[name = tensor("input_353")]; - tensor hidden_states_131 = linear(bias = text_encoder_text_model_encoder_layers_21_mlp_fc2_bias, weight = text_encoder_text_model_encoder_layers_21_mlp_fc2_weight, x = input_353)[name = tensor("hidden_states_131")]; - tensor input_355 = add(x = input_347, y = hidden_states_131)[name = tensor("input_355")]; + tensor input_353_cast = gelu(mode = input_353_mode_0, x = input_351_cast)[name = tensor("input_353_cast")]; + tensor text_encoder_text_model_encoder_layers_21_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_21_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(979413760)))]; + tensor text_encoder_text_model_encoder_layers_21_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_21_mlp_fc2_bias_to_fp16"), val = 
tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(992521024)))]; + tensor hidden_states_131_cast = linear(bias = text_encoder_text_model_encoder_layers_21_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_21_mlp_fc2_weight_to_fp16, x = input_353_cast)[name = tensor("hidden_states_131_cast")]; + tensor input_355_cast = add(x = input_347_cast, y = hidden_states_131_cast)[name = tensor("input_355_cast")]; tensor hidden_states_133_axes_0 = const()[name = tensor("hidden_states_133_axes_0"), val = tensor([-1])]; - tensor hidden_states_133 = layer_norm(axes = hidden_states_133_axes_0, beta = text_encoder_text_model_encoder_layers_22_layer_norm1_bias, epsilon = var_12, gamma = text_encoder_text_model_encoder_layers_22_layer_norm1_weight, x = input_355)[name = tensor("hidden_states_133")]; - tensor var_2042 = linear(bias = text_encoder_text_model_encoder_layers_22_self_attn_q_proj_bias, weight = text_encoder_text_model_encoder_layers_22_self_attn_q_proj_weight, x = hidden_states_133)[name = tensor("op_2042")]; - tensor var_2043 = const()[name = tensor("op_2043"), val = tensor(0x1p-3)]; - tensor tensor_137 = mul(x = var_2042, y = var_2043)[name = tensor("tensor_137")]; - tensor tensor_133 = linear(bias = text_encoder_text_model_encoder_layers_22_self_attn_k_proj_bias, weight = text_encoder_text_model_encoder_layers_22_self_attn_k_proj_weight, x = hidden_states_133)[name = tensor("tensor_133")]; - tensor var_2048 = const()[name = tensor("op_2048"), val = tensor([1, -1, 20, 64])]; - tensor var_2049 = reshape(shape = var_2048, x = tensor_133)[name = tensor("op_2049")]; - tensor var_2050_perm_0 = const()[name = tensor("op_2050_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor tensor_135 = linear(bias = text_encoder_text_model_encoder_layers_22_self_attn_v_proj_bias, weight = text_encoder_text_model_encoder_layers_22_self_attn_v_proj_weight, x = hidden_states_133)[name = tensor("tensor_135")]; - tensor var_2055 = const()[name = tensor("op_2055"), val = tensor([1, -1, 20, 64])]; - tensor var_2056 = reshape(shape = var_2055, x = tensor_135)[name = tensor("op_2056")]; - tensor var_2057_perm_0 = const()[name = tensor("op_2057_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_2064 = const()[name = tensor("op_2064"), val = tensor([1, 77, 20, 64])]; - tensor var_2065 = reshape(shape = var_2064, x = tensor_137)[name = tensor("op_2065")]; - tensor var_2066_perm_0 = const()[name = tensor("op_2066_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_2068 = const()[name = tensor("op_2068"), val = tensor([20, -1, 64])]; - tensor transpose_48 = transpose(perm = var_2066_perm_0, x = var_2065)[name = tensor("transpose_48")]; - tensor query_states_45 = reshape(shape = var_2068, x = transpose_48)[name = tensor("query_states_45")]; + tensor text_encoder_text_model_encoder_layers_22_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_22_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(992523648)))]; + tensor text_encoder_text_model_encoder_layers_22_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_22_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(992526272)))]; + tensor hidden_states_133_cast = layer_norm(axes = hidden_states_133_axes_0, beta = text_encoder_text_model_encoder_layers_22_layer_norm1_bias_to_fp16, epsilon = var_13_to_fp16, gamma = 
text_encoder_text_model_encoder_layers_22_layer_norm1_weight_to_fp16, x = input_355_cast)[name = tensor("hidden_states_133_cast")]; + tensor text_encoder_text_model_encoder_layers_22_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_22_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(992528896)))]; + tensor text_encoder_text_model_encoder_layers_22_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_22_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(995805760)))]; + tensor var_2044_cast = linear(bias = text_encoder_text_model_encoder_layers_22_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_22_self_attn_q_proj_weight_to_fp16, x = hidden_states_133_cast)[name = tensor("op_2044_cast")]; + tensor var_2045_to_fp16 = const()[name = tensor("op_2045_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_137_cast = mul(x = var_2044_cast, y = var_2045_to_fp16)[name = tensor("tensor_137_cast")]; + tensor text_encoder_text_model_encoder_layers_22_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_22_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(995808384)))]; + tensor text_encoder_text_model_encoder_layers_22_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_22_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(999085248)))]; + tensor tensor_133_cast = linear(bias = text_encoder_text_model_encoder_layers_22_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_22_self_attn_k_proj_weight_to_fp16, x = hidden_states_133_cast)[name = tensor("tensor_133_cast")]; + tensor var_2050 = const()[name = tensor("op_2050"), val = tensor([1, -1, 20, 64])]; + tensor var_2051_cast = reshape(shape = var_2050, x = tensor_133_cast)[name = tensor("op_2051_cast")]; + tensor var_2052_perm_0 = const()[name = tensor("op_2052_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_22_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_22_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(999087872)))]; + tensor text_encoder_text_model_encoder_layers_22_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_22_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1002364736)))]; + tensor tensor_135_cast = linear(bias = text_encoder_text_model_encoder_layers_22_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_22_self_attn_v_proj_weight_to_fp16, x = hidden_states_133_cast)[name = tensor("tensor_135_cast")]; + tensor var_2057 = const()[name = tensor("op_2057"), val = tensor([1, -1, 20, 64])]; + tensor var_2058_cast = reshape(shape = var_2057, x = tensor_135_cast)[name = tensor("op_2058_cast")]; + tensor var_2059_perm_0 = const()[name = tensor("op_2059_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_2066 = const()[name = tensor("op_2066"), val = tensor([1, 77, 20, 64])]; + tensor var_2067_cast = reshape(shape = var_2066, x = 
tensor_137_cast)[name = tensor("op_2067_cast")]; + tensor var_2068_perm_0 = const()[name = tensor("op_2068_perm_0"), val = tensor([0, 2, 1, 3])]; tensor var_2070 = const()[name = tensor("op_2070"), val = tensor([20, -1, 64])]; - tensor transpose_50 = transpose(perm = var_2050_perm_0, x = var_2049)[name = tensor("transpose_50")]; - tensor key_states_91 = reshape(shape = var_2070, x = transpose_50)[name = tensor("key_states_91")]; + tensor transpose_48 = transpose(perm = var_2068_perm_0, x = var_2067_cast)[name = tensor("transpose_48")]; + tensor query_states_45_cast = reshape(shape = var_2070, x = transpose_48)[name = tensor("query_states_45_cast")]; tensor var_2072 = const()[name = tensor("op_2072"), val = tensor([20, -1, 64])]; - tensor transpose_49 = transpose(perm = var_2057_perm_0, x = var_2056)[name = tensor("transpose_49")]; - tensor value_states_91 = reshape(shape = var_2072, x = transpose_49)[name = tensor("value_states_91")]; - tensor var_2075_perm_0 = const()[name = tensor("op_2075_perm_0"), val = tensor([0, 2, 1])]; + tensor transpose_50 = transpose(perm = var_2052_perm_0, x = var_2051_cast)[name = tensor("transpose_50")]; + tensor key_states_91_cast = reshape(shape = var_2072, x = transpose_50)[name = tensor("key_states_91_cast")]; + tensor var_2074 = const()[name = tensor("op_2074"), val = tensor([20, -1, 64])]; + tensor transpose_49 = transpose(perm = var_2059_perm_0, x = var_2058_cast)[name = tensor("transpose_49")]; + tensor value_states_91_cast = reshape(shape = var_2074, x = transpose_49)[name = tensor("value_states_91_cast")]; + tensor var_2077_perm_0 = const()[name = tensor("op_2077_perm_0"), val = tensor([0, 2, 1])]; tensor attn_weights_133_transpose_x_0 = const()[name = tensor("attn_weights_133_transpose_x_0"), val = tensor(false)]; tensor attn_weights_133_transpose_y_0 = const()[name = tensor("attn_weights_133_transpose_y_0"), val = tensor(false)]; - tensor transpose_47 = transpose(perm = var_2075_perm_0, x = key_states_91)[name = tensor("transpose_47")]; - tensor attn_weights_133 = matmul(transpose_x = attn_weights_133_transpose_x_0, transpose_y = attn_weights_133_transpose_y_0, x = query_states_45, y = transpose_47)[name = tensor("attn_weights_133")]; - tensor var_2077 = const()[name = tensor("op_2077"), val = tensor([1, 20, 77, 77])]; - tensor var_2078 = reshape(shape = var_2077, x = attn_weights_133)[name = tensor("op_2078")]; - tensor attn_weights_135 = add(x = var_2078, y = causal_attention_mask)[name = tensor("attn_weights_135")]; - tensor var_2083 = const()[name = tensor("op_2083"), val = tensor([20, 77, 77])]; - tensor input_357 = reshape(shape = var_2083, x = attn_weights_135)[name = tensor("input_357")]; - tensor input_359 = softmax(axis = var_5, x = input_357)[name = tensor("input_359")]; + tensor transpose_47 = transpose(perm = var_2077_perm_0, x = key_states_91_cast)[name = tensor("transpose_47")]; + tensor attn_weights_133_cast = matmul(transpose_x = attn_weights_133_transpose_x_0, transpose_y = attn_weights_133_transpose_y_0, x = query_states_45_cast, y = transpose_47)[name = tensor("attn_weights_133_cast")]; + tensor var_2079 = const()[name = tensor("op_2079"), val = tensor([1, 20, 77, 77])]; + tensor var_2080_cast = reshape(shape = var_2079, x = attn_weights_133_cast)[name = tensor("op_2080_cast")]; + tensor attn_weights_135_cast = add(x = var_2080_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_135_cast")]; + tensor var_2085 = const()[name = tensor("op_2085"), val = tensor([20, 77, 77])]; + tensor input_357_cast = reshape(shape 
= var_2085, x = attn_weights_135_cast)[name = tensor("input_357_cast")]; + tensor input_359_cast = softmax(axis = var_5, x = input_357_cast)[name = tensor("input_359_cast")]; tensor attn_output_133_transpose_x_0 = const()[name = tensor("attn_output_133_transpose_x_0"), val = tensor(false)]; tensor attn_output_133_transpose_y_0 = const()[name = tensor("attn_output_133_transpose_y_0"), val = tensor(false)]; - tensor attn_output_133 = matmul(transpose_x = attn_output_133_transpose_x_0, transpose_y = attn_output_133_transpose_y_0, x = input_359, y = value_states_91)[name = tensor("attn_output_133")]; - tensor var_2088 = const()[name = tensor("op_2088"), val = tensor([1, 20, 77, 64])]; - tensor attn_output_135 = reshape(shape = var_2088, x = attn_output_133)[name = tensor("attn_output_135")]; + tensor attn_output_133_cast = matmul(transpose_x = attn_output_133_transpose_x_0, transpose_y = attn_output_133_transpose_y_0, x = input_359_cast, y = value_states_91_cast)[name = tensor("attn_output_133_cast")]; + tensor var_2090 = const()[name = tensor("op_2090"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_135_cast = reshape(shape = var_2090, x = attn_output_133_cast)[name = tensor("attn_output_135_cast")]; tensor attn_output_137_perm_0 = const()[name = tensor("attn_output_137_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_2091 = const()[name = tensor("op_2091"), val = tensor([1, 77, 1280])]; - tensor transpose_46 = transpose(perm = attn_output_137_perm_0, x = attn_output_135)[name = tensor("transpose_46")]; - tensor input_361 = reshape(shape = var_2091, x = transpose_46)[name = tensor("input_361")]; - tensor hidden_states_135 = linear(bias = text_encoder_text_model_encoder_layers_22_self_attn_out_proj_bias, weight = text_encoder_text_model_encoder_layers_22_self_attn_out_proj_weight, x = input_361)[name = tensor("hidden_states_135")]; - tensor input_363 = add(x = input_355, y = hidden_states_135)[name = tensor("input_363")]; + tensor var_2093 = const()[name = tensor("op_2093"), val = tensor([1, 77, 1280])]; + tensor transpose_46 = transpose(perm = attn_output_137_perm_0, x = attn_output_135_cast)[name = tensor("transpose_46")]; + tensor input_361_cast = reshape(shape = var_2093, x = transpose_46)[name = tensor("input_361_cast")]; + tensor text_encoder_text_model_encoder_layers_22_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_22_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1002367360)))]; + tensor text_encoder_text_model_encoder_layers_22_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_22_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1005644224)))]; + tensor hidden_states_135_cast = linear(bias = text_encoder_text_model_encoder_layers_22_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_22_self_attn_out_proj_weight_to_fp16, x = input_361_cast)[name = tensor("hidden_states_135_cast")]; + tensor input_363_cast = add(x = input_355_cast, y = hidden_states_135_cast)[name = tensor("input_363_cast")]; tensor input_365_axes_0 = const()[name = tensor("input_365_axes_0"), val = tensor([-1])]; - tensor input_365 = layer_norm(axes = input_365_axes_0, beta = text_encoder_text_model_encoder_layers_22_layer_norm2_bias, epsilon = var_12, gamma = text_encoder_text_model_encoder_layers_22_layer_norm2_weight, x = 
input_363)[name = tensor("input_365")]; - tensor input_367 = linear(bias = text_encoder_text_model_encoder_layers_22_mlp_fc1_bias, weight = text_encoder_text_model_encoder_layers_22_mlp_fc1_weight, x = input_365)[name = tensor("input_367")]; + tensor text_encoder_text_model_encoder_layers_22_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_22_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1005646848)))]; + tensor text_encoder_text_model_encoder_layers_22_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_22_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1005649472)))]; + tensor input_365_cast = layer_norm(axes = input_365_axes_0, beta = text_encoder_text_model_encoder_layers_22_layer_norm2_bias_to_fp16, epsilon = var_13_to_fp16, gamma = text_encoder_text_model_encoder_layers_22_layer_norm2_weight_to_fp16, x = input_363_cast)[name = tensor("input_365_cast")]; + tensor text_encoder_text_model_encoder_layers_22_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_22_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1005652096)))]; + tensor text_encoder_text_model_encoder_layers_22_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_22_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1018759360)))]; + tensor input_367_cast = linear(bias = text_encoder_text_model_encoder_layers_22_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_22_mlp_fc1_weight_to_fp16, x = input_365_cast)[name = tensor("input_367_cast")]; tensor input_369_mode_0 = const()[name = tensor("input_369_mode_0"), val = tensor("EXACT")]; - tensor input_369 = gelu(mode = input_369_mode_0, x = input_367)[name = tensor("input_369")]; - tensor hidden_states_137 = linear(bias = text_encoder_text_model_encoder_layers_22_mlp_fc2_bias, weight = text_encoder_text_model_encoder_layers_22_mlp_fc2_weight, x = input_369)[name = tensor("hidden_states_137")]; - tensor input_371 = add(x = input_363, y = hidden_states_137)[name = tensor("input_371")]; + tensor input_369_cast = gelu(mode = input_369_mode_0, x = input_367_cast)[name = tensor("input_369_cast")]; + tensor text_encoder_text_model_encoder_layers_22_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_22_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1018769664)))]; + tensor text_encoder_text_model_encoder_layers_22_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_22_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1031876928)))]; + tensor hidden_states_137_cast = linear(bias = text_encoder_text_model_encoder_layers_22_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_22_mlp_fc2_weight_to_fp16, x = input_369_cast)[name = tensor("hidden_states_137_cast")]; + tensor input_371_cast = add(x = input_363_cast, y = hidden_states_137_cast)[name = tensor("input_371_cast")]; tensor hidden_states_139_axes_0 = const()[name = tensor("hidden_states_139_axes_0"), val = tensor([-1])]; - tensor hidden_states_139 = layer_norm(axes = hidden_states_139_axes_0, beta = 
text_encoder_text_model_encoder_layers_23_layer_norm1_bias, epsilon = var_12, gamma = text_encoder_text_model_encoder_layers_23_layer_norm1_weight, x = input_371)[name = tensor("hidden_states_139")]; - tensor var_2129 = linear(bias = text_encoder_text_model_encoder_layers_23_self_attn_q_proj_bias, weight = text_encoder_text_model_encoder_layers_23_self_attn_q_proj_weight, x = hidden_states_139)[name = tensor("op_2129")]; - tensor var_2130 = const()[name = tensor("op_2130"), val = tensor(0x1p-3)]; - tensor tensor_143 = mul(x = var_2129, y = var_2130)[name = tensor("tensor_143")]; - tensor tensor_139 = linear(bias = text_encoder_text_model_encoder_layers_23_self_attn_k_proj_bias, weight = text_encoder_text_model_encoder_layers_23_self_attn_k_proj_weight, x = hidden_states_139)[name = tensor("tensor_139")]; - tensor var_2135 = const()[name = tensor("op_2135"), val = tensor([1, -1, 20, 64])]; - tensor var_2136 = reshape(shape = var_2135, x = tensor_139)[name = tensor("op_2136")]; - tensor var_2137_perm_0 = const()[name = tensor("op_2137_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor tensor_141 = linear(bias = text_encoder_text_model_encoder_layers_23_self_attn_v_proj_bias, weight = text_encoder_text_model_encoder_layers_23_self_attn_v_proj_weight, x = hidden_states_139)[name = tensor("tensor_141")]; - tensor var_2142 = const()[name = tensor("op_2142"), val = tensor([1, -1, 20, 64])]; - tensor var_2143 = reshape(shape = var_2142, x = tensor_141)[name = tensor("op_2143")]; - tensor var_2144_perm_0 = const()[name = tensor("op_2144_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_2151 = const()[name = tensor("op_2151"), val = tensor([1, 77, 20, 64])]; - tensor var_2152 = reshape(shape = var_2151, x = tensor_143)[name = tensor("op_2152")]; - tensor var_2153_perm_0 = const()[name = tensor("op_2153_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_2155 = const()[name = tensor("op_2155"), val = tensor([20, -1, 64])]; - tensor transpose_43 = transpose(perm = var_2153_perm_0, x = var_2152)[name = tensor("transpose_43")]; - tensor query_states_47 = reshape(shape = var_2155, x = transpose_43)[name = tensor("query_states_47")]; + tensor text_encoder_text_model_encoder_layers_23_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_23_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1031879552)))]; + tensor text_encoder_text_model_encoder_layers_23_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_23_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1031882176)))]; + tensor hidden_states_139_cast = layer_norm(axes = hidden_states_139_axes_0, beta = text_encoder_text_model_encoder_layers_23_layer_norm1_bias_to_fp16, epsilon = var_13_to_fp16, gamma = text_encoder_text_model_encoder_layers_23_layer_norm1_weight_to_fp16, x = input_371_cast)[name = tensor("hidden_states_139_cast")]; + tensor text_encoder_text_model_encoder_layers_23_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_23_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1031884800)))]; + tensor text_encoder_text_model_encoder_layers_23_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_23_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(1035161664)))]; + tensor var_2131_cast = linear(bias = text_encoder_text_model_encoder_layers_23_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_23_self_attn_q_proj_weight_to_fp16, x = hidden_states_139_cast)[name = tensor("op_2131_cast")]; + tensor var_2132_to_fp16 = const()[name = tensor("op_2132_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_143_cast = mul(x = var_2131_cast, y = var_2132_to_fp16)[name = tensor("tensor_143_cast")]; + tensor text_encoder_text_model_encoder_layers_23_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_23_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1035164288)))]; + tensor text_encoder_text_model_encoder_layers_23_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_23_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1038441152)))]; + tensor tensor_139_cast = linear(bias = text_encoder_text_model_encoder_layers_23_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_23_self_attn_k_proj_weight_to_fp16, x = hidden_states_139_cast)[name = tensor("tensor_139_cast")]; + tensor var_2137 = const()[name = tensor("op_2137"), val = tensor([1, -1, 20, 64])]; + tensor var_2138_cast = reshape(shape = var_2137, x = tensor_139_cast)[name = tensor("op_2138_cast")]; + tensor var_2139_perm_0 = const()[name = tensor("op_2139_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_23_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_23_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1038443776)))]; + tensor text_encoder_text_model_encoder_layers_23_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_23_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1041720640)))]; + tensor tensor_141_cast = linear(bias = text_encoder_text_model_encoder_layers_23_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_23_self_attn_v_proj_weight_to_fp16, x = hidden_states_139_cast)[name = tensor("tensor_141_cast")]; + tensor var_2144 = const()[name = tensor("op_2144"), val = tensor([1, -1, 20, 64])]; + tensor var_2145_cast = reshape(shape = var_2144, x = tensor_141_cast)[name = tensor("op_2145_cast")]; + tensor var_2146_perm_0 = const()[name = tensor("op_2146_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_2153 = const()[name = tensor("op_2153"), val = tensor([1, 77, 20, 64])]; + tensor var_2154_cast = reshape(shape = var_2153, x = tensor_143_cast)[name = tensor("op_2154_cast")]; + tensor var_2155_perm_0 = const()[name = tensor("op_2155_perm_0"), val = tensor([0, 2, 1, 3])]; tensor var_2157 = const()[name = tensor("op_2157"), val = tensor([20, -1, 64])]; - tensor transpose_45 = transpose(perm = var_2137_perm_0, x = var_2136)[name = tensor("transpose_45")]; - tensor key_states_95 = reshape(shape = var_2157, x = transpose_45)[name = tensor("key_states_95")]; + tensor transpose_43 = transpose(perm = var_2155_perm_0, x = var_2154_cast)[name = tensor("transpose_43")]; + tensor query_states_47_cast = reshape(shape = var_2157, x = transpose_43)[name = 
tensor("query_states_47_cast")]; tensor var_2159 = const()[name = tensor("op_2159"), val = tensor([20, -1, 64])]; - tensor transpose_44 = transpose(perm = var_2144_perm_0, x = var_2143)[name = tensor("transpose_44")]; - tensor value_states_95 = reshape(shape = var_2159, x = transpose_44)[name = tensor("value_states_95")]; - tensor var_2162_perm_0 = const()[name = tensor("op_2162_perm_0"), val = tensor([0, 2, 1])]; + tensor transpose_45 = transpose(perm = var_2139_perm_0, x = var_2138_cast)[name = tensor("transpose_45")]; + tensor key_states_95_cast = reshape(shape = var_2159, x = transpose_45)[name = tensor("key_states_95_cast")]; + tensor var_2161 = const()[name = tensor("op_2161"), val = tensor([20, -1, 64])]; + tensor transpose_44 = transpose(perm = var_2146_perm_0, x = var_2145_cast)[name = tensor("transpose_44")]; + tensor value_states_95_cast = reshape(shape = var_2161, x = transpose_44)[name = tensor("value_states_95_cast")]; + tensor var_2164_perm_0 = const()[name = tensor("op_2164_perm_0"), val = tensor([0, 2, 1])]; tensor attn_weights_139_transpose_x_0 = const()[name = tensor("attn_weights_139_transpose_x_0"), val = tensor(false)]; tensor attn_weights_139_transpose_y_0 = const()[name = tensor("attn_weights_139_transpose_y_0"), val = tensor(false)]; - tensor transpose_42 = transpose(perm = var_2162_perm_0, x = key_states_95)[name = tensor("transpose_42")]; - tensor attn_weights_139 = matmul(transpose_x = attn_weights_139_transpose_x_0, transpose_y = attn_weights_139_transpose_y_0, x = query_states_47, y = transpose_42)[name = tensor("attn_weights_139")]; - tensor var_2164 = const()[name = tensor("op_2164"), val = tensor([1, 20, 77, 77])]; - tensor var_2165 = reshape(shape = var_2164, x = attn_weights_139)[name = tensor("op_2165")]; - tensor attn_weights_141 = add(x = var_2165, y = causal_attention_mask)[name = tensor("attn_weights_141")]; - tensor var_2170 = const()[name = tensor("op_2170"), val = tensor([20, 77, 77])]; - tensor input_373 = reshape(shape = var_2170, x = attn_weights_141)[name = tensor("input_373")]; - tensor input_375 = softmax(axis = var_5, x = input_373)[name = tensor("input_375")]; + tensor transpose_42 = transpose(perm = var_2164_perm_0, x = key_states_95_cast)[name = tensor("transpose_42")]; + tensor attn_weights_139_cast = matmul(transpose_x = attn_weights_139_transpose_x_0, transpose_y = attn_weights_139_transpose_y_0, x = query_states_47_cast, y = transpose_42)[name = tensor("attn_weights_139_cast")]; + tensor var_2166 = const()[name = tensor("op_2166"), val = tensor([1, 20, 77, 77])]; + tensor var_2167_cast = reshape(shape = var_2166, x = attn_weights_139_cast)[name = tensor("op_2167_cast")]; + tensor attn_weights_141_cast = add(x = var_2167_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_141_cast")]; + tensor var_2172 = const()[name = tensor("op_2172"), val = tensor([20, 77, 77])]; + tensor input_373_cast = reshape(shape = var_2172, x = attn_weights_141_cast)[name = tensor("input_373_cast")]; + tensor input_375_cast = softmax(axis = var_5, x = input_373_cast)[name = tensor("input_375_cast")]; tensor attn_output_139_transpose_x_0 = const()[name = tensor("attn_output_139_transpose_x_0"), val = tensor(false)]; tensor attn_output_139_transpose_y_0 = const()[name = tensor("attn_output_139_transpose_y_0"), val = tensor(false)]; - tensor attn_output_139 = matmul(transpose_x = attn_output_139_transpose_x_0, transpose_y = attn_output_139_transpose_y_0, x = input_375, y = value_states_95)[name = tensor("attn_output_139")]; - tensor var_2175 = 
const()[name = tensor("op_2175"), val = tensor([1, 20, 77, 64])]; - tensor attn_output_141 = reshape(shape = var_2175, x = attn_output_139)[name = tensor("attn_output_141")]; + tensor attn_output_139_cast = matmul(transpose_x = attn_output_139_transpose_x_0, transpose_y = attn_output_139_transpose_y_0, x = input_375_cast, y = value_states_95_cast)[name = tensor("attn_output_139_cast")]; + tensor var_2177 = const()[name = tensor("op_2177"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_141_cast = reshape(shape = var_2177, x = attn_output_139_cast)[name = tensor("attn_output_141_cast")]; tensor attn_output_143_perm_0 = const()[name = tensor("attn_output_143_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_2178 = const()[name = tensor("op_2178"), val = tensor([1, 77, 1280])]; - tensor transpose_41 = transpose(perm = attn_output_143_perm_0, x = attn_output_141)[name = tensor("transpose_41")]; - tensor input_377 = reshape(shape = var_2178, x = transpose_41)[name = tensor("input_377")]; - tensor hidden_states_141 = linear(bias = text_encoder_text_model_encoder_layers_23_self_attn_out_proj_bias, weight = text_encoder_text_model_encoder_layers_23_self_attn_out_proj_weight, x = input_377)[name = tensor("hidden_states_141")]; - tensor input_379 = add(x = input_371, y = hidden_states_141)[name = tensor("input_379")]; + tensor var_2180 = const()[name = tensor("op_2180"), val = tensor([1, 77, 1280])]; + tensor transpose_41 = transpose(perm = attn_output_143_perm_0, x = attn_output_141_cast)[name = tensor("transpose_41")]; + tensor input_377_cast = reshape(shape = var_2180, x = transpose_41)[name = tensor("input_377_cast")]; + tensor text_encoder_text_model_encoder_layers_23_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_23_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1041723264)))]; + tensor text_encoder_text_model_encoder_layers_23_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_23_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1045000128)))]; + tensor hidden_states_141_cast = linear(bias = text_encoder_text_model_encoder_layers_23_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_23_self_attn_out_proj_weight_to_fp16, x = input_377_cast)[name = tensor("hidden_states_141_cast")]; + tensor input_379_cast = add(x = input_371_cast, y = hidden_states_141_cast)[name = tensor("input_379_cast")]; tensor input_381_axes_0 = const()[name = tensor("input_381_axes_0"), val = tensor([-1])]; - tensor input_381 = layer_norm(axes = input_381_axes_0, beta = text_encoder_text_model_encoder_layers_23_layer_norm2_bias, epsilon = var_12, gamma = text_encoder_text_model_encoder_layers_23_layer_norm2_weight, x = input_379)[name = tensor("input_381")]; - tensor input_383 = linear(bias = text_encoder_text_model_encoder_layers_23_mlp_fc1_bias, weight = text_encoder_text_model_encoder_layers_23_mlp_fc1_weight, x = input_381)[name = tensor("input_383")]; + tensor text_encoder_text_model_encoder_layers_23_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_23_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1045002752)))]; + tensor text_encoder_text_model_encoder_layers_23_layer_norm2_bias_to_fp16 = const()[name = 
tensor("text_encoder_text_model_encoder_layers_23_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1045005376)))]; + tensor input_381_cast = layer_norm(axes = input_381_axes_0, beta = text_encoder_text_model_encoder_layers_23_layer_norm2_bias_to_fp16, epsilon = var_13_to_fp16, gamma = text_encoder_text_model_encoder_layers_23_layer_norm2_weight_to_fp16, x = input_379_cast)[name = tensor("input_381_cast")]; + tensor text_encoder_text_model_encoder_layers_23_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_23_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1045008000)))]; + tensor text_encoder_text_model_encoder_layers_23_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_23_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1058115264)))]; + tensor input_383_cast = linear(bias = text_encoder_text_model_encoder_layers_23_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_23_mlp_fc1_weight_to_fp16, x = input_381_cast)[name = tensor("input_383_cast")]; tensor input_385_mode_0 = const()[name = tensor("input_385_mode_0"), val = tensor("EXACT")]; - tensor input_385 = gelu(mode = input_385_mode_0, x = input_383)[name = tensor("input_385")]; - tensor hidden_states_143 = linear(bias = text_encoder_text_model_encoder_layers_23_mlp_fc2_bias, weight = text_encoder_text_model_encoder_layers_23_mlp_fc2_weight, x = input_385)[name = tensor("hidden_states_143")]; - tensor input_387 = add(x = input_379, y = hidden_states_143)[name = tensor("input_387")]; + tensor input_385_cast = gelu(mode = input_385_mode_0, x = input_383_cast)[name = tensor("input_385_cast")]; + tensor text_encoder_text_model_encoder_layers_23_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_23_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1058125568)))]; + tensor text_encoder_text_model_encoder_layers_23_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_23_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1071232832)))]; + tensor hidden_states_143_cast = linear(bias = text_encoder_text_model_encoder_layers_23_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_23_mlp_fc2_weight_to_fp16, x = input_385_cast)[name = tensor("hidden_states_143_cast")]; + tensor input_387_cast = add(x = input_379_cast, y = hidden_states_143_cast)[name = tensor("input_387_cast")]; tensor hidden_states_145_axes_0 = const()[name = tensor("hidden_states_145_axes_0"), val = tensor([-1])]; - tensor hidden_states_145 = layer_norm(axes = hidden_states_145_axes_0, beta = text_encoder_text_model_encoder_layers_24_layer_norm1_bias, epsilon = var_12, gamma = text_encoder_text_model_encoder_layers_24_layer_norm1_weight, x = input_387)[name = tensor("hidden_states_145")]; - tensor var_2216 = linear(bias = text_encoder_text_model_encoder_layers_24_self_attn_q_proj_bias, weight = text_encoder_text_model_encoder_layers_24_self_attn_q_proj_weight, x = hidden_states_145)[name = tensor("op_2216")]; - tensor var_2217 = const()[name = tensor("op_2217"), val = tensor(0x1p-3)]; - tensor tensor_149 = mul(x = var_2216, y = var_2217)[name = tensor("tensor_149")]; - tensor tensor_145 = 
linear(bias = text_encoder_text_model_encoder_layers_24_self_attn_k_proj_bias, weight = text_encoder_text_model_encoder_layers_24_self_attn_k_proj_weight, x = hidden_states_145)[name = tensor("tensor_145")]; - tensor var_2222 = const()[name = tensor("op_2222"), val = tensor([1, -1, 20, 64])]; - tensor var_2223 = reshape(shape = var_2222, x = tensor_145)[name = tensor("op_2223")]; - tensor var_2224_perm_0 = const()[name = tensor("op_2224_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor tensor_147 = linear(bias = text_encoder_text_model_encoder_layers_24_self_attn_v_proj_bias, weight = text_encoder_text_model_encoder_layers_24_self_attn_v_proj_weight, x = hidden_states_145)[name = tensor("tensor_147")]; - tensor var_2229 = const()[name = tensor("op_2229"), val = tensor([1, -1, 20, 64])]; - tensor var_2230 = reshape(shape = var_2229, x = tensor_147)[name = tensor("op_2230")]; - tensor var_2231_perm_0 = const()[name = tensor("op_2231_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_2238 = const()[name = tensor("op_2238"), val = tensor([1, 77, 20, 64])]; - tensor var_2239 = reshape(shape = var_2238, x = tensor_149)[name = tensor("op_2239")]; - tensor var_2240_perm_0 = const()[name = tensor("op_2240_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_2242 = const()[name = tensor("op_2242"), val = tensor([20, -1, 64])]; - tensor transpose_38 = transpose(perm = var_2240_perm_0, x = var_2239)[name = tensor("transpose_38")]; - tensor query_states_49 = reshape(shape = var_2242, x = transpose_38)[name = tensor("query_states_49")]; + tensor text_encoder_text_model_encoder_layers_24_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_24_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1071235456)))]; + tensor text_encoder_text_model_encoder_layers_24_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_24_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1071238080)))]; + tensor hidden_states_145_cast = layer_norm(axes = hidden_states_145_axes_0, beta = text_encoder_text_model_encoder_layers_24_layer_norm1_bias_to_fp16, epsilon = var_13_to_fp16, gamma = text_encoder_text_model_encoder_layers_24_layer_norm1_weight_to_fp16, x = input_387_cast)[name = tensor("hidden_states_145_cast")]; + tensor text_encoder_text_model_encoder_layers_24_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_24_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1071240704)))]; + tensor text_encoder_text_model_encoder_layers_24_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_24_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1074517568)))]; + tensor var_2218_cast = linear(bias = text_encoder_text_model_encoder_layers_24_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_24_self_attn_q_proj_weight_to_fp16, x = hidden_states_145_cast)[name = tensor("op_2218_cast")]; + tensor var_2219_to_fp16 = const()[name = tensor("op_2219_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_149_cast = mul(x = var_2218_cast, y = var_2219_to_fp16)[name = tensor("tensor_149_cast")]; + tensor text_encoder_text_model_encoder_layers_24_self_attn_k_proj_weight_to_fp16 = const()[name = 
tensor("text_encoder_text_model_encoder_layers_24_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1074520192)))]; + tensor text_encoder_text_model_encoder_layers_24_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_24_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1077797056)))]; + tensor tensor_145_cast = linear(bias = text_encoder_text_model_encoder_layers_24_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_24_self_attn_k_proj_weight_to_fp16, x = hidden_states_145_cast)[name = tensor("tensor_145_cast")]; + tensor var_2224 = const()[name = tensor("op_2224"), val = tensor([1, -1, 20, 64])]; + tensor var_2225_cast = reshape(shape = var_2224, x = tensor_145_cast)[name = tensor("op_2225_cast")]; + tensor var_2226_perm_0 = const()[name = tensor("op_2226_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_24_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_24_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1077799680)))]; + tensor text_encoder_text_model_encoder_layers_24_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_24_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1081076544)))]; + tensor tensor_147_cast = linear(bias = text_encoder_text_model_encoder_layers_24_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_24_self_attn_v_proj_weight_to_fp16, x = hidden_states_145_cast)[name = tensor("tensor_147_cast")]; + tensor var_2231 = const()[name = tensor("op_2231"), val = tensor([1, -1, 20, 64])]; + tensor var_2232_cast = reshape(shape = var_2231, x = tensor_147_cast)[name = tensor("op_2232_cast")]; + tensor var_2233_perm_0 = const()[name = tensor("op_2233_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_2240 = const()[name = tensor("op_2240"), val = tensor([1, 77, 20, 64])]; + tensor var_2241_cast = reshape(shape = var_2240, x = tensor_149_cast)[name = tensor("op_2241_cast")]; + tensor var_2242_perm_0 = const()[name = tensor("op_2242_perm_0"), val = tensor([0, 2, 1, 3])]; tensor var_2244 = const()[name = tensor("op_2244"), val = tensor([20, -1, 64])]; - tensor transpose_40 = transpose(perm = var_2224_perm_0, x = var_2223)[name = tensor("transpose_40")]; - tensor key_states_99 = reshape(shape = var_2244, x = transpose_40)[name = tensor("key_states_99")]; + tensor transpose_38 = transpose(perm = var_2242_perm_0, x = var_2241_cast)[name = tensor("transpose_38")]; + tensor query_states_49_cast = reshape(shape = var_2244, x = transpose_38)[name = tensor("query_states_49_cast")]; tensor var_2246 = const()[name = tensor("op_2246"), val = tensor([20, -1, 64])]; - tensor transpose_39 = transpose(perm = var_2231_perm_0, x = var_2230)[name = tensor("transpose_39")]; - tensor value_states_99 = reshape(shape = var_2246, x = transpose_39)[name = tensor("value_states_99")]; - tensor var_2249_perm_0 = const()[name = tensor("op_2249_perm_0"), val = tensor([0, 2, 1])]; + tensor transpose_40 = transpose(perm = var_2226_perm_0, x = var_2225_cast)[name = tensor("transpose_40")]; + tensor key_states_99_cast = reshape(shape = var_2246, x = transpose_40)[name = tensor("key_states_99_cast")]; + tensor 
var_2248 = const()[name = tensor("op_2248"), val = tensor([20, -1, 64])]; + tensor transpose_39 = transpose(perm = var_2233_perm_0, x = var_2232_cast)[name = tensor("transpose_39")]; + tensor value_states_99_cast = reshape(shape = var_2248, x = transpose_39)[name = tensor("value_states_99_cast")]; + tensor var_2251_perm_0 = const()[name = tensor("op_2251_perm_0"), val = tensor([0, 2, 1])]; tensor attn_weights_145_transpose_x_0 = const()[name = tensor("attn_weights_145_transpose_x_0"), val = tensor(false)]; tensor attn_weights_145_transpose_y_0 = const()[name = tensor("attn_weights_145_transpose_y_0"), val = tensor(false)]; - tensor transpose_37 = transpose(perm = var_2249_perm_0, x = key_states_99)[name = tensor("transpose_37")]; - tensor attn_weights_145 = matmul(transpose_x = attn_weights_145_transpose_x_0, transpose_y = attn_weights_145_transpose_y_0, x = query_states_49, y = transpose_37)[name = tensor("attn_weights_145")]; - tensor var_2251 = const()[name = tensor("op_2251"), val = tensor([1, 20, 77, 77])]; - tensor var_2252 = reshape(shape = var_2251, x = attn_weights_145)[name = tensor("op_2252")]; - tensor attn_weights_147 = add(x = var_2252, y = causal_attention_mask)[name = tensor("attn_weights_147")]; - tensor var_2257 = const()[name = tensor("op_2257"), val = tensor([20, 77, 77])]; - tensor input_389 = reshape(shape = var_2257, x = attn_weights_147)[name = tensor("input_389")]; - tensor input_391 = softmax(axis = var_5, x = input_389)[name = tensor("input_391")]; + tensor transpose_37 = transpose(perm = var_2251_perm_0, x = key_states_99_cast)[name = tensor("transpose_37")]; + tensor attn_weights_145_cast = matmul(transpose_x = attn_weights_145_transpose_x_0, transpose_y = attn_weights_145_transpose_y_0, x = query_states_49_cast, y = transpose_37)[name = tensor("attn_weights_145_cast")]; + tensor var_2253 = const()[name = tensor("op_2253"), val = tensor([1, 20, 77, 77])]; + tensor var_2254_cast = reshape(shape = var_2253, x = attn_weights_145_cast)[name = tensor("op_2254_cast")]; + tensor attn_weights_147_cast = add(x = var_2254_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_147_cast")]; + tensor var_2259 = const()[name = tensor("op_2259"), val = tensor([20, 77, 77])]; + tensor input_389_cast = reshape(shape = var_2259, x = attn_weights_147_cast)[name = tensor("input_389_cast")]; + tensor input_391_cast = softmax(axis = var_5, x = input_389_cast)[name = tensor("input_391_cast")]; tensor attn_output_145_transpose_x_0 = const()[name = tensor("attn_output_145_transpose_x_0"), val = tensor(false)]; tensor attn_output_145_transpose_y_0 = const()[name = tensor("attn_output_145_transpose_y_0"), val = tensor(false)]; - tensor attn_output_145 = matmul(transpose_x = attn_output_145_transpose_x_0, transpose_y = attn_output_145_transpose_y_0, x = input_391, y = value_states_99)[name = tensor("attn_output_145")]; - tensor var_2262 = const()[name = tensor("op_2262"), val = tensor([1, 20, 77, 64])]; - tensor attn_output_147 = reshape(shape = var_2262, x = attn_output_145)[name = tensor("attn_output_147")]; + tensor attn_output_145_cast = matmul(transpose_x = attn_output_145_transpose_x_0, transpose_y = attn_output_145_transpose_y_0, x = input_391_cast, y = value_states_99_cast)[name = tensor("attn_output_145_cast")]; + tensor var_2264 = const()[name = tensor("op_2264"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_147_cast = reshape(shape = var_2264, x = attn_output_145_cast)[name = tensor("attn_output_147_cast")]; tensor attn_output_149_perm_0 = const()[name = 
tensor("attn_output_149_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_2265 = const()[name = tensor("op_2265"), val = tensor([1, 77, 1280])]; - tensor transpose_36 = transpose(perm = attn_output_149_perm_0, x = attn_output_147)[name = tensor("transpose_36")]; - tensor input_393 = reshape(shape = var_2265, x = transpose_36)[name = tensor("input_393")]; - tensor hidden_states_147 = linear(bias = text_encoder_text_model_encoder_layers_24_self_attn_out_proj_bias, weight = text_encoder_text_model_encoder_layers_24_self_attn_out_proj_weight, x = input_393)[name = tensor("hidden_states_147")]; - tensor input_395 = add(x = input_387, y = hidden_states_147)[name = tensor("input_395")]; + tensor var_2267 = const()[name = tensor("op_2267"), val = tensor([1, 77, 1280])]; + tensor transpose_36 = transpose(perm = attn_output_149_perm_0, x = attn_output_147_cast)[name = tensor("transpose_36")]; + tensor input_393_cast = reshape(shape = var_2267, x = transpose_36)[name = tensor("input_393_cast")]; + tensor text_encoder_text_model_encoder_layers_24_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_24_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1081079168)))]; + tensor text_encoder_text_model_encoder_layers_24_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_24_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1084356032)))]; + tensor hidden_states_147_cast = linear(bias = text_encoder_text_model_encoder_layers_24_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_24_self_attn_out_proj_weight_to_fp16, x = input_393_cast)[name = tensor("hidden_states_147_cast")]; + tensor input_395_cast = add(x = input_387_cast, y = hidden_states_147_cast)[name = tensor("input_395_cast")]; tensor input_397_axes_0 = const()[name = tensor("input_397_axes_0"), val = tensor([-1])]; - tensor input_397 = layer_norm(axes = input_397_axes_0, beta = text_encoder_text_model_encoder_layers_24_layer_norm2_bias, epsilon = var_12, gamma = text_encoder_text_model_encoder_layers_24_layer_norm2_weight, x = input_395)[name = tensor("input_397")]; - tensor input_399 = linear(bias = text_encoder_text_model_encoder_layers_24_mlp_fc1_bias, weight = text_encoder_text_model_encoder_layers_24_mlp_fc1_weight, x = input_397)[name = tensor("input_399")]; + tensor text_encoder_text_model_encoder_layers_24_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_24_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1084358656)))]; + tensor text_encoder_text_model_encoder_layers_24_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_24_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1084361280)))]; + tensor input_397_cast = layer_norm(axes = input_397_axes_0, beta = text_encoder_text_model_encoder_layers_24_layer_norm2_bias_to_fp16, epsilon = var_13_to_fp16, gamma = text_encoder_text_model_encoder_layers_24_layer_norm2_weight_to_fp16, x = input_395_cast)[name = tensor("input_397_cast")]; + tensor text_encoder_text_model_encoder_layers_24_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_24_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path 
= tensor("@model_path/weights/weight.bin"), offset = tensor(1084363904)))]; + tensor text_encoder_text_model_encoder_layers_24_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_24_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1097471168)))]; + tensor input_399_cast = linear(bias = text_encoder_text_model_encoder_layers_24_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_24_mlp_fc1_weight_to_fp16, x = input_397_cast)[name = tensor("input_399_cast")]; tensor input_401_mode_0 = const()[name = tensor("input_401_mode_0"), val = tensor("EXACT")]; - tensor input_401 = gelu(mode = input_401_mode_0, x = input_399)[name = tensor("input_401")]; - tensor hidden_states_149 = linear(bias = text_encoder_text_model_encoder_layers_24_mlp_fc2_bias, weight = text_encoder_text_model_encoder_layers_24_mlp_fc2_weight, x = input_401)[name = tensor("hidden_states_149")]; - tensor input_403 = add(x = input_395, y = hidden_states_149)[name = tensor("input_403")]; + tensor input_401_cast = gelu(mode = input_401_mode_0, x = input_399_cast)[name = tensor("input_401_cast")]; + tensor text_encoder_text_model_encoder_layers_24_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_24_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1097481472)))]; + tensor text_encoder_text_model_encoder_layers_24_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_24_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1110588736)))]; + tensor hidden_states_149_cast = linear(bias = text_encoder_text_model_encoder_layers_24_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_24_mlp_fc2_weight_to_fp16, x = input_401_cast)[name = tensor("hidden_states_149_cast")]; + tensor input_403_cast = add(x = input_395_cast, y = hidden_states_149_cast)[name = tensor("input_403_cast")]; tensor hidden_states_151_axes_0 = const()[name = tensor("hidden_states_151_axes_0"), val = tensor([-1])]; - tensor hidden_states_151 = layer_norm(axes = hidden_states_151_axes_0, beta = text_encoder_text_model_encoder_layers_25_layer_norm1_bias, epsilon = var_12, gamma = text_encoder_text_model_encoder_layers_25_layer_norm1_weight, x = input_403)[name = tensor("hidden_states_151")]; - tensor var_2303 = linear(bias = text_encoder_text_model_encoder_layers_25_self_attn_q_proj_bias, weight = text_encoder_text_model_encoder_layers_25_self_attn_q_proj_weight, x = hidden_states_151)[name = tensor("op_2303")]; - tensor var_2304 = const()[name = tensor("op_2304"), val = tensor(0x1p-3)]; - tensor tensor_155 = mul(x = var_2303, y = var_2304)[name = tensor("tensor_155")]; - tensor tensor_151 = linear(bias = text_encoder_text_model_encoder_layers_25_self_attn_k_proj_bias, weight = text_encoder_text_model_encoder_layers_25_self_attn_k_proj_weight, x = hidden_states_151)[name = tensor("tensor_151")]; - tensor var_2309 = const()[name = tensor("op_2309"), val = tensor([1, -1, 20, 64])]; - tensor var_2310 = reshape(shape = var_2309, x = tensor_151)[name = tensor("op_2310")]; - tensor var_2311_perm_0 = const()[name = tensor("op_2311_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor tensor_153 = linear(bias = text_encoder_text_model_encoder_layers_25_self_attn_v_proj_bias, weight = text_encoder_text_model_encoder_layers_25_self_attn_v_proj_weight, x = 
hidden_states_151)[name = tensor("tensor_153")]; - tensor var_2316 = const()[name = tensor("op_2316"), val = tensor([1, -1, 20, 64])]; - tensor var_2317 = reshape(shape = var_2316, x = tensor_153)[name = tensor("op_2317")]; - tensor var_2318_perm_0 = const()[name = tensor("op_2318_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_2325 = const()[name = tensor("op_2325"), val = tensor([1, 77, 20, 64])]; - tensor var_2326 = reshape(shape = var_2325, x = tensor_155)[name = tensor("op_2326")]; - tensor var_2327_perm_0 = const()[name = tensor("op_2327_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_2329 = const()[name = tensor("op_2329"), val = tensor([20, -1, 64])]; - tensor transpose_33 = transpose(perm = var_2327_perm_0, x = var_2326)[name = tensor("transpose_33")]; - tensor query_states_51 = reshape(shape = var_2329, x = transpose_33)[name = tensor("query_states_51")]; + tensor text_encoder_text_model_encoder_layers_25_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_25_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1110591360)))]; + tensor text_encoder_text_model_encoder_layers_25_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_25_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1110593984)))]; + tensor hidden_states_151_cast = layer_norm(axes = hidden_states_151_axes_0, beta = text_encoder_text_model_encoder_layers_25_layer_norm1_bias_to_fp16, epsilon = var_13_to_fp16, gamma = text_encoder_text_model_encoder_layers_25_layer_norm1_weight_to_fp16, x = input_403_cast)[name = tensor("hidden_states_151_cast")]; + tensor text_encoder_text_model_encoder_layers_25_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_25_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1110596608)))]; + tensor text_encoder_text_model_encoder_layers_25_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_25_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1113873472)))]; + tensor var_2305_cast = linear(bias = text_encoder_text_model_encoder_layers_25_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_25_self_attn_q_proj_weight_to_fp16, x = hidden_states_151_cast)[name = tensor("op_2305_cast")]; + tensor var_2306_to_fp16 = const()[name = tensor("op_2306_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_155_cast = mul(x = var_2305_cast, y = var_2306_to_fp16)[name = tensor("tensor_155_cast")]; + tensor text_encoder_text_model_encoder_layers_25_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_25_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1113876096)))]; + tensor text_encoder_text_model_encoder_layers_25_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_25_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1117152960)))]; + tensor tensor_151_cast = linear(bias = text_encoder_text_model_encoder_layers_25_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_25_self_attn_k_proj_weight_to_fp16, 
x = hidden_states_151_cast)[name = tensor("tensor_151_cast")]; + tensor var_2311 = const()[name = tensor("op_2311"), val = tensor([1, -1, 20, 64])]; + tensor var_2312_cast = reshape(shape = var_2311, x = tensor_151_cast)[name = tensor("op_2312_cast")]; + tensor var_2313_perm_0 = const()[name = tensor("op_2313_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_25_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_25_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1117155584)))]; + tensor text_encoder_text_model_encoder_layers_25_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_25_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1120432448)))]; + tensor tensor_153_cast = linear(bias = text_encoder_text_model_encoder_layers_25_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_25_self_attn_v_proj_weight_to_fp16, x = hidden_states_151_cast)[name = tensor("tensor_153_cast")]; + tensor var_2318 = const()[name = tensor("op_2318"), val = tensor([1, -1, 20, 64])]; + tensor var_2319_cast = reshape(shape = var_2318, x = tensor_153_cast)[name = tensor("op_2319_cast")]; + tensor var_2320_perm_0 = const()[name = tensor("op_2320_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_2327 = const()[name = tensor("op_2327"), val = tensor([1, 77, 20, 64])]; + tensor var_2328_cast = reshape(shape = var_2327, x = tensor_155_cast)[name = tensor("op_2328_cast")]; + tensor var_2329_perm_0 = const()[name = tensor("op_2329_perm_0"), val = tensor([0, 2, 1, 3])]; tensor var_2331 = const()[name = tensor("op_2331"), val = tensor([20, -1, 64])]; - tensor transpose_35 = transpose(perm = var_2311_perm_0, x = var_2310)[name = tensor("transpose_35")]; - tensor key_states_103 = reshape(shape = var_2331, x = transpose_35)[name = tensor("key_states_103")]; + tensor transpose_33 = transpose(perm = var_2329_perm_0, x = var_2328_cast)[name = tensor("transpose_33")]; + tensor query_states_51_cast = reshape(shape = var_2331, x = transpose_33)[name = tensor("query_states_51_cast")]; tensor var_2333 = const()[name = tensor("op_2333"), val = tensor([20, -1, 64])]; - tensor transpose_34 = transpose(perm = var_2318_perm_0, x = var_2317)[name = tensor("transpose_34")]; - tensor value_states_103 = reshape(shape = var_2333, x = transpose_34)[name = tensor("value_states_103")]; - tensor var_2336_perm_0 = const()[name = tensor("op_2336_perm_0"), val = tensor([0, 2, 1])]; + tensor transpose_35 = transpose(perm = var_2313_perm_0, x = var_2312_cast)[name = tensor("transpose_35")]; + tensor key_states_103_cast = reshape(shape = var_2333, x = transpose_35)[name = tensor("key_states_103_cast")]; + tensor var_2335 = const()[name = tensor("op_2335"), val = tensor([20, -1, 64])]; + tensor transpose_34 = transpose(perm = var_2320_perm_0, x = var_2319_cast)[name = tensor("transpose_34")]; + tensor value_states_103_cast = reshape(shape = var_2335, x = transpose_34)[name = tensor("value_states_103_cast")]; + tensor var_2338_perm_0 = const()[name = tensor("op_2338_perm_0"), val = tensor([0, 2, 1])]; tensor attn_weights_151_transpose_x_0 = const()[name = tensor("attn_weights_151_transpose_x_0"), val = tensor(false)]; tensor attn_weights_151_transpose_y_0 = const()[name = tensor("attn_weights_151_transpose_y_0"), val = tensor(false)]; - tensor transpose_32 = 
transpose(perm = var_2336_perm_0, x = key_states_103)[name = tensor("transpose_32")]; - tensor attn_weights_151 = matmul(transpose_x = attn_weights_151_transpose_x_0, transpose_y = attn_weights_151_transpose_y_0, x = query_states_51, y = transpose_32)[name = tensor("attn_weights_151")]; - tensor var_2338 = const()[name = tensor("op_2338"), val = tensor([1, 20, 77, 77])]; - tensor var_2339 = reshape(shape = var_2338, x = attn_weights_151)[name = tensor("op_2339")]; - tensor attn_weights_153 = add(x = var_2339, y = causal_attention_mask)[name = tensor("attn_weights_153")]; - tensor var_2344 = const()[name = tensor("op_2344"), val = tensor([20, 77, 77])]; - tensor input_405 = reshape(shape = var_2344, x = attn_weights_153)[name = tensor("input_405")]; - tensor input_407 = softmax(axis = var_5, x = input_405)[name = tensor("input_407")]; + tensor transpose_32 = transpose(perm = var_2338_perm_0, x = key_states_103_cast)[name = tensor("transpose_32")]; + tensor attn_weights_151_cast = matmul(transpose_x = attn_weights_151_transpose_x_0, transpose_y = attn_weights_151_transpose_y_0, x = query_states_51_cast, y = transpose_32)[name = tensor("attn_weights_151_cast")]; + tensor var_2340 = const()[name = tensor("op_2340"), val = tensor([1, 20, 77, 77])]; + tensor var_2341_cast = reshape(shape = var_2340, x = attn_weights_151_cast)[name = tensor("op_2341_cast")]; + tensor attn_weights_153_cast = add(x = var_2341_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_153_cast")]; + tensor var_2346 = const()[name = tensor("op_2346"), val = tensor([20, 77, 77])]; + tensor input_405_cast = reshape(shape = var_2346, x = attn_weights_153_cast)[name = tensor("input_405_cast")]; + tensor input_407_cast = softmax(axis = var_5, x = input_405_cast)[name = tensor("input_407_cast")]; tensor attn_output_151_transpose_x_0 = const()[name = tensor("attn_output_151_transpose_x_0"), val = tensor(false)]; tensor attn_output_151_transpose_y_0 = const()[name = tensor("attn_output_151_transpose_y_0"), val = tensor(false)]; - tensor attn_output_151 = matmul(transpose_x = attn_output_151_transpose_x_0, transpose_y = attn_output_151_transpose_y_0, x = input_407, y = value_states_103)[name = tensor("attn_output_151")]; - tensor var_2349 = const()[name = tensor("op_2349"), val = tensor([1, 20, 77, 64])]; - tensor attn_output_153 = reshape(shape = var_2349, x = attn_output_151)[name = tensor("attn_output_153")]; + tensor attn_output_151_cast = matmul(transpose_x = attn_output_151_transpose_x_0, transpose_y = attn_output_151_transpose_y_0, x = input_407_cast, y = value_states_103_cast)[name = tensor("attn_output_151_cast")]; + tensor var_2351 = const()[name = tensor("op_2351"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_153_cast = reshape(shape = var_2351, x = attn_output_151_cast)[name = tensor("attn_output_153_cast")]; tensor attn_output_155_perm_0 = const()[name = tensor("attn_output_155_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_2352 = const()[name = tensor("op_2352"), val = tensor([1, 77, 1280])]; - tensor transpose_31 = transpose(perm = attn_output_155_perm_0, x = attn_output_153)[name = tensor("transpose_31")]; - tensor input_409 = reshape(shape = var_2352, x = transpose_31)[name = tensor("input_409")]; - tensor hidden_states_153 = linear(bias = text_encoder_text_model_encoder_layers_25_self_attn_out_proj_bias, weight = text_encoder_text_model_encoder_layers_25_self_attn_out_proj_weight, x = input_409)[name = tensor("hidden_states_153")]; - tensor input_411 = add(x = input_403, y = 
hidden_states_153)[name = tensor("input_411")]; + tensor var_2354 = const()[name = tensor("op_2354"), val = tensor([1, 77, 1280])]; + tensor transpose_31 = transpose(perm = attn_output_155_perm_0, x = attn_output_153_cast)[name = tensor("transpose_31")]; + tensor input_409_cast = reshape(shape = var_2354, x = transpose_31)[name = tensor("input_409_cast")]; + tensor text_encoder_text_model_encoder_layers_25_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_25_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1120435072)))]; + tensor text_encoder_text_model_encoder_layers_25_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_25_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1123711936)))]; + tensor hidden_states_153_cast = linear(bias = text_encoder_text_model_encoder_layers_25_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_25_self_attn_out_proj_weight_to_fp16, x = input_409_cast)[name = tensor("hidden_states_153_cast")]; + tensor input_411_cast = add(x = input_403_cast, y = hidden_states_153_cast)[name = tensor("input_411_cast")]; tensor input_413_axes_0 = const()[name = tensor("input_413_axes_0"), val = tensor([-1])]; - tensor input_413 = layer_norm(axes = input_413_axes_0, beta = text_encoder_text_model_encoder_layers_25_layer_norm2_bias, epsilon = var_12, gamma = text_encoder_text_model_encoder_layers_25_layer_norm2_weight, x = input_411)[name = tensor("input_413")]; - tensor input_415 = linear(bias = text_encoder_text_model_encoder_layers_25_mlp_fc1_bias, weight = text_encoder_text_model_encoder_layers_25_mlp_fc1_weight, x = input_413)[name = tensor("input_415")]; + tensor text_encoder_text_model_encoder_layers_25_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_25_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1123714560)))]; + tensor text_encoder_text_model_encoder_layers_25_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_25_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1123717184)))]; + tensor input_413_cast = layer_norm(axes = input_413_axes_0, beta = text_encoder_text_model_encoder_layers_25_layer_norm2_bias_to_fp16, epsilon = var_13_to_fp16, gamma = text_encoder_text_model_encoder_layers_25_layer_norm2_weight_to_fp16, x = input_411_cast)[name = tensor("input_413_cast")]; + tensor text_encoder_text_model_encoder_layers_25_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_25_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1123719808)))]; + tensor text_encoder_text_model_encoder_layers_25_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_25_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1136827072)))]; + tensor input_415_cast = linear(bias = text_encoder_text_model_encoder_layers_25_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_25_mlp_fc1_weight_to_fp16, x = input_413_cast)[name = tensor("input_415_cast")]; tensor input_417_mode_0 = const()[name = tensor("input_417_mode_0"), 
val = tensor("EXACT")]; - tensor input_417 = gelu(mode = input_417_mode_0, x = input_415)[name = tensor("input_417")]; - tensor hidden_states_155 = linear(bias = text_encoder_text_model_encoder_layers_25_mlp_fc2_bias, weight = text_encoder_text_model_encoder_layers_25_mlp_fc2_weight, x = input_417)[name = tensor("hidden_states_155")]; - tensor input_419 = add(x = input_411, y = hidden_states_155)[name = tensor("input_419")]; + tensor input_417_cast = gelu(mode = input_417_mode_0, x = input_415_cast)[name = tensor("input_417_cast")]; + tensor text_encoder_text_model_encoder_layers_25_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_25_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1136837376)))]; + tensor text_encoder_text_model_encoder_layers_25_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_25_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1149944640)))]; + tensor hidden_states_155_cast = linear(bias = text_encoder_text_model_encoder_layers_25_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_25_mlp_fc2_weight_to_fp16, x = input_417_cast)[name = tensor("hidden_states_155_cast")]; + tensor input_419_cast = add(x = input_411_cast, y = hidden_states_155_cast)[name = tensor("input_419_cast")]; tensor hidden_states_157_axes_0 = const()[name = tensor("hidden_states_157_axes_0"), val = tensor([-1])]; - tensor hidden_states_157 = layer_norm(axes = hidden_states_157_axes_0, beta = text_encoder_text_model_encoder_layers_26_layer_norm1_bias, epsilon = var_12, gamma = text_encoder_text_model_encoder_layers_26_layer_norm1_weight, x = input_419)[name = tensor("hidden_states_157")]; - tensor var_2390 = linear(bias = text_encoder_text_model_encoder_layers_26_self_attn_q_proj_bias, weight = text_encoder_text_model_encoder_layers_26_self_attn_q_proj_weight, x = hidden_states_157)[name = tensor("op_2390")]; - tensor var_2391 = const()[name = tensor("op_2391"), val = tensor(0x1p-3)]; - tensor tensor_161 = mul(x = var_2390, y = var_2391)[name = tensor("tensor_161")]; - tensor tensor_157 = linear(bias = text_encoder_text_model_encoder_layers_26_self_attn_k_proj_bias, weight = text_encoder_text_model_encoder_layers_26_self_attn_k_proj_weight, x = hidden_states_157)[name = tensor("tensor_157")]; - tensor var_2396 = const()[name = tensor("op_2396"), val = tensor([1, -1, 20, 64])]; - tensor var_2397 = reshape(shape = var_2396, x = tensor_157)[name = tensor("op_2397")]; - tensor var_2398_perm_0 = const()[name = tensor("op_2398_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor tensor_159 = linear(bias = text_encoder_text_model_encoder_layers_26_self_attn_v_proj_bias, weight = text_encoder_text_model_encoder_layers_26_self_attn_v_proj_weight, x = hidden_states_157)[name = tensor("tensor_159")]; - tensor var_2403 = const()[name = tensor("op_2403"), val = tensor([1, -1, 20, 64])]; - tensor var_2404 = reshape(shape = var_2403, x = tensor_159)[name = tensor("op_2404")]; - tensor var_2405_perm_0 = const()[name = tensor("op_2405_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_2412 = const()[name = tensor("op_2412"), val = tensor([1, 77, 20, 64])]; - tensor var_2413 = reshape(shape = var_2412, x = tensor_161)[name = tensor("op_2413")]; - tensor var_2414_perm_0 = const()[name = tensor("op_2414_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_2416 = const()[name = tensor("op_2416"), val = 
tensor([20, -1, 64])]; - tensor transpose_28 = transpose(perm = var_2414_perm_0, x = var_2413)[name = tensor("transpose_28")]; - tensor query_states_53 = reshape(shape = var_2416, x = transpose_28)[name = tensor("query_states_53")]; + tensor text_encoder_text_model_encoder_layers_26_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_26_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1149947264)))]; + tensor text_encoder_text_model_encoder_layers_26_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_26_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1149949888)))]; + tensor hidden_states_157_cast = layer_norm(axes = hidden_states_157_axes_0, beta = text_encoder_text_model_encoder_layers_26_layer_norm1_bias_to_fp16, epsilon = var_13_to_fp16, gamma = text_encoder_text_model_encoder_layers_26_layer_norm1_weight_to_fp16, x = input_419_cast)[name = tensor("hidden_states_157_cast")]; + tensor text_encoder_text_model_encoder_layers_26_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_26_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1149952512)))]; + tensor text_encoder_text_model_encoder_layers_26_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_26_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1153229376)))]; + tensor var_2392_cast = linear(bias = text_encoder_text_model_encoder_layers_26_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_26_self_attn_q_proj_weight_to_fp16, x = hidden_states_157_cast)[name = tensor("op_2392_cast")]; + tensor var_2393_to_fp16 = const()[name = tensor("op_2393_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_161_cast = mul(x = var_2392_cast, y = var_2393_to_fp16)[name = tensor("tensor_161_cast")]; + tensor text_encoder_text_model_encoder_layers_26_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_26_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1153232000)))]; + tensor text_encoder_text_model_encoder_layers_26_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_26_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1156508864)))]; + tensor tensor_157_cast = linear(bias = text_encoder_text_model_encoder_layers_26_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_26_self_attn_k_proj_weight_to_fp16, x = hidden_states_157_cast)[name = tensor("tensor_157_cast")]; + tensor var_2398 = const()[name = tensor("op_2398"), val = tensor([1, -1, 20, 64])]; + tensor var_2399_cast = reshape(shape = var_2398, x = tensor_157_cast)[name = tensor("op_2399_cast")]; + tensor var_2400_perm_0 = const()[name = tensor("op_2400_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_26_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_26_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1156511488)))]; + tensor 
text_encoder_text_model_encoder_layers_26_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_26_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1159788352)))]; + tensor tensor_159_cast = linear(bias = text_encoder_text_model_encoder_layers_26_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_26_self_attn_v_proj_weight_to_fp16, x = hidden_states_157_cast)[name = tensor("tensor_159_cast")]; + tensor var_2405 = const()[name = tensor("op_2405"), val = tensor([1, -1, 20, 64])]; + tensor var_2406_cast = reshape(shape = var_2405, x = tensor_159_cast)[name = tensor("op_2406_cast")]; + tensor var_2407_perm_0 = const()[name = tensor("op_2407_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_2414 = const()[name = tensor("op_2414"), val = tensor([1, 77, 20, 64])]; + tensor var_2415_cast = reshape(shape = var_2414, x = tensor_161_cast)[name = tensor("op_2415_cast")]; + tensor var_2416_perm_0 = const()[name = tensor("op_2416_perm_0"), val = tensor([0, 2, 1, 3])]; tensor var_2418 = const()[name = tensor("op_2418"), val = tensor([20, -1, 64])]; - tensor transpose_30 = transpose(perm = var_2398_perm_0, x = var_2397)[name = tensor("transpose_30")]; - tensor key_states_107 = reshape(shape = var_2418, x = transpose_30)[name = tensor("key_states_107")]; + tensor transpose_28 = transpose(perm = var_2416_perm_0, x = var_2415_cast)[name = tensor("transpose_28")]; + tensor query_states_53_cast = reshape(shape = var_2418, x = transpose_28)[name = tensor("query_states_53_cast")]; tensor var_2420 = const()[name = tensor("op_2420"), val = tensor([20, -1, 64])]; - tensor transpose_29 = transpose(perm = var_2405_perm_0, x = var_2404)[name = tensor("transpose_29")]; - tensor value_states_107 = reshape(shape = var_2420, x = transpose_29)[name = tensor("value_states_107")]; - tensor var_2423_perm_0 = const()[name = tensor("op_2423_perm_0"), val = tensor([0, 2, 1])]; + tensor transpose_30 = transpose(perm = var_2400_perm_0, x = var_2399_cast)[name = tensor("transpose_30")]; + tensor key_states_107_cast = reshape(shape = var_2420, x = transpose_30)[name = tensor("key_states_107_cast")]; + tensor var_2422 = const()[name = tensor("op_2422"), val = tensor([20, -1, 64])]; + tensor transpose_29 = transpose(perm = var_2407_perm_0, x = var_2406_cast)[name = tensor("transpose_29")]; + tensor value_states_107_cast = reshape(shape = var_2422, x = transpose_29)[name = tensor("value_states_107_cast")]; + tensor var_2425_perm_0 = const()[name = tensor("op_2425_perm_0"), val = tensor([0, 2, 1])]; tensor attn_weights_157_transpose_x_0 = const()[name = tensor("attn_weights_157_transpose_x_0"), val = tensor(false)]; tensor attn_weights_157_transpose_y_0 = const()[name = tensor("attn_weights_157_transpose_y_0"), val = tensor(false)]; - tensor transpose_27 = transpose(perm = var_2423_perm_0, x = key_states_107)[name = tensor("transpose_27")]; - tensor attn_weights_157 = matmul(transpose_x = attn_weights_157_transpose_x_0, transpose_y = attn_weights_157_transpose_y_0, x = query_states_53, y = transpose_27)[name = tensor("attn_weights_157")]; - tensor var_2425 = const()[name = tensor("op_2425"), val = tensor([1, 20, 77, 77])]; - tensor var_2426 = reshape(shape = var_2425, x = attn_weights_157)[name = tensor("op_2426")]; - tensor attn_weights_159 = add(x = var_2426, y = causal_attention_mask)[name = tensor("attn_weights_159")]; - tensor var_2431 = const()[name = tensor("op_2431"), val = tensor([20, 77, 
77])]; - tensor input_421 = reshape(shape = var_2431, x = attn_weights_159)[name = tensor("input_421")]; - tensor input_423 = softmax(axis = var_5, x = input_421)[name = tensor("input_423")]; + tensor transpose_27 = transpose(perm = var_2425_perm_0, x = key_states_107_cast)[name = tensor("transpose_27")]; + tensor attn_weights_157_cast = matmul(transpose_x = attn_weights_157_transpose_x_0, transpose_y = attn_weights_157_transpose_y_0, x = query_states_53_cast, y = transpose_27)[name = tensor("attn_weights_157_cast")]; + tensor var_2427 = const()[name = tensor("op_2427"), val = tensor([1, 20, 77, 77])]; + tensor var_2428_cast = reshape(shape = var_2427, x = attn_weights_157_cast)[name = tensor("op_2428_cast")]; + tensor attn_weights_159_cast = add(x = var_2428_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_159_cast")]; + tensor var_2433 = const()[name = tensor("op_2433"), val = tensor([20, 77, 77])]; + tensor input_421_cast = reshape(shape = var_2433, x = attn_weights_159_cast)[name = tensor("input_421_cast")]; + tensor input_423_cast = softmax(axis = var_5, x = input_421_cast)[name = tensor("input_423_cast")]; tensor attn_output_157_transpose_x_0 = const()[name = tensor("attn_output_157_transpose_x_0"), val = tensor(false)]; tensor attn_output_157_transpose_y_0 = const()[name = tensor("attn_output_157_transpose_y_0"), val = tensor(false)]; - tensor attn_output_157 = matmul(transpose_x = attn_output_157_transpose_x_0, transpose_y = attn_output_157_transpose_y_0, x = input_423, y = value_states_107)[name = tensor("attn_output_157")]; - tensor var_2436 = const()[name = tensor("op_2436"), val = tensor([1, 20, 77, 64])]; - tensor attn_output_159 = reshape(shape = var_2436, x = attn_output_157)[name = tensor("attn_output_159")]; + tensor attn_output_157_cast = matmul(transpose_x = attn_output_157_transpose_x_0, transpose_y = attn_output_157_transpose_y_0, x = input_423_cast, y = value_states_107_cast)[name = tensor("attn_output_157_cast")]; + tensor var_2438 = const()[name = tensor("op_2438"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_159_cast = reshape(shape = var_2438, x = attn_output_157_cast)[name = tensor("attn_output_159_cast")]; tensor attn_output_161_perm_0 = const()[name = tensor("attn_output_161_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_2439 = const()[name = tensor("op_2439"), val = tensor([1, 77, 1280])]; - tensor transpose_26 = transpose(perm = attn_output_161_perm_0, x = attn_output_159)[name = tensor("transpose_26")]; - tensor input_425 = reshape(shape = var_2439, x = transpose_26)[name = tensor("input_425")]; - tensor hidden_states_159 = linear(bias = text_encoder_text_model_encoder_layers_26_self_attn_out_proj_bias, weight = text_encoder_text_model_encoder_layers_26_self_attn_out_proj_weight, x = input_425)[name = tensor("hidden_states_159")]; - tensor input_427 = add(x = input_419, y = hidden_states_159)[name = tensor("input_427")]; + tensor var_2441 = const()[name = tensor("op_2441"), val = tensor([1, 77, 1280])]; + tensor transpose_26 = transpose(perm = attn_output_161_perm_0, x = attn_output_159_cast)[name = tensor("transpose_26")]; + tensor input_425_cast = reshape(shape = var_2441, x = transpose_26)[name = tensor("input_425_cast")]; + tensor text_encoder_text_model_encoder_layers_26_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_26_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1159790976)))]; + tensor 
text_encoder_text_model_encoder_layers_26_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_26_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1163067840)))]; + tensor hidden_states_159_cast = linear(bias = text_encoder_text_model_encoder_layers_26_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_26_self_attn_out_proj_weight_to_fp16, x = input_425_cast)[name = tensor("hidden_states_159_cast")]; + tensor input_427_cast = add(x = input_419_cast, y = hidden_states_159_cast)[name = tensor("input_427_cast")]; tensor input_429_axes_0 = const()[name = tensor("input_429_axes_0"), val = tensor([-1])]; - tensor input_429 = layer_norm(axes = input_429_axes_0, beta = text_encoder_text_model_encoder_layers_26_layer_norm2_bias, epsilon = var_12, gamma = text_encoder_text_model_encoder_layers_26_layer_norm2_weight, x = input_427)[name = tensor("input_429")]; - tensor input_431 = linear(bias = text_encoder_text_model_encoder_layers_26_mlp_fc1_bias, weight = text_encoder_text_model_encoder_layers_26_mlp_fc1_weight, x = input_429)[name = tensor("input_431")]; + tensor text_encoder_text_model_encoder_layers_26_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_26_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1163070464)))]; + tensor text_encoder_text_model_encoder_layers_26_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_26_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1163073088)))]; + tensor input_429_cast = layer_norm(axes = input_429_axes_0, beta = text_encoder_text_model_encoder_layers_26_layer_norm2_bias_to_fp16, epsilon = var_13_to_fp16, gamma = text_encoder_text_model_encoder_layers_26_layer_norm2_weight_to_fp16, x = input_427_cast)[name = tensor("input_429_cast")]; + tensor text_encoder_text_model_encoder_layers_26_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_26_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1163075712)))]; + tensor text_encoder_text_model_encoder_layers_26_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_26_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1176182976)))]; + tensor input_431_cast = linear(bias = text_encoder_text_model_encoder_layers_26_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_26_mlp_fc1_weight_to_fp16, x = input_429_cast)[name = tensor("input_431_cast")]; tensor input_433_mode_0 = const()[name = tensor("input_433_mode_0"), val = tensor("EXACT")]; - tensor input_433 = gelu(mode = input_433_mode_0, x = input_431)[name = tensor("input_433")]; - tensor hidden_states_161 = linear(bias = text_encoder_text_model_encoder_layers_26_mlp_fc2_bias, weight = text_encoder_text_model_encoder_layers_26_mlp_fc2_weight, x = input_433)[name = tensor("hidden_states_161")]; - tensor input_435 = add(x = input_427, y = hidden_states_161)[name = tensor("input_435")]; + tensor input_433_cast = gelu(mode = input_433_mode_0, x = input_431_cast)[name = tensor("input_433_cast")]; + tensor text_encoder_text_model_encoder_layers_26_mlp_fc2_weight_to_fp16 = const()[name = 
tensor("text_encoder_text_model_encoder_layers_26_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1176193280)))]; + tensor text_encoder_text_model_encoder_layers_26_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_26_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1189300544)))]; + tensor hidden_states_161_cast = linear(bias = text_encoder_text_model_encoder_layers_26_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_26_mlp_fc2_weight_to_fp16, x = input_433_cast)[name = tensor("hidden_states_161_cast")]; + tensor input_435_cast = add(x = input_427_cast, y = hidden_states_161_cast)[name = tensor("input_435_cast")]; tensor hidden_states_163_axes_0 = const()[name = tensor("hidden_states_163_axes_0"), val = tensor([-1])]; - tensor hidden_states_163 = layer_norm(axes = hidden_states_163_axes_0, beta = text_encoder_text_model_encoder_layers_27_layer_norm1_bias, epsilon = var_12, gamma = text_encoder_text_model_encoder_layers_27_layer_norm1_weight, x = input_435)[name = tensor("hidden_states_163")]; - tensor var_2477 = linear(bias = text_encoder_text_model_encoder_layers_27_self_attn_q_proj_bias, weight = text_encoder_text_model_encoder_layers_27_self_attn_q_proj_weight, x = hidden_states_163)[name = tensor("op_2477")]; - tensor var_2478 = const()[name = tensor("op_2478"), val = tensor(0x1p-3)]; - tensor tensor_167 = mul(x = var_2477, y = var_2478)[name = tensor("tensor_167")]; - tensor tensor_163 = linear(bias = text_encoder_text_model_encoder_layers_27_self_attn_k_proj_bias, weight = text_encoder_text_model_encoder_layers_27_self_attn_k_proj_weight, x = hidden_states_163)[name = tensor("tensor_163")]; - tensor var_2483 = const()[name = tensor("op_2483"), val = tensor([1, -1, 20, 64])]; - tensor var_2484 = reshape(shape = var_2483, x = tensor_163)[name = tensor("op_2484")]; - tensor var_2485_perm_0 = const()[name = tensor("op_2485_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor tensor_165 = linear(bias = text_encoder_text_model_encoder_layers_27_self_attn_v_proj_bias, weight = text_encoder_text_model_encoder_layers_27_self_attn_v_proj_weight, x = hidden_states_163)[name = tensor("tensor_165")]; - tensor var_2490 = const()[name = tensor("op_2490"), val = tensor([1, -1, 20, 64])]; - tensor var_2491 = reshape(shape = var_2490, x = tensor_165)[name = tensor("op_2491")]; - tensor var_2492_perm_0 = const()[name = tensor("op_2492_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_2499 = const()[name = tensor("op_2499"), val = tensor([1, 77, 20, 64])]; - tensor var_2500 = reshape(shape = var_2499, x = tensor_167)[name = tensor("op_2500")]; - tensor var_2501_perm_0 = const()[name = tensor("op_2501_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_2503 = const()[name = tensor("op_2503"), val = tensor([20, -1, 64])]; - tensor transpose_23 = transpose(perm = var_2501_perm_0, x = var_2500)[name = tensor("transpose_23")]; - tensor query_states_55 = reshape(shape = var_2503, x = transpose_23)[name = tensor("query_states_55")]; + tensor text_encoder_text_model_encoder_layers_27_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_27_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1189303168)))]; + tensor text_encoder_text_model_encoder_layers_27_layer_norm1_bias_to_fp16 = const()[name = 
tensor("text_encoder_text_model_encoder_layers_27_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1189305792)))]; + tensor hidden_states_163_cast = layer_norm(axes = hidden_states_163_axes_0, beta = text_encoder_text_model_encoder_layers_27_layer_norm1_bias_to_fp16, epsilon = var_13_to_fp16, gamma = text_encoder_text_model_encoder_layers_27_layer_norm1_weight_to_fp16, x = input_435_cast)[name = tensor("hidden_states_163_cast")]; + tensor text_encoder_text_model_encoder_layers_27_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_27_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1189308416)))]; + tensor text_encoder_text_model_encoder_layers_27_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_27_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1192585280)))]; + tensor var_2479_cast = linear(bias = text_encoder_text_model_encoder_layers_27_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_27_self_attn_q_proj_weight_to_fp16, x = hidden_states_163_cast)[name = tensor("op_2479_cast")]; + tensor var_2480_to_fp16 = const()[name = tensor("op_2480_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_167_cast = mul(x = var_2479_cast, y = var_2480_to_fp16)[name = tensor("tensor_167_cast")]; + tensor text_encoder_text_model_encoder_layers_27_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_27_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1192587904)))]; + tensor text_encoder_text_model_encoder_layers_27_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_27_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1195864768)))]; + tensor tensor_163_cast = linear(bias = text_encoder_text_model_encoder_layers_27_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_27_self_attn_k_proj_weight_to_fp16, x = hidden_states_163_cast)[name = tensor("tensor_163_cast")]; + tensor var_2485 = const()[name = tensor("op_2485"), val = tensor([1, -1, 20, 64])]; + tensor var_2486_cast = reshape(shape = var_2485, x = tensor_163_cast)[name = tensor("op_2486_cast")]; + tensor var_2487_perm_0 = const()[name = tensor("op_2487_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_27_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_27_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1195867392)))]; + tensor text_encoder_text_model_encoder_layers_27_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_27_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1199144256)))]; + tensor tensor_165_cast = linear(bias = text_encoder_text_model_encoder_layers_27_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_27_self_attn_v_proj_weight_to_fp16, x = hidden_states_163_cast)[name = tensor("tensor_165_cast")]; + tensor var_2492 = const()[name = tensor("op_2492"), val = tensor([1, 
-1, 20, 64])]; + tensor var_2493_cast = reshape(shape = var_2492, x = tensor_165_cast)[name = tensor("op_2493_cast")]; + tensor var_2494_perm_0 = const()[name = tensor("op_2494_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_2501 = const()[name = tensor("op_2501"), val = tensor([1, 77, 20, 64])]; + tensor var_2502_cast = reshape(shape = var_2501, x = tensor_167_cast)[name = tensor("op_2502_cast")]; + tensor var_2503_perm_0 = const()[name = tensor("op_2503_perm_0"), val = tensor([0, 2, 1, 3])]; tensor var_2505 = const()[name = tensor("op_2505"), val = tensor([20, -1, 64])]; - tensor transpose_25 = transpose(perm = var_2485_perm_0, x = var_2484)[name = tensor("transpose_25")]; - tensor key_states_111 = reshape(shape = var_2505, x = transpose_25)[name = tensor("key_states_111")]; + tensor transpose_23 = transpose(perm = var_2503_perm_0, x = var_2502_cast)[name = tensor("transpose_23")]; + tensor query_states_55_cast = reshape(shape = var_2505, x = transpose_23)[name = tensor("query_states_55_cast")]; tensor var_2507 = const()[name = tensor("op_2507"), val = tensor([20, -1, 64])]; - tensor transpose_24 = transpose(perm = var_2492_perm_0, x = var_2491)[name = tensor("transpose_24")]; - tensor value_states_111 = reshape(shape = var_2507, x = transpose_24)[name = tensor("value_states_111")]; - tensor var_2510_perm_0 = const()[name = tensor("op_2510_perm_0"), val = tensor([0, 2, 1])]; + tensor transpose_25 = transpose(perm = var_2487_perm_0, x = var_2486_cast)[name = tensor("transpose_25")]; + tensor key_states_111_cast = reshape(shape = var_2507, x = transpose_25)[name = tensor("key_states_111_cast")]; + tensor var_2509 = const()[name = tensor("op_2509"), val = tensor([20, -1, 64])]; + tensor transpose_24 = transpose(perm = var_2494_perm_0, x = var_2493_cast)[name = tensor("transpose_24")]; + tensor value_states_111_cast = reshape(shape = var_2509, x = transpose_24)[name = tensor("value_states_111_cast")]; + tensor var_2512_perm_0 = const()[name = tensor("op_2512_perm_0"), val = tensor([0, 2, 1])]; tensor attn_weights_163_transpose_x_0 = const()[name = tensor("attn_weights_163_transpose_x_0"), val = tensor(false)]; tensor attn_weights_163_transpose_y_0 = const()[name = tensor("attn_weights_163_transpose_y_0"), val = tensor(false)]; - tensor transpose_22 = transpose(perm = var_2510_perm_0, x = key_states_111)[name = tensor("transpose_22")]; - tensor attn_weights_163 = matmul(transpose_x = attn_weights_163_transpose_x_0, transpose_y = attn_weights_163_transpose_y_0, x = query_states_55, y = transpose_22)[name = tensor("attn_weights_163")]; - tensor var_2512 = const()[name = tensor("op_2512"), val = tensor([1, 20, 77, 77])]; - tensor var_2513 = reshape(shape = var_2512, x = attn_weights_163)[name = tensor("op_2513")]; - tensor attn_weights_165 = add(x = var_2513, y = causal_attention_mask)[name = tensor("attn_weights_165")]; - tensor var_2518 = const()[name = tensor("op_2518"), val = tensor([20, 77, 77])]; - tensor input_437 = reshape(shape = var_2518, x = attn_weights_165)[name = tensor("input_437")]; - tensor input_439 = softmax(axis = var_5, x = input_437)[name = tensor("input_439")]; + tensor transpose_22 = transpose(perm = var_2512_perm_0, x = key_states_111_cast)[name = tensor("transpose_22")]; + tensor attn_weights_163_cast = matmul(transpose_x = attn_weights_163_transpose_x_0, transpose_y = attn_weights_163_transpose_y_0, x = query_states_55_cast, y = transpose_22)[name = tensor("attn_weights_163_cast")]; + tensor var_2514 = const()[name = tensor("op_2514"), val = tensor([1, 20, 77, 
77])]; + tensor var_2515_cast = reshape(shape = var_2514, x = attn_weights_163_cast)[name = tensor("op_2515_cast")]; + tensor attn_weights_165_cast = add(x = var_2515_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_165_cast")]; + tensor var_2520 = const()[name = tensor("op_2520"), val = tensor([20, 77, 77])]; + tensor input_437_cast = reshape(shape = var_2520, x = attn_weights_165_cast)[name = tensor("input_437_cast")]; + tensor input_439_cast = softmax(axis = var_5, x = input_437_cast)[name = tensor("input_439_cast")]; tensor attn_output_163_transpose_x_0 = const()[name = tensor("attn_output_163_transpose_x_0"), val = tensor(false)]; tensor attn_output_163_transpose_y_0 = const()[name = tensor("attn_output_163_transpose_y_0"), val = tensor(false)]; - tensor attn_output_163 = matmul(transpose_x = attn_output_163_transpose_x_0, transpose_y = attn_output_163_transpose_y_0, x = input_439, y = value_states_111)[name = tensor("attn_output_163")]; - tensor var_2523 = const()[name = tensor("op_2523"), val = tensor([1, 20, 77, 64])]; - tensor attn_output_165 = reshape(shape = var_2523, x = attn_output_163)[name = tensor("attn_output_165")]; + tensor attn_output_163_cast = matmul(transpose_x = attn_output_163_transpose_x_0, transpose_y = attn_output_163_transpose_y_0, x = input_439_cast, y = value_states_111_cast)[name = tensor("attn_output_163_cast")]; + tensor var_2525 = const()[name = tensor("op_2525"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_165_cast = reshape(shape = var_2525, x = attn_output_163_cast)[name = tensor("attn_output_165_cast")]; tensor attn_output_167_perm_0 = const()[name = tensor("attn_output_167_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_2526 = const()[name = tensor("op_2526"), val = tensor([1, 77, 1280])]; - tensor transpose_21 = transpose(perm = attn_output_167_perm_0, x = attn_output_165)[name = tensor("transpose_21")]; - tensor input_441 = reshape(shape = var_2526, x = transpose_21)[name = tensor("input_441")]; - tensor hidden_states_165 = linear(bias = text_encoder_text_model_encoder_layers_27_self_attn_out_proj_bias, weight = text_encoder_text_model_encoder_layers_27_self_attn_out_proj_weight, x = input_441)[name = tensor("hidden_states_165")]; - tensor input_443 = add(x = input_435, y = hidden_states_165)[name = tensor("input_443")]; + tensor var_2528 = const()[name = tensor("op_2528"), val = tensor([1, 77, 1280])]; + tensor transpose_21 = transpose(perm = attn_output_167_perm_0, x = attn_output_165_cast)[name = tensor("transpose_21")]; + tensor input_441_cast = reshape(shape = var_2528, x = transpose_21)[name = tensor("input_441_cast")]; + tensor text_encoder_text_model_encoder_layers_27_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_27_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1199146880)))]; + tensor text_encoder_text_model_encoder_layers_27_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_27_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1202423744)))]; + tensor hidden_states_165_cast = linear(bias = text_encoder_text_model_encoder_layers_27_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_27_self_attn_out_proj_weight_to_fp16, x = input_441_cast)[name = tensor("hidden_states_165_cast")]; + tensor input_443_cast = add(x = input_435_cast, y = 
hidden_states_165_cast)[name = tensor("input_443_cast")]; tensor input_445_axes_0 = const()[name = tensor("input_445_axes_0"), val = tensor([-1])]; - tensor input_445 = layer_norm(axes = input_445_axes_0, beta = text_encoder_text_model_encoder_layers_27_layer_norm2_bias, epsilon = var_12, gamma = text_encoder_text_model_encoder_layers_27_layer_norm2_weight, x = input_443)[name = tensor("input_445")]; - tensor input_447 = linear(bias = text_encoder_text_model_encoder_layers_27_mlp_fc1_bias, weight = text_encoder_text_model_encoder_layers_27_mlp_fc1_weight, x = input_445)[name = tensor("input_447")]; + tensor text_encoder_text_model_encoder_layers_27_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_27_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1202426368)))]; + tensor text_encoder_text_model_encoder_layers_27_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_27_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1202428992)))]; + tensor input_445_cast = layer_norm(axes = input_445_axes_0, beta = text_encoder_text_model_encoder_layers_27_layer_norm2_bias_to_fp16, epsilon = var_13_to_fp16, gamma = text_encoder_text_model_encoder_layers_27_layer_norm2_weight_to_fp16, x = input_443_cast)[name = tensor("input_445_cast")]; + tensor text_encoder_text_model_encoder_layers_27_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_27_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1202431616)))]; + tensor text_encoder_text_model_encoder_layers_27_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_27_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1215538880)))]; + tensor input_447_cast = linear(bias = text_encoder_text_model_encoder_layers_27_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_27_mlp_fc1_weight_to_fp16, x = input_445_cast)[name = tensor("input_447_cast")]; tensor input_449_mode_0 = const()[name = tensor("input_449_mode_0"), val = tensor("EXACT")]; - tensor input_449 = gelu(mode = input_449_mode_0, x = input_447)[name = tensor("input_449")]; - tensor hidden_states_167 = linear(bias = text_encoder_text_model_encoder_layers_27_mlp_fc2_bias, weight = text_encoder_text_model_encoder_layers_27_mlp_fc2_weight, x = input_449)[name = tensor("hidden_states_167")]; - tensor input_451 = add(x = input_443, y = hidden_states_167)[name = tensor("input_451")]; + tensor input_449_cast = gelu(mode = input_449_mode_0, x = input_447_cast)[name = tensor("input_449_cast")]; + tensor text_encoder_text_model_encoder_layers_27_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_27_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1215549184)))]; + tensor text_encoder_text_model_encoder_layers_27_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_27_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1228656448)))]; + tensor hidden_states_167_cast = linear(bias = text_encoder_text_model_encoder_layers_27_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_27_mlp_fc2_weight_to_fp16, x 
= input_449_cast)[name = tensor("hidden_states_167_cast")]; + tensor input_451_cast = add(x = input_443_cast, y = hidden_states_167_cast)[name = tensor("input_451_cast")]; tensor hidden_states_169_axes_0 = const()[name = tensor("hidden_states_169_axes_0"), val = tensor([-1])]; - tensor hidden_states_169 = layer_norm(axes = hidden_states_169_axes_0, beta = text_encoder_text_model_encoder_layers_28_layer_norm1_bias, epsilon = var_12, gamma = text_encoder_text_model_encoder_layers_28_layer_norm1_weight, x = input_451)[name = tensor("hidden_states_169")]; - tensor var_2564 = linear(bias = text_encoder_text_model_encoder_layers_28_self_attn_q_proj_bias, weight = text_encoder_text_model_encoder_layers_28_self_attn_q_proj_weight, x = hidden_states_169)[name = tensor("op_2564")]; - tensor var_2565 = const()[name = tensor("op_2565"), val = tensor(0x1p-3)]; - tensor tensor_173 = mul(x = var_2564, y = var_2565)[name = tensor("tensor_173")]; - tensor tensor_169 = linear(bias = text_encoder_text_model_encoder_layers_28_self_attn_k_proj_bias, weight = text_encoder_text_model_encoder_layers_28_self_attn_k_proj_weight, x = hidden_states_169)[name = tensor("tensor_169")]; - tensor var_2570 = const()[name = tensor("op_2570"), val = tensor([1, -1, 20, 64])]; - tensor var_2571 = reshape(shape = var_2570, x = tensor_169)[name = tensor("op_2571")]; - tensor var_2572_perm_0 = const()[name = tensor("op_2572_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor tensor_171 = linear(bias = text_encoder_text_model_encoder_layers_28_self_attn_v_proj_bias, weight = text_encoder_text_model_encoder_layers_28_self_attn_v_proj_weight, x = hidden_states_169)[name = tensor("tensor_171")]; - tensor var_2577 = const()[name = tensor("op_2577"), val = tensor([1, -1, 20, 64])]; - tensor var_2578 = reshape(shape = var_2577, x = tensor_171)[name = tensor("op_2578")]; - tensor var_2579_perm_0 = const()[name = tensor("op_2579_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_2586 = const()[name = tensor("op_2586"), val = tensor([1, 77, 20, 64])]; - tensor var_2587 = reshape(shape = var_2586, x = tensor_173)[name = tensor("op_2587")]; - tensor var_2588_perm_0 = const()[name = tensor("op_2588_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_2590 = const()[name = tensor("op_2590"), val = tensor([20, -1, 64])]; - tensor transpose_18 = transpose(perm = var_2588_perm_0, x = var_2587)[name = tensor("transpose_18")]; - tensor query_states_57 = reshape(shape = var_2590, x = transpose_18)[name = tensor("query_states_57")]; + tensor text_encoder_text_model_encoder_layers_28_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_28_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1228659072)))]; + tensor text_encoder_text_model_encoder_layers_28_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_28_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1228661696)))]; + tensor hidden_states_169_cast = layer_norm(axes = hidden_states_169_axes_0, beta = text_encoder_text_model_encoder_layers_28_layer_norm1_bias_to_fp16, epsilon = var_13_to_fp16, gamma = text_encoder_text_model_encoder_layers_28_layer_norm1_weight_to_fp16, x = input_451_cast)[name = tensor("hidden_states_169_cast")]; + tensor text_encoder_text_model_encoder_layers_28_self_attn_q_proj_weight_to_fp16 = const()[name = 
tensor("text_encoder_text_model_encoder_layers_28_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1228664320)))]; + tensor text_encoder_text_model_encoder_layers_28_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_28_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1231941184)))]; + tensor var_2566_cast = linear(bias = text_encoder_text_model_encoder_layers_28_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_28_self_attn_q_proj_weight_to_fp16, x = hidden_states_169_cast)[name = tensor("op_2566_cast")]; + tensor var_2567_to_fp16 = const()[name = tensor("op_2567_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_173_cast = mul(x = var_2566_cast, y = var_2567_to_fp16)[name = tensor("tensor_173_cast")]; + tensor text_encoder_text_model_encoder_layers_28_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_28_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1231943808)))]; + tensor text_encoder_text_model_encoder_layers_28_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_28_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1235220672)))]; + tensor tensor_169_cast = linear(bias = text_encoder_text_model_encoder_layers_28_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_28_self_attn_k_proj_weight_to_fp16, x = hidden_states_169_cast)[name = tensor("tensor_169_cast")]; + tensor var_2572 = const()[name = tensor("op_2572"), val = tensor([1, -1, 20, 64])]; + tensor var_2573_cast = reshape(shape = var_2572, x = tensor_169_cast)[name = tensor("op_2573_cast")]; + tensor var_2574_perm_0 = const()[name = tensor("op_2574_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_28_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_28_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1235223296)))]; + tensor text_encoder_text_model_encoder_layers_28_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_28_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1238500160)))]; + tensor tensor_171_cast = linear(bias = text_encoder_text_model_encoder_layers_28_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_28_self_attn_v_proj_weight_to_fp16, x = hidden_states_169_cast)[name = tensor("tensor_171_cast")]; + tensor var_2579 = const()[name = tensor("op_2579"), val = tensor([1, -1, 20, 64])]; + tensor var_2580_cast = reshape(shape = var_2579, x = tensor_171_cast)[name = tensor("op_2580_cast")]; + tensor var_2581_perm_0 = const()[name = tensor("op_2581_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_2588 = const()[name = tensor("op_2588"), val = tensor([1, 77, 20, 64])]; + tensor var_2589_cast = reshape(shape = var_2588, x = tensor_173_cast)[name = tensor("op_2589_cast")]; + tensor var_2590_perm_0 = const()[name = tensor("op_2590_perm_0"), val = tensor([0, 2, 1, 3])]; tensor var_2592 = const()[name = tensor("op_2592"), val = tensor([20, -1, 64])]; - tensor 
transpose_20 = transpose(perm = var_2572_perm_0, x = var_2571)[name = tensor("transpose_20")]; - tensor key_states_115 = reshape(shape = var_2592, x = transpose_20)[name = tensor("key_states_115")]; + tensor transpose_18 = transpose(perm = var_2590_perm_0, x = var_2589_cast)[name = tensor("transpose_18")]; + tensor query_states_57_cast = reshape(shape = var_2592, x = transpose_18)[name = tensor("query_states_57_cast")]; tensor var_2594 = const()[name = tensor("op_2594"), val = tensor([20, -1, 64])]; - tensor transpose_19 = transpose(perm = var_2579_perm_0, x = var_2578)[name = tensor("transpose_19")]; - tensor value_states_115 = reshape(shape = var_2594, x = transpose_19)[name = tensor("value_states_115")]; - tensor var_2597_perm_0 = const()[name = tensor("op_2597_perm_0"), val = tensor([0, 2, 1])]; + tensor transpose_20 = transpose(perm = var_2574_perm_0, x = var_2573_cast)[name = tensor("transpose_20")]; + tensor key_states_115_cast = reshape(shape = var_2594, x = transpose_20)[name = tensor("key_states_115_cast")]; + tensor var_2596 = const()[name = tensor("op_2596"), val = tensor([20, -1, 64])]; + tensor transpose_19 = transpose(perm = var_2581_perm_0, x = var_2580_cast)[name = tensor("transpose_19")]; + tensor value_states_115_cast = reshape(shape = var_2596, x = transpose_19)[name = tensor("value_states_115_cast")]; + tensor var_2599_perm_0 = const()[name = tensor("op_2599_perm_0"), val = tensor([0, 2, 1])]; tensor attn_weights_169_transpose_x_0 = const()[name = tensor("attn_weights_169_transpose_x_0"), val = tensor(false)]; tensor attn_weights_169_transpose_y_0 = const()[name = tensor("attn_weights_169_transpose_y_0"), val = tensor(false)]; - tensor transpose_17 = transpose(perm = var_2597_perm_0, x = key_states_115)[name = tensor("transpose_17")]; - tensor attn_weights_169 = matmul(transpose_x = attn_weights_169_transpose_x_0, transpose_y = attn_weights_169_transpose_y_0, x = query_states_57, y = transpose_17)[name = tensor("attn_weights_169")]; - tensor var_2599 = const()[name = tensor("op_2599"), val = tensor([1, 20, 77, 77])]; - tensor var_2600 = reshape(shape = var_2599, x = attn_weights_169)[name = tensor("op_2600")]; - tensor attn_weights_171 = add(x = var_2600, y = causal_attention_mask)[name = tensor("attn_weights_171")]; - tensor var_2605 = const()[name = tensor("op_2605"), val = tensor([20, 77, 77])]; - tensor input_453 = reshape(shape = var_2605, x = attn_weights_171)[name = tensor("input_453")]; - tensor input_455 = softmax(axis = var_5, x = input_453)[name = tensor("input_455")]; + tensor transpose_17 = transpose(perm = var_2599_perm_0, x = key_states_115_cast)[name = tensor("transpose_17")]; + tensor attn_weights_169_cast = matmul(transpose_x = attn_weights_169_transpose_x_0, transpose_y = attn_weights_169_transpose_y_0, x = query_states_57_cast, y = transpose_17)[name = tensor("attn_weights_169_cast")]; + tensor var_2601 = const()[name = tensor("op_2601"), val = tensor([1, 20, 77, 77])]; + tensor var_2602_cast = reshape(shape = var_2601, x = attn_weights_169_cast)[name = tensor("op_2602_cast")]; + tensor attn_weights_171_cast = add(x = var_2602_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_171_cast")]; + tensor var_2607 = const()[name = tensor("op_2607"), val = tensor([20, 77, 77])]; + tensor input_453_cast = reshape(shape = var_2607, x = attn_weights_171_cast)[name = tensor("input_453_cast")]; + tensor input_455_cast = softmax(axis = var_5, x = input_453_cast)[name = tensor("input_455_cast")]; tensor attn_output_169_transpose_x_0 = 
const()[name = tensor("attn_output_169_transpose_x_0"), val = tensor(false)]; tensor attn_output_169_transpose_y_0 = const()[name = tensor("attn_output_169_transpose_y_0"), val = tensor(false)]; - tensor attn_output_169 = matmul(transpose_x = attn_output_169_transpose_x_0, transpose_y = attn_output_169_transpose_y_0, x = input_455, y = value_states_115)[name = tensor("attn_output_169")]; - tensor var_2610 = const()[name = tensor("op_2610"), val = tensor([1, 20, 77, 64])]; - tensor attn_output_171 = reshape(shape = var_2610, x = attn_output_169)[name = tensor("attn_output_171")]; + tensor attn_output_169_cast = matmul(transpose_x = attn_output_169_transpose_x_0, transpose_y = attn_output_169_transpose_y_0, x = input_455_cast, y = value_states_115_cast)[name = tensor("attn_output_169_cast")]; + tensor var_2612 = const()[name = tensor("op_2612"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_171_cast = reshape(shape = var_2612, x = attn_output_169_cast)[name = tensor("attn_output_171_cast")]; tensor attn_output_173_perm_0 = const()[name = tensor("attn_output_173_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_2613 = const()[name = tensor("op_2613"), val = tensor([1, 77, 1280])]; - tensor transpose_16 = transpose(perm = attn_output_173_perm_0, x = attn_output_171)[name = tensor("transpose_16")]; - tensor input_457 = reshape(shape = var_2613, x = transpose_16)[name = tensor("input_457")]; - tensor hidden_states_171 = linear(bias = text_encoder_text_model_encoder_layers_28_self_attn_out_proj_bias, weight = text_encoder_text_model_encoder_layers_28_self_attn_out_proj_weight, x = input_457)[name = tensor("hidden_states_171")]; - tensor input_459 = add(x = input_451, y = hidden_states_171)[name = tensor("input_459")]; + tensor var_2615 = const()[name = tensor("op_2615"), val = tensor([1, 77, 1280])]; + tensor transpose_16 = transpose(perm = attn_output_173_perm_0, x = attn_output_171_cast)[name = tensor("transpose_16")]; + tensor input_457_cast = reshape(shape = var_2615, x = transpose_16)[name = tensor("input_457_cast")]; + tensor text_encoder_text_model_encoder_layers_28_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_28_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1238502784)))]; + tensor text_encoder_text_model_encoder_layers_28_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_28_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1241779648)))]; + tensor hidden_states_171_cast = linear(bias = text_encoder_text_model_encoder_layers_28_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_28_self_attn_out_proj_weight_to_fp16, x = input_457_cast)[name = tensor("hidden_states_171_cast")]; + tensor input_459_cast = add(x = input_451_cast, y = hidden_states_171_cast)[name = tensor("input_459_cast")]; tensor input_461_axes_0 = const()[name = tensor("input_461_axes_0"), val = tensor([-1])]; - tensor input_461 = layer_norm(axes = input_461_axes_0, beta = text_encoder_text_model_encoder_layers_28_layer_norm2_bias, epsilon = var_12, gamma = text_encoder_text_model_encoder_layers_28_layer_norm2_weight, x = input_459)[name = tensor("input_461")]; - tensor input_463 = linear(bias = text_encoder_text_model_encoder_layers_28_mlp_fc1_bias, weight = text_encoder_text_model_encoder_layers_28_mlp_fc1_weight, x = input_461)[name = 
tensor("input_463")]; + tensor text_encoder_text_model_encoder_layers_28_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_28_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1241782272)))]; + tensor text_encoder_text_model_encoder_layers_28_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_28_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1241784896)))]; + tensor input_461_cast = layer_norm(axes = input_461_axes_0, beta = text_encoder_text_model_encoder_layers_28_layer_norm2_bias_to_fp16, epsilon = var_13_to_fp16, gamma = text_encoder_text_model_encoder_layers_28_layer_norm2_weight_to_fp16, x = input_459_cast)[name = tensor("input_461_cast")]; + tensor text_encoder_text_model_encoder_layers_28_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_28_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1241787520)))]; + tensor text_encoder_text_model_encoder_layers_28_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_28_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1254894784)))]; + tensor input_463_cast = linear(bias = text_encoder_text_model_encoder_layers_28_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_28_mlp_fc1_weight_to_fp16, x = input_461_cast)[name = tensor("input_463_cast")]; tensor input_465_mode_0 = const()[name = tensor("input_465_mode_0"), val = tensor("EXACT")]; - tensor input_465 = gelu(mode = input_465_mode_0, x = input_463)[name = tensor("input_465")]; - tensor hidden_states_173 = linear(bias = text_encoder_text_model_encoder_layers_28_mlp_fc2_bias, weight = text_encoder_text_model_encoder_layers_28_mlp_fc2_weight, x = input_465)[name = tensor("hidden_states_173")]; - tensor input_467 = add(x = input_459, y = hidden_states_173)[name = tensor("input_467")]; + tensor input_465_cast = gelu(mode = input_465_mode_0, x = input_463_cast)[name = tensor("input_465_cast")]; + tensor text_encoder_text_model_encoder_layers_28_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_28_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1254905088)))]; + tensor text_encoder_text_model_encoder_layers_28_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_28_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1268012352)))]; + tensor hidden_states_173_cast = linear(bias = text_encoder_text_model_encoder_layers_28_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_28_mlp_fc2_weight_to_fp16, x = input_465_cast)[name = tensor("hidden_states_173_cast")]; + tensor input_467_cast = add(x = input_459_cast, y = hidden_states_173_cast)[name = tensor("input_467_cast")]; tensor hidden_states_175_axes_0 = const()[name = tensor("hidden_states_175_axes_0"), val = tensor([-1])]; - tensor hidden_states_175 = layer_norm(axes = hidden_states_175_axes_0, beta = text_encoder_text_model_encoder_layers_29_layer_norm1_bias, epsilon = var_12, gamma = text_encoder_text_model_encoder_layers_29_layer_norm1_weight, x = input_467)[name = tensor("hidden_states_175")]; - tensor var_2651 = 
linear(bias = text_encoder_text_model_encoder_layers_29_self_attn_q_proj_bias, weight = text_encoder_text_model_encoder_layers_29_self_attn_q_proj_weight, x = hidden_states_175)[name = tensor("op_2651")]; - tensor var_2652 = const()[name = tensor("op_2652"), val = tensor(0x1p-3)]; - tensor tensor_179 = mul(x = var_2651, y = var_2652)[name = tensor("tensor_179")]; - tensor tensor_175 = linear(bias = text_encoder_text_model_encoder_layers_29_self_attn_k_proj_bias, weight = text_encoder_text_model_encoder_layers_29_self_attn_k_proj_weight, x = hidden_states_175)[name = tensor("tensor_175")]; - tensor var_2657 = const()[name = tensor("op_2657"), val = tensor([1, -1, 20, 64])]; - tensor var_2658 = reshape(shape = var_2657, x = tensor_175)[name = tensor("op_2658")]; - tensor var_2659_perm_0 = const()[name = tensor("op_2659_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor tensor_177 = linear(bias = text_encoder_text_model_encoder_layers_29_self_attn_v_proj_bias, weight = text_encoder_text_model_encoder_layers_29_self_attn_v_proj_weight, x = hidden_states_175)[name = tensor("tensor_177")]; - tensor var_2664 = const()[name = tensor("op_2664"), val = tensor([1, -1, 20, 64])]; - tensor var_2665 = reshape(shape = var_2664, x = tensor_177)[name = tensor("op_2665")]; - tensor var_2666_perm_0 = const()[name = tensor("op_2666_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_2673 = const()[name = tensor("op_2673"), val = tensor([1, 77, 20, 64])]; - tensor var_2674 = reshape(shape = var_2673, x = tensor_179)[name = tensor("op_2674")]; - tensor var_2675_perm_0 = const()[name = tensor("op_2675_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_2677 = const()[name = tensor("op_2677"), val = tensor([20, -1, 64])]; - tensor transpose_13 = transpose(perm = var_2675_perm_0, x = var_2674)[name = tensor("transpose_13")]; - tensor query_states_59 = reshape(shape = var_2677, x = transpose_13)[name = tensor("query_states_59")]; + tensor text_encoder_text_model_encoder_layers_29_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_29_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1268014976)))]; + tensor text_encoder_text_model_encoder_layers_29_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_29_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1268017600)))]; + tensor hidden_states_175_cast = layer_norm(axes = hidden_states_175_axes_0, beta = text_encoder_text_model_encoder_layers_29_layer_norm1_bias_to_fp16, epsilon = var_13_to_fp16, gamma = text_encoder_text_model_encoder_layers_29_layer_norm1_weight_to_fp16, x = input_467_cast)[name = tensor("hidden_states_175_cast")]; + tensor text_encoder_text_model_encoder_layers_29_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_29_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1268020224)))]; + tensor text_encoder_text_model_encoder_layers_29_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_29_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1271297088)))]; + tensor var_2653_cast = linear(bias = text_encoder_text_model_encoder_layers_29_self_attn_q_proj_bias_to_fp16, weight = 
text_encoder_text_model_encoder_layers_29_self_attn_q_proj_weight_to_fp16, x = hidden_states_175_cast)[name = tensor("op_2653_cast")]; + tensor var_2654_to_fp16 = const()[name = tensor("op_2654_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_179_cast = mul(x = var_2653_cast, y = var_2654_to_fp16)[name = tensor("tensor_179_cast")]; + tensor text_encoder_text_model_encoder_layers_29_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_29_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1271299712)))]; + tensor text_encoder_text_model_encoder_layers_29_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_29_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1274576576)))]; + tensor tensor_175_cast = linear(bias = text_encoder_text_model_encoder_layers_29_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_29_self_attn_k_proj_weight_to_fp16, x = hidden_states_175_cast)[name = tensor("tensor_175_cast")]; + tensor var_2659 = const()[name = tensor("op_2659"), val = tensor([1, -1, 20, 64])]; + tensor var_2660_cast = reshape(shape = var_2659, x = tensor_175_cast)[name = tensor("op_2660_cast")]; + tensor var_2661_perm_0 = const()[name = tensor("op_2661_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_29_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_29_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1274579200)))]; + tensor text_encoder_text_model_encoder_layers_29_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_29_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1277856064)))]; + tensor tensor_177_cast = linear(bias = text_encoder_text_model_encoder_layers_29_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_29_self_attn_v_proj_weight_to_fp16, x = hidden_states_175_cast)[name = tensor("tensor_177_cast")]; + tensor var_2666 = const()[name = tensor("op_2666"), val = tensor([1, -1, 20, 64])]; + tensor var_2667_cast = reshape(shape = var_2666, x = tensor_177_cast)[name = tensor("op_2667_cast")]; + tensor var_2668_perm_0 = const()[name = tensor("op_2668_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_2675 = const()[name = tensor("op_2675"), val = tensor([1, 77, 20, 64])]; + tensor var_2676_cast = reshape(shape = var_2675, x = tensor_179_cast)[name = tensor("op_2676_cast")]; + tensor var_2677_perm_0 = const()[name = tensor("op_2677_perm_0"), val = tensor([0, 2, 1, 3])]; tensor var_2679 = const()[name = tensor("op_2679"), val = tensor([20, -1, 64])]; - tensor transpose_15 = transpose(perm = var_2659_perm_0, x = var_2658)[name = tensor("transpose_15")]; - tensor key_states_119 = reshape(shape = var_2679, x = transpose_15)[name = tensor("key_states_119")]; + tensor transpose_13 = transpose(perm = var_2677_perm_0, x = var_2676_cast)[name = tensor("transpose_13")]; + tensor query_states_59_cast = reshape(shape = var_2679, x = transpose_13)[name = tensor("query_states_59_cast")]; tensor var_2681 = const()[name = tensor("op_2681"), val = tensor([20, -1, 64])]; - tensor transpose_14 = transpose(perm = var_2666_perm_0, x = var_2665)[name = 
tensor("transpose_14")]; - tensor value_states_119 = reshape(shape = var_2681, x = transpose_14)[name = tensor("value_states_119")]; - tensor var_2684_perm_0 = const()[name = tensor("op_2684_perm_0"), val = tensor([0, 2, 1])]; + tensor transpose_15 = transpose(perm = var_2661_perm_0, x = var_2660_cast)[name = tensor("transpose_15")]; + tensor key_states_119_cast = reshape(shape = var_2681, x = transpose_15)[name = tensor("key_states_119_cast")]; + tensor var_2683 = const()[name = tensor("op_2683"), val = tensor([20, -1, 64])]; + tensor transpose_14 = transpose(perm = var_2668_perm_0, x = var_2667_cast)[name = tensor("transpose_14")]; + tensor value_states_119_cast = reshape(shape = var_2683, x = transpose_14)[name = tensor("value_states_119_cast")]; + tensor var_2686_perm_0 = const()[name = tensor("op_2686_perm_0"), val = tensor([0, 2, 1])]; tensor attn_weights_175_transpose_x_0 = const()[name = tensor("attn_weights_175_transpose_x_0"), val = tensor(false)]; tensor attn_weights_175_transpose_y_0 = const()[name = tensor("attn_weights_175_transpose_y_0"), val = tensor(false)]; - tensor transpose_12 = transpose(perm = var_2684_perm_0, x = key_states_119)[name = tensor("transpose_12")]; - tensor attn_weights_175 = matmul(transpose_x = attn_weights_175_transpose_x_0, transpose_y = attn_weights_175_transpose_y_0, x = query_states_59, y = transpose_12)[name = tensor("attn_weights_175")]; - tensor var_2686 = const()[name = tensor("op_2686"), val = tensor([1, 20, 77, 77])]; - tensor var_2687 = reshape(shape = var_2686, x = attn_weights_175)[name = tensor("op_2687")]; - tensor attn_weights_177 = add(x = var_2687, y = causal_attention_mask)[name = tensor("attn_weights_177")]; - tensor var_2692 = const()[name = tensor("op_2692"), val = tensor([20, 77, 77])]; - tensor input_469 = reshape(shape = var_2692, x = attn_weights_177)[name = tensor("input_469")]; - tensor input_471 = softmax(axis = var_5, x = input_469)[name = tensor("input_471")]; + tensor transpose_12 = transpose(perm = var_2686_perm_0, x = key_states_119_cast)[name = tensor("transpose_12")]; + tensor attn_weights_175_cast = matmul(transpose_x = attn_weights_175_transpose_x_0, transpose_y = attn_weights_175_transpose_y_0, x = query_states_59_cast, y = transpose_12)[name = tensor("attn_weights_175_cast")]; + tensor var_2688 = const()[name = tensor("op_2688"), val = tensor([1, 20, 77, 77])]; + tensor var_2689_cast = reshape(shape = var_2688, x = attn_weights_175_cast)[name = tensor("op_2689_cast")]; + tensor attn_weights_177_cast = add(x = var_2689_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_177_cast")]; + tensor var_2694 = const()[name = tensor("op_2694"), val = tensor([20, 77, 77])]; + tensor input_469_cast = reshape(shape = var_2694, x = attn_weights_177_cast)[name = tensor("input_469_cast")]; + tensor input_471_cast = softmax(axis = var_5, x = input_469_cast)[name = tensor("input_471_cast")]; tensor attn_output_175_transpose_x_0 = const()[name = tensor("attn_output_175_transpose_x_0"), val = tensor(false)]; tensor attn_output_175_transpose_y_0 = const()[name = tensor("attn_output_175_transpose_y_0"), val = tensor(false)]; - tensor attn_output_175 = matmul(transpose_x = attn_output_175_transpose_x_0, transpose_y = attn_output_175_transpose_y_0, x = input_471, y = value_states_119)[name = tensor("attn_output_175")]; - tensor var_2697 = const()[name = tensor("op_2697"), val = tensor([1, 20, 77, 64])]; - tensor attn_output_177 = reshape(shape = var_2697, x = attn_output_175)[name = tensor("attn_output_177")]; + tensor 
attn_output_175_cast = matmul(transpose_x = attn_output_175_transpose_x_0, transpose_y = attn_output_175_transpose_y_0, x = input_471_cast, y = value_states_119_cast)[name = tensor("attn_output_175_cast")]; + tensor var_2699 = const()[name = tensor("op_2699"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_177_cast = reshape(shape = var_2699, x = attn_output_175_cast)[name = tensor("attn_output_177_cast")]; tensor attn_output_179_perm_0 = const()[name = tensor("attn_output_179_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_2700 = const()[name = tensor("op_2700"), val = tensor([1, 77, 1280])]; - tensor transpose_11 = transpose(perm = attn_output_179_perm_0, x = attn_output_177)[name = tensor("transpose_11")]; - tensor input_473 = reshape(shape = var_2700, x = transpose_11)[name = tensor("input_473")]; - tensor hidden_states_177 = linear(bias = text_encoder_text_model_encoder_layers_29_self_attn_out_proj_bias, weight = text_encoder_text_model_encoder_layers_29_self_attn_out_proj_weight, x = input_473)[name = tensor("hidden_states_177")]; - tensor input_475 = add(x = input_467, y = hidden_states_177)[name = tensor("input_475")]; + tensor var_2702 = const()[name = tensor("op_2702"), val = tensor([1, 77, 1280])]; + tensor transpose_11 = transpose(perm = attn_output_179_perm_0, x = attn_output_177_cast)[name = tensor("transpose_11")]; + tensor input_473_cast = reshape(shape = var_2702, x = transpose_11)[name = tensor("input_473_cast")]; + tensor text_encoder_text_model_encoder_layers_29_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_29_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1277858688)))]; + tensor text_encoder_text_model_encoder_layers_29_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_29_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1281135552)))]; + tensor hidden_states_177_cast = linear(bias = text_encoder_text_model_encoder_layers_29_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_29_self_attn_out_proj_weight_to_fp16, x = input_473_cast)[name = tensor("hidden_states_177_cast")]; + tensor input_475_cast = add(x = input_467_cast, y = hidden_states_177_cast)[name = tensor("input_475_cast")]; tensor input_477_axes_0 = const()[name = tensor("input_477_axes_0"), val = tensor([-1])]; - tensor input_477 = layer_norm(axes = input_477_axes_0, beta = text_encoder_text_model_encoder_layers_29_layer_norm2_bias, epsilon = var_12, gamma = text_encoder_text_model_encoder_layers_29_layer_norm2_weight, x = input_475)[name = tensor("input_477")]; - tensor input_479 = linear(bias = text_encoder_text_model_encoder_layers_29_mlp_fc1_bias, weight = text_encoder_text_model_encoder_layers_29_mlp_fc1_weight, x = input_477)[name = tensor("input_479")]; + tensor text_encoder_text_model_encoder_layers_29_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_29_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1281138176)))]; + tensor text_encoder_text_model_encoder_layers_29_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_29_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1281140800)))]; + tensor input_477_cast = 
layer_norm(axes = input_477_axes_0, beta = text_encoder_text_model_encoder_layers_29_layer_norm2_bias_to_fp16, epsilon = var_13_to_fp16, gamma = text_encoder_text_model_encoder_layers_29_layer_norm2_weight_to_fp16, x = input_475_cast)[name = tensor("input_477_cast")]; + tensor text_encoder_text_model_encoder_layers_29_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_29_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1281143424)))]; + tensor text_encoder_text_model_encoder_layers_29_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_29_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1294250688)))]; + tensor input_479_cast = linear(bias = text_encoder_text_model_encoder_layers_29_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_29_mlp_fc1_weight_to_fp16, x = input_477_cast)[name = tensor("input_479_cast")]; tensor input_481_mode_0 = const()[name = tensor("input_481_mode_0"), val = tensor("EXACT")]; - tensor input_481 = gelu(mode = input_481_mode_0, x = input_479)[name = tensor("input_481")]; - tensor hidden_states_179 = linear(bias = text_encoder_text_model_encoder_layers_29_mlp_fc2_bias, weight = text_encoder_text_model_encoder_layers_29_mlp_fc2_weight, x = input_481)[name = tensor("hidden_states_179")]; - tensor input_483 = add(x = input_475, y = hidden_states_179)[name = tensor("input_483")]; + tensor input_481_cast = gelu(mode = input_481_mode_0, x = input_479_cast)[name = tensor("input_481_cast")]; + tensor text_encoder_text_model_encoder_layers_29_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_29_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1294260992)))]; + tensor text_encoder_text_model_encoder_layers_29_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_29_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1307368256)))]; + tensor hidden_states_179_cast = linear(bias = text_encoder_text_model_encoder_layers_29_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_29_mlp_fc2_weight_to_fp16, x = input_481_cast)[name = tensor("hidden_states_179_cast")]; + tensor input_483_cast = add(x = input_475_cast, y = hidden_states_179_cast)[name = tensor("input_483_cast")]; tensor hidden_states_181_axes_0 = const()[name = tensor("hidden_states_181_axes_0"), val = tensor([-1])]; - tensor hidden_states_181 = layer_norm(axes = hidden_states_181_axes_0, beta = text_encoder_text_model_encoder_layers_30_layer_norm1_bias, epsilon = var_12, gamma = text_encoder_text_model_encoder_layers_30_layer_norm1_weight, x = input_483)[name = tensor("hidden_states_181")]; - tensor var_2738 = linear(bias = text_encoder_text_model_encoder_layers_30_self_attn_q_proj_bias, weight = text_encoder_text_model_encoder_layers_30_self_attn_q_proj_weight, x = hidden_states_181)[name = tensor("op_2738")]; - tensor var_2739 = const()[name = tensor("op_2739"), val = tensor(0x1p-3)]; - tensor tensor_185 = mul(x = var_2738, y = var_2739)[name = tensor("tensor_185")]; - tensor tensor_181 = linear(bias = text_encoder_text_model_encoder_layers_30_self_attn_k_proj_bias, weight = text_encoder_text_model_encoder_layers_30_self_attn_k_proj_weight, x = hidden_states_181)[name = tensor("tensor_181")]; - tensor 
var_2744 = const()[name = tensor("op_2744"), val = tensor([1, -1, 20, 64])]; - tensor var_2745 = reshape(shape = var_2744, x = tensor_181)[name = tensor("op_2745")]; - tensor var_2746_perm_0 = const()[name = tensor("op_2746_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor tensor_183 = linear(bias = text_encoder_text_model_encoder_layers_30_self_attn_v_proj_bias, weight = text_encoder_text_model_encoder_layers_30_self_attn_v_proj_weight, x = hidden_states_181)[name = tensor("tensor_183")]; - tensor var_2751 = const()[name = tensor("op_2751"), val = tensor([1, -1, 20, 64])]; - tensor var_2752 = reshape(shape = var_2751, x = tensor_183)[name = tensor("op_2752")]; - tensor var_2753_perm_0 = const()[name = tensor("op_2753_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_2760 = const()[name = tensor("op_2760"), val = tensor([1, 77, 20, 64])]; - tensor var_2761 = reshape(shape = var_2760, x = tensor_185)[name = tensor("op_2761")]; - tensor var_2762_perm_0 = const()[name = tensor("op_2762_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_2764 = const()[name = tensor("op_2764"), val = tensor([20, -1, 64])]; - tensor transpose_8 = transpose(perm = var_2762_perm_0, x = var_2761)[name = tensor("transpose_8")]; - tensor query_states_61 = reshape(shape = var_2764, x = transpose_8)[name = tensor("query_states_61")]; + tensor text_encoder_text_model_encoder_layers_30_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_30_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1307370880)))]; + tensor text_encoder_text_model_encoder_layers_30_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_30_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1307373504)))]; + tensor hidden_states_181_cast = layer_norm(axes = hidden_states_181_axes_0, beta = text_encoder_text_model_encoder_layers_30_layer_norm1_bias_to_fp16, epsilon = var_13_to_fp16, gamma = text_encoder_text_model_encoder_layers_30_layer_norm1_weight_to_fp16, x = input_483_cast)[name = tensor("hidden_states_181_cast")]; + tensor text_encoder_text_model_encoder_layers_30_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_30_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1307376128)))]; + tensor text_encoder_text_model_encoder_layers_30_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_30_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1310652992)))]; + tensor var_2740_cast = linear(bias = text_encoder_text_model_encoder_layers_30_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_30_self_attn_q_proj_weight_to_fp16, x = hidden_states_181_cast)[name = tensor("op_2740_cast")]; + tensor var_2741_to_fp16 = const()[name = tensor("op_2741_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_185_cast = mul(x = var_2740_cast, y = var_2741_to_fp16)[name = tensor("tensor_185_cast")]; + tensor text_encoder_text_model_encoder_layers_30_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_30_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1310655616)))]; + tensor 
text_encoder_text_model_encoder_layers_30_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_30_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1313932480)))]; + tensor tensor_181_cast = linear(bias = text_encoder_text_model_encoder_layers_30_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_30_self_attn_k_proj_weight_to_fp16, x = hidden_states_181_cast)[name = tensor("tensor_181_cast")]; + tensor var_2746 = const()[name = tensor("op_2746"), val = tensor([1, -1, 20, 64])]; + tensor var_2747_cast = reshape(shape = var_2746, x = tensor_181_cast)[name = tensor("op_2747_cast")]; + tensor var_2748_perm_0 = const()[name = tensor("op_2748_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_30_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_30_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1313935104)))]; + tensor text_encoder_text_model_encoder_layers_30_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_30_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1317211968)))]; + tensor tensor_183_cast = linear(bias = text_encoder_text_model_encoder_layers_30_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_30_self_attn_v_proj_weight_to_fp16, x = hidden_states_181_cast)[name = tensor("tensor_183_cast")]; + tensor var_2753 = const()[name = tensor("op_2753"), val = tensor([1, -1, 20, 64])]; + tensor var_2754_cast = reshape(shape = var_2753, x = tensor_183_cast)[name = tensor("op_2754_cast")]; + tensor var_2755_perm_0 = const()[name = tensor("op_2755_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_2762 = const()[name = tensor("op_2762"), val = tensor([1, 77, 20, 64])]; + tensor var_2763_cast = reshape(shape = var_2762, x = tensor_185_cast)[name = tensor("op_2763_cast")]; + tensor var_2764_perm_0 = const()[name = tensor("op_2764_perm_0"), val = tensor([0, 2, 1, 3])]; tensor var_2766 = const()[name = tensor("op_2766"), val = tensor([20, -1, 64])]; - tensor transpose_10 = transpose(perm = var_2746_perm_0, x = var_2745)[name = tensor("transpose_10")]; - tensor key_states_123 = reshape(shape = var_2766, x = transpose_10)[name = tensor("key_states_123")]; + tensor transpose_8 = transpose(perm = var_2764_perm_0, x = var_2763_cast)[name = tensor("transpose_8")]; + tensor query_states_61_cast = reshape(shape = var_2766, x = transpose_8)[name = tensor("query_states_61_cast")]; tensor var_2768 = const()[name = tensor("op_2768"), val = tensor([20, -1, 64])]; - tensor transpose_9 = transpose(perm = var_2753_perm_0, x = var_2752)[name = tensor("transpose_9")]; - tensor value_states_123 = reshape(shape = var_2768, x = transpose_9)[name = tensor("value_states_123")]; - tensor var_2771_perm_0 = const()[name = tensor("op_2771_perm_0"), val = tensor([0, 2, 1])]; + tensor transpose_10 = transpose(perm = var_2748_perm_0, x = var_2747_cast)[name = tensor("transpose_10")]; + tensor key_states_123_cast = reshape(shape = var_2768, x = transpose_10)[name = tensor("key_states_123_cast")]; + tensor var_2770 = const()[name = tensor("op_2770"), val = tensor([20, -1, 64])]; + tensor transpose_9 = transpose(perm = var_2755_perm_0, x = var_2754_cast)[name = tensor("transpose_9")]; + tensor 
value_states_123_cast = reshape(shape = var_2770, x = transpose_9)[name = tensor("value_states_123_cast")]; + tensor var_2773_perm_0 = const()[name = tensor("op_2773_perm_0"), val = tensor([0, 2, 1])]; tensor attn_weights_181_transpose_x_0 = const()[name = tensor("attn_weights_181_transpose_x_0"), val = tensor(false)]; tensor attn_weights_181_transpose_y_0 = const()[name = tensor("attn_weights_181_transpose_y_0"), val = tensor(false)]; - tensor transpose_7 = transpose(perm = var_2771_perm_0, x = key_states_123)[name = tensor("transpose_7")]; - tensor attn_weights_181 = matmul(transpose_x = attn_weights_181_transpose_x_0, transpose_y = attn_weights_181_transpose_y_0, x = query_states_61, y = transpose_7)[name = tensor("attn_weights_181")]; - tensor var_2773 = const()[name = tensor("op_2773"), val = tensor([1, 20, 77, 77])]; - tensor var_2774 = reshape(shape = var_2773, x = attn_weights_181)[name = tensor("op_2774")]; - tensor attn_weights_183 = add(x = var_2774, y = causal_attention_mask)[name = tensor("attn_weights_183")]; - tensor var_2779 = const()[name = tensor("op_2779"), val = tensor([20, 77, 77])]; - tensor input_485 = reshape(shape = var_2779, x = attn_weights_183)[name = tensor("input_485")]; - tensor input_487 = softmax(axis = var_5, x = input_485)[name = tensor("input_487")]; + tensor transpose_7 = transpose(perm = var_2773_perm_0, x = key_states_123_cast)[name = tensor("transpose_7")]; + tensor attn_weights_181_cast = matmul(transpose_x = attn_weights_181_transpose_x_0, transpose_y = attn_weights_181_transpose_y_0, x = query_states_61_cast, y = transpose_7)[name = tensor("attn_weights_181_cast")]; + tensor var_2775 = const()[name = tensor("op_2775"), val = tensor([1, 20, 77, 77])]; + tensor var_2776_cast = reshape(shape = var_2775, x = attn_weights_181_cast)[name = tensor("op_2776_cast")]; + tensor attn_weights_183_cast = add(x = var_2776_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_183_cast")]; + tensor var_2781 = const()[name = tensor("op_2781"), val = tensor([20, 77, 77])]; + tensor input_485_cast = reshape(shape = var_2781, x = attn_weights_183_cast)[name = tensor("input_485_cast")]; + tensor input_487_cast = softmax(axis = var_5, x = input_485_cast)[name = tensor("input_487_cast")]; tensor attn_output_181_transpose_x_0 = const()[name = tensor("attn_output_181_transpose_x_0"), val = tensor(false)]; tensor attn_output_181_transpose_y_0 = const()[name = tensor("attn_output_181_transpose_y_0"), val = tensor(false)]; - tensor attn_output_181 = matmul(transpose_x = attn_output_181_transpose_x_0, transpose_y = attn_output_181_transpose_y_0, x = input_487, y = value_states_123)[name = tensor("attn_output_181")]; - tensor var_2784 = const()[name = tensor("op_2784"), val = tensor([1, 20, 77, 64])]; - tensor attn_output_183 = reshape(shape = var_2784, x = attn_output_181)[name = tensor("attn_output_183")]; + tensor attn_output_181_cast = matmul(transpose_x = attn_output_181_transpose_x_0, transpose_y = attn_output_181_transpose_y_0, x = input_487_cast, y = value_states_123_cast)[name = tensor("attn_output_181_cast")]; + tensor var_2786 = const()[name = tensor("op_2786"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_183_cast = reshape(shape = var_2786, x = attn_output_181_cast)[name = tensor("attn_output_183_cast")]; tensor attn_output_185_perm_0 = const()[name = tensor("attn_output_185_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_2787 = const()[name = tensor("op_2787"), val = tensor([1, 77, 1280])]; - tensor transpose_6 = transpose(perm = 
attn_output_185_perm_0, x = attn_output_183)[name = tensor("transpose_6")]; - tensor input_489 = reshape(shape = var_2787, x = transpose_6)[name = tensor("input_489")]; - tensor hidden_states_183 = linear(bias = text_encoder_text_model_encoder_layers_30_self_attn_out_proj_bias, weight = text_encoder_text_model_encoder_layers_30_self_attn_out_proj_weight, x = input_489)[name = tensor("hidden_states_183")]; - tensor input_491 = add(x = input_483, y = hidden_states_183)[name = tensor("input_491")]; + tensor var_2789 = const()[name = tensor("op_2789"), val = tensor([1, 77, 1280])]; + tensor transpose_6 = transpose(perm = attn_output_185_perm_0, x = attn_output_183_cast)[name = tensor("transpose_6")]; + tensor input_489_cast = reshape(shape = var_2789, x = transpose_6)[name = tensor("input_489_cast")]; + tensor text_encoder_text_model_encoder_layers_30_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_30_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1317214592)))]; + tensor text_encoder_text_model_encoder_layers_30_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_30_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1320491456)))]; + tensor hidden_states_183_cast = linear(bias = text_encoder_text_model_encoder_layers_30_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_30_self_attn_out_proj_weight_to_fp16, x = input_489_cast)[name = tensor("hidden_states_183_cast")]; + tensor input_491_cast = add(x = input_483_cast, y = hidden_states_183_cast)[name = tensor("input_491_cast")]; tensor input_493_axes_0 = const()[name = tensor("input_493_axes_0"), val = tensor([-1])]; - tensor input_493 = layer_norm(axes = input_493_axes_0, beta = text_encoder_text_model_encoder_layers_30_layer_norm2_bias, epsilon = var_12, gamma = text_encoder_text_model_encoder_layers_30_layer_norm2_weight, x = input_491)[name = tensor("input_493")]; - tensor input_495 = linear(bias = text_encoder_text_model_encoder_layers_30_mlp_fc1_bias, weight = text_encoder_text_model_encoder_layers_30_mlp_fc1_weight, x = input_493)[name = tensor("input_495")]; + tensor text_encoder_text_model_encoder_layers_30_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_30_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1320494080)))]; + tensor text_encoder_text_model_encoder_layers_30_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_30_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1320496704)))]; + tensor input_493_cast = layer_norm(axes = input_493_axes_0, beta = text_encoder_text_model_encoder_layers_30_layer_norm2_bias_to_fp16, epsilon = var_13_to_fp16, gamma = text_encoder_text_model_encoder_layers_30_layer_norm2_weight_to_fp16, x = input_491_cast)[name = tensor("input_493_cast")]; + tensor text_encoder_text_model_encoder_layers_30_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_30_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1320499328)))]; + tensor text_encoder_text_model_encoder_layers_30_mlp_fc1_bias_to_fp16 = const()[name = 
tensor("text_encoder_text_model_encoder_layers_30_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1333606592)))]; + tensor input_495_cast = linear(bias = text_encoder_text_model_encoder_layers_30_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_30_mlp_fc1_weight_to_fp16, x = input_493_cast)[name = tensor("input_495_cast")]; tensor input_497_mode_0 = const()[name = tensor("input_497_mode_0"), val = tensor("EXACT")]; - tensor input_497 = gelu(mode = input_497_mode_0, x = input_495)[name = tensor("input_497")]; - tensor hidden_states_185 = linear(bias = text_encoder_text_model_encoder_layers_30_mlp_fc2_bias, weight = text_encoder_text_model_encoder_layers_30_mlp_fc2_weight, x = input_497)[name = tensor("hidden_states_185")]; - tensor last_hidden_state = add(x = input_491, y = hidden_states_185)[name = tensor("input_499")]; + tensor input_497_cast = gelu(mode = input_497_mode_0, x = input_495_cast)[name = tensor("input_497_cast")]; + tensor text_encoder_text_model_encoder_layers_30_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_30_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1333616896)))]; + tensor text_encoder_text_model_encoder_layers_30_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_30_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1346724160)))]; + tensor hidden_states_185_cast = linear(bias = text_encoder_text_model_encoder_layers_30_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_30_mlp_fc2_weight_to_fp16, x = input_497_cast)[name = tensor("hidden_states_185_cast")]; + tensor input_499_cast = add(x = input_491_cast, y = hidden_states_185_cast)[name = tensor("input_499_cast")]; + tensor input_499_cast_to_fp32_dtype_0 = const()[name = tensor("input_499_cast_to_fp32_dtype_0"), val = tensor("fp32")]; tensor hidden_states_187_axes_0 = const()[name = tensor("hidden_states_187_axes_0"), val = tensor([-1])]; - tensor hidden_states_187 = layer_norm(axes = hidden_states_187_axes_0, beta = text_encoder_text_model_encoder_layers_31_layer_norm1_bias, epsilon = var_12, gamma = text_encoder_text_model_encoder_layers_31_layer_norm1_weight, x = last_hidden_state)[name = tensor("hidden_states_187")]; - tensor var_2825 = linear(bias = text_encoder_text_model_encoder_layers_31_self_attn_q_proj_bias, weight = text_encoder_text_model_encoder_layers_31_self_attn_q_proj_weight, x = hidden_states_187)[name = tensor("op_2825")]; - tensor var_2826 = const()[name = tensor("op_2826"), val = tensor(0x1p-3)]; - tensor tensor_workaround = mul(x = var_2825, y = var_2826)[name = tensor("tensor_workaround")]; - tensor tensor_187 = linear(bias = text_encoder_text_model_encoder_layers_31_self_attn_k_proj_bias, weight = text_encoder_text_model_encoder_layers_31_self_attn_k_proj_weight, x = hidden_states_187)[name = tensor("tensor_187")]; - tensor var_2831 = const()[name = tensor("op_2831"), val = tensor([1, -1, 20, 64])]; - tensor var_2832 = reshape(shape = var_2831, x = tensor_187)[name = tensor("op_2832")]; - tensor var_2833_perm_0 = const()[name = tensor("op_2833_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor tensor_189 = linear(bias = text_encoder_text_model_encoder_layers_31_self_attn_v_proj_bias, weight = text_encoder_text_model_encoder_layers_31_self_attn_v_proj_weight, x = hidden_states_187)[name = 
tensor("tensor_189")]; - tensor var_2838 = const()[name = tensor("op_2838"), val = tensor([1, -1, 20, 64])]; - tensor var_2839 = reshape(shape = var_2838, x = tensor_189)[name = tensor("op_2839")]; - tensor var_2840_perm_0 = const()[name = tensor("op_2840_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_2847 = const()[name = tensor("op_2847"), val = tensor([1, 77, 20, 64])]; - tensor var_2848 = reshape(shape = var_2847, x = tensor_workaround)[name = tensor("op_2848")]; - tensor var_2849_perm_0 = const()[name = tensor("op_2849_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_2851 = const()[name = tensor("op_2851"), val = tensor([20, -1, 64])]; - tensor transpose_3 = transpose(perm = var_2849_perm_0, x = var_2848)[name = tensor("transpose_3")]; - tensor query_states = reshape(shape = var_2851, x = transpose_3)[name = tensor("query_states")]; + tensor text_encoder_text_model_encoder_layers_31_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_31_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1346726784)))]; + tensor text_encoder_text_model_encoder_layers_31_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_31_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1346729408)))]; + tensor hidden_states_187_cast = layer_norm(axes = hidden_states_187_axes_0, beta = text_encoder_text_model_encoder_layers_31_layer_norm1_bias_to_fp16, epsilon = var_13_to_fp16, gamma = text_encoder_text_model_encoder_layers_31_layer_norm1_weight_to_fp16, x = input_499_cast)[name = tensor("hidden_states_187_cast")]; + tensor text_encoder_text_model_encoder_layers_31_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_31_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1346732032)))]; + tensor text_encoder_text_model_encoder_layers_31_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_31_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1350008896)))]; + tensor var_2827_cast = linear(bias = text_encoder_text_model_encoder_layers_31_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_31_self_attn_q_proj_weight_to_fp16, x = hidden_states_187_cast)[name = tensor("op_2827_cast")]; + tensor var_2828_to_fp16 = const()[name = tensor("op_2828_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_cast = mul(x = var_2827_cast, y = var_2828_to_fp16)[name = tensor("tensor_cast")]; + tensor text_encoder_text_model_encoder_layers_31_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_31_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1350011520)))]; + tensor text_encoder_text_model_encoder_layers_31_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_31_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1353288384)))]; + tensor tensor_187_cast = linear(bias = text_encoder_text_model_encoder_layers_31_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_31_self_attn_k_proj_weight_to_fp16, x = hidden_states_187_cast)[name = 
tensor("tensor_187_cast")]; + tensor var_2833 = const()[name = tensor("op_2833"), val = tensor([1, -1, 20, 64])]; + tensor var_2834_cast = reshape(shape = var_2833, x = tensor_187_cast)[name = tensor("op_2834_cast")]; + tensor var_2835_perm_0 = const()[name = tensor("op_2835_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_31_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_31_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1353291008)))]; + tensor text_encoder_text_model_encoder_layers_31_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_31_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1356567872)))]; + tensor tensor_189_cast = linear(bias = text_encoder_text_model_encoder_layers_31_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_31_self_attn_v_proj_weight_to_fp16, x = hidden_states_187_cast)[name = tensor("tensor_189_cast")]; + tensor var_2840 = const()[name = tensor("op_2840"), val = tensor([1, -1, 20, 64])]; + tensor var_2841_cast = reshape(shape = var_2840, x = tensor_189_cast)[name = tensor("op_2841_cast")]; + tensor var_2842_perm_0 = const()[name = tensor("op_2842_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_2849 = const()[name = tensor("op_2849"), val = tensor([1, 77, 20, 64])]; + tensor var_2850_cast = reshape(shape = var_2849, x = tensor_cast)[name = tensor("op_2850_cast")]; + tensor var_2851_perm_0 = const()[name = tensor("op_2851_perm_0"), val = tensor([0, 2, 1, 3])]; tensor var_2853 = const()[name = tensor("op_2853"), val = tensor([20, -1, 64])]; - tensor transpose_5 = transpose(perm = var_2833_perm_0, x = var_2832)[name = tensor("transpose_5")]; - tensor key_states = reshape(shape = var_2853, x = transpose_5)[name = tensor("key_states")]; + tensor transpose_3 = transpose(perm = var_2851_perm_0, x = var_2850_cast)[name = tensor("transpose_3")]; + tensor query_states_cast = reshape(shape = var_2853, x = transpose_3)[name = tensor("query_states_cast")]; tensor var_2855 = const()[name = tensor("op_2855"), val = tensor([20, -1, 64])]; - tensor transpose_4 = transpose(perm = var_2840_perm_0, x = var_2839)[name = tensor("transpose_4")]; - tensor value_states = reshape(shape = var_2855, x = transpose_4)[name = tensor("value_states")]; - tensor var_2858_perm_0 = const()[name = tensor("op_2858_perm_0"), val = tensor([0, 2, 1])]; + tensor transpose_5 = transpose(perm = var_2835_perm_0, x = var_2834_cast)[name = tensor("transpose_5")]; + tensor key_states_cast = reshape(shape = var_2855, x = transpose_5)[name = tensor("key_states_cast")]; + tensor var_2857 = const()[name = tensor("op_2857"), val = tensor([20, -1, 64])]; + tensor transpose_4 = transpose(perm = var_2842_perm_0, x = var_2841_cast)[name = tensor("transpose_4")]; + tensor value_states_cast = reshape(shape = var_2857, x = transpose_4)[name = tensor("value_states_cast")]; + tensor var_2860_perm_0 = const()[name = tensor("op_2860_perm_0"), val = tensor([0, 2, 1])]; tensor attn_weights_187_transpose_x_0 = const()[name = tensor("attn_weights_187_transpose_x_0"), val = tensor(false)]; tensor attn_weights_187_transpose_y_0 = const()[name = tensor("attn_weights_187_transpose_y_0"), val = tensor(false)]; - tensor transpose_2 = transpose(perm = var_2858_perm_0, x = key_states)[name = tensor("transpose_2")]; - tensor 
attn_weights_187 = matmul(transpose_x = attn_weights_187_transpose_x_0, transpose_y = attn_weights_187_transpose_y_0, x = query_states, y = transpose_2)[name = tensor("attn_weights_187")]; - tensor var_2860 = const()[name = tensor("op_2860"), val = tensor([1, 20, 77, 77])]; - tensor var_2861 = reshape(shape = var_2860, x = attn_weights_187)[name = tensor("op_2861")]; - tensor attn_weights_189 = add(x = var_2861, y = causal_attention_mask)[name = tensor("attn_weights_189")]; - tensor var_2866 = const()[name = tensor("op_2866"), val = tensor([20, 77, 77])]; - tensor input_501 = reshape(shape = var_2866, x = attn_weights_189)[name = tensor("input_501")]; - tensor input_503 = softmax(axis = var_5, x = input_501)[name = tensor("input_503")]; + tensor transpose_2 = transpose(perm = var_2860_perm_0, x = key_states_cast)[name = tensor("transpose_2")]; + tensor attn_weights_187_cast = matmul(transpose_x = attn_weights_187_transpose_x_0, transpose_y = attn_weights_187_transpose_y_0, x = query_states_cast, y = transpose_2)[name = tensor("attn_weights_187_cast")]; + tensor var_2862 = const()[name = tensor("op_2862"), val = tensor([1, 20, 77, 77])]; + tensor var_2863_cast = reshape(shape = var_2862, x = attn_weights_187_cast)[name = tensor("op_2863_cast")]; + tensor attn_weights_189_cast = add(x = var_2863_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_189_cast")]; + tensor var_2868 = const()[name = tensor("op_2868"), val = tensor([20, 77, 77])]; + tensor input_501_cast = reshape(shape = var_2868, x = attn_weights_189_cast)[name = tensor("input_501_cast")]; + tensor input_503_cast = softmax(axis = var_5, x = input_501_cast)[name = tensor("input_503_cast")]; tensor attn_output_187_transpose_x_0 = const()[name = tensor("attn_output_187_transpose_x_0"), val = tensor(false)]; tensor attn_output_187_transpose_y_0 = const()[name = tensor("attn_output_187_transpose_y_0"), val = tensor(false)]; - tensor attn_output_187 = matmul(transpose_x = attn_output_187_transpose_x_0, transpose_y = attn_output_187_transpose_y_0, x = input_503, y = value_states)[name = tensor("attn_output_187")]; - tensor var_2871 = const()[name = tensor("op_2871"), val = tensor([1, 20, 77, 64])]; - tensor attn_output_189 = reshape(shape = var_2871, x = attn_output_187)[name = tensor("attn_output_189")]; + tensor attn_output_187_cast = matmul(transpose_x = attn_output_187_transpose_x_0, transpose_y = attn_output_187_transpose_y_0, x = input_503_cast, y = value_states_cast)[name = tensor("attn_output_187_cast")]; + tensor var_2873 = const()[name = tensor("op_2873"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_189_cast = reshape(shape = var_2873, x = attn_output_187_cast)[name = tensor("attn_output_189_cast")]; tensor attn_output_perm_0 = const()[name = tensor("attn_output_perm_0"), val = tensor([0, 2, 1, 3])]; - tensor var_2874 = const()[name = tensor("op_2874"), val = tensor([1, 77, 1280])]; - tensor transpose_1 = transpose(perm = attn_output_perm_0, x = attn_output_189)[name = tensor("transpose_1")]; - tensor input_505 = reshape(shape = var_2874, x = transpose_1)[name = tensor("input_505")]; - tensor hidden_states_189 = linear(bias = text_encoder_text_model_encoder_layers_31_self_attn_out_proj_bias, weight = text_encoder_text_model_encoder_layers_31_self_attn_out_proj_weight, x = input_505)[name = tensor("hidden_states_189")]; - tensor input_507 = add(x = last_hidden_state, y = hidden_states_189)[name = tensor("input_507")]; + tensor var_2876 = const()[name = tensor("op_2876"), val = tensor([1, 77, 1280])]; + 
tensor transpose_1 = transpose(perm = attn_output_perm_0, x = attn_output_189_cast)[name = tensor("transpose_1")]; + tensor input_505_cast = reshape(shape = var_2876, x = transpose_1)[name = tensor("input_505_cast")]; + tensor text_encoder_text_model_encoder_layers_31_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_31_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1356570496)))]; + tensor text_encoder_text_model_encoder_layers_31_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_31_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1359847360)))]; + tensor hidden_states_189_cast = linear(bias = text_encoder_text_model_encoder_layers_31_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_31_self_attn_out_proj_weight_to_fp16, x = input_505_cast)[name = tensor("hidden_states_189_cast")]; + tensor input_507_cast = add(x = input_499_cast, y = hidden_states_189_cast)[name = tensor("input_507_cast")]; tensor input_509_axes_0 = const()[name = tensor("input_509_axes_0"), val = tensor([-1])]; - tensor input_509 = layer_norm(axes = input_509_axes_0, beta = text_encoder_text_model_encoder_layers_31_layer_norm2_bias, epsilon = var_12, gamma = text_encoder_text_model_encoder_layers_31_layer_norm2_weight, x = input_507)[name = tensor("input_509")]; - tensor input_511 = linear(bias = text_encoder_text_model_encoder_layers_31_mlp_fc1_bias, weight = text_encoder_text_model_encoder_layers_31_mlp_fc1_weight, x = input_509)[name = tensor("input_511")]; + tensor text_encoder_text_model_encoder_layers_31_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_31_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1359849984)))]; + tensor text_encoder_text_model_encoder_layers_31_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_31_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1359852608)))]; + tensor input_509_cast = layer_norm(axes = input_509_axes_0, beta = text_encoder_text_model_encoder_layers_31_layer_norm2_bias_to_fp16, epsilon = var_13_to_fp16, gamma = text_encoder_text_model_encoder_layers_31_layer_norm2_weight_to_fp16, x = input_507_cast)[name = tensor("input_509_cast")]; + tensor text_encoder_text_model_encoder_layers_31_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_31_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1359855232)))]; + tensor text_encoder_text_model_encoder_layers_31_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_31_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1372962496)))]; + tensor input_511_cast = linear(bias = text_encoder_text_model_encoder_layers_31_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_31_mlp_fc1_weight_to_fp16, x = input_509_cast)[name = tensor("input_511_cast")]; tensor input_513_mode_0 = const()[name = tensor("input_513_mode_0"), val = tensor("EXACT")]; - tensor input_513 = gelu(mode = input_513_mode_0, x = input_511)[name = tensor("input_513")]; - tensor 
hidden_states = linear(bias = text_encoder_text_model_encoder_layers_31_mlp_fc2_bias, weight = text_encoder_text_model_encoder_layers_31_mlp_fc2_weight, x = input_513)[name = tensor("hidden_states")]; - tensor input_515 = add(x = input_507, y = hidden_states)[name = tensor("input_515")]; + tensor input_513_cast = gelu(mode = input_513_mode_0, x = input_511_cast)[name = tensor("input_513_cast")]; + tensor text_encoder_text_model_encoder_layers_31_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_31_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1372972800)))]; + tensor text_encoder_text_model_encoder_layers_31_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_31_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1386080064)))]; + tensor hidden_states_cast = linear(bias = text_encoder_text_model_encoder_layers_31_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_31_mlp_fc2_weight_to_fp16, x = input_513_cast)[name = tensor("hidden_states_cast")]; + tensor input_515_cast = add(x = input_507_cast, y = hidden_states_cast)[name = tensor("input_515_cast")]; tensor last_hidden_state_axes_0 = const()[name = tensor("last_hidden_state_axes_0"), val = tensor([-1])]; - tensor last_hidden_state_1 = layer_norm(axes = last_hidden_state_axes_0, beta = text_encoder_text_model_final_layer_norm_bias, epsilon = var_12, gamma = text_encoder_text_model_final_layer_norm_weight, x = input_515)[name = tensor("last_hidden_state")]; - tensor var_2902 = const()[name = tensor("op_2902"), val = tensor([0])]; - tensor var_2904 = reduce_argmax(axis = var_5, keep_dims = var_6, x = input_ids)[name = tensor("op_2904")]; + tensor text_encoder_text_model_final_layer_norm_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_final_layer_norm_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1386082688)))]; + tensor text_encoder_text_model_final_layer_norm_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_final_layer_norm_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1386085312)))]; + tensor last_hidden_state_cast = layer_norm(axes = last_hidden_state_axes_0, beta = text_encoder_text_model_final_layer_norm_bias_to_fp16, epsilon = var_13_to_fp16, gamma = text_encoder_text_model_final_layer_norm_weight_to_fp16, x = input_515_cast)[name = tensor("last_hidden_state_cast")]; + tensor var_2904 = const()[name = tensor("op_2904"), val = tensor([0])]; + tensor var_2906 = reduce_argmax(axis = var_5, keep_dims = var_6, x = input_ids)[name = tensor("op_2906")]; tensor stack_0_axis_0 = const()[name = tensor("stack_0_axis_0"), val = tensor(1)]; - tensor stack_0 = stack(axis = stack_0_axis_0, values = (var_2902, var_2904))[name = tensor("stack_0")]; + tensor stack_0 = stack(axis = stack_0_axis_0, values = (var_2904, var_2906))[name = tensor("stack_0")]; tensor input_transpose_batch_dims_0 = const()[name = tensor("input_transpose_batch_dims_0"), val = tensor(0)]; - tensor input_transpose = gather_nd(batch_dims = input_transpose_batch_dims_0, indices = stack_0, x = last_hidden_state_1)[name = tensor("input_transpose")]; - tensor var_2911_bias_0 = const()[name = tensor("op_2911_bias_0"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2778696320)))]; - tensor 
pooled_outputs = linear(bias = var_2911_bias_0, weight = text_encoder_text_projection_weight, x = input_transpose)[name = tensor("op_2911")]; + tensor input_transpose_cast = gather_nd(batch_dims = input_transpose_batch_dims_0, indices = stack_0, x = last_hidden_state_cast)[name = tensor("input_transpose_cast")]; + tensor text_encoder_text_projection_weight_to_fp16 = const()[name = tensor("text_encoder_text_projection_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1386087936)))]; + tensor var_2913_bias_0_to_fp16 = const()[name = tensor("op_2913_bias_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1389364800)))]; + tensor var_2913_cast = linear(bias = var_2913_bias_0_to_fp16, weight = text_encoder_text_projection_weight_to_fp16, x = input_transpose_cast)[name = tensor("op_2913_cast")]; + tensor var_2913_cast_to_fp32_dtype_0 = const()[name = tensor("op_2913_cast_to_fp32_dtype_0"), val = tensor("fp32")]; + tensor pooled_outputs = cast(dtype = var_2913_cast_to_fp32_dtype_0, x = var_2913_cast)[name = tensor("cast_325")]; + tensor last_hidden_state = cast(dtype = input_499_cast_to_fp32_dtype_0, x = input_499_cast)[name = tensor("cast_359")]; } -> (last_hidden_state, pooled_outputs); } \ No newline at end of file
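
Usage note (not part of the diff above): the function signature that closes this hunk still returns fp32 `last_hidden_state` and `pooled_outputs`, with the fp16 casts confined to the intermediate tensors, so callers of the compiled model should see unchanged output names and dtypes. The sketch below is one way to spot-check that, assuming coremltools >= 7.0 (matching the 7.0b1 build info), a local copy of the compiled bundle at the hypothetical path used here, and the usual (1, 77) float32 `input_ids` layout these Stable Diffusion text encoders are converted with; the token values are illustrative placeholders, not tokenizer output.

    # Minimal sketch: load the compiled TextEncoder2 and confirm the two fp32 outputs.
    import numpy as np
    import coremltools as ct

    MODEL_PATH = "compiled/TextEncoder2.mlmodelc"  # hypothetical local path

    model = ct.models.CompiledMLModel(MODEL_PATH, compute_units=ct.ComputeUnit.ALL)

    # 77-token buffer: illustrative BOS / prompt / EOS ids, zero-padded.
    # Real ids come from the CLIP tokenizer; use int32 instead if the model expects it.
    input_ids = np.zeros((1, 77), dtype=np.float32)
    input_ids[0, :5] = [49406, 320, 1125, 539, 49407]

    outputs = model.predict({"input_ids": input_ids})

    # Per the closing `-> (last_hidden_state, pooled_outputs)` signature,
    # both outputs are cast back to fp32 before being returned.
    print(outputs["last_hidden_state"].shape)  # expected (1, 77, 1280) for this encoder
    print(outputs["pooled_outputs"].shape)     # expected (1, 1280) projected embedding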