diff --git "a/original/compiled/Unet.mlmodelc/model.mil" "b/original/compiled/Unet.mlmodelc/model.mil"
new file mode 100644
--- /dev/null
+++ "b/original/compiled/Unet.mlmodelc/model.mil"
@@ -0,0 +1,12305 @@
+program(1.0) +[buildInfo = dict, tensor>({{"coremlc-component-MIL", "5.33.4"}, {"coremlc-version", "1436.100.10"}})] +{ + func main(tensor encoder_hidden_states, tensor sample, tensor text_embeds, tensor time_ids, tensor timestep) { + tensor var_18 = const()[name = tensor("op_18"), val = tensor(3)]; + tensor var_23 = const()[name = tensor("op_23"), val = tensor(true)]; + tensor var_31 = const()[name = tensor("op_31"), val = tensor(1)]; + tensor var_32 = const()[name = tensor("op_32"), val = tensor(-1)]; + tensor var_59_axes_0 = const()[name = tensor("op_59_axes_0"), val = tensor([1])]; + tensor var_59_cast = expand_dims(axes = var_59_axes_0, x = timestep)[name = tensor("op_59_cast")]; + tensor var_61_to_fp16 = const()[name = tensor("op_61_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(64)))]; + tensor emb_3_cast = mul(x = var_59_cast, y = var_61_to_fp16)[name = tensor("emb_3_cast")]; + tensor var_66_cast = sin(x = emb_3_cast)[name = tensor("op_66_cast")]; + tensor var_67_cast = cos(x = emb_3_cast)[name = tensor("op_67_cast")]; + tensor emb_7_interleave_0 = const()[name = tensor("emb_7_interleave_0"), val = tensor(false)]; + tensor emb_7_cast = concat(axis = var_32, interleave = emb_7_interleave_0, values = (var_66_cast, var_67_cast))[name = tensor("emb_7_cast")]; + tensor var_71_begin_0 = const()[name = tensor("op_71_begin_0"), val = tensor([0, 160])]; + tensor var_71_end_0 = const()[name = tensor("op_71_end_0"), val = tensor([2, 320])]; + tensor var_71_end_mask_0 = const()[name = tensor("op_71_end_mask_0"), val = tensor([true, true])]; + tensor var_71_cast = slice_by_index(begin = var_71_begin_0, end = var_71_end_0, end_mask = var_71_end_mask_0, x = emb_7_cast)[name = tensor("op_71_cast")]; + tensor var_73_begin_0 = const()[name = tensor("op_73_begin_0"), val = tensor([0, 0])]; + tensor var_73_end_0 = const()[name = tensor("op_73_end_0"), val = tensor([2, 160])]; + tensor var_73_end_mask_0 = const()[name = tensor("op_73_end_mask_0"), val = tensor([true, false])]; + tensor var_73_cast = slice_by_index(begin = var_73_begin_0, end = var_73_end_0, end_mask = var_73_end_mask_0, x = emb_7_cast)[name = tensor("op_73_cast")]; + tensor sample_3_interleave_0 = const()[name = tensor("sample_3_interleave_0"), val = tensor(false)]; + tensor sample_3_cast = concat(axis = var_32, interleave = sample_3_interleave_0, values = (var_71_cast, var_73_cast))[name = tensor("sample_3_cast")]; + tensor var_78_axes_0 = const()[name = tensor("op_78_axes_0"), val = tensor([-1])]; + tensor var_78_cast = expand_dims(axes = var_78_axes_0, x = sample_3_cast)[name = tensor("op_78_cast")]; + tensor input_1_axes_0 = const()[name = tensor("input_1_axes_0"), val = tensor([-1])]; + tensor input_1_cast = expand_dims(axes = input_1_axes_0, x = var_78_cast)[name = tensor("input_1_cast")]; + tensor var_82 = const()[name = tensor("op_82"), val = tensor([1, 1])]; + tensor var_84 = const()[name = tensor("op_84"), val = tensor([1, 1])]; + tensor input_3_pad_type_0 = const()[name = tensor("input_3_pad_type_0"), val = tensor("custom")]; + tensor input_3_pad_0 = const()[name = tensor("input_3_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_time_embedding_linear_1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(448))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(307712))), name = tensor("unet_time_embedding_linear_1_weight_to_fp16_palettized"), shape = tensor([1280, 320, 1, 1])]; + tensor unet_time_embedding_linear_1_bias_to_fp16 = const()[name = tensor("unet_time_embedding_linear_1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(307904)))]; + tensor input_3_cast = conv(bias = unet_time_embedding_linear_1_bias_to_fp16, dilations = var_84, groups = var_31, pad = input_3_pad_0, pad_type = input_3_pad_type_0, strides = var_82, weight = unet_time_embedding_linear_1_weight_to_fp16_palettized, x = input_1_cast)[name = tensor("input_3_cast")]; + tensor input_5_cast = silu(x = input_3_cast)[name = tensor("input_5_cast")]; + tensor var_90 = const()[name = tensor("op_90"), val = tensor([1, 1])]; + tensor var_92 = const()[name = tensor("op_92"), val = tensor([1, 1])]; + tensor emb_pad_type_0 = const()[name = tensor("emb_pad_type_0"), val = tensor("custom")]; + tensor emb_pad_0 = const()[name = tensor("emb_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_time_embedding_linear_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(310528))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1539392))), name = tensor("unet_time_embedding_linear_2_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_time_embedding_linear_2_bias_to_fp16 = const()[name = tensor("unet_time_embedding_linear_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1539584)))]; + tensor emb_cast = conv(bias = unet_time_embedding_linear_2_bias_to_fp16, dilations = var_92, groups = var_31, pad = emb_pad_0, pad_type = emb_pad_type_0, strides = var_90, weight = unet_time_embedding_linear_2_weight_to_fp16_palettized, x = input_5_cast)[name = tensor("emb_cast")]; + tensor concat_0 = const()[name = tensor("concat_0"), val = tensor([12])]; + tensor timesteps_cast = reshape(shape = concat_0, x = time_ids)[name = tensor("timesteps_cast")]; + tensor var_104_axes_0 = const()[name = tensor("op_104_axes_0"), val = tensor([1])]; + tensor var_104_cast = expand_dims(axes = var_104_axes_0, x = timesteps_cast)[name = tensor("op_104_cast")]; + tensor var_106_to_fp16 = const()[name = tensor("op_106_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1542208)))]; + tensor emb_11_cast = mul(x = var_104_cast, y = var_106_to_fp16)[name = tensor("emb_11_cast")]; + tensor var_111_cast = sin(x = emb_11_cast)[name = tensor("op_111_cast")]; + tensor var_112_cast = cos(x = emb_11_cast)[name = tensor("op_112_cast")]; + tensor emb_15_interleave_0 = const()[name = tensor("emb_15_interleave_0"), val = tensor(false)]; + tensor emb_15_cast = concat(axis = var_32, interleave = emb_15_interleave_0, values = (var_111_cast, var_112_cast))[name = tensor("emb_15_cast")]; + tensor var_116_begin_0 = const()[name = tensor("op_116_begin_0"), val = tensor([0, 128])]; + tensor var_116_end_0 = const()[name = tensor("op_116_end_0"), val = tensor([12, 256])]; + tensor var_116_end_mask_0 = const()[name = tensor("op_116_end_mask_0"), val = tensor([true, true])]; + tensor var_116_cast = slice_by_index(begin = var_116_begin_0, end = var_116_end_0, end_mask = var_116_end_mask_0, x = 
emb_15_cast)[name = tensor("op_116_cast")]; + tensor var_118_begin_0 = const()[name = tensor("op_118_begin_0"), val = tensor([0, 0])]; + tensor var_118_end_0 = const()[name = tensor("op_118_end_0"), val = tensor([12, 128])]; + tensor var_118_end_mask_0 = const()[name = tensor("op_118_end_mask_0"), val = tensor([true, false])]; + tensor var_118_cast = slice_by_index(begin = var_118_begin_0, end = var_118_end_0, end_mask = var_118_end_mask_0, x = emb_15_cast)[name = tensor("op_118_cast")]; + tensor time_embeds_1_interleave_0 = const()[name = tensor("time_embeds_1_interleave_0"), val = tensor(false)]; + tensor time_embeds_1_cast = concat(axis = var_32, interleave = time_embeds_1_interleave_0, values = (var_116_cast, var_118_cast))[name = tensor("time_embeds_1_cast")]; + tensor var_122 = const()[name = tensor("op_122"), val = tensor([2, -1])]; + tensor time_embeds_cast = reshape(shape = var_122, x = time_embeds_1_cast)[name = tensor("time_embeds_cast")]; + tensor add_embeds_interleave_0 = const()[name = tensor("add_embeds_interleave_0"), val = tensor(false)]; + tensor add_embeds_cast = concat(axis = var_32, interleave = add_embeds_interleave_0, values = (text_embeds, time_embeds_cast))[name = tensor("add_embeds_cast")]; + tensor var_129_axes_0 = const()[name = tensor("op_129_axes_0"), val = tensor([-1])]; + tensor var_129_cast = expand_dims(axes = var_129_axes_0, x = add_embeds_cast)[name = tensor("op_129_cast")]; + tensor input_7_axes_0 = const()[name = tensor("input_7_axes_0"), val = tensor([-1])]; + tensor input_7_cast = expand_dims(axes = input_7_axes_0, x = var_129_cast)[name = tensor("input_7_cast")]; + tensor var_133 = const()[name = tensor("op_133"), val = tensor([1, 1])]; + tensor var_135 = const()[name = tensor("op_135"), val = tensor([1, 1])]; + tensor input_9_pad_type_0 = const()[name = tensor("input_9_pad_type_0"), val = tensor("custom")]; + tensor input_9_pad_0 = const()[name = tensor("input_9_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_add_embedding_linear_1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1542528))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(4245952))), name = tensor("unet_add_embedding_linear_1_weight_to_fp16_palettized"), shape = tensor([1280, 2816, 1, 1])]; + tensor unet_add_embedding_linear_1_bias_to_fp16 = const()[name = tensor("unet_add_embedding_linear_1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(4246144)))]; + tensor input_9_cast = conv(bias = unet_add_embedding_linear_1_bias_to_fp16, dilations = var_135, groups = var_31, pad = input_9_pad_0, pad_type = input_9_pad_type_0, strides = var_133, weight = unet_add_embedding_linear_1_weight_to_fp16_palettized, x = input_7_cast)[name = tensor("input_9_cast")]; + tensor input_11_cast = silu(x = input_9_cast)[name = tensor("input_11_cast")]; + tensor var_141 = const()[name = tensor("op_141"), val = tensor([1, 1])]; + tensor var_143 = const()[name = tensor("op_143"), val = tensor([1, 1])]; + tensor aug_emb_pad_type_0 = const()[name = tensor("aug_emb_pad_type_0"), val = tensor("custom")]; + tensor aug_emb_pad_0 = const()[name = tensor("aug_emb_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_add_embedding_linear_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(4248768))), lut = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(5477632))), name = tensor("unet_add_embedding_linear_2_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_add_embedding_linear_2_bias_to_fp16 = const()[name = tensor("unet_add_embedding_linear_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(5477824)))]; + tensor aug_emb_cast = conv(bias = unet_add_embedding_linear_2_bias_to_fp16, dilations = var_143, groups = var_31, pad = aug_emb_pad_0, pad_type = aug_emb_pad_type_0, strides = var_141, weight = unet_add_embedding_linear_2_weight_to_fp16_palettized, x = input_11_cast)[name = tensor("aug_emb_cast")]; + tensor input_19_cast = add(x = emb_cast, y = aug_emb_cast)[name = tensor("input_19_cast")]; + tensor var_149 = const()[name = tensor("op_149"), val = tensor([1, 1])]; + tensor var_151 = const()[name = tensor("op_151"), val = tensor([1, 1])]; + tensor input_13_pad_type_0 = const()[name = tensor("input_13_pad_type_0"), val = tensor("custom")]; + tensor input_13_pad_0 = const()[name = tensor("input_13_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor unet_conv_in_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(5480448))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(5489152))), name = tensor("unet_conv_in_weight_to_fp16_palettized"), shape = tensor([320, 4, 3, 3])]; + tensor unet_conv_in_bias_to_fp16 = const()[name = tensor("unet_conv_in_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(5489344)))]; + tensor input_13_cast = conv(bias = unet_conv_in_bias_to_fp16, dilations = var_151, groups = var_31, pad = input_13_pad_0, pad_type = input_13_pad_type_0, strides = var_149, weight = unet_conv_in_weight_to_fp16_palettized, x = sample)[name = tensor("input_13_cast")]; + tensor reshape_0_shape_0 = const()[name = tensor("reshape_0_shape_0"), val = tensor([2, 32, 10, 128, 128])]; + tensor reshape_0_cast = reshape(shape = reshape_0_shape_0, x = input_13_cast)[name = tensor("reshape_0_cast")]; + tensor reduce_mean_0_axes_0 = const()[name = tensor("reduce_mean_0_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_0_keep_dims_0 = const()[name = tensor("reduce_mean_0_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_0_cast = reduce_mean(axes = reduce_mean_0_axes_0, keep_dims = reduce_mean_0_keep_dims_0, x = reshape_0_cast)[name = tensor("reduce_mean_0_cast")]; + tensor sub_0_cast = sub(x = reshape_0_cast, y = reduce_mean_0_cast)[name = tensor("sub_0_cast")]; + tensor square_0_cast = square(x = sub_0_cast)[name = tensor("square_0_cast")]; + tensor reduce_mean_2_axes_0 = const()[name = tensor("reduce_mean_2_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_2_keep_dims_0 = const()[name = tensor("reduce_mean_2_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_2_cast = reduce_mean(axes = reduce_mean_2_axes_0, keep_dims = reduce_mean_2_keep_dims_0, x = square_0_cast)[name = tensor("reduce_mean_2_cast")]; + tensor add_0_y_0_to_fp16 = const()[name = tensor("add_0_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_0_cast = add(x = reduce_mean_2_cast, y = add_0_y_0_to_fp16)[name = tensor("add_0_cast")]; + tensor sqrt_0_cast = sqrt(x = add_0_cast)[name = tensor("sqrt_0_cast")]; + tensor real_div_0_cast = real_div(x = sub_0_cast, y = sqrt_0_cast)[name = tensor("real_div_0_cast")]; + tensor reshape_1_shape_0 = 
const()[name = tensor("reshape_1_shape_0"), val = tensor([2, 320, 128, 128])]; + tensor reshape_1_cast = reshape(shape = reshape_1_shape_0, x = real_div_0_cast)[name = tensor("reshape_1_cast")]; + tensor add_1_mean_0_to_fp16 = const()[name = tensor("add_1_mean_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(5490048)))]; + tensor add_1_variance_0_to_fp16 = const()[name = tensor("add_1_variance_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(5490752)))]; + tensor add_1_gamma_0_to_fp16 = const()[name = tensor("add_1_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(5491456)))]; + tensor add_1_beta_0_to_fp16 = const()[name = tensor("add_1_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(5492160)))]; + tensor add_1_epsilon_0_to_fp16 = const()[name = tensor("add_1_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_1_cast = batch_norm(beta = add_1_beta_0_to_fp16, epsilon = add_1_epsilon_0_to_fp16, gamma = add_1_gamma_0_to_fp16, mean = add_1_mean_0_to_fp16, variance = add_1_variance_0_to_fp16, x = reshape_1_cast)[name = tensor("add_1_cast")]; + tensor input_17_cast = silu(x = add_1_cast)[name = tensor("input_17_cast")]; + tensor var_171 = const()[name = tensor("op_171"), val = tensor([1, 1])]; + tensor var_173 = const()[name = tensor("op_173"), val = tensor([1, 1])]; + tensor hidden_states_1_pad_type_0 = const()[name = tensor("hidden_states_1_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_1_pad_0 = const()[name = tensor("hidden_states_1_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor unet_down_blocks_0_resnets_0_conv1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(5492864))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(6184128))), name = tensor("unet_down_blocks_0_resnets_0_conv1_weight_to_fp16_palettized"), shape = tensor([320, 320, 3, 3])]; + tensor unet_down_blocks_0_resnets_0_conv1_bias_to_fp16 = const()[name = tensor("unet_down_blocks_0_resnets_0_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(6184320)))]; + tensor hidden_states_1_cast = conv(bias = unet_down_blocks_0_resnets_0_conv1_bias_to_fp16, dilations = var_173, groups = var_31, pad = hidden_states_1_pad_0, pad_type = hidden_states_1_pad_type_0, strides = var_171, weight = unet_down_blocks_0_resnets_0_conv1_weight_to_fp16_palettized, x = input_17_cast)[name = tensor("hidden_states_1_cast")]; + tensor input_21_cast = silu(x = input_19_cast)[name = tensor("input_21_cast")]; + tensor var_179 = const()[name = tensor("op_179"), val = tensor([1, 1])]; + tensor var_181 = const()[name = tensor("op_181"), val = tensor([1, 1])]; + tensor temb_1_pad_type_0 = const()[name = tensor("temb_1_pad_type_0"), val = tensor("custom")]; + tensor temb_1_pad_0 = const()[name = tensor("temb_1_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_0_resnets_0_time_emb_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(6185024))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(6492288))), name = tensor("unet_down_blocks_0_resnets_0_time_emb_proj_weight_to_fp16_palettized"), shape = 
tensor([320, 1280, 1, 1])]; + tensor unet_down_blocks_0_resnets_0_time_emb_proj_bias_to_fp16 = const()[name = tensor("unet_down_blocks_0_resnets_0_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(6492480)))]; + tensor temb_1_cast = conv(bias = unet_down_blocks_0_resnets_0_time_emb_proj_bias_to_fp16, dilations = var_181, groups = var_31, pad = temb_1_pad_0, pad_type = temb_1_pad_type_0, strides = var_179, weight = unet_down_blocks_0_resnets_0_time_emb_proj_weight_to_fp16_palettized, x = input_21_cast)[name = tensor("temb_1_cast")]; + tensor input_23_cast = add(x = hidden_states_1_cast, y = temb_1_cast)[name = tensor("input_23_cast")]; + tensor reshape_4_shape_0 = const()[name = tensor("reshape_4_shape_0"), val = tensor([2, 32, 10, 128, 128])]; + tensor reshape_4_cast = reshape(shape = reshape_4_shape_0, x = input_23_cast)[name = tensor("reshape_4_cast")]; + tensor reduce_mean_3_axes_0 = const()[name = tensor("reduce_mean_3_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_3_keep_dims_0 = const()[name = tensor("reduce_mean_3_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_3_cast = reduce_mean(axes = reduce_mean_3_axes_0, keep_dims = reduce_mean_3_keep_dims_0, x = reshape_4_cast)[name = tensor("reduce_mean_3_cast")]; + tensor sub_2_cast = sub(x = reshape_4_cast, y = reduce_mean_3_cast)[name = tensor("sub_2_cast")]; + tensor square_1_cast = square(x = sub_2_cast)[name = tensor("square_1_cast")]; + tensor reduce_mean_5_axes_0 = const()[name = tensor("reduce_mean_5_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_5_keep_dims_0 = const()[name = tensor("reduce_mean_5_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_5_cast = reduce_mean(axes = reduce_mean_5_axes_0, keep_dims = reduce_mean_5_keep_dims_0, x = square_1_cast)[name = tensor("reduce_mean_5_cast")]; + tensor add_2_y_0_to_fp16 = const()[name = tensor("add_2_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_2_cast = add(x = reduce_mean_5_cast, y = add_2_y_0_to_fp16)[name = tensor("add_2_cast")]; + tensor sqrt_1_cast = sqrt(x = add_2_cast)[name = tensor("sqrt_1_cast")]; + tensor real_div_1_cast = real_div(x = sub_2_cast, y = sqrt_1_cast)[name = tensor("real_div_1_cast")]; + tensor reshape_5_shape_0 = const()[name = tensor("reshape_5_shape_0"), val = tensor([2, 320, 128, 128])]; + tensor reshape_5_cast = reshape(shape = reshape_5_shape_0, x = real_div_1_cast)[name = tensor("reshape_5_cast")]; + tensor add_3_gamma_0_to_fp16 = const()[name = tensor("add_3_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(6493184)))]; + tensor add_3_beta_0_to_fp16 = const()[name = tensor("add_3_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(6493888)))]; + tensor add_3_epsilon_0_to_fp16 = const()[name = tensor("add_3_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_3_cast = batch_norm(beta = add_3_beta_0_to_fp16, epsilon = add_3_epsilon_0_to_fp16, gamma = add_3_gamma_0_to_fp16, mean = add_1_mean_0_to_fp16, variance = add_1_variance_0_to_fp16, x = reshape_5_cast)[name = tensor("add_3_cast")]; + tensor input_27_cast = silu(x = add_3_cast)[name = tensor("input_27_cast")]; + tensor var_191 = const()[name = tensor("op_191"), val = tensor([1, 1])]; + tensor var_193 = const()[name = tensor("op_193"), val = tensor([1, 1])]; + tensor hidden_states_3_pad_type_0 = const()[name = tensor("hidden_states_3_pad_type_0"), val = tensor("custom")]; + tensor 
hidden_states_3_pad_0 = const()[name = tensor("hidden_states_3_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor unet_down_blocks_0_resnets_0_conv2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(6494592))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(7185856))), name = tensor("unet_down_blocks_0_resnets_0_conv2_weight_to_fp16_palettized"), shape = tensor([320, 320, 3, 3])]; + tensor unet_down_blocks_0_resnets_0_conv2_bias_to_fp16 = const()[name = tensor("unet_down_blocks_0_resnets_0_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(7186048)))]; + tensor hidden_states_3_cast = conv(bias = unet_down_blocks_0_resnets_0_conv2_bias_to_fp16, dilations = var_193, groups = var_31, pad = hidden_states_3_pad_0, pad_type = hidden_states_3_pad_type_0, strides = var_191, weight = unet_down_blocks_0_resnets_0_conv2_weight_to_fp16_palettized, x = input_27_cast)[name = tensor("hidden_states_3_cast")]; + tensor input_29_cast = add(x = input_13_cast, y = hidden_states_3_cast)[name = tensor("input_29_cast")]; + tensor reshape_8_shape_0 = const()[name = tensor("reshape_8_shape_0"), val = tensor([2, 32, 10, 128, 128])]; + tensor reshape_8_cast = reshape(shape = reshape_8_shape_0, x = input_29_cast)[name = tensor("reshape_8_cast")]; + tensor reduce_mean_6_axes_0 = const()[name = tensor("reduce_mean_6_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_6_keep_dims_0 = const()[name = tensor("reduce_mean_6_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_6_cast = reduce_mean(axes = reduce_mean_6_axes_0, keep_dims = reduce_mean_6_keep_dims_0, x = reshape_8_cast)[name = tensor("reduce_mean_6_cast")]; + tensor sub_4_cast = sub(x = reshape_8_cast, y = reduce_mean_6_cast)[name = tensor("sub_4_cast")]; + tensor square_2_cast = square(x = sub_4_cast)[name = tensor("square_2_cast")]; + tensor reduce_mean_8_axes_0 = const()[name = tensor("reduce_mean_8_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_8_keep_dims_0 = const()[name = tensor("reduce_mean_8_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_8_cast = reduce_mean(axes = reduce_mean_8_axes_0, keep_dims = reduce_mean_8_keep_dims_0, x = square_2_cast)[name = tensor("reduce_mean_8_cast")]; + tensor add_4_y_0_to_fp16 = const()[name = tensor("add_4_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_4_cast = add(x = reduce_mean_8_cast, y = add_4_y_0_to_fp16)[name = tensor("add_4_cast")]; + tensor sqrt_2_cast = sqrt(x = add_4_cast)[name = tensor("sqrt_2_cast")]; + tensor real_div_2_cast = real_div(x = sub_4_cast, y = sqrt_2_cast)[name = tensor("real_div_2_cast")]; + tensor reshape_9_shape_0 = const()[name = tensor("reshape_9_shape_0"), val = tensor([2, 320, 128, 128])]; + tensor reshape_9_cast = reshape(shape = reshape_9_shape_0, x = real_div_2_cast)[name = tensor("reshape_9_cast")]; + tensor add_5_gamma_0_to_fp16 = const()[name = tensor("add_5_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(7186752)))]; + tensor add_5_beta_0_to_fp16 = const()[name = tensor("add_5_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(7187456)))]; + tensor add_5_epsilon_0_to_fp16 = const()[name = tensor("add_5_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_5_cast = batch_norm(beta = add_5_beta_0_to_fp16, epsilon = add_5_epsilon_0_to_fp16, gamma = 
add_5_gamma_0_to_fp16, mean = add_1_mean_0_to_fp16, variance = add_1_variance_0_to_fp16, x = reshape_9_cast)[name = tensor("add_5_cast")]; + tensor input_33_cast = silu(x = add_5_cast)[name = tensor("input_33_cast")]; + tensor var_208 = const()[name = tensor("op_208"), val = tensor([1, 1])]; + tensor var_210 = const()[name = tensor("op_210"), val = tensor([1, 1])]; + tensor hidden_states_5_pad_type_0 = const()[name = tensor("hidden_states_5_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_5_pad_0 = const()[name = tensor("hidden_states_5_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor unet_down_blocks_0_resnets_1_conv1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(7188160))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(7879424))), name = tensor("unet_down_blocks_0_resnets_1_conv1_weight_to_fp16_palettized"), shape = tensor([320, 320, 3, 3])]; + tensor unet_down_blocks_0_resnets_1_conv1_bias_to_fp16 = const()[name = tensor("unet_down_blocks_0_resnets_1_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(7879616)))]; + tensor hidden_states_5_cast = conv(bias = unet_down_blocks_0_resnets_1_conv1_bias_to_fp16, dilations = var_210, groups = var_31, pad = hidden_states_5_pad_0, pad_type = hidden_states_5_pad_type_0, strides = var_208, weight = unet_down_blocks_0_resnets_1_conv1_weight_to_fp16_palettized, x = input_33_cast)[name = tensor("hidden_states_5_cast")]; + tensor var_216 = const()[name = tensor("op_216"), val = tensor([1, 1])]; + tensor var_218 = const()[name = tensor("op_218"), val = tensor([1, 1])]; + tensor temb_3_pad_type_0 = const()[name = tensor("temb_3_pad_type_0"), val = tensor("custom")]; + tensor temb_3_pad_0 = const()[name = tensor("temb_3_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_0_resnets_1_time_emb_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(7880320))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(8187584))), name = tensor("unet_down_blocks_0_resnets_1_time_emb_proj_weight_to_fp16_palettized"), shape = tensor([320, 1280, 1, 1])]; + tensor unet_down_blocks_0_resnets_1_time_emb_proj_bias_to_fp16 = const()[name = tensor("unet_down_blocks_0_resnets_1_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(8187776)))]; + tensor temb_3_cast = conv(bias = unet_down_blocks_0_resnets_1_time_emb_proj_bias_to_fp16, dilations = var_218, groups = var_31, pad = temb_3_pad_0, pad_type = temb_3_pad_type_0, strides = var_216, weight = unet_down_blocks_0_resnets_1_time_emb_proj_weight_to_fp16_palettized, x = input_21_cast)[name = tensor("temb_3_cast")]; + tensor input_37_cast = add(x = hidden_states_5_cast, y = temb_3_cast)[name = tensor("input_37_cast")]; + tensor reshape_12_shape_0 = const()[name = tensor("reshape_12_shape_0"), val = tensor([2, 32, 10, 128, 128])]; + tensor reshape_12_cast = reshape(shape = reshape_12_shape_0, x = input_37_cast)[name = tensor("reshape_12_cast")]; + tensor reduce_mean_9_axes_0 = const()[name = tensor("reduce_mean_9_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_9_keep_dims_0 = const()[name = tensor("reduce_mean_9_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_9_cast = reduce_mean(axes = 
reduce_mean_9_axes_0, keep_dims = reduce_mean_9_keep_dims_0, x = reshape_12_cast)[name = tensor("reduce_mean_9_cast")]; + tensor sub_6_cast = sub(x = reshape_12_cast, y = reduce_mean_9_cast)[name = tensor("sub_6_cast")]; + tensor square_3_cast = square(x = sub_6_cast)[name = tensor("square_3_cast")]; + tensor reduce_mean_11_axes_0 = const()[name = tensor("reduce_mean_11_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_11_keep_dims_0 = const()[name = tensor("reduce_mean_11_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_11_cast = reduce_mean(axes = reduce_mean_11_axes_0, keep_dims = reduce_mean_11_keep_dims_0, x = square_3_cast)[name = tensor("reduce_mean_11_cast")]; + tensor add_6_y_0_to_fp16 = const()[name = tensor("add_6_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_6_cast = add(x = reduce_mean_11_cast, y = add_6_y_0_to_fp16)[name = tensor("add_6_cast")]; + tensor sqrt_3_cast = sqrt(x = add_6_cast)[name = tensor("sqrt_3_cast")]; + tensor real_div_3_cast = real_div(x = sub_6_cast, y = sqrt_3_cast)[name = tensor("real_div_3_cast")]; + tensor reshape_13_shape_0 = const()[name = tensor("reshape_13_shape_0"), val = tensor([2, 320, 128, 128])]; + tensor reshape_13_cast = reshape(shape = reshape_13_shape_0, x = real_div_3_cast)[name = tensor("reshape_13_cast")]; + tensor add_7_gamma_0_to_fp16 = const()[name = tensor("add_7_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(8188480)))]; + tensor add_7_beta_0_to_fp16 = const()[name = tensor("add_7_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(8189184)))]; + tensor add_7_epsilon_0_to_fp16 = const()[name = tensor("add_7_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_7_cast = batch_norm(beta = add_7_beta_0_to_fp16, epsilon = add_7_epsilon_0_to_fp16, gamma = add_7_gamma_0_to_fp16, mean = add_1_mean_0_to_fp16, variance = add_1_variance_0_to_fp16, x = reshape_13_cast)[name = tensor("add_7_cast")]; + tensor input_41_cast = silu(x = add_7_cast)[name = tensor("input_41_cast")]; + tensor var_228 = const()[name = tensor("op_228"), val = tensor([1, 1])]; + tensor var_230 = const()[name = tensor("op_230"), val = tensor([1, 1])]; + tensor hidden_states_7_pad_type_0 = const()[name = tensor("hidden_states_7_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_7_pad_0 = const()[name = tensor("hidden_states_7_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor unet_down_blocks_0_resnets_1_conv2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(8189888))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(8881152))), name = tensor("unet_down_blocks_0_resnets_1_conv2_weight_to_fp16_palettized"), shape = tensor([320, 320, 3, 3])]; + tensor unet_down_blocks_0_resnets_1_conv2_bias_to_fp16 = const()[name = tensor("unet_down_blocks_0_resnets_1_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(8881344)))]; + tensor hidden_states_7_cast = conv(bias = unet_down_blocks_0_resnets_1_conv2_bias_to_fp16, dilations = var_230, groups = var_31, pad = hidden_states_7_pad_0, pad_type = hidden_states_7_pad_type_0, strides = var_228, weight = unet_down_blocks_0_resnets_1_conv2_weight_to_fp16_palettized, x = input_41_cast)[name = tensor("hidden_states_7_cast")]; + tensor input_43_cast = add(x = input_29_cast, y = hidden_states_7_cast)[name = 
tensor("input_43_cast")]; + tensor var_237 = const()[name = tensor("op_237"), val = tensor([2, 2])]; + tensor var_239 = const()[name = tensor("op_239"), val = tensor([1, 1])]; + tensor input_45_pad_type_0 = const()[name = tensor("input_45_pad_type_0"), val = tensor("custom")]; + tensor input_45_pad_0 = const()[name = tensor("input_45_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor unet_down_blocks_0_downsamplers_0_conv_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(8882048))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(9573312))), name = tensor("unet_down_blocks_0_downsamplers_0_conv_weight_to_fp16_palettized"), shape = tensor([320, 320, 3, 3])]; + tensor unet_down_blocks_0_downsamplers_0_conv_bias_to_fp16 = const()[name = tensor("unet_down_blocks_0_downsamplers_0_conv_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(9573504)))]; + tensor input_45_cast = conv(bias = unet_down_blocks_0_downsamplers_0_conv_bias_to_fp16, dilations = var_239, groups = var_31, pad = input_45_pad_0, pad_type = input_45_pad_type_0, strides = var_237, weight = unet_down_blocks_0_downsamplers_0_conv_weight_to_fp16_palettized, x = input_43_cast)[name = tensor("input_45_cast")]; + tensor reshape_16_shape_0 = const()[name = tensor("reshape_16_shape_0"), val = tensor([2, 32, 10, 64, 64])]; + tensor reshape_16_cast = reshape(shape = reshape_16_shape_0, x = input_45_cast)[name = tensor("reshape_16_cast")]; + tensor reduce_mean_12_axes_0 = const()[name = tensor("reduce_mean_12_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_12_keep_dims_0 = const()[name = tensor("reduce_mean_12_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_12_cast = reduce_mean(axes = reduce_mean_12_axes_0, keep_dims = reduce_mean_12_keep_dims_0, x = reshape_16_cast)[name = tensor("reduce_mean_12_cast")]; + tensor sub_8_cast = sub(x = reshape_16_cast, y = reduce_mean_12_cast)[name = tensor("sub_8_cast")]; + tensor square_4_cast = square(x = sub_8_cast)[name = tensor("square_4_cast")]; + tensor reduce_mean_14_axes_0 = const()[name = tensor("reduce_mean_14_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_14_keep_dims_0 = const()[name = tensor("reduce_mean_14_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_14_cast = reduce_mean(axes = reduce_mean_14_axes_0, keep_dims = reduce_mean_14_keep_dims_0, x = square_4_cast)[name = tensor("reduce_mean_14_cast")]; + tensor add_8_y_0_to_fp16 = const()[name = tensor("add_8_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_8_cast = add(x = reduce_mean_14_cast, y = add_8_y_0_to_fp16)[name = tensor("add_8_cast")]; + tensor sqrt_4_cast = sqrt(x = add_8_cast)[name = tensor("sqrt_4_cast")]; + tensor real_div_4_cast = real_div(x = sub_8_cast, y = sqrt_4_cast)[name = tensor("real_div_4_cast")]; + tensor reshape_17_shape_0 = const()[name = tensor("reshape_17_shape_0"), val = tensor([2, 320, 64, 64])]; + tensor reshape_17_cast = reshape(shape = reshape_17_shape_0, x = real_div_4_cast)[name = tensor("reshape_17_cast")]; + tensor add_9_gamma_0_to_fp16 = const()[name = tensor("add_9_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(9574208)))]; + tensor add_9_beta_0_to_fp16 = const()[name = tensor("add_9_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(9574912)))]; + tensor 
add_9_epsilon_0_to_fp16 = const()[name = tensor("add_9_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_9_cast = batch_norm(beta = add_9_beta_0_to_fp16, epsilon = add_9_epsilon_0_to_fp16, gamma = add_9_gamma_0_to_fp16, mean = add_1_mean_0_to_fp16, variance = add_1_variance_0_to_fp16, x = reshape_17_cast)[name = tensor("add_9_cast")]; + tensor input_49_cast = silu(x = add_9_cast)[name = tensor("input_49_cast")]; + tensor var_268 = const()[name = tensor("op_268"), val = tensor([1, 1])]; + tensor var_270 = const()[name = tensor("op_270"), val = tensor([1, 1])]; + tensor hidden_states_9_pad_type_0 = const()[name = tensor("hidden_states_9_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_9_pad_0 = const()[name = tensor("hidden_states_9_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor unet_down_blocks_1_resnets_0_conv1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(9575616))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(10958080))), name = tensor("unet_down_blocks_1_resnets_0_conv1_weight_to_fp16_palettized"), shape = tensor([640, 320, 3, 3])]; + tensor unet_down_blocks_1_resnets_0_conv1_bias_to_fp16 = const()[name = tensor("unet_down_blocks_1_resnets_0_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(10958272)))]; + tensor hidden_states_9_cast = conv(bias = unet_down_blocks_1_resnets_0_conv1_bias_to_fp16, dilations = var_270, groups = var_31, pad = hidden_states_9_pad_0, pad_type = hidden_states_9_pad_type_0, strides = var_268, weight = unet_down_blocks_1_resnets_0_conv1_weight_to_fp16_palettized, x = input_49_cast)[name = tensor("hidden_states_9_cast")]; + tensor var_276 = const()[name = tensor("op_276"), val = tensor([1, 1])]; + tensor var_278 = const()[name = tensor("op_278"), val = tensor([1, 1])]; + tensor temb_5_pad_type_0 = const()[name = tensor("temb_5_pad_type_0"), val = tensor("custom")]; + tensor temb_5_pad_0 = const()[name = tensor("temb_5_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_1_resnets_0_time_emb_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(10959616))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(11574080))), name = tensor("unet_down_blocks_1_resnets_0_time_emb_proj_weight_to_fp16_palettized"), shape = tensor([640, 1280, 1, 1])]; + tensor unet_down_blocks_1_resnets_0_time_emb_proj_bias_to_fp16 = const()[name = tensor("unet_down_blocks_1_resnets_0_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(11574272)))]; + tensor temb_5_cast = conv(bias = unet_down_blocks_1_resnets_0_time_emb_proj_bias_to_fp16, dilations = var_278, groups = var_31, pad = temb_5_pad_0, pad_type = temb_5_pad_type_0, strides = var_276, weight = unet_down_blocks_1_resnets_0_time_emb_proj_weight_to_fp16_palettized, x = input_21_cast)[name = tensor("temb_5_cast")]; + tensor input_53_cast = add(x = hidden_states_9_cast, y = temb_5_cast)[name = tensor("input_53_cast")]; + tensor reshape_20_shape_0 = const()[name = tensor("reshape_20_shape_0"), val = tensor([2, 32, 20, 64, 64])]; + tensor reshape_20_cast = reshape(shape = reshape_20_shape_0, x = input_53_cast)[name = tensor("reshape_20_cast")]; + tensor reduce_mean_15_axes_0 = const()[name = 
tensor("reduce_mean_15_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_15_keep_dims_0 = const()[name = tensor("reduce_mean_15_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_15_cast = reduce_mean(axes = reduce_mean_15_axes_0, keep_dims = reduce_mean_15_keep_dims_0, x = reshape_20_cast)[name = tensor("reduce_mean_15_cast")]; + tensor sub_10_cast = sub(x = reshape_20_cast, y = reduce_mean_15_cast)[name = tensor("sub_10_cast")]; + tensor square_5_cast = square(x = sub_10_cast)[name = tensor("square_5_cast")]; + tensor reduce_mean_17_axes_0 = const()[name = tensor("reduce_mean_17_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_17_keep_dims_0 = const()[name = tensor("reduce_mean_17_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_17_cast = reduce_mean(axes = reduce_mean_17_axes_0, keep_dims = reduce_mean_17_keep_dims_0, x = square_5_cast)[name = tensor("reduce_mean_17_cast")]; + tensor add_10_y_0_to_fp16 = const()[name = tensor("add_10_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_10_cast = add(x = reduce_mean_17_cast, y = add_10_y_0_to_fp16)[name = tensor("add_10_cast")]; + tensor sqrt_5_cast = sqrt(x = add_10_cast)[name = tensor("sqrt_5_cast")]; + tensor real_div_5_cast = real_div(x = sub_10_cast, y = sqrt_5_cast)[name = tensor("real_div_5_cast")]; + tensor reshape_21_shape_0 = const()[name = tensor("reshape_21_shape_0"), val = tensor([2, 640, 64, 64])]; + tensor reshape_21_cast = reshape(shape = reshape_21_shape_0, x = real_div_5_cast)[name = tensor("reshape_21_cast")]; + tensor add_11_mean_0_to_fp16 = const()[name = tensor("add_11_mean_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(11575616)))]; + tensor add_11_variance_0_to_fp16 = const()[name = tensor("add_11_variance_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(11576960)))]; + tensor add_11_gamma_0_to_fp16 = const()[name = tensor("add_11_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(11578304)))]; + tensor add_11_beta_0_to_fp16 = const()[name = tensor("add_11_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(11579648)))]; + tensor add_11_epsilon_0_to_fp16 = const()[name = tensor("add_11_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_11_cast = batch_norm(beta = add_11_beta_0_to_fp16, epsilon = add_11_epsilon_0_to_fp16, gamma = add_11_gamma_0_to_fp16, mean = add_11_mean_0_to_fp16, variance = add_11_variance_0_to_fp16, x = reshape_21_cast)[name = tensor("add_11_cast")]; + tensor input_57_cast = silu(x = add_11_cast)[name = tensor("input_57_cast")]; + tensor var_288 = const()[name = tensor("op_288"), val = tensor([1, 1])]; + tensor var_290 = const()[name = tensor("op_290"), val = tensor([1, 1])]; + tensor hidden_states_11_pad_type_0 = const()[name = tensor("hidden_states_11_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_11_pad_0 = const()[name = tensor("hidden_states_11_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor unet_down_blocks_1_resnets_0_conv2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(11580992))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(14345856))), name = tensor("unet_down_blocks_1_resnets_0_conv2_weight_to_fp16_palettized"), shape = tensor([640, 640, 3, 3])]; + tensor 
unet_down_blocks_1_resnets_0_conv2_bias_to_fp16 = const()[name = tensor("unet_down_blocks_1_resnets_0_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(14346048)))]; + tensor hidden_states_11_cast = conv(bias = unet_down_blocks_1_resnets_0_conv2_bias_to_fp16, dilations = var_290, groups = var_31, pad = hidden_states_11_pad_0, pad_type = hidden_states_11_pad_type_0, strides = var_288, weight = unet_down_blocks_1_resnets_0_conv2_weight_to_fp16_palettized, x = input_57_cast)[name = tensor("hidden_states_11_cast")]; + tensor var_295 = const()[name = tensor("op_295"), val = tensor([1, 1])]; + tensor var_297 = const()[name = tensor("op_297"), val = tensor([1, 1])]; + tensor x_1_pad_type_0 = const()[name = tensor("x_1_pad_type_0"), val = tensor("custom")]; + tensor x_1_pad_0 = const()[name = tensor("x_1_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_1_resnets_0_conv_shortcut_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(14347392))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(14501056))), name = tensor("unet_down_blocks_1_resnets_0_conv_shortcut_weight_to_fp16_palettized"), shape = tensor([640, 320, 1, 1])]; + tensor unet_down_blocks_1_resnets_0_conv_shortcut_bias_to_fp16 = const()[name = tensor("unet_down_blocks_1_resnets_0_conv_shortcut_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(14501248)))]; + tensor x_1_cast = conv(bias = unet_down_blocks_1_resnets_0_conv_shortcut_bias_to_fp16, dilations = var_297, groups = var_31, pad = x_1_pad_0, pad_type = x_1_pad_type_0, strides = var_295, weight = unet_down_blocks_1_resnets_0_conv_shortcut_weight_to_fp16_palettized, x = input_45_cast)[name = tensor("x_1_cast")]; + tensor hidden_states_13_cast = add(x = x_1_cast, y = hidden_states_11_cast)[name = tensor("hidden_states_13_cast")]; + tensor reshape_24_shape_0 = const()[name = tensor("reshape_24_shape_0"), val = tensor([2, 32, 20, 64, 64])]; + tensor reshape_24_cast = reshape(shape = reshape_24_shape_0, x = hidden_states_13_cast)[name = tensor("reshape_24_cast")]; + tensor reduce_mean_18_axes_0 = const()[name = tensor("reduce_mean_18_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_18_keep_dims_0 = const()[name = tensor("reduce_mean_18_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_18_cast = reduce_mean(axes = reduce_mean_18_axes_0, keep_dims = reduce_mean_18_keep_dims_0, x = reshape_24_cast)[name = tensor("reduce_mean_18_cast")]; + tensor sub_12_cast = sub(x = reshape_24_cast, y = reduce_mean_18_cast)[name = tensor("sub_12_cast")]; + tensor square_6_cast = square(x = sub_12_cast)[name = tensor("square_6_cast")]; + tensor reduce_mean_20_axes_0 = const()[name = tensor("reduce_mean_20_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_20_keep_dims_0 = const()[name = tensor("reduce_mean_20_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_20_cast = reduce_mean(axes = reduce_mean_20_axes_0, keep_dims = reduce_mean_20_keep_dims_0, x = square_6_cast)[name = tensor("reduce_mean_20_cast")]; + tensor add_12_y_0_to_fp16 = const()[name = tensor("add_12_y_0_to_fp16"), val = tensor(0x1.1p-20)]; + tensor add_12_cast = add(x = reduce_mean_20_cast, y = add_12_y_0_to_fp16)[name = tensor("add_12_cast")]; + tensor sqrt_6_cast = sqrt(x = add_12_cast)[name = tensor("sqrt_6_cast")]; + tensor real_div_6_cast = real_div(x = 
sub_12_cast, y = sqrt_6_cast)[name = tensor("real_div_6_cast")]; + tensor reshape_25_shape_0 = const()[name = tensor("reshape_25_shape_0"), val = tensor([2, 640, 64, 64])]; + tensor reshape_25_cast = reshape(shape = reshape_25_shape_0, x = real_div_6_cast)[name = tensor("reshape_25_cast")]; + tensor add_13_gamma_0_to_fp16 = const()[name = tensor("add_13_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(14502592)))]; + tensor add_13_beta_0_to_fp16 = const()[name = tensor("add_13_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(14503936)))]; + tensor add_13_epsilon_0_to_fp16 = const()[name = tensor("add_13_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_13_cast = batch_norm(beta = add_13_beta_0_to_fp16, epsilon = add_13_epsilon_0_to_fp16, gamma = add_13_gamma_0_to_fp16, mean = add_11_mean_0_to_fp16, variance = add_11_variance_0_to_fp16, x = reshape_25_cast)[name = tensor("add_13_cast")]; + tensor var_319 = const()[name = tensor("op_319"), val = tensor([1, 1])]; + tensor var_321 = const()[name = tensor("op_321"), val = tensor([1, 1])]; + tensor hidden_states_15_pad_type_0 = const()[name = tensor("hidden_states_15_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_15_pad_0 = const()[name = tensor("hidden_states_15_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_1_attentions_0_proj_in_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(14505280))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(14812544))), name = tensor("unet_down_blocks_1_attentions_0_proj_in_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor unet_down_blocks_1_attentions_0_proj_in_bias_to_fp16 = const()[name = tensor("unet_down_blocks_1_attentions_0_proj_in_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(14812736)))]; + tensor hidden_states_15_cast = conv(bias = unet_down_blocks_1_attentions_0_proj_in_bias_to_fp16, dilations = var_321, groups = var_31, pad = hidden_states_15_pad_0, pad_type = hidden_states_15_pad_type_0, strides = var_319, weight = unet_down_blocks_1_attentions_0_proj_in_weight_to_fp16_palettized, x = add_13_cast)[name = tensor("hidden_states_15_cast")]; + tensor var_326 = const()[name = tensor("op_326"), val = tensor([2, 640, 1, 4096])]; + tensor inputs_1_cast = reshape(shape = var_326, x = hidden_states_15_cast)[name = tensor("inputs_1_cast")]; + tensor var_336 = const()[name = tensor("op_336"), val = tensor([1])]; + tensor channels_mean_1_cast = reduce_mean(axes = var_336, keep_dims = var_23, x = inputs_1_cast)[name = tensor("channels_mean_1_cast")]; + tensor zero_mean_1_cast = sub(x = inputs_1_cast, y = channels_mean_1_cast)[name = tensor("zero_mean_1_cast")]; + tensor zero_mean_sq_1_cast = mul(x = zero_mean_1_cast, y = zero_mean_1_cast)[name = tensor("zero_mean_sq_1_cast")]; + tensor var_340 = const()[name = tensor("op_340"), val = tensor([1])]; + tensor var_341_cast = reduce_mean(axes = var_340, keep_dims = var_23, x = zero_mean_sq_1_cast)[name = tensor("op_341_cast")]; + tensor var_342_to_fp16 = const()[name = tensor("op_342_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_343_cast = add(x = var_341_cast, y = var_342_to_fp16)[name = tensor("op_343_cast")]; + tensor denom_1_epsilon_0_to_fp16 = const()[name = tensor("denom_1_epsilon_0_to_fp16"), val 
= tensor(0x1p-24)]; + tensor denom_1_cast = rsqrt(epsilon = denom_1_epsilon_0_to_fp16, x = var_343_cast)[name = tensor("denom_1_cast")]; + tensor out_1_cast = mul(x = zero_mean_1_cast, y = denom_1_cast)[name = tensor("out_1_cast")]; + tensor var_347_to_fp16 = const()[name = tensor("op_347_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(14814080)))]; + tensor var_348_cast = add(x = out_1_cast, y = var_347_to_fp16)[name = tensor("op_348_cast")]; + tensor var_350_to_fp16 = const()[name = tensor("op_350_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(14815424)))]; + tensor hidden_states_17_cast = mul(x = var_348_cast, y = var_350_to_fp16)[name = tensor("hidden_states_17_cast")]; + tensor var_357 = const()[name = tensor("op_357"), val = tensor([1, 1])]; + tensor var_359 = const()[name = tensor("op_359"), val = tensor([1, 1])]; + tensor q_1_pad_type_0 = const()[name = tensor("q_1_pad_type_0"), val = tensor("custom")]; + tensor q_1_pad_0 = const()[name = tensor("q_1_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(14816768))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(15124032))), name = tensor("unet_down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_1_cast = conv(dilations = var_359, groups = var_31, pad = q_1_pad_0, pad_type = q_1_pad_type_0, strides = var_357, weight = unet_down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_17_cast)[name = tensor("q_1_cast")]; + tensor var_363 = const()[name = tensor("op_363"), val = tensor([1, 1])]; + tensor var_365 = const()[name = tensor("op_365"), val = tensor([1, 1])]; + tensor k_1_pad_type_0 = const()[name = tensor("k_1_pad_type_0"), val = tensor("custom")]; + tensor k_1_pad_0 = const()[name = tensor("k_1_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(15124224))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(15431488))), name = tensor("unet_down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor k_1_cast = conv(dilations = var_365, groups = var_31, pad = k_1_pad_0, pad_type = k_1_pad_type_0, strides = var_363, weight = unet_down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_17_cast)[name = tensor("k_1_cast")]; + tensor var_369 = const()[name = tensor("op_369"), val = tensor([1, 1])]; + tensor var_371 = const()[name = tensor("op_371"), val = tensor([1, 1])]; + tensor v_1_pad_type_0 = const()[name = tensor("v_1_pad_type_0"), val = tensor("custom")]; + tensor v_1_pad_0 = const()[name = tensor("v_1_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(15431680))), lut = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(15738944))), name = tensor("unet_down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor v_1_cast = conv(dilations = var_371, groups = var_31, pad = v_1_pad_0, pad_type = v_1_pad_type_0, strides = var_369, weight = unet_down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_17_cast)[name = tensor("v_1_cast")]; + tensor var_375 = const()[name = tensor("op_375"), val = tensor([2, 10, 64, -1])]; + tensor var_376_cast = reshape(shape = var_375, x = q_1_cast)[name = tensor("op_376_cast")]; + tensor var_377 = const()[name = tensor("op_377"), val = tensor([2, 10, 64, -1])]; + tensor var_378_cast = reshape(shape = var_377, x = k_1_cast)[name = tensor("op_378_cast")]; + tensor var_379 = const()[name = tensor("op_379"), val = tensor([2, 10, 64, -1])]; + tensor var_380_cast = reshape(shape = var_379, x = v_1_cast)[name = tensor("op_380_cast")]; + tensor attn_weights_1_transpose_x_0 = const()[name = tensor("attn_weights_1_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_1_transpose_y_0 = const()[name = tensor("attn_weights_1_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_1_cast = matmul(transpose_x = attn_weights_1_transpose_x_0, transpose_y = attn_weights_1_transpose_y_0, x = var_376_cast, y = var_378_cast)[name = tensor("attn_weights_1_cast")]; + tensor var_12_to_fp16 = const()[name = tensor("op_12_to_fp16"), val = tensor(0x1p-3)]; + tensor attn_weights_3_cast = mul(x = attn_weights_1_cast, y = var_12_to_fp16)[name = tensor("attn_weights_3_cast")]; + tensor var_384_cast = softmax(axis = var_18, x = attn_weights_3_cast)[name = tensor("op_384_cast")]; + tensor attn_1_transpose_x_0 = const()[name = tensor("attn_1_transpose_x_0"), val = tensor(false)]; + tensor attn_1_transpose_y_0 = const()[name = tensor("attn_1_transpose_y_0"), val = tensor(true)]; + tensor attn_1_cast = matmul(transpose_x = attn_1_transpose_x_0, transpose_y = attn_1_transpose_y_0, x = var_380_cast, y = var_384_cast)[name = tensor("attn_1_cast")]; + tensor var_388 = const()[name = tensor("op_388"), val = tensor([2, 640, 1, -1])]; + tensor input_61_cast = reshape(shape = var_388, x = attn_1_cast)[name = tensor("input_61_cast")]; + tensor var_393 = const()[name = tensor("op_393"), val = tensor([1, 1])]; + tensor var_395 = const()[name = tensor("op_395"), val = tensor([1, 1])]; + tensor var_397_pad_type_0 = const()[name = tensor("op_397_pad_type_0"), val = tensor("custom")]; + tensor var_397_pad_0 = const()[name = tensor("op_397_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(15739136))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(16046400))), name = tensor("unet_down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor unet_down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("unet_down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(16046592)))]; + tensor var_397_cast = conv(bias = 
unet_down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_out_0_bias_to_fp16, dilations = var_395, groups = var_31, pad = var_397_pad_0, pad_type = var_397_pad_type_0, strides = var_393, weight = unet_down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized, x = input_61_cast)[name = tensor("op_397_cast")]; + tensor inputs_3_cast = add(x = var_397_cast, y = inputs_1_cast)[name = tensor("inputs_3_cast")]; + tensor var_401 = const()[name = tensor("op_401"), val = tensor([1])]; + tensor channels_mean_3_cast = reduce_mean(axes = var_401, keep_dims = var_23, x = inputs_3_cast)[name = tensor("channels_mean_3_cast")]; + tensor zero_mean_3_cast = sub(x = inputs_3_cast, y = channels_mean_3_cast)[name = tensor("zero_mean_3_cast")]; + tensor zero_mean_sq_3_cast = mul(x = zero_mean_3_cast, y = zero_mean_3_cast)[name = tensor("zero_mean_sq_3_cast")]; + tensor var_405 = const()[name = tensor("op_405"), val = tensor([1])]; + tensor var_406_cast = reduce_mean(axes = var_405, keep_dims = var_23, x = zero_mean_sq_3_cast)[name = tensor("op_406_cast")]; + tensor var_407_to_fp16 = const()[name = tensor("op_407_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_408_cast = add(x = var_406_cast, y = var_407_to_fp16)[name = tensor("op_408_cast")]; + tensor denom_3_epsilon_0_to_fp16 = const()[name = tensor("denom_3_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_3_cast = rsqrt(epsilon = denom_3_epsilon_0_to_fp16, x = var_408_cast)[name = tensor("denom_3_cast")]; + tensor out_3_cast = mul(x = zero_mean_3_cast, y = denom_3_cast)[name = tensor("out_3_cast")]; + tensor var_412_to_fp16 = const()[name = tensor("op_412_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(16047936)))]; + tensor var_413_cast = add(x = out_3_cast, y = var_412_to_fp16)[name = tensor("op_413_cast")]; + tensor var_415_to_fp16 = const()[name = tensor("op_415_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(16049280)))]; + tensor hidden_states_19_cast = mul(x = var_413_cast, y = var_415_to_fp16)[name = tensor("hidden_states_19_cast")]; + tensor var_422 = const()[name = tensor("op_422"), val = tensor([1, 1])]; + tensor var_424 = const()[name = tensor("op_424"), val = tensor([1, 1])]; + tensor q_3_pad_type_0 = const()[name = tensor("q_3_pad_type_0"), val = tensor("custom")]; + tensor q_3_pad_0 = const()[name = tensor("q_3_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_1_attentions_0_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(16050624))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(16357888))), name = tensor("unet_down_blocks_1_attentions_0_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_3_cast = conv(dilations = var_424, groups = var_31, pad = q_3_pad_0, pad_type = q_3_pad_type_0, strides = var_422, weight = unet_down_blocks_1_attentions_0_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_19_cast)[name = tensor("q_3_cast")]; + tensor var_428 = const()[name = tensor("op_428"), val = tensor([1, 1])]; + tensor var_430 = const()[name = tensor("op_430"), val = tensor([1, 1])]; + tensor k_3_pad_type_0 = const()[name = tensor("k_3_pad_type_0"), val = tensor("custom")]; + tensor k_3_pad_0 = const()[name = tensor("k_3_pad_0"), val = tensor([0, 
0, 0, 0])]; + tensor unet_down_blocks_1_attentions_0_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(16358080))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(17341184))), name = tensor("unet_down_blocks_1_attentions_0_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor k_3_cast = conv(dilations = var_430, groups = var_31, pad = k_3_pad_0, pad_type = k_3_pad_type_0, strides = var_428, weight = unet_down_blocks_1_attentions_0_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_3_cast")]; + tensor var_434 = const()[name = tensor("op_434"), val = tensor([1, 1])]; + tensor var_436 = const()[name = tensor("op_436"), val = tensor([1, 1])]; + tensor v_3_pad_type_0 = const()[name = tensor("v_3_pad_type_0"), val = tensor("custom")]; + tensor v_3_pad_0 = const()[name = tensor("v_3_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_1_attentions_0_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(17341376))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(18324480))), name = tensor("unet_down_blocks_1_attentions_0_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor v_3_cast = conv(dilations = var_436, groups = var_31, pad = v_3_pad_0, pad_type = v_3_pad_type_0, strides = var_434, weight = unet_down_blocks_1_attentions_0_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_3_cast")]; + tensor var_440 = const()[name = tensor("op_440"), val = tensor([2, 10, 64, -1])]; + tensor var_441_cast = reshape(shape = var_440, x = q_3_cast)[name = tensor("op_441_cast")]; + tensor var_442 = const()[name = tensor("op_442"), val = tensor([2, 10, 64, -1])]; + tensor var_443_cast = reshape(shape = var_442, x = k_3_cast)[name = tensor("op_443_cast")]; + tensor var_444 = const()[name = tensor("op_444"), val = tensor([2, 10, 64, -1])]; + tensor var_445_cast = reshape(shape = var_444, x = v_3_cast)[name = tensor("op_445_cast")]; + tensor attn_weights_5_transpose_x_0 = const()[name = tensor("attn_weights_5_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_5_transpose_y_0 = const()[name = tensor("attn_weights_5_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_5_cast = matmul(transpose_x = attn_weights_5_transpose_x_0, transpose_y = attn_weights_5_transpose_y_0, x = var_441_cast, y = var_443_cast)[name = tensor("attn_weights_5_cast")]; + tensor attn_weights_7_cast = mul(x = attn_weights_5_cast, y = var_12_to_fp16)[name = tensor("attn_weights_7_cast")]; + tensor var_449_cast = softmax(axis = var_18, x = attn_weights_7_cast)[name = tensor("op_449_cast")]; + tensor attn_3_transpose_x_0 = const()[name = tensor("attn_3_transpose_x_0"), val = tensor(false)]; + tensor attn_3_transpose_y_0 = const()[name = tensor("attn_3_transpose_y_0"), val = tensor(true)]; + tensor attn_3_cast = matmul(transpose_x = attn_3_transpose_x_0, transpose_y = attn_3_transpose_y_0, x = var_445_cast, y = var_449_cast)[name = tensor("attn_3_cast")]; + tensor var_453 = const()[name = tensor("op_453"), val = tensor([2, 640, 1, -1])]; + tensor input_63_cast = reshape(shape = var_453, x = 
attn_3_cast)[name = tensor("input_63_cast")]; + tensor var_458 = const()[name = tensor("op_458"), val = tensor([1, 1])]; + tensor var_460 = const()[name = tensor("op_460"), val = tensor([1, 1])]; + tensor var_462_pad_type_0 = const()[name = tensor("op_462_pad_type_0"), val = tensor("custom")]; + tensor var_462_pad_0 = const()[name = tensor("op_462_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_1_attentions_0_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(18324672))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(18631936))), name = tensor("unet_down_blocks_1_attentions_0_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor unet_down_blocks_1_attentions_0_transformer_blocks_0_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("unet_down_blocks_1_attentions_0_transformer_blocks_0_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(18632128)))]; + tensor var_462_cast = conv(bias = unet_down_blocks_1_attentions_0_transformer_blocks_0_attn2_to_out_0_bias_to_fp16, dilations = var_460, groups = var_31, pad = var_462_pad_0, pad_type = var_462_pad_type_0, strides = var_458, weight = unet_down_blocks_1_attentions_0_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized, x = input_63_cast)[name = tensor("op_462_cast")]; + tensor inputs_5_cast = add(x = var_462_cast, y = inputs_3_cast)[name = tensor("inputs_5_cast")]; + tensor var_466 = const()[name = tensor("op_466"), val = tensor([1])]; + tensor channels_mean_5_cast = reduce_mean(axes = var_466, keep_dims = var_23, x = inputs_5_cast)[name = tensor("channels_mean_5_cast")]; + tensor zero_mean_5_cast = sub(x = inputs_5_cast, y = channels_mean_5_cast)[name = tensor("zero_mean_5_cast")]; + tensor zero_mean_sq_5_cast = mul(x = zero_mean_5_cast, y = zero_mean_5_cast)[name = tensor("zero_mean_sq_5_cast")]; + tensor var_470 = const()[name = tensor("op_470"), val = tensor([1])]; + tensor var_471_cast = reduce_mean(axes = var_470, keep_dims = var_23, x = zero_mean_sq_5_cast)[name = tensor("op_471_cast")]; + tensor var_472_to_fp16 = const()[name = tensor("op_472_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_473_cast = add(x = var_471_cast, y = var_472_to_fp16)[name = tensor("op_473_cast")]; + tensor denom_5_epsilon_0_to_fp16 = const()[name = tensor("denom_5_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_5_cast = rsqrt(epsilon = denom_5_epsilon_0_to_fp16, x = var_473_cast)[name = tensor("denom_5_cast")]; + tensor out_5_cast = mul(x = zero_mean_5_cast, y = denom_5_cast)[name = tensor("out_5_cast")]; + tensor var_477_to_fp16 = const()[name = tensor("op_477_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(18633472)))]; + tensor var_478_cast = add(x = out_5_cast, y = var_477_to_fp16)[name = tensor("op_478_cast")]; + tensor var_480_to_fp16 = const()[name = tensor("op_480_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(18634816)))]; + tensor input_65_cast = mul(x = var_478_cast, y = var_480_to_fp16)[name = tensor("input_65_cast")]; + tensor var_488 = const()[name = tensor("op_488"), val = tensor([1, 1])]; + tensor var_490 = const()[name = tensor("op_490"), val = tensor([1, 1])]; + tensor var_492_pad_type_0 = const()[name = 
tensor("op_492_pad_type_0"), val = tensor("custom")]; + tensor var_492_pad_0 = const()[name = tensor("op_492_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_1_attentions_0_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(18636160))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(21093824))), name = tensor("unet_down_blocks_1_attentions_0_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([5120, 640, 1, 1])]; + tensor unet_down_blocks_1_attentions_0_transformer_blocks_0_ff_net_0_proj_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(21094016))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(21097920))), name = tensor("unet_down_blocks_1_attentions_0_transformer_blocks_0_ff_net_0_proj_bias_to_fp16_palettized"), shape = tensor([5120])]; + tensor var_492_cast = conv(bias = unet_down_blocks_1_attentions_0_transformer_blocks_0_ff_net_0_proj_bias_to_fp16_palettized, dilations = var_490, groups = var_31, pad = var_492_pad_0, pad_type = var_492_pad_type_0, strides = var_488, weight = unet_down_blocks_1_attentions_0_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized, x = input_65_cast)[name = tensor("op_492_cast")]; + tensor var_493_split_sizes_0 = const()[name = tensor("op_493_split_sizes_0"), val = tensor([2560, 2560])]; + tensor var_493_axis_0 = const()[name = tensor("op_493_axis_0"), val = tensor(1)]; + tensor var_493_cast_0, tensor var_493_cast_1 = split(axis = var_493_axis_0, split_sizes = var_493_split_sizes_0, x = var_492_cast)[name = tensor("op_493_cast")]; + tensor var_495_mode_0 = const()[name = tensor("op_495_mode_0"), val = tensor("EXACT")]; + tensor var_495_cast = gelu(mode = var_495_mode_0, x = var_493_cast_1)[name = tensor("op_495_cast")]; + tensor input_67_cast = mul(x = var_493_cast_0, y = var_495_cast)[name = tensor("input_67_cast")]; + tensor var_499 = const()[name = tensor("op_499"), val = tensor([1, 1])]; + tensor var_501 = const()[name = tensor("op_501"), val = tensor([1, 1])]; + tensor var_503_pad_type_0 = const()[name = tensor("op_503_pad_type_0"), val = tensor("custom")]; + tensor var_503_pad_0 = const()[name = tensor("op_503_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_1_attentions_0_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(21098112))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(22326976))), name = tensor("unet_down_blocks_1_attentions_0_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized"), shape = tensor([640, 2560, 1, 1])]; + tensor unet_down_blocks_1_attentions_0_transformer_blocks_0_ff_net_2_bias_to_fp16 = const()[name = tensor("unet_down_blocks_1_attentions_0_transformer_blocks_0_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(22327168)))]; + tensor var_503_cast = conv(bias = unet_down_blocks_1_attentions_0_transformer_blocks_0_ff_net_2_bias_to_fp16, dilations = var_501, groups = var_31, pad = var_503_pad_0, pad_type = var_503_pad_type_0, strides = var_499, weight = unet_down_blocks_1_attentions_0_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized, x = 
input_67_cast)[name = tensor("op_503_cast")]; + tensor inputs_7_cast = add(x = var_503_cast, y = inputs_5_cast)[name = tensor("inputs_7_cast")]; + tensor var_513 = const()[name = tensor("op_513"), val = tensor([1])]; + tensor channels_mean_7_cast = reduce_mean(axes = var_513, keep_dims = var_23, x = inputs_7_cast)[name = tensor("channels_mean_7_cast")]; + tensor zero_mean_7_cast = sub(x = inputs_7_cast, y = channels_mean_7_cast)[name = tensor("zero_mean_7_cast")]; + tensor zero_mean_sq_7_cast = mul(x = zero_mean_7_cast, y = zero_mean_7_cast)[name = tensor("zero_mean_sq_7_cast")]; + tensor var_517 = const()[name = tensor("op_517"), val = tensor([1])]; + tensor var_518_cast = reduce_mean(axes = var_517, keep_dims = var_23, x = zero_mean_sq_7_cast)[name = tensor("op_518_cast")]; + tensor var_519_to_fp16 = const()[name = tensor("op_519_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_520_cast = add(x = var_518_cast, y = var_519_to_fp16)[name = tensor("op_520_cast")]; + tensor denom_7_epsilon_0_to_fp16 = const()[name = tensor("denom_7_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_7_cast = rsqrt(epsilon = denom_7_epsilon_0_to_fp16, x = var_520_cast)[name = tensor("denom_7_cast")]; + tensor out_7_cast = mul(x = zero_mean_7_cast, y = denom_7_cast)[name = tensor("out_7_cast")]; + tensor var_524_to_fp16 = const()[name = tensor("op_524_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(22328512)))]; + tensor var_525_cast = add(x = out_7_cast, y = var_524_to_fp16)[name = tensor("op_525_cast")]; + tensor var_527_to_fp16 = const()[name = tensor("op_527_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(22329856)))]; + tensor hidden_states_23_cast = mul(x = var_525_cast, y = var_527_to_fp16)[name = tensor("hidden_states_23_cast")]; + tensor var_534 = const()[name = tensor("op_534"), val = tensor([1, 1])]; + tensor var_536 = const()[name = tensor("op_536"), val = tensor([1, 1])]; + tensor q_5_pad_type_0 = const()[name = tensor("q_5_pad_type_0"), val = tensor("custom")]; + tensor q_5_pad_0 = const()[name = tensor("q_5_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_1_attentions_0_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(22331200))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(22638464))), name = tensor("unet_down_blocks_1_attentions_0_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_5_cast = conv(dilations = var_536, groups = var_31, pad = q_5_pad_0, pad_type = q_5_pad_type_0, strides = var_534, weight = unet_down_blocks_1_attentions_0_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_23_cast)[name = tensor("q_5_cast")]; + tensor var_540 = const()[name = tensor("op_540"), val = tensor([1, 1])]; + tensor var_542 = const()[name = tensor("op_542"), val = tensor([1, 1])]; + tensor k_5_pad_type_0 = const()[name = tensor("k_5_pad_type_0"), val = tensor("custom")]; + tensor k_5_pad_0 = const()[name = tensor("k_5_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_1_attentions_0_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(22638656))), lut = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(22945920))), name = tensor("unet_down_blocks_1_attentions_0_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor k_5_cast = conv(dilations = var_542, groups = var_31, pad = k_5_pad_0, pad_type = k_5_pad_type_0, strides = var_540, weight = unet_down_blocks_1_attentions_0_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_23_cast)[name = tensor("k_5_cast")]; + tensor var_546 = const()[name = tensor("op_546"), val = tensor([1, 1])]; + tensor var_548 = const()[name = tensor("op_548"), val = tensor([1, 1])]; + tensor v_5_pad_type_0 = const()[name = tensor("v_5_pad_type_0"), val = tensor("custom")]; + tensor v_5_pad_0 = const()[name = tensor("v_5_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_1_attentions_0_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(22946112))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(23253376))), name = tensor("unet_down_blocks_1_attentions_0_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor v_5_cast = conv(dilations = var_548, groups = var_31, pad = v_5_pad_0, pad_type = v_5_pad_type_0, strides = var_546, weight = unet_down_blocks_1_attentions_0_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_23_cast)[name = tensor("v_5_cast")]; + tensor var_552 = const()[name = tensor("op_552"), val = tensor([2, 10, 64, -1])]; + tensor var_553_cast = reshape(shape = var_552, x = q_5_cast)[name = tensor("op_553_cast")]; + tensor var_554 = const()[name = tensor("op_554"), val = tensor([2, 10, 64, -1])]; + tensor var_555_cast = reshape(shape = var_554, x = k_5_cast)[name = tensor("op_555_cast")]; + tensor var_556 = const()[name = tensor("op_556"), val = tensor([2, 10, 64, -1])]; + tensor var_557_cast = reshape(shape = var_556, x = v_5_cast)[name = tensor("op_557_cast")]; + tensor attn_weights_9_transpose_x_0 = const()[name = tensor("attn_weights_9_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_9_transpose_y_0 = const()[name = tensor("attn_weights_9_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_9_cast = matmul(transpose_x = attn_weights_9_transpose_x_0, transpose_y = attn_weights_9_transpose_y_0, x = var_553_cast, y = var_555_cast)[name = tensor("attn_weights_9_cast")]; + tensor attn_weights_11_cast = mul(x = attn_weights_9_cast, y = var_12_to_fp16)[name = tensor("attn_weights_11_cast")]; + tensor var_561_cast = softmax(axis = var_18, x = attn_weights_11_cast)[name = tensor("op_561_cast")]; + tensor attn_5_transpose_x_0 = const()[name = tensor("attn_5_transpose_x_0"), val = tensor(false)]; + tensor attn_5_transpose_y_0 = const()[name = tensor("attn_5_transpose_y_0"), val = tensor(true)]; + tensor attn_5_cast = matmul(transpose_x = attn_5_transpose_x_0, transpose_y = attn_5_transpose_y_0, x = var_557_cast, y = var_561_cast)[name = tensor("attn_5_cast")]; + tensor var_565 = const()[name = tensor("op_565"), val = tensor([2, 640, 1, -1])]; + tensor input_69_cast = reshape(shape = var_565, x = attn_5_cast)[name = tensor("input_69_cast")]; + tensor var_570 = const()[name = tensor("op_570"), val = tensor([1, 1])]; + tensor var_572 = const()[name = tensor("op_572"), val = tensor([1, 1])]; + tensor var_574_pad_type_0 = const()[name = tensor("op_574_pad_type_0"), val = 
tensor("custom")]; + tensor var_574_pad_0 = const()[name = tensor("op_574_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_1_attentions_0_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(23253568))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(23560832))), name = tensor("unet_down_blocks_1_attentions_0_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor unet_down_blocks_1_attentions_0_transformer_blocks_1_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("unet_down_blocks_1_attentions_0_transformer_blocks_1_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(23561024)))]; + tensor var_574_cast = conv(bias = unet_down_blocks_1_attentions_0_transformer_blocks_1_attn1_to_out_0_bias_to_fp16, dilations = var_572, groups = var_31, pad = var_574_pad_0, pad_type = var_574_pad_type_0, strides = var_570, weight = unet_down_blocks_1_attentions_0_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized, x = input_69_cast)[name = tensor("op_574_cast")]; + tensor inputs_9_cast = add(x = var_574_cast, y = inputs_7_cast)[name = tensor("inputs_9_cast")]; + tensor var_578 = const()[name = tensor("op_578"), val = tensor([1])]; + tensor channels_mean_9_cast = reduce_mean(axes = var_578, keep_dims = var_23, x = inputs_9_cast)[name = tensor("channels_mean_9_cast")]; + tensor zero_mean_9_cast = sub(x = inputs_9_cast, y = channels_mean_9_cast)[name = tensor("zero_mean_9_cast")]; + tensor zero_mean_sq_9_cast = mul(x = zero_mean_9_cast, y = zero_mean_9_cast)[name = tensor("zero_mean_sq_9_cast")]; + tensor var_582 = const()[name = tensor("op_582"), val = tensor([1])]; + tensor var_583_cast = reduce_mean(axes = var_582, keep_dims = var_23, x = zero_mean_sq_9_cast)[name = tensor("op_583_cast")]; + tensor var_584_to_fp16 = const()[name = tensor("op_584_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_585_cast = add(x = var_583_cast, y = var_584_to_fp16)[name = tensor("op_585_cast")]; + tensor denom_9_epsilon_0_to_fp16 = const()[name = tensor("denom_9_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_9_cast = rsqrt(epsilon = denom_9_epsilon_0_to_fp16, x = var_585_cast)[name = tensor("denom_9_cast")]; + tensor out_9_cast = mul(x = zero_mean_9_cast, y = denom_9_cast)[name = tensor("out_9_cast")]; + tensor var_589_to_fp16 = const()[name = tensor("op_589_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(23562368)))]; + tensor var_590_cast = add(x = out_9_cast, y = var_589_to_fp16)[name = tensor("op_590_cast")]; + tensor var_592_to_fp16 = const()[name = tensor("op_592_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(23563712)))]; + tensor hidden_states_25_cast = mul(x = var_590_cast, y = var_592_to_fp16)[name = tensor("hidden_states_25_cast")]; + tensor var_599 = const()[name = tensor("op_599"), val = tensor([1, 1])]; + tensor var_601 = const()[name = tensor("op_601"), val = tensor([1, 1])]; + tensor q_7_pad_type_0 = const()[name = tensor("q_7_pad_type_0"), val = tensor("custom")]; + tensor q_7_pad_0 = const()[name = tensor("q_7_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_1_attentions_0_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = 
tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(23565056))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(23872320))), name = tensor("unet_down_blocks_1_attentions_0_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_7_cast = conv(dilations = var_601, groups = var_31, pad = q_7_pad_0, pad_type = q_7_pad_type_0, strides = var_599, weight = unet_down_blocks_1_attentions_0_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_25_cast)[name = tensor("q_7_cast")]; + tensor var_605 = const()[name = tensor("op_605"), val = tensor([1, 1])]; + tensor var_607 = const()[name = tensor("op_607"), val = tensor([1, 1])]; + tensor k_7_pad_type_0 = const()[name = tensor("k_7_pad_type_0"), val = tensor("custom")]; + tensor k_7_pad_0 = const()[name = tensor("k_7_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_1_attentions_0_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(23872512))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(24855616))), name = tensor("unet_down_blocks_1_attentions_0_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor k_7_cast = conv(dilations = var_607, groups = var_31, pad = k_7_pad_0, pad_type = k_7_pad_type_0, strides = var_605, weight = unet_down_blocks_1_attentions_0_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_7_cast")]; + tensor var_611 = const()[name = tensor("op_611"), val = tensor([1, 1])]; + tensor var_613 = const()[name = tensor("op_613"), val = tensor([1, 1])]; + tensor v_7_pad_type_0 = const()[name = tensor("v_7_pad_type_0"), val = tensor("custom")]; + tensor v_7_pad_0 = const()[name = tensor("v_7_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_1_attentions_0_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(24855808))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(25838912))), name = tensor("unet_down_blocks_1_attentions_0_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor v_7_cast = conv(dilations = var_613, groups = var_31, pad = v_7_pad_0, pad_type = v_7_pad_type_0, strides = var_611, weight = unet_down_blocks_1_attentions_0_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_7_cast")]; + tensor var_617 = const()[name = tensor("op_617"), val = tensor([2, 10, 64, -1])]; + tensor var_618_cast = reshape(shape = var_617, x = q_7_cast)[name = tensor("op_618_cast")]; + tensor var_619 = const()[name = tensor("op_619"), val = tensor([2, 10, 64, -1])]; + tensor var_620_cast = reshape(shape = var_619, x = k_7_cast)[name = tensor("op_620_cast")]; + tensor var_621 = const()[name = tensor("op_621"), val = tensor([2, 10, 64, -1])]; + tensor var_622_cast = reshape(shape = var_621, x = v_7_cast)[name = tensor("op_622_cast")]; + tensor attn_weights_13_transpose_x_0 = const()[name = tensor("attn_weights_13_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_13_transpose_y_0 = const()[name = tensor("attn_weights_13_transpose_y_0"), val = tensor(false)]; 
+ tensor attn_weights_13_cast = matmul(transpose_x = attn_weights_13_transpose_x_0, transpose_y = attn_weights_13_transpose_y_0, x = var_618_cast, y = var_620_cast)[name = tensor("attn_weights_13_cast")]; + tensor attn_weights_15_cast = mul(x = attn_weights_13_cast, y = var_12_to_fp16)[name = tensor("attn_weights_15_cast")]; + tensor var_626_cast = softmax(axis = var_18, x = attn_weights_15_cast)[name = tensor("op_626_cast")]; + tensor attn_7_transpose_x_0 = const()[name = tensor("attn_7_transpose_x_0"), val = tensor(false)]; + tensor attn_7_transpose_y_0 = const()[name = tensor("attn_7_transpose_y_0"), val = tensor(true)]; + tensor attn_7_cast = matmul(transpose_x = attn_7_transpose_x_0, transpose_y = attn_7_transpose_y_0, x = var_622_cast, y = var_626_cast)[name = tensor("attn_7_cast")]; + tensor var_630 = const()[name = tensor("op_630"), val = tensor([2, 640, 1, -1])]; + tensor input_71_cast = reshape(shape = var_630, x = attn_7_cast)[name = tensor("input_71_cast")]; + tensor var_635 = const()[name = tensor("op_635"), val = tensor([1, 1])]; + tensor var_637 = const()[name = tensor("op_637"), val = tensor([1, 1])]; + tensor var_639_pad_type_0 = const()[name = tensor("op_639_pad_type_0"), val = tensor("custom")]; + tensor var_639_pad_0 = const()[name = tensor("op_639_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_1_attentions_0_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(25839104))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(26146368))), name = tensor("unet_down_blocks_1_attentions_0_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor unet_down_blocks_1_attentions_0_transformer_blocks_1_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("unet_down_blocks_1_attentions_0_transformer_blocks_1_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(26146560)))]; + tensor var_639_cast = conv(bias = unet_down_blocks_1_attentions_0_transformer_blocks_1_attn2_to_out_0_bias_to_fp16, dilations = var_637, groups = var_31, pad = var_639_pad_0, pad_type = var_639_pad_type_0, strides = var_635, weight = unet_down_blocks_1_attentions_0_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized, x = input_71_cast)[name = tensor("op_639_cast")]; + tensor inputs_11_cast = add(x = var_639_cast, y = inputs_9_cast)[name = tensor("inputs_11_cast")]; + tensor var_643 = const()[name = tensor("op_643"), val = tensor([1])]; + tensor channels_mean_11_cast = reduce_mean(axes = var_643, keep_dims = var_23, x = inputs_11_cast)[name = tensor("channels_mean_11_cast")]; + tensor zero_mean_11_cast = sub(x = inputs_11_cast, y = channels_mean_11_cast)[name = tensor("zero_mean_11_cast")]; + tensor zero_mean_sq_11_cast = mul(x = zero_mean_11_cast, y = zero_mean_11_cast)[name = tensor("zero_mean_sq_11_cast")]; + tensor var_647 = const()[name = tensor("op_647"), val = tensor([1])]; + tensor var_648_cast = reduce_mean(axes = var_647, keep_dims = var_23, x = zero_mean_sq_11_cast)[name = tensor("op_648_cast")]; + tensor var_649_to_fp16 = const()[name = tensor("op_649_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_650_cast = add(x = var_648_cast, y = var_649_to_fp16)[name = tensor("op_650_cast")]; + tensor denom_11_epsilon_0_to_fp16 = const()[name = tensor("denom_11_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + 
tensor denom_11_cast = rsqrt(epsilon = denom_11_epsilon_0_to_fp16, x = var_650_cast)[name = tensor("denom_11_cast")]; + tensor out_11_cast = mul(x = zero_mean_11_cast, y = denom_11_cast)[name = tensor("out_11_cast")]; + tensor var_654_to_fp16 = const()[name = tensor("op_654_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(26147904)))]; + tensor var_655_cast = add(x = out_11_cast, y = var_654_to_fp16)[name = tensor("op_655_cast")]; + tensor var_657_to_fp16 = const()[name = tensor("op_657_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(26149248)))]; + tensor input_73_cast = mul(x = var_655_cast, y = var_657_to_fp16)[name = tensor("input_73_cast")]; + tensor var_665 = const()[name = tensor("op_665"), val = tensor([1, 1])]; + tensor var_667 = const()[name = tensor("op_667"), val = tensor([1, 1])]; + tensor var_669_pad_type_0 = const()[name = tensor("op_669_pad_type_0"), val = tensor("custom")]; + tensor var_669_pad_0 = const()[name = tensor("op_669_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_1_attentions_0_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(26150592))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(28608256))), name = tensor("unet_down_blocks_1_attentions_0_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([5120, 640, 1, 1])]; + tensor unet_down_blocks_1_attentions_0_transformer_blocks_1_ff_net_0_proj_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(28608448))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(28612352))), name = tensor("unet_down_blocks_1_attentions_0_transformer_blocks_1_ff_net_0_proj_bias_to_fp16_palettized"), shape = tensor([5120])]; + tensor var_669_cast = conv(bias = unet_down_blocks_1_attentions_0_transformer_blocks_1_ff_net_0_proj_bias_to_fp16_palettized, dilations = var_667, groups = var_31, pad = var_669_pad_0, pad_type = var_669_pad_type_0, strides = var_665, weight = unet_down_blocks_1_attentions_0_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized, x = input_73_cast)[name = tensor("op_669_cast")]; + tensor var_670_split_sizes_0 = const()[name = tensor("op_670_split_sizes_0"), val = tensor([2560, 2560])]; + tensor var_670_axis_0 = const()[name = tensor("op_670_axis_0"), val = tensor(1)]; + tensor var_670_cast_0, tensor var_670_cast_1 = split(axis = var_670_axis_0, split_sizes = var_670_split_sizes_0, x = var_669_cast)[name = tensor("op_670_cast")]; + tensor var_672_mode_0 = const()[name = tensor("op_672_mode_0"), val = tensor("EXACT")]; + tensor var_672_cast = gelu(mode = var_672_mode_0, x = var_670_cast_1)[name = tensor("op_672_cast")]; + tensor input_75_cast = mul(x = var_670_cast_0, y = var_672_cast)[name = tensor("input_75_cast")]; + tensor var_676 = const()[name = tensor("op_676"), val = tensor([1, 1])]; + tensor var_678 = const()[name = tensor("op_678"), val = tensor([1, 1])]; + tensor var_680_pad_type_0 = const()[name = tensor("op_680_pad_type_0"), val = tensor("custom")]; + tensor var_680_pad_0 = const()[name = tensor("op_680_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_1_attentions_0_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices 
= tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(28612544))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(29841408))), name = tensor("unet_down_blocks_1_attentions_0_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized"), shape = tensor([640, 2560, 1, 1])]; + tensor unet_down_blocks_1_attentions_0_transformer_blocks_1_ff_net_2_bias_to_fp16 = const()[name = tensor("unet_down_blocks_1_attentions_0_transformer_blocks_1_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(29841600)))]; + tensor var_680_cast = conv(bias = unet_down_blocks_1_attentions_0_transformer_blocks_1_ff_net_2_bias_to_fp16, dilations = var_678, groups = var_31, pad = var_680_pad_0, pad_type = var_680_pad_type_0, strides = var_676, weight = unet_down_blocks_1_attentions_0_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized, x = input_75_cast)[name = tensor("op_680_cast")]; + tensor hidden_states_29_cast = add(x = var_680_cast, y = inputs_11_cast)[name = tensor("hidden_states_29_cast")]; + tensor var_682 = const()[name = tensor("op_682"), val = tensor([2, 640, 64, 64])]; + tensor input_77_cast = reshape(shape = var_682, x = hidden_states_29_cast)[name = tensor("input_77_cast")]; + tensor var_686 = const()[name = tensor("op_686"), val = tensor([1, 1])]; + tensor var_688 = const()[name = tensor("op_688"), val = tensor([1, 1])]; + tensor hidden_states_31_pad_type_0 = const()[name = tensor("hidden_states_31_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_31_pad_0 = const()[name = tensor("hidden_states_31_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_1_attentions_0_proj_out_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(29842944))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(30150208))), name = tensor("unet_down_blocks_1_attentions_0_proj_out_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor unet_down_blocks_1_attentions_0_proj_out_bias_to_fp16 = const()[name = tensor("unet_down_blocks_1_attentions_0_proj_out_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(30150400)))]; + tensor hidden_states_31_cast = conv(bias = unet_down_blocks_1_attentions_0_proj_out_bias_to_fp16, dilations = var_688, groups = var_31, pad = hidden_states_31_pad_0, pad_type = hidden_states_31_pad_type_0, strides = var_686, weight = unet_down_blocks_1_attentions_0_proj_out_weight_to_fp16_palettized, x = input_77_cast)[name = tensor("hidden_states_31_cast")]; + tensor input_79_cast = add(x = hidden_states_31_cast, y = hidden_states_13_cast)[name = tensor("input_79_cast")]; + tensor reshape_28_shape_0 = const()[name = tensor("reshape_28_shape_0"), val = tensor([2, 32, 20, 64, 64])]; + tensor reshape_28_cast = reshape(shape = reshape_28_shape_0, x = input_79_cast)[name = tensor("reshape_28_cast")]; + tensor reduce_mean_21_axes_0 = const()[name = tensor("reduce_mean_21_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_21_keep_dims_0 = const()[name = tensor("reduce_mean_21_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_21_cast = reduce_mean(axes = reduce_mean_21_axes_0, keep_dims = reduce_mean_21_keep_dims_0, x = reshape_28_cast)[name = tensor("reduce_mean_21_cast")]; + tensor sub_14_cast = sub(x = reshape_28_cast, y = reduce_mean_21_cast)[name = 
tensor("sub_14_cast")]; + tensor square_7_cast = square(x = sub_14_cast)[name = tensor("square_7_cast")]; + tensor reduce_mean_23_axes_0 = const()[name = tensor("reduce_mean_23_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_23_keep_dims_0 = const()[name = tensor("reduce_mean_23_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_23_cast = reduce_mean(axes = reduce_mean_23_axes_0, keep_dims = reduce_mean_23_keep_dims_0, x = square_7_cast)[name = tensor("reduce_mean_23_cast")]; + tensor add_14_y_0_to_fp16 = const()[name = tensor("add_14_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_14_cast = add(x = reduce_mean_23_cast, y = add_14_y_0_to_fp16)[name = tensor("add_14_cast")]; + tensor sqrt_7_cast = sqrt(x = add_14_cast)[name = tensor("sqrt_7_cast")]; + tensor real_div_7_cast = real_div(x = sub_14_cast, y = sqrt_7_cast)[name = tensor("real_div_7_cast")]; + tensor reshape_29_shape_0 = const()[name = tensor("reshape_29_shape_0"), val = tensor([2, 640, 64, 64])]; + tensor reshape_29_cast = reshape(shape = reshape_29_shape_0, x = real_div_7_cast)[name = tensor("reshape_29_cast")]; + tensor add_15_gamma_0_to_fp16 = const()[name = tensor("add_15_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(30151744)))]; + tensor add_15_beta_0_to_fp16 = const()[name = tensor("add_15_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(30153088)))]; + tensor add_15_epsilon_0_to_fp16 = const()[name = tensor("add_15_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_15_cast = batch_norm(beta = add_15_beta_0_to_fp16, epsilon = add_15_epsilon_0_to_fp16, gamma = add_15_gamma_0_to_fp16, mean = add_11_mean_0_to_fp16, variance = add_11_variance_0_to_fp16, x = reshape_29_cast)[name = tensor("add_15_cast")]; + tensor input_83_cast = silu(x = add_15_cast)[name = tensor("input_83_cast")]; + tensor var_703 = const()[name = tensor("op_703"), val = tensor([1, 1])]; + tensor var_705 = const()[name = tensor("op_705"), val = tensor([1, 1])]; + tensor hidden_states_33_pad_type_0 = const()[name = tensor("hidden_states_33_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_33_pad_0 = const()[name = tensor("hidden_states_33_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor unet_down_blocks_1_resnets_1_conv1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(30154432))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(32919296))), name = tensor("unet_down_blocks_1_resnets_1_conv1_weight_to_fp16_palettized"), shape = tensor([640, 640, 3, 3])]; + tensor unet_down_blocks_1_resnets_1_conv1_bias_to_fp16 = const()[name = tensor("unet_down_blocks_1_resnets_1_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(32919488)))]; + tensor hidden_states_33_cast = conv(bias = unet_down_blocks_1_resnets_1_conv1_bias_to_fp16, dilations = var_705, groups = var_31, pad = hidden_states_33_pad_0, pad_type = hidden_states_33_pad_type_0, strides = var_703, weight = unet_down_blocks_1_resnets_1_conv1_weight_to_fp16_palettized, x = input_83_cast)[name = tensor("hidden_states_33_cast")]; + tensor var_711 = const()[name = tensor("op_711"), val = tensor([1, 1])]; + tensor var_713 = const()[name = tensor("op_713"), val = tensor([1, 1])]; + tensor temb_7_pad_type_0 = const()[name = tensor("temb_7_pad_type_0"), val = tensor("custom")]; + 
tensor temb_7_pad_0 = const()[name = tensor("temb_7_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_1_resnets_1_time_emb_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(32920832))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(33535296))), name = tensor("unet_down_blocks_1_resnets_1_time_emb_proj_weight_to_fp16_palettized"), shape = tensor([640, 1280, 1, 1])]; + tensor unet_down_blocks_1_resnets_1_time_emb_proj_bias_to_fp16 = const()[name = tensor("unet_down_blocks_1_resnets_1_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(33535488)))]; + tensor temb_7_cast = conv(bias = unet_down_blocks_1_resnets_1_time_emb_proj_bias_to_fp16, dilations = var_713, groups = var_31, pad = temb_7_pad_0, pad_type = temb_7_pad_type_0, strides = var_711, weight = unet_down_blocks_1_resnets_1_time_emb_proj_weight_to_fp16_palettized, x = input_21_cast)[name = tensor("temb_7_cast")]; + tensor input_87_cast = add(x = hidden_states_33_cast, y = temb_7_cast)[name = tensor("input_87_cast")]; + tensor reshape_32_shape_0 = const()[name = tensor("reshape_32_shape_0"), val = tensor([2, 32, 20, 64, 64])]; + tensor reshape_32_cast = reshape(shape = reshape_32_shape_0, x = input_87_cast)[name = tensor("reshape_32_cast")]; + tensor reduce_mean_24_axes_0 = const()[name = tensor("reduce_mean_24_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_24_keep_dims_0 = const()[name = tensor("reduce_mean_24_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_24_cast = reduce_mean(axes = reduce_mean_24_axes_0, keep_dims = reduce_mean_24_keep_dims_0, x = reshape_32_cast)[name = tensor("reduce_mean_24_cast")]; + tensor sub_16_cast = sub(x = reshape_32_cast, y = reduce_mean_24_cast)[name = tensor("sub_16_cast")]; + tensor square_8_cast = square(x = sub_16_cast)[name = tensor("square_8_cast")]; + tensor reduce_mean_26_axes_0 = const()[name = tensor("reduce_mean_26_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_26_keep_dims_0 = const()[name = tensor("reduce_mean_26_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_26_cast = reduce_mean(axes = reduce_mean_26_axes_0, keep_dims = reduce_mean_26_keep_dims_0, x = square_8_cast)[name = tensor("reduce_mean_26_cast")]; + tensor add_16_y_0_to_fp16 = const()[name = tensor("add_16_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_16_cast = add(x = reduce_mean_26_cast, y = add_16_y_0_to_fp16)[name = tensor("add_16_cast")]; + tensor sqrt_8_cast = sqrt(x = add_16_cast)[name = tensor("sqrt_8_cast")]; + tensor real_div_8_cast = real_div(x = sub_16_cast, y = sqrt_8_cast)[name = tensor("real_div_8_cast")]; + tensor reshape_33_shape_0 = const()[name = tensor("reshape_33_shape_0"), val = tensor([2, 640, 64, 64])]; + tensor reshape_33_cast = reshape(shape = reshape_33_shape_0, x = real_div_8_cast)[name = tensor("reshape_33_cast")]; + tensor add_17_gamma_0_to_fp16 = const()[name = tensor("add_17_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(33536832)))]; + tensor add_17_beta_0_to_fp16 = const()[name = tensor("add_17_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(33538176)))]; + tensor add_17_epsilon_0_to_fp16 = const()[name = tensor("add_17_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_17_cast = batch_norm(beta = 
add_17_beta_0_to_fp16, epsilon = add_17_epsilon_0_to_fp16, gamma = add_17_gamma_0_to_fp16, mean = add_11_mean_0_to_fp16, variance = add_11_variance_0_to_fp16, x = reshape_33_cast)[name = tensor("add_17_cast")]; + tensor input_91_cast = silu(x = add_17_cast)[name = tensor("input_91_cast")]; + tensor var_723 = const()[name = tensor("op_723"), val = tensor([1, 1])]; + tensor var_725 = const()[name = tensor("op_725"), val = tensor([1, 1])]; + tensor hidden_states_35_pad_type_0 = const()[name = tensor("hidden_states_35_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_35_pad_0 = const()[name = tensor("hidden_states_35_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor unet_down_blocks_1_resnets_1_conv2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(33539520))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(36304384))), name = tensor("unet_down_blocks_1_resnets_1_conv2_weight_to_fp16_palettized"), shape = tensor([640, 640, 3, 3])]; + tensor unet_down_blocks_1_resnets_1_conv2_bias_to_fp16 = const()[name = tensor("unet_down_blocks_1_resnets_1_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(36304576)))]; + tensor hidden_states_35_cast = conv(bias = unet_down_blocks_1_resnets_1_conv2_bias_to_fp16, dilations = var_725, groups = var_31, pad = hidden_states_35_pad_0, pad_type = hidden_states_35_pad_type_0, strides = var_723, weight = unet_down_blocks_1_resnets_1_conv2_weight_to_fp16_palettized, x = input_91_cast)[name = tensor("hidden_states_35_cast")]; + tensor hidden_states_37_cast = add(x = input_79_cast, y = hidden_states_35_cast)[name = tensor("hidden_states_37_cast")]; + tensor reshape_36_shape_0 = const()[name = tensor("reshape_36_shape_0"), val = tensor([2, 32, 20, 64, 64])]; + tensor reshape_36_cast = reshape(shape = reshape_36_shape_0, x = hidden_states_37_cast)[name = tensor("reshape_36_cast")]; + tensor reduce_mean_27_axes_0 = const()[name = tensor("reduce_mean_27_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_27_keep_dims_0 = const()[name = tensor("reduce_mean_27_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_27_cast = reduce_mean(axes = reduce_mean_27_axes_0, keep_dims = reduce_mean_27_keep_dims_0, x = reshape_36_cast)[name = tensor("reduce_mean_27_cast")]; + tensor sub_18_cast = sub(x = reshape_36_cast, y = reduce_mean_27_cast)[name = tensor("sub_18_cast")]; + tensor square_9_cast = square(x = sub_18_cast)[name = tensor("square_9_cast")]; + tensor reduce_mean_29_axes_0 = const()[name = tensor("reduce_mean_29_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_29_keep_dims_0 = const()[name = tensor("reduce_mean_29_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_29_cast = reduce_mean(axes = reduce_mean_29_axes_0, keep_dims = reduce_mean_29_keep_dims_0, x = square_9_cast)[name = tensor("reduce_mean_29_cast")]; + tensor add_18_y_0_to_fp16 = const()[name = tensor("add_18_y_0_to_fp16"), val = tensor(0x1.1p-20)]; + tensor add_18_cast = add(x = reduce_mean_29_cast, y = add_18_y_0_to_fp16)[name = tensor("add_18_cast")]; + tensor sqrt_9_cast = sqrt(x = add_18_cast)[name = tensor("sqrt_9_cast")]; + tensor real_div_9_cast = real_div(x = sub_18_cast, y = sqrt_9_cast)[name = tensor("real_div_9_cast")]; + tensor reshape_37_shape_0 = const()[name = tensor("reshape_37_shape_0"), val = tensor([2, 640, 64, 64])]; + tensor reshape_37_cast = reshape(shape = 
reshape_37_shape_0, x = real_div_9_cast)[name = tensor("reshape_37_cast")]; + tensor add_19_gamma_0_to_fp16 = const()[name = tensor("add_19_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(36305920)))]; + tensor add_19_beta_0_to_fp16 = const()[name = tensor("add_19_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(36307264)))]; + tensor add_19_epsilon_0_to_fp16 = const()[name = tensor("add_19_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_19_cast = batch_norm(beta = add_19_beta_0_to_fp16, epsilon = add_19_epsilon_0_to_fp16, gamma = add_19_gamma_0_to_fp16, mean = add_11_mean_0_to_fp16, variance = add_11_variance_0_to_fp16, x = reshape_37_cast)[name = tensor("add_19_cast")]; + tensor var_747 = const()[name = tensor("op_747"), val = tensor([1, 1])]; + tensor var_749 = const()[name = tensor("op_749"), val = tensor([1, 1])]; + tensor hidden_states_39_pad_type_0 = const()[name = tensor("hidden_states_39_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_39_pad_0 = const()[name = tensor("hidden_states_39_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_1_attentions_1_proj_in_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(36308608))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(36615872))), name = tensor("unet_down_blocks_1_attentions_1_proj_in_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor unet_down_blocks_1_attentions_1_proj_in_bias_to_fp16 = const()[name = tensor("unet_down_blocks_1_attentions_1_proj_in_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(36616064)))]; + tensor hidden_states_39_cast = conv(bias = unet_down_blocks_1_attentions_1_proj_in_bias_to_fp16, dilations = var_749, groups = var_31, pad = hidden_states_39_pad_0, pad_type = hidden_states_39_pad_type_0, strides = var_747, weight = unet_down_blocks_1_attentions_1_proj_in_weight_to_fp16_palettized, x = add_19_cast)[name = tensor("hidden_states_39_cast")]; + tensor var_754 = const()[name = tensor("op_754"), val = tensor([2, 640, 1, 4096])]; + tensor inputs_13_cast = reshape(shape = var_754, x = hidden_states_39_cast)[name = tensor("inputs_13_cast")]; + tensor var_764 = const()[name = tensor("op_764"), val = tensor([1])]; + tensor channels_mean_13_cast = reduce_mean(axes = var_764, keep_dims = var_23, x = inputs_13_cast)[name = tensor("channels_mean_13_cast")]; + tensor zero_mean_13_cast = sub(x = inputs_13_cast, y = channels_mean_13_cast)[name = tensor("zero_mean_13_cast")]; + tensor zero_mean_sq_13_cast = mul(x = zero_mean_13_cast, y = zero_mean_13_cast)[name = tensor("zero_mean_sq_13_cast")]; + tensor var_768 = const()[name = tensor("op_768"), val = tensor([1])]; + tensor var_769_cast = reduce_mean(axes = var_768, keep_dims = var_23, x = zero_mean_sq_13_cast)[name = tensor("op_769_cast")]; + tensor var_770_to_fp16 = const()[name = tensor("op_770_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_771_cast = add(x = var_769_cast, y = var_770_to_fp16)[name = tensor("op_771_cast")]; + tensor denom_13_epsilon_0_to_fp16 = const()[name = tensor("denom_13_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_13_cast = rsqrt(epsilon = denom_13_epsilon_0_to_fp16, x = var_771_cast)[name = tensor("denom_13_cast")]; + tensor out_13_cast = mul(x = zero_mean_13_cast, y = 
denom_13_cast)[name = tensor("out_13_cast")]; + tensor var_775_to_fp16 = const()[name = tensor("op_775_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(36617408)))]; + tensor var_776_cast = add(x = out_13_cast, y = var_775_to_fp16)[name = tensor("op_776_cast")]; + tensor var_778_to_fp16 = const()[name = tensor("op_778_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(36618752)))]; + tensor hidden_states_41_cast = mul(x = var_776_cast, y = var_778_to_fp16)[name = tensor("hidden_states_41_cast")]; + tensor var_785 = const()[name = tensor("op_785"), val = tensor([1, 1])]; + tensor var_787 = const()[name = tensor("op_787"), val = tensor([1, 1])]; + tensor q_9_pad_type_0 = const()[name = tensor("q_9_pad_type_0"), val = tensor("custom")]; + tensor q_9_pad_0 = const()[name = tensor("q_9_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_1_attentions_1_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(36620096))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(36927360))), name = tensor("unet_down_blocks_1_attentions_1_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_9_cast = conv(dilations = var_787, groups = var_31, pad = q_9_pad_0, pad_type = q_9_pad_type_0, strides = var_785, weight = unet_down_blocks_1_attentions_1_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_41_cast)[name = tensor("q_9_cast")]; + tensor var_791 = const()[name = tensor("op_791"), val = tensor([1, 1])]; + tensor var_793 = const()[name = tensor("op_793"), val = tensor([1, 1])]; + tensor k_9_pad_type_0 = const()[name = tensor("k_9_pad_type_0"), val = tensor("custom")]; + tensor k_9_pad_0 = const()[name = tensor("k_9_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_1_attentions_1_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(36927552))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(37234816))), name = tensor("unet_down_blocks_1_attentions_1_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor k_9_cast = conv(dilations = var_793, groups = var_31, pad = k_9_pad_0, pad_type = k_9_pad_type_0, strides = var_791, weight = unet_down_blocks_1_attentions_1_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_41_cast)[name = tensor("k_9_cast")]; + tensor var_797 = const()[name = tensor("op_797"), val = tensor([1, 1])]; + tensor var_799 = const()[name = tensor("op_799"), val = tensor([1, 1])]; + tensor v_9_pad_type_0 = const()[name = tensor("v_9_pad_type_0"), val = tensor("custom")]; + tensor v_9_pad_0 = const()[name = tensor("v_9_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_1_attentions_1_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(37235008))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(37542272))), name = tensor("unet_down_blocks_1_attentions_1_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([640, 640, 
1, 1])]; + tensor v_9_cast = conv(dilations = var_799, groups = var_31, pad = v_9_pad_0, pad_type = v_9_pad_type_0, strides = var_797, weight = unet_down_blocks_1_attentions_1_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_41_cast)[name = tensor("v_9_cast")]; + tensor var_803 = const()[name = tensor("op_803"), val = tensor([2, 10, 64, -1])]; + tensor var_804_cast = reshape(shape = var_803, x = q_9_cast)[name = tensor("op_804_cast")]; + tensor var_805 = const()[name = tensor("op_805"), val = tensor([2, 10, 64, -1])]; + tensor var_806_cast = reshape(shape = var_805, x = k_9_cast)[name = tensor("op_806_cast")]; + tensor var_807 = const()[name = tensor("op_807"), val = tensor([2, 10, 64, -1])]; + tensor var_808_cast = reshape(shape = var_807, x = v_9_cast)[name = tensor("op_808_cast")]; + tensor attn_weights_17_transpose_x_0 = const()[name = tensor("attn_weights_17_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_17_transpose_y_0 = const()[name = tensor("attn_weights_17_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_17_cast = matmul(transpose_x = attn_weights_17_transpose_x_0, transpose_y = attn_weights_17_transpose_y_0, x = var_804_cast, y = var_806_cast)[name = tensor("attn_weights_17_cast")]; + tensor attn_weights_19_cast = mul(x = attn_weights_17_cast, y = var_12_to_fp16)[name = tensor("attn_weights_19_cast")]; + tensor var_812_cast = softmax(axis = var_18, x = attn_weights_19_cast)[name = tensor("op_812_cast")]; + tensor attn_9_transpose_x_0 = const()[name = tensor("attn_9_transpose_x_0"), val = tensor(false)]; + tensor attn_9_transpose_y_0 = const()[name = tensor("attn_9_transpose_y_0"), val = tensor(true)]; + tensor attn_9_cast = matmul(transpose_x = attn_9_transpose_x_0, transpose_y = attn_9_transpose_y_0, x = var_808_cast, y = var_812_cast)[name = tensor("attn_9_cast")]; + tensor var_816 = const()[name = tensor("op_816"), val = tensor([2, 640, 1, -1])]; + tensor input_95_cast = reshape(shape = var_816, x = attn_9_cast)[name = tensor("input_95_cast")]; + tensor var_821 = const()[name = tensor("op_821"), val = tensor([1, 1])]; + tensor var_823 = const()[name = tensor("op_823"), val = tensor([1, 1])]; + tensor var_825_pad_type_0 = const()[name = tensor("op_825_pad_type_0"), val = tensor("custom")]; + tensor var_825_pad_0 = const()[name = tensor("op_825_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_1_attentions_1_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(37542464))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(37849728))), name = tensor("unet_down_blocks_1_attentions_1_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor unet_down_blocks_1_attentions_1_transformer_blocks_0_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("unet_down_blocks_1_attentions_1_transformer_blocks_0_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(37849920)))]; + tensor var_825_cast = conv(bias = unet_down_blocks_1_attentions_1_transformer_blocks_0_attn1_to_out_0_bias_to_fp16, dilations = var_823, groups = var_31, pad = var_825_pad_0, pad_type = var_825_pad_type_0, strides = var_821, weight = unet_down_blocks_1_attentions_1_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized, x = input_95_cast)[name = tensor("op_825_cast")]; + 
tensor inputs_15_cast = add(x = var_825_cast, y = inputs_13_cast)[name = tensor("inputs_15_cast")]; + tensor var_829 = const()[name = tensor("op_829"), val = tensor([1])]; + tensor channels_mean_15_cast = reduce_mean(axes = var_829, keep_dims = var_23, x = inputs_15_cast)[name = tensor("channels_mean_15_cast")]; + tensor zero_mean_15_cast = sub(x = inputs_15_cast, y = channels_mean_15_cast)[name = tensor("zero_mean_15_cast")]; + tensor zero_mean_sq_15_cast = mul(x = zero_mean_15_cast, y = zero_mean_15_cast)[name = tensor("zero_mean_sq_15_cast")]; + tensor var_833 = const()[name = tensor("op_833"), val = tensor([1])]; + tensor var_834_cast = reduce_mean(axes = var_833, keep_dims = var_23, x = zero_mean_sq_15_cast)[name = tensor("op_834_cast")]; + tensor var_835_to_fp16 = const()[name = tensor("op_835_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_836_cast = add(x = var_834_cast, y = var_835_to_fp16)[name = tensor("op_836_cast")]; + tensor denom_15_epsilon_0_to_fp16 = const()[name = tensor("denom_15_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_15_cast = rsqrt(epsilon = denom_15_epsilon_0_to_fp16, x = var_836_cast)[name = tensor("denom_15_cast")]; + tensor out_15_cast = mul(x = zero_mean_15_cast, y = denom_15_cast)[name = tensor("out_15_cast")]; + tensor var_840_to_fp16 = const()[name = tensor("op_840_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(37851264)))]; + tensor var_841_cast = add(x = out_15_cast, y = var_840_to_fp16)[name = tensor("op_841_cast")]; + tensor var_843_to_fp16 = const()[name = tensor("op_843_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(37852608)))]; + tensor hidden_states_43_cast = mul(x = var_841_cast, y = var_843_to_fp16)[name = tensor("hidden_states_43_cast")]; + tensor var_850 = const()[name = tensor("op_850"), val = tensor([1, 1])]; + tensor var_852 = const()[name = tensor("op_852"), val = tensor([1, 1])]; + tensor q_11_pad_type_0 = const()[name = tensor("q_11_pad_type_0"), val = tensor("custom")]; + tensor q_11_pad_0 = const()[name = tensor("q_11_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_1_attentions_1_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(37853952))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(38161216))), name = tensor("unet_down_blocks_1_attentions_1_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_11_cast = conv(dilations = var_852, groups = var_31, pad = q_11_pad_0, pad_type = q_11_pad_type_0, strides = var_850, weight = unet_down_blocks_1_attentions_1_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_43_cast)[name = tensor("q_11_cast")]; + tensor var_856 = const()[name = tensor("op_856"), val = tensor([1, 1])]; + tensor var_858 = const()[name = tensor("op_858"), val = tensor([1, 1])]; + tensor k_11_pad_type_0 = const()[name = tensor("k_11_pad_type_0"), val = tensor("custom")]; + tensor k_11_pad_0 = const()[name = tensor("k_11_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_1_attentions_1_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(38161408))), lut = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(39144512))), name = tensor("unet_down_blocks_1_attentions_1_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor k_11_cast = conv(dilations = var_858, groups = var_31, pad = k_11_pad_0, pad_type = k_11_pad_type_0, strides = var_856, weight = unet_down_blocks_1_attentions_1_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_11_cast")]; + tensor var_862 = const()[name = tensor("op_862"), val = tensor([1, 1])]; + tensor var_864 = const()[name = tensor("op_864"), val = tensor([1, 1])]; + tensor v_11_pad_type_0 = const()[name = tensor("v_11_pad_type_0"), val = tensor("custom")]; + tensor v_11_pad_0 = const()[name = tensor("v_11_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_1_attentions_1_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(39144704))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(40127808))), name = tensor("unet_down_blocks_1_attentions_1_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor v_11_cast = conv(dilations = var_864, groups = var_31, pad = v_11_pad_0, pad_type = v_11_pad_type_0, strides = var_862, weight = unet_down_blocks_1_attentions_1_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_11_cast")]; + tensor var_868 = const()[name = tensor("op_868"), val = tensor([2, 10, 64, -1])]; + tensor var_869_cast = reshape(shape = var_868, x = q_11_cast)[name = tensor("op_869_cast")]; + tensor var_870 = const()[name = tensor("op_870"), val = tensor([2, 10, 64, -1])]; + tensor var_871_cast = reshape(shape = var_870, x = k_11_cast)[name = tensor("op_871_cast")]; + tensor var_872 = const()[name = tensor("op_872"), val = tensor([2, 10, 64, -1])]; + tensor var_873_cast = reshape(shape = var_872, x = v_11_cast)[name = tensor("op_873_cast")]; + tensor attn_weights_21_transpose_x_0 = const()[name = tensor("attn_weights_21_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_21_transpose_y_0 = const()[name = tensor("attn_weights_21_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_21_cast = matmul(transpose_x = attn_weights_21_transpose_x_0, transpose_y = attn_weights_21_transpose_y_0, x = var_869_cast, y = var_871_cast)[name = tensor("attn_weights_21_cast")]; + tensor attn_weights_23_cast = mul(x = attn_weights_21_cast, y = var_12_to_fp16)[name = tensor("attn_weights_23_cast")]; + tensor var_877_cast = softmax(axis = var_18, x = attn_weights_23_cast)[name = tensor("op_877_cast")]; + tensor attn_11_transpose_x_0 = const()[name = tensor("attn_11_transpose_x_0"), val = tensor(false)]; + tensor attn_11_transpose_y_0 = const()[name = tensor("attn_11_transpose_y_0"), val = tensor(true)]; + tensor attn_11_cast = matmul(transpose_x = attn_11_transpose_x_0, transpose_y = attn_11_transpose_y_0, x = var_873_cast, y = var_877_cast)[name = tensor("attn_11_cast")]; + tensor var_881 = const()[name = tensor("op_881"), val = tensor([2, 640, 1, -1])]; + tensor input_97_cast = reshape(shape = var_881, x = attn_11_cast)[name = tensor("input_97_cast")]; + tensor var_886 = const()[name = tensor("op_886"), val = tensor([1, 1])]; + tensor var_888 = const()[name = tensor("op_888"), val = tensor([1, 1])]; + tensor var_890_pad_type_0 = const()[name = 
tensor("op_890_pad_type_0"), val = tensor("custom")]; + tensor var_890_pad_0 = const()[name = tensor("op_890_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_1_attentions_1_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(40128000))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(40435264))), name = tensor("unet_down_blocks_1_attentions_1_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor unet_down_blocks_1_attentions_1_transformer_blocks_0_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("unet_down_blocks_1_attentions_1_transformer_blocks_0_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(40435456)))]; + tensor var_890_cast = conv(bias = unet_down_blocks_1_attentions_1_transformer_blocks_0_attn2_to_out_0_bias_to_fp16, dilations = var_888, groups = var_31, pad = var_890_pad_0, pad_type = var_890_pad_type_0, strides = var_886, weight = unet_down_blocks_1_attentions_1_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized, x = input_97_cast)[name = tensor("op_890_cast")]; + tensor inputs_17_cast = add(x = var_890_cast, y = inputs_15_cast)[name = tensor("inputs_17_cast")]; + tensor var_894 = const()[name = tensor("op_894"), val = tensor([1])]; + tensor channels_mean_17_cast = reduce_mean(axes = var_894, keep_dims = var_23, x = inputs_17_cast)[name = tensor("channels_mean_17_cast")]; + tensor zero_mean_17_cast = sub(x = inputs_17_cast, y = channels_mean_17_cast)[name = tensor("zero_mean_17_cast")]; + tensor zero_mean_sq_17_cast = mul(x = zero_mean_17_cast, y = zero_mean_17_cast)[name = tensor("zero_mean_sq_17_cast")]; + tensor var_898 = const()[name = tensor("op_898"), val = tensor([1])]; + tensor var_899_cast = reduce_mean(axes = var_898, keep_dims = var_23, x = zero_mean_sq_17_cast)[name = tensor("op_899_cast")]; + tensor var_900_to_fp16 = const()[name = tensor("op_900_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_901_cast = add(x = var_899_cast, y = var_900_to_fp16)[name = tensor("op_901_cast")]; + tensor denom_17_epsilon_0_to_fp16 = const()[name = tensor("denom_17_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_17_cast = rsqrt(epsilon = denom_17_epsilon_0_to_fp16, x = var_901_cast)[name = tensor("denom_17_cast")]; + tensor out_17_cast = mul(x = zero_mean_17_cast, y = denom_17_cast)[name = tensor("out_17_cast")]; + tensor var_905_to_fp16 = const()[name = tensor("op_905_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(40436800)))]; + tensor var_906_cast = add(x = out_17_cast, y = var_905_to_fp16)[name = tensor("op_906_cast")]; + tensor var_908_to_fp16 = const()[name = tensor("op_908_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(40438144)))]; + tensor input_99_cast = mul(x = var_906_cast, y = var_908_to_fp16)[name = tensor("input_99_cast")]; + tensor var_916 = const()[name = tensor("op_916"), val = tensor([1, 1])]; + tensor var_918 = const()[name = tensor("op_918"), val = tensor([1, 1])]; + tensor var_920_pad_type_0 = const()[name = tensor("op_920_pad_type_0"), val = tensor("custom")]; + tensor var_920_pad_0 = const()[name = tensor("op_920_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
unet_down_blocks_1_attentions_1_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(40439488))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(42897152))), name = tensor("unet_down_blocks_1_attentions_1_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([5120, 640, 1, 1])]; + tensor unet_down_blocks_1_attentions_1_transformer_blocks_0_ff_net_0_proj_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(42897344))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(42901248))), name = tensor("unet_down_blocks_1_attentions_1_transformer_blocks_0_ff_net_0_proj_bias_to_fp16_palettized"), shape = tensor([5120])]; + tensor var_920_cast = conv(bias = unet_down_blocks_1_attentions_1_transformer_blocks_0_ff_net_0_proj_bias_to_fp16_palettized, dilations = var_918, groups = var_31, pad = var_920_pad_0, pad_type = var_920_pad_type_0, strides = var_916, weight = unet_down_blocks_1_attentions_1_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized, x = input_99_cast)[name = tensor("op_920_cast")]; + tensor var_921_split_sizes_0 = const()[name = tensor("op_921_split_sizes_0"), val = tensor([2560, 2560])]; + tensor var_921_axis_0 = const()[name = tensor("op_921_axis_0"), val = tensor(1)]; + tensor var_921_cast_0, tensor var_921_cast_1 = split(axis = var_921_axis_0, split_sizes = var_921_split_sizes_0, x = var_920_cast)[name = tensor("op_921_cast")]; + tensor var_923_mode_0 = const()[name = tensor("op_923_mode_0"), val = tensor("EXACT")]; + tensor var_923_cast = gelu(mode = var_923_mode_0, x = var_921_cast_1)[name = tensor("op_923_cast")]; + tensor input_101_cast = mul(x = var_921_cast_0, y = var_923_cast)[name = tensor("input_101_cast")]; + tensor var_927 = const()[name = tensor("op_927"), val = tensor([1, 1])]; + tensor var_929 = const()[name = tensor("op_929"), val = tensor([1, 1])]; + tensor var_931_pad_type_0 = const()[name = tensor("op_931_pad_type_0"), val = tensor("custom")]; + tensor var_931_pad_0 = const()[name = tensor("op_931_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_1_attentions_1_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(42901440))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(44130304))), name = tensor("unet_down_blocks_1_attentions_1_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized"), shape = tensor([640, 2560, 1, 1])]; + tensor unet_down_blocks_1_attentions_1_transformer_blocks_0_ff_net_2_bias_to_fp16 = const()[name = tensor("unet_down_blocks_1_attentions_1_transformer_blocks_0_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(44130496)))]; + tensor var_931_cast = conv(bias = unet_down_blocks_1_attentions_1_transformer_blocks_0_ff_net_2_bias_to_fp16, dilations = var_929, groups = var_31, pad = var_931_pad_0, pad_type = var_931_pad_type_0, strides = var_927, weight = unet_down_blocks_1_attentions_1_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized, x = input_101_cast)[name = tensor("op_931_cast")]; + tensor inputs_19_cast = add(x = var_931_cast, y = inputs_17_cast)[name = tensor("inputs_19_cast")]; + 
tensor var_941 = const()[name = tensor("op_941"), val = tensor([1])]; + tensor channels_mean_19_cast = reduce_mean(axes = var_941, keep_dims = var_23, x = inputs_19_cast)[name = tensor("channels_mean_19_cast")]; + tensor zero_mean_19_cast = sub(x = inputs_19_cast, y = channels_mean_19_cast)[name = tensor("zero_mean_19_cast")]; + tensor zero_mean_sq_19_cast = mul(x = zero_mean_19_cast, y = zero_mean_19_cast)[name = tensor("zero_mean_sq_19_cast")]; + tensor var_945 = const()[name = tensor("op_945"), val = tensor([1])]; + tensor var_946_cast = reduce_mean(axes = var_945, keep_dims = var_23, x = zero_mean_sq_19_cast)[name = tensor("op_946_cast")]; + tensor var_947_to_fp16 = const()[name = tensor("op_947_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_948_cast = add(x = var_946_cast, y = var_947_to_fp16)[name = tensor("op_948_cast")]; + tensor denom_19_epsilon_0_to_fp16 = const()[name = tensor("denom_19_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_19_cast = rsqrt(epsilon = denom_19_epsilon_0_to_fp16, x = var_948_cast)[name = tensor("denom_19_cast")]; + tensor out_19_cast = mul(x = zero_mean_19_cast, y = denom_19_cast)[name = tensor("out_19_cast")]; + tensor var_952_to_fp16 = const()[name = tensor("op_952_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(44131840)))]; + tensor var_953_cast = add(x = out_19_cast, y = var_952_to_fp16)[name = tensor("op_953_cast")]; + tensor var_955_to_fp16 = const()[name = tensor("op_955_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(44133184)))]; + tensor hidden_states_47_cast = mul(x = var_953_cast, y = var_955_to_fp16)[name = tensor("hidden_states_47_cast")]; + tensor var_962 = const()[name = tensor("op_962"), val = tensor([1, 1])]; + tensor var_964 = const()[name = tensor("op_964"), val = tensor([1, 1])]; + tensor q_13_pad_type_0 = const()[name = tensor("q_13_pad_type_0"), val = tensor("custom")]; + tensor q_13_pad_0 = const()[name = tensor("q_13_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_1_attentions_1_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(44134528))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(44441792))), name = tensor("unet_down_blocks_1_attentions_1_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_13_cast = conv(dilations = var_964, groups = var_31, pad = q_13_pad_0, pad_type = q_13_pad_type_0, strides = var_962, weight = unet_down_blocks_1_attentions_1_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_47_cast)[name = tensor("q_13_cast")]; + tensor var_968 = const()[name = tensor("op_968"), val = tensor([1, 1])]; + tensor var_970 = const()[name = tensor("op_970"), val = tensor([1, 1])]; + tensor k_13_pad_type_0 = const()[name = tensor("k_13_pad_type_0"), val = tensor("custom")]; + tensor k_13_pad_0 = const()[name = tensor("k_13_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_1_attentions_1_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(44441984))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(44749248))), name = 
tensor("unet_down_blocks_1_attentions_1_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor k_13_cast = conv(dilations = var_970, groups = var_31, pad = k_13_pad_0, pad_type = k_13_pad_type_0, strides = var_968, weight = unet_down_blocks_1_attentions_1_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_47_cast)[name = tensor("k_13_cast")]; + tensor var_974 = const()[name = tensor("op_974"), val = tensor([1, 1])]; + tensor var_976 = const()[name = tensor("op_976"), val = tensor([1, 1])]; + tensor v_13_pad_type_0 = const()[name = tensor("v_13_pad_type_0"), val = tensor("custom")]; + tensor v_13_pad_0 = const()[name = tensor("v_13_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_1_attentions_1_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(44749440))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(45056704))), name = tensor("unet_down_blocks_1_attentions_1_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor v_13_cast = conv(dilations = var_976, groups = var_31, pad = v_13_pad_0, pad_type = v_13_pad_type_0, strides = var_974, weight = unet_down_blocks_1_attentions_1_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_47_cast)[name = tensor("v_13_cast")]; + tensor var_980 = const()[name = tensor("op_980"), val = tensor([2, 10, 64, -1])]; + tensor var_981_cast = reshape(shape = var_980, x = q_13_cast)[name = tensor("op_981_cast")]; + tensor var_982 = const()[name = tensor("op_982"), val = tensor([2, 10, 64, -1])]; + tensor var_983_cast = reshape(shape = var_982, x = k_13_cast)[name = tensor("op_983_cast")]; + tensor var_984 = const()[name = tensor("op_984"), val = tensor([2, 10, 64, -1])]; + tensor var_985_cast = reshape(shape = var_984, x = v_13_cast)[name = tensor("op_985_cast")]; + tensor attn_weights_25_transpose_x_0 = const()[name = tensor("attn_weights_25_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_25_transpose_y_0 = const()[name = tensor("attn_weights_25_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_25_cast = matmul(transpose_x = attn_weights_25_transpose_x_0, transpose_y = attn_weights_25_transpose_y_0, x = var_981_cast, y = var_983_cast)[name = tensor("attn_weights_25_cast")]; + tensor attn_weights_27_cast = mul(x = attn_weights_25_cast, y = var_12_to_fp16)[name = tensor("attn_weights_27_cast")]; + tensor var_989_cast = softmax(axis = var_18, x = attn_weights_27_cast)[name = tensor("op_989_cast")]; + tensor attn_13_transpose_x_0 = const()[name = tensor("attn_13_transpose_x_0"), val = tensor(false)]; + tensor attn_13_transpose_y_0 = const()[name = tensor("attn_13_transpose_y_0"), val = tensor(true)]; + tensor attn_13_cast = matmul(transpose_x = attn_13_transpose_x_0, transpose_y = attn_13_transpose_y_0, x = var_985_cast, y = var_989_cast)[name = tensor("attn_13_cast")]; + tensor var_993 = const()[name = tensor("op_993"), val = tensor([2, 640, 1, -1])]; + tensor input_103_cast = reshape(shape = var_993, x = attn_13_cast)[name = tensor("input_103_cast")]; + tensor var_998 = const()[name = tensor("op_998"), val = tensor([1, 1])]; + tensor var_1000 = const()[name = tensor("op_1000"), val = tensor([1, 1])]; + tensor var_1002_pad_type_0 = const()[name = tensor("op_1002_pad_type_0"), val = tensor("custom")]; + tensor 
var_1002_pad_0 = const()[name = tensor("op_1002_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_1_attentions_1_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(45056896))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(45364160))), name = tensor("unet_down_blocks_1_attentions_1_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor unet_down_blocks_1_attentions_1_transformer_blocks_1_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("unet_down_blocks_1_attentions_1_transformer_blocks_1_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(45364352)))]; + tensor var_1002_cast = conv(bias = unet_down_blocks_1_attentions_1_transformer_blocks_1_attn1_to_out_0_bias_to_fp16, dilations = var_1000, groups = var_31, pad = var_1002_pad_0, pad_type = var_1002_pad_type_0, strides = var_998, weight = unet_down_blocks_1_attentions_1_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized, x = input_103_cast)[name = tensor("op_1002_cast")]; + tensor inputs_21_cast = add(x = var_1002_cast, y = inputs_19_cast)[name = tensor("inputs_21_cast")]; + tensor var_1006 = const()[name = tensor("op_1006"), val = tensor([1])]; + tensor channels_mean_21_cast = reduce_mean(axes = var_1006, keep_dims = var_23, x = inputs_21_cast)[name = tensor("channels_mean_21_cast")]; + tensor zero_mean_21_cast = sub(x = inputs_21_cast, y = channels_mean_21_cast)[name = tensor("zero_mean_21_cast")]; + tensor zero_mean_sq_21_cast = mul(x = zero_mean_21_cast, y = zero_mean_21_cast)[name = tensor("zero_mean_sq_21_cast")]; + tensor var_1010 = const()[name = tensor("op_1010"), val = tensor([1])]; + tensor var_1011_cast = reduce_mean(axes = var_1010, keep_dims = var_23, x = zero_mean_sq_21_cast)[name = tensor("op_1011_cast")]; + tensor var_1012_to_fp16 = const()[name = tensor("op_1012_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1013_cast = add(x = var_1011_cast, y = var_1012_to_fp16)[name = tensor("op_1013_cast")]; + tensor denom_21_epsilon_0_to_fp16 = const()[name = tensor("denom_21_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_21_cast = rsqrt(epsilon = denom_21_epsilon_0_to_fp16, x = var_1013_cast)[name = tensor("denom_21_cast")]; + tensor out_21_cast = mul(x = zero_mean_21_cast, y = denom_21_cast)[name = tensor("out_21_cast")]; + tensor var_1017_to_fp16 = const()[name = tensor("op_1017_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(45365696)))]; + tensor var_1018_cast = add(x = out_21_cast, y = var_1017_to_fp16)[name = tensor("op_1018_cast")]; + tensor var_1020_to_fp16 = const()[name = tensor("op_1020_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(45367040)))]; + tensor hidden_states_49_cast = mul(x = var_1018_cast, y = var_1020_to_fp16)[name = tensor("hidden_states_49_cast")]; + tensor var_1027 = const()[name = tensor("op_1027"), val = tensor([1, 1])]; + tensor var_1029 = const()[name = tensor("op_1029"), val = tensor([1, 1])]; + tensor q_15_pad_type_0 = const()[name = tensor("q_15_pad_type_0"), val = tensor("custom")]; + tensor q_15_pad_0 = const()[name = tensor("q_15_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_1_attentions_1_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized = 
constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(45368384))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(45675648))), name = tensor("unet_down_blocks_1_attentions_1_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_15_cast = conv(dilations = var_1029, groups = var_31, pad = q_15_pad_0, pad_type = q_15_pad_type_0, strides = var_1027, weight = unet_down_blocks_1_attentions_1_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_49_cast)[name = tensor("q_15_cast")]; + tensor var_1033 = const()[name = tensor("op_1033"), val = tensor([1, 1])]; + tensor var_1035 = const()[name = tensor("op_1035"), val = tensor([1, 1])]; + tensor k_15_pad_type_0 = const()[name = tensor("k_15_pad_type_0"), val = tensor("custom")]; + tensor k_15_pad_0 = const()[name = tensor("k_15_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_1_attentions_1_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(45675840))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(46658944))), name = tensor("unet_down_blocks_1_attentions_1_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor k_15_cast = conv(dilations = var_1035, groups = var_31, pad = k_15_pad_0, pad_type = k_15_pad_type_0, strides = var_1033, weight = unet_down_blocks_1_attentions_1_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_15_cast")]; + tensor var_1039 = const()[name = tensor("op_1039"), val = tensor([1, 1])]; + tensor var_1041 = const()[name = tensor("op_1041"), val = tensor([1, 1])]; + tensor v_15_pad_type_0 = const()[name = tensor("v_15_pad_type_0"), val = tensor("custom")]; + tensor v_15_pad_0 = const()[name = tensor("v_15_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_1_attentions_1_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(46659136))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(47642240))), name = tensor("unet_down_blocks_1_attentions_1_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor v_15_cast = conv(dilations = var_1041, groups = var_31, pad = v_15_pad_0, pad_type = v_15_pad_type_0, strides = var_1039, weight = unet_down_blocks_1_attentions_1_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_15_cast")]; + tensor var_1045 = const()[name = tensor("op_1045"), val = tensor([2, 10, 64, -1])]; + tensor var_1046_cast = reshape(shape = var_1045, x = q_15_cast)[name = tensor("op_1046_cast")]; + tensor var_1047 = const()[name = tensor("op_1047"), val = tensor([2, 10, 64, -1])]; + tensor var_1048_cast = reshape(shape = var_1047, x = k_15_cast)[name = tensor("op_1048_cast")]; + tensor var_1049 = const()[name = tensor("op_1049"), val = tensor([2, 10, 64, -1])]; + tensor var_1050_cast = reshape(shape = var_1049, x = v_15_cast)[name = tensor("op_1050_cast")]; + tensor attn_weights_29_transpose_x_0 = const()[name = tensor("attn_weights_29_transpose_x_0"), val = tensor(true)]; + tensor 
attn_weights_29_transpose_y_0 = const()[name = tensor("attn_weights_29_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_29_cast = matmul(transpose_x = attn_weights_29_transpose_x_0, transpose_y = attn_weights_29_transpose_y_0, x = var_1046_cast, y = var_1048_cast)[name = tensor("attn_weights_29_cast")]; + tensor attn_weights_31_cast = mul(x = attn_weights_29_cast, y = var_12_to_fp16)[name = tensor("attn_weights_31_cast")]; + tensor var_1054_cast = softmax(axis = var_18, x = attn_weights_31_cast)[name = tensor("op_1054_cast")]; + tensor attn_15_transpose_x_0 = const()[name = tensor("attn_15_transpose_x_0"), val = tensor(false)]; + tensor attn_15_transpose_y_0 = const()[name = tensor("attn_15_transpose_y_0"), val = tensor(true)]; + tensor attn_15_cast = matmul(transpose_x = attn_15_transpose_x_0, transpose_y = attn_15_transpose_y_0, x = var_1050_cast, y = var_1054_cast)[name = tensor("attn_15_cast")]; + tensor var_1058 = const()[name = tensor("op_1058"), val = tensor([2, 640, 1, -1])]; + tensor input_105_cast = reshape(shape = var_1058, x = attn_15_cast)[name = tensor("input_105_cast")]; + tensor var_1063 = const()[name = tensor("op_1063"), val = tensor([1, 1])]; + tensor var_1065 = const()[name = tensor("op_1065"), val = tensor([1, 1])]; + tensor var_1067_pad_type_0 = const()[name = tensor("op_1067_pad_type_0"), val = tensor("custom")]; + tensor var_1067_pad_0 = const()[name = tensor("op_1067_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_1_attentions_1_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(47642432))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(47949696))), name = tensor("unet_down_blocks_1_attentions_1_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor unet_down_blocks_1_attentions_1_transformer_blocks_1_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("unet_down_blocks_1_attentions_1_transformer_blocks_1_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(47949888)))]; + tensor var_1067_cast = conv(bias = unet_down_blocks_1_attentions_1_transformer_blocks_1_attn2_to_out_0_bias_to_fp16, dilations = var_1065, groups = var_31, pad = var_1067_pad_0, pad_type = var_1067_pad_type_0, strides = var_1063, weight = unet_down_blocks_1_attentions_1_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized, x = input_105_cast)[name = tensor("op_1067_cast")]; + tensor inputs_23_cast = add(x = var_1067_cast, y = inputs_21_cast)[name = tensor("inputs_23_cast")]; + tensor var_1071 = const()[name = tensor("op_1071"), val = tensor([1])]; + tensor channels_mean_23_cast = reduce_mean(axes = var_1071, keep_dims = var_23, x = inputs_23_cast)[name = tensor("channels_mean_23_cast")]; + tensor zero_mean_23_cast = sub(x = inputs_23_cast, y = channels_mean_23_cast)[name = tensor("zero_mean_23_cast")]; + tensor zero_mean_sq_23_cast = mul(x = zero_mean_23_cast, y = zero_mean_23_cast)[name = tensor("zero_mean_sq_23_cast")]; + tensor var_1075 = const()[name = tensor("op_1075"), val = tensor([1])]; + tensor var_1076_cast = reduce_mean(axes = var_1075, keep_dims = var_23, x = zero_mean_sq_23_cast)[name = tensor("op_1076_cast")]; + tensor var_1077_to_fp16 = const()[name = tensor("op_1077_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1078_cast = add(x = var_1076_cast, y = 
var_1077_to_fp16)[name = tensor("op_1078_cast")]; + tensor denom_23_epsilon_0_to_fp16 = const()[name = tensor("denom_23_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_23_cast = rsqrt(epsilon = denom_23_epsilon_0_to_fp16, x = var_1078_cast)[name = tensor("denom_23_cast")]; + tensor out_23_cast = mul(x = zero_mean_23_cast, y = denom_23_cast)[name = tensor("out_23_cast")]; + tensor var_1082_to_fp16 = const()[name = tensor("op_1082_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(47951232)))]; + tensor var_1083_cast = add(x = out_23_cast, y = var_1082_to_fp16)[name = tensor("op_1083_cast")]; + tensor var_1085_to_fp16 = const()[name = tensor("op_1085_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(47952576)))]; + tensor input_107_cast = mul(x = var_1083_cast, y = var_1085_to_fp16)[name = tensor("input_107_cast")]; + tensor var_1093 = const()[name = tensor("op_1093"), val = tensor([1, 1])]; + tensor var_1095 = const()[name = tensor("op_1095"), val = tensor([1, 1])]; + tensor var_1097_pad_type_0 = const()[name = tensor("op_1097_pad_type_0"), val = tensor("custom")]; + tensor var_1097_pad_0 = const()[name = tensor("op_1097_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_1_attentions_1_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(47953920))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(50411584))), name = tensor("unet_down_blocks_1_attentions_1_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([5120, 640, 1, 1])]; + tensor unet_down_blocks_1_attentions_1_transformer_blocks_1_ff_net_0_proj_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(50411776))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(50415680))), name = tensor("unet_down_blocks_1_attentions_1_transformer_blocks_1_ff_net_0_proj_bias_to_fp16_palettized"), shape = tensor([5120])]; + tensor var_1097_cast = conv(bias = unet_down_blocks_1_attentions_1_transformer_blocks_1_ff_net_0_proj_bias_to_fp16_palettized, dilations = var_1095, groups = var_31, pad = var_1097_pad_0, pad_type = var_1097_pad_type_0, strides = var_1093, weight = unet_down_blocks_1_attentions_1_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized, x = input_107_cast)[name = tensor("op_1097_cast")]; + tensor var_1098_split_sizes_0 = const()[name = tensor("op_1098_split_sizes_0"), val = tensor([2560, 2560])]; + tensor var_1098_axis_0 = const()[name = tensor("op_1098_axis_0"), val = tensor(1)]; + tensor var_1098_cast_0, tensor var_1098_cast_1 = split(axis = var_1098_axis_0, split_sizes = var_1098_split_sizes_0, x = var_1097_cast)[name = tensor("op_1098_cast")]; + tensor var_1100_mode_0 = const()[name = tensor("op_1100_mode_0"), val = tensor("EXACT")]; + tensor var_1100_cast = gelu(mode = var_1100_mode_0, x = var_1098_cast_1)[name = tensor("op_1100_cast")]; + tensor input_109_cast = mul(x = var_1098_cast_0, y = var_1100_cast)[name = tensor("input_109_cast")]; + tensor var_1104 = const()[name = tensor("op_1104"), val = tensor([1, 1])]; + tensor var_1106 = const()[name = tensor("op_1106"), val = tensor([1, 1])]; + tensor var_1108_pad_type_0 = const()[name = tensor("op_1108_pad_type_0"), val = tensor("custom")]; + 
tensor var_1108_pad_0 = const()[name = tensor("op_1108_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_1_attentions_1_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(50415872))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(51644736))), name = tensor("unet_down_blocks_1_attentions_1_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized"), shape = tensor([640, 2560, 1, 1])]; + tensor unet_down_blocks_1_attentions_1_transformer_blocks_1_ff_net_2_bias_to_fp16 = const()[name = tensor("unet_down_blocks_1_attentions_1_transformer_blocks_1_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(51644928)))]; + tensor var_1108_cast = conv(bias = unet_down_blocks_1_attentions_1_transformer_blocks_1_ff_net_2_bias_to_fp16, dilations = var_1106, groups = var_31, pad = var_1108_pad_0, pad_type = var_1108_pad_type_0, strides = var_1104, weight = unet_down_blocks_1_attentions_1_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized, x = input_109_cast)[name = tensor("op_1108_cast")]; + tensor hidden_states_53_cast = add(x = var_1108_cast, y = inputs_23_cast)[name = tensor("hidden_states_53_cast")]; + tensor var_1110 = const()[name = tensor("op_1110"), val = tensor([2, 640, 64, 64])]; + tensor input_111_cast = reshape(shape = var_1110, x = hidden_states_53_cast)[name = tensor("input_111_cast")]; + tensor var_1114 = const()[name = tensor("op_1114"), val = tensor([1, 1])]; + tensor var_1116 = const()[name = tensor("op_1116"), val = tensor([1, 1])]; + tensor hidden_states_55_pad_type_0 = const()[name = tensor("hidden_states_55_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_55_pad_0 = const()[name = tensor("hidden_states_55_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_1_attentions_1_proj_out_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(51646272))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(51953536))), name = tensor("unet_down_blocks_1_attentions_1_proj_out_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor unet_down_blocks_1_attentions_1_proj_out_bias_to_fp16 = const()[name = tensor("unet_down_blocks_1_attentions_1_proj_out_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(51953728)))]; + tensor hidden_states_55_cast = conv(bias = unet_down_blocks_1_attentions_1_proj_out_bias_to_fp16, dilations = var_1116, groups = var_31, pad = hidden_states_55_pad_0, pad_type = hidden_states_55_pad_type_0, strides = var_1114, weight = unet_down_blocks_1_attentions_1_proj_out_weight_to_fp16_palettized, x = input_111_cast)[name = tensor("hidden_states_55_cast")]; + tensor input_113_cast = add(x = hidden_states_55_cast, y = hidden_states_37_cast)[name = tensor("input_113_cast")]; + tensor var_1123 = const()[name = tensor("op_1123"), val = tensor([2, 2])]; + tensor var_1125 = const()[name = tensor("op_1125"), val = tensor([1, 1])]; + tensor input_115_pad_type_0 = const()[name = tensor("input_115_pad_type_0"), val = tensor("custom")]; + tensor input_115_pad_0 = const()[name = tensor("input_115_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor unet_down_blocks_1_downsamplers_0_conv_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = 
tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(51955072))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(54719936))), name = tensor("unet_down_blocks_1_downsamplers_0_conv_weight_to_fp16_palettized"), shape = tensor([640, 640, 3, 3])]; + tensor unet_down_blocks_1_downsamplers_0_conv_bias_to_fp16 = const()[name = tensor("unet_down_blocks_1_downsamplers_0_conv_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(54720128)))]; + tensor input_115_cast = conv(bias = unet_down_blocks_1_downsamplers_0_conv_bias_to_fp16, dilations = var_1125, groups = var_31, pad = input_115_pad_0, pad_type = input_115_pad_type_0, strides = var_1123, weight = unet_down_blocks_1_downsamplers_0_conv_weight_to_fp16_palettized, x = input_113_cast)[name = tensor("input_115_cast")]; + tensor reshape_40_shape_0 = const()[name = tensor("reshape_40_shape_0"), val = tensor([2, 32, 20, 32, 32])]; + tensor reshape_40_cast = reshape(shape = reshape_40_shape_0, x = input_115_cast)[name = tensor("reshape_40_cast")]; + tensor reduce_mean_30_axes_0 = const()[name = tensor("reduce_mean_30_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_30_keep_dims_0 = const()[name = tensor("reduce_mean_30_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_30_cast = reduce_mean(axes = reduce_mean_30_axes_0, keep_dims = reduce_mean_30_keep_dims_0, x = reshape_40_cast)[name = tensor("reduce_mean_30_cast")]; + tensor sub_20_cast = sub(x = reshape_40_cast, y = reduce_mean_30_cast)[name = tensor("sub_20_cast")]; + tensor square_10_cast = square(x = sub_20_cast)[name = tensor("square_10_cast")]; + tensor reduce_mean_32_axes_0 = const()[name = tensor("reduce_mean_32_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_32_keep_dims_0 = const()[name = tensor("reduce_mean_32_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_32_cast = reduce_mean(axes = reduce_mean_32_axes_0, keep_dims = reduce_mean_32_keep_dims_0, x = square_10_cast)[name = tensor("reduce_mean_32_cast")]; + tensor add_20_y_0_to_fp16 = const()[name = tensor("add_20_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_20_cast = add(x = reduce_mean_32_cast, y = add_20_y_0_to_fp16)[name = tensor("add_20_cast")]; + tensor sqrt_10_cast = sqrt(x = add_20_cast)[name = tensor("sqrt_10_cast")]; + tensor real_div_10_cast = real_div(x = sub_20_cast, y = sqrt_10_cast)[name = tensor("real_div_10_cast")]; + tensor reshape_41_shape_0 = const()[name = tensor("reshape_41_shape_0"), val = tensor([2, 640, 32, 32])]; + tensor reshape_41_cast = reshape(shape = reshape_41_shape_0, x = real_div_10_cast)[name = tensor("reshape_41_cast")]; + tensor add_21_gamma_0_to_fp16 = const()[name = tensor("add_21_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(54721472)))]; + tensor add_21_beta_0_to_fp16 = const()[name = tensor("add_21_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(54722816)))]; + tensor add_21_epsilon_0_to_fp16 = const()[name = tensor("add_21_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_21_cast = batch_norm(beta = add_21_beta_0_to_fp16, epsilon = add_21_epsilon_0_to_fp16, gamma = add_21_gamma_0_to_fp16, mean = add_11_mean_0_to_fp16, variance = add_11_variance_0_to_fp16, x = reshape_41_cast)[name = tensor("add_21_cast")]; + tensor input_119_cast = silu(x = add_21_cast)[name = tensor("input_119_cast")]; + tensor var_1152 = 
const()[name = tensor("op_1152"), val = tensor([1, 1])]; + tensor var_1154 = const()[name = tensor("op_1154"), val = tensor([1, 1])]; + tensor hidden_states_57_pad_type_0 = const()[name = tensor("hidden_states_57_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_57_pad_0 = const()[name = tensor("hidden_states_57_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor unet_down_blocks_2_resnets_0_conv1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(54724160))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(60253824))), name = tensor("unet_down_blocks_2_resnets_0_conv1_weight_to_fp16_palettized"), shape = tensor([1280, 640, 3, 3])]; + tensor unet_down_blocks_2_resnets_0_conv1_bias_to_fp16 = const()[name = tensor("unet_down_blocks_2_resnets_0_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(60254016)))]; + tensor hidden_states_57_cast = conv(bias = unet_down_blocks_2_resnets_0_conv1_bias_to_fp16, dilations = var_1154, groups = var_31, pad = hidden_states_57_pad_0, pad_type = hidden_states_57_pad_type_0, strides = var_1152, weight = unet_down_blocks_2_resnets_0_conv1_weight_to_fp16_palettized, x = input_119_cast)[name = tensor("hidden_states_57_cast")]; + tensor var_1160 = const()[name = tensor("op_1160"), val = tensor([1, 1])]; + tensor var_1162 = const()[name = tensor("op_1162"), val = tensor([1, 1])]; + tensor temb_9_pad_type_0 = const()[name = tensor("temb_9_pad_type_0"), val = tensor("custom")]; + tensor temb_9_pad_0 = const()[name = tensor("temb_9_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_resnets_0_time_emb_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(60256640))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(61485504))), name = tensor("unet_down_blocks_2_resnets_0_time_emb_proj_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_down_blocks_2_resnets_0_time_emb_proj_bias_to_fp16 = const()[name = tensor("unet_down_blocks_2_resnets_0_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(61485696)))]; + tensor temb_9_cast = conv(bias = unet_down_blocks_2_resnets_0_time_emb_proj_bias_to_fp16, dilations = var_1162, groups = var_31, pad = temb_9_pad_0, pad_type = temb_9_pad_type_0, strides = var_1160, weight = unet_down_blocks_2_resnets_0_time_emb_proj_weight_to_fp16_palettized, x = input_21_cast)[name = tensor("temb_9_cast")]; + tensor input_123_cast = add(x = hidden_states_57_cast, y = temb_9_cast)[name = tensor("input_123_cast")]; + tensor reshape_44_shape_0 = const()[name = tensor("reshape_44_shape_0"), val = tensor([2, 32, 40, 32, 32])]; + tensor reshape_44_cast = reshape(shape = reshape_44_shape_0, x = input_123_cast)[name = tensor("reshape_44_cast")]; + tensor reduce_mean_33_axes_0 = const()[name = tensor("reduce_mean_33_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_33_keep_dims_0 = const()[name = tensor("reduce_mean_33_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_33_cast = reduce_mean(axes = reduce_mean_33_axes_0, keep_dims = reduce_mean_33_keep_dims_0, x = reshape_44_cast)[name = tensor("reduce_mean_33_cast")]; + tensor sub_22_cast = sub(x = reshape_44_cast, y = reduce_mean_33_cast)[name = 
tensor("sub_22_cast")]; + tensor square_11_cast = square(x = sub_22_cast)[name = tensor("square_11_cast")]; + tensor reduce_mean_35_axes_0 = const()[name = tensor("reduce_mean_35_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_35_keep_dims_0 = const()[name = tensor("reduce_mean_35_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_35_cast = reduce_mean(axes = reduce_mean_35_axes_0, keep_dims = reduce_mean_35_keep_dims_0, x = square_11_cast)[name = tensor("reduce_mean_35_cast")]; + tensor add_22_y_0_to_fp16 = const()[name = tensor("add_22_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_22_cast = add(x = reduce_mean_35_cast, y = add_22_y_0_to_fp16)[name = tensor("add_22_cast")]; + tensor sqrt_11_cast = sqrt(x = add_22_cast)[name = tensor("sqrt_11_cast")]; + tensor real_div_11_cast = real_div(x = sub_22_cast, y = sqrt_11_cast)[name = tensor("real_div_11_cast")]; + tensor reshape_45_shape_0 = const()[name = tensor("reshape_45_shape_0"), val = tensor([2, 1280, 32, 32])]; + tensor reshape_45_cast = reshape(shape = reshape_45_shape_0, x = real_div_11_cast)[name = tensor("reshape_45_cast")]; + tensor add_23_mean_0_to_fp16 = const()[name = tensor("add_23_mean_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(61488320)))]; + tensor add_23_variance_0_to_fp16 = const()[name = tensor("add_23_variance_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(61490944)))]; + tensor add_23_gamma_0_to_fp16 = const()[name = tensor("add_23_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(61493568)))]; + tensor add_23_beta_0_to_fp16 = const()[name = tensor("add_23_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(61496192)))]; + tensor add_23_epsilon_0_to_fp16 = const()[name = tensor("add_23_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_23_cast = batch_norm(beta = add_23_beta_0_to_fp16, epsilon = add_23_epsilon_0_to_fp16, gamma = add_23_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_45_cast)[name = tensor("add_23_cast")]; + tensor input_127_cast = silu(x = add_23_cast)[name = tensor("input_127_cast")]; + tensor var_1172 = const()[name = tensor("op_1172"), val = tensor([1, 1])]; + tensor var_1174 = const()[name = tensor("op_1174"), val = tensor([1, 1])]; + tensor hidden_states_59_pad_type_0 = const()[name = tensor("hidden_states_59_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_59_pad_0 = const()[name = tensor("hidden_states_59_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor unet_down_blocks_2_resnets_0_conv2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(61498816))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(72558080))), name = tensor("unet_down_blocks_2_resnets_0_conv2_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 3, 3])]; + tensor unet_down_blocks_2_resnets_0_conv2_bias_to_fp16 = const()[name = tensor("unet_down_blocks_2_resnets_0_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(72558272)))]; + tensor hidden_states_59_cast = conv(bias = unet_down_blocks_2_resnets_0_conv2_bias_to_fp16, dilations = var_1174, groups = var_31, pad = hidden_states_59_pad_0, pad_type = hidden_states_59_pad_type_0, strides = 
var_1172, weight = unet_down_blocks_2_resnets_0_conv2_weight_to_fp16_palettized, x = input_127_cast)[name = tensor("hidden_states_59_cast")]; + tensor var_1179 = const()[name = tensor("op_1179"), val = tensor([1, 1])]; + tensor var_1181 = const()[name = tensor("op_1181"), val = tensor([1, 1])]; + tensor x_3_pad_type_0 = const()[name = tensor("x_3_pad_type_0"), val = tensor("custom")]; + tensor x_3_pad_0 = const()[name = tensor("x_3_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_resnets_0_conv_shortcut_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(72560896))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(73175360))), name = tensor("unet_down_blocks_2_resnets_0_conv_shortcut_weight_to_fp16_palettized"), shape = tensor([1280, 640, 1, 1])]; + tensor unet_down_blocks_2_resnets_0_conv_shortcut_bias_to_fp16 = const()[name = tensor("unet_down_blocks_2_resnets_0_conv_shortcut_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(73175552)))]; + tensor x_3_cast = conv(bias = unet_down_blocks_2_resnets_0_conv_shortcut_bias_to_fp16, dilations = var_1181, groups = var_31, pad = x_3_pad_0, pad_type = x_3_pad_type_0, strides = var_1179, weight = unet_down_blocks_2_resnets_0_conv_shortcut_weight_to_fp16_palettized, x = input_115_cast)[name = tensor("x_3_cast")]; + tensor hidden_states_61_cast = add(x = x_3_cast, y = hidden_states_59_cast)[name = tensor("hidden_states_61_cast")]; + tensor reshape_48_shape_0 = const()[name = tensor("reshape_48_shape_0"), val = tensor([2, 32, 40, 32, 32])]; + tensor reshape_48_cast = reshape(shape = reshape_48_shape_0, x = hidden_states_61_cast)[name = tensor("reshape_48_cast")]; + tensor reduce_mean_36_axes_0 = const()[name = tensor("reduce_mean_36_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_36_keep_dims_0 = const()[name = tensor("reduce_mean_36_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_36_cast = reduce_mean(axes = reduce_mean_36_axes_0, keep_dims = reduce_mean_36_keep_dims_0, x = reshape_48_cast)[name = tensor("reduce_mean_36_cast")]; + tensor sub_24_cast = sub(x = reshape_48_cast, y = reduce_mean_36_cast)[name = tensor("sub_24_cast")]; + tensor square_12_cast = square(x = sub_24_cast)[name = tensor("square_12_cast")]; + tensor reduce_mean_38_axes_0 = const()[name = tensor("reduce_mean_38_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_38_keep_dims_0 = const()[name = tensor("reduce_mean_38_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_38_cast = reduce_mean(axes = reduce_mean_38_axes_0, keep_dims = reduce_mean_38_keep_dims_0, x = square_12_cast)[name = tensor("reduce_mean_38_cast")]; + tensor add_24_y_0_to_fp16 = const()[name = tensor("add_24_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_24_cast = add(x = reduce_mean_38_cast, y = add_24_y_0_to_fp16)[name = tensor("add_24_cast")]; + tensor sqrt_12_cast = sqrt(x = add_24_cast)[name = tensor("sqrt_12_cast")]; + tensor real_div_12_cast = real_div(x = sub_24_cast, y = sqrt_12_cast)[name = tensor("real_div_12_cast")]; + tensor reshape_49_shape_0 = const()[name = tensor("reshape_49_shape_0"), val = tensor([2, 1280, 32, 32])]; + tensor reshape_49_cast = reshape(shape = reshape_49_shape_0, x = real_div_12_cast)[name = tensor("reshape_49_cast")]; + tensor add_25_gamma_0_to_fp16 = const()[name = tensor("add_25_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(73178176)))]; + tensor add_25_beta_0_to_fp16 = const()[name = tensor("add_25_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(73180800)))]; + tensor add_25_epsilon_0_to_fp16 = const()[name = tensor("add_25_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_25_cast = batch_norm(beta = add_25_beta_0_to_fp16, epsilon = add_25_epsilon_0_to_fp16, gamma = add_25_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_49_cast)[name = tensor("add_25_cast")]; + tensor var_1219 = const()[name = tensor("op_1219"), val = tensor([1, 1])]; + tensor var_1221 = const()[name = tensor("op_1221"), val = tensor([1, 1])]; + tensor hidden_states_63_pad_type_0 = const()[name = tensor("hidden_states_63_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_63_pad_0 = const()[name = tensor("hidden_states_63_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_proj_in_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(73183424))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(74412288))), name = tensor("unet_down_blocks_2_attentions_0_proj_in_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_down_blocks_2_attentions_0_proj_in_bias_to_fp16 = const()[name = tensor("unet_down_blocks_2_attentions_0_proj_in_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(74412480)))]; + tensor hidden_states_63_cast = conv(bias = unet_down_blocks_2_attentions_0_proj_in_bias_to_fp16, dilations = var_1221, groups = var_31, pad = hidden_states_63_pad_0, pad_type = hidden_states_63_pad_type_0, strides = var_1219, weight = unet_down_blocks_2_attentions_0_proj_in_weight_to_fp16_palettized, x = add_25_cast)[name = tensor("hidden_states_63_cast")]; + tensor var_1226 = const()[name = tensor("op_1226"), val = tensor([2, 1280, 1, 1024])]; + tensor inputs_25_cast = reshape(shape = var_1226, x = hidden_states_63_cast)[name = tensor("inputs_25_cast")]; + tensor var_1236 = const()[name = tensor("op_1236"), val = tensor([1])]; + tensor channels_mean_25_cast = reduce_mean(axes = var_1236, keep_dims = var_23, x = inputs_25_cast)[name = tensor("channels_mean_25_cast")]; + tensor zero_mean_25_cast = sub(x = inputs_25_cast, y = channels_mean_25_cast)[name = tensor("zero_mean_25_cast")]; + tensor zero_mean_sq_25_cast = mul(x = zero_mean_25_cast, y = zero_mean_25_cast)[name = tensor("zero_mean_sq_25_cast")]; + tensor var_1240 = const()[name = tensor("op_1240"), val = tensor([1])]; + tensor var_1241_cast = reduce_mean(axes = var_1240, keep_dims = var_23, x = zero_mean_sq_25_cast)[name = tensor("op_1241_cast")]; + tensor var_1242_to_fp16 = const()[name = tensor("op_1242_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1243_cast = add(x = var_1241_cast, y = var_1242_to_fp16)[name = tensor("op_1243_cast")]; + tensor denom_25_epsilon_0_to_fp16 = const()[name = tensor("denom_25_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_25_cast = rsqrt(epsilon = denom_25_epsilon_0_to_fp16, x = var_1243_cast)[name = tensor("denom_25_cast")]; + tensor out_25_cast = mul(x = zero_mean_25_cast, y = denom_25_cast)[name = tensor("out_25_cast")]; + tensor var_1247_to_fp16 = const()[name = tensor("op_1247_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(74415104)))]; + tensor var_1248_cast = add(x = out_25_cast, y = var_1247_to_fp16)[name = tensor("op_1248_cast")]; + tensor var_1250_to_fp16 = const()[name = tensor("op_1250_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(74417728)))]; + tensor hidden_states_65_cast = mul(x = var_1248_cast, y = var_1250_to_fp16)[name = tensor("hidden_states_65_cast")]; + tensor var_1257 = const()[name = tensor("op_1257"), val = tensor([1, 1])]; + tensor var_1259 = const()[name = tensor("op_1259"), val = tensor([1, 1])]; + tensor q_17_pad_type_0 = const()[name = tensor("q_17_pad_type_0"), val = tensor("custom")]; + tensor q_17_pad_0 = const()[name = tensor("q_17_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(74420352))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(75649216))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_17_cast = conv(dilations = var_1259, groups = var_31, pad = q_17_pad_0, pad_type = q_17_pad_type_0, strides = var_1257, weight = unet_down_blocks_2_attentions_0_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_65_cast)[name = tensor("q_17_cast")]; + tensor var_1263 = const()[name = tensor("op_1263"), val = tensor([1, 1])]; + tensor var_1265 = const()[name = tensor("op_1265"), val = tensor([1, 1])]; + tensor k_17_pad_type_0 = const()[name = tensor("k_17_pad_type_0"), val = tensor("custom")]; + tensor k_17_pad_0 = const()[name = tensor("k_17_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(75649408))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(76878272))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_17_cast = conv(dilations = var_1265, groups = var_31, pad = k_17_pad_0, pad_type = k_17_pad_type_0, strides = var_1263, weight = unet_down_blocks_2_attentions_0_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_65_cast)[name = tensor("k_17_cast")]; + tensor var_1269 = const()[name = tensor("op_1269"), val = tensor([1, 1])]; + tensor var_1271 = const()[name = tensor("op_1271"), val = tensor([1, 1])]; + tensor v_17_pad_type_0 = const()[name = tensor("v_17_pad_type_0"), val = tensor("custom")]; + tensor v_17_pad_0 = const()[name = tensor("v_17_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(76878464))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(78107328))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_17_cast = conv(dilations = var_1271, groups = var_31, pad = v_17_pad_0, 
pad_type = v_17_pad_type_0, strides = var_1269, weight = unet_down_blocks_2_attentions_0_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_65_cast)[name = tensor("v_17_cast")]; + tensor var_1275 = const()[name = tensor("op_1275"), val = tensor([2, 20, 64, -1])]; + tensor var_1276_cast = reshape(shape = var_1275, x = q_17_cast)[name = tensor("op_1276_cast")]; + tensor var_1277 = const()[name = tensor("op_1277"), val = tensor([2, 20, 64, -1])]; + tensor var_1278_cast = reshape(shape = var_1277, x = k_17_cast)[name = tensor("op_1278_cast")]; + tensor var_1279 = const()[name = tensor("op_1279"), val = tensor([2, 20, 64, -1])]; + tensor var_1280_cast = reshape(shape = var_1279, x = v_17_cast)[name = tensor("op_1280_cast")]; + tensor attn_weights_33_transpose_x_0 = const()[name = tensor("attn_weights_33_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_33_transpose_y_0 = const()[name = tensor("attn_weights_33_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_33_cast = matmul(transpose_x = attn_weights_33_transpose_x_0, transpose_y = attn_weights_33_transpose_y_0, x = var_1276_cast, y = var_1278_cast)[name = tensor("attn_weights_33_cast")]; + tensor attn_weights_35_cast = mul(x = attn_weights_33_cast, y = var_12_to_fp16)[name = tensor("attn_weights_35_cast")]; + tensor var_1284_cast = softmax(axis = var_18, x = attn_weights_35_cast)[name = tensor("op_1284_cast")]; + tensor attn_17_transpose_x_0 = const()[name = tensor("attn_17_transpose_x_0"), val = tensor(false)]; + tensor attn_17_transpose_y_0 = const()[name = tensor("attn_17_transpose_y_0"), val = tensor(true)]; + tensor attn_17_cast = matmul(transpose_x = attn_17_transpose_x_0, transpose_y = attn_17_transpose_y_0, x = var_1280_cast, y = var_1284_cast)[name = tensor("attn_17_cast")]; + tensor var_1288 = const()[name = tensor("op_1288"), val = tensor([2, 1280, 1, -1])]; + tensor input_131_cast = reshape(shape = var_1288, x = attn_17_cast)[name = tensor("input_131_cast")]; + tensor var_1293 = const()[name = tensor("op_1293"), val = tensor([1, 1])]; + tensor var_1295 = const()[name = tensor("op_1295"), val = tensor([1, 1])]; + tensor var_1297_pad_type_0 = const()[name = tensor("op_1297_pad_type_0"), val = tensor("custom")]; + tensor var_1297_pad_0 = const()[name = tensor("op_1297_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(78107520))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(79336384))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_0_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_0_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(79336576)))]; + tensor var_1297_cast = conv(bias = unet_down_blocks_2_attentions_0_transformer_blocks_0_attn1_to_out_0_bias_to_fp16, dilations = var_1295, groups = var_31, pad = var_1297_pad_0, pad_type = var_1297_pad_type_0, strides = var_1293, weight = unet_down_blocks_2_attentions_0_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized, x = input_131_cast)[name = tensor("op_1297_cast")]; + tensor inputs_27_cast = add(x = 
var_1297_cast, y = inputs_25_cast)[name = tensor("inputs_27_cast")]; + tensor var_1301 = const()[name = tensor("op_1301"), val = tensor([1])]; + tensor channels_mean_27_cast = reduce_mean(axes = var_1301, keep_dims = var_23, x = inputs_27_cast)[name = tensor("channels_mean_27_cast")]; + tensor zero_mean_27_cast = sub(x = inputs_27_cast, y = channels_mean_27_cast)[name = tensor("zero_mean_27_cast")]; + tensor zero_mean_sq_27_cast = mul(x = zero_mean_27_cast, y = zero_mean_27_cast)[name = tensor("zero_mean_sq_27_cast")]; + tensor var_1305 = const()[name = tensor("op_1305"), val = tensor([1])]; + tensor var_1306_cast = reduce_mean(axes = var_1305, keep_dims = var_23, x = zero_mean_sq_27_cast)[name = tensor("op_1306_cast")]; + tensor var_1307_to_fp16 = const()[name = tensor("op_1307_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1308_cast = add(x = var_1306_cast, y = var_1307_to_fp16)[name = tensor("op_1308_cast")]; + tensor denom_27_epsilon_0_to_fp16 = const()[name = tensor("denom_27_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_27_cast = rsqrt(epsilon = denom_27_epsilon_0_to_fp16, x = var_1308_cast)[name = tensor("denom_27_cast")]; + tensor out_27_cast = mul(x = zero_mean_27_cast, y = denom_27_cast)[name = tensor("out_27_cast")]; + tensor var_1312_to_fp16 = const()[name = tensor("op_1312_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(79339200)))]; + tensor var_1313_cast = add(x = out_27_cast, y = var_1312_to_fp16)[name = tensor("op_1313_cast")]; + tensor var_1315_to_fp16 = const()[name = tensor("op_1315_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(79341824)))]; + tensor hidden_states_67_cast = mul(x = var_1313_cast, y = var_1315_to_fp16)[name = tensor("hidden_states_67_cast")]; + tensor var_1322 = const()[name = tensor("op_1322"), val = tensor([1, 1])]; + tensor var_1324 = const()[name = tensor("op_1324"), val = tensor([1, 1])]; + tensor q_19_pad_type_0 = const()[name = tensor("q_19_pad_type_0"), val = tensor("custom")]; + tensor q_19_pad_0 = const()[name = tensor("q_19_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(79344448))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(80573312))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_19_cast = conv(dilations = var_1324, groups = var_31, pad = q_19_pad_0, pad_type = q_19_pad_type_0, strides = var_1322, weight = unet_down_blocks_2_attentions_0_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_67_cast)[name = tensor("q_19_cast")]; + tensor var_1328 = const()[name = tensor("op_1328"), val = tensor([1, 1])]; + tensor var_1330 = const()[name = tensor("op_1330"), val = tensor([1, 1])]; + tensor k_19_pad_type_0 = const()[name = tensor("k_19_pad_type_0"), val = tensor("custom")]; + tensor k_19_pad_0 = const()[name = tensor("k_19_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(80573504))), lut = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(82539648))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_19_cast = conv(dilations = var_1330, groups = var_31, pad = k_19_pad_0, pad_type = k_19_pad_type_0, strides = var_1328, weight = unet_down_blocks_2_attentions_0_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_19_cast")]; + tensor var_1334 = const()[name = tensor("op_1334"), val = tensor([1, 1])]; + tensor var_1336 = const()[name = tensor("op_1336"), val = tensor([1, 1])]; + tensor v_19_pad_type_0 = const()[name = tensor("v_19_pad_type_0"), val = tensor("custom")]; + tensor v_19_pad_0 = const()[name = tensor("v_19_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(82539840))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(84505984))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_19_cast = conv(dilations = var_1336, groups = var_31, pad = v_19_pad_0, pad_type = v_19_pad_type_0, strides = var_1334, weight = unet_down_blocks_2_attentions_0_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_19_cast")]; + tensor var_1340 = const()[name = tensor("op_1340"), val = tensor([2, 20, 64, -1])]; + tensor var_1341_cast = reshape(shape = var_1340, x = q_19_cast)[name = tensor("op_1341_cast")]; + tensor var_1342 = const()[name = tensor("op_1342"), val = tensor([2, 20, 64, -1])]; + tensor var_1343_cast = reshape(shape = var_1342, x = k_19_cast)[name = tensor("op_1343_cast")]; + tensor var_1344 = const()[name = tensor("op_1344"), val = tensor([2, 20, 64, -1])]; + tensor var_1345_cast = reshape(shape = var_1344, x = v_19_cast)[name = tensor("op_1345_cast")]; + tensor attn_weights_37_transpose_x_0 = const()[name = tensor("attn_weights_37_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_37_transpose_y_0 = const()[name = tensor("attn_weights_37_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_37_cast = matmul(transpose_x = attn_weights_37_transpose_x_0, transpose_y = attn_weights_37_transpose_y_0, x = var_1341_cast, y = var_1343_cast)[name = tensor("attn_weights_37_cast")]; + tensor attn_weights_39_cast = mul(x = attn_weights_37_cast, y = var_12_to_fp16)[name = tensor("attn_weights_39_cast")]; + tensor var_1349_cast = softmax(axis = var_18, x = attn_weights_39_cast)[name = tensor("op_1349_cast")]; + tensor attn_19_transpose_x_0 = const()[name = tensor("attn_19_transpose_x_0"), val = tensor(false)]; + tensor attn_19_transpose_y_0 = const()[name = tensor("attn_19_transpose_y_0"), val = tensor(true)]; + tensor attn_19_cast = matmul(transpose_x = attn_19_transpose_x_0, transpose_y = attn_19_transpose_y_0, x = var_1345_cast, y = var_1349_cast)[name = tensor("attn_19_cast")]; + tensor var_1353 = const()[name = tensor("op_1353"), val = tensor([2, 1280, 1, -1])]; + tensor input_133_cast = reshape(shape = var_1353, x = attn_19_cast)[name = tensor("input_133_cast")]; + tensor var_1358 = const()[name = tensor("op_1358"), val = tensor([1, 1])]; + tensor var_1360 = const()[name = tensor("op_1360"), val = tensor([1, 1])]; + 
tensor var_1362_pad_type_0 = const()[name = tensor("op_1362_pad_type_0"), val = tensor("custom")]; + tensor var_1362_pad_0 = const()[name = tensor("op_1362_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(84506176))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(85735040))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_0_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_0_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(85735232)))]; + tensor var_1362_cast = conv(bias = unet_down_blocks_2_attentions_0_transformer_blocks_0_attn2_to_out_0_bias_to_fp16, dilations = var_1360, groups = var_31, pad = var_1362_pad_0, pad_type = var_1362_pad_type_0, strides = var_1358, weight = unet_down_blocks_2_attentions_0_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized, x = input_133_cast)[name = tensor("op_1362_cast")]; + tensor inputs_29_cast = add(x = var_1362_cast, y = inputs_27_cast)[name = tensor("inputs_29_cast")]; + tensor var_1366 = const()[name = tensor("op_1366"), val = tensor([1])]; + tensor channels_mean_29_cast = reduce_mean(axes = var_1366, keep_dims = var_23, x = inputs_29_cast)[name = tensor("channels_mean_29_cast")]; + tensor zero_mean_29_cast = sub(x = inputs_29_cast, y = channels_mean_29_cast)[name = tensor("zero_mean_29_cast")]; + tensor zero_mean_sq_29_cast = mul(x = zero_mean_29_cast, y = zero_mean_29_cast)[name = tensor("zero_mean_sq_29_cast")]; + tensor var_1370 = const()[name = tensor("op_1370"), val = tensor([1])]; + tensor var_1371_cast = reduce_mean(axes = var_1370, keep_dims = var_23, x = zero_mean_sq_29_cast)[name = tensor("op_1371_cast")]; + tensor var_1372_to_fp16 = const()[name = tensor("op_1372_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1373_cast = add(x = var_1371_cast, y = var_1372_to_fp16)[name = tensor("op_1373_cast")]; + tensor denom_29_epsilon_0_to_fp16 = const()[name = tensor("denom_29_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_29_cast = rsqrt(epsilon = denom_29_epsilon_0_to_fp16, x = var_1373_cast)[name = tensor("denom_29_cast")]; + tensor out_29_cast = mul(x = zero_mean_29_cast, y = denom_29_cast)[name = tensor("out_29_cast")]; + tensor var_1377_to_fp16 = const()[name = tensor("op_1377_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(85737856)))]; + tensor var_1378_cast = add(x = out_29_cast, y = var_1377_to_fp16)[name = tensor("op_1378_cast")]; + tensor var_1380_to_fp16 = const()[name = tensor("op_1380_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(85740480)))]; + tensor input_135_cast = mul(x = var_1378_cast, y = var_1380_to_fp16)[name = tensor("input_135_cast")]; + tensor var_1388 = const()[name = tensor("op_1388"), val = tensor([1, 1])]; + tensor var_1390 = const()[name = tensor("op_1390"), val = tensor([1, 1])]; + tensor var_1392_pad_type_0 = const()[name = tensor("op_1392_pad_type_0"), val = tensor("custom")]; + tensor var_1392_pad_0 = const()[name = tensor("op_1392_pad_0"), val = tensor([0, 0, 0, 
0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(85743104))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(95573568))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_0_ff_net_0_proj_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(95573760))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(95581504))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_0_ff_net_0_proj_bias_to_fp16_palettized"), shape = tensor([10240])]; + tensor var_1392_cast = conv(bias = unet_down_blocks_2_attentions_0_transformer_blocks_0_ff_net_0_proj_bias_to_fp16_palettized, dilations = var_1390, groups = var_31, pad = var_1392_pad_0, pad_type = var_1392_pad_type_0, strides = var_1388, weight = unet_down_blocks_2_attentions_0_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized, x = input_135_cast)[name = tensor("op_1392_cast")]; + tensor var_1393_split_sizes_0 = const()[name = tensor("op_1393_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_1393_axis_0 = const()[name = tensor("op_1393_axis_0"), val = tensor(1)]; + tensor var_1393_cast_0, tensor var_1393_cast_1 = split(axis = var_1393_axis_0, split_sizes = var_1393_split_sizes_0, x = var_1392_cast)[name = tensor("op_1393_cast")]; + tensor var_1395_mode_0 = const()[name = tensor("op_1395_mode_0"), val = tensor("EXACT")]; + tensor var_1395_cast = gelu(mode = var_1395_mode_0, x = var_1393_cast_1)[name = tensor("op_1395_cast")]; + tensor input_137_cast = mul(x = var_1393_cast_0, y = var_1395_cast)[name = tensor("input_137_cast")]; + tensor var_1399 = const()[name = tensor("op_1399"), val = tensor([1, 1])]; + tensor var_1401 = const()[name = tensor("op_1401"), val = tensor([1, 1])]; + tensor var_1403_pad_type_0 = const()[name = tensor("op_1403_pad_type_0"), val = tensor("custom")]; + tensor var_1403_pad_0 = const()[name = tensor("op_1403_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(95581696))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(100496960))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_0_ff_net_2_bias_to_fp16 = const()[name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_0_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(100497152)))]; + tensor var_1403_cast = conv(bias = unet_down_blocks_2_attentions_0_transformer_blocks_0_ff_net_2_bias_to_fp16, dilations = var_1401, groups = var_31, pad = var_1403_pad_0, pad_type = var_1403_pad_type_0, strides = var_1399, weight = unet_down_blocks_2_attentions_0_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized, x = input_137_cast)[name = tensor("op_1403_cast")]; + tensor inputs_31_cast = add(x = 
var_1403_cast, y = inputs_29_cast)[name = tensor("inputs_31_cast")]; + tensor var_1413 = const()[name = tensor("op_1413"), val = tensor([1])]; + tensor channels_mean_31_cast = reduce_mean(axes = var_1413, keep_dims = var_23, x = inputs_31_cast)[name = tensor("channels_mean_31_cast")]; + tensor zero_mean_31_cast = sub(x = inputs_31_cast, y = channels_mean_31_cast)[name = tensor("zero_mean_31_cast")]; + tensor zero_mean_sq_31_cast = mul(x = zero_mean_31_cast, y = zero_mean_31_cast)[name = tensor("zero_mean_sq_31_cast")]; + tensor var_1417 = const()[name = tensor("op_1417"), val = tensor([1])]; + tensor var_1418_cast = reduce_mean(axes = var_1417, keep_dims = var_23, x = zero_mean_sq_31_cast)[name = tensor("op_1418_cast")]; + tensor var_1419_to_fp16 = const()[name = tensor("op_1419_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1420_cast = add(x = var_1418_cast, y = var_1419_to_fp16)[name = tensor("op_1420_cast")]; + tensor denom_31_epsilon_0_to_fp16 = const()[name = tensor("denom_31_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_31_cast = rsqrt(epsilon = denom_31_epsilon_0_to_fp16, x = var_1420_cast)[name = tensor("denom_31_cast")]; + tensor out_31_cast = mul(x = zero_mean_31_cast, y = denom_31_cast)[name = tensor("out_31_cast")]; + tensor var_1424_to_fp16 = const()[name = tensor("op_1424_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(100499776)))]; + tensor var_1425_cast = add(x = out_31_cast, y = var_1424_to_fp16)[name = tensor("op_1425_cast")]; + tensor var_1427_to_fp16 = const()[name = tensor("op_1427_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(100502400)))]; + tensor hidden_states_71_cast = mul(x = var_1425_cast, y = var_1427_to_fp16)[name = tensor("hidden_states_71_cast")]; + tensor var_1434 = const()[name = tensor("op_1434"), val = tensor([1, 1])]; + tensor var_1436 = const()[name = tensor("op_1436"), val = tensor([1, 1])]; + tensor q_21_pad_type_0 = const()[name = tensor("q_21_pad_type_0"), val = tensor("custom")]; + tensor q_21_pad_0 = const()[name = tensor("q_21_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(100505024))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(101733888))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_21_cast = conv(dilations = var_1436, groups = var_31, pad = q_21_pad_0, pad_type = q_21_pad_type_0, strides = var_1434, weight = unet_down_blocks_2_attentions_0_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_71_cast)[name = tensor("q_21_cast")]; + tensor var_1440 = const()[name = tensor("op_1440"), val = tensor([1, 1])]; + tensor var_1442 = const()[name = tensor("op_1442"), val = tensor([1, 1])]; + tensor k_21_pad_type_0 = const()[name = tensor("k_21_pad_type_0"), val = tensor("custom")]; + tensor k_21_pad_0 = const()[name = tensor("k_21_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(101734080))), lut = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(102962944))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_21_cast = conv(dilations = var_1442, groups = var_31, pad = k_21_pad_0, pad_type = k_21_pad_type_0, strides = var_1440, weight = unet_down_blocks_2_attentions_0_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_71_cast)[name = tensor("k_21_cast")]; + tensor var_1446 = const()[name = tensor("op_1446"), val = tensor([1, 1])]; + tensor var_1448 = const()[name = tensor("op_1448"), val = tensor([1, 1])]; + tensor v_21_pad_type_0 = const()[name = tensor("v_21_pad_type_0"), val = tensor("custom")]; + tensor v_21_pad_0 = const()[name = tensor("v_21_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(102963136))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(104192000))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_21_cast = conv(dilations = var_1448, groups = var_31, pad = v_21_pad_0, pad_type = v_21_pad_type_0, strides = var_1446, weight = unet_down_blocks_2_attentions_0_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_71_cast)[name = tensor("v_21_cast")]; + tensor var_1452 = const()[name = tensor("op_1452"), val = tensor([2, 20, 64, -1])]; + tensor var_1453_cast = reshape(shape = var_1452, x = q_21_cast)[name = tensor("op_1453_cast")]; + tensor var_1454 = const()[name = tensor("op_1454"), val = tensor([2, 20, 64, -1])]; + tensor var_1455_cast = reshape(shape = var_1454, x = k_21_cast)[name = tensor("op_1455_cast")]; + tensor var_1456 = const()[name = tensor("op_1456"), val = tensor([2, 20, 64, -1])]; + tensor var_1457_cast = reshape(shape = var_1456, x = v_21_cast)[name = tensor("op_1457_cast")]; + tensor attn_weights_41_transpose_x_0 = const()[name = tensor("attn_weights_41_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_41_transpose_y_0 = const()[name = tensor("attn_weights_41_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_41_cast = matmul(transpose_x = attn_weights_41_transpose_x_0, transpose_y = attn_weights_41_transpose_y_0, x = var_1453_cast, y = var_1455_cast)[name = tensor("attn_weights_41_cast")]; + tensor attn_weights_43_cast = mul(x = attn_weights_41_cast, y = var_12_to_fp16)[name = tensor("attn_weights_43_cast")]; + tensor var_1461_cast = softmax(axis = var_18, x = attn_weights_43_cast)[name = tensor("op_1461_cast")]; + tensor attn_21_transpose_x_0 = const()[name = tensor("attn_21_transpose_x_0"), val = tensor(false)]; + tensor attn_21_transpose_y_0 = const()[name = tensor("attn_21_transpose_y_0"), val = tensor(true)]; + tensor attn_21_cast = matmul(transpose_x = attn_21_transpose_x_0, transpose_y = attn_21_transpose_y_0, x = var_1457_cast, y = var_1461_cast)[name = tensor("attn_21_cast")]; + tensor var_1465 = const()[name = tensor("op_1465"), val = tensor([2, 1280, 1, -1])]; + tensor input_139_cast = reshape(shape = var_1465, x = attn_21_cast)[name = tensor("input_139_cast")]; + tensor var_1470 = const()[name = tensor("op_1470"), val = tensor([1, 1])]; + tensor var_1472 = const()[name = tensor("op_1472"), val = tensor([1, 1])]; + 
tensor var_1474_pad_type_0 = const()[name = tensor("op_1474_pad_type_0"), val = tensor("custom")]; + tensor var_1474_pad_0 = const()[name = tensor("op_1474_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(104192192))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(105421056))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_1_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_1_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(105421248)))]; + tensor var_1474_cast = conv(bias = unet_down_blocks_2_attentions_0_transformer_blocks_1_attn1_to_out_0_bias_to_fp16, dilations = var_1472, groups = var_31, pad = var_1474_pad_0, pad_type = var_1474_pad_type_0, strides = var_1470, weight = unet_down_blocks_2_attentions_0_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized, x = input_139_cast)[name = tensor("op_1474_cast")]; + tensor inputs_33_cast = add(x = var_1474_cast, y = inputs_31_cast)[name = tensor("inputs_33_cast")]; + tensor var_1478 = const()[name = tensor("op_1478"), val = tensor([1])]; + tensor channels_mean_33_cast = reduce_mean(axes = var_1478, keep_dims = var_23, x = inputs_33_cast)[name = tensor("channels_mean_33_cast")]; + tensor zero_mean_33_cast = sub(x = inputs_33_cast, y = channels_mean_33_cast)[name = tensor("zero_mean_33_cast")]; + tensor zero_mean_sq_33_cast = mul(x = zero_mean_33_cast, y = zero_mean_33_cast)[name = tensor("zero_mean_sq_33_cast")]; + tensor var_1482 = const()[name = tensor("op_1482"), val = tensor([1])]; + tensor var_1483_cast = reduce_mean(axes = var_1482, keep_dims = var_23, x = zero_mean_sq_33_cast)[name = tensor("op_1483_cast")]; + tensor var_1484_to_fp16 = const()[name = tensor("op_1484_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1485_cast = add(x = var_1483_cast, y = var_1484_to_fp16)[name = tensor("op_1485_cast")]; + tensor denom_33_epsilon_0_to_fp16 = const()[name = tensor("denom_33_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_33_cast = rsqrt(epsilon = denom_33_epsilon_0_to_fp16, x = var_1485_cast)[name = tensor("denom_33_cast")]; + tensor out_33_cast = mul(x = zero_mean_33_cast, y = denom_33_cast)[name = tensor("out_33_cast")]; + tensor var_1489_to_fp16 = const()[name = tensor("op_1489_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(105423872)))]; + tensor var_1490_cast = add(x = out_33_cast, y = var_1489_to_fp16)[name = tensor("op_1490_cast")]; + tensor var_1492_to_fp16 = const()[name = tensor("op_1492_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(105426496)))]; + tensor hidden_states_73_cast = mul(x = var_1490_cast, y = var_1492_to_fp16)[name = tensor("hidden_states_73_cast")]; + tensor var_1499 = const()[name = tensor("op_1499"), val = tensor([1, 1])]; + tensor var_1501 = const()[name = tensor("op_1501"), val = tensor([1, 1])]; + tensor q_23_pad_type_0 = const()[name = tensor("q_23_pad_type_0"), val = tensor("custom")]; + tensor q_23_pad_0 = const()[name = tensor("q_23_pad_0"), val = tensor([0, 
0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(105429120))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(106657984))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_23_cast = conv(dilations = var_1501, groups = var_31, pad = q_23_pad_0, pad_type = q_23_pad_type_0, strides = var_1499, weight = unet_down_blocks_2_attentions_0_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_73_cast)[name = tensor("q_23_cast")]; + tensor var_1505 = const()[name = tensor("op_1505"), val = tensor([1, 1])]; + tensor var_1507 = const()[name = tensor("op_1507"), val = tensor([1, 1])]; + tensor k_23_pad_type_0 = const()[name = tensor("k_23_pad_type_0"), val = tensor("custom")]; + tensor k_23_pad_0 = const()[name = tensor("k_23_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(106658176))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(108624320))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_23_cast = conv(dilations = var_1507, groups = var_31, pad = k_23_pad_0, pad_type = k_23_pad_type_0, strides = var_1505, weight = unet_down_blocks_2_attentions_0_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_23_cast")]; + tensor var_1511 = const()[name = tensor("op_1511"), val = tensor([1, 1])]; + tensor var_1513 = const()[name = tensor("op_1513"), val = tensor([1, 1])]; + tensor v_23_pad_type_0 = const()[name = tensor("v_23_pad_type_0"), val = tensor("custom")]; + tensor v_23_pad_0 = const()[name = tensor("v_23_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(108624512))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(110590656))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_23_cast = conv(dilations = var_1513, groups = var_31, pad = v_23_pad_0, pad_type = v_23_pad_type_0, strides = var_1511, weight = unet_down_blocks_2_attentions_0_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_23_cast")]; + tensor var_1517 = const()[name = tensor("op_1517"), val = tensor([2, 20, 64, -1])]; + tensor var_1518_cast = reshape(shape = var_1517, x = q_23_cast)[name = tensor("op_1518_cast")]; + tensor var_1519 = const()[name = tensor("op_1519"), val = tensor([2, 20, 64, -1])]; + tensor var_1520_cast = reshape(shape = var_1519, x = k_23_cast)[name = tensor("op_1520_cast")]; + tensor var_1521 = const()[name = tensor("op_1521"), val = tensor([2, 20, 64, -1])]; + tensor var_1522_cast = reshape(shape = var_1521, x = v_23_cast)[name = tensor("op_1522_cast")]; + tensor 
attn_weights_45_transpose_x_0 = const()[name = tensor("attn_weights_45_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_45_transpose_y_0 = const()[name = tensor("attn_weights_45_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_45_cast = matmul(transpose_x = attn_weights_45_transpose_x_0, transpose_y = attn_weights_45_transpose_y_0, x = var_1518_cast, y = var_1520_cast)[name = tensor("attn_weights_45_cast")]; + tensor attn_weights_47_cast = mul(x = attn_weights_45_cast, y = var_12_to_fp16)[name = tensor("attn_weights_47_cast")]; + tensor var_1526_cast = softmax(axis = var_18, x = attn_weights_47_cast)[name = tensor("op_1526_cast")]; + tensor attn_23_transpose_x_0 = const()[name = tensor("attn_23_transpose_x_0"), val = tensor(false)]; + tensor attn_23_transpose_y_0 = const()[name = tensor("attn_23_transpose_y_0"), val = tensor(true)]; + tensor attn_23_cast = matmul(transpose_x = attn_23_transpose_x_0, transpose_y = attn_23_transpose_y_0, x = var_1522_cast, y = var_1526_cast)[name = tensor("attn_23_cast")]; + tensor var_1530 = const()[name = tensor("op_1530"), val = tensor([2, 1280, 1, -1])]; + tensor input_141_cast = reshape(shape = var_1530, x = attn_23_cast)[name = tensor("input_141_cast")]; + tensor var_1535 = const()[name = tensor("op_1535"), val = tensor([1, 1])]; + tensor var_1537 = const()[name = tensor("op_1537"), val = tensor([1, 1])]; + tensor var_1539_pad_type_0 = const()[name = tensor("op_1539_pad_type_0"), val = tensor("custom")]; + tensor var_1539_pad_0 = const()[name = tensor("op_1539_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(110590848))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(111819712))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_1_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_1_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(111819904)))]; + tensor var_1539_cast = conv(bias = unet_down_blocks_2_attentions_0_transformer_blocks_1_attn2_to_out_0_bias_to_fp16, dilations = var_1537, groups = var_31, pad = var_1539_pad_0, pad_type = var_1539_pad_type_0, strides = var_1535, weight = unet_down_blocks_2_attentions_0_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized, x = input_141_cast)[name = tensor("op_1539_cast")]; + tensor inputs_35_cast = add(x = var_1539_cast, y = inputs_33_cast)[name = tensor("inputs_35_cast")]; + tensor var_1543 = const()[name = tensor("op_1543"), val = tensor([1])]; + tensor channels_mean_35_cast = reduce_mean(axes = var_1543, keep_dims = var_23, x = inputs_35_cast)[name = tensor("channels_mean_35_cast")]; + tensor zero_mean_35_cast = sub(x = inputs_35_cast, y = channels_mean_35_cast)[name = tensor("zero_mean_35_cast")]; + tensor zero_mean_sq_35_cast = mul(x = zero_mean_35_cast, y = zero_mean_35_cast)[name = tensor("zero_mean_sq_35_cast")]; + tensor var_1547 = const()[name = tensor("op_1547"), val = tensor([1])]; + tensor var_1548_cast = reduce_mean(axes = var_1547, keep_dims = var_23, x = zero_mean_sq_35_cast)[name = tensor("op_1548_cast")]; + tensor var_1549_to_fp16 = 
const()[name = tensor("op_1549_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1550_cast = add(x = var_1548_cast, y = var_1549_to_fp16)[name = tensor("op_1550_cast")]; + tensor denom_35_epsilon_0_to_fp16 = const()[name = tensor("denom_35_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_35_cast = rsqrt(epsilon = denom_35_epsilon_0_to_fp16, x = var_1550_cast)[name = tensor("denom_35_cast")]; + tensor out_35_cast = mul(x = zero_mean_35_cast, y = denom_35_cast)[name = tensor("out_35_cast")]; + tensor var_1554_to_fp16 = const()[name = tensor("op_1554_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(111822528)))]; + tensor var_1555_cast = add(x = out_35_cast, y = var_1554_to_fp16)[name = tensor("op_1555_cast")]; + tensor var_1557_to_fp16 = const()[name = tensor("op_1557_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(111825152)))]; + tensor input_143_cast = mul(x = var_1555_cast, y = var_1557_to_fp16)[name = tensor("input_143_cast")]; + tensor var_1565 = const()[name = tensor("op_1565"), val = tensor([1, 1])]; + tensor var_1567 = const()[name = tensor("op_1567"), val = tensor([1, 1])]; + tensor var_1569_pad_type_0 = const()[name = tensor("op_1569_pad_type_0"), val = tensor("custom")]; + tensor var_1569_pad_0 = const()[name = tensor("op_1569_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(111827776))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(121658240))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_1_ff_net_0_proj_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(121658432))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(121666176))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_1_ff_net_0_proj_bias_to_fp16_palettized"), shape = tensor([10240])]; + tensor var_1569_cast = conv(bias = unet_down_blocks_2_attentions_0_transformer_blocks_1_ff_net_0_proj_bias_to_fp16_palettized, dilations = var_1567, groups = var_31, pad = var_1569_pad_0, pad_type = var_1569_pad_type_0, strides = var_1565, weight = unet_down_blocks_2_attentions_0_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized, x = input_143_cast)[name = tensor("op_1569_cast")]; + tensor var_1570_split_sizes_0 = const()[name = tensor("op_1570_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_1570_axis_0 = const()[name = tensor("op_1570_axis_0"), val = tensor(1)]; + tensor var_1570_cast_0, tensor var_1570_cast_1 = split(axis = var_1570_axis_0, split_sizes = var_1570_split_sizes_0, x = var_1569_cast)[name = tensor("op_1570_cast")]; + tensor var_1572_mode_0 = const()[name = tensor("op_1572_mode_0"), val = tensor("EXACT")]; + tensor var_1572_cast = gelu(mode = var_1572_mode_0, x = var_1570_cast_1)[name = tensor("op_1572_cast")]; + tensor input_145_cast = mul(x = var_1570_cast_0, y = var_1572_cast)[name = tensor("input_145_cast")]; + tensor var_1576 = const()[name = tensor("op_1576"), val = tensor([1, 1])]; + tensor var_1578 = const()[name = tensor("op_1578"), 
val = tensor([1, 1])]; + tensor var_1580_pad_type_0 = const()[name = tensor("op_1580_pad_type_0"), val = tensor("custom")]; + tensor var_1580_pad_0 = const()[name = tensor("op_1580_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(121666368))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(126581632))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_1_ff_net_2_bias_to_fp16 = const()[name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_1_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(126581824)))]; + tensor var_1580_cast = conv(bias = unet_down_blocks_2_attentions_0_transformer_blocks_1_ff_net_2_bias_to_fp16, dilations = var_1578, groups = var_31, pad = var_1580_pad_0, pad_type = var_1580_pad_type_0, strides = var_1576, weight = unet_down_blocks_2_attentions_0_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized, x = input_145_cast)[name = tensor("op_1580_cast")]; + tensor inputs_37_cast = add(x = var_1580_cast, y = inputs_35_cast)[name = tensor("inputs_37_cast")]; + tensor var_1590 = const()[name = tensor("op_1590"), val = tensor([1])]; + tensor channels_mean_37_cast = reduce_mean(axes = var_1590, keep_dims = var_23, x = inputs_37_cast)[name = tensor("channels_mean_37_cast")]; + tensor zero_mean_37_cast = sub(x = inputs_37_cast, y = channels_mean_37_cast)[name = tensor("zero_mean_37_cast")]; + tensor zero_mean_sq_37_cast = mul(x = zero_mean_37_cast, y = zero_mean_37_cast)[name = tensor("zero_mean_sq_37_cast")]; + tensor var_1594 = const()[name = tensor("op_1594"), val = tensor([1])]; + tensor var_1595_cast = reduce_mean(axes = var_1594, keep_dims = var_23, x = zero_mean_sq_37_cast)[name = tensor("op_1595_cast")]; + tensor var_1596_to_fp16 = const()[name = tensor("op_1596_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1597_cast = add(x = var_1595_cast, y = var_1596_to_fp16)[name = tensor("op_1597_cast")]; + tensor denom_37_epsilon_0_to_fp16 = const()[name = tensor("denom_37_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_37_cast = rsqrt(epsilon = denom_37_epsilon_0_to_fp16, x = var_1597_cast)[name = tensor("denom_37_cast")]; + tensor out_37_cast = mul(x = zero_mean_37_cast, y = denom_37_cast)[name = tensor("out_37_cast")]; + tensor var_1601_to_fp16 = const()[name = tensor("op_1601_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(126584448)))]; + tensor var_1602_cast = add(x = out_37_cast, y = var_1601_to_fp16)[name = tensor("op_1602_cast")]; + tensor var_1604_to_fp16 = const()[name = tensor("op_1604_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(126587072)))]; + tensor hidden_states_77_cast = mul(x = var_1602_cast, y = var_1604_to_fp16)[name = tensor("hidden_states_77_cast")]; + tensor var_1611 = const()[name = tensor("op_1611"), val = tensor([1, 1])]; + tensor var_1613 = const()[name = tensor("op_1613"), val = tensor([1, 1])]; + tensor q_25_pad_type_0 = const()[name = tensor("q_25_pad_type_0"), val = tensor("custom")]; + tensor q_25_pad_0 = const()[name = tensor("q_25_pad_0"), val = tensor([0, 0, 0, 0])]; 
+ tensor unet_down_blocks_2_attentions_0_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(126589696))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(127818560))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_25_cast = conv(dilations = var_1613, groups = var_31, pad = q_25_pad_0, pad_type = q_25_pad_type_0, strides = var_1611, weight = unet_down_blocks_2_attentions_0_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_77_cast)[name = tensor("q_25_cast")]; + tensor var_1617 = const()[name = tensor("op_1617"), val = tensor([1, 1])]; + tensor var_1619 = const()[name = tensor("op_1619"), val = tensor([1, 1])]; + tensor k_25_pad_type_0 = const()[name = tensor("k_25_pad_type_0"), val = tensor("custom")]; + tensor k_25_pad_0 = const()[name = tensor("k_25_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(127818752))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(129047616))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_25_cast = conv(dilations = var_1619, groups = var_31, pad = k_25_pad_0, pad_type = k_25_pad_type_0, strides = var_1617, weight = unet_down_blocks_2_attentions_0_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_77_cast)[name = tensor("k_25_cast")]; + tensor var_1623 = const()[name = tensor("op_1623"), val = tensor([1, 1])]; + tensor var_1625 = const()[name = tensor("op_1625"), val = tensor([1, 1])]; + tensor v_25_pad_type_0 = const()[name = tensor("v_25_pad_type_0"), val = tensor("custom")]; + tensor v_25_pad_0 = const()[name = tensor("v_25_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(129047808))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(130276672))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_25_cast = conv(dilations = var_1625, groups = var_31, pad = v_25_pad_0, pad_type = v_25_pad_type_0, strides = var_1623, weight = unet_down_blocks_2_attentions_0_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_77_cast)[name = tensor("v_25_cast")]; + tensor var_1629 = const()[name = tensor("op_1629"), val = tensor([2, 20, 64, -1])]; + tensor var_1630_cast = reshape(shape = var_1629, x = q_25_cast)[name = tensor("op_1630_cast")]; + tensor var_1631 = const()[name = tensor("op_1631"), val = tensor([2, 20, 64, -1])]; + tensor var_1632_cast = reshape(shape = var_1631, x = k_25_cast)[name = tensor("op_1632_cast")]; + tensor var_1633 = const()[name = tensor("op_1633"), val = tensor([2, 20, 64, -1])]; + tensor var_1634_cast = reshape(shape = var_1633, x = v_25_cast)[name = tensor("op_1634_cast")]; + tensor attn_weights_49_transpose_x_0 
= const()[name = tensor("attn_weights_49_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_49_transpose_y_0 = const()[name = tensor("attn_weights_49_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_49_cast = matmul(transpose_x = attn_weights_49_transpose_x_0, transpose_y = attn_weights_49_transpose_y_0, x = var_1630_cast, y = var_1632_cast)[name = tensor("attn_weights_49_cast")]; + tensor attn_weights_51_cast = mul(x = attn_weights_49_cast, y = var_12_to_fp16)[name = tensor("attn_weights_51_cast")]; + tensor var_1638_cast = softmax(axis = var_18, x = attn_weights_51_cast)[name = tensor("op_1638_cast")]; + tensor attn_25_transpose_x_0 = const()[name = tensor("attn_25_transpose_x_0"), val = tensor(false)]; + tensor attn_25_transpose_y_0 = const()[name = tensor("attn_25_transpose_y_0"), val = tensor(true)]; + tensor attn_25_cast = matmul(transpose_x = attn_25_transpose_x_0, transpose_y = attn_25_transpose_y_0, x = var_1634_cast, y = var_1638_cast)[name = tensor("attn_25_cast")]; + tensor var_1642 = const()[name = tensor("op_1642"), val = tensor([2, 1280, 1, -1])]; + tensor input_147_cast = reshape(shape = var_1642, x = attn_25_cast)[name = tensor("input_147_cast")]; + tensor var_1647 = const()[name = tensor("op_1647"), val = tensor([1, 1])]; + tensor var_1649 = const()[name = tensor("op_1649"), val = tensor([1, 1])]; + tensor var_1651_pad_type_0 = const()[name = tensor("op_1651_pad_type_0"), val = tensor("custom")]; + tensor var_1651_pad_0 = const()[name = tensor("op_1651_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(130276864))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(131505728))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_2_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_2_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(131505920)))]; + tensor var_1651_cast = conv(bias = unet_down_blocks_2_attentions_0_transformer_blocks_2_attn1_to_out_0_bias_to_fp16, dilations = var_1649, groups = var_31, pad = var_1651_pad_0, pad_type = var_1651_pad_type_0, strides = var_1647, weight = unet_down_blocks_2_attentions_0_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized, x = input_147_cast)[name = tensor("op_1651_cast")]; + tensor inputs_39_cast = add(x = var_1651_cast, y = inputs_37_cast)[name = tensor("inputs_39_cast")]; + tensor var_1655 = const()[name = tensor("op_1655"), val = tensor([1])]; + tensor channels_mean_39_cast = reduce_mean(axes = var_1655, keep_dims = var_23, x = inputs_39_cast)[name = tensor("channels_mean_39_cast")]; + tensor zero_mean_39_cast = sub(x = inputs_39_cast, y = channels_mean_39_cast)[name = tensor("zero_mean_39_cast")]; + tensor zero_mean_sq_39_cast = mul(x = zero_mean_39_cast, y = zero_mean_39_cast)[name = tensor("zero_mean_sq_39_cast")]; + tensor var_1659 = const()[name = tensor("op_1659"), val = tensor([1])]; + tensor var_1660_cast = reduce_mean(axes = var_1659, keep_dims = var_23, x = zero_mean_sq_39_cast)[name = tensor("op_1660_cast")]; + tensor var_1661_to_fp16 = const()[name = 
tensor("op_1661_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1662_cast = add(x = var_1660_cast, y = var_1661_to_fp16)[name = tensor("op_1662_cast")]; + tensor denom_39_epsilon_0_to_fp16 = const()[name = tensor("denom_39_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_39_cast = rsqrt(epsilon = denom_39_epsilon_0_to_fp16, x = var_1662_cast)[name = tensor("denom_39_cast")]; + tensor out_39_cast = mul(x = zero_mean_39_cast, y = denom_39_cast)[name = tensor("out_39_cast")]; + tensor var_1666_to_fp16 = const()[name = tensor("op_1666_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(131508544)))]; + tensor var_1667_cast = add(x = out_39_cast, y = var_1666_to_fp16)[name = tensor("op_1667_cast")]; + tensor var_1669_to_fp16 = const()[name = tensor("op_1669_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(131511168)))]; + tensor hidden_states_79_cast = mul(x = var_1667_cast, y = var_1669_to_fp16)[name = tensor("hidden_states_79_cast")]; + tensor var_1676 = const()[name = tensor("op_1676"), val = tensor([1, 1])]; + tensor var_1678 = const()[name = tensor("op_1678"), val = tensor([1, 1])]; + tensor q_27_pad_type_0 = const()[name = tensor("q_27_pad_type_0"), val = tensor("custom")]; + tensor q_27_pad_0 = const()[name = tensor("q_27_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(131513792))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(132742656))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_27_cast = conv(dilations = var_1678, groups = var_31, pad = q_27_pad_0, pad_type = q_27_pad_type_0, strides = var_1676, weight = unet_down_blocks_2_attentions_0_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_79_cast)[name = tensor("q_27_cast")]; + tensor var_1682 = const()[name = tensor("op_1682"), val = tensor([1, 1])]; + tensor var_1684 = const()[name = tensor("op_1684"), val = tensor([1, 1])]; + tensor k_27_pad_type_0 = const()[name = tensor("k_27_pad_type_0"), val = tensor("custom")]; + tensor k_27_pad_0 = const()[name = tensor("k_27_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(132742848))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(134708992))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_27_cast = conv(dilations = var_1684, groups = var_31, pad = k_27_pad_0, pad_type = k_27_pad_type_0, strides = var_1682, weight = unet_down_blocks_2_attentions_0_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_27_cast")]; + tensor var_1688 = const()[name = tensor("op_1688"), val = tensor([1, 1])]; + tensor var_1690 = const()[name = tensor("op_1690"), val = tensor([1, 1])]; + tensor v_27_pad_type_0 = const()[name = tensor("v_27_pad_type_0"), val = tensor("custom")]; + tensor v_27_pad_0 = const()[name = 
tensor("v_27_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(134709184))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(136675328))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_27_cast = conv(dilations = var_1690, groups = var_31, pad = v_27_pad_0, pad_type = v_27_pad_type_0, strides = var_1688, weight = unet_down_blocks_2_attentions_0_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_27_cast")]; + tensor var_1694 = const()[name = tensor("op_1694"), val = tensor([2, 20, 64, -1])]; + tensor var_1695_cast = reshape(shape = var_1694, x = q_27_cast)[name = tensor("op_1695_cast")]; + tensor var_1696 = const()[name = tensor("op_1696"), val = tensor([2, 20, 64, -1])]; + tensor var_1697_cast = reshape(shape = var_1696, x = k_27_cast)[name = tensor("op_1697_cast")]; + tensor var_1698 = const()[name = tensor("op_1698"), val = tensor([2, 20, 64, -1])]; + tensor var_1699_cast = reshape(shape = var_1698, x = v_27_cast)[name = tensor("op_1699_cast")]; + tensor attn_weights_53_transpose_x_0 = const()[name = tensor("attn_weights_53_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_53_transpose_y_0 = const()[name = tensor("attn_weights_53_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_53_cast = matmul(transpose_x = attn_weights_53_transpose_x_0, transpose_y = attn_weights_53_transpose_y_0, x = var_1695_cast, y = var_1697_cast)[name = tensor("attn_weights_53_cast")]; + tensor attn_weights_55_cast = mul(x = attn_weights_53_cast, y = var_12_to_fp16)[name = tensor("attn_weights_55_cast")]; + tensor var_1703_cast = softmax(axis = var_18, x = attn_weights_55_cast)[name = tensor("op_1703_cast")]; + tensor attn_27_transpose_x_0 = const()[name = tensor("attn_27_transpose_x_0"), val = tensor(false)]; + tensor attn_27_transpose_y_0 = const()[name = tensor("attn_27_transpose_y_0"), val = tensor(true)]; + tensor attn_27_cast = matmul(transpose_x = attn_27_transpose_x_0, transpose_y = attn_27_transpose_y_0, x = var_1699_cast, y = var_1703_cast)[name = tensor("attn_27_cast")]; + tensor var_1707 = const()[name = tensor("op_1707"), val = tensor([2, 1280, 1, -1])]; + tensor input_149_cast = reshape(shape = var_1707, x = attn_27_cast)[name = tensor("input_149_cast")]; + tensor var_1712 = const()[name = tensor("op_1712"), val = tensor([1, 1])]; + tensor var_1714 = const()[name = tensor("op_1714"), val = tensor([1, 1])]; + tensor var_1716_pad_type_0 = const()[name = tensor("op_1716_pad_type_0"), val = tensor("custom")]; + tensor var_1716_pad_0 = const()[name = tensor("op_1716_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(136675520))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(137904384))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_2_attn2_to_out_0_bias_to_fp16 = const()[name = 
tensor("unet_down_blocks_2_attentions_0_transformer_blocks_2_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(137904576)))]; + tensor var_1716_cast = conv(bias = unet_down_blocks_2_attentions_0_transformer_blocks_2_attn2_to_out_0_bias_to_fp16, dilations = var_1714, groups = var_31, pad = var_1716_pad_0, pad_type = var_1716_pad_type_0, strides = var_1712, weight = unet_down_blocks_2_attentions_0_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized, x = input_149_cast)[name = tensor("op_1716_cast")]; + tensor inputs_41_cast = add(x = var_1716_cast, y = inputs_39_cast)[name = tensor("inputs_41_cast")]; + tensor var_1720 = const()[name = tensor("op_1720"), val = tensor([1])]; + tensor channels_mean_41_cast = reduce_mean(axes = var_1720, keep_dims = var_23, x = inputs_41_cast)[name = tensor("channels_mean_41_cast")]; + tensor zero_mean_41_cast = sub(x = inputs_41_cast, y = channels_mean_41_cast)[name = tensor("zero_mean_41_cast")]; + tensor zero_mean_sq_41_cast = mul(x = zero_mean_41_cast, y = zero_mean_41_cast)[name = tensor("zero_mean_sq_41_cast")]; + tensor var_1724 = const()[name = tensor("op_1724"), val = tensor([1])]; + tensor var_1725_cast = reduce_mean(axes = var_1724, keep_dims = var_23, x = zero_mean_sq_41_cast)[name = tensor("op_1725_cast")]; + tensor var_1726_to_fp16 = const()[name = tensor("op_1726_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1727_cast = add(x = var_1725_cast, y = var_1726_to_fp16)[name = tensor("op_1727_cast")]; + tensor denom_41_epsilon_0_to_fp16 = const()[name = tensor("denom_41_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_41_cast = rsqrt(epsilon = denom_41_epsilon_0_to_fp16, x = var_1727_cast)[name = tensor("denom_41_cast")]; + tensor out_41_cast = mul(x = zero_mean_41_cast, y = denom_41_cast)[name = tensor("out_41_cast")]; + tensor var_1731_to_fp16 = const()[name = tensor("op_1731_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(137907200)))]; + tensor var_1732_cast = add(x = out_41_cast, y = var_1731_to_fp16)[name = tensor("op_1732_cast")]; + tensor var_1734_to_fp16 = const()[name = tensor("op_1734_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(137909824)))]; + tensor input_151_cast = mul(x = var_1732_cast, y = var_1734_to_fp16)[name = tensor("input_151_cast")]; + tensor var_1742 = const()[name = tensor("op_1742"), val = tensor([1, 1])]; + tensor var_1744 = const()[name = tensor("op_1744"), val = tensor([1, 1])]; + tensor var_1746_pad_type_0 = const()[name = tensor("op_1746_pad_type_0"), val = tensor("custom")]; + tensor var_1746_pad_0 = const()[name = tensor("op_1746_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(137912448))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(147742912))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_2_ff_net_0_proj_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(147743104))), lut = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(147750848))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_2_ff_net_0_proj_bias_to_fp16_palettized"), shape = tensor([10240])]; + tensor var_1746_cast = conv(bias = unet_down_blocks_2_attentions_0_transformer_blocks_2_ff_net_0_proj_bias_to_fp16_palettized, dilations = var_1744, groups = var_31, pad = var_1746_pad_0, pad_type = var_1746_pad_type_0, strides = var_1742, weight = unet_down_blocks_2_attentions_0_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized, x = input_151_cast)[name = tensor("op_1746_cast")]; + tensor var_1747_split_sizes_0 = const()[name = tensor("op_1747_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_1747_axis_0 = const()[name = tensor("op_1747_axis_0"), val = tensor(1)]; + tensor var_1747_cast_0, tensor var_1747_cast_1 = split(axis = var_1747_axis_0, split_sizes = var_1747_split_sizes_0, x = var_1746_cast)[name = tensor("op_1747_cast")]; + tensor var_1749_mode_0 = const()[name = tensor("op_1749_mode_0"), val = tensor("EXACT")]; + tensor var_1749_cast = gelu(mode = var_1749_mode_0, x = var_1747_cast_1)[name = tensor("op_1749_cast")]; + tensor input_153_cast = mul(x = var_1747_cast_0, y = var_1749_cast)[name = tensor("input_153_cast")]; + tensor var_1753 = const()[name = tensor("op_1753"), val = tensor([1, 1])]; + tensor var_1755 = const()[name = tensor("op_1755"), val = tensor([1, 1])]; + tensor var_1757_pad_type_0 = const()[name = tensor("op_1757_pad_type_0"), val = tensor("custom")]; + tensor var_1757_pad_0 = const()[name = tensor("op_1757_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(147751040))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(152666304))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_2_ff_net_2_bias_to_fp16 = const()[name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_2_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(152666496)))]; + tensor var_1757_cast = conv(bias = unet_down_blocks_2_attentions_0_transformer_blocks_2_ff_net_2_bias_to_fp16, dilations = var_1755, groups = var_31, pad = var_1757_pad_0, pad_type = var_1757_pad_type_0, strides = var_1753, weight = unet_down_blocks_2_attentions_0_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized, x = input_153_cast)[name = tensor("op_1757_cast")]; + tensor inputs_43_cast = add(x = var_1757_cast, y = inputs_41_cast)[name = tensor("inputs_43_cast")]; + tensor var_1767 = const()[name = tensor("op_1767"), val = tensor([1])]; + tensor channels_mean_43_cast = reduce_mean(axes = var_1767, keep_dims = var_23, x = inputs_43_cast)[name = tensor("channels_mean_43_cast")]; + tensor zero_mean_43_cast = sub(x = inputs_43_cast, y = channels_mean_43_cast)[name = tensor("zero_mean_43_cast")]; + tensor zero_mean_sq_43_cast = mul(x = zero_mean_43_cast, y = zero_mean_43_cast)[name = tensor("zero_mean_sq_43_cast")]; + tensor var_1771 = const()[name = tensor("op_1771"), val = tensor([1])]; + tensor var_1772_cast = reduce_mean(axes = var_1771, keep_dims = var_23, x = zero_mean_sq_43_cast)[name = tensor("op_1772_cast")]; + tensor var_1773_to_fp16 = 
const()[name = tensor("op_1773_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1774_cast = add(x = var_1772_cast, y = var_1773_to_fp16)[name = tensor("op_1774_cast")]; + tensor denom_43_epsilon_0_to_fp16 = const()[name = tensor("denom_43_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_43_cast = rsqrt(epsilon = denom_43_epsilon_0_to_fp16, x = var_1774_cast)[name = tensor("denom_43_cast")]; + tensor out_43_cast = mul(x = zero_mean_43_cast, y = denom_43_cast)[name = tensor("out_43_cast")]; + tensor var_1778_to_fp16 = const()[name = tensor("op_1778_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(152669120)))]; + tensor var_1779_cast = add(x = out_43_cast, y = var_1778_to_fp16)[name = tensor("op_1779_cast")]; + tensor var_1781_to_fp16 = const()[name = tensor("op_1781_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(152671744)))]; + tensor hidden_states_83_cast = mul(x = var_1779_cast, y = var_1781_to_fp16)[name = tensor("hidden_states_83_cast")]; + tensor var_1788 = const()[name = tensor("op_1788"), val = tensor([1, 1])]; + tensor var_1790 = const()[name = tensor("op_1790"), val = tensor([1, 1])]; + tensor q_29_pad_type_0 = const()[name = tensor("q_29_pad_type_0"), val = tensor("custom")]; + tensor q_29_pad_0 = const()[name = tensor("q_29_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(152674368))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(153903232))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_29_cast = conv(dilations = var_1790, groups = var_31, pad = q_29_pad_0, pad_type = q_29_pad_type_0, strides = var_1788, weight = unet_down_blocks_2_attentions_0_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_83_cast)[name = tensor("q_29_cast")]; + tensor var_1794 = const()[name = tensor("op_1794"), val = tensor([1, 1])]; + tensor var_1796 = const()[name = tensor("op_1796"), val = tensor([1, 1])]; + tensor k_29_pad_type_0 = const()[name = tensor("k_29_pad_type_0"), val = tensor("custom")]; + tensor k_29_pad_0 = const()[name = tensor("k_29_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(153903424))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(155132288))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_29_cast = conv(dilations = var_1796, groups = var_31, pad = k_29_pad_0, pad_type = k_29_pad_type_0, strides = var_1794, weight = unet_down_blocks_2_attentions_0_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_83_cast)[name = tensor("k_29_cast")]; + tensor var_1800 = const()[name = tensor("op_1800"), val = tensor([1, 1])]; + tensor var_1802 = const()[name = tensor("op_1802"), val = tensor([1, 1])]; + tensor v_29_pad_type_0 = const()[name = tensor("v_29_pad_type_0"), val = tensor("custom")]; + tensor v_29_pad_0 = const()[name = 
tensor("v_29_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(155132480))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(156361344))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_29_cast = conv(dilations = var_1802, groups = var_31, pad = v_29_pad_0, pad_type = v_29_pad_type_0, strides = var_1800, weight = unet_down_blocks_2_attentions_0_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_83_cast)[name = tensor("v_29_cast")]; + tensor var_1806 = const()[name = tensor("op_1806"), val = tensor([2, 20, 64, -1])]; + tensor var_1807_cast = reshape(shape = var_1806, x = q_29_cast)[name = tensor("op_1807_cast")]; + tensor var_1808 = const()[name = tensor("op_1808"), val = tensor([2, 20, 64, -1])]; + tensor var_1809_cast = reshape(shape = var_1808, x = k_29_cast)[name = tensor("op_1809_cast")]; + tensor var_1810 = const()[name = tensor("op_1810"), val = tensor([2, 20, 64, -1])]; + tensor var_1811_cast = reshape(shape = var_1810, x = v_29_cast)[name = tensor("op_1811_cast")]; + tensor attn_weights_57_transpose_x_0 = const()[name = tensor("attn_weights_57_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_57_transpose_y_0 = const()[name = tensor("attn_weights_57_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_57_cast = matmul(transpose_x = attn_weights_57_transpose_x_0, transpose_y = attn_weights_57_transpose_y_0, x = var_1807_cast, y = var_1809_cast)[name = tensor("attn_weights_57_cast")]; + tensor attn_weights_59_cast = mul(x = attn_weights_57_cast, y = var_12_to_fp16)[name = tensor("attn_weights_59_cast")]; + tensor var_1815_cast = softmax(axis = var_18, x = attn_weights_59_cast)[name = tensor("op_1815_cast")]; + tensor attn_29_transpose_x_0 = const()[name = tensor("attn_29_transpose_x_0"), val = tensor(false)]; + tensor attn_29_transpose_y_0 = const()[name = tensor("attn_29_transpose_y_0"), val = tensor(true)]; + tensor attn_29_cast = matmul(transpose_x = attn_29_transpose_x_0, transpose_y = attn_29_transpose_y_0, x = var_1811_cast, y = var_1815_cast)[name = tensor("attn_29_cast")]; + tensor var_1819 = const()[name = tensor("op_1819"), val = tensor([2, 1280, 1, -1])]; + tensor input_155_cast = reshape(shape = var_1819, x = attn_29_cast)[name = tensor("input_155_cast")]; + tensor var_1824 = const()[name = tensor("op_1824"), val = tensor([1, 1])]; + tensor var_1826 = const()[name = tensor("op_1826"), val = tensor([1, 1])]; + tensor var_1828_pad_type_0 = const()[name = tensor("op_1828_pad_type_0"), val = tensor("custom")]; + tensor var_1828_pad_0 = const()[name = tensor("op_1828_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(156361536))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(157590400))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_3_attn1_to_out_0_bias_to_fp16 = const()[name = 
tensor("unet_down_blocks_2_attentions_0_transformer_blocks_3_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(157590592)))]; + tensor var_1828_cast = conv(bias = unet_down_blocks_2_attentions_0_transformer_blocks_3_attn1_to_out_0_bias_to_fp16, dilations = var_1826, groups = var_31, pad = var_1828_pad_0, pad_type = var_1828_pad_type_0, strides = var_1824, weight = unet_down_blocks_2_attentions_0_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized, x = input_155_cast)[name = tensor("op_1828_cast")]; + tensor inputs_45_cast = add(x = var_1828_cast, y = inputs_43_cast)[name = tensor("inputs_45_cast")]; + tensor var_1832 = const()[name = tensor("op_1832"), val = tensor([1])]; + tensor channels_mean_45_cast = reduce_mean(axes = var_1832, keep_dims = var_23, x = inputs_45_cast)[name = tensor("channels_mean_45_cast")]; + tensor zero_mean_45_cast = sub(x = inputs_45_cast, y = channels_mean_45_cast)[name = tensor("zero_mean_45_cast")]; + tensor zero_mean_sq_45_cast = mul(x = zero_mean_45_cast, y = zero_mean_45_cast)[name = tensor("zero_mean_sq_45_cast")]; + tensor var_1836 = const()[name = tensor("op_1836"), val = tensor([1])]; + tensor var_1837_cast = reduce_mean(axes = var_1836, keep_dims = var_23, x = zero_mean_sq_45_cast)[name = tensor("op_1837_cast")]; + tensor var_1838_to_fp16 = const()[name = tensor("op_1838_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1839_cast = add(x = var_1837_cast, y = var_1838_to_fp16)[name = tensor("op_1839_cast")]; + tensor denom_45_epsilon_0_to_fp16 = const()[name = tensor("denom_45_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_45_cast = rsqrt(epsilon = denom_45_epsilon_0_to_fp16, x = var_1839_cast)[name = tensor("denom_45_cast")]; + tensor out_45_cast = mul(x = zero_mean_45_cast, y = denom_45_cast)[name = tensor("out_45_cast")]; + tensor var_1843_to_fp16 = const()[name = tensor("op_1843_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(157593216)))]; + tensor var_1844_cast = add(x = out_45_cast, y = var_1843_to_fp16)[name = tensor("op_1844_cast")]; + tensor var_1846_to_fp16 = const()[name = tensor("op_1846_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(157595840)))]; + tensor hidden_states_85_cast = mul(x = var_1844_cast, y = var_1846_to_fp16)[name = tensor("hidden_states_85_cast")]; + tensor var_1853 = const()[name = tensor("op_1853"), val = tensor([1, 1])]; + tensor var_1855 = const()[name = tensor("op_1855"), val = tensor([1, 1])]; + tensor q_31_pad_type_0 = const()[name = tensor("q_31_pad_type_0"), val = tensor("custom")]; + tensor q_31_pad_0 = const()[name = tensor("q_31_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(157598464))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(158827328))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_31_cast = conv(dilations = var_1855, groups = var_31, pad = q_31_pad_0, pad_type = q_31_pad_type_0, strides = var_1853, weight = unet_down_blocks_2_attentions_0_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_85_cast)[name = tensor("q_31_cast")]; + tensor 
var_1859 = const()[name = tensor("op_1859"), val = tensor([1, 1])]; + tensor var_1861 = const()[name = tensor("op_1861"), val = tensor([1, 1])]; + tensor k_31_pad_type_0 = const()[name = tensor("k_31_pad_type_0"), val = tensor("custom")]; + tensor k_31_pad_0 = const()[name = tensor("k_31_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(158827520))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(160793664))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_31_cast = conv(dilations = var_1861, groups = var_31, pad = k_31_pad_0, pad_type = k_31_pad_type_0, strides = var_1859, weight = unet_down_blocks_2_attentions_0_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_31_cast")]; + tensor var_1865 = const()[name = tensor("op_1865"), val = tensor([1, 1])]; + tensor var_1867 = const()[name = tensor("op_1867"), val = tensor([1, 1])]; + tensor v_31_pad_type_0 = const()[name = tensor("v_31_pad_type_0"), val = tensor("custom")]; + tensor v_31_pad_0 = const()[name = tensor("v_31_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(160793856))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(162760000))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_31_cast = conv(dilations = var_1867, groups = var_31, pad = v_31_pad_0, pad_type = v_31_pad_type_0, strides = var_1865, weight = unet_down_blocks_2_attentions_0_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_31_cast")]; + tensor var_1871 = const()[name = tensor("op_1871"), val = tensor([2, 20, 64, -1])]; + tensor var_1872_cast = reshape(shape = var_1871, x = q_31_cast)[name = tensor("op_1872_cast")]; + tensor var_1873 = const()[name = tensor("op_1873"), val = tensor([2, 20, 64, -1])]; + tensor var_1874_cast = reshape(shape = var_1873, x = k_31_cast)[name = tensor("op_1874_cast")]; + tensor var_1875 = const()[name = tensor("op_1875"), val = tensor([2, 20, 64, -1])]; + tensor var_1876_cast = reshape(shape = var_1875, x = v_31_cast)[name = tensor("op_1876_cast")]; + tensor attn_weights_61_transpose_x_0 = const()[name = tensor("attn_weights_61_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_61_transpose_y_0 = const()[name = tensor("attn_weights_61_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_61_cast = matmul(transpose_x = attn_weights_61_transpose_x_0, transpose_y = attn_weights_61_transpose_y_0, x = var_1872_cast, y = var_1874_cast)[name = tensor("attn_weights_61_cast")]; + tensor attn_weights_63_cast = mul(x = attn_weights_61_cast, y = var_12_to_fp16)[name = tensor("attn_weights_63_cast")]; + tensor var_1880_cast = softmax(axis = var_18, x = attn_weights_63_cast)[name = tensor("op_1880_cast")]; + tensor attn_31_transpose_x_0 = const()[name = tensor("attn_31_transpose_x_0"), val = tensor(false)]; + tensor attn_31_transpose_y_0 = 
const()[name = tensor("attn_31_transpose_y_0"), val = tensor(true)]; + tensor attn_31_cast = matmul(transpose_x = attn_31_transpose_x_0, transpose_y = attn_31_transpose_y_0, x = var_1876_cast, y = var_1880_cast)[name = tensor("attn_31_cast")]; + tensor var_1884 = const()[name = tensor("op_1884"), val = tensor([2, 1280, 1, -1])]; + tensor input_157_cast = reshape(shape = var_1884, x = attn_31_cast)[name = tensor("input_157_cast")]; + tensor var_1889 = const()[name = tensor("op_1889"), val = tensor([1, 1])]; + tensor var_1891 = const()[name = tensor("op_1891"), val = tensor([1, 1])]; + tensor var_1893_pad_type_0 = const()[name = tensor("op_1893_pad_type_0"), val = tensor("custom")]; + tensor var_1893_pad_0 = const()[name = tensor("op_1893_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(162760192))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(163989056))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_3_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_3_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(163989248)))]; + tensor var_1893_cast = conv(bias = unet_down_blocks_2_attentions_0_transformer_blocks_3_attn2_to_out_0_bias_to_fp16, dilations = var_1891, groups = var_31, pad = var_1893_pad_0, pad_type = var_1893_pad_type_0, strides = var_1889, weight = unet_down_blocks_2_attentions_0_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized, x = input_157_cast)[name = tensor("op_1893_cast")]; + tensor inputs_47_cast = add(x = var_1893_cast, y = inputs_45_cast)[name = tensor("inputs_47_cast")]; + tensor var_1897 = const()[name = tensor("op_1897"), val = tensor([1])]; + tensor channels_mean_47_cast = reduce_mean(axes = var_1897, keep_dims = var_23, x = inputs_47_cast)[name = tensor("channels_mean_47_cast")]; + tensor zero_mean_47_cast = sub(x = inputs_47_cast, y = channels_mean_47_cast)[name = tensor("zero_mean_47_cast")]; + tensor zero_mean_sq_47_cast = mul(x = zero_mean_47_cast, y = zero_mean_47_cast)[name = tensor("zero_mean_sq_47_cast")]; + tensor var_1901 = const()[name = tensor("op_1901"), val = tensor([1])]; + tensor var_1902_cast = reduce_mean(axes = var_1901, keep_dims = var_23, x = zero_mean_sq_47_cast)[name = tensor("op_1902_cast")]; + tensor var_1903_to_fp16 = const()[name = tensor("op_1903_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1904_cast = add(x = var_1902_cast, y = var_1903_to_fp16)[name = tensor("op_1904_cast")]; + tensor denom_47_epsilon_0_to_fp16 = const()[name = tensor("denom_47_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_47_cast = rsqrt(epsilon = denom_47_epsilon_0_to_fp16, x = var_1904_cast)[name = tensor("denom_47_cast")]; + tensor out_47_cast = mul(x = zero_mean_47_cast, y = denom_47_cast)[name = tensor("out_47_cast")]; + tensor var_1908_to_fp16 = const()[name = tensor("op_1908_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(163991872)))]; + tensor var_1909_cast = add(x = out_47_cast, y = var_1908_to_fp16)[name = tensor("op_1909_cast")]; + tensor 
var_1911_to_fp16 = const()[name = tensor("op_1911_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(163994496)))]; + tensor input_159_cast = mul(x = var_1909_cast, y = var_1911_to_fp16)[name = tensor("input_159_cast")]; + tensor var_1919 = const()[name = tensor("op_1919"), val = tensor([1, 1])]; + tensor var_1921 = const()[name = tensor("op_1921"), val = tensor([1, 1])]; + tensor var_1923_pad_type_0 = const()[name = tensor("op_1923_pad_type_0"), val = tensor("custom")]; + tensor var_1923_pad_0 = const()[name = tensor("op_1923_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(163997120))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(173827584))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_3_ff_net_0_proj_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(173827776))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(173835520))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_3_ff_net_0_proj_bias_to_fp16_palettized"), shape = tensor([10240])]; + tensor var_1923_cast = conv(bias = unet_down_blocks_2_attentions_0_transformer_blocks_3_ff_net_0_proj_bias_to_fp16_palettized, dilations = var_1921, groups = var_31, pad = var_1923_pad_0, pad_type = var_1923_pad_type_0, strides = var_1919, weight = unet_down_blocks_2_attentions_0_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized, x = input_159_cast)[name = tensor("op_1923_cast")]; + tensor var_1924_split_sizes_0 = const()[name = tensor("op_1924_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_1924_axis_0 = const()[name = tensor("op_1924_axis_0"), val = tensor(1)]; + tensor var_1924_cast_0, tensor var_1924_cast_1 = split(axis = var_1924_axis_0, split_sizes = var_1924_split_sizes_0, x = var_1923_cast)[name = tensor("op_1924_cast")]; + tensor var_1926_mode_0 = const()[name = tensor("op_1926_mode_0"), val = tensor("EXACT")]; + tensor var_1926_cast = gelu(mode = var_1926_mode_0, x = var_1924_cast_1)[name = tensor("op_1926_cast")]; + tensor input_161_cast = mul(x = var_1924_cast_0, y = var_1926_cast)[name = tensor("input_161_cast")]; + tensor var_1930 = const()[name = tensor("op_1930"), val = tensor([1, 1])]; + tensor var_1932 = const()[name = tensor("op_1932"), val = tensor([1, 1])]; + tensor var_1934_pad_type_0 = const()[name = tensor("op_1934_pad_type_0"), val = tensor("custom")]; + tensor var_1934_pad_0 = const()[name = tensor("op_1934_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(173835712))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(178750976))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_3_ff_net_2_bias_to_fp16 = 
const()[name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_3_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(178751168)))]; + tensor var_1934_cast = conv(bias = unet_down_blocks_2_attentions_0_transformer_blocks_3_ff_net_2_bias_to_fp16, dilations = var_1932, groups = var_31, pad = var_1934_pad_0, pad_type = var_1934_pad_type_0, strides = var_1930, weight = unet_down_blocks_2_attentions_0_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized, x = input_161_cast)[name = tensor("op_1934_cast")]; + tensor inputs_49_cast = add(x = var_1934_cast, y = inputs_47_cast)[name = tensor("inputs_49_cast")]; + tensor var_1944 = const()[name = tensor("op_1944"), val = tensor([1])]; + tensor channels_mean_49_cast = reduce_mean(axes = var_1944, keep_dims = var_23, x = inputs_49_cast)[name = tensor("channels_mean_49_cast")]; + tensor zero_mean_49_cast = sub(x = inputs_49_cast, y = channels_mean_49_cast)[name = tensor("zero_mean_49_cast")]; + tensor zero_mean_sq_49_cast = mul(x = zero_mean_49_cast, y = zero_mean_49_cast)[name = tensor("zero_mean_sq_49_cast")]; + tensor var_1948 = const()[name = tensor("op_1948"), val = tensor([1])]; + tensor var_1949_cast = reduce_mean(axes = var_1948, keep_dims = var_23, x = zero_mean_sq_49_cast)[name = tensor("op_1949_cast")]; + tensor var_1950_to_fp16 = const()[name = tensor("op_1950_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1951_cast = add(x = var_1949_cast, y = var_1950_to_fp16)[name = tensor("op_1951_cast")]; + tensor denom_49_epsilon_0_to_fp16 = const()[name = tensor("denom_49_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_49_cast = rsqrt(epsilon = denom_49_epsilon_0_to_fp16, x = var_1951_cast)[name = tensor("denom_49_cast")]; + tensor out_49_cast = mul(x = zero_mean_49_cast, y = denom_49_cast)[name = tensor("out_49_cast")]; + tensor var_1955_to_fp16 = const()[name = tensor("op_1955_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(178753792)))]; + tensor var_1956_cast = add(x = out_49_cast, y = var_1955_to_fp16)[name = tensor("op_1956_cast")]; + tensor var_1958_to_fp16 = const()[name = tensor("op_1958_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(178756416)))]; + tensor hidden_states_89_cast = mul(x = var_1956_cast, y = var_1958_to_fp16)[name = tensor("hidden_states_89_cast")]; + tensor var_1965 = const()[name = tensor("op_1965"), val = tensor([1, 1])]; + tensor var_1967 = const()[name = tensor("op_1967"), val = tensor([1, 1])]; + tensor q_33_pad_type_0 = const()[name = tensor("q_33_pad_type_0"), val = tensor("custom")]; + tensor q_33_pad_0 = const()[name = tensor("q_33_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(178759040))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(179987904))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_33_cast = conv(dilations = var_1967, groups = var_31, pad = q_33_pad_0, pad_type = q_33_pad_type_0, strides = var_1965, weight = unet_down_blocks_2_attentions_0_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_89_cast)[name = tensor("q_33_cast")]; + tensor 
var_1971 = const()[name = tensor("op_1971"), val = tensor([1, 1])]; + tensor var_1973 = const()[name = tensor("op_1973"), val = tensor([1, 1])]; + tensor k_33_pad_type_0 = const()[name = tensor("k_33_pad_type_0"), val = tensor("custom")]; + tensor k_33_pad_0 = const()[name = tensor("k_33_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(179988096))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(181216960))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_33_cast = conv(dilations = var_1973, groups = var_31, pad = k_33_pad_0, pad_type = k_33_pad_type_0, strides = var_1971, weight = unet_down_blocks_2_attentions_0_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_89_cast)[name = tensor("k_33_cast")]; + tensor var_1977 = const()[name = tensor("op_1977"), val = tensor([1, 1])]; + tensor var_1979 = const()[name = tensor("op_1979"), val = tensor([1, 1])]; + tensor v_33_pad_type_0 = const()[name = tensor("v_33_pad_type_0"), val = tensor("custom")]; + tensor v_33_pad_0 = const()[name = tensor("v_33_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(181217152))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(182446016))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_33_cast = conv(dilations = var_1979, groups = var_31, pad = v_33_pad_0, pad_type = v_33_pad_type_0, strides = var_1977, weight = unet_down_blocks_2_attentions_0_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_89_cast)[name = tensor("v_33_cast")]; + tensor var_1983 = const()[name = tensor("op_1983"), val = tensor([2, 20, 64, -1])]; + tensor var_1984_cast = reshape(shape = var_1983, x = q_33_cast)[name = tensor("op_1984_cast")]; + tensor var_1985 = const()[name = tensor("op_1985"), val = tensor([2, 20, 64, -1])]; + tensor var_1986_cast = reshape(shape = var_1985, x = k_33_cast)[name = tensor("op_1986_cast")]; + tensor var_1987 = const()[name = tensor("op_1987"), val = tensor([2, 20, 64, -1])]; + tensor var_1988_cast = reshape(shape = var_1987, x = v_33_cast)[name = tensor("op_1988_cast")]; + tensor attn_weights_65_transpose_x_0 = const()[name = tensor("attn_weights_65_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_65_transpose_y_0 = const()[name = tensor("attn_weights_65_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_65_cast = matmul(transpose_x = attn_weights_65_transpose_x_0, transpose_y = attn_weights_65_transpose_y_0, x = var_1984_cast, y = var_1986_cast)[name = tensor("attn_weights_65_cast")]; + tensor attn_weights_67_cast = mul(x = attn_weights_65_cast, y = var_12_to_fp16)[name = tensor("attn_weights_67_cast")]; + tensor var_1992_cast = softmax(axis = var_18, x = attn_weights_67_cast)[name = tensor("op_1992_cast")]; + tensor attn_33_transpose_x_0 = const()[name = tensor("attn_33_transpose_x_0"), val = tensor(false)]; + tensor attn_33_transpose_y_0 = 
const()[name = tensor("attn_33_transpose_y_0"), val = tensor(true)]; + tensor attn_33_cast = matmul(transpose_x = attn_33_transpose_x_0, transpose_y = attn_33_transpose_y_0, x = var_1988_cast, y = var_1992_cast)[name = tensor("attn_33_cast")]; + tensor var_1996 = const()[name = tensor("op_1996"), val = tensor([2, 1280, 1, -1])]; + tensor input_163_cast = reshape(shape = var_1996, x = attn_33_cast)[name = tensor("input_163_cast")]; + tensor var_2001 = const()[name = tensor("op_2001"), val = tensor([1, 1])]; + tensor var_2003 = const()[name = tensor("op_2003"), val = tensor([1, 1])]; + tensor var_2005_pad_type_0 = const()[name = tensor("op_2005_pad_type_0"), val = tensor("custom")]; + tensor var_2005_pad_0 = const()[name = tensor("op_2005_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(182446208))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(183675072))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_4_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_4_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(183675264)))]; + tensor var_2005_cast = conv(bias = unet_down_blocks_2_attentions_0_transformer_blocks_4_attn1_to_out_0_bias_to_fp16, dilations = var_2003, groups = var_31, pad = var_2005_pad_0, pad_type = var_2005_pad_type_0, strides = var_2001, weight = unet_down_blocks_2_attentions_0_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized, x = input_163_cast)[name = tensor("op_2005_cast")]; + tensor inputs_51_cast = add(x = var_2005_cast, y = inputs_49_cast)[name = tensor("inputs_51_cast")]; + tensor var_2009 = const()[name = tensor("op_2009"), val = tensor([1])]; + tensor channels_mean_51_cast = reduce_mean(axes = var_2009, keep_dims = var_23, x = inputs_51_cast)[name = tensor("channels_mean_51_cast")]; + tensor zero_mean_51_cast = sub(x = inputs_51_cast, y = channels_mean_51_cast)[name = tensor("zero_mean_51_cast")]; + tensor zero_mean_sq_51_cast = mul(x = zero_mean_51_cast, y = zero_mean_51_cast)[name = tensor("zero_mean_sq_51_cast")]; + tensor var_2013 = const()[name = tensor("op_2013"), val = tensor([1])]; + tensor var_2014_cast = reduce_mean(axes = var_2013, keep_dims = var_23, x = zero_mean_sq_51_cast)[name = tensor("op_2014_cast")]; + tensor var_2015_to_fp16 = const()[name = tensor("op_2015_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2016_cast = add(x = var_2014_cast, y = var_2015_to_fp16)[name = tensor("op_2016_cast")]; + tensor denom_51_epsilon_0_to_fp16 = const()[name = tensor("denom_51_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_51_cast = rsqrt(epsilon = denom_51_epsilon_0_to_fp16, x = var_2016_cast)[name = tensor("denom_51_cast")]; + tensor out_51_cast = mul(x = zero_mean_51_cast, y = denom_51_cast)[name = tensor("out_51_cast")]; + tensor var_2020_to_fp16 = const()[name = tensor("op_2020_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(183677888)))]; + tensor var_2021_cast = add(x = out_51_cast, y = var_2020_to_fp16)[name = tensor("op_2021_cast")]; + tensor 
var_2023_to_fp16 = const()[name = tensor("op_2023_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(183680512)))]; + tensor hidden_states_91_cast = mul(x = var_2021_cast, y = var_2023_to_fp16)[name = tensor("hidden_states_91_cast")]; + tensor var_2030 = const()[name = tensor("op_2030"), val = tensor([1, 1])]; + tensor var_2032 = const()[name = tensor("op_2032"), val = tensor([1, 1])]; + tensor q_35_pad_type_0 = const()[name = tensor("q_35_pad_type_0"), val = tensor("custom")]; + tensor q_35_pad_0 = const()[name = tensor("q_35_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(183683136))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(184912000))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_35_cast = conv(dilations = var_2032, groups = var_31, pad = q_35_pad_0, pad_type = q_35_pad_type_0, strides = var_2030, weight = unet_down_blocks_2_attentions_0_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_91_cast)[name = tensor("q_35_cast")]; + tensor var_2036 = const()[name = tensor("op_2036"), val = tensor([1, 1])]; + tensor var_2038 = const()[name = tensor("op_2038"), val = tensor([1, 1])]; + tensor k_35_pad_type_0 = const()[name = tensor("k_35_pad_type_0"), val = tensor("custom")]; + tensor k_35_pad_0 = const()[name = tensor("k_35_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(184912192))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(186878336))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_35_cast = conv(dilations = var_2038, groups = var_31, pad = k_35_pad_0, pad_type = k_35_pad_type_0, strides = var_2036, weight = unet_down_blocks_2_attentions_0_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_35_cast")]; + tensor var_2042 = const()[name = tensor("op_2042"), val = tensor([1, 1])]; + tensor var_2044 = const()[name = tensor("op_2044"), val = tensor([1, 1])]; + tensor v_35_pad_type_0 = const()[name = tensor("v_35_pad_type_0"), val = tensor("custom")]; + tensor v_35_pad_0 = const()[name = tensor("v_35_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(186878528))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(188844672))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_35_cast = conv(dilations = var_2044, groups = var_31, pad = v_35_pad_0, pad_type = v_35_pad_type_0, strides = var_2042, weight = unet_down_blocks_2_attentions_0_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized, x = 
encoder_hidden_states)[name = tensor("v_35_cast")]; + tensor var_2048 = const()[name = tensor("op_2048"), val = tensor([2, 20, 64, -1])]; + tensor var_2049_cast = reshape(shape = var_2048, x = q_35_cast)[name = tensor("op_2049_cast")]; + tensor var_2050 = const()[name = tensor("op_2050"), val = tensor([2, 20, 64, -1])]; + tensor var_2051_cast = reshape(shape = var_2050, x = k_35_cast)[name = tensor("op_2051_cast")]; + tensor var_2052 = const()[name = tensor("op_2052"), val = tensor([2, 20, 64, -1])]; + tensor var_2053_cast = reshape(shape = var_2052, x = v_35_cast)[name = tensor("op_2053_cast")]; + tensor attn_weights_69_transpose_x_0 = const()[name = tensor("attn_weights_69_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_69_transpose_y_0 = const()[name = tensor("attn_weights_69_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_69_cast = matmul(transpose_x = attn_weights_69_transpose_x_0, transpose_y = attn_weights_69_transpose_y_0, x = var_2049_cast, y = var_2051_cast)[name = tensor("attn_weights_69_cast")]; + tensor attn_weights_71_cast = mul(x = attn_weights_69_cast, y = var_12_to_fp16)[name = tensor("attn_weights_71_cast")]; + tensor var_2057_cast = softmax(axis = var_18, x = attn_weights_71_cast)[name = tensor("op_2057_cast")]; + tensor attn_35_transpose_x_0 = const()[name = tensor("attn_35_transpose_x_0"), val = tensor(false)]; + tensor attn_35_transpose_y_0 = const()[name = tensor("attn_35_transpose_y_0"), val = tensor(true)]; + tensor attn_35_cast = matmul(transpose_x = attn_35_transpose_x_0, transpose_y = attn_35_transpose_y_0, x = var_2053_cast, y = var_2057_cast)[name = tensor("attn_35_cast")]; + tensor var_2061 = const()[name = tensor("op_2061"), val = tensor([2, 1280, 1, -1])]; + tensor input_165_cast = reshape(shape = var_2061, x = attn_35_cast)[name = tensor("input_165_cast")]; + tensor var_2066 = const()[name = tensor("op_2066"), val = tensor([1, 1])]; + tensor var_2068 = const()[name = tensor("op_2068"), val = tensor([1, 1])]; + tensor var_2070_pad_type_0 = const()[name = tensor("op_2070_pad_type_0"), val = tensor("custom")]; + tensor var_2070_pad_0 = const()[name = tensor("op_2070_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(188844864))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(190073728))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_4_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_4_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(190073920)))]; + tensor var_2070_cast = conv(bias = unet_down_blocks_2_attentions_0_transformer_blocks_4_attn2_to_out_0_bias_to_fp16, dilations = var_2068, groups = var_31, pad = var_2070_pad_0, pad_type = var_2070_pad_type_0, strides = var_2066, weight = unet_down_blocks_2_attentions_0_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized, x = input_165_cast)[name = tensor("op_2070_cast")]; + tensor inputs_53_cast = add(x = var_2070_cast, y = inputs_51_cast)[name = tensor("inputs_53_cast")]; + tensor var_2074 = const()[name = tensor("op_2074"), val = tensor([1])]; + 
tensor channels_mean_53_cast = reduce_mean(axes = var_2074, keep_dims = var_23, x = inputs_53_cast)[name = tensor("channels_mean_53_cast")]; + tensor zero_mean_53_cast = sub(x = inputs_53_cast, y = channels_mean_53_cast)[name = tensor("zero_mean_53_cast")]; + tensor zero_mean_sq_53_cast = mul(x = zero_mean_53_cast, y = zero_mean_53_cast)[name = tensor("zero_mean_sq_53_cast")]; + tensor var_2078 = const()[name = tensor("op_2078"), val = tensor([1])]; + tensor var_2079_cast = reduce_mean(axes = var_2078, keep_dims = var_23, x = zero_mean_sq_53_cast)[name = tensor("op_2079_cast")]; + tensor var_2080_to_fp16 = const()[name = tensor("op_2080_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2081_cast = add(x = var_2079_cast, y = var_2080_to_fp16)[name = tensor("op_2081_cast")]; + tensor denom_53_epsilon_0_to_fp16 = const()[name = tensor("denom_53_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_53_cast = rsqrt(epsilon = denom_53_epsilon_0_to_fp16, x = var_2081_cast)[name = tensor("denom_53_cast")]; + tensor out_53_cast = mul(x = zero_mean_53_cast, y = denom_53_cast)[name = tensor("out_53_cast")]; + tensor var_2085_to_fp16 = const()[name = tensor("op_2085_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(190076544)))]; + tensor var_2086_cast = add(x = out_53_cast, y = var_2085_to_fp16)[name = tensor("op_2086_cast")]; + tensor var_2088_to_fp16 = const()[name = tensor("op_2088_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(190079168)))]; + tensor input_167_cast = mul(x = var_2086_cast, y = var_2088_to_fp16)[name = tensor("input_167_cast")]; + tensor var_2096 = const()[name = tensor("op_2096"), val = tensor([1, 1])]; + tensor var_2098 = const()[name = tensor("op_2098"), val = tensor([1, 1])]; + tensor var_2100_pad_type_0 = const()[name = tensor("op_2100_pad_type_0"), val = tensor("custom")]; + tensor var_2100_pad_0 = const()[name = tensor("op_2100_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(190081792))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(199912256))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_4_ff_net_0_proj_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(199912448))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(199920192))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_4_ff_net_0_proj_bias_to_fp16_palettized"), shape = tensor([10240])]; + tensor var_2100_cast = conv(bias = unet_down_blocks_2_attentions_0_transformer_blocks_4_ff_net_0_proj_bias_to_fp16_palettized, dilations = var_2098, groups = var_31, pad = var_2100_pad_0, pad_type = var_2100_pad_type_0, strides = var_2096, weight = unet_down_blocks_2_attentions_0_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized, x = input_167_cast)[name = tensor("op_2100_cast")]; + tensor var_2101_split_sizes_0 = const()[name = tensor("op_2101_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_2101_axis_0 = const()[name = tensor("op_2101_axis_0"), val = 
tensor(1)]; + tensor var_2101_cast_0, tensor var_2101_cast_1 = split(axis = var_2101_axis_0, split_sizes = var_2101_split_sizes_0, x = var_2100_cast)[name = tensor("op_2101_cast")]; + tensor var_2103_mode_0 = const()[name = tensor("op_2103_mode_0"), val = tensor("EXACT")]; + tensor var_2103_cast = gelu(mode = var_2103_mode_0, x = var_2101_cast_1)[name = tensor("op_2103_cast")]; + tensor input_169_cast = mul(x = var_2101_cast_0, y = var_2103_cast)[name = tensor("input_169_cast")]; + tensor var_2107 = const()[name = tensor("op_2107"), val = tensor([1, 1])]; + tensor var_2109 = const()[name = tensor("op_2109"), val = tensor([1, 1])]; + tensor var_2111_pad_type_0 = const()[name = tensor("op_2111_pad_type_0"), val = tensor("custom")]; + tensor var_2111_pad_0 = const()[name = tensor("op_2111_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(199920384))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(204835648))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_4_ff_net_2_bias_to_fp16 = const()[name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_4_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(204835840)))]; + tensor var_2111_cast = conv(bias = unet_down_blocks_2_attentions_0_transformer_blocks_4_ff_net_2_bias_to_fp16, dilations = var_2109, groups = var_31, pad = var_2111_pad_0, pad_type = var_2111_pad_type_0, strides = var_2107, weight = unet_down_blocks_2_attentions_0_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized, x = input_169_cast)[name = tensor("op_2111_cast")]; + tensor inputs_55_cast = add(x = var_2111_cast, y = inputs_53_cast)[name = tensor("inputs_55_cast")]; + tensor var_2121 = const()[name = tensor("op_2121"), val = tensor([1])]; + tensor channels_mean_55_cast = reduce_mean(axes = var_2121, keep_dims = var_23, x = inputs_55_cast)[name = tensor("channels_mean_55_cast")]; + tensor zero_mean_55_cast = sub(x = inputs_55_cast, y = channels_mean_55_cast)[name = tensor("zero_mean_55_cast")]; + tensor zero_mean_sq_55_cast = mul(x = zero_mean_55_cast, y = zero_mean_55_cast)[name = tensor("zero_mean_sq_55_cast")]; + tensor var_2125 = const()[name = tensor("op_2125"), val = tensor([1])]; + tensor var_2126_cast = reduce_mean(axes = var_2125, keep_dims = var_23, x = zero_mean_sq_55_cast)[name = tensor("op_2126_cast")]; + tensor var_2127_to_fp16 = const()[name = tensor("op_2127_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2128_cast = add(x = var_2126_cast, y = var_2127_to_fp16)[name = tensor("op_2128_cast")]; + tensor denom_55_epsilon_0_to_fp16 = const()[name = tensor("denom_55_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_55_cast = rsqrt(epsilon = denom_55_epsilon_0_to_fp16, x = var_2128_cast)[name = tensor("denom_55_cast")]; + tensor out_55_cast = mul(x = zero_mean_55_cast, y = denom_55_cast)[name = tensor("out_55_cast")]; + tensor var_2132_to_fp16 = const()[name = tensor("op_2132_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(204838464)))]; + tensor var_2133_cast = add(x = out_55_cast, y = var_2132_to_fp16)[name = tensor("op_2133_cast")]; + tensor 
var_2135_to_fp16 = const()[name = tensor("op_2135_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(204841088)))]; + tensor hidden_states_95_cast = mul(x = var_2133_cast, y = var_2135_to_fp16)[name = tensor("hidden_states_95_cast")]; + tensor var_2142 = const()[name = tensor("op_2142"), val = tensor([1, 1])]; + tensor var_2144 = const()[name = tensor("op_2144"), val = tensor([1, 1])]; + tensor q_37_pad_type_0 = const()[name = tensor("q_37_pad_type_0"), val = tensor("custom")]; + tensor q_37_pad_0 = const()[name = tensor("q_37_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(204843712))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(206072576))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_37_cast = conv(dilations = var_2144, groups = var_31, pad = q_37_pad_0, pad_type = q_37_pad_type_0, strides = var_2142, weight = unet_down_blocks_2_attentions_0_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_95_cast)[name = tensor("q_37_cast")]; + tensor var_2148 = const()[name = tensor("op_2148"), val = tensor([1, 1])]; + tensor var_2150 = const()[name = tensor("op_2150"), val = tensor([1, 1])]; + tensor k_37_pad_type_0 = const()[name = tensor("k_37_pad_type_0"), val = tensor("custom")]; + tensor k_37_pad_0 = const()[name = tensor("k_37_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(206072768))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(207301632))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_37_cast = conv(dilations = var_2150, groups = var_31, pad = k_37_pad_0, pad_type = k_37_pad_type_0, strides = var_2148, weight = unet_down_blocks_2_attentions_0_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_95_cast)[name = tensor("k_37_cast")]; + tensor var_2154 = const()[name = tensor("op_2154"), val = tensor([1, 1])]; + tensor var_2156 = const()[name = tensor("op_2156"), val = tensor([1, 1])]; + tensor v_37_pad_type_0 = const()[name = tensor("v_37_pad_type_0"), val = tensor("custom")]; + tensor v_37_pad_0 = const()[name = tensor("v_37_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(207301824))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(208530688))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_37_cast = conv(dilations = var_2156, groups = var_31, pad = v_37_pad_0, pad_type = v_37_pad_type_0, strides = var_2154, weight = unet_down_blocks_2_attentions_0_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized, x = 
hidden_states_95_cast)[name = tensor("v_37_cast")]; + tensor var_2160 = const()[name = tensor("op_2160"), val = tensor([2, 20, 64, -1])]; + tensor var_2161_cast = reshape(shape = var_2160, x = q_37_cast)[name = tensor("op_2161_cast")]; + tensor var_2162 = const()[name = tensor("op_2162"), val = tensor([2, 20, 64, -1])]; + tensor var_2163_cast = reshape(shape = var_2162, x = k_37_cast)[name = tensor("op_2163_cast")]; + tensor var_2164 = const()[name = tensor("op_2164"), val = tensor([2, 20, 64, -1])]; + tensor var_2165_cast = reshape(shape = var_2164, x = v_37_cast)[name = tensor("op_2165_cast")]; + tensor attn_weights_73_transpose_x_0 = const()[name = tensor("attn_weights_73_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_73_transpose_y_0 = const()[name = tensor("attn_weights_73_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_73_cast = matmul(transpose_x = attn_weights_73_transpose_x_0, transpose_y = attn_weights_73_transpose_y_0, x = var_2161_cast, y = var_2163_cast)[name = tensor("attn_weights_73_cast")]; + tensor attn_weights_75_cast = mul(x = attn_weights_73_cast, y = var_12_to_fp16)[name = tensor("attn_weights_75_cast")]; + tensor var_2169_cast = softmax(axis = var_18, x = attn_weights_75_cast)[name = tensor("op_2169_cast")]; + tensor attn_37_transpose_x_0 = const()[name = tensor("attn_37_transpose_x_0"), val = tensor(false)]; + tensor attn_37_transpose_y_0 = const()[name = tensor("attn_37_transpose_y_0"), val = tensor(true)]; + tensor attn_37_cast = matmul(transpose_x = attn_37_transpose_x_0, transpose_y = attn_37_transpose_y_0, x = var_2165_cast, y = var_2169_cast)[name = tensor("attn_37_cast")]; + tensor var_2173 = const()[name = tensor("op_2173"), val = tensor([2, 1280, 1, -1])]; + tensor input_171_cast = reshape(shape = var_2173, x = attn_37_cast)[name = tensor("input_171_cast")]; + tensor var_2178 = const()[name = tensor("op_2178"), val = tensor([1, 1])]; + tensor var_2180 = const()[name = tensor("op_2180"), val = tensor([1, 1])]; + tensor var_2182_pad_type_0 = const()[name = tensor("op_2182_pad_type_0"), val = tensor("custom")]; + tensor var_2182_pad_0 = const()[name = tensor("op_2182_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(208530880))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(209759744))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_5_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_5_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(209759936)))]; + tensor var_2182_cast = conv(bias = unet_down_blocks_2_attentions_0_transformer_blocks_5_attn1_to_out_0_bias_to_fp16, dilations = var_2180, groups = var_31, pad = var_2182_pad_0, pad_type = var_2182_pad_type_0, strides = var_2178, weight = unet_down_blocks_2_attentions_0_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized, x = input_171_cast)[name = tensor("op_2182_cast")]; + tensor inputs_57_cast = add(x = var_2182_cast, y = inputs_55_cast)[name = tensor("inputs_57_cast")]; + tensor var_2186 = const()[name = tensor("op_2186"), val = tensor([1])]; + 
tensor channels_mean_57_cast = reduce_mean(axes = var_2186, keep_dims = var_23, x = inputs_57_cast)[name = tensor("channels_mean_57_cast")]; + tensor zero_mean_57_cast = sub(x = inputs_57_cast, y = channels_mean_57_cast)[name = tensor("zero_mean_57_cast")]; + tensor zero_mean_sq_57_cast = mul(x = zero_mean_57_cast, y = zero_mean_57_cast)[name = tensor("zero_mean_sq_57_cast")]; + tensor var_2190 = const()[name = tensor("op_2190"), val = tensor([1])]; + tensor var_2191_cast = reduce_mean(axes = var_2190, keep_dims = var_23, x = zero_mean_sq_57_cast)[name = tensor("op_2191_cast")]; + tensor var_2192_to_fp16 = const()[name = tensor("op_2192_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2193_cast = add(x = var_2191_cast, y = var_2192_to_fp16)[name = tensor("op_2193_cast")]; + tensor denom_57_epsilon_0_to_fp16 = const()[name = tensor("denom_57_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_57_cast = rsqrt(epsilon = denom_57_epsilon_0_to_fp16, x = var_2193_cast)[name = tensor("denom_57_cast")]; + tensor out_57_cast = mul(x = zero_mean_57_cast, y = denom_57_cast)[name = tensor("out_57_cast")]; + tensor var_2197_to_fp16 = const()[name = tensor("op_2197_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(209762560)))]; + tensor var_2198_cast = add(x = out_57_cast, y = var_2197_to_fp16)[name = tensor("op_2198_cast")]; + tensor var_2200_to_fp16 = const()[name = tensor("op_2200_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(209765184)))]; + tensor hidden_states_97_cast = mul(x = var_2198_cast, y = var_2200_to_fp16)[name = tensor("hidden_states_97_cast")]; + tensor var_2207 = const()[name = tensor("op_2207"), val = tensor([1, 1])]; + tensor var_2209 = const()[name = tensor("op_2209"), val = tensor([1, 1])]; + tensor q_39_pad_type_0 = const()[name = tensor("q_39_pad_type_0"), val = tensor("custom")]; + tensor q_39_pad_0 = const()[name = tensor("q_39_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(209767808))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(210996672))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_39_cast = conv(dilations = var_2209, groups = var_31, pad = q_39_pad_0, pad_type = q_39_pad_type_0, strides = var_2207, weight = unet_down_blocks_2_attentions_0_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_97_cast)[name = tensor("q_39_cast")]; + tensor var_2213 = const()[name = tensor("op_2213"), val = tensor([1, 1])]; + tensor var_2215 = const()[name = tensor("op_2215"), val = tensor([1, 1])]; + tensor k_39_pad_type_0 = const()[name = tensor("k_39_pad_type_0"), val = tensor("custom")]; + tensor k_39_pad_0 = const()[name = tensor("k_39_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(210996864))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(212963008))), name = 
tensor("unet_down_blocks_2_attentions_0_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_39_cast = conv(dilations = var_2215, groups = var_31, pad = k_39_pad_0, pad_type = k_39_pad_type_0, strides = var_2213, weight = unet_down_blocks_2_attentions_0_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_39_cast")]; + tensor var_2219 = const()[name = tensor("op_2219"), val = tensor([1, 1])]; + tensor var_2221 = const()[name = tensor("op_2221"), val = tensor([1, 1])]; + tensor v_39_pad_type_0 = const()[name = tensor("v_39_pad_type_0"), val = tensor("custom")]; + tensor v_39_pad_0 = const()[name = tensor("v_39_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(212963200))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(214929344))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_39_cast = conv(dilations = var_2221, groups = var_31, pad = v_39_pad_0, pad_type = v_39_pad_type_0, strides = var_2219, weight = unet_down_blocks_2_attentions_0_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_39_cast")]; + tensor var_2225 = const()[name = tensor("op_2225"), val = tensor([2, 20, 64, -1])]; + tensor var_2226_cast = reshape(shape = var_2225, x = q_39_cast)[name = tensor("op_2226_cast")]; + tensor var_2227 = const()[name = tensor("op_2227"), val = tensor([2, 20, 64, -1])]; + tensor var_2228_cast = reshape(shape = var_2227, x = k_39_cast)[name = tensor("op_2228_cast")]; + tensor var_2229 = const()[name = tensor("op_2229"), val = tensor([2, 20, 64, -1])]; + tensor var_2230_cast = reshape(shape = var_2229, x = v_39_cast)[name = tensor("op_2230_cast")]; + tensor attn_weights_77_transpose_x_0 = const()[name = tensor("attn_weights_77_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_77_transpose_y_0 = const()[name = tensor("attn_weights_77_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_77_cast = matmul(transpose_x = attn_weights_77_transpose_x_0, transpose_y = attn_weights_77_transpose_y_0, x = var_2226_cast, y = var_2228_cast)[name = tensor("attn_weights_77_cast")]; + tensor attn_weights_79_cast = mul(x = attn_weights_77_cast, y = var_12_to_fp16)[name = tensor("attn_weights_79_cast")]; + tensor var_2234_cast = softmax(axis = var_18, x = attn_weights_79_cast)[name = tensor("op_2234_cast")]; + tensor attn_39_transpose_x_0 = const()[name = tensor("attn_39_transpose_x_0"), val = tensor(false)]; + tensor attn_39_transpose_y_0 = const()[name = tensor("attn_39_transpose_y_0"), val = tensor(true)]; + tensor attn_39_cast = matmul(transpose_x = attn_39_transpose_x_0, transpose_y = attn_39_transpose_y_0, x = var_2230_cast, y = var_2234_cast)[name = tensor("attn_39_cast")]; + tensor var_2238 = const()[name = tensor("op_2238"), val = tensor([2, 1280, 1, -1])]; + tensor input_173_cast = reshape(shape = var_2238, x = attn_39_cast)[name = tensor("input_173_cast")]; + tensor var_2243 = const()[name = tensor("op_2243"), val = tensor([1, 1])]; + tensor var_2245 = const()[name = tensor("op_2245"), val = tensor([1, 1])]; + tensor var_2247_pad_type_0 = const()[name = tensor("op_2247_pad_type_0"), val 
= tensor("custom")]; + tensor var_2247_pad_0 = const()[name = tensor("op_2247_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(214929536))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(216158400))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_5_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_5_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(216158592)))]; + tensor var_2247_cast = conv(bias = unet_down_blocks_2_attentions_0_transformer_blocks_5_attn2_to_out_0_bias_to_fp16, dilations = var_2245, groups = var_31, pad = var_2247_pad_0, pad_type = var_2247_pad_type_0, strides = var_2243, weight = unet_down_blocks_2_attentions_0_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized, x = input_173_cast)[name = tensor("op_2247_cast")]; + tensor inputs_59_cast = add(x = var_2247_cast, y = inputs_57_cast)[name = tensor("inputs_59_cast")]; + tensor var_2251 = const()[name = tensor("op_2251"), val = tensor([1])]; + tensor channels_mean_59_cast = reduce_mean(axes = var_2251, keep_dims = var_23, x = inputs_59_cast)[name = tensor("channels_mean_59_cast")]; + tensor zero_mean_59_cast = sub(x = inputs_59_cast, y = channels_mean_59_cast)[name = tensor("zero_mean_59_cast")]; + tensor zero_mean_sq_59_cast = mul(x = zero_mean_59_cast, y = zero_mean_59_cast)[name = tensor("zero_mean_sq_59_cast")]; + tensor var_2255 = const()[name = tensor("op_2255"), val = tensor([1])]; + tensor var_2256_cast = reduce_mean(axes = var_2255, keep_dims = var_23, x = zero_mean_sq_59_cast)[name = tensor("op_2256_cast")]; + tensor var_2257_to_fp16 = const()[name = tensor("op_2257_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2258_cast = add(x = var_2256_cast, y = var_2257_to_fp16)[name = tensor("op_2258_cast")]; + tensor denom_59_epsilon_0_to_fp16 = const()[name = tensor("denom_59_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_59_cast = rsqrt(epsilon = denom_59_epsilon_0_to_fp16, x = var_2258_cast)[name = tensor("denom_59_cast")]; + tensor out_59_cast = mul(x = zero_mean_59_cast, y = denom_59_cast)[name = tensor("out_59_cast")]; + tensor var_2262_to_fp16 = const()[name = tensor("op_2262_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(216161216)))]; + tensor var_2263_cast = add(x = out_59_cast, y = var_2262_to_fp16)[name = tensor("op_2263_cast")]; + tensor var_2265_to_fp16 = const()[name = tensor("op_2265_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(216163840)))]; + tensor input_175_cast = mul(x = var_2263_cast, y = var_2265_to_fp16)[name = tensor("input_175_cast")]; + tensor var_2273 = const()[name = tensor("op_2273"), val = tensor([1, 1])]; + tensor var_2275 = const()[name = tensor("op_2275"), val = tensor([1, 1])]; + tensor var_2277_pad_type_0 = const()[name = tensor("op_2277_pad_type_0"), val = tensor("custom")]; + tensor var_2277_pad_0 = const()[name = tensor("op_2277_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
unet_down_blocks_2_attentions_0_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(216166464))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(225996928))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_5_ff_net_0_proj_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(225997120))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(226004864))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_5_ff_net_0_proj_bias_to_fp16_palettized"), shape = tensor([10240])]; + tensor var_2277_cast = conv(bias = unet_down_blocks_2_attentions_0_transformer_blocks_5_ff_net_0_proj_bias_to_fp16_palettized, dilations = var_2275, groups = var_31, pad = var_2277_pad_0, pad_type = var_2277_pad_type_0, strides = var_2273, weight = unet_down_blocks_2_attentions_0_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized, x = input_175_cast)[name = tensor("op_2277_cast")]; + tensor var_2278_split_sizes_0 = const()[name = tensor("op_2278_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_2278_axis_0 = const()[name = tensor("op_2278_axis_0"), val = tensor(1)]; + tensor var_2278_cast_0, tensor var_2278_cast_1 = split(axis = var_2278_axis_0, split_sizes = var_2278_split_sizes_0, x = var_2277_cast)[name = tensor("op_2278_cast")]; + tensor var_2280_mode_0 = const()[name = tensor("op_2280_mode_0"), val = tensor("EXACT")]; + tensor var_2280_cast = gelu(mode = var_2280_mode_0, x = var_2278_cast_1)[name = tensor("op_2280_cast")]; + tensor input_177_cast = mul(x = var_2278_cast_0, y = var_2280_cast)[name = tensor("input_177_cast")]; + tensor var_2284 = const()[name = tensor("op_2284"), val = tensor([1, 1])]; + tensor var_2286 = const()[name = tensor("op_2286"), val = tensor([1, 1])]; + tensor var_2288_pad_type_0 = const()[name = tensor("op_2288_pad_type_0"), val = tensor("custom")]; + tensor var_2288_pad_0 = const()[name = tensor("op_2288_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(226005056))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(230920320))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_5_ff_net_2_bias_to_fp16 = const()[name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_5_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(230920512)))]; + tensor var_2288_cast = conv(bias = unet_down_blocks_2_attentions_0_transformer_blocks_5_ff_net_2_bias_to_fp16, dilations = var_2286, groups = var_31, pad = var_2288_pad_0, pad_type = var_2288_pad_type_0, strides = var_2284, weight = unet_down_blocks_2_attentions_0_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized, x = input_177_cast)[name = tensor("op_2288_cast")]; + tensor inputs_61_cast = add(x = var_2288_cast, y = 
inputs_59_cast)[name = tensor("inputs_61_cast")]; + tensor var_2298 = const()[name = tensor("op_2298"), val = tensor([1])]; + tensor channels_mean_61_cast = reduce_mean(axes = var_2298, keep_dims = var_23, x = inputs_61_cast)[name = tensor("channels_mean_61_cast")]; + tensor zero_mean_61_cast = sub(x = inputs_61_cast, y = channels_mean_61_cast)[name = tensor("zero_mean_61_cast")]; + tensor zero_mean_sq_61_cast = mul(x = zero_mean_61_cast, y = zero_mean_61_cast)[name = tensor("zero_mean_sq_61_cast")]; + tensor var_2302 = const()[name = tensor("op_2302"), val = tensor([1])]; + tensor var_2303_cast = reduce_mean(axes = var_2302, keep_dims = var_23, x = zero_mean_sq_61_cast)[name = tensor("op_2303_cast")]; + tensor var_2304_to_fp16 = const()[name = tensor("op_2304_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2305_cast = add(x = var_2303_cast, y = var_2304_to_fp16)[name = tensor("op_2305_cast")]; + tensor denom_61_epsilon_0_to_fp16 = const()[name = tensor("denom_61_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_61_cast = rsqrt(epsilon = denom_61_epsilon_0_to_fp16, x = var_2305_cast)[name = tensor("denom_61_cast")]; + tensor out_61_cast = mul(x = zero_mean_61_cast, y = denom_61_cast)[name = tensor("out_61_cast")]; + tensor var_2309_to_fp16 = const()[name = tensor("op_2309_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(230923136)))]; + tensor var_2310_cast = add(x = out_61_cast, y = var_2309_to_fp16)[name = tensor("op_2310_cast")]; + tensor var_2312_to_fp16 = const()[name = tensor("op_2312_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(230925760)))]; + tensor hidden_states_101_cast = mul(x = var_2310_cast, y = var_2312_to_fp16)[name = tensor("hidden_states_101_cast")]; + tensor var_2319 = const()[name = tensor("op_2319"), val = tensor([1, 1])]; + tensor var_2321 = const()[name = tensor("op_2321"), val = tensor([1, 1])]; + tensor q_41_pad_type_0 = const()[name = tensor("q_41_pad_type_0"), val = tensor("custom")]; + tensor q_41_pad_0 = const()[name = tensor("q_41_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(230928384))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(232157248))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_41_cast = conv(dilations = var_2321, groups = var_31, pad = q_41_pad_0, pad_type = q_41_pad_type_0, strides = var_2319, weight = unet_down_blocks_2_attentions_0_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_101_cast)[name = tensor("q_41_cast")]; + tensor var_2325 = const()[name = tensor("op_2325"), val = tensor([1, 1])]; + tensor var_2327 = const()[name = tensor("op_2327"), val = tensor([1, 1])]; + tensor k_41_pad_type_0 = const()[name = tensor("k_41_pad_type_0"), val = tensor("custom")]; + tensor k_41_pad_0 = const()[name = tensor("k_41_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(232157440))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), 
offset = tensor(233386304))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_41_cast = conv(dilations = var_2327, groups = var_31, pad = k_41_pad_0, pad_type = k_41_pad_type_0, strides = var_2325, weight = unet_down_blocks_2_attentions_0_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_101_cast)[name = tensor("k_41_cast")]; + tensor var_2331 = const()[name = tensor("op_2331"), val = tensor([1, 1])]; + tensor var_2333 = const()[name = tensor("op_2333"), val = tensor([1, 1])]; + tensor v_41_pad_type_0 = const()[name = tensor("v_41_pad_type_0"), val = tensor("custom")]; + tensor v_41_pad_0 = const()[name = tensor("v_41_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(233386496))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(234615360))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_41_cast = conv(dilations = var_2333, groups = var_31, pad = v_41_pad_0, pad_type = v_41_pad_type_0, strides = var_2331, weight = unet_down_blocks_2_attentions_0_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_101_cast)[name = tensor("v_41_cast")]; + tensor var_2337 = const()[name = tensor("op_2337"), val = tensor([2, 20, 64, -1])]; + tensor var_2338_cast = reshape(shape = var_2337, x = q_41_cast)[name = tensor("op_2338_cast")]; + tensor var_2339 = const()[name = tensor("op_2339"), val = tensor([2, 20, 64, -1])]; + tensor var_2340_cast = reshape(shape = var_2339, x = k_41_cast)[name = tensor("op_2340_cast")]; + tensor var_2341 = const()[name = tensor("op_2341"), val = tensor([2, 20, 64, -1])]; + tensor var_2342_cast = reshape(shape = var_2341, x = v_41_cast)[name = tensor("op_2342_cast")]; + tensor attn_weights_81_transpose_x_0 = const()[name = tensor("attn_weights_81_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_81_transpose_y_0 = const()[name = tensor("attn_weights_81_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_81_cast = matmul(transpose_x = attn_weights_81_transpose_x_0, transpose_y = attn_weights_81_transpose_y_0, x = var_2338_cast, y = var_2340_cast)[name = tensor("attn_weights_81_cast")]; + tensor attn_weights_83_cast = mul(x = attn_weights_81_cast, y = var_12_to_fp16)[name = tensor("attn_weights_83_cast")]; + tensor var_2346_cast = softmax(axis = var_18, x = attn_weights_83_cast)[name = tensor("op_2346_cast")]; + tensor attn_41_transpose_x_0 = const()[name = tensor("attn_41_transpose_x_0"), val = tensor(false)]; + tensor attn_41_transpose_y_0 = const()[name = tensor("attn_41_transpose_y_0"), val = tensor(true)]; + tensor attn_41_cast = matmul(transpose_x = attn_41_transpose_x_0, transpose_y = attn_41_transpose_y_0, x = var_2342_cast, y = var_2346_cast)[name = tensor("attn_41_cast")]; + tensor var_2350 = const()[name = tensor("op_2350"), val = tensor([2, 1280, 1, -1])]; + tensor input_179_cast = reshape(shape = var_2350, x = attn_41_cast)[name = tensor("input_179_cast")]; + tensor var_2355 = const()[name = tensor("op_2355"), val = tensor([1, 1])]; + tensor var_2357 = const()[name = tensor("op_2357"), val = tensor([1, 1])]; + tensor var_2359_pad_type_0 = 
const()[name = tensor("op_2359_pad_type_0"), val = tensor("custom")]; + tensor var_2359_pad_0 = const()[name = tensor("op_2359_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(234615552))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(235844416))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_6_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_6_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(235844608)))]; + tensor var_2359_cast = conv(bias = unet_down_blocks_2_attentions_0_transformer_blocks_6_attn1_to_out_0_bias_to_fp16, dilations = var_2357, groups = var_31, pad = var_2359_pad_0, pad_type = var_2359_pad_type_0, strides = var_2355, weight = unet_down_blocks_2_attentions_0_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized, x = input_179_cast)[name = tensor("op_2359_cast")]; + tensor inputs_63_cast = add(x = var_2359_cast, y = inputs_61_cast)[name = tensor("inputs_63_cast")]; + tensor var_2363 = const()[name = tensor("op_2363"), val = tensor([1])]; + tensor channels_mean_63_cast = reduce_mean(axes = var_2363, keep_dims = var_23, x = inputs_63_cast)[name = tensor("channels_mean_63_cast")]; + tensor zero_mean_63_cast = sub(x = inputs_63_cast, y = channels_mean_63_cast)[name = tensor("zero_mean_63_cast")]; + tensor zero_mean_sq_63_cast = mul(x = zero_mean_63_cast, y = zero_mean_63_cast)[name = tensor("zero_mean_sq_63_cast")]; + tensor var_2367 = const()[name = tensor("op_2367"), val = tensor([1])]; + tensor var_2368_cast = reduce_mean(axes = var_2367, keep_dims = var_23, x = zero_mean_sq_63_cast)[name = tensor("op_2368_cast")]; + tensor var_2369_to_fp16 = const()[name = tensor("op_2369_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2370_cast = add(x = var_2368_cast, y = var_2369_to_fp16)[name = tensor("op_2370_cast")]; + tensor denom_63_epsilon_0_to_fp16 = const()[name = tensor("denom_63_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_63_cast = rsqrt(epsilon = denom_63_epsilon_0_to_fp16, x = var_2370_cast)[name = tensor("denom_63_cast")]; + tensor out_63_cast = mul(x = zero_mean_63_cast, y = denom_63_cast)[name = tensor("out_63_cast")]; + tensor var_2374_to_fp16 = const()[name = tensor("op_2374_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(235847232)))]; + tensor var_2375_cast = add(x = out_63_cast, y = var_2374_to_fp16)[name = tensor("op_2375_cast")]; + tensor var_2377_to_fp16 = const()[name = tensor("op_2377_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(235849856)))]; + tensor hidden_states_103_cast = mul(x = var_2375_cast, y = var_2377_to_fp16)[name = tensor("hidden_states_103_cast")]; + tensor var_2384 = const()[name = tensor("op_2384"), val = tensor([1, 1])]; + tensor var_2386 = const()[name = tensor("op_2386"), val = tensor([1, 1])]; + tensor q_43_pad_type_0 = const()[name = tensor("q_43_pad_type_0"), val = tensor("custom")]; + tensor q_43_pad_0 = const()[name = tensor("q_43_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
unet_down_blocks_2_attentions_0_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(235852480))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(237081344))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_43_cast = conv(dilations = var_2386, groups = var_31, pad = q_43_pad_0, pad_type = q_43_pad_type_0, strides = var_2384, weight = unet_down_blocks_2_attentions_0_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_103_cast)[name = tensor("q_43_cast")]; + tensor var_2390 = const()[name = tensor("op_2390"), val = tensor([1, 1])]; + tensor var_2392 = const()[name = tensor("op_2392"), val = tensor([1, 1])]; + tensor k_43_pad_type_0 = const()[name = tensor("k_43_pad_type_0"), val = tensor("custom")]; + tensor k_43_pad_0 = const()[name = tensor("k_43_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(237081536))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(239047680))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_43_cast = conv(dilations = var_2392, groups = var_31, pad = k_43_pad_0, pad_type = k_43_pad_type_0, strides = var_2390, weight = unet_down_blocks_2_attentions_0_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_43_cast")]; + tensor var_2396 = const()[name = tensor("op_2396"), val = tensor([1, 1])]; + tensor var_2398 = const()[name = tensor("op_2398"), val = tensor([1, 1])]; + tensor v_43_pad_type_0 = const()[name = tensor("v_43_pad_type_0"), val = tensor("custom")]; + tensor v_43_pad_0 = const()[name = tensor("v_43_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(239047872))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(241014016))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_43_cast = conv(dilations = var_2398, groups = var_31, pad = v_43_pad_0, pad_type = v_43_pad_type_0, strides = var_2396, weight = unet_down_blocks_2_attentions_0_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_43_cast")]; + tensor var_2402 = const()[name = tensor("op_2402"), val = tensor([2, 20, 64, -1])]; + tensor var_2403_cast = reshape(shape = var_2402, x = q_43_cast)[name = tensor("op_2403_cast")]; + tensor var_2404 = const()[name = tensor("op_2404"), val = tensor([2, 20, 64, -1])]; + tensor var_2405_cast = reshape(shape = var_2404, x = k_43_cast)[name = tensor("op_2405_cast")]; + tensor var_2406 = const()[name = tensor("op_2406"), val = tensor([2, 20, 64, -1])]; + tensor var_2407_cast = reshape(shape = var_2406, x = v_43_cast)[name = tensor("op_2407_cast")]; + tensor attn_weights_85_transpose_x_0 = 
const()[name = tensor("attn_weights_85_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_85_transpose_y_0 = const()[name = tensor("attn_weights_85_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_85_cast = matmul(transpose_x = attn_weights_85_transpose_x_0, transpose_y = attn_weights_85_transpose_y_0, x = var_2403_cast, y = var_2405_cast)[name = tensor("attn_weights_85_cast")]; + tensor attn_weights_87_cast = mul(x = attn_weights_85_cast, y = var_12_to_fp16)[name = tensor("attn_weights_87_cast")]; + tensor var_2411_cast = softmax(axis = var_18, x = attn_weights_87_cast)[name = tensor("op_2411_cast")]; + tensor attn_43_transpose_x_0 = const()[name = tensor("attn_43_transpose_x_0"), val = tensor(false)]; + tensor attn_43_transpose_y_0 = const()[name = tensor("attn_43_transpose_y_0"), val = tensor(true)]; + tensor attn_43_cast = matmul(transpose_x = attn_43_transpose_x_0, transpose_y = attn_43_transpose_y_0, x = var_2407_cast, y = var_2411_cast)[name = tensor("attn_43_cast")]; + tensor var_2415 = const()[name = tensor("op_2415"), val = tensor([2, 1280, 1, -1])]; + tensor input_181_cast = reshape(shape = var_2415, x = attn_43_cast)[name = tensor("input_181_cast")]; + tensor var_2420 = const()[name = tensor("op_2420"), val = tensor([1, 1])]; + tensor var_2422 = const()[name = tensor("op_2422"), val = tensor([1, 1])]; + tensor var_2424_pad_type_0 = const()[name = tensor("op_2424_pad_type_0"), val = tensor("custom")]; + tensor var_2424_pad_0 = const()[name = tensor("op_2424_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(241014208))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(242243072))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_6_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_6_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(242243264)))]; + tensor var_2424_cast = conv(bias = unet_down_blocks_2_attentions_0_transformer_blocks_6_attn2_to_out_0_bias_to_fp16, dilations = var_2422, groups = var_31, pad = var_2424_pad_0, pad_type = var_2424_pad_type_0, strides = var_2420, weight = unet_down_blocks_2_attentions_0_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized, x = input_181_cast)[name = tensor("op_2424_cast")]; + tensor inputs_65_cast = add(x = var_2424_cast, y = inputs_63_cast)[name = tensor("inputs_65_cast")]; + tensor var_2428 = const()[name = tensor("op_2428"), val = tensor([1])]; + tensor channels_mean_65_cast = reduce_mean(axes = var_2428, keep_dims = var_23, x = inputs_65_cast)[name = tensor("channels_mean_65_cast")]; + tensor zero_mean_65_cast = sub(x = inputs_65_cast, y = channels_mean_65_cast)[name = tensor("zero_mean_65_cast")]; + tensor zero_mean_sq_65_cast = mul(x = zero_mean_65_cast, y = zero_mean_65_cast)[name = tensor("zero_mean_sq_65_cast")]; + tensor var_2432 = const()[name = tensor("op_2432"), val = tensor([1])]; + tensor var_2433_cast = reduce_mean(axes = var_2432, keep_dims = var_23, x = zero_mean_sq_65_cast)[name = tensor("op_2433_cast")]; + tensor var_2434_to_fp16 = const()[name = 
tensor("op_2434_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2435_cast = add(x = var_2433_cast, y = var_2434_to_fp16)[name = tensor("op_2435_cast")]; + tensor denom_65_epsilon_0_to_fp16 = const()[name = tensor("denom_65_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_65_cast = rsqrt(epsilon = denom_65_epsilon_0_to_fp16, x = var_2435_cast)[name = tensor("denom_65_cast")]; + tensor out_65_cast = mul(x = zero_mean_65_cast, y = denom_65_cast)[name = tensor("out_65_cast")]; + tensor var_2439_to_fp16 = const()[name = tensor("op_2439_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(242245888)))]; + tensor var_2440_cast = add(x = out_65_cast, y = var_2439_to_fp16)[name = tensor("op_2440_cast")]; + tensor var_2442_to_fp16 = const()[name = tensor("op_2442_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(242248512)))]; + tensor input_183_cast = mul(x = var_2440_cast, y = var_2442_to_fp16)[name = tensor("input_183_cast")]; + tensor var_2450 = const()[name = tensor("op_2450"), val = tensor([1, 1])]; + tensor var_2452 = const()[name = tensor("op_2452"), val = tensor([1, 1])]; + tensor var_2454_pad_type_0 = const()[name = tensor("op_2454_pad_type_0"), val = tensor("custom")]; + tensor var_2454_pad_0 = const()[name = tensor("op_2454_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(242251136))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(252081600))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_6_ff_net_0_proj_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(252081792))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(252089536))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_6_ff_net_0_proj_bias_to_fp16_palettized"), shape = tensor([10240])]; + tensor var_2454_cast = conv(bias = unet_down_blocks_2_attentions_0_transformer_blocks_6_ff_net_0_proj_bias_to_fp16_palettized, dilations = var_2452, groups = var_31, pad = var_2454_pad_0, pad_type = var_2454_pad_type_0, strides = var_2450, weight = unet_down_blocks_2_attentions_0_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized, x = input_183_cast)[name = tensor("op_2454_cast")]; + tensor var_2455_split_sizes_0 = const()[name = tensor("op_2455_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_2455_axis_0 = const()[name = tensor("op_2455_axis_0"), val = tensor(1)]; + tensor var_2455_cast_0, tensor var_2455_cast_1 = split(axis = var_2455_axis_0, split_sizes = var_2455_split_sizes_0, x = var_2454_cast)[name = tensor("op_2455_cast")]; + tensor var_2457_mode_0 = const()[name = tensor("op_2457_mode_0"), val = tensor("EXACT")]; + tensor var_2457_cast = gelu(mode = var_2457_mode_0, x = var_2455_cast_1)[name = tensor("op_2457_cast")]; + tensor input_185_cast = mul(x = var_2455_cast_0, y = var_2457_cast)[name = tensor("input_185_cast")]; + tensor var_2461 = const()[name = tensor("op_2461"), val = tensor([1, 1])]; + tensor var_2463 = const()[name = tensor("op_2463"), val = 
tensor([1, 1])]; + tensor var_2465_pad_type_0 = const()[name = tensor("op_2465_pad_type_0"), val = tensor("custom")]; + tensor var_2465_pad_0 = const()[name = tensor("op_2465_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(252089728))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(257004992))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_6_ff_net_2_bias_to_fp16 = const()[name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_6_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(257005184)))]; + tensor var_2465_cast = conv(bias = unet_down_blocks_2_attentions_0_transformer_blocks_6_ff_net_2_bias_to_fp16, dilations = var_2463, groups = var_31, pad = var_2465_pad_0, pad_type = var_2465_pad_type_0, strides = var_2461, weight = unet_down_blocks_2_attentions_0_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized, x = input_185_cast)[name = tensor("op_2465_cast")]; + tensor inputs_67_cast = add(x = var_2465_cast, y = inputs_65_cast)[name = tensor("inputs_67_cast")]; + tensor var_2475 = const()[name = tensor("op_2475"), val = tensor([1])]; + tensor channels_mean_67_cast = reduce_mean(axes = var_2475, keep_dims = var_23, x = inputs_67_cast)[name = tensor("channels_mean_67_cast")]; + tensor zero_mean_67_cast = sub(x = inputs_67_cast, y = channels_mean_67_cast)[name = tensor("zero_mean_67_cast")]; + tensor zero_mean_sq_67_cast = mul(x = zero_mean_67_cast, y = zero_mean_67_cast)[name = tensor("zero_mean_sq_67_cast")]; + tensor var_2479 = const()[name = tensor("op_2479"), val = tensor([1])]; + tensor var_2480_cast = reduce_mean(axes = var_2479, keep_dims = var_23, x = zero_mean_sq_67_cast)[name = tensor("op_2480_cast")]; + tensor var_2481_to_fp16 = const()[name = tensor("op_2481_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2482_cast = add(x = var_2480_cast, y = var_2481_to_fp16)[name = tensor("op_2482_cast")]; + tensor denom_67_epsilon_0_to_fp16 = const()[name = tensor("denom_67_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_67_cast = rsqrt(epsilon = denom_67_epsilon_0_to_fp16, x = var_2482_cast)[name = tensor("denom_67_cast")]; + tensor out_67_cast = mul(x = zero_mean_67_cast, y = denom_67_cast)[name = tensor("out_67_cast")]; + tensor var_2486_to_fp16 = const()[name = tensor("op_2486_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(257007808)))]; + tensor var_2487_cast = add(x = out_67_cast, y = var_2486_to_fp16)[name = tensor("op_2487_cast")]; + tensor var_2489_to_fp16 = const()[name = tensor("op_2489_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(257010432)))]; + tensor hidden_states_107_cast = mul(x = var_2487_cast, y = var_2489_to_fp16)[name = tensor("hidden_states_107_cast")]; + tensor var_2496 = const()[name = tensor("op_2496"), val = tensor([1, 1])]; + tensor var_2498 = const()[name = tensor("op_2498"), val = tensor([1, 1])]; + tensor q_45_pad_type_0 = const()[name = tensor("q_45_pad_type_0"), val = tensor("custom")]; + tensor q_45_pad_0 = const()[name = tensor("q_45_pad_0"), val = tensor([0, 0, 0, 0])]; + 
tensor unet_down_blocks_2_attentions_0_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(257013056))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(258241920))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_45_cast = conv(dilations = var_2498, groups = var_31, pad = q_45_pad_0, pad_type = q_45_pad_type_0, strides = var_2496, weight = unet_down_blocks_2_attentions_0_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_107_cast)[name = tensor("q_45_cast")]; + tensor var_2502 = const()[name = tensor("op_2502"), val = tensor([1, 1])]; + tensor var_2504 = const()[name = tensor("op_2504"), val = tensor([1, 1])]; + tensor k_45_pad_type_0 = const()[name = tensor("k_45_pad_type_0"), val = tensor("custom")]; + tensor k_45_pad_0 = const()[name = tensor("k_45_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(258242112))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(259470976))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_45_cast = conv(dilations = var_2504, groups = var_31, pad = k_45_pad_0, pad_type = k_45_pad_type_0, strides = var_2502, weight = unet_down_blocks_2_attentions_0_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_107_cast)[name = tensor("k_45_cast")]; + tensor var_2508 = const()[name = tensor("op_2508"), val = tensor([1, 1])]; + tensor var_2510 = const()[name = tensor("op_2510"), val = tensor([1, 1])]; + tensor v_45_pad_type_0 = const()[name = tensor("v_45_pad_type_0"), val = tensor("custom")]; + tensor v_45_pad_0 = const()[name = tensor("v_45_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(259471168))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(260700032))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_45_cast = conv(dilations = var_2510, groups = var_31, pad = v_45_pad_0, pad_type = v_45_pad_type_0, strides = var_2508, weight = unet_down_blocks_2_attentions_0_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_107_cast)[name = tensor("v_45_cast")]; + tensor var_2514 = const()[name = tensor("op_2514"), val = tensor([2, 20, 64, -1])]; + tensor var_2515_cast = reshape(shape = var_2514, x = q_45_cast)[name = tensor("op_2515_cast")]; + tensor var_2516 = const()[name = tensor("op_2516"), val = tensor([2, 20, 64, -1])]; + tensor var_2517_cast = reshape(shape = var_2516, x = k_45_cast)[name = tensor("op_2517_cast")]; + tensor var_2518 = const()[name = tensor("op_2518"), val = tensor([2, 20, 64, -1])]; + tensor var_2519_cast = reshape(shape = var_2518, x = v_45_cast)[name = tensor("op_2519_cast")]; + tensor 
attn_weights_89_transpose_x_0 = const()[name = tensor("attn_weights_89_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_89_transpose_y_0 = const()[name = tensor("attn_weights_89_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_89_cast = matmul(transpose_x = attn_weights_89_transpose_x_0, transpose_y = attn_weights_89_transpose_y_0, x = var_2515_cast, y = var_2517_cast)[name = tensor("attn_weights_89_cast")]; + tensor attn_weights_91_cast = mul(x = attn_weights_89_cast, y = var_12_to_fp16)[name = tensor("attn_weights_91_cast")]; + tensor var_2523_cast = softmax(axis = var_18, x = attn_weights_91_cast)[name = tensor("op_2523_cast")]; + tensor attn_45_transpose_x_0 = const()[name = tensor("attn_45_transpose_x_0"), val = tensor(false)]; + tensor attn_45_transpose_y_0 = const()[name = tensor("attn_45_transpose_y_0"), val = tensor(true)]; + tensor attn_45_cast = matmul(transpose_x = attn_45_transpose_x_0, transpose_y = attn_45_transpose_y_0, x = var_2519_cast, y = var_2523_cast)[name = tensor("attn_45_cast")]; + tensor var_2527 = const()[name = tensor("op_2527"), val = tensor([2, 1280, 1, -1])]; + tensor input_187_cast = reshape(shape = var_2527, x = attn_45_cast)[name = tensor("input_187_cast")]; + tensor var_2532 = const()[name = tensor("op_2532"), val = tensor([1, 1])]; + tensor var_2534 = const()[name = tensor("op_2534"), val = tensor([1, 1])]; + tensor var_2536_pad_type_0 = const()[name = tensor("op_2536_pad_type_0"), val = tensor("custom")]; + tensor var_2536_pad_0 = const()[name = tensor("op_2536_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(260700224))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(261929088))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_7_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_7_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(261929280)))]; + tensor var_2536_cast = conv(bias = unet_down_blocks_2_attentions_0_transformer_blocks_7_attn1_to_out_0_bias_to_fp16, dilations = var_2534, groups = var_31, pad = var_2536_pad_0, pad_type = var_2536_pad_type_0, strides = var_2532, weight = unet_down_blocks_2_attentions_0_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized, x = input_187_cast)[name = tensor("op_2536_cast")]; + tensor inputs_69_cast = add(x = var_2536_cast, y = inputs_67_cast)[name = tensor("inputs_69_cast")]; + tensor var_2540 = const()[name = tensor("op_2540"), val = tensor([1])]; + tensor channels_mean_69_cast = reduce_mean(axes = var_2540, keep_dims = var_23, x = inputs_69_cast)[name = tensor("channels_mean_69_cast")]; + tensor zero_mean_69_cast = sub(x = inputs_69_cast, y = channels_mean_69_cast)[name = tensor("zero_mean_69_cast")]; + tensor zero_mean_sq_69_cast = mul(x = zero_mean_69_cast, y = zero_mean_69_cast)[name = tensor("zero_mean_sq_69_cast")]; + tensor var_2544 = const()[name = tensor("op_2544"), val = tensor([1])]; + tensor var_2545_cast = reduce_mean(axes = var_2544, keep_dims = var_23, x = zero_mean_sq_69_cast)[name = tensor("op_2545_cast")]; + tensor var_2546_to_fp16 = 
const()[name = tensor("op_2546_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2547_cast = add(x = var_2545_cast, y = var_2546_to_fp16)[name = tensor("op_2547_cast")]; + tensor denom_69_epsilon_0_to_fp16 = const()[name = tensor("denom_69_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_69_cast = rsqrt(epsilon = denom_69_epsilon_0_to_fp16, x = var_2547_cast)[name = tensor("denom_69_cast")]; + tensor out_69_cast = mul(x = zero_mean_69_cast, y = denom_69_cast)[name = tensor("out_69_cast")]; + tensor var_2551_to_fp16 = const()[name = tensor("op_2551_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(261931904)))]; + tensor var_2552_cast = add(x = out_69_cast, y = var_2551_to_fp16)[name = tensor("op_2552_cast")]; + tensor var_2554_to_fp16 = const()[name = tensor("op_2554_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(261934528)))]; + tensor hidden_states_109_cast = mul(x = var_2552_cast, y = var_2554_to_fp16)[name = tensor("hidden_states_109_cast")]; + tensor var_2561 = const()[name = tensor("op_2561"), val = tensor([1, 1])]; + tensor var_2563 = const()[name = tensor("op_2563"), val = tensor([1, 1])]; + tensor q_47_pad_type_0 = const()[name = tensor("q_47_pad_type_0"), val = tensor("custom")]; + tensor q_47_pad_0 = const()[name = tensor("q_47_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(261937152))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(263166016))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_47_cast = conv(dilations = var_2563, groups = var_31, pad = q_47_pad_0, pad_type = q_47_pad_type_0, strides = var_2561, weight = unet_down_blocks_2_attentions_0_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_109_cast)[name = tensor("q_47_cast")]; + tensor var_2567 = const()[name = tensor("op_2567"), val = tensor([1, 1])]; + tensor var_2569 = const()[name = tensor("op_2569"), val = tensor([1, 1])]; + tensor k_47_pad_type_0 = const()[name = tensor("k_47_pad_type_0"), val = tensor("custom")]; + tensor k_47_pad_0 = const()[name = tensor("k_47_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(263166208))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(265132352))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_47_cast = conv(dilations = var_2569, groups = var_31, pad = k_47_pad_0, pad_type = k_47_pad_type_0, strides = var_2567, weight = unet_down_blocks_2_attentions_0_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_47_cast")]; + tensor var_2573 = const()[name = tensor("op_2573"), val = tensor([1, 1])]; + tensor var_2575 = const()[name = tensor("op_2575"), val = tensor([1, 1])]; + tensor v_47_pad_type_0 = const()[name = tensor("v_47_pad_type_0"), val = tensor("custom")]; + tensor v_47_pad_0 = const()[name 
= tensor("v_47_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(265132544))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(267098688))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_47_cast = conv(dilations = var_2575, groups = var_31, pad = v_47_pad_0, pad_type = v_47_pad_type_0, strides = var_2573, weight = unet_down_blocks_2_attentions_0_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_47_cast")]; + tensor var_2579 = const()[name = tensor("op_2579"), val = tensor([2, 20, 64, -1])]; + tensor var_2580_cast = reshape(shape = var_2579, x = q_47_cast)[name = tensor("op_2580_cast")]; + tensor var_2581 = const()[name = tensor("op_2581"), val = tensor([2, 20, 64, -1])]; + tensor var_2582_cast = reshape(shape = var_2581, x = k_47_cast)[name = tensor("op_2582_cast")]; + tensor var_2583 = const()[name = tensor("op_2583"), val = tensor([2, 20, 64, -1])]; + tensor var_2584_cast = reshape(shape = var_2583, x = v_47_cast)[name = tensor("op_2584_cast")]; + tensor attn_weights_93_transpose_x_0 = const()[name = tensor("attn_weights_93_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_93_transpose_y_0 = const()[name = tensor("attn_weights_93_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_93_cast = matmul(transpose_x = attn_weights_93_transpose_x_0, transpose_y = attn_weights_93_transpose_y_0, x = var_2580_cast, y = var_2582_cast)[name = tensor("attn_weights_93_cast")]; + tensor attn_weights_95_cast = mul(x = attn_weights_93_cast, y = var_12_to_fp16)[name = tensor("attn_weights_95_cast")]; + tensor var_2588_cast = softmax(axis = var_18, x = attn_weights_95_cast)[name = tensor("op_2588_cast")]; + tensor attn_47_transpose_x_0 = const()[name = tensor("attn_47_transpose_x_0"), val = tensor(false)]; + tensor attn_47_transpose_y_0 = const()[name = tensor("attn_47_transpose_y_0"), val = tensor(true)]; + tensor attn_47_cast = matmul(transpose_x = attn_47_transpose_x_0, transpose_y = attn_47_transpose_y_0, x = var_2584_cast, y = var_2588_cast)[name = tensor("attn_47_cast")]; + tensor var_2592 = const()[name = tensor("op_2592"), val = tensor([2, 1280, 1, -1])]; + tensor input_189_cast = reshape(shape = var_2592, x = attn_47_cast)[name = tensor("input_189_cast")]; + tensor var_2597 = const()[name = tensor("op_2597"), val = tensor([1, 1])]; + tensor var_2599 = const()[name = tensor("op_2599"), val = tensor([1, 1])]; + tensor var_2601_pad_type_0 = const()[name = tensor("op_2601_pad_type_0"), val = tensor("custom")]; + tensor var_2601_pad_0 = const()[name = tensor("op_2601_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(267098880))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(268327744))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_7_attn2_to_out_0_bias_to_fp16 = const()[name = 
tensor("unet_down_blocks_2_attentions_0_transformer_blocks_7_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(268327936)))]; + tensor var_2601_cast = conv(bias = unet_down_blocks_2_attentions_0_transformer_blocks_7_attn2_to_out_0_bias_to_fp16, dilations = var_2599, groups = var_31, pad = var_2601_pad_0, pad_type = var_2601_pad_type_0, strides = var_2597, weight = unet_down_blocks_2_attentions_0_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized, x = input_189_cast)[name = tensor("op_2601_cast")]; + tensor inputs_71_cast = add(x = var_2601_cast, y = inputs_69_cast)[name = tensor("inputs_71_cast")]; + tensor var_2605 = const()[name = tensor("op_2605"), val = tensor([1])]; + tensor channels_mean_71_cast = reduce_mean(axes = var_2605, keep_dims = var_23, x = inputs_71_cast)[name = tensor("channels_mean_71_cast")]; + tensor zero_mean_71_cast = sub(x = inputs_71_cast, y = channels_mean_71_cast)[name = tensor("zero_mean_71_cast")]; + tensor zero_mean_sq_71_cast = mul(x = zero_mean_71_cast, y = zero_mean_71_cast)[name = tensor("zero_mean_sq_71_cast")]; + tensor var_2609 = const()[name = tensor("op_2609"), val = tensor([1])]; + tensor var_2610_cast = reduce_mean(axes = var_2609, keep_dims = var_23, x = zero_mean_sq_71_cast)[name = tensor("op_2610_cast")]; + tensor var_2611_to_fp16 = const()[name = tensor("op_2611_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2612_cast = add(x = var_2610_cast, y = var_2611_to_fp16)[name = tensor("op_2612_cast")]; + tensor denom_71_epsilon_0_to_fp16 = const()[name = tensor("denom_71_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_71_cast = rsqrt(epsilon = denom_71_epsilon_0_to_fp16, x = var_2612_cast)[name = tensor("denom_71_cast")]; + tensor out_71_cast = mul(x = zero_mean_71_cast, y = denom_71_cast)[name = tensor("out_71_cast")]; + tensor var_2616_to_fp16 = const()[name = tensor("op_2616_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(268330560)))]; + tensor var_2617_cast = add(x = out_71_cast, y = var_2616_to_fp16)[name = tensor("op_2617_cast")]; + tensor var_2619_to_fp16 = const()[name = tensor("op_2619_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(268333184)))]; + tensor input_191_cast = mul(x = var_2617_cast, y = var_2619_to_fp16)[name = tensor("input_191_cast")]; + tensor var_2627 = const()[name = tensor("op_2627"), val = tensor([1, 1])]; + tensor var_2629 = const()[name = tensor("op_2629"), val = tensor([1, 1])]; + tensor var_2631_pad_type_0 = const()[name = tensor("op_2631_pad_type_0"), val = tensor("custom")]; + tensor var_2631_pad_0 = const()[name = tensor("op_2631_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(268335808))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(278166272))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_7_ff_net_0_proj_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(278166464))), lut = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(278174208))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_7_ff_net_0_proj_bias_to_fp16_palettized"), shape = tensor([10240])]; + tensor var_2631_cast = conv(bias = unet_down_blocks_2_attentions_0_transformer_blocks_7_ff_net_0_proj_bias_to_fp16_palettized, dilations = var_2629, groups = var_31, pad = var_2631_pad_0, pad_type = var_2631_pad_type_0, strides = var_2627, weight = unet_down_blocks_2_attentions_0_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized, x = input_191_cast)[name = tensor("op_2631_cast")]; + tensor var_2632_split_sizes_0 = const()[name = tensor("op_2632_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_2632_axis_0 = const()[name = tensor("op_2632_axis_0"), val = tensor(1)]; + tensor var_2632_cast_0, tensor var_2632_cast_1 = split(axis = var_2632_axis_0, split_sizes = var_2632_split_sizes_0, x = var_2631_cast)[name = tensor("op_2632_cast")]; + tensor var_2634_mode_0 = const()[name = tensor("op_2634_mode_0"), val = tensor("EXACT")]; + tensor var_2634_cast = gelu(mode = var_2634_mode_0, x = var_2632_cast_1)[name = tensor("op_2634_cast")]; + tensor input_193_cast = mul(x = var_2632_cast_0, y = var_2634_cast)[name = tensor("input_193_cast")]; + tensor var_2638 = const()[name = tensor("op_2638"), val = tensor([1, 1])]; + tensor var_2640 = const()[name = tensor("op_2640"), val = tensor([1, 1])]; + tensor var_2642_pad_type_0 = const()[name = tensor("op_2642_pad_type_0"), val = tensor("custom")]; + tensor var_2642_pad_0 = const()[name = tensor("op_2642_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(278174400))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(283089664))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_7_ff_net_2_bias_to_fp16 = const()[name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_7_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(283089856)))]; + tensor var_2642_cast = conv(bias = unet_down_blocks_2_attentions_0_transformer_blocks_7_ff_net_2_bias_to_fp16, dilations = var_2640, groups = var_31, pad = var_2642_pad_0, pad_type = var_2642_pad_type_0, strides = var_2638, weight = unet_down_blocks_2_attentions_0_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized, x = input_193_cast)[name = tensor("op_2642_cast")]; + tensor inputs_73_cast = add(x = var_2642_cast, y = inputs_71_cast)[name = tensor("inputs_73_cast")]; + tensor var_2652 = const()[name = tensor("op_2652"), val = tensor([1])]; + tensor channels_mean_73_cast = reduce_mean(axes = var_2652, keep_dims = var_23, x = inputs_73_cast)[name = tensor("channels_mean_73_cast")]; + tensor zero_mean_73_cast = sub(x = inputs_73_cast, y = channels_mean_73_cast)[name = tensor("zero_mean_73_cast")]; + tensor zero_mean_sq_73_cast = mul(x = zero_mean_73_cast, y = zero_mean_73_cast)[name = tensor("zero_mean_sq_73_cast")]; + tensor var_2656 = const()[name = tensor("op_2656"), val = tensor([1])]; + tensor var_2657_cast = reduce_mean(axes = var_2656, keep_dims = var_23, x = zero_mean_sq_73_cast)[name = tensor("op_2657_cast")]; + tensor var_2658_to_fp16 = 
const()[name = tensor("op_2658_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2659_cast = add(x = var_2657_cast, y = var_2658_to_fp16)[name = tensor("op_2659_cast")]; + tensor denom_73_epsilon_0_to_fp16 = const()[name = tensor("denom_73_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_73_cast = rsqrt(epsilon = denom_73_epsilon_0_to_fp16, x = var_2659_cast)[name = tensor("denom_73_cast")]; + tensor out_73_cast = mul(x = zero_mean_73_cast, y = denom_73_cast)[name = tensor("out_73_cast")]; + tensor var_2663_to_fp16 = const()[name = tensor("op_2663_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(283092480)))]; + tensor var_2664_cast = add(x = out_73_cast, y = var_2663_to_fp16)[name = tensor("op_2664_cast")]; + tensor var_2666_to_fp16 = const()[name = tensor("op_2666_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(283095104)))]; + tensor hidden_states_113_cast = mul(x = var_2664_cast, y = var_2666_to_fp16)[name = tensor("hidden_states_113_cast")]; + tensor var_2673 = const()[name = tensor("op_2673"), val = tensor([1, 1])]; + tensor var_2675 = const()[name = tensor("op_2675"), val = tensor([1, 1])]; + tensor q_49_pad_type_0 = const()[name = tensor("q_49_pad_type_0"), val = tensor("custom")]; + tensor q_49_pad_0 = const()[name = tensor("q_49_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(283097728))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(284326592))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_49_cast = conv(dilations = var_2675, groups = var_31, pad = q_49_pad_0, pad_type = q_49_pad_type_0, strides = var_2673, weight = unet_down_blocks_2_attentions_0_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_113_cast)[name = tensor("q_49_cast")]; + tensor var_2679 = const()[name = tensor("op_2679"), val = tensor([1, 1])]; + tensor var_2681 = const()[name = tensor("op_2681"), val = tensor([1, 1])]; + tensor k_49_pad_type_0 = const()[name = tensor("k_49_pad_type_0"), val = tensor("custom")]; + tensor k_49_pad_0 = const()[name = tensor("k_49_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(284326784))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(285555648))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_49_cast = conv(dilations = var_2681, groups = var_31, pad = k_49_pad_0, pad_type = k_49_pad_type_0, strides = var_2679, weight = unet_down_blocks_2_attentions_0_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_113_cast)[name = tensor("k_49_cast")]; + tensor var_2685 = const()[name = tensor("op_2685"), val = tensor([1, 1])]; + tensor var_2687 = const()[name = tensor("op_2687"), val = tensor([1, 1])]; + tensor v_49_pad_type_0 = const()[name = tensor("v_49_pad_type_0"), val = tensor("custom")]; + tensor v_49_pad_0 = 
const()[name = tensor("v_49_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(285555840))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(286784704))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_49_cast = conv(dilations = var_2687, groups = var_31, pad = v_49_pad_0, pad_type = v_49_pad_type_0, strides = var_2685, weight = unet_down_blocks_2_attentions_0_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_113_cast)[name = tensor("v_49_cast")]; + tensor var_2691 = const()[name = tensor("op_2691"), val = tensor([2, 20, 64, -1])]; + tensor var_2692_cast = reshape(shape = var_2691, x = q_49_cast)[name = tensor("op_2692_cast")]; + tensor var_2693 = const()[name = tensor("op_2693"), val = tensor([2, 20, 64, -1])]; + tensor var_2694_cast = reshape(shape = var_2693, x = k_49_cast)[name = tensor("op_2694_cast")]; + tensor var_2695 = const()[name = tensor("op_2695"), val = tensor([2, 20, 64, -1])]; + tensor var_2696_cast = reshape(shape = var_2695, x = v_49_cast)[name = tensor("op_2696_cast")]; + tensor attn_weights_97_transpose_x_0 = const()[name = tensor("attn_weights_97_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_97_transpose_y_0 = const()[name = tensor("attn_weights_97_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_97_cast = matmul(transpose_x = attn_weights_97_transpose_x_0, transpose_y = attn_weights_97_transpose_y_0, x = var_2692_cast, y = var_2694_cast)[name = tensor("attn_weights_97_cast")]; + tensor attn_weights_99_cast = mul(x = attn_weights_97_cast, y = var_12_to_fp16)[name = tensor("attn_weights_99_cast")]; + tensor var_2700_cast = softmax(axis = var_18, x = attn_weights_99_cast)[name = tensor("op_2700_cast")]; + tensor attn_49_transpose_x_0 = const()[name = tensor("attn_49_transpose_x_0"), val = tensor(false)]; + tensor attn_49_transpose_y_0 = const()[name = tensor("attn_49_transpose_y_0"), val = tensor(true)]; + tensor attn_49_cast = matmul(transpose_x = attn_49_transpose_x_0, transpose_y = attn_49_transpose_y_0, x = var_2696_cast, y = var_2700_cast)[name = tensor("attn_49_cast")]; + tensor var_2704 = const()[name = tensor("op_2704"), val = tensor([2, 1280, 1, -1])]; + tensor input_195_cast = reshape(shape = var_2704, x = attn_49_cast)[name = tensor("input_195_cast")]; + tensor var_2709 = const()[name = tensor("op_2709"), val = tensor([1, 1])]; + tensor var_2711 = const()[name = tensor("op_2711"), val = tensor([1, 1])]; + tensor var_2713_pad_type_0 = const()[name = tensor("op_2713_pad_type_0"), val = tensor("custom")]; + tensor var_2713_pad_0 = const()[name = tensor("op_2713_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(286784896))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(288013760))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_8_attn1_to_out_0_bias_to_fp16 = 
const()[name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_8_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(288013952)))]; + tensor var_2713_cast = conv(bias = unet_down_blocks_2_attentions_0_transformer_blocks_8_attn1_to_out_0_bias_to_fp16, dilations = var_2711, groups = var_31, pad = var_2713_pad_0, pad_type = var_2713_pad_type_0, strides = var_2709, weight = unet_down_blocks_2_attentions_0_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized, x = input_195_cast)[name = tensor("op_2713_cast")]; + tensor inputs_75_cast = add(x = var_2713_cast, y = inputs_73_cast)[name = tensor("inputs_75_cast")]; + tensor var_2717 = const()[name = tensor("op_2717"), val = tensor([1])]; + tensor channels_mean_75_cast = reduce_mean(axes = var_2717, keep_dims = var_23, x = inputs_75_cast)[name = tensor("channels_mean_75_cast")]; + tensor zero_mean_75_cast = sub(x = inputs_75_cast, y = channels_mean_75_cast)[name = tensor("zero_mean_75_cast")]; + tensor zero_mean_sq_75_cast = mul(x = zero_mean_75_cast, y = zero_mean_75_cast)[name = tensor("zero_mean_sq_75_cast")]; + tensor var_2721 = const()[name = tensor("op_2721"), val = tensor([1])]; + tensor var_2722_cast = reduce_mean(axes = var_2721, keep_dims = var_23, x = zero_mean_sq_75_cast)[name = tensor("op_2722_cast")]; + tensor var_2723_to_fp16 = const()[name = tensor("op_2723_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2724_cast = add(x = var_2722_cast, y = var_2723_to_fp16)[name = tensor("op_2724_cast")]; + tensor denom_75_epsilon_0_to_fp16 = const()[name = tensor("denom_75_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_75_cast = rsqrt(epsilon = denom_75_epsilon_0_to_fp16, x = var_2724_cast)[name = tensor("denom_75_cast")]; + tensor out_75_cast = mul(x = zero_mean_75_cast, y = denom_75_cast)[name = tensor("out_75_cast")]; + tensor var_2728_to_fp16 = const()[name = tensor("op_2728_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(288016576)))]; + tensor var_2729_cast = add(x = out_75_cast, y = var_2728_to_fp16)[name = tensor("op_2729_cast")]; + tensor var_2731_to_fp16 = const()[name = tensor("op_2731_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(288019200)))]; + tensor hidden_states_115_cast = mul(x = var_2729_cast, y = var_2731_to_fp16)[name = tensor("hidden_states_115_cast")]; + tensor var_2738 = const()[name = tensor("op_2738"), val = tensor([1, 1])]; + tensor var_2740 = const()[name = tensor("op_2740"), val = tensor([1, 1])]; + tensor q_51_pad_type_0 = const()[name = tensor("q_51_pad_type_0"), val = tensor("custom")]; + tensor q_51_pad_0 = const()[name = tensor("q_51_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(288021824))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(289250688))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_51_cast = conv(dilations = var_2740, groups = var_31, pad = q_51_pad_0, pad_type = q_51_pad_type_0, strides = var_2738, weight = unet_down_blocks_2_attentions_0_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_115_cast)[name = 
tensor("q_51_cast")]; + tensor var_2744 = const()[name = tensor("op_2744"), val = tensor([1, 1])]; + tensor var_2746 = const()[name = tensor("op_2746"), val = tensor([1, 1])]; + tensor k_51_pad_type_0 = const()[name = tensor("k_51_pad_type_0"), val = tensor("custom")]; + tensor k_51_pad_0 = const()[name = tensor("k_51_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(289250880))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(291217024))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_51_cast = conv(dilations = var_2746, groups = var_31, pad = k_51_pad_0, pad_type = k_51_pad_type_0, strides = var_2744, weight = unet_down_blocks_2_attentions_0_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_51_cast")]; + tensor var_2750 = const()[name = tensor("op_2750"), val = tensor([1, 1])]; + tensor var_2752 = const()[name = tensor("op_2752"), val = tensor([1, 1])]; + tensor v_51_pad_type_0 = const()[name = tensor("v_51_pad_type_0"), val = tensor("custom")]; + tensor v_51_pad_0 = const()[name = tensor("v_51_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(291217216))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(293183360))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_51_cast = conv(dilations = var_2752, groups = var_31, pad = v_51_pad_0, pad_type = v_51_pad_type_0, strides = var_2750, weight = unet_down_blocks_2_attentions_0_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_51_cast")]; + tensor var_2756 = const()[name = tensor("op_2756"), val = tensor([2, 20, 64, -1])]; + tensor var_2757_cast = reshape(shape = var_2756, x = q_51_cast)[name = tensor("op_2757_cast")]; + tensor var_2758 = const()[name = tensor("op_2758"), val = tensor([2, 20, 64, -1])]; + tensor var_2759_cast = reshape(shape = var_2758, x = k_51_cast)[name = tensor("op_2759_cast")]; + tensor var_2760 = const()[name = tensor("op_2760"), val = tensor([2, 20, 64, -1])]; + tensor var_2761_cast = reshape(shape = var_2760, x = v_51_cast)[name = tensor("op_2761_cast")]; + tensor attn_weights_101_transpose_x_0 = const()[name = tensor("attn_weights_101_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_101_transpose_y_0 = const()[name = tensor("attn_weights_101_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_101_cast = matmul(transpose_x = attn_weights_101_transpose_x_0, transpose_y = attn_weights_101_transpose_y_0, x = var_2757_cast, y = var_2759_cast)[name = tensor("attn_weights_101_cast")]; + tensor attn_weights_103_cast = mul(x = attn_weights_101_cast, y = var_12_to_fp16)[name = tensor("attn_weights_103_cast")]; + tensor var_2765_cast = softmax(axis = var_18, x = attn_weights_103_cast)[name = tensor("op_2765_cast")]; + tensor attn_51_transpose_x_0 = const()[name = tensor("attn_51_transpose_x_0"), val = 
tensor(false)]; + tensor attn_51_transpose_y_0 = const()[name = tensor("attn_51_transpose_y_0"), val = tensor(true)]; + tensor attn_51_cast = matmul(transpose_x = attn_51_transpose_x_0, transpose_y = attn_51_transpose_y_0, x = var_2761_cast, y = var_2765_cast)[name = tensor("attn_51_cast")]; + tensor var_2769 = const()[name = tensor("op_2769"), val = tensor([2, 1280, 1, -1])]; + tensor input_197_cast = reshape(shape = var_2769, x = attn_51_cast)[name = tensor("input_197_cast")]; + tensor var_2774 = const()[name = tensor("op_2774"), val = tensor([1, 1])]; + tensor var_2776 = const()[name = tensor("op_2776"), val = tensor([1, 1])]; + tensor var_2778_pad_type_0 = const()[name = tensor("op_2778_pad_type_0"), val = tensor("custom")]; + tensor var_2778_pad_0 = const()[name = tensor("op_2778_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(293183552))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(294412416))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_8_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_8_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(294412608)))]; + tensor var_2778_cast = conv(bias = unet_down_blocks_2_attentions_0_transformer_blocks_8_attn2_to_out_0_bias_to_fp16, dilations = var_2776, groups = var_31, pad = var_2778_pad_0, pad_type = var_2778_pad_type_0, strides = var_2774, weight = unet_down_blocks_2_attentions_0_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized, x = input_197_cast)[name = tensor("op_2778_cast")]; + tensor inputs_77_cast = add(x = var_2778_cast, y = inputs_75_cast)[name = tensor("inputs_77_cast")]; + tensor var_2782 = const()[name = tensor("op_2782"), val = tensor([1])]; + tensor channels_mean_77_cast = reduce_mean(axes = var_2782, keep_dims = var_23, x = inputs_77_cast)[name = tensor("channels_mean_77_cast")]; + tensor zero_mean_77_cast = sub(x = inputs_77_cast, y = channels_mean_77_cast)[name = tensor("zero_mean_77_cast")]; + tensor zero_mean_sq_77_cast = mul(x = zero_mean_77_cast, y = zero_mean_77_cast)[name = tensor("zero_mean_sq_77_cast")]; + tensor var_2786 = const()[name = tensor("op_2786"), val = tensor([1])]; + tensor var_2787_cast = reduce_mean(axes = var_2786, keep_dims = var_23, x = zero_mean_sq_77_cast)[name = tensor("op_2787_cast")]; + tensor var_2788_to_fp16 = const()[name = tensor("op_2788_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2789_cast = add(x = var_2787_cast, y = var_2788_to_fp16)[name = tensor("op_2789_cast")]; + tensor denom_77_epsilon_0_to_fp16 = const()[name = tensor("denom_77_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_77_cast = rsqrt(epsilon = denom_77_epsilon_0_to_fp16, x = var_2789_cast)[name = tensor("denom_77_cast")]; + tensor out_77_cast = mul(x = zero_mean_77_cast, y = denom_77_cast)[name = tensor("out_77_cast")]; + tensor var_2793_to_fp16 = const()[name = tensor("op_2793_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(294415232)))]; + tensor var_2794_cast = add(x = out_77_cast, y = var_2793_to_fp16)[name = 
tensor("op_2794_cast")]; + tensor var_2796_to_fp16 = const()[name = tensor("op_2796_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(294417856)))]; + tensor input_199_cast = mul(x = var_2794_cast, y = var_2796_to_fp16)[name = tensor("input_199_cast")]; + tensor var_2804 = const()[name = tensor("op_2804"), val = tensor([1, 1])]; + tensor var_2806 = const()[name = tensor("op_2806"), val = tensor([1, 1])]; + tensor var_2808_pad_type_0 = const()[name = tensor("op_2808_pad_type_0"), val = tensor("custom")]; + tensor var_2808_pad_0 = const()[name = tensor("op_2808_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(294420480))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(304250944))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_8_ff_net_0_proj_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(304251136))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(304258880))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_8_ff_net_0_proj_bias_to_fp16_palettized"), shape = tensor([10240])]; + tensor var_2808_cast = conv(bias = unet_down_blocks_2_attentions_0_transformer_blocks_8_ff_net_0_proj_bias_to_fp16_palettized, dilations = var_2806, groups = var_31, pad = var_2808_pad_0, pad_type = var_2808_pad_type_0, strides = var_2804, weight = unet_down_blocks_2_attentions_0_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized, x = input_199_cast)[name = tensor("op_2808_cast")]; + tensor var_2809_split_sizes_0 = const()[name = tensor("op_2809_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_2809_axis_0 = const()[name = tensor("op_2809_axis_0"), val = tensor(1)]; + tensor var_2809_cast_0, tensor var_2809_cast_1 = split(axis = var_2809_axis_0, split_sizes = var_2809_split_sizes_0, x = var_2808_cast)[name = tensor("op_2809_cast")]; + tensor var_2811_mode_0 = const()[name = tensor("op_2811_mode_0"), val = tensor("EXACT")]; + tensor var_2811_cast = gelu(mode = var_2811_mode_0, x = var_2809_cast_1)[name = tensor("op_2811_cast")]; + tensor input_201_cast = mul(x = var_2809_cast_0, y = var_2811_cast)[name = tensor("input_201_cast")]; + tensor var_2815 = const()[name = tensor("op_2815"), val = tensor([1, 1])]; + tensor var_2817 = const()[name = tensor("op_2817"), val = tensor([1, 1])]; + tensor var_2819_pad_type_0 = const()[name = tensor("op_2819_pad_type_0"), val = tensor("custom")]; + tensor var_2819_pad_0 = const()[name = tensor("op_2819_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(304259072))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(309174336))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor 
unet_down_blocks_2_attentions_0_transformer_blocks_8_ff_net_2_bias_to_fp16 = const()[name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_8_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(309174528)))]; + tensor var_2819_cast = conv(bias = unet_down_blocks_2_attentions_0_transformer_blocks_8_ff_net_2_bias_to_fp16, dilations = var_2817, groups = var_31, pad = var_2819_pad_0, pad_type = var_2819_pad_type_0, strides = var_2815, weight = unet_down_blocks_2_attentions_0_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized, x = input_201_cast)[name = tensor("op_2819_cast")]; + tensor inputs_79_cast = add(x = var_2819_cast, y = inputs_77_cast)[name = tensor("inputs_79_cast")]; + tensor var_2829 = const()[name = tensor("op_2829"), val = tensor([1])]; + tensor channels_mean_79_cast = reduce_mean(axes = var_2829, keep_dims = var_23, x = inputs_79_cast)[name = tensor("channels_mean_79_cast")]; + tensor zero_mean_79_cast = sub(x = inputs_79_cast, y = channels_mean_79_cast)[name = tensor("zero_mean_79_cast")]; + tensor zero_mean_sq_79_cast = mul(x = zero_mean_79_cast, y = zero_mean_79_cast)[name = tensor("zero_mean_sq_79_cast")]; + tensor var_2833 = const()[name = tensor("op_2833"), val = tensor([1])]; + tensor var_2834_cast = reduce_mean(axes = var_2833, keep_dims = var_23, x = zero_mean_sq_79_cast)[name = tensor("op_2834_cast")]; + tensor var_2835_to_fp16 = const()[name = tensor("op_2835_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2836_cast = add(x = var_2834_cast, y = var_2835_to_fp16)[name = tensor("op_2836_cast")]; + tensor denom_79_epsilon_0_to_fp16 = const()[name = tensor("denom_79_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_79_cast = rsqrt(epsilon = denom_79_epsilon_0_to_fp16, x = var_2836_cast)[name = tensor("denom_79_cast")]; + tensor out_79_cast = mul(x = zero_mean_79_cast, y = denom_79_cast)[name = tensor("out_79_cast")]; + tensor var_2840_to_fp16 = const()[name = tensor("op_2840_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(309177152)))]; + tensor var_2841_cast = add(x = out_79_cast, y = var_2840_to_fp16)[name = tensor("op_2841_cast")]; + tensor var_2843_to_fp16 = const()[name = tensor("op_2843_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(309179776)))]; + tensor hidden_states_119_cast = mul(x = var_2841_cast, y = var_2843_to_fp16)[name = tensor("hidden_states_119_cast")]; + tensor var_2850 = const()[name = tensor("op_2850"), val = tensor([1, 1])]; + tensor var_2852 = const()[name = tensor("op_2852"), val = tensor([1, 1])]; + tensor q_53_pad_type_0 = const()[name = tensor("q_53_pad_type_0"), val = tensor("custom")]; + tensor q_53_pad_0 = const()[name = tensor("q_53_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(309182400))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(310411264))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_53_cast = conv(dilations = var_2852, groups = var_31, pad = q_53_pad_0, pad_type = q_53_pad_type_0, strides = var_2850, weight = 
unet_down_blocks_2_attentions_0_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_119_cast)[name = tensor("q_53_cast")]; + tensor var_2856 = const()[name = tensor("op_2856"), val = tensor([1, 1])]; + tensor var_2858 = const()[name = tensor("op_2858"), val = tensor([1, 1])]; + tensor k_53_pad_type_0 = const()[name = tensor("k_53_pad_type_0"), val = tensor("custom")]; + tensor k_53_pad_0 = const()[name = tensor("k_53_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(310411456))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(311640320))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_53_cast = conv(dilations = var_2858, groups = var_31, pad = k_53_pad_0, pad_type = k_53_pad_type_0, strides = var_2856, weight = unet_down_blocks_2_attentions_0_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_119_cast)[name = tensor("k_53_cast")]; + tensor var_2862 = const()[name = tensor("op_2862"), val = tensor([1, 1])]; + tensor var_2864 = const()[name = tensor("op_2864"), val = tensor([1, 1])]; + tensor v_53_pad_type_0 = const()[name = tensor("v_53_pad_type_0"), val = tensor("custom")]; + tensor v_53_pad_0 = const()[name = tensor("v_53_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(311640512))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(312869376))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_53_cast = conv(dilations = var_2864, groups = var_31, pad = v_53_pad_0, pad_type = v_53_pad_type_0, strides = var_2862, weight = unet_down_blocks_2_attentions_0_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_119_cast)[name = tensor("v_53_cast")]; + tensor var_2868 = const()[name = tensor("op_2868"), val = tensor([2, 20, 64, -1])]; + tensor var_2869_cast = reshape(shape = var_2868, x = q_53_cast)[name = tensor("op_2869_cast")]; + tensor var_2870 = const()[name = tensor("op_2870"), val = tensor([2, 20, 64, -1])]; + tensor var_2871_cast = reshape(shape = var_2870, x = k_53_cast)[name = tensor("op_2871_cast")]; + tensor var_2872 = const()[name = tensor("op_2872"), val = tensor([2, 20, 64, -1])]; + tensor var_2873_cast = reshape(shape = var_2872, x = v_53_cast)[name = tensor("op_2873_cast")]; + tensor attn_weights_105_transpose_x_0 = const()[name = tensor("attn_weights_105_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_105_transpose_y_0 = const()[name = tensor("attn_weights_105_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_105_cast = matmul(transpose_x = attn_weights_105_transpose_x_0, transpose_y = attn_weights_105_transpose_y_0, x = var_2869_cast, y = var_2871_cast)[name = tensor("attn_weights_105_cast")]; + tensor attn_weights_107_cast = mul(x = attn_weights_105_cast, y = var_12_to_fp16)[name = tensor("attn_weights_107_cast")]; + tensor var_2877_cast = softmax(axis = var_18, x = 
attn_weights_107_cast)[name = tensor("op_2877_cast")]; + tensor attn_53_transpose_x_0 = const()[name = tensor("attn_53_transpose_x_0"), val = tensor(false)]; + tensor attn_53_transpose_y_0 = const()[name = tensor("attn_53_transpose_y_0"), val = tensor(true)]; + tensor attn_53_cast = matmul(transpose_x = attn_53_transpose_x_0, transpose_y = attn_53_transpose_y_0, x = var_2873_cast, y = var_2877_cast)[name = tensor("attn_53_cast")]; + tensor var_2881 = const()[name = tensor("op_2881"), val = tensor([2, 1280, 1, -1])]; + tensor input_203_cast = reshape(shape = var_2881, x = attn_53_cast)[name = tensor("input_203_cast")]; + tensor var_2886 = const()[name = tensor("op_2886"), val = tensor([1, 1])]; + tensor var_2888 = const()[name = tensor("op_2888"), val = tensor([1, 1])]; + tensor var_2890_pad_type_0 = const()[name = tensor("op_2890_pad_type_0"), val = tensor("custom")]; + tensor var_2890_pad_0 = const()[name = tensor("op_2890_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(312869568))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(314098432))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_9_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_9_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(314098624)))]; + tensor var_2890_cast = conv(bias = unet_down_blocks_2_attentions_0_transformer_blocks_9_attn1_to_out_0_bias_to_fp16, dilations = var_2888, groups = var_31, pad = var_2890_pad_0, pad_type = var_2890_pad_type_0, strides = var_2886, weight = unet_down_blocks_2_attentions_0_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized, x = input_203_cast)[name = tensor("op_2890_cast")]; + tensor inputs_81_cast = add(x = var_2890_cast, y = inputs_79_cast)[name = tensor("inputs_81_cast")]; + tensor var_2894 = const()[name = tensor("op_2894"), val = tensor([1])]; + tensor channels_mean_81_cast = reduce_mean(axes = var_2894, keep_dims = var_23, x = inputs_81_cast)[name = tensor("channels_mean_81_cast")]; + tensor zero_mean_81_cast = sub(x = inputs_81_cast, y = channels_mean_81_cast)[name = tensor("zero_mean_81_cast")]; + tensor zero_mean_sq_81_cast = mul(x = zero_mean_81_cast, y = zero_mean_81_cast)[name = tensor("zero_mean_sq_81_cast")]; + tensor var_2898 = const()[name = tensor("op_2898"), val = tensor([1])]; + tensor var_2899_cast = reduce_mean(axes = var_2898, keep_dims = var_23, x = zero_mean_sq_81_cast)[name = tensor("op_2899_cast")]; + tensor var_2900_to_fp16 = const()[name = tensor("op_2900_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2901_cast = add(x = var_2899_cast, y = var_2900_to_fp16)[name = tensor("op_2901_cast")]; + tensor denom_81_epsilon_0_to_fp16 = const()[name = tensor("denom_81_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_81_cast = rsqrt(epsilon = denom_81_epsilon_0_to_fp16, x = var_2901_cast)[name = tensor("denom_81_cast")]; + tensor out_81_cast = mul(x = zero_mean_81_cast, y = denom_81_cast)[name = tensor("out_81_cast")]; + tensor var_2905_to_fp16 = const()[name = tensor("op_2905_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(314101248)))]; + tensor var_2906_cast = add(x = out_81_cast, y = var_2905_to_fp16)[name = tensor("op_2906_cast")]; + tensor var_2908_to_fp16 = const()[name = tensor("op_2908_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(314103872)))]; + tensor hidden_states_121_cast = mul(x = var_2906_cast, y = var_2908_to_fp16)[name = tensor("hidden_states_121_cast")]; + tensor var_2915 = const()[name = tensor("op_2915"), val = tensor([1, 1])]; + tensor var_2917 = const()[name = tensor("op_2917"), val = tensor([1, 1])]; + tensor q_55_pad_type_0 = const()[name = tensor("q_55_pad_type_0"), val = tensor("custom")]; + tensor q_55_pad_0 = const()[name = tensor("q_55_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(314106496))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(315335360))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_55_cast = conv(dilations = var_2917, groups = var_31, pad = q_55_pad_0, pad_type = q_55_pad_type_0, strides = var_2915, weight = unet_down_blocks_2_attentions_0_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_121_cast)[name = tensor("q_55_cast")]; + tensor var_2921 = const()[name = tensor("op_2921"), val = tensor([1, 1])]; + tensor var_2923 = const()[name = tensor("op_2923"), val = tensor([1, 1])]; + tensor k_55_pad_type_0 = const()[name = tensor("k_55_pad_type_0"), val = tensor("custom")]; + tensor k_55_pad_0 = const()[name = tensor("k_55_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(315335552))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(317301696))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_55_cast = conv(dilations = var_2923, groups = var_31, pad = k_55_pad_0, pad_type = k_55_pad_type_0, strides = var_2921, weight = unet_down_blocks_2_attentions_0_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_55_cast")]; + tensor var_2927 = const()[name = tensor("op_2927"), val = tensor([1, 1])]; + tensor var_2929 = const()[name = tensor("op_2929"), val = tensor([1, 1])]; + tensor v_55_pad_type_0 = const()[name = tensor("v_55_pad_type_0"), val = tensor("custom")]; + tensor v_55_pad_0 = const()[name = tensor("v_55_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(317301888))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(319268032))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_55_cast = conv(dilations = var_2929, groups = var_31, pad = 
v_55_pad_0, pad_type = v_55_pad_type_0, strides = var_2927, weight = unet_down_blocks_2_attentions_0_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_55_cast")]; + tensor var_2933 = const()[name = tensor("op_2933"), val = tensor([2, 20, 64, -1])]; + tensor var_2934_cast = reshape(shape = var_2933, x = q_55_cast)[name = tensor("op_2934_cast")]; + tensor var_2935 = const()[name = tensor("op_2935"), val = tensor([2, 20, 64, -1])]; + tensor var_2936_cast = reshape(shape = var_2935, x = k_55_cast)[name = tensor("op_2936_cast")]; + tensor var_2937 = const()[name = tensor("op_2937"), val = tensor([2, 20, 64, -1])]; + tensor var_2938_cast = reshape(shape = var_2937, x = v_55_cast)[name = tensor("op_2938_cast")]; + tensor attn_weights_109_transpose_x_0 = const()[name = tensor("attn_weights_109_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_109_transpose_y_0 = const()[name = tensor("attn_weights_109_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_109_cast = matmul(transpose_x = attn_weights_109_transpose_x_0, transpose_y = attn_weights_109_transpose_y_0, x = var_2934_cast, y = var_2936_cast)[name = tensor("attn_weights_109_cast")]; + tensor attn_weights_111_cast = mul(x = attn_weights_109_cast, y = var_12_to_fp16)[name = tensor("attn_weights_111_cast")]; + tensor var_2942_cast = softmax(axis = var_18, x = attn_weights_111_cast)[name = tensor("op_2942_cast")]; + tensor attn_55_transpose_x_0 = const()[name = tensor("attn_55_transpose_x_0"), val = tensor(false)]; + tensor attn_55_transpose_y_0 = const()[name = tensor("attn_55_transpose_y_0"), val = tensor(true)]; + tensor attn_55_cast = matmul(transpose_x = attn_55_transpose_x_0, transpose_y = attn_55_transpose_y_0, x = var_2938_cast, y = var_2942_cast)[name = tensor("attn_55_cast")]; + tensor var_2946 = const()[name = tensor("op_2946"), val = tensor([2, 1280, 1, -1])]; + tensor input_205_cast = reshape(shape = var_2946, x = attn_55_cast)[name = tensor("input_205_cast")]; + tensor var_2951 = const()[name = tensor("op_2951"), val = tensor([1, 1])]; + tensor var_2953 = const()[name = tensor("op_2953"), val = tensor([1, 1])]; + tensor var_2955_pad_type_0 = const()[name = tensor("op_2955_pad_type_0"), val = tensor("custom")]; + tensor var_2955_pad_0 = const()[name = tensor("op_2955_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(319268224))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(320497088))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_9_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_9_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(320497280)))]; + tensor var_2955_cast = conv(bias = unet_down_blocks_2_attentions_0_transformer_blocks_9_attn2_to_out_0_bias_to_fp16, dilations = var_2953, groups = var_31, pad = var_2955_pad_0, pad_type = var_2955_pad_type_0, strides = var_2951, weight = unet_down_blocks_2_attentions_0_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized, x = input_205_cast)[name = tensor("op_2955_cast")]; + 
tensor inputs_83_cast = add(x = var_2955_cast, y = inputs_81_cast)[name = tensor("inputs_83_cast")]; + tensor var_2959 = const()[name = tensor("op_2959"), val = tensor([1])]; + tensor channels_mean_83_cast = reduce_mean(axes = var_2959, keep_dims = var_23, x = inputs_83_cast)[name = tensor("channels_mean_83_cast")]; + tensor zero_mean_83_cast = sub(x = inputs_83_cast, y = channels_mean_83_cast)[name = tensor("zero_mean_83_cast")]; + tensor zero_mean_sq_83_cast = mul(x = zero_mean_83_cast, y = zero_mean_83_cast)[name = tensor("zero_mean_sq_83_cast")]; + tensor var_2963 = const()[name = tensor("op_2963"), val = tensor([1])]; + tensor var_2964_cast = reduce_mean(axes = var_2963, keep_dims = var_23, x = zero_mean_sq_83_cast)[name = tensor("op_2964_cast")]; + tensor var_2965_to_fp16 = const()[name = tensor("op_2965_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2966_cast = add(x = var_2964_cast, y = var_2965_to_fp16)[name = tensor("op_2966_cast")]; + tensor denom_83_epsilon_0_to_fp16 = const()[name = tensor("denom_83_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_83_cast = rsqrt(epsilon = denom_83_epsilon_0_to_fp16, x = var_2966_cast)[name = tensor("denom_83_cast")]; + tensor out_83_cast = mul(x = zero_mean_83_cast, y = denom_83_cast)[name = tensor("out_83_cast")]; + tensor var_2970_to_fp16 = const()[name = tensor("op_2970_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(320499904)))]; + tensor var_2971_cast = add(x = out_83_cast, y = var_2970_to_fp16)[name = tensor("op_2971_cast")]; + tensor var_2973_to_fp16 = const()[name = tensor("op_2973_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(320502528)))]; + tensor input_207_cast = mul(x = var_2971_cast, y = var_2973_to_fp16)[name = tensor("input_207_cast")]; + tensor var_2981 = const()[name = tensor("op_2981"), val = tensor([1, 1])]; + tensor var_2983 = const()[name = tensor("op_2983"), val = tensor([1, 1])]; + tensor var_2985_pad_type_0 = const()[name = tensor("op_2985_pad_type_0"), val = tensor("custom")]; + tensor var_2985_pad_0 = const()[name = tensor("op_2985_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(320505152))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(330335616))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_9_ff_net_0_proj_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(330335808))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(330343552))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_9_ff_net_0_proj_bias_to_fp16_palettized"), shape = tensor([10240])]; + tensor var_2985_cast = conv(bias = unet_down_blocks_2_attentions_0_transformer_blocks_9_ff_net_0_proj_bias_to_fp16_palettized, dilations = var_2983, groups = var_31, pad = var_2985_pad_0, pad_type = var_2985_pad_type_0, strides = var_2981, weight = unet_down_blocks_2_attentions_0_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized, x = input_207_cast)[name = tensor("op_2985_cast")]; + tensor 
var_2986_split_sizes_0 = const()[name = tensor("op_2986_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_2986_axis_0 = const()[name = tensor("op_2986_axis_0"), val = tensor(1)]; + tensor var_2986_cast_0, tensor var_2986_cast_1 = split(axis = var_2986_axis_0, split_sizes = var_2986_split_sizes_0, x = var_2985_cast)[name = tensor("op_2986_cast")]; + tensor var_2988_mode_0 = const()[name = tensor("op_2988_mode_0"), val = tensor("EXACT")]; + tensor var_2988_cast = gelu(mode = var_2988_mode_0, x = var_2986_cast_1)[name = tensor("op_2988_cast")]; + tensor input_209_cast = mul(x = var_2986_cast_0, y = var_2988_cast)[name = tensor("input_209_cast")]; + tensor var_2992 = const()[name = tensor("op_2992"), val = tensor([1, 1])]; + tensor var_2994 = const()[name = tensor("op_2994"), val = tensor([1, 1])]; + tensor var_2996_pad_type_0 = const()[name = tensor("op_2996_pad_type_0"), val = tensor("custom")]; + tensor var_2996_pad_0 = const()[name = tensor("op_2996_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(330343744))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(335259008))), name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor unet_down_blocks_2_attentions_0_transformer_blocks_9_ff_net_2_bias_to_fp16 = const()[name = tensor("unet_down_blocks_2_attentions_0_transformer_blocks_9_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(335259200)))]; + tensor var_2996_cast = conv(bias = unet_down_blocks_2_attentions_0_transformer_blocks_9_ff_net_2_bias_to_fp16, dilations = var_2994, groups = var_31, pad = var_2996_pad_0, pad_type = var_2996_pad_type_0, strides = var_2992, weight = unet_down_blocks_2_attentions_0_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized, x = input_209_cast)[name = tensor("op_2996_cast")]; + tensor hidden_states_125_cast = add(x = var_2996_cast, y = inputs_83_cast)[name = tensor("hidden_states_125_cast")]; + tensor var_2998 = const()[name = tensor("op_2998"), val = tensor([2, 1280, 32, 32])]; + tensor input_211_cast = reshape(shape = var_2998, x = hidden_states_125_cast)[name = tensor("input_211_cast")]; + tensor var_3002 = const()[name = tensor("op_3002"), val = tensor([1, 1])]; + tensor var_3004 = const()[name = tensor("op_3004"), val = tensor([1, 1])]; + tensor hidden_states_127_pad_type_0 = const()[name = tensor("hidden_states_127_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_127_pad_0 = const()[name = tensor("hidden_states_127_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_0_proj_out_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(335261824))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(336490688))), name = tensor("unet_down_blocks_2_attentions_0_proj_out_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_down_blocks_2_attentions_0_proj_out_bias_to_fp16 = const()[name = tensor("unet_down_blocks_2_attentions_0_proj_out_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(336490880)))]; + tensor 
hidden_states_127_cast = conv(bias = unet_down_blocks_2_attentions_0_proj_out_bias_to_fp16, dilations = var_3004, groups = var_31, pad = hidden_states_127_pad_0, pad_type = hidden_states_127_pad_type_0, strides = var_3002, weight = unet_down_blocks_2_attentions_0_proj_out_weight_to_fp16_palettized, x = input_211_cast)[name = tensor("hidden_states_127_cast")]; + tensor input_213_cast = add(x = hidden_states_127_cast, y = hidden_states_61_cast)[name = tensor("input_213_cast")]; + tensor reshape_52_shape_0 = const()[name = tensor("reshape_52_shape_0"), val = tensor([2, 32, 40, 32, 32])]; + tensor reshape_52_cast = reshape(shape = reshape_52_shape_0, x = input_213_cast)[name = tensor("reshape_52_cast")]; + tensor reduce_mean_39_axes_0 = const()[name = tensor("reduce_mean_39_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_39_keep_dims_0 = const()[name = tensor("reduce_mean_39_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_39_cast = reduce_mean(axes = reduce_mean_39_axes_0, keep_dims = reduce_mean_39_keep_dims_0, x = reshape_52_cast)[name = tensor("reduce_mean_39_cast")]; + tensor sub_26_cast = sub(x = reshape_52_cast, y = reduce_mean_39_cast)[name = tensor("sub_26_cast")]; + tensor square_13_cast = square(x = sub_26_cast)[name = tensor("square_13_cast")]; + tensor reduce_mean_41_axes_0 = const()[name = tensor("reduce_mean_41_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_41_keep_dims_0 = const()[name = tensor("reduce_mean_41_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_41_cast = reduce_mean(axes = reduce_mean_41_axes_0, keep_dims = reduce_mean_41_keep_dims_0, x = square_13_cast)[name = tensor("reduce_mean_41_cast")]; + tensor add_26_y_0_to_fp16 = const()[name = tensor("add_26_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_26_cast = add(x = reduce_mean_41_cast, y = add_26_y_0_to_fp16)[name = tensor("add_26_cast")]; + tensor sqrt_13_cast = sqrt(x = add_26_cast)[name = tensor("sqrt_13_cast")]; + tensor real_div_13_cast = real_div(x = sub_26_cast, y = sqrt_13_cast)[name = tensor("real_div_13_cast")]; + tensor reshape_53_shape_0 = const()[name = tensor("reshape_53_shape_0"), val = tensor([2, 1280, 32, 32])]; + tensor reshape_53_cast = reshape(shape = reshape_53_shape_0, x = real_div_13_cast)[name = tensor("reshape_53_cast")]; + tensor add_27_gamma_0_to_fp16 = const()[name = tensor("add_27_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(336493504)))]; + tensor add_27_beta_0_to_fp16 = const()[name = tensor("add_27_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(336496128)))]; + tensor add_27_epsilon_0_to_fp16 = const()[name = tensor("add_27_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_27_cast = batch_norm(beta = add_27_beta_0_to_fp16, epsilon = add_27_epsilon_0_to_fp16, gamma = add_27_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_53_cast)[name = tensor("add_27_cast")]; + tensor input_217_cast = silu(x = add_27_cast)[name = tensor("input_217_cast")]; + tensor var_3019 = const()[name = tensor("op_3019"), val = tensor([1, 1])]; + tensor var_3021 = const()[name = tensor("op_3021"), val = tensor([1, 1])]; + tensor hidden_states_129_pad_type_0 = const()[name = tensor("hidden_states_129_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_129_pad_0 = const()[name = tensor("hidden_states_129_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor 
unet_down_blocks_2_resnets_1_conv1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(336498752))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(347558016))), name = tensor("unet_down_blocks_2_resnets_1_conv1_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 3, 3])]; + tensor unet_down_blocks_2_resnets_1_conv1_bias_to_fp16 = const()[name = tensor("unet_down_blocks_2_resnets_1_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(347558208)))]; + tensor hidden_states_129_cast = conv(bias = unet_down_blocks_2_resnets_1_conv1_bias_to_fp16, dilations = var_3021, groups = var_31, pad = hidden_states_129_pad_0, pad_type = hidden_states_129_pad_type_0, strides = var_3019, weight = unet_down_blocks_2_resnets_1_conv1_weight_to_fp16_palettized, x = input_217_cast)[name = tensor("hidden_states_129_cast")]; + tensor var_3027 = const()[name = tensor("op_3027"), val = tensor([1, 1])]; + tensor var_3029 = const()[name = tensor("op_3029"), val = tensor([1, 1])]; + tensor temb_11_pad_type_0 = const()[name = tensor("temb_11_pad_type_0"), val = tensor("custom")]; + tensor temb_11_pad_0 = const()[name = tensor("temb_11_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_resnets_1_time_emb_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(347560832))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(348789696))), name = tensor("unet_down_blocks_2_resnets_1_time_emb_proj_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_down_blocks_2_resnets_1_time_emb_proj_bias_to_fp16 = const()[name = tensor("unet_down_blocks_2_resnets_1_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(348789888)))]; + tensor temb_11_cast = conv(bias = unet_down_blocks_2_resnets_1_time_emb_proj_bias_to_fp16, dilations = var_3029, groups = var_31, pad = temb_11_pad_0, pad_type = temb_11_pad_type_0, strides = var_3027, weight = unet_down_blocks_2_resnets_1_time_emb_proj_weight_to_fp16_palettized, x = input_21_cast)[name = tensor("temb_11_cast")]; + tensor input_221_cast = add(x = hidden_states_129_cast, y = temb_11_cast)[name = tensor("input_221_cast")]; + tensor reshape_56_shape_0 = const()[name = tensor("reshape_56_shape_0"), val = tensor([2, 32, 40, 32, 32])]; + tensor reshape_56_cast = reshape(shape = reshape_56_shape_0, x = input_221_cast)[name = tensor("reshape_56_cast")]; + tensor reduce_mean_42_axes_0 = const()[name = tensor("reduce_mean_42_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_42_keep_dims_0 = const()[name = tensor("reduce_mean_42_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_42_cast = reduce_mean(axes = reduce_mean_42_axes_0, keep_dims = reduce_mean_42_keep_dims_0, x = reshape_56_cast)[name = tensor("reduce_mean_42_cast")]; + tensor sub_28_cast = sub(x = reshape_56_cast, y = reduce_mean_42_cast)[name = tensor("sub_28_cast")]; + tensor square_14_cast = square(x = sub_28_cast)[name = tensor("square_14_cast")]; + tensor reduce_mean_44_axes_0 = const()[name = tensor("reduce_mean_44_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_44_keep_dims_0 = const()[name = tensor("reduce_mean_44_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_44_cast = 
reduce_mean(axes = reduce_mean_44_axes_0, keep_dims = reduce_mean_44_keep_dims_0, x = square_14_cast)[name = tensor("reduce_mean_44_cast")]; + tensor add_28_y_0_to_fp16 = const()[name = tensor("add_28_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_28_cast = add(x = reduce_mean_44_cast, y = add_28_y_0_to_fp16)[name = tensor("add_28_cast")]; + tensor sqrt_14_cast = sqrt(x = add_28_cast)[name = tensor("sqrt_14_cast")]; + tensor real_div_14_cast = real_div(x = sub_28_cast, y = sqrt_14_cast)[name = tensor("real_div_14_cast")]; + tensor reshape_57_shape_0 = const()[name = tensor("reshape_57_shape_0"), val = tensor([2, 1280, 32, 32])]; + tensor reshape_57_cast = reshape(shape = reshape_57_shape_0, x = real_div_14_cast)[name = tensor("reshape_57_cast")]; + tensor add_29_gamma_0_to_fp16 = const()[name = tensor("add_29_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(348792512)))]; + tensor add_29_beta_0_to_fp16 = const()[name = tensor("add_29_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(348795136)))]; + tensor add_29_epsilon_0_to_fp16 = const()[name = tensor("add_29_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_29_cast = batch_norm(beta = add_29_beta_0_to_fp16, epsilon = add_29_epsilon_0_to_fp16, gamma = add_29_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_57_cast)[name = tensor("add_29_cast")]; + tensor input_225_cast = silu(x = add_29_cast)[name = tensor("input_225_cast")]; + tensor var_3039 = const()[name = tensor("op_3039"), val = tensor([1, 1])]; + tensor var_3041 = const()[name = tensor("op_3041"), val = tensor([1, 1])]; + tensor hidden_states_131_pad_type_0 = const()[name = tensor("hidden_states_131_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_131_pad_0 = const()[name = tensor("hidden_states_131_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor unet_down_blocks_2_resnets_1_conv2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(348797760))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(359857024))), name = tensor("unet_down_blocks_2_resnets_1_conv2_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 3, 3])]; + tensor unet_down_blocks_2_resnets_1_conv2_bias_to_fp16 = const()[name = tensor("unet_down_blocks_2_resnets_1_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(359857216)))]; + tensor hidden_states_131_cast = conv(bias = unet_down_blocks_2_resnets_1_conv2_bias_to_fp16, dilations = var_3041, groups = var_31, pad = hidden_states_131_pad_0, pad_type = hidden_states_131_pad_type_0, strides = var_3039, weight = unet_down_blocks_2_resnets_1_conv2_weight_to_fp16_palettized, x = input_225_cast)[name = tensor("hidden_states_131_cast")]; + tensor hidden_states_133_cast = add(x = input_213_cast, y = hidden_states_131_cast)[name = tensor("hidden_states_133_cast")]; + tensor reshape_60_shape_0 = const()[name = tensor("reshape_60_shape_0"), val = tensor([2, 32, 40, 32, 32])]; + tensor reshape_60_cast = reshape(shape = reshape_60_shape_0, x = hidden_states_133_cast)[name = tensor("reshape_60_cast")]; + tensor reduce_mean_45_axes_0 = const()[name = tensor("reduce_mean_45_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_45_keep_dims_0 = const()[name = tensor("reduce_mean_45_keep_dims_0"), val = 
tensor(true)]; + tensor reduce_mean_45_cast = reduce_mean(axes = reduce_mean_45_axes_0, keep_dims = reduce_mean_45_keep_dims_0, x = reshape_60_cast)[name = tensor("reduce_mean_45_cast")]; + tensor sub_30_cast = sub(x = reshape_60_cast, y = reduce_mean_45_cast)[name = tensor("sub_30_cast")]; + tensor square_15_cast = square(x = sub_30_cast)[name = tensor("square_15_cast")]; + tensor reduce_mean_47_axes_0 = const()[name = tensor("reduce_mean_47_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_47_keep_dims_0 = const()[name = tensor("reduce_mean_47_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_47_cast = reduce_mean(axes = reduce_mean_47_axes_0, keep_dims = reduce_mean_47_keep_dims_0, x = square_15_cast)[name = tensor("reduce_mean_47_cast")]; + tensor add_30_y_0_to_fp16 = const()[name = tensor("add_30_y_0_to_fp16"), val = tensor(0x1.1p-20)]; + tensor add_30_cast = add(x = reduce_mean_47_cast, y = add_30_y_0_to_fp16)[name = tensor("add_30_cast")]; + tensor sqrt_15_cast = sqrt(x = add_30_cast)[name = tensor("sqrt_15_cast")]; + tensor real_div_15_cast = real_div(x = sub_30_cast, y = sqrt_15_cast)[name = tensor("real_div_15_cast")]; + tensor reshape_61_shape_0 = const()[name = tensor("reshape_61_shape_0"), val = tensor([2, 1280, 32, 32])]; + tensor reshape_61_cast = reshape(shape = reshape_61_shape_0, x = real_div_15_cast)[name = tensor("reshape_61_cast")]; + tensor add_31_gamma_0_to_fp16 = const()[name = tensor("add_31_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(359859840)))]; + tensor add_31_beta_0_to_fp16 = const()[name = tensor("add_31_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(359862464)))]; + tensor add_31_epsilon_0_to_fp16 = const()[name = tensor("add_31_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_31_cast = batch_norm(beta = add_31_beta_0_to_fp16, epsilon = add_31_epsilon_0_to_fp16, gamma = add_31_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_61_cast)[name = tensor("add_31_cast")]; + tensor var_3079 = const()[name = tensor("op_3079"), val = tensor([1, 1])]; + tensor var_3081 = const()[name = tensor("op_3081"), val = tensor([1, 1])]; + tensor hidden_states_135_pad_type_0 = const()[name = tensor("hidden_states_135_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_135_pad_0 = const()[name = tensor("hidden_states_135_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_proj_in_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(359865088))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(361093952))), name = tensor("unet_down_blocks_2_attentions_1_proj_in_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_down_blocks_2_attentions_1_proj_in_bias_to_fp16 = const()[name = tensor("unet_down_blocks_2_attentions_1_proj_in_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(361094144)))]; + tensor hidden_states_135_cast = conv(bias = unet_down_blocks_2_attentions_1_proj_in_bias_to_fp16, dilations = var_3081, groups = var_31, pad = hidden_states_135_pad_0, pad_type = hidden_states_135_pad_type_0, strides = var_3079, weight = unet_down_blocks_2_attentions_1_proj_in_weight_to_fp16_palettized, x = add_31_cast)[name = tensor("hidden_states_135_cast")]; + 
tensor var_3086 = const()[name = tensor("op_3086"), val = tensor([2, 1280, 1, 1024])]; + tensor inputs_85_cast = reshape(shape = var_3086, x = hidden_states_135_cast)[name = tensor("inputs_85_cast")]; + tensor var_3096 = const()[name = tensor("op_3096"), val = tensor([1])]; + tensor channels_mean_85_cast = reduce_mean(axes = var_3096, keep_dims = var_23, x = inputs_85_cast)[name = tensor("channels_mean_85_cast")]; + tensor zero_mean_85_cast = sub(x = inputs_85_cast, y = channels_mean_85_cast)[name = tensor("zero_mean_85_cast")]; + tensor zero_mean_sq_85_cast = mul(x = zero_mean_85_cast, y = zero_mean_85_cast)[name = tensor("zero_mean_sq_85_cast")]; + tensor var_3100 = const()[name = tensor("op_3100"), val = tensor([1])]; + tensor var_3101_cast = reduce_mean(axes = var_3100, keep_dims = var_23, x = zero_mean_sq_85_cast)[name = tensor("op_3101_cast")]; + tensor var_3102_to_fp16 = const()[name = tensor("op_3102_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3103_cast = add(x = var_3101_cast, y = var_3102_to_fp16)[name = tensor("op_3103_cast")]; + tensor denom_85_epsilon_0_to_fp16 = const()[name = tensor("denom_85_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_85_cast = rsqrt(epsilon = denom_85_epsilon_0_to_fp16, x = var_3103_cast)[name = tensor("denom_85_cast")]; + tensor out_85_cast = mul(x = zero_mean_85_cast, y = denom_85_cast)[name = tensor("out_85_cast")]; + tensor var_3107_to_fp16 = const()[name = tensor("op_3107_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(361096768)))]; + tensor var_3108_cast = add(x = out_85_cast, y = var_3107_to_fp16)[name = tensor("op_3108_cast")]; + tensor var_3110_to_fp16 = const()[name = tensor("op_3110_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(361099392)))]; + tensor hidden_states_137_cast = mul(x = var_3108_cast, y = var_3110_to_fp16)[name = tensor("hidden_states_137_cast")]; + tensor var_3117 = const()[name = tensor("op_3117"), val = tensor([1, 1])]; + tensor var_3119 = const()[name = tensor("op_3119"), val = tensor([1, 1])]; + tensor q_57_pad_type_0 = const()[name = tensor("q_57_pad_type_0"), val = tensor("custom")]; + tensor q_57_pad_0 = const()[name = tensor("q_57_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(361102016))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(362330880))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_57_cast = conv(dilations = var_3119, groups = var_31, pad = q_57_pad_0, pad_type = q_57_pad_type_0, strides = var_3117, weight = unet_down_blocks_2_attentions_1_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_137_cast)[name = tensor("q_57_cast")]; + tensor var_3123 = const()[name = tensor("op_3123"), val = tensor([1, 1])]; + tensor var_3125 = const()[name = tensor("op_3125"), val = tensor([1, 1])]; + tensor k_57_pad_type_0 = const()[name = tensor("k_57_pad_type_0"), val = tensor("custom")]; + tensor k_57_pad_0 = const()[name = tensor("k_57_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = 
tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(362331072))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(363559936))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_57_cast = conv(dilations = var_3125, groups = var_31, pad = k_57_pad_0, pad_type = k_57_pad_type_0, strides = var_3123, weight = unet_down_blocks_2_attentions_1_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_137_cast)[name = tensor("k_57_cast")]; + tensor var_3129 = const()[name = tensor("op_3129"), val = tensor([1, 1])]; + tensor var_3131 = const()[name = tensor("op_3131"), val = tensor([1, 1])]; + tensor v_57_pad_type_0 = const()[name = tensor("v_57_pad_type_0"), val = tensor("custom")]; + tensor v_57_pad_0 = const()[name = tensor("v_57_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(363560128))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(364788992))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_57_cast = conv(dilations = var_3131, groups = var_31, pad = v_57_pad_0, pad_type = v_57_pad_type_0, strides = var_3129, weight = unet_down_blocks_2_attentions_1_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_137_cast)[name = tensor("v_57_cast")]; + tensor var_3135 = const()[name = tensor("op_3135"), val = tensor([2, 20, 64, -1])]; + tensor var_3136_cast = reshape(shape = var_3135, x = q_57_cast)[name = tensor("op_3136_cast")]; + tensor var_3137 = const()[name = tensor("op_3137"), val = tensor([2, 20, 64, -1])]; + tensor var_3138_cast = reshape(shape = var_3137, x = k_57_cast)[name = tensor("op_3138_cast")]; + tensor var_3139 = const()[name = tensor("op_3139"), val = tensor([2, 20, 64, -1])]; + tensor var_3140_cast = reshape(shape = var_3139, x = v_57_cast)[name = tensor("op_3140_cast")]; + tensor attn_weights_113_transpose_x_0 = const()[name = tensor("attn_weights_113_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_113_transpose_y_0 = const()[name = tensor("attn_weights_113_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_113_cast = matmul(transpose_x = attn_weights_113_transpose_x_0, transpose_y = attn_weights_113_transpose_y_0, x = var_3136_cast, y = var_3138_cast)[name = tensor("attn_weights_113_cast")]; + tensor attn_weights_115_cast = mul(x = attn_weights_113_cast, y = var_12_to_fp16)[name = tensor("attn_weights_115_cast")]; + tensor var_3144_cast = softmax(axis = var_18, x = attn_weights_115_cast)[name = tensor("op_3144_cast")]; + tensor attn_57_transpose_x_0 = const()[name = tensor("attn_57_transpose_x_0"), val = tensor(false)]; + tensor attn_57_transpose_y_0 = const()[name = tensor("attn_57_transpose_y_0"), val = tensor(true)]; + tensor attn_57_cast = matmul(transpose_x = attn_57_transpose_x_0, transpose_y = attn_57_transpose_y_0, x = var_3140_cast, y = var_3144_cast)[name = tensor("attn_57_cast")]; + tensor var_3148 = const()[name = tensor("op_3148"), val = tensor([2, 1280, 1, -1])]; + tensor input_229_cast = reshape(shape = var_3148, x = attn_57_cast)[name = tensor("input_229_cast")]; + tensor var_3153 
= const()[name = tensor("op_3153"), val = tensor([1, 1])]; + tensor var_3155 = const()[name = tensor("op_3155"), val = tensor([1, 1])]; + tensor var_3157_pad_type_0 = const()[name = tensor("op_3157_pad_type_0"), val = tensor("custom")]; + tensor var_3157_pad_0 = const()[name = tensor("op_3157_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(364789184))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(366018048))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_0_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_0_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(366018240)))]; + tensor var_3157_cast = conv(bias = unet_down_blocks_2_attentions_1_transformer_blocks_0_attn1_to_out_0_bias_to_fp16, dilations = var_3155, groups = var_31, pad = var_3157_pad_0, pad_type = var_3157_pad_type_0, strides = var_3153, weight = unet_down_blocks_2_attentions_1_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized, x = input_229_cast)[name = tensor("op_3157_cast")]; + tensor inputs_87_cast = add(x = var_3157_cast, y = inputs_85_cast)[name = tensor("inputs_87_cast")]; + tensor var_3161 = const()[name = tensor("op_3161"), val = tensor([1])]; + tensor channels_mean_87_cast = reduce_mean(axes = var_3161, keep_dims = var_23, x = inputs_87_cast)[name = tensor("channels_mean_87_cast")]; + tensor zero_mean_87_cast = sub(x = inputs_87_cast, y = channels_mean_87_cast)[name = tensor("zero_mean_87_cast")]; + tensor zero_mean_sq_87_cast = mul(x = zero_mean_87_cast, y = zero_mean_87_cast)[name = tensor("zero_mean_sq_87_cast")]; + tensor var_3165 = const()[name = tensor("op_3165"), val = tensor([1])]; + tensor var_3166_cast = reduce_mean(axes = var_3165, keep_dims = var_23, x = zero_mean_sq_87_cast)[name = tensor("op_3166_cast")]; + tensor var_3167_to_fp16 = const()[name = tensor("op_3167_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3168_cast = add(x = var_3166_cast, y = var_3167_to_fp16)[name = tensor("op_3168_cast")]; + tensor denom_87_epsilon_0_to_fp16 = const()[name = tensor("denom_87_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_87_cast = rsqrt(epsilon = denom_87_epsilon_0_to_fp16, x = var_3168_cast)[name = tensor("denom_87_cast")]; + tensor out_87_cast = mul(x = zero_mean_87_cast, y = denom_87_cast)[name = tensor("out_87_cast")]; + tensor var_3172_to_fp16 = const()[name = tensor("op_3172_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(366020864)))]; + tensor var_3173_cast = add(x = out_87_cast, y = var_3172_to_fp16)[name = tensor("op_3173_cast")]; + tensor var_3175_to_fp16 = const()[name = tensor("op_3175_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(366023488)))]; + tensor hidden_states_139_cast = mul(x = var_3173_cast, y = var_3175_to_fp16)[name = tensor("hidden_states_139_cast")]; + tensor var_3182 = const()[name = tensor("op_3182"), val = tensor([1, 1])]; + tensor var_3184 = const()[name = tensor("op_3184"), val = tensor([1, 1])]; + tensor q_59_pad_type_0 = 
const()[name = tensor("q_59_pad_type_0"), val = tensor("custom")]; + tensor q_59_pad_0 = const()[name = tensor("q_59_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(366026112))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(367254976))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_59_cast = conv(dilations = var_3184, groups = var_31, pad = q_59_pad_0, pad_type = q_59_pad_type_0, strides = var_3182, weight = unet_down_blocks_2_attentions_1_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_139_cast)[name = tensor("q_59_cast")]; + tensor var_3188 = const()[name = tensor("op_3188"), val = tensor([1, 1])]; + tensor var_3190 = const()[name = tensor("op_3190"), val = tensor([1, 1])]; + tensor k_59_pad_type_0 = const()[name = tensor("k_59_pad_type_0"), val = tensor("custom")]; + tensor k_59_pad_0 = const()[name = tensor("k_59_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(367255168))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(369221312))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_59_cast = conv(dilations = var_3190, groups = var_31, pad = k_59_pad_0, pad_type = k_59_pad_type_0, strides = var_3188, weight = unet_down_blocks_2_attentions_1_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_59_cast")]; + tensor var_3194 = const()[name = tensor("op_3194"), val = tensor([1, 1])]; + tensor var_3196 = const()[name = tensor("op_3196"), val = tensor([1, 1])]; + tensor v_59_pad_type_0 = const()[name = tensor("v_59_pad_type_0"), val = tensor("custom")]; + tensor v_59_pad_0 = const()[name = tensor("v_59_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(369221504))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(371187648))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_59_cast = conv(dilations = var_3196, groups = var_31, pad = v_59_pad_0, pad_type = v_59_pad_type_0, strides = var_3194, weight = unet_down_blocks_2_attentions_1_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_59_cast")]; + tensor var_3200 = const()[name = tensor("op_3200"), val = tensor([2, 20, 64, -1])]; + tensor var_3201_cast = reshape(shape = var_3200, x = q_59_cast)[name = tensor("op_3201_cast")]; + tensor var_3202 = const()[name = tensor("op_3202"), val = tensor([2, 20, 64, -1])]; + tensor var_3203_cast = reshape(shape = var_3202, x = k_59_cast)[name = tensor("op_3203_cast")]; + tensor var_3204 = const()[name = tensor("op_3204"), val = 
tensor([2, 20, 64, -1])]; + tensor var_3205_cast = reshape(shape = var_3204, x = v_59_cast)[name = tensor("op_3205_cast")]; + tensor attn_weights_117_transpose_x_0 = const()[name = tensor("attn_weights_117_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_117_transpose_y_0 = const()[name = tensor("attn_weights_117_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_117_cast = matmul(transpose_x = attn_weights_117_transpose_x_0, transpose_y = attn_weights_117_transpose_y_0, x = var_3201_cast, y = var_3203_cast)[name = tensor("attn_weights_117_cast")]; + tensor attn_weights_119_cast = mul(x = attn_weights_117_cast, y = var_12_to_fp16)[name = tensor("attn_weights_119_cast")]; + tensor var_3209_cast = softmax(axis = var_18, x = attn_weights_119_cast)[name = tensor("op_3209_cast")]; + tensor attn_59_transpose_x_0 = const()[name = tensor("attn_59_transpose_x_0"), val = tensor(false)]; + tensor attn_59_transpose_y_0 = const()[name = tensor("attn_59_transpose_y_0"), val = tensor(true)]; + tensor attn_59_cast = matmul(transpose_x = attn_59_transpose_x_0, transpose_y = attn_59_transpose_y_0, x = var_3205_cast, y = var_3209_cast)[name = tensor("attn_59_cast")]; + tensor var_3213 = const()[name = tensor("op_3213"), val = tensor([2, 1280, 1, -1])]; + tensor input_231_cast = reshape(shape = var_3213, x = attn_59_cast)[name = tensor("input_231_cast")]; + tensor var_3218 = const()[name = tensor("op_3218"), val = tensor([1, 1])]; + tensor var_3220 = const()[name = tensor("op_3220"), val = tensor([1, 1])]; + tensor var_3222_pad_type_0 = const()[name = tensor("op_3222_pad_type_0"), val = tensor("custom")]; + tensor var_3222_pad_0 = const()[name = tensor("op_3222_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(371187840))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(372416704))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_0_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_0_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(372416896)))]; + tensor var_3222_cast = conv(bias = unet_down_blocks_2_attentions_1_transformer_blocks_0_attn2_to_out_0_bias_to_fp16, dilations = var_3220, groups = var_31, pad = var_3222_pad_0, pad_type = var_3222_pad_type_0, strides = var_3218, weight = unet_down_blocks_2_attentions_1_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized, x = input_231_cast)[name = tensor("op_3222_cast")]; + tensor inputs_89_cast = add(x = var_3222_cast, y = inputs_87_cast)[name = tensor("inputs_89_cast")]; + tensor var_3226 = const()[name = tensor("op_3226"), val = tensor([1])]; + tensor channels_mean_89_cast = reduce_mean(axes = var_3226, keep_dims = var_23, x = inputs_89_cast)[name = tensor("channels_mean_89_cast")]; + tensor zero_mean_89_cast = sub(x = inputs_89_cast, y = channels_mean_89_cast)[name = tensor("zero_mean_89_cast")]; + tensor zero_mean_sq_89_cast = mul(x = zero_mean_89_cast, y = zero_mean_89_cast)[name = tensor("zero_mean_sq_89_cast")]; + tensor var_3230 = const()[name = tensor("op_3230"), val = tensor([1])]; + tensor 
var_3231_cast = reduce_mean(axes = var_3230, keep_dims = var_23, x = zero_mean_sq_89_cast)[name = tensor("op_3231_cast")]; + tensor var_3232_to_fp16 = const()[name = tensor("op_3232_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3233_cast = add(x = var_3231_cast, y = var_3232_to_fp16)[name = tensor("op_3233_cast")]; + tensor denom_89_epsilon_0_to_fp16 = const()[name = tensor("denom_89_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_89_cast = rsqrt(epsilon = denom_89_epsilon_0_to_fp16, x = var_3233_cast)[name = tensor("denom_89_cast")]; + tensor out_89_cast = mul(x = zero_mean_89_cast, y = denom_89_cast)[name = tensor("out_89_cast")]; + tensor var_3237_to_fp16 = const()[name = tensor("op_3237_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(372419520)))]; + tensor var_3238_cast = add(x = out_89_cast, y = var_3237_to_fp16)[name = tensor("op_3238_cast")]; + tensor var_3240_to_fp16 = const()[name = tensor("op_3240_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(372422144)))]; + tensor input_233_cast = mul(x = var_3238_cast, y = var_3240_to_fp16)[name = tensor("input_233_cast")]; + tensor var_3248 = const()[name = tensor("op_3248"), val = tensor([1, 1])]; + tensor var_3250 = const()[name = tensor("op_3250"), val = tensor([1, 1])]; + tensor var_3252_pad_type_0 = const()[name = tensor("op_3252_pad_type_0"), val = tensor("custom")]; + tensor var_3252_pad_0 = const()[name = tensor("op_3252_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(372424768))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(382255232))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_0_ff_net_0_proj_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(382255424))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(382263168))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_0_ff_net_0_proj_bias_to_fp16_palettized"), shape = tensor([10240])]; + tensor var_3252_cast = conv(bias = unet_down_blocks_2_attentions_1_transformer_blocks_0_ff_net_0_proj_bias_to_fp16_palettized, dilations = var_3250, groups = var_31, pad = var_3252_pad_0, pad_type = var_3252_pad_type_0, strides = var_3248, weight = unet_down_blocks_2_attentions_1_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized, x = input_233_cast)[name = tensor("op_3252_cast")]; + tensor var_3253_split_sizes_0 = const()[name = tensor("op_3253_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_3253_axis_0 = const()[name = tensor("op_3253_axis_0"), val = tensor(1)]; + tensor var_3253_cast_0, tensor var_3253_cast_1 = split(axis = var_3253_axis_0, split_sizes = var_3253_split_sizes_0, x = var_3252_cast)[name = tensor("op_3253_cast")]; + tensor var_3255_mode_0 = const()[name = tensor("op_3255_mode_0"), val = tensor("EXACT")]; + tensor var_3255_cast = gelu(mode = var_3255_mode_0, x = var_3253_cast_1)[name = tensor("op_3255_cast")]; + tensor input_235_cast = mul(x = var_3253_cast_0, y = var_3255_cast)[name = 
tensor("input_235_cast")]; + tensor var_3259 = const()[name = tensor("op_3259"), val = tensor([1, 1])]; + tensor var_3261 = const()[name = tensor("op_3261"), val = tensor([1, 1])]; + tensor var_3263_pad_type_0 = const()[name = tensor("op_3263_pad_type_0"), val = tensor("custom")]; + tensor var_3263_pad_0 = const()[name = tensor("op_3263_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(382263360))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(387178624))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_0_ff_net_2_bias_to_fp16 = const()[name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_0_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(387178816)))]; + tensor var_3263_cast = conv(bias = unet_down_blocks_2_attentions_1_transformer_blocks_0_ff_net_2_bias_to_fp16, dilations = var_3261, groups = var_31, pad = var_3263_pad_0, pad_type = var_3263_pad_type_0, strides = var_3259, weight = unet_down_blocks_2_attentions_1_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized, x = input_235_cast)[name = tensor("op_3263_cast")]; + tensor inputs_91_cast = add(x = var_3263_cast, y = inputs_89_cast)[name = tensor("inputs_91_cast")]; + tensor var_3273 = const()[name = tensor("op_3273"), val = tensor([1])]; + tensor channels_mean_91_cast = reduce_mean(axes = var_3273, keep_dims = var_23, x = inputs_91_cast)[name = tensor("channels_mean_91_cast")]; + tensor zero_mean_91_cast = sub(x = inputs_91_cast, y = channels_mean_91_cast)[name = tensor("zero_mean_91_cast")]; + tensor zero_mean_sq_91_cast = mul(x = zero_mean_91_cast, y = zero_mean_91_cast)[name = tensor("zero_mean_sq_91_cast")]; + tensor var_3277 = const()[name = tensor("op_3277"), val = tensor([1])]; + tensor var_3278_cast = reduce_mean(axes = var_3277, keep_dims = var_23, x = zero_mean_sq_91_cast)[name = tensor("op_3278_cast")]; + tensor var_3279_to_fp16 = const()[name = tensor("op_3279_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3280_cast = add(x = var_3278_cast, y = var_3279_to_fp16)[name = tensor("op_3280_cast")]; + tensor denom_91_epsilon_0_to_fp16 = const()[name = tensor("denom_91_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_91_cast = rsqrt(epsilon = denom_91_epsilon_0_to_fp16, x = var_3280_cast)[name = tensor("denom_91_cast")]; + tensor out_91_cast = mul(x = zero_mean_91_cast, y = denom_91_cast)[name = tensor("out_91_cast")]; + tensor var_3284_to_fp16 = const()[name = tensor("op_3284_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(387181440)))]; + tensor var_3285_cast = add(x = out_91_cast, y = var_3284_to_fp16)[name = tensor("op_3285_cast")]; + tensor var_3287_to_fp16 = const()[name = tensor("op_3287_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(387184064)))]; + tensor hidden_states_143_cast = mul(x = var_3285_cast, y = var_3287_to_fp16)[name = tensor("hidden_states_143_cast")]; + tensor var_3294 = const()[name = tensor("op_3294"), val = tensor([1, 1])]; + tensor var_3296 = const()[name = tensor("op_3296"), val = tensor([1, 1])]; + tensor 
q_61_pad_type_0 = const()[name = tensor("q_61_pad_type_0"), val = tensor("custom")]; + tensor q_61_pad_0 = const()[name = tensor("q_61_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(387186688))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(388415552))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_61_cast = conv(dilations = var_3296, groups = var_31, pad = q_61_pad_0, pad_type = q_61_pad_type_0, strides = var_3294, weight = unet_down_blocks_2_attentions_1_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_143_cast)[name = tensor("q_61_cast")]; + tensor var_3300 = const()[name = tensor("op_3300"), val = tensor([1, 1])]; + tensor var_3302 = const()[name = tensor("op_3302"), val = tensor([1, 1])]; + tensor k_61_pad_type_0 = const()[name = tensor("k_61_pad_type_0"), val = tensor("custom")]; + tensor k_61_pad_0 = const()[name = tensor("k_61_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(388415744))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(389644608))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_61_cast = conv(dilations = var_3302, groups = var_31, pad = k_61_pad_0, pad_type = k_61_pad_type_0, strides = var_3300, weight = unet_down_blocks_2_attentions_1_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_143_cast)[name = tensor("k_61_cast")]; + tensor var_3306 = const()[name = tensor("op_3306"), val = tensor([1, 1])]; + tensor var_3308 = const()[name = tensor("op_3308"), val = tensor([1, 1])]; + tensor v_61_pad_type_0 = const()[name = tensor("v_61_pad_type_0"), val = tensor("custom")]; + tensor v_61_pad_0 = const()[name = tensor("v_61_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(389644800))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(390873664))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_61_cast = conv(dilations = var_3308, groups = var_31, pad = v_61_pad_0, pad_type = v_61_pad_type_0, strides = var_3306, weight = unet_down_blocks_2_attentions_1_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_143_cast)[name = tensor("v_61_cast")]; + tensor var_3312 = const()[name = tensor("op_3312"), val = tensor([2, 20, 64, -1])]; + tensor var_3313_cast = reshape(shape = var_3312, x = q_61_cast)[name = tensor("op_3313_cast")]; + tensor var_3314 = const()[name = tensor("op_3314"), val = tensor([2, 20, 64, -1])]; + tensor var_3315_cast = reshape(shape = var_3314, x = k_61_cast)[name = tensor("op_3315_cast")]; + tensor var_3316 = const()[name = 
tensor("op_3316"), val = tensor([2, 20, 64, -1])]; + tensor var_3317_cast = reshape(shape = var_3316, x = v_61_cast)[name = tensor("op_3317_cast")]; + tensor attn_weights_121_transpose_x_0 = const()[name = tensor("attn_weights_121_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_121_transpose_y_0 = const()[name = tensor("attn_weights_121_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_121_cast = matmul(transpose_x = attn_weights_121_transpose_x_0, transpose_y = attn_weights_121_transpose_y_0, x = var_3313_cast, y = var_3315_cast)[name = tensor("attn_weights_121_cast")]; + tensor attn_weights_123_cast = mul(x = attn_weights_121_cast, y = var_12_to_fp16)[name = tensor("attn_weights_123_cast")]; + tensor var_3321_cast = softmax(axis = var_18, x = attn_weights_123_cast)[name = tensor("op_3321_cast")]; + tensor attn_61_transpose_x_0 = const()[name = tensor("attn_61_transpose_x_0"), val = tensor(false)]; + tensor attn_61_transpose_y_0 = const()[name = tensor("attn_61_transpose_y_0"), val = tensor(true)]; + tensor attn_61_cast = matmul(transpose_x = attn_61_transpose_x_0, transpose_y = attn_61_transpose_y_0, x = var_3317_cast, y = var_3321_cast)[name = tensor("attn_61_cast")]; + tensor var_3325 = const()[name = tensor("op_3325"), val = tensor([2, 1280, 1, -1])]; + tensor input_237_cast = reshape(shape = var_3325, x = attn_61_cast)[name = tensor("input_237_cast")]; + tensor var_3330 = const()[name = tensor("op_3330"), val = tensor([1, 1])]; + tensor var_3332 = const()[name = tensor("op_3332"), val = tensor([1, 1])]; + tensor var_3334_pad_type_0 = const()[name = tensor("op_3334_pad_type_0"), val = tensor("custom")]; + tensor var_3334_pad_0 = const()[name = tensor("op_3334_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(390873856))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(392102720))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_1_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_1_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(392102912)))]; + tensor var_3334_cast = conv(bias = unet_down_blocks_2_attentions_1_transformer_blocks_1_attn1_to_out_0_bias_to_fp16, dilations = var_3332, groups = var_31, pad = var_3334_pad_0, pad_type = var_3334_pad_type_0, strides = var_3330, weight = unet_down_blocks_2_attentions_1_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized, x = input_237_cast)[name = tensor("op_3334_cast")]; + tensor inputs_93_cast = add(x = var_3334_cast, y = inputs_91_cast)[name = tensor("inputs_93_cast")]; + tensor var_3338 = const()[name = tensor("op_3338"), val = tensor([1])]; + tensor channels_mean_93_cast = reduce_mean(axes = var_3338, keep_dims = var_23, x = inputs_93_cast)[name = tensor("channels_mean_93_cast")]; + tensor zero_mean_93_cast = sub(x = inputs_93_cast, y = channels_mean_93_cast)[name = tensor("zero_mean_93_cast")]; + tensor zero_mean_sq_93_cast = mul(x = zero_mean_93_cast, y = zero_mean_93_cast)[name = tensor("zero_mean_sq_93_cast")]; + tensor var_3342 = const()[name = tensor("op_3342"), val = 
tensor([1])]; + tensor var_3343_cast = reduce_mean(axes = var_3342, keep_dims = var_23, x = zero_mean_sq_93_cast)[name = tensor("op_3343_cast")]; + tensor var_3344_to_fp16 = const()[name = tensor("op_3344_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3345_cast = add(x = var_3343_cast, y = var_3344_to_fp16)[name = tensor("op_3345_cast")]; + tensor denom_93_epsilon_0_to_fp16 = const()[name = tensor("denom_93_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_93_cast = rsqrt(epsilon = denom_93_epsilon_0_to_fp16, x = var_3345_cast)[name = tensor("denom_93_cast")]; + tensor out_93_cast = mul(x = zero_mean_93_cast, y = denom_93_cast)[name = tensor("out_93_cast")]; + tensor var_3349_to_fp16 = const()[name = tensor("op_3349_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(392105536)))]; + tensor var_3350_cast = add(x = out_93_cast, y = var_3349_to_fp16)[name = tensor("op_3350_cast")]; + tensor var_3352_to_fp16 = const()[name = tensor("op_3352_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(392108160)))]; + tensor hidden_states_145_cast = mul(x = var_3350_cast, y = var_3352_to_fp16)[name = tensor("hidden_states_145_cast")]; + tensor var_3359 = const()[name = tensor("op_3359"), val = tensor([1, 1])]; + tensor var_3361 = const()[name = tensor("op_3361"), val = tensor([1, 1])]; + tensor q_63_pad_type_0 = const()[name = tensor("q_63_pad_type_0"), val = tensor("custom")]; + tensor q_63_pad_0 = const()[name = tensor("q_63_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(392110784))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(393339648))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_63_cast = conv(dilations = var_3361, groups = var_31, pad = q_63_pad_0, pad_type = q_63_pad_type_0, strides = var_3359, weight = unet_down_blocks_2_attentions_1_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_145_cast)[name = tensor("q_63_cast")]; + tensor var_3365 = const()[name = tensor("op_3365"), val = tensor([1, 1])]; + tensor var_3367 = const()[name = tensor("op_3367"), val = tensor([1, 1])]; + tensor k_63_pad_type_0 = const()[name = tensor("k_63_pad_type_0"), val = tensor("custom")]; + tensor k_63_pad_0 = const()[name = tensor("k_63_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(393339840))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(395305984))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_63_cast = conv(dilations = var_3367, groups = var_31, pad = k_63_pad_0, pad_type = k_63_pad_type_0, strides = var_3365, weight = unet_down_blocks_2_attentions_1_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_63_cast")]; + tensor var_3371 = const()[name = tensor("op_3371"), val = tensor([1, 1])]; + tensor var_3373 = 
const()[name = tensor("op_3373"), val = tensor([1, 1])]; + tensor v_63_pad_type_0 = const()[name = tensor("v_63_pad_type_0"), val = tensor("custom")]; + tensor v_63_pad_0 = const()[name = tensor("v_63_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(395306176))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(397272320))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_63_cast = conv(dilations = var_3373, groups = var_31, pad = v_63_pad_0, pad_type = v_63_pad_type_0, strides = var_3371, weight = unet_down_blocks_2_attentions_1_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_63_cast")]; + tensor var_3377 = const()[name = tensor("op_3377"), val = tensor([2, 20, 64, -1])]; + tensor var_3378_cast = reshape(shape = var_3377, x = q_63_cast)[name = tensor("op_3378_cast")]; + tensor var_3379 = const()[name = tensor("op_3379"), val = tensor([2, 20, 64, -1])]; + tensor var_3380_cast = reshape(shape = var_3379, x = k_63_cast)[name = tensor("op_3380_cast")]; + tensor var_3381 = const()[name = tensor("op_3381"), val = tensor([2, 20, 64, -1])]; + tensor var_3382_cast = reshape(shape = var_3381, x = v_63_cast)[name = tensor("op_3382_cast")]; + tensor attn_weights_125_transpose_x_0 = const()[name = tensor("attn_weights_125_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_125_transpose_y_0 = const()[name = tensor("attn_weights_125_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_125_cast = matmul(transpose_x = attn_weights_125_transpose_x_0, transpose_y = attn_weights_125_transpose_y_0, x = var_3378_cast, y = var_3380_cast)[name = tensor("attn_weights_125_cast")]; + tensor attn_weights_127_cast = mul(x = attn_weights_125_cast, y = var_12_to_fp16)[name = tensor("attn_weights_127_cast")]; + tensor var_3386_cast = softmax(axis = var_18, x = attn_weights_127_cast)[name = tensor("op_3386_cast")]; + tensor attn_63_transpose_x_0 = const()[name = tensor("attn_63_transpose_x_0"), val = tensor(false)]; + tensor attn_63_transpose_y_0 = const()[name = tensor("attn_63_transpose_y_0"), val = tensor(true)]; + tensor attn_63_cast = matmul(transpose_x = attn_63_transpose_x_0, transpose_y = attn_63_transpose_y_0, x = var_3382_cast, y = var_3386_cast)[name = tensor("attn_63_cast")]; + tensor var_3390 = const()[name = tensor("op_3390"), val = tensor([2, 1280, 1, -1])]; + tensor input_239_cast = reshape(shape = var_3390, x = attn_63_cast)[name = tensor("input_239_cast")]; + tensor var_3395 = const()[name = tensor("op_3395"), val = tensor([1, 1])]; + tensor var_3397 = const()[name = tensor("op_3397"), val = tensor([1, 1])]; + tensor var_3399_pad_type_0 = const()[name = tensor("op_3399_pad_type_0"), val = tensor("custom")]; + tensor var_3399_pad_0 = const()[name = tensor("op_3399_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(397272512))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(398501376))), name = 
tensor("unet_down_blocks_2_attentions_1_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_1_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_1_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(398501568)))]; + tensor var_3399_cast = conv(bias = unet_down_blocks_2_attentions_1_transformer_blocks_1_attn2_to_out_0_bias_to_fp16, dilations = var_3397, groups = var_31, pad = var_3399_pad_0, pad_type = var_3399_pad_type_0, strides = var_3395, weight = unet_down_blocks_2_attentions_1_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized, x = input_239_cast)[name = tensor("op_3399_cast")]; + tensor inputs_95_cast = add(x = var_3399_cast, y = inputs_93_cast)[name = tensor("inputs_95_cast")]; + tensor var_3403 = const()[name = tensor("op_3403"), val = tensor([1])]; + tensor channels_mean_95_cast = reduce_mean(axes = var_3403, keep_dims = var_23, x = inputs_95_cast)[name = tensor("channels_mean_95_cast")]; + tensor zero_mean_95_cast = sub(x = inputs_95_cast, y = channels_mean_95_cast)[name = tensor("zero_mean_95_cast")]; + tensor zero_mean_sq_95_cast = mul(x = zero_mean_95_cast, y = zero_mean_95_cast)[name = tensor("zero_mean_sq_95_cast")]; + tensor var_3407 = const()[name = tensor("op_3407"), val = tensor([1])]; + tensor var_3408_cast = reduce_mean(axes = var_3407, keep_dims = var_23, x = zero_mean_sq_95_cast)[name = tensor("op_3408_cast")]; + tensor var_3409_to_fp16 = const()[name = tensor("op_3409_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3410_cast = add(x = var_3408_cast, y = var_3409_to_fp16)[name = tensor("op_3410_cast")]; + tensor denom_95_epsilon_0_to_fp16 = const()[name = tensor("denom_95_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_95_cast = rsqrt(epsilon = denom_95_epsilon_0_to_fp16, x = var_3410_cast)[name = tensor("denom_95_cast")]; + tensor out_95_cast = mul(x = zero_mean_95_cast, y = denom_95_cast)[name = tensor("out_95_cast")]; + tensor var_3414_to_fp16 = const()[name = tensor("op_3414_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(398504192)))]; + tensor var_3415_cast = add(x = out_95_cast, y = var_3414_to_fp16)[name = tensor("op_3415_cast")]; + tensor var_3417_to_fp16 = const()[name = tensor("op_3417_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(398506816)))]; + tensor input_241_cast = mul(x = var_3415_cast, y = var_3417_to_fp16)[name = tensor("input_241_cast")]; + tensor var_3425 = const()[name = tensor("op_3425"), val = tensor([1, 1])]; + tensor var_3427 = const()[name = tensor("op_3427"), val = tensor([1, 1])]; + tensor var_3429_pad_type_0 = const()[name = tensor("op_3429_pad_type_0"), val = tensor("custom")]; + tensor var_3429_pad_0 = const()[name = tensor("op_3429_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(398509440))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(408339904))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor 
unet_down_blocks_2_attentions_1_transformer_blocks_1_ff_net_0_proj_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(408340096))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(408347840))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_1_ff_net_0_proj_bias_to_fp16_palettized"), shape = tensor([10240])]; + tensor var_3429_cast = conv(bias = unet_down_blocks_2_attentions_1_transformer_blocks_1_ff_net_0_proj_bias_to_fp16_palettized, dilations = var_3427, groups = var_31, pad = var_3429_pad_0, pad_type = var_3429_pad_type_0, strides = var_3425, weight = unet_down_blocks_2_attentions_1_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized, x = input_241_cast)[name = tensor("op_3429_cast")]; + tensor var_3430_split_sizes_0 = const()[name = tensor("op_3430_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_3430_axis_0 = const()[name = tensor("op_3430_axis_0"), val = tensor(1)]; + tensor var_3430_cast_0, tensor var_3430_cast_1 = split(axis = var_3430_axis_0, split_sizes = var_3430_split_sizes_0, x = var_3429_cast)[name = tensor("op_3430_cast")]; + tensor var_3432_mode_0 = const()[name = tensor("op_3432_mode_0"), val = tensor("EXACT")]; + tensor var_3432_cast = gelu(mode = var_3432_mode_0, x = var_3430_cast_1)[name = tensor("op_3432_cast")]; + tensor input_243_cast = mul(x = var_3430_cast_0, y = var_3432_cast)[name = tensor("input_243_cast")]; + tensor var_3436 = const()[name = tensor("op_3436"), val = tensor([1, 1])]; + tensor var_3438 = const()[name = tensor("op_3438"), val = tensor([1, 1])]; + tensor var_3440_pad_type_0 = const()[name = tensor("op_3440_pad_type_0"), val = tensor("custom")]; + tensor var_3440_pad_0 = const()[name = tensor("op_3440_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(408348032))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(413263296))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_1_ff_net_2_bias_to_fp16 = const()[name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_1_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(413263488)))]; + tensor var_3440_cast = conv(bias = unet_down_blocks_2_attentions_1_transformer_blocks_1_ff_net_2_bias_to_fp16, dilations = var_3438, groups = var_31, pad = var_3440_pad_0, pad_type = var_3440_pad_type_0, strides = var_3436, weight = unet_down_blocks_2_attentions_1_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized, x = input_243_cast)[name = tensor("op_3440_cast")]; + tensor inputs_97_cast = add(x = var_3440_cast, y = inputs_95_cast)[name = tensor("inputs_97_cast")]; + tensor var_3450 = const()[name = tensor("op_3450"), val = tensor([1])]; + tensor channels_mean_97_cast = reduce_mean(axes = var_3450, keep_dims = var_23, x = inputs_97_cast)[name = tensor("channels_mean_97_cast")]; + tensor zero_mean_97_cast = sub(x = inputs_97_cast, y = channels_mean_97_cast)[name = tensor("zero_mean_97_cast")]; + tensor zero_mean_sq_97_cast = mul(x = zero_mean_97_cast, y = zero_mean_97_cast)[name = 
tensor("zero_mean_sq_97_cast")]; + tensor var_3454 = const()[name = tensor("op_3454"), val = tensor([1])]; + tensor var_3455_cast = reduce_mean(axes = var_3454, keep_dims = var_23, x = zero_mean_sq_97_cast)[name = tensor("op_3455_cast")]; + tensor var_3456_to_fp16 = const()[name = tensor("op_3456_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3457_cast = add(x = var_3455_cast, y = var_3456_to_fp16)[name = tensor("op_3457_cast")]; + tensor denom_97_epsilon_0_to_fp16 = const()[name = tensor("denom_97_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_97_cast = rsqrt(epsilon = denom_97_epsilon_0_to_fp16, x = var_3457_cast)[name = tensor("denom_97_cast")]; + tensor out_97_cast = mul(x = zero_mean_97_cast, y = denom_97_cast)[name = tensor("out_97_cast")]; + tensor var_3461_to_fp16 = const()[name = tensor("op_3461_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(413266112)))]; + tensor var_3462_cast = add(x = out_97_cast, y = var_3461_to_fp16)[name = tensor("op_3462_cast")]; + tensor var_3464_to_fp16 = const()[name = tensor("op_3464_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(413268736)))]; + tensor hidden_states_149_cast = mul(x = var_3462_cast, y = var_3464_to_fp16)[name = tensor("hidden_states_149_cast")]; + tensor var_3471 = const()[name = tensor("op_3471"), val = tensor([1, 1])]; + tensor var_3473 = const()[name = tensor("op_3473"), val = tensor([1, 1])]; + tensor q_65_pad_type_0 = const()[name = tensor("q_65_pad_type_0"), val = tensor("custom")]; + tensor q_65_pad_0 = const()[name = tensor("q_65_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(413271360))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(414500224))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_65_cast = conv(dilations = var_3473, groups = var_31, pad = q_65_pad_0, pad_type = q_65_pad_type_0, strides = var_3471, weight = unet_down_blocks_2_attentions_1_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_149_cast)[name = tensor("q_65_cast")]; + tensor var_3477 = const()[name = tensor("op_3477"), val = tensor([1, 1])]; + tensor var_3479 = const()[name = tensor("op_3479"), val = tensor([1, 1])]; + tensor k_65_pad_type_0 = const()[name = tensor("k_65_pad_type_0"), val = tensor("custom")]; + tensor k_65_pad_0 = const()[name = tensor("k_65_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(414500416))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(415729280))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_65_cast = conv(dilations = var_3479, groups = var_31, pad = k_65_pad_0, pad_type = k_65_pad_type_0, strides = var_3477, weight = unet_down_blocks_2_attentions_1_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_149_cast)[name = tensor("k_65_cast")]; + tensor 
var_3483 = const()[name = tensor("op_3483"), val = tensor([1, 1])]; + tensor var_3485 = const()[name = tensor("op_3485"), val = tensor([1, 1])]; + tensor v_65_pad_type_0 = const()[name = tensor("v_65_pad_type_0"), val = tensor("custom")]; + tensor v_65_pad_0 = const()[name = tensor("v_65_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(415729472))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(416958336))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_65_cast = conv(dilations = var_3485, groups = var_31, pad = v_65_pad_0, pad_type = v_65_pad_type_0, strides = var_3483, weight = unet_down_blocks_2_attentions_1_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_149_cast)[name = tensor("v_65_cast")]; + tensor var_3489 = const()[name = tensor("op_3489"), val = tensor([2, 20, 64, -1])]; + tensor var_3490_cast = reshape(shape = var_3489, x = q_65_cast)[name = tensor("op_3490_cast")]; + tensor var_3491 = const()[name = tensor("op_3491"), val = tensor([2, 20, 64, -1])]; + tensor var_3492_cast = reshape(shape = var_3491, x = k_65_cast)[name = tensor("op_3492_cast")]; + tensor var_3493 = const()[name = tensor("op_3493"), val = tensor([2, 20, 64, -1])]; + tensor var_3494_cast = reshape(shape = var_3493, x = v_65_cast)[name = tensor("op_3494_cast")]; + tensor attn_weights_129_transpose_x_0 = const()[name = tensor("attn_weights_129_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_129_transpose_y_0 = const()[name = tensor("attn_weights_129_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_129_cast = matmul(transpose_x = attn_weights_129_transpose_x_0, transpose_y = attn_weights_129_transpose_y_0, x = var_3490_cast, y = var_3492_cast)[name = tensor("attn_weights_129_cast")]; + tensor attn_weights_131_cast = mul(x = attn_weights_129_cast, y = var_12_to_fp16)[name = tensor("attn_weights_131_cast")]; + tensor var_3498_cast = softmax(axis = var_18, x = attn_weights_131_cast)[name = tensor("op_3498_cast")]; + tensor attn_65_transpose_x_0 = const()[name = tensor("attn_65_transpose_x_0"), val = tensor(false)]; + tensor attn_65_transpose_y_0 = const()[name = tensor("attn_65_transpose_y_0"), val = tensor(true)]; + tensor attn_65_cast = matmul(transpose_x = attn_65_transpose_x_0, transpose_y = attn_65_transpose_y_0, x = var_3494_cast, y = var_3498_cast)[name = tensor("attn_65_cast")]; + tensor var_3502 = const()[name = tensor("op_3502"), val = tensor([2, 1280, 1, -1])]; + tensor input_245_cast = reshape(shape = var_3502, x = attn_65_cast)[name = tensor("input_245_cast")]; + tensor var_3507 = const()[name = tensor("op_3507"), val = tensor([1, 1])]; + tensor var_3509 = const()[name = tensor("op_3509"), val = tensor([1, 1])]; + tensor var_3511_pad_type_0 = const()[name = tensor("op_3511_pad_type_0"), val = tensor("custom")]; + tensor var_3511_pad_0 = const()[name = tensor("op_3511_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(416958528))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), 
offset = tensor(418187392))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_2_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_2_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(418187584)))]; + tensor var_3511_cast = conv(bias = unet_down_blocks_2_attentions_1_transformer_blocks_2_attn1_to_out_0_bias_to_fp16, dilations = var_3509, groups = var_31, pad = var_3511_pad_0, pad_type = var_3511_pad_type_0, strides = var_3507, weight = unet_down_blocks_2_attentions_1_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized, x = input_245_cast)[name = tensor("op_3511_cast")]; + tensor inputs_99_cast = add(x = var_3511_cast, y = inputs_97_cast)[name = tensor("inputs_99_cast")]; + tensor var_3515 = const()[name = tensor("op_3515"), val = tensor([1])]; + tensor channels_mean_99_cast = reduce_mean(axes = var_3515, keep_dims = var_23, x = inputs_99_cast)[name = tensor("channels_mean_99_cast")]; + tensor zero_mean_99_cast = sub(x = inputs_99_cast, y = channels_mean_99_cast)[name = tensor("zero_mean_99_cast")]; + tensor zero_mean_sq_99_cast = mul(x = zero_mean_99_cast, y = zero_mean_99_cast)[name = tensor("zero_mean_sq_99_cast")]; + tensor var_3519 = const()[name = tensor("op_3519"), val = tensor([1])]; + tensor var_3520_cast = reduce_mean(axes = var_3519, keep_dims = var_23, x = zero_mean_sq_99_cast)[name = tensor("op_3520_cast")]; + tensor var_3521_to_fp16 = const()[name = tensor("op_3521_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3522_cast = add(x = var_3520_cast, y = var_3521_to_fp16)[name = tensor("op_3522_cast")]; + tensor denom_99_epsilon_0_to_fp16 = const()[name = tensor("denom_99_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_99_cast = rsqrt(epsilon = denom_99_epsilon_0_to_fp16, x = var_3522_cast)[name = tensor("denom_99_cast")]; + tensor out_99_cast = mul(x = zero_mean_99_cast, y = denom_99_cast)[name = tensor("out_99_cast")]; + tensor var_3526_to_fp16 = const()[name = tensor("op_3526_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(418190208)))]; + tensor var_3527_cast = add(x = out_99_cast, y = var_3526_to_fp16)[name = tensor("op_3527_cast")]; + tensor var_3529_to_fp16 = const()[name = tensor("op_3529_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(418192832)))]; + tensor hidden_states_151_cast = mul(x = var_3527_cast, y = var_3529_to_fp16)[name = tensor("hidden_states_151_cast")]; + tensor var_3536 = const()[name = tensor("op_3536"), val = tensor([1, 1])]; + tensor var_3538 = const()[name = tensor("op_3538"), val = tensor([1, 1])]; + tensor q_67_pad_type_0 = const()[name = tensor("q_67_pad_type_0"), val = tensor("custom")]; + tensor q_67_pad_0 = const()[name = tensor("q_67_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(418195456))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(419424320))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor 
q_67_cast = conv(dilations = var_3538, groups = var_31, pad = q_67_pad_0, pad_type = q_67_pad_type_0, strides = var_3536, weight = unet_down_blocks_2_attentions_1_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_151_cast)[name = tensor("q_67_cast")]; + tensor var_3542 = const()[name = tensor("op_3542"), val = tensor([1, 1])]; + tensor var_3544 = const()[name = tensor("op_3544"), val = tensor([1, 1])]; + tensor k_67_pad_type_0 = const()[name = tensor("k_67_pad_type_0"), val = tensor("custom")]; + tensor k_67_pad_0 = const()[name = tensor("k_67_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(419424512))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(421390656))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_67_cast = conv(dilations = var_3544, groups = var_31, pad = k_67_pad_0, pad_type = k_67_pad_type_0, strides = var_3542, weight = unet_down_blocks_2_attentions_1_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_67_cast")]; + tensor var_3548 = const()[name = tensor("op_3548"), val = tensor([1, 1])]; + tensor var_3550 = const()[name = tensor("op_3550"), val = tensor([1, 1])]; + tensor v_67_pad_type_0 = const()[name = tensor("v_67_pad_type_0"), val = tensor("custom")]; + tensor v_67_pad_0 = const()[name = tensor("v_67_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(421390848))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(423356992))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_67_cast = conv(dilations = var_3550, groups = var_31, pad = v_67_pad_0, pad_type = v_67_pad_type_0, strides = var_3548, weight = unet_down_blocks_2_attentions_1_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_67_cast")]; + tensor var_3554 = const()[name = tensor("op_3554"), val = tensor([2, 20, 64, -1])]; + tensor var_3555_cast = reshape(shape = var_3554, x = q_67_cast)[name = tensor("op_3555_cast")]; + tensor var_3556 = const()[name = tensor("op_3556"), val = tensor([2, 20, 64, -1])]; + tensor var_3557_cast = reshape(shape = var_3556, x = k_67_cast)[name = tensor("op_3557_cast")]; + tensor var_3558 = const()[name = tensor("op_3558"), val = tensor([2, 20, 64, -1])]; + tensor var_3559_cast = reshape(shape = var_3558, x = v_67_cast)[name = tensor("op_3559_cast")]; + tensor attn_weights_133_transpose_x_0 = const()[name = tensor("attn_weights_133_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_133_transpose_y_0 = const()[name = tensor("attn_weights_133_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_133_cast = matmul(transpose_x = attn_weights_133_transpose_x_0, transpose_y = attn_weights_133_transpose_y_0, x = var_3555_cast, y = var_3557_cast)[name = tensor("attn_weights_133_cast")]; + tensor attn_weights_135_cast = mul(x = attn_weights_133_cast, y = 
var_12_to_fp16)[name = tensor("attn_weights_135_cast")]; + tensor var_3563_cast = softmax(axis = var_18, x = attn_weights_135_cast)[name = tensor("op_3563_cast")]; + tensor attn_67_transpose_x_0 = const()[name = tensor("attn_67_transpose_x_0"), val = tensor(false)]; + tensor attn_67_transpose_y_0 = const()[name = tensor("attn_67_transpose_y_0"), val = tensor(true)]; + tensor attn_67_cast = matmul(transpose_x = attn_67_transpose_x_0, transpose_y = attn_67_transpose_y_0, x = var_3559_cast, y = var_3563_cast)[name = tensor("attn_67_cast")]; + tensor var_3567 = const()[name = tensor("op_3567"), val = tensor([2, 1280, 1, -1])]; + tensor input_247_cast = reshape(shape = var_3567, x = attn_67_cast)[name = tensor("input_247_cast")]; + tensor var_3572 = const()[name = tensor("op_3572"), val = tensor([1, 1])]; + tensor var_3574 = const()[name = tensor("op_3574"), val = tensor([1, 1])]; + tensor var_3576_pad_type_0 = const()[name = tensor("op_3576_pad_type_0"), val = tensor("custom")]; + tensor var_3576_pad_0 = const()[name = tensor("op_3576_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(423357184))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(424586048))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_2_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_2_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(424586240)))]; + tensor var_3576_cast = conv(bias = unet_down_blocks_2_attentions_1_transformer_blocks_2_attn2_to_out_0_bias_to_fp16, dilations = var_3574, groups = var_31, pad = var_3576_pad_0, pad_type = var_3576_pad_type_0, strides = var_3572, weight = unet_down_blocks_2_attentions_1_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized, x = input_247_cast)[name = tensor("op_3576_cast")]; + tensor inputs_101_cast = add(x = var_3576_cast, y = inputs_99_cast)[name = tensor("inputs_101_cast")]; + tensor var_3580 = const()[name = tensor("op_3580"), val = tensor([1])]; + tensor channels_mean_101_cast = reduce_mean(axes = var_3580, keep_dims = var_23, x = inputs_101_cast)[name = tensor("channels_mean_101_cast")]; + tensor zero_mean_101_cast = sub(x = inputs_101_cast, y = channels_mean_101_cast)[name = tensor("zero_mean_101_cast")]; + tensor zero_mean_sq_101_cast = mul(x = zero_mean_101_cast, y = zero_mean_101_cast)[name = tensor("zero_mean_sq_101_cast")]; + tensor var_3584 = const()[name = tensor("op_3584"), val = tensor([1])]; + tensor var_3585_cast = reduce_mean(axes = var_3584, keep_dims = var_23, x = zero_mean_sq_101_cast)[name = tensor("op_3585_cast")]; + tensor var_3586_to_fp16 = const()[name = tensor("op_3586_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3587_cast = add(x = var_3585_cast, y = var_3586_to_fp16)[name = tensor("op_3587_cast")]; + tensor denom_101_epsilon_0_to_fp16 = const()[name = tensor("denom_101_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_101_cast = rsqrt(epsilon = denom_101_epsilon_0_to_fp16, x = var_3587_cast)[name = tensor("denom_101_cast")]; + tensor out_101_cast = mul(x = zero_mean_101_cast, y = denom_101_cast)[name 
= tensor("out_101_cast")]; + tensor var_3591_to_fp16 = const()[name = tensor("op_3591_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(424588864)))]; + tensor var_3592_cast = add(x = out_101_cast, y = var_3591_to_fp16)[name = tensor("op_3592_cast")]; + tensor var_3594_to_fp16 = const()[name = tensor("op_3594_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(424591488)))]; + tensor input_249_cast = mul(x = var_3592_cast, y = var_3594_to_fp16)[name = tensor("input_249_cast")]; + tensor var_3602 = const()[name = tensor("op_3602"), val = tensor([1, 1])]; + tensor var_3604 = const()[name = tensor("op_3604"), val = tensor([1, 1])]; + tensor var_3606_pad_type_0 = const()[name = tensor("op_3606_pad_type_0"), val = tensor("custom")]; + tensor var_3606_pad_0 = const()[name = tensor("op_3606_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(424594112))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(434424576))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_2_ff_net_0_proj_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(434424768))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(434432512))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_2_ff_net_0_proj_bias_to_fp16_palettized"), shape = tensor([10240])]; + tensor var_3606_cast = conv(bias = unet_down_blocks_2_attentions_1_transformer_blocks_2_ff_net_0_proj_bias_to_fp16_palettized, dilations = var_3604, groups = var_31, pad = var_3606_pad_0, pad_type = var_3606_pad_type_0, strides = var_3602, weight = unet_down_blocks_2_attentions_1_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized, x = input_249_cast)[name = tensor("op_3606_cast")]; + tensor var_3607_split_sizes_0 = const()[name = tensor("op_3607_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_3607_axis_0 = const()[name = tensor("op_3607_axis_0"), val = tensor(1)]; + tensor var_3607_cast_0, tensor var_3607_cast_1 = split(axis = var_3607_axis_0, split_sizes = var_3607_split_sizes_0, x = var_3606_cast)[name = tensor("op_3607_cast")]; + tensor var_3609_mode_0 = const()[name = tensor("op_3609_mode_0"), val = tensor("EXACT")]; + tensor var_3609_cast = gelu(mode = var_3609_mode_0, x = var_3607_cast_1)[name = tensor("op_3609_cast")]; + tensor input_251_cast = mul(x = var_3607_cast_0, y = var_3609_cast)[name = tensor("input_251_cast")]; + tensor var_3613 = const()[name = tensor("op_3613"), val = tensor([1, 1])]; + tensor var_3615 = const()[name = tensor("op_3615"), val = tensor([1, 1])]; + tensor var_3617_pad_type_0 = const()[name = tensor("op_3617_pad_type_0"), val = tensor("custom")]; + tensor var_3617_pad_0 = const()[name = tensor("op_3617_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(434432704))), lut = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(439347968))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_2_ff_net_2_bias_to_fp16 = const()[name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_2_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(439348160)))]; + tensor var_3617_cast = conv(bias = unet_down_blocks_2_attentions_1_transformer_blocks_2_ff_net_2_bias_to_fp16, dilations = var_3615, groups = var_31, pad = var_3617_pad_0, pad_type = var_3617_pad_type_0, strides = var_3613, weight = unet_down_blocks_2_attentions_1_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized, x = input_251_cast)[name = tensor("op_3617_cast")]; + tensor inputs_103_cast = add(x = var_3617_cast, y = inputs_101_cast)[name = tensor("inputs_103_cast")]; + tensor var_3627 = const()[name = tensor("op_3627"), val = tensor([1])]; + tensor channels_mean_103_cast = reduce_mean(axes = var_3627, keep_dims = var_23, x = inputs_103_cast)[name = tensor("channels_mean_103_cast")]; + tensor zero_mean_103_cast = sub(x = inputs_103_cast, y = channels_mean_103_cast)[name = tensor("zero_mean_103_cast")]; + tensor zero_mean_sq_103_cast = mul(x = zero_mean_103_cast, y = zero_mean_103_cast)[name = tensor("zero_mean_sq_103_cast")]; + tensor var_3631 = const()[name = tensor("op_3631"), val = tensor([1])]; + tensor var_3632_cast = reduce_mean(axes = var_3631, keep_dims = var_23, x = zero_mean_sq_103_cast)[name = tensor("op_3632_cast")]; + tensor var_3633_to_fp16 = const()[name = tensor("op_3633_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3634_cast = add(x = var_3632_cast, y = var_3633_to_fp16)[name = tensor("op_3634_cast")]; + tensor denom_103_epsilon_0_to_fp16 = const()[name = tensor("denom_103_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_103_cast = rsqrt(epsilon = denom_103_epsilon_0_to_fp16, x = var_3634_cast)[name = tensor("denom_103_cast")]; + tensor out_103_cast = mul(x = zero_mean_103_cast, y = denom_103_cast)[name = tensor("out_103_cast")]; + tensor var_3638_to_fp16 = const()[name = tensor("op_3638_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(439350784)))]; + tensor var_3639_cast = add(x = out_103_cast, y = var_3638_to_fp16)[name = tensor("op_3639_cast")]; + tensor var_3641_to_fp16 = const()[name = tensor("op_3641_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(439353408)))]; + tensor hidden_states_155_cast = mul(x = var_3639_cast, y = var_3641_to_fp16)[name = tensor("hidden_states_155_cast")]; + tensor var_3648 = const()[name = tensor("op_3648"), val = tensor([1, 1])]; + tensor var_3650 = const()[name = tensor("op_3650"), val = tensor([1, 1])]; + tensor q_69_pad_type_0 = const()[name = tensor("q_69_pad_type_0"), val = tensor("custom")]; + tensor q_69_pad_0 = const()[name = tensor("q_69_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(439356032))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(440584896))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized"), shape = 
tensor([1280, 1280, 1, 1])]; + tensor q_69_cast = conv(dilations = var_3650, groups = var_31, pad = q_69_pad_0, pad_type = q_69_pad_type_0, strides = var_3648, weight = unet_down_blocks_2_attentions_1_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_155_cast)[name = tensor("q_69_cast")]; + tensor var_3654 = const()[name = tensor("op_3654"), val = tensor([1, 1])]; + tensor var_3656 = const()[name = tensor("op_3656"), val = tensor([1, 1])]; + tensor k_69_pad_type_0 = const()[name = tensor("k_69_pad_type_0"), val = tensor("custom")]; + tensor k_69_pad_0 = const()[name = tensor("k_69_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(440585088))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(441813952))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_69_cast = conv(dilations = var_3656, groups = var_31, pad = k_69_pad_0, pad_type = k_69_pad_type_0, strides = var_3654, weight = unet_down_blocks_2_attentions_1_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_155_cast)[name = tensor("k_69_cast")]; + tensor var_3660 = const()[name = tensor("op_3660"), val = tensor([1, 1])]; + tensor var_3662 = const()[name = tensor("op_3662"), val = tensor([1, 1])]; + tensor v_69_pad_type_0 = const()[name = tensor("v_69_pad_type_0"), val = tensor("custom")]; + tensor v_69_pad_0 = const()[name = tensor("v_69_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(441814144))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(443043008))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_69_cast = conv(dilations = var_3662, groups = var_31, pad = v_69_pad_0, pad_type = v_69_pad_type_0, strides = var_3660, weight = unet_down_blocks_2_attentions_1_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_155_cast)[name = tensor("v_69_cast")]; + tensor var_3666 = const()[name = tensor("op_3666"), val = tensor([2, 20, 64, -1])]; + tensor var_3667_cast = reshape(shape = var_3666, x = q_69_cast)[name = tensor("op_3667_cast")]; + tensor var_3668 = const()[name = tensor("op_3668"), val = tensor([2, 20, 64, -1])]; + tensor var_3669_cast = reshape(shape = var_3668, x = k_69_cast)[name = tensor("op_3669_cast")]; + tensor var_3670 = const()[name = tensor("op_3670"), val = tensor([2, 20, 64, -1])]; + tensor var_3671_cast = reshape(shape = var_3670, x = v_69_cast)[name = tensor("op_3671_cast")]; + tensor attn_weights_137_transpose_x_0 = const()[name = tensor("attn_weights_137_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_137_transpose_y_0 = const()[name = tensor("attn_weights_137_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_137_cast = matmul(transpose_x = attn_weights_137_transpose_x_0, transpose_y = attn_weights_137_transpose_y_0, x = var_3667_cast, y = var_3669_cast)[name = tensor("attn_weights_137_cast")]; + tensor attn_weights_139_cast 
= mul(x = attn_weights_137_cast, y = var_12_to_fp16)[name = tensor("attn_weights_139_cast")]; + tensor var_3675_cast = softmax(axis = var_18, x = attn_weights_139_cast)[name = tensor("op_3675_cast")]; + tensor attn_69_transpose_x_0 = const()[name = tensor("attn_69_transpose_x_0"), val = tensor(false)]; + tensor attn_69_transpose_y_0 = const()[name = tensor("attn_69_transpose_y_0"), val = tensor(true)]; + tensor attn_69_cast = matmul(transpose_x = attn_69_transpose_x_0, transpose_y = attn_69_transpose_y_0, x = var_3671_cast, y = var_3675_cast)[name = tensor("attn_69_cast")]; + tensor var_3679 = const()[name = tensor("op_3679"), val = tensor([2, 1280, 1, -1])]; + tensor input_253_cast = reshape(shape = var_3679, x = attn_69_cast)[name = tensor("input_253_cast")]; + tensor var_3684 = const()[name = tensor("op_3684"), val = tensor([1, 1])]; + tensor var_3686 = const()[name = tensor("op_3686"), val = tensor([1, 1])]; + tensor var_3688_pad_type_0 = const()[name = tensor("op_3688_pad_type_0"), val = tensor("custom")]; + tensor var_3688_pad_0 = const()[name = tensor("op_3688_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(443043200))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(444272064))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_3_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_3_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(444272256)))]; + tensor var_3688_cast = conv(bias = unet_down_blocks_2_attentions_1_transformer_blocks_3_attn1_to_out_0_bias_to_fp16, dilations = var_3686, groups = var_31, pad = var_3688_pad_0, pad_type = var_3688_pad_type_0, strides = var_3684, weight = unet_down_blocks_2_attentions_1_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized, x = input_253_cast)[name = tensor("op_3688_cast")]; + tensor inputs_105_cast = add(x = var_3688_cast, y = inputs_103_cast)[name = tensor("inputs_105_cast")]; + tensor var_3692 = const()[name = tensor("op_3692"), val = tensor([1])]; + tensor channels_mean_105_cast = reduce_mean(axes = var_3692, keep_dims = var_23, x = inputs_105_cast)[name = tensor("channels_mean_105_cast")]; + tensor zero_mean_105_cast = sub(x = inputs_105_cast, y = channels_mean_105_cast)[name = tensor("zero_mean_105_cast")]; + tensor zero_mean_sq_105_cast = mul(x = zero_mean_105_cast, y = zero_mean_105_cast)[name = tensor("zero_mean_sq_105_cast")]; + tensor var_3696 = const()[name = tensor("op_3696"), val = tensor([1])]; + tensor var_3697_cast = reduce_mean(axes = var_3696, keep_dims = var_23, x = zero_mean_sq_105_cast)[name = tensor("op_3697_cast")]; + tensor var_3698_to_fp16 = const()[name = tensor("op_3698_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3699_cast = add(x = var_3697_cast, y = var_3698_to_fp16)[name = tensor("op_3699_cast")]; + tensor denom_105_epsilon_0_to_fp16 = const()[name = tensor("denom_105_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_105_cast = rsqrt(epsilon = denom_105_epsilon_0_to_fp16, x = var_3699_cast)[name = tensor("denom_105_cast")]; + tensor out_105_cast = mul(x = 
zero_mean_105_cast, y = denom_105_cast)[name = tensor("out_105_cast")]; + tensor var_3703_to_fp16 = const()[name = tensor("op_3703_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(444274880)))]; + tensor var_3704_cast = add(x = out_105_cast, y = var_3703_to_fp16)[name = tensor("op_3704_cast")]; + tensor var_3706_to_fp16 = const()[name = tensor("op_3706_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(444277504)))]; + tensor hidden_states_157_cast = mul(x = var_3704_cast, y = var_3706_to_fp16)[name = tensor("hidden_states_157_cast")]; + tensor var_3713 = const()[name = tensor("op_3713"), val = tensor([1, 1])]; + tensor var_3715 = const()[name = tensor("op_3715"), val = tensor([1, 1])]; + tensor q_71_pad_type_0 = const()[name = tensor("q_71_pad_type_0"), val = tensor("custom")]; + tensor q_71_pad_0 = const()[name = tensor("q_71_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(444280128))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(445508992))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_71_cast = conv(dilations = var_3715, groups = var_31, pad = q_71_pad_0, pad_type = q_71_pad_type_0, strides = var_3713, weight = unet_down_blocks_2_attentions_1_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_157_cast)[name = tensor("q_71_cast")]; + tensor var_3719 = const()[name = tensor("op_3719"), val = tensor([1, 1])]; + tensor var_3721 = const()[name = tensor("op_3721"), val = tensor([1, 1])]; + tensor k_71_pad_type_0 = const()[name = tensor("k_71_pad_type_0"), val = tensor("custom")]; + tensor k_71_pad_0 = const()[name = tensor("k_71_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(445509184))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(447475328))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_71_cast = conv(dilations = var_3721, groups = var_31, pad = k_71_pad_0, pad_type = k_71_pad_type_0, strides = var_3719, weight = unet_down_blocks_2_attentions_1_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_71_cast")]; + tensor var_3725 = const()[name = tensor("op_3725"), val = tensor([1, 1])]; + tensor var_3727 = const()[name = tensor("op_3727"), val = tensor([1, 1])]; + tensor v_71_pad_type_0 = const()[name = tensor("v_71_pad_type_0"), val = tensor("custom")]; + tensor v_71_pad_0 = const()[name = tensor("v_71_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(447475520))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(449441664))), name = 
tensor("unet_down_blocks_2_attentions_1_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_71_cast = conv(dilations = var_3727, groups = var_31, pad = v_71_pad_0, pad_type = v_71_pad_type_0, strides = var_3725, weight = unet_down_blocks_2_attentions_1_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_71_cast")]; + tensor var_3731 = const()[name = tensor("op_3731"), val = tensor([2, 20, 64, -1])]; + tensor var_3732_cast = reshape(shape = var_3731, x = q_71_cast)[name = tensor("op_3732_cast")]; + tensor var_3733 = const()[name = tensor("op_3733"), val = tensor([2, 20, 64, -1])]; + tensor var_3734_cast = reshape(shape = var_3733, x = k_71_cast)[name = tensor("op_3734_cast")]; + tensor var_3735 = const()[name = tensor("op_3735"), val = tensor([2, 20, 64, -1])]; + tensor var_3736_cast = reshape(shape = var_3735, x = v_71_cast)[name = tensor("op_3736_cast")]; + tensor attn_weights_141_transpose_x_0 = const()[name = tensor("attn_weights_141_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_141_transpose_y_0 = const()[name = tensor("attn_weights_141_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_141_cast = matmul(transpose_x = attn_weights_141_transpose_x_0, transpose_y = attn_weights_141_transpose_y_0, x = var_3732_cast, y = var_3734_cast)[name = tensor("attn_weights_141_cast")]; + tensor attn_weights_143_cast = mul(x = attn_weights_141_cast, y = var_12_to_fp16)[name = tensor("attn_weights_143_cast")]; + tensor var_3740_cast = softmax(axis = var_18, x = attn_weights_143_cast)[name = tensor("op_3740_cast")]; + tensor attn_71_transpose_x_0 = const()[name = tensor("attn_71_transpose_x_0"), val = tensor(false)]; + tensor attn_71_transpose_y_0 = const()[name = tensor("attn_71_transpose_y_0"), val = tensor(true)]; + tensor attn_71_cast = matmul(transpose_x = attn_71_transpose_x_0, transpose_y = attn_71_transpose_y_0, x = var_3736_cast, y = var_3740_cast)[name = tensor("attn_71_cast")]; + tensor var_3744 = const()[name = tensor("op_3744"), val = tensor([2, 1280, 1, -1])]; + tensor input_255_cast = reshape(shape = var_3744, x = attn_71_cast)[name = tensor("input_255_cast")]; + tensor var_3749 = const()[name = tensor("op_3749"), val = tensor([1, 1])]; + tensor var_3751 = const()[name = tensor("op_3751"), val = tensor([1, 1])]; + tensor var_3753_pad_type_0 = const()[name = tensor("op_3753_pad_type_0"), val = tensor("custom")]; + tensor var_3753_pad_0 = const()[name = tensor("op_3753_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(449441856))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(450670720))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_3_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_3_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(450670912)))]; + tensor var_3753_cast = conv(bias = unet_down_blocks_2_attentions_1_transformer_blocks_3_attn2_to_out_0_bias_to_fp16, dilations = var_3751, groups = var_31, pad = var_3753_pad_0, 
pad_type = var_3753_pad_type_0, strides = var_3749, weight = unet_down_blocks_2_attentions_1_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized, x = input_255_cast)[name = tensor("op_3753_cast")]; + tensor inputs_107_cast = add(x = var_3753_cast, y = inputs_105_cast)[name = tensor("inputs_107_cast")]; + tensor var_3757 = const()[name = tensor("op_3757"), val = tensor([1])]; + tensor channels_mean_107_cast = reduce_mean(axes = var_3757, keep_dims = var_23, x = inputs_107_cast)[name = tensor("channels_mean_107_cast")]; + tensor zero_mean_107_cast = sub(x = inputs_107_cast, y = channels_mean_107_cast)[name = tensor("zero_mean_107_cast")]; + tensor zero_mean_sq_107_cast = mul(x = zero_mean_107_cast, y = zero_mean_107_cast)[name = tensor("zero_mean_sq_107_cast")]; + tensor var_3761 = const()[name = tensor("op_3761"), val = tensor([1])]; + tensor var_3762_cast = reduce_mean(axes = var_3761, keep_dims = var_23, x = zero_mean_sq_107_cast)[name = tensor("op_3762_cast")]; + tensor var_3763_to_fp16 = const()[name = tensor("op_3763_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3764_cast = add(x = var_3762_cast, y = var_3763_to_fp16)[name = tensor("op_3764_cast")]; + tensor denom_107_epsilon_0_to_fp16 = const()[name = tensor("denom_107_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_107_cast = rsqrt(epsilon = denom_107_epsilon_0_to_fp16, x = var_3764_cast)[name = tensor("denom_107_cast")]; + tensor out_107_cast = mul(x = zero_mean_107_cast, y = denom_107_cast)[name = tensor("out_107_cast")]; + tensor var_3768_to_fp16 = const()[name = tensor("op_3768_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(450673536)))]; + tensor var_3769_cast = add(x = out_107_cast, y = var_3768_to_fp16)[name = tensor("op_3769_cast")]; + tensor var_3771_to_fp16 = const()[name = tensor("op_3771_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(450676160)))]; + tensor input_257_cast = mul(x = var_3769_cast, y = var_3771_to_fp16)[name = tensor("input_257_cast")]; + tensor var_3779 = const()[name = tensor("op_3779"), val = tensor([1, 1])]; + tensor var_3781 = const()[name = tensor("op_3781"), val = tensor([1, 1])]; + tensor var_3783_pad_type_0 = const()[name = tensor("op_3783_pad_type_0"), val = tensor("custom")]; + tensor var_3783_pad_0 = const()[name = tensor("op_3783_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(450678784))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(460509248))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_3_ff_net_0_proj_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(460509440))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(460517184))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_3_ff_net_0_proj_bias_to_fp16_palettized"), shape = tensor([10240])]; + tensor var_3783_cast = conv(bias = unet_down_blocks_2_attentions_1_transformer_blocks_3_ff_net_0_proj_bias_to_fp16_palettized, dilations = var_3781, groups = var_31, pad 
= var_3783_pad_0, pad_type = var_3783_pad_type_0, strides = var_3779, weight = unet_down_blocks_2_attentions_1_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized, x = input_257_cast)[name = tensor("op_3783_cast")]; + tensor var_3784_split_sizes_0 = const()[name = tensor("op_3784_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_3784_axis_0 = const()[name = tensor("op_3784_axis_0"), val = tensor(1)]; + tensor var_3784_cast_0, tensor var_3784_cast_1 = split(axis = var_3784_axis_0, split_sizes = var_3784_split_sizes_0, x = var_3783_cast)[name = tensor("op_3784_cast")]; + tensor var_3786_mode_0 = const()[name = tensor("op_3786_mode_0"), val = tensor("EXACT")]; + tensor var_3786_cast = gelu(mode = var_3786_mode_0, x = var_3784_cast_1)[name = tensor("op_3786_cast")]; + tensor input_259_cast = mul(x = var_3784_cast_0, y = var_3786_cast)[name = tensor("input_259_cast")]; + tensor var_3790 = const()[name = tensor("op_3790"), val = tensor([1, 1])]; + tensor var_3792 = const()[name = tensor("op_3792"), val = tensor([1, 1])]; + tensor var_3794_pad_type_0 = const()[name = tensor("op_3794_pad_type_0"), val = tensor("custom")]; + tensor var_3794_pad_0 = const()[name = tensor("op_3794_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(460517376))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(465432640))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_3_ff_net_2_bias_to_fp16 = const()[name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_3_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(465432832)))]; + tensor var_3794_cast = conv(bias = unet_down_blocks_2_attentions_1_transformer_blocks_3_ff_net_2_bias_to_fp16, dilations = var_3792, groups = var_31, pad = var_3794_pad_0, pad_type = var_3794_pad_type_0, strides = var_3790, weight = unet_down_blocks_2_attentions_1_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized, x = input_259_cast)[name = tensor("op_3794_cast")]; + tensor inputs_109_cast = add(x = var_3794_cast, y = inputs_107_cast)[name = tensor("inputs_109_cast")]; + tensor var_3804 = const()[name = tensor("op_3804"), val = tensor([1])]; + tensor channels_mean_109_cast = reduce_mean(axes = var_3804, keep_dims = var_23, x = inputs_109_cast)[name = tensor("channels_mean_109_cast")]; + tensor zero_mean_109_cast = sub(x = inputs_109_cast, y = channels_mean_109_cast)[name = tensor("zero_mean_109_cast")]; + tensor zero_mean_sq_109_cast = mul(x = zero_mean_109_cast, y = zero_mean_109_cast)[name = tensor("zero_mean_sq_109_cast")]; + tensor var_3808 = const()[name = tensor("op_3808"), val = tensor([1])]; + tensor var_3809_cast = reduce_mean(axes = var_3808, keep_dims = var_23, x = zero_mean_sq_109_cast)[name = tensor("op_3809_cast")]; + tensor var_3810_to_fp16 = const()[name = tensor("op_3810_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3811_cast = add(x = var_3809_cast, y = var_3810_to_fp16)[name = tensor("op_3811_cast")]; + tensor denom_109_epsilon_0_to_fp16 = const()[name = tensor("denom_109_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_109_cast = rsqrt(epsilon = denom_109_epsilon_0_to_fp16, x = 
var_3811_cast)[name = tensor("denom_109_cast")]; + tensor out_109_cast = mul(x = zero_mean_109_cast, y = denom_109_cast)[name = tensor("out_109_cast")]; + tensor var_3815_to_fp16 = const()[name = tensor("op_3815_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(465435456)))]; + tensor var_3816_cast = add(x = out_109_cast, y = var_3815_to_fp16)[name = tensor("op_3816_cast")]; + tensor var_3818_to_fp16 = const()[name = tensor("op_3818_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(465438080)))]; + tensor hidden_states_161_cast = mul(x = var_3816_cast, y = var_3818_to_fp16)[name = tensor("hidden_states_161_cast")]; + tensor var_3825 = const()[name = tensor("op_3825"), val = tensor([1, 1])]; + tensor var_3827 = const()[name = tensor("op_3827"), val = tensor([1, 1])]; + tensor q_73_pad_type_0 = const()[name = tensor("q_73_pad_type_0"), val = tensor("custom")]; + tensor q_73_pad_0 = const()[name = tensor("q_73_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(465440704))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(466669568))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_73_cast = conv(dilations = var_3827, groups = var_31, pad = q_73_pad_0, pad_type = q_73_pad_type_0, strides = var_3825, weight = unet_down_blocks_2_attentions_1_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_161_cast)[name = tensor("q_73_cast")]; + tensor var_3831 = const()[name = tensor("op_3831"), val = tensor([1, 1])]; + tensor var_3833 = const()[name = tensor("op_3833"), val = tensor([1, 1])]; + tensor k_73_pad_type_0 = const()[name = tensor("k_73_pad_type_0"), val = tensor("custom")]; + tensor k_73_pad_0 = const()[name = tensor("k_73_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(466669760))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(467898624))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_73_cast = conv(dilations = var_3833, groups = var_31, pad = k_73_pad_0, pad_type = k_73_pad_type_0, strides = var_3831, weight = unet_down_blocks_2_attentions_1_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_161_cast)[name = tensor("k_73_cast")]; + tensor var_3837 = const()[name = tensor("op_3837"), val = tensor([1, 1])]; + tensor var_3839 = const()[name = tensor("op_3839"), val = tensor([1, 1])]; + tensor v_73_pad_type_0 = const()[name = tensor("v_73_pad_type_0"), val = tensor("custom")]; + tensor v_73_pad_0 = const()[name = tensor("v_73_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(467898816))), lut = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(469127680))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_73_cast = conv(dilations = var_3839, groups = var_31, pad = v_73_pad_0, pad_type = v_73_pad_type_0, strides = var_3837, weight = unet_down_blocks_2_attentions_1_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_161_cast)[name = tensor("v_73_cast")]; + tensor var_3843 = const()[name = tensor("op_3843"), val = tensor([2, 20, 64, -1])]; + tensor var_3844_cast = reshape(shape = var_3843, x = q_73_cast)[name = tensor("op_3844_cast")]; + tensor var_3845 = const()[name = tensor("op_3845"), val = tensor([2, 20, 64, -1])]; + tensor var_3846_cast = reshape(shape = var_3845, x = k_73_cast)[name = tensor("op_3846_cast")]; + tensor var_3847 = const()[name = tensor("op_3847"), val = tensor([2, 20, 64, -1])]; + tensor var_3848_cast = reshape(shape = var_3847, x = v_73_cast)[name = tensor("op_3848_cast")]; + tensor attn_weights_145_transpose_x_0 = const()[name = tensor("attn_weights_145_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_145_transpose_y_0 = const()[name = tensor("attn_weights_145_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_145_cast = matmul(transpose_x = attn_weights_145_transpose_x_0, transpose_y = attn_weights_145_transpose_y_0, x = var_3844_cast, y = var_3846_cast)[name = tensor("attn_weights_145_cast")]; + tensor attn_weights_147_cast = mul(x = attn_weights_145_cast, y = var_12_to_fp16)[name = tensor("attn_weights_147_cast")]; + tensor var_3852_cast = softmax(axis = var_18, x = attn_weights_147_cast)[name = tensor("op_3852_cast")]; + tensor attn_73_transpose_x_0 = const()[name = tensor("attn_73_transpose_x_0"), val = tensor(false)]; + tensor attn_73_transpose_y_0 = const()[name = tensor("attn_73_transpose_y_0"), val = tensor(true)]; + tensor attn_73_cast = matmul(transpose_x = attn_73_transpose_x_0, transpose_y = attn_73_transpose_y_0, x = var_3848_cast, y = var_3852_cast)[name = tensor("attn_73_cast")]; + tensor var_3856 = const()[name = tensor("op_3856"), val = tensor([2, 1280, 1, -1])]; + tensor input_261_cast = reshape(shape = var_3856, x = attn_73_cast)[name = tensor("input_261_cast")]; + tensor var_3861 = const()[name = tensor("op_3861"), val = tensor([1, 1])]; + tensor var_3863 = const()[name = tensor("op_3863"), val = tensor([1, 1])]; + tensor var_3865_pad_type_0 = const()[name = tensor("op_3865_pad_type_0"), val = tensor("custom")]; + tensor var_3865_pad_0 = const()[name = tensor("op_3865_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(469127872))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(470356736))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_4_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_4_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(470356928)))]; + tensor var_3865_cast = conv(bias = 
unet_down_blocks_2_attentions_1_transformer_blocks_4_attn1_to_out_0_bias_to_fp16, dilations = var_3863, groups = var_31, pad = var_3865_pad_0, pad_type = var_3865_pad_type_0, strides = var_3861, weight = unet_down_blocks_2_attentions_1_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized, x = input_261_cast)[name = tensor("op_3865_cast")]; + tensor inputs_111_cast = add(x = var_3865_cast, y = inputs_109_cast)[name = tensor("inputs_111_cast")]; + tensor var_3869 = const()[name = tensor("op_3869"), val = tensor([1])]; + tensor channels_mean_111_cast = reduce_mean(axes = var_3869, keep_dims = var_23, x = inputs_111_cast)[name = tensor("channels_mean_111_cast")]; + tensor zero_mean_111_cast = sub(x = inputs_111_cast, y = channels_mean_111_cast)[name = tensor("zero_mean_111_cast")]; + tensor zero_mean_sq_111_cast = mul(x = zero_mean_111_cast, y = zero_mean_111_cast)[name = tensor("zero_mean_sq_111_cast")]; + tensor var_3873 = const()[name = tensor("op_3873"), val = tensor([1])]; + tensor var_3874_cast = reduce_mean(axes = var_3873, keep_dims = var_23, x = zero_mean_sq_111_cast)[name = tensor("op_3874_cast")]; + tensor var_3875_to_fp16 = const()[name = tensor("op_3875_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3876_cast = add(x = var_3874_cast, y = var_3875_to_fp16)[name = tensor("op_3876_cast")]; + tensor denom_111_epsilon_0_to_fp16 = const()[name = tensor("denom_111_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_111_cast = rsqrt(epsilon = denom_111_epsilon_0_to_fp16, x = var_3876_cast)[name = tensor("denom_111_cast")]; + tensor out_111_cast = mul(x = zero_mean_111_cast, y = denom_111_cast)[name = tensor("out_111_cast")]; + tensor var_3880_to_fp16 = const()[name = tensor("op_3880_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(470359552)))]; + tensor var_3881_cast = add(x = out_111_cast, y = var_3880_to_fp16)[name = tensor("op_3881_cast")]; + tensor var_3883_to_fp16 = const()[name = tensor("op_3883_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(470362176)))]; + tensor hidden_states_163_cast = mul(x = var_3881_cast, y = var_3883_to_fp16)[name = tensor("hidden_states_163_cast")]; + tensor var_3890 = const()[name = tensor("op_3890"), val = tensor([1, 1])]; + tensor var_3892 = const()[name = tensor("op_3892"), val = tensor([1, 1])]; + tensor q_75_pad_type_0 = const()[name = tensor("q_75_pad_type_0"), val = tensor("custom")]; + tensor q_75_pad_0 = const()[name = tensor("q_75_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(470364800))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(471593664))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_75_cast = conv(dilations = var_3892, groups = var_31, pad = q_75_pad_0, pad_type = q_75_pad_type_0, strides = var_3890, weight = unet_down_blocks_2_attentions_1_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_163_cast)[name = tensor("q_75_cast")]; + tensor var_3896 = const()[name = tensor("op_3896"), val = tensor([1, 1])]; + tensor var_3898 = const()[name = tensor("op_3898"), val = tensor([1, 1])]; + tensor k_75_pad_type_0 = const()[name = 
tensor("k_75_pad_type_0"), val = tensor("custom")]; + tensor k_75_pad_0 = const()[name = tensor("k_75_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(471593856))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(473560000))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_75_cast = conv(dilations = var_3898, groups = var_31, pad = k_75_pad_0, pad_type = k_75_pad_type_0, strides = var_3896, weight = unet_down_blocks_2_attentions_1_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_75_cast")]; + tensor var_3902 = const()[name = tensor("op_3902"), val = tensor([1, 1])]; + tensor var_3904 = const()[name = tensor("op_3904"), val = tensor([1, 1])]; + tensor v_75_pad_type_0 = const()[name = tensor("v_75_pad_type_0"), val = tensor("custom")]; + tensor v_75_pad_0 = const()[name = tensor("v_75_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(473560192))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(475526336))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_75_cast = conv(dilations = var_3904, groups = var_31, pad = v_75_pad_0, pad_type = v_75_pad_type_0, strides = var_3902, weight = unet_down_blocks_2_attentions_1_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_75_cast")]; + tensor var_3908 = const()[name = tensor("op_3908"), val = tensor([2, 20, 64, -1])]; + tensor var_3909_cast = reshape(shape = var_3908, x = q_75_cast)[name = tensor("op_3909_cast")]; + tensor var_3910 = const()[name = tensor("op_3910"), val = tensor([2, 20, 64, -1])]; + tensor var_3911_cast = reshape(shape = var_3910, x = k_75_cast)[name = tensor("op_3911_cast")]; + tensor var_3912 = const()[name = tensor("op_3912"), val = tensor([2, 20, 64, -1])]; + tensor var_3913_cast = reshape(shape = var_3912, x = v_75_cast)[name = tensor("op_3913_cast")]; + tensor attn_weights_149_transpose_x_0 = const()[name = tensor("attn_weights_149_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_149_transpose_y_0 = const()[name = tensor("attn_weights_149_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_149_cast = matmul(transpose_x = attn_weights_149_transpose_x_0, transpose_y = attn_weights_149_transpose_y_0, x = var_3909_cast, y = var_3911_cast)[name = tensor("attn_weights_149_cast")]; + tensor attn_weights_151_cast = mul(x = attn_weights_149_cast, y = var_12_to_fp16)[name = tensor("attn_weights_151_cast")]; + tensor var_3917_cast = softmax(axis = var_18, x = attn_weights_151_cast)[name = tensor("op_3917_cast")]; + tensor attn_75_transpose_x_0 = const()[name = tensor("attn_75_transpose_x_0"), val = tensor(false)]; + tensor attn_75_transpose_y_0 = const()[name = tensor("attn_75_transpose_y_0"), val = tensor(true)]; + tensor attn_75_cast = matmul(transpose_x = attn_75_transpose_x_0, transpose_y = attn_75_transpose_y_0, x = 
var_3913_cast, y = var_3917_cast)[name = tensor("attn_75_cast")]; + tensor var_3921 = const()[name = tensor("op_3921"), val = tensor([2, 1280, 1, -1])]; + tensor input_263_cast = reshape(shape = var_3921, x = attn_75_cast)[name = tensor("input_263_cast")]; + tensor var_3926 = const()[name = tensor("op_3926"), val = tensor([1, 1])]; + tensor var_3928 = const()[name = tensor("op_3928"), val = tensor([1, 1])]; + tensor var_3930_pad_type_0 = const()[name = tensor("op_3930_pad_type_0"), val = tensor("custom")]; + tensor var_3930_pad_0 = const()[name = tensor("op_3930_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(475526528))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(476755392))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_4_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_4_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(476755584)))]; + tensor var_3930_cast = conv(bias = unet_down_blocks_2_attentions_1_transformer_blocks_4_attn2_to_out_0_bias_to_fp16, dilations = var_3928, groups = var_31, pad = var_3930_pad_0, pad_type = var_3930_pad_type_0, strides = var_3926, weight = unet_down_blocks_2_attentions_1_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized, x = input_263_cast)[name = tensor("op_3930_cast")]; + tensor inputs_113_cast = add(x = var_3930_cast, y = inputs_111_cast)[name = tensor("inputs_113_cast")]; + tensor var_3934 = const()[name = tensor("op_3934"), val = tensor([1])]; + tensor channels_mean_113_cast = reduce_mean(axes = var_3934, keep_dims = var_23, x = inputs_113_cast)[name = tensor("channels_mean_113_cast")]; + tensor zero_mean_113_cast = sub(x = inputs_113_cast, y = channels_mean_113_cast)[name = tensor("zero_mean_113_cast")]; + tensor zero_mean_sq_113_cast = mul(x = zero_mean_113_cast, y = zero_mean_113_cast)[name = tensor("zero_mean_sq_113_cast")]; + tensor var_3938 = const()[name = tensor("op_3938"), val = tensor([1])]; + tensor var_3939_cast = reduce_mean(axes = var_3938, keep_dims = var_23, x = zero_mean_sq_113_cast)[name = tensor("op_3939_cast")]; + tensor var_3940_to_fp16 = const()[name = tensor("op_3940_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3941_cast = add(x = var_3939_cast, y = var_3940_to_fp16)[name = tensor("op_3941_cast")]; + tensor denom_113_epsilon_0_to_fp16 = const()[name = tensor("denom_113_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_113_cast = rsqrt(epsilon = denom_113_epsilon_0_to_fp16, x = var_3941_cast)[name = tensor("denom_113_cast")]; + tensor out_113_cast = mul(x = zero_mean_113_cast, y = denom_113_cast)[name = tensor("out_113_cast")]; + tensor var_3945_to_fp16 = const()[name = tensor("op_3945_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(476758208)))]; + tensor var_3946_cast = add(x = out_113_cast, y = var_3945_to_fp16)[name = tensor("op_3946_cast")]; + tensor var_3948_to_fp16 = const()[name = tensor("op_3948_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(476760832)))]; + 
tensor input_265_cast = mul(x = var_3946_cast, y = var_3948_to_fp16)[name = tensor("input_265_cast")]; + tensor var_3956 = const()[name = tensor("op_3956"), val = tensor([1, 1])]; + tensor var_3958 = const()[name = tensor("op_3958"), val = tensor([1, 1])]; + tensor var_3960_pad_type_0 = const()[name = tensor("op_3960_pad_type_0"), val = tensor("custom")]; + tensor var_3960_pad_0 = const()[name = tensor("op_3960_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(476763456))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(486593920))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_4_ff_net_0_proj_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(486594112))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(486601856))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_4_ff_net_0_proj_bias_to_fp16_palettized"), shape = tensor([10240])]; + tensor var_3960_cast = conv(bias = unet_down_blocks_2_attentions_1_transformer_blocks_4_ff_net_0_proj_bias_to_fp16_palettized, dilations = var_3958, groups = var_31, pad = var_3960_pad_0, pad_type = var_3960_pad_type_0, strides = var_3956, weight = unet_down_blocks_2_attentions_1_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized, x = input_265_cast)[name = tensor("op_3960_cast")]; + tensor var_3961_split_sizes_0 = const()[name = tensor("op_3961_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_3961_axis_0 = const()[name = tensor("op_3961_axis_0"), val = tensor(1)]; + tensor var_3961_cast_0, tensor var_3961_cast_1 = split(axis = var_3961_axis_0, split_sizes = var_3961_split_sizes_0, x = var_3960_cast)[name = tensor("op_3961_cast")]; + tensor var_3963_mode_0 = const()[name = tensor("op_3963_mode_0"), val = tensor("EXACT")]; + tensor var_3963_cast = gelu(mode = var_3963_mode_0, x = var_3961_cast_1)[name = tensor("op_3963_cast")]; + tensor input_267_cast = mul(x = var_3961_cast_0, y = var_3963_cast)[name = tensor("input_267_cast")]; + tensor var_3967 = const()[name = tensor("op_3967"), val = tensor([1, 1])]; + tensor var_3969 = const()[name = tensor("op_3969"), val = tensor([1, 1])]; + tensor var_3971_pad_type_0 = const()[name = tensor("op_3971_pad_type_0"), val = tensor("custom")]; + tensor var_3971_pad_0 = const()[name = tensor("op_3971_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(486602048))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(491517312))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_4_ff_net_2_bias_to_fp16 = const()[name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_4_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), 
offset = tensor(491517504)))]; + tensor var_3971_cast = conv(bias = unet_down_blocks_2_attentions_1_transformer_blocks_4_ff_net_2_bias_to_fp16, dilations = var_3969, groups = var_31, pad = var_3971_pad_0, pad_type = var_3971_pad_type_0, strides = var_3967, weight = unet_down_blocks_2_attentions_1_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized, x = input_267_cast)[name = tensor("op_3971_cast")]; + tensor inputs_115_cast = add(x = var_3971_cast, y = inputs_113_cast)[name = tensor("inputs_115_cast")]; + tensor var_3981 = const()[name = tensor("op_3981"), val = tensor([1])]; + tensor channels_mean_115_cast = reduce_mean(axes = var_3981, keep_dims = var_23, x = inputs_115_cast)[name = tensor("channels_mean_115_cast")]; + tensor zero_mean_115_cast = sub(x = inputs_115_cast, y = channels_mean_115_cast)[name = tensor("zero_mean_115_cast")]; + tensor zero_mean_sq_115_cast = mul(x = zero_mean_115_cast, y = zero_mean_115_cast)[name = tensor("zero_mean_sq_115_cast")]; + tensor var_3985 = const()[name = tensor("op_3985"), val = tensor([1])]; + tensor var_3986_cast = reduce_mean(axes = var_3985, keep_dims = var_23, x = zero_mean_sq_115_cast)[name = tensor("op_3986_cast")]; + tensor var_3987_to_fp16 = const()[name = tensor("op_3987_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3988_cast = add(x = var_3986_cast, y = var_3987_to_fp16)[name = tensor("op_3988_cast")]; + tensor denom_115_epsilon_0_to_fp16 = const()[name = tensor("denom_115_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_115_cast = rsqrt(epsilon = denom_115_epsilon_0_to_fp16, x = var_3988_cast)[name = tensor("denom_115_cast")]; + tensor out_115_cast = mul(x = zero_mean_115_cast, y = denom_115_cast)[name = tensor("out_115_cast")]; + tensor var_3992_to_fp16 = const()[name = tensor("op_3992_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(491520128)))]; + tensor var_3993_cast = add(x = out_115_cast, y = var_3992_to_fp16)[name = tensor("op_3993_cast")]; + tensor var_3995_to_fp16 = const()[name = tensor("op_3995_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(491522752)))]; + tensor hidden_states_167_cast = mul(x = var_3993_cast, y = var_3995_to_fp16)[name = tensor("hidden_states_167_cast")]; + tensor var_4002 = const()[name = tensor("op_4002"), val = tensor([1, 1])]; + tensor var_4004 = const()[name = tensor("op_4004"), val = tensor([1, 1])]; + tensor q_77_pad_type_0 = const()[name = tensor("q_77_pad_type_0"), val = tensor("custom")]; + tensor q_77_pad_0 = const()[name = tensor("q_77_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(491525376))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(492754240))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_77_cast = conv(dilations = var_4004, groups = var_31, pad = q_77_pad_0, pad_type = q_77_pad_type_0, strides = var_4002, weight = unet_down_blocks_2_attentions_1_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_167_cast)[name = tensor("q_77_cast")]; + tensor var_4008 = const()[name = tensor("op_4008"), val = tensor([1, 1])]; + tensor var_4010 = const()[name = tensor("op_4010"), val = tensor([1, 1])]; + 
tensor k_77_pad_type_0 = const()[name = tensor("k_77_pad_type_0"), val = tensor("custom")]; + tensor k_77_pad_0 = const()[name = tensor("k_77_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(492754432))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(493983296))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_77_cast = conv(dilations = var_4010, groups = var_31, pad = k_77_pad_0, pad_type = k_77_pad_type_0, strides = var_4008, weight = unet_down_blocks_2_attentions_1_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_167_cast)[name = tensor("k_77_cast")]; + tensor var_4014 = const()[name = tensor("op_4014"), val = tensor([1, 1])]; + tensor var_4016 = const()[name = tensor("op_4016"), val = tensor([1, 1])]; + tensor v_77_pad_type_0 = const()[name = tensor("v_77_pad_type_0"), val = tensor("custom")]; + tensor v_77_pad_0 = const()[name = tensor("v_77_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(493983488))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(495212352))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_77_cast = conv(dilations = var_4016, groups = var_31, pad = v_77_pad_0, pad_type = v_77_pad_type_0, strides = var_4014, weight = unet_down_blocks_2_attentions_1_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_167_cast)[name = tensor("v_77_cast")]; + tensor var_4020 = const()[name = tensor("op_4020"), val = tensor([2, 20, 64, -1])]; + tensor var_4021_cast = reshape(shape = var_4020, x = q_77_cast)[name = tensor("op_4021_cast")]; + tensor var_4022 = const()[name = tensor("op_4022"), val = tensor([2, 20, 64, -1])]; + tensor var_4023_cast = reshape(shape = var_4022, x = k_77_cast)[name = tensor("op_4023_cast")]; + tensor var_4024 = const()[name = tensor("op_4024"), val = tensor([2, 20, 64, -1])]; + tensor var_4025_cast = reshape(shape = var_4024, x = v_77_cast)[name = tensor("op_4025_cast")]; + tensor attn_weights_153_transpose_x_0 = const()[name = tensor("attn_weights_153_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_153_transpose_y_0 = const()[name = tensor("attn_weights_153_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_153_cast = matmul(transpose_x = attn_weights_153_transpose_x_0, transpose_y = attn_weights_153_transpose_y_0, x = var_4021_cast, y = var_4023_cast)[name = tensor("attn_weights_153_cast")]; + tensor attn_weights_155_cast = mul(x = attn_weights_153_cast, y = var_12_to_fp16)[name = tensor("attn_weights_155_cast")]; + tensor var_4029_cast = softmax(axis = var_18, x = attn_weights_155_cast)[name = tensor("op_4029_cast")]; + tensor attn_77_transpose_x_0 = const()[name = tensor("attn_77_transpose_x_0"), val = tensor(false)]; + tensor attn_77_transpose_y_0 = const()[name = tensor("attn_77_transpose_y_0"), val = tensor(true)]; + tensor attn_77_cast = matmul(transpose_x = attn_77_transpose_x_0, 
transpose_y = attn_77_transpose_y_0, x = var_4025_cast, y = var_4029_cast)[name = tensor("attn_77_cast")]; + tensor var_4033 = const()[name = tensor("op_4033"), val = tensor([2, 1280, 1, -1])]; + tensor input_269_cast = reshape(shape = var_4033, x = attn_77_cast)[name = tensor("input_269_cast")]; + tensor var_4038 = const()[name = tensor("op_4038"), val = tensor([1, 1])]; + tensor var_4040 = const()[name = tensor("op_4040"), val = tensor([1, 1])]; + tensor var_4042_pad_type_0 = const()[name = tensor("op_4042_pad_type_0"), val = tensor("custom")]; + tensor var_4042_pad_0 = const()[name = tensor("op_4042_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(495212544))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(496441408))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_5_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_5_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(496441600)))]; + tensor var_4042_cast = conv(bias = unet_down_blocks_2_attentions_1_transformer_blocks_5_attn1_to_out_0_bias_to_fp16, dilations = var_4040, groups = var_31, pad = var_4042_pad_0, pad_type = var_4042_pad_type_0, strides = var_4038, weight = unet_down_blocks_2_attentions_1_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized, x = input_269_cast)[name = tensor("op_4042_cast")]; + tensor inputs_117_cast = add(x = var_4042_cast, y = inputs_115_cast)[name = tensor("inputs_117_cast")]; + tensor var_4046 = const()[name = tensor("op_4046"), val = tensor([1])]; + tensor channels_mean_117_cast = reduce_mean(axes = var_4046, keep_dims = var_23, x = inputs_117_cast)[name = tensor("channels_mean_117_cast")]; + tensor zero_mean_117_cast = sub(x = inputs_117_cast, y = channels_mean_117_cast)[name = tensor("zero_mean_117_cast")]; + tensor zero_mean_sq_117_cast = mul(x = zero_mean_117_cast, y = zero_mean_117_cast)[name = tensor("zero_mean_sq_117_cast")]; + tensor var_4050 = const()[name = tensor("op_4050"), val = tensor([1])]; + tensor var_4051_cast = reduce_mean(axes = var_4050, keep_dims = var_23, x = zero_mean_sq_117_cast)[name = tensor("op_4051_cast")]; + tensor var_4052_to_fp16 = const()[name = tensor("op_4052_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_4053_cast = add(x = var_4051_cast, y = var_4052_to_fp16)[name = tensor("op_4053_cast")]; + tensor denom_117_epsilon_0_to_fp16 = const()[name = tensor("denom_117_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_117_cast = rsqrt(epsilon = denom_117_epsilon_0_to_fp16, x = var_4053_cast)[name = tensor("denom_117_cast")]; + tensor out_117_cast = mul(x = zero_mean_117_cast, y = denom_117_cast)[name = tensor("out_117_cast")]; + tensor var_4057_to_fp16 = const()[name = tensor("op_4057_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(496444224)))]; + tensor var_4058_cast = add(x = out_117_cast, y = var_4057_to_fp16)[name = tensor("op_4058_cast")]; + tensor var_4060_to_fp16 = const()[name = tensor("op_4060_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(496446848)))]; + tensor hidden_states_169_cast = mul(x = var_4058_cast, y = var_4060_to_fp16)[name = tensor("hidden_states_169_cast")]; + tensor var_4067 = const()[name = tensor("op_4067"), val = tensor([1, 1])]; + tensor var_4069 = const()[name = tensor("op_4069"), val = tensor([1, 1])]; + tensor q_79_pad_type_0 = const()[name = tensor("q_79_pad_type_0"), val = tensor("custom")]; + tensor q_79_pad_0 = const()[name = tensor("q_79_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(496449472))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(497678336))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_79_cast = conv(dilations = var_4069, groups = var_31, pad = q_79_pad_0, pad_type = q_79_pad_type_0, strides = var_4067, weight = unet_down_blocks_2_attentions_1_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_169_cast)[name = tensor("q_79_cast")]; + tensor var_4073 = const()[name = tensor("op_4073"), val = tensor([1, 1])]; + tensor var_4075 = const()[name = tensor("op_4075"), val = tensor([1, 1])]; + tensor k_79_pad_type_0 = const()[name = tensor("k_79_pad_type_0"), val = tensor("custom")]; + tensor k_79_pad_0 = const()[name = tensor("k_79_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(497678528))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(499644672))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_79_cast = conv(dilations = var_4075, groups = var_31, pad = k_79_pad_0, pad_type = k_79_pad_type_0, strides = var_4073, weight = unet_down_blocks_2_attentions_1_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_79_cast")]; + tensor var_4079 = const()[name = tensor("op_4079"), val = tensor([1, 1])]; + tensor var_4081 = const()[name = tensor("op_4081"), val = tensor([1, 1])]; + tensor v_79_pad_type_0 = const()[name = tensor("v_79_pad_type_0"), val = tensor("custom")]; + tensor v_79_pad_0 = const()[name = tensor("v_79_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(499644864))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(501611008))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_79_cast = conv(dilations = var_4081, groups = var_31, pad = v_79_pad_0, pad_type = v_79_pad_type_0, strides = var_4079, weight = unet_down_blocks_2_attentions_1_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_79_cast")]; + tensor var_4085 = const()[name = tensor("op_4085"), val = 
tensor([2, 20, 64, -1])]; + tensor var_4086_cast = reshape(shape = var_4085, x = q_79_cast)[name = tensor("op_4086_cast")]; + tensor var_4087 = const()[name = tensor("op_4087"), val = tensor([2, 20, 64, -1])]; + tensor var_4088_cast = reshape(shape = var_4087, x = k_79_cast)[name = tensor("op_4088_cast")]; + tensor var_4089 = const()[name = tensor("op_4089"), val = tensor([2, 20, 64, -1])]; + tensor var_4090_cast = reshape(shape = var_4089, x = v_79_cast)[name = tensor("op_4090_cast")]; + tensor attn_weights_157_transpose_x_0 = const()[name = tensor("attn_weights_157_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_157_transpose_y_0 = const()[name = tensor("attn_weights_157_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_157_cast = matmul(transpose_x = attn_weights_157_transpose_x_0, transpose_y = attn_weights_157_transpose_y_0, x = var_4086_cast, y = var_4088_cast)[name = tensor("attn_weights_157_cast")]; + tensor attn_weights_159_cast = mul(x = attn_weights_157_cast, y = var_12_to_fp16)[name = tensor("attn_weights_159_cast")]; + tensor var_4094_cast = softmax(axis = var_18, x = attn_weights_159_cast)[name = tensor("op_4094_cast")]; + tensor attn_79_transpose_x_0 = const()[name = tensor("attn_79_transpose_x_0"), val = tensor(false)]; + tensor attn_79_transpose_y_0 = const()[name = tensor("attn_79_transpose_y_0"), val = tensor(true)]; + tensor attn_79_cast = matmul(transpose_x = attn_79_transpose_x_0, transpose_y = attn_79_transpose_y_0, x = var_4090_cast, y = var_4094_cast)[name = tensor("attn_79_cast")]; + tensor var_4098 = const()[name = tensor("op_4098"), val = tensor([2, 1280, 1, -1])]; + tensor input_271_cast = reshape(shape = var_4098, x = attn_79_cast)[name = tensor("input_271_cast")]; + tensor var_4103 = const()[name = tensor("op_4103"), val = tensor([1, 1])]; + tensor var_4105 = const()[name = tensor("op_4105"), val = tensor([1, 1])]; + tensor var_4107_pad_type_0 = const()[name = tensor("op_4107_pad_type_0"), val = tensor("custom")]; + tensor var_4107_pad_0 = const()[name = tensor("op_4107_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(501611200))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(502840064))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_5_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_5_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(502840256)))]; + tensor var_4107_cast = conv(bias = unet_down_blocks_2_attentions_1_transformer_blocks_5_attn2_to_out_0_bias_to_fp16, dilations = var_4105, groups = var_31, pad = var_4107_pad_0, pad_type = var_4107_pad_type_0, strides = var_4103, weight = unet_down_blocks_2_attentions_1_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized, x = input_271_cast)[name = tensor("op_4107_cast")]; + tensor inputs_119_cast = add(x = var_4107_cast, y = inputs_117_cast)[name = tensor("inputs_119_cast")]; + tensor var_4111 = const()[name = tensor("op_4111"), val = tensor([1])]; + tensor channels_mean_119_cast = reduce_mean(axes = var_4111, keep_dims = var_23, x = 
inputs_119_cast)[name = tensor("channels_mean_119_cast")]; + tensor zero_mean_119_cast = sub(x = inputs_119_cast, y = channels_mean_119_cast)[name = tensor("zero_mean_119_cast")]; + tensor zero_mean_sq_119_cast = mul(x = zero_mean_119_cast, y = zero_mean_119_cast)[name = tensor("zero_mean_sq_119_cast")]; + tensor var_4115 = const()[name = tensor("op_4115"), val = tensor([1])]; + tensor var_4116_cast = reduce_mean(axes = var_4115, keep_dims = var_23, x = zero_mean_sq_119_cast)[name = tensor("op_4116_cast")]; + tensor var_4117_to_fp16 = const()[name = tensor("op_4117_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_4118_cast = add(x = var_4116_cast, y = var_4117_to_fp16)[name = tensor("op_4118_cast")]; + tensor denom_119_epsilon_0_to_fp16 = const()[name = tensor("denom_119_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_119_cast = rsqrt(epsilon = denom_119_epsilon_0_to_fp16, x = var_4118_cast)[name = tensor("denom_119_cast")]; + tensor out_119_cast = mul(x = zero_mean_119_cast, y = denom_119_cast)[name = tensor("out_119_cast")]; + tensor var_4122_to_fp16 = const()[name = tensor("op_4122_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(502842880)))]; + tensor var_4123_cast = add(x = out_119_cast, y = var_4122_to_fp16)[name = tensor("op_4123_cast")]; + tensor var_4125_to_fp16 = const()[name = tensor("op_4125_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(502845504)))]; + tensor input_273_cast = mul(x = var_4123_cast, y = var_4125_to_fp16)[name = tensor("input_273_cast")]; + tensor var_4133 = const()[name = tensor("op_4133"), val = tensor([1, 1])]; + tensor var_4135 = const()[name = tensor("op_4135"), val = tensor([1, 1])]; + tensor var_4137_pad_type_0 = const()[name = tensor("op_4137_pad_type_0"), val = tensor("custom")]; + tensor var_4137_pad_0 = const()[name = tensor("op_4137_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(502848128))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(512678592))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_5_ff_net_0_proj_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(512678784))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(512686528))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_5_ff_net_0_proj_bias_to_fp16_palettized"), shape = tensor([10240])]; + tensor var_4137_cast = conv(bias = unet_down_blocks_2_attentions_1_transformer_blocks_5_ff_net_0_proj_bias_to_fp16_palettized, dilations = var_4135, groups = var_31, pad = var_4137_pad_0, pad_type = var_4137_pad_type_0, strides = var_4133, weight = unet_down_blocks_2_attentions_1_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized, x = input_273_cast)[name = tensor("op_4137_cast")]; + tensor var_4138_split_sizes_0 = const()[name = tensor("op_4138_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_4138_axis_0 = const()[name = tensor("op_4138_axis_0"), val = tensor(1)]; + tensor var_4138_cast_0, tensor var_4138_cast_1 = 
split(axis = var_4138_axis_0, split_sizes = var_4138_split_sizes_0, x = var_4137_cast)[name = tensor("op_4138_cast")]; + tensor var_4140_mode_0 = const()[name = tensor("op_4140_mode_0"), val = tensor("EXACT")]; + tensor var_4140_cast = gelu(mode = var_4140_mode_0, x = var_4138_cast_1)[name = tensor("op_4140_cast")]; + tensor input_275_cast = mul(x = var_4138_cast_0, y = var_4140_cast)[name = tensor("input_275_cast")]; + tensor var_4144 = const()[name = tensor("op_4144"), val = tensor([1, 1])]; + tensor var_4146 = const()[name = tensor("op_4146"), val = tensor([1, 1])]; + tensor var_4148_pad_type_0 = const()[name = tensor("op_4148_pad_type_0"), val = tensor("custom")]; + tensor var_4148_pad_0 = const()[name = tensor("op_4148_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(512686720))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(517601984))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_5_ff_net_2_bias_to_fp16 = const()[name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_5_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(517602176)))]; + tensor var_4148_cast = conv(bias = unet_down_blocks_2_attentions_1_transformer_blocks_5_ff_net_2_bias_to_fp16, dilations = var_4146, groups = var_31, pad = var_4148_pad_0, pad_type = var_4148_pad_type_0, strides = var_4144, weight = unet_down_blocks_2_attentions_1_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized, x = input_275_cast)[name = tensor("op_4148_cast")]; + tensor inputs_121_cast = add(x = var_4148_cast, y = inputs_119_cast)[name = tensor("inputs_121_cast")]; + tensor var_4158 = const()[name = tensor("op_4158"), val = tensor([1])]; + tensor channels_mean_121_cast = reduce_mean(axes = var_4158, keep_dims = var_23, x = inputs_121_cast)[name = tensor("channels_mean_121_cast")]; + tensor zero_mean_121_cast = sub(x = inputs_121_cast, y = channels_mean_121_cast)[name = tensor("zero_mean_121_cast")]; + tensor zero_mean_sq_121_cast = mul(x = zero_mean_121_cast, y = zero_mean_121_cast)[name = tensor("zero_mean_sq_121_cast")]; + tensor var_4162 = const()[name = tensor("op_4162"), val = tensor([1])]; + tensor var_4163_cast = reduce_mean(axes = var_4162, keep_dims = var_23, x = zero_mean_sq_121_cast)[name = tensor("op_4163_cast")]; + tensor var_4164_to_fp16 = const()[name = tensor("op_4164_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_4165_cast = add(x = var_4163_cast, y = var_4164_to_fp16)[name = tensor("op_4165_cast")]; + tensor denom_121_epsilon_0_to_fp16 = const()[name = tensor("denom_121_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_121_cast = rsqrt(epsilon = denom_121_epsilon_0_to_fp16, x = var_4165_cast)[name = tensor("denom_121_cast")]; + tensor out_121_cast = mul(x = zero_mean_121_cast, y = denom_121_cast)[name = tensor("out_121_cast")]; + tensor var_4169_to_fp16 = const()[name = tensor("op_4169_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(517604800)))]; + tensor var_4170_cast = add(x = out_121_cast, y = var_4169_to_fp16)[name = tensor("op_4170_cast")]; + tensor var_4172_to_fp16 = const()[name = 
tensor("op_4172_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(517607424)))]; + tensor hidden_states_173_cast = mul(x = var_4170_cast, y = var_4172_to_fp16)[name = tensor("hidden_states_173_cast")]; + tensor var_4179 = const()[name = tensor("op_4179"), val = tensor([1, 1])]; + tensor var_4181 = const()[name = tensor("op_4181"), val = tensor([1, 1])]; + tensor q_81_pad_type_0 = const()[name = tensor("q_81_pad_type_0"), val = tensor("custom")]; + tensor q_81_pad_0 = const()[name = tensor("q_81_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(517610048))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(518838912))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_81_cast = conv(dilations = var_4181, groups = var_31, pad = q_81_pad_0, pad_type = q_81_pad_type_0, strides = var_4179, weight = unet_down_blocks_2_attentions_1_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_173_cast)[name = tensor("q_81_cast")]; + tensor var_4185 = const()[name = tensor("op_4185"), val = tensor([1, 1])]; + tensor var_4187 = const()[name = tensor("op_4187"), val = tensor([1, 1])]; + tensor k_81_pad_type_0 = const()[name = tensor("k_81_pad_type_0"), val = tensor("custom")]; + tensor k_81_pad_0 = const()[name = tensor("k_81_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(518839104))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(520067968))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_81_cast = conv(dilations = var_4187, groups = var_31, pad = k_81_pad_0, pad_type = k_81_pad_type_0, strides = var_4185, weight = unet_down_blocks_2_attentions_1_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_173_cast)[name = tensor("k_81_cast")]; + tensor var_4191 = const()[name = tensor("op_4191"), val = tensor([1, 1])]; + tensor var_4193 = const()[name = tensor("op_4193"), val = tensor([1, 1])]; + tensor v_81_pad_type_0 = const()[name = tensor("v_81_pad_type_0"), val = tensor("custom")]; + tensor v_81_pad_0 = const()[name = tensor("v_81_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(520068160))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(521297024))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_81_cast = conv(dilations = var_4193, groups = var_31, pad = v_81_pad_0, pad_type = v_81_pad_type_0, strides = var_4191, weight = unet_down_blocks_2_attentions_1_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_173_cast)[name = tensor("v_81_cast")]; + 
tensor var_4197 = const()[name = tensor("op_4197"), val = tensor([2, 20, 64, -1])]; + tensor var_4198_cast = reshape(shape = var_4197, x = q_81_cast)[name = tensor("op_4198_cast")]; + tensor var_4199 = const()[name = tensor("op_4199"), val = tensor([2, 20, 64, -1])]; + tensor var_4200_cast = reshape(shape = var_4199, x = k_81_cast)[name = tensor("op_4200_cast")]; + tensor var_4201 = const()[name = tensor("op_4201"), val = tensor([2, 20, 64, -1])]; + tensor var_4202_cast = reshape(shape = var_4201, x = v_81_cast)[name = tensor("op_4202_cast")]; + tensor attn_weights_161_transpose_x_0 = const()[name = tensor("attn_weights_161_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_161_transpose_y_0 = const()[name = tensor("attn_weights_161_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_161_cast = matmul(transpose_x = attn_weights_161_transpose_x_0, transpose_y = attn_weights_161_transpose_y_0, x = var_4198_cast, y = var_4200_cast)[name = tensor("attn_weights_161_cast")]; + tensor attn_weights_163_cast = mul(x = attn_weights_161_cast, y = var_12_to_fp16)[name = tensor("attn_weights_163_cast")]; + tensor var_4206_cast = softmax(axis = var_18, x = attn_weights_163_cast)[name = tensor("op_4206_cast")]; + tensor attn_81_transpose_x_0 = const()[name = tensor("attn_81_transpose_x_0"), val = tensor(false)]; + tensor attn_81_transpose_y_0 = const()[name = tensor("attn_81_transpose_y_0"), val = tensor(true)]; + tensor attn_81_cast = matmul(transpose_x = attn_81_transpose_x_0, transpose_y = attn_81_transpose_y_0, x = var_4202_cast, y = var_4206_cast)[name = tensor("attn_81_cast")]; + tensor var_4210 = const()[name = tensor("op_4210"), val = tensor([2, 1280, 1, -1])]; + tensor input_277_cast = reshape(shape = var_4210, x = attn_81_cast)[name = tensor("input_277_cast")]; + tensor var_4215 = const()[name = tensor("op_4215"), val = tensor([1, 1])]; + tensor var_4217 = const()[name = tensor("op_4217"), val = tensor([1, 1])]; + tensor var_4219_pad_type_0 = const()[name = tensor("op_4219_pad_type_0"), val = tensor("custom")]; + tensor var_4219_pad_0 = const()[name = tensor("op_4219_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(521297216))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(522526080))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_6_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_6_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(522526272)))]; + tensor var_4219_cast = conv(bias = unet_down_blocks_2_attentions_1_transformer_blocks_6_attn1_to_out_0_bias_to_fp16, dilations = var_4217, groups = var_31, pad = var_4219_pad_0, pad_type = var_4219_pad_type_0, strides = var_4215, weight = unet_down_blocks_2_attentions_1_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized, x = input_277_cast)[name = tensor("op_4219_cast")]; + tensor inputs_123_cast = add(x = var_4219_cast, y = inputs_121_cast)[name = tensor("inputs_123_cast")]; + tensor var_4223 = const()[name = tensor("op_4223"), val = tensor([1])]; + tensor channels_mean_123_cast = 
reduce_mean(axes = var_4223, keep_dims = var_23, x = inputs_123_cast)[name = tensor("channels_mean_123_cast")]; + tensor zero_mean_123_cast = sub(x = inputs_123_cast, y = channels_mean_123_cast)[name = tensor("zero_mean_123_cast")]; + tensor zero_mean_sq_123_cast = mul(x = zero_mean_123_cast, y = zero_mean_123_cast)[name = tensor("zero_mean_sq_123_cast")]; + tensor var_4227 = const()[name = tensor("op_4227"), val = tensor([1])]; + tensor var_4228_cast = reduce_mean(axes = var_4227, keep_dims = var_23, x = zero_mean_sq_123_cast)[name = tensor("op_4228_cast")]; + tensor var_4229_to_fp16 = const()[name = tensor("op_4229_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_4230_cast = add(x = var_4228_cast, y = var_4229_to_fp16)[name = tensor("op_4230_cast")]; + tensor denom_123_epsilon_0_to_fp16 = const()[name = tensor("denom_123_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_123_cast = rsqrt(epsilon = denom_123_epsilon_0_to_fp16, x = var_4230_cast)[name = tensor("denom_123_cast")]; + tensor out_123_cast = mul(x = zero_mean_123_cast, y = denom_123_cast)[name = tensor("out_123_cast")]; + tensor var_4234_to_fp16 = const()[name = tensor("op_4234_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(522528896)))]; + tensor var_4235_cast = add(x = out_123_cast, y = var_4234_to_fp16)[name = tensor("op_4235_cast")]; + tensor var_4237_to_fp16 = const()[name = tensor("op_4237_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(522531520)))]; + tensor hidden_states_175_cast = mul(x = var_4235_cast, y = var_4237_to_fp16)[name = tensor("hidden_states_175_cast")]; + tensor var_4244 = const()[name = tensor("op_4244"), val = tensor([1, 1])]; + tensor var_4246 = const()[name = tensor("op_4246"), val = tensor([1, 1])]; + tensor q_83_pad_type_0 = const()[name = tensor("q_83_pad_type_0"), val = tensor("custom")]; + tensor q_83_pad_0 = const()[name = tensor("q_83_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(522534144))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(523763008))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_83_cast = conv(dilations = var_4246, groups = var_31, pad = q_83_pad_0, pad_type = q_83_pad_type_0, strides = var_4244, weight = unet_down_blocks_2_attentions_1_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_175_cast)[name = tensor("q_83_cast")]; + tensor var_4250 = const()[name = tensor("op_4250"), val = tensor([1, 1])]; + tensor var_4252 = const()[name = tensor("op_4252"), val = tensor([1, 1])]; + tensor k_83_pad_type_0 = const()[name = tensor("k_83_pad_type_0"), val = tensor("custom")]; + tensor k_83_pad_0 = const()[name = tensor("k_83_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(523763200))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(525729344))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized"), 
shape = tensor([1280, 2048, 1, 1])]; + tensor k_83_cast = conv(dilations = var_4252, groups = var_31, pad = k_83_pad_0, pad_type = k_83_pad_type_0, strides = var_4250, weight = unet_down_blocks_2_attentions_1_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_83_cast")]; + tensor var_4256 = const()[name = tensor("op_4256"), val = tensor([1, 1])]; + tensor var_4258 = const()[name = tensor("op_4258"), val = tensor([1, 1])]; + tensor v_83_pad_type_0 = const()[name = tensor("v_83_pad_type_0"), val = tensor("custom")]; + tensor v_83_pad_0 = const()[name = tensor("v_83_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(525729536))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(527695680))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_83_cast = conv(dilations = var_4258, groups = var_31, pad = v_83_pad_0, pad_type = v_83_pad_type_0, strides = var_4256, weight = unet_down_blocks_2_attentions_1_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_83_cast")]; + tensor var_4262 = const()[name = tensor("op_4262"), val = tensor([2, 20, 64, -1])]; + tensor var_4263_cast = reshape(shape = var_4262, x = q_83_cast)[name = tensor("op_4263_cast")]; + tensor var_4264 = const()[name = tensor("op_4264"), val = tensor([2, 20, 64, -1])]; + tensor var_4265_cast = reshape(shape = var_4264, x = k_83_cast)[name = tensor("op_4265_cast")]; + tensor var_4266 = const()[name = tensor("op_4266"), val = tensor([2, 20, 64, -1])]; + tensor var_4267_cast = reshape(shape = var_4266, x = v_83_cast)[name = tensor("op_4267_cast")]; + tensor attn_weights_165_transpose_x_0 = const()[name = tensor("attn_weights_165_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_165_transpose_y_0 = const()[name = tensor("attn_weights_165_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_165_cast = matmul(transpose_x = attn_weights_165_transpose_x_0, transpose_y = attn_weights_165_transpose_y_0, x = var_4263_cast, y = var_4265_cast)[name = tensor("attn_weights_165_cast")]; + tensor attn_weights_167_cast = mul(x = attn_weights_165_cast, y = var_12_to_fp16)[name = tensor("attn_weights_167_cast")]; + tensor var_4271_cast = softmax(axis = var_18, x = attn_weights_167_cast)[name = tensor("op_4271_cast")]; + tensor attn_83_transpose_x_0 = const()[name = tensor("attn_83_transpose_x_0"), val = tensor(false)]; + tensor attn_83_transpose_y_0 = const()[name = tensor("attn_83_transpose_y_0"), val = tensor(true)]; + tensor attn_83_cast = matmul(transpose_x = attn_83_transpose_x_0, transpose_y = attn_83_transpose_y_0, x = var_4267_cast, y = var_4271_cast)[name = tensor("attn_83_cast")]; + tensor var_4275 = const()[name = tensor("op_4275"), val = tensor([2, 1280, 1, -1])]; + tensor input_279_cast = reshape(shape = var_4275, x = attn_83_cast)[name = tensor("input_279_cast")]; + tensor var_4280 = const()[name = tensor("op_4280"), val = tensor([1, 1])]; + tensor var_4282 = const()[name = tensor("op_4282"), val = tensor([1, 1])]; + tensor var_4284_pad_type_0 = const()[name = tensor("op_4284_pad_type_0"), val = tensor("custom")]; + tensor var_4284_pad_0 = const()[name = tensor("op_4284_pad_0"), 
val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(527695872))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(528924736))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_6_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_6_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(528924928)))]; + tensor var_4284_cast = conv(bias = unet_down_blocks_2_attentions_1_transformer_blocks_6_attn2_to_out_0_bias_to_fp16, dilations = var_4282, groups = var_31, pad = var_4284_pad_0, pad_type = var_4284_pad_type_0, strides = var_4280, weight = unet_down_blocks_2_attentions_1_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized, x = input_279_cast)[name = tensor("op_4284_cast")]; + tensor inputs_125_cast = add(x = var_4284_cast, y = inputs_123_cast)[name = tensor("inputs_125_cast")]; + tensor var_4288 = const()[name = tensor("op_4288"), val = tensor([1])]; + tensor channels_mean_125_cast = reduce_mean(axes = var_4288, keep_dims = var_23, x = inputs_125_cast)[name = tensor("channels_mean_125_cast")]; + tensor zero_mean_125_cast = sub(x = inputs_125_cast, y = channels_mean_125_cast)[name = tensor("zero_mean_125_cast")]; + tensor zero_mean_sq_125_cast = mul(x = zero_mean_125_cast, y = zero_mean_125_cast)[name = tensor("zero_mean_sq_125_cast")]; + tensor var_4292 = const()[name = tensor("op_4292"), val = tensor([1])]; + tensor var_4293_cast = reduce_mean(axes = var_4292, keep_dims = var_23, x = zero_mean_sq_125_cast)[name = tensor("op_4293_cast")]; + tensor var_4294_to_fp16 = const()[name = tensor("op_4294_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_4295_cast = add(x = var_4293_cast, y = var_4294_to_fp16)[name = tensor("op_4295_cast")]; + tensor denom_125_epsilon_0_to_fp16 = const()[name = tensor("denom_125_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_125_cast = rsqrt(epsilon = denom_125_epsilon_0_to_fp16, x = var_4295_cast)[name = tensor("denom_125_cast")]; + tensor out_125_cast = mul(x = zero_mean_125_cast, y = denom_125_cast)[name = tensor("out_125_cast")]; + tensor var_4299_to_fp16 = const()[name = tensor("op_4299_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(528927552)))]; + tensor var_4300_cast = add(x = out_125_cast, y = var_4299_to_fp16)[name = tensor("op_4300_cast")]; + tensor var_4302_to_fp16 = const()[name = tensor("op_4302_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(528930176)))]; + tensor input_281_cast = mul(x = var_4300_cast, y = var_4302_to_fp16)[name = tensor("input_281_cast")]; + tensor var_4310 = const()[name = tensor("op_4310"), val = tensor([1, 1])]; + tensor var_4312 = const()[name = tensor("op_4312"), val = tensor([1, 1])]; + tensor var_4314_pad_type_0 = const()[name = tensor("op_4314_pad_type_0"), val = tensor("custom")]; + tensor var_4314_pad_0 = const()[name = tensor("op_4314_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized = 
constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(528932800))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(538763264))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_6_ff_net_0_proj_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(538763456))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(538771200))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_6_ff_net_0_proj_bias_to_fp16_palettized"), shape = tensor([10240])]; + tensor var_4314_cast = conv(bias = unet_down_blocks_2_attentions_1_transformer_blocks_6_ff_net_0_proj_bias_to_fp16_palettized, dilations = var_4312, groups = var_31, pad = var_4314_pad_0, pad_type = var_4314_pad_type_0, strides = var_4310, weight = unet_down_blocks_2_attentions_1_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized, x = input_281_cast)[name = tensor("op_4314_cast")]; + tensor var_4315_split_sizes_0 = const()[name = tensor("op_4315_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_4315_axis_0 = const()[name = tensor("op_4315_axis_0"), val = tensor(1)]; + tensor var_4315_cast_0, tensor var_4315_cast_1 = split(axis = var_4315_axis_0, split_sizes = var_4315_split_sizes_0, x = var_4314_cast)[name = tensor("op_4315_cast")]; + tensor var_4317_mode_0 = const()[name = tensor("op_4317_mode_0"), val = tensor("EXACT")]; + tensor var_4317_cast = gelu(mode = var_4317_mode_0, x = var_4315_cast_1)[name = tensor("op_4317_cast")]; + tensor input_283_cast = mul(x = var_4315_cast_0, y = var_4317_cast)[name = tensor("input_283_cast")]; + tensor var_4321 = const()[name = tensor("op_4321"), val = tensor([1, 1])]; + tensor var_4323 = const()[name = tensor("op_4323"), val = tensor([1, 1])]; + tensor var_4325_pad_type_0 = const()[name = tensor("op_4325_pad_type_0"), val = tensor("custom")]; + tensor var_4325_pad_0 = const()[name = tensor("op_4325_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(538771392))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(543686656))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_6_ff_net_2_bias_to_fp16 = const()[name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_6_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(543686848)))]; + tensor var_4325_cast = conv(bias = unet_down_blocks_2_attentions_1_transformer_blocks_6_ff_net_2_bias_to_fp16, dilations = var_4323, groups = var_31, pad = var_4325_pad_0, pad_type = var_4325_pad_type_0, strides = var_4321, weight = unet_down_blocks_2_attentions_1_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized, x = input_283_cast)[name = tensor("op_4325_cast")]; + tensor inputs_127_cast = add(x = var_4325_cast, y = inputs_125_cast)[name = tensor("inputs_127_cast")]; + tensor var_4335 = const()[name = 
tensor("op_4335"), val = tensor([1])]; + tensor channels_mean_127_cast = reduce_mean(axes = var_4335, keep_dims = var_23, x = inputs_127_cast)[name = tensor("channels_mean_127_cast")]; + tensor zero_mean_127_cast = sub(x = inputs_127_cast, y = channels_mean_127_cast)[name = tensor("zero_mean_127_cast")]; + tensor zero_mean_sq_127_cast = mul(x = zero_mean_127_cast, y = zero_mean_127_cast)[name = tensor("zero_mean_sq_127_cast")]; + tensor var_4339 = const()[name = tensor("op_4339"), val = tensor([1])]; + tensor var_4340_cast = reduce_mean(axes = var_4339, keep_dims = var_23, x = zero_mean_sq_127_cast)[name = tensor("op_4340_cast")]; + tensor var_4341_to_fp16 = const()[name = tensor("op_4341_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_4342_cast = add(x = var_4340_cast, y = var_4341_to_fp16)[name = tensor("op_4342_cast")]; + tensor denom_127_epsilon_0_to_fp16 = const()[name = tensor("denom_127_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_127_cast = rsqrt(epsilon = denom_127_epsilon_0_to_fp16, x = var_4342_cast)[name = tensor("denom_127_cast")]; + tensor out_127_cast = mul(x = zero_mean_127_cast, y = denom_127_cast)[name = tensor("out_127_cast")]; + tensor var_4346_to_fp16 = const()[name = tensor("op_4346_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(543689472)))]; + tensor var_4347_cast = add(x = out_127_cast, y = var_4346_to_fp16)[name = tensor("op_4347_cast")]; + tensor var_4349_to_fp16 = const()[name = tensor("op_4349_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(543692096)))]; + tensor hidden_states_179_cast = mul(x = var_4347_cast, y = var_4349_to_fp16)[name = tensor("hidden_states_179_cast")]; + tensor var_4356 = const()[name = tensor("op_4356"), val = tensor([1, 1])]; + tensor var_4358 = const()[name = tensor("op_4358"), val = tensor([1, 1])]; + tensor q_85_pad_type_0 = const()[name = tensor("q_85_pad_type_0"), val = tensor("custom")]; + tensor q_85_pad_0 = const()[name = tensor("q_85_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(543694720))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(544923584))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_85_cast = conv(dilations = var_4358, groups = var_31, pad = q_85_pad_0, pad_type = q_85_pad_type_0, strides = var_4356, weight = unet_down_blocks_2_attentions_1_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_179_cast)[name = tensor("q_85_cast")]; + tensor var_4362 = const()[name = tensor("op_4362"), val = tensor([1, 1])]; + tensor var_4364 = const()[name = tensor("op_4364"), val = tensor([1, 1])]; + tensor k_85_pad_type_0 = const()[name = tensor("k_85_pad_type_0"), val = tensor("custom")]; + tensor k_85_pad_0 = const()[name = tensor("k_85_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(544923776))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(546152640))), name = 
tensor("unet_down_blocks_2_attentions_1_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_85_cast = conv(dilations = var_4364, groups = var_31, pad = k_85_pad_0, pad_type = k_85_pad_type_0, strides = var_4362, weight = unet_down_blocks_2_attentions_1_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_179_cast)[name = tensor("k_85_cast")]; + tensor var_4368 = const()[name = tensor("op_4368"), val = tensor([1, 1])]; + tensor var_4370 = const()[name = tensor("op_4370"), val = tensor([1, 1])]; + tensor v_85_pad_type_0 = const()[name = tensor("v_85_pad_type_0"), val = tensor("custom")]; + tensor v_85_pad_0 = const()[name = tensor("v_85_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(546152832))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(547381696))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_85_cast = conv(dilations = var_4370, groups = var_31, pad = v_85_pad_0, pad_type = v_85_pad_type_0, strides = var_4368, weight = unet_down_blocks_2_attentions_1_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_179_cast)[name = tensor("v_85_cast")]; + tensor var_4374 = const()[name = tensor("op_4374"), val = tensor([2, 20, 64, -1])]; + tensor var_4375_cast = reshape(shape = var_4374, x = q_85_cast)[name = tensor("op_4375_cast")]; + tensor var_4376 = const()[name = tensor("op_4376"), val = tensor([2, 20, 64, -1])]; + tensor var_4377_cast = reshape(shape = var_4376, x = k_85_cast)[name = tensor("op_4377_cast")]; + tensor var_4378 = const()[name = tensor("op_4378"), val = tensor([2, 20, 64, -1])]; + tensor var_4379_cast = reshape(shape = var_4378, x = v_85_cast)[name = tensor("op_4379_cast")]; + tensor attn_weights_169_transpose_x_0 = const()[name = tensor("attn_weights_169_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_169_transpose_y_0 = const()[name = tensor("attn_weights_169_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_169_cast = matmul(transpose_x = attn_weights_169_transpose_x_0, transpose_y = attn_weights_169_transpose_y_0, x = var_4375_cast, y = var_4377_cast)[name = tensor("attn_weights_169_cast")]; + tensor attn_weights_171_cast = mul(x = attn_weights_169_cast, y = var_12_to_fp16)[name = tensor("attn_weights_171_cast")]; + tensor var_4383_cast = softmax(axis = var_18, x = attn_weights_171_cast)[name = tensor("op_4383_cast")]; + tensor attn_85_transpose_x_0 = const()[name = tensor("attn_85_transpose_x_0"), val = tensor(false)]; + tensor attn_85_transpose_y_0 = const()[name = tensor("attn_85_transpose_y_0"), val = tensor(true)]; + tensor attn_85_cast = matmul(transpose_x = attn_85_transpose_x_0, transpose_y = attn_85_transpose_y_0, x = var_4379_cast, y = var_4383_cast)[name = tensor("attn_85_cast")]; + tensor var_4387 = const()[name = tensor("op_4387"), val = tensor([2, 1280, 1, -1])]; + tensor input_285_cast = reshape(shape = var_4387, x = attn_85_cast)[name = tensor("input_285_cast")]; + tensor var_4392 = const()[name = tensor("op_4392"), val = tensor([1, 1])]; + tensor var_4394 = const()[name = tensor("op_4394"), val = tensor([1, 1])]; + tensor var_4396_pad_type_0 = const()[name = 
tensor("op_4396_pad_type_0"), val = tensor("custom")]; + tensor var_4396_pad_0 = const()[name = tensor("op_4396_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(547381888))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(548610752))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_7_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_7_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(548610944)))]; + tensor var_4396_cast = conv(bias = unet_down_blocks_2_attentions_1_transformer_blocks_7_attn1_to_out_0_bias_to_fp16, dilations = var_4394, groups = var_31, pad = var_4396_pad_0, pad_type = var_4396_pad_type_0, strides = var_4392, weight = unet_down_blocks_2_attentions_1_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized, x = input_285_cast)[name = tensor("op_4396_cast")]; + tensor inputs_129_cast = add(x = var_4396_cast, y = inputs_127_cast)[name = tensor("inputs_129_cast")]; + tensor var_4400 = const()[name = tensor("op_4400"), val = tensor([1])]; + tensor channels_mean_129_cast = reduce_mean(axes = var_4400, keep_dims = var_23, x = inputs_129_cast)[name = tensor("channels_mean_129_cast")]; + tensor zero_mean_129_cast = sub(x = inputs_129_cast, y = channels_mean_129_cast)[name = tensor("zero_mean_129_cast")]; + tensor zero_mean_sq_129_cast = mul(x = zero_mean_129_cast, y = zero_mean_129_cast)[name = tensor("zero_mean_sq_129_cast")]; + tensor var_4404 = const()[name = tensor("op_4404"), val = tensor([1])]; + tensor var_4405_cast = reduce_mean(axes = var_4404, keep_dims = var_23, x = zero_mean_sq_129_cast)[name = tensor("op_4405_cast")]; + tensor var_4406_to_fp16 = const()[name = tensor("op_4406_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_4407_cast = add(x = var_4405_cast, y = var_4406_to_fp16)[name = tensor("op_4407_cast")]; + tensor denom_129_epsilon_0_to_fp16 = const()[name = tensor("denom_129_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_129_cast = rsqrt(epsilon = denom_129_epsilon_0_to_fp16, x = var_4407_cast)[name = tensor("denom_129_cast")]; + tensor out_129_cast = mul(x = zero_mean_129_cast, y = denom_129_cast)[name = tensor("out_129_cast")]; + tensor var_4411_to_fp16 = const()[name = tensor("op_4411_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(548613568)))]; + tensor var_4412_cast = add(x = out_129_cast, y = var_4411_to_fp16)[name = tensor("op_4412_cast")]; + tensor var_4414_to_fp16 = const()[name = tensor("op_4414_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(548616192)))]; + tensor hidden_states_181_cast = mul(x = var_4412_cast, y = var_4414_to_fp16)[name = tensor("hidden_states_181_cast")]; + tensor var_4421 = const()[name = tensor("op_4421"), val = tensor([1, 1])]; + tensor var_4423 = const()[name = tensor("op_4423"), val = tensor([1, 1])]; + tensor q_87_pad_type_0 = const()[name = tensor("q_87_pad_type_0"), val = tensor("custom")]; + tensor q_87_pad_0 = const()[name = tensor("q_87_pad_0"), val = tensor([0, 0, 0, 0])]; + 
tensor unet_down_blocks_2_attentions_1_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(548618816))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(549847680))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_87_cast = conv(dilations = var_4423, groups = var_31, pad = q_87_pad_0, pad_type = q_87_pad_type_0, strides = var_4421, weight = unet_down_blocks_2_attentions_1_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_181_cast)[name = tensor("q_87_cast")]; + tensor var_4427 = const()[name = tensor("op_4427"), val = tensor([1, 1])]; + tensor var_4429 = const()[name = tensor("op_4429"), val = tensor([1, 1])]; + tensor k_87_pad_type_0 = const()[name = tensor("k_87_pad_type_0"), val = tensor("custom")]; + tensor k_87_pad_0 = const()[name = tensor("k_87_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(549847872))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(551814016))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_87_cast = conv(dilations = var_4429, groups = var_31, pad = k_87_pad_0, pad_type = k_87_pad_type_0, strides = var_4427, weight = unet_down_blocks_2_attentions_1_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_87_cast")]; + tensor var_4433 = const()[name = tensor("op_4433"), val = tensor([1, 1])]; + tensor var_4435 = const()[name = tensor("op_4435"), val = tensor([1, 1])]; + tensor v_87_pad_type_0 = const()[name = tensor("v_87_pad_type_0"), val = tensor("custom")]; + tensor v_87_pad_0 = const()[name = tensor("v_87_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(551814208))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(553780352))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_87_cast = conv(dilations = var_4435, groups = var_31, pad = v_87_pad_0, pad_type = v_87_pad_type_0, strides = var_4433, weight = unet_down_blocks_2_attentions_1_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_87_cast")]; + tensor var_4439 = const()[name = tensor("op_4439"), val = tensor([2, 20, 64, -1])]; + tensor var_4440_cast = reshape(shape = var_4439, x = q_87_cast)[name = tensor("op_4440_cast")]; + tensor var_4441 = const()[name = tensor("op_4441"), val = tensor([2, 20, 64, -1])]; + tensor var_4442_cast = reshape(shape = var_4441, x = k_87_cast)[name = tensor("op_4442_cast")]; + tensor var_4443 = const()[name = tensor("op_4443"), val = tensor([2, 20, 64, -1])]; + tensor var_4444_cast = reshape(shape = var_4443, x = v_87_cast)[name = tensor("op_4444_cast")]; + tensor attn_weights_173_transpose_x_0 
= const()[name = tensor("attn_weights_173_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_173_transpose_y_0 = const()[name = tensor("attn_weights_173_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_173_cast = matmul(transpose_x = attn_weights_173_transpose_x_0, transpose_y = attn_weights_173_transpose_y_0, x = var_4440_cast, y = var_4442_cast)[name = tensor("attn_weights_173_cast")]; + tensor attn_weights_175_cast = mul(x = attn_weights_173_cast, y = var_12_to_fp16)[name = tensor("attn_weights_175_cast")]; + tensor var_4448_cast = softmax(axis = var_18, x = attn_weights_175_cast)[name = tensor("op_4448_cast")]; + tensor attn_87_transpose_x_0 = const()[name = tensor("attn_87_transpose_x_0"), val = tensor(false)]; + tensor attn_87_transpose_y_0 = const()[name = tensor("attn_87_transpose_y_0"), val = tensor(true)]; + tensor attn_87_cast = matmul(transpose_x = attn_87_transpose_x_0, transpose_y = attn_87_transpose_y_0, x = var_4444_cast, y = var_4448_cast)[name = tensor("attn_87_cast")]; + tensor var_4452 = const()[name = tensor("op_4452"), val = tensor([2, 1280, 1, -1])]; + tensor input_287_cast = reshape(shape = var_4452, x = attn_87_cast)[name = tensor("input_287_cast")]; + tensor var_4457 = const()[name = tensor("op_4457"), val = tensor([1, 1])]; + tensor var_4459 = const()[name = tensor("op_4459"), val = tensor([1, 1])]; + tensor var_4461_pad_type_0 = const()[name = tensor("op_4461_pad_type_0"), val = tensor("custom")]; + tensor var_4461_pad_0 = const()[name = tensor("op_4461_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(553780544))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(555009408))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_7_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_7_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(555009600)))]; + tensor var_4461_cast = conv(bias = unet_down_blocks_2_attentions_1_transformer_blocks_7_attn2_to_out_0_bias_to_fp16, dilations = var_4459, groups = var_31, pad = var_4461_pad_0, pad_type = var_4461_pad_type_0, strides = var_4457, weight = unet_down_blocks_2_attentions_1_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized, x = input_287_cast)[name = tensor("op_4461_cast")]; + tensor inputs_131_cast = add(x = var_4461_cast, y = inputs_129_cast)[name = tensor("inputs_131_cast")]; + tensor var_4465 = const()[name = tensor("op_4465"), val = tensor([1])]; + tensor channels_mean_131_cast = reduce_mean(axes = var_4465, keep_dims = var_23, x = inputs_131_cast)[name = tensor("channels_mean_131_cast")]; + tensor zero_mean_131_cast = sub(x = inputs_131_cast, y = channels_mean_131_cast)[name = tensor("zero_mean_131_cast")]; + tensor zero_mean_sq_131_cast = mul(x = zero_mean_131_cast, y = zero_mean_131_cast)[name = tensor("zero_mean_sq_131_cast")]; + tensor var_4469 = const()[name = tensor("op_4469"), val = tensor([1])]; + tensor var_4470_cast = reduce_mean(axes = var_4469, keep_dims = var_23, x = zero_mean_sq_131_cast)[name = tensor("op_4470_cast")]; + tensor var_4471_to_fp16 = 
const()[name = tensor("op_4471_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_4472_cast = add(x = var_4470_cast, y = var_4471_to_fp16)[name = tensor("op_4472_cast")]; + tensor denom_131_epsilon_0_to_fp16 = const()[name = tensor("denom_131_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_131_cast = rsqrt(epsilon = denom_131_epsilon_0_to_fp16, x = var_4472_cast)[name = tensor("denom_131_cast")]; + tensor out_131_cast = mul(x = zero_mean_131_cast, y = denom_131_cast)[name = tensor("out_131_cast")]; + tensor var_4476_to_fp16 = const()[name = tensor("op_4476_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(555012224)))]; + tensor var_4477_cast = add(x = out_131_cast, y = var_4476_to_fp16)[name = tensor("op_4477_cast")]; + tensor var_4479_to_fp16 = const()[name = tensor("op_4479_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(555014848)))]; + tensor input_289_cast = mul(x = var_4477_cast, y = var_4479_to_fp16)[name = tensor("input_289_cast")]; + tensor var_4487 = const()[name = tensor("op_4487"), val = tensor([1, 1])]; + tensor var_4489 = const()[name = tensor("op_4489"), val = tensor([1, 1])]; + tensor var_4491_pad_type_0 = const()[name = tensor("op_4491_pad_type_0"), val = tensor("custom")]; + tensor var_4491_pad_0 = const()[name = tensor("op_4491_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(555017472))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(564847936))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_7_ff_net_0_proj_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(564848128))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(564855872))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_7_ff_net_0_proj_bias_to_fp16_palettized"), shape = tensor([10240])]; + tensor var_4491_cast = conv(bias = unet_down_blocks_2_attentions_1_transformer_blocks_7_ff_net_0_proj_bias_to_fp16_palettized, dilations = var_4489, groups = var_31, pad = var_4491_pad_0, pad_type = var_4491_pad_type_0, strides = var_4487, weight = unet_down_blocks_2_attentions_1_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized, x = input_289_cast)[name = tensor("op_4491_cast")]; + tensor var_4492_split_sizes_0 = const()[name = tensor("op_4492_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_4492_axis_0 = const()[name = tensor("op_4492_axis_0"), val = tensor(1)]; + tensor var_4492_cast_0, tensor var_4492_cast_1 = split(axis = var_4492_axis_0, split_sizes = var_4492_split_sizes_0, x = var_4491_cast)[name = tensor("op_4492_cast")]; + tensor var_4494_mode_0 = const()[name = tensor("op_4494_mode_0"), val = tensor("EXACT")]; + tensor var_4494_cast = gelu(mode = var_4494_mode_0, x = var_4492_cast_1)[name = tensor("op_4494_cast")]; + tensor input_291_cast = mul(x = var_4492_cast_0, y = var_4494_cast)[name = tensor("input_291_cast")]; + tensor var_4498 = const()[name = tensor("op_4498"), val = tensor([1, 1])]; + tensor var_4500 = const()[name = 
tensor("op_4500"), val = tensor([1, 1])]; + tensor var_4502_pad_type_0 = const()[name = tensor("op_4502_pad_type_0"), val = tensor("custom")]; + tensor var_4502_pad_0 = const()[name = tensor("op_4502_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(564856064))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(569771328))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_7_ff_net_2_bias_to_fp16 = const()[name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_7_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(569771520)))]; + tensor var_4502_cast = conv(bias = unet_down_blocks_2_attentions_1_transformer_blocks_7_ff_net_2_bias_to_fp16, dilations = var_4500, groups = var_31, pad = var_4502_pad_0, pad_type = var_4502_pad_type_0, strides = var_4498, weight = unet_down_blocks_2_attentions_1_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized, x = input_291_cast)[name = tensor("op_4502_cast")]; + tensor inputs_133_cast = add(x = var_4502_cast, y = inputs_131_cast)[name = tensor("inputs_133_cast")]; + tensor var_4512 = const()[name = tensor("op_4512"), val = tensor([1])]; + tensor channels_mean_133_cast = reduce_mean(axes = var_4512, keep_dims = var_23, x = inputs_133_cast)[name = tensor("channels_mean_133_cast")]; + tensor zero_mean_133_cast = sub(x = inputs_133_cast, y = channels_mean_133_cast)[name = tensor("zero_mean_133_cast")]; + tensor zero_mean_sq_133_cast = mul(x = zero_mean_133_cast, y = zero_mean_133_cast)[name = tensor("zero_mean_sq_133_cast")]; + tensor var_4516 = const()[name = tensor("op_4516"), val = tensor([1])]; + tensor var_4517_cast = reduce_mean(axes = var_4516, keep_dims = var_23, x = zero_mean_sq_133_cast)[name = tensor("op_4517_cast")]; + tensor var_4518_to_fp16 = const()[name = tensor("op_4518_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_4519_cast = add(x = var_4517_cast, y = var_4518_to_fp16)[name = tensor("op_4519_cast")]; + tensor denom_133_epsilon_0_to_fp16 = const()[name = tensor("denom_133_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_133_cast = rsqrt(epsilon = denom_133_epsilon_0_to_fp16, x = var_4519_cast)[name = tensor("denom_133_cast")]; + tensor out_133_cast = mul(x = zero_mean_133_cast, y = denom_133_cast)[name = tensor("out_133_cast")]; + tensor var_4523_to_fp16 = const()[name = tensor("op_4523_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(569774144)))]; + tensor var_4524_cast = add(x = out_133_cast, y = var_4523_to_fp16)[name = tensor("op_4524_cast")]; + tensor var_4526_to_fp16 = const()[name = tensor("op_4526_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(569776768)))]; + tensor hidden_states_185_cast = mul(x = var_4524_cast, y = var_4526_to_fp16)[name = tensor("hidden_states_185_cast")]; + tensor var_4533 = const()[name = tensor("op_4533"), val = tensor([1, 1])]; + tensor var_4535 = const()[name = tensor("op_4535"), val = tensor([1, 1])]; + tensor q_89_pad_type_0 = const()[name = tensor("q_89_pad_type_0"), val = tensor("custom")]; + tensor q_89_pad_0 = const()[name = 
tensor("q_89_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(569779392))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(571008256))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_89_cast = conv(dilations = var_4535, groups = var_31, pad = q_89_pad_0, pad_type = q_89_pad_type_0, strides = var_4533, weight = unet_down_blocks_2_attentions_1_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_185_cast)[name = tensor("q_89_cast")]; + tensor var_4539 = const()[name = tensor("op_4539"), val = tensor([1, 1])]; + tensor var_4541 = const()[name = tensor("op_4541"), val = tensor([1, 1])]; + tensor k_89_pad_type_0 = const()[name = tensor("k_89_pad_type_0"), val = tensor("custom")]; + tensor k_89_pad_0 = const()[name = tensor("k_89_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(571008448))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(572237312))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_89_cast = conv(dilations = var_4541, groups = var_31, pad = k_89_pad_0, pad_type = k_89_pad_type_0, strides = var_4539, weight = unet_down_blocks_2_attentions_1_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_185_cast)[name = tensor("k_89_cast")]; + tensor var_4545 = const()[name = tensor("op_4545"), val = tensor([1, 1])]; + tensor var_4547 = const()[name = tensor("op_4547"), val = tensor([1, 1])]; + tensor v_89_pad_type_0 = const()[name = tensor("v_89_pad_type_0"), val = tensor("custom")]; + tensor v_89_pad_0 = const()[name = tensor("v_89_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(572237504))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(573466368))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_89_cast = conv(dilations = var_4547, groups = var_31, pad = v_89_pad_0, pad_type = v_89_pad_type_0, strides = var_4545, weight = unet_down_blocks_2_attentions_1_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_185_cast)[name = tensor("v_89_cast")]; + tensor var_4551 = const()[name = tensor("op_4551"), val = tensor([2, 20, 64, -1])]; + tensor var_4552_cast = reshape(shape = var_4551, x = q_89_cast)[name = tensor("op_4552_cast")]; + tensor var_4553 = const()[name = tensor("op_4553"), val = tensor([2, 20, 64, -1])]; + tensor var_4554_cast = reshape(shape = var_4553, x = k_89_cast)[name = tensor("op_4554_cast")]; + tensor var_4555 = const()[name = tensor("op_4555"), val = tensor([2, 20, 64, -1])]; + tensor var_4556_cast = reshape(shape = var_4555, x = v_89_cast)[name = 
tensor("op_4556_cast")]; + tensor attn_weights_177_transpose_x_0 = const()[name = tensor("attn_weights_177_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_177_transpose_y_0 = const()[name = tensor("attn_weights_177_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_177_cast = matmul(transpose_x = attn_weights_177_transpose_x_0, transpose_y = attn_weights_177_transpose_y_0, x = var_4552_cast, y = var_4554_cast)[name = tensor("attn_weights_177_cast")]; + tensor attn_weights_179_cast = mul(x = attn_weights_177_cast, y = var_12_to_fp16)[name = tensor("attn_weights_179_cast")]; + tensor var_4560_cast = softmax(axis = var_18, x = attn_weights_179_cast)[name = tensor("op_4560_cast")]; + tensor attn_89_transpose_x_0 = const()[name = tensor("attn_89_transpose_x_0"), val = tensor(false)]; + tensor attn_89_transpose_y_0 = const()[name = tensor("attn_89_transpose_y_0"), val = tensor(true)]; + tensor attn_89_cast = matmul(transpose_x = attn_89_transpose_x_0, transpose_y = attn_89_transpose_y_0, x = var_4556_cast, y = var_4560_cast)[name = tensor("attn_89_cast")]; + tensor var_4564 = const()[name = tensor("op_4564"), val = tensor([2, 1280, 1, -1])]; + tensor input_293_cast = reshape(shape = var_4564, x = attn_89_cast)[name = tensor("input_293_cast")]; + tensor var_4569 = const()[name = tensor("op_4569"), val = tensor([1, 1])]; + tensor var_4571 = const()[name = tensor("op_4571"), val = tensor([1, 1])]; + tensor var_4573_pad_type_0 = const()[name = tensor("op_4573_pad_type_0"), val = tensor("custom")]; + tensor var_4573_pad_0 = const()[name = tensor("op_4573_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(573466560))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(574695424))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_8_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_8_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(574695616)))]; + tensor var_4573_cast = conv(bias = unet_down_blocks_2_attentions_1_transformer_blocks_8_attn1_to_out_0_bias_to_fp16, dilations = var_4571, groups = var_31, pad = var_4573_pad_0, pad_type = var_4573_pad_type_0, strides = var_4569, weight = unet_down_blocks_2_attentions_1_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized, x = input_293_cast)[name = tensor("op_4573_cast")]; + tensor inputs_135_cast = add(x = var_4573_cast, y = inputs_133_cast)[name = tensor("inputs_135_cast")]; + tensor var_4577 = const()[name = tensor("op_4577"), val = tensor([1])]; + tensor channels_mean_135_cast = reduce_mean(axes = var_4577, keep_dims = var_23, x = inputs_135_cast)[name = tensor("channels_mean_135_cast")]; + tensor zero_mean_135_cast = sub(x = inputs_135_cast, y = channels_mean_135_cast)[name = tensor("zero_mean_135_cast")]; + tensor zero_mean_sq_135_cast = mul(x = zero_mean_135_cast, y = zero_mean_135_cast)[name = tensor("zero_mean_sq_135_cast")]; + tensor var_4581 = const()[name = tensor("op_4581"), val = tensor([1])]; + tensor var_4582_cast = reduce_mean(axes = var_4581, keep_dims = var_23, x = 
zero_mean_sq_135_cast)[name = tensor("op_4582_cast")]; + tensor var_4583_to_fp16 = const()[name = tensor("op_4583_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_4584_cast = add(x = var_4582_cast, y = var_4583_to_fp16)[name = tensor("op_4584_cast")]; + tensor denom_135_epsilon_0_to_fp16 = const()[name = tensor("denom_135_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_135_cast = rsqrt(epsilon = denom_135_epsilon_0_to_fp16, x = var_4584_cast)[name = tensor("denom_135_cast")]; + tensor out_135_cast = mul(x = zero_mean_135_cast, y = denom_135_cast)[name = tensor("out_135_cast")]; + tensor var_4588_to_fp16 = const()[name = tensor("op_4588_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(574698240)))]; + tensor var_4589_cast = add(x = out_135_cast, y = var_4588_to_fp16)[name = tensor("op_4589_cast")]; + tensor var_4591_to_fp16 = const()[name = tensor("op_4591_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(574700864)))]; + tensor hidden_states_187_cast = mul(x = var_4589_cast, y = var_4591_to_fp16)[name = tensor("hidden_states_187_cast")]; + tensor var_4598 = const()[name = tensor("op_4598"), val = tensor([1, 1])]; + tensor var_4600 = const()[name = tensor("op_4600"), val = tensor([1, 1])]; + tensor q_91_pad_type_0 = const()[name = tensor("q_91_pad_type_0"), val = tensor("custom")]; + tensor q_91_pad_0 = const()[name = tensor("q_91_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(574703488))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(575932352))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_91_cast = conv(dilations = var_4600, groups = var_31, pad = q_91_pad_0, pad_type = q_91_pad_type_0, strides = var_4598, weight = unet_down_blocks_2_attentions_1_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_187_cast)[name = tensor("q_91_cast")]; + tensor var_4604 = const()[name = tensor("op_4604"), val = tensor([1, 1])]; + tensor var_4606 = const()[name = tensor("op_4606"), val = tensor([1, 1])]; + tensor k_91_pad_type_0 = const()[name = tensor("k_91_pad_type_0"), val = tensor("custom")]; + tensor k_91_pad_0 = const()[name = tensor("k_91_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(575932544))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(577898688))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_91_cast = conv(dilations = var_4606, groups = var_31, pad = k_91_pad_0, pad_type = k_91_pad_type_0, strides = var_4604, weight = unet_down_blocks_2_attentions_1_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_91_cast")]; + tensor var_4610 = const()[name = tensor("op_4610"), val = tensor([1, 1])]; + tensor var_4612 = const()[name = tensor("op_4612"), val = tensor([1, 1])]; + tensor v_91_pad_type_0 = 
const()[name = tensor("v_91_pad_type_0"), val = tensor("custom")]; + tensor v_91_pad_0 = const()[name = tensor("v_91_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(577898880))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(579865024))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_91_cast = conv(dilations = var_4612, groups = var_31, pad = v_91_pad_0, pad_type = v_91_pad_type_0, strides = var_4610, weight = unet_down_blocks_2_attentions_1_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_91_cast")]; + tensor var_4616 = const()[name = tensor("op_4616"), val = tensor([2, 20, 64, -1])]; + tensor var_4617_cast = reshape(shape = var_4616, x = q_91_cast)[name = tensor("op_4617_cast")]; + tensor var_4618 = const()[name = tensor("op_4618"), val = tensor([2, 20, 64, -1])]; + tensor var_4619_cast = reshape(shape = var_4618, x = k_91_cast)[name = tensor("op_4619_cast")]; + tensor var_4620 = const()[name = tensor("op_4620"), val = tensor([2, 20, 64, -1])]; + tensor var_4621_cast = reshape(shape = var_4620, x = v_91_cast)[name = tensor("op_4621_cast")]; + tensor attn_weights_181_transpose_x_0 = const()[name = tensor("attn_weights_181_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_181_transpose_y_0 = const()[name = tensor("attn_weights_181_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_181_cast = matmul(transpose_x = attn_weights_181_transpose_x_0, transpose_y = attn_weights_181_transpose_y_0, x = var_4617_cast, y = var_4619_cast)[name = tensor("attn_weights_181_cast")]; + tensor attn_weights_183_cast = mul(x = attn_weights_181_cast, y = var_12_to_fp16)[name = tensor("attn_weights_183_cast")]; + tensor var_4625_cast = softmax(axis = var_18, x = attn_weights_183_cast)[name = tensor("op_4625_cast")]; + tensor attn_91_transpose_x_0 = const()[name = tensor("attn_91_transpose_x_0"), val = tensor(false)]; + tensor attn_91_transpose_y_0 = const()[name = tensor("attn_91_transpose_y_0"), val = tensor(true)]; + tensor attn_91_cast = matmul(transpose_x = attn_91_transpose_x_0, transpose_y = attn_91_transpose_y_0, x = var_4621_cast, y = var_4625_cast)[name = tensor("attn_91_cast")]; + tensor var_4629 = const()[name = tensor("op_4629"), val = tensor([2, 1280, 1, -1])]; + tensor input_295_cast = reshape(shape = var_4629, x = attn_91_cast)[name = tensor("input_295_cast")]; + tensor var_4634 = const()[name = tensor("op_4634"), val = tensor([1, 1])]; + tensor var_4636 = const()[name = tensor("op_4636"), val = tensor([1, 1])]; + tensor var_4638_pad_type_0 = const()[name = tensor("op_4638_pad_type_0"), val = tensor("custom")]; + tensor var_4638_pad_0 = const()[name = tensor("op_4638_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(579865216))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(581094080))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 
1])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_8_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_8_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(581094272)))]; + tensor var_4638_cast = conv(bias = unet_down_blocks_2_attentions_1_transformer_blocks_8_attn2_to_out_0_bias_to_fp16, dilations = var_4636, groups = var_31, pad = var_4638_pad_0, pad_type = var_4638_pad_type_0, strides = var_4634, weight = unet_down_blocks_2_attentions_1_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized, x = input_295_cast)[name = tensor("op_4638_cast")]; + tensor inputs_137_cast = add(x = var_4638_cast, y = inputs_135_cast)[name = tensor("inputs_137_cast")]; + tensor var_4642 = const()[name = tensor("op_4642"), val = tensor([1])]; + tensor channels_mean_137_cast = reduce_mean(axes = var_4642, keep_dims = var_23, x = inputs_137_cast)[name = tensor("channels_mean_137_cast")]; + tensor zero_mean_137_cast = sub(x = inputs_137_cast, y = channels_mean_137_cast)[name = tensor("zero_mean_137_cast")]; + tensor zero_mean_sq_137_cast = mul(x = zero_mean_137_cast, y = zero_mean_137_cast)[name = tensor("zero_mean_sq_137_cast")]; + tensor var_4646 = const()[name = tensor("op_4646"), val = tensor([1])]; + tensor var_4647_cast = reduce_mean(axes = var_4646, keep_dims = var_23, x = zero_mean_sq_137_cast)[name = tensor("op_4647_cast")]; + tensor var_4648_to_fp16 = const()[name = tensor("op_4648_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_4649_cast = add(x = var_4647_cast, y = var_4648_to_fp16)[name = tensor("op_4649_cast")]; + tensor denom_137_epsilon_0_to_fp16 = const()[name = tensor("denom_137_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_137_cast = rsqrt(epsilon = denom_137_epsilon_0_to_fp16, x = var_4649_cast)[name = tensor("denom_137_cast")]; + tensor out_137_cast = mul(x = zero_mean_137_cast, y = denom_137_cast)[name = tensor("out_137_cast")]; + tensor var_4653_to_fp16 = const()[name = tensor("op_4653_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(581096896)))]; + tensor var_4654_cast = add(x = out_137_cast, y = var_4653_to_fp16)[name = tensor("op_4654_cast")]; + tensor var_4656_to_fp16 = const()[name = tensor("op_4656_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(581099520)))]; + tensor input_297_cast = mul(x = var_4654_cast, y = var_4656_to_fp16)[name = tensor("input_297_cast")]; + tensor var_4664 = const()[name = tensor("op_4664"), val = tensor([1, 1])]; + tensor var_4666 = const()[name = tensor("op_4666"), val = tensor([1, 1])]; + tensor var_4668_pad_type_0 = const()[name = tensor("op_4668_pad_type_0"), val = tensor("custom")]; + tensor var_4668_pad_0 = const()[name = tensor("op_4668_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(581102144))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(590932608))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_8_ff_net_0_proj_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = 
tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(590932800))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(590940544))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_8_ff_net_0_proj_bias_to_fp16_palettized"), shape = tensor([10240])]; + tensor var_4668_cast = conv(bias = unet_down_blocks_2_attentions_1_transformer_blocks_8_ff_net_0_proj_bias_to_fp16_palettized, dilations = var_4666, groups = var_31, pad = var_4668_pad_0, pad_type = var_4668_pad_type_0, strides = var_4664, weight = unet_down_blocks_2_attentions_1_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized, x = input_297_cast)[name = tensor("op_4668_cast")]; + tensor var_4669_split_sizes_0 = const()[name = tensor("op_4669_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_4669_axis_0 = const()[name = tensor("op_4669_axis_0"), val = tensor(1)]; + tensor var_4669_cast_0, tensor var_4669_cast_1 = split(axis = var_4669_axis_0, split_sizes = var_4669_split_sizes_0, x = var_4668_cast)[name = tensor("op_4669_cast")]; + tensor var_4671_mode_0 = const()[name = tensor("op_4671_mode_0"), val = tensor("EXACT")]; + tensor var_4671_cast = gelu(mode = var_4671_mode_0, x = var_4669_cast_1)[name = tensor("op_4671_cast")]; + tensor input_299_cast = mul(x = var_4669_cast_0, y = var_4671_cast)[name = tensor("input_299_cast")]; + tensor var_4675 = const()[name = tensor("op_4675"), val = tensor([1, 1])]; + tensor var_4677 = const()[name = tensor("op_4677"), val = tensor([1, 1])]; + tensor var_4679_pad_type_0 = const()[name = tensor("op_4679_pad_type_0"), val = tensor("custom")]; + tensor var_4679_pad_0 = const()[name = tensor("op_4679_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(590940736))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(595856000))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_8_ff_net_2_bias_to_fp16 = const()[name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_8_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(595856192)))]; + tensor var_4679_cast = conv(bias = unet_down_blocks_2_attentions_1_transformer_blocks_8_ff_net_2_bias_to_fp16, dilations = var_4677, groups = var_31, pad = var_4679_pad_0, pad_type = var_4679_pad_type_0, strides = var_4675, weight = unet_down_blocks_2_attentions_1_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized, x = input_299_cast)[name = tensor("op_4679_cast")]; + tensor inputs_139_cast = add(x = var_4679_cast, y = inputs_137_cast)[name = tensor("inputs_139_cast")]; + tensor var_4689 = const()[name = tensor("op_4689"), val = tensor([1])]; + tensor channels_mean_139_cast = reduce_mean(axes = var_4689, keep_dims = var_23, x = inputs_139_cast)[name = tensor("channels_mean_139_cast")]; + tensor zero_mean_139_cast = sub(x = inputs_139_cast, y = channels_mean_139_cast)[name = tensor("zero_mean_139_cast")]; + tensor zero_mean_sq_139_cast = mul(x = zero_mean_139_cast, y = zero_mean_139_cast)[name = tensor("zero_mean_sq_139_cast")]; + tensor var_4693 = const()[name = tensor("op_4693"), val = tensor([1])]; + tensor 
var_4694_cast = reduce_mean(axes = var_4693, keep_dims = var_23, x = zero_mean_sq_139_cast)[name = tensor("op_4694_cast")]; + tensor var_4695_to_fp16 = const()[name = tensor("op_4695_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_4696_cast = add(x = var_4694_cast, y = var_4695_to_fp16)[name = tensor("op_4696_cast")]; + tensor denom_139_epsilon_0_to_fp16 = const()[name = tensor("denom_139_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_139_cast = rsqrt(epsilon = denom_139_epsilon_0_to_fp16, x = var_4696_cast)[name = tensor("denom_139_cast")]; + tensor out_139_cast = mul(x = zero_mean_139_cast, y = denom_139_cast)[name = tensor("out_139_cast")]; + tensor var_4700_to_fp16 = const()[name = tensor("op_4700_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(595858816)))]; + tensor var_4701_cast = add(x = out_139_cast, y = var_4700_to_fp16)[name = tensor("op_4701_cast")]; + tensor var_4703_to_fp16 = const()[name = tensor("op_4703_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(595861440)))]; + tensor hidden_states_191_cast = mul(x = var_4701_cast, y = var_4703_to_fp16)[name = tensor("hidden_states_191_cast")]; + tensor var_4710 = const()[name = tensor("op_4710"), val = tensor([1, 1])]; + tensor var_4712 = const()[name = tensor("op_4712"), val = tensor([1, 1])]; + tensor q_93_pad_type_0 = const()[name = tensor("q_93_pad_type_0"), val = tensor("custom")]; + tensor q_93_pad_0 = const()[name = tensor("q_93_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(595864064))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(597092928))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_93_cast = conv(dilations = var_4712, groups = var_31, pad = q_93_pad_0, pad_type = q_93_pad_type_0, strides = var_4710, weight = unet_down_blocks_2_attentions_1_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_191_cast)[name = tensor("q_93_cast")]; + tensor var_4716 = const()[name = tensor("op_4716"), val = tensor([1, 1])]; + tensor var_4718 = const()[name = tensor("op_4718"), val = tensor([1, 1])]; + tensor k_93_pad_type_0 = const()[name = tensor("k_93_pad_type_0"), val = tensor("custom")]; + tensor k_93_pad_0 = const()[name = tensor("k_93_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(597093120))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(598321984))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_93_cast = conv(dilations = var_4718, groups = var_31, pad = k_93_pad_0, pad_type = k_93_pad_type_0, strides = var_4716, weight = unet_down_blocks_2_attentions_1_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_191_cast)[name = tensor("k_93_cast")]; + tensor var_4722 = const()[name = tensor("op_4722"), val = tensor([1, 1])]; + tensor var_4724 = const()[name = 
tensor("op_4724"), val = tensor([1, 1])]; + tensor v_93_pad_type_0 = const()[name = tensor("v_93_pad_type_0"), val = tensor("custom")]; + tensor v_93_pad_0 = const()[name = tensor("v_93_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(598322176))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(599551040))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_93_cast = conv(dilations = var_4724, groups = var_31, pad = v_93_pad_0, pad_type = v_93_pad_type_0, strides = var_4722, weight = unet_down_blocks_2_attentions_1_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_191_cast)[name = tensor("v_93_cast")]; + tensor var_4728 = const()[name = tensor("op_4728"), val = tensor([2, 20, 64, -1])]; + tensor var_4729_cast = reshape(shape = var_4728, x = q_93_cast)[name = tensor("op_4729_cast")]; + tensor var_4730 = const()[name = tensor("op_4730"), val = tensor([2, 20, 64, -1])]; + tensor var_4731_cast = reshape(shape = var_4730, x = k_93_cast)[name = tensor("op_4731_cast")]; + tensor var_4732 = const()[name = tensor("op_4732"), val = tensor([2, 20, 64, -1])]; + tensor var_4733_cast = reshape(shape = var_4732, x = v_93_cast)[name = tensor("op_4733_cast")]; + tensor attn_weights_185_transpose_x_0 = const()[name = tensor("attn_weights_185_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_185_transpose_y_0 = const()[name = tensor("attn_weights_185_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_185_cast = matmul(transpose_x = attn_weights_185_transpose_x_0, transpose_y = attn_weights_185_transpose_y_0, x = var_4729_cast, y = var_4731_cast)[name = tensor("attn_weights_185_cast")]; + tensor attn_weights_187_cast = mul(x = attn_weights_185_cast, y = var_12_to_fp16)[name = tensor("attn_weights_187_cast")]; + tensor var_4737_cast = softmax(axis = var_18, x = attn_weights_187_cast)[name = tensor("op_4737_cast")]; + tensor attn_93_transpose_x_0 = const()[name = tensor("attn_93_transpose_x_0"), val = tensor(false)]; + tensor attn_93_transpose_y_0 = const()[name = tensor("attn_93_transpose_y_0"), val = tensor(true)]; + tensor attn_93_cast = matmul(transpose_x = attn_93_transpose_x_0, transpose_y = attn_93_transpose_y_0, x = var_4733_cast, y = var_4737_cast)[name = tensor("attn_93_cast")]; + tensor var_4741 = const()[name = tensor("op_4741"), val = tensor([2, 1280, 1, -1])]; + tensor input_301_cast = reshape(shape = var_4741, x = attn_93_cast)[name = tensor("input_301_cast")]; + tensor var_4746 = const()[name = tensor("op_4746"), val = tensor([1, 1])]; + tensor var_4748 = const()[name = tensor("op_4748"), val = tensor([1, 1])]; + tensor var_4750_pad_type_0 = const()[name = tensor("op_4750_pad_type_0"), val = tensor("custom")]; + tensor var_4750_pad_0 = const()[name = tensor("op_4750_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(599551232))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(600780096))), name = 
tensor("unet_down_blocks_2_attentions_1_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_9_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_9_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(600780288)))]; + tensor var_4750_cast = conv(bias = unet_down_blocks_2_attentions_1_transformer_blocks_9_attn1_to_out_0_bias_to_fp16, dilations = var_4748, groups = var_31, pad = var_4750_pad_0, pad_type = var_4750_pad_type_0, strides = var_4746, weight = unet_down_blocks_2_attentions_1_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized, x = input_301_cast)[name = tensor("op_4750_cast")]; + tensor inputs_141_cast = add(x = var_4750_cast, y = inputs_139_cast)[name = tensor("inputs_141_cast")]; + tensor var_4754 = const()[name = tensor("op_4754"), val = tensor([1])]; + tensor channels_mean_141_cast = reduce_mean(axes = var_4754, keep_dims = var_23, x = inputs_141_cast)[name = tensor("channels_mean_141_cast")]; + tensor zero_mean_141_cast = sub(x = inputs_141_cast, y = channels_mean_141_cast)[name = tensor("zero_mean_141_cast")]; + tensor zero_mean_sq_141_cast = mul(x = zero_mean_141_cast, y = zero_mean_141_cast)[name = tensor("zero_mean_sq_141_cast")]; + tensor var_4758 = const()[name = tensor("op_4758"), val = tensor([1])]; + tensor var_4759_cast = reduce_mean(axes = var_4758, keep_dims = var_23, x = zero_mean_sq_141_cast)[name = tensor("op_4759_cast")]; + tensor var_4760_to_fp16 = const()[name = tensor("op_4760_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_4761_cast = add(x = var_4759_cast, y = var_4760_to_fp16)[name = tensor("op_4761_cast")]; + tensor denom_141_epsilon_0_to_fp16 = const()[name = tensor("denom_141_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_141_cast = rsqrt(epsilon = denom_141_epsilon_0_to_fp16, x = var_4761_cast)[name = tensor("denom_141_cast")]; + tensor out_141_cast = mul(x = zero_mean_141_cast, y = denom_141_cast)[name = tensor("out_141_cast")]; + tensor var_4765_to_fp16 = const()[name = tensor("op_4765_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(600782912)))]; + tensor var_4766_cast = add(x = out_141_cast, y = var_4765_to_fp16)[name = tensor("op_4766_cast")]; + tensor var_4768_to_fp16 = const()[name = tensor("op_4768_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(600785536)))]; + tensor hidden_states_193_cast = mul(x = var_4766_cast, y = var_4768_to_fp16)[name = tensor("hidden_states_193_cast")]; + tensor var_4775 = const()[name = tensor("op_4775"), val = tensor([1, 1])]; + tensor var_4777 = const()[name = tensor("op_4777"), val = tensor([1, 1])]; + tensor q_95_pad_type_0 = const()[name = tensor("q_95_pad_type_0"), val = tensor("custom")]; + tensor q_95_pad_0 = const()[name = tensor("q_95_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(600788160))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(602017024))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_95_cast = 
conv(dilations = var_4777, groups = var_31, pad = q_95_pad_0, pad_type = q_95_pad_type_0, strides = var_4775, weight = unet_down_blocks_2_attentions_1_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_193_cast)[name = tensor("q_95_cast")]; + tensor var_4781 = const()[name = tensor("op_4781"), val = tensor([1, 1])]; + tensor var_4783 = const()[name = tensor("op_4783"), val = tensor([1, 1])]; + tensor k_95_pad_type_0 = const()[name = tensor("k_95_pad_type_0"), val = tensor("custom")]; + tensor k_95_pad_0 = const()[name = tensor("k_95_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(602017216))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(603983360))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_95_cast = conv(dilations = var_4783, groups = var_31, pad = k_95_pad_0, pad_type = k_95_pad_type_0, strides = var_4781, weight = unet_down_blocks_2_attentions_1_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_95_cast")]; + tensor var_4787 = const()[name = tensor("op_4787"), val = tensor([1, 1])]; + tensor var_4789 = const()[name = tensor("op_4789"), val = tensor([1, 1])]; + tensor v_95_pad_type_0 = const()[name = tensor("v_95_pad_type_0"), val = tensor("custom")]; + tensor v_95_pad_0 = const()[name = tensor("v_95_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(603983552))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(605949696))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_95_cast = conv(dilations = var_4789, groups = var_31, pad = v_95_pad_0, pad_type = v_95_pad_type_0, strides = var_4787, weight = unet_down_blocks_2_attentions_1_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_95_cast")]; + tensor var_4793 = const()[name = tensor("op_4793"), val = tensor([2, 20, 64, -1])]; + tensor var_4794_cast = reshape(shape = var_4793, x = q_95_cast)[name = tensor("op_4794_cast")]; + tensor var_4795 = const()[name = tensor("op_4795"), val = tensor([2, 20, 64, -1])]; + tensor var_4796_cast = reshape(shape = var_4795, x = k_95_cast)[name = tensor("op_4796_cast")]; + tensor var_4797 = const()[name = tensor("op_4797"), val = tensor([2, 20, 64, -1])]; + tensor var_4798_cast = reshape(shape = var_4797, x = v_95_cast)[name = tensor("op_4798_cast")]; + tensor attn_weights_189_transpose_x_0 = const()[name = tensor("attn_weights_189_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_189_transpose_y_0 = const()[name = tensor("attn_weights_189_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_189_cast = matmul(transpose_x = attn_weights_189_transpose_x_0, transpose_y = attn_weights_189_transpose_y_0, x = var_4794_cast, y = var_4796_cast)[name = tensor("attn_weights_189_cast")]; + tensor attn_weights_191_cast = mul(x = attn_weights_189_cast, y = 
var_12_to_fp16)[name = tensor("attn_weights_191_cast")]; + tensor var_4802_cast = softmax(axis = var_18, x = attn_weights_191_cast)[name = tensor("op_4802_cast")]; + tensor attn_95_transpose_x_0 = const()[name = tensor("attn_95_transpose_x_0"), val = tensor(false)]; + tensor attn_95_transpose_y_0 = const()[name = tensor("attn_95_transpose_y_0"), val = tensor(true)]; + tensor attn_95_cast = matmul(transpose_x = attn_95_transpose_x_0, transpose_y = attn_95_transpose_y_0, x = var_4798_cast, y = var_4802_cast)[name = tensor("attn_95_cast")]; + tensor var_4806 = const()[name = tensor("op_4806"), val = tensor([2, 1280, 1, -1])]; + tensor input_303_cast = reshape(shape = var_4806, x = attn_95_cast)[name = tensor("input_303_cast")]; + tensor var_4811 = const()[name = tensor("op_4811"), val = tensor([1, 1])]; + tensor var_4813 = const()[name = tensor("op_4813"), val = tensor([1, 1])]; + tensor var_4815_pad_type_0 = const()[name = tensor("op_4815_pad_type_0"), val = tensor("custom")]; + tensor var_4815_pad_0 = const()[name = tensor("op_4815_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(605949888))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(607178752))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_9_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_9_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(607178944)))]; + tensor var_4815_cast = conv(bias = unet_down_blocks_2_attentions_1_transformer_blocks_9_attn2_to_out_0_bias_to_fp16, dilations = var_4813, groups = var_31, pad = var_4815_pad_0, pad_type = var_4815_pad_type_0, strides = var_4811, weight = unet_down_blocks_2_attentions_1_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized, x = input_303_cast)[name = tensor("op_4815_cast")]; + tensor inputs_143_cast = add(x = var_4815_cast, y = inputs_141_cast)[name = tensor("inputs_143_cast")]; + tensor var_4819 = const()[name = tensor("op_4819"), val = tensor([1])]; + tensor channels_mean_143_cast = reduce_mean(axes = var_4819, keep_dims = var_23, x = inputs_143_cast)[name = tensor("channels_mean_143_cast")]; + tensor zero_mean_143_cast = sub(x = inputs_143_cast, y = channels_mean_143_cast)[name = tensor("zero_mean_143_cast")]; + tensor zero_mean_sq_143_cast = mul(x = zero_mean_143_cast, y = zero_mean_143_cast)[name = tensor("zero_mean_sq_143_cast")]; + tensor var_4823 = const()[name = tensor("op_4823"), val = tensor([1])]; + tensor var_4824_cast = reduce_mean(axes = var_4823, keep_dims = var_23, x = zero_mean_sq_143_cast)[name = tensor("op_4824_cast")]; + tensor var_4825_to_fp16 = const()[name = tensor("op_4825_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_4826_cast = add(x = var_4824_cast, y = var_4825_to_fp16)[name = tensor("op_4826_cast")]; + tensor denom_143_epsilon_0_to_fp16 = const()[name = tensor("denom_143_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_143_cast = rsqrt(epsilon = denom_143_epsilon_0_to_fp16, x = var_4826_cast)[name = tensor("denom_143_cast")]; + tensor out_143_cast = mul(x = zero_mean_143_cast, y = denom_143_cast)[name 
= tensor("out_143_cast")]; + tensor var_4830_to_fp16 = const()[name = tensor("op_4830_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(607181568)))]; + tensor var_4831_cast = add(x = out_143_cast, y = var_4830_to_fp16)[name = tensor("op_4831_cast")]; + tensor var_4833_to_fp16 = const()[name = tensor("op_4833_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(607184192)))]; + tensor input_305_cast = mul(x = var_4831_cast, y = var_4833_to_fp16)[name = tensor("input_305_cast")]; + tensor var_4841 = const()[name = tensor("op_4841"), val = tensor([1, 1])]; + tensor var_4843 = const()[name = tensor("op_4843"), val = tensor([1, 1])]; + tensor var_4845_pad_type_0 = const()[name = tensor("op_4845_pad_type_0"), val = tensor("custom")]; + tensor var_4845_pad_0 = const()[name = tensor("op_4845_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(607186816))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(617017280))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_9_ff_net_0_proj_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(617017472))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(617025216))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_9_ff_net_0_proj_bias_to_fp16_palettized"), shape = tensor([10240])]; + tensor var_4845_cast = conv(bias = unet_down_blocks_2_attentions_1_transformer_blocks_9_ff_net_0_proj_bias_to_fp16_palettized, dilations = var_4843, groups = var_31, pad = var_4845_pad_0, pad_type = var_4845_pad_type_0, strides = var_4841, weight = unet_down_blocks_2_attentions_1_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized, x = input_305_cast)[name = tensor("op_4845_cast")]; + tensor var_4846_split_sizes_0 = const()[name = tensor("op_4846_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_4846_axis_0 = const()[name = tensor("op_4846_axis_0"), val = tensor(1)]; + tensor var_4846_cast_0, tensor var_4846_cast_1 = split(axis = var_4846_axis_0, split_sizes = var_4846_split_sizes_0, x = var_4845_cast)[name = tensor("op_4846_cast")]; + tensor var_4848_mode_0 = const()[name = tensor("op_4848_mode_0"), val = tensor("EXACT")]; + tensor var_4848_cast = gelu(mode = var_4848_mode_0, x = var_4846_cast_1)[name = tensor("op_4848_cast")]; + tensor input_307_cast = mul(x = var_4846_cast_0, y = var_4848_cast)[name = tensor("input_307_cast")]; + tensor var_4852 = const()[name = tensor("op_4852"), val = tensor([1, 1])]; + tensor var_4854 = const()[name = tensor("op_4854"), val = tensor([1, 1])]; + tensor var_4856_pad_type_0 = const()[name = tensor("op_4856_pad_type_0"), val = tensor("custom")]; + tensor var_4856_pad_0 = const()[name = tensor("op_4856_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(617025408))), lut = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(621940672))), name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor unet_down_blocks_2_attentions_1_transformer_blocks_9_ff_net_2_bias_to_fp16 = const()[name = tensor("unet_down_blocks_2_attentions_1_transformer_blocks_9_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(621940864)))]; + tensor var_4856_cast = conv(bias = unet_down_blocks_2_attentions_1_transformer_blocks_9_ff_net_2_bias_to_fp16, dilations = var_4854, groups = var_31, pad = var_4856_pad_0, pad_type = var_4856_pad_type_0, strides = var_4852, weight = unet_down_blocks_2_attentions_1_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized, x = input_307_cast)[name = tensor("op_4856_cast")]; + tensor hidden_states_197_cast = add(x = var_4856_cast, y = inputs_143_cast)[name = tensor("hidden_states_197_cast")]; + tensor var_4858 = const()[name = tensor("op_4858"), val = tensor([2, 1280, 32, 32])]; + tensor input_309_cast = reshape(shape = var_4858, x = hidden_states_197_cast)[name = tensor("input_309_cast")]; + tensor var_4862 = const()[name = tensor("op_4862"), val = tensor([1, 1])]; + tensor var_4864 = const()[name = tensor("op_4864"), val = tensor([1, 1])]; + tensor hidden_states_199_pad_type_0 = const()[name = tensor("hidden_states_199_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_199_pad_0 = const()[name = tensor("hidden_states_199_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_down_blocks_2_attentions_1_proj_out_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(621943488))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(623172352))), name = tensor("unet_down_blocks_2_attentions_1_proj_out_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_down_blocks_2_attentions_1_proj_out_bias_to_fp16 = const()[name = tensor("unet_down_blocks_2_attentions_1_proj_out_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(623172544)))]; + tensor hidden_states_199_cast = conv(bias = unet_down_blocks_2_attentions_1_proj_out_bias_to_fp16, dilations = var_4864, groups = var_31, pad = hidden_states_199_pad_0, pad_type = hidden_states_199_pad_type_0, strides = var_4862, weight = unet_down_blocks_2_attentions_1_proj_out_weight_to_fp16_palettized, x = input_309_cast)[name = tensor("hidden_states_199_cast")]; + tensor input_311_cast = add(x = hidden_states_199_cast, y = hidden_states_133_cast)[name = tensor("input_311_cast")]; + tensor reshape_64_shape_0 = const()[name = tensor("reshape_64_shape_0"), val = tensor([2, 32, 40, 32, 32])]; + tensor reshape_64_cast = reshape(shape = reshape_64_shape_0, x = input_311_cast)[name = tensor("reshape_64_cast")]; + tensor reduce_mean_48_axes_0 = const()[name = tensor("reduce_mean_48_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_48_keep_dims_0 = const()[name = tensor("reduce_mean_48_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_48_cast = reduce_mean(axes = reduce_mean_48_axes_0, keep_dims = reduce_mean_48_keep_dims_0, x = reshape_64_cast)[name = tensor("reduce_mean_48_cast")]; + tensor sub_32_cast = sub(x = reshape_64_cast, y = reduce_mean_48_cast)[name = tensor("sub_32_cast")]; + tensor square_16_cast = square(x = sub_32_cast)[name = 
tensor("square_16_cast")]; + tensor reduce_mean_50_axes_0 = const()[name = tensor("reduce_mean_50_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_50_keep_dims_0 = const()[name = tensor("reduce_mean_50_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_50_cast = reduce_mean(axes = reduce_mean_50_axes_0, keep_dims = reduce_mean_50_keep_dims_0, x = square_16_cast)[name = tensor("reduce_mean_50_cast")]; + tensor add_32_y_0_to_fp16 = const()[name = tensor("add_32_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_32_cast = add(x = reduce_mean_50_cast, y = add_32_y_0_to_fp16)[name = tensor("add_32_cast")]; + tensor sqrt_16_cast = sqrt(x = add_32_cast)[name = tensor("sqrt_16_cast")]; + tensor real_div_16_cast = real_div(x = sub_32_cast, y = sqrt_16_cast)[name = tensor("real_div_16_cast")]; + tensor reshape_65_shape_0 = const()[name = tensor("reshape_65_shape_0"), val = tensor([2, 1280, 32, 32])]; + tensor reshape_65_cast = reshape(shape = reshape_65_shape_0, x = real_div_16_cast)[name = tensor("reshape_65_cast")]; + tensor add_33_gamma_0_to_fp16 = const()[name = tensor("add_33_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(623175168)))]; + tensor add_33_beta_0_to_fp16 = const()[name = tensor("add_33_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(623177792)))]; + tensor add_33_epsilon_0_to_fp16 = const()[name = tensor("add_33_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_33_cast = batch_norm(beta = add_33_beta_0_to_fp16, epsilon = add_33_epsilon_0_to_fp16, gamma = add_33_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_65_cast)[name = tensor("add_33_cast")]; + tensor input_315_cast = silu(x = add_33_cast)[name = tensor("input_315_cast")]; + tensor var_4888 = const()[name = tensor("op_4888"), val = tensor([1, 1])]; + tensor var_4890 = const()[name = tensor("op_4890"), val = tensor([1, 1])]; + tensor hidden_states_201_pad_type_0 = const()[name = tensor("hidden_states_201_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_201_pad_0 = const()[name = tensor("hidden_states_201_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor unet_mid_block_resnets_0_conv1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(623180416))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(634239680))), name = tensor("unet_mid_block_resnets_0_conv1_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 3, 3])]; + tensor unet_mid_block_resnets_0_conv1_bias_to_fp16 = const()[name = tensor("unet_mid_block_resnets_0_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(634239872)))]; + tensor hidden_states_201_cast = conv(bias = unet_mid_block_resnets_0_conv1_bias_to_fp16, dilations = var_4890, groups = var_31, pad = hidden_states_201_pad_0, pad_type = hidden_states_201_pad_type_0, strides = var_4888, weight = unet_mid_block_resnets_0_conv1_weight_to_fp16_palettized, x = input_315_cast)[name = tensor("hidden_states_201_cast")]; + tensor var_4896 = const()[name = tensor("op_4896"), val = tensor([1, 1])]; + tensor var_4898 = const()[name = tensor("op_4898"), val = tensor([1, 1])]; + tensor temb_13_pad_type_0 = const()[name = tensor("temb_13_pad_type_0"), val = tensor("custom")]; + tensor temb_13_pad_0 = const()[name = tensor("temb_13_pad_0"), 
val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_resnets_0_time_emb_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(634242496))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(635471360))), name = tensor("unet_mid_block_resnets_0_time_emb_proj_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_mid_block_resnets_0_time_emb_proj_bias_to_fp16 = const()[name = tensor("unet_mid_block_resnets_0_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(635471552)))]; + tensor temb_13_cast = conv(bias = unet_mid_block_resnets_0_time_emb_proj_bias_to_fp16, dilations = var_4898, groups = var_31, pad = temb_13_pad_0, pad_type = temb_13_pad_type_0, strides = var_4896, weight = unet_mid_block_resnets_0_time_emb_proj_weight_to_fp16_palettized, x = input_21_cast)[name = tensor("temb_13_cast")]; + tensor input_319_cast = add(x = hidden_states_201_cast, y = temb_13_cast)[name = tensor("input_319_cast")]; + tensor reshape_68_shape_0 = const()[name = tensor("reshape_68_shape_0"), val = tensor([2, 32, 40, 32, 32])]; + tensor reshape_68_cast = reshape(shape = reshape_68_shape_0, x = input_319_cast)[name = tensor("reshape_68_cast")]; + tensor reduce_mean_51_axes_0 = const()[name = tensor("reduce_mean_51_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_51_keep_dims_0 = const()[name = tensor("reduce_mean_51_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_51_cast = reduce_mean(axes = reduce_mean_51_axes_0, keep_dims = reduce_mean_51_keep_dims_0, x = reshape_68_cast)[name = tensor("reduce_mean_51_cast")]; + tensor sub_34_cast = sub(x = reshape_68_cast, y = reduce_mean_51_cast)[name = tensor("sub_34_cast")]; + tensor square_17_cast = square(x = sub_34_cast)[name = tensor("square_17_cast")]; + tensor reduce_mean_53_axes_0 = const()[name = tensor("reduce_mean_53_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_53_keep_dims_0 = const()[name = tensor("reduce_mean_53_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_53_cast = reduce_mean(axes = reduce_mean_53_axes_0, keep_dims = reduce_mean_53_keep_dims_0, x = square_17_cast)[name = tensor("reduce_mean_53_cast")]; + tensor add_34_y_0_to_fp16 = const()[name = tensor("add_34_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_34_cast = add(x = reduce_mean_53_cast, y = add_34_y_0_to_fp16)[name = tensor("add_34_cast")]; + tensor sqrt_17_cast = sqrt(x = add_34_cast)[name = tensor("sqrt_17_cast")]; + tensor real_div_17_cast = real_div(x = sub_34_cast, y = sqrt_17_cast)[name = tensor("real_div_17_cast")]; + tensor reshape_69_shape_0 = const()[name = tensor("reshape_69_shape_0"), val = tensor([2, 1280, 32, 32])]; + tensor reshape_69_cast = reshape(shape = reshape_69_shape_0, x = real_div_17_cast)[name = tensor("reshape_69_cast")]; + tensor add_35_gamma_0_to_fp16 = const()[name = tensor("add_35_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(635474176)))]; + tensor add_35_beta_0_to_fp16 = const()[name = tensor("add_35_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(635476800)))]; + tensor add_35_epsilon_0_to_fp16 = const()[name = tensor("add_35_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_35_cast = batch_norm(beta = add_35_beta_0_to_fp16, epsilon = add_35_epsilon_0_to_fp16, gamma = 
add_35_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_69_cast)[name = tensor("add_35_cast")]; + tensor input_323_cast = silu(x = add_35_cast)[name = tensor("input_323_cast")]; + tensor var_4908 = const()[name = tensor("op_4908"), val = tensor([1, 1])]; + tensor var_4910 = const()[name = tensor("op_4910"), val = tensor([1, 1])]; + tensor hidden_states_203_pad_type_0 = const()[name = tensor("hidden_states_203_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_203_pad_0 = const()[name = tensor("hidden_states_203_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor unet_mid_block_resnets_0_conv2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(635479424))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(646538688))), name = tensor("unet_mid_block_resnets_0_conv2_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 3, 3])]; + tensor unet_mid_block_resnets_0_conv2_bias_to_fp16 = const()[name = tensor("unet_mid_block_resnets_0_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(646538880)))]; + tensor hidden_states_203_cast = conv(bias = unet_mid_block_resnets_0_conv2_bias_to_fp16, dilations = var_4910, groups = var_31, pad = hidden_states_203_pad_0, pad_type = hidden_states_203_pad_type_0, strides = var_4908, weight = unet_mid_block_resnets_0_conv2_weight_to_fp16_palettized, x = input_323_cast)[name = tensor("hidden_states_203_cast")]; + tensor hidden_states_205_cast = add(x = input_311_cast, y = hidden_states_203_cast)[name = tensor("hidden_states_205_cast")]; + tensor reshape_72_shape_0 = const()[name = tensor("reshape_72_shape_0"), val = tensor([2, 32, 40, 32, 32])]; + tensor reshape_72_cast = reshape(shape = reshape_72_shape_0, x = hidden_states_205_cast)[name = tensor("reshape_72_cast")]; + tensor reduce_mean_54_axes_0 = const()[name = tensor("reduce_mean_54_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_54_keep_dims_0 = const()[name = tensor("reduce_mean_54_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_54_cast = reduce_mean(axes = reduce_mean_54_axes_0, keep_dims = reduce_mean_54_keep_dims_0, x = reshape_72_cast)[name = tensor("reduce_mean_54_cast")]; + tensor sub_36_cast = sub(x = reshape_72_cast, y = reduce_mean_54_cast)[name = tensor("sub_36_cast")]; + tensor square_18_cast = square(x = sub_36_cast)[name = tensor("square_18_cast")]; + tensor reduce_mean_56_axes_0 = const()[name = tensor("reduce_mean_56_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_56_keep_dims_0 = const()[name = tensor("reduce_mean_56_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_56_cast = reduce_mean(axes = reduce_mean_56_axes_0, keep_dims = reduce_mean_56_keep_dims_0, x = square_18_cast)[name = tensor("reduce_mean_56_cast")]; + tensor add_36_y_0_to_fp16 = const()[name = tensor("add_36_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_36_cast = add(x = reduce_mean_56_cast, y = add_36_y_0_to_fp16)[name = tensor("add_36_cast")]; + tensor sqrt_18_cast = sqrt(x = add_36_cast)[name = tensor("sqrt_18_cast")]; + tensor real_div_18_cast = real_div(x = sub_36_cast, y = sqrt_18_cast)[name = tensor("real_div_18_cast")]; + tensor reshape_73_shape_0 = const()[name = tensor("reshape_73_shape_0"), val = tensor([2, 1280, 32, 32])]; + tensor reshape_73_cast = reshape(shape = reshape_73_shape_0, x = real_div_18_cast)[name =
tensor("reshape_73_cast")]; + tensor add_37_gamma_0_to_fp16 = const()[name = tensor("add_37_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(646541504)))]; + tensor add_37_beta_0_to_fp16 = const()[name = tensor("add_37_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(646544128)))]; + tensor add_37_epsilon_0_to_fp16 = const()[name = tensor("add_37_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_37_cast = batch_norm(beta = add_37_beta_0_to_fp16, epsilon = add_37_epsilon_0_to_fp16, gamma = add_37_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_73_cast)[name = tensor("add_37_cast")]; + tensor var_4948 = const()[name = tensor("op_4948"), val = tensor([1, 1])]; + tensor var_4950 = const()[name = tensor("op_4950"), val = tensor([1, 1])]; + tensor hidden_states_207_pad_type_0 = const()[name = tensor("hidden_states_207_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_207_pad_0 = const()[name = tensor("hidden_states_207_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_proj_in_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(646546752))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(647775616))), name = tensor("unet_mid_block_attentions_0_proj_in_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_mid_block_attentions_0_proj_in_bias_to_fp16 = const()[name = tensor("unet_mid_block_attentions_0_proj_in_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(647775808)))]; + tensor hidden_states_207_cast = conv(bias = unet_mid_block_attentions_0_proj_in_bias_to_fp16, dilations = var_4950, groups = var_31, pad = hidden_states_207_pad_0, pad_type = hidden_states_207_pad_type_0, strides = var_4948, weight = unet_mid_block_attentions_0_proj_in_weight_to_fp16_palettized, x = add_37_cast)[name = tensor("hidden_states_207_cast")]; + tensor var_4955 = const()[name = tensor("op_4955"), val = tensor([2, 1280, 1, 1024])]; + tensor inputs_145_cast = reshape(shape = var_4955, x = hidden_states_207_cast)[name = tensor("inputs_145_cast")]; + tensor var_4965 = const()[name = tensor("op_4965"), val = tensor([1])]; + tensor channels_mean_145_cast = reduce_mean(axes = var_4965, keep_dims = var_23, x = inputs_145_cast)[name = tensor("channels_mean_145_cast")]; + tensor zero_mean_145_cast = sub(x = inputs_145_cast, y = channels_mean_145_cast)[name = tensor("zero_mean_145_cast")]; + tensor zero_mean_sq_145_cast = mul(x = zero_mean_145_cast, y = zero_mean_145_cast)[name = tensor("zero_mean_sq_145_cast")]; + tensor var_4969 = const()[name = tensor("op_4969"), val = tensor([1])]; + tensor var_4970_cast = reduce_mean(axes = var_4969, keep_dims = var_23, x = zero_mean_sq_145_cast)[name = tensor("op_4970_cast")]; + tensor var_4971_to_fp16 = const()[name = tensor("op_4971_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_4972_cast = add(x = var_4970_cast, y = var_4971_to_fp16)[name = tensor("op_4972_cast")]; + tensor denom_145_epsilon_0_to_fp16 = const()[name = tensor("denom_145_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_145_cast = rsqrt(epsilon = denom_145_epsilon_0_to_fp16, x = var_4972_cast)[name = tensor("denom_145_cast")]; + tensor out_145_cast = mul(x = zero_mean_145_cast, y = 
denom_145_cast)[name = tensor("out_145_cast")]; + tensor var_4976_to_fp16 = const()[name = tensor("op_4976_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(647778432)))]; + tensor var_4977_cast = add(x = out_145_cast, y = var_4976_to_fp16)[name = tensor("op_4977_cast")]; + tensor var_4979_to_fp16 = const()[name = tensor("op_4979_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(647781056)))]; + tensor hidden_states_209_cast = mul(x = var_4977_cast, y = var_4979_to_fp16)[name = tensor("hidden_states_209_cast")]; + tensor var_4986 = const()[name = tensor("op_4986"), val = tensor([1, 1])]; + tensor var_4988 = const()[name = tensor("op_4988"), val = tensor([1, 1])]; + tensor q_97_pad_type_0 = const()[name = tensor("q_97_pad_type_0"), val = tensor("custom")]; + tensor q_97_pad_0 = const()[name = tensor("q_97_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(647783680))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(649012544))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_97_cast = conv(dilations = var_4988, groups = var_31, pad = q_97_pad_0, pad_type = q_97_pad_type_0, strides = var_4986, weight = unet_mid_block_attentions_0_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_209_cast)[name = tensor("q_97_cast")]; + tensor var_4992 = const()[name = tensor("op_4992"), val = tensor([1, 1])]; + tensor var_4994 = const()[name = tensor("op_4994"), val = tensor([1, 1])]; + tensor k_97_pad_type_0 = const()[name = tensor("k_97_pad_type_0"), val = tensor("custom")]; + tensor k_97_pad_0 = const()[name = tensor("k_97_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(649012736))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(650241600))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_97_cast = conv(dilations = var_4994, groups = var_31, pad = k_97_pad_0, pad_type = k_97_pad_type_0, strides = var_4992, weight = unet_mid_block_attentions_0_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_209_cast)[name = tensor("k_97_cast")]; + tensor var_4998 = const()[name = tensor("op_4998"), val = tensor([1, 1])]; + tensor var_5000 = const()[name = tensor("op_5000"), val = tensor([1, 1])]; + tensor v_97_pad_type_0 = const()[name = tensor("v_97_pad_type_0"), val = tensor("custom")]; + tensor v_97_pad_0 = const()[name = tensor("v_97_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(650241792))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(651470656))), name = 
tensor("unet_mid_block_attentions_0_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_97_cast = conv(dilations = var_5000, groups = var_31, pad = v_97_pad_0, pad_type = v_97_pad_type_0, strides = var_4998, weight = unet_mid_block_attentions_0_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_209_cast)[name = tensor("v_97_cast")]; + tensor var_5004 = const()[name = tensor("op_5004"), val = tensor([2, 20, 64, -1])]; + tensor var_5005_cast = reshape(shape = var_5004, x = q_97_cast)[name = tensor("op_5005_cast")]; + tensor var_5006 = const()[name = tensor("op_5006"), val = tensor([2, 20, 64, -1])]; + tensor var_5007_cast = reshape(shape = var_5006, x = k_97_cast)[name = tensor("op_5007_cast")]; + tensor var_5008 = const()[name = tensor("op_5008"), val = tensor([2, 20, 64, -1])]; + tensor var_5009_cast = reshape(shape = var_5008, x = v_97_cast)[name = tensor("op_5009_cast")]; + tensor attn_weights_193_transpose_x_0 = const()[name = tensor("attn_weights_193_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_193_transpose_y_0 = const()[name = tensor("attn_weights_193_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_193_cast = matmul(transpose_x = attn_weights_193_transpose_x_0, transpose_y = attn_weights_193_transpose_y_0, x = var_5005_cast, y = var_5007_cast)[name = tensor("attn_weights_193_cast")]; + tensor attn_weights_195_cast = mul(x = attn_weights_193_cast, y = var_12_to_fp16)[name = tensor("attn_weights_195_cast")]; + tensor var_5013_cast = softmax(axis = var_18, x = attn_weights_195_cast)[name = tensor("op_5013_cast")]; + tensor attn_97_transpose_x_0 = const()[name = tensor("attn_97_transpose_x_0"), val = tensor(false)]; + tensor attn_97_transpose_y_0 = const()[name = tensor("attn_97_transpose_y_0"), val = tensor(true)]; + tensor attn_97_cast = matmul(transpose_x = attn_97_transpose_x_0, transpose_y = attn_97_transpose_y_0, x = var_5009_cast, y = var_5013_cast)[name = tensor("attn_97_cast")]; + tensor var_5017 = const()[name = tensor("op_5017"), val = tensor([2, 1280, 1, -1])]; + tensor input_327_cast = reshape(shape = var_5017, x = attn_97_cast)[name = tensor("input_327_cast")]; + tensor var_5022 = const()[name = tensor("op_5022"), val = tensor([1, 1])]; + tensor var_5024 = const()[name = tensor("op_5024"), val = tensor([1, 1])]; + tensor var_5026_pad_type_0 = const()[name = tensor("op_5026_pad_type_0"), val = tensor("custom")]; + tensor var_5026_pad_0 = const()[name = tensor("op_5026_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(651470848))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(652699712))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_mid_block_attentions_0_transformer_blocks_0_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("unet_mid_block_attentions_0_transformer_blocks_0_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(652699904)))]; + tensor var_5026_cast = conv(bias = unet_mid_block_attentions_0_transformer_blocks_0_attn1_to_out_0_bias_to_fp16, dilations = var_5024, groups = var_31, pad = var_5026_pad_0, pad_type = var_5026_pad_type_0, 
strides = var_5022, weight = unet_mid_block_attentions_0_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized, x = input_327_cast)[name = tensor("op_5026_cast")]; + tensor inputs_147_cast = add(x = var_5026_cast, y = inputs_145_cast)[name = tensor("inputs_147_cast")]; + tensor var_5030 = const()[name = tensor("op_5030"), val = tensor([1])]; + tensor channels_mean_147_cast = reduce_mean(axes = var_5030, keep_dims = var_23, x = inputs_147_cast)[name = tensor("channels_mean_147_cast")]; + tensor zero_mean_147_cast = sub(x = inputs_147_cast, y = channels_mean_147_cast)[name = tensor("zero_mean_147_cast")]; + tensor zero_mean_sq_147_cast = mul(x = zero_mean_147_cast, y = zero_mean_147_cast)[name = tensor("zero_mean_sq_147_cast")]; + tensor var_5034 = const()[name = tensor("op_5034"), val = tensor([1])]; + tensor var_5035_cast = reduce_mean(axes = var_5034, keep_dims = var_23, x = zero_mean_sq_147_cast)[name = tensor("op_5035_cast")]; + tensor var_5036_to_fp16 = const()[name = tensor("op_5036_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5037_cast = add(x = var_5035_cast, y = var_5036_to_fp16)[name = tensor("op_5037_cast")]; + tensor denom_147_epsilon_0_to_fp16 = const()[name = tensor("denom_147_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_147_cast = rsqrt(epsilon = denom_147_epsilon_0_to_fp16, x = var_5037_cast)[name = tensor("denom_147_cast")]; + tensor out_147_cast = mul(x = zero_mean_147_cast, y = denom_147_cast)[name = tensor("out_147_cast")]; + tensor var_5041_to_fp16 = const()[name = tensor("op_5041_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(652702528)))]; + tensor var_5042_cast = add(x = out_147_cast, y = var_5041_to_fp16)[name = tensor("op_5042_cast")]; + tensor var_5044_to_fp16 = const()[name = tensor("op_5044_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(652705152)))]; + tensor hidden_states_211_cast = mul(x = var_5042_cast, y = var_5044_to_fp16)[name = tensor("hidden_states_211_cast")]; + tensor var_5051 = const()[name = tensor("op_5051"), val = tensor([1, 1])]; + tensor var_5053 = const()[name = tensor("op_5053"), val = tensor([1, 1])]; + tensor q_99_pad_type_0 = const()[name = tensor("q_99_pad_type_0"), val = tensor("custom")]; + tensor q_99_pad_0 = const()[name = tensor("q_99_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(652707776))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(653936640))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_99_cast = conv(dilations = var_5053, groups = var_31, pad = q_99_pad_0, pad_type = q_99_pad_type_0, strides = var_5051, weight = unet_mid_block_attentions_0_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_211_cast)[name = tensor("q_99_cast")]; + tensor var_5057 = const()[name = tensor("op_5057"), val = tensor([1, 1])]; + tensor var_5059 = const()[name = tensor("op_5059"), val = tensor([1, 1])]; + tensor k_99_pad_type_0 = const()[name = tensor("k_99_pad_type_0"), val = tensor("custom")]; + tensor k_99_pad_0 = const()[name = tensor("k_99_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
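[Editor's note — annotation, not part of the generated model.mil] The channels_mean_* / zero_mean_* / rsqrt sequences that recur throughout this span are LayerNorm over the channel axis, lowered to primitive ops: reduce_mean over axis 1, subtract, mean of squares, add an epsilon of 0x1.5p-17 (≈1e-5), rsqrt, then a learned shift and scale loaded from weight.bin (the reshape_73 / add_37 batch_norm at the top of the span plays the same normalization role ahead of the proj_in conv). A minimal NumPy sketch of that arithmetic, assuming activations laid out as (batch, channels, 1, sequence) to match the [2, 1280, 1, 1024] reshape; all names below are illustrative, not taken from the model:

    # Editor's sketch, not emitted by coremlc: the channel-axis LayerNorm that the
    # listing spells out as reduce_mean / sub / mul / add-eps / rsqrt / shift / scale.
    # Assumes activations shaped (batch, channels, 1, sequence), e.g. (2, 1280, 1, 1024).
    import numpy as np

    def channel_layer_norm(x, shift, scale, eps=1e-5):    # eps ~ 0x1.5p-17 in the listing
        mean = x.mean(axis=1, keepdims=True)              # channels_mean_*
        centered = x - mean                               # zero_mean_*
        var = (centered * centered).mean(axis=1, keepdims=True)
        inv_std = 1.0 / np.sqrt(var + eps)                # rsqrt (its extra 2^-24 epsilon omitted)
        normed = centered * inv_std                       # out_*
        return (normed + shift) * scale                   # add the shift first, then mul the scale

    x = np.random.randn(2, 1280, 1, 1024).astype(np.float32)
    shift = np.zeros((1, 1280, 1, 1), dtype=np.float32)   # stand-in for the learned shift constant
    scale = np.ones((1, 1280, 1, 1), dtype=np.float32)    # stand-in for the learned scale constant
    y = channel_layer_norm(x, shift, scale)
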
unet_mid_block_attentions_0_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(653936832))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(655902976))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_99_cast = conv(dilations = var_5059, groups = var_31, pad = k_99_pad_0, pad_type = k_99_pad_type_0, strides = var_5057, weight = unet_mid_block_attentions_0_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_99_cast")]; + tensor var_5063 = const()[name = tensor("op_5063"), val = tensor([1, 1])]; + tensor var_5065 = const()[name = tensor("op_5065"), val = tensor([1, 1])]; + tensor v_99_pad_type_0 = const()[name = tensor("v_99_pad_type_0"), val = tensor("custom")]; + tensor v_99_pad_0 = const()[name = tensor("v_99_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(655903168))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(657869312))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_99_cast = conv(dilations = var_5065, groups = var_31, pad = v_99_pad_0, pad_type = v_99_pad_type_0, strides = var_5063, weight = unet_mid_block_attentions_0_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_99_cast")]; + tensor var_5069 = const()[name = tensor("op_5069"), val = tensor([2, 20, 64, -1])]; + tensor var_5070_cast = reshape(shape = var_5069, x = q_99_cast)[name = tensor("op_5070_cast")]; + tensor var_5071 = const()[name = tensor("op_5071"), val = tensor([2, 20, 64, -1])]; + tensor var_5072_cast = reshape(shape = var_5071, x = k_99_cast)[name = tensor("op_5072_cast")]; + tensor var_5073 = const()[name = tensor("op_5073"), val = tensor([2, 20, 64, -1])]; + tensor var_5074_cast = reshape(shape = var_5073, x = v_99_cast)[name = tensor("op_5074_cast")]; + tensor attn_weights_197_transpose_x_0 = const()[name = tensor("attn_weights_197_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_197_transpose_y_0 = const()[name = tensor("attn_weights_197_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_197_cast = matmul(transpose_x = attn_weights_197_transpose_x_0, transpose_y = attn_weights_197_transpose_y_0, x = var_5070_cast, y = var_5072_cast)[name = tensor("attn_weights_197_cast")]; + tensor attn_weights_199_cast = mul(x = attn_weights_197_cast, y = var_12_to_fp16)[name = tensor("attn_weights_199_cast")]; + tensor var_5078_cast = softmax(axis = var_18, x = attn_weights_199_cast)[name = tensor("op_5078_cast")]; + tensor attn_99_transpose_x_0 = const()[name = tensor("attn_99_transpose_x_0"), val = tensor(false)]; + tensor attn_99_transpose_y_0 = const()[name = tensor("attn_99_transpose_y_0"), val = tensor(true)]; + tensor attn_99_cast = matmul(transpose_x = attn_99_transpose_x_0, transpose_y = attn_99_transpose_y_0, x = var_5074_cast, y = var_5078_cast)[name = tensor("attn_99_cast")]; + tensor var_5082 = const()[name = tensor("op_5082"), val = tensor([2, 1280, 1, -1])]; + tensor 
input_329_cast = reshape(shape = var_5082, x = attn_99_cast)[name = tensor("input_329_cast")]; + tensor var_5087 = const()[name = tensor("op_5087"), val = tensor([1, 1])]; + tensor var_5089 = const()[name = tensor("op_5089"), val = tensor([1, 1])]; + tensor var_5091_pad_type_0 = const()[name = tensor("op_5091_pad_type_0"), val = tensor("custom")]; + tensor var_5091_pad_0 = const()[name = tensor("op_5091_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(657869504))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(659098368))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_mid_block_attentions_0_transformer_blocks_0_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("unet_mid_block_attentions_0_transformer_blocks_0_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(659098560)))]; + tensor var_5091_cast = conv(bias = unet_mid_block_attentions_0_transformer_blocks_0_attn2_to_out_0_bias_to_fp16, dilations = var_5089, groups = var_31, pad = var_5091_pad_0, pad_type = var_5091_pad_type_0, strides = var_5087, weight = unet_mid_block_attentions_0_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized, x = input_329_cast)[name = tensor("op_5091_cast")]; + tensor inputs_149_cast = add(x = var_5091_cast, y = inputs_147_cast)[name = tensor("inputs_149_cast")]; + tensor var_5095 = const()[name = tensor("op_5095"), val = tensor([1])]; + tensor channels_mean_149_cast = reduce_mean(axes = var_5095, keep_dims = var_23, x = inputs_149_cast)[name = tensor("channels_mean_149_cast")]; + tensor zero_mean_149_cast = sub(x = inputs_149_cast, y = channels_mean_149_cast)[name = tensor("zero_mean_149_cast")]; + tensor zero_mean_sq_149_cast = mul(x = zero_mean_149_cast, y = zero_mean_149_cast)[name = tensor("zero_mean_sq_149_cast")]; + tensor var_5099 = const()[name = tensor("op_5099"), val = tensor([1])]; + tensor var_5100_cast = reduce_mean(axes = var_5099, keep_dims = var_23, x = zero_mean_sq_149_cast)[name = tensor("op_5100_cast")]; + tensor var_5101_to_fp16 = const()[name = tensor("op_5101_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5102_cast = add(x = var_5100_cast, y = var_5101_to_fp16)[name = tensor("op_5102_cast")]; + tensor denom_149_epsilon_0_to_fp16 = const()[name = tensor("denom_149_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_149_cast = rsqrt(epsilon = denom_149_epsilon_0_to_fp16, x = var_5102_cast)[name = tensor("denom_149_cast")]; + tensor out_149_cast = mul(x = zero_mean_149_cast, y = denom_149_cast)[name = tensor("out_149_cast")]; + tensor var_5106_to_fp16 = const()[name = tensor("op_5106_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(659101184)))]; + tensor var_5107_cast = add(x = out_149_cast, y = var_5106_to_fp16)[name = tensor("op_5107_cast")]; + tensor var_5109_to_fp16 = const()[name = tensor("op_5109_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(659103808)))]; + tensor input_331_cast = mul(x = var_5107_cast, y = var_5109_to_fp16)[name = tensor("input_331_cast")]; + tensor var_5117 = const()[name = tensor("op_5117"), val = tensor([1, 1])]; + tensor 
var_5119 = const()[name = tensor("op_5119"), val = tensor([1, 1])]; + tensor var_5121_pad_type_0 = const()[name = tensor("op_5121_pad_type_0"), val = tensor("custom")]; + tensor var_5121_pad_0 = const()[name = tensor("op_5121_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(659106432))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(668936896))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor unet_mid_block_attentions_0_transformer_blocks_0_ff_net_0_proj_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(668937088))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(668944832))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_0_ff_net_0_proj_bias_to_fp16_palettized"), shape = tensor([10240])]; + tensor var_5121_cast = conv(bias = unet_mid_block_attentions_0_transformer_blocks_0_ff_net_0_proj_bias_to_fp16_palettized, dilations = var_5119, groups = var_31, pad = var_5121_pad_0, pad_type = var_5121_pad_type_0, strides = var_5117, weight = unet_mid_block_attentions_0_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized, x = input_331_cast)[name = tensor("op_5121_cast")]; + tensor var_5122_split_sizes_0 = const()[name = tensor("op_5122_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_5122_axis_0 = const()[name = tensor("op_5122_axis_0"), val = tensor(1)]; + tensor var_5122_cast_0, tensor var_5122_cast_1 = split(axis = var_5122_axis_0, split_sizes = var_5122_split_sizes_0, x = var_5121_cast)[name = tensor("op_5122_cast")]; + tensor var_5124_mode_0 = const()[name = tensor("op_5124_mode_0"), val = tensor("EXACT")]; + tensor var_5124_cast = gelu(mode = var_5124_mode_0, x = var_5122_cast_1)[name = tensor("op_5124_cast")]; + tensor input_333_cast = mul(x = var_5122_cast_0, y = var_5124_cast)[name = tensor("input_333_cast")]; + tensor var_5128 = const()[name = tensor("op_5128"), val = tensor([1, 1])]; + tensor var_5130 = const()[name = tensor("op_5130"), val = tensor([1, 1])]; + tensor var_5132_pad_type_0 = const()[name = tensor("op_5132_pad_type_0"), val = tensor("custom")]; + tensor var_5132_pad_0 = const()[name = tensor("op_5132_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(668945024))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(673860288))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor unet_mid_block_attentions_0_transformer_blocks_0_ff_net_2_bias_to_fp16 = const()[name = tensor("unet_mid_block_attentions_0_transformer_blocks_0_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(673860480)))]; + tensor var_5132_cast = conv(bias = unet_mid_block_attentions_0_transformer_blocks_0_ff_net_2_bias_to_fp16, dilations = var_5130, groups = var_31, pad = var_5132_pad_0, pad_type = 
var_5132_pad_type_0, strides = var_5128, weight = unet_mid_block_attentions_0_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized, x = input_333_cast)[name = tensor("op_5132_cast")]; + tensor inputs_151_cast = add(x = var_5132_cast, y = inputs_149_cast)[name = tensor("inputs_151_cast")]; + tensor var_5142 = const()[name = tensor("op_5142"), val = tensor([1])]; + tensor channels_mean_151_cast = reduce_mean(axes = var_5142, keep_dims = var_23, x = inputs_151_cast)[name = tensor("channels_mean_151_cast")]; + tensor zero_mean_151_cast = sub(x = inputs_151_cast, y = channels_mean_151_cast)[name = tensor("zero_mean_151_cast")]; + tensor zero_mean_sq_151_cast = mul(x = zero_mean_151_cast, y = zero_mean_151_cast)[name = tensor("zero_mean_sq_151_cast")]; + tensor var_5146 = const()[name = tensor("op_5146"), val = tensor([1])]; + tensor var_5147_cast = reduce_mean(axes = var_5146, keep_dims = var_23, x = zero_mean_sq_151_cast)[name = tensor("op_5147_cast")]; + tensor var_5148_to_fp16 = const()[name = tensor("op_5148_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5149_cast = add(x = var_5147_cast, y = var_5148_to_fp16)[name = tensor("op_5149_cast")]; + tensor denom_151_epsilon_0_to_fp16 = const()[name = tensor("denom_151_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_151_cast = rsqrt(epsilon = denom_151_epsilon_0_to_fp16, x = var_5149_cast)[name = tensor("denom_151_cast")]; + tensor out_151_cast = mul(x = zero_mean_151_cast, y = denom_151_cast)[name = tensor("out_151_cast")]; + tensor var_5153_to_fp16 = const()[name = tensor("op_5153_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(673863104)))]; + tensor var_5154_cast = add(x = out_151_cast, y = var_5153_to_fp16)[name = tensor("op_5154_cast")]; + tensor var_5156_to_fp16 = const()[name = tensor("op_5156_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(673865728)))]; + tensor hidden_states_215_cast = mul(x = var_5154_cast, y = var_5156_to_fp16)[name = tensor("hidden_states_215_cast")]; + tensor var_5163 = const()[name = tensor("op_5163"), val = tensor([1, 1])]; + tensor var_5165 = const()[name = tensor("op_5165"), val = tensor([1, 1])]; + tensor q_101_pad_type_0 = const()[name = tensor("q_101_pad_type_0"), val = tensor("custom")]; + tensor q_101_pad_0 = const()[name = tensor("q_101_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(673868352))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(675097216))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_101_cast = conv(dilations = var_5165, groups = var_31, pad = q_101_pad_0, pad_type = q_101_pad_type_0, strides = var_5163, weight = unet_mid_block_attentions_0_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_215_cast)[name = tensor("q_101_cast")]; + tensor var_5169 = const()[name = tensor("op_5169"), val = tensor([1, 1])]; + tensor var_5171 = const()[name = tensor("op_5171"), val = tensor([1, 1])]; + tensor k_101_pad_type_0 = const()[name = tensor("k_101_pad_type_0"), val = tensor("custom")]; + tensor k_101_pad_0 = const()[name = tensor("k_101_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
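[Editor's note — annotation, not part of the generated model.mil] Each attn1/attn2 branch above is multi-head attention written with 1x1 convs and matmuls: Q, K and V are 1x1 conv projections (attn1 projects hidden_states_*, attn2 projects the 2048-channel encoder_hidden_states, hence the [1280, 2048, 1, 1] K/V weights), the reshape to [2, 20, 64, -1] splits 1280 channels into 20 heads of 64, the first matmul (transpose_x) forms per-head score matrices that are scaled by var_12 (presumably 1/sqrt(64); it is defined earlier in the file) and softmaxed over axis 3, and the second matmul (transpose_y) applies the weights to V before the to_out projection and residual add. A rough NumPy sketch of the per-head arithmetic, starting from already-projected q/k/v; shapes follow the listing, names are illustrative:

    # Editor's sketch, not emitted by coremlc: the per-head arithmetic behind the
    # reshape([2, 20, 64, -1]) + matmul + softmax + matmul pattern, starting from
    # q/k/v that the 1x1 convs have already projected to 1280 channels.
    import numpy as np

    def softmax(z, axis=-1):
        z = z - z.max(axis=axis, keepdims=True)
        e = np.exp(z)
        return e / e.sum(axis=axis, keepdims=True)

    def mha_conv_layout(q, k, v, heads=20, head_dim=64):
        """q: (B, C, 1, Lq); k, v: (B, C, 1, Lk), with C = heads * head_dim = 1280."""
        B, C, _, Lq = q.shape
        Lk = k.shape[-1]
        qh = q.reshape(B, heads, head_dim, Lq)
        kh = k.reshape(B, heads, head_dim, Lk)
        vh = v.reshape(B, heads, head_dim, Lk)
        # matmul(transpose_x=True): scores[b, h] = qh[b, h].T @ kh[b, h]  -> (Lq, Lk)
        scores = np.einsum('bhdq,bhdk->bhqk', qh, kh) / np.sqrt(head_dim)  # scale plays the var_12 role
        weights = softmax(scores, axis=-1)                 # softmax over keys (axis 3)
        # matmul(transpose_y=True): out[b, h] = vh[b, h] @ weights[b, h].T -> (head_dim, Lq)
        out = np.einsum('bhdk,bhqk->bhdq', vh, weights)
        return out.reshape(B, C, 1, Lq)                    # back to (B, 1280, 1, Lq)

    q = np.random.randn(2, 1280, 1, 1024).astype(np.float32)   # self-attn: latent positions
    kv = np.random.randn(2, 1280, 1, 77).astype(np.float32)    # cross-attn style: projected text tokens
    y = mha_conv_layout(q, kv, kv)
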
unet_mid_block_attentions_0_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(675097408))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(676326272))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_101_cast = conv(dilations = var_5171, groups = var_31, pad = k_101_pad_0, pad_type = k_101_pad_type_0, strides = var_5169, weight = unet_mid_block_attentions_0_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_215_cast)[name = tensor("k_101_cast")]; + tensor var_5175 = const()[name = tensor("op_5175"), val = tensor([1, 1])]; + tensor var_5177 = const()[name = tensor("op_5177"), val = tensor([1, 1])]; + tensor v_101_pad_type_0 = const()[name = tensor("v_101_pad_type_0"), val = tensor("custom")]; + tensor v_101_pad_0 = const()[name = tensor("v_101_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(676326464))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(677555328))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_101_cast = conv(dilations = var_5177, groups = var_31, pad = v_101_pad_0, pad_type = v_101_pad_type_0, strides = var_5175, weight = unet_mid_block_attentions_0_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_215_cast)[name = tensor("v_101_cast")]; + tensor var_5181 = const()[name = tensor("op_5181"), val = tensor([2, 20, 64, -1])]; + tensor var_5182_cast = reshape(shape = var_5181, x = q_101_cast)[name = tensor("op_5182_cast")]; + tensor var_5183 = const()[name = tensor("op_5183"), val = tensor([2, 20, 64, -1])]; + tensor var_5184_cast = reshape(shape = var_5183, x = k_101_cast)[name = tensor("op_5184_cast")]; + tensor var_5185 = const()[name = tensor("op_5185"), val = tensor([2, 20, 64, -1])]; + tensor var_5186_cast = reshape(shape = var_5185, x = v_101_cast)[name = tensor("op_5186_cast")]; + tensor attn_weights_201_transpose_x_0 = const()[name = tensor("attn_weights_201_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_201_transpose_y_0 = const()[name = tensor("attn_weights_201_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_201_cast = matmul(transpose_x = attn_weights_201_transpose_x_0, transpose_y = attn_weights_201_transpose_y_0, x = var_5182_cast, y = var_5184_cast)[name = tensor("attn_weights_201_cast")]; + tensor attn_weights_203_cast = mul(x = attn_weights_201_cast, y = var_12_to_fp16)[name = tensor("attn_weights_203_cast")]; + tensor var_5190_cast = softmax(axis = var_18, x = attn_weights_203_cast)[name = tensor("op_5190_cast")]; + tensor attn_101_transpose_x_0 = const()[name = tensor("attn_101_transpose_x_0"), val = tensor(false)]; + tensor attn_101_transpose_y_0 = const()[name = tensor("attn_101_transpose_y_0"), val = tensor(true)]; + tensor attn_101_cast = matmul(transpose_x = attn_101_transpose_x_0, transpose_y = attn_101_transpose_y_0, x = var_5186_cast, y = var_5190_cast)[name = tensor("attn_101_cast")]; + tensor var_5194 = const()[name = tensor("op_5194"), val = tensor([2, 1280, 1, 
-1])]; + tensor input_335_cast = reshape(shape = var_5194, x = attn_101_cast)[name = tensor("input_335_cast")]; + tensor var_5199 = const()[name = tensor("op_5199"), val = tensor([1, 1])]; + tensor var_5201 = const()[name = tensor("op_5201"), val = tensor([1, 1])]; + tensor var_5203_pad_type_0 = const()[name = tensor("op_5203_pad_type_0"), val = tensor("custom")]; + tensor var_5203_pad_0 = const()[name = tensor("op_5203_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(677555520))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(678784384))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_mid_block_attentions_0_transformer_blocks_1_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("unet_mid_block_attentions_0_transformer_blocks_1_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(678784576)))]; + tensor var_5203_cast = conv(bias = unet_mid_block_attentions_0_transformer_blocks_1_attn1_to_out_0_bias_to_fp16, dilations = var_5201, groups = var_31, pad = var_5203_pad_0, pad_type = var_5203_pad_type_0, strides = var_5199, weight = unet_mid_block_attentions_0_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized, x = input_335_cast)[name = tensor("op_5203_cast")]; + tensor inputs_153_cast = add(x = var_5203_cast, y = inputs_151_cast)[name = tensor("inputs_153_cast")]; + tensor var_5207 = const()[name = tensor("op_5207"), val = tensor([1])]; + tensor channels_mean_153_cast = reduce_mean(axes = var_5207, keep_dims = var_23, x = inputs_153_cast)[name = tensor("channels_mean_153_cast")]; + tensor zero_mean_153_cast = sub(x = inputs_153_cast, y = channels_mean_153_cast)[name = tensor("zero_mean_153_cast")]; + tensor zero_mean_sq_153_cast = mul(x = zero_mean_153_cast, y = zero_mean_153_cast)[name = tensor("zero_mean_sq_153_cast")]; + tensor var_5211 = const()[name = tensor("op_5211"), val = tensor([1])]; + tensor var_5212_cast = reduce_mean(axes = var_5211, keep_dims = var_23, x = zero_mean_sq_153_cast)[name = tensor("op_5212_cast")]; + tensor var_5213_to_fp16 = const()[name = tensor("op_5213_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5214_cast = add(x = var_5212_cast, y = var_5213_to_fp16)[name = tensor("op_5214_cast")]; + tensor denom_153_epsilon_0_to_fp16 = const()[name = tensor("denom_153_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_153_cast = rsqrt(epsilon = denom_153_epsilon_0_to_fp16, x = var_5214_cast)[name = tensor("denom_153_cast")]; + tensor out_153_cast = mul(x = zero_mean_153_cast, y = denom_153_cast)[name = tensor("out_153_cast")]; + tensor var_5218_to_fp16 = const()[name = tensor("op_5218_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(678787200)))]; + tensor var_5219_cast = add(x = out_153_cast, y = var_5218_to_fp16)[name = tensor("op_5219_cast")]; + tensor var_5221_to_fp16 = const()[name = tensor("op_5221_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(678789824)))]; + tensor hidden_states_217_cast = mul(x = var_5219_cast, y = var_5221_to_fp16)[name = tensor("hidden_states_217_cast")]; + tensor var_5228 = const()[name = tensor("op_5228"), 
val = tensor([1, 1])]; + tensor var_5230 = const()[name = tensor("op_5230"), val = tensor([1, 1])]; + tensor q_103_pad_type_0 = const()[name = tensor("q_103_pad_type_0"), val = tensor("custom")]; + tensor q_103_pad_0 = const()[name = tensor("q_103_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(678792448))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(680021312))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_103_cast = conv(dilations = var_5230, groups = var_31, pad = q_103_pad_0, pad_type = q_103_pad_type_0, strides = var_5228, weight = unet_mid_block_attentions_0_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_217_cast)[name = tensor("q_103_cast")]; + tensor var_5234 = const()[name = tensor("op_5234"), val = tensor([1, 1])]; + tensor var_5236 = const()[name = tensor("op_5236"), val = tensor([1, 1])]; + tensor k_103_pad_type_0 = const()[name = tensor("k_103_pad_type_0"), val = tensor("custom")]; + tensor k_103_pad_0 = const()[name = tensor("k_103_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(680021504))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(681987648))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_103_cast = conv(dilations = var_5236, groups = var_31, pad = k_103_pad_0, pad_type = k_103_pad_type_0, strides = var_5234, weight = unet_mid_block_attentions_0_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_103_cast")]; + tensor var_5240 = const()[name = tensor("op_5240"), val = tensor([1, 1])]; + tensor var_5242 = const()[name = tensor("op_5242"), val = tensor([1, 1])]; + tensor v_103_pad_type_0 = const()[name = tensor("v_103_pad_type_0"), val = tensor("custom")]; + tensor v_103_pad_0 = const()[name = tensor("v_103_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(681987840))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(683953984))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_103_cast = conv(dilations = var_5242, groups = var_31, pad = v_103_pad_0, pad_type = v_103_pad_type_0, strides = var_5240, weight = unet_mid_block_attentions_0_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_103_cast")]; + tensor var_5246 = const()[name = tensor("op_5246"), val = tensor([2, 20, 64, -1])]; + tensor var_5247_cast = reshape(shape = var_5246, x = q_103_cast)[name = tensor("op_5247_cast")]; + tensor var_5248 = const()[name = tensor("op_5248"), val = tensor([2, 20, 64, -1])]; + tensor var_5249_cast = reshape(shape = 
var_5248, x = k_103_cast)[name = tensor("op_5249_cast")]; + tensor var_5250 = const()[name = tensor("op_5250"), val = tensor([2, 20, 64, -1])]; + tensor var_5251_cast = reshape(shape = var_5250, x = v_103_cast)[name = tensor("op_5251_cast")]; + tensor attn_weights_205_transpose_x_0 = const()[name = tensor("attn_weights_205_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_205_transpose_y_0 = const()[name = tensor("attn_weights_205_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_205_cast = matmul(transpose_x = attn_weights_205_transpose_x_0, transpose_y = attn_weights_205_transpose_y_0, x = var_5247_cast, y = var_5249_cast)[name = tensor("attn_weights_205_cast")]; + tensor attn_weights_207_cast = mul(x = attn_weights_205_cast, y = var_12_to_fp16)[name = tensor("attn_weights_207_cast")]; + tensor var_5255_cast = softmax(axis = var_18, x = attn_weights_207_cast)[name = tensor("op_5255_cast")]; + tensor attn_103_transpose_x_0 = const()[name = tensor("attn_103_transpose_x_0"), val = tensor(false)]; + tensor attn_103_transpose_y_0 = const()[name = tensor("attn_103_transpose_y_0"), val = tensor(true)]; + tensor attn_103_cast = matmul(transpose_x = attn_103_transpose_x_0, transpose_y = attn_103_transpose_y_0, x = var_5251_cast, y = var_5255_cast)[name = tensor("attn_103_cast")]; + tensor var_5259 = const()[name = tensor("op_5259"), val = tensor([2, 1280, 1, -1])]; + tensor input_337_cast = reshape(shape = var_5259, x = attn_103_cast)[name = tensor("input_337_cast")]; + tensor var_5264 = const()[name = tensor("op_5264"), val = tensor([1, 1])]; + tensor var_5266 = const()[name = tensor("op_5266"), val = tensor([1, 1])]; + tensor var_5268_pad_type_0 = const()[name = tensor("op_5268_pad_type_0"), val = tensor("custom")]; + tensor var_5268_pad_0 = const()[name = tensor("op_5268_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(683954176))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(685183040))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_mid_block_attentions_0_transformer_blocks_1_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("unet_mid_block_attentions_0_transformer_blocks_1_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(685183232)))]; + tensor var_5268_cast = conv(bias = unet_mid_block_attentions_0_transformer_blocks_1_attn2_to_out_0_bias_to_fp16, dilations = var_5266, groups = var_31, pad = var_5268_pad_0, pad_type = var_5268_pad_type_0, strides = var_5264, weight = unet_mid_block_attentions_0_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized, x = input_337_cast)[name = tensor("op_5268_cast")]; + tensor inputs_155_cast = add(x = var_5268_cast, y = inputs_153_cast)[name = tensor("inputs_155_cast")]; + tensor var_5272 = const()[name = tensor("op_5272"), val = tensor([1])]; + tensor channels_mean_155_cast = reduce_mean(axes = var_5272, keep_dims = var_23, x = inputs_155_cast)[name = tensor("channels_mean_155_cast")]; + tensor zero_mean_155_cast = sub(x = inputs_155_cast, y = channels_mean_155_cast)[name = tensor("zero_mean_155_cast")]; + tensor zero_mean_sq_155_cast = mul(x = zero_mean_155_cast, y = zero_mean_155_cast)[name = 
tensor("zero_mean_sq_155_cast")]; + tensor var_5276 = const()[name = tensor("op_5276"), val = tensor([1])]; + tensor var_5277_cast = reduce_mean(axes = var_5276, keep_dims = var_23, x = zero_mean_sq_155_cast)[name = tensor("op_5277_cast")]; + tensor var_5278_to_fp16 = const()[name = tensor("op_5278_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5279_cast = add(x = var_5277_cast, y = var_5278_to_fp16)[name = tensor("op_5279_cast")]; + tensor denom_155_epsilon_0_to_fp16 = const()[name = tensor("denom_155_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_155_cast = rsqrt(epsilon = denom_155_epsilon_0_to_fp16, x = var_5279_cast)[name = tensor("denom_155_cast")]; + tensor out_155_cast = mul(x = zero_mean_155_cast, y = denom_155_cast)[name = tensor("out_155_cast")]; + tensor var_5283_to_fp16 = const()[name = tensor("op_5283_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(685185856)))]; + tensor var_5284_cast = add(x = out_155_cast, y = var_5283_to_fp16)[name = tensor("op_5284_cast")]; + tensor var_5286_to_fp16 = const()[name = tensor("op_5286_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(685188480)))]; + tensor input_339_cast = mul(x = var_5284_cast, y = var_5286_to_fp16)[name = tensor("input_339_cast")]; + tensor var_5294 = const()[name = tensor("op_5294"), val = tensor([1, 1])]; + tensor var_5296 = const()[name = tensor("op_5296"), val = tensor([1, 1])]; + tensor var_5298_pad_type_0 = const()[name = tensor("op_5298_pad_type_0"), val = tensor("custom")]; + tensor var_5298_pad_0 = const()[name = tensor("op_5298_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(685191104))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(695021568))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor unet_mid_block_attentions_0_transformer_blocks_1_ff_net_0_proj_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(695021760))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(695029504))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_1_ff_net_0_proj_bias_to_fp16_palettized"), shape = tensor([10240])]; + tensor var_5298_cast = conv(bias = unet_mid_block_attentions_0_transformer_blocks_1_ff_net_0_proj_bias_to_fp16_palettized, dilations = var_5296, groups = var_31, pad = var_5298_pad_0, pad_type = var_5298_pad_type_0, strides = var_5294, weight = unet_mid_block_attentions_0_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized, x = input_339_cast)[name = tensor("op_5298_cast")]; + tensor var_5299_split_sizes_0 = const()[name = tensor("op_5299_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_5299_axis_0 = const()[name = tensor("op_5299_axis_0"), val = tensor(1)]; + tensor var_5299_cast_0, tensor var_5299_cast_1 = split(axis = var_5299_axis_0, split_sizes = var_5299_split_sizes_0, x = var_5298_cast)[name = tensor("op_5299_cast")]; + tensor var_5301_mode_0 = const()[name = tensor("op_5301_mode_0"), val = tensor("EXACT")]; + tensor var_5301_cast = gelu(mode = var_5301_mode_0, x = var_5299_cast_1)[name = 
tensor("op_5301_cast")]; + tensor input_341_cast = mul(x = var_5299_cast_0, y = var_5301_cast)[name = tensor("input_341_cast")]; + tensor var_5305 = const()[name = tensor("op_5305"), val = tensor([1, 1])]; + tensor var_5307 = const()[name = tensor("op_5307"), val = tensor([1, 1])]; + tensor var_5309_pad_type_0 = const()[name = tensor("op_5309_pad_type_0"), val = tensor("custom")]; + tensor var_5309_pad_0 = const()[name = tensor("op_5309_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(695029696))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(699944960))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor unet_mid_block_attentions_0_transformer_blocks_1_ff_net_2_bias_to_fp16 = const()[name = tensor("unet_mid_block_attentions_0_transformer_blocks_1_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(699945152)))]; + tensor var_5309_cast = conv(bias = unet_mid_block_attentions_0_transformer_blocks_1_ff_net_2_bias_to_fp16, dilations = var_5307, groups = var_31, pad = var_5309_pad_0, pad_type = var_5309_pad_type_0, strides = var_5305, weight = unet_mid_block_attentions_0_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized, x = input_341_cast)[name = tensor("op_5309_cast")]; + tensor inputs_157_cast = add(x = var_5309_cast, y = inputs_155_cast)[name = tensor("inputs_157_cast")]; + tensor var_5319 = const()[name = tensor("op_5319"), val = tensor([1])]; + tensor channels_mean_157_cast = reduce_mean(axes = var_5319, keep_dims = var_23, x = inputs_157_cast)[name = tensor("channels_mean_157_cast")]; + tensor zero_mean_157_cast = sub(x = inputs_157_cast, y = channels_mean_157_cast)[name = tensor("zero_mean_157_cast")]; + tensor zero_mean_sq_157_cast = mul(x = zero_mean_157_cast, y = zero_mean_157_cast)[name = tensor("zero_mean_sq_157_cast")]; + tensor var_5323 = const()[name = tensor("op_5323"), val = tensor([1])]; + tensor var_5324_cast = reduce_mean(axes = var_5323, keep_dims = var_23, x = zero_mean_sq_157_cast)[name = tensor("op_5324_cast")]; + tensor var_5325_to_fp16 = const()[name = tensor("op_5325_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5326_cast = add(x = var_5324_cast, y = var_5325_to_fp16)[name = tensor("op_5326_cast")]; + tensor denom_157_epsilon_0_to_fp16 = const()[name = tensor("denom_157_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_157_cast = rsqrt(epsilon = denom_157_epsilon_0_to_fp16, x = var_5326_cast)[name = tensor("denom_157_cast")]; + tensor out_157_cast = mul(x = zero_mean_157_cast, y = denom_157_cast)[name = tensor("out_157_cast")]; + tensor var_5330_to_fp16 = const()[name = tensor("op_5330_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(699947776)))]; + tensor var_5331_cast = add(x = out_157_cast, y = var_5330_to_fp16)[name = tensor("op_5331_cast")]; + tensor var_5333_to_fp16 = const()[name = tensor("op_5333_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(699950400)))]; + tensor hidden_states_221_cast = mul(x = var_5331_cast, y = var_5333_to_fp16)[name = tensor("hidden_states_221_cast")]; + tensor var_5340 = const()[name = tensor("op_5340"), val = tensor([1, 
1])]; + tensor var_5342 = const()[name = tensor("op_5342"), val = tensor([1, 1])]; + tensor q_105_pad_type_0 = const()[name = tensor("q_105_pad_type_0"), val = tensor("custom")]; + tensor q_105_pad_0 = const()[name = tensor("q_105_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(699953024))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(701181888))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_105_cast = conv(dilations = var_5342, groups = var_31, pad = q_105_pad_0, pad_type = q_105_pad_type_0, strides = var_5340, weight = unet_mid_block_attentions_0_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_221_cast)[name = tensor("q_105_cast")]; + tensor var_5346 = const()[name = tensor("op_5346"), val = tensor([1, 1])]; + tensor var_5348 = const()[name = tensor("op_5348"), val = tensor([1, 1])]; + tensor k_105_pad_type_0 = const()[name = tensor("k_105_pad_type_0"), val = tensor("custom")]; + tensor k_105_pad_0 = const()[name = tensor("k_105_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(701182080))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(702410944))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_105_cast = conv(dilations = var_5348, groups = var_31, pad = k_105_pad_0, pad_type = k_105_pad_type_0, strides = var_5346, weight = unet_mid_block_attentions_0_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_221_cast)[name = tensor("k_105_cast")]; + tensor var_5352 = const()[name = tensor("op_5352"), val = tensor([1, 1])]; + tensor var_5354 = const()[name = tensor("op_5354"), val = tensor([1, 1])]; + tensor v_105_pad_type_0 = const()[name = tensor("v_105_pad_type_0"), val = tensor("custom")]; + tensor v_105_pad_0 = const()[name = tensor("v_105_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(702411136))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(703640000))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_105_cast = conv(dilations = var_5354, groups = var_31, pad = v_105_pad_0, pad_type = v_105_pad_type_0, strides = var_5352, weight = unet_mid_block_attentions_0_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_221_cast)[name = tensor("v_105_cast")]; + tensor var_5358 = const()[name = tensor("op_5358"), val = tensor([2, 20, 64, -1])]; + tensor var_5359_cast = reshape(shape = var_5358, x = q_105_cast)[name = tensor("op_5359_cast")]; + tensor var_5360 = const()[name = tensor("op_5360"), val = tensor([2, 20, 64, -1])]; + tensor var_5361_cast = reshape(shape = var_5360, x = 
k_105_cast)[name = tensor("op_5361_cast")]; + tensor var_5362 = const()[name = tensor("op_5362"), val = tensor([2, 20, 64, -1])]; + tensor var_5363_cast = reshape(shape = var_5362, x = v_105_cast)[name = tensor("op_5363_cast")]; + tensor attn_weights_209_transpose_x_0 = const()[name = tensor("attn_weights_209_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_209_transpose_y_0 = const()[name = tensor("attn_weights_209_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_209_cast = matmul(transpose_x = attn_weights_209_transpose_x_0, transpose_y = attn_weights_209_transpose_y_0, x = var_5359_cast, y = var_5361_cast)[name = tensor("attn_weights_209_cast")]; + tensor attn_weights_211_cast = mul(x = attn_weights_209_cast, y = var_12_to_fp16)[name = tensor("attn_weights_211_cast")]; + tensor var_5367_cast = softmax(axis = var_18, x = attn_weights_211_cast)[name = tensor("op_5367_cast")]; + tensor attn_105_transpose_x_0 = const()[name = tensor("attn_105_transpose_x_0"), val = tensor(false)]; + tensor attn_105_transpose_y_0 = const()[name = tensor("attn_105_transpose_y_0"), val = tensor(true)]; + tensor attn_105_cast = matmul(transpose_x = attn_105_transpose_x_0, transpose_y = attn_105_transpose_y_0, x = var_5363_cast, y = var_5367_cast)[name = tensor("attn_105_cast")]; + tensor var_5371 = const()[name = tensor("op_5371"), val = tensor([2, 1280, 1, -1])]; + tensor input_343_cast = reshape(shape = var_5371, x = attn_105_cast)[name = tensor("input_343_cast")]; + tensor var_5376 = const()[name = tensor("op_5376"), val = tensor([1, 1])]; + tensor var_5378 = const()[name = tensor("op_5378"), val = tensor([1, 1])]; + tensor var_5380_pad_type_0 = const()[name = tensor("op_5380_pad_type_0"), val = tensor("custom")]; + tensor var_5380_pad_0 = const()[name = tensor("op_5380_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(703640192))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(704869056))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_mid_block_attentions_0_transformer_blocks_2_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("unet_mid_block_attentions_0_transformer_blocks_2_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(704869248)))]; + tensor var_5380_cast = conv(bias = unet_mid_block_attentions_0_transformer_blocks_2_attn1_to_out_0_bias_to_fp16, dilations = var_5378, groups = var_31, pad = var_5380_pad_0, pad_type = var_5380_pad_type_0, strides = var_5376, weight = unet_mid_block_attentions_0_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized, x = input_343_cast)[name = tensor("op_5380_cast")]; + tensor inputs_159_cast = add(x = var_5380_cast, y = inputs_157_cast)[name = tensor("inputs_159_cast")]; + tensor var_5384 = const()[name = tensor("op_5384"), val = tensor([1])]; + tensor channels_mean_159_cast = reduce_mean(axes = var_5384, keep_dims = var_23, x = inputs_159_cast)[name = tensor("channels_mean_159_cast")]; + tensor zero_mean_159_cast = sub(x = inputs_159_cast, y = channels_mean_159_cast)[name = tensor("zero_mean_159_cast")]; + tensor zero_mean_sq_159_cast = mul(x = zero_mean_159_cast, y = zero_mean_159_cast)[name = 
tensor("zero_mean_sq_159_cast")]; + tensor var_5388 = const()[name = tensor("op_5388"), val = tensor([1])]; + tensor var_5389_cast = reduce_mean(axes = var_5388, keep_dims = var_23, x = zero_mean_sq_159_cast)[name = tensor("op_5389_cast")]; + tensor var_5390_to_fp16 = const()[name = tensor("op_5390_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5391_cast = add(x = var_5389_cast, y = var_5390_to_fp16)[name = tensor("op_5391_cast")]; + tensor denom_159_epsilon_0_to_fp16 = const()[name = tensor("denom_159_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_159_cast = rsqrt(epsilon = denom_159_epsilon_0_to_fp16, x = var_5391_cast)[name = tensor("denom_159_cast")]; + tensor out_159_cast = mul(x = zero_mean_159_cast, y = denom_159_cast)[name = tensor("out_159_cast")]; + tensor var_5395_to_fp16 = const()[name = tensor("op_5395_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(704871872)))]; + tensor var_5396_cast = add(x = out_159_cast, y = var_5395_to_fp16)[name = tensor("op_5396_cast")]; + tensor var_5398_to_fp16 = const()[name = tensor("op_5398_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(704874496)))]; + tensor hidden_states_223_cast = mul(x = var_5396_cast, y = var_5398_to_fp16)[name = tensor("hidden_states_223_cast")]; + tensor var_5405 = const()[name = tensor("op_5405"), val = tensor([1, 1])]; + tensor var_5407 = const()[name = tensor("op_5407"), val = tensor([1, 1])]; + tensor q_107_pad_type_0 = const()[name = tensor("q_107_pad_type_0"), val = tensor("custom")]; + tensor q_107_pad_0 = const()[name = tensor("q_107_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(704877120))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(706105984))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_107_cast = conv(dilations = var_5407, groups = var_31, pad = q_107_pad_0, pad_type = q_107_pad_type_0, strides = var_5405, weight = unet_mid_block_attentions_0_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_223_cast)[name = tensor("q_107_cast")]; + tensor var_5411 = const()[name = tensor("op_5411"), val = tensor([1, 1])]; + tensor var_5413 = const()[name = tensor("op_5413"), val = tensor([1, 1])]; + tensor k_107_pad_type_0 = const()[name = tensor("k_107_pad_type_0"), val = tensor("custom")]; + tensor k_107_pad_0 = const()[name = tensor("k_107_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(706106176))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(708072320))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_107_cast = conv(dilations = var_5413, groups = var_31, pad = k_107_pad_0, pad_type = k_107_pad_type_0, strides = var_5411, weight = unet_mid_block_attentions_0_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_107_cast")]; + tensor 
var_5417 = const()[name = tensor("op_5417"), val = tensor([1, 1])]; + tensor var_5419 = const()[name = tensor("op_5419"), val = tensor([1, 1])]; + tensor v_107_pad_type_0 = const()[name = tensor("v_107_pad_type_0"), val = tensor("custom")]; + tensor v_107_pad_0 = const()[name = tensor("v_107_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(708072512))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(710038656))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_107_cast = conv(dilations = var_5419, groups = var_31, pad = v_107_pad_0, pad_type = v_107_pad_type_0, strides = var_5417, weight = unet_mid_block_attentions_0_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_107_cast")]; + tensor var_5423 = const()[name = tensor("op_5423"), val = tensor([2, 20, 64, -1])]; + tensor var_5424_cast = reshape(shape = var_5423, x = q_107_cast)[name = tensor("op_5424_cast")]; + tensor var_5425 = const()[name = tensor("op_5425"), val = tensor([2, 20, 64, -1])]; + tensor var_5426_cast = reshape(shape = var_5425, x = k_107_cast)[name = tensor("op_5426_cast")]; + tensor var_5427 = const()[name = tensor("op_5427"), val = tensor([2, 20, 64, -1])]; + tensor var_5428_cast = reshape(shape = var_5427, x = v_107_cast)[name = tensor("op_5428_cast")]; + tensor attn_weights_213_transpose_x_0 = const()[name = tensor("attn_weights_213_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_213_transpose_y_0 = const()[name = tensor("attn_weights_213_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_213_cast = matmul(transpose_x = attn_weights_213_transpose_x_0, transpose_y = attn_weights_213_transpose_y_0, x = var_5424_cast, y = var_5426_cast)[name = tensor("attn_weights_213_cast")]; + tensor attn_weights_215_cast = mul(x = attn_weights_213_cast, y = var_12_to_fp16)[name = tensor("attn_weights_215_cast")]; + tensor var_5432_cast = softmax(axis = var_18, x = attn_weights_215_cast)[name = tensor("op_5432_cast")]; + tensor attn_107_transpose_x_0 = const()[name = tensor("attn_107_transpose_x_0"), val = tensor(false)]; + tensor attn_107_transpose_y_0 = const()[name = tensor("attn_107_transpose_y_0"), val = tensor(true)]; + tensor attn_107_cast = matmul(transpose_x = attn_107_transpose_x_0, transpose_y = attn_107_transpose_y_0, x = var_5428_cast, y = var_5432_cast)[name = tensor("attn_107_cast")]; + tensor var_5436 = const()[name = tensor("op_5436"), val = tensor([2, 1280, 1, -1])]; + tensor input_345_cast = reshape(shape = var_5436, x = attn_107_cast)[name = tensor("input_345_cast")]; + tensor var_5441 = const()[name = tensor("op_5441"), val = tensor([1, 1])]; + tensor var_5443 = const()[name = tensor("op_5443"), val = tensor([1, 1])]; + tensor var_5445_pad_type_0 = const()[name = tensor("op_5445_pad_type_0"), val = tensor("custom")]; + tensor var_5445_pad_0 = const()[name = tensor("op_5445_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(710038848))), lut = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(711267712))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_mid_block_attentions_0_transformer_blocks_2_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("unet_mid_block_attentions_0_transformer_blocks_2_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(711267904)))]; + tensor var_5445_cast = conv(bias = unet_mid_block_attentions_0_transformer_blocks_2_attn2_to_out_0_bias_to_fp16, dilations = var_5443, groups = var_31, pad = var_5445_pad_0, pad_type = var_5445_pad_type_0, strides = var_5441, weight = unet_mid_block_attentions_0_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized, x = input_345_cast)[name = tensor("op_5445_cast")]; + tensor inputs_161_cast = add(x = var_5445_cast, y = inputs_159_cast)[name = tensor("inputs_161_cast")]; + tensor var_5449 = const()[name = tensor("op_5449"), val = tensor([1])]; + tensor channels_mean_161_cast = reduce_mean(axes = var_5449, keep_dims = var_23, x = inputs_161_cast)[name = tensor("channels_mean_161_cast")]; + tensor zero_mean_161_cast = sub(x = inputs_161_cast, y = channels_mean_161_cast)[name = tensor("zero_mean_161_cast")]; + tensor zero_mean_sq_161_cast = mul(x = zero_mean_161_cast, y = zero_mean_161_cast)[name = tensor("zero_mean_sq_161_cast")]; + tensor var_5453 = const()[name = tensor("op_5453"), val = tensor([1])]; + tensor var_5454_cast = reduce_mean(axes = var_5453, keep_dims = var_23, x = zero_mean_sq_161_cast)[name = tensor("op_5454_cast")]; + tensor var_5455_to_fp16 = const()[name = tensor("op_5455_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5456_cast = add(x = var_5454_cast, y = var_5455_to_fp16)[name = tensor("op_5456_cast")]; + tensor denom_161_epsilon_0_to_fp16 = const()[name = tensor("denom_161_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_161_cast = rsqrt(epsilon = denom_161_epsilon_0_to_fp16, x = var_5456_cast)[name = tensor("denom_161_cast")]; + tensor out_161_cast = mul(x = zero_mean_161_cast, y = denom_161_cast)[name = tensor("out_161_cast")]; + tensor var_5460_to_fp16 = const()[name = tensor("op_5460_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(711270528)))]; + tensor var_5461_cast = add(x = out_161_cast, y = var_5460_to_fp16)[name = tensor("op_5461_cast")]; + tensor var_5463_to_fp16 = const()[name = tensor("op_5463_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(711273152)))]; + tensor input_347_cast = mul(x = var_5461_cast, y = var_5463_to_fp16)[name = tensor("input_347_cast")]; + tensor var_5471 = const()[name = tensor("op_5471"), val = tensor([1, 1])]; + tensor var_5473 = const()[name = tensor("op_5473"), val = tensor([1, 1])]; + tensor var_5475_pad_type_0 = const()[name = tensor("op_5475_pad_type_0"), val = tensor("custom")]; + tensor var_5475_pad_0 = const()[name = tensor("op_5475_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(711275776))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(721106240))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized"), 
shape = tensor([10240, 1280, 1, 1])]; + tensor unet_mid_block_attentions_0_transformer_blocks_2_ff_net_0_proj_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(721106432))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(721114176))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_2_ff_net_0_proj_bias_to_fp16_palettized"), shape = tensor([10240])]; + tensor var_5475_cast = conv(bias = unet_mid_block_attentions_0_transformer_blocks_2_ff_net_0_proj_bias_to_fp16_palettized, dilations = var_5473, groups = var_31, pad = var_5475_pad_0, pad_type = var_5475_pad_type_0, strides = var_5471, weight = unet_mid_block_attentions_0_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized, x = input_347_cast)[name = tensor("op_5475_cast")]; + tensor var_5476_split_sizes_0 = const()[name = tensor("op_5476_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_5476_axis_0 = const()[name = tensor("op_5476_axis_0"), val = tensor(1)]; + tensor var_5476_cast_0, tensor var_5476_cast_1 = split(axis = var_5476_axis_0, split_sizes = var_5476_split_sizes_0, x = var_5475_cast)[name = tensor("op_5476_cast")]; + tensor var_5478_mode_0 = const()[name = tensor("op_5478_mode_0"), val = tensor("EXACT")]; + tensor var_5478_cast = gelu(mode = var_5478_mode_0, x = var_5476_cast_1)[name = tensor("op_5478_cast")]; + tensor input_349_cast = mul(x = var_5476_cast_0, y = var_5478_cast)[name = tensor("input_349_cast")]; + tensor var_5482 = const()[name = tensor("op_5482"), val = tensor([1, 1])]; + tensor var_5484 = const()[name = tensor("op_5484"), val = tensor([1, 1])]; + tensor var_5486_pad_type_0 = const()[name = tensor("op_5486_pad_type_0"), val = tensor("custom")]; + tensor var_5486_pad_0 = const()[name = tensor("op_5486_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(721114368))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(726029632))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor unet_mid_block_attentions_0_transformer_blocks_2_ff_net_2_bias_to_fp16 = const()[name = tensor("unet_mid_block_attentions_0_transformer_blocks_2_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(726029824)))]; + tensor var_5486_cast = conv(bias = unet_mid_block_attentions_0_transformer_blocks_2_ff_net_2_bias_to_fp16, dilations = var_5484, groups = var_31, pad = var_5486_pad_0, pad_type = var_5486_pad_type_0, strides = var_5482, weight = unet_mid_block_attentions_0_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized, x = input_349_cast)[name = tensor("op_5486_cast")]; + tensor inputs_163_cast = add(x = var_5486_cast, y = inputs_161_cast)[name = tensor("inputs_163_cast")]; + tensor var_5496 = const()[name = tensor("op_5496"), val = tensor([1])]; + tensor channels_mean_163_cast = reduce_mean(axes = var_5496, keep_dims = var_23, x = inputs_163_cast)[name = tensor("channels_mean_163_cast")]; + tensor zero_mean_163_cast = sub(x = inputs_163_cast, y = channels_mean_163_cast)[name = tensor("zero_mean_163_cast")]; + tensor zero_mean_sq_163_cast = mul(x = zero_mean_163_cast, y = 
zero_mean_163_cast)[name = tensor("zero_mean_sq_163_cast")]; + tensor var_5500 = const()[name = tensor("op_5500"), val = tensor([1])]; + tensor var_5501_cast = reduce_mean(axes = var_5500, keep_dims = var_23, x = zero_mean_sq_163_cast)[name = tensor("op_5501_cast")]; + tensor var_5502_to_fp16 = const()[name = tensor("op_5502_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5503_cast = add(x = var_5501_cast, y = var_5502_to_fp16)[name = tensor("op_5503_cast")]; + tensor denom_163_epsilon_0_to_fp16 = const()[name = tensor("denom_163_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_163_cast = rsqrt(epsilon = denom_163_epsilon_0_to_fp16, x = var_5503_cast)[name = tensor("denom_163_cast")]; + tensor out_163_cast = mul(x = zero_mean_163_cast, y = denom_163_cast)[name = tensor("out_163_cast")]; + tensor var_5507_to_fp16 = const()[name = tensor("op_5507_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(726032448)))]; + tensor var_5508_cast = add(x = out_163_cast, y = var_5507_to_fp16)[name = tensor("op_5508_cast")]; + tensor var_5510_to_fp16 = const()[name = tensor("op_5510_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(726035072)))]; + tensor hidden_states_227_cast = mul(x = var_5508_cast, y = var_5510_to_fp16)[name = tensor("hidden_states_227_cast")]; + tensor var_5517 = const()[name = tensor("op_5517"), val = tensor([1, 1])]; + tensor var_5519 = const()[name = tensor("op_5519"), val = tensor([1, 1])]; + tensor q_109_pad_type_0 = const()[name = tensor("q_109_pad_type_0"), val = tensor("custom")]; + tensor q_109_pad_0 = const()[name = tensor("q_109_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(726037696))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(727266560))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_109_cast = conv(dilations = var_5519, groups = var_31, pad = q_109_pad_0, pad_type = q_109_pad_type_0, strides = var_5517, weight = unet_mid_block_attentions_0_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_227_cast)[name = tensor("q_109_cast")]; + tensor var_5523 = const()[name = tensor("op_5523"), val = tensor([1, 1])]; + tensor var_5525 = const()[name = tensor("op_5525"), val = tensor([1, 1])]; + tensor k_109_pad_type_0 = const()[name = tensor("k_109_pad_type_0"), val = tensor("custom")]; + tensor k_109_pad_0 = const()[name = tensor("k_109_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(727266752))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(728495616))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_109_cast = conv(dilations = var_5525, groups = var_31, pad = k_109_pad_0, pad_type = k_109_pad_type_0, strides = var_5523, weight = unet_mid_block_attentions_0_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_227_cast)[name = 
tensor("k_109_cast")]; + tensor var_5529 = const()[name = tensor("op_5529"), val = tensor([1, 1])]; + tensor var_5531 = const()[name = tensor("op_5531"), val = tensor([1, 1])]; + tensor v_109_pad_type_0 = const()[name = tensor("v_109_pad_type_0"), val = tensor("custom")]; + tensor v_109_pad_0 = const()[name = tensor("v_109_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(728495808))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(729724672))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_109_cast = conv(dilations = var_5531, groups = var_31, pad = v_109_pad_0, pad_type = v_109_pad_type_0, strides = var_5529, weight = unet_mid_block_attentions_0_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_227_cast)[name = tensor("v_109_cast")]; + tensor var_5535 = const()[name = tensor("op_5535"), val = tensor([2, 20, 64, -1])]; + tensor var_5536_cast = reshape(shape = var_5535, x = q_109_cast)[name = tensor("op_5536_cast")]; + tensor var_5537 = const()[name = tensor("op_5537"), val = tensor([2, 20, 64, -1])]; + tensor var_5538_cast = reshape(shape = var_5537, x = k_109_cast)[name = tensor("op_5538_cast")]; + tensor var_5539 = const()[name = tensor("op_5539"), val = tensor([2, 20, 64, -1])]; + tensor var_5540_cast = reshape(shape = var_5539, x = v_109_cast)[name = tensor("op_5540_cast")]; + tensor attn_weights_217_transpose_x_0 = const()[name = tensor("attn_weights_217_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_217_transpose_y_0 = const()[name = tensor("attn_weights_217_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_217_cast = matmul(transpose_x = attn_weights_217_transpose_x_0, transpose_y = attn_weights_217_transpose_y_0, x = var_5536_cast, y = var_5538_cast)[name = tensor("attn_weights_217_cast")]; + tensor attn_weights_219_cast = mul(x = attn_weights_217_cast, y = var_12_to_fp16)[name = tensor("attn_weights_219_cast")]; + tensor var_5544_cast = softmax(axis = var_18, x = attn_weights_219_cast)[name = tensor("op_5544_cast")]; + tensor attn_109_transpose_x_0 = const()[name = tensor("attn_109_transpose_x_0"), val = tensor(false)]; + tensor attn_109_transpose_y_0 = const()[name = tensor("attn_109_transpose_y_0"), val = tensor(true)]; + tensor attn_109_cast = matmul(transpose_x = attn_109_transpose_x_0, transpose_y = attn_109_transpose_y_0, x = var_5540_cast, y = var_5544_cast)[name = tensor("attn_109_cast")]; + tensor var_5548 = const()[name = tensor("op_5548"), val = tensor([2, 1280, 1, -1])]; + tensor input_351_cast = reshape(shape = var_5548, x = attn_109_cast)[name = tensor("input_351_cast")]; + tensor var_5553 = const()[name = tensor("op_5553"), val = tensor([1, 1])]; + tensor var_5555 = const()[name = tensor("op_5555"), val = tensor([1, 1])]; + tensor var_5557_pad_type_0 = const()[name = tensor("op_5557_pad_type_0"), val = tensor("custom")]; + tensor var_5557_pad_0 = const()[name = tensor("op_5557_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(729724864))), lut = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(730953728))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_mid_block_attentions_0_transformer_blocks_3_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("unet_mid_block_attentions_0_transformer_blocks_3_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(730953920)))]; + tensor var_5557_cast = conv(bias = unet_mid_block_attentions_0_transformer_blocks_3_attn1_to_out_0_bias_to_fp16, dilations = var_5555, groups = var_31, pad = var_5557_pad_0, pad_type = var_5557_pad_type_0, strides = var_5553, weight = unet_mid_block_attentions_0_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized, x = input_351_cast)[name = tensor("op_5557_cast")]; + tensor inputs_165_cast = add(x = var_5557_cast, y = inputs_163_cast)[name = tensor("inputs_165_cast")]; + tensor var_5561 = const()[name = tensor("op_5561"), val = tensor([1])]; + tensor channels_mean_165_cast = reduce_mean(axes = var_5561, keep_dims = var_23, x = inputs_165_cast)[name = tensor("channels_mean_165_cast")]; + tensor zero_mean_165_cast = sub(x = inputs_165_cast, y = channels_mean_165_cast)[name = tensor("zero_mean_165_cast")]; + tensor zero_mean_sq_165_cast = mul(x = zero_mean_165_cast, y = zero_mean_165_cast)[name = tensor("zero_mean_sq_165_cast")]; + tensor var_5565 = const()[name = tensor("op_5565"), val = tensor([1])]; + tensor var_5566_cast = reduce_mean(axes = var_5565, keep_dims = var_23, x = zero_mean_sq_165_cast)[name = tensor("op_5566_cast")]; + tensor var_5567_to_fp16 = const()[name = tensor("op_5567_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5568_cast = add(x = var_5566_cast, y = var_5567_to_fp16)[name = tensor("op_5568_cast")]; + tensor denom_165_epsilon_0_to_fp16 = const()[name = tensor("denom_165_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_165_cast = rsqrt(epsilon = denom_165_epsilon_0_to_fp16, x = var_5568_cast)[name = tensor("denom_165_cast")]; + tensor out_165_cast = mul(x = zero_mean_165_cast, y = denom_165_cast)[name = tensor("out_165_cast")]; + tensor var_5572_to_fp16 = const()[name = tensor("op_5572_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(730956544)))]; + tensor var_5573_cast = add(x = out_165_cast, y = var_5572_to_fp16)[name = tensor("op_5573_cast")]; + tensor var_5575_to_fp16 = const()[name = tensor("op_5575_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(730959168)))]; + tensor hidden_states_229_cast = mul(x = var_5573_cast, y = var_5575_to_fp16)[name = tensor("hidden_states_229_cast")]; + tensor var_5582 = const()[name = tensor("op_5582"), val = tensor([1, 1])]; + tensor var_5584 = const()[name = tensor("op_5584"), val = tensor([1, 1])]; + tensor q_111_pad_type_0 = const()[name = tensor("q_111_pad_type_0"), val = tensor("custom")]; + tensor q_111_pad_0 = const()[name = tensor("q_111_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(730961792))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(732190656))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized"), 
shape = tensor([1280, 1280, 1, 1])]; + tensor q_111_cast = conv(dilations = var_5584, groups = var_31, pad = q_111_pad_0, pad_type = q_111_pad_type_0, strides = var_5582, weight = unet_mid_block_attentions_0_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_229_cast)[name = tensor("q_111_cast")]; + tensor var_5588 = const()[name = tensor("op_5588"), val = tensor([1, 1])]; + tensor var_5590 = const()[name = tensor("op_5590"), val = tensor([1, 1])]; + tensor k_111_pad_type_0 = const()[name = tensor("k_111_pad_type_0"), val = tensor("custom")]; + tensor k_111_pad_0 = const()[name = tensor("k_111_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(732190848))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(734156992))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_111_cast = conv(dilations = var_5590, groups = var_31, pad = k_111_pad_0, pad_type = k_111_pad_type_0, strides = var_5588, weight = unet_mid_block_attentions_0_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_111_cast")]; + tensor var_5594 = const()[name = tensor("op_5594"), val = tensor([1, 1])]; + tensor var_5596 = const()[name = tensor("op_5596"), val = tensor([1, 1])]; + tensor v_111_pad_type_0 = const()[name = tensor("v_111_pad_type_0"), val = tensor("custom")]; + tensor v_111_pad_0 = const()[name = tensor("v_111_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(734157184))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(736123328))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_111_cast = conv(dilations = var_5596, groups = var_31, pad = v_111_pad_0, pad_type = v_111_pad_type_0, strides = var_5594, weight = unet_mid_block_attentions_0_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_111_cast")]; + tensor var_5600 = const()[name = tensor("op_5600"), val = tensor([2, 20, 64, -1])]; + tensor var_5601_cast = reshape(shape = var_5600, x = q_111_cast)[name = tensor("op_5601_cast")]; + tensor var_5602 = const()[name = tensor("op_5602"), val = tensor([2, 20, 64, -1])]; + tensor var_5603_cast = reshape(shape = var_5602, x = k_111_cast)[name = tensor("op_5603_cast")]; + tensor var_5604 = const()[name = tensor("op_5604"), val = tensor([2, 20, 64, -1])]; + tensor var_5605_cast = reshape(shape = var_5604, x = v_111_cast)[name = tensor("op_5605_cast")]; + tensor attn_weights_221_transpose_x_0 = const()[name = tensor("attn_weights_221_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_221_transpose_y_0 = const()[name = tensor("attn_weights_221_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_221_cast = matmul(transpose_x = attn_weights_221_transpose_x_0, transpose_y = attn_weights_221_transpose_y_0, x = var_5601_cast, y = var_5603_cast)[name = tensor("attn_weights_221_cast")]; + tensor attn_weights_223_cast 
= mul(x = attn_weights_221_cast, y = var_12_to_fp16)[name = tensor("attn_weights_223_cast")]; + tensor var_5609_cast = softmax(axis = var_18, x = attn_weights_223_cast)[name = tensor("op_5609_cast")]; + tensor attn_111_transpose_x_0 = const()[name = tensor("attn_111_transpose_x_0"), val = tensor(false)]; + tensor attn_111_transpose_y_0 = const()[name = tensor("attn_111_transpose_y_0"), val = tensor(true)]; + tensor attn_111_cast = matmul(transpose_x = attn_111_transpose_x_0, transpose_y = attn_111_transpose_y_0, x = var_5605_cast, y = var_5609_cast)[name = tensor("attn_111_cast")]; + tensor var_5613 = const()[name = tensor("op_5613"), val = tensor([2, 1280, 1, -1])]; + tensor input_353_cast = reshape(shape = var_5613, x = attn_111_cast)[name = tensor("input_353_cast")]; + tensor var_5618 = const()[name = tensor("op_5618"), val = tensor([1, 1])]; + tensor var_5620 = const()[name = tensor("op_5620"), val = tensor([1, 1])]; + tensor var_5622_pad_type_0 = const()[name = tensor("op_5622_pad_type_0"), val = tensor("custom")]; + tensor var_5622_pad_0 = const()[name = tensor("op_5622_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(736123520))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(737352384))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_mid_block_attentions_0_transformer_blocks_3_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("unet_mid_block_attentions_0_transformer_blocks_3_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(737352576)))]; + tensor var_5622_cast = conv(bias = unet_mid_block_attentions_0_transformer_blocks_3_attn2_to_out_0_bias_to_fp16, dilations = var_5620, groups = var_31, pad = var_5622_pad_0, pad_type = var_5622_pad_type_0, strides = var_5618, weight = unet_mid_block_attentions_0_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized, x = input_353_cast)[name = tensor("op_5622_cast")]; + tensor inputs_167_cast = add(x = var_5622_cast, y = inputs_165_cast)[name = tensor("inputs_167_cast")]; + tensor var_5626 = const()[name = tensor("op_5626"), val = tensor([1])]; + tensor channels_mean_167_cast = reduce_mean(axes = var_5626, keep_dims = var_23, x = inputs_167_cast)[name = tensor("channels_mean_167_cast")]; + tensor zero_mean_167_cast = sub(x = inputs_167_cast, y = channels_mean_167_cast)[name = tensor("zero_mean_167_cast")]; + tensor zero_mean_sq_167_cast = mul(x = zero_mean_167_cast, y = zero_mean_167_cast)[name = tensor("zero_mean_sq_167_cast")]; + tensor var_5630 = const()[name = tensor("op_5630"), val = tensor([1])]; + tensor var_5631_cast = reduce_mean(axes = var_5630, keep_dims = var_23, x = zero_mean_sq_167_cast)[name = tensor("op_5631_cast")]; + tensor var_5632_to_fp16 = const()[name = tensor("op_5632_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5633_cast = add(x = var_5631_cast, y = var_5632_to_fp16)[name = tensor("op_5633_cast")]; + tensor denom_167_epsilon_0_to_fp16 = const()[name = tensor("denom_167_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_167_cast = rsqrt(epsilon = denom_167_epsilon_0_to_fp16, x = var_5633_cast)[name = tensor("denom_167_cast")]; + tensor out_167_cast = mul(x = zero_mean_167_cast, y 
= denom_167_cast)[name = tensor("out_167_cast")]; + tensor var_5637_to_fp16 = const()[name = tensor("op_5637_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(737355200)))]; + tensor var_5638_cast = add(x = out_167_cast, y = var_5637_to_fp16)[name = tensor("op_5638_cast")]; + tensor var_5640_to_fp16 = const()[name = tensor("op_5640_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(737357824)))]; + tensor input_355_cast = mul(x = var_5638_cast, y = var_5640_to_fp16)[name = tensor("input_355_cast")]; + tensor var_5648 = const()[name = tensor("op_5648"), val = tensor([1, 1])]; + tensor var_5650 = const()[name = tensor("op_5650"), val = tensor([1, 1])]; + tensor var_5652_pad_type_0 = const()[name = tensor("op_5652_pad_type_0"), val = tensor("custom")]; + tensor var_5652_pad_0 = const()[name = tensor("op_5652_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(737360448))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(747190912))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor unet_mid_block_attentions_0_transformer_blocks_3_ff_net_0_proj_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(747191104))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(747198848))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_3_ff_net_0_proj_bias_to_fp16_palettized"), shape = tensor([10240])]; + tensor var_5652_cast = conv(bias = unet_mid_block_attentions_0_transformer_blocks_3_ff_net_0_proj_bias_to_fp16_palettized, dilations = var_5650, groups = var_31, pad = var_5652_pad_0, pad_type = var_5652_pad_type_0, strides = var_5648, weight = unet_mid_block_attentions_0_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized, x = input_355_cast)[name = tensor("op_5652_cast")]; + tensor var_5653_split_sizes_0 = const()[name = tensor("op_5653_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_5653_axis_0 = const()[name = tensor("op_5653_axis_0"), val = tensor(1)]; + tensor var_5653_cast_0, tensor var_5653_cast_1 = split(axis = var_5653_axis_0, split_sizes = var_5653_split_sizes_0, x = var_5652_cast)[name = tensor("op_5653_cast")]; + tensor var_5655_mode_0 = const()[name = tensor("op_5655_mode_0"), val = tensor("EXACT")]; + tensor var_5655_cast = gelu(mode = var_5655_mode_0, x = var_5653_cast_1)[name = tensor("op_5655_cast")]; + tensor input_357_cast = mul(x = var_5653_cast_0, y = var_5655_cast)[name = tensor("input_357_cast")]; + tensor var_5659 = const()[name = tensor("op_5659"), val = tensor([1, 1])]; + tensor var_5661 = const()[name = tensor("op_5661"), val = tensor([1, 1])]; + tensor var_5663_pad_type_0 = const()[name = tensor("op_5663_pad_type_0"), val = tensor("custom")]; + tensor var_5663_pad_0 = const()[name = tensor("op_5663_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(747199040))), lut = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(752114304))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor unet_mid_block_attentions_0_transformer_blocks_3_ff_net_2_bias_to_fp16 = const()[name = tensor("unet_mid_block_attentions_0_transformer_blocks_3_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(752114496)))]; + tensor var_5663_cast = conv(bias = unet_mid_block_attentions_0_transformer_blocks_3_ff_net_2_bias_to_fp16, dilations = var_5661, groups = var_31, pad = var_5663_pad_0, pad_type = var_5663_pad_type_0, strides = var_5659, weight = unet_mid_block_attentions_0_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized, x = input_357_cast)[name = tensor("op_5663_cast")]; + tensor inputs_169_cast = add(x = var_5663_cast, y = inputs_167_cast)[name = tensor("inputs_169_cast")]; + tensor var_5673 = const()[name = tensor("op_5673"), val = tensor([1])]; + tensor channels_mean_169_cast = reduce_mean(axes = var_5673, keep_dims = var_23, x = inputs_169_cast)[name = tensor("channels_mean_169_cast")]; + tensor zero_mean_169_cast = sub(x = inputs_169_cast, y = channels_mean_169_cast)[name = tensor("zero_mean_169_cast")]; + tensor zero_mean_sq_169_cast = mul(x = zero_mean_169_cast, y = zero_mean_169_cast)[name = tensor("zero_mean_sq_169_cast")]; + tensor var_5677 = const()[name = tensor("op_5677"), val = tensor([1])]; + tensor var_5678_cast = reduce_mean(axes = var_5677, keep_dims = var_23, x = zero_mean_sq_169_cast)[name = tensor("op_5678_cast")]; + tensor var_5679_to_fp16 = const()[name = tensor("op_5679_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5680_cast = add(x = var_5678_cast, y = var_5679_to_fp16)[name = tensor("op_5680_cast")]; + tensor denom_169_epsilon_0_to_fp16 = const()[name = tensor("denom_169_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_169_cast = rsqrt(epsilon = denom_169_epsilon_0_to_fp16, x = var_5680_cast)[name = tensor("denom_169_cast")]; + tensor out_169_cast = mul(x = zero_mean_169_cast, y = denom_169_cast)[name = tensor("out_169_cast")]; + tensor var_5684_to_fp16 = const()[name = tensor("op_5684_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(752117120)))]; + tensor var_5685_cast = add(x = out_169_cast, y = var_5684_to_fp16)[name = tensor("op_5685_cast")]; + tensor var_5687_to_fp16 = const()[name = tensor("op_5687_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(752119744)))]; + tensor hidden_states_233_cast = mul(x = var_5685_cast, y = var_5687_to_fp16)[name = tensor("hidden_states_233_cast")]; + tensor var_5694 = const()[name = tensor("op_5694"), val = tensor([1, 1])]; + tensor var_5696 = const()[name = tensor("op_5696"), val = tensor([1, 1])]; + tensor q_113_pad_type_0 = const()[name = tensor("q_113_pad_type_0"), val = tensor("custom")]; + tensor q_113_pad_0 = const()[name = tensor("q_113_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(752122368))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(753351232))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 
1])]; + tensor q_113_cast = conv(dilations = var_5696, groups = var_31, pad = q_113_pad_0, pad_type = q_113_pad_type_0, strides = var_5694, weight = unet_mid_block_attentions_0_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_233_cast)[name = tensor("q_113_cast")]; + tensor var_5700 = const()[name = tensor("op_5700"), val = tensor([1, 1])]; + tensor var_5702 = const()[name = tensor("op_5702"), val = tensor([1, 1])]; + tensor k_113_pad_type_0 = const()[name = tensor("k_113_pad_type_0"), val = tensor("custom")]; + tensor k_113_pad_0 = const()[name = tensor("k_113_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(753351424))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(754580288))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_113_cast = conv(dilations = var_5702, groups = var_31, pad = k_113_pad_0, pad_type = k_113_pad_type_0, strides = var_5700, weight = unet_mid_block_attentions_0_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_233_cast)[name = tensor("k_113_cast")]; + tensor var_5706 = const()[name = tensor("op_5706"), val = tensor([1, 1])]; + tensor var_5708 = const()[name = tensor("op_5708"), val = tensor([1, 1])]; + tensor v_113_pad_type_0 = const()[name = tensor("v_113_pad_type_0"), val = tensor("custom")]; + tensor v_113_pad_0 = const()[name = tensor("v_113_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(754580480))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(755809344))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_113_cast = conv(dilations = var_5708, groups = var_31, pad = v_113_pad_0, pad_type = v_113_pad_type_0, strides = var_5706, weight = unet_mid_block_attentions_0_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_233_cast)[name = tensor("v_113_cast")]; + tensor var_5712 = const()[name = tensor("op_5712"), val = tensor([2, 20, 64, -1])]; + tensor var_5713_cast = reshape(shape = var_5712, x = q_113_cast)[name = tensor("op_5713_cast")]; + tensor var_5714 = const()[name = tensor("op_5714"), val = tensor([2, 20, 64, -1])]; + tensor var_5715_cast = reshape(shape = var_5714, x = k_113_cast)[name = tensor("op_5715_cast")]; + tensor var_5716 = const()[name = tensor("op_5716"), val = tensor([2, 20, 64, -1])]; + tensor var_5717_cast = reshape(shape = var_5716, x = v_113_cast)[name = tensor("op_5717_cast")]; + tensor attn_weights_225_transpose_x_0 = const()[name = tensor("attn_weights_225_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_225_transpose_y_0 = const()[name = tensor("attn_weights_225_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_225_cast = matmul(transpose_x = attn_weights_225_transpose_x_0, transpose_y = attn_weights_225_transpose_y_0, x = var_5713_cast, y = var_5715_cast)[name = tensor("attn_weights_225_cast")]; + tensor attn_weights_227_cast = mul(x = 
attn_weights_225_cast, y = var_12_to_fp16)[name = tensor("attn_weights_227_cast")]; + tensor var_5721_cast = softmax(axis = var_18, x = attn_weights_227_cast)[name = tensor("op_5721_cast")]; + tensor attn_113_transpose_x_0 = const()[name = tensor("attn_113_transpose_x_0"), val = tensor(false)]; + tensor attn_113_transpose_y_0 = const()[name = tensor("attn_113_transpose_y_0"), val = tensor(true)]; + tensor attn_113_cast = matmul(transpose_x = attn_113_transpose_x_0, transpose_y = attn_113_transpose_y_0, x = var_5717_cast, y = var_5721_cast)[name = tensor("attn_113_cast")]; + tensor var_5725 = const()[name = tensor("op_5725"), val = tensor([2, 1280, 1, -1])]; + tensor input_359_cast = reshape(shape = var_5725, x = attn_113_cast)[name = tensor("input_359_cast")]; + tensor var_5730 = const()[name = tensor("op_5730"), val = tensor([1, 1])]; + tensor var_5732 = const()[name = tensor("op_5732"), val = tensor([1, 1])]; + tensor var_5734_pad_type_0 = const()[name = tensor("op_5734_pad_type_0"), val = tensor("custom")]; + tensor var_5734_pad_0 = const()[name = tensor("op_5734_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(755809536))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(757038400))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_mid_block_attentions_0_transformer_blocks_4_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("unet_mid_block_attentions_0_transformer_blocks_4_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(757038592)))]; + tensor var_5734_cast = conv(bias = unet_mid_block_attentions_0_transformer_blocks_4_attn1_to_out_0_bias_to_fp16, dilations = var_5732, groups = var_31, pad = var_5734_pad_0, pad_type = var_5734_pad_type_0, strides = var_5730, weight = unet_mid_block_attentions_0_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized, x = input_359_cast)[name = tensor("op_5734_cast")]; + tensor inputs_171_cast = add(x = var_5734_cast, y = inputs_169_cast)[name = tensor("inputs_171_cast")]; + tensor var_5738 = const()[name = tensor("op_5738"), val = tensor([1])]; + tensor channels_mean_171_cast = reduce_mean(axes = var_5738, keep_dims = var_23, x = inputs_171_cast)[name = tensor("channels_mean_171_cast")]; + tensor zero_mean_171_cast = sub(x = inputs_171_cast, y = channels_mean_171_cast)[name = tensor("zero_mean_171_cast")]; + tensor zero_mean_sq_171_cast = mul(x = zero_mean_171_cast, y = zero_mean_171_cast)[name = tensor("zero_mean_sq_171_cast")]; + tensor var_5742 = const()[name = tensor("op_5742"), val = tensor([1])]; + tensor var_5743_cast = reduce_mean(axes = var_5742, keep_dims = var_23, x = zero_mean_sq_171_cast)[name = tensor("op_5743_cast")]; + tensor var_5744_to_fp16 = const()[name = tensor("op_5744_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5745_cast = add(x = var_5743_cast, y = var_5744_to_fp16)[name = tensor("op_5745_cast")]; + tensor denom_171_epsilon_0_to_fp16 = const()[name = tensor("denom_171_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_171_cast = rsqrt(epsilon = denom_171_epsilon_0_to_fp16, x = var_5745_cast)[name = tensor("denom_171_cast")]; + tensor out_171_cast = mul(x = zero_mean_171_cast, y = 
denom_171_cast)[name = tensor("out_171_cast")]; + tensor var_5749_to_fp16 = const()[name = tensor("op_5749_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(757041216)))]; + tensor var_5750_cast = add(x = out_171_cast, y = var_5749_to_fp16)[name = tensor("op_5750_cast")]; + tensor var_5752_to_fp16 = const()[name = tensor("op_5752_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(757043840)))]; + tensor hidden_states_235_cast = mul(x = var_5750_cast, y = var_5752_to_fp16)[name = tensor("hidden_states_235_cast")]; + tensor var_5759 = const()[name = tensor("op_5759"), val = tensor([1, 1])]; + tensor var_5761 = const()[name = tensor("op_5761"), val = tensor([1, 1])]; + tensor q_115_pad_type_0 = const()[name = tensor("q_115_pad_type_0"), val = tensor("custom")]; + tensor q_115_pad_0 = const()[name = tensor("q_115_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(757046464))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(758275328))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_115_cast = conv(dilations = var_5761, groups = var_31, pad = q_115_pad_0, pad_type = q_115_pad_type_0, strides = var_5759, weight = unet_mid_block_attentions_0_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_235_cast)[name = tensor("q_115_cast")]; + tensor var_5765 = const()[name = tensor("op_5765"), val = tensor([1, 1])]; + tensor var_5767 = const()[name = tensor("op_5767"), val = tensor([1, 1])]; + tensor k_115_pad_type_0 = const()[name = tensor("k_115_pad_type_0"), val = tensor("custom")]; + tensor k_115_pad_0 = const()[name = tensor("k_115_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(758275520))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(760241664))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_115_cast = conv(dilations = var_5767, groups = var_31, pad = k_115_pad_0, pad_type = k_115_pad_type_0, strides = var_5765, weight = unet_mid_block_attentions_0_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_115_cast")]; + tensor var_5771 = const()[name = tensor("op_5771"), val = tensor([1, 1])]; + tensor var_5773 = const()[name = tensor("op_5773"), val = tensor([1, 1])]; + tensor v_115_pad_type_0 = const()[name = tensor("v_115_pad_type_0"), val = tensor("custom")]; + tensor v_115_pad_0 = const()[name = tensor("v_115_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(760241856))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(762208000))), name = 
tensor("unet_mid_block_attentions_0_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_115_cast = conv(dilations = var_5773, groups = var_31, pad = v_115_pad_0, pad_type = v_115_pad_type_0, strides = var_5771, weight = unet_mid_block_attentions_0_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_115_cast")]; + tensor var_5777 = const()[name = tensor("op_5777"), val = tensor([2, 20, 64, -1])]; + tensor var_5778_cast = reshape(shape = var_5777, x = q_115_cast)[name = tensor("op_5778_cast")]; + tensor var_5779 = const()[name = tensor("op_5779"), val = tensor([2, 20, 64, -1])]; + tensor var_5780_cast = reshape(shape = var_5779, x = k_115_cast)[name = tensor("op_5780_cast")]; + tensor var_5781 = const()[name = tensor("op_5781"), val = tensor([2, 20, 64, -1])]; + tensor var_5782_cast = reshape(shape = var_5781, x = v_115_cast)[name = tensor("op_5782_cast")]; + tensor attn_weights_229_transpose_x_0 = const()[name = tensor("attn_weights_229_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_229_transpose_y_0 = const()[name = tensor("attn_weights_229_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_229_cast = matmul(transpose_x = attn_weights_229_transpose_x_0, transpose_y = attn_weights_229_transpose_y_0, x = var_5778_cast, y = var_5780_cast)[name = tensor("attn_weights_229_cast")]; + tensor attn_weights_231_cast = mul(x = attn_weights_229_cast, y = var_12_to_fp16)[name = tensor("attn_weights_231_cast")]; + tensor var_5786_cast = softmax(axis = var_18, x = attn_weights_231_cast)[name = tensor("op_5786_cast")]; + tensor attn_115_transpose_x_0 = const()[name = tensor("attn_115_transpose_x_0"), val = tensor(false)]; + tensor attn_115_transpose_y_0 = const()[name = tensor("attn_115_transpose_y_0"), val = tensor(true)]; + tensor attn_115_cast = matmul(transpose_x = attn_115_transpose_x_0, transpose_y = attn_115_transpose_y_0, x = var_5782_cast, y = var_5786_cast)[name = tensor("attn_115_cast")]; + tensor var_5790 = const()[name = tensor("op_5790"), val = tensor([2, 1280, 1, -1])]; + tensor input_361_cast = reshape(shape = var_5790, x = attn_115_cast)[name = tensor("input_361_cast")]; + tensor var_5795 = const()[name = tensor("op_5795"), val = tensor([1, 1])]; + tensor var_5797 = const()[name = tensor("op_5797"), val = tensor([1, 1])]; + tensor var_5799_pad_type_0 = const()[name = tensor("op_5799_pad_type_0"), val = tensor("custom")]; + tensor var_5799_pad_0 = const()[name = tensor("op_5799_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(762208192))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(763437056))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_mid_block_attentions_0_transformer_blocks_4_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("unet_mid_block_attentions_0_transformer_blocks_4_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(763437248)))]; + tensor var_5799_cast = conv(bias = unet_mid_block_attentions_0_transformer_blocks_4_attn2_to_out_0_bias_to_fp16, dilations = var_5797, groups = var_31, pad = var_5799_pad_0, pad_type = 
var_5799_pad_type_0, strides = var_5795, weight = unet_mid_block_attentions_0_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized, x = input_361_cast)[name = tensor("op_5799_cast")]; + tensor inputs_173_cast = add(x = var_5799_cast, y = inputs_171_cast)[name = tensor("inputs_173_cast")]; + tensor var_5803 = const()[name = tensor("op_5803"), val = tensor([1])]; + tensor channels_mean_173_cast = reduce_mean(axes = var_5803, keep_dims = var_23, x = inputs_173_cast)[name = tensor("channels_mean_173_cast")]; + tensor zero_mean_173_cast = sub(x = inputs_173_cast, y = channels_mean_173_cast)[name = tensor("zero_mean_173_cast")]; + tensor zero_mean_sq_173_cast = mul(x = zero_mean_173_cast, y = zero_mean_173_cast)[name = tensor("zero_mean_sq_173_cast")]; + tensor var_5807 = const()[name = tensor("op_5807"), val = tensor([1])]; + tensor var_5808_cast = reduce_mean(axes = var_5807, keep_dims = var_23, x = zero_mean_sq_173_cast)[name = tensor("op_5808_cast")]; + tensor var_5809_to_fp16 = const()[name = tensor("op_5809_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5810_cast = add(x = var_5808_cast, y = var_5809_to_fp16)[name = tensor("op_5810_cast")]; + tensor denom_173_epsilon_0_to_fp16 = const()[name = tensor("denom_173_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_173_cast = rsqrt(epsilon = denom_173_epsilon_0_to_fp16, x = var_5810_cast)[name = tensor("denom_173_cast")]; + tensor out_173_cast = mul(x = zero_mean_173_cast, y = denom_173_cast)[name = tensor("out_173_cast")]; + tensor var_5814_to_fp16 = const()[name = tensor("op_5814_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(763439872)))]; + tensor var_5815_cast = add(x = out_173_cast, y = var_5814_to_fp16)[name = tensor("op_5815_cast")]; + tensor var_5817_to_fp16 = const()[name = tensor("op_5817_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(763442496)))]; + tensor input_363_cast = mul(x = var_5815_cast, y = var_5817_to_fp16)[name = tensor("input_363_cast")]; + tensor var_5825 = const()[name = tensor("op_5825"), val = tensor([1, 1])]; + tensor var_5827 = const()[name = tensor("op_5827"), val = tensor([1, 1])]; + tensor var_5829_pad_type_0 = const()[name = tensor("op_5829_pad_type_0"), val = tensor("custom")]; + tensor var_5829_pad_0 = const()[name = tensor("op_5829_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(763445120))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(773275584))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor unet_mid_block_attentions_0_transformer_blocks_4_ff_net_0_proj_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(773275776))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(773283520))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_4_ff_net_0_proj_bias_to_fp16_palettized"), shape = tensor([10240])]; + tensor var_5829_cast = conv(bias = unet_mid_block_attentions_0_transformer_blocks_4_ff_net_0_proj_bias_to_fp16_palettized, dilations = var_5827, groups = var_31, pad = var_5829_pad_0, pad_type = 
var_5829_pad_type_0, strides = var_5825, weight = unet_mid_block_attentions_0_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized, x = input_363_cast)[name = tensor("op_5829_cast")]; + tensor var_5830_split_sizes_0 = const()[name = tensor("op_5830_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_5830_axis_0 = const()[name = tensor("op_5830_axis_0"), val = tensor(1)]; + tensor var_5830_cast_0, tensor var_5830_cast_1 = split(axis = var_5830_axis_0, split_sizes = var_5830_split_sizes_0, x = var_5829_cast)[name = tensor("op_5830_cast")]; + tensor var_5832_mode_0 = const()[name = tensor("op_5832_mode_0"), val = tensor("EXACT")]; + tensor var_5832_cast = gelu(mode = var_5832_mode_0, x = var_5830_cast_1)[name = tensor("op_5832_cast")]; + tensor input_365_cast = mul(x = var_5830_cast_0, y = var_5832_cast)[name = tensor("input_365_cast")]; + tensor var_5836 = const()[name = tensor("op_5836"), val = tensor([1, 1])]; + tensor var_5838 = const()[name = tensor("op_5838"), val = tensor([1, 1])]; + tensor var_5840_pad_type_0 = const()[name = tensor("op_5840_pad_type_0"), val = tensor("custom")]; + tensor var_5840_pad_0 = const()[name = tensor("op_5840_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(773283712))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(778198976))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor unet_mid_block_attentions_0_transformer_blocks_4_ff_net_2_bias_to_fp16 = const()[name = tensor("unet_mid_block_attentions_0_transformer_blocks_4_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(778199168)))]; + tensor var_5840_cast = conv(bias = unet_mid_block_attentions_0_transformer_blocks_4_ff_net_2_bias_to_fp16, dilations = var_5838, groups = var_31, pad = var_5840_pad_0, pad_type = var_5840_pad_type_0, strides = var_5836, weight = unet_mid_block_attentions_0_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized, x = input_365_cast)[name = tensor("op_5840_cast")]; + tensor inputs_175_cast = add(x = var_5840_cast, y = inputs_173_cast)[name = tensor("inputs_175_cast")]; + tensor var_5850 = const()[name = tensor("op_5850"), val = tensor([1])]; + tensor channels_mean_175_cast = reduce_mean(axes = var_5850, keep_dims = var_23, x = inputs_175_cast)[name = tensor("channels_mean_175_cast")]; + tensor zero_mean_175_cast = sub(x = inputs_175_cast, y = channels_mean_175_cast)[name = tensor("zero_mean_175_cast")]; + tensor zero_mean_sq_175_cast = mul(x = zero_mean_175_cast, y = zero_mean_175_cast)[name = tensor("zero_mean_sq_175_cast")]; + tensor var_5854 = const()[name = tensor("op_5854"), val = tensor([1])]; + tensor var_5855_cast = reduce_mean(axes = var_5854, keep_dims = var_23, x = zero_mean_sq_175_cast)[name = tensor("op_5855_cast")]; + tensor var_5856_to_fp16 = const()[name = tensor("op_5856_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5857_cast = add(x = var_5855_cast, y = var_5856_to_fp16)[name = tensor("op_5857_cast")]; + tensor denom_175_epsilon_0_to_fp16 = const()[name = tensor("denom_175_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_175_cast = rsqrt(epsilon = denom_175_epsilon_0_to_fp16, x = var_5857_cast)[name = tensor("denom_175_cast")]; + tensor 
out_175_cast = mul(x = zero_mean_175_cast, y = denom_175_cast)[name = tensor("out_175_cast")]; + tensor var_5861_to_fp16 = const()[name = tensor("op_5861_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(778201792)))]; + tensor var_5862_cast = add(x = out_175_cast, y = var_5861_to_fp16)[name = tensor("op_5862_cast")]; + tensor var_5864_to_fp16 = const()[name = tensor("op_5864_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(778204416)))]; + tensor hidden_states_239_cast = mul(x = var_5862_cast, y = var_5864_to_fp16)[name = tensor("hidden_states_239_cast")]; + tensor var_5871 = const()[name = tensor("op_5871"), val = tensor([1, 1])]; + tensor var_5873 = const()[name = tensor("op_5873"), val = tensor([1, 1])]; + tensor q_117_pad_type_0 = const()[name = tensor("q_117_pad_type_0"), val = tensor("custom")]; + tensor q_117_pad_0 = const()[name = tensor("q_117_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(778207040))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(779435904))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_117_cast = conv(dilations = var_5873, groups = var_31, pad = q_117_pad_0, pad_type = q_117_pad_type_0, strides = var_5871, weight = unet_mid_block_attentions_0_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_239_cast)[name = tensor("q_117_cast")]; + tensor var_5877 = const()[name = tensor("op_5877"), val = tensor([1, 1])]; + tensor var_5879 = const()[name = tensor("op_5879"), val = tensor([1, 1])]; + tensor k_117_pad_type_0 = const()[name = tensor("k_117_pad_type_0"), val = tensor("custom")]; + tensor k_117_pad_0 = const()[name = tensor("k_117_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(779436096))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(780664960))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_117_cast = conv(dilations = var_5879, groups = var_31, pad = k_117_pad_0, pad_type = k_117_pad_type_0, strides = var_5877, weight = unet_mid_block_attentions_0_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_239_cast)[name = tensor("k_117_cast")]; + tensor var_5883 = const()[name = tensor("op_5883"), val = tensor([1, 1])]; + tensor var_5885 = const()[name = tensor("op_5885"), val = tensor([1, 1])]; + tensor v_117_pad_type_0 = const()[name = tensor("v_117_pad_type_0"), val = tensor("custom")]; + tensor v_117_pad_0 = const()[name = tensor("v_117_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(780665152))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(781894016))), name = 
tensor("unet_mid_block_attentions_0_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_117_cast = conv(dilations = var_5885, groups = var_31, pad = v_117_pad_0, pad_type = v_117_pad_type_0, strides = var_5883, weight = unet_mid_block_attentions_0_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_239_cast)[name = tensor("v_117_cast")]; + tensor var_5889 = const()[name = tensor("op_5889"), val = tensor([2, 20, 64, -1])]; + tensor var_5890_cast = reshape(shape = var_5889, x = q_117_cast)[name = tensor("op_5890_cast")]; + tensor var_5891 = const()[name = tensor("op_5891"), val = tensor([2, 20, 64, -1])]; + tensor var_5892_cast = reshape(shape = var_5891, x = k_117_cast)[name = tensor("op_5892_cast")]; + tensor var_5893 = const()[name = tensor("op_5893"), val = tensor([2, 20, 64, -1])]; + tensor var_5894_cast = reshape(shape = var_5893, x = v_117_cast)[name = tensor("op_5894_cast")]; + tensor attn_weights_233_transpose_x_0 = const()[name = tensor("attn_weights_233_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_233_transpose_y_0 = const()[name = tensor("attn_weights_233_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_233_cast = matmul(transpose_x = attn_weights_233_transpose_x_0, transpose_y = attn_weights_233_transpose_y_0, x = var_5890_cast, y = var_5892_cast)[name = tensor("attn_weights_233_cast")]; + tensor attn_weights_235_cast = mul(x = attn_weights_233_cast, y = var_12_to_fp16)[name = tensor("attn_weights_235_cast")]; + tensor var_5898_cast = softmax(axis = var_18, x = attn_weights_235_cast)[name = tensor("op_5898_cast")]; + tensor attn_117_transpose_x_0 = const()[name = tensor("attn_117_transpose_x_0"), val = tensor(false)]; + tensor attn_117_transpose_y_0 = const()[name = tensor("attn_117_transpose_y_0"), val = tensor(true)]; + tensor attn_117_cast = matmul(transpose_x = attn_117_transpose_x_0, transpose_y = attn_117_transpose_y_0, x = var_5894_cast, y = var_5898_cast)[name = tensor("attn_117_cast")]; + tensor var_5902 = const()[name = tensor("op_5902"), val = tensor([2, 1280, 1, -1])]; + tensor input_367_cast = reshape(shape = var_5902, x = attn_117_cast)[name = tensor("input_367_cast")]; + tensor var_5907 = const()[name = tensor("op_5907"), val = tensor([1, 1])]; + tensor var_5909 = const()[name = tensor("op_5909"), val = tensor([1, 1])]; + tensor var_5911_pad_type_0 = const()[name = tensor("op_5911_pad_type_0"), val = tensor("custom")]; + tensor var_5911_pad_0 = const()[name = tensor("op_5911_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(781894208))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(783123072))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_mid_block_attentions_0_transformer_blocks_5_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("unet_mid_block_attentions_0_transformer_blocks_5_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(783123264)))]; + tensor var_5911_cast = conv(bias = unet_mid_block_attentions_0_transformer_blocks_5_attn1_to_out_0_bias_to_fp16, dilations = var_5909, groups = var_31, pad = var_5911_pad_0, pad_type = 
var_5911_pad_type_0, strides = var_5907, weight = unet_mid_block_attentions_0_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized, x = input_367_cast)[name = tensor("op_5911_cast")]; + tensor inputs_177_cast = add(x = var_5911_cast, y = inputs_175_cast)[name = tensor("inputs_177_cast")]; + tensor var_5915 = const()[name = tensor("op_5915"), val = tensor([1])]; + tensor channels_mean_177_cast = reduce_mean(axes = var_5915, keep_dims = var_23, x = inputs_177_cast)[name = tensor("channels_mean_177_cast")]; + tensor zero_mean_177_cast = sub(x = inputs_177_cast, y = channels_mean_177_cast)[name = tensor("zero_mean_177_cast")]; + tensor zero_mean_sq_177_cast = mul(x = zero_mean_177_cast, y = zero_mean_177_cast)[name = tensor("zero_mean_sq_177_cast")]; + tensor var_5919 = const()[name = tensor("op_5919"), val = tensor([1])]; + tensor var_5920_cast = reduce_mean(axes = var_5919, keep_dims = var_23, x = zero_mean_sq_177_cast)[name = tensor("op_5920_cast")]; + tensor var_5921_to_fp16 = const()[name = tensor("op_5921_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5922_cast = add(x = var_5920_cast, y = var_5921_to_fp16)[name = tensor("op_5922_cast")]; + tensor denom_177_epsilon_0_to_fp16 = const()[name = tensor("denom_177_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_177_cast = rsqrt(epsilon = denom_177_epsilon_0_to_fp16, x = var_5922_cast)[name = tensor("denom_177_cast")]; + tensor out_177_cast = mul(x = zero_mean_177_cast, y = denom_177_cast)[name = tensor("out_177_cast")]; + tensor var_5926_to_fp16 = const()[name = tensor("op_5926_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(783125888)))]; + tensor var_5927_cast = add(x = out_177_cast, y = var_5926_to_fp16)[name = tensor("op_5927_cast")]; + tensor var_5929_to_fp16 = const()[name = tensor("op_5929_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(783128512)))]; + tensor hidden_states_241_cast = mul(x = var_5927_cast, y = var_5929_to_fp16)[name = tensor("hidden_states_241_cast")]; + tensor var_5936 = const()[name = tensor("op_5936"), val = tensor([1, 1])]; + tensor var_5938 = const()[name = tensor("op_5938"), val = tensor([1, 1])]; + tensor q_119_pad_type_0 = const()[name = tensor("q_119_pad_type_0"), val = tensor("custom")]; + tensor q_119_pad_0 = const()[name = tensor("q_119_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(783131136))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(784360000))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_119_cast = conv(dilations = var_5938, groups = var_31, pad = q_119_pad_0, pad_type = q_119_pad_type_0, strides = var_5936, weight = unet_mid_block_attentions_0_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_241_cast)[name = tensor("q_119_cast")]; + tensor var_5942 = const()[name = tensor("op_5942"), val = tensor([1, 1])]; + tensor var_5944 = const()[name = tensor("op_5944"), val = tensor([1, 1])]; + tensor k_119_pad_type_0 = const()[name = tensor("k_119_pad_type_0"), val = tensor("custom")]; + tensor k_119_pad_0 = const()[name = tensor("k_119_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
unet_mid_block_attentions_0_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(784360192))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(786326336))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_119_cast = conv(dilations = var_5944, groups = var_31, pad = k_119_pad_0, pad_type = k_119_pad_type_0, strides = var_5942, weight = unet_mid_block_attentions_0_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_119_cast")]; + tensor var_5948 = const()[name = tensor("op_5948"), val = tensor([1, 1])]; + tensor var_5950 = const()[name = tensor("op_5950"), val = tensor([1, 1])]; + tensor v_119_pad_type_0 = const()[name = tensor("v_119_pad_type_0"), val = tensor("custom")]; + tensor v_119_pad_0 = const()[name = tensor("v_119_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(786326528))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(788292672))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_119_cast = conv(dilations = var_5950, groups = var_31, pad = v_119_pad_0, pad_type = v_119_pad_type_0, strides = var_5948, weight = unet_mid_block_attentions_0_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_119_cast")]; + tensor var_5954 = const()[name = tensor("op_5954"), val = tensor([2, 20, 64, -1])]; + tensor var_5955_cast = reshape(shape = var_5954, x = q_119_cast)[name = tensor("op_5955_cast")]; + tensor var_5956 = const()[name = tensor("op_5956"), val = tensor([2, 20, 64, -1])]; + tensor var_5957_cast = reshape(shape = var_5956, x = k_119_cast)[name = tensor("op_5957_cast")]; + tensor var_5958 = const()[name = tensor("op_5958"), val = tensor([2, 20, 64, -1])]; + tensor var_5959_cast = reshape(shape = var_5958, x = v_119_cast)[name = tensor("op_5959_cast")]; + tensor attn_weights_237_transpose_x_0 = const()[name = tensor("attn_weights_237_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_237_transpose_y_0 = const()[name = tensor("attn_weights_237_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_237_cast = matmul(transpose_x = attn_weights_237_transpose_x_0, transpose_y = attn_weights_237_transpose_y_0, x = var_5955_cast, y = var_5957_cast)[name = tensor("attn_weights_237_cast")]; + tensor attn_weights_239_cast = mul(x = attn_weights_237_cast, y = var_12_to_fp16)[name = tensor("attn_weights_239_cast")]; + tensor var_5963_cast = softmax(axis = var_18, x = attn_weights_239_cast)[name = tensor("op_5963_cast")]; + tensor attn_119_transpose_x_0 = const()[name = tensor("attn_119_transpose_x_0"), val = tensor(false)]; + tensor attn_119_transpose_y_0 = const()[name = tensor("attn_119_transpose_y_0"), val = tensor(true)]; + tensor attn_119_cast = matmul(transpose_x = attn_119_transpose_x_0, transpose_y = attn_119_transpose_y_0, x = var_5959_cast, y = var_5963_cast)[name = tensor("attn_119_cast")]; + tensor var_5967 = const()[name = tensor("op_5967"), val = tensor([2, 1280, 1, 
-1])]; + tensor input_369_cast = reshape(shape = var_5967, x = attn_119_cast)[name = tensor("input_369_cast")]; + tensor var_5972 = const()[name = tensor("op_5972"), val = tensor([1, 1])]; + tensor var_5974 = const()[name = tensor("op_5974"), val = tensor([1, 1])]; + tensor var_5976_pad_type_0 = const()[name = tensor("op_5976_pad_type_0"), val = tensor("custom")]; + tensor var_5976_pad_0 = const()[name = tensor("op_5976_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(788292864))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(789521728))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_mid_block_attentions_0_transformer_blocks_5_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("unet_mid_block_attentions_0_transformer_blocks_5_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(789521920)))]; + tensor var_5976_cast = conv(bias = unet_mid_block_attentions_0_transformer_blocks_5_attn2_to_out_0_bias_to_fp16, dilations = var_5974, groups = var_31, pad = var_5976_pad_0, pad_type = var_5976_pad_type_0, strides = var_5972, weight = unet_mid_block_attentions_0_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized, x = input_369_cast)[name = tensor("op_5976_cast")]; + tensor inputs_179_cast = add(x = var_5976_cast, y = inputs_177_cast)[name = tensor("inputs_179_cast")]; + tensor var_5980 = const()[name = tensor("op_5980"), val = tensor([1])]; + tensor channels_mean_179_cast = reduce_mean(axes = var_5980, keep_dims = var_23, x = inputs_179_cast)[name = tensor("channels_mean_179_cast")]; + tensor zero_mean_179_cast = sub(x = inputs_179_cast, y = channels_mean_179_cast)[name = tensor("zero_mean_179_cast")]; + tensor zero_mean_sq_179_cast = mul(x = zero_mean_179_cast, y = zero_mean_179_cast)[name = tensor("zero_mean_sq_179_cast")]; + tensor var_5984 = const()[name = tensor("op_5984"), val = tensor([1])]; + tensor var_5985_cast = reduce_mean(axes = var_5984, keep_dims = var_23, x = zero_mean_sq_179_cast)[name = tensor("op_5985_cast")]; + tensor var_5986_to_fp16 = const()[name = tensor("op_5986_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5987_cast = add(x = var_5985_cast, y = var_5986_to_fp16)[name = tensor("op_5987_cast")]; + tensor denom_179_epsilon_0_to_fp16 = const()[name = tensor("denom_179_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_179_cast = rsqrt(epsilon = denom_179_epsilon_0_to_fp16, x = var_5987_cast)[name = tensor("denom_179_cast")]; + tensor out_179_cast = mul(x = zero_mean_179_cast, y = denom_179_cast)[name = tensor("out_179_cast")]; + tensor var_5991_to_fp16 = const()[name = tensor("op_5991_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(789524544)))]; + tensor var_5992_cast = add(x = out_179_cast, y = var_5991_to_fp16)[name = tensor("op_5992_cast")]; + tensor var_5994_to_fp16 = const()[name = tensor("op_5994_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(789527168)))]; + tensor input_371_cast = mul(x = var_5992_cast, y = var_5994_to_fp16)[name = tensor("input_371_cast")]; + tensor var_6002 = const()[name = tensor("op_6002"), val = 
tensor([1, 1])]; + tensor var_6004 = const()[name = tensor("op_6004"), val = tensor([1, 1])]; + tensor var_6006_pad_type_0 = const()[name = tensor("op_6006_pad_type_0"), val = tensor("custom")]; + tensor var_6006_pad_0 = const()[name = tensor("op_6006_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(789529792))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(799360256))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor unet_mid_block_attentions_0_transformer_blocks_5_ff_net_0_proj_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(799360448))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(799368192))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_5_ff_net_0_proj_bias_to_fp16_palettized"), shape = tensor([10240])]; + tensor var_6006_cast = conv(bias = unet_mid_block_attentions_0_transformer_blocks_5_ff_net_0_proj_bias_to_fp16_palettized, dilations = var_6004, groups = var_31, pad = var_6006_pad_0, pad_type = var_6006_pad_type_0, strides = var_6002, weight = unet_mid_block_attentions_0_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized, x = input_371_cast)[name = tensor("op_6006_cast")]; + tensor var_6007_split_sizes_0 = const()[name = tensor("op_6007_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_6007_axis_0 = const()[name = tensor("op_6007_axis_0"), val = tensor(1)]; + tensor var_6007_cast_0, tensor var_6007_cast_1 = split(axis = var_6007_axis_0, split_sizes = var_6007_split_sizes_0, x = var_6006_cast)[name = tensor("op_6007_cast")]; + tensor var_6009_mode_0 = const()[name = tensor("op_6009_mode_0"), val = tensor("EXACT")]; + tensor var_6009_cast = gelu(mode = var_6009_mode_0, x = var_6007_cast_1)[name = tensor("op_6009_cast")]; + tensor input_373_cast = mul(x = var_6007_cast_0, y = var_6009_cast)[name = tensor("input_373_cast")]; + tensor var_6013 = const()[name = tensor("op_6013"), val = tensor([1, 1])]; + tensor var_6015 = const()[name = tensor("op_6015"), val = tensor([1, 1])]; + tensor var_6017_pad_type_0 = const()[name = tensor("op_6017_pad_type_0"), val = tensor("custom")]; + tensor var_6017_pad_0 = const()[name = tensor("op_6017_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(799368384))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(804283648))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor unet_mid_block_attentions_0_transformer_blocks_5_ff_net_2_bias_to_fp16 = const()[name = tensor("unet_mid_block_attentions_0_transformer_blocks_5_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(804283840)))]; + tensor var_6017_cast = conv(bias = unet_mid_block_attentions_0_transformer_blocks_5_ff_net_2_bias_to_fp16, dilations = var_6015, groups = var_31, pad = var_6017_pad_0, 
pad_type = var_6017_pad_type_0, strides = var_6013, weight = unet_mid_block_attentions_0_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized, x = input_373_cast)[name = tensor("op_6017_cast")]; + tensor inputs_181_cast = add(x = var_6017_cast, y = inputs_179_cast)[name = tensor("inputs_181_cast")]; + tensor var_6027 = const()[name = tensor("op_6027"), val = tensor([1])]; + tensor channels_mean_181_cast = reduce_mean(axes = var_6027, keep_dims = var_23, x = inputs_181_cast)[name = tensor("channels_mean_181_cast")]; + tensor zero_mean_181_cast = sub(x = inputs_181_cast, y = channels_mean_181_cast)[name = tensor("zero_mean_181_cast")]; + tensor zero_mean_sq_181_cast = mul(x = zero_mean_181_cast, y = zero_mean_181_cast)[name = tensor("zero_mean_sq_181_cast")]; + tensor var_6031 = const()[name = tensor("op_6031"), val = tensor([1])]; + tensor var_6032_cast = reduce_mean(axes = var_6031, keep_dims = var_23, x = zero_mean_sq_181_cast)[name = tensor("op_6032_cast")]; + tensor var_6033_to_fp16 = const()[name = tensor("op_6033_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_6034_cast = add(x = var_6032_cast, y = var_6033_to_fp16)[name = tensor("op_6034_cast")]; + tensor denom_181_epsilon_0_to_fp16 = const()[name = tensor("denom_181_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_181_cast = rsqrt(epsilon = denom_181_epsilon_0_to_fp16, x = var_6034_cast)[name = tensor("denom_181_cast")]; + tensor out_181_cast = mul(x = zero_mean_181_cast, y = denom_181_cast)[name = tensor("out_181_cast")]; + tensor var_6038_to_fp16 = const()[name = tensor("op_6038_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(804286464)))]; + tensor var_6039_cast = add(x = out_181_cast, y = var_6038_to_fp16)[name = tensor("op_6039_cast")]; + tensor var_6041_to_fp16 = const()[name = tensor("op_6041_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(804289088)))]; + tensor hidden_states_245_cast = mul(x = var_6039_cast, y = var_6041_to_fp16)[name = tensor("hidden_states_245_cast")]; + tensor var_6048 = const()[name = tensor("op_6048"), val = tensor([1, 1])]; + tensor var_6050 = const()[name = tensor("op_6050"), val = tensor([1, 1])]; + tensor q_121_pad_type_0 = const()[name = tensor("q_121_pad_type_0"), val = tensor("custom")]; + tensor q_121_pad_0 = const()[name = tensor("q_121_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(804291712))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(805520576))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_121_cast = conv(dilations = var_6050, groups = var_31, pad = q_121_pad_0, pad_type = q_121_pad_type_0, strides = var_6048, weight = unet_mid_block_attentions_0_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_245_cast)[name = tensor("q_121_cast")]; + tensor var_6054 = const()[name = tensor("op_6054"), val = tensor([1, 1])]; + tensor var_6056 = const()[name = tensor("op_6056"), val = tensor([1, 1])]; + tensor k_121_pad_type_0 = const()[name = tensor("k_121_pad_type_0"), val = tensor("custom")]; + tensor k_121_pad_0 = const()[name = tensor("k_121_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
unet_mid_block_attentions_0_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(805520768))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(806749632))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_121_cast = conv(dilations = var_6056, groups = var_31, pad = k_121_pad_0, pad_type = k_121_pad_type_0, strides = var_6054, weight = unet_mid_block_attentions_0_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_245_cast)[name = tensor("k_121_cast")]; + tensor var_6060 = const()[name = tensor("op_6060"), val = tensor([1, 1])]; + tensor var_6062 = const()[name = tensor("op_6062"), val = tensor([1, 1])]; + tensor v_121_pad_type_0 = const()[name = tensor("v_121_pad_type_0"), val = tensor("custom")]; + tensor v_121_pad_0 = const()[name = tensor("v_121_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(806749824))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(807978688))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_121_cast = conv(dilations = var_6062, groups = var_31, pad = v_121_pad_0, pad_type = v_121_pad_type_0, strides = var_6060, weight = unet_mid_block_attentions_0_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_245_cast)[name = tensor("v_121_cast")]; + tensor var_6066 = const()[name = tensor("op_6066"), val = tensor([2, 20, 64, -1])]; + tensor var_6067_cast = reshape(shape = var_6066, x = q_121_cast)[name = tensor("op_6067_cast")]; + tensor var_6068 = const()[name = tensor("op_6068"), val = tensor([2, 20, 64, -1])]; + tensor var_6069_cast = reshape(shape = var_6068, x = k_121_cast)[name = tensor("op_6069_cast")]; + tensor var_6070 = const()[name = tensor("op_6070"), val = tensor([2, 20, 64, -1])]; + tensor var_6071_cast = reshape(shape = var_6070, x = v_121_cast)[name = tensor("op_6071_cast")]; + tensor attn_weights_241_transpose_x_0 = const()[name = tensor("attn_weights_241_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_241_transpose_y_0 = const()[name = tensor("attn_weights_241_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_241_cast = matmul(transpose_x = attn_weights_241_transpose_x_0, transpose_y = attn_weights_241_transpose_y_0, x = var_6067_cast, y = var_6069_cast)[name = tensor("attn_weights_241_cast")]; + tensor attn_weights_243_cast = mul(x = attn_weights_241_cast, y = var_12_to_fp16)[name = tensor("attn_weights_243_cast")]; + tensor var_6075_cast = softmax(axis = var_18, x = attn_weights_243_cast)[name = tensor("op_6075_cast")]; + tensor attn_121_transpose_x_0 = const()[name = tensor("attn_121_transpose_x_0"), val = tensor(false)]; + tensor attn_121_transpose_y_0 = const()[name = tensor("attn_121_transpose_y_0"), val = tensor(true)]; + tensor attn_121_cast = matmul(transpose_x = attn_121_transpose_x_0, transpose_y = attn_121_transpose_y_0, x = var_6071_cast, y = var_6075_cast)[name = tensor("attn_121_cast")]; + tensor var_6079 = const()[name = tensor("op_6079"), val = tensor([2, 1280, 1, 
-1])]; + tensor input_375_cast = reshape(shape = var_6079, x = attn_121_cast)[name = tensor("input_375_cast")]; + tensor var_6084 = const()[name = tensor("op_6084"), val = tensor([1, 1])]; + tensor var_6086 = const()[name = tensor("op_6086"), val = tensor([1, 1])]; + tensor var_6088_pad_type_0 = const()[name = tensor("op_6088_pad_type_0"), val = tensor("custom")]; + tensor var_6088_pad_0 = const()[name = tensor("op_6088_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(807978880))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(809207744))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_mid_block_attentions_0_transformer_blocks_6_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("unet_mid_block_attentions_0_transformer_blocks_6_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(809207936)))]; + tensor var_6088_cast = conv(bias = unet_mid_block_attentions_0_transformer_blocks_6_attn1_to_out_0_bias_to_fp16, dilations = var_6086, groups = var_31, pad = var_6088_pad_0, pad_type = var_6088_pad_type_0, strides = var_6084, weight = unet_mid_block_attentions_0_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized, x = input_375_cast)[name = tensor("op_6088_cast")]; + tensor inputs_183_cast = add(x = var_6088_cast, y = inputs_181_cast)[name = tensor("inputs_183_cast")]; + tensor var_6092 = const()[name = tensor("op_6092"), val = tensor([1])]; + tensor channels_mean_183_cast = reduce_mean(axes = var_6092, keep_dims = var_23, x = inputs_183_cast)[name = tensor("channels_mean_183_cast")]; + tensor zero_mean_183_cast = sub(x = inputs_183_cast, y = channels_mean_183_cast)[name = tensor("zero_mean_183_cast")]; + tensor zero_mean_sq_183_cast = mul(x = zero_mean_183_cast, y = zero_mean_183_cast)[name = tensor("zero_mean_sq_183_cast")]; + tensor var_6096 = const()[name = tensor("op_6096"), val = tensor([1])]; + tensor var_6097_cast = reduce_mean(axes = var_6096, keep_dims = var_23, x = zero_mean_sq_183_cast)[name = tensor("op_6097_cast")]; + tensor var_6098_to_fp16 = const()[name = tensor("op_6098_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_6099_cast = add(x = var_6097_cast, y = var_6098_to_fp16)[name = tensor("op_6099_cast")]; + tensor denom_183_epsilon_0_to_fp16 = const()[name = tensor("denom_183_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_183_cast = rsqrt(epsilon = denom_183_epsilon_0_to_fp16, x = var_6099_cast)[name = tensor("denom_183_cast")]; + tensor out_183_cast = mul(x = zero_mean_183_cast, y = denom_183_cast)[name = tensor("out_183_cast")]; + tensor var_6103_to_fp16 = const()[name = tensor("op_6103_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(809210560)))]; + tensor var_6104_cast = add(x = out_183_cast, y = var_6103_to_fp16)[name = tensor("op_6104_cast")]; + tensor var_6106_to_fp16 = const()[name = tensor("op_6106_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(809213184)))]; + tensor hidden_states_247_cast = mul(x = var_6104_cast, y = var_6106_to_fp16)[name = tensor("hidden_states_247_cast")]; + tensor var_6113 = const()[name = tensor("op_6113"), 
val = tensor([1, 1])]; + tensor var_6115 = const()[name = tensor("op_6115"), val = tensor([1, 1])]; + tensor q_123_pad_type_0 = const()[name = tensor("q_123_pad_type_0"), val = tensor("custom")]; + tensor q_123_pad_0 = const()[name = tensor("q_123_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(809215808))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(810444672))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_123_cast = conv(dilations = var_6115, groups = var_31, pad = q_123_pad_0, pad_type = q_123_pad_type_0, strides = var_6113, weight = unet_mid_block_attentions_0_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_247_cast)[name = tensor("q_123_cast")]; + tensor var_6119 = const()[name = tensor("op_6119"), val = tensor([1, 1])]; + tensor var_6121 = const()[name = tensor("op_6121"), val = tensor([1, 1])]; + tensor k_123_pad_type_0 = const()[name = tensor("k_123_pad_type_0"), val = tensor("custom")]; + tensor k_123_pad_0 = const()[name = tensor("k_123_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(810444864))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(812411008))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_123_cast = conv(dilations = var_6121, groups = var_31, pad = k_123_pad_0, pad_type = k_123_pad_type_0, strides = var_6119, weight = unet_mid_block_attentions_0_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_123_cast")]; + tensor var_6125 = const()[name = tensor("op_6125"), val = tensor([1, 1])]; + tensor var_6127 = const()[name = tensor("op_6127"), val = tensor([1, 1])]; + tensor v_123_pad_type_0 = const()[name = tensor("v_123_pad_type_0"), val = tensor("custom")]; + tensor v_123_pad_0 = const()[name = tensor("v_123_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(812411200))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(814377344))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_123_cast = conv(dilations = var_6127, groups = var_31, pad = v_123_pad_0, pad_type = v_123_pad_type_0, strides = var_6125, weight = unet_mid_block_attentions_0_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_123_cast")]; + tensor var_6131 = const()[name = tensor("op_6131"), val = tensor([2, 20, 64, -1])]; + tensor var_6132_cast = reshape(shape = var_6131, x = q_123_cast)[name = tensor("op_6132_cast")]; + tensor var_6133 = const()[name = tensor("op_6133"), val = tensor([2, 20, 64, -1])]; + tensor var_6134_cast = reshape(shape = 
var_6133, x = k_123_cast)[name = tensor("op_6134_cast")]; + tensor var_6135 = const()[name = tensor("op_6135"), val = tensor([2, 20, 64, -1])]; + tensor var_6136_cast = reshape(shape = var_6135, x = v_123_cast)[name = tensor("op_6136_cast")]; + tensor attn_weights_245_transpose_x_0 = const()[name = tensor("attn_weights_245_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_245_transpose_y_0 = const()[name = tensor("attn_weights_245_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_245_cast = matmul(transpose_x = attn_weights_245_transpose_x_0, transpose_y = attn_weights_245_transpose_y_0, x = var_6132_cast, y = var_6134_cast)[name = tensor("attn_weights_245_cast")]; + tensor attn_weights_247_cast = mul(x = attn_weights_245_cast, y = var_12_to_fp16)[name = tensor("attn_weights_247_cast")]; + tensor var_6140_cast = softmax(axis = var_18, x = attn_weights_247_cast)[name = tensor("op_6140_cast")]; + tensor attn_123_transpose_x_0 = const()[name = tensor("attn_123_transpose_x_0"), val = tensor(false)]; + tensor attn_123_transpose_y_0 = const()[name = tensor("attn_123_transpose_y_0"), val = tensor(true)]; + tensor attn_123_cast = matmul(transpose_x = attn_123_transpose_x_0, transpose_y = attn_123_transpose_y_0, x = var_6136_cast, y = var_6140_cast)[name = tensor("attn_123_cast")]; + tensor var_6144 = const()[name = tensor("op_6144"), val = tensor([2, 1280, 1, -1])]; + tensor input_377_cast = reshape(shape = var_6144, x = attn_123_cast)[name = tensor("input_377_cast")]; + tensor var_6149 = const()[name = tensor("op_6149"), val = tensor([1, 1])]; + tensor var_6151 = const()[name = tensor("op_6151"), val = tensor([1, 1])]; + tensor var_6153_pad_type_0 = const()[name = tensor("op_6153_pad_type_0"), val = tensor("custom")]; + tensor var_6153_pad_0 = const()[name = tensor("op_6153_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(814377536))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(815606400))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_mid_block_attentions_0_transformer_blocks_6_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("unet_mid_block_attentions_0_transformer_blocks_6_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(815606592)))]; + tensor var_6153_cast = conv(bias = unet_mid_block_attentions_0_transformer_blocks_6_attn2_to_out_0_bias_to_fp16, dilations = var_6151, groups = var_31, pad = var_6153_pad_0, pad_type = var_6153_pad_type_0, strides = var_6149, weight = unet_mid_block_attentions_0_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized, x = input_377_cast)[name = tensor("op_6153_cast")]; + tensor inputs_185_cast = add(x = var_6153_cast, y = inputs_183_cast)[name = tensor("inputs_185_cast")]; + tensor var_6157 = const()[name = tensor("op_6157"), val = tensor([1])]; + tensor channels_mean_185_cast = reduce_mean(axes = var_6157, keep_dims = var_23, x = inputs_185_cast)[name = tensor("channels_mean_185_cast")]; + tensor zero_mean_185_cast = sub(x = inputs_185_cast, y = channels_mean_185_cast)[name = tensor("zero_mean_185_cast")]; + tensor zero_mean_sq_185_cast = mul(x = zero_mean_185_cast, y = zero_mean_185_cast)[name = 
tensor("zero_mean_sq_185_cast")]; + tensor var_6161 = const()[name = tensor("op_6161"), val = tensor([1])]; + tensor var_6162_cast = reduce_mean(axes = var_6161, keep_dims = var_23, x = zero_mean_sq_185_cast)[name = tensor("op_6162_cast")]; + tensor var_6163_to_fp16 = const()[name = tensor("op_6163_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_6164_cast = add(x = var_6162_cast, y = var_6163_to_fp16)[name = tensor("op_6164_cast")]; + tensor denom_185_epsilon_0_to_fp16 = const()[name = tensor("denom_185_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_185_cast = rsqrt(epsilon = denom_185_epsilon_0_to_fp16, x = var_6164_cast)[name = tensor("denom_185_cast")]; + tensor out_185_cast = mul(x = zero_mean_185_cast, y = denom_185_cast)[name = tensor("out_185_cast")]; + tensor var_6168_to_fp16 = const()[name = tensor("op_6168_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(815609216)))]; + tensor var_6169_cast = add(x = out_185_cast, y = var_6168_to_fp16)[name = tensor("op_6169_cast")]; + tensor var_6171_to_fp16 = const()[name = tensor("op_6171_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(815611840)))]; + tensor input_379_cast = mul(x = var_6169_cast, y = var_6171_to_fp16)[name = tensor("input_379_cast")]; + tensor var_6179 = const()[name = tensor("op_6179"), val = tensor([1, 1])]; + tensor var_6181 = const()[name = tensor("op_6181"), val = tensor([1, 1])]; + tensor var_6183_pad_type_0 = const()[name = tensor("op_6183_pad_type_0"), val = tensor("custom")]; + tensor var_6183_pad_0 = const()[name = tensor("op_6183_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(815614464))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(825444928))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor unet_mid_block_attentions_0_transformer_blocks_6_ff_net_0_proj_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(825445120))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(825452864))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_6_ff_net_0_proj_bias_to_fp16_palettized"), shape = tensor([10240])]; + tensor var_6183_cast = conv(bias = unet_mid_block_attentions_0_transformer_blocks_6_ff_net_0_proj_bias_to_fp16_palettized, dilations = var_6181, groups = var_31, pad = var_6183_pad_0, pad_type = var_6183_pad_type_0, strides = var_6179, weight = unet_mid_block_attentions_0_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized, x = input_379_cast)[name = tensor("op_6183_cast")]; + tensor var_6184_split_sizes_0 = const()[name = tensor("op_6184_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_6184_axis_0 = const()[name = tensor("op_6184_axis_0"), val = tensor(1)]; + tensor var_6184_cast_0, tensor var_6184_cast_1 = split(axis = var_6184_axis_0, split_sizes = var_6184_split_sizes_0, x = var_6183_cast)[name = tensor("op_6184_cast")]; + tensor var_6186_mode_0 = const()[name = tensor("op_6186_mode_0"), val = tensor("EXACT")]; + tensor var_6186_cast = gelu(mode = var_6186_mode_0, x = var_6184_cast_1)[name = 
tensor("op_6186_cast")]; + tensor input_381_cast = mul(x = var_6184_cast_0, y = var_6186_cast)[name = tensor("input_381_cast")]; + tensor var_6190 = const()[name = tensor("op_6190"), val = tensor([1, 1])]; + tensor var_6192 = const()[name = tensor("op_6192"), val = tensor([1, 1])]; + tensor var_6194_pad_type_0 = const()[name = tensor("op_6194_pad_type_0"), val = tensor("custom")]; + tensor var_6194_pad_0 = const()[name = tensor("op_6194_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(825453056))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(830368320))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor unet_mid_block_attentions_0_transformer_blocks_6_ff_net_2_bias_to_fp16 = const()[name = tensor("unet_mid_block_attentions_0_transformer_blocks_6_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(830368512)))]; + tensor var_6194_cast = conv(bias = unet_mid_block_attentions_0_transformer_blocks_6_ff_net_2_bias_to_fp16, dilations = var_6192, groups = var_31, pad = var_6194_pad_0, pad_type = var_6194_pad_type_0, strides = var_6190, weight = unet_mid_block_attentions_0_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized, x = input_381_cast)[name = tensor("op_6194_cast")]; + tensor inputs_187_cast = add(x = var_6194_cast, y = inputs_185_cast)[name = tensor("inputs_187_cast")]; + tensor var_6204 = const()[name = tensor("op_6204"), val = tensor([1])]; + tensor channels_mean_187_cast = reduce_mean(axes = var_6204, keep_dims = var_23, x = inputs_187_cast)[name = tensor("channels_mean_187_cast")]; + tensor zero_mean_187_cast = sub(x = inputs_187_cast, y = channels_mean_187_cast)[name = tensor("zero_mean_187_cast")]; + tensor zero_mean_sq_187_cast = mul(x = zero_mean_187_cast, y = zero_mean_187_cast)[name = tensor("zero_mean_sq_187_cast")]; + tensor var_6208 = const()[name = tensor("op_6208"), val = tensor([1])]; + tensor var_6209_cast = reduce_mean(axes = var_6208, keep_dims = var_23, x = zero_mean_sq_187_cast)[name = tensor("op_6209_cast")]; + tensor var_6210_to_fp16 = const()[name = tensor("op_6210_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_6211_cast = add(x = var_6209_cast, y = var_6210_to_fp16)[name = tensor("op_6211_cast")]; + tensor denom_187_epsilon_0_to_fp16 = const()[name = tensor("denom_187_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_187_cast = rsqrt(epsilon = denom_187_epsilon_0_to_fp16, x = var_6211_cast)[name = tensor("denom_187_cast")]; + tensor out_187_cast = mul(x = zero_mean_187_cast, y = denom_187_cast)[name = tensor("out_187_cast")]; + tensor var_6215_to_fp16 = const()[name = tensor("op_6215_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(830371136)))]; + tensor var_6216_cast = add(x = out_187_cast, y = var_6215_to_fp16)[name = tensor("op_6216_cast")]; + tensor var_6218_to_fp16 = const()[name = tensor("op_6218_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(830373760)))]; + tensor hidden_states_251_cast = mul(x = var_6216_cast, y = var_6218_to_fp16)[name = tensor("hidden_states_251_cast")]; + tensor var_6225 = const()[name = tensor("op_6225"), val = tensor([1, 
1])]; + tensor var_6227 = const()[name = tensor("op_6227"), val = tensor([1, 1])]; + tensor q_125_pad_type_0 = const()[name = tensor("q_125_pad_type_0"), val = tensor("custom")]; + tensor q_125_pad_0 = const()[name = tensor("q_125_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(830376384))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(831605248))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_125_cast = conv(dilations = var_6227, groups = var_31, pad = q_125_pad_0, pad_type = q_125_pad_type_0, strides = var_6225, weight = unet_mid_block_attentions_0_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_251_cast)[name = tensor("q_125_cast")]; + tensor var_6231 = const()[name = tensor("op_6231"), val = tensor([1, 1])]; + tensor var_6233 = const()[name = tensor("op_6233"), val = tensor([1, 1])]; + tensor k_125_pad_type_0 = const()[name = tensor("k_125_pad_type_0"), val = tensor("custom")]; + tensor k_125_pad_0 = const()[name = tensor("k_125_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(831605440))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(832834304))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_125_cast = conv(dilations = var_6233, groups = var_31, pad = k_125_pad_0, pad_type = k_125_pad_type_0, strides = var_6231, weight = unet_mid_block_attentions_0_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_251_cast)[name = tensor("k_125_cast")]; + tensor var_6237 = const()[name = tensor("op_6237"), val = tensor([1, 1])]; + tensor var_6239 = const()[name = tensor("op_6239"), val = tensor([1, 1])]; + tensor v_125_pad_type_0 = const()[name = tensor("v_125_pad_type_0"), val = tensor("custom")]; + tensor v_125_pad_0 = const()[name = tensor("v_125_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(832834496))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(834063360))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_125_cast = conv(dilations = var_6239, groups = var_31, pad = v_125_pad_0, pad_type = v_125_pad_type_0, strides = var_6237, weight = unet_mid_block_attentions_0_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_251_cast)[name = tensor("v_125_cast")]; + tensor var_6243 = const()[name = tensor("op_6243"), val = tensor([2, 20, 64, -1])]; + tensor var_6244_cast = reshape(shape = var_6243, x = q_125_cast)[name = tensor("op_6244_cast")]; + tensor var_6245 = const()[name = tensor("op_6245"), val = tensor([2, 20, 64, -1])]; + tensor var_6246_cast = reshape(shape = var_6245, x = 
k_125_cast)[name = tensor("op_6246_cast")]; + tensor var_6247 = const()[name = tensor("op_6247"), val = tensor([2, 20, 64, -1])]; + tensor var_6248_cast = reshape(shape = var_6247, x = v_125_cast)[name = tensor("op_6248_cast")]; + tensor attn_weights_249_transpose_x_0 = const()[name = tensor("attn_weights_249_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_249_transpose_y_0 = const()[name = tensor("attn_weights_249_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_249_cast = matmul(transpose_x = attn_weights_249_transpose_x_0, transpose_y = attn_weights_249_transpose_y_0, x = var_6244_cast, y = var_6246_cast)[name = tensor("attn_weights_249_cast")]; + tensor attn_weights_251_cast = mul(x = attn_weights_249_cast, y = var_12_to_fp16)[name = tensor("attn_weights_251_cast")]; + tensor var_6252_cast = softmax(axis = var_18, x = attn_weights_251_cast)[name = tensor("op_6252_cast")]; + tensor attn_125_transpose_x_0 = const()[name = tensor("attn_125_transpose_x_0"), val = tensor(false)]; + tensor attn_125_transpose_y_0 = const()[name = tensor("attn_125_transpose_y_0"), val = tensor(true)]; + tensor attn_125_cast = matmul(transpose_x = attn_125_transpose_x_0, transpose_y = attn_125_transpose_y_0, x = var_6248_cast, y = var_6252_cast)[name = tensor("attn_125_cast")]; + tensor var_6256 = const()[name = tensor("op_6256"), val = tensor([2, 1280, 1, -1])]; + tensor input_383_cast = reshape(shape = var_6256, x = attn_125_cast)[name = tensor("input_383_cast")]; + tensor var_6261 = const()[name = tensor("op_6261"), val = tensor([1, 1])]; + tensor var_6263 = const()[name = tensor("op_6263"), val = tensor([1, 1])]; + tensor var_6265_pad_type_0 = const()[name = tensor("op_6265_pad_type_0"), val = tensor("custom")]; + tensor var_6265_pad_0 = const()[name = tensor("op_6265_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(834063552))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(835292416))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_mid_block_attentions_0_transformer_blocks_7_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("unet_mid_block_attentions_0_transformer_blocks_7_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(835292608)))]; + tensor var_6265_cast = conv(bias = unet_mid_block_attentions_0_transformer_blocks_7_attn1_to_out_0_bias_to_fp16, dilations = var_6263, groups = var_31, pad = var_6265_pad_0, pad_type = var_6265_pad_type_0, strides = var_6261, weight = unet_mid_block_attentions_0_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized, x = input_383_cast)[name = tensor("op_6265_cast")]; + tensor inputs_189_cast = add(x = var_6265_cast, y = inputs_187_cast)[name = tensor("inputs_189_cast")]; + tensor var_6269 = const()[name = tensor("op_6269"), val = tensor([1])]; + tensor channels_mean_189_cast = reduce_mean(axes = var_6269, keep_dims = var_23, x = inputs_189_cast)[name = tensor("channels_mean_189_cast")]; + tensor zero_mean_189_cast = sub(x = inputs_189_cast, y = channels_mean_189_cast)[name = tensor("zero_mean_189_cast")]; + tensor zero_mean_sq_189_cast = mul(x = zero_mean_189_cast, y = zero_mean_189_cast)[name = 
tensor("zero_mean_sq_189_cast")]; + tensor var_6273 = const()[name = tensor("op_6273"), val = tensor([1])]; + tensor var_6274_cast = reduce_mean(axes = var_6273, keep_dims = var_23, x = zero_mean_sq_189_cast)[name = tensor("op_6274_cast")]; + tensor var_6275_to_fp16 = const()[name = tensor("op_6275_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_6276_cast = add(x = var_6274_cast, y = var_6275_to_fp16)[name = tensor("op_6276_cast")]; + tensor denom_189_epsilon_0_to_fp16 = const()[name = tensor("denom_189_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_189_cast = rsqrt(epsilon = denom_189_epsilon_0_to_fp16, x = var_6276_cast)[name = tensor("denom_189_cast")]; + tensor out_189_cast = mul(x = zero_mean_189_cast, y = denom_189_cast)[name = tensor("out_189_cast")]; + tensor var_6280_to_fp16 = const()[name = tensor("op_6280_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(835295232)))]; + tensor var_6281_cast = add(x = out_189_cast, y = var_6280_to_fp16)[name = tensor("op_6281_cast")]; + tensor var_6283_to_fp16 = const()[name = tensor("op_6283_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(835297856)))]; + tensor hidden_states_253_cast = mul(x = var_6281_cast, y = var_6283_to_fp16)[name = tensor("hidden_states_253_cast")]; + tensor var_6290 = const()[name = tensor("op_6290"), val = tensor([1, 1])]; + tensor var_6292 = const()[name = tensor("op_6292"), val = tensor([1, 1])]; + tensor q_127_pad_type_0 = const()[name = tensor("q_127_pad_type_0"), val = tensor("custom")]; + tensor q_127_pad_0 = const()[name = tensor("q_127_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(835300480))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(836529344))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_127_cast = conv(dilations = var_6292, groups = var_31, pad = q_127_pad_0, pad_type = q_127_pad_type_0, strides = var_6290, weight = unet_mid_block_attentions_0_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_253_cast)[name = tensor("q_127_cast")]; + tensor var_6296 = const()[name = tensor("op_6296"), val = tensor([1, 1])]; + tensor var_6298 = const()[name = tensor("op_6298"), val = tensor([1, 1])]; + tensor k_127_pad_type_0 = const()[name = tensor("k_127_pad_type_0"), val = tensor("custom")]; + tensor k_127_pad_0 = const()[name = tensor("k_127_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(836529536))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(838495680))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_127_cast = conv(dilations = var_6298, groups = var_31, pad = k_127_pad_0, pad_type = k_127_pad_type_0, strides = var_6296, weight = unet_mid_block_attentions_0_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_127_cast")]; + tensor 
var_6302 = const()[name = tensor("op_6302"), val = tensor([1, 1])]; + tensor var_6304 = const()[name = tensor("op_6304"), val = tensor([1, 1])]; + tensor v_127_pad_type_0 = const()[name = tensor("v_127_pad_type_0"), val = tensor("custom")]; + tensor v_127_pad_0 = const()[name = tensor("v_127_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(838495872))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(840462016))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_127_cast = conv(dilations = var_6304, groups = var_31, pad = v_127_pad_0, pad_type = v_127_pad_type_0, strides = var_6302, weight = unet_mid_block_attentions_0_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_127_cast")]; + tensor var_6308 = const()[name = tensor("op_6308"), val = tensor([2, 20, 64, -1])]; + tensor var_6309_cast = reshape(shape = var_6308, x = q_127_cast)[name = tensor("op_6309_cast")]; + tensor var_6310 = const()[name = tensor("op_6310"), val = tensor([2, 20, 64, -1])]; + tensor var_6311_cast = reshape(shape = var_6310, x = k_127_cast)[name = tensor("op_6311_cast")]; + tensor var_6312 = const()[name = tensor("op_6312"), val = tensor([2, 20, 64, -1])]; + tensor var_6313_cast = reshape(shape = var_6312, x = v_127_cast)[name = tensor("op_6313_cast")]; + tensor attn_weights_253_transpose_x_0 = const()[name = tensor("attn_weights_253_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_253_transpose_y_0 = const()[name = tensor("attn_weights_253_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_253_cast = matmul(transpose_x = attn_weights_253_transpose_x_0, transpose_y = attn_weights_253_transpose_y_0, x = var_6309_cast, y = var_6311_cast)[name = tensor("attn_weights_253_cast")]; + tensor attn_weights_255_cast = mul(x = attn_weights_253_cast, y = var_12_to_fp16)[name = tensor("attn_weights_255_cast")]; + tensor var_6317_cast = softmax(axis = var_18, x = attn_weights_255_cast)[name = tensor("op_6317_cast")]; + tensor attn_127_transpose_x_0 = const()[name = tensor("attn_127_transpose_x_0"), val = tensor(false)]; + tensor attn_127_transpose_y_0 = const()[name = tensor("attn_127_transpose_y_0"), val = tensor(true)]; + tensor attn_127_cast = matmul(transpose_x = attn_127_transpose_x_0, transpose_y = attn_127_transpose_y_0, x = var_6313_cast, y = var_6317_cast)[name = tensor("attn_127_cast")]; + tensor var_6321 = const()[name = tensor("op_6321"), val = tensor([2, 1280, 1, -1])]; + tensor input_385_cast = reshape(shape = var_6321, x = attn_127_cast)[name = tensor("input_385_cast")]; + tensor var_6326 = const()[name = tensor("op_6326"), val = tensor([1, 1])]; + tensor var_6328 = const()[name = tensor("op_6328"), val = tensor([1, 1])]; + tensor var_6330_pad_type_0 = const()[name = tensor("op_6330_pad_type_0"), val = tensor("custom")]; + tensor var_6330_pad_0 = const()[name = tensor("op_6330_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(840462208))), lut = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(841691072))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_mid_block_attentions_0_transformer_blocks_7_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("unet_mid_block_attentions_0_transformer_blocks_7_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(841691264)))]; + tensor var_6330_cast = conv(bias = unet_mid_block_attentions_0_transformer_blocks_7_attn2_to_out_0_bias_to_fp16, dilations = var_6328, groups = var_31, pad = var_6330_pad_0, pad_type = var_6330_pad_type_0, strides = var_6326, weight = unet_mid_block_attentions_0_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized, x = input_385_cast)[name = tensor("op_6330_cast")]; + tensor inputs_191_cast = add(x = var_6330_cast, y = inputs_189_cast)[name = tensor("inputs_191_cast")]; + tensor var_6334 = const()[name = tensor("op_6334"), val = tensor([1])]; + tensor channels_mean_191_cast = reduce_mean(axes = var_6334, keep_dims = var_23, x = inputs_191_cast)[name = tensor("channels_mean_191_cast")]; + tensor zero_mean_191_cast = sub(x = inputs_191_cast, y = channels_mean_191_cast)[name = tensor("zero_mean_191_cast")]; + tensor zero_mean_sq_191_cast = mul(x = zero_mean_191_cast, y = zero_mean_191_cast)[name = tensor("zero_mean_sq_191_cast")]; + tensor var_6338 = const()[name = tensor("op_6338"), val = tensor([1])]; + tensor var_6339_cast = reduce_mean(axes = var_6338, keep_dims = var_23, x = zero_mean_sq_191_cast)[name = tensor("op_6339_cast")]; + tensor var_6340_to_fp16 = const()[name = tensor("op_6340_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_6341_cast = add(x = var_6339_cast, y = var_6340_to_fp16)[name = tensor("op_6341_cast")]; + tensor denom_191_epsilon_0_to_fp16 = const()[name = tensor("denom_191_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_191_cast = rsqrt(epsilon = denom_191_epsilon_0_to_fp16, x = var_6341_cast)[name = tensor("denom_191_cast")]; + tensor out_191_cast = mul(x = zero_mean_191_cast, y = denom_191_cast)[name = tensor("out_191_cast")]; + tensor var_6345_to_fp16 = const()[name = tensor("op_6345_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(841693888)))]; + tensor var_6346_cast = add(x = out_191_cast, y = var_6345_to_fp16)[name = tensor("op_6346_cast")]; + tensor var_6348_to_fp16 = const()[name = tensor("op_6348_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(841696512)))]; + tensor input_387_cast = mul(x = var_6346_cast, y = var_6348_to_fp16)[name = tensor("input_387_cast")]; + tensor var_6356 = const()[name = tensor("op_6356"), val = tensor([1, 1])]; + tensor var_6358 = const()[name = tensor("op_6358"), val = tensor([1, 1])]; + tensor var_6360_pad_type_0 = const()[name = tensor("op_6360_pad_type_0"), val = tensor("custom")]; + tensor var_6360_pad_0 = const()[name = tensor("op_6360_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(841699136))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(851529600))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized"), 
shape = tensor([10240, 1280, 1, 1])]; + tensor unet_mid_block_attentions_0_transformer_blocks_7_ff_net_0_proj_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(851529792))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(851537536))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_7_ff_net_0_proj_bias_to_fp16_palettized"), shape = tensor([10240])]; + tensor var_6360_cast = conv(bias = unet_mid_block_attentions_0_transformer_blocks_7_ff_net_0_proj_bias_to_fp16_palettized, dilations = var_6358, groups = var_31, pad = var_6360_pad_0, pad_type = var_6360_pad_type_0, strides = var_6356, weight = unet_mid_block_attentions_0_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized, x = input_387_cast)[name = tensor("op_6360_cast")]; + tensor var_6361_split_sizes_0 = const()[name = tensor("op_6361_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_6361_axis_0 = const()[name = tensor("op_6361_axis_0"), val = tensor(1)]; + tensor var_6361_cast_0, tensor var_6361_cast_1 = split(axis = var_6361_axis_0, split_sizes = var_6361_split_sizes_0, x = var_6360_cast)[name = tensor("op_6361_cast")]; + tensor var_6363_mode_0 = const()[name = tensor("op_6363_mode_0"), val = tensor("EXACT")]; + tensor var_6363_cast = gelu(mode = var_6363_mode_0, x = var_6361_cast_1)[name = tensor("op_6363_cast")]; + tensor input_389_cast = mul(x = var_6361_cast_0, y = var_6363_cast)[name = tensor("input_389_cast")]; + tensor var_6367 = const()[name = tensor("op_6367"), val = tensor([1, 1])]; + tensor var_6369 = const()[name = tensor("op_6369"), val = tensor([1, 1])]; + tensor var_6371_pad_type_0 = const()[name = tensor("op_6371_pad_type_0"), val = tensor("custom")]; + tensor var_6371_pad_0 = const()[name = tensor("op_6371_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(851537728))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(856452992))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor unet_mid_block_attentions_0_transformer_blocks_7_ff_net_2_bias_to_fp16 = const()[name = tensor("unet_mid_block_attentions_0_transformer_blocks_7_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(856453184)))]; + tensor var_6371_cast = conv(bias = unet_mid_block_attentions_0_transformer_blocks_7_ff_net_2_bias_to_fp16, dilations = var_6369, groups = var_31, pad = var_6371_pad_0, pad_type = var_6371_pad_type_0, strides = var_6367, weight = unet_mid_block_attentions_0_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized, x = input_389_cast)[name = tensor("op_6371_cast")]; + tensor inputs_193_cast = add(x = var_6371_cast, y = inputs_191_cast)[name = tensor("inputs_193_cast")]; + tensor var_6381 = const()[name = tensor("op_6381"), val = tensor([1])]; + tensor channels_mean_193_cast = reduce_mean(axes = var_6381, keep_dims = var_23, x = inputs_193_cast)[name = tensor("channels_mean_193_cast")]; + tensor zero_mean_193_cast = sub(x = inputs_193_cast, y = channels_mean_193_cast)[name = tensor("zero_mean_193_cast")]; + tensor zero_mean_sq_193_cast = mul(x = zero_mean_193_cast, y = 
zero_mean_193_cast)[name = tensor("zero_mean_sq_193_cast")]; + tensor var_6385 = const()[name = tensor("op_6385"), val = tensor([1])]; + tensor var_6386_cast = reduce_mean(axes = var_6385, keep_dims = var_23, x = zero_mean_sq_193_cast)[name = tensor("op_6386_cast")]; + tensor var_6387_to_fp16 = const()[name = tensor("op_6387_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_6388_cast = add(x = var_6386_cast, y = var_6387_to_fp16)[name = tensor("op_6388_cast")]; + tensor denom_193_epsilon_0_to_fp16 = const()[name = tensor("denom_193_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_193_cast = rsqrt(epsilon = denom_193_epsilon_0_to_fp16, x = var_6388_cast)[name = tensor("denom_193_cast")]; + tensor out_193_cast = mul(x = zero_mean_193_cast, y = denom_193_cast)[name = tensor("out_193_cast")]; + tensor var_6392_to_fp16 = const()[name = tensor("op_6392_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(856455808)))]; + tensor var_6393_cast = add(x = out_193_cast, y = var_6392_to_fp16)[name = tensor("op_6393_cast")]; + tensor var_6395_to_fp16 = const()[name = tensor("op_6395_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(856458432)))]; + tensor hidden_states_257_cast = mul(x = var_6393_cast, y = var_6395_to_fp16)[name = tensor("hidden_states_257_cast")]; + tensor var_6402 = const()[name = tensor("op_6402"), val = tensor([1, 1])]; + tensor var_6404 = const()[name = tensor("op_6404"), val = tensor([1, 1])]; + tensor q_129_pad_type_0 = const()[name = tensor("q_129_pad_type_0"), val = tensor("custom")]; + tensor q_129_pad_0 = const()[name = tensor("q_129_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(856461056))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(857689920))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_129_cast = conv(dilations = var_6404, groups = var_31, pad = q_129_pad_0, pad_type = q_129_pad_type_0, strides = var_6402, weight = unet_mid_block_attentions_0_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_257_cast)[name = tensor("q_129_cast")]; + tensor var_6408 = const()[name = tensor("op_6408"), val = tensor([1, 1])]; + tensor var_6410 = const()[name = tensor("op_6410"), val = tensor([1, 1])]; + tensor k_129_pad_type_0 = const()[name = tensor("k_129_pad_type_0"), val = tensor("custom")]; + tensor k_129_pad_0 = const()[name = tensor("k_129_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(857690112))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(858918976))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_129_cast = conv(dilations = var_6410, groups = var_31, pad = k_129_pad_0, pad_type = k_129_pad_type_0, strides = var_6408, weight = unet_mid_block_attentions_0_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_257_cast)[name = 
tensor("k_129_cast")]; + tensor var_6414 = const()[name = tensor("op_6414"), val = tensor([1, 1])]; + tensor var_6416 = const()[name = tensor("op_6416"), val = tensor([1, 1])]; + tensor v_129_pad_type_0 = const()[name = tensor("v_129_pad_type_0"), val = tensor("custom")]; + tensor v_129_pad_0 = const()[name = tensor("v_129_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(858919168))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(860148032))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_129_cast = conv(dilations = var_6416, groups = var_31, pad = v_129_pad_0, pad_type = v_129_pad_type_0, strides = var_6414, weight = unet_mid_block_attentions_0_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_257_cast)[name = tensor("v_129_cast")]; + tensor var_6420 = const()[name = tensor("op_6420"), val = tensor([2, 20, 64, -1])]; + tensor var_6421_cast = reshape(shape = var_6420, x = q_129_cast)[name = tensor("op_6421_cast")]; + tensor var_6422 = const()[name = tensor("op_6422"), val = tensor([2, 20, 64, -1])]; + tensor var_6423_cast = reshape(shape = var_6422, x = k_129_cast)[name = tensor("op_6423_cast")]; + tensor var_6424 = const()[name = tensor("op_6424"), val = tensor([2, 20, 64, -1])]; + tensor var_6425_cast = reshape(shape = var_6424, x = v_129_cast)[name = tensor("op_6425_cast")]; + tensor attn_weights_257_transpose_x_0 = const()[name = tensor("attn_weights_257_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_257_transpose_y_0 = const()[name = tensor("attn_weights_257_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_257_cast = matmul(transpose_x = attn_weights_257_transpose_x_0, transpose_y = attn_weights_257_transpose_y_0, x = var_6421_cast, y = var_6423_cast)[name = tensor("attn_weights_257_cast")]; + tensor attn_weights_259_cast = mul(x = attn_weights_257_cast, y = var_12_to_fp16)[name = tensor("attn_weights_259_cast")]; + tensor var_6429_cast = softmax(axis = var_18, x = attn_weights_259_cast)[name = tensor("op_6429_cast")]; + tensor attn_129_transpose_x_0 = const()[name = tensor("attn_129_transpose_x_0"), val = tensor(false)]; + tensor attn_129_transpose_y_0 = const()[name = tensor("attn_129_transpose_y_0"), val = tensor(true)]; + tensor attn_129_cast = matmul(transpose_x = attn_129_transpose_x_0, transpose_y = attn_129_transpose_y_0, x = var_6425_cast, y = var_6429_cast)[name = tensor("attn_129_cast")]; + tensor var_6433 = const()[name = tensor("op_6433"), val = tensor([2, 1280, 1, -1])]; + tensor input_391_cast = reshape(shape = var_6433, x = attn_129_cast)[name = tensor("input_391_cast")]; + tensor var_6438 = const()[name = tensor("op_6438"), val = tensor([1, 1])]; + tensor var_6440 = const()[name = tensor("op_6440"), val = tensor([1, 1])]; + tensor var_6442_pad_type_0 = const()[name = tensor("op_6442_pad_type_0"), val = tensor("custom")]; + tensor var_6442_pad_0 = const()[name = tensor("op_6442_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(860148224))), lut = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(861377088))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_mid_block_attentions_0_transformer_blocks_8_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("unet_mid_block_attentions_0_transformer_blocks_8_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(861377280)))]; + tensor var_6442_cast = conv(bias = unet_mid_block_attentions_0_transformer_blocks_8_attn1_to_out_0_bias_to_fp16, dilations = var_6440, groups = var_31, pad = var_6442_pad_0, pad_type = var_6442_pad_type_0, strides = var_6438, weight = unet_mid_block_attentions_0_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized, x = input_391_cast)[name = tensor("op_6442_cast")]; + tensor inputs_195_cast = add(x = var_6442_cast, y = inputs_193_cast)[name = tensor("inputs_195_cast")]; + tensor var_6446 = const()[name = tensor("op_6446"), val = tensor([1])]; + tensor channels_mean_195_cast = reduce_mean(axes = var_6446, keep_dims = var_23, x = inputs_195_cast)[name = tensor("channels_mean_195_cast")]; + tensor zero_mean_195_cast = sub(x = inputs_195_cast, y = channels_mean_195_cast)[name = tensor("zero_mean_195_cast")]; + tensor zero_mean_sq_195_cast = mul(x = zero_mean_195_cast, y = zero_mean_195_cast)[name = tensor("zero_mean_sq_195_cast")]; + tensor var_6450 = const()[name = tensor("op_6450"), val = tensor([1])]; + tensor var_6451_cast = reduce_mean(axes = var_6450, keep_dims = var_23, x = zero_mean_sq_195_cast)[name = tensor("op_6451_cast")]; + tensor var_6452_to_fp16 = const()[name = tensor("op_6452_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_6453_cast = add(x = var_6451_cast, y = var_6452_to_fp16)[name = tensor("op_6453_cast")]; + tensor denom_195_epsilon_0_to_fp16 = const()[name = tensor("denom_195_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_195_cast = rsqrt(epsilon = denom_195_epsilon_0_to_fp16, x = var_6453_cast)[name = tensor("denom_195_cast")]; + tensor out_195_cast = mul(x = zero_mean_195_cast, y = denom_195_cast)[name = tensor("out_195_cast")]; + tensor var_6457_to_fp16 = const()[name = tensor("op_6457_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(861379904)))]; + tensor var_6458_cast = add(x = out_195_cast, y = var_6457_to_fp16)[name = tensor("op_6458_cast")]; + tensor var_6460_to_fp16 = const()[name = tensor("op_6460_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(861382528)))]; + tensor hidden_states_259_cast = mul(x = var_6458_cast, y = var_6460_to_fp16)[name = tensor("hidden_states_259_cast")]; + tensor var_6467 = const()[name = tensor("op_6467"), val = tensor([1, 1])]; + tensor var_6469 = const()[name = tensor("op_6469"), val = tensor([1, 1])]; + tensor q_131_pad_type_0 = const()[name = tensor("q_131_pad_type_0"), val = tensor("custom")]; + tensor q_131_pad_0 = const()[name = tensor("q_131_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(861385152))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(862614016))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized"), 
shape = tensor([1280, 1280, 1, 1])]; + tensor q_131_cast = conv(dilations = var_6469, groups = var_31, pad = q_131_pad_0, pad_type = q_131_pad_type_0, strides = var_6467, weight = unet_mid_block_attentions_0_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_259_cast)[name = tensor("q_131_cast")]; + tensor var_6473 = const()[name = tensor("op_6473"), val = tensor([1, 1])]; + tensor var_6475 = const()[name = tensor("op_6475"), val = tensor([1, 1])]; + tensor k_131_pad_type_0 = const()[name = tensor("k_131_pad_type_0"), val = tensor("custom")]; + tensor k_131_pad_0 = const()[name = tensor("k_131_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(862614208))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(864580352))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_131_cast = conv(dilations = var_6475, groups = var_31, pad = k_131_pad_0, pad_type = k_131_pad_type_0, strides = var_6473, weight = unet_mid_block_attentions_0_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_131_cast")]; + tensor var_6479 = const()[name = tensor("op_6479"), val = tensor([1, 1])]; + tensor var_6481 = const()[name = tensor("op_6481"), val = tensor([1, 1])]; + tensor v_131_pad_type_0 = const()[name = tensor("v_131_pad_type_0"), val = tensor("custom")]; + tensor v_131_pad_0 = const()[name = tensor("v_131_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(864580544))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(866546688))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_131_cast = conv(dilations = var_6481, groups = var_31, pad = v_131_pad_0, pad_type = v_131_pad_type_0, strides = var_6479, weight = unet_mid_block_attentions_0_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_131_cast")]; + tensor var_6485 = const()[name = tensor("op_6485"), val = tensor([2, 20, 64, -1])]; + tensor var_6486_cast = reshape(shape = var_6485, x = q_131_cast)[name = tensor("op_6486_cast")]; + tensor var_6487 = const()[name = tensor("op_6487"), val = tensor([2, 20, 64, -1])]; + tensor var_6488_cast = reshape(shape = var_6487, x = k_131_cast)[name = tensor("op_6488_cast")]; + tensor var_6489 = const()[name = tensor("op_6489"), val = tensor([2, 20, 64, -1])]; + tensor var_6490_cast = reshape(shape = var_6489, x = v_131_cast)[name = tensor("op_6490_cast")]; + tensor attn_weights_261_transpose_x_0 = const()[name = tensor("attn_weights_261_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_261_transpose_y_0 = const()[name = tensor("attn_weights_261_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_261_cast = matmul(transpose_x = attn_weights_261_transpose_x_0, transpose_y = attn_weights_261_transpose_y_0, x = var_6486_cast, y = var_6488_cast)[name = tensor("attn_weights_261_cast")]; + tensor attn_weights_263_cast 
= mul(x = attn_weights_261_cast, y = var_12_to_fp16)[name = tensor("attn_weights_263_cast")]; + tensor var_6494_cast = softmax(axis = var_18, x = attn_weights_263_cast)[name = tensor("op_6494_cast")]; + tensor attn_131_transpose_x_0 = const()[name = tensor("attn_131_transpose_x_0"), val = tensor(false)]; + tensor attn_131_transpose_y_0 = const()[name = tensor("attn_131_transpose_y_0"), val = tensor(true)]; + tensor attn_131_cast = matmul(transpose_x = attn_131_transpose_x_0, transpose_y = attn_131_transpose_y_0, x = var_6490_cast, y = var_6494_cast)[name = tensor("attn_131_cast")]; + tensor var_6498 = const()[name = tensor("op_6498"), val = tensor([2, 1280, 1, -1])]; + tensor input_393_cast = reshape(shape = var_6498, x = attn_131_cast)[name = tensor("input_393_cast")]; + tensor var_6503 = const()[name = tensor("op_6503"), val = tensor([1, 1])]; + tensor var_6505 = const()[name = tensor("op_6505"), val = tensor([1, 1])]; + tensor var_6507_pad_type_0 = const()[name = tensor("op_6507_pad_type_0"), val = tensor("custom")]; + tensor var_6507_pad_0 = const()[name = tensor("op_6507_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(866546880))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(867775744))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_mid_block_attentions_0_transformer_blocks_8_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("unet_mid_block_attentions_0_transformer_blocks_8_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(867775936)))]; + tensor var_6507_cast = conv(bias = unet_mid_block_attentions_0_transformer_blocks_8_attn2_to_out_0_bias_to_fp16, dilations = var_6505, groups = var_31, pad = var_6507_pad_0, pad_type = var_6507_pad_type_0, strides = var_6503, weight = unet_mid_block_attentions_0_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized, x = input_393_cast)[name = tensor("op_6507_cast")]; + tensor inputs_197_cast = add(x = var_6507_cast, y = inputs_195_cast)[name = tensor("inputs_197_cast")]; + tensor var_6511 = const()[name = tensor("op_6511"), val = tensor([1])]; + tensor channels_mean_197_cast = reduce_mean(axes = var_6511, keep_dims = var_23, x = inputs_197_cast)[name = tensor("channels_mean_197_cast")]; + tensor zero_mean_197_cast = sub(x = inputs_197_cast, y = channels_mean_197_cast)[name = tensor("zero_mean_197_cast")]; + tensor zero_mean_sq_197_cast = mul(x = zero_mean_197_cast, y = zero_mean_197_cast)[name = tensor("zero_mean_sq_197_cast")]; + tensor var_6515 = const()[name = tensor("op_6515"), val = tensor([1])]; + tensor var_6516_cast = reduce_mean(axes = var_6515, keep_dims = var_23, x = zero_mean_sq_197_cast)[name = tensor("op_6516_cast")]; + tensor var_6517_to_fp16 = const()[name = tensor("op_6517_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_6518_cast = add(x = var_6516_cast, y = var_6517_to_fp16)[name = tensor("op_6518_cast")]; + tensor denom_197_epsilon_0_to_fp16 = const()[name = tensor("denom_197_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_197_cast = rsqrt(epsilon = denom_197_epsilon_0_to_fp16, x = var_6518_cast)[name = tensor("denom_197_cast")]; + tensor out_197_cast = mul(x = zero_mean_197_cast, y 
= denom_197_cast)[name = tensor("out_197_cast")]; + tensor var_6522_to_fp16 = const()[name = tensor("op_6522_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(867778560)))]; + tensor var_6523_cast = add(x = out_197_cast, y = var_6522_to_fp16)[name = tensor("op_6523_cast")]; + tensor var_6525_to_fp16 = const()[name = tensor("op_6525_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(867781184)))]; + tensor input_395_cast = mul(x = var_6523_cast, y = var_6525_to_fp16)[name = tensor("input_395_cast")]; + tensor var_6533 = const()[name = tensor("op_6533"), val = tensor([1, 1])]; + tensor var_6535 = const()[name = tensor("op_6535"), val = tensor([1, 1])]; + tensor var_6537_pad_type_0 = const()[name = tensor("op_6537_pad_type_0"), val = tensor("custom")]; + tensor var_6537_pad_0 = const()[name = tensor("op_6537_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(867783808))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(877614272))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor unet_mid_block_attentions_0_transformer_blocks_8_ff_net_0_proj_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(877614464))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(877622208))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_8_ff_net_0_proj_bias_to_fp16_palettized"), shape = tensor([10240])]; + tensor var_6537_cast = conv(bias = unet_mid_block_attentions_0_transformer_blocks_8_ff_net_0_proj_bias_to_fp16_palettized, dilations = var_6535, groups = var_31, pad = var_6537_pad_0, pad_type = var_6537_pad_type_0, strides = var_6533, weight = unet_mid_block_attentions_0_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized, x = input_395_cast)[name = tensor("op_6537_cast")]; + tensor var_6538_split_sizes_0 = const()[name = tensor("op_6538_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_6538_axis_0 = const()[name = tensor("op_6538_axis_0"), val = tensor(1)]; + tensor var_6538_cast_0, tensor var_6538_cast_1 = split(axis = var_6538_axis_0, split_sizes = var_6538_split_sizes_0, x = var_6537_cast)[name = tensor("op_6538_cast")]; + tensor var_6540_mode_0 = const()[name = tensor("op_6540_mode_0"), val = tensor("EXACT")]; + tensor var_6540_cast = gelu(mode = var_6540_mode_0, x = var_6538_cast_1)[name = tensor("op_6540_cast")]; + tensor input_397_cast = mul(x = var_6538_cast_0, y = var_6540_cast)[name = tensor("input_397_cast")]; + tensor var_6544 = const()[name = tensor("op_6544"), val = tensor([1, 1])]; + tensor var_6546 = const()[name = tensor("op_6546"), val = tensor([1, 1])]; + tensor var_6548_pad_type_0 = const()[name = tensor("op_6548_pad_type_0"), val = tensor("custom")]; + tensor var_6548_pad_0 = const()[name = tensor("op_6548_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(877622400))), lut = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(882537664))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor unet_mid_block_attentions_0_transformer_blocks_8_ff_net_2_bias_to_fp16 = const()[name = tensor("unet_mid_block_attentions_0_transformer_blocks_8_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(882537856)))]; + tensor var_6548_cast = conv(bias = unet_mid_block_attentions_0_transformer_blocks_8_ff_net_2_bias_to_fp16, dilations = var_6546, groups = var_31, pad = var_6548_pad_0, pad_type = var_6548_pad_type_0, strides = var_6544, weight = unet_mid_block_attentions_0_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized, x = input_397_cast)[name = tensor("op_6548_cast")]; + tensor inputs_199_cast = add(x = var_6548_cast, y = inputs_197_cast)[name = tensor("inputs_199_cast")]; + tensor var_6558 = const()[name = tensor("op_6558"), val = tensor([1])]; + tensor channels_mean_199_cast = reduce_mean(axes = var_6558, keep_dims = var_23, x = inputs_199_cast)[name = tensor("channels_mean_199_cast")]; + tensor zero_mean_199_cast = sub(x = inputs_199_cast, y = channels_mean_199_cast)[name = tensor("zero_mean_199_cast")]; + tensor zero_mean_sq_199_cast = mul(x = zero_mean_199_cast, y = zero_mean_199_cast)[name = tensor("zero_mean_sq_199_cast")]; + tensor var_6562 = const()[name = tensor("op_6562"), val = tensor([1])]; + tensor var_6563_cast = reduce_mean(axes = var_6562, keep_dims = var_23, x = zero_mean_sq_199_cast)[name = tensor("op_6563_cast")]; + tensor var_6564_to_fp16 = const()[name = tensor("op_6564_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_6565_cast = add(x = var_6563_cast, y = var_6564_to_fp16)[name = tensor("op_6565_cast")]; + tensor denom_199_epsilon_0_to_fp16 = const()[name = tensor("denom_199_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_199_cast = rsqrt(epsilon = denom_199_epsilon_0_to_fp16, x = var_6565_cast)[name = tensor("denom_199_cast")]; + tensor out_199_cast = mul(x = zero_mean_199_cast, y = denom_199_cast)[name = tensor("out_199_cast")]; + tensor var_6569_to_fp16 = const()[name = tensor("op_6569_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(882540480)))]; + tensor var_6570_cast = add(x = out_199_cast, y = var_6569_to_fp16)[name = tensor("op_6570_cast")]; + tensor var_6572_to_fp16 = const()[name = tensor("op_6572_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(882543104)))]; + tensor hidden_states_263_cast = mul(x = var_6570_cast, y = var_6572_to_fp16)[name = tensor("hidden_states_263_cast")]; + tensor var_6579 = const()[name = tensor("op_6579"), val = tensor([1, 1])]; + tensor var_6581 = const()[name = tensor("op_6581"), val = tensor([1, 1])]; + tensor q_133_pad_type_0 = const()[name = tensor("q_133_pad_type_0"), val = tensor("custom")]; + tensor q_133_pad_0 = const()[name = tensor("q_133_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(882545728))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(883774592))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 
1])]; + tensor q_133_cast = conv(dilations = var_6581, groups = var_31, pad = q_133_pad_0, pad_type = q_133_pad_type_0, strides = var_6579, weight = unet_mid_block_attentions_0_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_263_cast)[name = tensor("q_133_cast")]; + tensor var_6585 = const()[name = tensor("op_6585"), val = tensor([1, 1])]; + tensor var_6587 = const()[name = tensor("op_6587"), val = tensor([1, 1])]; + tensor k_133_pad_type_0 = const()[name = tensor("k_133_pad_type_0"), val = tensor("custom")]; + tensor k_133_pad_0 = const()[name = tensor("k_133_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(883774784))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(885003648))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_133_cast = conv(dilations = var_6587, groups = var_31, pad = k_133_pad_0, pad_type = k_133_pad_type_0, strides = var_6585, weight = unet_mid_block_attentions_0_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_263_cast)[name = tensor("k_133_cast")]; + tensor var_6591 = const()[name = tensor("op_6591"), val = tensor([1, 1])]; + tensor var_6593 = const()[name = tensor("op_6593"), val = tensor([1, 1])]; + tensor v_133_pad_type_0 = const()[name = tensor("v_133_pad_type_0"), val = tensor("custom")]; + tensor v_133_pad_0 = const()[name = tensor("v_133_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(885003840))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(886232704))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_133_cast = conv(dilations = var_6593, groups = var_31, pad = v_133_pad_0, pad_type = v_133_pad_type_0, strides = var_6591, weight = unet_mid_block_attentions_0_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_263_cast)[name = tensor("v_133_cast")]; + tensor var_6597 = const()[name = tensor("op_6597"), val = tensor([2, 20, 64, -1])]; + tensor var_6598_cast = reshape(shape = var_6597, x = q_133_cast)[name = tensor("op_6598_cast")]; + tensor var_6599 = const()[name = tensor("op_6599"), val = tensor([2, 20, 64, -1])]; + tensor var_6600_cast = reshape(shape = var_6599, x = k_133_cast)[name = tensor("op_6600_cast")]; + tensor var_6601 = const()[name = tensor("op_6601"), val = tensor([2, 20, 64, -1])]; + tensor var_6602_cast = reshape(shape = var_6601, x = v_133_cast)[name = tensor("op_6602_cast")]; + tensor attn_weights_265_transpose_x_0 = const()[name = tensor("attn_weights_265_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_265_transpose_y_0 = const()[name = tensor("attn_weights_265_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_265_cast = matmul(transpose_x = attn_weights_265_transpose_x_0, transpose_y = attn_weights_265_transpose_y_0, x = var_6598_cast, y = var_6600_cast)[name = tensor("attn_weights_265_cast")]; + tensor attn_weights_267_cast = mul(x = 
attn_weights_265_cast, y = var_12_to_fp16)[name = tensor("attn_weights_267_cast")]; + tensor var_6606_cast = softmax(axis = var_18, x = attn_weights_267_cast)[name = tensor("op_6606_cast")]; + tensor attn_133_transpose_x_0 = const()[name = tensor("attn_133_transpose_x_0"), val = tensor(false)]; + tensor attn_133_transpose_y_0 = const()[name = tensor("attn_133_transpose_y_0"), val = tensor(true)]; + tensor attn_133_cast = matmul(transpose_x = attn_133_transpose_x_0, transpose_y = attn_133_transpose_y_0, x = var_6602_cast, y = var_6606_cast)[name = tensor("attn_133_cast")]; + tensor var_6610 = const()[name = tensor("op_6610"), val = tensor([2, 1280, 1, -1])]; + tensor input_399_cast = reshape(shape = var_6610, x = attn_133_cast)[name = tensor("input_399_cast")]; + tensor var_6615 = const()[name = tensor("op_6615"), val = tensor([1, 1])]; + tensor var_6617 = const()[name = tensor("op_6617"), val = tensor([1, 1])]; + tensor var_6619_pad_type_0 = const()[name = tensor("op_6619_pad_type_0"), val = tensor("custom")]; + tensor var_6619_pad_0 = const()[name = tensor("op_6619_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(886232896))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(887461760))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_mid_block_attentions_0_transformer_blocks_9_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("unet_mid_block_attentions_0_transformer_blocks_9_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(887461952)))]; + tensor var_6619_cast = conv(bias = unet_mid_block_attentions_0_transformer_blocks_9_attn1_to_out_0_bias_to_fp16, dilations = var_6617, groups = var_31, pad = var_6619_pad_0, pad_type = var_6619_pad_type_0, strides = var_6615, weight = unet_mid_block_attentions_0_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized, x = input_399_cast)[name = tensor("op_6619_cast")]; + tensor inputs_201_cast = add(x = var_6619_cast, y = inputs_199_cast)[name = tensor("inputs_201_cast")]; + tensor var_6623 = const()[name = tensor("op_6623"), val = tensor([1])]; + tensor channels_mean_201_cast = reduce_mean(axes = var_6623, keep_dims = var_23, x = inputs_201_cast)[name = tensor("channels_mean_201_cast")]; + tensor zero_mean_201_cast = sub(x = inputs_201_cast, y = channels_mean_201_cast)[name = tensor("zero_mean_201_cast")]; + tensor zero_mean_sq_201_cast = mul(x = zero_mean_201_cast, y = zero_mean_201_cast)[name = tensor("zero_mean_sq_201_cast")]; + tensor var_6627 = const()[name = tensor("op_6627"), val = tensor([1])]; + tensor var_6628_cast = reduce_mean(axes = var_6627, keep_dims = var_23, x = zero_mean_sq_201_cast)[name = tensor("op_6628_cast")]; + tensor var_6629_to_fp16 = const()[name = tensor("op_6629_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_6630_cast = add(x = var_6628_cast, y = var_6629_to_fp16)[name = tensor("op_6630_cast")]; + tensor denom_201_epsilon_0_to_fp16 = const()[name = tensor("denom_201_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_201_cast = rsqrt(epsilon = denom_201_epsilon_0_to_fp16, x = var_6630_cast)[name = tensor("denom_201_cast")]; + tensor out_201_cast = mul(x = zero_mean_201_cast, y = 
denom_201_cast)[name = tensor("out_201_cast")]; + tensor var_6634_to_fp16 = const()[name = tensor("op_6634_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(887464576)))]; + tensor var_6635_cast = add(x = out_201_cast, y = var_6634_to_fp16)[name = tensor("op_6635_cast")]; + tensor var_6637_to_fp16 = const()[name = tensor("op_6637_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(887467200)))]; + tensor hidden_states_265_cast = mul(x = var_6635_cast, y = var_6637_to_fp16)[name = tensor("hidden_states_265_cast")]; + tensor var_6644 = const()[name = tensor("op_6644"), val = tensor([1, 1])]; + tensor var_6646 = const()[name = tensor("op_6646"), val = tensor([1, 1])]; + tensor q_135_pad_type_0 = const()[name = tensor("q_135_pad_type_0"), val = tensor("custom")]; + tensor q_135_pad_0 = const()[name = tensor("q_135_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(887469824))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(888698688))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_135_cast = conv(dilations = var_6646, groups = var_31, pad = q_135_pad_0, pad_type = q_135_pad_type_0, strides = var_6644, weight = unet_mid_block_attentions_0_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_265_cast)[name = tensor("q_135_cast")]; + tensor var_6650 = const()[name = tensor("op_6650"), val = tensor([1, 1])]; + tensor var_6652 = const()[name = tensor("op_6652"), val = tensor([1, 1])]; + tensor k_135_pad_type_0 = const()[name = tensor("k_135_pad_type_0"), val = tensor("custom")]; + tensor k_135_pad_0 = const()[name = tensor("k_135_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(888698880))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(890665024))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_135_cast = conv(dilations = var_6652, groups = var_31, pad = k_135_pad_0, pad_type = k_135_pad_type_0, strides = var_6650, weight = unet_mid_block_attentions_0_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_135_cast")]; + tensor var_6656 = const()[name = tensor("op_6656"), val = tensor([1, 1])]; + tensor var_6658 = const()[name = tensor("op_6658"), val = tensor([1, 1])]; + tensor v_135_pad_type_0 = const()[name = tensor("v_135_pad_type_0"), val = tensor("custom")]; + tensor v_135_pad_0 = const()[name = tensor("v_135_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(890665216))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(892631360))), name = 
tensor("unet_mid_block_attentions_0_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_135_cast = conv(dilations = var_6658, groups = var_31, pad = v_135_pad_0, pad_type = v_135_pad_type_0, strides = var_6656, weight = unet_mid_block_attentions_0_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_135_cast")]; + tensor var_6662 = const()[name = tensor("op_6662"), val = tensor([2, 20, 64, -1])]; + tensor var_6663_cast = reshape(shape = var_6662, x = q_135_cast)[name = tensor("op_6663_cast")]; + tensor var_6664 = const()[name = tensor("op_6664"), val = tensor([2, 20, 64, -1])]; + tensor var_6665_cast = reshape(shape = var_6664, x = k_135_cast)[name = tensor("op_6665_cast")]; + tensor var_6666 = const()[name = tensor("op_6666"), val = tensor([2, 20, 64, -1])]; + tensor var_6667_cast = reshape(shape = var_6666, x = v_135_cast)[name = tensor("op_6667_cast")]; + tensor attn_weights_269_transpose_x_0 = const()[name = tensor("attn_weights_269_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_269_transpose_y_0 = const()[name = tensor("attn_weights_269_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_269_cast = matmul(transpose_x = attn_weights_269_transpose_x_0, transpose_y = attn_weights_269_transpose_y_0, x = var_6663_cast, y = var_6665_cast)[name = tensor("attn_weights_269_cast")]; + tensor attn_weights_271_cast = mul(x = attn_weights_269_cast, y = var_12_to_fp16)[name = tensor("attn_weights_271_cast")]; + tensor var_6671_cast = softmax(axis = var_18, x = attn_weights_271_cast)[name = tensor("op_6671_cast")]; + tensor attn_135_transpose_x_0 = const()[name = tensor("attn_135_transpose_x_0"), val = tensor(false)]; + tensor attn_135_transpose_y_0 = const()[name = tensor("attn_135_transpose_y_0"), val = tensor(true)]; + tensor attn_135_cast = matmul(transpose_x = attn_135_transpose_x_0, transpose_y = attn_135_transpose_y_0, x = var_6667_cast, y = var_6671_cast)[name = tensor("attn_135_cast")]; + tensor var_6675 = const()[name = tensor("op_6675"), val = tensor([2, 1280, 1, -1])]; + tensor input_401_cast = reshape(shape = var_6675, x = attn_135_cast)[name = tensor("input_401_cast")]; + tensor var_6680 = const()[name = tensor("op_6680"), val = tensor([1, 1])]; + tensor var_6682 = const()[name = tensor("op_6682"), val = tensor([1, 1])]; + tensor var_6684_pad_type_0 = const()[name = tensor("op_6684_pad_type_0"), val = tensor("custom")]; + tensor var_6684_pad_0 = const()[name = tensor("op_6684_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(892631552))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(893860416))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_mid_block_attentions_0_transformer_blocks_9_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("unet_mid_block_attentions_0_transformer_blocks_9_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(893860608)))]; + tensor var_6684_cast = conv(bias = unet_mid_block_attentions_0_transformer_blocks_9_attn2_to_out_0_bias_to_fp16, dilations = var_6682, groups = var_31, pad = var_6684_pad_0, pad_type = 
var_6684_pad_type_0, strides = var_6680, weight = unet_mid_block_attentions_0_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized, x = input_401_cast)[name = tensor("op_6684_cast")]; + tensor inputs_203_cast = add(x = var_6684_cast, y = inputs_201_cast)[name = tensor("inputs_203_cast")]; + tensor var_6688 = const()[name = tensor("op_6688"), val = tensor([1])]; + tensor channels_mean_203_cast = reduce_mean(axes = var_6688, keep_dims = var_23, x = inputs_203_cast)[name = tensor("channels_mean_203_cast")]; + tensor zero_mean_203_cast = sub(x = inputs_203_cast, y = channels_mean_203_cast)[name = tensor("zero_mean_203_cast")]; + tensor zero_mean_sq_203_cast = mul(x = zero_mean_203_cast, y = zero_mean_203_cast)[name = tensor("zero_mean_sq_203_cast")]; + tensor var_6692 = const()[name = tensor("op_6692"), val = tensor([1])]; + tensor var_6693_cast = reduce_mean(axes = var_6692, keep_dims = var_23, x = zero_mean_sq_203_cast)[name = tensor("op_6693_cast")]; + tensor var_6694_to_fp16 = const()[name = tensor("op_6694_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_6695_cast = add(x = var_6693_cast, y = var_6694_to_fp16)[name = tensor("op_6695_cast")]; + tensor denom_203_epsilon_0_to_fp16 = const()[name = tensor("denom_203_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_203_cast = rsqrt(epsilon = denom_203_epsilon_0_to_fp16, x = var_6695_cast)[name = tensor("denom_203_cast")]; + tensor out_203_cast = mul(x = zero_mean_203_cast, y = denom_203_cast)[name = tensor("out_203_cast")]; + tensor var_6699_to_fp16 = const()[name = tensor("op_6699_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(893863232)))]; + tensor var_6700_cast = add(x = out_203_cast, y = var_6699_to_fp16)[name = tensor("op_6700_cast")]; + tensor var_6702_to_fp16 = const()[name = tensor("op_6702_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(893865856)))]; + tensor input_403_cast = mul(x = var_6700_cast, y = var_6702_to_fp16)[name = tensor("input_403_cast")]; + tensor var_6710 = const()[name = tensor("op_6710"), val = tensor([1, 1])]; + tensor var_6712 = const()[name = tensor("op_6712"), val = tensor([1, 1])]; + tensor var_6714_pad_type_0 = const()[name = tensor("op_6714_pad_type_0"), val = tensor("custom")]; + tensor var_6714_pad_0 = const()[name = tensor("op_6714_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(893868480))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(903698944))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor unet_mid_block_attentions_0_transformer_blocks_9_ff_net_0_proj_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(903699136))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(903706880))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_9_ff_net_0_proj_bias_to_fp16_palettized"), shape = tensor([10240])]; + tensor var_6714_cast = conv(bias = unet_mid_block_attentions_0_transformer_blocks_9_ff_net_0_proj_bias_to_fp16_palettized, dilations = var_6712, groups = var_31, pad = var_6714_pad_0, pad_type = 
var_6714_pad_type_0, strides = var_6710, weight = unet_mid_block_attentions_0_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized, x = input_403_cast)[name = tensor("op_6714_cast")]; + tensor var_6715_split_sizes_0 = const()[name = tensor("op_6715_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_6715_axis_0 = const()[name = tensor("op_6715_axis_0"), val = tensor(1)]; + tensor var_6715_cast_0, tensor var_6715_cast_1 = split(axis = var_6715_axis_0, split_sizes = var_6715_split_sizes_0, x = var_6714_cast)[name = tensor("op_6715_cast")]; + tensor var_6717_mode_0 = const()[name = tensor("op_6717_mode_0"), val = tensor("EXACT")]; + tensor var_6717_cast = gelu(mode = var_6717_mode_0, x = var_6715_cast_1)[name = tensor("op_6717_cast")]; + tensor input_405_cast = mul(x = var_6715_cast_0, y = var_6717_cast)[name = tensor("input_405_cast")]; + tensor var_6721 = const()[name = tensor("op_6721"), val = tensor([1, 1])]; + tensor var_6723 = const()[name = tensor("op_6723"), val = tensor([1, 1])]; + tensor var_6725_pad_type_0 = const()[name = tensor("op_6725_pad_type_0"), val = tensor("custom")]; + tensor var_6725_pad_0 = const()[name = tensor("op_6725_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(903707072))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(908622336))), name = tensor("unet_mid_block_attentions_0_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor unet_mid_block_attentions_0_transformer_blocks_9_ff_net_2_bias_to_fp16 = const()[name = tensor("unet_mid_block_attentions_0_transformer_blocks_9_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(908622528)))]; + tensor var_6725_cast = conv(bias = unet_mid_block_attentions_0_transformer_blocks_9_ff_net_2_bias_to_fp16, dilations = var_6723, groups = var_31, pad = var_6725_pad_0, pad_type = var_6725_pad_type_0, strides = var_6721, weight = unet_mid_block_attentions_0_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized, x = input_405_cast)[name = tensor("op_6725_cast")]; + tensor hidden_states_269_cast = add(x = var_6725_cast, y = inputs_203_cast)[name = tensor("hidden_states_269_cast")]; + tensor var_6727 = const()[name = tensor("op_6727"), val = tensor([2, 1280, 32, 32])]; + tensor input_407_cast = reshape(shape = var_6727, x = hidden_states_269_cast)[name = tensor("input_407_cast")]; + tensor var_6731 = const()[name = tensor("op_6731"), val = tensor([1, 1])]; + tensor var_6733 = const()[name = tensor("op_6733"), val = tensor([1, 1])]; + tensor hidden_states_271_pad_type_0 = const()[name = tensor("hidden_states_271_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_271_pad_0 = const()[name = tensor("hidden_states_271_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_attentions_0_proj_out_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(908625152))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(909854016))), name = tensor("unet_mid_block_attentions_0_proj_out_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_mid_block_attentions_0_proj_out_bias_to_fp16 = const()[name = 
tensor("unet_mid_block_attentions_0_proj_out_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(909854208)))]; + tensor hidden_states_271_cast = conv(bias = unet_mid_block_attentions_0_proj_out_bias_to_fp16, dilations = var_6733, groups = var_31, pad = hidden_states_271_pad_0, pad_type = hidden_states_271_pad_type_0, strides = var_6731, weight = unet_mid_block_attentions_0_proj_out_weight_to_fp16_palettized, x = input_407_cast)[name = tensor("hidden_states_271_cast")]; + tensor input_409_cast = add(x = hidden_states_271_cast, y = hidden_states_205_cast)[name = tensor("input_409_cast")]; + tensor reshape_76_shape_0 = const()[name = tensor("reshape_76_shape_0"), val = tensor([2, 32, 40, 32, 32])]; + tensor reshape_76_cast = reshape(shape = reshape_76_shape_0, x = input_409_cast)[name = tensor("reshape_76_cast")]; + tensor reduce_mean_57_axes_0 = const()[name = tensor("reduce_mean_57_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_57_keep_dims_0 = const()[name = tensor("reduce_mean_57_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_57_cast = reduce_mean(axes = reduce_mean_57_axes_0, keep_dims = reduce_mean_57_keep_dims_0, x = reshape_76_cast)[name = tensor("reduce_mean_57_cast")]; + tensor sub_38_cast = sub(x = reshape_76_cast, y = reduce_mean_57_cast)[name = tensor("sub_38_cast")]; + tensor square_19_cast = square(x = sub_38_cast)[name = tensor("square_19_cast")]; + tensor reduce_mean_59_axes_0 = const()[name = tensor("reduce_mean_59_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_59_keep_dims_0 = const()[name = tensor("reduce_mean_59_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_59_cast = reduce_mean(axes = reduce_mean_59_axes_0, keep_dims = reduce_mean_59_keep_dims_0, x = square_19_cast)[name = tensor("reduce_mean_59_cast")]; + tensor add_38_y_0_to_fp16 = const()[name = tensor("add_38_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_38_cast = add(x = reduce_mean_59_cast, y = add_38_y_0_to_fp16)[name = tensor("add_38_cast")]; + tensor sqrt_19_cast = sqrt(x = add_38_cast)[name = tensor("sqrt_19_cast")]; + tensor real_div_19_cast = real_div(x = sub_38_cast, y = sqrt_19_cast)[name = tensor("real_div_19_cast")]; + tensor reshape_77_shape_0 = const()[name = tensor("reshape_77_shape_0"), val = tensor([2, 1280, 32, 32])]; + tensor reshape_77_cast = reshape(shape = reshape_77_shape_0, x = real_div_19_cast)[name = tensor("reshape_77_cast")]; + tensor add_39_gamma_0_to_fp16 = const()[name = tensor("add_39_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(909856832)))]; + tensor add_39_beta_0_to_fp16 = const()[name = tensor("add_39_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(909859456)))]; + tensor add_39_epsilon_0_to_fp16 = const()[name = tensor("add_39_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_39_cast = batch_norm(beta = add_39_beta_0_to_fp16, epsilon = add_39_epsilon_0_to_fp16, gamma = add_39_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_77_cast)[name = tensor("add_39_cast")]; + tensor input_413_cast = silu(x = add_39_cast)[name = tensor("input_413_cast")]; + tensor var_6748 = const()[name = tensor("op_6748"), val = tensor([1, 1])]; + tensor var_6750 = const()[name = tensor("op_6750"), val = tensor([1, 1])]; + tensor hidden_states_273_pad_type_0 = const()[name = tensor("hidden_states_273_pad_type_0"), val = 
tensor("custom")]; + tensor hidden_states_273_pad_0 = const()[name = tensor("hidden_states_273_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor unet_mid_block_resnets_1_conv1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(909862080))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(920921344))), name = tensor("unet_mid_block_resnets_1_conv1_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 3, 3])]; + tensor unet_mid_block_resnets_1_conv1_bias_to_fp16 = const()[name = tensor("unet_mid_block_resnets_1_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(920921536)))]; + tensor hidden_states_273_cast = conv(bias = unet_mid_block_resnets_1_conv1_bias_to_fp16, dilations = var_6750, groups = var_31, pad = hidden_states_273_pad_0, pad_type = hidden_states_273_pad_type_0, strides = var_6748, weight = unet_mid_block_resnets_1_conv1_weight_to_fp16_palettized, x = input_413_cast)[name = tensor("hidden_states_273_cast")]; + tensor var_6756 = const()[name = tensor("op_6756"), val = tensor([1, 1])]; + tensor var_6758 = const()[name = tensor("op_6758"), val = tensor([1, 1])]; + tensor temb_15_pad_type_0 = const()[name = tensor("temb_15_pad_type_0"), val = tensor("custom")]; + tensor temb_15_pad_0 = const()[name = tensor("temb_15_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_mid_block_resnets_1_time_emb_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(920924160))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(922153024))), name = tensor("unet_mid_block_resnets_1_time_emb_proj_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_mid_block_resnets_1_time_emb_proj_bias_to_fp16 = const()[name = tensor("unet_mid_block_resnets_1_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(922153216)))]; + tensor temb_15_cast = conv(bias = unet_mid_block_resnets_1_time_emb_proj_bias_to_fp16, dilations = var_6758, groups = var_31, pad = temb_15_pad_0, pad_type = temb_15_pad_type_0, strides = var_6756, weight = unet_mid_block_resnets_1_time_emb_proj_weight_to_fp16_palettized, x = input_21_cast)[name = tensor("temb_15_cast")]; + tensor input_417_cast = add(x = hidden_states_273_cast, y = temb_15_cast)[name = tensor("input_417_cast")]; + tensor reshape_80_shape_0 = const()[name = tensor("reshape_80_shape_0"), val = tensor([2, 32, 40, 32, 32])]; + tensor reshape_80_cast = reshape(shape = reshape_80_shape_0, x = input_417_cast)[name = tensor("reshape_80_cast")]; + tensor reduce_mean_60_axes_0 = const()[name = tensor("reduce_mean_60_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_60_keep_dims_0 = const()[name = tensor("reduce_mean_60_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_60_cast = reduce_mean(axes = reduce_mean_60_axes_0, keep_dims = reduce_mean_60_keep_dims_0, x = reshape_80_cast)[name = tensor("reduce_mean_60_cast")]; + tensor sub_40_cast = sub(x = reshape_80_cast, y = reduce_mean_60_cast)[name = tensor("sub_40_cast")]; + tensor square_20_cast = square(x = sub_40_cast)[name = tensor("square_20_cast")]; + tensor reduce_mean_62_axes_0 = const()[name = tensor("reduce_mean_62_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_62_keep_dims_0 = const()[name = 
tensor("reduce_mean_62_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_62_cast = reduce_mean(axes = reduce_mean_62_axes_0, keep_dims = reduce_mean_62_keep_dims_0, x = square_20_cast)[name = tensor("reduce_mean_62_cast")]; + tensor add_40_y_0_to_fp16 = const()[name = tensor("add_40_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_40_cast = add(x = reduce_mean_62_cast, y = add_40_y_0_to_fp16)[name = tensor("add_40_cast")]; + tensor sqrt_20_cast = sqrt(x = add_40_cast)[name = tensor("sqrt_20_cast")]; + tensor real_div_20_cast = real_div(x = sub_40_cast, y = sqrt_20_cast)[name = tensor("real_div_20_cast")]; + tensor reshape_81_shape_0 = const()[name = tensor("reshape_81_shape_0"), val = tensor([2, 1280, 32, 32])]; + tensor reshape_81_cast = reshape(shape = reshape_81_shape_0, x = real_div_20_cast)[name = tensor("reshape_81_cast")]; + tensor add_41_gamma_0_to_fp16 = const()[name = tensor("add_41_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(922155840)))]; + tensor add_41_beta_0_to_fp16 = const()[name = tensor("add_41_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(922158464)))]; + tensor add_41_epsilon_0_to_fp16 = const()[name = tensor("add_41_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_41_cast = batch_norm(beta = add_41_beta_0_to_fp16, epsilon = add_41_epsilon_0_to_fp16, gamma = add_41_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_81_cast)[name = tensor("add_41_cast")]; + tensor input_421_cast = silu(x = add_41_cast)[name = tensor("input_421_cast")]; + tensor var_6768 = const()[name = tensor("op_6768"), val = tensor([1, 1])]; + tensor var_6770 = const()[name = tensor("op_6770"), val = tensor([1, 1])]; + tensor hidden_states_275_pad_type_0 = const()[name = tensor("hidden_states_275_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_275_pad_0 = const()[name = tensor("hidden_states_275_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor unet_mid_block_resnets_1_conv2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(922161088))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(933220352))), name = tensor("unet_mid_block_resnets_1_conv2_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 3, 3])]; + tensor unet_mid_block_resnets_1_conv2_bias_to_fp16 = const()[name = tensor("unet_mid_block_resnets_1_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(933220544)))]; + tensor hidden_states_275_cast = conv(bias = unet_mid_block_resnets_1_conv2_bias_to_fp16, dilations = var_6770, groups = var_31, pad = hidden_states_275_pad_0, pad_type = hidden_states_275_pad_type_0, strides = var_6768, weight = unet_mid_block_resnets_1_conv2_weight_to_fp16_palettized, x = input_421_cast)[name = tensor("hidden_states_275_cast")]; + tensor hidden_states_277_cast = add(x = input_409_cast, y = hidden_states_275_cast)[name = tensor("hidden_states_277_cast")]; + tensor input_423_interleave_0 = const()[name = tensor("input_423_interleave_0"), val = tensor(false)]; + tensor input_423_cast = concat(axis = var_31, interleave = input_423_interleave_0, values = (hidden_states_277_cast, input_311_cast))[name = tensor("input_423_cast")]; + tensor reshape_84_shape_0 = const()[name = tensor("reshape_84_shape_0"), val = tensor([2, 32, 80, 32, 
32])]; + tensor reshape_84_cast = reshape(shape = reshape_84_shape_0, x = input_423_cast)[name = tensor("reshape_84_cast")]; + tensor reduce_mean_63_axes_0 = const()[name = tensor("reduce_mean_63_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_63_keep_dims_0 = const()[name = tensor("reduce_mean_63_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_63_cast = reduce_mean(axes = reduce_mean_63_axes_0, keep_dims = reduce_mean_63_keep_dims_0, x = reshape_84_cast)[name = tensor("reduce_mean_63_cast")]; + tensor sub_42_cast = sub(x = reshape_84_cast, y = reduce_mean_63_cast)[name = tensor("sub_42_cast")]; + tensor square_21_cast = square(x = sub_42_cast)[name = tensor("square_21_cast")]; + tensor reduce_mean_65_axes_0 = const()[name = tensor("reduce_mean_65_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_65_keep_dims_0 = const()[name = tensor("reduce_mean_65_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_65_cast = reduce_mean(axes = reduce_mean_65_axes_0, keep_dims = reduce_mean_65_keep_dims_0, x = square_21_cast)[name = tensor("reduce_mean_65_cast")]; + tensor add_42_y_0_to_fp16 = const()[name = tensor("add_42_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_42_cast = add(x = reduce_mean_65_cast, y = add_42_y_0_to_fp16)[name = tensor("add_42_cast")]; + tensor sqrt_21_cast = sqrt(x = add_42_cast)[name = tensor("sqrt_21_cast")]; + tensor real_div_21_cast = real_div(x = sub_42_cast, y = sqrt_21_cast)[name = tensor("real_div_21_cast")]; + tensor reshape_85_shape_0 = const()[name = tensor("reshape_85_shape_0"), val = tensor([2, 2560, 32, 32])]; + tensor reshape_85_cast = reshape(shape = reshape_85_shape_0, x = real_div_21_cast)[name = tensor("reshape_85_cast")]; + tensor add_43_mean_0_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(933223168))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(933225152))), name = tensor("add_43_mean_0_to_fp16_palettized"), shape = tensor([2560])]; + tensor add_43_variance_0_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(933225344))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(933227328))), name = tensor("add_43_variance_0_to_fp16_palettized"), shape = tensor([2560])]; + tensor add_43_gamma_0_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(933227520))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(933229504))), name = tensor("add_43_gamma_0_to_fp16_palettized"), shape = tensor([2560])]; + tensor add_43_beta_0_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(933229696))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(933231680))), name = tensor("add_43_beta_0_to_fp16_palettized"), shape = tensor([2560])]; + tensor add_43_epsilon_0_to_fp16 = const()[name = tensor("add_43_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_43_cast = batch_norm(beta = add_43_beta_0_to_fp16_palettized, epsilon = add_43_epsilon_0_to_fp16, gamma = add_43_gamma_0_to_fp16_palettized, mean = add_43_mean_0_to_fp16_palettized, variance = add_43_variance_0_to_fp16_palettized, x = reshape_85_cast)[name = tensor("add_43_cast")]; + tensor 
input_427_cast = silu(x = add_43_cast)[name = tensor("input_427_cast")]; + tensor var_6802 = const()[name = tensor("op_6802"), val = tensor([1, 1])]; + tensor var_6804 = const()[name = tensor("op_6804"), val = tensor([1, 1])]; + tensor hidden_states_279_pad_type_0 = const()[name = tensor("hidden_states_279_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_279_pad_0 = const()[name = tensor("hidden_states_279_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor unet_up_blocks_0_resnets_0_conv1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(933231872))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(955350336))), name = tensor("unet_up_blocks_0_resnets_0_conv1_weight_to_fp16_palettized"), shape = tensor([1280, 2560, 3, 3])]; + tensor unet_up_blocks_0_resnets_0_conv1_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_resnets_0_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(955350528)))]; + tensor hidden_states_279_cast = conv(bias = unet_up_blocks_0_resnets_0_conv1_bias_to_fp16, dilations = var_6804, groups = var_31, pad = hidden_states_279_pad_0, pad_type = hidden_states_279_pad_type_0, strides = var_6802, weight = unet_up_blocks_0_resnets_0_conv1_weight_to_fp16_palettized, x = input_427_cast)[name = tensor("hidden_states_279_cast")]; + tensor var_6810 = const()[name = tensor("op_6810"), val = tensor([1, 1])]; + tensor var_6812 = const()[name = tensor("op_6812"), val = tensor([1, 1])]; + tensor temb_17_pad_type_0 = const()[name = tensor("temb_17_pad_type_0"), val = tensor("custom")]; + tensor temb_17_pad_0 = const()[name = tensor("temb_17_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_resnets_0_time_emb_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(955353152))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(956582016))), name = tensor("unet_up_blocks_0_resnets_0_time_emb_proj_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_up_blocks_0_resnets_0_time_emb_proj_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_resnets_0_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(956582208)))]; + tensor temb_17_cast = conv(bias = unet_up_blocks_0_resnets_0_time_emb_proj_bias_to_fp16, dilations = var_6812, groups = var_31, pad = temb_17_pad_0, pad_type = temb_17_pad_type_0, strides = var_6810, weight = unet_up_blocks_0_resnets_0_time_emb_proj_weight_to_fp16_palettized, x = input_21_cast)[name = tensor("temb_17_cast")]; + tensor input_431_cast = add(x = hidden_states_279_cast, y = temb_17_cast)[name = tensor("input_431_cast")]; + tensor reshape_88_shape_0 = const()[name = tensor("reshape_88_shape_0"), val = tensor([2, 32, 40, 32, 32])]; + tensor reshape_88_cast = reshape(shape = reshape_88_shape_0, x = input_431_cast)[name = tensor("reshape_88_cast")]; + tensor reduce_mean_66_axes_0 = const()[name = tensor("reduce_mean_66_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_66_keep_dims_0 = const()[name = tensor("reduce_mean_66_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_66_cast = reduce_mean(axes = reduce_mean_66_axes_0, keep_dims = reduce_mean_66_keep_dims_0, x = reshape_88_cast)[name = tensor("reduce_mean_66_cast")]; + 
tensor sub_44_cast = sub(x = reshape_88_cast, y = reduce_mean_66_cast)[name = tensor("sub_44_cast")]; + tensor square_22_cast = square(x = sub_44_cast)[name = tensor("square_22_cast")]; + tensor reduce_mean_68_axes_0 = const()[name = tensor("reduce_mean_68_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_68_keep_dims_0 = const()[name = tensor("reduce_mean_68_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_68_cast = reduce_mean(axes = reduce_mean_68_axes_0, keep_dims = reduce_mean_68_keep_dims_0, x = square_22_cast)[name = tensor("reduce_mean_68_cast")]; + tensor add_44_y_0_to_fp16 = const()[name = tensor("add_44_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_44_cast = add(x = reduce_mean_68_cast, y = add_44_y_0_to_fp16)[name = tensor("add_44_cast")]; + tensor sqrt_22_cast = sqrt(x = add_44_cast)[name = tensor("sqrt_22_cast")]; + tensor real_div_22_cast = real_div(x = sub_44_cast, y = sqrt_22_cast)[name = tensor("real_div_22_cast")]; + tensor reshape_89_shape_0 = const()[name = tensor("reshape_89_shape_0"), val = tensor([2, 1280, 32, 32])]; + tensor reshape_89_cast = reshape(shape = reshape_89_shape_0, x = real_div_22_cast)[name = tensor("reshape_89_cast")]; + tensor add_45_gamma_0_to_fp16 = const()[name = tensor("add_45_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(956584832)))]; + tensor add_45_beta_0_to_fp16 = const()[name = tensor("add_45_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(956587456)))]; + tensor add_45_epsilon_0_to_fp16 = const()[name = tensor("add_45_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_45_cast = batch_norm(beta = add_45_beta_0_to_fp16, epsilon = add_45_epsilon_0_to_fp16, gamma = add_45_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_89_cast)[name = tensor("add_45_cast")]; + tensor input_435_cast = silu(x = add_45_cast)[name = tensor("input_435_cast")]; + tensor var_6822 = const()[name = tensor("op_6822"), val = tensor([1, 1])]; + tensor var_6824 = const()[name = tensor("op_6824"), val = tensor([1, 1])]; + tensor hidden_states_281_pad_type_0 = const()[name = tensor("hidden_states_281_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_281_pad_0 = const()[name = tensor("hidden_states_281_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor unet_up_blocks_0_resnets_0_conv2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(956590080))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(967649344))), name = tensor("unet_up_blocks_0_resnets_0_conv2_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 3, 3])]; + tensor unet_up_blocks_0_resnets_0_conv2_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_resnets_0_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(967649536)))]; + tensor hidden_states_281_cast = conv(bias = unet_up_blocks_0_resnets_0_conv2_bias_to_fp16, dilations = var_6824, groups = var_31, pad = hidden_states_281_pad_0, pad_type = hidden_states_281_pad_type_0, strides = var_6822, weight = unet_up_blocks_0_resnets_0_conv2_weight_to_fp16_palettized, x = input_435_cast)[name = tensor("hidden_states_281_cast")]; + tensor var_6829 = const()[name = tensor("op_6829"), val = tensor([1, 1])]; + tensor var_6831 = const()[name = tensor("op_6831"), val = tensor([1, 
1])]; + tensor x_5_pad_type_0 = const()[name = tensor("x_5_pad_type_0"), val = tensor("custom")]; + tensor x_5_pad_0 = const()[name = tensor("x_5_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_resnets_0_conv_shortcut_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(967652160))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(970109824))), name = tensor("unet_up_blocks_0_resnets_0_conv_shortcut_weight_to_fp16_palettized"), shape = tensor([1280, 2560, 1, 1])]; + tensor unet_up_blocks_0_resnets_0_conv_shortcut_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_resnets_0_conv_shortcut_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(970110016)))]; + tensor x_5_cast = conv(bias = unet_up_blocks_0_resnets_0_conv_shortcut_bias_to_fp16, dilations = var_6831, groups = var_31, pad = x_5_pad_0, pad_type = x_5_pad_type_0, strides = var_6829, weight = unet_up_blocks_0_resnets_0_conv_shortcut_weight_to_fp16_palettized, x = input_423_cast)[name = tensor("x_5_cast")]; + tensor hidden_states_283_cast = add(x = x_5_cast, y = hidden_states_281_cast)[name = tensor("hidden_states_283_cast")]; + tensor reshape_92_shape_0 = const()[name = tensor("reshape_92_shape_0"), val = tensor([2, 32, 40, 32, 32])]; + tensor reshape_92_cast = reshape(shape = reshape_92_shape_0, x = hidden_states_283_cast)[name = tensor("reshape_92_cast")]; + tensor reduce_mean_69_axes_0 = const()[name = tensor("reduce_mean_69_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_69_keep_dims_0 = const()[name = tensor("reduce_mean_69_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_69_cast = reduce_mean(axes = reduce_mean_69_axes_0, keep_dims = reduce_mean_69_keep_dims_0, x = reshape_92_cast)[name = tensor("reduce_mean_69_cast")]; + tensor sub_46_cast = sub(x = reshape_92_cast, y = reduce_mean_69_cast)[name = tensor("sub_46_cast")]; + tensor square_23_cast = square(x = sub_46_cast)[name = tensor("square_23_cast")]; + tensor reduce_mean_71_axes_0 = const()[name = tensor("reduce_mean_71_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_71_keep_dims_0 = const()[name = tensor("reduce_mean_71_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_71_cast = reduce_mean(axes = reduce_mean_71_axes_0, keep_dims = reduce_mean_71_keep_dims_0, x = square_23_cast)[name = tensor("reduce_mean_71_cast")]; + tensor add_46_y_0_to_fp16 = const()[name = tensor("add_46_y_0_to_fp16"), val = tensor(0x1.1p-20)]; + tensor add_46_cast = add(x = reduce_mean_71_cast, y = add_46_y_0_to_fp16)[name = tensor("add_46_cast")]; + tensor sqrt_23_cast = sqrt(x = add_46_cast)[name = tensor("sqrt_23_cast")]; + tensor real_div_23_cast = real_div(x = sub_46_cast, y = sqrt_23_cast)[name = tensor("real_div_23_cast")]; + tensor reshape_93_shape_0 = const()[name = tensor("reshape_93_shape_0"), val = tensor([2, 1280, 32, 32])]; + tensor reshape_93_cast = reshape(shape = reshape_93_shape_0, x = real_div_23_cast)[name = tensor("reshape_93_cast")]; + tensor add_47_gamma_0_to_fp16 = const()[name = tensor("add_47_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(970112640)))]; + tensor add_47_beta_0_to_fp16 = const()[name = tensor("add_47_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(970115264)))]; + tensor add_47_epsilon_0_to_fp16 = const()[name = 
tensor("add_47_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_47_cast = batch_norm(beta = add_47_beta_0_to_fp16, epsilon = add_47_epsilon_0_to_fp16, gamma = add_47_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_93_cast)[name = tensor("add_47_cast")]; + tensor var_6869 = const()[name = tensor("op_6869"), val = tensor([1, 1])]; + tensor var_6871 = const()[name = tensor("op_6871"), val = tensor([1, 1])]; + tensor hidden_states_285_pad_type_0 = const()[name = tensor("hidden_states_285_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_285_pad_0 = const()[name = tensor("hidden_states_285_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_proj_in_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(970117888))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(971346752))), name = tensor("unet_up_blocks_0_attentions_0_proj_in_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_0_proj_in_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_0_proj_in_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(971346944)))]; + tensor hidden_states_285_cast = conv(bias = unet_up_blocks_0_attentions_0_proj_in_bias_to_fp16, dilations = var_6871, groups = var_31, pad = hidden_states_285_pad_0, pad_type = hidden_states_285_pad_type_0, strides = var_6869, weight = unet_up_blocks_0_attentions_0_proj_in_weight_to_fp16_palettized, x = add_47_cast)[name = tensor("hidden_states_285_cast")]; + tensor var_6876 = const()[name = tensor("op_6876"), val = tensor([2, 1280, 1, 1024])]; + tensor inputs_205_cast = reshape(shape = var_6876, x = hidden_states_285_cast)[name = tensor("inputs_205_cast")]; + tensor var_6886 = const()[name = tensor("op_6886"), val = tensor([1])]; + tensor channels_mean_205_cast = reduce_mean(axes = var_6886, keep_dims = var_23, x = inputs_205_cast)[name = tensor("channels_mean_205_cast")]; + tensor zero_mean_205_cast = sub(x = inputs_205_cast, y = channels_mean_205_cast)[name = tensor("zero_mean_205_cast")]; + tensor zero_mean_sq_205_cast = mul(x = zero_mean_205_cast, y = zero_mean_205_cast)[name = tensor("zero_mean_sq_205_cast")]; + tensor var_6890 = const()[name = tensor("op_6890"), val = tensor([1])]; + tensor var_6891_cast = reduce_mean(axes = var_6890, keep_dims = var_23, x = zero_mean_sq_205_cast)[name = tensor("op_6891_cast")]; + tensor var_6892_to_fp16 = const()[name = tensor("op_6892_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_6893_cast = add(x = var_6891_cast, y = var_6892_to_fp16)[name = tensor("op_6893_cast")]; + tensor denom_205_epsilon_0_to_fp16 = const()[name = tensor("denom_205_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_205_cast = rsqrt(epsilon = denom_205_epsilon_0_to_fp16, x = var_6893_cast)[name = tensor("denom_205_cast")]; + tensor out_205_cast = mul(x = zero_mean_205_cast, y = denom_205_cast)[name = tensor("out_205_cast")]; + tensor var_6897_to_fp16 = const()[name = tensor("op_6897_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(971349568)))]; + tensor var_6898_cast = add(x = out_205_cast, y = var_6897_to_fp16)[name = tensor("op_6898_cast")]; + tensor var_6900_to_fp16 = const()[name = tensor("op_6900_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(971352192)))]; + tensor hidden_states_287_cast = mul(x = var_6898_cast, y = var_6900_to_fp16)[name = tensor("hidden_states_287_cast")]; + tensor var_6907 = const()[name = tensor("op_6907"), val = tensor([1, 1])]; + tensor var_6909 = const()[name = tensor("op_6909"), val = tensor([1, 1])]; + tensor q_137_pad_type_0 = const()[name = tensor("q_137_pad_type_0"), val = tensor("custom")]; + tensor q_137_pad_0 = const()[name = tensor("q_137_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(971354816))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(972583680))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_137_cast = conv(dilations = var_6909, groups = var_31, pad = q_137_pad_0, pad_type = q_137_pad_type_0, strides = var_6907, weight = unet_up_blocks_0_attentions_0_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_287_cast)[name = tensor("q_137_cast")]; + tensor var_6913 = const()[name = tensor("op_6913"), val = tensor([1, 1])]; + tensor var_6915 = const()[name = tensor("op_6915"), val = tensor([1, 1])]; + tensor k_137_pad_type_0 = const()[name = tensor("k_137_pad_type_0"), val = tensor("custom")]; + tensor k_137_pad_0 = const()[name = tensor("k_137_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(972583872))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(973812736))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_137_cast = conv(dilations = var_6915, groups = var_31, pad = k_137_pad_0, pad_type = k_137_pad_type_0, strides = var_6913, weight = unet_up_blocks_0_attentions_0_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_287_cast)[name = tensor("k_137_cast")]; + tensor var_6919 = const()[name = tensor("op_6919"), val = tensor([1, 1])]; + tensor var_6921 = const()[name = tensor("op_6921"), val = tensor([1, 1])]; + tensor v_137_pad_type_0 = const()[name = tensor("v_137_pad_type_0"), val = tensor("custom")]; + tensor v_137_pad_0 = const()[name = tensor("v_137_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(973812928))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(975041792))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_137_cast = conv(dilations = var_6921, groups = var_31, pad = v_137_pad_0, pad_type = v_137_pad_type_0, strides = var_6919, weight = unet_up_blocks_0_attentions_0_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_287_cast)[name = tensor("v_137_cast")]; + tensor var_6925 = const()[name = 
tensor("op_6925"), val = tensor([2, 20, 64, -1])]; + tensor var_6926_cast = reshape(shape = var_6925, x = q_137_cast)[name = tensor("op_6926_cast")]; + tensor var_6927 = const()[name = tensor("op_6927"), val = tensor([2, 20, 64, -1])]; + tensor var_6928_cast = reshape(shape = var_6927, x = k_137_cast)[name = tensor("op_6928_cast")]; + tensor var_6929 = const()[name = tensor("op_6929"), val = tensor([2, 20, 64, -1])]; + tensor var_6930_cast = reshape(shape = var_6929, x = v_137_cast)[name = tensor("op_6930_cast")]; + tensor attn_weights_273_transpose_x_0 = const()[name = tensor("attn_weights_273_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_273_transpose_y_0 = const()[name = tensor("attn_weights_273_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_273_cast = matmul(transpose_x = attn_weights_273_transpose_x_0, transpose_y = attn_weights_273_transpose_y_0, x = var_6926_cast, y = var_6928_cast)[name = tensor("attn_weights_273_cast")]; + tensor attn_weights_275_cast = mul(x = attn_weights_273_cast, y = var_12_to_fp16)[name = tensor("attn_weights_275_cast")]; + tensor var_6934_cast = softmax(axis = var_18, x = attn_weights_275_cast)[name = tensor("op_6934_cast")]; + tensor attn_137_transpose_x_0 = const()[name = tensor("attn_137_transpose_x_0"), val = tensor(false)]; + tensor attn_137_transpose_y_0 = const()[name = tensor("attn_137_transpose_y_0"), val = tensor(true)]; + tensor attn_137_cast = matmul(transpose_x = attn_137_transpose_x_0, transpose_y = attn_137_transpose_y_0, x = var_6930_cast, y = var_6934_cast)[name = tensor("attn_137_cast")]; + tensor var_6938 = const()[name = tensor("op_6938"), val = tensor([2, 1280, 1, -1])]; + tensor input_439_cast = reshape(shape = var_6938, x = attn_137_cast)[name = tensor("input_439_cast")]; + tensor var_6943 = const()[name = tensor("op_6943"), val = tensor([1, 1])]; + tensor var_6945 = const()[name = tensor("op_6945"), val = tensor([1, 1])]; + tensor var_6947_pad_type_0 = const()[name = tensor("op_6947_pad_type_0"), val = tensor("custom")]; + tensor var_6947_pad_0 = const()[name = tensor("op_6947_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(975041984))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(976270848))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_0_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_0_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(976271040)))]; + tensor var_6947_cast = conv(bias = unet_up_blocks_0_attentions_0_transformer_blocks_0_attn1_to_out_0_bias_to_fp16, dilations = var_6945, groups = var_31, pad = var_6947_pad_0, pad_type = var_6947_pad_type_0, strides = var_6943, weight = unet_up_blocks_0_attentions_0_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized, x = input_439_cast)[name = tensor("op_6947_cast")]; + tensor inputs_207_cast = add(x = var_6947_cast, y = inputs_205_cast)[name = tensor("inputs_207_cast")]; + tensor var_6951 = const()[name = tensor("op_6951"), val = tensor([1])]; + tensor channels_mean_207_cast = reduce_mean(axes = var_6951, keep_dims = 
var_23, x = inputs_207_cast)[name = tensor("channels_mean_207_cast")]; + tensor zero_mean_207_cast = sub(x = inputs_207_cast, y = channels_mean_207_cast)[name = tensor("zero_mean_207_cast")]; + tensor zero_mean_sq_207_cast = mul(x = zero_mean_207_cast, y = zero_mean_207_cast)[name = tensor("zero_mean_sq_207_cast")]; + tensor var_6955 = const()[name = tensor("op_6955"), val = tensor([1])]; + tensor var_6956_cast = reduce_mean(axes = var_6955, keep_dims = var_23, x = zero_mean_sq_207_cast)[name = tensor("op_6956_cast")]; + tensor var_6957_to_fp16 = const()[name = tensor("op_6957_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_6958_cast = add(x = var_6956_cast, y = var_6957_to_fp16)[name = tensor("op_6958_cast")]; + tensor denom_207_epsilon_0_to_fp16 = const()[name = tensor("denom_207_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_207_cast = rsqrt(epsilon = denom_207_epsilon_0_to_fp16, x = var_6958_cast)[name = tensor("denom_207_cast")]; + tensor out_207_cast = mul(x = zero_mean_207_cast, y = denom_207_cast)[name = tensor("out_207_cast")]; + tensor var_6962_to_fp16 = const()[name = tensor("op_6962_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(976273664)))]; + tensor var_6963_cast = add(x = out_207_cast, y = var_6962_to_fp16)[name = tensor("op_6963_cast")]; + tensor var_6965_to_fp16 = const()[name = tensor("op_6965_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(976276288)))]; + tensor hidden_states_289_cast = mul(x = var_6963_cast, y = var_6965_to_fp16)[name = tensor("hidden_states_289_cast")]; + tensor var_6972 = const()[name = tensor("op_6972"), val = tensor([1, 1])]; + tensor var_6974 = const()[name = tensor("op_6974"), val = tensor([1, 1])]; + tensor q_139_pad_type_0 = const()[name = tensor("q_139_pad_type_0"), val = tensor("custom")]; + tensor q_139_pad_0 = const()[name = tensor("q_139_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(976278912))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(977507776))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_139_cast = conv(dilations = var_6974, groups = var_31, pad = q_139_pad_0, pad_type = q_139_pad_type_0, strides = var_6972, weight = unet_up_blocks_0_attentions_0_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_289_cast)[name = tensor("q_139_cast")]; + tensor var_6978 = const()[name = tensor("op_6978"), val = tensor([1, 1])]; + tensor var_6980 = const()[name = tensor("op_6980"), val = tensor([1, 1])]; + tensor k_139_pad_type_0 = const()[name = tensor("k_139_pad_type_0"), val = tensor("custom")]; + tensor k_139_pad_0 = const()[name = tensor("k_139_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(977507968))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(979474112))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + 
tensor k_139_cast = conv(dilations = var_6980, groups = var_31, pad = k_139_pad_0, pad_type = k_139_pad_type_0, strides = var_6978, weight = unet_up_blocks_0_attentions_0_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_139_cast")]; + tensor var_6984 = const()[name = tensor("op_6984"), val = tensor([1, 1])]; + tensor var_6986 = const()[name = tensor("op_6986"), val = tensor([1, 1])]; + tensor v_139_pad_type_0 = const()[name = tensor("v_139_pad_type_0"), val = tensor("custom")]; + tensor v_139_pad_0 = const()[name = tensor("v_139_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(979474304))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(981440448))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_139_cast = conv(dilations = var_6986, groups = var_31, pad = v_139_pad_0, pad_type = v_139_pad_type_0, strides = var_6984, weight = unet_up_blocks_0_attentions_0_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_139_cast")]; + tensor var_6990 = const()[name = tensor("op_6990"), val = tensor([2, 20, 64, -1])]; + tensor var_6991_cast = reshape(shape = var_6990, x = q_139_cast)[name = tensor("op_6991_cast")]; + tensor var_6992 = const()[name = tensor("op_6992"), val = tensor([2, 20, 64, -1])]; + tensor var_6993_cast = reshape(shape = var_6992, x = k_139_cast)[name = tensor("op_6993_cast")]; + tensor var_6994 = const()[name = tensor("op_6994"), val = tensor([2, 20, 64, -1])]; + tensor var_6995_cast = reshape(shape = var_6994, x = v_139_cast)[name = tensor("op_6995_cast")]; + tensor attn_weights_277_transpose_x_0 = const()[name = tensor("attn_weights_277_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_277_transpose_y_0 = const()[name = tensor("attn_weights_277_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_277_cast = matmul(transpose_x = attn_weights_277_transpose_x_0, transpose_y = attn_weights_277_transpose_y_0, x = var_6991_cast, y = var_6993_cast)[name = tensor("attn_weights_277_cast")]; + tensor attn_weights_279_cast = mul(x = attn_weights_277_cast, y = var_12_to_fp16)[name = tensor("attn_weights_279_cast")]; + tensor var_6999_cast = softmax(axis = var_18, x = attn_weights_279_cast)[name = tensor("op_6999_cast")]; + tensor attn_139_transpose_x_0 = const()[name = tensor("attn_139_transpose_x_0"), val = tensor(false)]; + tensor attn_139_transpose_y_0 = const()[name = tensor("attn_139_transpose_y_0"), val = tensor(true)]; + tensor attn_139_cast = matmul(transpose_x = attn_139_transpose_x_0, transpose_y = attn_139_transpose_y_0, x = var_6995_cast, y = var_6999_cast)[name = tensor("attn_139_cast")]; + tensor var_7003 = const()[name = tensor("op_7003"), val = tensor([2, 1280, 1, -1])]; + tensor input_441_cast = reshape(shape = var_7003, x = attn_139_cast)[name = tensor("input_441_cast")]; + tensor var_7008 = const()[name = tensor("op_7008"), val = tensor([1, 1])]; + tensor var_7010 = const()[name = tensor("op_7010"), val = tensor([1, 1])]; + tensor var_7012_pad_type_0 = const()[name = tensor("op_7012_pad_type_0"), val = tensor("custom")]; + tensor var_7012_pad_0 = const()[name = tensor("op_7012_pad_0"), val = tensor([0, 0, 0, 
0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(981440640))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(982669504))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_0_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_0_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(982669696)))]; + tensor var_7012_cast = conv(bias = unet_up_blocks_0_attentions_0_transformer_blocks_0_attn2_to_out_0_bias_to_fp16, dilations = var_7010, groups = var_31, pad = var_7012_pad_0, pad_type = var_7012_pad_type_0, strides = var_7008, weight = unet_up_blocks_0_attentions_0_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized, x = input_441_cast)[name = tensor("op_7012_cast")]; + tensor inputs_209_cast = add(x = var_7012_cast, y = inputs_207_cast)[name = tensor("inputs_209_cast")]; + tensor var_7016 = const()[name = tensor("op_7016"), val = tensor([1])]; + tensor channels_mean_209_cast = reduce_mean(axes = var_7016, keep_dims = var_23, x = inputs_209_cast)[name = tensor("channels_mean_209_cast")]; + tensor zero_mean_209_cast = sub(x = inputs_209_cast, y = channels_mean_209_cast)[name = tensor("zero_mean_209_cast")]; + tensor zero_mean_sq_209_cast = mul(x = zero_mean_209_cast, y = zero_mean_209_cast)[name = tensor("zero_mean_sq_209_cast")]; + tensor var_7020 = const()[name = tensor("op_7020"), val = tensor([1])]; + tensor var_7021_cast = reduce_mean(axes = var_7020, keep_dims = var_23, x = zero_mean_sq_209_cast)[name = tensor("op_7021_cast")]; + tensor var_7022_to_fp16 = const()[name = tensor("op_7022_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_7023_cast = add(x = var_7021_cast, y = var_7022_to_fp16)[name = tensor("op_7023_cast")]; + tensor denom_209_epsilon_0_to_fp16 = const()[name = tensor("denom_209_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_209_cast = rsqrt(epsilon = denom_209_epsilon_0_to_fp16, x = var_7023_cast)[name = tensor("denom_209_cast")]; + tensor out_209_cast = mul(x = zero_mean_209_cast, y = denom_209_cast)[name = tensor("out_209_cast")]; + tensor var_7027_to_fp16 = const()[name = tensor("op_7027_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(982672320)))]; + tensor var_7028_cast = add(x = out_209_cast, y = var_7027_to_fp16)[name = tensor("op_7028_cast")]; + tensor var_7030_to_fp16 = const()[name = tensor("op_7030_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(982674944)))]; + tensor input_443_cast = mul(x = var_7028_cast, y = var_7030_to_fp16)[name = tensor("input_443_cast")]; + tensor var_7038 = const()[name = tensor("op_7038"), val = tensor([1, 1])]; + tensor var_7040 = const()[name = tensor("op_7040"), val = tensor([1, 1])]; + tensor var_7042_pad_type_0 = const()[name = tensor("op_7042_pad_type_0"), val = tensor("custom")]; + tensor var_7042_pad_0 = const()[name = tensor("op_7042_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(982677568))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(992508032))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_0_ff_net_0_proj_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(992508224))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(992515968))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_0_ff_net_0_proj_bias_to_fp16_palettized"), shape = tensor([10240])]; + tensor var_7042_cast = conv(bias = unet_up_blocks_0_attentions_0_transformer_blocks_0_ff_net_0_proj_bias_to_fp16_palettized, dilations = var_7040, groups = var_31, pad = var_7042_pad_0, pad_type = var_7042_pad_type_0, strides = var_7038, weight = unet_up_blocks_0_attentions_0_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized, x = input_443_cast)[name = tensor("op_7042_cast")]; + tensor var_7043_split_sizes_0 = const()[name = tensor("op_7043_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_7043_axis_0 = const()[name = tensor("op_7043_axis_0"), val = tensor(1)]; + tensor var_7043_cast_0, tensor var_7043_cast_1 = split(axis = var_7043_axis_0, split_sizes = var_7043_split_sizes_0, x = var_7042_cast)[name = tensor("op_7043_cast")]; + tensor var_7045_mode_0 = const()[name = tensor("op_7045_mode_0"), val = tensor("EXACT")]; + tensor var_7045_cast = gelu(mode = var_7045_mode_0, x = var_7043_cast_1)[name = tensor("op_7045_cast")]; + tensor input_445_cast = mul(x = var_7043_cast_0, y = var_7045_cast)[name = tensor("input_445_cast")]; + tensor var_7049 = const()[name = tensor("op_7049"), val = tensor([1, 1])]; + tensor var_7051 = const()[name = tensor("op_7051"), val = tensor([1, 1])]; + tensor var_7053_pad_type_0 = const()[name = tensor("op_7053_pad_type_0"), val = tensor("custom")]; + tensor var_7053_pad_0 = const()[name = tensor("op_7053_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(992516160))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(997431424))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_0_ff_net_2_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_0_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(997431616)))]; + tensor var_7053_cast = conv(bias = unet_up_blocks_0_attentions_0_transformer_blocks_0_ff_net_2_bias_to_fp16, dilations = var_7051, groups = var_31, pad = var_7053_pad_0, pad_type = var_7053_pad_type_0, strides = var_7049, weight = unet_up_blocks_0_attentions_0_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized, x = input_445_cast)[name = tensor("op_7053_cast")]; + tensor inputs_211_cast = add(x = var_7053_cast, y = inputs_209_cast)[name = tensor("inputs_211_cast")]; + tensor var_7063 = const()[name = tensor("op_7063"), val = tensor([1])]; + tensor channels_mean_211_cast = reduce_mean(axes 
= var_7063, keep_dims = var_23, x = inputs_211_cast)[name = tensor("channels_mean_211_cast")]; + tensor zero_mean_211_cast = sub(x = inputs_211_cast, y = channels_mean_211_cast)[name = tensor("zero_mean_211_cast")]; + tensor zero_mean_sq_211_cast = mul(x = zero_mean_211_cast, y = zero_mean_211_cast)[name = tensor("zero_mean_sq_211_cast")]; + tensor var_7067 = const()[name = tensor("op_7067"), val = tensor([1])]; + tensor var_7068_cast = reduce_mean(axes = var_7067, keep_dims = var_23, x = zero_mean_sq_211_cast)[name = tensor("op_7068_cast")]; + tensor var_7069_to_fp16 = const()[name = tensor("op_7069_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_7070_cast = add(x = var_7068_cast, y = var_7069_to_fp16)[name = tensor("op_7070_cast")]; + tensor denom_211_epsilon_0_to_fp16 = const()[name = tensor("denom_211_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_211_cast = rsqrt(epsilon = denom_211_epsilon_0_to_fp16, x = var_7070_cast)[name = tensor("denom_211_cast")]; + tensor out_211_cast = mul(x = zero_mean_211_cast, y = denom_211_cast)[name = tensor("out_211_cast")]; + tensor var_7074_to_fp16 = const()[name = tensor("op_7074_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(997434240)))]; + tensor var_7075_cast = add(x = out_211_cast, y = var_7074_to_fp16)[name = tensor("op_7075_cast")]; + tensor var_7077_to_fp16 = const()[name = tensor("op_7077_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(997436864)))]; + tensor hidden_states_293_cast = mul(x = var_7075_cast, y = var_7077_to_fp16)[name = tensor("hidden_states_293_cast")]; + tensor var_7084 = const()[name = tensor("op_7084"), val = tensor([1, 1])]; + tensor var_7086 = const()[name = tensor("op_7086"), val = tensor([1, 1])]; + tensor q_141_pad_type_0 = const()[name = tensor("q_141_pad_type_0"), val = tensor("custom")]; + tensor q_141_pad_0 = const()[name = tensor("q_141_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(997439488))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(998668352))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_141_cast = conv(dilations = var_7086, groups = var_31, pad = q_141_pad_0, pad_type = q_141_pad_type_0, strides = var_7084, weight = unet_up_blocks_0_attentions_0_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_293_cast)[name = tensor("q_141_cast")]; + tensor var_7090 = const()[name = tensor("op_7090"), val = tensor([1, 1])]; + tensor var_7092 = const()[name = tensor("op_7092"), val = tensor([1, 1])]; + tensor k_141_pad_type_0 = const()[name = tensor("k_141_pad_type_0"), val = tensor("custom")]; + tensor k_141_pad_0 = const()[name = tensor("k_141_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(998668544))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(999897408))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized"), shape = 
tensor([1280, 1280, 1, 1])]; + tensor k_141_cast = conv(dilations = var_7092, groups = var_31, pad = k_141_pad_0, pad_type = k_141_pad_type_0, strides = var_7090, weight = unet_up_blocks_0_attentions_0_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_293_cast)[name = tensor("k_141_cast")]; + tensor var_7096 = const()[name = tensor("op_7096"), val = tensor([1, 1])]; + tensor var_7098 = const()[name = tensor("op_7098"), val = tensor([1, 1])]; + tensor v_141_pad_type_0 = const()[name = tensor("v_141_pad_type_0"), val = tensor("custom")]; + tensor v_141_pad_0 = const()[name = tensor("v_141_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(999897600))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1001126464))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_141_cast = conv(dilations = var_7098, groups = var_31, pad = v_141_pad_0, pad_type = v_141_pad_type_0, strides = var_7096, weight = unet_up_blocks_0_attentions_0_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_293_cast)[name = tensor("v_141_cast")]; + tensor var_7102 = const()[name = tensor("op_7102"), val = tensor([2, 20, 64, -1])]; + tensor var_7103_cast = reshape(shape = var_7102, x = q_141_cast)[name = tensor("op_7103_cast")]; + tensor var_7104 = const()[name = tensor("op_7104"), val = tensor([2, 20, 64, -1])]; + tensor var_7105_cast = reshape(shape = var_7104, x = k_141_cast)[name = tensor("op_7105_cast")]; + tensor var_7106 = const()[name = tensor("op_7106"), val = tensor([2, 20, 64, -1])]; + tensor var_7107_cast = reshape(shape = var_7106, x = v_141_cast)[name = tensor("op_7107_cast")]; + tensor attn_weights_281_transpose_x_0 = const()[name = tensor("attn_weights_281_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_281_transpose_y_0 = const()[name = tensor("attn_weights_281_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_281_cast = matmul(transpose_x = attn_weights_281_transpose_x_0, transpose_y = attn_weights_281_transpose_y_0, x = var_7103_cast, y = var_7105_cast)[name = tensor("attn_weights_281_cast")]; + tensor attn_weights_283_cast = mul(x = attn_weights_281_cast, y = var_12_to_fp16)[name = tensor("attn_weights_283_cast")]; + tensor var_7111_cast = softmax(axis = var_18, x = attn_weights_283_cast)[name = tensor("op_7111_cast")]; + tensor attn_141_transpose_x_0 = const()[name = tensor("attn_141_transpose_x_0"), val = tensor(false)]; + tensor attn_141_transpose_y_0 = const()[name = tensor("attn_141_transpose_y_0"), val = tensor(true)]; + tensor attn_141_cast = matmul(transpose_x = attn_141_transpose_x_0, transpose_y = attn_141_transpose_y_0, x = var_7107_cast, y = var_7111_cast)[name = tensor("attn_141_cast")]; + tensor var_7115 = const()[name = tensor("op_7115"), val = tensor([2, 1280, 1, -1])]; + tensor input_447_cast = reshape(shape = var_7115, x = attn_141_cast)[name = tensor("input_447_cast")]; + tensor var_7120 = const()[name = tensor("op_7120"), val = tensor([1, 1])]; + tensor var_7122 = const()[name = tensor("op_7122"), val = tensor([1, 1])]; + tensor var_7124_pad_type_0 = const()[name = tensor("op_7124_pad_type_0"), val = tensor("custom")]; + tensor var_7124_pad_0 = const()[name = 
tensor("op_7124_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1001126656))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1002355520))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_1_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_1_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1002355712)))]; + tensor var_7124_cast = conv(bias = unet_up_blocks_0_attentions_0_transformer_blocks_1_attn1_to_out_0_bias_to_fp16, dilations = var_7122, groups = var_31, pad = var_7124_pad_0, pad_type = var_7124_pad_type_0, strides = var_7120, weight = unet_up_blocks_0_attentions_0_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized, x = input_447_cast)[name = tensor("op_7124_cast")]; + tensor inputs_213_cast = add(x = var_7124_cast, y = inputs_211_cast)[name = tensor("inputs_213_cast")]; + tensor var_7128 = const()[name = tensor("op_7128"), val = tensor([1])]; + tensor channels_mean_213_cast = reduce_mean(axes = var_7128, keep_dims = var_23, x = inputs_213_cast)[name = tensor("channels_mean_213_cast")]; + tensor zero_mean_213_cast = sub(x = inputs_213_cast, y = channels_mean_213_cast)[name = tensor("zero_mean_213_cast")]; + tensor zero_mean_sq_213_cast = mul(x = zero_mean_213_cast, y = zero_mean_213_cast)[name = tensor("zero_mean_sq_213_cast")]; + tensor var_7132 = const()[name = tensor("op_7132"), val = tensor([1])]; + tensor var_7133_cast = reduce_mean(axes = var_7132, keep_dims = var_23, x = zero_mean_sq_213_cast)[name = tensor("op_7133_cast")]; + tensor var_7134_to_fp16 = const()[name = tensor("op_7134_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_7135_cast = add(x = var_7133_cast, y = var_7134_to_fp16)[name = tensor("op_7135_cast")]; + tensor denom_213_epsilon_0_to_fp16 = const()[name = tensor("denom_213_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_213_cast = rsqrt(epsilon = denom_213_epsilon_0_to_fp16, x = var_7135_cast)[name = tensor("denom_213_cast")]; + tensor out_213_cast = mul(x = zero_mean_213_cast, y = denom_213_cast)[name = tensor("out_213_cast")]; + tensor var_7139_to_fp16 = const()[name = tensor("op_7139_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1002358336)))]; + tensor var_7140_cast = add(x = out_213_cast, y = var_7139_to_fp16)[name = tensor("op_7140_cast")]; + tensor var_7142_to_fp16 = const()[name = tensor("op_7142_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1002360960)))]; + tensor hidden_states_295_cast = mul(x = var_7140_cast, y = var_7142_to_fp16)[name = tensor("hidden_states_295_cast")]; + tensor var_7149 = const()[name = tensor("op_7149"), val = tensor([1, 1])]; + tensor var_7151 = const()[name = tensor("op_7151"), val = tensor([1, 1])]; + tensor q_143_pad_type_0 = const()[name = tensor("q_143_pad_type_0"), val = tensor("custom")]; + tensor q_143_pad_0 = const()[name = tensor("q_143_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized = 
constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1002363584))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1003592448))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_143_cast = conv(dilations = var_7151, groups = var_31, pad = q_143_pad_0, pad_type = q_143_pad_type_0, strides = var_7149, weight = unet_up_blocks_0_attentions_0_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_295_cast)[name = tensor("q_143_cast")]; + tensor var_7155 = const()[name = tensor("op_7155"), val = tensor([1, 1])]; + tensor var_7157 = const()[name = tensor("op_7157"), val = tensor([1, 1])]; + tensor k_143_pad_type_0 = const()[name = tensor("k_143_pad_type_0"), val = tensor("custom")]; + tensor k_143_pad_0 = const()[name = tensor("k_143_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1003592640))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1005558784))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_143_cast = conv(dilations = var_7157, groups = var_31, pad = k_143_pad_0, pad_type = k_143_pad_type_0, strides = var_7155, weight = unet_up_blocks_0_attentions_0_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_143_cast")]; + tensor var_7161 = const()[name = tensor("op_7161"), val = tensor([1, 1])]; + tensor var_7163 = const()[name = tensor("op_7163"), val = tensor([1, 1])]; + tensor v_143_pad_type_0 = const()[name = tensor("v_143_pad_type_0"), val = tensor("custom")]; + tensor v_143_pad_0 = const()[name = tensor("v_143_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1005558976))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1007525120))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_143_cast = conv(dilations = var_7163, groups = var_31, pad = v_143_pad_0, pad_type = v_143_pad_type_0, strides = var_7161, weight = unet_up_blocks_0_attentions_0_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_143_cast")]; + tensor var_7167 = const()[name = tensor("op_7167"), val = tensor([2, 20, 64, -1])]; + tensor var_7168_cast = reshape(shape = var_7167, x = q_143_cast)[name = tensor("op_7168_cast")]; + tensor var_7169 = const()[name = tensor("op_7169"), val = tensor([2, 20, 64, -1])]; + tensor var_7170_cast = reshape(shape = var_7169, x = k_143_cast)[name = tensor("op_7170_cast")]; + tensor var_7171 = const()[name = tensor("op_7171"), val = tensor([2, 20, 64, -1])]; + tensor var_7172_cast = reshape(shape = var_7171, x = v_143_cast)[name = tensor("op_7172_cast")]; + tensor attn_weights_285_transpose_x_0 = const()[name = tensor("attn_weights_285_transpose_x_0"), val = tensor(true)]; + 
tensor attn_weights_285_transpose_y_0 = const()[name = tensor("attn_weights_285_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_285_cast = matmul(transpose_x = attn_weights_285_transpose_x_0, transpose_y = attn_weights_285_transpose_y_0, x = var_7168_cast, y = var_7170_cast)[name = tensor("attn_weights_285_cast")]; + tensor attn_weights_287_cast = mul(x = attn_weights_285_cast, y = var_12_to_fp16)[name = tensor("attn_weights_287_cast")]; + tensor var_7176_cast = softmax(axis = var_18, x = attn_weights_287_cast)[name = tensor("op_7176_cast")]; + tensor attn_143_transpose_x_0 = const()[name = tensor("attn_143_transpose_x_0"), val = tensor(false)]; + tensor attn_143_transpose_y_0 = const()[name = tensor("attn_143_transpose_y_0"), val = tensor(true)]; + tensor attn_143_cast = matmul(transpose_x = attn_143_transpose_x_0, transpose_y = attn_143_transpose_y_0, x = var_7172_cast, y = var_7176_cast)[name = tensor("attn_143_cast")]; + tensor var_7180 = const()[name = tensor("op_7180"), val = tensor([2, 1280, 1, -1])]; + tensor input_449_cast = reshape(shape = var_7180, x = attn_143_cast)[name = tensor("input_449_cast")]; + tensor var_7185 = const()[name = tensor("op_7185"), val = tensor([1, 1])]; + tensor var_7187 = const()[name = tensor("op_7187"), val = tensor([1, 1])]; + tensor var_7189_pad_type_0 = const()[name = tensor("op_7189_pad_type_0"), val = tensor("custom")]; + tensor var_7189_pad_0 = const()[name = tensor("op_7189_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1007525312))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1008754176))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_1_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_1_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1008754368)))]; + tensor var_7189_cast = conv(bias = unet_up_blocks_0_attentions_0_transformer_blocks_1_attn2_to_out_0_bias_to_fp16, dilations = var_7187, groups = var_31, pad = var_7189_pad_0, pad_type = var_7189_pad_type_0, strides = var_7185, weight = unet_up_blocks_0_attentions_0_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized, x = input_449_cast)[name = tensor("op_7189_cast")]; + tensor inputs_215_cast = add(x = var_7189_cast, y = inputs_213_cast)[name = tensor("inputs_215_cast")]; + tensor var_7193 = const()[name = tensor("op_7193"), val = tensor([1])]; + tensor channels_mean_215_cast = reduce_mean(axes = var_7193, keep_dims = var_23, x = inputs_215_cast)[name = tensor("channels_mean_215_cast")]; + tensor zero_mean_215_cast = sub(x = inputs_215_cast, y = channels_mean_215_cast)[name = tensor("zero_mean_215_cast")]; + tensor zero_mean_sq_215_cast = mul(x = zero_mean_215_cast, y = zero_mean_215_cast)[name = tensor("zero_mean_sq_215_cast")]; + tensor var_7197 = const()[name = tensor("op_7197"), val = tensor([1])]; + tensor var_7198_cast = reduce_mean(axes = var_7197, keep_dims = var_23, x = zero_mean_sq_215_cast)[name = tensor("op_7198_cast")]; + tensor var_7199_to_fp16 = const()[name = tensor("op_7199_to_fp16"), val = tensor(0x1.5p-17)]; + tensor 
var_7200_cast = add(x = var_7198_cast, y = var_7199_to_fp16)[name = tensor("op_7200_cast")]; + tensor denom_215_epsilon_0_to_fp16 = const()[name = tensor("denom_215_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_215_cast = rsqrt(epsilon = denom_215_epsilon_0_to_fp16, x = var_7200_cast)[name = tensor("denom_215_cast")]; + tensor out_215_cast = mul(x = zero_mean_215_cast, y = denom_215_cast)[name = tensor("out_215_cast")]; + tensor var_7204_to_fp16 = const()[name = tensor("op_7204_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1008756992)))]; + tensor var_7205_cast = add(x = out_215_cast, y = var_7204_to_fp16)[name = tensor("op_7205_cast")]; + tensor var_7207_to_fp16 = const()[name = tensor("op_7207_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1008759616)))]; + tensor input_451_cast = mul(x = var_7205_cast, y = var_7207_to_fp16)[name = tensor("input_451_cast")]; + tensor var_7215 = const()[name = tensor("op_7215"), val = tensor([1, 1])]; + tensor var_7217 = const()[name = tensor("op_7217"), val = tensor([1, 1])]; + tensor var_7219_pad_type_0 = const()[name = tensor("op_7219_pad_type_0"), val = tensor("custom")]; + tensor var_7219_pad_0 = const()[name = tensor("op_7219_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1008762240))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1018592704))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_1_ff_net_0_proj_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1018592896))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1018600640))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_1_ff_net_0_proj_bias_to_fp16_palettized"), shape = tensor([10240])]; + tensor var_7219_cast = conv(bias = unet_up_blocks_0_attentions_0_transformer_blocks_1_ff_net_0_proj_bias_to_fp16_palettized, dilations = var_7217, groups = var_31, pad = var_7219_pad_0, pad_type = var_7219_pad_type_0, strides = var_7215, weight = unet_up_blocks_0_attentions_0_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized, x = input_451_cast)[name = tensor("op_7219_cast")]; + tensor var_7220_split_sizes_0 = const()[name = tensor("op_7220_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_7220_axis_0 = const()[name = tensor("op_7220_axis_0"), val = tensor(1)]; + tensor var_7220_cast_0, tensor var_7220_cast_1 = split(axis = var_7220_axis_0, split_sizes = var_7220_split_sizes_0, x = var_7219_cast)[name = tensor("op_7220_cast")]; + tensor var_7222_mode_0 = const()[name = tensor("op_7222_mode_0"), val = tensor("EXACT")]; + tensor var_7222_cast = gelu(mode = var_7222_mode_0, x = var_7220_cast_1)[name = tensor("op_7222_cast")]; + tensor input_453_cast = mul(x = var_7220_cast_0, y = var_7222_cast)[name = tensor("input_453_cast")]; + tensor var_7226 = const()[name = tensor("op_7226"), val = tensor([1, 1])]; + tensor var_7228 = const()[name = tensor("op_7228"), val = tensor([1, 1])]; + tensor var_7230_pad_type_0 = const()[name = 
tensor("op_7230_pad_type_0"), val = tensor("custom")]; + tensor var_7230_pad_0 = const()[name = tensor("op_7230_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1018600832))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1023516096))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_1_ff_net_2_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_1_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1023516288)))]; + tensor var_7230_cast = conv(bias = unet_up_blocks_0_attentions_0_transformer_blocks_1_ff_net_2_bias_to_fp16, dilations = var_7228, groups = var_31, pad = var_7230_pad_0, pad_type = var_7230_pad_type_0, strides = var_7226, weight = unet_up_blocks_0_attentions_0_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized, x = input_453_cast)[name = tensor("op_7230_cast")]; + tensor inputs_217_cast = add(x = var_7230_cast, y = inputs_215_cast)[name = tensor("inputs_217_cast")]; + tensor var_7240 = const()[name = tensor("op_7240"), val = tensor([1])]; + tensor channels_mean_217_cast = reduce_mean(axes = var_7240, keep_dims = var_23, x = inputs_217_cast)[name = tensor("channels_mean_217_cast")]; + tensor zero_mean_217_cast = sub(x = inputs_217_cast, y = channels_mean_217_cast)[name = tensor("zero_mean_217_cast")]; + tensor zero_mean_sq_217_cast = mul(x = zero_mean_217_cast, y = zero_mean_217_cast)[name = tensor("zero_mean_sq_217_cast")]; + tensor var_7244 = const()[name = tensor("op_7244"), val = tensor([1])]; + tensor var_7245_cast = reduce_mean(axes = var_7244, keep_dims = var_23, x = zero_mean_sq_217_cast)[name = tensor("op_7245_cast")]; + tensor var_7246_to_fp16 = const()[name = tensor("op_7246_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_7247_cast = add(x = var_7245_cast, y = var_7246_to_fp16)[name = tensor("op_7247_cast")]; + tensor denom_217_epsilon_0_to_fp16 = const()[name = tensor("denom_217_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_217_cast = rsqrt(epsilon = denom_217_epsilon_0_to_fp16, x = var_7247_cast)[name = tensor("denom_217_cast")]; + tensor out_217_cast = mul(x = zero_mean_217_cast, y = denom_217_cast)[name = tensor("out_217_cast")]; + tensor var_7251_to_fp16 = const()[name = tensor("op_7251_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1023518912)))]; + tensor var_7252_cast = add(x = out_217_cast, y = var_7251_to_fp16)[name = tensor("op_7252_cast")]; + tensor var_7254_to_fp16 = const()[name = tensor("op_7254_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1023521536)))]; + tensor hidden_states_299_cast = mul(x = var_7252_cast, y = var_7254_to_fp16)[name = tensor("hidden_states_299_cast")]; + tensor var_7261 = const()[name = tensor("op_7261"), val = tensor([1, 1])]; + tensor var_7263 = const()[name = tensor("op_7263"), val = tensor([1, 1])]; + tensor q_145_pad_type_0 = const()[name = tensor("q_145_pad_type_0"), val = tensor("custom")]; + tensor q_145_pad_0 = const()[name = tensor("q_145_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
unet_up_blocks_0_attentions_0_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1023524160))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1024753024))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_145_cast = conv(dilations = var_7263, groups = var_31, pad = q_145_pad_0, pad_type = q_145_pad_type_0, strides = var_7261, weight = unet_up_blocks_0_attentions_0_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_299_cast)[name = tensor("q_145_cast")]; + tensor var_7267 = const()[name = tensor("op_7267"), val = tensor([1, 1])]; + tensor var_7269 = const()[name = tensor("op_7269"), val = tensor([1, 1])]; + tensor k_145_pad_type_0 = const()[name = tensor("k_145_pad_type_0"), val = tensor("custom")]; + tensor k_145_pad_0 = const()[name = tensor("k_145_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1024753216))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1025982080))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_145_cast = conv(dilations = var_7269, groups = var_31, pad = k_145_pad_0, pad_type = k_145_pad_type_0, strides = var_7267, weight = unet_up_blocks_0_attentions_0_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_299_cast)[name = tensor("k_145_cast")]; + tensor var_7273 = const()[name = tensor("op_7273"), val = tensor([1, 1])]; + tensor var_7275 = const()[name = tensor("op_7275"), val = tensor([1, 1])]; + tensor v_145_pad_type_0 = const()[name = tensor("v_145_pad_type_0"), val = tensor("custom")]; + tensor v_145_pad_0 = const()[name = tensor("v_145_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1025982272))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1027211136))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_145_cast = conv(dilations = var_7275, groups = var_31, pad = v_145_pad_0, pad_type = v_145_pad_type_0, strides = var_7273, weight = unet_up_blocks_0_attentions_0_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_299_cast)[name = tensor("v_145_cast")]; + tensor var_7279 = const()[name = tensor("op_7279"), val = tensor([2, 20, 64, -1])]; + tensor var_7280_cast = reshape(shape = var_7279, x = q_145_cast)[name = tensor("op_7280_cast")]; + tensor var_7281 = const()[name = tensor("op_7281"), val = tensor([2, 20, 64, -1])]; + tensor var_7282_cast = reshape(shape = var_7281, x = k_145_cast)[name = tensor("op_7282_cast")]; + tensor var_7283 = const()[name = tensor("op_7283"), val = tensor([2, 20, 64, -1])]; + tensor var_7284_cast = reshape(shape = var_7283, x = v_145_cast)[name = tensor("op_7284_cast")]; + tensor 
attn_weights_289_transpose_x_0 = const()[name = tensor("attn_weights_289_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_289_transpose_y_0 = const()[name = tensor("attn_weights_289_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_289_cast = matmul(transpose_x = attn_weights_289_transpose_x_0, transpose_y = attn_weights_289_transpose_y_0, x = var_7280_cast, y = var_7282_cast)[name = tensor("attn_weights_289_cast")]; + tensor attn_weights_291_cast = mul(x = attn_weights_289_cast, y = var_12_to_fp16)[name = tensor("attn_weights_291_cast")]; + tensor var_7288_cast = softmax(axis = var_18, x = attn_weights_291_cast)[name = tensor("op_7288_cast")]; + tensor attn_145_transpose_x_0 = const()[name = tensor("attn_145_transpose_x_0"), val = tensor(false)]; + tensor attn_145_transpose_y_0 = const()[name = tensor("attn_145_transpose_y_0"), val = tensor(true)]; + tensor attn_145_cast = matmul(transpose_x = attn_145_transpose_x_0, transpose_y = attn_145_transpose_y_0, x = var_7284_cast, y = var_7288_cast)[name = tensor("attn_145_cast")]; + tensor var_7292 = const()[name = tensor("op_7292"), val = tensor([2, 1280, 1, -1])]; + tensor input_455_cast = reshape(shape = var_7292, x = attn_145_cast)[name = tensor("input_455_cast")]; + tensor var_7297 = const()[name = tensor("op_7297"), val = tensor([1, 1])]; + tensor var_7299 = const()[name = tensor("op_7299"), val = tensor([1, 1])]; + tensor var_7301_pad_type_0 = const()[name = tensor("op_7301_pad_type_0"), val = tensor("custom")]; + tensor var_7301_pad_0 = const()[name = tensor("op_7301_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1027211328))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1028440192))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_2_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_2_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1028440384)))]; + tensor var_7301_cast = conv(bias = unet_up_blocks_0_attentions_0_transformer_blocks_2_attn1_to_out_0_bias_to_fp16, dilations = var_7299, groups = var_31, pad = var_7301_pad_0, pad_type = var_7301_pad_type_0, strides = var_7297, weight = unet_up_blocks_0_attentions_0_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized, x = input_455_cast)[name = tensor("op_7301_cast")]; + tensor inputs_219_cast = add(x = var_7301_cast, y = inputs_217_cast)[name = tensor("inputs_219_cast")]; + tensor var_7305 = const()[name = tensor("op_7305"), val = tensor([1])]; + tensor channels_mean_219_cast = reduce_mean(axes = var_7305, keep_dims = var_23, x = inputs_219_cast)[name = tensor("channels_mean_219_cast")]; + tensor zero_mean_219_cast = sub(x = inputs_219_cast, y = channels_mean_219_cast)[name = tensor("zero_mean_219_cast")]; + tensor zero_mean_sq_219_cast = mul(x = zero_mean_219_cast, y = zero_mean_219_cast)[name = tensor("zero_mean_sq_219_cast")]; + tensor var_7309 = const()[name = tensor("op_7309"), val = tensor([1])]; + tensor var_7310_cast = reduce_mean(axes = var_7309, keep_dims = var_23, x = zero_mean_sq_219_cast)[name = tensor("op_7310_cast")]; + 
tensor var_7311_to_fp16 = const()[name = tensor("op_7311_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_7312_cast = add(x = var_7310_cast, y = var_7311_to_fp16)[name = tensor("op_7312_cast")]; + tensor denom_219_epsilon_0_to_fp16 = const()[name = tensor("denom_219_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_219_cast = rsqrt(epsilon = denom_219_epsilon_0_to_fp16, x = var_7312_cast)[name = tensor("denom_219_cast")]; + tensor out_219_cast = mul(x = zero_mean_219_cast, y = denom_219_cast)[name = tensor("out_219_cast")]; + tensor var_7316_to_fp16 = const()[name = tensor("op_7316_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1028443008)))]; + tensor var_7317_cast = add(x = out_219_cast, y = var_7316_to_fp16)[name = tensor("op_7317_cast")]; + tensor var_7319_to_fp16 = const()[name = tensor("op_7319_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1028445632)))]; + tensor hidden_states_301_cast = mul(x = var_7317_cast, y = var_7319_to_fp16)[name = tensor("hidden_states_301_cast")]; + tensor var_7326 = const()[name = tensor("op_7326"), val = tensor([1, 1])]; + tensor var_7328 = const()[name = tensor("op_7328"), val = tensor([1, 1])]; + tensor q_147_pad_type_0 = const()[name = tensor("q_147_pad_type_0"), val = tensor("custom")]; + tensor q_147_pad_0 = const()[name = tensor("q_147_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1028448256))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1029677120))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_147_cast = conv(dilations = var_7328, groups = var_31, pad = q_147_pad_0, pad_type = q_147_pad_type_0, strides = var_7326, weight = unet_up_blocks_0_attentions_0_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_301_cast)[name = tensor("q_147_cast")]; + tensor var_7332 = const()[name = tensor("op_7332"), val = tensor([1, 1])]; + tensor var_7334 = const()[name = tensor("op_7334"), val = tensor([1, 1])]; + tensor k_147_pad_type_0 = const()[name = tensor("k_147_pad_type_0"), val = tensor("custom")]; + tensor k_147_pad_0 = const()[name = tensor("k_147_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1029677312))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1031643456))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_147_cast = conv(dilations = var_7334, groups = var_31, pad = k_147_pad_0, pad_type = k_147_pad_type_0, strides = var_7332, weight = unet_up_blocks_0_attentions_0_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_147_cast")]; + tensor var_7338 = const()[name = tensor("op_7338"), val = tensor([1, 1])]; + tensor var_7340 = const()[name = tensor("op_7340"), val = tensor([1, 1])]; + tensor v_147_pad_type_0 = const()[name = tensor("v_147_pad_type_0"), val = 
tensor("custom")]; + tensor v_147_pad_0 = const()[name = tensor("v_147_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1031643648))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1033609792))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_147_cast = conv(dilations = var_7340, groups = var_31, pad = v_147_pad_0, pad_type = v_147_pad_type_0, strides = var_7338, weight = unet_up_blocks_0_attentions_0_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_147_cast")]; + tensor var_7344 = const()[name = tensor("op_7344"), val = tensor([2, 20, 64, -1])]; + tensor var_7345_cast = reshape(shape = var_7344, x = q_147_cast)[name = tensor("op_7345_cast")]; + tensor var_7346 = const()[name = tensor("op_7346"), val = tensor([2, 20, 64, -1])]; + tensor var_7347_cast = reshape(shape = var_7346, x = k_147_cast)[name = tensor("op_7347_cast")]; + tensor var_7348 = const()[name = tensor("op_7348"), val = tensor([2, 20, 64, -1])]; + tensor var_7349_cast = reshape(shape = var_7348, x = v_147_cast)[name = tensor("op_7349_cast")]; + tensor attn_weights_293_transpose_x_0 = const()[name = tensor("attn_weights_293_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_293_transpose_y_0 = const()[name = tensor("attn_weights_293_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_293_cast = matmul(transpose_x = attn_weights_293_transpose_x_0, transpose_y = attn_weights_293_transpose_y_0, x = var_7345_cast, y = var_7347_cast)[name = tensor("attn_weights_293_cast")]; + tensor attn_weights_295_cast = mul(x = attn_weights_293_cast, y = var_12_to_fp16)[name = tensor("attn_weights_295_cast")]; + tensor var_7353_cast = softmax(axis = var_18, x = attn_weights_295_cast)[name = tensor("op_7353_cast")]; + tensor attn_147_transpose_x_0 = const()[name = tensor("attn_147_transpose_x_0"), val = tensor(false)]; + tensor attn_147_transpose_y_0 = const()[name = tensor("attn_147_transpose_y_0"), val = tensor(true)]; + tensor attn_147_cast = matmul(transpose_x = attn_147_transpose_x_0, transpose_y = attn_147_transpose_y_0, x = var_7349_cast, y = var_7353_cast)[name = tensor("attn_147_cast")]; + tensor var_7357 = const()[name = tensor("op_7357"), val = tensor([2, 1280, 1, -1])]; + tensor input_457_cast = reshape(shape = var_7357, x = attn_147_cast)[name = tensor("input_457_cast")]; + tensor var_7362 = const()[name = tensor("op_7362"), val = tensor([1, 1])]; + tensor var_7364 = const()[name = tensor("op_7364"), val = tensor([1, 1])]; + tensor var_7366_pad_type_0 = const()[name = tensor("op_7366_pad_type_0"), val = tensor("custom")]; + tensor var_7366_pad_0 = const()[name = tensor("op_7366_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1033609984))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1034838848))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor 
unet_up_blocks_0_attentions_0_transformer_blocks_2_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_2_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1034839040)))]; + tensor var_7366_cast = conv(bias = unet_up_blocks_0_attentions_0_transformer_blocks_2_attn2_to_out_0_bias_to_fp16, dilations = var_7364, groups = var_31, pad = var_7366_pad_0, pad_type = var_7366_pad_type_0, strides = var_7362, weight = unet_up_blocks_0_attentions_0_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized, x = input_457_cast)[name = tensor("op_7366_cast")]; + tensor inputs_221_cast = add(x = var_7366_cast, y = inputs_219_cast)[name = tensor("inputs_221_cast")]; + tensor var_7370 = const()[name = tensor("op_7370"), val = tensor([1])]; + tensor channels_mean_221_cast = reduce_mean(axes = var_7370, keep_dims = var_23, x = inputs_221_cast)[name = tensor("channels_mean_221_cast")]; + tensor zero_mean_221_cast = sub(x = inputs_221_cast, y = channels_mean_221_cast)[name = tensor("zero_mean_221_cast")]; + tensor zero_mean_sq_221_cast = mul(x = zero_mean_221_cast, y = zero_mean_221_cast)[name = tensor("zero_mean_sq_221_cast")]; + tensor var_7374 = const()[name = tensor("op_7374"), val = tensor([1])]; + tensor var_7375_cast = reduce_mean(axes = var_7374, keep_dims = var_23, x = zero_mean_sq_221_cast)[name = tensor("op_7375_cast")]; + tensor var_7376_to_fp16 = const()[name = tensor("op_7376_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_7377_cast = add(x = var_7375_cast, y = var_7376_to_fp16)[name = tensor("op_7377_cast")]; + tensor denom_221_epsilon_0_to_fp16 = const()[name = tensor("denom_221_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_221_cast = rsqrt(epsilon = denom_221_epsilon_0_to_fp16, x = var_7377_cast)[name = tensor("denom_221_cast")]; + tensor out_221_cast = mul(x = zero_mean_221_cast, y = denom_221_cast)[name = tensor("out_221_cast")]; + tensor var_7381_to_fp16 = const()[name = tensor("op_7381_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1034841664)))]; + tensor var_7382_cast = add(x = out_221_cast, y = var_7381_to_fp16)[name = tensor("op_7382_cast")]; + tensor var_7384_to_fp16 = const()[name = tensor("op_7384_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1034844288)))]; + tensor input_459_cast = mul(x = var_7382_cast, y = var_7384_to_fp16)[name = tensor("input_459_cast")]; + tensor var_7392 = const()[name = tensor("op_7392"), val = tensor([1, 1])]; + tensor var_7394 = const()[name = tensor("op_7394"), val = tensor([1, 1])]; + tensor var_7396_pad_type_0 = const()[name = tensor("op_7396_pad_type_0"), val = tensor("custom")]; + tensor var_7396_pad_0 = const()[name = tensor("op_7396_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1034846912))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1044677376))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_2_ff_net_0_proj_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(1044677568))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1044685312))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_2_ff_net_0_proj_bias_to_fp16_palettized"), shape = tensor([10240])]; + tensor var_7396_cast = conv(bias = unet_up_blocks_0_attentions_0_transformer_blocks_2_ff_net_0_proj_bias_to_fp16_palettized, dilations = var_7394, groups = var_31, pad = var_7396_pad_0, pad_type = var_7396_pad_type_0, strides = var_7392, weight = unet_up_blocks_0_attentions_0_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized, x = input_459_cast)[name = tensor("op_7396_cast")]; + tensor var_7397_split_sizes_0 = const()[name = tensor("op_7397_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_7397_axis_0 = const()[name = tensor("op_7397_axis_0"), val = tensor(1)]; + tensor var_7397_cast_0, tensor var_7397_cast_1 = split(axis = var_7397_axis_0, split_sizes = var_7397_split_sizes_0, x = var_7396_cast)[name = tensor("op_7397_cast")]; + tensor var_7399_mode_0 = const()[name = tensor("op_7399_mode_0"), val = tensor("EXACT")]; + tensor var_7399_cast = gelu(mode = var_7399_mode_0, x = var_7397_cast_1)[name = tensor("op_7399_cast")]; + tensor input_461_cast = mul(x = var_7397_cast_0, y = var_7399_cast)[name = tensor("input_461_cast")]; + tensor var_7403 = const()[name = tensor("op_7403"), val = tensor([1, 1])]; + tensor var_7405 = const()[name = tensor("op_7405"), val = tensor([1, 1])]; + tensor var_7407_pad_type_0 = const()[name = tensor("op_7407_pad_type_0"), val = tensor("custom")]; + tensor var_7407_pad_0 = const()[name = tensor("op_7407_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1044685504))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1049600768))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_2_ff_net_2_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_2_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1049600960)))]; + tensor var_7407_cast = conv(bias = unet_up_blocks_0_attentions_0_transformer_blocks_2_ff_net_2_bias_to_fp16, dilations = var_7405, groups = var_31, pad = var_7407_pad_0, pad_type = var_7407_pad_type_0, strides = var_7403, weight = unet_up_blocks_0_attentions_0_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized, x = input_461_cast)[name = tensor("op_7407_cast")]; + tensor inputs_223_cast = add(x = var_7407_cast, y = inputs_221_cast)[name = tensor("inputs_223_cast")]; + tensor var_7417 = const()[name = tensor("op_7417"), val = tensor([1])]; + tensor channels_mean_223_cast = reduce_mean(axes = var_7417, keep_dims = var_23, x = inputs_223_cast)[name = tensor("channels_mean_223_cast")]; + tensor zero_mean_223_cast = sub(x = inputs_223_cast, y = channels_mean_223_cast)[name = tensor("zero_mean_223_cast")]; + tensor zero_mean_sq_223_cast = mul(x = zero_mean_223_cast, y = zero_mean_223_cast)[name = tensor("zero_mean_sq_223_cast")]; + tensor var_7421 = const()[name = tensor("op_7421"), val = tensor([1])]; + tensor var_7422_cast = reduce_mean(axes = var_7421, 
keep_dims = var_23, x = zero_mean_sq_223_cast)[name = tensor("op_7422_cast")]; + tensor var_7423_to_fp16 = const()[name = tensor("op_7423_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_7424_cast = add(x = var_7422_cast, y = var_7423_to_fp16)[name = tensor("op_7424_cast")]; + tensor denom_223_epsilon_0_to_fp16 = const()[name = tensor("denom_223_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_223_cast = rsqrt(epsilon = denom_223_epsilon_0_to_fp16, x = var_7424_cast)[name = tensor("denom_223_cast")]; + tensor out_223_cast = mul(x = zero_mean_223_cast, y = denom_223_cast)[name = tensor("out_223_cast")]; + tensor var_7428_to_fp16 = const()[name = tensor("op_7428_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1049603584)))]; + tensor var_7429_cast = add(x = out_223_cast, y = var_7428_to_fp16)[name = tensor("op_7429_cast")]; + tensor var_7431_to_fp16 = const()[name = tensor("op_7431_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1049606208)))]; + tensor hidden_states_305_cast = mul(x = var_7429_cast, y = var_7431_to_fp16)[name = tensor("hidden_states_305_cast")]; + tensor var_7438 = const()[name = tensor("op_7438"), val = tensor([1, 1])]; + tensor var_7440 = const()[name = tensor("op_7440"), val = tensor([1, 1])]; + tensor q_149_pad_type_0 = const()[name = tensor("q_149_pad_type_0"), val = tensor("custom")]; + tensor q_149_pad_0 = const()[name = tensor("q_149_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1049608832))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1050837696))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_149_cast = conv(dilations = var_7440, groups = var_31, pad = q_149_pad_0, pad_type = q_149_pad_type_0, strides = var_7438, weight = unet_up_blocks_0_attentions_0_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_305_cast)[name = tensor("q_149_cast")]; + tensor var_7444 = const()[name = tensor("op_7444"), val = tensor([1, 1])]; + tensor var_7446 = const()[name = tensor("op_7446"), val = tensor([1, 1])]; + tensor k_149_pad_type_0 = const()[name = tensor("k_149_pad_type_0"), val = tensor("custom")]; + tensor k_149_pad_0 = const()[name = tensor("k_149_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1050837888))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1052066752))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_149_cast = conv(dilations = var_7446, groups = var_31, pad = k_149_pad_0, pad_type = k_149_pad_type_0, strides = var_7444, weight = unet_up_blocks_0_attentions_0_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_305_cast)[name = tensor("k_149_cast")]; + tensor var_7450 = const()[name = tensor("op_7450"), val = tensor([1, 1])]; + tensor var_7452 = const()[name = tensor("op_7452"), val = tensor([1, 1])]; 
+ tensor v_149_pad_type_0 = const()[name = tensor("v_149_pad_type_0"), val = tensor("custom")]; + tensor v_149_pad_0 = const()[name = tensor("v_149_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1052066944))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1053295808))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_149_cast = conv(dilations = var_7452, groups = var_31, pad = v_149_pad_0, pad_type = v_149_pad_type_0, strides = var_7450, weight = unet_up_blocks_0_attentions_0_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_305_cast)[name = tensor("v_149_cast")]; + tensor var_7456 = const()[name = tensor("op_7456"), val = tensor([2, 20, 64, -1])]; + tensor var_7457_cast = reshape(shape = var_7456, x = q_149_cast)[name = tensor("op_7457_cast")]; + tensor var_7458 = const()[name = tensor("op_7458"), val = tensor([2, 20, 64, -1])]; + tensor var_7459_cast = reshape(shape = var_7458, x = k_149_cast)[name = tensor("op_7459_cast")]; + tensor var_7460 = const()[name = tensor("op_7460"), val = tensor([2, 20, 64, -1])]; + tensor var_7461_cast = reshape(shape = var_7460, x = v_149_cast)[name = tensor("op_7461_cast")]; + tensor attn_weights_297_transpose_x_0 = const()[name = tensor("attn_weights_297_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_297_transpose_y_0 = const()[name = tensor("attn_weights_297_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_297_cast = matmul(transpose_x = attn_weights_297_transpose_x_0, transpose_y = attn_weights_297_transpose_y_0, x = var_7457_cast, y = var_7459_cast)[name = tensor("attn_weights_297_cast")]; + tensor attn_weights_299_cast = mul(x = attn_weights_297_cast, y = var_12_to_fp16)[name = tensor("attn_weights_299_cast")]; + tensor var_7465_cast = softmax(axis = var_18, x = attn_weights_299_cast)[name = tensor("op_7465_cast")]; + tensor attn_149_transpose_x_0 = const()[name = tensor("attn_149_transpose_x_0"), val = tensor(false)]; + tensor attn_149_transpose_y_0 = const()[name = tensor("attn_149_transpose_y_0"), val = tensor(true)]; + tensor attn_149_cast = matmul(transpose_x = attn_149_transpose_x_0, transpose_y = attn_149_transpose_y_0, x = var_7461_cast, y = var_7465_cast)[name = tensor("attn_149_cast")]; + tensor var_7469 = const()[name = tensor("op_7469"), val = tensor([2, 1280, 1, -1])]; + tensor input_463_cast = reshape(shape = var_7469, x = attn_149_cast)[name = tensor("input_463_cast")]; + tensor var_7474 = const()[name = tensor("op_7474"), val = tensor([1, 1])]; + tensor var_7476 = const()[name = tensor("op_7476"), val = tensor([1, 1])]; + tensor var_7478_pad_type_0 = const()[name = tensor("op_7478_pad_type_0"), val = tensor("custom")]; + tensor var_7478_pad_0 = const()[name = tensor("op_7478_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1053296000))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1054524864))), name = 
tensor("unet_up_blocks_0_attentions_0_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_3_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_3_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1054525056)))]; + tensor var_7478_cast = conv(bias = unet_up_blocks_0_attentions_0_transformer_blocks_3_attn1_to_out_0_bias_to_fp16, dilations = var_7476, groups = var_31, pad = var_7478_pad_0, pad_type = var_7478_pad_type_0, strides = var_7474, weight = unet_up_blocks_0_attentions_0_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized, x = input_463_cast)[name = tensor("op_7478_cast")]; + tensor inputs_225_cast = add(x = var_7478_cast, y = inputs_223_cast)[name = tensor("inputs_225_cast")]; + tensor var_7482 = const()[name = tensor("op_7482"), val = tensor([1])]; + tensor channels_mean_225_cast = reduce_mean(axes = var_7482, keep_dims = var_23, x = inputs_225_cast)[name = tensor("channels_mean_225_cast")]; + tensor zero_mean_225_cast = sub(x = inputs_225_cast, y = channels_mean_225_cast)[name = tensor("zero_mean_225_cast")]; + tensor zero_mean_sq_225_cast = mul(x = zero_mean_225_cast, y = zero_mean_225_cast)[name = tensor("zero_mean_sq_225_cast")]; + tensor var_7486 = const()[name = tensor("op_7486"), val = tensor([1])]; + tensor var_7487_cast = reduce_mean(axes = var_7486, keep_dims = var_23, x = zero_mean_sq_225_cast)[name = tensor("op_7487_cast")]; + tensor var_7488_to_fp16 = const()[name = tensor("op_7488_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_7489_cast = add(x = var_7487_cast, y = var_7488_to_fp16)[name = tensor("op_7489_cast")]; + tensor denom_225_epsilon_0_to_fp16 = const()[name = tensor("denom_225_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_225_cast = rsqrt(epsilon = denom_225_epsilon_0_to_fp16, x = var_7489_cast)[name = tensor("denom_225_cast")]; + tensor out_225_cast = mul(x = zero_mean_225_cast, y = denom_225_cast)[name = tensor("out_225_cast")]; + tensor var_7493_to_fp16 = const()[name = tensor("op_7493_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1054527680)))]; + tensor var_7494_cast = add(x = out_225_cast, y = var_7493_to_fp16)[name = tensor("op_7494_cast")]; + tensor var_7496_to_fp16 = const()[name = tensor("op_7496_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1054530304)))]; + tensor hidden_states_307_cast = mul(x = var_7494_cast, y = var_7496_to_fp16)[name = tensor("hidden_states_307_cast")]; + tensor var_7503 = const()[name = tensor("op_7503"), val = tensor([1, 1])]; + tensor var_7505 = const()[name = tensor("op_7505"), val = tensor([1, 1])]; + tensor q_151_pad_type_0 = const()[name = tensor("q_151_pad_type_0"), val = tensor("custom")]; + tensor q_151_pad_0 = const()[name = tensor("q_151_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1054532928))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1055761792))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_151_cast = 
conv(dilations = var_7505, groups = var_31, pad = q_151_pad_0, pad_type = q_151_pad_type_0, strides = var_7503, weight = unet_up_blocks_0_attentions_0_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_307_cast)[name = tensor("q_151_cast")]; + tensor var_7509 = const()[name = tensor("op_7509"), val = tensor([1, 1])]; + tensor var_7511 = const()[name = tensor("op_7511"), val = tensor([1, 1])]; + tensor k_151_pad_type_0 = const()[name = tensor("k_151_pad_type_0"), val = tensor("custom")]; + tensor k_151_pad_0 = const()[name = tensor("k_151_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1055761984))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1057728128))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_151_cast = conv(dilations = var_7511, groups = var_31, pad = k_151_pad_0, pad_type = k_151_pad_type_0, strides = var_7509, weight = unet_up_blocks_0_attentions_0_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_151_cast")]; + tensor var_7515 = const()[name = tensor("op_7515"), val = tensor([1, 1])]; + tensor var_7517 = const()[name = tensor("op_7517"), val = tensor([1, 1])]; + tensor v_151_pad_type_0 = const()[name = tensor("v_151_pad_type_0"), val = tensor("custom")]; + tensor v_151_pad_0 = const()[name = tensor("v_151_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1057728320))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1059694464))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_151_cast = conv(dilations = var_7517, groups = var_31, pad = v_151_pad_0, pad_type = v_151_pad_type_0, strides = var_7515, weight = unet_up_blocks_0_attentions_0_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_151_cast")]; + tensor var_7521 = const()[name = tensor("op_7521"), val = tensor([2, 20, 64, -1])]; + tensor var_7522_cast = reshape(shape = var_7521, x = q_151_cast)[name = tensor("op_7522_cast")]; + tensor var_7523 = const()[name = tensor("op_7523"), val = tensor([2, 20, 64, -1])]; + tensor var_7524_cast = reshape(shape = var_7523, x = k_151_cast)[name = tensor("op_7524_cast")]; + tensor var_7525 = const()[name = tensor("op_7525"), val = tensor([2, 20, 64, -1])]; + tensor var_7526_cast = reshape(shape = var_7525, x = v_151_cast)[name = tensor("op_7526_cast")]; + tensor attn_weights_301_transpose_x_0 = const()[name = tensor("attn_weights_301_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_301_transpose_y_0 = const()[name = tensor("attn_weights_301_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_301_cast = matmul(transpose_x = attn_weights_301_transpose_x_0, transpose_y = attn_weights_301_transpose_y_0, x = var_7522_cast, y = var_7524_cast)[name = tensor("attn_weights_301_cast")]; + tensor attn_weights_303_cast = mul(x = attn_weights_301_cast, y = 
var_12_to_fp16)[name = tensor("attn_weights_303_cast")]; + tensor var_7530_cast = softmax(axis = var_18, x = attn_weights_303_cast)[name = tensor("op_7530_cast")]; + tensor attn_151_transpose_x_0 = const()[name = tensor("attn_151_transpose_x_0"), val = tensor(false)]; + tensor attn_151_transpose_y_0 = const()[name = tensor("attn_151_transpose_y_0"), val = tensor(true)]; + tensor attn_151_cast = matmul(transpose_x = attn_151_transpose_x_0, transpose_y = attn_151_transpose_y_0, x = var_7526_cast, y = var_7530_cast)[name = tensor("attn_151_cast")]; + tensor var_7534 = const()[name = tensor("op_7534"), val = tensor([2, 1280, 1, -1])]; + tensor input_465_cast = reshape(shape = var_7534, x = attn_151_cast)[name = tensor("input_465_cast")]; + tensor var_7539 = const()[name = tensor("op_7539"), val = tensor([1, 1])]; + tensor var_7541 = const()[name = tensor("op_7541"), val = tensor([1, 1])]; + tensor var_7543_pad_type_0 = const()[name = tensor("op_7543_pad_type_0"), val = tensor("custom")]; + tensor var_7543_pad_0 = const()[name = tensor("op_7543_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1059694656))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1060923520))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_3_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_3_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1060923712)))]; + tensor var_7543_cast = conv(bias = unet_up_blocks_0_attentions_0_transformer_blocks_3_attn2_to_out_0_bias_to_fp16, dilations = var_7541, groups = var_31, pad = var_7543_pad_0, pad_type = var_7543_pad_type_0, strides = var_7539, weight = unet_up_blocks_0_attentions_0_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized, x = input_465_cast)[name = tensor("op_7543_cast")]; + tensor inputs_227_cast = add(x = var_7543_cast, y = inputs_225_cast)[name = tensor("inputs_227_cast")]; + tensor var_7547 = const()[name = tensor("op_7547"), val = tensor([1])]; + tensor channels_mean_227_cast = reduce_mean(axes = var_7547, keep_dims = var_23, x = inputs_227_cast)[name = tensor("channels_mean_227_cast")]; + tensor zero_mean_227_cast = sub(x = inputs_227_cast, y = channels_mean_227_cast)[name = tensor("zero_mean_227_cast")]; + tensor zero_mean_sq_227_cast = mul(x = zero_mean_227_cast, y = zero_mean_227_cast)[name = tensor("zero_mean_sq_227_cast")]; + tensor var_7551 = const()[name = tensor("op_7551"), val = tensor([1])]; + tensor var_7552_cast = reduce_mean(axes = var_7551, keep_dims = var_23, x = zero_mean_sq_227_cast)[name = tensor("op_7552_cast")]; + tensor var_7553_to_fp16 = const()[name = tensor("op_7553_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_7554_cast = add(x = var_7552_cast, y = var_7553_to_fp16)[name = tensor("op_7554_cast")]; + tensor denom_227_epsilon_0_to_fp16 = const()[name = tensor("denom_227_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_227_cast = rsqrt(epsilon = denom_227_epsilon_0_to_fp16, x = var_7554_cast)[name = tensor("denom_227_cast")]; + tensor out_227_cast = mul(x = zero_mean_227_cast, y = denom_227_cast)[name 
= tensor("out_227_cast")]; + tensor var_7558_to_fp16 = const()[name = tensor("op_7558_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1060926336)))]; + tensor var_7559_cast = add(x = out_227_cast, y = var_7558_to_fp16)[name = tensor("op_7559_cast")]; + tensor var_7561_to_fp16 = const()[name = tensor("op_7561_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1060928960)))]; + tensor input_467_cast = mul(x = var_7559_cast, y = var_7561_to_fp16)[name = tensor("input_467_cast")]; + tensor var_7569 = const()[name = tensor("op_7569"), val = tensor([1, 1])]; + tensor var_7571 = const()[name = tensor("op_7571"), val = tensor([1, 1])]; + tensor var_7573_pad_type_0 = const()[name = tensor("op_7573_pad_type_0"), val = tensor("custom")]; + tensor var_7573_pad_0 = const()[name = tensor("op_7573_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1060931584))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1070762048))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_3_ff_net_0_proj_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1070762240))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1070769984))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_3_ff_net_0_proj_bias_to_fp16_palettized"), shape = tensor([10240])]; + tensor var_7573_cast = conv(bias = unet_up_blocks_0_attentions_0_transformer_blocks_3_ff_net_0_proj_bias_to_fp16_palettized, dilations = var_7571, groups = var_31, pad = var_7573_pad_0, pad_type = var_7573_pad_type_0, strides = var_7569, weight = unet_up_blocks_0_attentions_0_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized, x = input_467_cast)[name = tensor("op_7573_cast")]; + tensor var_7574_split_sizes_0 = const()[name = tensor("op_7574_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_7574_axis_0 = const()[name = tensor("op_7574_axis_0"), val = tensor(1)]; + tensor var_7574_cast_0, tensor var_7574_cast_1 = split(axis = var_7574_axis_0, split_sizes = var_7574_split_sizes_0, x = var_7573_cast)[name = tensor("op_7574_cast")]; + tensor var_7576_mode_0 = const()[name = tensor("op_7576_mode_0"), val = tensor("EXACT")]; + tensor var_7576_cast = gelu(mode = var_7576_mode_0, x = var_7574_cast_1)[name = tensor("op_7576_cast")]; + tensor input_469_cast = mul(x = var_7574_cast_0, y = var_7576_cast)[name = tensor("input_469_cast")]; + tensor var_7580 = const()[name = tensor("op_7580"), val = tensor([1, 1])]; + tensor var_7582 = const()[name = tensor("op_7582"), val = tensor([1, 1])]; + tensor var_7584_pad_type_0 = const()[name = tensor("op_7584_pad_type_0"), val = tensor("custom")]; + tensor var_7584_pad_0 = const()[name = tensor("op_7584_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1070770176))), lut = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(1075685440))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_3_ff_net_2_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_3_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1075685632)))]; + tensor var_7584_cast = conv(bias = unet_up_blocks_0_attentions_0_transformer_blocks_3_ff_net_2_bias_to_fp16, dilations = var_7582, groups = var_31, pad = var_7584_pad_0, pad_type = var_7584_pad_type_0, strides = var_7580, weight = unet_up_blocks_0_attentions_0_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized, x = input_469_cast)[name = tensor("op_7584_cast")]; + tensor inputs_229_cast = add(x = var_7584_cast, y = inputs_227_cast)[name = tensor("inputs_229_cast")]; + tensor var_7594 = const()[name = tensor("op_7594"), val = tensor([1])]; + tensor channels_mean_229_cast = reduce_mean(axes = var_7594, keep_dims = var_23, x = inputs_229_cast)[name = tensor("channels_mean_229_cast")]; + tensor zero_mean_229_cast = sub(x = inputs_229_cast, y = channels_mean_229_cast)[name = tensor("zero_mean_229_cast")]; + tensor zero_mean_sq_229_cast = mul(x = zero_mean_229_cast, y = zero_mean_229_cast)[name = tensor("zero_mean_sq_229_cast")]; + tensor var_7598 = const()[name = tensor("op_7598"), val = tensor([1])]; + tensor var_7599_cast = reduce_mean(axes = var_7598, keep_dims = var_23, x = zero_mean_sq_229_cast)[name = tensor("op_7599_cast")]; + tensor var_7600_to_fp16 = const()[name = tensor("op_7600_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_7601_cast = add(x = var_7599_cast, y = var_7600_to_fp16)[name = tensor("op_7601_cast")]; + tensor denom_229_epsilon_0_to_fp16 = const()[name = tensor("denom_229_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_229_cast = rsqrt(epsilon = denom_229_epsilon_0_to_fp16, x = var_7601_cast)[name = tensor("denom_229_cast")]; + tensor out_229_cast = mul(x = zero_mean_229_cast, y = denom_229_cast)[name = tensor("out_229_cast")]; + tensor var_7605_to_fp16 = const()[name = tensor("op_7605_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1075688256)))]; + tensor var_7606_cast = add(x = out_229_cast, y = var_7605_to_fp16)[name = tensor("op_7606_cast")]; + tensor var_7608_to_fp16 = const()[name = tensor("op_7608_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1075690880)))]; + tensor hidden_states_311_cast = mul(x = var_7606_cast, y = var_7608_to_fp16)[name = tensor("hidden_states_311_cast")]; + tensor var_7615 = const()[name = tensor("op_7615"), val = tensor([1, 1])]; + tensor var_7617 = const()[name = tensor("op_7617"), val = tensor([1, 1])]; + tensor q_153_pad_type_0 = const()[name = tensor("q_153_pad_type_0"), val = tensor("custom")]; + tensor q_153_pad_0 = const()[name = tensor("q_153_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1075693504))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1076922368))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized"), shape = 
tensor([1280, 1280, 1, 1])]; + tensor q_153_cast = conv(dilations = var_7617, groups = var_31, pad = q_153_pad_0, pad_type = q_153_pad_type_0, strides = var_7615, weight = unet_up_blocks_0_attentions_0_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_311_cast)[name = tensor("q_153_cast")]; + tensor var_7621 = const()[name = tensor("op_7621"), val = tensor([1, 1])]; + tensor var_7623 = const()[name = tensor("op_7623"), val = tensor([1, 1])]; + tensor k_153_pad_type_0 = const()[name = tensor("k_153_pad_type_0"), val = tensor("custom")]; + tensor k_153_pad_0 = const()[name = tensor("k_153_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1076922560))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1078151424))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_153_cast = conv(dilations = var_7623, groups = var_31, pad = k_153_pad_0, pad_type = k_153_pad_type_0, strides = var_7621, weight = unet_up_blocks_0_attentions_0_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_311_cast)[name = tensor("k_153_cast")]; + tensor var_7627 = const()[name = tensor("op_7627"), val = tensor([1, 1])]; + tensor var_7629 = const()[name = tensor("op_7629"), val = tensor([1, 1])]; + tensor v_153_pad_type_0 = const()[name = tensor("v_153_pad_type_0"), val = tensor("custom")]; + tensor v_153_pad_0 = const()[name = tensor("v_153_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1078151616))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1079380480))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_153_cast = conv(dilations = var_7629, groups = var_31, pad = v_153_pad_0, pad_type = v_153_pad_type_0, strides = var_7627, weight = unet_up_blocks_0_attentions_0_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_311_cast)[name = tensor("v_153_cast")]; + tensor var_7633 = const()[name = tensor("op_7633"), val = tensor([2, 20, 64, -1])]; + tensor var_7634_cast = reshape(shape = var_7633, x = q_153_cast)[name = tensor("op_7634_cast")]; + tensor var_7635 = const()[name = tensor("op_7635"), val = tensor([2, 20, 64, -1])]; + tensor var_7636_cast = reshape(shape = var_7635, x = k_153_cast)[name = tensor("op_7636_cast")]; + tensor var_7637 = const()[name = tensor("op_7637"), val = tensor([2, 20, 64, -1])]; + tensor var_7638_cast = reshape(shape = var_7637, x = v_153_cast)[name = tensor("op_7638_cast")]; + tensor attn_weights_305_transpose_x_0 = const()[name = tensor("attn_weights_305_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_305_transpose_y_0 = const()[name = tensor("attn_weights_305_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_305_cast = matmul(transpose_x = attn_weights_305_transpose_x_0, transpose_y = attn_weights_305_transpose_y_0, x = var_7634_cast, y = var_7636_cast)[name = tensor("attn_weights_305_cast")]; + tensor 
attn_weights_307_cast = mul(x = attn_weights_305_cast, y = var_12_to_fp16)[name = tensor("attn_weights_307_cast")]; + tensor var_7642_cast = softmax(axis = var_18, x = attn_weights_307_cast)[name = tensor("op_7642_cast")]; + tensor attn_153_transpose_x_0 = const()[name = tensor("attn_153_transpose_x_0"), val = tensor(false)]; + tensor attn_153_transpose_y_0 = const()[name = tensor("attn_153_transpose_y_0"), val = tensor(true)]; + tensor attn_153_cast = matmul(transpose_x = attn_153_transpose_x_0, transpose_y = attn_153_transpose_y_0, x = var_7638_cast, y = var_7642_cast)[name = tensor("attn_153_cast")]; + tensor var_7646 = const()[name = tensor("op_7646"), val = tensor([2, 1280, 1, -1])]; + tensor input_471_cast = reshape(shape = var_7646, x = attn_153_cast)[name = tensor("input_471_cast")]; + tensor var_7651 = const()[name = tensor("op_7651"), val = tensor([1, 1])]; + tensor var_7653 = const()[name = tensor("op_7653"), val = tensor([1, 1])]; + tensor var_7655_pad_type_0 = const()[name = tensor("op_7655_pad_type_0"), val = tensor("custom")]; + tensor var_7655_pad_0 = const()[name = tensor("op_7655_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1079380672))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1080609536))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_4_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_4_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1080609728)))]; + tensor var_7655_cast = conv(bias = unet_up_blocks_0_attentions_0_transformer_blocks_4_attn1_to_out_0_bias_to_fp16, dilations = var_7653, groups = var_31, pad = var_7655_pad_0, pad_type = var_7655_pad_type_0, strides = var_7651, weight = unet_up_blocks_0_attentions_0_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized, x = input_471_cast)[name = tensor("op_7655_cast")]; + tensor inputs_231_cast = add(x = var_7655_cast, y = inputs_229_cast)[name = tensor("inputs_231_cast")]; + tensor var_7659 = const()[name = tensor("op_7659"), val = tensor([1])]; + tensor channels_mean_231_cast = reduce_mean(axes = var_7659, keep_dims = var_23, x = inputs_231_cast)[name = tensor("channels_mean_231_cast")]; + tensor zero_mean_231_cast = sub(x = inputs_231_cast, y = channels_mean_231_cast)[name = tensor("zero_mean_231_cast")]; + tensor zero_mean_sq_231_cast = mul(x = zero_mean_231_cast, y = zero_mean_231_cast)[name = tensor("zero_mean_sq_231_cast")]; + tensor var_7663 = const()[name = tensor("op_7663"), val = tensor([1])]; + tensor var_7664_cast = reduce_mean(axes = var_7663, keep_dims = var_23, x = zero_mean_sq_231_cast)[name = tensor("op_7664_cast")]; + tensor var_7665_to_fp16 = const()[name = tensor("op_7665_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_7666_cast = add(x = var_7664_cast, y = var_7665_to_fp16)[name = tensor("op_7666_cast")]; + tensor denom_231_epsilon_0_to_fp16 = const()[name = tensor("denom_231_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_231_cast = rsqrt(epsilon = denom_231_epsilon_0_to_fp16, x = var_7666_cast)[name = tensor("denom_231_cast")]; + tensor 
out_231_cast = mul(x = zero_mean_231_cast, y = denom_231_cast)[name = tensor("out_231_cast")]; + tensor var_7670_to_fp16 = const()[name = tensor("op_7670_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1080612352)))]; + tensor var_7671_cast = add(x = out_231_cast, y = var_7670_to_fp16)[name = tensor("op_7671_cast")]; + tensor var_7673_to_fp16 = const()[name = tensor("op_7673_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1080614976)))]; + tensor hidden_states_313_cast = mul(x = var_7671_cast, y = var_7673_to_fp16)[name = tensor("hidden_states_313_cast")]; + tensor var_7680 = const()[name = tensor("op_7680"), val = tensor([1, 1])]; + tensor var_7682 = const()[name = tensor("op_7682"), val = tensor([1, 1])]; + tensor q_155_pad_type_0 = const()[name = tensor("q_155_pad_type_0"), val = tensor("custom")]; + tensor q_155_pad_0 = const()[name = tensor("q_155_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1080617600))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1081846464))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_155_cast = conv(dilations = var_7682, groups = var_31, pad = q_155_pad_0, pad_type = q_155_pad_type_0, strides = var_7680, weight = unet_up_blocks_0_attentions_0_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_313_cast)[name = tensor("q_155_cast")]; + tensor var_7686 = const()[name = tensor("op_7686"), val = tensor([1, 1])]; + tensor var_7688 = const()[name = tensor("op_7688"), val = tensor([1, 1])]; + tensor k_155_pad_type_0 = const()[name = tensor("k_155_pad_type_0"), val = tensor("custom")]; + tensor k_155_pad_0 = const()[name = tensor("k_155_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1081846656))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1083812800))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_155_cast = conv(dilations = var_7688, groups = var_31, pad = k_155_pad_0, pad_type = k_155_pad_type_0, strides = var_7686, weight = unet_up_blocks_0_attentions_0_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_155_cast")]; + tensor var_7692 = const()[name = tensor("op_7692"), val = tensor([1, 1])]; + tensor var_7694 = const()[name = tensor("op_7694"), val = tensor([1, 1])]; + tensor v_155_pad_type_0 = const()[name = tensor("v_155_pad_type_0"), val = tensor("custom")]; + tensor v_155_pad_0 = const()[name = tensor("v_155_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1083812992))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1085779136))), name = 
tensor("unet_up_blocks_0_attentions_0_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_155_cast = conv(dilations = var_7694, groups = var_31, pad = v_155_pad_0, pad_type = v_155_pad_type_0, strides = var_7692, weight = unet_up_blocks_0_attentions_0_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_155_cast")]; + tensor var_7698 = const()[name = tensor("op_7698"), val = tensor([2, 20, 64, -1])]; + tensor var_7699_cast = reshape(shape = var_7698, x = q_155_cast)[name = tensor("op_7699_cast")]; + tensor var_7700 = const()[name = tensor("op_7700"), val = tensor([2, 20, 64, -1])]; + tensor var_7701_cast = reshape(shape = var_7700, x = k_155_cast)[name = tensor("op_7701_cast")]; + tensor var_7702 = const()[name = tensor("op_7702"), val = tensor([2, 20, 64, -1])]; + tensor var_7703_cast = reshape(shape = var_7702, x = v_155_cast)[name = tensor("op_7703_cast")]; + tensor attn_weights_309_transpose_x_0 = const()[name = tensor("attn_weights_309_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_309_transpose_y_0 = const()[name = tensor("attn_weights_309_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_309_cast = matmul(transpose_x = attn_weights_309_transpose_x_0, transpose_y = attn_weights_309_transpose_y_0, x = var_7699_cast, y = var_7701_cast)[name = tensor("attn_weights_309_cast")]; + tensor attn_weights_311_cast = mul(x = attn_weights_309_cast, y = var_12_to_fp16)[name = tensor("attn_weights_311_cast")]; + tensor var_7707_cast = softmax(axis = var_18, x = attn_weights_311_cast)[name = tensor("op_7707_cast")]; + tensor attn_155_transpose_x_0 = const()[name = tensor("attn_155_transpose_x_0"), val = tensor(false)]; + tensor attn_155_transpose_y_0 = const()[name = tensor("attn_155_transpose_y_0"), val = tensor(true)]; + tensor attn_155_cast = matmul(transpose_x = attn_155_transpose_x_0, transpose_y = attn_155_transpose_y_0, x = var_7703_cast, y = var_7707_cast)[name = tensor("attn_155_cast")]; + tensor var_7711 = const()[name = tensor("op_7711"), val = tensor([2, 1280, 1, -1])]; + tensor input_473_cast = reshape(shape = var_7711, x = attn_155_cast)[name = tensor("input_473_cast")]; + tensor var_7716 = const()[name = tensor("op_7716"), val = tensor([1, 1])]; + tensor var_7718 = const()[name = tensor("op_7718"), val = tensor([1, 1])]; + tensor var_7720_pad_type_0 = const()[name = tensor("op_7720_pad_type_0"), val = tensor("custom")]; + tensor var_7720_pad_0 = const()[name = tensor("op_7720_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1085779328))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1087008192))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_4_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_4_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1087008384)))]; + tensor var_7720_cast = conv(bias = unet_up_blocks_0_attentions_0_transformer_blocks_4_attn2_to_out_0_bias_to_fp16, dilations = var_7718, groups = var_31, pad = var_7720_pad_0, 
pad_type = var_7720_pad_type_0, strides = var_7716, weight = unet_up_blocks_0_attentions_0_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized, x = input_473_cast)[name = tensor("op_7720_cast")]; + tensor inputs_233_cast = add(x = var_7720_cast, y = inputs_231_cast)[name = tensor("inputs_233_cast")]; + tensor var_7724 = const()[name = tensor("op_7724"), val = tensor([1])]; + tensor channels_mean_233_cast = reduce_mean(axes = var_7724, keep_dims = var_23, x = inputs_233_cast)[name = tensor("channels_mean_233_cast")]; + tensor zero_mean_233_cast = sub(x = inputs_233_cast, y = channels_mean_233_cast)[name = tensor("zero_mean_233_cast")]; + tensor zero_mean_sq_233_cast = mul(x = zero_mean_233_cast, y = zero_mean_233_cast)[name = tensor("zero_mean_sq_233_cast")]; + tensor var_7728 = const()[name = tensor("op_7728"), val = tensor([1])]; + tensor var_7729_cast = reduce_mean(axes = var_7728, keep_dims = var_23, x = zero_mean_sq_233_cast)[name = tensor("op_7729_cast")]; + tensor var_7730_to_fp16 = const()[name = tensor("op_7730_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_7731_cast = add(x = var_7729_cast, y = var_7730_to_fp16)[name = tensor("op_7731_cast")]; + tensor denom_233_epsilon_0_to_fp16 = const()[name = tensor("denom_233_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_233_cast = rsqrt(epsilon = denom_233_epsilon_0_to_fp16, x = var_7731_cast)[name = tensor("denom_233_cast")]; + tensor out_233_cast = mul(x = zero_mean_233_cast, y = denom_233_cast)[name = tensor("out_233_cast")]; + tensor var_7735_to_fp16 = const()[name = tensor("op_7735_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1087011008)))]; + tensor var_7736_cast = add(x = out_233_cast, y = var_7735_to_fp16)[name = tensor("op_7736_cast")]; + tensor var_7738_to_fp16 = const()[name = tensor("op_7738_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1087013632)))]; + tensor input_475_cast = mul(x = var_7736_cast, y = var_7738_to_fp16)[name = tensor("input_475_cast")]; + tensor var_7746 = const()[name = tensor("op_7746"), val = tensor([1, 1])]; + tensor var_7748 = const()[name = tensor("op_7748"), val = tensor([1, 1])]; + tensor var_7750_pad_type_0 = const()[name = tensor("op_7750_pad_type_0"), val = tensor("custom")]; + tensor var_7750_pad_0 = const()[name = tensor("op_7750_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1087016256))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1096846720))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_4_ff_net_0_proj_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1096846912))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1096854656))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_4_ff_net_0_proj_bias_to_fp16_palettized"), shape = tensor([10240])]; + tensor var_7750_cast = conv(bias = unet_up_blocks_0_attentions_0_transformer_blocks_4_ff_net_0_proj_bias_to_fp16_palettized, dilations = var_7748, groups = var_31, pad = 
var_7750_pad_0, pad_type = var_7750_pad_type_0, strides = var_7746, weight = unet_up_blocks_0_attentions_0_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized, x = input_475_cast)[name = tensor("op_7750_cast")]; + tensor var_7751_split_sizes_0 = const()[name = tensor("op_7751_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_7751_axis_0 = const()[name = tensor("op_7751_axis_0"), val = tensor(1)]; + tensor var_7751_cast_0, tensor var_7751_cast_1 = split(axis = var_7751_axis_0, split_sizes = var_7751_split_sizes_0, x = var_7750_cast)[name = tensor("op_7751_cast")]; + tensor var_7753_mode_0 = const()[name = tensor("op_7753_mode_0"), val = tensor("EXACT")]; + tensor var_7753_cast = gelu(mode = var_7753_mode_0, x = var_7751_cast_1)[name = tensor("op_7753_cast")]; + tensor input_477_cast = mul(x = var_7751_cast_0, y = var_7753_cast)[name = tensor("input_477_cast")]; + tensor var_7757 = const()[name = tensor("op_7757"), val = tensor([1, 1])]; + tensor var_7759 = const()[name = tensor("op_7759"), val = tensor([1, 1])]; + tensor var_7761_pad_type_0 = const()[name = tensor("op_7761_pad_type_0"), val = tensor("custom")]; + tensor var_7761_pad_0 = const()[name = tensor("op_7761_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1096854848))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1101770112))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_4_ff_net_2_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_4_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1101770304)))]; + tensor var_7761_cast = conv(bias = unet_up_blocks_0_attentions_0_transformer_blocks_4_ff_net_2_bias_to_fp16, dilations = var_7759, groups = var_31, pad = var_7761_pad_0, pad_type = var_7761_pad_type_0, strides = var_7757, weight = unet_up_blocks_0_attentions_0_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized, x = input_477_cast)[name = tensor("op_7761_cast")]; + tensor inputs_235_cast = add(x = var_7761_cast, y = inputs_233_cast)[name = tensor("inputs_235_cast")]; + tensor var_7771 = const()[name = tensor("op_7771"), val = tensor([1])]; + tensor channels_mean_235_cast = reduce_mean(axes = var_7771, keep_dims = var_23, x = inputs_235_cast)[name = tensor("channels_mean_235_cast")]; + tensor zero_mean_235_cast = sub(x = inputs_235_cast, y = channels_mean_235_cast)[name = tensor("zero_mean_235_cast")]; + tensor zero_mean_sq_235_cast = mul(x = zero_mean_235_cast, y = zero_mean_235_cast)[name = tensor("zero_mean_sq_235_cast")]; + tensor var_7775 = const()[name = tensor("op_7775"), val = tensor([1])]; + tensor var_7776_cast = reduce_mean(axes = var_7775, keep_dims = var_23, x = zero_mean_sq_235_cast)[name = tensor("op_7776_cast")]; + tensor var_7777_to_fp16 = const()[name = tensor("op_7777_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_7778_cast = add(x = var_7776_cast, y = var_7777_to_fp16)[name = tensor("op_7778_cast")]; + tensor denom_235_epsilon_0_to_fp16 = const()[name = tensor("denom_235_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_235_cast = rsqrt(epsilon = denom_235_epsilon_0_to_fp16, x = 
var_7778_cast)[name = tensor("denom_235_cast")]; + tensor out_235_cast = mul(x = zero_mean_235_cast, y = denom_235_cast)[name = tensor("out_235_cast")]; + tensor var_7782_to_fp16 = const()[name = tensor("op_7782_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1101772928)))]; + tensor var_7783_cast = add(x = out_235_cast, y = var_7782_to_fp16)[name = tensor("op_7783_cast")]; + tensor var_7785_to_fp16 = const()[name = tensor("op_7785_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1101775552)))]; + tensor hidden_states_317_cast = mul(x = var_7783_cast, y = var_7785_to_fp16)[name = tensor("hidden_states_317_cast")]; + tensor var_7792 = const()[name = tensor("op_7792"), val = tensor([1, 1])]; + tensor var_7794 = const()[name = tensor("op_7794"), val = tensor([1, 1])]; + tensor q_157_pad_type_0 = const()[name = tensor("q_157_pad_type_0"), val = tensor("custom")]; + tensor q_157_pad_0 = const()[name = tensor("q_157_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1101778176))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1103007040))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_157_cast = conv(dilations = var_7794, groups = var_31, pad = q_157_pad_0, pad_type = q_157_pad_type_0, strides = var_7792, weight = unet_up_blocks_0_attentions_0_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_317_cast)[name = tensor("q_157_cast")]; + tensor var_7798 = const()[name = tensor("op_7798"), val = tensor([1, 1])]; + tensor var_7800 = const()[name = tensor("op_7800"), val = tensor([1, 1])]; + tensor k_157_pad_type_0 = const()[name = tensor("k_157_pad_type_0"), val = tensor("custom")]; + tensor k_157_pad_0 = const()[name = tensor("k_157_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1103007232))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1104236096))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_157_cast = conv(dilations = var_7800, groups = var_31, pad = k_157_pad_0, pad_type = k_157_pad_type_0, strides = var_7798, weight = unet_up_blocks_0_attentions_0_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_317_cast)[name = tensor("k_157_cast")]; + tensor var_7804 = const()[name = tensor("op_7804"), val = tensor([1, 1])]; + tensor var_7806 = const()[name = tensor("op_7806"), val = tensor([1, 1])]; + tensor v_157_pad_type_0 = const()[name = tensor("v_157_pad_type_0"), val = tensor("custom")]; + tensor v_157_pad_0 = const()[name = tensor("v_157_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1104236288))), lut = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(1105465152))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_157_cast = conv(dilations = var_7806, groups = var_31, pad = v_157_pad_0, pad_type = v_157_pad_type_0, strides = var_7804, weight = unet_up_blocks_0_attentions_0_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_317_cast)[name = tensor("v_157_cast")]; + tensor var_7810 = const()[name = tensor("op_7810"), val = tensor([2, 20, 64, -1])]; + tensor var_7811_cast = reshape(shape = var_7810, x = q_157_cast)[name = tensor("op_7811_cast")]; + tensor var_7812 = const()[name = tensor("op_7812"), val = tensor([2, 20, 64, -1])]; + tensor var_7813_cast = reshape(shape = var_7812, x = k_157_cast)[name = tensor("op_7813_cast")]; + tensor var_7814 = const()[name = tensor("op_7814"), val = tensor([2, 20, 64, -1])]; + tensor var_7815_cast = reshape(shape = var_7814, x = v_157_cast)[name = tensor("op_7815_cast")]; + tensor attn_weights_313_transpose_x_0 = const()[name = tensor("attn_weights_313_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_313_transpose_y_0 = const()[name = tensor("attn_weights_313_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_313_cast = matmul(transpose_x = attn_weights_313_transpose_x_0, transpose_y = attn_weights_313_transpose_y_0, x = var_7811_cast, y = var_7813_cast)[name = tensor("attn_weights_313_cast")]; + tensor attn_weights_315_cast = mul(x = attn_weights_313_cast, y = var_12_to_fp16)[name = tensor("attn_weights_315_cast")]; + tensor var_7819_cast = softmax(axis = var_18, x = attn_weights_315_cast)[name = tensor("op_7819_cast")]; + tensor attn_157_transpose_x_0 = const()[name = tensor("attn_157_transpose_x_0"), val = tensor(false)]; + tensor attn_157_transpose_y_0 = const()[name = tensor("attn_157_transpose_y_0"), val = tensor(true)]; + tensor attn_157_cast = matmul(transpose_x = attn_157_transpose_x_0, transpose_y = attn_157_transpose_y_0, x = var_7815_cast, y = var_7819_cast)[name = tensor("attn_157_cast")]; + tensor var_7823 = const()[name = tensor("op_7823"), val = tensor([2, 1280, 1, -1])]; + tensor input_479_cast = reshape(shape = var_7823, x = attn_157_cast)[name = tensor("input_479_cast")]; + tensor var_7828 = const()[name = tensor("op_7828"), val = tensor([1, 1])]; + tensor var_7830 = const()[name = tensor("op_7830"), val = tensor([1, 1])]; + tensor var_7832_pad_type_0 = const()[name = tensor("op_7832_pad_type_0"), val = tensor("custom")]; + tensor var_7832_pad_0 = const()[name = tensor("op_7832_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1105465344))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1106694208))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_5_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_5_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1106694400)))]; + tensor var_7832_cast = conv(bias = 
unet_up_blocks_0_attentions_0_transformer_blocks_5_attn1_to_out_0_bias_to_fp16, dilations = var_7830, groups = var_31, pad = var_7832_pad_0, pad_type = var_7832_pad_type_0, strides = var_7828, weight = unet_up_blocks_0_attentions_0_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized, x = input_479_cast)[name = tensor("op_7832_cast")]; + tensor inputs_237_cast = add(x = var_7832_cast, y = inputs_235_cast)[name = tensor("inputs_237_cast")]; + tensor var_7836 = const()[name = tensor("op_7836"), val = tensor([1])]; + tensor channels_mean_237_cast = reduce_mean(axes = var_7836, keep_dims = var_23, x = inputs_237_cast)[name = tensor("channels_mean_237_cast")]; + tensor zero_mean_237_cast = sub(x = inputs_237_cast, y = channels_mean_237_cast)[name = tensor("zero_mean_237_cast")]; + tensor zero_mean_sq_237_cast = mul(x = zero_mean_237_cast, y = zero_mean_237_cast)[name = tensor("zero_mean_sq_237_cast")]; + tensor var_7840 = const()[name = tensor("op_7840"), val = tensor([1])]; + tensor var_7841_cast = reduce_mean(axes = var_7840, keep_dims = var_23, x = zero_mean_sq_237_cast)[name = tensor("op_7841_cast")]; + tensor var_7842_to_fp16 = const()[name = tensor("op_7842_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_7843_cast = add(x = var_7841_cast, y = var_7842_to_fp16)[name = tensor("op_7843_cast")]; + tensor denom_237_epsilon_0_to_fp16 = const()[name = tensor("denom_237_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_237_cast = rsqrt(epsilon = denom_237_epsilon_0_to_fp16, x = var_7843_cast)[name = tensor("denom_237_cast")]; + tensor out_237_cast = mul(x = zero_mean_237_cast, y = denom_237_cast)[name = tensor("out_237_cast")]; + tensor var_7847_to_fp16 = const()[name = tensor("op_7847_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1106697024)))]; + tensor var_7848_cast = add(x = out_237_cast, y = var_7847_to_fp16)[name = tensor("op_7848_cast")]; + tensor var_7850_to_fp16 = const()[name = tensor("op_7850_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1106699648)))]; + tensor hidden_states_319_cast = mul(x = var_7848_cast, y = var_7850_to_fp16)[name = tensor("hidden_states_319_cast")]; + tensor var_7857 = const()[name = tensor("op_7857"), val = tensor([1, 1])]; + tensor var_7859 = const()[name = tensor("op_7859"), val = tensor([1, 1])]; + tensor q_159_pad_type_0 = const()[name = tensor("q_159_pad_type_0"), val = tensor("custom")]; + tensor q_159_pad_0 = const()[name = tensor("q_159_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1106702272))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1107931136))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_159_cast = conv(dilations = var_7859, groups = var_31, pad = q_159_pad_0, pad_type = q_159_pad_type_0, strides = var_7857, weight = unet_up_blocks_0_attentions_0_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_319_cast)[name = tensor("q_159_cast")]; + tensor var_7863 = const()[name = tensor("op_7863"), val = tensor([1, 1])]; + tensor var_7865 = const()[name = tensor("op_7865"), val = tensor([1, 1])]; + tensor k_159_pad_type_0 = const()[name = 
tensor("k_159_pad_type_0"), val = tensor("custom")]; + tensor k_159_pad_0 = const()[name = tensor("k_159_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1107931328))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1109897472))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_159_cast = conv(dilations = var_7865, groups = var_31, pad = k_159_pad_0, pad_type = k_159_pad_type_0, strides = var_7863, weight = unet_up_blocks_0_attentions_0_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_159_cast")]; + tensor var_7869 = const()[name = tensor("op_7869"), val = tensor([1, 1])]; + tensor var_7871 = const()[name = tensor("op_7871"), val = tensor([1, 1])]; + tensor v_159_pad_type_0 = const()[name = tensor("v_159_pad_type_0"), val = tensor("custom")]; + tensor v_159_pad_0 = const()[name = tensor("v_159_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1109897664))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1111863808))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_159_cast = conv(dilations = var_7871, groups = var_31, pad = v_159_pad_0, pad_type = v_159_pad_type_0, strides = var_7869, weight = unet_up_blocks_0_attentions_0_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_159_cast")]; + tensor var_7875 = const()[name = tensor("op_7875"), val = tensor([2, 20, 64, -1])]; + tensor var_7876_cast = reshape(shape = var_7875, x = q_159_cast)[name = tensor("op_7876_cast")]; + tensor var_7877 = const()[name = tensor("op_7877"), val = tensor([2, 20, 64, -1])]; + tensor var_7878_cast = reshape(shape = var_7877, x = k_159_cast)[name = tensor("op_7878_cast")]; + tensor var_7879 = const()[name = tensor("op_7879"), val = tensor([2, 20, 64, -1])]; + tensor var_7880_cast = reshape(shape = var_7879, x = v_159_cast)[name = tensor("op_7880_cast")]; + tensor attn_weights_317_transpose_x_0 = const()[name = tensor("attn_weights_317_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_317_transpose_y_0 = const()[name = tensor("attn_weights_317_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_317_cast = matmul(transpose_x = attn_weights_317_transpose_x_0, transpose_y = attn_weights_317_transpose_y_0, x = var_7876_cast, y = var_7878_cast)[name = tensor("attn_weights_317_cast")]; + tensor attn_weights_319_cast = mul(x = attn_weights_317_cast, y = var_12_to_fp16)[name = tensor("attn_weights_319_cast")]; + tensor var_7884_cast = softmax(axis = var_18, x = attn_weights_319_cast)[name = tensor("op_7884_cast")]; + tensor attn_159_transpose_x_0 = const()[name = tensor("attn_159_transpose_x_0"), val = tensor(false)]; + tensor attn_159_transpose_y_0 = const()[name = tensor("attn_159_transpose_y_0"), val = tensor(true)]; + tensor attn_159_cast = matmul(transpose_x = attn_159_transpose_x_0, transpose_y = 
attn_159_transpose_y_0, x = var_7880_cast, y = var_7884_cast)[name = tensor("attn_159_cast")]; + tensor var_7888 = const()[name = tensor("op_7888"), val = tensor([2, 1280, 1, -1])]; + tensor input_481_cast = reshape(shape = var_7888, x = attn_159_cast)[name = tensor("input_481_cast")]; + tensor var_7893 = const()[name = tensor("op_7893"), val = tensor([1, 1])]; + tensor var_7895 = const()[name = tensor("op_7895"), val = tensor([1, 1])]; + tensor var_7897_pad_type_0 = const()[name = tensor("op_7897_pad_type_0"), val = tensor("custom")]; + tensor var_7897_pad_0 = const()[name = tensor("op_7897_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1111864000))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1113092864))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_5_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_5_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1113093056)))]; + tensor var_7897_cast = conv(bias = unet_up_blocks_0_attentions_0_transformer_blocks_5_attn2_to_out_0_bias_to_fp16, dilations = var_7895, groups = var_31, pad = var_7897_pad_0, pad_type = var_7897_pad_type_0, strides = var_7893, weight = unet_up_blocks_0_attentions_0_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized, x = input_481_cast)[name = tensor("op_7897_cast")]; + tensor inputs_239_cast = add(x = var_7897_cast, y = inputs_237_cast)[name = tensor("inputs_239_cast")]; + tensor var_7901 = const()[name = tensor("op_7901"), val = tensor([1])]; + tensor channels_mean_239_cast = reduce_mean(axes = var_7901, keep_dims = var_23, x = inputs_239_cast)[name = tensor("channels_mean_239_cast")]; + tensor zero_mean_239_cast = sub(x = inputs_239_cast, y = channels_mean_239_cast)[name = tensor("zero_mean_239_cast")]; + tensor zero_mean_sq_239_cast = mul(x = zero_mean_239_cast, y = zero_mean_239_cast)[name = tensor("zero_mean_sq_239_cast")]; + tensor var_7905 = const()[name = tensor("op_7905"), val = tensor([1])]; + tensor var_7906_cast = reduce_mean(axes = var_7905, keep_dims = var_23, x = zero_mean_sq_239_cast)[name = tensor("op_7906_cast")]; + tensor var_7907_to_fp16 = const()[name = tensor("op_7907_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_7908_cast = add(x = var_7906_cast, y = var_7907_to_fp16)[name = tensor("op_7908_cast")]; + tensor denom_239_epsilon_0_to_fp16 = const()[name = tensor("denom_239_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_239_cast = rsqrt(epsilon = denom_239_epsilon_0_to_fp16, x = var_7908_cast)[name = tensor("denom_239_cast")]; + tensor out_239_cast = mul(x = zero_mean_239_cast, y = denom_239_cast)[name = tensor("out_239_cast")]; + tensor var_7912_to_fp16 = const()[name = tensor("op_7912_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1113095680)))]; + tensor var_7913_cast = add(x = out_239_cast, y = var_7912_to_fp16)[name = tensor("op_7913_cast")]; + tensor var_7915_to_fp16 = const()[name = tensor("op_7915_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(1113098304)))]; + tensor input_483_cast = mul(x = var_7913_cast, y = var_7915_to_fp16)[name = tensor("input_483_cast")]; + tensor var_7923 = const()[name = tensor("op_7923"), val = tensor([1, 1])]; + tensor var_7925 = const()[name = tensor("op_7925"), val = tensor([1, 1])]; + tensor var_7927_pad_type_0 = const()[name = tensor("op_7927_pad_type_0"), val = tensor("custom")]; + tensor var_7927_pad_0 = const()[name = tensor("op_7927_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1113100928))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1122931392))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_5_ff_net_0_proj_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1122931584))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1122939328))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_5_ff_net_0_proj_bias_to_fp16_palettized"), shape = tensor([10240])]; + tensor var_7927_cast = conv(bias = unet_up_blocks_0_attentions_0_transformer_blocks_5_ff_net_0_proj_bias_to_fp16_palettized, dilations = var_7925, groups = var_31, pad = var_7927_pad_0, pad_type = var_7927_pad_type_0, strides = var_7923, weight = unet_up_blocks_0_attentions_0_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized, x = input_483_cast)[name = tensor("op_7927_cast")]; + tensor var_7928_split_sizes_0 = const()[name = tensor("op_7928_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_7928_axis_0 = const()[name = tensor("op_7928_axis_0"), val = tensor(1)]; + tensor var_7928_cast_0, tensor var_7928_cast_1 = split(axis = var_7928_axis_0, split_sizes = var_7928_split_sizes_0, x = var_7927_cast)[name = tensor("op_7928_cast")]; + tensor var_7930_mode_0 = const()[name = tensor("op_7930_mode_0"), val = tensor("EXACT")]; + tensor var_7930_cast = gelu(mode = var_7930_mode_0, x = var_7928_cast_1)[name = tensor("op_7930_cast")]; + tensor input_485_cast = mul(x = var_7928_cast_0, y = var_7930_cast)[name = tensor("input_485_cast")]; + tensor var_7934 = const()[name = tensor("op_7934"), val = tensor([1, 1])]; + tensor var_7936 = const()[name = tensor("op_7936"), val = tensor([1, 1])]; + tensor var_7938_pad_type_0 = const()[name = tensor("op_7938_pad_type_0"), val = tensor("custom")]; + tensor var_7938_pad_0 = const()[name = tensor("op_7938_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1122939520))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1127854784))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_5_ff_net_2_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_5_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(1127854976)))]; + tensor var_7938_cast = conv(bias = unet_up_blocks_0_attentions_0_transformer_blocks_5_ff_net_2_bias_to_fp16, dilations = var_7936, groups = var_31, pad = var_7938_pad_0, pad_type = var_7938_pad_type_0, strides = var_7934, weight = unet_up_blocks_0_attentions_0_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized, x = input_485_cast)[name = tensor("op_7938_cast")]; + tensor inputs_241_cast = add(x = var_7938_cast, y = inputs_239_cast)[name = tensor("inputs_241_cast")]; + tensor var_7948 = const()[name = tensor("op_7948"), val = tensor([1])]; + tensor channels_mean_241_cast = reduce_mean(axes = var_7948, keep_dims = var_23, x = inputs_241_cast)[name = tensor("channels_mean_241_cast")]; + tensor zero_mean_241_cast = sub(x = inputs_241_cast, y = channels_mean_241_cast)[name = tensor("zero_mean_241_cast")]; + tensor zero_mean_sq_241_cast = mul(x = zero_mean_241_cast, y = zero_mean_241_cast)[name = tensor("zero_mean_sq_241_cast")]; + tensor var_7952 = const()[name = tensor("op_7952"), val = tensor([1])]; + tensor var_7953_cast = reduce_mean(axes = var_7952, keep_dims = var_23, x = zero_mean_sq_241_cast)[name = tensor("op_7953_cast")]; + tensor var_7954_to_fp16 = const()[name = tensor("op_7954_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_7955_cast = add(x = var_7953_cast, y = var_7954_to_fp16)[name = tensor("op_7955_cast")]; + tensor denom_241_epsilon_0_to_fp16 = const()[name = tensor("denom_241_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_241_cast = rsqrt(epsilon = denom_241_epsilon_0_to_fp16, x = var_7955_cast)[name = tensor("denom_241_cast")]; + tensor out_241_cast = mul(x = zero_mean_241_cast, y = denom_241_cast)[name = tensor("out_241_cast")]; + tensor var_7959_to_fp16 = const()[name = tensor("op_7959_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1127857600)))]; + tensor var_7960_cast = add(x = out_241_cast, y = var_7959_to_fp16)[name = tensor("op_7960_cast")]; + tensor var_7962_to_fp16 = const()[name = tensor("op_7962_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1127860224)))]; + tensor hidden_states_323_cast = mul(x = var_7960_cast, y = var_7962_to_fp16)[name = tensor("hidden_states_323_cast")]; + tensor var_7969 = const()[name = tensor("op_7969"), val = tensor([1, 1])]; + tensor var_7971 = const()[name = tensor("op_7971"), val = tensor([1, 1])]; + tensor q_161_pad_type_0 = const()[name = tensor("q_161_pad_type_0"), val = tensor("custom")]; + tensor q_161_pad_0 = const()[name = tensor("q_161_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1127862848))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1129091712))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_161_cast = conv(dilations = var_7971, groups = var_31, pad = q_161_pad_0, pad_type = q_161_pad_type_0, strides = var_7969, weight = unet_up_blocks_0_attentions_0_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_323_cast)[name = tensor("q_161_cast")]; + tensor var_7975 = const()[name = tensor("op_7975"), val = tensor([1, 1])]; + tensor var_7977 = const()[name = 
tensor("op_7977"), val = tensor([1, 1])]; + tensor k_161_pad_type_0 = const()[name = tensor("k_161_pad_type_0"), val = tensor("custom")]; + tensor k_161_pad_0 = const()[name = tensor("k_161_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1129091904))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1130320768))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_161_cast = conv(dilations = var_7977, groups = var_31, pad = k_161_pad_0, pad_type = k_161_pad_type_0, strides = var_7975, weight = unet_up_blocks_0_attentions_0_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_323_cast)[name = tensor("k_161_cast")]; + tensor var_7981 = const()[name = tensor("op_7981"), val = tensor([1, 1])]; + tensor var_7983 = const()[name = tensor("op_7983"), val = tensor([1, 1])]; + tensor v_161_pad_type_0 = const()[name = tensor("v_161_pad_type_0"), val = tensor("custom")]; + tensor v_161_pad_0 = const()[name = tensor("v_161_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1130320960))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1131549824))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_161_cast = conv(dilations = var_7983, groups = var_31, pad = v_161_pad_0, pad_type = v_161_pad_type_0, strides = var_7981, weight = unet_up_blocks_0_attentions_0_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_323_cast)[name = tensor("v_161_cast")]; + tensor var_7987 = const()[name = tensor("op_7987"), val = tensor([2, 20, 64, -1])]; + tensor var_7988_cast = reshape(shape = var_7987, x = q_161_cast)[name = tensor("op_7988_cast")]; + tensor var_7989 = const()[name = tensor("op_7989"), val = tensor([2, 20, 64, -1])]; + tensor var_7990_cast = reshape(shape = var_7989, x = k_161_cast)[name = tensor("op_7990_cast")]; + tensor var_7991 = const()[name = tensor("op_7991"), val = tensor([2, 20, 64, -1])]; + tensor var_7992_cast = reshape(shape = var_7991, x = v_161_cast)[name = tensor("op_7992_cast")]; + tensor attn_weights_321_transpose_x_0 = const()[name = tensor("attn_weights_321_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_321_transpose_y_0 = const()[name = tensor("attn_weights_321_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_321_cast = matmul(transpose_x = attn_weights_321_transpose_x_0, transpose_y = attn_weights_321_transpose_y_0, x = var_7988_cast, y = var_7990_cast)[name = tensor("attn_weights_321_cast")]; + tensor attn_weights_323_cast = mul(x = attn_weights_321_cast, y = var_12_to_fp16)[name = tensor("attn_weights_323_cast")]; + tensor var_7996_cast = softmax(axis = var_18, x = attn_weights_323_cast)[name = tensor("op_7996_cast")]; + tensor attn_161_transpose_x_0 = const()[name = tensor("attn_161_transpose_x_0"), val = tensor(false)]; + tensor attn_161_transpose_y_0 = const()[name = tensor("attn_161_transpose_y_0"), val = tensor(true)]; + tensor 
attn_161_cast = matmul(transpose_x = attn_161_transpose_x_0, transpose_y = attn_161_transpose_y_0, x = var_7992_cast, y = var_7996_cast)[name = tensor("attn_161_cast")]; + tensor var_8000 = const()[name = tensor("op_8000"), val = tensor([2, 1280, 1, -1])]; + tensor input_487_cast = reshape(shape = var_8000, x = attn_161_cast)[name = tensor("input_487_cast")]; + tensor var_8005 = const()[name = tensor("op_8005"), val = tensor([1, 1])]; + tensor var_8007 = const()[name = tensor("op_8007"), val = tensor([1, 1])]; + tensor var_8009_pad_type_0 = const()[name = tensor("op_8009_pad_type_0"), val = tensor("custom")]; + tensor var_8009_pad_0 = const()[name = tensor("op_8009_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1131550016))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1132778880))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_6_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_6_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1132779072)))]; + tensor var_8009_cast = conv(bias = unet_up_blocks_0_attentions_0_transformer_blocks_6_attn1_to_out_0_bias_to_fp16, dilations = var_8007, groups = var_31, pad = var_8009_pad_0, pad_type = var_8009_pad_type_0, strides = var_8005, weight = unet_up_blocks_0_attentions_0_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized, x = input_487_cast)[name = tensor("op_8009_cast")]; + tensor inputs_243_cast = add(x = var_8009_cast, y = inputs_241_cast)[name = tensor("inputs_243_cast")]; + tensor var_8013 = const()[name = tensor("op_8013"), val = tensor([1])]; + tensor channels_mean_243_cast = reduce_mean(axes = var_8013, keep_dims = var_23, x = inputs_243_cast)[name = tensor("channels_mean_243_cast")]; + tensor zero_mean_243_cast = sub(x = inputs_243_cast, y = channels_mean_243_cast)[name = tensor("zero_mean_243_cast")]; + tensor zero_mean_sq_243_cast = mul(x = zero_mean_243_cast, y = zero_mean_243_cast)[name = tensor("zero_mean_sq_243_cast")]; + tensor var_8017 = const()[name = tensor("op_8017"), val = tensor([1])]; + tensor var_8018_cast = reduce_mean(axes = var_8017, keep_dims = var_23, x = zero_mean_sq_243_cast)[name = tensor("op_8018_cast")]; + tensor var_8019_to_fp16 = const()[name = tensor("op_8019_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_8020_cast = add(x = var_8018_cast, y = var_8019_to_fp16)[name = tensor("op_8020_cast")]; + tensor denom_243_epsilon_0_to_fp16 = const()[name = tensor("denom_243_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_243_cast = rsqrt(epsilon = denom_243_epsilon_0_to_fp16, x = var_8020_cast)[name = tensor("denom_243_cast")]; + tensor out_243_cast = mul(x = zero_mean_243_cast, y = denom_243_cast)[name = tensor("out_243_cast")]; + tensor var_8024_to_fp16 = const()[name = tensor("op_8024_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1132781696)))]; + tensor var_8025_cast = add(x = out_243_cast, y = var_8024_to_fp16)[name = tensor("op_8025_cast")]; + tensor var_8027_to_fp16 = const()[name = tensor("op_8027_to_fp16"), val = 
tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1132784320)))]; + tensor hidden_states_325_cast = mul(x = var_8025_cast, y = var_8027_to_fp16)[name = tensor("hidden_states_325_cast")]; + tensor var_8034 = const()[name = tensor("op_8034"), val = tensor([1, 1])]; + tensor var_8036 = const()[name = tensor("op_8036"), val = tensor([1, 1])]; + tensor q_163_pad_type_0 = const()[name = tensor("q_163_pad_type_0"), val = tensor("custom")]; + tensor q_163_pad_0 = const()[name = tensor("q_163_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1132786944))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1134015808))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_163_cast = conv(dilations = var_8036, groups = var_31, pad = q_163_pad_0, pad_type = q_163_pad_type_0, strides = var_8034, weight = unet_up_blocks_0_attentions_0_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_325_cast)[name = tensor("q_163_cast")]; + tensor var_8040 = const()[name = tensor("op_8040"), val = tensor([1, 1])]; + tensor var_8042 = const()[name = tensor("op_8042"), val = tensor([1, 1])]; + tensor k_163_pad_type_0 = const()[name = tensor("k_163_pad_type_0"), val = tensor("custom")]; + tensor k_163_pad_0 = const()[name = tensor("k_163_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1134016000))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1135982144))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_163_cast = conv(dilations = var_8042, groups = var_31, pad = k_163_pad_0, pad_type = k_163_pad_type_0, strides = var_8040, weight = unet_up_blocks_0_attentions_0_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_163_cast")]; + tensor var_8046 = const()[name = tensor("op_8046"), val = tensor([1, 1])]; + tensor var_8048 = const()[name = tensor("op_8048"), val = tensor([1, 1])]; + tensor v_163_pad_type_0 = const()[name = tensor("v_163_pad_type_0"), val = tensor("custom")]; + tensor v_163_pad_0 = const()[name = tensor("v_163_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1135982336))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1137948480))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_163_cast = conv(dilations = var_8048, groups = var_31, pad = v_163_pad_0, pad_type = v_163_pad_type_0, strides = var_8046, weight = unet_up_blocks_0_attentions_0_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_163_cast")]; + tensor var_8052 = 
const()[name = tensor("op_8052"), val = tensor([2, 20, 64, -1])]; + tensor var_8053_cast = reshape(shape = var_8052, x = q_163_cast)[name = tensor("op_8053_cast")]; + tensor var_8054 = const()[name = tensor("op_8054"), val = tensor([2, 20, 64, -1])]; + tensor var_8055_cast = reshape(shape = var_8054, x = k_163_cast)[name = tensor("op_8055_cast")]; + tensor var_8056 = const()[name = tensor("op_8056"), val = tensor([2, 20, 64, -1])]; + tensor var_8057_cast = reshape(shape = var_8056, x = v_163_cast)[name = tensor("op_8057_cast")]; + tensor attn_weights_325_transpose_x_0 = const()[name = tensor("attn_weights_325_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_325_transpose_y_0 = const()[name = tensor("attn_weights_325_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_325_cast = matmul(transpose_x = attn_weights_325_transpose_x_0, transpose_y = attn_weights_325_transpose_y_0, x = var_8053_cast, y = var_8055_cast)[name = tensor("attn_weights_325_cast")]; + tensor attn_weights_327_cast = mul(x = attn_weights_325_cast, y = var_12_to_fp16)[name = tensor("attn_weights_327_cast")]; + tensor var_8061_cast = softmax(axis = var_18, x = attn_weights_327_cast)[name = tensor("op_8061_cast")]; + tensor attn_163_transpose_x_0 = const()[name = tensor("attn_163_transpose_x_0"), val = tensor(false)]; + tensor attn_163_transpose_y_0 = const()[name = tensor("attn_163_transpose_y_0"), val = tensor(true)]; + tensor attn_163_cast = matmul(transpose_x = attn_163_transpose_x_0, transpose_y = attn_163_transpose_y_0, x = var_8057_cast, y = var_8061_cast)[name = tensor("attn_163_cast")]; + tensor var_8065 = const()[name = tensor("op_8065"), val = tensor([2, 1280, 1, -1])]; + tensor input_489_cast = reshape(shape = var_8065, x = attn_163_cast)[name = tensor("input_489_cast")]; + tensor var_8070 = const()[name = tensor("op_8070"), val = tensor([1, 1])]; + tensor var_8072 = const()[name = tensor("op_8072"), val = tensor([1, 1])]; + tensor var_8074_pad_type_0 = const()[name = tensor("op_8074_pad_type_0"), val = tensor("custom")]; + tensor var_8074_pad_0 = const()[name = tensor("op_8074_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1137948672))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1139177536))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_6_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_6_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1139177728)))]; + tensor var_8074_cast = conv(bias = unet_up_blocks_0_attentions_0_transformer_blocks_6_attn2_to_out_0_bias_to_fp16, dilations = var_8072, groups = var_31, pad = var_8074_pad_0, pad_type = var_8074_pad_type_0, strides = var_8070, weight = unet_up_blocks_0_attentions_0_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized, x = input_489_cast)[name = tensor("op_8074_cast")]; + tensor inputs_245_cast = add(x = var_8074_cast, y = inputs_243_cast)[name = tensor("inputs_245_cast")]; + tensor var_8078 = const()[name = tensor("op_8078"), val = tensor([1])]; + tensor channels_mean_245_cast = reduce_mean(axes = 
var_8078, keep_dims = var_23, x = inputs_245_cast)[name = tensor("channels_mean_245_cast")]; + tensor zero_mean_245_cast = sub(x = inputs_245_cast, y = channels_mean_245_cast)[name = tensor("zero_mean_245_cast")]; + tensor zero_mean_sq_245_cast = mul(x = zero_mean_245_cast, y = zero_mean_245_cast)[name = tensor("zero_mean_sq_245_cast")]; + tensor var_8082 = const()[name = tensor("op_8082"), val = tensor([1])]; + tensor var_8083_cast = reduce_mean(axes = var_8082, keep_dims = var_23, x = zero_mean_sq_245_cast)[name = tensor("op_8083_cast")]; + tensor var_8084_to_fp16 = const()[name = tensor("op_8084_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_8085_cast = add(x = var_8083_cast, y = var_8084_to_fp16)[name = tensor("op_8085_cast")]; + tensor denom_245_epsilon_0_to_fp16 = const()[name = tensor("denom_245_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_245_cast = rsqrt(epsilon = denom_245_epsilon_0_to_fp16, x = var_8085_cast)[name = tensor("denom_245_cast")]; + tensor out_245_cast = mul(x = zero_mean_245_cast, y = denom_245_cast)[name = tensor("out_245_cast")]; + tensor var_8089_to_fp16 = const()[name = tensor("op_8089_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1139180352)))]; + tensor var_8090_cast = add(x = out_245_cast, y = var_8089_to_fp16)[name = tensor("op_8090_cast")]; + tensor var_8092_to_fp16 = const()[name = tensor("op_8092_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1139182976)))]; + tensor input_491_cast = mul(x = var_8090_cast, y = var_8092_to_fp16)[name = tensor("input_491_cast")]; + tensor var_8100 = const()[name = tensor("op_8100"), val = tensor([1, 1])]; + tensor var_8102 = const()[name = tensor("op_8102"), val = tensor([1, 1])]; + tensor var_8104_pad_type_0 = const()[name = tensor("op_8104_pad_type_0"), val = tensor("custom")]; + tensor var_8104_pad_0 = const()[name = tensor("op_8104_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1139185600))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1149016064))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_6_ff_net_0_proj_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1149016256))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1149024000))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_6_ff_net_0_proj_bias_to_fp16_palettized"), shape = tensor([10240])]; + tensor var_8104_cast = conv(bias = unet_up_blocks_0_attentions_0_transformer_blocks_6_ff_net_0_proj_bias_to_fp16_palettized, dilations = var_8102, groups = var_31, pad = var_8104_pad_0, pad_type = var_8104_pad_type_0, strides = var_8100, weight = unet_up_blocks_0_attentions_0_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized, x = input_491_cast)[name = tensor("op_8104_cast")]; + tensor var_8105_split_sizes_0 = const()[name = tensor("op_8105_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_8105_axis_0 = const()[name = tensor("op_8105_axis_0"), val = tensor(1)]; + tensor 
var_8105_cast_0, tensor var_8105_cast_1 = split(axis = var_8105_axis_0, split_sizes = var_8105_split_sizes_0, x = var_8104_cast)[name = tensor("op_8105_cast")]; + tensor var_8107_mode_0 = const()[name = tensor("op_8107_mode_0"), val = tensor("EXACT")]; + tensor var_8107_cast = gelu(mode = var_8107_mode_0, x = var_8105_cast_1)[name = tensor("op_8107_cast")]; + tensor input_493_cast = mul(x = var_8105_cast_0, y = var_8107_cast)[name = tensor("input_493_cast")]; + tensor var_8111 = const()[name = tensor("op_8111"), val = tensor([1, 1])]; + tensor var_8113 = const()[name = tensor("op_8113"), val = tensor([1, 1])]; + tensor var_8115_pad_type_0 = const()[name = tensor("op_8115_pad_type_0"), val = tensor("custom")]; + tensor var_8115_pad_0 = const()[name = tensor("op_8115_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1149024192))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1153939456))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_6_ff_net_2_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_6_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1153939648)))]; + tensor var_8115_cast = conv(bias = unet_up_blocks_0_attentions_0_transformer_blocks_6_ff_net_2_bias_to_fp16, dilations = var_8113, groups = var_31, pad = var_8115_pad_0, pad_type = var_8115_pad_type_0, strides = var_8111, weight = unet_up_blocks_0_attentions_0_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized, x = input_493_cast)[name = tensor("op_8115_cast")]; + tensor inputs_247_cast = add(x = var_8115_cast, y = inputs_245_cast)[name = tensor("inputs_247_cast")]; + tensor var_8125 = const()[name = tensor("op_8125"), val = tensor([1])]; + tensor channels_mean_247_cast = reduce_mean(axes = var_8125, keep_dims = var_23, x = inputs_247_cast)[name = tensor("channels_mean_247_cast")]; + tensor zero_mean_247_cast = sub(x = inputs_247_cast, y = channels_mean_247_cast)[name = tensor("zero_mean_247_cast")]; + tensor zero_mean_sq_247_cast = mul(x = zero_mean_247_cast, y = zero_mean_247_cast)[name = tensor("zero_mean_sq_247_cast")]; + tensor var_8129 = const()[name = tensor("op_8129"), val = tensor([1])]; + tensor var_8130_cast = reduce_mean(axes = var_8129, keep_dims = var_23, x = zero_mean_sq_247_cast)[name = tensor("op_8130_cast")]; + tensor var_8131_to_fp16 = const()[name = tensor("op_8131_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_8132_cast = add(x = var_8130_cast, y = var_8131_to_fp16)[name = tensor("op_8132_cast")]; + tensor denom_247_epsilon_0_to_fp16 = const()[name = tensor("denom_247_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_247_cast = rsqrt(epsilon = denom_247_epsilon_0_to_fp16, x = var_8132_cast)[name = tensor("denom_247_cast")]; + tensor out_247_cast = mul(x = zero_mean_247_cast, y = denom_247_cast)[name = tensor("out_247_cast")]; + tensor var_8136_to_fp16 = const()[name = tensor("op_8136_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1153942272)))]; + tensor var_8137_cast = add(x = out_247_cast, y = var_8136_to_fp16)[name = tensor("op_8137_cast")]; + tensor 
var_8139_to_fp16 = const()[name = tensor("op_8139_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1153944896)))]; + tensor hidden_states_329_cast = mul(x = var_8137_cast, y = var_8139_to_fp16)[name = tensor("hidden_states_329_cast")]; + tensor var_8146 = const()[name = tensor("op_8146"), val = tensor([1, 1])]; + tensor var_8148 = const()[name = tensor("op_8148"), val = tensor([1, 1])]; + tensor q_165_pad_type_0 = const()[name = tensor("q_165_pad_type_0"), val = tensor("custom")]; + tensor q_165_pad_0 = const()[name = tensor("q_165_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1153947520))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1155176384))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_165_cast = conv(dilations = var_8148, groups = var_31, pad = q_165_pad_0, pad_type = q_165_pad_type_0, strides = var_8146, weight = unet_up_blocks_0_attentions_0_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_329_cast)[name = tensor("q_165_cast")]; + tensor var_8152 = const()[name = tensor("op_8152"), val = tensor([1, 1])]; + tensor var_8154 = const()[name = tensor("op_8154"), val = tensor([1, 1])]; + tensor k_165_pad_type_0 = const()[name = tensor("k_165_pad_type_0"), val = tensor("custom")]; + tensor k_165_pad_0 = const()[name = tensor("k_165_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1155176576))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1156405440))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_165_cast = conv(dilations = var_8154, groups = var_31, pad = k_165_pad_0, pad_type = k_165_pad_type_0, strides = var_8152, weight = unet_up_blocks_0_attentions_0_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_329_cast)[name = tensor("k_165_cast")]; + tensor var_8158 = const()[name = tensor("op_8158"), val = tensor([1, 1])]; + tensor var_8160 = const()[name = tensor("op_8160"), val = tensor([1, 1])]; + tensor v_165_pad_type_0 = const()[name = tensor("v_165_pad_type_0"), val = tensor("custom")]; + tensor v_165_pad_0 = const()[name = tensor("v_165_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1156405632))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1157634496))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_165_cast = conv(dilations = var_8160, groups = var_31, pad = v_165_pad_0, pad_type = v_165_pad_type_0, strides = var_8158, weight = unet_up_blocks_0_attentions_0_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized, x = 
hidden_states_329_cast)[name = tensor("v_165_cast")]; + tensor var_8164 = const()[name = tensor("op_8164"), val = tensor([2, 20, 64, -1])]; + tensor var_8165_cast = reshape(shape = var_8164, x = q_165_cast)[name = tensor("op_8165_cast")]; + tensor var_8166 = const()[name = tensor("op_8166"), val = tensor([2, 20, 64, -1])]; + tensor var_8167_cast = reshape(shape = var_8166, x = k_165_cast)[name = tensor("op_8167_cast")]; + tensor var_8168 = const()[name = tensor("op_8168"), val = tensor([2, 20, 64, -1])]; + tensor var_8169_cast = reshape(shape = var_8168, x = v_165_cast)[name = tensor("op_8169_cast")]; + tensor attn_weights_329_transpose_x_0 = const()[name = tensor("attn_weights_329_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_329_transpose_y_0 = const()[name = tensor("attn_weights_329_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_329_cast = matmul(transpose_x = attn_weights_329_transpose_x_0, transpose_y = attn_weights_329_transpose_y_0, x = var_8165_cast, y = var_8167_cast)[name = tensor("attn_weights_329_cast")]; + tensor attn_weights_331_cast = mul(x = attn_weights_329_cast, y = var_12_to_fp16)[name = tensor("attn_weights_331_cast")]; + tensor var_8173_cast = softmax(axis = var_18, x = attn_weights_331_cast)[name = tensor("op_8173_cast")]; + tensor attn_165_transpose_x_0 = const()[name = tensor("attn_165_transpose_x_0"), val = tensor(false)]; + tensor attn_165_transpose_y_0 = const()[name = tensor("attn_165_transpose_y_0"), val = tensor(true)]; + tensor attn_165_cast = matmul(transpose_x = attn_165_transpose_x_0, transpose_y = attn_165_transpose_y_0, x = var_8169_cast, y = var_8173_cast)[name = tensor("attn_165_cast")]; + tensor var_8177 = const()[name = tensor("op_8177"), val = tensor([2, 1280, 1, -1])]; + tensor input_495_cast = reshape(shape = var_8177, x = attn_165_cast)[name = tensor("input_495_cast")]; + tensor var_8182 = const()[name = tensor("op_8182"), val = tensor([1, 1])]; + tensor var_8184 = const()[name = tensor("op_8184"), val = tensor([1, 1])]; + tensor var_8186_pad_type_0 = const()[name = tensor("op_8186_pad_type_0"), val = tensor("custom")]; + tensor var_8186_pad_0 = const()[name = tensor("op_8186_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1157634688))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1158863552))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_7_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_7_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1158863744)))]; + tensor var_8186_cast = conv(bias = unet_up_blocks_0_attentions_0_transformer_blocks_7_attn1_to_out_0_bias_to_fp16, dilations = var_8184, groups = var_31, pad = var_8186_pad_0, pad_type = var_8186_pad_type_0, strides = var_8182, weight = unet_up_blocks_0_attentions_0_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized, x = input_495_cast)[name = tensor("op_8186_cast")]; + tensor inputs_249_cast = add(x = var_8186_cast, y = inputs_247_cast)[name = tensor("inputs_249_cast")]; + tensor var_8190 = const()[name = tensor("op_8190"), val = 
tensor([1])]; + tensor channels_mean_249_cast = reduce_mean(axes = var_8190, keep_dims = var_23, x = inputs_249_cast)[name = tensor("channels_mean_249_cast")]; + tensor zero_mean_249_cast = sub(x = inputs_249_cast, y = channels_mean_249_cast)[name = tensor("zero_mean_249_cast")]; + tensor zero_mean_sq_249_cast = mul(x = zero_mean_249_cast, y = zero_mean_249_cast)[name = tensor("zero_mean_sq_249_cast")]; + tensor var_8194 = const()[name = tensor("op_8194"), val = tensor([1])]; + tensor var_8195_cast = reduce_mean(axes = var_8194, keep_dims = var_23, x = zero_mean_sq_249_cast)[name = tensor("op_8195_cast")]; + tensor var_8196_to_fp16 = const()[name = tensor("op_8196_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_8197_cast = add(x = var_8195_cast, y = var_8196_to_fp16)[name = tensor("op_8197_cast")]; + tensor denom_249_epsilon_0_to_fp16 = const()[name = tensor("denom_249_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_249_cast = rsqrt(epsilon = denom_249_epsilon_0_to_fp16, x = var_8197_cast)[name = tensor("denom_249_cast")]; + tensor out_249_cast = mul(x = zero_mean_249_cast, y = denom_249_cast)[name = tensor("out_249_cast")]; + tensor var_8201_to_fp16 = const()[name = tensor("op_8201_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1158866368)))]; + tensor var_8202_cast = add(x = out_249_cast, y = var_8201_to_fp16)[name = tensor("op_8202_cast")]; + tensor var_8204_to_fp16 = const()[name = tensor("op_8204_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1158868992)))]; + tensor hidden_states_331_cast = mul(x = var_8202_cast, y = var_8204_to_fp16)[name = tensor("hidden_states_331_cast")]; + tensor var_8211 = const()[name = tensor("op_8211"), val = tensor([1, 1])]; + tensor var_8213 = const()[name = tensor("op_8213"), val = tensor([1, 1])]; + tensor q_167_pad_type_0 = const()[name = tensor("q_167_pad_type_0"), val = tensor("custom")]; + tensor q_167_pad_0 = const()[name = tensor("q_167_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1158871616))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1160100480))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_167_cast = conv(dilations = var_8213, groups = var_31, pad = q_167_pad_0, pad_type = q_167_pad_type_0, strides = var_8211, weight = unet_up_blocks_0_attentions_0_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_331_cast)[name = tensor("q_167_cast")]; + tensor var_8217 = const()[name = tensor("op_8217"), val = tensor([1, 1])]; + tensor var_8219 = const()[name = tensor("op_8219"), val = tensor([1, 1])]; + tensor k_167_pad_type_0 = const()[name = tensor("k_167_pad_type_0"), val = tensor("custom")]; + tensor k_167_pad_0 = const()[name = tensor("k_167_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1160100672))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1162066816))), name = 
tensor("unet_up_blocks_0_attentions_0_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_167_cast = conv(dilations = var_8219, groups = var_31, pad = k_167_pad_0, pad_type = k_167_pad_type_0, strides = var_8217, weight = unet_up_blocks_0_attentions_0_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_167_cast")]; + tensor var_8223 = const()[name = tensor("op_8223"), val = tensor([1, 1])]; + tensor var_8225 = const()[name = tensor("op_8225"), val = tensor([1, 1])]; + tensor v_167_pad_type_0 = const()[name = tensor("v_167_pad_type_0"), val = tensor("custom")]; + tensor v_167_pad_0 = const()[name = tensor("v_167_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1162067008))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1164033152))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_167_cast = conv(dilations = var_8225, groups = var_31, pad = v_167_pad_0, pad_type = v_167_pad_type_0, strides = var_8223, weight = unet_up_blocks_0_attentions_0_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_167_cast")]; + tensor var_8229 = const()[name = tensor("op_8229"), val = tensor([2, 20, 64, -1])]; + tensor var_8230_cast = reshape(shape = var_8229, x = q_167_cast)[name = tensor("op_8230_cast")]; + tensor var_8231 = const()[name = tensor("op_8231"), val = tensor([2, 20, 64, -1])]; + tensor var_8232_cast = reshape(shape = var_8231, x = k_167_cast)[name = tensor("op_8232_cast")]; + tensor var_8233 = const()[name = tensor("op_8233"), val = tensor([2, 20, 64, -1])]; + tensor var_8234_cast = reshape(shape = var_8233, x = v_167_cast)[name = tensor("op_8234_cast")]; + tensor attn_weights_333_transpose_x_0 = const()[name = tensor("attn_weights_333_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_333_transpose_y_0 = const()[name = tensor("attn_weights_333_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_333_cast = matmul(transpose_x = attn_weights_333_transpose_x_0, transpose_y = attn_weights_333_transpose_y_0, x = var_8230_cast, y = var_8232_cast)[name = tensor("attn_weights_333_cast")]; + tensor attn_weights_335_cast = mul(x = attn_weights_333_cast, y = var_12_to_fp16)[name = tensor("attn_weights_335_cast")]; + tensor var_8238_cast = softmax(axis = var_18, x = attn_weights_335_cast)[name = tensor("op_8238_cast")]; + tensor attn_167_transpose_x_0 = const()[name = tensor("attn_167_transpose_x_0"), val = tensor(false)]; + tensor attn_167_transpose_y_0 = const()[name = tensor("attn_167_transpose_y_0"), val = tensor(true)]; + tensor attn_167_cast = matmul(transpose_x = attn_167_transpose_x_0, transpose_y = attn_167_transpose_y_0, x = var_8234_cast, y = var_8238_cast)[name = tensor("attn_167_cast")]; + tensor var_8242 = const()[name = tensor("op_8242"), val = tensor([2, 1280, 1, -1])]; + tensor input_497_cast = reshape(shape = var_8242, x = attn_167_cast)[name = tensor("input_497_cast")]; + tensor var_8247 = const()[name = tensor("op_8247"), val = tensor([1, 1])]; + tensor var_8249 = const()[name = tensor("op_8249"), val = tensor([1, 1])]; + tensor var_8251_pad_type_0 = const()[name = 
tensor("op_8251_pad_type_0"), val = tensor("custom")]; + tensor var_8251_pad_0 = const()[name = tensor("op_8251_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1164033344))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1165262208))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_7_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_7_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1165262400)))]; + tensor var_8251_cast = conv(bias = unet_up_blocks_0_attentions_0_transformer_blocks_7_attn2_to_out_0_bias_to_fp16, dilations = var_8249, groups = var_31, pad = var_8251_pad_0, pad_type = var_8251_pad_type_0, strides = var_8247, weight = unet_up_blocks_0_attentions_0_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized, x = input_497_cast)[name = tensor("op_8251_cast")]; + tensor inputs_251_cast = add(x = var_8251_cast, y = inputs_249_cast)[name = tensor("inputs_251_cast")]; + tensor var_8255 = const()[name = tensor("op_8255"), val = tensor([1])]; + tensor channels_mean_251_cast = reduce_mean(axes = var_8255, keep_dims = var_23, x = inputs_251_cast)[name = tensor("channels_mean_251_cast")]; + tensor zero_mean_251_cast = sub(x = inputs_251_cast, y = channels_mean_251_cast)[name = tensor("zero_mean_251_cast")]; + tensor zero_mean_sq_251_cast = mul(x = zero_mean_251_cast, y = zero_mean_251_cast)[name = tensor("zero_mean_sq_251_cast")]; + tensor var_8259 = const()[name = tensor("op_8259"), val = tensor([1])]; + tensor var_8260_cast = reduce_mean(axes = var_8259, keep_dims = var_23, x = zero_mean_sq_251_cast)[name = tensor("op_8260_cast")]; + tensor var_8261_to_fp16 = const()[name = tensor("op_8261_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_8262_cast = add(x = var_8260_cast, y = var_8261_to_fp16)[name = tensor("op_8262_cast")]; + tensor denom_251_epsilon_0_to_fp16 = const()[name = tensor("denom_251_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_251_cast = rsqrt(epsilon = denom_251_epsilon_0_to_fp16, x = var_8262_cast)[name = tensor("denom_251_cast")]; + tensor out_251_cast = mul(x = zero_mean_251_cast, y = denom_251_cast)[name = tensor("out_251_cast")]; + tensor var_8266_to_fp16 = const()[name = tensor("op_8266_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1165265024)))]; + tensor var_8267_cast = add(x = out_251_cast, y = var_8266_to_fp16)[name = tensor("op_8267_cast")]; + tensor var_8269_to_fp16 = const()[name = tensor("op_8269_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1165267648)))]; + tensor input_499_cast = mul(x = var_8267_cast, y = var_8269_to_fp16)[name = tensor("input_499_cast")]; + tensor var_8277 = const()[name = tensor("op_8277"), val = tensor([1, 1])]; + tensor var_8279 = const()[name = tensor("op_8279"), val = tensor([1, 1])]; + tensor var_8281_pad_type_0 = const()[name = tensor("op_8281_pad_type_0"), val = tensor("custom")]; + tensor var_8281_pad_0 = const()[name = tensor("op_8281_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
unet_up_blocks_0_attentions_0_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1165270272))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1175100736))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_7_ff_net_0_proj_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1175100928))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1175108672))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_7_ff_net_0_proj_bias_to_fp16_palettized"), shape = tensor([10240])]; + tensor var_8281_cast = conv(bias = unet_up_blocks_0_attentions_0_transformer_blocks_7_ff_net_0_proj_bias_to_fp16_palettized, dilations = var_8279, groups = var_31, pad = var_8281_pad_0, pad_type = var_8281_pad_type_0, strides = var_8277, weight = unet_up_blocks_0_attentions_0_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized, x = input_499_cast)[name = tensor("op_8281_cast")]; + tensor var_8282_split_sizes_0 = const()[name = tensor("op_8282_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_8282_axis_0 = const()[name = tensor("op_8282_axis_0"), val = tensor(1)]; + tensor var_8282_cast_0, tensor var_8282_cast_1 = split(axis = var_8282_axis_0, split_sizes = var_8282_split_sizes_0, x = var_8281_cast)[name = tensor("op_8282_cast")]; + tensor var_8284_mode_0 = const()[name = tensor("op_8284_mode_0"), val = tensor("EXACT")]; + tensor var_8284_cast = gelu(mode = var_8284_mode_0, x = var_8282_cast_1)[name = tensor("op_8284_cast")]; + tensor input_501_cast = mul(x = var_8282_cast_0, y = var_8284_cast)[name = tensor("input_501_cast")]; + tensor var_8288 = const()[name = tensor("op_8288"), val = tensor([1, 1])]; + tensor var_8290 = const()[name = tensor("op_8290"), val = tensor([1, 1])]; + tensor var_8292_pad_type_0 = const()[name = tensor("op_8292_pad_type_0"), val = tensor("custom")]; + tensor var_8292_pad_0 = const()[name = tensor("op_8292_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1175108864))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1180024128))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_7_ff_net_2_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_7_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1180024320)))]; + tensor var_8292_cast = conv(bias = unet_up_blocks_0_attentions_0_transformer_blocks_7_ff_net_2_bias_to_fp16, dilations = var_8290, groups = var_31, pad = var_8292_pad_0, pad_type = var_8292_pad_type_0, strides = var_8288, weight = unet_up_blocks_0_attentions_0_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized, x = input_501_cast)[name = tensor("op_8292_cast")]; + tensor inputs_253_cast = add(x = var_8292_cast, y = 
inputs_251_cast)[name = tensor("inputs_253_cast")]; + tensor var_8302 = const()[name = tensor("op_8302"), val = tensor([1])]; + tensor channels_mean_253_cast = reduce_mean(axes = var_8302, keep_dims = var_23, x = inputs_253_cast)[name = tensor("channels_mean_253_cast")]; + tensor zero_mean_253_cast = sub(x = inputs_253_cast, y = channels_mean_253_cast)[name = tensor("zero_mean_253_cast")]; + tensor zero_mean_sq_253_cast = mul(x = zero_mean_253_cast, y = zero_mean_253_cast)[name = tensor("zero_mean_sq_253_cast")]; + tensor var_8306 = const()[name = tensor("op_8306"), val = tensor([1])]; + tensor var_8307_cast = reduce_mean(axes = var_8306, keep_dims = var_23, x = zero_mean_sq_253_cast)[name = tensor("op_8307_cast")]; + tensor var_8308_to_fp16 = const()[name = tensor("op_8308_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_8309_cast = add(x = var_8307_cast, y = var_8308_to_fp16)[name = tensor("op_8309_cast")]; + tensor denom_253_epsilon_0_to_fp16 = const()[name = tensor("denom_253_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_253_cast = rsqrt(epsilon = denom_253_epsilon_0_to_fp16, x = var_8309_cast)[name = tensor("denom_253_cast")]; + tensor out_253_cast = mul(x = zero_mean_253_cast, y = denom_253_cast)[name = tensor("out_253_cast")]; + tensor var_8313_to_fp16 = const()[name = tensor("op_8313_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1180026944)))]; + tensor var_8314_cast = add(x = out_253_cast, y = var_8313_to_fp16)[name = tensor("op_8314_cast")]; + tensor var_8316_to_fp16 = const()[name = tensor("op_8316_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1180029568)))]; + tensor hidden_states_335_cast = mul(x = var_8314_cast, y = var_8316_to_fp16)[name = tensor("hidden_states_335_cast")]; + tensor var_8323 = const()[name = tensor("op_8323"), val = tensor([1, 1])]; + tensor var_8325 = const()[name = tensor("op_8325"), val = tensor([1, 1])]; + tensor q_169_pad_type_0 = const()[name = tensor("q_169_pad_type_0"), val = tensor("custom")]; + tensor q_169_pad_0 = const()[name = tensor("q_169_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1180032192))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1181261056))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_169_cast = conv(dilations = var_8325, groups = var_31, pad = q_169_pad_0, pad_type = q_169_pad_type_0, strides = var_8323, weight = unet_up_blocks_0_attentions_0_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_335_cast)[name = tensor("q_169_cast")]; + tensor var_8329 = const()[name = tensor("op_8329"), val = tensor([1, 1])]; + tensor var_8331 = const()[name = tensor("op_8331"), val = tensor([1, 1])]; + tensor k_169_pad_type_0 = const()[name = tensor("k_169_pad_type_0"), val = tensor("custom")]; + tensor k_169_pad_0 = const()[name = tensor("k_169_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1181261248))), lut = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(1182490112))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_169_cast = conv(dilations = var_8331, groups = var_31, pad = k_169_pad_0, pad_type = k_169_pad_type_0, strides = var_8329, weight = unet_up_blocks_0_attentions_0_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_335_cast)[name = tensor("k_169_cast")]; + tensor var_8335 = const()[name = tensor("op_8335"), val = tensor([1, 1])]; + tensor var_8337 = const()[name = tensor("op_8337"), val = tensor([1, 1])]; + tensor v_169_pad_type_0 = const()[name = tensor("v_169_pad_type_0"), val = tensor("custom")]; + tensor v_169_pad_0 = const()[name = tensor("v_169_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1182490304))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1183719168))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_169_cast = conv(dilations = var_8337, groups = var_31, pad = v_169_pad_0, pad_type = v_169_pad_type_0, strides = var_8335, weight = unet_up_blocks_0_attentions_0_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_335_cast)[name = tensor("v_169_cast")]; + tensor var_8341 = const()[name = tensor("op_8341"), val = tensor([2, 20, 64, -1])]; + tensor var_8342_cast = reshape(shape = var_8341, x = q_169_cast)[name = tensor("op_8342_cast")]; + tensor var_8343 = const()[name = tensor("op_8343"), val = tensor([2, 20, 64, -1])]; + tensor var_8344_cast = reshape(shape = var_8343, x = k_169_cast)[name = tensor("op_8344_cast")]; + tensor var_8345 = const()[name = tensor("op_8345"), val = tensor([2, 20, 64, -1])]; + tensor var_8346_cast = reshape(shape = var_8345, x = v_169_cast)[name = tensor("op_8346_cast")]; + tensor attn_weights_337_transpose_x_0 = const()[name = tensor("attn_weights_337_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_337_transpose_y_0 = const()[name = tensor("attn_weights_337_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_337_cast = matmul(transpose_x = attn_weights_337_transpose_x_0, transpose_y = attn_weights_337_transpose_y_0, x = var_8342_cast, y = var_8344_cast)[name = tensor("attn_weights_337_cast")]; + tensor attn_weights_339_cast = mul(x = attn_weights_337_cast, y = var_12_to_fp16)[name = tensor("attn_weights_339_cast")]; + tensor var_8350_cast = softmax(axis = var_18, x = attn_weights_339_cast)[name = tensor("op_8350_cast")]; + tensor attn_169_transpose_x_0 = const()[name = tensor("attn_169_transpose_x_0"), val = tensor(false)]; + tensor attn_169_transpose_y_0 = const()[name = tensor("attn_169_transpose_y_0"), val = tensor(true)]; + tensor attn_169_cast = matmul(transpose_x = attn_169_transpose_x_0, transpose_y = attn_169_transpose_y_0, x = var_8346_cast, y = var_8350_cast)[name = tensor("attn_169_cast")]; + tensor var_8354 = const()[name = tensor("op_8354"), val = tensor([2, 1280, 1, -1])]; + tensor input_503_cast = reshape(shape = var_8354, x = attn_169_cast)[name = tensor("input_503_cast")]; + tensor var_8359 = const()[name = tensor("op_8359"), val = tensor([1, 1])]; + tensor var_8361 = const()[name = 
tensor("op_8361"), val = tensor([1, 1])]; + tensor var_8363_pad_type_0 = const()[name = tensor("op_8363_pad_type_0"), val = tensor("custom")]; + tensor var_8363_pad_0 = const()[name = tensor("op_8363_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1183719360))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1184948224))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_8_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_8_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1184948416)))]; + tensor var_8363_cast = conv(bias = unet_up_blocks_0_attentions_0_transformer_blocks_8_attn1_to_out_0_bias_to_fp16, dilations = var_8361, groups = var_31, pad = var_8363_pad_0, pad_type = var_8363_pad_type_0, strides = var_8359, weight = unet_up_blocks_0_attentions_0_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized, x = input_503_cast)[name = tensor("op_8363_cast")]; + tensor inputs_255_cast = add(x = var_8363_cast, y = inputs_253_cast)[name = tensor("inputs_255_cast")]; + tensor var_8367 = const()[name = tensor("op_8367"), val = tensor([1])]; + tensor channels_mean_255_cast = reduce_mean(axes = var_8367, keep_dims = var_23, x = inputs_255_cast)[name = tensor("channels_mean_255_cast")]; + tensor zero_mean_255_cast = sub(x = inputs_255_cast, y = channels_mean_255_cast)[name = tensor("zero_mean_255_cast")]; + tensor zero_mean_sq_255_cast = mul(x = zero_mean_255_cast, y = zero_mean_255_cast)[name = tensor("zero_mean_sq_255_cast")]; + tensor var_8371 = const()[name = tensor("op_8371"), val = tensor([1])]; + tensor var_8372_cast = reduce_mean(axes = var_8371, keep_dims = var_23, x = zero_mean_sq_255_cast)[name = tensor("op_8372_cast")]; + tensor var_8373_to_fp16 = const()[name = tensor("op_8373_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_8374_cast = add(x = var_8372_cast, y = var_8373_to_fp16)[name = tensor("op_8374_cast")]; + tensor denom_255_epsilon_0_to_fp16 = const()[name = tensor("denom_255_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_255_cast = rsqrt(epsilon = denom_255_epsilon_0_to_fp16, x = var_8374_cast)[name = tensor("denom_255_cast")]; + tensor out_255_cast = mul(x = zero_mean_255_cast, y = denom_255_cast)[name = tensor("out_255_cast")]; + tensor var_8378_to_fp16 = const()[name = tensor("op_8378_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1184951040)))]; + tensor var_8379_cast = add(x = out_255_cast, y = var_8378_to_fp16)[name = tensor("op_8379_cast")]; + tensor var_8381_to_fp16 = const()[name = tensor("op_8381_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1184953664)))]; + tensor hidden_states_337_cast = mul(x = var_8379_cast, y = var_8381_to_fp16)[name = tensor("hidden_states_337_cast")]; + tensor var_8388 = const()[name = tensor("op_8388"), val = tensor([1, 1])]; + tensor var_8390 = const()[name = tensor("op_8390"), val = tensor([1, 1])]; + tensor q_171_pad_type_0 = const()[name = tensor("q_171_pad_type_0"), val = tensor("custom")]; + tensor 
q_171_pad_0 = const()[name = tensor("q_171_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1184956288))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1186185152))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_171_cast = conv(dilations = var_8390, groups = var_31, pad = q_171_pad_0, pad_type = q_171_pad_type_0, strides = var_8388, weight = unet_up_blocks_0_attentions_0_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_337_cast)[name = tensor("q_171_cast")]; + tensor var_8394 = const()[name = tensor("op_8394"), val = tensor([1, 1])]; + tensor var_8396 = const()[name = tensor("op_8396"), val = tensor([1, 1])]; + tensor k_171_pad_type_0 = const()[name = tensor("k_171_pad_type_0"), val = tensor("custom")]; + tensor k_171_pad_0 = const()[name = tensor("k_171_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1186185344))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1188151488))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_171_cast = conv(dilations = var_8396, groups = var_31, pad = k_171_pad_0, pad_type = k_171_pad_type_0, strides = var_8394, weight = unet_up_blocks_0_attentions_0_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_171_cast")]; + tensor var_8400 = const()[name = tensor("op_8400"), val = tensor([1, 1])]; + tensor var_8402 = const()[name = tensor("op_8402"), val = tensor([1, 1])]; + tensor v_171_pad_type_0 = const()[name = tensor("v_171_pad_type_0"), val = tensor("custom")]; + tensor v_171_pad_0 = const()[name = tensor("v_171_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1188151680))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1190117824))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_171_cast = conv(dilations = var_8402, groups = var_31, pad = v_171_pad_0, pad_type = v_171_pad_type_0, strides = var_8400, weight = unet_up_blocks_0_attentions_0_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_171_cast")]; + tensor var_8406 = const()[name = tensor("op_8406"), val = tensor([2, 20, 64, -1])]; + tensor var_8407_cast = reshape(shape = var_8406, x = q_171_cast)[name = tensor("op_8407_cast")]; + tensor var_8408 = const()[name = tensor("op_8408"), val = tensor([2, 20, 64, -1])]; + tensor var_8409_cast = reshape(shape = var_8408, x = k_171_cast)[name = tensor("op_8409_cast")]; + tensor var_8410 = const()[name = tensor("op_8410"), val = tensor([2, 20, 64, -1])]; + tensor var_8411_cast = reshape(shape = 
var_8410, x = v_171_cast)[name = tensor("op_8411_cast")]; + tensor attn_weights_341_transpose_x_0 = const()[name = tensor("attn_weights_341_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_341_transpose_y_0 = const()[name = tensor("attn_weights_341_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_341_cast = matmul(transpose_x = attn_weights_341_transpose_x_0, transpose_y = attn_weights_341_transpose_y_0, x = var_8407_cast, y = var_8409_cast)[name = tensor("attn_weights_341_cast")]; + tensor attn_weights_343_cast = mul(x = attn_weights_341_cast, y = var_12_to_fp16)[name = tensor("attn_weights_343_cast")]; + tensor var_8415_cast = softmax(axis = var_18, x = attn_weights_343_cast)[name = tensor("op_8415_cast")]; + tensor attn_171_transpose_x_0 = const()[name = tensor("attn_171_transpose_x_0"), val = tensor(false)]; + tensor attn_171_transpose_y_0 = const()[name = tensor("attn_171_transpose_y_0"), val = tensor(true)]; + tensor attn_171_cast = matmul(transpose_x = attn_171_transpose_x_0, transpose_y = attn_171_transpose_y_0, x = var_8411_cast, y = var_8415_cast)[name = tensor("attn_171_cast")]; + tensor var_8419 = const()[name = tensor("op_8419"), val = tensor([2, 1280, 1, -1])]; + tensor input_505_cast = reshape(shape = var_8419, x = attn_171_cast)[name = tensor("input_505_cast")]; + tensor var_8424 = const()[name = tensor("op_8424"), val = tensor([1, 1])]; + tensor var_8426 = const()[name = tensor("op_8426"), val = tensor([1, 1])]; + tensor var_8428_pad_type_0 = const()[name = tensor("op_8428_pad_type_0"), val = tensor("custom")]; + tensor var_8428_pad_0 = const()[name = tensor("op_8428_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1190118016))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1191346880))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_8_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_8_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1191347072)))]; + tensor var_8428_cast = conv(bias = unet_up_blocks_0_attentions_0_transformer_blocks_8_attn2_to_out_0_bias_to_fp16, dilations = var_8426, groups = var_31, pad = var_8428_pad_0, pad_type = var_8428_pad_type_0, strides = var_8424, weight = unet_up_blocks_0_attentions_0_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized, x = input_505_cast)[name = tensor("op_8428_cast")]; + tensor inputs_257_cast = add(x = var_8428_cast, y = inputs_255_cast)[name = tensor("inputs_257_cast")]; + tensor var_8432 = const()[name = tensor("op_8432"), val = tensor([1])]; + tensor channels_mean_257_cast = reduce_mean(axes = var_8432, keep_dims = var_23, x = inputs_257_cast)[name = tensor("channels_mean_257_cast")]; + tensor zero_mean_257_cast = sub(x = inputs_257_cast, y = channels_mean_257_cast)[name = tensor("zero_mean_257_cast")]; + tensor zero_mean_sq_257_cast = mul(x = zero_mean_257_cast, y = zero_mean_257_cast)[name = tensor("zero_mean_sq_257_cast")]; + tensor var_8436 = const()[name = tensor("op_8436"), val = tensor([1])]; + tensor var_8437_cast = reduce_mean(axes = var_8436, keep_dims = 
var_23, x = zero_mean_sq_257_cast)[name = tensor("op_8437_cast")]; + tensor var_8438_to_fp16 = const()[name = tensor("op_8438_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_8439_cast = add(x = var_8437_cast, y = var_8438_to_fp16)[name = tensor("op_8439_cast")]; + tensor denom_257_epsilon_0_to_fp16 = const()[name = tensor("denom_257_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_257_cast = rsqrt(epsilon = denom_257_epsilon_0_to_fp16, x = var_8439_cast)[name = tensor("denom_257_cast")]; + tensor out_257_cast = mul(x = zero_mean_257_cast, y = denom_257_cast)[name = tensor("out_257_cast")]; + tensor var_8443_to_fp16 = const()[name = tensor("op_8443_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1191349696)))]; + tensor var_8444_cast = add(x = out_257_cast, y = var_8443_to_fp16)[name = tensor("op_8444_cast")]; + tensor var_8446_to_fp16 = const()[name = tensor("op_8446_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1191352320)))]; + tensor input_507_cast = mul(x = var_8444_cast, y = var_8446_to_fp16)[name = tensor("input_507_cast")]; + tensor var_8454 = const()[name = tensor("op_8454"), val = tensor([1, 1])]; + tensor var_8456 = const()[name = tensor("op_8456"), val = tensor([1, 1])]; + tensor var_8458_pad_type_0 = const()[name = tensor("op_8458_pad_type_0"), val = tensor("custom")]; + tensor var_8458_pad_0 = const()[name = tensor("op_8458_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1191354944))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1201185408))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_8_ff_net_0_proj_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1201185600))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1201193344))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_8_ff_net_0_proj_bias_to_fp16_palettized"), shape = tensor([10240])]; + tensor var_8458_cast = conv(bias = unet_up_blocks_0_attentions_0_transformer_blocks_8_ff_net_0_proj_bias_to_fp16_palettized, dilations = var_8456, groups = var_31, pad = var_8458_pad_0, pad_type = var_8458_pad_type_0, strides = var_8454, weight = unet_up_blocks_0_attentions_0_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized, x = input_507_cast)[name = tensor("op_8458_cast")]; + tensor var_8459_split_sizes_0 = const()[name = tensor("op_8459_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_8459_axis_0 = const()[name = tensor("op_8459_axis_0"), val = tensor(1)]; + tensor var_8459_cast_0, tensor var_8459_cast_1 = split(axis = var_8459_axis_0, split_sizes = var_8459_split_sizes_0, x = var_8458_cast)[name = tensor("op_8459_cast")]; + tensor var_8461_mode_0 = const()[name = tensor("op_8461_mode_0"), val = tensor("EXACT")]; + tensor var_8461_cast = gelu(mode = var_8461_mode_0, x = var_8459_cast_1)[name = tensor("op_8461_cast")]; + tensor input_509_cast = mul(x = var_8459_cast_0, y = var_8461_cast)[name = tensor("input_509_cast")]; + tensor var_8465 = 
const()[name = tensor("op_8465"), val = tensor([1, 1])]; + tensor var_8467 = const()[name = tensor("op_8467"), val = tensor([1, 1])]; + tensor var_8469_pad_type_0 = const()[name = tensor("op_8469_pad_type_0"), val = tensor("custom")]; + tensor var_8469_pad_0 = const()[name = tensor("op_8469_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1201193536))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1206108800))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_8_ff_net_2_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_8_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1206108992)))]; + tensor var_8469_cast = conv(bias = unet_up_blocks_0_attentions_0_transformer_blocks_8_ff_net_2_bias_to_fp16, dilations = var_8467, groups = var_31, pad = var_8469_pad_0, pad_type = var_8469_pad_type_0, strides = var_8465, weight = unet_up_blocks_0_attentions_0_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized, x = input_509_cast)[name = tensor("op_8469_cast")]; + tensor inputs_259_cast = add(x = var_8469_cast, y = inputs_257_cast)[name = tensor("inputs_259_cast")]; + tensor var_8479 = const()[name = tensor("op_8479"), val = tensor([1])]; + tensor channels_mean_259_cast = reduce_mean(axes = var_8479, keep_dims = var_23, x = inputs_259_cast)[name = tensor("channels_mean_259_cast")]; + tensor zero_mean_259_cast = sub(x = inputs_259_cast, y = channels_mean_259_cast)[name = tensor("zero_mean_259_cast")]; + tensor zero_mean_sq_259_cast = mul(x = zero_mean_259_cast, y = zero_mean_259_cast)[name = tensor("zero_mean_sq_259_cast")]; + tensor var_8483 = const()[name = tensor("op_8483"), val = tensor([1])]; + tensor var_8484_cast = reduce_mean(axes = var_8483, keep_dims = var_23, x = zero_mean_sq_259_cast)[name = tensor("op_8484_cast")]; + tensor var_8485_to_fp16 = const()[name = tensor("op_8485_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_8486_cast = add(x = var_8484_cast, y = var_8485_to_fp16)[name = tensor("op_8486_cast")]; + tensor denom_259_epsilon_0_to_fp16 = const()[name = tensor("denom_259_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_259_cast = rsqrt(epsilon = denom_259_epsilon_0_to_fp16, x = var_8486_cast)[name = tensor("denom_259_cast")]; + tensor out_259_cast = mul(x = zero_mean_259_cast, y = denom_259_cast)[name = tensor("out_259_cast")]; + tensor var_8490_to_fp16 = const()[name = tensor("op_8490_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1206111616)))]; + tensor var_8491_cast = add(x = out_259_cast, y = var_8490_to_fp16)[name = tensor("op_8491_cast")]; + tensor var_8493_to_fp16 = const()[name = tensor("op_8493_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1206114240)))]; + tensor hidden_states_341_cast = mul(x = var_8491_cast, y = var_8493_to_fp16)[name = tensor("hidden_states_341_cast")]; + tensor var_8500 = const()[name = tensor("op_8500"), val = tensor([1, 1])]; + tensor var_8502 = const()[name = tensor("op_8502"), val = tensor([1, 1])]; + tensor q_173_pad_type_0 = const()[name = 
tensor("q_173_pad_type_0"), val = tensor("custom")]; + tensor q_173_pad_0 = const()[name = tensor("q_173_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1206116864))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1207345728))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_173_cast = conv(dilations = var_8502, groups = var_31, pad = q_173_pad_0, pad_type = q_173_pad_type_0, strides = var_8500, weight = unet_up_blocks_0_attentions_0_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_341_cast)[name = tensor("q_173_cast")]; + tensor var_8506 = const()[name = tensor("op_8506"), val = tensor([1, 1])]; + tensor var_8508 = const()[name = tensor("op_8508"), val = tensor([1, 1])]; + tensor k_173_pad_type_0 = const()[name = tensor("k_173_pad_type_0"), val = tensor("custom")]; + tensor k_173_pad_0 = const()[name = tensor("k_173_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1207345920))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1208574784))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_173_cast = conv(dilations = var_8508, groups = var_31, pad = k_173_pad_0, pad_type = k_173_pad_type_0, strides = var_8506, weight = unet_up_blocks_0_attentions_0_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_341_cast)[name = tensor("k_173_cast")]; + tensor var_8512 = const()[name = tensor("op_8512"), val = tensor([1, 1])]; + tensor var_8514 = const()[name = tensor("op_8514"), val = tensor([1, 1])]; + tensor v_173_pad_type_0 = const()[name = tensor("v_173_pad_type_0"), val = tensor("custom")]; + tensor v_173_pad_0 = const()[name = tensor("v_173_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1208574976))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1209803840))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_173_cast = conv(dilations = var_8514, groups = var_31, pad = v_173_pad_0, pad_type = v_173_pad_type_0, strides = var_8512, weight = unet_up_blocks_0_attentions_0_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_341_cast)[name = tensor("v_173_cast")]; + tensor var_8518 = const()[name = tensor("op_8518"), val = tensor([2, 20, 64, -1])]; + tensor var_8519_cast = reshape(shape = var_8518, x = q_173_cast)[name = tensor("op_8519_cast")]; + tensor var_8520 = const()[name = tensor("op_8520"), val = tensor([2, 20, 64, -1])]; + tensor var_8521_cast = reshape(shape = var_8520, x = k_173_cast)[name = tensor("op_8521_cast")]; + tensor var_8522 = const()[name = tensor("op_8522"), val = 
tensor([2, 20, 64, -1])]; + tensor var_8523_cast = reshape(shape = var_8522, x = v_173_cast)[name = tensor("op_8523_cast")]; + tensor attn_weights_345_transpose_x_0 = const()[name = tensor("attn_weights_345_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_345_transpose_y_0 = const()[name = tensor("attn_weights_345_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_345_cast = matmul(transpose_x = attn_weights_345_transpose_x_0, transpose_y = attn_weights_345_transpose_y_0, x = var_8519_cast, y = var_8521_cast)[name = tensor("attn_weights_345_cast")]; + tensor attn_weights_347_cast = mul(x = attn_weights_345_cast, y = var_12_to_fp16)[name = tensor("attn_weights_347_cast")]; + tensor var_8527_cast = softmax(axis = var_18, x = attn_weights_347_cast)[name = tensor("op_8527_cast")]; + tensor attn_173_transpose_x_0 = const()[name = tensor("attn_173_transpose_x_0"), val = tensor(false)]; + tensor attn_173_transpose_y_0 = const()[name = tensor("attn_173_transpose_y_0"), val = tensor(true)]; + tensor attn_173_cast = matmul(transpose_x = attn_173_transpose_x_0, transpose_y = attn_173_transpose_y_0, x = var_8523_cast, y = var_8527_cast)[name = tensor("attn_173_cast")]; + tensor var_8531 = const()[name = tensor("op_8531"), val = tensor([2, 1280, 1, -1])]; + tensor input_511_cast = reshape(shape = var_8531, x = attn_173_cast)[name = tensor("input_511_cast")]; + tensor var_8536 = const()[name = tensor("op_8536"), val = tensor([1, 1])]; + tensor var_8538 = const()[name = tensor("op_8538"), val = tensor([1, 1])]; + tensor var_8540_pad_type_0 = const()[name = tensor("op_8540_pad_type_0"), val = tensor("custom")]; + tensor var_8540_pad_0 = const()[name = tensor("op_8540_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1209804032))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1211032896))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_9_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_9_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1211033088)))]; + tensor var_8540_cast = conv(bias = unet_up_blocks_0_attentions_0_transformer_blocks_9_attn1_to_out_0_bias_to_fp16, dilations = var_8538, groups = var_31, pad = var_8540_pad_0, pad_type = var_8540_pad_type_0, strides = var_8536, weight = unet_up_blocks_0_attentions_0_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized, x = input_511_cast)[name = tensor("op_8540_cast")]; + tensor inputs_261_cast = add(x = var_8540_cast, y = inputs_259_cast)[name = tensor("inputs_261_cast")]; + tensor var_8544 = const()[name = tensor("op_8544"), val = tensor([1])]; + tensor channels_mean_261_cast = reduce_mean(axes = var_8544, keep_dims = var_23, x = inputs_261_cast)[name = tensor("channels_mean_261_cast")]; + tensor zero_mean_261_cast = sub(x = inputs_261_cast, y = channels_mean_261_cast)[name = tensor("zero_mean_261_cast")]; + tensor zero_mean_sq_261_cast = mul(x = zero_mean_261_cast, y = zero_mean_261_cast)[name = tensor("zero_mean_sq_261_cast")]; + tensor var_8548 = const()[name = tensor("op_8548"), val = tensor([1])]; + 
tensor var_8549_cast = reduce_mean(axes = var_8548, keep_dims = var_23, x = zero_mean_sq_261_cast)[name = tensor("op_8549_cast")]; + tensor var_8550_to_fp16 = const()[name = tensor("op_8550_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_8551_cast = add(x = var_8549_cast, y = var_8550_to_fp16)[name = tensor("op_8551_cast")]; + tensor denom_261_epsilon_0_to_fp16 = const()[name = tensor("denom_261_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_261_cast = rsqrt(epsilon = denom_261_epsilon_0_to_fp16, x = var_8551_cast)[name = tensor("denom_261_cast")]; + tensor out_261_cast = mul(x = zero_mean_261_cast, y = denom_261_cast)[name = tensor("out_261_cast")]; + tensor var_8555_to_fp16 = const()[name = tensor("op_8555_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1211035712)))]; + tensor var_8556_cast = add(x = out_261_cast, y = var_8555_to_fp16)[name = tensor("op_8556_cast")]; + tensor var_8558_to_fp16 = const()[name = tensor("op_8558_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1211038336)))]; + tensor hidden_states_343_cast = mul(x = var_8556_cast, y = var_8558_to_fp16)[name = tensor("hidden_states_343_cast")]; + tensor var_8565 = const()[name = tensor("op_8565"), val = tensor([1, 1])]; + tensor var_8567 = const()[name = tensor("op_8567"), val = tensor([1, 1])]; + tensor q_175_pad_type_0 = const()[name = tensor("q_175_pad_type_0"), val = tensor("custom")]; + tensor q_175_pad_0 = const()[name = tensor("q_175_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1211040960))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1212269824))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_175_cast = conv(dilations = var_8567, groups = var_31, pad = q_175_pad_0, pad_type = q_175_pad_type_0, strides = var_8565, weight = unet_up_blocks_0_attentions_0_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_343_cast)[name = tensor("q_175_cast")]; + tensor var_8571 = const()[name = tensor("op_8571"), val = tensor([1, 1])]; + tensor var_8573 = const()[name = tensor("op_8573"), val = tensor([1, 1])]; + tensor k_175_pad_type_0 = const()[name = tensor("k_175_pad_type_0"), val = tensor("custom")]; + tensor k_175_pad_0 = const()[name = tensor("k_175_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1212270016))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1214236160))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_175_cast = conv(dilations = var_8573, groups = var_31, pad = k_175_pad_0, pad_type = k_175_pad_type_0, strides = var_8571, weight = unet_up_blocks_0_attentions_0_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_175_cast")]; + tensor var_8577 = const()[name = tensor("op_8577"), val = tensor([1, 1])]; + tensor var_8579 = 
const()[name = tensor("op_8579"), val = tensor([1, 1])]; + tensor v_175_pad_type_0 = const()[name = tensor("v_175_pad_type_0"), val = tensor("custom")]; + tensor v_175_pad_0 = const()[name = tensor("v_175_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1214236352))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1216202496))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_175_cast = conv(dilations = var_8579, groups = var_31, pad = v_175_pad_0, pad_type = v_175_pad_type_0, strides = var_8577, weight = unet_up_blocks_0_attentions_0_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_175_cast")]; + tensor var_8583 = const()[name = tensor("op_8583"), val = tensor([2, 20, 64, -1])]; + tensor var_8584_cast = reshape(shape = var_8583, x = q_175_cast)[name = tensor("op_8584_cast")]; + tensor var_8585 = const()[name = tensor("op_8585"), val = tensor([2, 20, 64, -1])]; + tensor var_8586_cast = reshape(shape = var_8585, x = k_175_cast)[name = tensor("op_8586_cast")]; + tensor var_8587 = const()[name = tensor("op_8587"), val = tensor([2, 20, 64, -1])]; + tensor var_8588_cast = reshape(shape = var_8587, x = v_175_cast)[name = tensor("op_8588_cast")]; + tensor attn_weights_349_transpose_x_0 = const()[name = tensor("attn_weights_349_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_349_transpose_y_0 = const()[name = tensor("attn_weights_349_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_349_cast = matmul(transpose_x = attn_weights_349_transpose_x_0, transpose_y = attn_weights_349_transpose_y_0, x = var_8584_cast, y = var_8586_cast)[name = tensor("attn_weights_349_cast")]; + tensor attn_weights_351_cast = mul(x = attn_weights_349_cast, y = var_12_to_fp16)[name = tensor("attn_weights_351_cast")]; + tensor var_8592_cast = softmax(axis = var_18, x = attn_weights_351_cast)[name = tensor("op_8592_cast")]; + tensor attn_175_transpose_x_0 = const()[name = tensor("attn_175_transpose_x_0"), val = tensor(false)]; + tensor attn_175_transpose_y_0 = const()[name = tensor("attn_175_transpose_y_0"), val = tensor(true)]; + tensor attn_175_cast = matmul(transpose_x = attn_175_transpose_x_0, transpose_y = attn_175_transpose_y_0, x = var_8588_cast, y = var_8592_cast)[name = tensor("attn_175_cast")]; + tensor var_8596 = const()[name = tensor("op_8596"), val = tensor([2, 1280, 1, -1])]; + tensor input_513_cast = reshape(shape = var_8596, x = attn_175_cast)[name = tensor("input_513_cast")]; + tensor var_8601 = const()[name = tensor("op_8601"), val = tensor([1, 1])]; + tensor var_8603 = const()[name = tensor("op_8603"), val = tensor([1, 1])]; + tensor var_8605_pad_type_0 = const()[name = tensor("op_8605_pad_type_0"), val = tensor("custom")]; + tensor var_8605_pad_0 = const()[name = tensor("op_8605_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1216202688))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1217431552))), name = 
tensor("unet_up_blocks_0_attentions_0_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_9_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_9_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1217431744)))]; + tensor var_8605_cast = conv(bias = unet_up_blocks_0_attentions_0_transformer_blocks_9_attn2_to_out_0_bias_to_fp16, dilations = var_8603, groups = var_31, pad = var_8605_pad_0, pad_type = var_8605_pad_type_0, strides = var_8601, weight = unet_up_blocks_0_attentions_0_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized, x = input_513_cast)[name = tensor("op_8605_cast")]; + tensor inputs_263_cast = add(x = var_8605_cast, y = inputs_261_cast)[name = tensor("inputs_263_cast")]; + tensor var_8609 = const()[name = tensor("op_8609"), val = tensor([1])]; + tensor channels_mean_263_cast = reduce_mean(axes = var_8609, keep_dims = var_23, x = inputs_263_cast)[name = tensor("channels_mean_263_cast")]; + tensor zero_mean_263_cast = sub(x = inputs_263_cast, y = channels_mean_263_cast)[name = tensor("zero_mean_263_cast")]; + tensor zero_mean_sq_263_cast = mul(x = zero_mean_263_cast, y = zero_mean_263_cast)[name = tensor("zero_mean_sq_263_cast")]; + tensor var_8613 = const()[name = tensor("op_8613"), val = tensor([1])]; + tensor var_8614_cast = reduce_mean(axes = var_8613, keep_dims = var_23, x = zero_mean_sq_263_cast)[name = tensor("op_8614_cast")]; + tensor var_8615_to_fp16 = const()[name = tensor("op_8615_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_8616_cast = add(x = var_8614_cast, y = var_8615_to_fp16)[name = tensor("op_8616_cast")]; + tensor denom_263_epsilon_0_to_fp16 = const()[name = tensor("denom_263_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_263_cast = rsqrt(epsilon = denom_263_epsilon_0_to_fp16, x = var_8616_cast)[name = tensor("denom_263_cast")]; + tensor out_263_cast = mul(x = zero_mean_263_cast, y = denom_263_cast)[name = tensor("out_263_cast")]; + tensor var_8620_to_fp16 = const()[name = tensor("op_8620_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1217434368)))]; + tensor var_8621_cast = add(x = out_263_cast, y = var_8620_to_fp16)[name = tensor("op_8621_cast")]; + tensor var_8623_to_fp16 = const()[name = tensor("op_8623_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1217436992)))]; + tensor input_515_cast = mul(x = var_8621_cast, y = var_8623_to_fp16)[name = tensor("input_515_cast")]; + tensor var_8631 = const()[name = tensor("op_8631"), val = tensor([1, 1])]; + tensor var_8633 = const()[name = tensor("op_8633"), val = tensor([1, 1])]; + tensor var_8635_pad_type_0 = const()[name = tensor("op_8635_pad_type_0"), val = tensor("custom")]; + tensor var_8635_pad_0 = const()[name = tensor("op_8635_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1217439616))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1227270080))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor 
unet_up_blocks_0_attentions_0_transformer_blocks_9_ff_net_0_proj_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1227270272))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1227278016))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_9_ff_net_0_proj_bias_to_fp16_palettized"), shape = tensor([10240])]; + tensor var_8635_cast = conv(bias = unet_up_blocks_0_attentions_0_transformer_blocks_9_ff_net_0_proj_bias_to_fp16_palettized, dilations = var_8633, groups = var_31, pad = var_8635_pad_0, pad_type = var_8635_pad_type_0, strides = var_8631, weight = unet_up_blocks_0_attentions_0_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized, x = input_515_cast)[name = tensor("op_8635_cast")]; + tensor var_8636_split_sizes_0 = const()[name = tensor("op_8636_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_8636_axis_0 = const()[name = tensor("op_8636_axis_0"), val = tensor(1)]; + tensor var_8636_cast_0, tensor var_8636_cast_1 = split(axis = var_8636_axis_0, split_sizes = var_8636_split_sizes_0, x = var_8635_cast)[name = tensor("op_8636_cast")]; + tensor var_8638_mode_0 = const()[name = tensor("op_8638_mode_0"), val = tensor("EXACT")]; + tensor var_8638_cast = gelu(mode = var_8638_mode_0, x = var_8636_cast_1)[name = tensor("op_8638_cast")]; + tensor input_517_cast = mul(x = var_8636_cast_0, y = var_8638_cast)[name = tensor("input_517_cast")]; + tensor var_8642 = const()[name = tensor("op_8642"), val = tensor([1, 1])]; + tensor var_8644 = const()[name = tensor("op_8644"), val = tensor([1, 1])]; + tensor var_8646_pad_type_0 = const()[name = tensor("op_8646_pad_type_0"), val = tensor("custom")]; + tensor var_8646_pad_0 = const()[name = tensor("op_8646_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1227278208))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1232193472))), name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor unet_up_blocks_0_attentions_0_transformer_blocks_9_ff_net_2_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_0_transformer_blocks_9_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1232193664)))]; + tensor var_8646_cast = conv(bias = unet_up_blocks_0_attentions_0_transformer_blocks_9_ff_net_2_bias_to_fp16, dilations = var_8644, groups = var_31, pad = var_8646_pad_0, pad_type = var_8646_pad_type_0, strides = var_8642, weight = unet_up_blocks_0_attentions_0_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized, x = input_517_cast)[name = tensor("op_8646_cast")]; + tensor hidden_states_347_cast = add(x = var_8646_cast, y = inputs_263_cast)[name = tensor("hidden_states_347_cast")]; + tensor var_8648 = const()[name = tensor("op_8648"), val = tensor([2, 1280, 32, 32])]; + tensor input_519_cast = reshape(shape = var_8648, x = hidden_states_347_cast)[name = tensor("input_519_cast")]; + tensor var_8652 = const()[name = tensor("op_8652"), val = tensor([1, 1])]; + tensor var_8654 = const()[name = tensor("op_8654"), val = tensor([1, 1])]; + tensor hidden_states_349_pad_type_0 = const()[name = 
tensor("hidden_states_349_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_349_pad_0 = const()[name = tensor("hidden_states_349_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_0_proj_out_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1232196288))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1233425152))), name = tensor("unet_up_blocks_0_attentions_0_proj_out_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_0_proj_out_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_0_proj_out_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1233425344)))]; + tensor hidden_states_349_cast = conv(bias = unet_up_blocks_0_attentions_0_proj_out_bias_to_fp16, dilations = var_8654, groups = var_31, pad = hidden_states_349_pad_0, pad_type = hidden_states_349_pad_type_0, strides = var_8652, weight = unet_up_blocks_0_attentions_0_proj_out_weight_to_fp16_palettized, x = input_519_cast)[name = tensor("hidden_states_349_cast")]; + tensor hidden_states_351_cast = add(x = hidden_states_349_cast, y = hidden_states_283_cast)[name = tensor("hidden_states_351_cast")]; + tensor input_521_interleave_0 = const()[name = tensor("input_521_interleave_0"), val = tensor(false)]; + tensor input_521_cast = concat(axis = var_31, interleave = input_521_interleave_0, values = (hidden_states_351_cast, input_213_cast))[name = tensor("input_521_cast")]; + tensor reshape_96_shape_0 = const()[name = tensor("reshape_96_shape_0"), val = tensor([2, 32, 80, 32, 32])]; + tensor reshape_96_cast = reshape(shape = reshape_96_shape_0, x = input_521_cast)[name = tensor("reshape_96_cast")]; + tensor reduce_mean_72_axes_0 = const()[name = tensor("reduce_mean_72_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_72_keep_dims_0 = const()[name = tensor("reduce_mean_72_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_72_cast = reduce_mean(axes = reduce_mean_72_axes_0, keep_dims = reduce_mean_72_keep_dims_0, x = reshape_96_cast)[name = tensor("reduce_mean_72_cast")]; + tensor sub_48_cast = sub(x = reshape_96_cast, y = reduce_mean_72_cast)[name = tensor("sub_48_cast")]; + tensor square_24_cast = square(x = sub_48_cast)[name = tensor("square_24_cast")]; + tensor reduce_mean_74_axes_0 = const()[name = tensor("reduce_mean_74_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_74_keep_dims_0 = const()[name = tensor("reduce_mean_74_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_74_cast = reduce_mean(axes = reduce_mean_74_axes_0, keep_dims = reduce_mean_74_keep_dims_0, x = square_24_cast)[name = tensor("reduce_mean_74_cast")]; + tensor add_48_y_0_to_fp16 = const()[name = tensor("add_48_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_48_cast = add(x = reduce_mean_74_cast, y = add_48_y_0_to_fp16)[name = tensor("add_48_cast")]; + tensor sqrt_24_cast = sqrt(x = add_48_cast)[name = tensor("sqrt_24_cast")]; + tensor real_div_24_cast = real_div(x = sub_48_cast, y = sqrt_24_cast)[name = tensor("real_div_24_cast")]; + tensor reshape_97_shape_0 = const()[name = tensor("reshape_97_shape_0"), val = tensor([2, 2560, 32, 32])]; + tensor reshape_97_cast = reshape(shape = reshape_97_shape_0, x = real_div_24_cast)[name = tensor("reshape_97_cast")]; + tensor add_49_gamma_0_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path 
= tensor("@model_path/weights/weight.bin"), offset = tensor(1233427968))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1233429952))), name = tensor("add_49_gamma_0_to_fp16_palettized"), shape = tensor([2560])]; + tensor add_49_beta_0_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1233430144))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1233432128))), name = tensor("add_49_beta_0_to_fp16_palettized"), shape = tensor([2560])]; + tensor add_49_epsilon_0_to_fp16 = const()[name = tensor("add_49_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_49_cast = batch_norm(beta = add_49_beta_0_to_fp16_palettized, epsilon = add_49_epsilon_0_to_fp16, gamma = add_49_gamma_0_to_fp16_palettized, mean = add_43_mean_0_to_fp16_palettized, variance = add_43_variance_0_to_fp16_palettized, x = reshape_97_cast)[name = tensor("add_49_cast")]; + tensor input_525_cast = silu(x = add_49_cast)[name = tensor("input_525_cast")]; + tensor var_8672 = const()[name = tensor("op_8672"), val = tensor([1, 1])]; + tensor var_8674 = const()[name = tensor("op_8674"), val = tensor([1, 1])]; + tensor hidden_states_353_pad_type_0 = const()[name = tensor("hidden_states_353_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_353_pad_0 = const()[name = tensor("hidden_states_353_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor unet_up_blocks_0_resnets_1_conv1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1233432320))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1255550784))), name = tensor("unet_up_blocks_0_resnets_1_conv1_weight_to_fp16_palettized"), shape = tensor([1280, 2560, 3, 3])]; + tensor unet_up_blocks_0_resnets_1_conv1_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_resnets_1_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1255550976)))]; + tensor hidden_states_353_cast = conv(bias = unet_up_blocks_0_resnets_1_conv1_bias_to_fp16, dilations = var_8674, groups = var_31, pad = hidden_states_353_pad_0, pad_type = hidden_states_353_pad_type_0, strides = var_8672, weight = unet_up_blocks_0_resnets_1_conv1_weight_to_fp16_palettized, x = input_525_cast)[name = tensor("hidden_states_353_cast")]; + tensor var_8680 = const()[name = tensor("op_8680"), val = tensor([1, 1])]; + tensor var_8682 = const()[name = tensor("op_8682"), val = tensor([1, 1])]; + tensor temb_19_pad_type_0 = const()[name = tensor("temb_19_pad_type_0"), val = tensor("custom")]; + tensor temb_19_pad_0 = const()[name = tensor("temb_19_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_resnets_1_time_emb_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1255553600))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1256782464))), name = tensor("unet_up_blocks_0_resnets_1_time_emb_proj_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_up_blocks_0_resnets_1_time_emb_proj_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_resnets_1_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1256782656)))]; + tensor temb_19_cast = conv(bias = 
unet_up_blocks_0_resnets_1_time_emb_proj_bias_to_fp16, dilations = var_8682, groups = var_31, pad = temb_19_pad_0, pad_type = temb_19_pad_type_0, strides = var_8680, weight = unet_up_blocks_0_resnets_1_time_emb_proj_weight_to_fp16_palettized, x = input_21_cast)[name = tensor("temb_19_cast")]; + tensor input_529_cast = add(x = hidden_states_353_cast, y = temb_19_cast)[name = tensor("input_529_cast")]; + tensor reshape_100_shape_0 = const()[name = tensor("reshape_100_shape_0"), val = tensor([2, 32, 40, 32, 32])]; + tensor reshape_100_cast = reshape(shape = reshape_100_shape_0, x = input_529_cast)[name = tensor("reshape_100_cast")]; + tensor reduce_mean_75_axes_0 = const()[name = tensor("reduce_mean_75_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_75_keep_dims_0 = const()[name = tensor("reduce_mean_75_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_75_cast = reduce_mean(axes = reduce_mean_75_axes_0, keep_dims = reduce_mean_75_keep_dims_0, x = reshape_100_cast)[name = tensor("reduce_mean_75_cast")]; + tensor sub_50_cast = sub(x = reshape_100_cast, y = reduce_mean_75_cast)[name = tensor("sub_50_cast")]; + tensor square_25_cast = square(x = sub_50_cast)[name = tensor("square_25_cast")]; + tensor reduce_mean_77_axes_0 = const()[name = tensor("reduce_mean_77_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_77_keep_dims_0 = const()[name = tensor("reduce_mean_77_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_77_cast = reduce_mean(axes = reduce_mean_77_axes_0, keep_dims = reduce_mean_77_keep_dims_0, x = square_25_cast)[name = tensor("reduce_mean_77_cast")]; + tensor add_50_y_0_to_fp16 = const()[name = tensor("add_50_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_50_cast = add(x = reduce_mean_77_cast, y = add_50_y_0_to_fp16)[name = tensor("add_50_cast")]; + tensor sqrt_25_cast = sqrt(x = add_50_cast)[name = tensor("sqrt_25_cast")]; + tensor real_div_25_cast = real_div(x = sub_50_cast, y = sqrt_25_cast)[name = tensor("real_div_25_cast")]; + tensor reshape_101_shape_0 = const()[name = tensor("reshape_101_shape_0"), val = tensor([2, 1280, 32, 32])]; + tensor reshape_101_cast = reshape(shape = reshape_101_shape_0, x = real_div_25_cast)[name = tensor("reshape_101_cast")]; + tensor add_51_gamma_0_to_fp16 = const()[name = tensor("add_51_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1256785280)))]; + tensor add_51_beta_0_to_fp16 = const()[name = tensor("add_51_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1256787904)))]; + tensor add_51_epsilon_0_to_fp16 = const()[name = tensor("add_51_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_51_cast = batch_norm(beta = add_51_beta_0_to_fp16, epsilon = add_51_epsilon_0_to_fp16, gamma = add_51_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_101_cast)[name = tensor("add_51_cast")]; + tensor input_533_cast = silu(x = add_51_cast)[name = tensor("input_533_cast")]; + tensor var_8692 = const()[name = tensor("op_8692"), val = tensor([1, 1])]; + tensor var_8694 = const()[name = tensor("op_8694"), val = tensor([1, 1])]; + tensor hidden_states_355_pad_type_0 = const()[name = tensor("hidden_states_355_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_355_pad_0 = const()[name = tensor("hidden_states_355_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor unet_up_blocks_0_resnets_1_conv2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = 
tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1256790528))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1267849792))), name = tensor("unet_up_blocks_0_resnets_1_conv2_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 3, 3])]; + tensor unet_up_blocks_0_resnets_1_conv2_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_resnets_1_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1267849984)))]; + tensor hidden_states_355_cast = conv(bias = unet_up_blocks_0_resnets_1_conv2_bias_to_fp16, dilations = var_8694, groups = var_31, pad = hidden_states_355_pad_0, pad_type = hidden_states_355_pad_type_0, strides = var_8692, weight = unet_up_blocks_0_resnets_1_conv2_weight_to_fp16_palettized, x = input_533_cast)[name = tensor("hidden_states_355_cast")]; + tensor var_8699 = const()[name = tensor("op_8699"), val = tensor([1, 1])]; + tensor var_8701 = const()[name = tensor("op_8701"), val = tensor([1, 1])]; + tensor x_7_pad_type_0 = const()[name = tensor("x_7_pad_type_0"), val = tensor("custom")]; + tensor x_7_pad_0 = const()[name = tensor("x_7_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_resnets_1_conv_shortcut_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1267852608))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1270310272))), name = tensor("unet_up_blocks_0_resnets_1_conv_shortcut_weight_to_fp16_palettized"), shape = tensor([1280, 2560, 1, 1])]; + tensor unet_up_blocks_0_resnets_1_conv_shortcut_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_resnets_1_conv_shortcut_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1270310464)))]; + tensor x_7_cast = conv(bias = unet_up_blocks_0_resnets_1_conv_shortcut_bias_to_fp16, dilations = var_8701, groups = var_31, pad = x_7_pad_0, pad_type = x_7_pad_type_0, strides = var_8699, weight = unet_up_blocks_0_resnets_1_conv_shortcut_weight_to_fp16_palettized, x = input_521_cast)[name = tensor("x_7_cast")]; + tensor hidden_states_357_cast = add(x = x_7_cast, y = hidden_states_355_cast)[name = tensor("hidden_states_357_cast")]; + tensor reshape_104_shape_0 = const()[name = tensor("reshape_104_shape_0"), val = tensor([2, 32, 40, 32, 32])]; + tensor reshape_104_cast = reshape(shape = reshape_104_shape_0, x = hidden_states_357_cast)[name = tensor("reshape_104_cast")]; + tensor reduce_mean_78_axes_0 = const()[name = tensor("reduce_mean_78_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_78_keep_dims_0 = const()[name = tensor("reduce_mean_78_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_78_cast = reduce_mean(axes = reduce_mean_78_axes_0, keep_dims = reduce_mean_78_keep_dims_0, x = reshape_104_cast)[name = tensor("reduce_mean_78_cast")]; + tensor sub_52_cast = sub(x = reshape_104_cast, y = reduce_mean_78_cast)[name = tensor("sub_52_cast")]; + tensor square_26_cast = square(x = sub_52_cast)[name = tensor("square_26_cast")]; + tensor reduce_mean_80_axes_0 = const()[name = tensor("reduce_mean_80_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_80_keep_dims_0 = const()[name = tensor("reduce_mean_80_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_80_cast = reduce_mean(axes = reduce_mean_80_axes_0, keep_dims = reduce_mean_80_keep_dims_0, x = square_26_cast)[name = 
tensor("reduce_mean_80_cast")]; + tensor add_52_y_0_to_fp16 = const()[name = tensor("add_52_y_0_to_fp16"), val = tensor(0x1.1p-20)]; + tensor add_52_cast = add(x = reduce_mean_80_cast, y = add_52_y_0_to_fp16)[name = tensor("add_52_cast")]; + tensor sqrt_26_cast = sqrt(x = add_52_cast)[name = tensor("sqrt_26_cast")]; + tensor real_div_26_cast = real_div(x = sub_52_cast, y = sqrt_26_cast)[name = tensor("real_div_26_cast")]; + tensor reshape_105_shape_0 = const()[name = tensor("reshape_105_shape_0"), val = tensor([2, 1280, 32, 32])]; + tensor reshape_105_cast = reshape(shape = reshape_105_shape_0, x = real_div_26_cast)[name = tensor("reshape_105_cast")]; + tensor add_53_gamma_0_to_fp16 = const()[name = tensor("add_53_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1270313088)))]; + tensor add_53_beta_0_to_fp16 = const()[name = tensor("add_53_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1270315712)))]; + tensor add_53_epsilon_0_to_fp16 = const()[name = tensor("add_53_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_53_cast = batch_norm(beta = add_53_beta_0_to_fp16, epsilon = add_53_epsilon_0_to_fp16, gamma = add_53_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_105_cast)[name = tensor("add_53_cast")]; + tensor var_8739 = const()[name = tensor("op_8739"), val = tensor([1, 1])]; + tensor var_8741 = const()[name = tensor("op_8741"), val = tensor([1, 1])]; + tensor hidden_states_359_pad_type_0 = const()[name = tensor("hidden_states_359_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_359_pad_0 = const()[name = tensor("hidden_states_359_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_proj_in_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1270318336))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1271547200))), name = tensor("unet_up_blocks_0_attentions_1_proj_in_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_1_proj_in_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_1_proj_in_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1271547392)))]; + tensor hidden_states_359_cast = conv(bias = unet_up_blocks_0_attentions_1_proj_in_bias_to_fp16, dilations = var_8741, groups = var_31, pad = hidden_states_359_pad_0, pad_type = hidden_states_359_pad_type_0, strides = var_8739, weight = unet_up_blocks_0_attentions_1_proj_in_weight_to_fp16_palettized, x = add_53_cast)[name = tensor("hidden_states_359_cast")]; + tensor var_8746 = const()[name = tensor("op_8746"), val = tensor([2, 1280, 1, 1024])]; + tensor inputs_265_cast = reshape(shape = var_8746, x = hidden_states_359_cast)[name = tensor("inputs_265_cast")]; + tensor var_8756 = const()[name = tensor("op_8756"), val = tensor([1])]; + tensor channels_mean_265_cast = reduce_mean(axes = var_8756, keep_dims = var_23, x = inputs_265_cast)[name = tensor("channels_mean_265_cast")]; + tensor zero_mean_265_cast = sub(x = inputs_265_cast, y = channels_mean_265_cast)[name = tensor("zero_mean_265_cast")]; + tensor zero_mean_sq_265_cast = mul(x = zero_mean_265_cast, y = zero_mean_265_cast)[name = tensor("zero_mean_sq_265_cast")]; + tensor var_8760 = const()[name = tensor("op_8760"), val = 
tensor([1])]; + tensor var_8761_cast = reduce_mean(axes = var_8760, keep_dims = var_23, x = zero_mean_sq_265_cast)[name = tensor("op_8761_cast")]; + tensor var_8762_to_fp16 = const()[name = tensor("op_8762_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_8763_cast = add(x = var_8761_cast, y = var_8762_to_fp16)[name = tensor("op_8763_cast")]; + tensor denom_265_epsilon_0_to_fp16 = const()[name = tensor("denom_265_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_265_cast = rsqrt(epsilon = denom_265_epsilon_0_to_fp16, x = var_8763_cast)[name = tensor("denom_265_cast")]; + tensor out_265_cast = mul(x = zero_mean_265_cast, y = denom_265_cast)[name = tensor("out_265_cast")]; + tensor var_8767_to_fp16 = const()[name = tensor("op_8767_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1271550016)))]; + tensor var_8768_cast = add(x = out_265_cast, y = var_8767_to_fp16)[name = tensor("op_8768_cast")]; + tensor var_8770_to_fp16 = const()[name = tensor("op_8770_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1271552640)))]; + tensor hidden_states_361_cast = mul(x = var_8768_cast, y = var_8770_to_fp16)[name = tensor("hidden_states_361_cast")]; + tensor var_8777 = const()[name = tensor("op_8777"), val = tensor([1, 1])]; + tensor var_8779 = const()[name = tensor("op_8779"), val = tensor([1, 1])]; + tensor q_177_pad_type_0 = const()[name = tensor("q_177_pad_type_0"), val = tensor("custom")]; + tensor q_177_pad_0 = const()[name = tensor("q_177_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1271555264))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1272784128))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_177_cast = conv(dilations = var_8779, groups = var_31, pad = q_177_pad_0, pad_type = q_177_pad_type_0, strides = var_8777, weight = unet_up_blocks_0_attentions_1_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_361_cast)[name = tensor("q_177_cast")]; + tensor var_8783 = const()[name = tensor("op_8783"), val = tensor([1, 1])]; + tensor var_8785 = const()[name = tensor("op_8785"), val = tensor([1, 1])]; + tensor k_177_pad_type_0 = const()[name = tensor("k_177_pad_type_0"), val = tensor("custom")]; + tensor k_177_pad_0 = const()[name = tensor("k_177_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1272784320))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1274013184))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_177_cast = conv(dilations = var_8785, groups = var_31, pad = k_177_pad_0, pad_type = k_177_pad_type_0, strides = var_8783, weight = unet_up_blocks_0_attentions_1_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_361_cast)[name = tensor("k_177_cast")]; + tensor var_8789 = const()[name = tensor("op_8789"), val = tensor([1, 1])]; + tensor 
var_8791 = const()[name = tensor("op_8791"), val = tensor([1, 1])]; + tensor v_177_pad_type_0 = const()[name = tensor("v_177_pad_type_0"), val = tensor("custom")]; + tensor v_177_pad_0 = const()[name = tensor("v_177_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1274013376))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1275242240))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_177_cast = conv(dilations = var_8791, groups = var_31, pad = v_177_pad_0, pad_type = v_177_pad_type_0, strides = var_8789, weight = unet_up_blocks_0_attentions_1_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_361_cast)[name = tensor("v_177_cast")]; + tensor var_8795 = const()[name = tensor("op_8795"), val = tensor([2, 20, 64, -1])]; + tensor var_8796_cast = reshape(shape = var_8795, x = q_177_cast)[name = tensor("op_8796_cast")]; + tensor var_8797 = const()[name = tensor("op_8797"), val = tensor([2, 20, 64, -1])]; + tensor var_8798_cast = reshape(shape = var_8797, x = k_177_cast)[name = tensor("op_8798_cast")]; + tensor var_8799 = const()[name = tensor("op_8799"), val = tensor([2, 20, 64, -1])]; + tensor var_8800_cast = reshape(shape = var_8799, x = v_177_cast)[name = tensor("op_8800_cast")]; + tensor attn_weights_353_transpose_x_0 = const()[name = tensor("attn_weights_353_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_353_transpose_y_0 = const()[name = tensor("attn_weights_353_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_353_cast = matmul(transpose_x = attn_weights_353_transpose_x_0, transpose_y = attn_weights_353_transpose_y_0, x = var_8796_cast, y = var_8798_cast)[name = tensor("attn_weights_353_cast")]; + tensor attn_weights_355_cast = mul(x = attn_weights_353_cast, y = var_12_to_fp16)[name = tensor("attn_weights_355_cast")]; + tensor var_8804_cast = softmax(axis = var_18, x = attn_weights_355_cast)[name = tensor("op_8804_cast")]; + tensor attn_177_transpose_x_0 = const()[name = tensor("attn_177_transpose_x_0"), val = tensor(false)]; + tensor attn_177_transpose_y_0 = const()[name = tensor("attn_177_transpose_y_0"), val = tensor(true)]; + tensor attn_177_cast = matmul(transpose_x = attn_177_transpose_x_0, transpose_y = attn_177_transpose_y_0, x = var_8800_cast, y = var_8804_cast)[name = tensor("attn_177_cast")]; + tensor var_8808 = const()[name = tensor("op_8808"), val = tensor([2, 1280, 1, -1])]; + tensor input_537_cast = reshape(shape = var_8808, x = attn_177_cast)[name = tensor("input_537_cast")]; + tensor var_8813 = const()[name = tensor("op_8813"), val = tensor([1, 1])]; + tensor var_8815 = const()[name = tensor("op_8815"), val = tensor([1, 1])]; + tensor var_8817_pad_type_0 = const()[name = tensor("op_8817_pad_type_0"), val = tensor("custom")]; + tensor var_8817_pad_0 = const()[name = tensor("op_8817_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1275242432))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1276471296))), name = 
tensor("unet_up_blocks_0_attentions_1_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_0_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_0_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1276471488)))]; + tensor var_8817_cast = conv(bias = unet_up_blocks_0_attentions_1_transformer_blocks_0_attn1_to_out_0_bias_to_fp16, dilations = var_8815, groups = var_31, pad = var_8817_pad_0, pad_type = var_8817_pad_type_0, strides = var_8813, weight = unet_up_blocks_0_attentions_1_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized, x = input_537_cast)[name = tensor("op_8817_cast")]; + tensor inputs_267_cast = add(x = var_8817_cast, y = inputs_265_cast)[name = tensor("inputs_267_cast")]; + tensor var_8821 = const()[name = tensor("op_8821"), val = tensor([1])]; + tensor channels_mean_267_cast = reduce_mean(axes = var_8821, keep_dims = var_23, x = inputs_267_cast)[name = tensor("channels_mean_267_cast")]; + tensor zero_mean_267_cast = sub(x = inputs_267_cast, y = channels_mean_267_cast)[name = tensor("zero_mean_267_cast")]; + tensor zero_mean_sq_267_cast = mul(x = zero_mean_267_cast, y = zero_mean_267_cast)[name = tensor("zero_mean_sq_267_cast")]; + tensor var_8825 = const()[name = tensor("op_8825"), val = tensor([1])]; + tensor var_8826_cast = reduce_mean(axes = var_8825, keep_dims = var_23, x = zero_mean_sq_267_cast)[name = tensor("op_8826_cast")]; + tensor var_8827_to_fp16 = const()[name = tensor("op_8827_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_8828_cast = add(x = var_8826_cast, y = var_8827_to_fp16)[name = tensor("op_8828_cast")]; + tensor denom_267_epsilon_0_to_fp16 = const()[name = tensor("denom_267_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_267_cast = rsqrt(epsilon = denom_267_epsilon_0_to_fp16, x = var_8828_cast)[name = tensor("denom_267_cast")]; + tensor out_267_cast = mul(x = zero_mean_267_cast, y = denom_267_cast)[name = tensor("out_267_cast")]; + tensor var_8832_to_fp16 = const()[name = tensor("op_8832_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1276474112)))]; + tensor var_8833_cast = add(x = out_267_cast, y = var_8832_to_fp16)[name = tensor("op_8833_cast")]; + tensor var_8835_to_fp16 = const()[name = tensor("op_8835_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1276476736)))]; + tensor hidden_states_363_cast = mul(x = var_8833_cast, y = var_8835_to_fp16)[name = tensor("hidden_states_363_cast")]; + tensor var_8842 = const()[name = tensor("op_8842"), val = tensor([1, 1])]; + tensor var_8844 = const()[name = tensor("op_8844"), val = tensor([1, 1])]; + tensor q_179_pad_type_0 = const()[name = tensor("q_179_pad_type_0"), val = tensor("custom")]; + tensor q_179_pad_0 = const()[name = tensor("q_179_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1276479360))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1277708224))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_179_cast = 
conv(dilations = var_8844, groups = var_31, pad = q_179_pad_0, pad_type = q_179_pad_type_0, strides = var_8842, weight = unet_up_blocks_0_attentions_1_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_363_cast)[name = tensor("q_179_cast")]; + tensor var_8848 = const()[name = tensor("op_8848"), val = tensor([1, 1])]; + tensor var_8850 = const()[name = tensor("op_8850"), val = tensor([1, 1])]; + tensor k_179_pad_type_0 = const()[name = tensor("k_179_pad_type_0"), val = tensor("custom")]; + tensor k_179_pad_0 = const()[name = tensor("k_179_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1277708416))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1279674560))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_179_cast = conv(dilations = var_8850, groups = var_31, pad = k_179_pad_0, pad_type = k_179_pad_type_0, strides = var_8848, weight = unet_up_blocks_0_attentions_1_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_179_cast")]; + tensor var_8854 = const()[name = tensor("op_8854"), val = tensor([1, 1])]; + tensor var_8856 = const()[name = tensor("op_8856"), val = tensor([1, 1])]; + tensor v_179_pad_type_0 = const()[name = tensor("v_179_pad_type_0"), val = tensor("custom")]; + tensor v_179_pad_0 = const()[name = tensor("v_179_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1279674752))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1281640896))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_179_cast = conv(dilations = var_8856, groups = var_31, pad = v_179_pad_0, pad_type = v_179_pad_type_0, strides = var_8854, weight = unet_up_blocks_0_attentions_1_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_179_cast")]; + tensor var_8860 = const()[name = tensor("op_8860"), val = tensor([2, 20, 64, -1])]; + tensor var_8861_cast = reshape(shape = var_8860, x = q_179_cast)[name = tensor("op_8861_cast")]; + tensor var_8862 = const()[name = tensor("op_8862"), val = tensor([2, 20, 64, -1])]; + tensor var_8863_cast = reshape(shape = var_8862, x = k_179_cast)[name = tensor("op_8863_cast")]; + tensor var_8864 = const()[name = tensor("op_8864"), val = tensor([2, 20, 64, -1])]; + tensor var_8865_cast = reshape(shape = var_8864, x = v_179_cast)[name = tensor("op_8865_cast")]; + tensor attn_weights_357_transpose_x_0 = const()[name = tensor("attn_weights_357_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_357_transpose_y_0 = const()[name = tensor("attn_weights_357_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_357_cast = matmul(transpose_x = attn_weights_357_transpose_x_0, transpose_y = attn_weights_357_transpose_y_0, x = var_8861_cast, y = var_8863_cast)[name = tensor("attn_weights_357_cast")]; + tensor attn_weights_359_cast = mul(x = attn_weights_357_cast, y = 
var_12_to_fp16)[name = tensor("attn_weights_359_cast")]; + tensor var_8869_cast = softmax(axis = var_18, x = attn_weights_359_cast)[name = tensor("op_8869_cast")]; + tensor attn_179_transpose_x_0 = const()[name = tensor("attn_179_transpose_x_0"), val = tensor(false)]; + tensor attn_179_transpose_y_0 = const()[name = tensor("attn_179_transpose_y_0"), val = tensor(true)]; + tensor attn_179_cast = matmul(transpose_x = attn_179_transpose_x_0, transpose_y = attn_179_transpose_y_0, x = var_8865_cast, y = var_8869_cast)[name = tensor("attn_179_cast")]; + tensor var_8873 = const()[name = tensor("op_8873"), val = tensor([2, 1280, 1, -1])]; + tensor input_539_cast = reshape(shape = var_8873, x = attn_179_cast)[name = tensor("input_539_cast")]; + tensor var_8878 = const()[name = tensor("op_8878"), val = tensor([1, 1])]; + tensor var_8880 = const()[name = tensor("op_8880"), val = tensor([1, 1])]; + tensor var_8882_pad_type_0 = const()[name = tensor("op_8882_pad_type_0"), val = tensor("custom")]; + tensor var_8882_pad_0 = const()[name = tensor("op_8882_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1281641088))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1282869952))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_0_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_0_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1282870144)))]; + tensor var_8882_cast = conv(bias = unet_up_blocks_0_attentions_1_transformer_blocks_0_attn2_to_out_0_bias_to_fp16, dilations = var_8880, groups = var_31, pad = var_8882_pad_0, pad_type = var_8882_pad_type_0, strides = var_8878, weight = unet_up_blocks_0_attentions_1_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized, x = input_539_cast)[name = tensor("op_8882_cast")]; + tensor inputs_269_cast = add(x = var_8882_cast, y = inputs_267_cast)[name = tensor("inputs_269_cast")]; + tensor var_8886 = const()[name = tensor("op_8886"), val = tensor([1])]; + tensor channels_mean_269_cast = reduce_mean(axes = var_8886, keep_dims = var_23, x = inputs_269_cast)[name = tensor("channels_mean_269_cast")]; + tensor zero_mean_269_cast = sub(x = inputs_269_cast, y = channels_mean_269_cast)[name = tensor("zero_mean_269_cast")]; + tensor zero_mean_sq_269_cast = mul(x = zero_mean_269_cast, y = zero_mean_269_cast)[name = tensor("zero_mean_sq_269_cast")]; + tensor var_8890 = const()[name = tensor("op_8890"), val = tensor([1])]; + tensor var_8891_cast = reduce_mean(axes = var_8890, keep_dims = var_23, x = zero_mean_sq_269_cast)[name = tensor("op_8891_cast")]; + tensor var_8892_to_fp16 = const()[name = tensor("op_8892_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_8893_cast = add(x = var_8891_cast, y = var_8892_to_fp16)[name = tensor("op_8893_cast")]; + tensor denom_269_epsilon_0_to_fp16 = const()[name = tensor("denom_269_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_269_cast = rsqrt(epsilon = denom_269_epsilon_0_to_fp16, x = var_8893_cast)[name = tensor("denom_269_cast")]; + tensor out_269_cast = mul(x = zero_mean_269_cast, y = denom_269_cast)[name 
= tensor("out_269_cast")]; + tensor var_8897_to_fp16 = const()[name = tensor("op_8897_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1282872768)))]; + tensor var_8898_cast = add(x = out_269_cast, y = var_8897_to_fp16)[name = tensor("op_8898_cast")]; + tensor var_8900_to_fp16 = const()[name = tensor("op_8900_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1282875392)))]; + tensor input_541_cast = mul(x = var_8898_cast, y = var_8900_to_fp16)[name = tensor("input_541_cast")]; + tensor var_8908 = const()[name = tensor("op_8908"), val = tensor([1, 1])]; + tensor var_8910 = const()[name = tensor("op_8910"), val = tensor([1, 1])]; + tensor var_8912_pad_type_0 = const()[name = tensor("op_8912_pad_type_0"), val = tensor("custom")]; + tensor var_8912_pad_0 = const()[name = tensor("op_8912_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1282878016))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1292708480))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_0_ff_net_0_proj_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1292708672))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1292716416))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_0_ff_net_0_proj_bias_to_fp16_palettized"), shape = tensor([10240])]; + tensor var_8912_cast = conv(bias = unet_up_blocks_0_attentions_1_transformer_blocks_0_ff_net_0_proj_bias_to_fp16_palettized, dilations = var_8910, groups = var_31, pad = var_8912_pad_0, pad_type = var_8912_pad_type_0, strides = var_8908, weight = unet_up_blocks_0_attentions_1_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized, x = input_541_cast)[name = tensor("op_8912_cast")]; + tensor var_8913_split_sizes_0 = const()[name = tensor("op_8913_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_8913_axis_0 = const()[name = tensor("op_8913_axis_0"), val = tensor(1)]; + tensor var_8913_cast_0, tensor var_8913_cast_1 = split(axis = var_8913_axis_0, split_sizes = var_8913_split_sizes_0, x = var_8912_cast)[name = tensor("op_8913_cast")]; + tensor var_8915_mode_0 = const()[name = tensor("op_8915_mode_0"), val = tensor("EXACT")]; + tensor var_8915_cast = gelu(mode = var_8915_mode_0, x = var_8913_cast_1)[name = tensor("op_8915_cast")]; + tensor input_543_cast = mul(x = var_8913_cast_0, y = var_8915_cast)[name = tensor("input_543_cast")]; + tensor var_8919 = const()[name = tensor("op_8919"), val = tensor([1, 1])]; + tensor var_8921 = const()[name = tensor("op_8921"), val = tensor([1, 1])]; + tensor var_8923_pad_type_0 = const()[name = tensor("op_8923_pad_type_0"), val = tensor("custom")]; + tensor var_8923_pad_0 = const()[name = tensor("op_8923_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1292716608))), lut = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(1297631872))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_0_ff_net_2_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_0_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1297632064)))]; + tensor var_8923_cast = conv(bias = unet_up_blocks_0_attentions_1_transformer_blocks_0_ff_net_2_bias_to_fp16, dilations = var_8921, groups = var_31, pad = var_8923_pad_0, pad_type = var_8923_pad_type_0, strides = var_8919, weight = unet_up_blocks_0_attentions_1_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized, x = input_543_cast)[name = tensor("op_8923_cast")]; + tensor inputs_271_cast = add(x = var_8923_cast, y = inputs_269_cast)[name = tensor("inputs_271_cast")]; + tensor var_8933 = const()[name = tensor("op_8933"), val = tensor([1])]; + tensor channels_mean_271_cast = reduce_mean(axes = var_8933, keep_dims = var_23, x = inputs_271_cast)[name = tensor("channels_mean_271_cast")]; + tensor zero_mean_271_cast = sub(x = inputs_271_cast, y = channels_mean_271_cast)[name = tensor("zero_mean_271_cast")]; + tensor zero_mean_sq_271_cast = mul(x = zero_mean_271_cast, y = zero_mean_271_cast)[name = tensor("zero_mean_sq_271_cast")]; + tensor var_8937 = const()[name = tensor("op_8937"), val = tensor([1])]; + tensor var_8938_cast = reduce_mean(axes = var_8937, keep_dims = var_23, x = zero_mean_sq_271_cast)[name = tensor("op_8938_cast")]; + tensor var_8939_to_fp16 = const()[name = tensor("op_8939_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_8940_cast = add(x = var_8938_cast, y = var_8939_to_fp16)[name = tensor("op_8940_cast")]; + tensor denom_271_epsilon_0_to_fp16 = const()[name = tensor("denom_271_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_271_cast = rsqrt(epsilon = denom_271_epsilon_0_to_fp16, x = var_8940_cast)[name = tensor("denom_271_cast")]; + tensor out_271_cast = mul(x = zero_mean_271_cast, y = denom_271_cast)[name = tensor("out_271_cast")]; + tensor var_8944_to_fp16 = const()[name = tensor("op_8944_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1297634688)))]; + tensor var_8945_cast = add(x = out_271_cast, y = var_8944_to_fp16)[name = tensor("op_8945_cast")]; + tensor var_8947_to_fp16 = const()[name = tensor("op_8947_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1297637312)))]; + tensor hidden_states_367_cast = mul(x = var_8945_cast, y = var_8947_to_fp16)[name = tensor("hidden_states_367_cast")]; + tensor var_8954 = const()[name = tensor("op_8954"), val = tensor([1, 1])]; + tensor var_8956 = const()[name = tensor("op_8956"), val = tensor([1, 1])]; + tensor q_181_pad_type_0 = const()[name = tensor("q_181_pad_type_0"), val = tensor("custom")]; + tensor q_181_pad_0 = const()[name = tensor("q_181_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1297639936))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1298868800))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized"), shape = 
tensor([1280, 1280, 1, 1])]; + tensor q_181_cast = conv(dilations = var_8956, groups = var_31, pad = q_181_pad_0, pad_type = q_181_pad_type_0, strides = var_8954, weight = unet_up_blocks_0_attentions_1_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_367_cast)[name = tensor("q_181_cast")]; + tensor var_8960 = const()[name = tensor("op_8960"), val = tensor([1, 1])]; + tensor var_8962 = const()[name = tensor("op_8962"), val = tensor([1, 1])]; + tensor k_181_pad_type_0 = const()[name = tensor("k_181_pad_type_0"), val = tensor("custom")]; + tensor k_181_pad_0 = const()[name = tensor("k_181_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1298868992))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1300097856))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_181_cast = conv(dilations = var_8962, groups = var_31, pad = k_181_pad_0, pad_type = k_181_pad_type_0, strides = var_8960, weight = unet_up_blocks_0_attentions_1_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_367_cast)[name = tensor("k_181_cast")]; + tensor var_8966 = const()[name = tensor("op_8966"), val = tensor([1, 1])]; + tensor var_8968 = const()[name = tensor("op_8968"), val = tensor([1, 1])]; + tensor v_181_pad_type_0 = const()[name = tensor("v_181_pad_type_0"), val = tensor("custom")]; + tensor v_181_pad_0 = const()[name = tensor("v_181_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1300098048))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1301326912))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_181_cast = conv(dilations = var_8968, groups = var_31, pad = v_181_pad_0, pad_type = v_181_pad_type_0, strides = var_8966, weight = unet_up_blocks_0_attentions_1_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_367_cast)[name = tensor("v_181_cast")]; + tensor var_8972 = const()[name = tensor("op_8972"), val = tensor([2, 20, 64, -1])]; + tensor var_8973_cast = reshape(shape = var_8972, x = q_181_cast)[name = tensor("op_8973_cast")]; + tensor var_8974 = const()[name = tensor("op_8974"), val = tensor([2, 20, 64, -1])]; + tensor var_8975_cast = reshape(shape = var_8974, x = k_181_cast)[name = tensor("op_8975_cast")]; + tensor var_8976 = const()[name = tensor("op_8976"), val = tensor([2, 20, 64, -1])]; + tensor var_8977_cast = reshape(shape = var_8976, x = v_181_cast)[name = tensor("op_8977_cast")]; + tensor attn_weights_361_transpose_x_0 = const()[name = tensor("attn_weights_361_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_361_transpose_y_0 = const()[name = tensor("attn_weights_361_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_361_cast = matmul(transpose_x = attn_weights_361_transpose_x_0, transpose_y = attn_weights_361_transpose_y_0, x = var_8973_cast, y = var_8975_cast)[name = tensor("attn_weights_361_cast")]; + tensor 
attn_weights_363_cast = mul(x = attn_weights_361_cast, y = var_12_to_fp16)[name = tensor("attn_weights_363_cast")]; + tensor var_8981_cast = softmax(axis = var_18, x = attn_weights_363_cast)[name = tensor("op_8981_cast")]; + tensor attn_181_transpose_x_0 = const()[name = tensor("attn_181_transpose_x_0"), val = tensor(false)]; + tensor attn_181_transpose_y_0 = const()[name = tensor("attn_181_transpose_y_0"), val = tensor(true)]; + tensor attn_181_cast = matmul(transpose_x = attn_181_transpose_x_0, transpose_y = attn_181_transpose_y_0, x = var_8977_cast, y = var_8981_cast)[name = tensor("attn_181_cast")]; + tensor var_8985 = const()[name = tensor("op_8985"), val = tensor([2, 1280, 1, -1])]; + tensor input_545_cast = reshape(shape = var_8985, x = attn_181_cast)[name = tensor("input_545_cast")]; + tensor var_8990 = const()[name = tensor("op_8990"), val = tensor([1, 1])]; + tensor var_8992 = const()[name = tensor("op_8992"), val = tensor([1, 1])]; + tensor var_8994_pad_type_0 = const()[name = tensor("op_8994_pad_type_0"), val = tensor("custom")]; + tensor var_8994_pad_0 = const()[name = tensor("op_8994_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1301327104))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1302555968))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_1_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_1_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1302556160)))]; + tensor var_8994_cast = conv(bias = unet_up_blocks_0_attentions_1_transformer_blocks_1_attn1_to_out_0_bias_to_fp16, dilations = var_8992, groups = var_31, pad = var_8994_pad_0, pad_type = var_8994_pad_type_0, strides = var_8990, weight = unet_up_blocks_0_attentions_1_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized, x = input_545_cast)[name = tensor("op_8994_cast")]; + tensor inputs_273_cast = add(x = var_8994_cast, y = inputs_271_cast)[name = tensor("inputs_273_cast")]; + tensor var_8998 = const()[name = tensor("op_8998"), val = tensor([1])]; + tensor channels_mean_273_cast = reduce_mean(axes = var_8998, keep_dims = var_23, x = inputs_273_cast)[name = tensor("channels_mean_273_cast")]; + tensor zero_mean_273_cast = sub(x = inputs_273_cast, y = channels_mean_273_cast)[name = tensor("zero_mean_273_cast")]; + tensor zero_mean_sq_273_cast = mul(x = zero_mean_273_cast, y = zero_mean_273_cast)[name = tensor("zero_mean_sq_273_cast")]; + tensor var_9002 = const()[name = tensor("op_9002"), val = tensor([1])]; + tensor var_9003_cast = reduce_mean(axes = var_9002, keep_dims = var_23, x = zero_mean_sq_273_cast)[name = tensor("op_9003_cast")]; + tensor var_9004_to_fp16 = const()[name = tensor("op_9004_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9005_cast = add(x = var_9003_cast, y = var_9004_to_fp16)[name = tensor("op_9005_cast")]; + tensor denom_273_epsilon_0_to_fp16 = const()[name = tensor("denom_273_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_273_cast = rsqrt(epsilon = denom_273_epsilon_0_to_fp16, x = var_9005_cast)[name = tensor("denom_273_cast")]; + tensor 
out_273_cast = mul(x = zero_mean_273_cast, y = denom_273_cast)[name = tensor("out_273_cast")]; + tensor var_9009_to_fp16 = const()[name = tensor("op_9009_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1302558784)))]; + tensor var_9010_cast = add(x = out_273_cast, y = var_9009_to_fp16)[name = tensor("op_9010_cast")]; + tensor var_9012_to_fp16 = const()[name = tensor("op_9012_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1302561408)))]; + tensor hidden_states_369_cast = mul(x = var_9010_cast, y = var_9012_to_fp16)[name = tensor("hidden_states_369_cast")]; + tensor var_9019 = const()[name = tensor("op_9019"), val = tensor([1, 1])]; + tensor var_9021 = const()[name = tensor("op_9021"), val = tensor([1, 1])]; + tensor q_183_pad_type_0 = const()[name = tensor("q_183_pad_type_0"), val = tensor("custom")]; + tensor q_183_pad_0 = const()[name = tensor("q_183_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1302564032))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1303792896))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_183_cast = conv(dilations = var_9021, groups = var_31, pad = q_183_pad_0, pad_type = q_183_pad_type_0, strides = var_9019, weight = unet_up_blocks_0_attentions_1_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_369_cast)[name = tensor("q_183_cast")]; + tensor var_9025 = const()[name = tensor("op_9025"), val = tensor([1, 1])]; + tensor var_9027 = const()[name = tensor("op_9027"), val = tensor([1, 1])]; + tensor k_183_pad_type_0 = const()[name = tensor("k_183_pad_type_0"), val = tensor("custom")]; + tensor k_183_pad_0 = const()[name = tensor("k_183_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1303793088))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1305759232))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_183_cast = conv(dilations = var_9027, groups = var_31, pad = k_183_pad_0, pad_type = k_183_pad_type_0, strides = var_9025, weight = unet_up_blocks_0_attentions_1_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_183_cast")]; + tensor var_9031 = const()[name = tensor("op_9031"), val = tensor([1, 1])]; + tensor var_9033 = const()[name = tensor("op_9033"), val = tensor([1, 1])]; + tensor v_183_pad_type_0 = const()[name = tensor("v_183_pad_type_0"), val = tensor("custom")]; + tensor v_183_pad_0 = const()[name = tensor("v_183_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1305759424))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1307725568))), name = 
tensor("unet_up_blocks_0_attentions_1_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_183_cast = conv(dilations = var_9033, groups = var_31, pad = v_183_pad_0, pad_type = v_183_pad_type_0, strides = var_9031, weight = unet_up_blocks_0_attentions_1_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_183_cast")]; + tensor var_9037 = const()[name = tensor("op_9037"), val = tensor([2, 20, 64, -1])]; + tensor var_9038_cast = reshape(shape = var_9037, x = q_183_cast)[name = tensor("op_9038_cast")]; + tensor var_9039 = const()[name = tensor("op_9039"), val = tensor([2, 20, 64, -1])]; + tensor var_9040_cast = reshape(shape = var_9039, x = k_183_cast)[name = tensor("op_9040_cast")]; + tensor var_9041 = const()[name = tensor("op_9041"), val = tensor([2, 20, 64, -1])]; + tensor var_9042_cast = reshape(shape = var_9041, x = v_183_cast)[name = tensor("op_9042_cast")]; + tensor attn_weights_365_transpose_x_0 = const()[name = tensor("attn_weights_365_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_365_transpose_y_0 = const()[name = tensor("attn_weights_365_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_365_cast = matmul(transpose_x = attn_weights_365_transpose_x_0, transpose_y = attn_weights_365_transpose_y_0, x = var_9038_cast, y = var_9040_cast)[name = tensor("attn_weights_365_cast")]; + tensor attn_weights_367_cast = mul(x = attn_weights_365_cast, y = var_12_to_fp16)[name = tensor("attn_weights_367_cast")]; + tensor var_9046_cast = softmax(axis = var_18, x = attn_weights_367_cast)[name = tensor("op_9046_cast")]; + tensor attn_183_transpose_x_0 = const()[name = tensor("attn_183_transpose_x_0"), val = tensor(false)]; + tensor attn_183_transpose_y_0 = const()[name = tensor("attn_183_transpose_y_0"), val = tensor(true)]; + tensor attn_183_cast = matmul(transpose_x = attn_183_transpose_x_0, transpose_y = attn_183_transpose_y_0, x = var_9042_cast, y = var_9046_cast)[name = tensor("attn_183_cast")]; + tensor var_9050 = const()[name = tensor("op_9050"), val = tensor([2, 1280, 1, -1])]; + tensor input_547_cast = reshape(shape = var_9050, x = attn_183_cast)[name = tensor("input_547_cast")]; + tensor var_9055 = const()[name = tensor("op_9055"), val = tensor([1, 1])]; + tensor var_9057 = const()[name = tensor("op_9057"), val = tensor([1, 1])]; + tensor var_9059_pad_type_0 = const()[name = tensor("op_9059_pad_type_0"), val = tensor("custom")]; + tensor var_9059_pad_0 = const()[name = tensor("op_9059_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1307725760))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1308954624))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_1_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_1_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1308954816)))]; + tensor var_9059_cast = conv(bias = unet_up_blocks_0_attentions_1_transformer_blocks_1_attn2_to_out_0_bias_to_fp16, dilations = var_9057, groups = var_31, pad = var_9059_pad_0, 
pad_type = var_9059_pad_type_0, strides = var_9055, weight = unet_up_blocks_0_attentions_1_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized, x = input_547_cast)[name = tensor("op_9059_cast")]; + tensor inputs_275_cast = add(x = var_9059_cast, y = inputs_273_cast)[name = tensor("inputs_275_cast")]; + tensor var_9063 = const()[name = tensor("op_9063"), val = tensor([1])]; + tensor channels_mean_275_cast = reduce_mean(axes = var_9063, keep_dims = var_23, x = inputs_275_cast)[name = tensor("channels_mean_275_cast")]; + tensor zero_mean_275_cast = sub(x = inputs_275_cast, y = channels_mean_275_cast)[name = tensor("zero_mean_275_cast")]; + tensor zero_mean_sq_275_cast = mul(x = zero_mean_275_cast, y = zero_mean_275_cast)[name = tensor("zero_mean_sq_275_cast")]; + tensor var_9067 = const()[name = tensor("op_9067"), val = tensor([1])]; + tensor var_9068_cast = reduce_mean(axes = var_9067, keep_dims = var_23, x = zero_mean_sq_275_cast)[name = tensor("op_9068_cast")]; + tensor var_9069_to_fp16 = const()[name = tensor("op_9069_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9070_cast = add(x = var_9068_cast, y = var_9069_to_fp16)[name = tensor("op_9070_cast")]; + tensor denom_275_epsilon_0_to_fp16 = const()[name = tensor("denom_275_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_275_cast = rsqrt(epsilon = denom_275_epsilon_0_to_fp16, x = var_9070_cast)[name = tensor("denom_275_cast")]; + tensor out_275_cast = mul(x = zero_mean_275_cast, y = denom_275_cast)[name = tensor("out_275_cast")]; + tensor var_9074_to_fp16 = const()[name = tensor("op_9074_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1308957440)))]; + tensor var_9075_cast = add(x = out_275_cast, y = var_9074_to_fp16)[name = tensor("op_9075_cast")]; + tensor var_9077_to_fp16 = const()[name = tensor("op_9077_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1308960064)))]; + tensor input_549_cast = mul(x = var_9075_cast, y = var_9077_to_fp16)[name = tensor("input_549_cast")]; + tensor var_9085 = const()[name = tensor("op_9085"), val = tensor([1, 1])]; + tensor var_9087 = const()[name = tensor("op_9087"), val = tensor([1, 1])]; + tensor var_9089_pad_type_0 = const()[name = tensor("op_9089_pad_type_0"), val = tensor("custom")]; + tensor var_9089_pad_0 = const()[name = tensor("op_9089_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1308962688))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1318793152))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_1_ff_net_0_proj_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1318793344))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1318801088))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_1_ff_net_0_proj_bias_to_fp16_palettized"), shape = tensor([10240])]; + tensor var_9089_cast = conv(bias = unet_up_blocks_0_attentions_1_transformer_blocks_1_ff_net_0_proj_bias_to_fp16_palettized, dilations = var_9087, groups = var_31, pad = 
var_9089_pad_0, pad_type = var_9089_pad_type_0, strides = var_9085, weight = unet_up_blocks_0_attentions_1_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized, x = input_549_cast)[name = tensor("op_9089_cast")]; + tensor var_9090_split_sizes_0 = const()[name = tensor("op_9090_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_9090_axis_0 = const()[name = tensor("op_9090_axis_0"), val = tensor(1)]; + tensor var_9090_cast_0, tensor var_9090_cast_1 = split(axis = var_9090_axis_0, split_sizes = var_9090_split_sizes_0, x = var_9089_cast)[name = tensor("op_9090_cast")]; + tensor var_9092_mode_0 = const()[name = tensor("op_9092_mode_0"), val = tensor("EXACT")]; + tensor var_9092_cast = gelu(mode = var_9092_mode_0, x = var_9090_cast_1)[name = tensor("op_9092_cast")]; + tensor input_551_cast = mul(x = var_9090_cast_0, y = var_9092_cast)[name = tensor("input_551_cast")]; + tensor var_9096 = const()[name = tensor("op_9096"), val = tensor([1, 1])]; + tensor var_9098 = const()[name = tensor("op_9098"), val = tensor([1, 1])]; + tensor var_9100_pad_type_0 = const()[name = tensor("op_9100_pad_type_0"), val = tensor("custom")]; + tensor var_9100_pad_0 = const()[name = tensor("op_9100_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1318801280))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1323716544))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_1_ff_net_2_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_1_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1323716736)))]; + tensor var_9100_cast = conv(bias = unet_up_blocks_0_attentions_1_transformer_blocks_1_ff_net_2_bias_to_fp16, dilations = var_9098, groups = var_31, pad = var_9100_pad_0, pad_type = var_9100_pad_type_0, strides = var_9096, weight = unet_up_blocks_0_attentions_1_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized, x = input_551_cast)[name = tensor("op_9100_cast")]; + tensor inputs_277_cast = add(x = var_9100_cast, y = inputs_275_cast)[name = tensor("inputs_277_cast")]; + tensor var_9110 = const()[name = tensor("op_9110"), val = tensor([1])]; + tensor channels_mean_277_cast = reduce_mean(axes = var_9110, keep_dims = var_23, x = inputs_277_cast)[name = tensor("channels_mean_277_cast")]; + tensor zero_mean_277_cast = sub(x = inputs_277_cast, y = channels_mean_277_cast)[name = tensor("zero_mean_277_cast")]; + tensor zero_mean_sq_277_cast = mul(x = zero_mean_277_cast, y = zero_mean_277_cast)[name = tensor("zero_mean_sq_277_cast")]; + tensor var_9114 = const()[name = tensor("op_9114"), val = tensor([1])]; + tensor var_9115_cast = reduce_mean(axes = var_9114, keep_dims = var_23, x = zero_mean_sq_277_cast)[name = tensor("op_9115_cast")]; + tensor var_9116_to_fp16 = const()[name = tensor("op_9116_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9117_cast = add(x = var_9115_cast, y = var_9116_to_fp16)[name = tensor("op_9117_cast")]; + tensor denom_277_epsilon_0_to_fp16 = const()[name = tensor("denom_277_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_277_cast = rsqrt(epsilon = denom_277_epsilon_0_to_fp16, x = 
var_9117_cast)[name = tensor("denom_277_cast")]; + tensor out_277_cast = mul(x = zero_mean_277_cast, y = denom_277_cast)[name = tensor("out_277_cast")]; + tensor var_9121_to_fp16 = const()[name = tensor("op_9121_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1323719360)))]; + tensor var_9122_cast = add(x = out_277_cast, y = var_9121_to_fp16)[name = tensor("op_9122_cast")]; + tensor var_9124_to_fp16 = const()[name = tensor("op_9124_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1323721984)))]; + tensor hidden_states_373_cast = mul(x = var_9122_cast, y = var_9124_to_fp16)[name = tensor("hidden_states_373_cast")]; + tensor var_9131 = const()[name = tensor("op_9131"), val = tensor([1, 1])]; + tensor var_9133 = const()[name = tensor("op_9133"), val = tensor([1, 1])]; + tensor q_185_pad_type_0 = const()[name = tensor("q_185_pad_type_0"), val = tensor("custom")]; + tensor q_185_pad_0 = const()[name = tensor("q_185_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1323724608))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1324953472))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_185_cast = conv(dilations = var_9133, groups = var_31, pad = q_185_pad_0, pad_type = q_185_pad_type_0, strides = var_9131, weight = unet_up_blocks_0_attentions_1_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_373_cast)[name = tensor("q_185_cast")]; + tensor var_9137 = const()[name = tensor("op_9137"), val = tensor([1, 1])]; + tensor var_9139 = const()[name = tensor("op_9139"), val = tensor([1, 1])]; + tensor k_185_pad_type_0 = const()[name = tensor("k_185_pad_type_0"), val = tensor("custom")]; + tensor k_185_pad_0 = const()[name = tensor("k_185_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1324953664))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1326182528))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_185_cast = conv(dilations = var_9139, groups = var_31, pad = k_185_pad_0, pad_type = k_185_pad_type_0, strides = var_9137, weight = unet_up_blocks_0_attentions_1_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_373_cast)[name = tensor("k_185_cast")]; + tensor var_9143 = const()[name = tensor("op_9143"), val = tensor([1, 1])]; + tensor var_9145 = const()[name = tensor("op_9145"), val = tensor([1, 1])]; + tensor v_185_pad_type_0 = const()[name = tensor("v_185_pad_type_0"), val = tensor("custom")]; + tensor v_185_pad_0 = const()[name = tensor("v_185_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1326182720))), lut = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(1327411584))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_185_cast = conv(dilations = var_9145, groups = var_31, pad = v_185_pad_0, pad_type = v_185_pad_type_0, strides = var_9143, weight = unet_up_blocks_0_attentions_1_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_373_cast)[name = tensor("v_185_cast")]; + tensor var_9149 = const()[name = tensor("op_9149"), val = tensor([2, 20, 64, -1])]; + tensor var_9150_cast = reshape(shape = var_9149, x = q_185_cast)[name = tensor("op_9150_cast")]; + tensor var_9151 = const()[name = tensor("op_9151"), val = tensor([2, 20, 64, -1])]; + tensor var_9152_cast = reshape(shape = var_9151, x = k_185_cast)[name = tensor("op_9152_cast")]; + tensor var_9153 = const()[name = tensor("op_9153"), val = tensor([2, 20, 64, -1])]; + tensor var_9154_cast = reshape(shape = var_9153, x = v_185_cast)[name = tensor("op_9154_cast")]; + tensor attn_weights_369_transpose_x_0 = const()[name = tensor("attn_weights_369_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_369_transpose_y_0 = const()[name = tensor("attn_weights_369_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_369_cast = matmul(transpose_x = attn_weights_369_transpose_x_0, transpose_y = attn_weights_369_transpose_y_0, x = var_9150_cast, y = var_9152_cast)[name = tensor("attn_weights_369_cast")]; + tensor attn_weights_371_cast = mul(x = attn_weights_369_cast, y = var_12_to_fp16)[name = tensor("attn_weights_371_cast")]; + tensor var_9158_cast = softmax(axis = var_18, x = attn_weights_371_cast)[name = tensor("op_9158_cast")]; + tensor attn_185_transpose_x_0 = const()[name = tensor("attn_185_transpose_x_0"), val = tensor(false)]; + tensor attn_185_transpose_y_0 = const()[name = tensor("attn_185_transpose_y_0"), val = tensor(true)]; + tensor attn_185_cast = matmul(transpose_x = attn_185_transpose_x_0, transpose_y = attn_185_transpose_y_0, x = var_9154_cast, y = var_9158_cast)[name = tensor("attn_185_cast")]; + tensor var_9162 = const()[name = tensor("op_9162"), val = tensor([2, 1280, 1, -1])]; + tensor input_553_cast = reshape(shape = var_9162, x = attn_185_cast)[name = tensor("input_553_cast")]; + tensor var_9167 = const()[name = tensor("op_9167"), val = tensor([1, 1])]; + tensor var_9169 = const()[name = tensor("op_9169"), val = tensor([1, 1])]; + tensor var_9171_pad_type_0 = const()[name = tensor("op_9171_pad_type_0"), val = tensor("custom")]; + tensor var_9171_pad_0 = const()[name = tensor("op_9171_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1327411776))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1328640640))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_2_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_2_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1328640832)))]; + tensor var_9171_cast = conv(bias = 
unet_up_blocks_0_attentions_1_transformer_blocks_2_attn1_to_out_0_bias_to_fp16, dilations = var_9169, groups = var_31, pad = var_9171_pad_0, pad_type = var_9171_pad_type_0, strides = var_9167, weight = unet_up_blocks_0_attentions_1_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized, x = input_553_cast)[name = tensor("op_9171_cast")]; + tensor inputs_279_cast = add(x = var_9171_cast, y = inputs_277_cast)[name = tensor("inputs_279_cast")]; + tensor var_9175 = const()[name = tensor("op_9175"), val = tensor([1])]; + tensor channels_mean_279_cast = reduce_mean(axes = var_9175, keep_dims = var_23, x = inputs_279_cast)[name = tensor("channels_mean_279_cast")]; + tensor zero_mean_279_cast = sub(x = inputs_279_cast, y = channels_mean_279_cast)[name = tensor("zero_mean_279_cast")]; + tensor zero_mean_sq_279_cast = mul(x = zero_mean_279_cast, y = zero_mean_279_cast)[name = tensor("zero_mean_sq_279_cast")]; + tensor var_9179 = const()[name = tensor("op_9179"), val = tensor([1])]; + tensor var_9180_cast = reduce_mean(axes = var_9179, keep_dims = var_23, x = zero_mean_sq_279_cast)[name = tensor("op_9180_cast")]; + tensor var_9181_to_fp16 = const()[name = tensor("op_9181_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9182_cast = add(x = var_9180_cast, y = var_9181_to_fp16)[name = tensor("op_9182_cast")]; + tensor denom_279_epsilon_0_to_fp16 = const()[name = tensor("denom_279_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_279_cast = rsqrt(epsilon = denom_279_epsilon_0_to_fp16, x = var_9182_cast)[name = tensor("denom_279_cast")]; + tensor out_279_cast = mul(x = zero_mean_279_cast, y = denom_279_cast)[name = tensor("out_279_cast")]; + tensor var_9186_to_fp16 = const()[name = tensor("op_9186_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1328643456)))]; + tensor var_9187_cast = add(x = out_279_cast, y = var_9186_to_fp16)[name = tensor("op_9187_cast")]; + tensor var_9189_to_fp16 = const()[name = tensor("op_9189_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1328646080)))]; + tensor hidden_states_375_cast = mul(x = var_9187_cast, y = var_9189_to_fp16)[name = tensor("hidden_states_375_cast")]; + tensor var_9196 = const()[name = tensor("op_9196"), val = tensor([1, 1])]; + tensor var_9198 = const()[name = tensor("op_9198"), val = tensor([1, 1])]; + tensor q_187_pad_type_0 = const()[name = tensor("q_187_pad_type_0"), val = tensor("custom")]; + tensor q_187_pad_0 = const()[name = tensor("q_187_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1328648704))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1329877568))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_187_cast = conv(dilations = var_9198, groups = var_31, pad = q_187_pad_0, pad_type = q_187_pad_type_0, strides = var_9196, weight = unet_up_blocks_0_attentions_1_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_375_cast)[name = tensor("q_187_cast")]; + tensor var_9202 = const()[name = tensor("op_9202"), val = tensor([1, 1])]; + tensor var_9204 = const()[name = tensor("op_9204"), val = tensor([1, 1])]; + tensor k_187_pad_type_0 = const()[name = 
tensor("k_187_pad_type_0"), val = tensor("custom")]; + tensor k_187_pad_0 = const()[name = tensor("k_187_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1329877760))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1331843904))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_187_cast = conv(dilations = var_9204, groups = var_31, pad = k_187_pad_0, pad_type = k_187_pad_type_0, strides = var_9202, weight = unet_up_blocks_0_attentions_1_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_187_cast")]; + tensor var_9208 = const()[name = tensor("op_9208"), val = tensor([1, 1])]; + tensor var_9210 = const()[name = tensor("op_9210"), val = tensor([1, 1])]; + tensor v_187_pad_type_0 = const()[name = tensor("v_187_pad_type_0"), val = tensor("custom")]; + tensor v_187_pad_0 = const()[name = tensor("v_187_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1331844096))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1333810240))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_187_cast = conv(dilations = var_9210, groups = var_31, pad = v_187_pad_0, pad_type = v_187_pad_type_0, strides = var_9208, weight = unet_up_blocks_0_attentions_1_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_187_cast")]; + tensor var_9214 = const()[name = tensor("op_9214"), val = tensor([2, 20, 64, -1])]; + tensor var_9215_cast = reshape(shape = var_9214, x = q_187_cast)[name = tensor("op_9215_cast")]; + tensor var_9216 = const()[name = tensor("op_9216"), val = tensor([2, 20, 64, -1])]; + tensor var_9217_cast = reshape(shape = var_9216, x = k_187_cast)[name = tensor("op_9217_cast")]; + tensor var_9218 = const()[name = tensor("op_9218"), val = tensor([2, 20, 64, -1])]; + tensor var_9219_cast = reshape(shape = var_9218, x = v_187_cast)[name = tensor("op_9219_cast")]; + tensor attn_weights_373_transpose_x_0 = const()[name = tensor("attn_weights_373_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_373_transpose_y_0 = const()[name = tensor("attn_weights_373_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_373_cast = matmul(transpose_x = attn_weights_373_transpose_x_0, transpose_y = attn_weights_373_transpose_y_0, x = var_9215_cast, y = var_9217_cast)[name = tensor("attn_weights_373_cast")]; + tensor attn_weights_375_cast = mul(x = attn_weights_373_cast, y = var_12_to_fp16)[name = tensor("attn_weights_375_cast")]; + tensor var_9223_cast = softmax(axis = var_18, x = attn_weights_375_cast)[name = tensor("op_9223_cast")]; + tensor attn_187_transpose_x_0 = const()[name = tensor("attn_187_transpose_x_0"), val = tensor(false)]; + tensor attn_187_transpose_y_0 = const()[name = tensor("attn_187_transpose_y_0"), val = tensor(true)]; + tensor attn_187_cast = matmul(transpose_x = attn_187_transpose_x_0, transpose_y = 
attn_187_transpose_y_0, x = var_9219_cast, y = var_9223_cast)[name = tensor("attn_187_cast")]; + tensor var_9227 = const()[name = tensor("op_9227"), val = tensor([2, 1280, 1, -1])]; + tensor input_555_cast = reshape(shape = var_9227, x = attn_187_cast)[name = tensor("input_555_cast")]; + tensor var_9232 = const()[name = tensor("op_9232"), val = tensor([1, 1])]; + tensor var_9234 = const()[name = tensor("op_9234"), val = tensor([1, 1])]; + tensor var_9236_pad_type_0 = const()[name = tensor("op_9236_pad_type_0"), val = tensor("custom")]; + tensor var_9236_pad_0 = const()[name = tensor("op_9236_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1333810432))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1335039296))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_2_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_2_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1335039488)))]; + tensor var_9236_cast = conv(bias = unet_up_blocks_0_attentions_1_transformer_blocks_2_attn2_to_out_0_bias_to_fp16, dilations = var_9234, groups = var_31, pad = var_9236_pad_0, pad_type = var_9236_pad_type_0, strides = var_9232, weight = unet_up_blocks_0_attentions_1_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized, x = input_555_cast)[name = tensor("op_9236_cast")]; + tensor inputs_281_cast = add(x = var_9236_cast, y = inputs_279_cast)[name = tensor("inputs_281_cast")]; + tensor var_9240 = const()[name = tensor("op_9240"), val = tensor([1])]; + tensor channels_mean_281_cast = reduce_mean(axes = var_9240, keep_dims = var_23, x = inputs_281_cast)[name = tensor("channels_mean_281_cast")]; + tensor zero_mean_281_cast = sub(x = inputs_281_cast, y = channels_mean_281_cast)[name = tensor("zero_mean_281_cast")]; + tensor zero_mean_sq_281_cast = mul(x = zero_mean_281_cast, y = zero_mean_281_cast)[name = tensor("zero_mean_sq_281_cast")]; + tensor var_9244 = const()[name = tensor("op_9244"), val = tensor([1])]; + tensor var_9245_cast = reduce_mean(axes = var_9244, keep_dims = var_23, x = zero_mean_sq_281_cast)[name = tensor("op_9245_cast")]; + tensor var_9246_to_fp16 = const()[name = tensor("op_9246_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9247_cast = add(x = var_9245_cast, y = var_9246_to_fp16)[name = tensor("op_9247_cast")]; + tensor denom_281_epsilon_0_to_fp16 = const()[name = tensor("denom_281_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_281_cast = rsqrt(epsilon = denom_281_epsilon_0_to_fp16, x = var_9247_cast)[name = tensor("denom_281_cast")]; + tensor out_281_cast = mul(x = zero_mean_281_cast, y = denom_281_cast)[name = tensor("out_281_cast")]; + tensor var_9251_to_fp16 = const()[name = tensor("op_9251_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1335042112)))]; + tensor var_9252_cast = add(x = out_281_cast, y = var_9251_to_fp16)[name = tensor("op_9252_cast")]; + tensor var_9254_to_fp16 = const()[name = tensor("op_9254_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(1335044736)))]; + tensor input_557_cast = mul(x = var_9252_cast, y = var_9254_to_fp16)[name = tensor("input_557_cast")]; + tensor var_9262 = const()[name = tensor("op_9262"), val = tensor([1, 1])]; + tensor var_9264 = const()[name = tensor("op_9264"), val = tensor([1, 1])]; + tensor var_9266_pad_type_0 = const()[name = tensor("op_9266_pad_type_0"), val = tensor("custom")]; + tensor var_9266_pad_0 = const()[name = tensor("op_9266_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1335047360))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1344877824))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_2_ff_net_0_proj_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1344878016))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1344885760))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_2_ff_net_0_proj_bias_to_fp16_palettized"), shape = tensor([10240])]; + tensor var_9266_cast = conv(bias = unet_up_blocks_0_attentions_1_transformer_blocks_2_ff_net_0_proj_bias_to_fp16_palettized, dilations = var_9264, groups = var_31, pad = var_9266_pad_0, pad_type = var_9266_pad_type_0, strides = var_9262, weight = unet_up_blocks_0_attentions_1_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized, x = input_557_cast)[name = tensor("op_9266_cast")]; + tensor var_9267_split_sizes_0 = const()[name = tensor("op_9267_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_9267_axis_0 = const()[name = tensor("op_9267_axis_0"), val = tensor(1)]; + tensor var_9267_cast_0, tensor var_9267_cast_1 = split(axis = var_9267_axis_0, split_sizes = var_9267_split_sizes_0, x = var_9266_cast)[name = tensor("op_9267_cast")]; + tensor var_9269_mode_0 = const()[name = tensor("op_9269_mode_0"), val = tensor("EXACT")]; + tensor var_9269_cast = gelu(mode = var_9269_mode_0, x = var_9267_cast_1)[name = tensor("op_9269_cast")]; + tensor input_559_cast = mul(x = var_9267_cast_0, y = var_9269_cast)[name = tensor("input_559_cast")]; + tensor var_9273 = const()[name = tensor("op_9273"), val = tensor([1, 1])]; + tensor var_9275 = const()[name = tensor("op_9275"), val = tensor([1, 1])]; + tensor var_9277_pad_type_0 = const()[name = tensor("op_9277_pad_type_0"), val = tensor("custom")]; + tensor var_9277_pad_0 = const()[name = tensor("op_9277_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1344885952))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1349801216))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_2_ff_net_2_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_2_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(1349801408)))]; + tensor var_9277_cast = conv(bias = unet_up_blocks_0_attentions_1_transformer_blocks_2_ff_net_2_bias_to_fp16, dilations = var_9275, groups = var_31, pad = var_9277_pad_0, pad_type = var_9277_pad_type_0, strides = var_9273, weight = unet_up_blocks_0_attentions_1_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized, x = input_559_cast)[name = tensor("op_9277_cast")]; + tensor inputs_283_cast = add(x = var_9277_cast, y = inputs_281_cast)[name = tensor("inputs_283_cast")]; + tensor var_9287 = const()[name = tensor("op_9287"), val = tensor([1])]; + tensor channels_mean_283_cast = reduce_mean(axes = var_9287, keep_dims = var_23, x = inputs_283_cast)[name = tensor("channels_mean_283_cast")]; + tensor zero_mean_283_cast = sub(x = inputs_283_cast, y = channels_mean_283_cast)[name = tensor("zero_mean_283_cast")]; + tensor zero_mean_sq_283_cast = mul(x = zero_mean_283_cast, y = zero_mean_283_cast)[name = tensor("zero_mean_sq_283_cast")]; + tensor var_9291 = const()[name = tensor("op_9291"), val = tensor([1])]; + tensor var_9292_cast = reduce_mean(axes = var_9291, keep_dims = var_23, x = zero_mean_sq_283_cast)[name = tensor("op_9292_cast")]; + tensor var_9293_to_fp16 = const()[name = tensor("op_9293_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9294_cast = add(x = var_9292_cast, y = var_9293_to_fp16)[name = tensor("op_9294_cast")]; + tensor denom_283_epsilon_0_to_fp16 = const()[name = tensor("denom_283_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_283_cast = rsqrt(epsilon = denom_283_epsilon_0_to_fp16, x = var_9294_cast)[name = tensor("denom_283_cast")]; + tensor out_283_cast = mul(x = zero_mean_283_cast, y = denom_283_cast)[name = tensor("out_283_cast")]; + tensor var_9298_to_fp16 = const()[name = tensor("op_9298_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1349804032)))]; + tensor var_9299_cast = add(x = out_283_cast, y = var_9298_to_fp16)[name = tensor("op_9299_cast")]; + tensor var_9301_to_fp16 = const()[name = tensor("op_9301_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1349806656)))]; + tensor hidden_states_379_cast = mul(x = var_9299_cast, y = var_9301_to_fp16)[name = tensor("hidden_states_379_cast")]; + tensor var_9308 = const()[name = tensor("op_9308"), val = tensor([1, 1])]; + tensor var_9310 = const()[name = tensor("op_9310"), val = tensor([1, 1])]; + tensor q_189_pad_type_0 = const()[name = tensor("q_189_pad_type_0"), val = tensor("custom")]; + tensor q_189_pad_0 = const()[name = tensor("q_189_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1349809280))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1351038144))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_189_cast = conv(dilations = var_9310, groups = var_31, pad = q_189_pad_0, pad_type = q_189_pad_type_0, strides = var_9308, weight = unet_up_blocks_0_attentions_1_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_379_cast)[name = tensor("q_189_cast")]; + tensor var_9314 = const()[name = tensor("op_9314"), val = tensor([1, 1])]; + tensor var_9316 = const()[name = 
tensor("op_9316"), val = tensor([1, 1])]; + tensor k_189_pad_type_0 = const()[name = tensor("k_189_pad_type_0"), val = tensor("custom")]; + tensor k_189_pad_0 = const()[name = tensor("k_189_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1351038336))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1352267200))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_189_cast = conv(dilations = var_9316, groups = var_31, pad = k_189_pad_0, pad_type = k_189_pad_type_0, strides = var_9314, weight = unet_up_blocks_0_attentions_1_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_379_cast)[name = tensor("k_189_cast")]; + tensor var_9320 = const()[name = tensor("op_9320"), val = tensor([1, 1])]; + tensor var_9322 = const()[name = tensor("op_9322"), val = tensor([1, 1])]; + tensor v_189_pad_type_0 = const()[name = tensor("v_189_pad_type_0"), val = tensor("custom")]; + tensor v_189_pad_0 = const()[name = tensor("v_189_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1352267392))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1353496256))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_189_cast = conv(dilations = var_9322, groups = var_31, pad = v_189_pad_0, pad_type = v_189_pad_type_0, strides = var_9320, weight = unet_up_blocks_0_attentions_1_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_379_cast)[name = tensor("v_189_cast")]; + tensor var_9326 = const()[name = tensor("op_9326"), val = tensor([2, 20, 64, -1])]; + tensor var_9327_cast = reshape(shape = var_9326, x = q_189_cast)[name = tensor("op_9327_cast")]; + tensor var_9328 = const()[name = tensor("op_9328"), val = tensor([2, 20, 64, -1])]; + tensor var_9329_cast = reshape(shape = var_9328, x = k_189_cast)[name = tensor("op_9329_cast")]; + tensor var_9330 = const()[name = tensor("op_9330"), val = tensor([2, 20, 64, -1])]; + tensor var_9331_cast = reshape(shape = var_9330, x = v_189_cast)[name = tensor("op_9331_cast")]; + tensor attn_weights_377_transpose_x_0 = const()[name = tensor("attn_weights_377_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_377_transpose_y_0 = const()[name = tensor("attn_weights_377_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_377_cast = matmul(transpose_x = attn_weights_377_transpose_x_0, transpose_y = attn_weights_377_transpose_y_0, x = var_9327_cast, y = var_9329_cast)[name = tensor("attn_weights_377_cast")]; + tensor attn_weights_379_cast = mul(x = attn_weights_377_cast, y = var_12_to_fp16)[name = tensor("attn_weights_379_cast")]; + tensor var_9335_cast = softmax(axis = var_18, x = attn_weights_379_cast)[name = tensor("op_9335_cast")]; + tensor attn_189_transpose_x_0 = const()[name = tensor("attn_189_transpose_x_0"), val = tensor(false)]; + tensor attn_189_transpose_y_0 = const()[name = tensor("attn_189_transpose_y_0"), val = tensor(true)]; + tensor 
attn_189_cast = matmul(transpose_x = attn_189_transpose_x_0, transpose_y = attn_189_transpose_y_0, x = var_9331_cast, y = var_9335_cast)[name = tensor("attn_189_cast")]; + tensor var_9339 = const()[name = tensor("op_9339"), val = tensor([2, 1280, 1, -1])]; + tensor input_561_cast = reshape(shape = var_9339, x = attn_189_cast)[name = tensor("input_561_cast")]; + tensor var_9344 = const()[name = tensor("op_9344"), val = tensor([1, 1])]; + tensor var_9346 = const()[name = tensor("op_9346"), val = tensor([1, 1])]; + tensor var_9348_pad_type_0 = const()[name = tensor("op_9348_pad_type_0"), val = tensor("custom")]; + tensor var_9348_pad_0 = const()[name = tensor("op_9348_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1353496448))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1354725312))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_3_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_3_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1354725504)))]; + tensor var_9348_cast = conv(bias = unet_up_blocks_0_attentions_1_transformer_blocks_3_attn1_to_out_0_bias_to_fp16, dilations = var_9346, groups = var_31, pad = var_9348_pad_0, pad_type = var_9348_pad_type_0, strides = var_9344, weight = unet_up_blocks_0_attentions_1_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized, x = input_561_cast)[name = tensor("op_9348_cast")]; + tensor inputs_285_cast = add(x = var_9348_cast, y = inputs_283_cast)[name = tensor("inputs_285_cast")]; + tensor var_9352 = const()[name = tensor("op_9352"), val = tensor([1])]; + tensor channels_mean_285_cast = reduce_mean(axes = var_9352, keep_dims = var_23, x = inputs_285_cast)[name = tensor("channels_mean_285_cast")]; + tensor zero_mean_285_cast = sub(x = inputs_285_cast, y = channels_mean_285_cast)[name = tensor("zero_mean_285_cast")]; + tensor zero_mean_sq_285_cast = mul(x = zero_mean_285_cast, y = zero_mean_285_cast)[name = tensor("zero_mean_sq_285_cast")]; + tensor var_9356 = const()[name = tensor("op_9356"), val = tensor([1])]; + tensor var_9357_cast = reduce_mean(axes = var_9356, keep_dims = var_23, x = zero_mean_sq_285_cast)[name = tensor("op_9357_cast")]; + tensor var_9358_to_fp16 = const()[name = tensor("op_9358_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9359_cast = add(x = var_9357_cast, y = var_9358_to_fp16)[name = tensor("op_9359_cast")]; + tensor denom_285_epsilon_0_to_fp16 = const()[name = tensor("denom_285_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_285_cast = rsqrt(epsilon = denom_285_epsilon_0_to_fp16, x = var_9359_cast)[name = tensor("denom_285_cast")]; + tensor out_285_cast = mul(x = zero_mean_285_cast, y = denom_285_cast)[name = tensor("out_285_cast")]; + tensor var_9363_to_fp16 = const()[name = tensor("op_9363_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1354728128)))]; + tensor var_9364_cast = add(x = out_285_cast, y = var_9363_to_fp16)[name = tensor("op_9364_cast")]; + tensor var_9366_to_fp16 = const()[name = tensor("op_9366_to_fp16"), val = 
tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1354730752)))]; + tensor hidden_states_381_cast = mul(x = var_9364_cast, y = var_9366_to_fp16)[name = tensor("hidden_states_381_cast")]; + tensor var_9373 = const()[name = tensor("op_9373"), val = tensor([1, 1])]; + tensor var_9375 = const()[name = tensor("op_9375"), val = tensor([1, 1])]; + tensor q_191_pad_type_0 = const()[name = tensor("q_191_pad_type_0"), val = tensor("custom")]; + tensor q_191_pad_0 = const()[name = tensor("q_191_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1354733376))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1355962240))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_191_cast = conv(dilations = var_9375, groups = var_31, pad = q_191_pad_0, pad_type = q_191_pad_type_0, strides = var_9373, weight = unet_up_blocks_0_attentions_1_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_381_cast)[name = tensor("q_191_cast")]; + tensor var_9379 = const()[name = tensor("op_9379"), val = tensor([1, 1])]; + tensor var_9381 = const()[name = tensor("op_9381"), val = tensor([1, 1])]; + tensor k_191_pad_type_0 = const()[name = tensor("k_191_pad_type_0"), val = tensor("custom")]; + tensor k_191_pad_0 = const()[name = tensor("k_191_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1355962432))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1357928576))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_191_cast = conv(dilations = var_9381, groups = var_31, pad = k_191_pad_0, pad_type = k_191_pad_type_0, strides = var_9379, weight = unet_up_blocks_0_attentions_1_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_191_cast")]; + tensor var_9385 = const()[name = tensor("op_9385"), val = tensor([1, 1])]; + tensor var_9387 = const()[name = tensor("op_9387"), val = tensor([1, 1])]; + tensor v_191_pad_type_0 = const()[name = tensor("v_191_pad_type_0"), val = tensor("custom")]; + tensor v_191_pad_0 = const()[name = tensor("v_191_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1357928768))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1359894912))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_191_cast = conv(dilations = var_9387, groups = var_31, pad = v_191_pad_0, pad_type = v_191_pad_type_0, strides = var_9385, weight = unet_up_blocks_0_attentions_1_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_191_cast")]; + tensor var_9391 = 
const()[name = tensor("op_9391"), val = tensor([2, 20, 64, -1])]; + tensor var_9392_cast = reshape(shape = var_9391, x = q_191_cast)[name = tensor("op_9392_cast")]; + tensor var_9393 = const()[name = tensor("op_9393"), val = tensor([2, 20, 64, -1])]; + tensor var_9394_cast = reshape(shape = var_9393, x = k_191_cast)[name = tensor("op_9394_cast")]; + tensor var_9395 = const()[name = tensor("op_9395"), val = tensor([2, 20, 64, -1])]; + tensor var_9396_cast = reshape(shape = var_9395, x = v_191_cast)[name = tensor("op_9396_cast")]; + tensor attn_weights_381_transpose_x_0 = const()[name = tensor("attn_weights_381_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_381_transpose_y_0 = const()[name = tensor("attn_weights_381_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_381_cast = matmul(transpose_x = attn_weights_381_transpose_x_0, transpose_y = attn_weights_381_transpose_y_0, x = var_9392_cast, y = var_9394_cast)[name = tensor("attn_weights_381_cast")]; + tensor attn_weights_383_cast = mul(x = attn_weights_381_cast, y = var_12_to_fp16)[name = tensor("attn_weights_383_cast")]; + tensor var_9400_cast = softmax(axis = var_18, x = attn_weights_383_cast)[name = tensor("op_9400_cast")]; + tensor attn_191_transpose_x_0 = const()[name = tensor("attn_191_transpose_x_0"), val = tensor(false)]; + tensor attn_191_transpose_y_0 = const()[name = tensor("attn_191_transpose_y_0"), val = tensor(true)]; + tensor attn_191_cast = matmul(transpose_x = attn_191_transpose_x_0, transpose_y = attn_191_transpose_y_0, x = var_9396_cast, y = var_9400_cast)[name = tensor("attn_191_cast")]; + tensor var_9404 = const()[name = tensor("op_9404"), val = tensor([2, 1280, 1, -1])]; + tensor input_563_cast = reshape(shape = var_9404, x = attn_191_cast)[name = tensor("input_563_cast")]; + tensor var_9409 = const()[name = tensor("op_9409"), val = tensor([1, 1])]; + tensor var_9411 = const()[name = tensor("op_9411"), val = tensor([1, 1])]; + tensor var_9413_pad_type_0 = const()[name = tensor("op_9413_pad_type_0"), val = tensor("custom")]; + tensor var_9413_pad_0 = const()[name = tensor("op_9413_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1359895104))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1361123968))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_3_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_3_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1361124160)))]; + tensor var_9413_cast = conv(bias = unet_up_blocks_0_attentions_1_transformer_blocks_3_attn2_to_out_0_bias_to_fp16, dilations = var_9411, groups = var_31, pad = var_9413_pad_0, pad_type = var_9413_pad_type_0, strides = var_9409, weight = unet_up_blocks_0_attentions_1_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized, x = input_563_cast)[name = tensor("op_9413_cast")]; + tensor inputs_287_cast = add(x = var_9413_cast, y = inputs_285_cast)[name = tensor("inputs_287_cast")]; + tensor var_9417 = const()[name = tensor("op_9417"), val = tensor([1])]; + tensor channels_mean_287_cast = reduce_mean(axes = 
var_9417, keep_dims = var_23, x = inputs_287_cast)[name = tensor("channels_mean_287_cast")]; + tensor zero_mean_287_cast = sub(x = inputs_287_cast, y = channels_mean_287_cast)[name = tensor("zero_mean_287_cast")]; + tensor zero_mean_sq_287_cast = mul(x = zero_mean_287_cast, y = zero_mean_287_cast)[name = tensor("zero_mean_sq_287_cast")]; + tensor var_9421 = const()[name = tensor("op_9421"), val = tensor([1])]; + tensor var_9422_cast = reduce_mean(axes = var_9421, keep_dims = var_23, x = zero_mean_sq_287_cast)[name = tensor("op_9422_cast")]; + tensor var_9423_to_fp16 = const()[name = tensor("op_9423_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9424_cast = add(x = var_9422_cast, y = var_9423_to_fp16)[name = tensor("op_9424_cast")]; + tensor denom_287_epsilon_0_to_fp16 = const()[name = tensor("denom_287_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_287_cast = rsqrt(epsilon = denom_287_epsilon_0_to_fp16, x = var_9424_cast)[name = tensor("denom_287_cast")]; + tensor out_287_cast = mul(x = zero_mean_287_cast, y = denom_287_cast)[name = tensor("out_287_cast")]; + tensor var_9428_to_fp16 = const()[name = tensor("op_9428_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1361126784)))]; + tensor var_9429_cast = add(x = out_287_cast, y = var_9428_to_fp16)[name = tensor("op_9429_cast")]; + tensor var_9431_to_fp16 = const()[name = tensor("op_9431_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1361129408)))]; + tensor input_565_cast = mul(x = var_9429_cast, y = var_9431_to_fp16)[name = tensor("input_565_cast")]; + tensor var_9439 = const()[name = tensor("op_9439"), val = tensor([1, 1])]; + tensor var_9441 = const()[name = tensor("op_9441"), val = tensor([1, 1])]; + tensor var_9443_pad_type_0 = const()[name = tensor("op_9443_pad_type_0"), val = tensor("custom")]; + tensor var_9443_pad_0 = const()[name = tensor("op_9443_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1361132032))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1370962496))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_3_ff_net_0_proj_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1370962688))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1370970432))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_3_ff_net_0_proj_bias_to_fp16_palettized"), shape = tensor([10240])]; + tensor var_9443_cast = conv(bias = unet_up_blocks_0_attentions_1_transformer_blocks_3_ff_net_0_proj_bias_to_fp16_palettized, dilations = var_9441, groups = var_31, pad = var_9443_pad_0, pad_type = var_9443_pad_type_0, strides = var_9439, weight = unet_up_blocks_0_attentions_1_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized, x = input_565_cast)[name = tensor("op_9443_cast")]; + tensor var_9444_split_sizes_0 = const()[name = tensor("op_9444_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_9444_axis_0 = const()[name = tensor("op_9444_axis_0"), val = tensor(1)]; + tensor 
var_9444_cast_0, tensor var_9444_cast_1 = split(axis = var_9444_axis_0, split_sizes = var_9444_split_sizes_0, x = var_9443_cast)[name = tensor("op_9444_cast")]; + tensor var_9446_mode_0 = const()[name = tensor("op_9446_mode_0"), val = tensor("EXACT")]; + tensor var_9446_cast = gelu(mode = var_9446_mode_0, x = var_9444_cast_1)[name = tensor("op_9446_cast")]; + tensor input_567_cast = mul(x = var_9444_cast_0, y = var_9446_cast)[name = tensor("input_567_cast")]; + tensor var_9450 = const()[name = tensor("op_9450"), val = tensor([1, 1])]; + tensor var_9452 = const()[name = tensor("op_9452"), val = tensor([1, 1])]; + tensor var_9454_pad_type_0 = const()[name = tensor("op_9454_pad_type_0"), val = tensor("custom")]; + tensor var_9454_pad_0 = const()[name = tensor("op_9454_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1370970624))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1375885888))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_3_ff_net_2_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_3_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1375886080)))]; + tensor var_9454_cast = conv(bias = unet_up_blocks_0_attentions_1_transformer_blocks_3_ff_net_2_bias_to_fp16, dilations = var_9452, groups = var_31, pad = var_9454_pad_0, pad_type = var_9454_pad_type_0, strides = var_9450, weight = unet_up_blocks_0_attentions_1_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized, x = input_567_cast)[name = tensor("op_9454_cast")]; + tensor inputs_289_cast = add(x = var_9454_cast, y = inputs_287_cast)[name = tensor("inputs_289_cast")]; + tensor var_9464 = const()[name = tensor("op_9464"), val = tensor([1])]; + tensor channels_mean_289_cast = reduce_mean(axes = var_9464, keep_dims = var_23, x = inputs_289_cast)[name = tensor("channels_mean_289_cast")]; + tensor zero_mean_289_cast = sub(x = inputs_289_cast, y = channels_mean_289_cast)[name = tensor("zero_mean_289_cast")]; + tensor zero_mean_sq_289_cast = mul(x = zero_mean_289_cast, y = zero_mean_289_cast)[name = tensor("zero_mean_sq_289_cast")]; + tensor var_9468 = const()[name = tensor("op_9468"), val = tensor([1])]; + tensor var_9469_cast = reduce_mean(axes = var_9468, keep_dims = var_23, x = zero_mean_sq_289_cast)[name = tensor("op_9469_cast")]; + tensor var_9470_to_fp16 = const()[name = tensor("op_9470_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9471_cast = add(x = var_9469_cast, y = var_9470_to_fp16)[name = tensor("op_9471_cast")]; + tensor denom_289_epsilon_0_to_fp16 = const()[name = tensor("denom_289_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_289_cast = rsqrt(epsilon = denom_289_epsilon_0_to_fp16, x = var_9471_cast)[name = tensor("denom_289_cast")]; + tensor out_289_cast = mul(x = zero_mean_289_cast, y = denom_289_cast)[name = tensor("out_289_cast")]; + tensor var_9475_to_fp16 = const()[name = tensor("op_9475_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1375888704)))]; + tensor var_9476_cast = add(x = out_289_cast, y = var_9475_to_fp16)[name = tensor("op_9476_cast")]; + tensor 
var_9478_to_fp16 = const()[name = tensor("op_9478_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1375891328)))]; + tensor hidden_states_385_cast = mul(x = var_9476_cast, y = var_9478_to_fp16)[name = tensor("hidden_states_385_cast")]; + tensor var_9485 = const()[name = tensor("op_9485"), val = tensor([1, 1])]; + tensor var_9487 = const()[name = tensor("op_9487"), val = tensor([1, 1])]; + tensor q_193_pad_type_0 = const()[name = tensor("q_193_pad_type_0"), val = tensor("custom")]; + tensor q_193_pad_0 = const()[name = tensor("q_193_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1375893952))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1377122816))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_193_cast = conv(dilations = var_9487, groups = var_31, pad = q_193_pad_0, pad_type = q_193_pad_type_0, strides = var_9485, weight = unet_up_blocks_0_attentions_1_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_385_cast)[name = tensor("q_193_cast")]; + tensor var_9491 = const()[name = tensor("op_9491"), val = tensor([1, 1])]; + tensor var_9493 = const()[name = tensor("op_9493"), val = tensor([1, 1])]; + tensor k_193_pad_type_0 = const()[name = tensor("k_193_pad_type_0"), val = tensor("custom")]; + tensor k_193_pad_0 = const()[name = tensor("k_193_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1377123008))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1378351872))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_193_cast = conv(dilations = var_9493, groups = var_31, pad = k_193_pad_0, pad_type = k_193_pad_type_0, strides = var_9491, weight = unet_up_blocks_0_attentions_1_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_385_cast)[name = tensor("k_193_cast")]; + tensor var_9497 = const()[name = tensor("op_9497"), val = tensor([1, 1])]; + tensor var_9499 = const()[name = tensor("op_9499"), val = tensor([1, 1])]; + tensor v_193_pad_type_0 = const()[name = tensor("v_193_pad_type_0"), val = tensor("custom")]; + tensor v_193_pad_0 = const()[name = tensor("v_193_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1378352064))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1379580928))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_193_cast = conv(dilations = var_9499, groups = var_31, pad = v_193_pad_0, pad_type = v_193_pad_type_0, strides = var_9497, weight = unet_up_blocks_0_attentions_1_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized, x = 
hidden_states_385_cast)[name = tensor("v_193_cast")]; + tensor var_9503 = const()[name = tensor("op_9503"), val = tensor([2, 20, 64, -1])]; + tensor var_9504_cast = reshape(shape = var_9503, x = q_193_cast)[name = tensor("op_9504_cast")]; + tensor var_9505 = const()[name = tensor("op_9505"), val = tensor([2, 20, 64, -1])]; + tensor var_9506_cast = reshape(shape = var_9505, x = k_193_cast)[name = tensor("op_9506_cast")]; + tensor var_9507 = const()[name = tensor("op_9507"), val = tensor([2, 20, 64, -1])]; + tensor var_9508_cast = reshape(shape = var_9507, x = v_193_cast)[name = tensor("op_9508_cast")]; + tensor attn_weights_385_transpose_x_0 = const()[name = tensor("attn_weights_385_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_385_transpose_y_0 = const()[name = tensor("attn_weights_385_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_385_cast = matmul(transpose_x = attn_weights_385_transpose_x_0, transpose_y = attn_weights_385_transpose_y_0, x = var_9504_cast, y = var_9506_cast)[name = tensor("attn_weights_385_cast")]; + tensor attn_weights_387_cast = mul(x = attn_weights_385_cast, y = var_12_to_fp16)[name = tensor("attn_weights_387_cast")]; + tensor var_9512_cast = softmax(axis = var_18, x = attn_weights_387_cast)[name = tensor("op_9512_cast")]; + tensor attn_193_transpose_x_0 = const()[name = tensor("attn_193_transpose_x_0"), val = tensor(false)]; + tensor attn_193_transpose_y_0 = const()[name = tensor("attn_193_transpose_y_0"), val = tensor(true)]; + tensor attn_193_cast = matmul(transpose_x = attn_193_transpose_x_0, transpose_y = attn_193_transpose_y_0, x = var_9508_cast, y = var_9512_cast)[name = tensor("attn_193_cast")]; + tensor var_9516 = const()[name = tensor("op_9516"), val = tensor([2, 1280, 1, -1])]; + tensor input_569_cast = reshape(shape = var_9516, x = attn_193_cast)[name = tensor("input_569_cast")]; + tensor var_9521 = const()[name = tensor("op_9521"), val = tensor([1, 1])]; + tensor var_9523 = const()[name = tensor("op_9523"), val = tensor([1, 1])]; + tensor var_9525_pad_type_0 = const()[name = tensor("op_9525_pad_type_0"), val = tensor("custom")]; + tensor var_9525_pad_0 = const()[name = tensor("op_9525_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1379581120))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1380809984))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_4_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_4_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1380810176)))]; + tensor var_9525_cast = conv(bias = unet_up_blocks_0_attentions_1_transformer_blocks_4_attn1_to_out_0_bias_to_fp16, dilations = var_9523, groups = var_31, pad = var_9525_pad_0, pad_type = var_9525_pad_type_0, strides = var_9521, weight = unet_up_blocks_0_attentions_1_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized, x = input_569_cast)[name = tensor("op_9525_cast")]; + tensor inputs_291_cast = add(x = var_9525_cast, y = inputs_289_cast)[name = tensor("inputs_291_cast")]; + tensor var_9529 = const()[name = tensor("op_9529"), val = 
tensor([1])]; + tensor channels_mean_291_cast = reduce_mean(axes = var_9529, keep_dims = var_23, x = inputs_291_cast)[name = tensor("channels_mean_291_cast")]; + tensor zero_mean_291_cast = sub(x = inputs_291_cast, y = channels_mean_291_cast)[name = tensor("zero_mean_291_cast")]; + tensor zero_mean_sq_291_cast = mul(x = zero_mean_291_cast, y = zero_mean_291_cast)[name = tensor("zero_mean_sq_291_cast")]; + tensor var_9533 = const()[name = tensor("op_9533"), val = tensor([1])]; + tensor var_9534_cast = reduce_mean(axes = var_9533, keep_dims = var_23, x = zero_mean_sq_291_cast)[name = tensor("op_9534_cast")]; + tensor var_9535_to_fp16 = const()[name = tensor("op_9535_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9536_cast = add(x = var_9534_cast, y = var_9535_to_fp16)[name = tensor("op_9536_cast")]; + tensor denom_291_epsilon_0_to_fp16 = const()[name = tensor("denom_291_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_291_cast = rsqrt(epsilon = denom_291_epsilon_0_to_fp16, x = var_9536_cast)[name = tensor("denom_291_cast")]; + tensor out_291_cast = mul(x = zero_mean_291_cast, y = denom_291_cast)[name = tensor("out_291_cast")]; + tensor var_9540_to_fp16 = const()[name = tensor("op_9540_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1380812800)))]; + tensor var_9541_cast = add(x = out_291_cast, y = var_9540_to_fp16)[name = tensor("op_9541_cast")]; + tensor var_9543_to_fp16 = const()[name = tensor("op_9543_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1380815424)))]; + tensor hidden_states_387_cast = mul(x = var_9541_cast, y = var_9543_to_fp16)[name = tensor("hidden_states_387_cast")]; + tensor var_9550 = const()[name = tensor("op_9550"), val = tensor([1, 1])]; + tensor var_9552 = const()[name = tensor("op_9552"), val = tensor([1, 1])]; + tensor q_195_pad_type_0 = const()[name = tensor("q_195_pad_type_0"), val = tensor("custom")]; + tensor q_195_pad_0 = const()[name = tensor("q_195_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1380818048))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1382046912))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_195_cast = conv(dilations = var_9552, groups = var_31, pad = q_195_pad_0, pad_type = q_195_pad_type_0, strides = var_9550, weight = unet_up_blocks_0_attentions_1_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_387_cast)[name = tensor("q_195_cast")]; + tensor var_9556 = const()[name = tensor("op_9556"), val = tensor([1, 1])]; + tensor var_9558 = const()[name = tensor("op_9558"), val = tensor([1, 1])]; + tensor k_195_pad_type_0 = const()[name = tensor("k_195_pad_type_0"), val = tensor("custom")]; + tensor k_195_pad_0 = const()[name = tensor("k_195_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1382047104))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1384013248))), name = 
tensor("unet_up_blocks_0_attentions_1_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_195_cast = conv(dilations = var_9558, groups = var_31, pad = k_195_pad_0, pad_type = k_195_pad_type_0, strides = var_9556, weight = unet_up_blocks_0_attentions_1_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_195_cast")]; + tensor var_9562 = const()[name = tensor("op_9562"), val = tensor([1, 1])]; + tensor var_9564 = const()[name = tensor("op_9564"), val = tensor([1, 1])]; + tensor v_195_pad_type_0 = const()[name = tensor("v_195_pad_type_0"), val = tensor("custom")]; + tensor v_195_pad_0 = const()[name = tensor("v_195_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1384013440))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1385979584))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_195_cast = conv(dilations = var_9564, groups = var_31, pad = v_195_pad_0, pad_type = v_195_pad_type_0, strides = var_9562, weight = unet_up_blocks_0_attentions_1_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_195_cast")]; + tensor var_9568 = const()[name = tensor("op_9568"), val = tensor([2, 20, 64, -1])]; + tensor var_9569_cast = reshape(shape = var_9568, x = q_195_cast)[name = tensor("op_9569_cast")]; + tensor var_9570 = const()[name = tensor("op_9570"), val = tensor([2, 20, 64, -1])]; + tensor var_9571_cast = reshape(shape = var_9570, x = k_195_cast)[name = tensor("op_9571_cast")]; + tensor var_9572 = const()[name = tensor("op_9572"), val = tensor([2, 20, 64, -1])]; + tensor var_9573_cast = reshape(shape = var_9572, x = v_195_cast)[name = tensor("op_9573_cast")]; + tensor attn_weights_389_transpose_x_0 = const()[name = tensor("attn_weights_389_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_389_transpose_y_0 = const()[name = tensor("attn_weights_389_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_389_cast = matmul(transpose_x = attn_weights_389_transpose_x_0, transpose_y = attn_weights_389_transpose_y_0, x = var_9569_cast, y = var_9571_cast)[name = tensor("attn_weights_389_cast")]; + tensor attn_weights_391_cast = mul(x = attn_weights_389_cast, y = var_12_to_fp16)[name = tensor("attn_weights_391_cast")]; + tensor var_9577_cast = softmax(axis = var_18, x = attn_weights_391_cast)[name = tensor("op_9577_cast")]; + tensor attn_195_transpose_x_0 = const()[name = tensor("attn_195_transpose_x_0"), val = tensor(false)]; + tensor attn_195_transpose_y_0 = const()[name = tensor("attn_195_transpose_y_0"), val = tensor(true)]; + tensor attn_195_cast = matmul(transpose_x = attn_195_transpose_x_0, transpose_y = attn_195_transpose_y_0, x = var_9573_cast, y = var_9577_cast)[name = tensor("attn_195_cast")]; + tensor var_9581 = const()[name = tensor("op_9581"), val = tensor([2, 1280, 1, -1])]; + tensor input_571_cast = reshape(shape = var_9581, x = attn_195_cast)[name = tensor("input_571_cast")]; + tensor var_9586 = const()[name = tensor("op_9586"), val = tensor([1, 1])]; + tensor var_9588 = const()[name = tensor("op_9588"), val = tensor([1, 1])]; + tensor var_9590_pad_type_0 = const()[name = 
tensor("op_9590_pad_type_0"), val = tensor("custom")]; + tensor var_9590_pad_0 = const()[name = tensor("op_9590_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1385979776))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1387208640))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_4_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_4_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1387208832)))]; + tensor var_9590_cast = conv(bias = unet_up_blocks_0_attentions_1_transformer_blocks_4_attn2_to_out_0_bias_to_fp16, dilations = var_9588, groups = var_31, pad = var_9590_pad_0, pad_type = var_9590_pad_type_0, strides = var_9586, weight = unet_up_blocks_0_attentions_1_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized, x = input_571_cast)[name = tensor("op_9590_cast")]; + tensor inputs_293_cast = add(x = var_9590_cast, y = inputs_291_cast)[name = tensor("inputs_293_cast")]; + tensor var_9594 = const()[name = tensor("op_9594"), val = tensor([1])]; + tensor channels_mean_293_cast = reduce_mean(axes = var_9594, keep_dims = var_23, x = inputs_293_cast)[name = tensor("channels_mean_293_cast")]; + tensor zero_mean_293_cast = sub(x = inputs_293_cast, y = channels_mean_293_cast)[name = tensor("zero_mean_293_cast")]; + tensor zero_mean_sq_293_cast = mul(x = zero_mean_293_cast, y = zero_mean_293_cast)[name = tensor("zero_mean_sq_293_cast")]; + tensor var_9598 = const()[name = tensor("op_9598"), val = tensor([1])]; + tensor var_9599_cast = reduce_mean(axes = var_9598, keep_dims = var_23, x = zero_mean_sq_293_cast)[name = tensor("op_9599_cast")]; + tensor var_9600_to_fp16 = const()[name = tensor("op_9600_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9601_cast = add(x = var_9599_cast, y = var_9600_to_fp16)[name = tensor("op_9601_cast")]; + tensor denom_293_epsilon_0_to_fp16 = const()[name = tensor("denom_293_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_293_cast = rsqrt(epsilon = denom_293_epsilon_0_to_fp16, x = var_9601_cast)[name = tensor("denom_293_cast")]; + tensor out_293_cast = mul(x = zero_mean_293_cast, y = denom_293_cast)[name = tensor("out_293_cast")]; + tensor var_9605_to_fp16 = const()[name = tensor("op_9605_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1387211456)))]; + tensor var_9606_cast = add(x = out_293_cast, y = var_9605_to_fp16)[name = tensor("op_9606_cast")]; + tensor var_9608_to_fp16 = const()[name = tensor("op_9608_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1387214080)))]; + tensor input_573_cast = mul(x = var_9606_cast, y = var_9608_to_fp16)[name = tensor("input_573_cast")]; + tensor var_9616 = const()[name = tensor("op_9616"), val = tensor([1, 1])]; + tensor var_9618 = const()[name = tensor("op_9618"), val = tensor([1, 1])]; + tensor var_9620_pad_type_0 = const()[name = tensor("op_9620_pad_type_0"), val = tensor("custom")]; + tensor var_9620_pad_0 = const()[name = tensor("op_9620_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
unet_up_blocks_0_attentions_1_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1387216704))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1397047168))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_4_ff_net_0_proj_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1397047360))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1397055104))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_4_ff_net_0_proj_bias_to_fp16_palettized"), shape = tensor([10240])]; + tensor var_9620_cast = conv(bias = unet_up_blocks_0_attentions_1_transformer_blocks_4_ff_net_0_proj_bias_to_fp16_palettized, dilations = var_9618, groups = var_31, pad = var_9620_pad_0, pad_type = var_9620_pad_type_0, strides = var_9616, weight = unet_up_blocks_0_attentions_1_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized, x = input_573_cast)[name = tensor("op_9620_cast")]; + tensor var_9621_split_sizes_0 = const()[name = tensor("op_9621_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_9621_axis_0 = const()[name = tensor("op_9621_axis_0"), val = tensor(1)]; + tensor var_9621_cast_0, tensor var_9621_cast_1 = split(axis = var_9621_axis_0, split_sizes = var_9621_split_sizes_0, x = var_9620_cast)[name = tensor("op_9621_cast")]; + tensor var_9623_mode_0 = const()[name = tensor("op_9623_mode_0"), val = tensor("EXACT")]; + tensor var_9623_cast = gelu(mode = var_9623_mode_0, x = var_9621_cast_1)[name = tensor("op_9623_cast")]; + tensor input_575_cast = mul(x = var_9621_cast_0, y = var_9623_cast)[name = tensor("input_575_cast")]; + tensor var_9627 = const()[name = tensor("op_9627"), val = tensor([1, 1])]; + tensor var_9629 = const()[name = tensor("op_9629"), val = tensor([1, 1])]; + tensor var_9631_pad_type_0 = const()[name = tensor("op_9631_pad_type_0"), val = tensor("custom")]; + tensor var_9631_pad_0 = const()[name = tensor("op_9631_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1397055296))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1401970560))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_4_ff_net_2_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_4_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1401970752)))]; + tensor var_9631_cast = conv(bias = unet_up_blocks_0_attentions_1_transformer_blocks_4_ff_net_2_bias_to_fp16, dilations = var_9629, groups = var_31, pad = var_9631_pad_0, pad_type = var_9631_pad_type_0, strides = var_9627, weight = unet_up_blocks_0_attentions_1_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized, x = input_575_cast)[name = tensor("op_9631_cast")]; + tensor inputs_295_cast = add(x = var_9631_cast, y = 
inputs_293_cast)[name = tensor("inputs_295_cast")]; + tensor var_9641 = const()[name = tensor("op_9641"), val = tensor([1])]; + tensor channels_mean_295_cast = reduce_mean(axes = var_9641, keep_dims = var_23, x = inputs_295_cast)[name = tensor("channels_mean_295_cast")]; + tensor zero_mean_295_cast = sub(x = inputs_295_cast, y = channels_mean_295_cast)[name = tensor("zero_mean_295_cast")]; + tensor zero_mean_sq_295_cast = mul(x = zero_mean_295_cast, y = zero_mean_295_cast)[name = tensor("zero_mean_sq_295_cast")]; + tensor var_9645 = const()[name = tensor("op_9645"), val = tensor([1])]; + tensor var_9646_cast = reduce_mean(axes = var_9645, keep_dims = var_23, x = zero_mean_sq_295_cast)[name = tensor("op_9646_cast")]; + tensor var_9647_to_fp16 = const()[name = tensor("op_9647_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9648_cast = add(x = var_9646_cast, y = var_9647_to_fp16)[name = tensor("op_9648_cast")]; + tensor denom_295_epsilon_0_to_fp16 = const()[name = tensor("denom_295_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_295_cast = rsqrt(epsilon = denom_295_epsilon_0_to_fp16, x = var_9648_cast)[name = tensor("denom_295_cast")]; + tensor out_295_cast = mul(x = zero_mean_295_cast, y = denom_295_cast)[name = tensor("out_295_cast")]; + tensor var_9652_to_fp16 = const()[name = tensor("op_9652_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1401973376)))]; + tensor var_9653_cast = add(x = out_295_cast, y = var_9652_to_fp16)[name = tensor("op_9653_cast")]; + tensor var_9655_to_fp16 = const()[name = tensor("op_9655_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1401976000)))]; + tensor hidden_states_391_cast = mul(x = var_9653_cast, y = var_9655_to_fp16)[name = tensor("hidden_states_391_cast")]; + tensor var_9662 = const()[name = tensor("op_9662"), val = tensor([1, 1])]; + tensor var_9664 = const()[name = tensor("op_9664"), val = tensor([1, 1])]; + tensor q_197_pad_type_0 = const()[name = tensor("q_197_pad_type_0"), val = tensor("custom")]; + tensor q_197_pad_0 = const()[name = tensor("q_197_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1401978624))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1403207488))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_197_cast = conv(dilations = var_9664, groups = var_31, pad = q_197_pad_0, pad_type = q_197_pad_type_0, strides = var_9662, weight = unet_up_blocks_0_attentions_1_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_391_cast)[name = tensor("q_197_cast")]; + tensor var_9668 = const()[name = tensor("op_9668"), val = tensor([1, 1])]; + tensor var_9670 = const()[name = tensor("op_9670"), val = tensor([1, 1])]; + tensor k_197_pad_type_0 = const()[name = tensor("k_197_pad_type_0"), val = tensor("custom")]; + tensor k_197_pad_0 = const()[name = tensor("k_197_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1403207680))), lut = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(1404436544))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_197_cast = conv(dilations = var_9670, groups = var_31, pad = k_197_pad_0, pad_type = k_197_pad_type_0, strides = var_9668, weight = unet_up_blocks_0_attentions_1_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_391_cast)[name = tensor("k_197_cast")]; + tensor var_9674 = const()[name = tensor("op_9674"), val = tensor([1, 1])]; + tensor var_9676 = const()[name = tensor("op_9676"), val = tensor([1, 1])]; + tensor v_197_pad_type_0 = const()[name = tensor("v_197_pad_type_0"), val = tensor("custom")]; + tensor v_197_pad_0 = const()[name = tensor("v_197_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1404436736))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1405665600))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_197_cast = conv(dilations = var_9676, groups = var_31, pad = v_197_pad_0, pad_type = v_197_pad_type_0, strides = var_9674, weight = unet_up_blocks_0_attentions_1_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_391_cast)[name = tensor("v_197_cast")]; + tensor var_9680 = const()[name = tensor("op_9680"), val = tensor([2, 20, 64, -1])]; + tensor var_9681_cast = reshape(shape = var_9680, x = q_197_cast)[name = tensor("op_9681_cast")]; + tensor var_9682 = const()[name = tensor("op_9682"), val = tensor([2, 20, 64, -1])]; + tensor var_9683_cast = reshape(shape = var_9682, x = k_197_cast)[name = tensor("op_9683_cast")]; + tensor var_9684 = const()[name = tensor("op_9684"), val = tensor([2, 20, 64, -1])]; + tensor var_9685_cast = reshape(shape = var_9684, x = v_197_cast)[name = tensor("op_9685_cast")]; + tensor attn_weights_393_transpose_x_0 = const()[name = tensor("attn_weights_393_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_393_transpose_y_0 = const()[name = tensor("attn_weights_393_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_393_cast = matmul(transpose_x = attn_weights_393_transpose_x_0, transpose_y = attn_weights_393_transpose_y_0, x = var_9681_cast, y = var_9683_cast)[name = tensor("attn_weights_393_cast")]; + tensor attn_weights_395_cast = mul(x = attn_weights_393_cast, y = var_12_to_fp16)[name = tensor("attn_weights_395_cast")]; + tensor var_9689_cast = softmax(axis = var_18, x = attn_weights_395_cast)[name = tensor("op_9689_cast")]; + tensor attn_197_transpose_x_0 = const()[name = tensor("attn_197_transpose_x_0"), val = tensor(false)]; + tensor attn_197_transpose_y_0 = const()[name = tensor("attn_197_transpose_y_0"), val = tensor(true)]; + tensor attn_197_cast = matmul(transpose_x = attn_197_transpose_x_0, transpose_y = attn_197_transpose_y_0, x = var_9685_cast, y = var_9689_cast)[name = tensor("attn_197_cast")]; + tensor var_9693 = const()[name = tensor("op_9693"), val = tensor([2, 1280, 1, -1])]; + tensor input_577_cast = reshape(shape = var_9693, x = attn_197_cast)[name = tensor("input_577_cast")]; + tensor var_9698 = const()[name = tensor("op_9698"), val = tensor([1, 1])]; + tensor var_9700 = const()[name = 
tensor("op_9700"), val = tensor([1, 1])]; + tensor var_9702_pad_type_0 = const()[name = tensor("op_9702_pad_type_0"), val = tensor("custom")]; + tensor var_9702_pad_0 = const()[name = tensor("op_9702_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1405665792))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1406894656))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_5_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_5_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1406894848)))]; + tensor var_9702_cast = conv(bias = unet_up_blocks_0_attentions_1_transformer_blocks_5_attn1_to_out_0_bias_to_fp16, dilations = var_9700, groups = var_31, pad = var_9702_pad_0, pad_type = var_9702_pad_type_0, strides = var_9698, weight = unet_up_blocks_0_attentions_1_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized, x = input_577_cast)[name = tensor("op_9702_cast")]; + tensor inputs_297_cast = add(x = var_9702_cast, y = inputs_295_cast)[name = tensor("inputs_297_cast")]; + tensor var_9706 = const()[name = tensor("op_9706"), val = tensor([1])]; + tensor channels_mean_297_cast = reduce_mean(axes = var_9706, keep_dims = var_23, x = inputs_297_cast)[name = tensor("channels_mean_297_cast")]; + tensor zero_mean_297_cast = sub(x = inputs_297_cast, y = channels_mean_297_cast)[name = tensor("zero_mean_297_cast")]; + tensor zero_mean_sq_297_cast = mul(x = zero_mean_297_cast, y = zero_mean_297_cast)[name = tensor("zero_mean_sq_297_cast")]; + tensor var_9710 = const()[name = tensor("op_9710"), val = tensor([1])]; + tensor var_9711_cast = reduce_mean(axes = var_9710, keep_dims = var_23, x = zero_mean_sq_297_cast)[name = tensor("op_9711_cast")]; + tensor var_9712_to_fp16 = const()[name = tensor("op_9712_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9713_cast = add(x = var_9711_cast, y = var_9712_to_fp16)[name = tensor("op_9713_cast")]; + tensor denom_297_epsilon_0_to_fp16 = const()[name = tensor("denom_297_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_297_cast = rsqrt(epsilon = denom_297_epsilon_0_to_fp16, x = var_9713_cast)[name = tensor("denom_297_cast")]; + tensor out_297_cast = mul(x = zero_mean_297_cast, y = denom_297_cast)[name = tensor("out_297_cast")]; + tensor var_9717_to_fp16 = const()[name = tensor("op_9717_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1406897472)))]; + tensor var_9718_cast = add(x = out_297_cast, y = var_9717_to_fp16)[name = tensor("op_9718_cast")]; + tensor var_9720_to_fp16 = const()[name = tensor("op_9720_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1406900096)))]; + tensor hidden_states_393_cast = mul(x = var_9718_cast, y = var_9720_to_fp16)[name = tensor("hidden_states_393_cast")]; + tensor var_9727 = const()[name = tensor("op_9727"), val = tensor([1, 1])]; + tensor var_9729 = const()[name = tensor("op_9729"), val = tensor([1, 1])]; + tensor q_199_pad_type_0 = const()[name = tensor("q_199_pad_type_0"), val = tensor("custom")]; + tensor 
q_199_pad_0 = const()[name = tensor("q_199_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1406902720))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1408131584))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_199_cast = conv(dilations = var_9729, groups = var_31, pad = q_199_pad_0, pad_type = q_199_pad_type_0, strides = var_9727, weight = unet_up_blocks_0_attentions_1_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_393_cast)[name = tensor("q_199_cast")]; + tensor var_9733 = const()[name = tensor("op_9733"), val = tensor([1, 1])]; + tensor var_9735 = const()[name = tensor("op_9735"), val = tensor([1, 1])]; + tensor k_199_pad_type_0 = const()[name = tensor("k_199_pad_type_0"), val = tensor("custom")]; + tensor k_199_pad_0 = const()[name = tensor("k_199_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1408131776))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1410097920))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_199_cast = conv(dilations = var_9735, groups = var_31, pad = k_199_pad_0, pad_type = k_199_pad_type_0, strides = var_9733, weight = unet_up_blocks_0_attentions_1_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_199_cast")]; + tensor var_9739 = const()[name = tensor("op_9739"), val = tensor([1, 1])]; + tensor var_9741 = const()[name = tensor("op_9741"), val = tensor([1, 1])]; + tensor v_199_pad_type_0 = const()[name = tensor("v_199_pad_type_0"), val = tensor("custom")]; + tensor v_199_pad_0 = const()[name = tensor("v_199_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1410098112))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1412064256))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_199_cast = conv(dilations = var_9741, groups = var_31, pad = v_199_pad_0, pad_type = v_199_pad_type_0, strides = var_9739, weight = unet_up_blocks_0_attentions_1_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_199_cast")]; + tensor var_9745 = const()[name = tensor("op_9745"), val = tensor([2, 20, 64, -1])]; + tensor var_9746_cast = reshape(shape = var_9745, x = q_199_cast)[name = tensor("op_9746_cast")]; + tensor var_9747 = const()[name = tensor("op_9747"), val = tensor([2, 20, 64, -1])]; + tensor var_9748_cast = reshape(shape = var_9747, x = k_199_cast)[name = tensor("op_9748_cast")]; + tensor var_9749 = const()[name = tensor("op_9749"), val = tensor([2, 20, 64, -1])]; + tensor var_9750_cast = reshape(shape = 
var_9749, x = v_199_cast)[name = tensor("op_9750_cast")]; + tensor attn_weights_397_transpose_x_0 = const()[name = tensor("attn_weights_397_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_397_transpose_y_0 = const()[name = tensor("attn_weights_397_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_397_cast = matmul(transpose_x = attn_weights_397_transpose_x_0, transpose_y = attn_weights_397_transpose_y_0, x = var_9746_cast, y = var_9748_cast)[name = tensor("attn_weights_397_cast")]; + tensor attn_weights_399_cast = mul(x = attn_weights_397_cast, y = var_12_to_fp16)[name = tensor("attn_weights_399_cast")]; + tensor var_9754_cast = softmax(axis = var_18, x = attn_weights_399_cast)[name = tensor("op_9754_cast")]; + tensor attn_199_transpose_x_0 = const()[name = tensor("attn_199_transpose_x_0"), val = tensor(false)]; + tensor attn_199_transpose_y_0 = const()[name = tensor("attn_199_transpose_y_0"), val = tensor(true)]; + tensor attn_199_cast = matmul(transpose_x = attn_199_transpose_x_0, transpose_y = attn_199_transpose_y_0, x = var_9750_cast, y = var_9754_cast)[name = tensor("attn_199_cast")]; + tensor var_9758 = const()[name = tensor("op_9758"), val = tensor([2, 1280, 1, -1])]; + tensor input_579_cast = reshape(shape = var_9758, x = attn_199_cast)[name = tensor("input_579_cast")]; + tensor var_9763 = const()[name = tensor("op_9763"), val = tensor([1, 1])]; + tensor var_9765 = const()[name = tensor("op_9765"), val = tensor([1, 1])]; + tensor var_9767_pad_type_0 = const()[name = tensor("op_9767_pad_type_0"), val = tensor("custom")]; + tensor var_9767_pad_0 = const()[name = tensor("op_9767_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1412064448))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1413293312))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_5_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_5_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1413293504)))]; + tensor var_9767_cast = conv(bias = unet_up_blocks_0_attentions_1_transformer_blocks_5_attn2_to_out_0_bias_to_fp16, dilations = var_9765, groups = var_31, pad = var_9767_pad_0, pad_type = var_9767_pad_type_0, strides = var_9763, weight = unet_up_blocks_0_attentions_1_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized, x = input_579_cast)[name = tensor("op_9767_cast")]; + tensor inputs_299_cast = add(x = var_9767_cast, y = inputs_297_cast)[name = tensor("inputs_299_cast")]; + tensor var_9771 = const()[name = tensor("op_9771"), val = tensor([1])]; + tensor channels_mean_299_cast = reduce_mean(axes = var_9771, keep_dims = var_23, x = inputs_299_cast)[name = tensor("channels_mean_299_cast")]; + tensor zero_mean_299_cast = sub(x = inputs_299_cast, y = channels_mean_299_cast)[name = tensor("zero_mean_299_cast")]; + tensor zero_mean_sq_299_cast = mul(x = zero_mean_299_cast, y = zero_mean_299_cast)[name = tensor("zero_mean_sq_299_cast")]; + tensor var_9775 = const()[name = tensor("op_9775"), val = tensor([1])]; + tensor var_9776_cast = reduce_mean(axes = var_9775, keep_dims = 
var_23, x = zero_mean_sq_299_cast)[name = tensor("op_9776_cast")]; + tensor var_9777_to_fp16 = const()[name = tensor("op_9777_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9778_cast = add(x = var_9776_cast, y = var_9777_to_fp16)[name = tensor("op_9778_cast")]; + tensor denom_299_epsilon_0_to_fp16 = const()[name = tensor("denom_299_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_299_cast = rsqrt(epsilon = denom_299_epsilon_0_to_fp16, x = var_9778_cast)[name = tensor("denom_299_cast")]; + tensor out_299_cast = mul(x = zero_mean_299_cast, y = denom_299_cast)[name = tensor("out_299_cast")]; + tensor var_9782_to_fp16 = const()[name = tensor("op_9782_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1413296128)))]; + tensor var_9783_cast = add(x = out_299_cast, y = var_9782_to_fp16)[name = tensor("op_9783_cast")]; + tensor var_9785_to_fp16 = const()[name = tensor("op_9785_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1413298752)))]; + tensor input_581_cast = mul(x = var_9783_cast, y = var_9785_to_fp16)[name = tensor("input_581_cast")]; + tensor var_9793 = const()[name = tensor("op_9793"), val = tensor([1, 1])]; + tensor var_9795 = const()[name = tensor("op_9795"), val = tensor([1, 1])]; + tensor var_9797_pad_type_0 = const()[name = tensor("op_9797_pad_type_0"), val = tensor("custom")]; + tensor var_9797_pad_0 = const()[name = tensor("op_9797_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1413301376))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1423131840))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_5_ff_net_0_proj_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1423132032))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1423139776))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_5_ff_net_0_proj_bias_to_fp16_palettized"), shape = tensor([10240])]; + tensor var_9797_cast = conv(bias = unet_up_blocks_0_attentions_1_transformer_blocks_5_ff_net_0_proj_bias_to_fp16_palettized, dilations = var_9795, groups = var_31, pad = var_9797_pad_0, pad_type = var_9797_pad_type_0, strides = var_9793, weight = unet_up_blocks_0_attentions_1_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized, x = input_581_cast)[name = tensor("op_9797_cast")]; + tensor var_9798_split_sizes_0 = const()[name = tensor("op_9798_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_9798_axis_0 = const()[name = tensor("op_9798_axis_0"), val = tensor(1)]; + tensor var_9798_cast_0, tensor var_9798_cast_1 = split(axis = var_9798_axis_0, split_sizes = var_9798_split_sizes_0, x = var_9797_cast)[name = tensor("op_9798_cast")]; + tensor var_9800_mode_0 = const()[name = tensor("op_9800_mode_0"), val = tensor("EXACT")]; + tensor var_9800_cast = gelu(mode = var_9800_mode_0, x = var_9798_cast_1)[name = tensor("op_9800_cast")]; + tensor input_583_cast = mul(x = var_9798_cast_0, y = var_9800_cast)[name = tensor("input_583_cast")]; + tensor var_9804 = 
const()[name = tensor("op_9804"), val = tensor([1, 1])]; + tensor var_9806 = const()[name = tensor("op_9806"), val = tensor([1, 1])]; + tensor var_9808_pad_type_0 = const()[name = tensor("op_9808_pad_type_0"), val = tensor("custom")]; + tensor var_9808_pad_0 = const()[name = tensor("op_9808_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1423139968))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1428055232))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_5_ff_net_2_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_5_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1428055424)))]; + tensor var_9808_cast = conv(bias = unet_up_blocks_0_attentions_1_transformer_blocks_5_ff_net_2_bias_to_fp16, dilations = var_9806, groups = var_31, pad = var_9808_pad_0, pad_type = var_9808_pad_type_0, strides = var_9804, weight = unet_up_blocks_0_attentions_1_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized, x = input_583_cast)[name = tensor("op_9808_cast")]; + tensor inputs_301_cast = add(x = var_9808_cast, y = inputs_299_cast)[name = tensor("inputs_301_cast")]; + tensor var_9818 = const()[name = tensor("op_9818"), val = tensor([1])]; + tensor channels_mean_301_cast = reduce_mean(axes = var_9818, keep_dims = var_23, x = inputs_301_cast)[name = tensor("channels_mean_301_cast")]; + tensor zero_mean_301_cast = sub(x = inputs_301_cast, y = channels_mean_301_cast)[name = tensor("zero_mean_301_cast")]; + tensor zero_mean_sq_301_cast = mul(x = zero_mean_301_cast, y = zero_mean_301_cast)[name = tensor("zero_mean_sq_301_cast")]; + tensor var_9822 = const()[name = tensor("op_9822"), val = tensor([1])]; + tensor var_9823_cast = reduce_mean(axes = var_9822, keep_dims = var_23, x = zero_mean_sq_301_cast)[name = tensor("op_9823_cast")]; + tensor var_9824_to_fp16 = const()[name = tensor("op_9824_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9825_cast = add(x = var_9823_cast, y = var_9824_to_fp16)[name = tensor("op_9825_cast")]; + tensor denom_301_epsilon_0_to_fp16 = const()[name = tensor("denom_301_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_301_cast = rsqrt(epsilon = denom_301_epsilon_0_to_fp16, x = var_9825_cast)[name = tensor("denom_301_cast")]; + tensor out_301_cast = mul(x = zero_mean_301_cast, y = denom_301_cast)[name = tensor("out_301_cast")]; + tensor var_9829_to_fp16 = const()[name = tensor("op_9829_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1428058048)))]; + tensor var_9830_cast = add(x = out_301_cast, y = var_9829_to_fp16)[name = tensor("op_9830_cast")]; + tensor var_9832_to_fp16 = const()[name = tensor("op_9832_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1428060672)))]; + tensor hidden_states_397_cast = mul(x = var_9830_cast, y = var_9832_to_fp16)[name = tensor("hidden_states_397_cast")]; + tensor var_9839 = const()[name = tensor("op_9839"), val = tensor([1, 1])]; + tensor var_9841 = const()[name = tensor("op_9841"), val = tensor([1, 1])]; + tensor q_201_pad_type_0 = const()[name = 
tensor("q_201_pad_type_0"), val = tensor("custom")]; + tensor q_201_pad_0 = const()[name = tensor("q_201_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1428063296))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1429292160))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_201_cast = conv(dilations = var_9841, groups = var_31, pad = q_201_pad_0, pad_type = q_201_pad_type_0, strides = var_9839, weight = unet_up_blocks_0_attentions_1_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_397_cast)[name = tensor("q_201_cast")]; + tensor var_9845 = const()[name = tensor("op_9845"), val = tensor([1, 1])]; + tensor var_9847 = const()[name = tensor("op_9847"), val = tensor([1, 1])]; + tensor k_201_pad_type_0 = const()[name = tensor("k_201_pad_type_0"), val = tensor("custom")]; + tensor k_201_pad_0 = const()[name = tensor("k_201_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1429292352))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1430521216))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_201_cast = conv(dilations = var_9847, groups = var_31, pad = k_201_pad_0, pad_type = k_201_pad_type_0, strides = var_9845, weight = unet_up_blocks_0_attentions_1_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_397_cast)[name = tensor("k_201_cast")]; + tensor var_9851 = const()[name = tensor("op_9851"), val = tensor([1, 1])]; + tensor var_9853 = const()[name = tensor("op_9853"), val = tensor([1, 1])]; + tensor v_201_pad_type_0 = const()[name = tensor("v_201_pad_type_0"), val = tensor("custom")]; + tensor v_201_pad_0 = const()[name = tensor("v_201_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1430521408))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1431750272))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_201_cast = conv(dilations = var_9853, groups = var_31, pad = v_201_pad_0, pad_type = v_201_pad_type_0, strides = var_9851, weight = unet_up_blocks_0_attentions_1_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_397_cast)[name = tensor("v_201_cast")]; + tensor var_9857 = const()[name = tensor("op_9857"), val = tensor([2, 20, 64, -1])]; + tensor var_9858_cast = reshape(shape = var_9857, x = q_201_cast)[name = tensor("op_9858_cast")]; + tensor var_9859 = const()[name = tensor("op_9859"), val = tensor([2, 20, 64, -1])]; + tensor var_9860_cast = reshape(shape = var_9859, x = k_201_cast)[name = tensor("op_9860_cast")]; + tensor var_9861 = const()[name = tensor("op_9861"), val = 
tensor([2, 20, 64, -1])]; + tensor var_9862_cast = reshape(shape = var_9861, x = v_201_cast)[name = tensor("op_9862_cast")]; + tensor attn_weights_401_transpose_x_0 = const()[name = tensor("attn_weights_401_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_401_transpose_y_0 = const()[name = tensor("attn_weights_401_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_401_cast = matmul(transpose_x = attn_weights_401_transpose_x_0, transpose_y = attn_weights_401_transpose_y_0, x = var_9858_cast, y = var_9860_cast)[name = tensor("attn_weights_401_cast")]; + tensor attn_weights_403_cast = mul(x = attn_weights_401_cast, y = var_12_to_fp16)[name = tensor("attn_weights_403_cast")]; + tensor var_9866_cast = softmax(axis = var_18, x = attn_weights_403_cast)[name = tensor("op_9866_cast")]; + tensor attn_201_transpose_x_0 = const()[name = tensor("attn_201_transpose_x_0"), val = tensor(false)]; + tensor attn_201_transpose_y_0 = const()[name = tensor("attn_201_transpose_y_0"), val = tensor(true)]; + tensor attn_201_cast = matmul(transpose_x = attn_201_transpose_x_0, transpose_y = attn_201_transpose_y_0, x = var_9862_cast, y = var_9866_cast)[name = tensor("attn_201_cast")]; + tensor var_9870 = const()[name = tensor("op_9870"), val = tensor([2, 1280, 1, -1])]; + tensor input_585_cast = reshape(shape = var_9870, x = attn_201_cast)[name = tensor("input_585_cast")]; + tensor var_9875 = const()[name = tensor("op_9875"), val = tensor([1, 1])]; + tensor var_9877 = const()[name = tensor("op_9877"), val = tensor([1, 1])]; + tensor var_9879_pad_type_0 = const()[name = tensor("op_9879_pad_type_0"), val = tensor("custom")]; + tensor var_9879_pad_0 = const()[name = tensor("op_9879_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1431750464))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1432979328))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_6_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_6_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1432979520)))]; + tensor var_9879_cast = conv(bias = unet_up_blocks_0_attentions_1_transformer_blocks_6_attn1_to_out_0_bias_to_fp16, dilations = var_9877, groups = var_31, pad = var_9879_pad_0, pad_type = var_9879_pad_type_0, strides = var_9875, weight = unet_up_blocks_0_attentions_1_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized, x = input_585_cast)[name = tensor("op_9879_cast")]; + tensor inputs_303_cast = add(x = var_9879_cast, y = inputs_301_cast)[name = tensor("inputs_303_cast")]; + tensor var_9883 = const()[name = tensor("op_9883"), val = tensor([1])]; + tensor channels_mean_303_cast = reduce_mean(axes = var_9883, keep_dims = var_23, x = inputs_303_cast)[name = tensor("channels_mean_303_cast")]; + tensor zero_mean_303_cast = sub(x = inputs_303_cast, y = channels_mean_303_cast)[name = tensor("zero_mean_303_cast")]; + tensor zero_mean_sq_303_cast = mul(x = zero_mean_303_cast, y = zero_mean_303_cast)[name = tensor("zero_mean_sq_303_cast")]; + tensor var_9887 = const()[name = tensor("op_9887"), val = tensor([1])]; + 
tensor var_9888_cast = reduce_mean(axes = var_9887, keep_dims = var_23, x = zero_mean_sq_303_cast)[name = tensor("op_9888_cast")]; + tensor var_9889_to_fp16 = const()[name = tensor("op_9889_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9890_cast = add(x = var_9888_cast, y = var_9889_to_fp16)[name = tensor("op_9890_cast")]; + tensor denom_303_epsilon_0_to_fp16 = const()[name = tensor("denom_303_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_303_cast = rsqrt(epsilon = denom_303_epsilon_0_to_fp16, x = var_9890_cast)[name = tensor("denom_303_cast")]; + tensor out_303_cast = mul(x = zero_mean_303_cast, y = denom_303_cast)[name = tensor("out_303_cast")]; + tensor var_9894_to_fp16 = const()[name = tensor("op_9894_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1432982144)))]; + tensor var_9895_cast = add(x = out_303_cast, y = var_9894_to_fp16)[name = tensor("op_9895_cast")]; + tensor var_9897_to_fp16 = const()[name = tensor("op_9897_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1432984768)))]; + tensor hidden_states_399_cast = mul(x = var_9895_cast, y = var_9897_to_fp16)[name = tensor("hidden_states_399_cast")]; + tensor var_9904 = const()[name = tensor("op_9904"), val = tensor([1, 1])]; + tensor var_9906 = const()[name = tensor("op_9906"), val = tensor([1, 1])]; + tensor q_203_pad_type_0 = const()[name = tensor("q_203_pad_type_0"), val = tensor("custom")]; + tensor q_203_pad_0 = const()[name = tensor("q_203_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1432987392))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1434216256))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_203_cast = conv(dilations = var_9906, groups = var_31, pad = q_203_pad_0, pad_type = q_203_pad_type_0, strides = var_9904, weight = unet_up_blocks_0_attentions_1_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_399_cast)[name = tensor("q_203_cast")]; + tensor var_9910 = const()[name = tensor("op_9910"), val = tensor([1, 1])]; + tensor var_9912 = const()[name = tensor("op_9912"), val = tensor([1, 1])]; + tensor k_203_pad_type_0 = const()[name = tensor("k_203_pad_type_0"), val = tensor("custom")]; + tensor k_203_pad_0 = const()[name = tensor("k_203_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1434216448))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1436182592))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_203_cast = conv(dilations = var_9912, groups = var_31, pad = k_203_pad_0, pad_type = k_203_pad_type_0, strides = var_9910, weight = unet_up_blocks_0_attentions_1_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_203_cast")]; + tensor var_9916 = const()[name = tensor("op_9916"), val = tensor([1, 1])]; + tensor var_9918 = 
const()[name = tensor("op_9918"), val = tensor([1, 1])]; + tensor v_203_pad_type_0 = const()[name = tensor("v_203_pad_type_0"), val = tensor("custom")]; + tensor v_203_pad_0 = const()[name = tensor("v_203_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1436182784))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1438148928))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_203_cast = conv(dilations = var_9918, groups = var_31, pad = v_203_pad_0, pad_type = v_203_pad_type_0, strides = var_9916, weight = unet_up_blocks_0_attentions_1_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_203_cast")]; + tensor var_9922 = const()[name = tensor("op_9922"), val = tensor([2, 20, 64, -1])]; + tensor var_9923_cast = reshape(shape = var_9922, x = q_203_cast)[name = tensor("op_9923_cast")]; + tensor var_9924 = const()[name = tensor("op_9924"), val = tensor([2, 20, 64, -1])]; + tensor var_9925_cast = reshape(shape = var_9924, x = k_203_cast)[name = tensor("op_9925_cast")]; + tensor var_9926 = const()[name = tensor("op_9926"), val = tensor([2, 20, 64, -1])]; + tensor var_9927_cast = reshape(shape = var_9926, x = v_203_cast)[name = tensor("op_9927_cast")]; + tensor attn_weights_405_transpose_x_0 = const()[name = tensor("attn_weights_405_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_405_transpose_y_0 = const()[name = tensor("attn_weights_405_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_405_cast = matmul(transpose_x = attn_weights_405_transpose_x_0, transpose_y = attn_weights_405_transpose_y_0, x = var_9923_cast, y = var_9925_cast)[name = tensor("attn_weights_405_cast")]; + tensor attn_weights_407_cast = mul(x = attn_weights_405_cast, y = var_12_to_fp16)[name = tensor("attn_weights_407_cast")]; + tensor var_9931_cast = softmax(axis = var_18, x = attn_weights_407_cast)[name = tensor("op_9931_cast")]; + tensor attn_203_transpose_x_0 = const()[name = tensor("attn_203_transpose_x_0"), val = tensor(false)]; + tensor attn_203_transpose_y_0 = const()[name = tensor("attn_203_transpose_y_0"), val = tensor(true)]; + tensor attn_203_cast = matmul(transpose_x = attn_203_transpose_x_0, transpose_y = attn_203_transpose_y_0, x = var_9927_cast, y = var_9931_cast)[name = tensor("attn_203_cast")]; + tensor var_9935 = const()[name = tensor("op_9935"), val = tensor([2, 1280, 1, -1])]; + tensor input_587_cast = reshape(shape = var_9935, x = attn_203_cast)[name = tensor("input_587_cast")]; + tensor var_9940 = const()[name = tensor("op_9940"), val = tensor([1, 1])]; + tensor var_9942 = const()[name = tensor("op_9942"), val = tensor([1, 1])]; + tensor var_9944_pad_type_0 = const()[name = tensor("op_9944_pad_type_0"), val = tensor("custom")]; + tensor var_9944_pad_0 = const()[name = tensor("op_9944_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1438149120))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1439377984))), name = 
tensor("unet_up_blocks_0_attentions_1_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_6_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_6_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1439378176)))]; + tensor var_9944_cast = conv(bias = unet_up_blocks_0_attentions_1_transformer_blocks_6_attn2_to_out_0_bias_to_fp16, dilations = var_9942, groups = var_31, pad = var_9944_pad_0, pad_type = var_9944_pad_type_0, strides = var_9940, weight = unet_up_blocks_0_attentions_1_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized, x = input_587_cast)[name = tensor("op_9944_cast")]; + tensor inputs_305_cast = add(x = var_9944_cast, y = inputs_303_cast)[name = tensor("inputs_305_cast")]; + tensor var_9948 = const()[name = tensor("op_9948"), val = tensor([1])]; + tensor channels_mean_305_cast = reduce_mean(axes = var_9948, keep_dims = var_23, x = inputs_305_cast)[name = tensor("channels_mean_305_cast")]; + tensor zero_mean_305_cast = sub(x = inputs_305_cast, y = channels_mean_305_cast)[name = tensor("zero_mean_305_cast")]; + tensor zero_mean_sq_305_cast = mul(x = zero_mean_305_cast, y = zero_mean_305_cast)[name = tensor("zero_mean_sq_305_cast")]; + tensor var_9952 = const()[name = tensor("op_9952"), val = tensor([1])]; + tensor var_9953_cast = reduce_mean(axes = var_9952, keep_dims = var_23, x = zero_mean_sq_305_cast)[name = tensor("op_9953_cast")]; + tensor var_9954_to_fp16 = const()[name = tensor("op_9954_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9955_cast = add(x = var_9953_cast, y = var_9954_to_fp16)[name = tensor("op_9955_cast")]; + tensor denom_305_epsilon_0_to_fp16 = const()[name = tensor("denom_305_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_305_cast = rsqrt(epsilon = denom_305_epsilon_0_to_fp16, x = var_9955_cast)[name = tensor("denom_305_cast")]; + tensor out_305_cast = mul(x = zero_mean_305_cast, y = denom_305_cast)[name = tensor("out_305_cast")]; + tensor var_9959_to_fp16 = const()[name = tensor("op_9959_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1439380800)))]; + tensor var_9960_cast = add(x = out_305_cast, y = var_9959_to_fp16)[name = tensor("op_9960_cast")]; + tensor var_9962_to_fp16 = const()[name = tensor("op_9962_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1439383424)))]; + tensor input_589_cast = mul(x = var_9960_cast, y = var_9962_to_fp16)[name = tensor("input_589_cast")]; + tensor var_9970 = const()[name = tensor("op_9970"), val = tensor([1, 1])]; + tensor var_9972 = const()[name = tensor("op_9972"), val = tensor([1, 1])]; + tensor var_9974_pad_type_0 = const()[name = tensor("op_9974_pad_type_0"), val = tensor("custom")]; + tensor var_9974_pad_0 = const()[name = tensor("op_9974_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1439386048))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1449216512))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor 
unet_up_blocks_0_attentions_1_transformer_blocks_6_ff_net_0_proj_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1449216704))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1449224448))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_6_ff_net_0_proj_bias_to_fp16_palettized"), shape = tensor([10240])]; + tensor var_9974_cast = conv(bias = unet_up_blocks_0_attentions_1_transformer_blocks_6_ff_net_0_proj_bias_to_fp16_palettized, dilations = var_9972, groups = var_31, pad = var_9974_pad_0, pad_type = var_9974_pad_type_0, strides = var_9970, weight = unet_up_blocks_0_attentions_1_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized, x = input_589_cast)[name = tensor("op_9974_cast")]; + tensor var_9975_split_sizes_0 = const()[name = tensor("op_9975_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_9975_axis_0 = const()[name = tensor("op_9975_axis_0"), val = tensor(1)]; + tensor var_9975_cast_0, tensor var_9975_cast_1 = split(axis = var_9975_axis_0, split_sizes = var_9975_split_sizes_0, x = var_9974_cast)[name = tensor("op_9975_cast")]; + tensor var_9977_mode_0 = const()[name = tensor("op_9977_mode_0"), val = tensor("EXACT")]; + tensor var_9977_cast = gelu(mode = var_9977_mode_0, x = var_9975_cast_1)[name = tensor("op_9977_cast")]; + tensor input_591_cast = mul(x = var_9975_cast_0, y = var_9977_cast)[name = tensor("input_591_cast")]; + tensor var_9981 = const()[name = tensor("op_9981"), val = tensor([1, 1])]; + tensor var_9983 = const()[name = tensor("op_9983"), val = tensor([1, 1])]; + tensor var_9985_pad_type_0 = const()[name = tensor("op_9985_pad_type_0"), val = tensor("custom")]; + tensor var_9985_pad_0 = const()[name = tensor("op_9985_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1449224640))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1454139904))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_6_ff_net_2_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_6_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1454140096)))]; + tensor var_9985_cast = conv(bias = unet_up_blocks_0_attentions_1_transformer_blocks_6_ff_net_2_bias_to_fp16, dilations = var_9983, groups = var_31, pad = var_9985_pad_0, pad_type = var_9985_pad_type_0, strides = var_9981, weight = unet_up_blocks_0_attentions_1_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized, x = input_591_cast)[name = tensor("op_9985_cast")]; + tensor inputs_307_cast = add(x = var_9985_cast, y = inputs_305_cast)[name = tensor("inputs_307_cast")]; + tensor var_9995 = const()[name = tensor("op_9995"), val = tensor([1])]; + tensor channels_mean_307_cast = reduce_mean(axes = var_9995, keep_dims = var_23, x = inputs_307_cast)[name = tensor("channels_mean_307_cast")]; + tensor zero_mean_307_cast = sub(x = inputs_307_cast, y = channels_mean_307_cast)[name = tensor("zero_mean_307_cast")]; + tensor zero_mean_sq_307_cast = mul(x = zero_mean_307_cast, y = zero_mean_307_cast)[name = 
tensor("zero_mean_sq_307_cast")]; + tensor var_9999 = const()[name = tensor("op_9999"), val = tensor([1])]; + tensor var_10000_cast = reduce_mean(axes = var_9999, keep_dims = var_23, x = zero_mean_sq_307_cast)[name = tensor("op_10000_cast")]; + tensor var_10001_to_fp16 = const()[name = tensor("op_10001_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_10002_cast = add(x = var_10000_cast, y = var_10001_to_fp16)[name = tensor("op_10002_cast")]; + tensor denom_307_epsilon_0_to_fp16 = const()[name = tensor("denom_307_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_307_cast = rsqrt(epsilon = denom_307_epsilon_0_to_fp16, x = var_10002_cast)[name = tensor("denom_307_cast")]; + tensor out_307_cast = mul(x = zero_mean_307_cast, y = denom_307_cast)[name = tensor("out_307_cast")]; + tensor var_10006_to_fp16 = const()[name = tensor("op_10006_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1454142720)))]; + tensor var_10007_cast = add(x = out_307_cast, y = var_10006_to_fp16)[name = tensor("op_10007_cast")]; + tensor var_10009_to_fp16 = const()[name = tensor("op_10009_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1454145344)))]; + tensor hidden_states_403_cast = mul(x = var_10007_cast, y = var_10009_to_fp16)[name = tensor("hidden_states_403_cast")]; + tensor var_10016 = const()[name = tensor("op_10016"), val = tensor([1, 1])]; + tensor var_10018 = const()[name = tensor("op_10018"), val = tensor([1, 1])]; + tensor q_205_pad_type_0 = const()[name = tensor("q_205_pad_type_0"), val = tensor("custom")]; + tensor q_205_pad_0 = const()[name = tensor("q_205_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1454147968))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1455376832))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_205_cast = conv(dilations = var_10018, groups = var_31, pad = q_205_pad_0, pad_type = q_205_pad_type_0, strides = var_10016, weight = unet_up_blocks_0_attentions_1_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_403_cast)[name = tensor("q_205_cast")]; + tensor var_10022 = const()[name = tensor("op_10022"), val = tensor([1, 1])]; + tensor var_10024 = const()[name = tensor("op_10024"), val = tensor([1, 1])]; + tensor k_205_pad_type_0 = const()[name = tensor("k_205_pad_type_0"), val = tensor("custom")]; + tensor k_205_pad_0 = const()[name = tensor("k_205_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1455377024))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1456605888))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_205_cast = conv(dilations = var_10024, groups = var_31, pad = k_205_pad_0, pad_type = k_205_pad_type_0, strides = var_10022, weight = unet_up_blocks_0_attentions_1_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized, x = 
hidden_states_403_cast)[name = tensor("k_205_cast")]; + tensor var_10028 = const()[name = tensor("op_10028"), val = tensor([1, 1])]; + tensor var_10030 = const()[name = tensor("op_10030"), val = tensor([1, 1])]; + tensor v_205_pad_type_0 = const()[name = tensor("v_205_pad_type_0"), val = tensor("custom")]; + tensor v_205_pad_0 = const()[name = tensor("v_205_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1456606080))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1457834944))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_205_cast = conv(dilations = var_10030, groups = var_31, pad = v_205_pad_0, pad_type = v_205_pad_type_0, strides = var_10028, weight = unet_up_blocks_0_attentions_1_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_403_cast)[name = tensor("v_205_cast")]; + tensor var_10034 = const()[name = tensor("op_10034"), val = tensor([2, 20, 64, -1])]; + tensor var_10035_cast = reshape(shape = var_10034, x = q_205_cast)[name = tensor("op_10035_cast")]; + tensor var_10036 = const()[name = tensor("op_10036"), val = tensor([2, 20, 64, -1])]; + tensor var_10037_cast = reshape(shape = var_10036, x = k_205_cast)[name = tensor("op_10037_cast")]; + tensor var_10038 = const()[name = tensor("op_10038"), val = tensor([2, 20, 64, -1])]; + tensor var_10039_cast = reshape(shape = var_10038, x = v_205_cast)[name = tensor("op_10039_cast")]; + tensor attn_weights_409_transpose_x_0 = const()[name = tensor("attn_weights_409_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_409_transpose_y_0 = const()[name = tensor("attn_weights_409_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_409_cast = matmul(transpose_x = attn_weights_409_transpose_x_0, transpose_y = attn_weights_409_transpose_y_0, x = var_10035_cast, y = var_10037_cast)[name = tensor("attn_weights_409_cast")]; + tensor attn_weights_411_cast = mul(x = attn_weights_409_cast, y = var_12_to_fp16)[name = tensor("attn_weights_411_cast")]; + tensor var_10043_cast = softmax(axis = var_18, x = attn_weights_411_cast)[name = tensor("op_10043_cast")]; + tensor attn_205_transpose_x_0 = const()[name = tensor("attn_205_transpose_x_0"), val = tensor(false)]; + tensor attn_205_transpose_y_0 = const()[name = tensor("attn_205_transpose_y_0"), val = tensor(true)]; + tensor attn_205_cast = matmul(transpose_x = attn_205_transpose_x_0, transpose_y = attn_205_transpose_y_0, x = var_10039_cast, y = var_10043_cast)[name = tensor("attn_205_cast")]; + tensor var_10047 = const()[name = tensor("op_10047"), val = tensor([2, 1280, 1, -1])]; + tensor input_593_cast = reshape(shape = var_10047, x = attn_205_cast)[name = tensor("input_593_cast")]; + tensor var_10052 = const()[name = tensor("op_10052"), val = tensor([1, 1])]; + tensor var_10054 = const()[name = tensor("op_10054"), val = tensor([1, 1])]; + tensor var_10056_pad_type_0 = const()[name = tensor("op_10056_pad_type_0"), val = tensor("custom")]; + tensor var_10056_pad_0 = const()[name = tensor("op_10056_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(1457835136))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1459064000))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_7_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_7_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1459064192)))]; + tensor var_10056_cast = conv(bias = unet_up_blocks_0_attentions_1_transformer_blocks_7_attn1_to_out_0_bias_to_fp16, dilations = var_10054, groups = var_31, pad = var_10056_pad_0, pad_type = var_10056_pad_type_0, strides = var_10052, weight = unet_up_blocks_0_attentions_1_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized, x = input_593_cast)[name = tensor("op_10056_cast")]; + tensor inputs_309_cast = add(x = var_10056_cast, y = inputs_307_cast)[name = tensor("inputs_309_cast")]; + tensor var_10060 = const()[name = tensor("op_10060"), val = tensor([1])]; + tensor channels_mean_309_cast = reduce_mean(axes = var_10060, keep_dims = var_23, x = inputs_309_cast)[name = tensor("channels_mean_309_cast")]; + tensor zero_mean_309_cast = sub(x = inputs_309_cast, y = channels_mean_309_cast)[name = tensor("zero_mean_309_cast")]; + tensor zero_mean_sq_309_cast = mul(x = zero_mean_309_cast, y = zero_mean_309_cast)[name = tensor("zero_mean_sq_309_cast")]; + tensor var_10064 = const()[name = tensor("op_10064"), val = tensor([1])]; + tensor var_10065_cast = reduce_mean(axes = var_10064, keep_dims = var_23, x = zero_mean_sq_309_cast)[name = tensor("op_10065_cast")]; + tensor var_10066_to_fp16 = const()[name = tensor("op_10066_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_10067_cast = add(x = var_10065_cast, y = var_10066_to_fp16)[name = tensor("op_10067_cast")]; + tensor denom_309_epsilon_0_to_fp16 = const()[name = tensor("denom_309_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_309_cast = rsqrt(epsilon = denom_309_epsilon_0_to_fp16, x = var_10067_cast)[name = tensor("denom_309_cast")]; + tensor out_309_cast = mul(x = zero_mean_309_cast, y = denom_309_cast)[name = tensor("out_309_cast")]; + tensor var_10071_to_fp16 = const()[name = tensor("op_10071_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1459066816)))]; + tensor var_10072_cast = add(x = out_309_cast, y = var_10071_to_fp16)[name = tensor("op_10072_cast")]; + tensor var_10074_to_fp16 = const()[name = tensor("op_10074_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1459069440)))]; + tensor hidden_states_405_cast = mul(x = var_10072_cast, y = var_10074_to_fp16)[name = tensor("hidden_states_405_cast")]; + tensor var_10081 = const()[name = tensor("op_10081"), val = tensor([1, 1])]; + tensor var_10083 = const()[name = tensor("op_10083"), val = tensor([1, 1])]; + tensor q_207_pad_type_0 = const()[name = tensor("q_207_pad_type_0"), val = tensor("custom")]; + tensor q_207_pad_0 = const()[name = tensor("q_207_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1459072064))), lut = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(1460300928))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_207_cast = conv(dilations = var_10083, groups = var_31, pad = q_207_pad_0, pad_type = q_207_pad_type_0, strides = var_10081, weight = unet_up_blocks_0_attentions_1_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_405_cast)[name = tensor("q_207_cast")]; + tensor var_10087 = const()[name = tensor("op_10087"), val = tensor([1, 1])]; + tensor var_10089 = const()[name = tensor("op_10089"), val = tensor([1, 1])]; + tensor k_207_pad_type_0 = const()[name = tensor("k_207_pad_type_0"), val = tensor("custom")]; + tensor k_207_pad_0 = const()[name = tensor("k_207_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1460301120))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1462267264))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_207_cast = conv(dilations = var_10089, groups = var_31, pad = k_207_pad_0, pad_type = k_207_pad_type_0, strides = var_10087, weight = unet_up_blocks_0_attentions_1_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_207_cast")]; + tensor var_10093 = const()[name = tensor("op_10093"), val = tensor([1, 1])]; + tensor var_10095 = const()[name = tensor("op_10095"), val = tensor([1, 1])]; + tensor v_207_pad_type_0 = const()[name = tensor("v_207_pad_type_0"), val = tensor("custom")]; + tensor v_207_pad_0 = const()[name = tensor("v_207_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1462267456))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1464233600))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_207_cast = conv(dilations = var_10095, groups = var_31, pad = v_207_pad_0, pad_type = v_207_pad_type_0, strides = var_10093, weight = unet_up_blocks_0_attentions_1_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_207_cast")]; + tensor var_10099 = const()[name = tensor("op_10099"), val = tensor([2, 20, 64, -1])]; + tensor var_10100_cast = reshape(shape = var_10099, x = q_207_cast)[name = tensor("op_10100_cast")]; + tensor var_10101 = const()[name = tensor("op_10101"), val = tensor([2, 20, 64, -1])]; + tensor var_10102_cast = reshape(shape = var_10101, x = k_207_cast)[name = tensor("op_10102_cast")]; + tensor var_10103 = const()[name = tensor("op_10103"), val = tensor([2, 20, 64, -1])]; + tensor var_10104_cast = reshape(shape = var_10103, x = v_207_cast)[name = tensor("op_10104_cast")]; + tensor attn_weights_413_transpose_x_0 = const()[name = tensor("attn_weights_413_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_413_transpose_y_0 = const()[name = tensor("attn_weights_413_transpose_y_0"), val = tensor(false)]; + tensor 
attn_weights_413_cast = matmul(transpose_x = attn_weights_413_transpose_x_0, transpose_y = attn_weights_413_transpose_y_0, x = var_10100_cast, y = var_10102_cast)[name = tensor("attn_weights_413_cast")]; + tensor attn_weights_415_cast = mul(x = attn_weights_413_cast, y = var_12_to_fp16)[name = tensor("attn_weights_415_cast")]; + tensor var_10108_cast = softmax(axis = var_18, x = attn_weights_415_cast)[name = tensor("op_10108_cast")]; + tensor attn_207_transpose_x_0 = const()[name = tensor("attn_207_transpose_x_0"), val = tensor(false)]; + tensor attn_207_transpose_y_0 = const()[name = tensor("attn_207_transpose_y_0"), val = tensor(true)]; + tensor attn_207_cast = matmul(transpose_x = attn_207_transpose_x_0, transpose_y = attn_207_transpose_y_0, x = var_10104_cast, y = var_10108_cast)[name = tensor("attn_207_cast")]; + tensor var_10112 = const()[name = tensor("op_10112"), val = tensor([2, 1280, 1, -1])]; + tensor input_595_cast = reshape(shape = var_10112, x = attn_207_cast)[name = tensor("input_595_cast")]; + tensor var_10117 = const()[name = tensor("op_10117"), val = tensor([1, 1])]; + tensor var_10119 = const()[name = tensor("op_10119"), val = tensor([1, 1])]; + tensor var_10121_pad_type_0 = const()[name = tensor("op_10121_pad_type_0"), val = tensor("custom")]; + tensor var_10121_pad_0 = const()[name = tensor("op_10121_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1464233792))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1465462656))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_7_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_7_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1465462848)))]; + tensor var_10121_cast = conv(bias = unet_up_blocks_0_attentions_1_transformer_blocks_7_attn2_to_out_0_bias_to_fp16, dilations = var_10119, groups = var_31, pad = var_10121_pad_0, pad_type = var_10121_pad_type_0, strides = var_10117, weight = unet_up_blocks_0_attentions_1_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized, x = input_595_cast)[name = tensor("op_10121_cast")]; + tensor inputs_311_cast = add(x = var_10121_cast, y = inputs_309_cast)[name = tensor("inputs_311_cast")]; + tensor var_10125 = const()[name = tensor("op_10125"), val = tensor([1])]; + tensor channels_mean_311_cast = reduce_mean(axes = var_10125, keep_dims = var_23, x = inputs_311_cast)[name = tensor("channels_mean_311_cast")]; + tensor zero_mean_311_cast = sub(x = inputs_311_cast, y = channels_mean_311_cast)[name = tensor("zero_mean_311_cast")]; + tensor zero_mean_sq_311_cast = mul(x = zero_mean_311_cast, y = zero_mean_311_cast)[name = tensor("zero_mean_sq_311_cast")]; + tensor var_10129 = const()[name = tensor("op_10129"), val = tensor([1])]; + tensor var_10130_cast = reduce_mean(axes = var_10129, keep_dims = var_23, x = zero_mean_sq_311_cast)[name = tensor("op_10130_cast")]; + tensor var_10131_to_fp16 = const()[name = tensor("op_10131_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_10132_cast = add(x = var_10130_cast, y = var_10131_to_fp16)[name = tensor("op_10132_cast")]; + tensor 
denom_311_epsilon_0_to_fp16 = const()[name = tensor("denom_311_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_311_cast = rsqrt(epsilon = denom_311_epsilon_0_to_fp16, x = var_10132_cast)[name = tensor("denom_311_cast")]; + tensor out_311_cast = mul(x = zero_mean_311_cast, y = denom_311_cast)[name = tensor("out_311_cast")]; + tensor var_10136_to_fp16 = const()[name = tensor("op_10136_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1465465472)))]; + tensor var_10137_cast = add(x = out_311_cast, y = var_10136_to_fp16)[name = tensor("op_10137_cast")]; + tensor var_10139_to_fp16 = const()[name = tensor("op_10139_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1465468096)))]; + tensor input_597_cast = mul(x = var_10137_cast, y = var_10139_to_fp16)[name = tensor("input_597_cast")]; + tensor var_10147 = const()[name = tensor("op_10147"), val = tensor([1, 1])]; + tensor var_10149 = const()[name = tensor("op_10149"), val = tensor([1, 1])]; + tensor var_10151_pad_type_0 = const()[name = tensor("op_10151_pad_type_0"), val = tensor("custom")]; + tensor var_10151_pad_0 = const()[name = tensor("op_10151_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1465470720))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1475301184))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_7_ff_net_0_proj_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1475301376))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1475309120))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_7_ff_net_0_proj_bias_to_fp16_palettized"), shape = tensor([10240])]; + tensor var_10151_cast = conv(bias = unet_up_blocks_0_attentions_1_transformer_blocks_7_ff_net_0_proj_bias_to_fp16_palettized, dilations = var_10149, groups = var_31, pad = var_10151_pad_0, pad_type = var_10151_pad_type_0, strides = var_10147, weight = unet_up_blocks_0_attentions_1_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized, x = input_597_cast)[name = tensor("op_10151_cast")]; + tensor var_10152_split_sizes_0 = const()[name = tensor("op_10152_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_10152_axis_0 = const()[name = tensor("op_10152_axis_0"), val = tensor(1)]; + tensor var_10152_cast_0, tensor var_10152_cast_1 = split(axis = var_10152_axis_0, split_sizes = var_10152_split_sizes_0, x = var_10151_cast)[name = tensor("op_10152_cast")]; + tensor var_10154_mode_0 = const()[name = tensor("op_10154_mode_0"), val = tensor("EXACT")]; + tensor var_10154_cast = gelu(mode = var_10154_mode_0, x = var_10152_cast_1)[name = tensor("op_10154_cast")]; + tensor input_599_cast = mul(x = var_10152_cast_0, y = var_10154_cast)[name = tensor("input_599_cast")]; + tensor var_10158 = const()[name = tensor("op_10158"), val = tensor([1, 1])]; + tensor var_10160 = const()[name = tensor("op_10160"), val = tensor([1, 1])]; + tensor var_10162_pad_type_0 = const()[name = tensor("op_10162_pad_type_0"), val = tensor("custom")]; + 
tensor var_10162_pad_0 = const()[name = tensor("op_10162_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1475309312))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1480224576))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_7_ff_net_2_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_7_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1480224768)))]; + tensor var_10162_cast = conv(bias = unet_up_blocks_0_attentions_1_transformer_blocks_7_ff_net_2_bias_to_fp16, dilations = var_10160, groups = var_31, pad = var_10162_pad_0, pad_type = var_10162_pad_type_0, strides = var_10158, weight = unet_up_blocks_0_attentions_1_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized, x = input_599_cast)[name = tensor("op_10162_cast")]; + tensor inputs_313_cast = add(x = var_10162_cast, y = inputs_311_cast)[name = tensor("inputs_313_cast")]; + tensor var_10172 = const()[name = tensor("op_10172"), val = tensor([1])]; + tensor channels_mean_313_cast = reduce_mean(axes = var_10172, keep_dims = var_23, x = inputs_313_cast)[name = tensor("channels_mean_313_cast")]; + tensor zero_mean_313_cast = sub(x = inputs_313_cast, y = channels_mean_313_cast)[name = tensor("zero_mean_313_cast")]; + tensor zero_mean_sq_313_cast = mul(x = zero_mean_313_cast, y = zero_mean_313_cast)[name = tensor("zero_mean_sq_313_cast")]; + tensor var_10176 = const()[name = tensor("op_10176"), val = tensor([1])]; + tensor var_10177_cast = reduce_mean(axes = var_10176, keep_dims = var_23, x = zero_mean_sq_313_cast)[name = tensor("op_10177_cast")]; + tensor var_10178_to_fp16 = const()[name = tensor("op_10178_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_10179_cast = add(x = var_10177_cast, y = var_10178_to_fp16)[name = tensor("op_10179_cast")]; + tensor denom_313_epsilon_0_to_fp16 = const()[name = tensor("denom_313_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_313_cast = rsqrt(epsilon = denom_313_epsilon_0_to_fp16, x = var_10179_cast)[name = tensor("denom_313_cast")]; + tensor out_313_cast = mul(x = zero_mean_313_cast, y = denom_313_cast)[name = tensor("out_313_cast")]; + tensor var_10183_to_fp16 = const()[name = tensor("op_10183_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1480227392)))]; + tensor var_10184_cast = add(x = out_313_cast, y = var_10183_to_fp16)[name = tensor("op_10184_cast")]; + tensor var_10186_to_fp16 = const()[name = tensor("op_10186_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1480230016)))]; + tensor hidden_states_409_cast = mul(x = var_10184_cast, y = var_10186_to_fp16)[name = tensor("hidden_states_409_cast")]; + tensor var_10193 = const()[name = tensor("op_10193"), val = tensor([1, 1])]; + tensor var_10195 = const()[name = tensor("op_10195"), val = tensor([1, 1])]; + tensor q_209_pad_type_0 = const()[name = tensor("q_209_pad_type_0"), val = tensor("custom")]; + tensor q_209_pad_0 = const()[name = tensor("q_209_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
unet_up_blocks_0_attentions_1_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1480232640))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1481461504))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_209_cast = conv(dilations = var_10195, groups = var_31, pad = q_209_pad_0, pad_type = q_209_pad_type_0, strides = var_10193, weight = unet_up_blocks_0_attentions_1_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_409_cast)[name = tensor("q_209_cast")]; + tensor var_10199 = const()[name = tensor("op_10199"), val = tensor([1, 1])]; + tensor var_10201 = const()[name = tensor("op_10201"), val = tensor([1, 1])]; + tensor k_209_pad_type_0 = const()[name = tensor("k_209_pad_type_0"), val = tensor("custom")]; + tensor k_209_pad_0 = const()[name = tensor("k_209_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1481461696))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1482690560))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_209_cast = conv(dilations = var_10201, groups = var_31, pad = k_209_pad_0, pad_type = k_209_pad_type_0, strides = var_10199, weight = unet_up_blocks_0_attentions_1_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_409_cast)[name = tensor("k_209_cast")]; + tensor var_10205 = const()[name = tensor("op_10205"), val = tensor([1, 1])]; + tensor var_10207 = const()[name = tensor("op_10207"), val = tensor([1, 1])]; + tensor v_209_pad_type_0 = const()[name = tensor("v_209_pad_type_0"), val = tensor("custom")]; + tensor v_209_pad_0 = const()[name = tensor("v_209_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1482690752))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1483919616))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_209_cast = conv(dilations = var_10207, groups = var_31, pad = v_209_pad_0, pad_type = v_209_pad_type_0, strides = var_10205, weight = unet_up_blocks_0_attentions_1_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_409_cast)[name = tensor("v_209_cast")]; + tensor var_10211 = const()[name = tensor("op_10211"), val = tensor([2, 20, 64, -1])]; + tensor var_10212_cast = reshape(shape = var_10211, x = q_209_cast)[name = tensor("op_10212_cast")]; + tensor var_10213 = const()[name = tensor("op_10213"), val = tensor([2, 20, 64, -1])]; + tensor var_10214_cast = reshape(shape = var_10213, x = k_209_cast)[name = tensor("op_10214_cast")]; + tensor var_10215 = const()[name = tensor("op_10215"), val = tensor([2, 20, 64, -1])]; + tensor var_10216_cast = reshape(shape = var_10215, x = v_209_cast)[name = tensor("op_10216_cast")]; + 
tensor attn_weights_417_transpose_x_0 = const()[name = tensor("attn_weights_417_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_417_transpose_y_0 = const()[name = tensor("attn_weights_417_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_417_cast = matmul(transpose_x = attn_weights_417_transpose_x_0, transpose_y = attn_weights_417_transpose_y_0, x = var_10212_cast, y = var_10214_cast)[name = tensor("attn_weights_417_cast")]; + tensor attn_weights_419_cast = mul(x = attn_weights_417_cast, y = var_12_to_fp16)[name = tensor("attn_weights_419_cast")]; + tensor var_10220_cast = softmax(axis = var_18, x = attn_weights_419_cast)[name = tensor("op_10220_cast")]; + tensor attn_209_transpose_x_0 = const()[name = tensor("attn_209_transpose_x_0"), val = tensor(false)]; + tensor attn_209_transpose_y_0 = const()[name = tensor("attn_209_transpose_y_0"), val = tensor(true)]; + tensor attn_209_cast = matmul(transpose_x = attn_209_transpose_x_0, transpose_y = attn_209_transpose_y_0, x = var_10216_cast, y = var_10220_cast)[name = tensor("attn_209_cast")]; + tensor var_10224 = const()[name = tensor("op_10224"), val = tensor([2, 1280, 1, -1])]; + tensor input_601_cast = reshape(shape = var_10224, x = attn_209_cast)[name = tensor("input_601_cast")]; + tensor var_10229 = const()[name = tensor("op_10229"), val = tensor([1, 1])]; + tensor var_10231 = const()[name = tensor("op_10231"), val = tensor([1, 1])]; + tensor var_10233_pad_type_0 = const()[name = tensor("op_10233_pad_type_0"), val = tensor("custom")]; + tensor var_10233_pad_0 = const()[name = tensor("op_10233_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1483919808))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1485148672))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_8_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_8_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1485148864)))]; + tensor var_10233_cast = conv(bias = unet_up_blocks_0_attentions_1_transformer_blocks_8_attn1_to_out_0_bias_to_fp16, dilations = var_10231, groups = var_31, pad = var_10233_pad_0, pad_type = var_10233_pad_type_0, strides = var_10229, weight = unet_up_blocks_0_attentions_1_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized, x = input_601_cast)[name = tensor("op_10233_cast")]; + tensor inputs_315_cast = add(x = var_10233_cast, y = inputs_313_cast)[name = tensor("inputs_315_cast")]; + tensor var_10237 = const()[name = tensor("op_10237"), val = tensor([1])]; + tensor channels_mean_315_cast = reduce_mean(axes = var_10237, keep_dims = var_23, x = inputs_315_cast)[name = tensor("channels_mean_315_cast")]; + tensor zero_mean_315_cast = sub(x = inputs_315_cast, y = channels_mean_315_cast)[name = tensor("zero_mean_315_cast")]; + tensor zero_mean_sq_315_cast = mul(x = zero_mean_315_cast, y = zero_mean_315_cast)[name = tensor("zero_mean_sq_315_cast")]; + tensor var_10241 = const()[name = tensor("op_10241"), val = tensor([1])]; + tensor var_10242_cast = reduce_mean(axes = var_10241, keep_dims = var_23, x = 
zero_mean_sq_315_cast)[name = tensor("op_10242_cast")]; + tensor var_10243_to_fp16 = const()[name = tensor("op_10243_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_10244_cast = add(x = var_10242_cast, y = var_10243_to_fp16)[name = tensor("op_10244_cast")]; + tensor denom_315_epsilon_0_to_fp16 = const()[name = tensor("denom_315_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_315_cast = rsqrt(epsilon = denom_315_epsilon_0_to_fp16, x = var_10244_cast)[name = tensor("denom_315_cast")]; + tensor out_315_cast = mul(x = zero_mean_315_cast, y = denom_315_cast)[name = tensor("out_315_cast")]; + tensor var_10248_to_fp16 = const()[name = tensor("op_10248_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1485151488)))]; + tensor var_10249_cast = add(x = out_315_cast, y = var_10248_to_fp16)[name = tensor("op_10249_cast")]; + tensor var_10251_to_fp16 = const()[name = tensor("op_10251_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1485154112)))]; + tensor hidden_states_411_cast = mul(x = var_10249_cast, y = var_10251_to_fp16)[name = tensor("hidden_states_411_cast")]; + tensor var_10258 = const()[name = tensor("op_10258"), val = tensor([1, 1])]; + tensor var_10260 = const()[name = tensor("op_10260"), val = tensor([1, 1])]; + tensor q_211_pad_type_0 = const()[name = tensor("q_211_pad_type_0"), val = tensor("custom")]; + tensor q_211_pad_0 = const()[name = tensor("q_211_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1485156736))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1486385600))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_211_cast = conv(dilations = var_10260, groups = var_31, pad = q_211_pad_0, pad_type = q_211_pad_type_0, strides = var_10258, weight = unet_up_blocks_0_attentions_1_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_411_cast)[name = tensor("q_211_cast")]; + tensor var_10264 = const()[name = tensor("op_10264"), val = tensor([1, 1])]; + tensor var_10266 = const()[name = tensor("op_10266"), val = tensor([1, 1])]; + tensor k_211_pad_type_0 = const()[name = tensor("k_211_pad_type_0"), val = tensor("custom")]; + tensor k_211_pad_0 = const()[name = tensor("k_211_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1486385792))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1488351936))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_211_cast = conv(dilations = var_10266, groups = var_31, pad = k_211_pad_0, pad_type = k_211_pad_type_0, strides = var_10264, weight = unet_up_blocks_0_attentions_1_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_211_cast")]; + tensor var_10270 = const()[name = tensor("op_10270"), val = tensor([1, 1])]; + tensor var_10272 = const()[name = tensor("op_10272"), val = 
tensor([1, 1])]; + tensor v_211_pad_type_0 = const()[name = tensor("v_211_pad_type_0"), val = tensor("custom")]; + tensor v_211_pad_0 = const()[name = tensor("v_211_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1488352128))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1490318272))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_211_cast = conv(dilations = var_10272, groups = var_31, pad = v_211_pad_0, pad_type = v_211_pad_type_0, strides = var_10270, weight = unet_up_blocks_0_attentions_1_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_211_cast")]; + tensor var_10276 = const()[name = tensor("op_10276"), val = tensor([2, 20, 64, -1])]; + tensor var_10277_cast = reshape(shape = var_10276, x = q_211_cast)[name = tensor("op_10277_cast")]; + tensor var_10278 = const()[name = tensor("op_10278"), val = tensor([2, 20, 64, -1])]; + tensor var_10279_cast = reshape(shape = var_10278, x = k_211_cast)[name = tensor("op_10279_cast")]; + tensor var_10280 = const()[name = tensor("op_10280"), val = tensor([2, 20, 64, -1])]; + tensor var_10281_cast = reshape(shape = var_10280, x = v_211_cast)[name = tensor("op_10281_cast")]; + tensor attn_weights_421_transpose_x_0 = const()[name = tensor("attn_weights_421_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_421_transpose_y_0 = const()[name = tensor("attn_weights_421_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_421_cast = matmul(transpose_x = attn_weights_421_transpose_x_0, transpose_y = attn_weights_421_transpose_y_0, x = var_10277_cast, y = var_10279_cast)[name = tensor("attn_weights_421_cast")]; + tensor attn_weights_423_cast = mul(x = attn_weights_421_cast, y = var_12_to_fp16)[name = tensor("attn_weights_423_cast")]; + tensor var_10285_cast = softmax(axis = var_18, x = attn_weights_423_cast)[name = tensor("op_10285_cast")]; + tensor attn_211_transpose_x_0 = const()[name = tensor("attn_211_transpose_x_0"), val = tensor(false)]; + tensor attn_211_transpose_y_0 = const()[name = tensor("attn_211_transpose_y_0"), val = tensor(true)]; + tensor attn_211_cast = matmul(transpose_x = attn_211_transpose_x_0, transpose_y = attn_211_transpose_y_0, x = var_10281_cast, y = var_10285_cast)[name = tensor("attn_211_cast")]; + tensor var_10289 = const()[name = tensor("op_10289"), val = tensor([2, 1280, 1, -1])]; + tensor input_603_cast = reshape(shape = var_10289, x = attn_211_cast)[name = tensor("input_603_cast")]; + tensor var_10294 = const()[name = tensor("op_10294"), val = tensor([1, 1])]; + tensor var_10296 = const()[name = tensor("op_10296"), val = tensor([1, 1])]; + tensor var_10298_pad_type_0 = const()[name = tensor("op_10298_pad_type_0"), val = tensor("custom")]; + tensor var_10298_pad_0 = const()[name = tensor("op_10298_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1490318464))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1491547328))), name = 
tensor("unet_up_blocks_0_attentions_1_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_8_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_8_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1491547520)))]; + tensor var_10298_cast = conv(bias = unet_up_blocks_0_attentions_1_transformer_blocks_8_attn2_to_out_0_bias_to_fp16, dilations = var_10296, groups = var_31, pad = var_10298_pad_0, pad_type = var_10298_pad_type_0, strides = var_10294, weight = unet_up_blocks_0_attentions_1_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized, x = input_603_cast)[name = tensor("op_10298_cast")]; + tensor inputs_317_cast = add(x = var_10298_cast, y = inputs_315_cast)[name = tensor("inputs_317_cast")]; + tensor var_10302 = const()[name = tensor("op_10302"), val = tensor([1])]; + tensor channels_mean_317_cast = reduce_mean(axes = var_10302, keep_dims = var_23, x = inputs_317_cast)[name = tensor("channels_mean_317_cast")]; + tensor zero_mean_317_cast = sub(x = inputs_317_cast, y = channels_mean_317_cast)[name = tensor("zero_mean_317_cast")]; + tensor zero_mean_sq_317_cast = mul(x = zero_mean_317_cast, y = zero_mean_317_cast)[name = tensor("zero_mean_sq_317_cast")]; + tensor var_10306 = const()[name = tensor("op_10306"), val = tensor([1])]; + tensor var_10307_cast = reduce_mean(axes = var_10306, keep_dims = var_23, x = zero_mean_sq_317_cast)[name = tensor("op_10307_cast")]; + tensor var_10308_to_fp16 = const()[name = tensor("op_10308_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_10309_cast = add(x = var_10307_cast, y = var_10308_to_fp16)[name = tensor("op_10309_cast")]; + tensor denom_317_epsilon_0_to_fp16 = const()[name = tensor("denom_317_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_317_cast = rsqrt(epsilon = denom_317_epsilon_0_to_fp16, x = var_10309_cast)[name = tensor("denom_317_cast")]; + tensor out_317_cast = mul(x = zero_mean_317_cast, y = denom_317_cast)[name = tensor("out_317_cast")]; + tensor var_10313_to_fp16 = const()[name = tensor("op_10313_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1491550144)))]; + tensor var_10314_cast = add(x = out_317_cast, y = var_10313_to_fp16)[name = tensor("op_10314_cast")]; + tensor var_10316_to_fp16 = const()[name = tensor("op_10316_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1491552768)))]; + tensor input_605_cast = mul(x = var_10314_cast, y = var_10316_to_fp16)[name = tensor("input_605_cast")]; + tensor var_10324 = const()[name = tensor("op_10324"), val = tensor([1, 1])]; + tensor var_10326 = const()[name = tensor("op_10326"), val = tensor([1, 1])]; + tensor var_10328_pad_type_0 = const()[name = tensor("op_10328_pad_type_0"), val = tensor("custom")]; + tensor var_10328_pad_0 = const()[name = tensor("op_10328_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1491555392))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1501385856))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 
1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_8_ff_net_0_proj_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1501386048))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1501393792))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_8_ff_net_0_proj_bias_to_fp16_palettized"), shape = tensor([10240])]; + tensor var_10328_cast = conv(bias = unet_up_blocks_0_attentions_1_transformer_blocks_8_ff_net_0_proj_bias_to_fp16_palettized, dilations = var_10326, groups = var_31, pad = var_10328_pad_0, pad_type = var_10328_pad_type_0, strides = var_10324, weight = unet_up_blocks_0_attentions_1_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized, x = input_605_cast)[name = tensor("op_10328_cast")]; + tensor var_10329_split_sizes_0 = const()[name = tensor("op_10329_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_10329_axis_0 = const()[name = tensor("op_10329_axis_0"), val = tensor(1)]; + tensor var_10329_cast_0, tensor var_10329_cast_1 = split(axis = var_10329_axis_0, split_sizes = var_10329_split_sizes_0, x = var_10328_cast)[name = tensor("op_10329_cast")]; + tensor var_10331_mode_0 = const()[name = tensor("op_10331_mode_0"), val = tensor("EXACT")]; + tensor var_10331_cast = gelu(mode = var_10331_mode_0, x = var_10329_cast_1)[name = tensor("op_10331_cast")]; + tensor input_607_cast = mul(x = var_10329_cast_0, y = var_10331_cast)[name = tensor("input_607_cast")]; + tensor var_10335 = const()[name = tensor("op_10335"), val = tensor([1, 1])]; + tensor var_10337 = const()[name = tensor("op_10337"), val = tensor([1, 1])]; + tensor var_10339_pad_type_0 = const()[name = tensor("op_10339_pad_type_0"), val = tensor("custom")]; + tensor var_10339_pad_0 = const()[name = tensor("op_10339_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1501393984))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1506309248))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_8_ff_net_2_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_8_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1506309440)))]; + tensor var_10339_cast = conv(bias = unet_up_blocks_0_attentions_1_transformer_blocks_8_ff_net_2_bias_to_fp16, dilations = var_10337, groups = var_31, pad = var_10339_pad_0, pad_type = var_10339_pad_type_0, strides = var_10335, weight = unet_up_blocks_0_attentions_1_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized, x = input_607_cast)[name = tensor("op_10339_cast")]; + tensor inputs_319_cast = add(x = var_10339_cast, y = inputs_317_cast)[name = tensor("inputs_319_cast")]; + tensor var_10349 = const()[name = tensor("op_10349"), val = tensor([1])]; + tensor channels_mean_319_cast = reduce_mean(axes = var_10349, keep_dims = var_23, x = inputs_319_cast)[name = tensor("channels_mean_319_cast")]; + tensor zero_mean_319_cast = sub(x = inputs_319_cast, y = channels_mean_319_cast)[name = tensor("zero_mean_319_cast")]; + tensor zero_mean_sq_319_cast = mul(x = 
zero_mean_319_cast, y = zero_mean_319_cast)[name = tensor("zero_mean_sq_319_cast")]; + tensor var_10353 = const()[name = tensor("op_10353"), val = tensor([1])]; + tensor var_10354_cast = reduce_mean(axes = var_10353, keep_dims = var_23, x = zero_mean_sq_319_cast)[name = tensor("op_10354_cast")]; + tensor var_10355_to_fp16 = const()[name = tensor("op_10355_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_10356_cast = add(x = var_10354_cast, y = var_10355_to_fp16)[name = tensor("op_10356_cast")]; + tensor denom_319_epsilon_0_to_fp16 = const()[name = tensor("denom_319_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_319_cast = rsqrt(epsilon = denom_319_epsilon_0_to_fp16, x = var_10356_cast)[name = tensor("denom_319_cast")]; + tensor out_319_cast = mul(x = zero_mean_319_cast, y = denom_319_cast)[name = tensor("out_319_cast")]; + tensor var_10360_to_fp16 = const()[name = tensor("op_10360_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1506312064)))]; + tensor var_10361_cast = add(x = out_319_cast, y = var_10360_to_fp16)[name = tensor("op_10361_cast")]; + tensor var_10363_to_fp16 = const()[name = tensor("op_10363_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1506314688)))]; + tensor hidden_states_415_cast = mul(x = var_10361_cast, y = var_10363_to_fp16)[name = tensor("hidden_states_415_cast")]; + tensor var_10370 = const()[name = tensor("op_10370"), val = tensor([1, 1])]; + tensor var_10372 = const()[name = tensor("op_10372"), val = tensor([1, 1])]; + tensor q_213_pad_type_0 = const()[name = tensor("q_213_pad_type_0"), val = tensor("custom")]; + tensor q_213_pad_0 = const()[name = tensor("q_213_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1506317312))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1507546176))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_213_cast = conv(dilations = var_10372, groups = var_31, pad = q_213_pad_0, pad_type = q_213_pad_type_0, strides = var_10370, weight = unet_up_blocks_0_attentions_1_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_415_cast)[name = tensor("q_213_cast")]; + tensor var_10376 = const()[name = tensor("op_10376"), val = tensor([1, 1])]; + tensor var_10378 = const()[name = tensor("op_10378"), val = tensor([1, 1])]; + tensor k_213_pad_type_0 = const()[name = tensor("k_213_pad_type_0"), val = tensor("custom")]; + tensor k_213_pad_0 = const()[name = tensor("k_213_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1507546368))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1508775232))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_213_cast = conv(dilations = var_10378, groups = var_31, pad = k_213_pad_0, pad_type = k_213_pad_type_0, strides = var_10376, weight = 
unet_up_blocks_0_attentions_1_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_415_cast)[name = tensor("k_213_cast")]; + tensor var_10382 = const()[name = tensor("op_10382"), val = tensor([1, 1])]; + tensor var_10384 = const()[name = tensor("op_10384"), val = tensor([1, 1])]; + tensor v_213_pad_type_0 = const()[name = tensor("v_213_pad_type_0"), val = tensor("custom")]; + tensor v_213_pad_0 = const()[name = tensor("v_213_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1508775424))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1510004288))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_213_cast = conv(dilations = var_10384, groups = var_31, pad = v_213_pad_0, pad_type = v_213_pad_type_0, strides = var_10382, weight = unet_up_blocks_0_attentions_1_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_415_cast)[name = tensor("v_213_cast")]; + tensor var_10388 = const()[name = tensor("op_10388"), val = tensor([2, 20, 64, -1])]; + tensor var_10389_cast = reshape(shape = var_10388, x = q_213_cast)[name = tensor("op_10389_cast")]; + tensor var_10390 = const()[name = tensor("op_10390"), val = tensor([2, 20, 64, -1])]; + tensor var_10391_cast = reshape(shape = var_10390, x = k_213_cast)[name = tensor("op_10391_cast")]; + tensor var_10392 = const()[name = tensor("op_10392"), val = tensor([2, 20, 64, -1])]; + tensor var_10393_cast = reshape(shape = var_10392, x = v_213_cast)[name = tensor("op_10393_cast")]; + tensor attn_weights_425_transpose_x_0 = const()[name = tensor("attn_weights_425_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_425_transpose_y_0 = const()[name = tensor("attn_weights_425_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_425_cast = matmul(transpose_x = attn_weights_425_transpose_x_0, transpose_y = attn_weights_425_transpose_y_0, x = var_10389_cast, y = var_10391_cast)[name = tensor("attn_weights_425_cast")]; + tensor attn_weights_427_cast = mul(x = attn_weights_425_cast, y = var_12_to_fp16)[name = tensor("attn_weights_427_cast")]; + tensor var_10397_cast = softmax(axis = var_18, x = attn_weights_427_cast)[name = tensor("op_10397_cast")]; + tensor attn_213_transpose_x_0 = const()[name = tensor("attn_213_transpose_x_0"), val = tensor(false)]; + tensor attn_213_transpose_y_0 = const()[name = tensor("attn_213_transpose_y_0"), val = tensor(true)]; + tensor attn_213_cast = matmul(transpose_x = attn_213_transpose_x_0, transpose_y = attn_213_transpose_y_0, x = var_10393_cast, y = var_10397_cast)[name = tensor("attn_213_cast")]; + tensor var_10401 = const()[name = tensor("op_10401"), val = tensor([2, 1280, 1, -1])]; + tensor input_609_cast = reshape(shape = var_10401, x = attn_213_cast)[name = tensor("input_609_cast")]; + tensor var_10406 = const()[name = tensor("op_10406"), val = tensor([1, 1])]; + tensor var_10408 = const()[name = tensor("op_10408"), val = tensor([1, 1])]; + tensor var_10410_pad_type_0 = const()[name = tensor("op_10410_pad_type_0"), val = tensor("custom")]; + tensor var_10410_pad_0 = const()[name = tensor("op_10410_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
unet_up_blocks_0_attentions_1_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1510004480))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1511233344))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_9_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_9_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1511233536)))]; + tensor var_10410_cast = conv(bias = unet_up_blocks_0_attentions_1_transformer_blocks_9_attn1_to_out_0_bias_to_fp16, dilations = var_10408, groups = var_31, pad = var_10410_pad_0, pad_type = var_10410_pad_type_0, strides = var_10406, weight = unet_up_blocks_0_attentions_1_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized, x = input_609_cast)[name = tensor("op_10410_cast")]; + tensor inputs_321_cast = add(x = var_10410_cast, y = inputs_319_cast)[name = tensor("inputs_321_cast")]; + tensor var_10414 = const()[name = tensor("op_10414"), val = tensor([1])]; + tensor channels_mean_321_cast = reduce_mean(axes = var_10414, keep_dims = var_23, x = inputs_321_cast)[name = tensor("channels_mean_321_cast")]; + tensor zero_mean_321_cast = sub(x = inputs_321_cast, y = channels_mean_321_cast)[name = tensor("zero_mean_321_cast")]; + tensor zero_mean_sq_321_cast = mul(x = zero_mean_321_cast, y = zero_mean_321_cast)[name = tensor("zero_mean_sq_321_cast")]; + tensor var_10418 = const()[name = tensor("op_10418"), val = tensor([1])]; + tensor var_10419_cast = reduce_mean(axes = var_10418, keep_dims = var_23, x = zero_mean_sq_321_cast)[name = tensor("op_10419_cast")]; + tensor var_10420_to_fp16 = const()[name = tensor("op_10420_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_10421_cast = add(x = var_10419_cast, y = var_10420_to_fp16)[name = tensor("op_10421_cast")]; + tensor denom_321_epsilon_0_to_fp16 = const()[name = tensor("denom_321_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_321_cast = rsqrt(epsilon = denom_321_epsilon_0_to_fp16, x = var_10421_cast)[name = tensor("denom_321_cast")]; + tensor out_321_cast = mul(x = zero_mean_321_cast, y = denom_321_cast)[name = tensor("out_321_cast")]; + tensor var_10425_to_fp16 = const()[name = tensor("op_10425_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1511236160)))]; + tensor var_10426_cast = add(x = out_321_cast, y = var_10425_to_fp16)[name = tensor("op_10426_cast")]; + tensor var_10428_to_fp16 = const()[name = tensor("op_10428_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1511238784)))]; + tensor hidden_states_417_cast = mul(x = var_10426_cast, y = var_10428_to_fp16)[name = tensor("hidden_states_417_cast")]; + tensor var_10435 = const()[name = tensor("op_10435"), val = tensor([1, 1])]; + tensor var_10437 = const()[name = tensor("op_10437"), val = tensor([1, 1])]; + tensor q_215_pad_type_0 = const()[name = tensor("q_215_pad_type_0"), val = tensor("custom")]; + tensor q_215_pad_0 = const()[name = tensor("q_215_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices 
= tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1511241408))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1512470272))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_215_cast = conv(dilations = var_10437, groups = var_31, pad = q_215_pad_0, pad_type = q_215_pad_type_0, strides = var_10435, weight = unet_up_blocks_0_attentions_1_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_417_cast)[name = tensor("q_215_cast")]; + tensor var_10441 = const()[name = tensor("op_10441"), val = tensor([1, 1])]; + tensor var_10443 = const()[name = tensor("op_10443"), val = tensor([1, 1])]; + tensor k_215_pad_type_0 = const()[name = tensor("k_215_pad_type_0"), val = tensor("custom")]; + tensor k_215_pad_0 = const()[name = tensor("k_215_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1512470464))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1514436608))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_215_cast = conv(dilations = var_10443, groups = var_31, pad = k_215_pad_0, pad_type = k_215_pad_type_0, strides = var_10441, weight = unet_up_blocks_0_attentions_1_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_215_cast")]; + tensor var_10447 = const()[name = tensor("op_10447"), val = tensor([1, 1])]; + tensor var_10449 = const()[name = tensor("op_10449"), val = tensor([1, 1])]; + tensor v_215_pad_type_0 = const()[name = tensor("v_215_pad_type_0"), val = tensor("custom")]; + tensor v_215_pad_0 = const()[name = tensor("v_215_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1514436800))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1516402944))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_215_cast = conv(dilations = var_10449, groups = var_31, pad = v_215_pad_0, pad_type = v_215_pad_type_0, strides = var_10447, weight = unet_up_blocks_0_attentions_1_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_215_cast")]; + tensor var_10453 = const()[name = tensor("op_10453"), val = tensor([2, 20, 64, -1])]; + tensor var_10454_cast = reshape(shape = var_10453, x = q_215_cast)[name = tensor("op_10454_cast")]; + tensor var_10455 = const()[name = tensor("op_10455"), val = tensor([2, 20, 64, -1])]; + tensor var_10456_cast = reshape(shape = var_10455, x = k_215_cast)[name = tensor("op_10456_cast")]; + tensor var_10457 = const()[name = tensor("op_10457"), val = tensor([2, 20, 64, -1])]; + tensor var_10458_cast = reshape(shape = var_10457, x = v_215_cast)[name = tensor("op_10458_cast")]; + tensor attn_weights_429_transpose_x_0 = const()[name = tensor("attn_weights_429_transpose_x_0"), val = tensor(true)]; + tensor 
attn_weights_429_transpose_y_0 = const()[name = tensor("attn_weights_429_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_429_cast = matmul(transpose_x = attn_weights_429_transpose_x_0, transpose_y = attn_weights_429_transpose_y_0, x = var_10454_cast, y = var_10456_cast)[name = tensor("attn_weights_429_cast")]; + tensor attn_weights_431_cast = mul(x = attn_weights_429_cast, y = var_12_to_fp16)[name = tensor("attn_weights_431_cast")]; + tensor var_10462_cast = softmax(axis = var_18, x = attn_weights_431_cast)[name = tensor("op_10462_cast")]; + tensor attn_215_transpose_x_0 = const()[name = tensor("attn_215_transpose_x_0"), val = tensor(false)]; + tensor attn_215_transpose_y_0 = const()[name = tensor("attn_215_transpose_y_0"), val = tensor(true)]; + tensor attn_215_cast = matmul(transpose_x = attn_215_transpose_x_0, transpose_y = attn_215_transpose_y_0, x = var_10458_cast, y = var_10462_cast)[name = tensor("attn_215_cast")]; + tensor var_10466 = const()[name = tensor("op_10466"), val = tensor([2, 1280, 1, -1])]; + tensor input_611_cast = reshape(shape = var_10466, x = attn_215_cast)[name = tensor("input_611_cast")]; + tensor var_10471 = const()[name = tensor("op_10471"), val = tensor([1, 1])]; + tensor var_10473 = const()[name = tensor("op_10473"), val = tensor([1, 1])]; + tensor var_10475_pad_type_0 = const()[name = tensor("op_10475_pad_type_0"), val = tensor("custom")]; + tensor var_10475_pad_0 = const()[name = tensor("op_10475_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1516403136))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1517632000))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_9_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_9_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1517632192)))]; + tensor var_10475_cast = conv(bias = unet_up_blocks_0_attentions_1_transformer_blocks_9_attn2_to_out_0_bias_to_fp16, dilations = var_10473, groups = var_31, pad = var_10475_pad_0, pad_type = var_10475_pad_type_0, strides = var_10471, weight = unet_up_blocks_0_attentions_1_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized, x = input_611_cast)[name = tensor("op_10475_cast")]; + tensor inputs_323_cast = add(x = var_10475_cast, y = inputs_321_cast)[name = tensor("inputs_323_cast")]; + tensor var_10479 = const()[name = tensor("op_10479"), val = tensor([1])]; + tensor channels_mean_323_cast = reduce_mean(axes = var_10479, keep_dims = var_23, x = inputs_323_cast)[name = tensor("channels_mean_323_cast")]; + tensor zero_mean_323_cast = sub(x = inputs_323_cast, y = channels_mean_323_cast)[name = tensor("zero_mean_323_cast")]; + tensor zero_mean_sq_323_cast = mul(x = zero_mean_323_cast, y = zero_mean_323_cast)[name = tensor("zero_mean_sq_323_cast")]; + tensor var_10483 = const()[name = tensor("op_10483"), val = tensor([1])]; + tensor var_10484_cast = reduce_mean(axes = var_10483, keep_dims = var_23, x = zero_mean_sq_323_cast)[name = tensor("op_10484_cast")]; + tensor var_10485_to_fp16 = const()[name = tensor("op_10485_to_fp16"), val = 
tensor(0x1.5p-17)]; + tensor var_10486_cast = add(x = var_10484_cast, y = var_10485_to_fp16)[name = tensor("op_10486_cast")]; + tensor denom_323_epsilon_0_to_fp16 = const()[name = tensor("denom_323_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_323_cast = rsqrt(epsilon = denom_323_epsilon_0_to_fp16, x = var_10486_cast)[name = tensor("denom_323_cast")]; + tensor out_323_cast = mul(x = zero_mean_323_cast, y = denom_323_cast)[name = tensor("out_323_cast")]; + tensor var_10490_to_fp16 = const()[name = tensor("op_10490_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1517634816)))]; + tensor var_10491_cast = add(x = out_323_cast, y = var_10490_to_fp16)[name = tensor("op_10491_cast")]; + tensor var_10493_to_fp16 = const()[name = tensor("op_10493_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1517637440)))]; + tensor input_613_cast = mul(x = var_10491_cast, y = var_10493_to_fp16)[name = tensor("input_613_cast")]; + tensor var_10501 = const()[name = tensor("op_10501"), val = tensor([1, 1])]; + tensor var_10503 = const()[name = tensor("op_10503"), val = tensor([1, 1])]; + tensor var_10505_pad_type_0 = const()[name = tensor("op_10505_pad_type_0"), val = tensor("custom")]; + tensor var_10505_pad_0 = const()[name = tensor("op_10505_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1517640064))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1527470528))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_9_ff_net_0_proj_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1527470720))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1527478464))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_9_ff_net_0_proj_bias_to_fp16_palettized"), shape = tensor([10240])]; + tensor var_10505_cast = conv(bias = unet_up_blocks_0_attentions_1_transformer_blocks_9_ff_net_0_proj_bias_to_fp16_palettized, dilations = var_10503, groups = var_31, pad = var_10505_pad_0, pad_type = var_10505_pad_type_0, strides = var_10501, weight = unet_up_blocks_0_attentions_1_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized, x = input_613_cast)[name = tensor("op_10505_cast")]; + tensor var_10506_split_sizes_0 = const()[name = tensor("op_10506_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_10506_axis_0 = const()[name = tensor("op_10506_axis_0"), val = tensor(1)]; + tensor var_10506_cast_0, tensor var_10506_cast_1 = split(axis = var_10506_axis_0, split_sizes = var_10506_split_sizes_0, x = var_10505_cast)[name = tensor("op_10506_cast")]; + tensor var_10508_mode_0 = const()[name = tensor("op_10508_mode_0"), val = tensor("EXACT")]; + tensor var_10508_cast = gelu(mode = var_10508_mode_0, x = var_10506_cast_1)[name = tensor("op_10508_cast")]; + tensor input_615_cast = mul(x = var_10506_cast_0, y = var_10508_cast)[name = tensor("input_615_cast")]; + tensor var_10512 = const()[name = tensor("op_10512"), val = tensor([1, 1])]; + tensor var_10514 = const()[name = 
tensor("op_10514"), val = tensor([1, 1])]; + tensor var_10516_pad_type_0 = const()[name = tensor("op_10516_pad_type_0"), val = tensor("custom")]; + tensor var_10516_pad_0 = const()[name = tensor("op_10516_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1527478656))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1532393920))), name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor unet_up_blocks_0_attentions_1_transformer_blocks_9_ff_net_2_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_1_transformer_blocks_9_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1532394112)))]; + tensor var_10516_cast = conv(bias = unet_up_blocks_0_attentions_1_transformer_blocks_9_ff_net_2_bias_to_fp16, dilations = var_10514, groups = var_31, pad = var_10516_pad_0, pad_type = var_10516_pad_type_0, strides = var_10512, weight = unet_up_blocks_0_attentions_1_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized, x = input_615_cast)[name = tensor("op_10516_cast")]; + tensor hidden_states_421_cast = add(x = var_10516_cast, y = inputs_323_cast)[name = tensor("hidden_states_421_cast")]; + tensor var_10518 = const()[name = tensor("op_10518"), val = tensor([2, 1280, 32, 32])]; + tensor input_617_cast = reshape(shape = var_10518, x = hidden_states_421_cast)[name = tensor("input_617_cast")]; + tensor var_10522 = const()[name = tensor("op_10522"), val = tensor([1, 1])]; + tensor var_10524 = const()[name = tensor("op_10524"), val = tensor([1, 1])]; + tensor hidden_states_423_pad_type_0 = const()[name = tensor("hidden_states_423_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_423_pad_0 = const()[name = tensor("hidden_states_423_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_1_proj_out_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1532396736))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1533625600))), name = tensor("unet_up_blocks_0_attentions_1_proj_out_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_1_proj_out_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_1_proj_out_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1533625792)))]; + tensor hidden_states_423_cast = conv(bias = unet_up_blocks_0_attentions_1_proj_out_bias_to_fp16, dilations = var_10524, groups = var_31, pad = hidden_states_423_pad_0, pad_type = hidden_states_423_pad_type_0, strides = var_10522, weight = unet_up_blocks_0_attentions_1_proj_out_weight_to_fp16_palettized, x = input_617_cast)[name = tensor("hidden_states_423_cast")]; + tensor hidden_states_425_cast = add(x = hidden_states_423_cast, y = hidden_states_357_cast)[name = tensor("hidden_states_425_cast")]; + tensor input_619_interleave_0 = const()[name = tensor("input_619_interleave_0"), val = tensor(false)]; + tensor input_619_cast = concat(axis = var_31, interleave = input_619_interleave_0, values = (hidden_states_425_cast, input_115_cast))[name = tensor("input_619_cast")]; + 
tensor reshape_108_shape_0 = const()[name = tensor("reshape_108_shape_0"), val = tensor([2, 32, 60, 32, 32])]; + tensor reshape_108_cast = reshape(shape = reshape_108_shape_0, x = input_619_cast)[name = tensor("reshape_108_cast")]; + tensor reduce_mean_81_axes_0 = const()[name = tensor("reduce_mean_81_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_81_keep_dims_0 = const()[name = tensor("reduce_mean_81_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_81_cast = reduce_mean(axes = reduce_mean_81_axes_0, keep_dims = reduce_mean_81_keep_dims_0, x = reshape_108_cast)[name = tensor("reduce_mean_81_cast")]; + tensor sub_54_cast = sub(x = reshape_108_cast, y = reduce_mean_81_cast)[name = tensor("sub_54_cast")]; + tensor square_27_cast = square(x = sub_54_cast)[name = tensor("square_27_cast")]; + tensor reduce_mean_83_axes_0 = const()[name = tensor("reduce_mean_83_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_83_keep_dims_0 = const()[name = tensor("reduce_mean_83_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_83_cast = reduce_mean(axes = reduce_mean_83_axes_0, keep_dims = reduce_mean_83_keep_dims_0, x = square_27_cast)[name = tensor("reduce_mean_83_cast")]; + tensor add_54_y_0_to_fp16 = const()[name = tensor("add_54_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_54_cast = add(x = reduce_mean_83_cast, y = add_54_y_0_to_fp16)[name = tensor("add_54_cast")]; + tensor sqrt_27_cast = sqrt(x = add_54_cast)[name = tensor("sqrt_27_cast")]; + tensor real_div_27_cast = real_div(x = sub_54_cast, y = sqrt_27_cast)[name = tensor("real_div_27_cast")]; + tensor reshape_109_shape_0 = const()[name = tensor("reshape_109_shape_0"), val = tensor([2, 1920, 32, 32])]; + tensor reshape_109_cast = reshape(shape = reshape_109_shape_0, x = real_div_27_cast)[name = tensor("reshape_109_cast")]; + tensor add_55_mean_0_to_fp16 = const()[name = tensor("add_55_mean_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1533628416)))]; + tensor add_55_variance_0_to_fp16 = const()[name = tensor("add_55_variance_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1533632320)))]; + tensor add_55_gamma_0_to_fp16 = const()[name = tensor("add_55_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1533636224)))]; + tensor add_55_beta_0_to_fp16 = const()[name = tensor("add_55_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1533640128)))]; + tensor add_55_epsilon_0_to_fp16 = const()[name = tensor("add_55_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_55_cast = batch_norm(beta = add_55_beta_0_to_fp16, epsilon = add_55_epsilon_0_to_fp16, gamma = add_55_gamma_0_to_fp16, mean = add_55_mean_0_to_fp16, variance = add_55_variance_0_to_fp16, x = reshape_109_cast)[name = tensor("add_55_cast")]; + tensor input_623_cast = silu(x = add_55_cast)[name = tensor("input_623_cast")]; + tensor var_10542 = const()[name = tensor("op_10542"), val = tensor([1, 1])]; + tensor var_10544 = const()[name = tensor("op_10544"), val = tensor([1, 1])]; + tensor hidden_states_427_pad_type_0 = const()[name = tensor("hidden_states_427_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_427_pad_0 = const()[name = tensor("hidden_states_427_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor unet_up_blocks_0_resnets_2_conv1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(1533644032))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1550232896))), name = tensor("unet_up_blocks_0_resnets_2_conv1_weight_to_fp16_palettized"), shape = tensor([1280, 1920, 3, 3])]; + tensor unet_up_blocks_0_resnets_2_conv1_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_resnets_2_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1550233088)))]; + tensor hidden_states_427_cast = conv(bias = unet_up_blocks_0_resnets_2_conv1_bias_to_fp16, dilations = var_10544, groups = var_31, pad = hidden_states_427_pad_0, pad_type = hidden_states_427_pad_type_0, strides = var_10542, weight = unet_up_blocks_0_resnets_2_conv1_weight_to_fp16_palettized, x = input_623_cast)[name = tensor("hidden_states_427_cast")]; + tensor var_10550 = const()[name = tensor("op_10550"), val = tensor([1, 1])]; + tensor var_10552 = const()[name = tensor("op_10552"), val = tensor([1, 1])]; + tensor temb_21_pad_type_0 = const()[name = tensor("temb_21_pad_type_0"), val = tensor("custom")]; + tensor temb_21_pad_0 = const()[name = tensor("temb_21_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_resnets_2_time_emb_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1550235712))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1551464576))), name = tensor("unet_up_blocks_0_resnets_2_time_emb_proj_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_up_blocks_0_resnets_2_time_emb_proj_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_resnets_2_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1551464768)))]; + tensor temb_21_cast = conv(bias = unet_up_blocks_0_resnets_2_time_emb_proj_bias_to_fp16, dilations = var_10552, groups = var_31, pad = temb_21_pad_0, pad_type = temb_21_pad_type_0, strides = var_10550, weight = unet_up_blocks_0_resnets_2_time_emb_proj_weight_to_fp16_palettized, x = input_21_cast)[name = tensor("temb_21_cast")]; + tensor input_627_cast = add(x = hidden_states_427_cast, y = temb_21_cast)[name = tensor("input_627_cast")]; + tensor reshape_112_shape_0 = const()[name = tensor("reshape_112_shape_0"), val = tensor([2, 32, 40, 32, 32])]; + tensor reshape_112_cast = reshape(shape = reshape_112_shape_0, x = input_627_cast)[name = tensor("reshape_112_cast")]; + tensor reduce_mean_84_axes_0 = const()[name = tensor("reduce_mean_84_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_84_keep_dims_0 = const()[name = tensor("reduce_mean_84_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_84_cast = reduce_mean(axes = reduce_mean_84_axes_0, keep_dims = reduce_mean_84_keep_dims_0, x = reshape_112_cast)[name = tensor("reduce_mean_84_cast")]; + tensor sub_56_cast = sub(x = reshape_112_cast, y = reduce_mean_84_cast)[name = tensor("sub_56_cast")]; + tensor square_28_cast = square(x = sub_56_cast)[name = tensor("square_28_cast")]; + tensor reduce_mean_86_axes_0 = const()[name = tensor("reduce_mean_86_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_86_keep_dims_0 = const()[name = tensor("reduce_mean_86_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_86_cast = reduce_mean(axes = reduce_mean_86_axes_0, keep_dims = reduce_mean_86_keep_dims_0, x = square_28_cast)[name = 
tensor("reduce_mean_86_cast")]; + tensor add_56_y_0_to_fp16 = const()[name = tensor("add_56_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_56_cast = add(x = reduce_mean_86_cast, y = add_56_y_0_to_fp16)[name = tensor("add_56_cast")]; + tensor sqrt_28_cast = sqrt(x = add_56_cast)[name = tensor("sqrt_28_cast")]; + tensor real_div_28_cast = real_div(x = sub_56_cast, y = sqrt_28_cast)[name = tensor("real_div_28_cast")]; + tensor reshape_113_shape_0 = const()[name = tensor("reshape_113_shape_0"), val = tensor([2, 1280, 32, 32])]; + tensor reshape_113_cast = reshape(shape = reshape_113_shape_0, x = real_div_28_cast)[name = tensor("reshape_113_cast")]; + tensor add_57_gamma_0_to_fp16 = const()[name = tensor("add_57_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1551467392)))]; + tensor add_57_beta_0_to_fp16 = const()[name = tensor("add_57_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1551470016)))]; + tensor add_57_epsilon_0_to_fp16 = const()[name = tensor("add_57_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_57_cast = batch_norm(beta = add_57_beta_0_to_fp16, epsilon = add_57_epsilon_0_to_fp16, gamma = add_57_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_113_cast)[name = tensor("add_57_cast")]; + tensor input_631_cast = silu(x = add_57_cast)[name = tensor("input_631_cast")]; + tensor var_10562 = const()[name = tensor("op_10562"), val = tensor([1, 1])]; + tensor var_10564 = const()[name = tensor("op_10564"), val = tensor([1, 1])]; + tensor hidden_states_429_pad_type_0 = const()[name = tensor("hidden_states_429_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_429_pad_0 = const()[name = tensor("hidden_states_429_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor unet_up_blocks_0_resnets_2_conv2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1551472640))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1562531904))), name = tensor("unet_up_blocks_0_resnets_2_conv2_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 3, 3])]; + tensor unet_up_blocks_0_resnets_2_conv2_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_resnets_2_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1562532096)))]; + tensor hidden_states_429_cast = conv(bias = unet_up_blocks_0_resnets_2_conv2_bias_to_fp16, dilations = var_10564, groups = var_31, pad = hidden_states_429_pad_0, pad_type = hidden_states_429_pad_type_0, strides = var_10562, weight = unet_up_blocks_0_resnets_2_conv2_weight_to_fp16_palettized, x = input_631_cast)[name = tensor("hidden_states_429_cast")]; + tensor var_10569 = const()[name = tensor("op_10569"), val = tensor([1, 1])]; + tensor var_10571 = const()[name = tensor("op_10571"), val = tensor([1, 1])]; + tensor x_9_pad_type_0 = const()[name = tensor("x_9_pad_type_0"), val = tensor("custom")]; + tensor x_9_pad_0 = const()[name = tensor("x_9_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_resnets_2_conv_shortcut_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1562534720))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1564377984))), name = 
tensor("unet_up_blocks_0_resnets_2_conv_shortcut_weight_to_fp16_palettized"), shape = tensor([1280, 1920, 1, 1])]; + tensor unet_up_blocks_0_resnets_2_conv_shortcut_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_resnets_2_conv_shortcut_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1564378176)))]; + tensor x_9_cast = conv(bias = unet_up_blocks_0_resnets_2_conv_shortcut_bias_to_fp16, dilations = var_10571, groups = var_31, pad = x_9_pad_0, pad_type = x_9_pad_type_0, strides = var_10569, weight = unet_up_blocks_0_resnets_2_conv_shortcut_weight_to_fp16_palettized, x = input_619_cast)[name = tensor("x_9_cast")]; + tensor hidden_states_431_cast = add(x = x_9_cast, y = hidden_states_429_cast)[name = tensor("hidden_states_431_cast")]; + tensor reshape_116_shape_0 = const()[name = tensor("reshape_116_shape_0"), val = tensor([2, 32, 40, 32, 32])]; + tensor reshape_116_cast = reshape(shape = reshape_116_shape_0, x = hidden_states_431_cast)[name = tensor("reshape_116_cast")]; + tensor reduce_mean_87_axes_0 = const()[name = tensor("reduce_mean_87_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_87_keep_dims_0 = const()[name = tensor("reduce_mean_87_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_87_cast = reduce_mean(axes = reduce_mean_87_axes_0, keep_dims = reduce_mean_87_keep_dims_0, x = reshape_116_cast)[name = tensor("reduce_mean_87_cast")]; + tensor sub_58_cast = sub(x = reshape_116_cast, y = reduce_mean_87_cast)[name = tensor("sub_58_cast")]; + tensor square_29_cast = square(x = sub_58_cast)[name = tensor("square_29_cast")]; + tensor reduce_mean_89_axes_0 = const()[name = tensor("reduce_mean_89_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_89_keep_dims_0 = const()[name = tensor("reduce_mean_89_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_89_cast = reduce_mean(axes = reduce_mean_89_axes_0, keep_dims = reduce_mean_89_keep_dims_0, x = square_29_cast)[name = tensor("reduce_mean_89_cast")]; + tensor add_58_y_0_to_fp16 = const()[name = tensor("add_58_y_0_to_fp16"), val = tensor(0x1.1p-20)]; + tensor add_58_cast = add(x = reduce_mean_89_cast, y = add_58_y_0_to_fp16)[name = tensor("add_58_cast")]; + tensor sqrt_29_cast = sqrt(x = add_58_cast)[name = tensor("sqrt_29_cast")]; + tensor real_div_29_cast = real_div(x = sub_58_cast, y = sqrt_29_cast)[name = tensor("real_div_29_cast")]; + tensor reshape_117_shape_0 = const()[name = tensor("reshape_117_shape_0"), val = tensor([2, 1280, 32, 32])]; + tensor reshape_117_cast = reshape(shape = reshape_117_shape_0, x = real_div_29_cast)[name = tensor("reshape_117_cast")]; + tensor add_59_gamma_0_to_fp16 = const()[name = tensor("add_59_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1564380800)))]; + tensor add_59_beta_0_to_fp16 = const()[name = tensor("add_59_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1564383424)))]; + tensor add_59_epsilon_0_to_fp16 = const()[name = tensor("add_59_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_59_cast = batch_norm(beta = add_59_beta_0_to_fp16, epsilon = add_59_epsilon_0_to_fp16, gamma = add_59_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_117_cast)[name = tensor("add_59_cast")]; + tensor var_10609 = const()[name = tensor("op_10609"), val = tensor([1, 1])]; + tensor var_10611 = const()[name = tensor("op_10611"), val = tensor([1, 1])]; + tensor 
hidden_states_433_pad_type_0 = const()[name = tensor("hidden_states_433_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_433_pad_0 = const()[name = tensor("hidden_states_433_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_proj_in_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1564386048))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1565614912))), name = tensor("unet_up_blocks_0_attentions_2_proj_in_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_2_proj_in_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_2_proj_in_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1565615104)))]; + tensor hidden_states_433_cast = conv(bias = unet_up_blocks_0_attentions_2_proj_in_bias_to_fp16, dilations = var_10611, groups = var_31, pad = hidden_states_433_pad_0, pad_type = hidden_states_433_pad_type_0, strides = var_10609, weight = unet_up_blocks_0_attentions_2_proj_in_weight_to_fp16_palettized, x = add_59_cast)[name = tensor("hidden_states_433_cast")]; + tensor var_10616 = const()[name = tensor("op_10616"), val = tensor([2, 1280, 1, 1024])]; + tensor inputs_325_cast = reshape(shape = var_10616, x = hidden_states_433_cast)[name = tensor("inputs_325_cast")]; + tensor var_10626 = const()[name = tensor("op_10626"), val = tensor([1])]; + tensor channels_mean_325_cast = reduce_mean(axes = var_10626, keep_dims = var_23, x = inputs_325_cast)[name = tensor("channels_mean_325_cast")]; + tensor zero_mean_325_cast = sub(x = inputs_325_cast, y = channels_mean_325_cast)[name = tensor("zero_mean_325_cast")]; + tensor zero_mean_sq_325_cast = mul(x = zero_mean_325_cast, y = zero_mean_325_cast)[name = tensor("zero_mean_sq_325_cast")]; + tensor var_10630 = const()[name = tensor("op_10630"), val = tensor([1])]; + tensor var_10631_cast = reduce_mean(axes = var_10630, keep_dims = var_23, x = zero_mean_sq_325_cast)[name = tensor("op_10631_cast")]; + tensor var_10632_to_fp16 = const()[name = tensor("op_10632_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_10633_cast = add(x = var_10631_cast, y = var_10632_to_fp16)[name = tensor("op_10633_cast")]; + tensor denom_325_epsilon_0_to_fp16 = const()[name = tensor("denom_325_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_325_cast = rsqrt(epsilon = denom_325_epsilon_0_to_fp16, x = var_10633_cast)[name = tensor("denom_325_cast")]; + tensor out_325_cast = mul(x = zero_mean_325_cast, y = denom_325_cast)[name = tensor("out_325_cast")]; + tensor var_10637_to_fp16 = const()[name = tensor("op_10637_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1565617728)))]; + tensor var_10638_cast = add(x = out_325_cast, y = var_10637_to_fp16)[name = tensor("op_10638_cast")]; + tensor var_10640_to_fp16 = const()[name = tensor("op_10640_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1565620352)))]; + tensor hidden_states_435_cast = mul(x = var_10638_cast, y = var_10640_to_fp16)[name = tensor("hidden_states_435_cast")]; + tensor var_10647 = const()[name = tensor("op_10647"), val = tensor([1, 1])]; + tensor var_10649 = const()[name = tensor("op_10649"), val = tensor([1, 1])]; + tensor q_217_pad_type_0 = const()[name = tensor("q_217_pad_type_0"), val = tensor("custom")]; + tensor q_217_pad_0 = 
const()[name = tensor("q_217_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1565622976))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1566851840))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_217_cast = conv(dilations = var_10649, groups = var_31, pad = q_217_pad_0, pad_type = q_217_pad_type_0, strides = var_10647, weight = unet_up_blocks_0_attentions_2_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_435_cast)[name = tensor("q_217_cast")]; + tensor var_10653 = const()[name = tensor("op_10653"), val = tensor([1, 1])]; + tensor var_10655 = const()[name = tensor("op_10655"), val = tensor([1, 1])]; + tensor k_217_pad_type_0 = const()[name = tensor("k_217_pad_type_0"), val = tensor("custom")]; + tensor k_217_pad_0 = const()[name = tensor("k_217_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1566852032))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1568080896))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_217_cast = conv(dilations = var_10655, groups = var_31, pad = k_217_pad_0, pad_type = k_217_pad_type_0, strides = var_10653, weight = unet_up_blocks_0_attentions_2_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_435_cast)[name = tensor("k_217_cast")]; + tensor var_10659 = const()[name = tensor("op_10659"), val = tensor([1, 1])]; + tensor var_10661 = const()[name = tensor("op_10661"), val = tensor([1, 1])]; + tensor v_217_pad_type_0 = const()[name = tensor("v_217_pad_type_0"), val = tensor("custom")]; + tensor v_217_pad_0 = const()[name = tensor("v_217_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1568081088))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1569309952))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_217_cast = conv(dilations = var_10661, groups = var_31, pad = v_217_pad_0, pad_type = v_217_pad_type_0, strides = var_10659, weight = unet_up_blocks_0_attentions_2_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_435_cast)[name = tensor("v_217_cast")]; + tensor var_10665 = const()[name = tensor("op_10665"), val = tensor([2, 20, 64, -1])]; + tensor var_10666_cast = reshape(shape = var_10665, x = q_217_cast)[name = tensor("op_10666_cast")]; + tensor var_10667 = const()[name = tensor("op_10667"), val = tensor([2, 20, 64, -1])]; + tensor var_10668_cast = reshape(shape = var_10667, x = k_217_cast)[name = tensor("op_10668_cast")]; + tensor var_10669 = const()[name = tensor("op_10669"), val = tensor([2, 20, 64, -1])]; + tensor var_10670_cast = 
reshape(shape = var_10669, x = v_217_cast)[name = tensor("op_10670_cast")]; + tensor attn_weights_433_transpose_x_0 = const()[name = tensor("attn_weights_433_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_433_transpose_y_0 = const()[name = tensor("attn_weights_433_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_433_cast = matmul(transpose_x = attn_weights_433_transpose_x_0, transpose_y = attn_weights_433_transpose_y_0, x = var_10666_cast, y = var_10668_cast)[name = tensor("attn_weights_433_cast")]; + tensor attn_weights_435_cast = mul(x = attn_weights_433_cast, y = var_12_to_fp16)[name = tensor("attn_weights_435_cast")]; + tensor var_10674_cast = softmax(axis = var_18, x = attn_weights_435_cast)[name = tensor("op_10674_cast")]; + tensor attn_217_transpose_x_0 = const()[name = tensor("attn_217_transpose_x_0"), val = tensor(false)]; + tensor attn_217_transpose_y_0 = const()[name = tensor("attn_217_transpose_y_0"), val = tensor(true)]; + tensor attn_217_cast = matmul(transpose_x = attn_217_transpose_x_0, transpose_y = attn_217_transpose_y_0, x = var_10670_cast, y = var_10674_cast)[name = tensor("attn_217_cast")]; + tensor var_10678 = const()[name = tensor("op_10678"), val = tensor([2, 1280, 1, -1])]; + tensor input_635_cast = reshape(shape = var_10678, x = attn_217_cast)[name = tensor("input_635_cast")]; + tensor var_10683 = const()[name = tensor("op_10683"), val = tensor([1, 1])]; + tensor var_10685 = const()[name = tensor("op_10685"), val = tensor([1, 1])]; + tensor var_10687_pad_type_0 = const()[name = tensor("op_10687_pad_type_0"), val = tensor("custom")]; + tensor var_10687_pad_0 = const()[name = tensor("op_10687_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1569310144))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1570539008))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_0_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_0_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1570539200)))]; + tensor var_10687_cast = conv(bias = unet_up_blocks_0_attentions_2_transformer_blocks_0_attn1_to_out_0_bias_to_fp16, dilations = var_10685, groups = var_31, pad = var_10687_pad_0, pad_type = var_10687_pad_type_0, strides = var_10683, weight = unet_up_blocks_0_attentions_2_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized, x = input_635_cast)[name = tensor("op_10687_cast")]; + tensor inputs_327_cast = add(x = var_10687_cast, y = inputs_325_cast)[name = tensor("inputs_327_cast")]; + tensor var_10691 = const()[name = tensor("op_10691"), val = tensor([1])]; + tensor channels_mean_327_cast = reduce_mean(axes = var_10691, keep_dims = var_23, x = inputs_327_cast)[name = tensor("channels_mean_327_cast")]; + tensor zero_mean_327_cast = sub(x = inputs_327_cast, y = channels_mean_327_cast)[name = tensor("zero_mean_327_cast")]; + tensor zero_mean_sq_327_cast = mul(x = zero_mean_327_cast, y = zero_mean_327_cast)[name = tensor("zero_mean_sq_327_cast")]; + tensor var_10695 = const()[name = tensor("op_10695"), val = tensor([1])]; + tensor var_10696_cast 
= reduce_mean(axes = var_10695, keep_dims = var_23, x = zero_mean_sq_327_cast)[name = tensor("op_10696_cast")]; + tensor var_10697_to_fp16 = const()[name = tensor("op_10697_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_10698_cast = add(x = var_10696_cast, y = var_10697_to_fp16)[name = tensor("op_10698_cast")]; + tensor denom_327_epsilon_0_to_fp16 = const()[name = tensor("denom_327_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_327_cast = rsqrt(epsilon = denom_327_epsilon_0_to_fp16, x = var_10698_cast)[name = tensor("denom_327_cast")]; + tensor out_327_cast = mul(x = zero_mean_327_cast, y = denom_327_cast)[name = tensor("out_327_cast")]; + tensor var_10702_to_fp16 = const()[name = tensor("op_10702_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1570541824)))]; + tensor var_10703_cast = add(x = out_327_cast, y = var_10702_to_fp16)[name = tensor("op_10703_cast")]; + tensor var_10705_to_fp16 = const()[name = tensor("op_10705_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1570544448)))]; + tensor hidden_states_437_cast = mul(x = var_10703_cast, y = var_10705_to_fp16)[name = tensor("hidden_states_437_cast")]; + tensor var_10712 = const()[name = tensor("op_10712"), val = tensor([1, 1])]; + tensor var_10714 = const()[name = tensor("op_10714"), val = tensor([1, 1])]; + tensor q_219_pad_type_0 = const()[name = tensor("q_219_pad_type_0"), val = tensor("custom")]; + tensor q_219_pad_0 = const()[name = tensor("q_219_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1570547072))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1571775936))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_219_cast = conv(dilations = var_10714, groups = var_31, pad = q_219_pad_0, pad_type = q_219_pad_type_0, strides = var_10712, weight = unet_up_blocks_0_attentions_2_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_437_cast)[name = tensor("q_219_cast")]; + tensor var_10718 = const()[name = tensor("op_10718"), val = tensor([1, 1])]; + tensor var_10720 = const()[name = tensor("op_10720"), val = tensor([1, 1])]; + tensor k_219_pad_type_0 = const()[name = tensor("k_219_pad_type_0"), val = tensor("custom")]; + tensor k_219_pad_0 = const()[name = tensor("k_219_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1571776128))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1573742272))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_219_cast = conv(dilations = var_10720, groups = var_31, pad = k_219_pad_0, pad_type = k_219_pad_type_0, strides = var_10718, weight = unet_up_blocks_0_attentions_2_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_219_cast")]; + tensor var_10724 = const()[name = tensor("op_10724"), val = tensor([1, 1])]; + tensor 
var_10726 = const()[name = tensor("op_10726"), val = tensor([1, 1])]; + tensor v_219_pad_type_0 = const()[name = tensor("v_219_pad_type_0"), val = tensor("custom")]; + tensor v_219_pad_0 = const()[name = tensor("v_219_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1573742464))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1575708608))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_219_cast = conv(dilations = var_10726, groups = var_31, pad = v_219_pad_0, pad_type = v_219_pad_type_0, strides = var_10724, weight = unet_up_blocks_0_attentions_2_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_219_cast")]; + tensor var_10730 = const()[name = tensor("op_10730"), val = tensor([2, 20, 64, -1])]; + tensor var_10731_cast = reshape(shape = var_10730, x = q_219_cast)[name = tensor("op_10731_cast")]; + tensor var_10732 = const()[name = tensor("op_10732"), val = tensor([2, 20, 64, -1])]; + tensor var_10733_cast = reshape(shape = var_10732, x = k_219_cast)[name = tensor("op_10733_cast")]; + tensor var_10734 = const()[name = tensor("op_10734"), val = tensor([2, 20, 64, -1])]; + tensor var_10735_cast = reshape(shape = var_10734, x = v_219_cast)[name = tensor("op_10735_cast")]; + tensor attn_weights_437_transpose_x_0 = const()[name = tensor("attn_weights_437_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_437_transpose_y_0 = const()[name = tensor("attn_weights_437_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_437_cast = matmul(transpose_x = attn_weights_437_transpose_x_0, transpose_y = attn_weights_437_transpose_y_0, x = var_10731_cast, y = var_10733_cast)[name = tensor("attn_weights_437_cast")]; + tensor attn_weights_439_cast = mul(x = attn_weights_437_cast, y = var_12_to_fp16)[name = tensor("attn_weights_439_cast")]; + tensor var_10739_cast = softmax(axis = var_18, x = attn_weights_439_cast)[name = tensor("op_10739_cast")]; + tensor attn_219_transpose_x_0 = const()[name = tensor("attn_219_transpose_x_0"), val = tensor(false)]; + tensor attn_219_transpose_y_0 = const()[name = tensor("attn_219_transpose_y_0"), val = tensor(true)]; + tensor attn_219_cast = matmul(transpose_x = attn_219_transpose_x_0, transpose_y = attn_219_transpose_y_0, x = var_10735_cast, y = var_10739_cast)[name = tensor("attn_219_cast")]; + tensor var_10743 = const()[name = tensor("op_10743"), val = tensor([2, 1280, 1, -1])]; + tensor input_637_cast = reshape(shape = var_10743, x = attn_219_cast)[name = tensor("input_637_cast")]; + tensor var_10748 = const()[name = tensor("op_10748"), val = tensor([1, 1])]; + tensor var_10750 = const()[name = tensor("op_10750"), val = tensor([1, 1])]; + tensor var_10752_pad_type_0 = const()[name = tensor("op_10752_pad_type_0"), val = tensor("custom")]; + tensor var_10752_pad_0 = const()[name = tensor("op_10752_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1575708800))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(1576937664))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_0_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_0_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1576937856)))]; + tensor var_10752_cast = conv(bias = unet_up_blocks_0_attentions_2_transformer_blocks_0_attn2_to_out_0_bias_to_fp16, dilations = var_10750, groups = var_31, pad = var_10752_pad_0, pad_type = var_10752_pad_type_0, strides = var_10748, weight = unet_up_blocks_0_attentions_2_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized, x = input_637_cast)[name = tensor("op_10752_cast")]; + tensor inputs_329_cast = add(x = var_10752_cast, y = inputs_327_cast)[name = tensor("inputs_329_cast")]; + tensor var_10756 = const()[name = tensor("op_10756"), val = tensor([1])]; + tensor channels_mean_329_cast = reduce_mean(axes = var_10756, keep_dims = var_23, x = inputs_329_cast)[name = tensor("channels_mean_329_cast")]; + tensor zero_mean_329_cast = sub(x = inputs_329_cast, y = channels_mean_329_cast)[name = tensor("zero_mean_329_cast")]; + tensor zero_mean_sq_329_cast = mul(x = zero_mean_329_cast, y = zero_mean_329_cast)[name = tensor("zero_mean_sq_329_cast")]; + tensor var_10760 = const()[name = tensor("op_10760"), val = tensor([1])]; + tensor var_10761_cast = reduce_mean(axes = var_10760, keep_dims = var_23, x = zero_mean_sq_329_cast)[name = tensor("op_10761_cast")]; + tensor var_10762_to_fp16 = const()[name = tensor("op_10762_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_10763_cast = add(x = var_10761_cast, y = var_10762_to_fp16)[name = tensor("op_10763_cast")]; + tensor denom_329_epsilon_0_to_fp16 = const()[name = tensor("denom_329_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_329_cast = rsqrt(epsilon = denom_329_epsilon_0_to_fp16, x = var_10763_cast)[name = tensor("denom_329_cast")]; + tensor out_329_cast = mul(x = zero_mean_329_cast, y = denom_329_cast)[name = tensor("out_329_cast")]; + tensor var_10767_to_fp16 = const()[name = tensor("op_10767_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1576940480)))]; + tensor var_10768_cast = add(x = out_329_cast, y = var_10767_to_fp16)[name = tensor("op_10768_cast")]; + tensor var_10770_to_fp16 = const()[name = tensor("op_10770_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1576943104)))]; + tensor input_639_cast = mul(x = var_10768_cast, y = var_10770_to_fp16)[name = tensor("input_639_cast")]; + tensor var_10778 = const()[name = tensor("op_10778"), val = tensor([1, 1])]; + tensor var_10780 = const()[name = tensor("op_10780"), val = tensor([1, 1])]; + tensor var_10782_pad_type_0 = const()[name = tensor("op_10782_pad_type_0"), val = tensor("custom")]; + tensor var_10782_pad_0 = const()[name = tensor("op_10782_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1576945728))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1586776192))), name = 
tensor("unet_up_blocks_0_attentions_2_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_0_ff_net_0_proj_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1586776384))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1586784128))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_0_ff_net_0_proj_bias_to_fp16_palettized"), shape = tensor([10240])]; + tensor var_10782_cast = conv(bias = unet_up_blocks_0_attentions_2_transformer_blocks_0_ff_net_0_proj_bias_to_fp16_palettized, dilations = var_10780, groups = var_31, pad = var_10782_pad_0, pad_type = var_10782_pad_type_0, strides = var_10778, weight = unet_up_blocks_0_attentions_2_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized, x = input_639_cast)[name = tensor("op_10782_cast")]; + tensor var_10783_split_sizes_0 = const()[name = tensor("op_10783_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_10783_axis_0 = const()[name = tensor("op_10783_axis_0"), val = tensor(1)]; + tensor var_10783_cast_0, tensor var_10783_cast_1 = split(axis = var_10783_axis_0, split_sizes = var_10783_split_sizes_0, x = var_10782_cast)[name = tensor("op_10783_cast")]; + tensor var_10785_mode_0 = const()[name = tensor("op_10785_mode_0"), val = tensor("EXACT")]; + tensor var_10785_cast = gelu(mode = var_10785_mode_0, x = var_10783_cast_1)[name = tensor("op_10785_cast")]; + tensor input_641_cast = mul(x = var_10783_cast_0, y = var_10785_cast)[name = tensor("input_641_cast")]; + tensor var_10789 = const()[name = tensor("op_10789"), val = tensor([1, 1])]; + tensor var_10791 = const()[name = tensor("op_10791"), val = tensor([1, 1])]; + tensor var_10793_pad_type_0 = const()[name = tensor("op_10793_pad_type_0"), val = tensor("custom")]; + tensor var_10793_pad_0 = const()[name = tensor("op_10793_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1586784320))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1591699584))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_0_ff_net_2_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_0_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1591699776)))]; + tensor var_10793_cast = conv(bias = unet_up_blocks_0_attentions_2_transformer_blocks_0_ff_net_2_bias_to_fp16, dilations = var_10791, groups = var_31, pad = var_10793_pad_0, pad_type = var_10793_pad_type_0, strides = var_10789, weight = unet_up_blocks_0_attentions_2_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized, x = input_641_cast)[name = tensor("op_10793_cast")]; + tensor inputs_331_cast = add(x = var_10793_cast, y = inputs_329_cast)[name = tensor("inputs_331_cast")]; + tensor var_10803 = const()[name = tensor("op_10803"), val = tensor([1])]; + tensor channels_mean_331_cast = reduce_mean(axes = var_10803, keep_dims = var_23, x = inputs_331_cast)[name = tensor("channels_mean_331_cast")]; + tensor zero_mean_331_cast = sub(x = 
inputs_331_cast, y = channels_mean_331_cast)[name = tensor("zero_mean_331_cast")]; + tensor zero_mean_sq_331_cast = mul(x = zero_mean_331_cast, y = zero_mean_331_cast)[name = tensor("zero_mean_sq_331_cast")]; + tensor var_10807 = const()[name = tensor("op_10807"), val = tensor([1])]; + tensor var_10808_cast = reduce_mean(axes = var_10807, keep_dims = var_23, x = zero_mean_sq_331_cast)[name = tensor("op_10808_cast")]; + tensor var_10809_to_fp16 = const()[name = tensor("op_10809_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_10810_cast = add(x = var_10808_cast, y = var_10809_to_fp16)[name = tensor("op_10810_cast")]; + tensor denom_331_epsilon_0_to_fp16 = const()[name = tensor("denom_331_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_331_cast = rsqrt(epsilon = denom_331_epsilon_0_to_fp16, x = var_10810_cast)[name = tensor("denom_331_cast")]; + tensor out_331_cast = mul(x = zero_mean_331_cast, y = denom_331_cast)[name = tensor("out_331_cast")]; + tensor var_10814_to_fp16 = const()[name = tensor("op_10814_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1591702400)))]; + tensor var_10815_cast = add(x = out_331_cast, y = var_10814_to_fp16)[name = tensor("op_10815_cast")]; + tensor var_10817_to_fp16 = const()[name = tensor("op_10817_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1591705024)))]; + tensor hidden_states_441_cast = mul(x = var_10815_cast, y = var_10817_to_fp16)[name = tensor("hidden_states_441_cast")]; + tensor var_10824 = const()[name = tensor("op_10824"), val = tensor([1, 1])]; + tensor var_10826 = const()[name = tensor("op_10826"), val = tensor([1, 1])]; + tensor q_221_pad_type_0 = const()[name = tensor("q_221_pad_type_0"), val = tensor("custom")]; + tensor q_221_pad_0 = const()[name = tensor("q_221_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1591707648))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1592936512))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_221_cast = conv(dilations = var_10826, groups = var_31, pad = q_221_pad_0, pad_type = q_221_pad_type_0, strides = var_10824, weight = unet_up_blocks_0_attentions_2_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_441_cast)[name = tensor("q_221_cast")]; + tensor var_10830 = const()[name = tensor("op_10830"), val = tensor([1, 1])]; + tensor var_10832 = const()[name = tensor("op_10832"), val = tensor([1, 1])]; + tensor k_221_pad_type_0 = const()[name = tensor("k_221_pad_type_0"), val = tensor("custom")]; + tensor k_221_pad_0 = const()[name = tensor("k_221_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1592936704))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1594165568))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_221_cast = conv(dilations = var_10832, groups = var_31, pad = 
k_221_pad_0, pad_type = k_221_pad_type_0, strides = var_10830, weight = unet_up_blocks_0_attentions_2_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_441_cast)[name = tensor("k_221_cast")]; + tensor var_10836 = const()[name = tensor("op_10836"), val = tensor([1, 1])]; + tensor var_10838 = const()[name = tensor("op_10838"), val = tensor([1, 1])]; + tensor v_221_pad_type_0 = const()[name = tensor("v_221_pad_type_0"), val = tensor("custom")]; + tensor v_221_pad_0 = const()[name = tensor("v_221_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1594165760))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1595394624))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_221_cast = conv(dilations = var_10838, groups = var_31, pad = v_221_pad_0, pad_type = v_221_pad_type_0, strides = var_10836, weight = unet_up_blocks_0_attentions_2_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_441_cast)[name = tensor("v_221_cast")]; + tensor var_10842 = const()[name = tensor("op_10842"), val = tensor([2, 20, 64, -1])]; + tensor var_10843_cast = reshape(shape = var_10842, x = q_221_cast)[name = tensor("op_10843_cast")]; + tensor var_10844 = const()[name = tensor("op_10844"), val = tensor([2, 20, 64, -1])]; + tensor var_10845_cast = reshape(shape = var_10844, x = k_221_cast)[name = tensor("op_10845_cast")]; + tensor var_10846 = const()[name = tensor("op_10846"), val = tensor([2, 20, 64, -1])]; + tensor var_10847_cast = reshape(shape = var_10846, x = v_221_cast)[name = tensor("op_10847_cast")]; + tensor attn_weights_441_transpose_x_0 = const()[name = tensor("attn_weights_441_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_441_transpose_y_0 = const()[name = tensor("attn_weights_441_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_441_cast = matmul(transpose_x = attn_weights_441_transpose_x_0, transpose_y = attn_weights_441_transpose_y_0, x = var_10843_cast, y = var_10845_cast)[name = tensor("attn_weights_441_cast")]; + tensor attn_weights_443_cast = mul(x = attn_weights_441_cast, y = var_12_to_fp16)[name = tensor("attn_weights_443_cast")]; + tensor var_10851_cast = softmax(axis = var_18, x = attn_weights_443_cast)[name = tensor("op_10851_cast")]; + tensor attn_221_transpose_x_0 = const()[name = tensor("attn_221_transpose_x_0"), val = tensor(false)]; + tensor attn_221_transpose_y_0 = const()[name = tensor("attn_221_transpose_y_0"), val = tensor(true)]; + tensor attn_221_cast = matmul(transpose_x = attn_221_transpose_x_0, transpose_y = attn_221_transpose_y_0, x = var_10847_cast, y = var_10851_cast)[name = tensor("attn_221_cast")]; + tensor var_10855 = const()[name = tensor("op_10855"), val = tensor([2, 1280, 1, -1])]; + tensor input_643_cast = reshape(shape = var_10855, x = attn_221_cast)[name = tensor("input_643_cast")]; + tensor var_10860 = const()[name = tensor("op_10860"), val = tensor([1, 1])]; + tensor var_10862 = const()[name = tensor("op_10862"), val = tensor([1, 1])]; + tensor var_10864_pad_type_0 = const()[name = tensor("op_10864_pad_type_0"), val = tensor("custom")]; + tensor var_10864_pad_0 = const()[name = tensor("op_10864_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
unet_up_blocks_0_attentions_2_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1595394816))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1596623680))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_1_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_1_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1596623872)))]; + tensor var_10864_cast = conv(bias = unet_up_blocks_0_attentions_2_transformer_blocks_1_attn1_to_out_0_bias_to_fp16, dilations = var_10862, groups = var_31, pad = var_10864_pad_0, pad_type = var_10864_pad_type_0, strides = var_10860, weight = unet_up_blocks_0_attentions_2_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized, x = input_643_cast)[name = tensor("op_10864_cast")]; + tensor inputs_333_cast = add(x = var_10864_cast, y = inputs_331_cast)[name = tensor("inputs_333_cast")]; + tensor var_10868 = const()[name = tensor("op_10868"), val = tensor([1])]; + tensor channels_mean_333_cast = reduce_mean(axes = var_10868, keep_dims = var_23, x = inputs_333_cast)[name = tensor("channels_mean_333_cast")]; + tensor zero_mean_333_cast = sub(x = inputs_333_cast, y = channels_mean_333_cast)[name = tensor("zero_mean_333_cast")]; + tensor zero_mean_sq_333_cast = mul(x = zero_mean_333_cast, y = zero_mean_333_cast)[name = tensor("zero_mean_sq_333_cast")]; + tensor var_10872 = const()[name = tensor("op_10872"), val = tensor([1])]; + tensor var_10873_cast = reduce_mean(axes = var_10872, keep_dims = var_23, x = zero_mean_sq_333_cast)[name = tensor("op_10873_cast")]; + tensor var_10874_to_fp16 = const()[name = tensor("op_10874_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_10875_cast = add(x = var_10873_cast, y = var_10874_to_fp16)[name = tensor("op_10875_cast")]; + tensor denom_333_epsilon_0_to_fp16 = const()[name = tensor("denom_333_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_333_cast = rsqrt(epsilon = denom_333_epsilon_0_to_fp16, x = var_10875_cast)[name = tensor("denom_333_cast")]; + tensor out_333_cast = mul(x = zero_mean_333_cast, y = denom_333_cast)[name = tensor("out_333_cast")]; + tensor var_10879_to_fp16 = const()[name = tensor("op_10879_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1596626496)))]; + tensor var_10880_cast = add(x = out_333_cast, y = var_10879_to_fp16)[name = tensor("op_10880_cast")]; + tensor var_10882_to_fp16 = const()[name = tensor("op_10882_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1596629120)))]; + tensor hidden_states_443_cast = mul(x = var_10880_cast, y = var_10882_to_fp16)[name = tensor("hidden_states_443_cast")]; + tensor var_10889 = const()[name = tensor("op_10889"), val = tensor([1, 1])]; + tensor var_10891 = const()[name = tensor("op_10891"), val = tensor([1, 1])]; + tensor q_223_pad_type_0 = const()[name = tensor("q_223_pad_type_0"), val = tensor("custom")]; + tensor q_223_pad_0 = const()[name = tensor("q_223_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices 
= tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1596631744))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1597860608))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_223_cast = conv(dilations = var_10891, groups = var_31, pad = q_223_pad_0, pad_type = q_223_pad_type_0, strides = var_10889, weight = unet_up_blocks_0_attentions_2_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_443_cast)[name = tensor("q_223_cast")]; + tensor var_10895 = const()[name = tensor("op_10895"), val = tensor([1, 1])]; + tensor var_10897 = const()[name = tensor("op_10897"), val = tensor([1, 1])]; + tensor k_223_pad_type_0 = const()[name = tensor("k_223_pad_type_0"), val = tensor("custom")]; + tensor k_223_pad_0 = const()[name = tensor("k_223_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1597860800))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1599826944))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_223_cast = conv(dilations = var_10897, groups = var_31, pad = k_223_pad_0, pad_type = k_223_pad_type_0, strides = var_10895, weight = unet_up_blocks_0_attentions_2_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_223_cast")]; + tensor var_10901 = const()[name = tensor("op_10901"), val = tensor([1, 1])]; + tensor var_10903 = const()[name = tensor("op_10903"), val = tensor([1, 1])]; + tensor v_223_pad_type_0 = const()[name = tensor("v_223_pad_type_0"), val = tensor("custom")]; + tensor v_223_pad_0 = const()[name = tensor("v_223_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1599827136))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1601793280))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_223_cast = conv(dilations = var_10903, groups = var_31, pad = v_223_pad_0, pad_type = v_223_pad_type_0, strides = var_10901, weight = unet_up_blocks_0_attentions_2_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_223_cast")]; + tensor var_10907 = const()[name = tensor("op_10907"), val = tensor([2, 20, 64, -1])]; + tensor var_10908_cast = reshape(shape = var_10907, x = q_223_cast)[name = tensor("op_10908_cast")]; + tensor var_10909 = const()[name = tensor("op_10909"), val = tensor([2, 20, 64, -1])]; + tensor var_10910_cast = reshape(shape = var_10909, x = k_223_cast)[name = tensor("op_10910_cast")]; + tensor var_10911 = const()[name = tensor("op_10911"), val = tensor([2, 20, 64, -1])]; + tensor var_10912_cast = reshape(shape = var_10911, x = v_223_cast)[name = tensor("op_10912_cast")]; + tensor attn_weights_445_transpose_x_0 = const()[name = tensor("attn_weights_445_transpose_x_0"), val = tensor(true)]; + tensor 
attn_weights_445_transpose_y_0 = const()[name = tensor("attn_weights_445_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_445_cast = matmul(transpose_x = attn_weights_445_transpose_x_0, transpose_y = attn_weights_445_transpose_y_0, x = var_10908_cast, y = var_10910_cast)[name = tensor("attn_weights_445_cast")]; + tensor attn_weights_447_cast = mul(x = attn_weights_445_cast, y = var_12_to_fp16)[name = tensor("attn_weights_447_cast")]; + tensor var_10916_cast = softmax(axis = var_18, x = attn_weights_447_cast)[name = tensor("op_10916_cast")]; + tensor attn_223_transpose_x_0 = const()[name = tensor("attn_223_transpose_x_0"), val = tensor(false)]; + tensor attn_223_transpose_y_0 = const()[name = tensor("attn_223_transpose_y_0"), val = tensor(true)]; + tensor attn_223_cast = matmul(transpose_x = attn_223_transpose_x_0, transpose_y = attn_223_transpose_y_0, x = var_10912_cast, y = var_10916_cast)[name = tensor("attn_223_cast")]; + tensor var_10920 = const()[name = tensor("op_10920"), val = tensor([2, 1280, 1, -1])]; + tensor input_645_cast = reshape(shape = var_10920, x = attn_223_cast)[name = tensor("input_645_cast")]; + tensor var_10925 = const()[name = tensor("op_10925"), val = tensor([1, 1])]; + tensor var_10927 = const()[name = tensor("op_10927"), val = tensor([1, 1])]; + tensor var_10929_pad_type_0 = const()[name = tensor("op_10929_pad_type_0"), val = tensor("custom")]; + tensor var_10929_pad_0 = const()[name = tensor("op_10929_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1601793472))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1603022336))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_1_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_1_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1603022528)))]; + tensor var_10929_cast = conv(bias = unet_up_blocks_0_attentions_2_transformer_blocks_1_attn2_to_out_0_bias_to_fp16, dilations = var_10927, groups = var_31, pad = var_10929_pad_0, pad_type = var_10929_pad_type_0, strides = var_10925, weight = unet_up_blocks_0_attentions_2_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized, x = input_645_cast)[name = tensor("op_10929_cast")]; + tensor inputs_335_cast = add(x = var_10929_cast, y = inputs_333_cast)[name = tensor("inputs_335_cast")]; + tensor var_10933 = const()[name = tensor("op_10933"), val = tensor([1])]; + tensor channels_mean_335_cast = reduce_mean(axes = var_10933, keep_dims = var_23, x = inputs_335_cast)[name = tensor("channels_mean_335_cast")]; + tensor zero_mean_335_cast = sub(x = inputs_335_cast, y = channels_mean_335_cast)[name = tensor("zero_mean_335_cast")]; + tensor zero_mean_sq_335_cast = mul(x = zero_mean_335_cast, y = zero_mean_335_cast)[name = tensor("zero_mean_sq_335_cast")]; + tensor var_10937 = const()[name = tensor("op_10937"), val = tensor([1])]; + tensor var_10938_cast = reduce_mean(axes = var_10937, keep_dims = var_23, x = zero_mean_sq_335_cast)[name = tensor("op_10938_cast")]; + tensor var_10939_to_fp16 = const()[name = tensor("op_10939_to_fp16"), val = 
tensor(0x1.5p-17)]; + tensor var_10940_cast = add(x = var_10938_cast, y = var_10939_to_fp16)[name = tensor("op_10940_cast")]; + tensor denom_335_epsilon_0_to_fp16 = const()[name = tensor("denom_335_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_335_cast = rsqrt(epsilon = denom_335_epsilon_0_to_fp16, x = var_10940_cast)[name = tensor("denom_335_cast")]; + tensor out_335_cast = mul(x = zero_mean_335_cast, y = denom_335_cast)[name = tensor("out_335_cast")]; + tensor var_10944_to_fp16 = const()[name = tensor("op_10944_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1603025152)))]; + tensor var_10945_cast = add(x = out_335_cast, y = var_10944_to_fp16)[name = tensor("op_10945_cast")]; + tensor var_10947_to_fp16 = const()[name = tensor("op_10947_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1603027776)))]; + tensor input_647_cast = mul(x = var_10945_cast, y = var_10947_to_fp16)[name = tensor("input_647_cast")]; + tensor var_10955 = const()[name = tensor("op_10955"), val = tensor([1, 1])]; + tensor var_10957 = const()[name = tensor("op_10957"), val = tensor([1, 1])]; + tensor var_10959_pad_type_0 = const()[name = tensor("op_10959_pad_type_0"), val = tensor("custom")]; + tensor var_10959_pad_0 = const()[name = tensor("op_10959_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1603030400))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1612860864))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_1_ff_net_0_proj_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1612861056))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1612868800))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_1_ff_net_0_proj_bias_to_fp16_palettized"), shape = tensor([10240])]; + tensor var_10959_cast = conv(bias = unet_up_blocks_0_attentions_2_transformer_blocks_1_ff_net_0_proj_bias_to_fp16_palettized, dilations = var_10957, groups = var_31, pad = var_10959_pad_0, pad_type = var_10959_pad_type_0, strides = var_10955, weight = unet_up_blocks_0_attentions_2_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized, x = input_647_cast)[name = tensor("op_10959_cast")]; + tensor var_10960_split_sizes_0 = const()[name = tensor("op_10960_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_10960_axis_0 = const()[name = tensor("op_10960_axis_0"), val = tensor(1)]; + tensor var_10960_cast_0, tensor var_10960_cast_1 = split(axis = var_10960_axis_0, split_sizes = var_10960_split_sizes_0, x = var_10959_cast)[name = tensor("op_10960_cast")]; + tensor var_10962_mode_0 = const()[name = tensor("op_10962_mode_0"), val = tensor("EXACT")]; + tensor var_10962_cast = gelu(mode = var_10962_mode_0, x = var_10960_cast_1)[name = tensor("op_10962_cast")]; + tensor input_649_cast = mul(x = var_10960_cast_0, y = var_10962_cast)[name = tensor("input_649_cast")]; + tensor var_10966 = const()[name = tensor("op_10966"), val = tensor([1, 1])]; + tensor var_10968 = const()[name = 
tensor("op_10968"), val = tensor([1, 1])]; + tensor var_10970_pad_type_0 = const()[name = tensor("op_10970_pad_type_0"), val = tensor("custom")]; + tensor var_10970_pad_0 = const()[name = tensor("op_10970_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1612868992))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1617784256))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_1_ff_net_2_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_1_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1617784448)))]; + tensor var_10970_cast = conv(bias = unet_up_blocks_0_attentions_2_transformer_blocks_1_ff_net_2_bias_to_fp16, dilations = var_10968, groups = var_31, pad = var_10970_pad_0, pad_type = var_10970_pad_type_0, strides = var_10966, weight = unet_up_blocks_0_attentions_2_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized, x = input_649_cast)[name = tensor("op_10970_cast")]; + tensor inputs_337_cast = add(x = var_10970_cast, y = inputs_335_cast)[name = tensor("inputs_337_cast")]; + tensor var_10980 = const()[name = tensor("op_10980"), val = tensor([1])]; + tensor channels_mean_337_cast = reduce_mean(axes = var_10980, keep_dims = var_23, x = inputs_337_cast)[name = tensor("channels_mean_337_cast")]; + tensor zero_mean_337_cast = sub(x = inputs_337_cast, y = channels_mean_337_cast)[name = tensor("zero_mean_337_cast")]; + tensor zero_mean_sq_337_cast = mul(x = zero_mean_337_cast, y = zero_mean_337_cast)[name = tensor("zero_mean_sq_337_cast")]; + tensor var_10984 = const()[name = tensor("op_10984"), val = tensor([1])]; + tensor var_10985_cast = reduce_mean(axes = var_10984, keep_dims = var_23, x = zero_mean_sq_337_cast)[name = tensor("op_10985_cast")]; + tensor var_10986_to_fp16 = const()[name = tensor("op_10986_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_10987_cast = add(x = var_10985_cast, y = var_10986_to_fp16)[name = tensor("op_10987_cast")]; + tensor denom_337_epsilon_0_to_fp16 = const()[name = tensor("denom_337_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_337_cast = rsqrt(epsilon = denom_337_epsilon_0_to_fp16, x = var_10987_cast)[name = tensor("denom_337_cast")]; + tensor out_337_cast = mul(x = zero_mean_337_cast, y = denom_337_cast)[name = tensor("out_337_cast")]; + tensor var_10991_to_fp16 = const()[name = tensor("op_10991_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1617787072)))]; + tensor var_10992_cast = add(x = out_337_cast, y = var_10991_to_fp16)[name = tensor("op_10992_cast")]; + tensor var_10994_to_fp16 = const()[name = tensor("op_10994_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1617789696)))]; + tensor hidden_states_447_cast = mul(x = var_10992_cast, y = var_10994_to_fp16)[name = tensor("hidden_states_447_cast")]; + tensor var_11001 = const()[name = tensor("op_11001"), val = tensor([1, 1])]; + tensor var_11003 = const()[name = tensor("op_11003"), val = tensor([1, 1])]; + tensor q_225_pad_type_0 = const()[name = tensor("q_225_pad_type_0"), val = tensor("custom")]; + 
tensor q_225_pad_0 = const()[name = tensor("q_225_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1617792320))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1619021184))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_225_cast = conv(dilations = var_11003, groups = var_31, pad = q_225_pad_0, pad_type = q_225_pad_type_0, strides = var_11001, weight = unet_up_blocks_0_attentions_2_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_447_cast)[name = tensor("q_225_cast")]; + tensor var_11007 = const()[name = tensor("op_11007"), val = tensor([1, 1])]; + tensor var_11009 = const()[name = tensor("op_11009"), val = tensor([1, 1])]; + tensor k_225_pad_type_0 = const()[name = tensor("k_225_pad_type_0"), val = tensor("custom")]; + tensor k_225_pad_0 = const()[name = tensor("k_225_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1619021376))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1620250240))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_225_cast = conv(dilations = var_11009, groups = var_31, pad = k_225_pad_0, pad_type = k_225_pad_type_0, strides = var_11007, weight = unet_up_blocks_0_attentions_2_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_447_cast)[name = tensor("k_225_cast")]; + tensor var_11013 = const()[name = tensor("op_11013"), val = tensor([1, 1])]; + tensor var_11015 = const()[name = tensor("op_11015"), val = tensor([1, 1])]; + tensor v_225_pad_type_0 = const()[name = tensor("v_225_pad_type_0"), val = tensor("custom")]; + tensor v_225_pad_0 = const()[name = tensor("v_225_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1620250432))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1621479296))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_225_cast = conv(dilations = var_11015, groups = var_31, pad = v_225_pad_0, pad_type = v_225_pad_type_0, strides = var_11013, weight = unet_up_blocks_0_attentions_2_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_447_cast)[name = tensor("v_225_cast")]; + tensor var_11019 = const()[name = tensor("op_11019"), val = tensor([2, 20, 64, -1])]; + tensor var_11020_cast = reshape(shape = var_11019, x = q_225_cast)[name = tensor("op_11020_cast")]; + tensor var_11021 = const()[name = tensor("op_11021"), val = tensor([2, 20, 64, -1])]; + tensor var_11022_cast = reshape(shape = var_11021, x = k_225_cast)[name = tensor("op_11022_cast")]; + tensor var_11023 = const()[name = tensor("op_11023"), val = tensor([2, 20, 64, -1])]; + tensor 
var_11024_cast = reshape(shape = var_11023, x = v_225_cast)[name = tensor("op_11024_cast")]; + tensor attn_weights_449_transpose_x_0 = const()[name = tensor("attn_weights_449_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_449_transpose_y_0 = const()[name = tensor("attn_weights_449_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_449_cast = matmul(transpose_x = attn_weights_449_transpose_x_0, transpose_y = attn_weights_449_transpose_y_0, x = var_11020_cast, y = var_11022_cast)[name = tensor("attn_weights_449_cast")]; + tensor attn_weights_451_cast = mul(x = attn_weights_449_cast, y = var_12_to_fp16)[name = tensor("attn_weights_451_cast")]; + tensor var_11028_cast = softmax(axis = var_18, x = attn_weights_451_cast)[name = tensor("op_11028_cast")]; + tensor attn_225_transpose_x_0 = const()[name = tensor("attn_225_transpose_x_0"), val = tensor(false)]; + tensor attn_225_transpose_y_0 = const()[name = tensor("attn_225_transpose_y_0"), val = tensor(true)]; + tensor attn_225_cast = matmul(transpose_x = attn_225_transpose_x_0, transpose_y = attn_225_transpose_y_0, x = var_11024_cast, y = var_11028_cast)[name = tensor("attn_225_cast")]; + tensor var_11032 = const()[name = tensor("op_11032"), val = tensor([2, 1280, 1, -1])]; + tensor input_651_cast = reshape(shape = var_11032, x = attn_225_cast)[name = tensor("input_651_cast")]; + tensor var_11037 = const()[name = tensor("op_11037"), val = tensor([1, 1])]; + tensor var_11039 = const()[name = tensor("op_11039"), val = tensor([1, 1])]; + tensor var_11041_pad_type_0 = const()[name = tensor("op_11041_pad_type_0"), val = tensor("custom")]; + tensor var_11041_pad_0 = const()[name = tensor("op_11041_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1621479488))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1622708352))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_2_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_2_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1622708544)))]; + tensor var_11041_cast = conv(bias = unet_up_blocks_0_attentions_2_transformer_blocks_2_attn1_to_out_0_bias_to_fp16, dilations = var_11039, groups = var_31, pad = var_11041_pad_0, pad_type = var_11041_pad_type_0, strides = var_11037, weight = unet_up_blocks_0_attentions_2_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized, x = input_651_cast)[name = tensor("op_11041_cast")]; + tensor inputs_339_cast = add(x = var_11041_cast, y = inputs_337_cast)[name = tensor("inputs_339_cast")]; + tensor var_11045 = const()[name = tensor("op_11045"), val = tensor([1])]; + tensor channels_mean_339_cast = reduce_mean(axes = var_11045, keep_dims = var_23, x = inputs_339_cast)[name = tensor("channels_mean_339_cast")]; + tensor zero_mean_339_cast = sub(x = inputs_339_cast, y = channels_mean_339_cast)[name = tensor("zero_mean_339_cast")]; + tensor zero_mean_sq_339_cast = mul(x = zero_mean_339_cast, y = zero_mean_339_cast)[name = tensor("zero_mean_sq_339_cast")]; + tensor var_11049 = const()[name = tensor("op_11049"), val = tensor([1])]; + 
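The channels_mean / zero_mean / zero_mean_sq / rsqrt sequences that follow every residual add are a LayerNorm over the channel axis, spelled out op by op: reduce_mean over axes [1], subtraction, squared mean for the variance, then the constant 0x1.5p-17 (roughly 1e-5 in fp16) added before rsqrt, which carries its own tiny epsilon of 0x1p-24. The affine part is applied as an add of one per-channel constant followed by a mul by another; that matches the usual x_hat * gamma + beta if the stored additive constant equals beta / gamma, which is an inference from the op order rather than something the file states. A small NumPy sketch under those assumptions:

    import numpy as np

    def mil_style_layernorm(x, gamma, beta_over_gamma,
                            eps=float.fromhex('0x1.5p-17'),
                            rsqrt_eps=float.fromhex('0x1p-24')):
        # x: [batch, channels, 1, tokens]; gamma and beta_over_gamma are per-channel
        # constants shaped to broadcast against x, e.g. [1, channels, 1, 1]
        mean = x.mean(axis=1, keepdims=True)              # reduce_mean(axes=[1], keep_dims=True)
        zero_mean = x - mean
        var = (zero_mean * zero_mean).mean(axis=1, keepdims=True)
        denom = 1.0 / np.sqrt(var + eps + rsqrt_eps)      # add(var, eps) then rsqrt(epsilon=...)
        xhat = zero_mean * denom
        return (xhat + beta_over_gamma) * gamma           # add(out, const) then mul(..., const)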
tensor var_11050_cast = reduce_mean(axes = var_11049, keep_dims = var_23, x = zero_mean_sq_339_cast)[name = tensor("op_11050_cast")]; + tensor var_11051_to_fp16 = const()[name = tensor("op_11051_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_11052_cast = add(x = var_11050_cast, y = var_11051_to_fp16)[name = tensor("op_11052_cast")]; + tensor denom_339_epsilon_0_to_fp16 = const()[name = tensor("denom_339_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_339_cast = rsqrt(epsilon = denom_339_epsilon_0_to_fp16, x = var_11052_cast)[name = tensor("denom_339_cast")]; + tensor out_339_cast = mul(x = zero_mean_339_cast, y = denom_339_cast)[name = tensor("out_339_cast")]; + tensor var_11056_to_fp16 = const()[name = tensor("op_11056_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1622711168)))]; + tensor var_11057_cast = add(x = out_339_cast, y = var_11056_to_fp16)[name = tensor("op_11057_cast")]; + tensor var_11059_to_fp16 = const()[name = tensor("op_11059_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1622713792)))]; + tensor hidden_states_449_cast = mul(x = var_11057_cast, y = var_11059_to_fp16)[name = tensor("hidden_states_449_cast")]; + tensor var_11066 = const()[name = tensor("op_11066"), val = tensor([1, 1])]; + tensor var_11068 = const()[name = tensor("op_11068"), val = tensor([1, 1])]; + tensor q_227_pad_type_0 = const()[name = tensor("q_227_pad_type_0"), val = tensor("custom")]; + tensor q_227_pad_0 = const()[name = tensor("q_227_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1622716416))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1623945280))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_227_cast = conv(dilations = var_11068, groups = var_31, pad = q_227_pad_0, pad_type = q_227_pad_type_0, strides = var_11066, weight = unet_up_blocks_0_attentions_2_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_449_cast)[name = tensor("q_227_cast")]; + tensor var_11072 = const()[name = tensor("op_11072"), val = tensor([1, 1])]; + tensor var_11074 = const()[name = tensor("op_11074"), val = tensor([1, 1])]; + tensor k_227_pad_type_0 = const()[name = tensor("k_227_pad_type_0"), val = tensor("custom")]; + tensor k_227_pad_0 = const()[name = tensor("k_227_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1623945472))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1625911616))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_227_cast = conv(dilations = var_11074, groups = var_31, pad = k_227_pad_0, pad_type = k_227_pad_type_0, strides = var_11072, weight = unet_up_blocks_0_attentions_2_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_227_cast")]; + tensor var_11078 = const()[name = tensor("op_11078"), val = tensor([1, 
1])]; + tensor var_11080 = const()[name = tensor("op_11080"), val = tensor([1, 1])]; + tensor v_227_pad_type_0 = const()[name = tensor("v_227_pad_type_0"), val = tensor("custom")]; + tensor v_227_pad_0 = const()[name = tensor("v_227_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1625911808))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1627877952))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_227_cast = conv(dilations = var_11080, groups = var_31, pad = v_227_pad_0, pad_type = v_227_pad_type_0, strides = var_11078, weight = unet_up_blocks_0_attentions_2_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_227_cast")]; + tensor var_11084 = const()[name = tensor("op_11084"), val = tensor([2, 20, 64, -1])]; + tensor var_11085_cast = reshape(shape = var_11084, x = q_227_cast)[name = tensor("op_11085_cast")]; + tensor var_11086 = const()[name = tensor("op_11086"), val = tensor([2, 20, 64, -1])]; + tensor var_11087_cast = reshape(shape = var_11086, x = k_227_cast)[name = tensor("op_11087_cast")]; + tensor var_11088 = const()[name = tensor("op_11088"), val = tensor([2, 20, 64, -1])]; + tensor var_11089_cast = reshape(shape = var_11088, x = v_227_cast)[name = tensor("op_11089_cast")]; + tensor attn_weights_453_transpose_x_0 = const()[name = tensor("attn_weights_453_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_453_transpose_y_0 = const()[name = tensor("attn_weights_453_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_453_cast = matmul(transpose_x = attn_weights_453_transpose_x_0, transpose_y = attn_weights_453_transpose_y_0, x = var_11085_cast, y = var_11087_cast)[name = tensor("attn_weights_453_cast")]; + tensor attn_weights_455_cast = mul(x = attn_weights_453_cast, y = var_12_to_fp16)[name = tensor("attn_weights_455_cast")]; + tensor var_11093_cast = softmax(axis = var_18, x = attn_weights_455_cast)[name = tensor("op_11093_cast")]; + tensor attn_227_transpose_x_0 = const()[name = tensor("attn_227_transpose_x_0"), val = tensor(false)]; + tensor attn_227_transpose_y_0 = const()[name = tensor("attn_227_transpose_y_0"), val = tensor(true)]; + tensor attn_227_cast = matmul(transpose_x = attn_227_transpose_x_0, transpose_y = attn_227_transpose_y_0, x = var_11089_cast, y = var_11093_cast)[name = tensor("attn_227_cast")]; + tensor var_11097 = const()[name = tensor("op_11097"), val = tensor([2, 1280, 1, -1])]; + tensor input_653_cast = reshape(shape = var_11097, x = attn_227_cast)[name = tensor("input_653_cast")]; + tensor var_11102 = const()[name = tensor("op_11102"), val = tensor([1, 1])]; + tensor var_11104 = const()[name = tensor("op_11104"), val = tensor([1, 1])]; + tensor var_11106_pad_type_0 = const()[name = tensor("op_11106_pad_type_0"), val = tensor("custom")]; + tensor var_11106_pad_0 = const()[name = tensor("op_11106_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1627878144))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(1629107008))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_2_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_2_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1629107200)))]; + tensor var_11106_cast = conv(bias = unet_up_blocks_0_attentions_2_transformer_blocks_2_attn2_to_out_0_bias_to_fp16, dilations = var_11104, groups = var_31, pad = var_11106_pad_0, pad_type = var_11106_pad_type_0, strides = var_11102, weight = unet_up_blocks_0_attentions_2_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized, x = input_653_cast)[name = tensor("op_11106_cast")]; + tensor inputs_341_cast = add(x = var_11106_cast, y = inputs_339_cast)[name = tensor("inputs_341_cast")]; + tensor var_11110 = const()[name = tensor("op_11110"), val = tensor([1])]; + tensor channels_mean_341_cast = reduce_mean(axes = var_11110, keep_dims = var_23, x = inputs_341_cast)[name = tensor("channels_mean_341_cast")]; + tensor zero_mean_341_cast = sub(x = inputs_341_cast, y = channels_mean_341_cast)[name = tensor("zero_mean_341_cast")]; + tensor zero_mean_sq_341_cast = mul(x = zero_mean_341_cast, y = zero_mean_341_cast)[name = tensor("zero_mean_sq_341_cast")]; + tensor var_11114 = const()[name = tensor("op_11114"), val = tensor([1])]; + tensor var_11115_cast = reduce_mean(axes = var_11114, keep_dims = var_23, x = zero_mean_sq_341_cast)[name = tensor("op_11115_cast")]; + tensor var_11116_to_fp16 = const()[name = tensor("op_11116_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_11117_cast = add(x = var_11115_cast, y = var_11116_to_fp16)[name = tensor("op_11117_cast")]; + tensor denom_341_epsilon_0_to_fp16 = const()[name = tensor("denom_341_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_341_cast = rsqrt(epsilon = denom_341_epsilon_0_to_fp16, x = var_11117_cast)[name = tensor("denom_341_cast")]; + tensor out_341_cast = mul(x = zero_mean_341_cast, y = denom_341_cast)[name = tensor("out_341_cast")]; + tensor var_11121_to_fp16 = const()[name = tensor("op_11121_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1629109824)))]; + tensor var_11122_cast = add(x = out_341_cast, y = var_11121_to_fp16)[name = tensor("op_11122_cast")]; + tensor var_11124_to_fp16 = const()[name = tensor("op_11124_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1629112448)))]; + tensor input_655_cast = mul(x = var_11122_cast, y = var_11124_to_fp16)[name = tensor("input_655_cast")]; + tensor var_11132 = const()[name = tensor("op_11132"), val = tensor([1, 1])]; + tensor var_11134 = const()[name = tensor("op_11134"), val = tensor([1, 1])]; + tensor var_11136_pad_type_0 = const()[name = tensor("op_11136_pad_type_0"), val = tensor("custom")]; + tensor var_11136_pad_0 = const()[name = tensor("op_11136_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1629115072))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1638945536))), name = 
tensor("unet_up_blocks_0_attentions_2_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_2_ff_net_0_proj_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1638945728))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1638953472))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_2_ff_net_0_proj_bias_to_fp16_palettized"), shape = tensor([10240])]; + tensor var_11136_cast = conv(bias = unet_up_blocks_0_attentions_2_transformer_blocks_2_ff_net_0_proj_bias_to_fp16_palettized, dilations = var_11134, groups = var_31, pad = var_11136_pad_0, pad_type = var_11136_pad_type_0, strides = var_11132, weight = unet_up_blocks_0_attentions_2_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized, x = input_655_cast)[name = tensor("op_11136_cast")]; + tensor var_11137_split_sizes_0 = const()[name = tensor("op_11137_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_11137_axis_0 = const()[name = tensor("op_11137_axis_0"), val = tensor(1)]; + tensor var_11137_cast_0, tensor var_11137_cast_1 = split(axis = var_11137_axis_0, split_sizes = var_11137_split_sizes_0, x = var_11136_cast)[name = tensor("op_11137_cast")]; + tensor var_11139_mode_0 = const()[name = tensor("op_11139_mode_0"), val = tensor("EXACT")]; + tensor var_11139_cast = gelu(mode = var_11139_mode_0, x = var_11137_cast_1)[name = tensor("op_11139_cast")]; + tensor input_657_cast = mul(x = var_11137_cast_0, y = var_11139_cast)[name = tensor("input_657_cast")]; + tensor var_11143 = const()[name = tensor("op_11143"), val = tensor([1, 1])]; + tensor var_11145 = const()[name = tensor("op_11145"), val = tensor([1, 1])]; + tensor var_11147_pad_type_0 = const()[name = tensor("op_11147_pad_type_0"), val = tensor("custom")]; + tensor var_11147_pad_0 = const()[name = tensor("op_11147_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1638953664))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1643868928))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_2_ff_net_2_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_2_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1643869120)))]; + tensor var_11147_cast = conv(bias = unet_up_blocks_0_attentions_2_transformer_blocks_2_ff_net_2_bias_to_fp16, dilations = var_11145, groups = var_31, pad = var_11147_pad_0, pad_type = var_11147_pad_type_0, strides = var_11143, weight = unet_up_blocks_0_attentions_2_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized, x = input_657_cast)[name = tensor("op_11147_cast")]; + tensor inputs_343_cast = add(x = var_11147_cast, y = inputs_341_cast)[name = tensor("inputs_343_cast")]; + tensor var_11157 = const()[name = tensor("op_11157"), val = tensor([1])]; + tensor channels_mean_343_cast = reduce_mean(axes = var_11157, keep_dims = var_23, x = inputs_343_cast)[name = tensor("channels_mean_343_cast")]; + tensor zero_mean_343_cast = sub(x = 
inputs_343_cast, y = channels_mean_343_cast)[name = tensor("zero_mean_343_cast")]; + tensor zero_mean_sq_343_cast = mul(x = zero_mean_343_cast, y = zero_mean_343_cast)[name = tensor("zero_mean_sq_343_cast")]; + tensor var_11161 = const()[name = tensor("op_11161"), val = tensor([1])]; + tensor var_11162_cast = reduce_mean(axes = var_11161, keep_dims = var_23, x = zero_mean_sq_343_cast)[name = tensor("op_11162_cast")]; + tensor var_11163_to_fp16 = const()[name = tensor("op_11163_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_11164_cast = add(x = var_11162_cast, y = var_11163_to_fp16)[name = tensor("op_11164_cast")]; + tensor denom_343_epsilon_0_to_fp16 = const()[name = tensor("denom_343_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_343_cast = rsqrt(epsilon = denom_343_epsilon_0_to_fp16, x = var_11164_cast)[name = tensor("denom_343_cast")]; + tensor out_343_cast = mul(x = zero_mean_343_cast, y = denom_343_cast)[name = tensor("out_343_cast")]; + tensor var_11168_to_fp16 = const()[name = tensor("op_11168_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1643871744)))]; + tensor var_11169_cast = add(x = out_343_cast, y = var_11168_to_fp16)[name = tensor("op_11169_cast")]; + tensor var_11171_to_fp16 = const()[name = tensor("op_11171_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1643874368)))]; + tensor hidden_states_453_cast = mul(x = var_11169_cast, y = var_11171_to_fp16)[name = tensor("hidden_states_453_cast")]; + tensor var_11178 = const()[name = tensor("op_11178"), val = tensor([1, 1])]; + tensor var_11180 = const()[name = tensor("op_11180"), val = tensor([1, 1])]; + tensor q_229_pad_type_0 = const()[name = tensor("q_229_pad_type_0"), val = tensor("custom")]; + tensor q_229_pad_0 = const()[name = tensor("q_229_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1643876992))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1645105856))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_229_cast = conv(dilations = var_11180, groups = var_31, pad = q_229_pad_0, pad_type = q_229_pad_type_0, strides = var_11178, weight = unet_up_blocks_0_attentions_2_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_453_cast)[name = tensor("q_229_cast")]; + tensor var_11184 = const()[name = tensor("op_11184"), val = tensor([1, 1])]; + tensor var_11186 = const()[name = tensor("op_11186"), val = tensor([1, 1])]; + tensor k_229_pad_type_0 = const()[name = tensor("k_229_pad_type_0"), val = tensor("custom")]; + tensor k_229_pad_0 = const()[name = tensor("k_229_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1645106048))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1646334912))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_229_cast = conv(dilations = var_11186, groups = var_31, pad = 
k_229_pad_0, pad_type = k_229_pad_type_0, strides = var_11184, weight = unet_up_blocks_0_attentions_2_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_453_cast)[name = tensor("k_229_cast")]; + tensor var_11190 = const()[name = tensor("op_11190"), val = tensor([1, 1])]; + tensor var_11192 = const()[name = tensor("op_11192"), val = tensor([1, 1])]; + tensor v_229_pad_type_0 = const()[name = tensor("v_229_pad_type_0"), val = tensor("custom")]; + tensor v_229_pad_0 = const()[name = tensor("v_229_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1646335104))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1647563968))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_229_cast = conv(dilations = var_11192, groups = var_31, pad = v_229_pad_0, pad_type = v_229_pad_type_0, strides = var_11190, weight = unet_up_blocks_0_attentions_2_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_453_cast)[name = tensor("v_229_cast")]; + tensor var_11196 = const()[name = tensor("op_11196"), val = tensor([2, 20, 64, -1])]; + tensor var_11197_cast = reshape(shape = var_11196, x = q_229_cast)[name = tensor("op_11197_cast")]; + tensor var_11198 = const()[name = tensor("op_11198"), val = tensor([2, 20, 64, -1])]; + tensor var_11199_cast = reshape(shape = var_11198, x = k_229_cast)[name = tensor("op_11199_cast")]; + tensor var_11200 = const()[name = tensor("op_11200"), val = tensor([2, 20, 64, -1])]; + tensor var_11201_cast = reshape(shape = var_11200, x = v_229_cast)[name = tensor("op_11201_cast")]; + tensor attn_weights_457_transpose_x_0 = const()[name = tensor("attn_weights_457_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_457_transpose_y_0 = const()[name = tensor("attn_weights_457_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_457_cast = matmul(transpose_x = attn_weights_457_transpose_x_0, transpose_y = attn_weights_457_transpose_y_0, x = var_11197_cast, y = var_11199_cast)[name = tensor("attn_weights_457_cast")]; + tensor attn_weights_459_cast = mul(x = attn_weights_457_cast, y = var_12_to_fp16)[name = tensor("attn_weights_459_cast")]; + tensor var_11205_cast = softmax(axis = var_18, x = attn_weights_459_cast)[name = tensor("op_11205_cast")]; + tensor attn_229_transpose_x_0 = const()[name = tensor("attn_229_transpose_x_0"), val = tensor(false)]; + tensor attn_229_transpose_y_0 = const()[name = tensor("attn_229_transpose_y_0"), val = tensor(true)]; + tensor attn_229_cast = matmul(transpose_x = attn_229_transpose_x_0, transpose_y = attn_229_transpose_y_0, x = var_11201_cast, y = var_11205_cast)[name = tensor("attn_229_cast")]; + tensor var_11209 = const()[name = tensor("op_11209"), val = tensor([2, 1280, 1, -1])]; + tensor input_659_cast = reshape(shape = var_11209, x = attn_229_cast)[name = tensor("input_659_cast")]; + tensor var_11214 = const()[name = tensor("op_11214"), val = tensor([1, 1])]; + tensor var_11216 = const()[name = tensor("op_11216"), val = tensor([1, 1])]; + tensor var_11218_pad_type_0 = const()[name = tensor("op_11218_pad_type_0"), val = tensor("custom")]; + tensor var_11218_pad_0 = const()[name = tensor("op_11218_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
unet_up_blocks_0_attentions_2_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1647564160))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1648793024))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_3_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_3_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1648793216)))]; + tensor var_11218_cast = conv(bias = unet_up_blocks_0_attentions_2_transformer_blocks_3_attn1_to_out_0_bias_to_fp16, dilations = var_11216, groups = var_31, pad = var_11218_pad_0, pad_type = var_11218_pad_type_0, strides = var_11214, weight = unet_up_blocks_0_attentions_2_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized, x = input_659_cast)[name = tensor("op_11218_cast")]; + tensor inputs_345_cast = add(x = var_11218_cast, y = inputs_343_cast)[name = tensor("inputs_345_cast")]; + tensor var_11222 = const()[name = tensor("op_11222"), val = tensor([1])]; + tensor channels_mean_345_cast = reduce_mean(axes = var_11222, keep_dims = var_23, x = inputs_345_cast)[name = tensor("channels_mean_345_cast")]; + tensor zero_mean_345_cast = sub(x = inputs_345_cast, y = channels_mean_345_cast)[name = tensor("zero_mean_345_cast")]; + tensor zero_mean_sq_345_cast = mul(x = zero_mean_345_cast, y = zero_mean_345_cast)[name = tensor("zero_mean_sq_345_cast")]; + tensor var_11226 = const()[name = tensor("op_11226"), val = tensor([1])]; + tensor var_11227_cast = reduce_mean(axes = var_11226, keep_dims = var_23, x = zero_mean_sq_345_cast)[name = tensor("op_11227_cast")]; + tensor var_11228_to_fp16 = const()[name = tensor("op_11228_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_11229_cast = add(x = var_11227_cast, y = var_11228_to_fp16)[name = tensor("op_11229_cast")]; + tensor denom_345_epsilon_0_to_fp16 = const()[name = tensor("denom_345_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_345_cast = rsqrt(epsilon = denom_345_epsilon_0_to_fp16, x = var_11229_cast)[name = tensor("denom_345_cast")]; + tensor out_345_cast = mul(x = zero_mean_345_cast, y = denom_345_cast)[name = tensor("out_345_cast")]; + tensor var_11233_to_fp16 = const()[name = tensor("op_11233_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1648795840)))]; + tensor var_11234_cast = add(x = out_345_cast, y = var_11233_to_fp16)[name = tensor("op_11234_cast")]; + tensor var_11236_to_fp16 = const()[name = tensor("op_11236_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1648798464)))]; + tensor hidden_states_455_cast = mul(x = var_11234_cast, y = var_11236_to_fp16)[name = tensor("hidden_states_455_cast")]; + tensor var_11243 = const()[name = tensor("op_11243"), val = tensor([1, 1])]; + tensor var_11245 = const()[name = tensor("op_11245"), val = tensor([1, 1])]; + tensor q_231_pad_type_0 = const()[name = tensor("q_231_pad_type_0"), val = tensor("custom")]; + tensor q_231_pad_0 = const()[name = tensor("q_231_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices 
= tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1648801088))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1650029952))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_231_cast = conv(dilations = var_11245, groups = var_31, pad = q_231_pad_0, pad_type = q_231_pad_type_0, strides = var_11243, weight = unet_up_blocks_0_attentions_2_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_455_cast)[name = tensor("q_231_cast")]; + tensor var_11249 = const()[name = tensor("op_11249"), val = tensor([1, 1])]; + tensor var_11251 = const()[name = tensor("op_11251"), val = tensor([1, 1])]; + tensor k_231_pad_type_0 = const()[name = tensor("k_231_pad_type_0"), val = tensor("custom")]; + tensor k_231_pad_0 = const()[name = tensor("k_231_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1650030144))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1651996288))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_231_cast = conv(dilations = var_11251, groups = var_31, pad = k_231_pad_0, pad_type = k_231_pad_type_0, strides = var_11249, weight = unet_up_blocks_0_attentions_2_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_231_cast")]; + tensor var_11255 = const()[name = tensor("op_11255"), val = tensor([1, 1])]; + tensor var_11257 = const()[name = tensor("op_11257"), val = tensor([1, 1])]; + tensor v_231_pad_type_0 = const()[name = tensor("v_231_pad_type_0"), val = tensor("custom")]; + tensor v_231_pad_0 = const()[name = tensor("v_231_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1651996480))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1653962624))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_231_cast = conv(dilations = var_11257, groups = var_31, pad = v_231_pad_0, pad_type = v_231_pad_type_0, strides = var_11255, weight = unet_up_blocks_0_attentions_2_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_231_cast")]; + tensor var_11261 = const()[name = tensor("op_11261"), val = tensor([2, 20, 64, -1])]; + tensor var_11262_cast = reshape(shape = var_11261, x = q_231_cast)[name = tensor("op_11262_cast")]; + tensor var_11263 = const()[name = tensor("op_11263"), val = tensor([2, 20, 64, -1])]; + tensor var_11264_cast = reshape(shape = var_11263, x = k_231_cast)[name = tensor("op_11264_cast")]; + tensor var_11265 = const()[name = tensor("op_11265"), val = tensor([2, 20, 64, -1])]; + tensor var_11266_cast = reshape(shape = var_11265, x = v_231_cast)[name = tensor("op_11266_cast")]; + tensor attn_weights_461_transpose_x_0 = const()[name = tensor("attn_weights_461_transpose_x_0"), val = tensor(true)]; + tensor 
attn_weights_461_transpose_y_0 = const()[name = tensor("attn_weights_461_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_461_cast = matmul(transpose_x = attn_weights_461_transpose_x_0, transpose_y = attn_weights_461_transpose_y_0, x = var_11262_cast, y = var_11264_cast)[name = tensor("attn_weights_461_cast")]; + tensor attn_weights_463_cast = mul(x = attn_weights_461_cast, y = var_12_to_fp16)[name = tensor("attn_weights_463_cast")]; + tensor var_11270_cast = softmax(axis = var_18, x = attn_weights_463_cast)[name = tensor("op_11270_cast")]; + tensor attn_231_transpose_x_0 = const()[name = tensor("attn_231_transpose_x_0"), val = tensor(false)]; + tensor attn_231_transpose_y_0 = const()[name = tensor("attn_231_transpose_y_0"), val = tensor(true)]; + tensor attn_231_cast = matmul(transpose_x = attn_231_transpose_x_0, transpose_y = attn_231_transpose_y_0, x = var_11266_cast, y = var_11270_cast)[name = tensor("attn_231_cast")]; + tensor var_11274 = const()[name = tensor("op_11274"), val = tensor([2, 1280, 1, -1])]; + tensor input_661_cast = reshape(shape = var_11274, x = attn_231_cast)[name = tensor("input_661_cast")]; + tensor var_11279 = const()[name = tensor("op_11279"), val = tensor([1, 1])]; + tensor var_11281 = const()[name = tensor("op_11281"), val = tensor([1, 1])]; + tensor var_11283_pad_type_0 = const()[name = tensor("op_11283_pad_type_0"), val = tensor("custom")]; + tensor var_11283_pad_0 = const()[name = tensor("op_11283_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1653962816))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1655191680))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_3_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_3_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1655191872)))]; + tensor var_11283_cast = conv(bias = unet_up_blocks_0_attentions_2_transformer_blocks_3_attn2_to_out_0_bias_to_fp16, dilations = var_11281, groups = var_31, pad = var_11283_pad_0, pad_type = var_11283_pad_type_0, strides = var_11279, weight = unet_up_blocks_0_attentions_2_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized, x = input_661_cast)[name = tensor("op_11283_cast")]; + tensor inputs_347_cast = add(x = var_11283_cast, y = inputs_345_cast)[name = tensor("inputs_347_cast")]; + tensor var_11287 = const()[name = tensor("op_11287"), val = tensor([1])]; + tensor channels_mean_347_cast = reduce_mean(axes = var_11287, keep_dims = var_23, x = inputs_347_cast)[name = tensor("channels_mean_347_cast")]; + tensor zero_mean_347_cast = sub(x = inputs_347_cast, y = channels_mean_347_cast)[name = tensor("zero_mean_347_cast")]; + tensor zero_mean_sq_347_cast = mul(x = zero_mean_347_cast, y = zero_mean_347_cast)[name = tensor("zero_mean_sq_347_cast")]; + tensor var_11291 = const()[name = tensor("op_11291"), val = tensor([1])]; + tensor var_11292_cast = reduce_mean(axes = var_11291, keep_dims = var_23, x = zero_mean_sq_347_cast)[name = tensor("op_11292_cast")]; + tensor var_11293_to_fp16 = const()[name = tensor("op_11293_to_fp16"), val = 
tensor(0x1.5p-17)]; + tensor var_11294_cast = add(x = var_11292_cast, y = var_11293_to_fp16)[name = tensor("op_11294_cast")]; + tensor denom_347_epsilon_0_to_fp16 = const()[name = tensor("denom_347_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_347_cast = rsqrt(epsilon = denom_347_epsilon_0_to_fp16, x = var_11294_cast)[name = tensor("denom_347_cast")]; + tensor out_347_cast = mul(x = zero_mean_347_cast, y = denom_347_cast)[name = tensor("out_347_cast")]; + tensor var_11298_to_fp16 = const()[name = tensor("op_11298_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1655194496)))]; + tensor var_11299_cast = add(x = out_347_cast, y = var_11298_to_fp16)[name = tensor("op_11299_cast")]; + tensor var_11301_to_fp16 = const()[name = tensor("op_11301_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1655197120)))]; + tensor input_663_cast = mul(x = var_11299_cast, y = var_11301_to_fp16)[name = tensor("input_663_cast")]; + tensor var_11309 = const()[name = tensor("op_11309"), val = tensor([1, 1])]; + tensor var_11311 = const()[name = tensor("op_11311"), val = tensor([1, 1])]; + tensor var_11313_pad_type_0 = const()[name = tensor("op_11313_pad_type_0"), val = tensor("custom")]; + tensor var_11313_pad_0 = const()[name = tensor("op_11313_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1655199744))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1665030208))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_3_ff_net_0_proj_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1665030400))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1665038144))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_3_ff_net_0_proj_bias_to_fp16_palettized"), shape = tensor([10240])]; + tensor var_11313_cast = conv(bias = unet_up_blocks_0_attentions_2_transformer_blocks_3_ff_net_0_proj_bias_to_fp16_palettized, dilations = var_11311, groups = var_31, pad = var_11313_pad_0, pad_type = var_11313_pad_type_0, strides = var_11309, weight = unet_up_blocks_0_attentions_2_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized, x = input_663_cast)[name = tensor("op_11313_cast")]; + tensor var_11314_split_sizes_0 = const()[name = tensor("op_11314_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_11314_axis_0 = const()[name = tensor("op_11314_axis_0"), val = tensor(1)]; + tensor var_11314_cast_0, tensor var_11314_cast_1 = split(axis = var_11314_axis_0, split_sizes = var_11314_split_sizes_0, x = var_11313_cast)[name = tensor("op_11314_cast")]; + tensor var_11316_mode_0 = const()[name = tensor("op_11316_mode_0"), val = tensor("EXACT")]; + tensor var_11316_cast = gelu(mode = var_11316_mode_0, x = var_11314_cast_1)[name = tensor("op_11316_cast")]; + tensor input_665_cast = mul(x = var_11314_cast_0, y = var_11316_cast)[name = tensor("input_665_cast")]; + tensor var_11320 = const()[name = tensor("op_11320"), val = tensor([1, 1])]; + tensor var_11322 = const()[name = 
tensor("op_11322"), val = tensor([1, 1])]; + tensor var_11324_pad_type_0 = const()[name = tensor("op_11324_pad_type_0"), val = tensor("custom")]; + tensor var_11324_pad_0 = const()[name = tensor("op_11324_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1665038336))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1669953600))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_3_ff_net_2_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_3_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1669953792)))]; + tensor var_11324_cast = conv(bias = unet_up_blocks_0_attentions_2_transformer_blocks_3_ff_net_2_bias_to_fp16, dilations = var_11322, groups = var_31, pad = var_11324_pad_0, pad_type = var_11324_pad_type_0, strides = var_11320, weight = unet_up_blocks_0_attentions_2_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized, x = input_665_cast)[name = tensor("op_11324_cast")]; + tensor inputs_349_cast = add(x = var_11324_cast, y = inputs_347_cast)[name = tensor("inputs_349_cast")]; + tensor var_11334 = const()[name = tensor("op_11334"), val = tensor([1])]; + tensor channels_mean_349_cast = reduce_mean(axes = var_11334, keep_dims = var_23, x = inputs_349_cast)[name = tensor("channels_mean_349_cast")]; + tensor zero_mean_349_cast = sub(x = inputs_349_cast, y = channels_mean_349_cast)[name = tensor("zero_mean_349_cast")]; + tensor zero_mean_sq_349_cast = mul(x = zero_mean_349_cast, y = zero_mean_349_cast)[name = tensor("zero_mean_sq_349_cast")]; + tensor var_11338 = const()[name = tensor("op_11338"), val = tensor([1])]; + tensor var_11339_cast = reduce_mean(axes = var_11338, keep_dims = var_23, x = zero_mean_sq_349_cast)[name = tensor("op_11339_cast")]; + tensor var_11340_to_fp16 = const()[name = tensor("op_11340_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_11341_cast = add(x = var_11339_cast, y = var_11340_to_fp16)[name = tensor("op_11341_cast")]; + tensor denom_349_epsilon_0_to_fp16 = const()[name = tensor("denom_349_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_349_cast = rsqrt(epsilon = denom_349_epsilon_0_to_fp16, x = var_11341_cast)[name = tensor("denom_349_cast")]; + tensor out_349_cast = mul(x = zero_mean_349_cast, y = denom_349_cast)[name = tensor("out_349_cast")]; + tensor var_11345_to_fp16 = const()[name = tensor("op_11345_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1669956416)))]; + tensor var_11346_cast = add(x = out_349_cast, y = var_11345_to_fp16)[name = tensor("op_11346_cast")]; + tensor var_11348_to_fp16 = const()[name = tensor("op_11348_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1669959040)))]; + tensor hidden_states_459_cast = mul(x = var_11346_cast, y = var_11348_to_fp16)[name = tensor("hidden_states_459_cast")]; + tensor var_11355 = const()[name = tensor("op_11355"), val = tensor([1, 1])]; + tensor var_11357 = const()[name = tensor("op_11357"), val = tensor([1, 1])]; + tensor q_233_pad_type_0 = const()[name = tensor("q_233_pad_type_0"), val = tensor("custom")]; + 
tensor q_233_pad_0 = const()[name = tensor("q_233_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1669961664))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1671190528))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_233_cast = conv(dilations = var_11357, groups = var_31, pad = q_233_pad_0, pad_type = q_233_pad_type_0, strides = var_11355, weight = unet_up_blocks_0_attentions_2_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_459_cast)[name = tensor("q_233_cast")]; + tensor var_11361 = const()[name = tensor("op_11361"), val = tensor([1, 1])]; + tensor var_11363 = const()[name = tensor("op_11363"), val = tensor([1, 1])]; + tensor k_233_pad_type_0 = const()[name = tensor("k_233_pad_type_0"), val = tensor("custom")]; + tensor k_233_pad_0 = const()[name = tensor("k_233_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1671190720))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1672419584))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_233_cast = conv(dilations = var_11363, groups = var_31, pad = k_233_pad_0, pad_type = k_233_pad_type_0, strides = var_11361, weight = unet_up_blocks_0_attentions_2_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_459_cast)[name = tensor("k_233_cast")]; + tensor var_11367 = const()[name = tensor("op_11367"), val = tensor([1, 1])]; + tensor var_11369 = const()[name = tensor("op_11369"), val = tensor([1, 1])]; + tensor v_233_pad_type_0 = const()[name = tensor("v_233_pad_type_0"), val = tensor("custom")]; + tensor v_233_pad_0 = const()[name = tensor("v_233_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1672419776))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1673648640))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_233_cast = conv(dilations = var_11369, groups = var_31, pad = v_233_pad_0, pad_type = v_233_pad_type_0, strides = var_11367, weight = unet_up_blocks_0_attentions_2_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_459_cast)[name = tensor("v_233_cast")]; + tensor var_11373 = const()[name = tensor("op_11373"), val = tensor([2, 20, 64, -1])]; + tensor var_11374_cast = reshape(shape = var_11373, x = q_233_cast)[name = tensor("op_11374_cast")]; + tensor var_11375 = const()[name = tensor("op_11375"), val = tensor([2, 20, 64, -1])]; + tensor var_11376_cast = reshape(shape = var_11375, x = k_233_cast)[name = tensor("op_11376_cast")]; + tensor var_11377 = const()[name = tensor("op_11377"), val = tensor([2, 20, 64, -1])]; + tensor 
var_11378_cast = reshape(shape = var_11377, x = v_233_cast)[name = tensor("op_11378_cast")]; + tensor attn_weights_465_transpose_x_0 = const()[name = tensor("attn_weights_465_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_465_transpose_y_0 = const()[name = tensor("attn_weights_465_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_465_cast = matmul(transpose_x = attn_weights_465_transpose_x_0, transpose_y = attn_weights_465_transpose_y_0, x = var_11374_cast, y = var_11376_cast)[name = tensor("attn_weights_465_cast")]; + tensor attn_weights_467_cast = mul(x = attn_weights_465_cast, y = var_12_to_fp16)[name = tensor("attn_weights_467_cast")]; + tensor var_11382_cast = softmax(axis = var_18, x = attn_weights_467_cast)[name = tensor("op_11382_cast")]; + tensor attn_233_transpose_x_0 = const()[name = tensor("attn_233_transpose_x_0"), val = tensor(false)]; + tensor attn_233_transpose_y_0 = const()[name = tensor("attn_233_transpose_y_0"), val = tensor(true)]; + tensor attn_233_cast = matmul(transpose_x = attn_233_transpose_x_0, transpose_y = attn_233_transpose_y_0, x = var_11378_cast, y = var_11382_cast)[name = tensor("attn_233_cast")]; + tensor var_11386 = const()[name = tensor("op_11386"), val = tensor([2, 1280, 1, -1])]; + tensor input_667_cast = reshape(shape = var_11386, x = attn_233_cast)[name = tensor("input_667_cast")]; + tensor var_11391 = const()[name = tensor("op_11391"), val = tensor([1, 1])]; + tensor var_11393 = const()[name = tensor("op_11393"), val = tensor([1, 1])]; + tensor var_11395_pad_type_0 = const()[name = tensor("op_11395_pad_type_0"), val = tensor("custom")]; + tensor var_11395_pad_0 = const()[name = tensor("op_11395_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1673648832))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1674877696))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_4_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_4_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1674877888)))]; + tensor var_11395_cast = conv(bias = unet_up_blocks_0_attentions_2_transformer_blocks_4_attn1_to_out_0_bias_to_fp16, dilations = var_11393, groups = var_31, pad = var_11395_pad_0, pad_type = var_11395_pad_type_0, strides = var_11391, weight = unet_up_blocks_0_attentions_2_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized, x = input_667_cast)[name = tensor("op_11395_cast")]; + tensor inputs_351_cast = add(x = var_11395_cast, y = inputs_349_cast)[name = tensor("inputs_351_cast")]; + tensor var_11399 = const()[name = tensor("op_11399"), val = tensor([1])]; + tensor channels_mean_351_cast = reduce_mean(axes = var_11399, keep_dims = var_23, x = inputs_351_cast)[name = tensor("channels_mean_351_cast")]; + tensor zero_mean_351_cast = sub(x = inputs_351_cast, y = channels_mean_351_cast)[name = tensor("zero_mean_351_cast")]; + tensor zero_mean_sq_351_cast = mul(x = zero_mean_351_cast, y = zero_mean_351_cast)[name = tensor("zero_mean_sq_351_cast")]; + tensor var_11403 = const()[name = tensor("op_11403"), val = tensor([1])]; + 
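Every weight referenced in this hunk is stored in weight.bin as two blobs, a per-element index stream plus a small lookup table, and constexpr_lut_to_dense expands them into the dense fp16 tensor the conv ops consume. Judging from the gaps between consecutive offsets (for example, 1650029952 - 1648801088 = 1,228,864 bytes of indices for the blocks_3 attn2 to_q weight, which holds 1280 x 1280 = 1,638,400 elements), the palette works out to just over 6 bits per weight with a 64-entry fp16 table; that bit width is an inference from the offsets, not something the text declares. A toy NumPy illustration of the expansion, with all names and sizes hypothetical:

    import numpy as np

    def lut_to_dense(indices, lut, shape):
        # constexpr_lut_to_dense: gather each weight's fp16 value from the palette,
        # producing the dense tensor used by conv. The on-disk index stream is
        # bit-packed; here it is assumed already unpacked to one int per element.
        return lut[indices].astype(np.float16).reshape(shape)

    rng = np.random.default_rng(0)
    palette = rng.standard_normal(64).astype(np.float16)   # 64 entries ~ a 6-bit palette
    idx = rng.integers(0, 64, size=1280 * 1280)
    w = lut_to_dense(idx, palette, (1280, 1280, 1, 1))      # e.g. a 1x1 attention projection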
tensor var_11404_cast = reduce_mean(axes = var_11403, keep_dims = var_23, x = zero_mean_sq_351_cast)[name = tensor("op_11404_cast")]; + tensor var_11405_to_fp16 = const()[name = tensor("op_11405_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_11406_cast = add(x = var_11404_cast, y = var_11405_to_fp16)[name = tensor("op_11406_cast")]; + tensor denom_351_epsilon_0_to_fp16 = const()[name = tensor("denom_351_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_351_cast = rsqrt(epsilon = denom_351_epsilon_0_to_fp16, x = var_11406_cast)[name = tensor("denom_351_cast")]; + tensor out_351_cast = mul(x = zero_mean_351_cast, y = denom_351_cast)[name = tensor("out_351_cast")]; + tensor var_11410_to_fp16 = const()[name = tensor("op_11410_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1674880512)))]; + tensor var_11411_cast = add(x = out_351_cast, y = var_11410_to_fp16)[name = tensor("op_11411_cast")]; + tensor var_11413_to_fp16 = const()[name = tensor("op_11413_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1674883136)))]; + tensor hidden_states_461_cast = mul(x = var_11411_cast, y = var_11413_to_fp16)[name = tensor("hidden_states_461_cast")]; + tensor var_11420 = const()[name = tensor("op_11420"), val = tensor([1, 1])]; + tensor var_11422 = const()[name = tensor("op_11422"), val = tensor([1, 1])]; + tensor q_235_pad_type_0 = const()[name = tensor("q_235_pad_type_0"), val = tensor("custom")]; + tensor q_235_pad_0 = const()[name = tensor("q_235_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1674885760))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1676114624))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_235_cast = conv(dilations = var_11422, groups = var_31, pad = q_235_pad_0, pad_type = q_235_pad_type_0, strides = var_11420, weight = unet_up_blocks_0_attentions_2_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_461_cast)[name = tensor("q_235_cast")]; + tensor var_11426 = const()[name = tensor("op_11426"), val = tensor([1, 1])]; + tensor var_11428 = const()[name = tensor("op_11428"), val = tensor([1, 1])]; + tensor k_235_pad_type_0 = const()[name = tensor("k_235_pad_type_0"), val = tensor("custom")]; + tensor k_235_pad_0 = const()[name = tensor("k_235_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1676114816))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1678080960))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_235_cast = conv(dilations = var_11428, groups = var_31, pad = k_235_pad_0, pad_type = k_235_pad_type_0, strides = var_11426, weight = unet_up_blocks_0_attentions_2_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_235_cast")]; + tensor var_11432 = const()[name = tensor("op_11432"), val = tensor([1, 
1])]; + tensor var_11434 = const()[name = tensor("op_11434"), val = tensor([1, 1])]; + tensor v_235_pad_type_0 = const()[name = tensor("v_235_pad_type_0"), val = tensor("custom")]; + tensor v_235_pad_0 = const()[name = tensor("v_235_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1678081152))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1680047296))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_235_cast = conv(dilations = var_11434, groups = var_31, pad = v_235_pad_0, pad_type = v_235_pad_type_0, strides = var_11432, weight = unet_up_blocks_0_attentions_2_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_235_cast")]; + tensor var_11438 = const()[name = tensor("op_11438"), val = tensor([2, 20, 64, -1])]; + tensor var_11439_cast = reshape(shape = var_11438, x = q_235_cast)[name = tensor("op_11439_cast")]; + tensor var_11440 = const()[name = tensor("op_11440"), val = tensor([2, 20, 64, -1])]; + tensor var_11441_cast = reshape(shape = var_11440, x = k_235_cast)[name = tensor("op_11441_cast")]; + tensor var_11442 = const()[name = tensor("op_11442"), val = tensor([2, 20, 64, -1])]; + tensor var_11443_cast = reshape(shape = var_11442, x = v_235_cast)[name = tensor("op_11443_cast")]; + tensor attn_weights_469_transpose_x_0 = const()[name = tensor("attn_weights_469_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_469_transpose_y_0 = const()[name = tensor("attn_weights_469_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_469_cast = matmul(transpose_x = attn_weights_469_transpose_x_0, transpose_y = attn_weights_469_transpose_y_0, x = var_11439_cast, y = var_11441_cast)[name = tensor("attn_weights_469_cast")]; + tensor attn_weights_471_cast = mul(x = attn_weights_469_cast, y = var_12_to_fp16)[name = tensor("attn_weights_471_cast")]; + tensor var_11447_cast = softmax(axis = var_18, x = attn_weights_471_cast)[name = tensor("op_11447_cast")]; + tensor attn_235_transpose_x_0 = const()[name = tensor("attn_235_transpose_x_0"), val = tensor(false)]; + tensor attn_235_transpose_y_0 = const()[name = tensor("attn_235_transpose_y_0"), val = tensor(true)]; + tensor attn_235_cast = matmul(transpose_x = attn_235_transpose_x_0, transpose_y = attn_235_transpose_y_0, x = var_11443_cast, y = var_11447_cast)[name = tensor("attn_235_cast")]; + tensor var_11451 = const()[name = tensor("op_11451"), val = tensor([2, 1280, 1, -1])]; + tensor input_669_cast = reshape(shape = var_11451, x = attn_235_cast)[name = tensor("input_669_cast")]; + tensor var_11456 = const()[name = tensor("op_11456"), val = tensor([1, 1])]; + tensor var_11458 = const()[name = tensor("op_11458"), val = tensor([1, 1])]; + tensor var_11460_pad_type_0 = const()[name = tensor("op_11460_pad_type_0"), val = tensor("custom")]; + tensor var_11460_pad_0 = const()[name = tensor("op_11460_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1680047488))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(1681276352))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_4_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_4_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1681276544)))]; + tensor var_11460_cast = conv(bias = unet_up_blocks_0_attentions_2_transformer_blocks_4_attn2_to_out_0_bias_to_fp16, dilations = var_11458, groups = var_31, pad = var_11460_pad_0, pad_type = var_11460_pad_type_0, strides = var_11456, weight = unet_up_blocks_0_attentions_2_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized, x = input_669_cast)[name = tensor("op_11460_cast")]; + tensor inputs_353_cast = add(x = var_11460_cast, y = inputs_351_cast)[name = tensor("inputs_353_cast")]; + tensor var_11464 = const()[name = tensor("op_11464"), val = tensor([1])]; + tensor channels_mean_353_cast = reduce_mean(axes = var_11464, keep_dims = var_23, x = inputs_353_cast)[name = tensor("channels_mean_353_cast")]; + tensor zero_mean_353_cast = sub(x = inputs_353_cast, y = channels_mean_353_cast)[name = tensor("zero_mean_353_cast")]; + tensor zero_mean_sq_353_cast = mul(x = zero_mean_353_cast, y = zero_mean_353_cast)[name = tensor("zero_mean_sq_353_cast")]; + tensor var_11468 = const()[name = tensor("op_11468"), val = tensor([1])]; + tensor var_11469_cast = reduce_mean(axes = var_11468, keep_dims = var_23, x = zero_mean_sq_353_cast)[name = tensor("op_11469_cast")]; + tensor var_11470_to_fp16 = const()[name = tensor("op_11470_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_11471_cast = add(x = var_11469_cast, y = var_11470_to_fp16)[name = tensor("op_11471_cast")]; + tensor denom_353_epsilon_0_to_fp16 = const()[name = tensor("denom_353_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_353_cast = rsqrt(epsilon = denom_353_epsilon_0_to_fp16, x = var_11471_cast)[name = tensor("denom_353_cast")]; + tensor out_353_cast = mul(x = zero_mean_353_cast, y = denom_353_cast)[name = tensor("out_353_cast")]; + tensor var_11475_to_fp16 = const()[name = tensor("op_11475_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1681279168)))]; + tensor var_11476_cast = add(x = out_353_cast, y = var_11475_to_fp16)[name = tensor("op_11476_cast")]; + tensor var_11478_to_fp16 = const()[name = tensor("op_11478_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1681281792)))]; + tensor input_671_cast = mul(x = var_11476_cast, y = var_11478_to_fp16)[name = tensor("input_671_cast")]; + tensor var_11486 = const()[name = tensor("op_11486"), val = tensor([1, 1])]; + tensor var_11488 = const()[name = tensor("op_11488"), val = tensor([1, 1])]; + tensor var_11490_pad_type_0 = const()[name = tensor("op_11490_pad_type_0"), val = tensor("custom")]; + tensor var_11490_pad_0 = const()[name = tensor("op_11490_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1681284416))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1691114880))), name = 
tensor("unet_up_blocks_0_attentions_2_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_4_ff_net_0_proj_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1691115072))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1691122816))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_4_ff_net_0_proj_bias_to_fp16_palettized"), shape = tensor([10240])]; + tensor var_11490_cast = conv(bias = unet_up_blocks_0_attentions_2_transformer_blocks_4_ff_net_0_proj_bias_to_fp16_palettized, dilations = var_11488, groups = var_31, pad = var_11490_pad_0, pad_type = var_11490_pad_type_0, strides = var_11486, weight = unet_up_blocks_0_attentions_2_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized, x = input_671_cast)[name = tensor("op_11490_cast")]; + tensor var_11491_split_sizes_0 = const()[name = tensor("op_11491_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_11491_axis_0 = const()[name = tensor("op_11491_axis_0"), val = tensor(1)]; + tensor var_11491_cast_0, tensor var_11491_cast_1 = split(axis = var_11491_axis_0, split_sizes = var_11491_split_sizes_0, x = var_11490_cast)[name = tensor("op_11491_cast")]; + tensor var_11493_mode_0 = const()[name = tensor("op_11493_mode_0"), val = tensor("EXACT")]; + tensor var_11493_cast = gelu(mode = var_11493_mode_0, x = var_11491_cast_1)[name = tensor("op_11493_cast")]; + tensor input_673_cast = mul(x = var_11491_cast_0, y = var_11493_cast)[name = tensor("input_673_cast")]; + tensor var_11497 = const()[name = tensor("op_11497"), val = tensor([1, 1])]; + tensor var_11499 = const()[name = tensor("op_11499"), val = tensor([1, 1])]; + tensor var_11501_pad_type_0 = const()[name = tensor("op_11501_pad_type_0"), val = tensor("custom")]; + tensor var_11501_pad_0 = const()[name = tensor("op_11501_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1691123008))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1696038272))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_4_ff_net_2_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_4_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1696038464)))]; + tensor var_11501_cast = conv(bias = unet_up_blocks_0_attentions_2_transformer_blocks_4_ff_net_2_bias_to_fp16, dilations = var_11499, groups = var_31, pad = var_11501_pad_0, pad_type = var_11501_pad_type_0, strides = var_11497, weight = unet_up_blocks_0_attentions_2_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized, x = input_673_cast)[name = tensor("op_11501_cast")]; + tensor inputs_355_cast = add(x = var_11501_cast, y = inputs_353_cast)[name = tensor("inputs_355_cast")]; + tensor var_11511 = const()[name = tensor("op_11511"), val = tensor([1])]; + tensor channels_mean_355_cast = reduce_mean(axes = var_11511, keep_dims = var_23, x = inputs_355_cast)[name = tensor("channels_mean_355_cast")]; + tensor zero_mean_355_cast = sub(x = 
inputs_355_cast, y = channels_mean_355_cast)[name = tensor("zero_mean_355_cast")]; + tensor zero_mean_sq_355_cast = mul(x = zero_mean_355_cast, y = zero_mean_355_cast)[name = tensor("zero_mean_sq_355_cast")]; + tensor var_11515 = const()[name = tensor("op_11515"), val = tensor([1])]; + tensor var_11516_cast = reduce_mean(axes = var_11515, keep_dims = var_23, x = zero_mean_sq_355_cast)[name = tensor("op_11516_cast")]; + tensor var_11517_to_fp16 = const()[name = tensor("op_11517_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_11518_cast = add(x = var_11516_cast, y = var_11517_to_fp16)[name = tensor("op_11518_cast")]; + tensor denom_355_epsilon_0_to_fp16 = const()[name = tensor("denom_355_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_355_cast = rsqrt(epsilon = denom_355_epsilon_0_to_fp16, x = var_11518_cast)[name = tensor("denom_355_cast")]; + tensor out_355_cast = mul(x = zero_mean_355_cast, y = denom_355_cast)[name = tensor("out_355_cast")]; + tensor var_11522_to_fp16 = const()[name = tensor("op_11522_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1696041088)))]; + tensor var_11523_cast = add(x = out_355_cast, y = var_11522_to_fp16)[name = tensor("op_11523_cast")]; + tensor var_11525_to_fp16 = const()[name = tensor("op_11525_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1696043712)))]; + tensor hidden_states_465_cast = mul(x = var_11523_cast, y = var_11525_to_fp16)[name = tensor("hidden_states_465_cast")]; + tensor var_11532 = const()[name = tensor("op_11532"), val = tensor([1, 1])]; + tensor var_11534 = const()[name = tensor("op_11534"), val = tensor([1, 1])]; + tensor q_237_pad_type_0 = const()[name = tensor("q_237_pad_type_0"), val = tensor("custom")]; + tensor q_237_pad_0 = const()[name = tensor("q_237_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1696046336))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1697275200))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_237_cast = conv(dilations = var_11534, groups = var_31, pad = q_237_pad_0, pad_type = q_237_pad_type_0, strides = var_11532, weight = unet_up_blocks_0_attentions_2_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_465_cast)[name = tensor("q_237_cast")]; + tensor var_11538 = const()[name = tensor("op_11538"), val = tensor([1, 1])]; + tensor var_11540 = const()[name = tensor("op_11540"), val = tensor([1, 1])]; + tensor k_237_pad_type_0 = const()[name = tensor("k_237_pad_type_0"), val = tensor("custom")]; + tensor k_237_pad_0 = const()[name = tensor("k_237_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1697275392))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1698504256))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_237_cast = conv(dilations = var_11540, groups = var_31, pad = 
k_237_pad_0, pad_type = k_237_pad_type_0, strides = var_11538, weight = unet_up_blocks_0_attentions_2_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_465_cast)[name = tensor("k_237_cast")]; + tensor var_11544 = const()[name = tensor("op_11544"), val = tensor([1, 1])]; + tensor var_11546 = const()[name = tensor("op_11546"), val = tensor([1, 1])]; + tensor v_237_pad_type_0 = const()[name = tensor("v_237_pad_type_0"), val = tensor("custom")]; + tensor v_237_pad_0 = const()[name = tensor("v_237_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1698504448))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1699733312))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_237_cast = conv(dilations = var_11546, groups = var_31, pad = v_237_pad_0, pad_type = v_237_pad_type_0, strides = var_11544, weight = unet_up_blocks_0_attentions_2_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_465_cast)[name = tensor("v_237_cast")]; + tensor var_11550 = const()[name = tensor("op_11550"), val = tensor([2, 20, 64, -1])]; + tensor var_11551_cast = reshape(shape = var_11550, x = q_237_cast)[name = tensor("op_11551_cast")]; + tensor var_11552 = const()[name = tensor("op_11552"), val = tensor([2, 20, 64, -1])]; + tensor var_11553_cast = reshape(shape = var_11552, x = k_237_cast)[name = tensor("op_11553_cast")]; + tensor var_11554 = const()[name = tensor("op_11554"), val = tensor([2, 20, 64, -1])]; + tensor var_11555_cast = reshape(shape = var_11554, x = v_237_cast)[name = tensor("op_11555_cast")]; + tensor attn_weights_473_transpose_x_0 = const()[name = tensor("attn_weights_473_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_473_transpose_y_0 = const()[name = tensor("attn_weights_473_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_473_cast = matmul(transpose_x = attn_weights_473_transpose_x_0, transpose_y = attn_weights_473_transpose_y_0, x = var_11551_cast, y = var_11553_cast)[name = tensor("attn_weights_473_cast")]; + tensor attn_weights_475_cast = mul(x = attn_weights_473_cast, y = var_12_to_fp16)[name = tensor("attn_weights_475_cast")]; + tensor var_11559_cast = softmax(axis = var_18, x = attn_weights_475_cast)[name = tensor("op_11559_cast")]; + tensor attn_237_transpose_x_0 = const()[name = tensor("attn_237_transpose_x_0"), val = tensor(false)]; + tensor attn_237_transpose_y_0 = const()[name = tensor("attn_237_transpose_y_0"), val = tensor(true)]; + tensor attn_237_cast = matmul(transpose_x = attn_237_transpose_x_0, transpose_y = attn_237_transpose_y_0, x = var_11555_cast, y = var_11559_cast)[name = tensor("attn_237_cast")]; + tensor var_11563 = const()[name = tensor("op_11563"), val = tensor([2, 1280, 1, -1])]; + tensor input_675_cast = reshape(shape = var_11563, x = attn_237_cast)[name = tensor("input_675_cast")]; + tensor var_11568 = const()[name = tensor("op_11568"), val = tensor([1, 1])]; + tensor var_11570 = const()[name = tensor("op_11570"), val = tensor([1, 1])]; + tensor var_11572_pad_type_0 = const()[name = tensor("op_11572_pad_type_0"), val = tensor("custom")]; + tensor var_11572_pad_0 = const()[name = tensor("op_11572_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
unet_up_blocks_0_attentions_2_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1699733504))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1700962368))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_5_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_5_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1700962560)))]; + tensor var_11572_cast = conv(bias = unet_up_blocks_0_attentions_2_transformer_blocks_5_attn1_to_out_0_bias_to_fp16, dilations = var_11570, groups = var_31, pad = var_11572_pad_0, pad_type = var_11572_pad_type_0, strides = var_11568, weight = unet_up_blocks_0_attentions_2_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized, x = input_675_cast)[name = tensor("op_11572_cast")]; + tensor inputs_357_cast = add(x = var_11572_cast, y = inputs_355_cast)[name = tensor("inputs_357_cast")]; + tensor var_11576 = const()[name = tensor("op_11576"), val = tensor([1])]; + tensor channels_mean_357_cast = reduce_mean(axes = var_11576, keep_dims = var_23, x = inputs_357_cast)[name = tensor("channels_mean_357_cast")]; + tensor zero_mean_357_cast = sub(x = inputs_357_cast, y = channels_mean_357_cast)[name = tensor("zero_mean_357_cast")]; + tensor zero_mean_sq_357_cast = mul(x = zero_mean_357_cast, y = zero_mean_357_cast)[name = tensor("zero_mean_sq_357_cast")]; + tensor var_11580 = const()[name = tensor("op_11580"), val = tensor([1])]; + tensor var_11581_cast = reduce_mean(axes = var_11580, keep_dims = var_23, x = zero_mean_sq_357_cast)[name = tensor("op_11581_cast")]; + tensor var_11582_to_fp16 = const()[name = tensor("op_11582_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_11583_cast = add(x = var_11581_cast, y = var_11582_to_fp16)[name = tensor("op_11583_cast")]; + tensor denom_357_epsilon_0_to_fp16 = const()[name = tensor("denom_357_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_357_cast = rsqrt(epsilon = denom_357_epsilon_0_to_fp16, x = var_11583_cast)[name = tensor("denom_357_cast")]; + tensor out_357_cast = mul(x = zero_mean_357_cast, y = denom_357_cast)[name = tensor("out_357_cast")]; + tensor var_11587_to_fp16 = const()[name = tensor("op_11587_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1700965184)))]; + tensor var_11588_cast = add(x = out_357_cast, y = var_11587_to_fp16)[name = tensor("op_11588_cast")]; + tensor var_11590_to_fp16 = const()[name = tensor("op_11590_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1700967808)))]; + tensor hidden_states_467_cast = mul(x = var_11588_cast, y = var_11590_to_fp16)[name = tensor("hidden_states_467_cast")]; + tensor var_11597 = const()[name = tensor("op_11597"), val = tensor([1, 1])]; + tensor var_11599 = const()[name = tensor("op_11599"), val = tensor([1, 1])]; + tensor q_239_pad_type_0 = const()[name = tensor("q_239_pad_type_0"), val = tensor("custom")]; + tensor q_239_pad_0 = const()[name = tensor("q_239_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices 
= tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1700970432))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1702199296))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_239_cast = conv(dilations = var_11599, groups = var_31, pad = q_239_pad_0, pad_type = q_239_pad_type_0, strides = var_11597, weight = unet_up_blocks_0_attentions_2_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_467_cast)[name = tensor("q_239_cast")]; + tensor var_11603 = const()[name = tensor("op_11603"), val = tensor([1, 1])]; + tensor var_11605 = const()[name = tensor("op_11605"), val = tensor([1, 1])]; + tensor k_239_pad_type_0 = const()[name = tensor("k_239_pad_type_0"), val = tensor("custom")]; + tensor k_239_pad_0 = const()[name = tensor("k_239_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1702199488))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1704165632))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_239_cast = conv(dilations = var_11605, groups = var_31, pad = k_239_pad_0, pad_type = k_239_pad_type_0, strides = var_11603, weight = unet_up_blocks_0_attentions_2_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_239_cast")]; + tensor var_11609 = const()[name = tensor("op_11609"), val = tensor([1, 1])]; + tensor var_11611 = const()[name = tensor("op_11611"), val = tensor([1, 1])]; + tensor v_239_pad_type_0 = const()[name = tensor("v_239_pad_type_0"), val = tensor("custom")]; + tensor v_239_pad_0 = const()[name = tensor("v_239_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1704165824))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1706131968))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_239_cast = conv(dilations = var_11611, groups = var_31, pad = v_239_pad_0, pad_type = v_239_pad_type_0, strides = var_11609, weight = unet_up_blocks_0_attentions_2_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_239_cast")]; + tensor var_11615 = const()[name = tensor("op_11615"), val = tensor([2, 20, 64, -1])]; + tensor var_11616_cast = reshape(shape = var_11615, x = q_239_cast)[name = tensor("op_11616_cast")]; + tensor var_11617 = const()[name = tensor("op_11617"), val = tensor([2, 20, 64, -1])]; + tensor var_11618_cast = reshape(shape = var_11617, x = k_239_cast)[name = tensor("op_11618_cast")]; + tensor var_11619 = const()[name = tensor("op_11619"), val = tensor([2, 20, 64, -1])]; + tensor var_11620_cast = reshape(shape = var_11619, x = v_239_cast)[name = tensor("op_11620_cast")]; + tensor attn_weights_477_transpose_x_0 = const()[name = tensor("attn_weights_477_transpose_x_0"), val = tensor(true)]; + tensor 
attn_weights_477_transpose_y_0 = const()[name = tensor("attn_weights_477_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_477_cast = matmul(transpose_x = attn_weights_477_transpose_x_0, transpose_y = attn_weights_477_transpose_y_0, x = var_11616_cast, y = var_11618_cast)[name = tensor("attn_weights_477_cast")]; + tensor attn_weights_479_cast = mul(x = attn_weights_477_cast, y = var_12_to_fp16)[name = tensor("attn_weights_479_cast")]; + tensor var_11624_cast = softmax(axis = var_18, x = attn_weights_479_cast)[name = tensor("op_11624_cast")]; + tensor attn_239_transpose_x_0 = const()[name = tensor("attn_239_transpose_x_0"), val = tensor(false)]; + tensor attn_239_transpose_y_0 = const()[name = tensor("attn_239_transpose_y_0"), val = tensor(true)]; + tensor attn_239_cast = matmul(transpose_x = attn_239_transpose_x_0, transpose_y = attn_239_transpose_y_0, x = var_11620_cast, y = var_11624_cast)[name = tensor("attn_239_cast")]; + tensor var_11628 = const()[name = tensor("op_11628"), val = tensor([2, 1280, 1, -1])]; + tensor input_677_cast = reshape(shape = var_11628, x = attn_239_cast)[name = tensor("input_677_cast")]; + tensor var_11633 = const()[name = tensor("op_11633"), val = tensor([1, 1])]; + tensor var_11635 = const()[name = tensor("op_11635"), val = tensor([1, 1])]; + tensor var_11637_pad_type_0 = const()[name = tensor("op_11637_pad_type_0"), val = tensor("custom")]; + tensor var_11637_pad_0 = const()[name = tensor("op_11637_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1706132160))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1707361024))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_5_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_5_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1707361216)))]; + tensor var_11637_cast = conv(bias = unet_up_blocks_0_attentions_2_transformer_blocks_5_attn2_to_out_0_bias_to_fp16, dilations = var_11635, groups = var_31, pad = var_11637_pad_0, pad_type = var_11637_pad_type_0, strides = var_11633, weight = unet_up_blocks_0_attentions_2_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized, x = input_677_cast)[name = tensor("op_11637_cast")]; + tensor inputs_359_cast = add(x = var_11637_cast, y = inputs_357_cast)[name = tensor("inputs_359_cast")]; + tensor var_11641 = const()[name = tensor("op_11641"), val = tensor([1])]; + tensor channels_mean_359_cast = reduce_mean(axes = var_11641, keep_dims = var_23, x = inputs_359_cast)[name = tensor("channels_mean_359_cast")]; + tensor zero_mean_359_cast = sub(x = inputs_359_cast, y = channels_mean_359_cast)[name = tensor("zero_mean_359_cast")]; + tensor zero_mean_sq_359_cast = mul(x = zero_mean_359_cast, y = zero_mean_359_cast)[name = tensor("zero_mean_sq_359_cast")]; + tensor var_11645 = const()[name = tensor("op_11645"), val = tensor([1])]; + tensor var_11646_cast = reduce_mean(axes = var_11645, keep_dims = var_23, x = zero_mean_sq_359_cast)[name = tensor("op_11646_cast")]; + tensor var_11647_to_fp16 = const()[name = tensor("op_11647_to_fp16"), val = 
tensor(0x1.5p-17)]; + tensor var_11648_cast = add(x = var_11646_cast, y = var_11647_to_fp16)[name = tensor("op_11648_cast")]; + tensor denom_359_epsilon_0_to_fp16 = const()[name = tensor("denom_359_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_359_cast = rsqrt(epsilon = denom_359_epsilon_0_to_fp16, x = var_11648_cast)[name = tensor("denom_359_cast")]; + tensor out_359_cast = mul(x = zero_mean_359_cast, y = denom_359_cast)[name = tensor("out_359_cast")]; + tensor var_11652_to_fp16 = const()[name = tensor("op_11652_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1707363840)))]; + tensor var_11653_cast = add(x = out_359_cast, y = var_11652_to_fp16)[name = tensor("op_11653_cast")]; + tensor var_11655_to_fp16 = const()[name = tensor("op_11655_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1707366464)))]; + tensor input_679_cast = mul(x = var_11653_cast, y = var_11655_to_fp16)[name = tensor("input_679_cast")]; + tensor var_11663 = const()[name = tensor("op_11663"), val = tensor([1, 1])]; + tensor var_11665 = const()[name = tensor("op_11665"), val = tensor([1, 1])]; + tensor var_11667_pad_type_0 = const()[name = tensor("op_11667_pad_type_0"), val = tensor("custom")]; + tensor var_11667_pad_0 = const()[name = tensor("op_11667_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1707369088))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1717199552))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_5_ff_net_0_proj_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1717199744))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1717207488))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_5_ff_net_0_proj_bias_to_fp16_palettized"), shape = tensor([10240])]; + tensor var_11667_cast = conv(bias = unet_up_blocks_0_attentions_2_transformer_blocks_5_ff_net_0_proj_bias_to_fp16_palettized, dilations = var_11665, groups = var_31, pad = var_11667_pad_0, pad_type = var_11667_pad_type_0, strides = var_11663, weight = unet_up_blocks_0_attentions_2_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized, x = input_679_cast)[name = tensor("op_11667_cast")]; + tensor var_11668_split_sizes_0 = const()[name = tensor("op_11668_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_11668_axis_0 = const()[name = tensor("op_11668_axis_0"), val = tensor(1)]; + tensor var_11668_cast_0, tensor var_11668_cast_1 = split(axis = var_11668_axis_0, split_sizes = var_11668_split_sizes_0, x = var_11667_cast)[name = tensor("op_11668_cast")]; + tensor var_11670_mode_0 = const()[name = tensor("op_11670_mode_0"), val = tensor("EXACT")]; + tensor var_11670_cast = gelu(mode = var_11670_mode_0, x = var_11668_cast_1)[name = tensor("op_11670_cast")]; + tensor input_681_cast = mul(x = var_11668_cast_0, y = var_11670_cast)[name = tensor("input_681_cast")]; + tensor var_11674 = const()[name = tensor("op_11674"), val = tensor([1, 1])]; + tensor var_11676 = const()[name = 
tensor("op_11676"), val = tensor([1, 1])]; + tensor var_11678_pad_type_0 = const()[name = tensor("op_11678_pad_type_0"), val = tensor("custom")]; + tensor var_11678_pad_0 = const()[name = tensor("op_11678_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1717207680))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1722122944))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_5_ff_net_2_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_5_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1722123136)))]; + tensor var_11678_cast = conv(bias = unet_up_blocks_0_attentions_2_transformer_blocks_5_ff_net_2_bias_to_fp16, dilations = var_11676, groups = var_31, pad = var_11678_pad_0, pad_type = var_11678_pad_type_0, strides = var_11674, weight = unet_up_blocks_0_attentions_2_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized, x = input_681_cast)[name = tensor("op_11678_cast")]; + tensor inputs_361_cast = add(x = var_11678_cast, y = inputs_359_cast)[name = tensor("inputs_361_cast")]; + tensor var_11688 = const()[name = tensor("op_11688"), val = tensor([1])]; + tensor channels_mean_361_cast = reduce_mean(axes = var_11688, keep_dims = var_23, x = inputs_361_cast)[name = tensor("channels_mean_361_cast")]; + tensor zero_mean_361_cast = sub(x = inputs_361_cast, y = channels_mean_361_cast)[name = tensor("zero_mean_361_cast")]; + tensor zero_mean_sq_361_cast = mul(x = zero_mean_361_cast, y = zero_mean_361_cast)[name = tensor("zero_mean_sq_361_cast")]; + tensor var_11692 = const()[name = tensor("op_11692"), val = tensor([1])]; + tensor var_11693_cast = reduce_mean(axes = var_11692, keep_dims = var_23, x = zero_mean_sq_361_cast)[name = tensor("op_11693_cast")]; + tensor var_11694_to_fp16 = const()[name = tensor("op_11694_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_11695_cast = add(x = var_11693_cast, y = var_11694_to_fp16)[name = tensor("op_11695_cast")]; + tensor denom_361_epsilon_0_to_fp16 = const()[name = tensor("denom_361_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_361_cast = rsqrt(epsilon = denom_361_epsilon_0_to_fp16, x = var_11695_cast)[name = tensor("denom_361_cast")]; + tensor out_361_cast = mul(x = zero_mean_361_cast, y = denom_361_cast)[name = tensor("out_361_cast")]; + tensor var_11699_to_fp16 = const()[name = tensor("op_11699_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1722125760)))]; + tensor var_11700_cast = add(x = out_361_cast, y = var_11699_to_fp16)[name = tensor("op_11700_cast")]; + tensor var_11702_to_fp16 = const()[name = tensor("op_11702_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1722128384)))]; + tensor hidden_states_471_cast = mul(x = var_11700_cast, y = var_11702_to_fp16)[name = tensor("hidden_states_471_cast")]; + tensor var_11709 = const()[name = tensor("op_11709"), val = tensor([1, 1])]; + tensor var_11711 = const()[name = tensor("op_11711"), val = tensor([1, 1])]; + tensor q_241_pad_type_0 = const()[name = tensor("q_241_pad_type_0"), val = tensor("custom")]; + 
tensor q_241_pad_0 = const()[name = tensor("q_241_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1722131008))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1723359872))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_241_cast = conv(dilations = var_11711, groups = var_31, pad = q_241_pad_0, pad_type = q_241_pad_type_0, strides = var_11709, weight = unet_up_blocks_0_attentions_2_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_471_cast)[name = tensor("q_241_cast")]; + tensor var_11715 = const()[name = tensor("op_11715"), val = tensor([1, 1])]; + tensor var_11717 = const()[name = tensor("op_11717"), val = tensor([1, 1])]; + tensor k_241_pad_type_0 = const()[name = tensor("k_241_pad_type_0"), val = tensor("custom")]; + tensor k_241_pad_0 = const()[name = tensor("k_241_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1723360064))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1724588928))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_241_cast = conv(dilations = var_11717, groups = var_31, pad = k_241_pad_0, pad_type = k_241_pad_type_0, strides = var_11715, weight = unet_up_blocks_0_attentions_2_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_471_cast)[name = tensor("k_241_cast")]; + tensor var_11721 = const()[name = tensor("op_11721"), val = tensor([1, 1])]; + tensor var_11723 = const()[name = tensor("op_11723"), val = tensor([1, 1])]; + tensor v_241_pad_type_0 = const()[name = tensor("v_241_pad_type_0"), val = tensor("custom")]; + tensor v_241_pad_0 = const()[name = tensor("v_241_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1724589120))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1725817984))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_241_cast = conv(dilations = var_11723, groups = var_31, pad = v_241_pad_0, pad_type = v_241_pad_type_0, strides = var_11721, weight = unet_up_blocks_0_attentions_2_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_471_cast)[name = tensor("v_241_cast")]; + tensor var_11727 = const()[name = tensor("op_11727"), val = tensor([2, 20, 64, -1])]; + tensor var_11728_cast = reshape(shape = var_11727, x = q_241_cast)[name = tensor("op_11728_cast")]; + tensor var_11729 = const()[name = tensor("op_11729"), val = tensor([2, 20, 64, -1])]; + tensor var_11730_cast = reshape(shape = var_11729, x = k_241_cast)[name = tensor("op_11730_cast")]; + tensor var_11731 = const()[name = tensor("op_11731"), val = tensor([2, 20, 64, -1])]; + tensor 
var_11732_cast = reshape(shape = var_11731, x = v_241_cast)[name = tensor("op_11732_cast")]; + tensor attn_weights_481_transpose_x_0 = const()[name = tensor("attn_weights_481_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_481_transpose_y_0 = const()[name = tensor("attn_weights_481_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_481_cast = matmul(transpose_x = attn_weights_481_transpose_x_0, transpose_y = attn_weights_481_transpose_y_0, x = var_11728_cast, y = var_11730_cast)[name = tensor("attn_weights_481_cast")]; + tensor attn_weights_483_cast = mul(x = attn_weights_481_cast, y = var_12_to_fp16)[name = tensor("attn_weights_483_cast")]; + tensor var_11736_cast = softmax(axis = var_18, x = attn_weights_483_cast)[name = tensor("op_11736_cast")]; + tensor attn_241_transpose_x_0 = const()[name = tensor("attn_241_transpose_x_0"), val = tensor(false)]; + tensor attn_241_transpose_y_0 = const()[name = tensor("attn_241_transpose_y_0"), val = tensor(true)]; + tensor attn_241_cast = matmul(transpose_x = attn_241_transpose_x_0, transpose_y = attn_241_transpose_y_0, x = var_11732_cast, y = var_11736_cast)[name = tensor("attn_241_cast")]; + tensor var_11740 = const()[name = tensor("op_11740"), val = tensor([2, 1280, 1, -1])]; + tensor input_683_cast = reshape(shape = var_11740, x = attn_241_cast)[name = tensor("input_683_cast")]; + tensor var_11745 = const()[name = tensor("op_11745"), val = tensor([1, 1])]; + tensor var_11747 = const()[name = tensor("op_11747"), val = tensor([1, 1])]; + tensor var_11749_pad_type_0 = const()[name = tensor("op_11749_pad_type_0"), val = tensor("custom")]; + tensor var_11749_pad_0 = const()[name = tensor("op_11749_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1725818176))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1727047040))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_6_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_6_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1727047232)))]; + tensor var_11749_cast = conv(bias = unet_up_blocks_0_attentions_2_transformer_blocks_6_attn1_to_out_0_bias_to_fp16, dilations = var_11747, groups = var_31, pad = var_11749_pad_0, pad_type = var_11749_pad_type_0, strides = var_11745, weight = unet_up_blocks_0_attentions_2_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized, x = input_683_cast)[name = tensor("op_11749_cast")]; + tensor inputs_363_cast = add(x = var_11749_cast, y = inputs_361_cast)[name = tensor("inputs_363_cast")]; + tensor var_11753 = const()[name = tensor("op_11753"), val = tensor([1])]; + tensor channels_mean_363_cast = reduce_mean(axes = var_11753, keep_dims = var_23, x = inputs_363_cast)[name = tensor("channels_mean_363_cast")]; + tensor zero_mean_363_cast = sub(x = inputs_363_cast, y = channels_mean_363_cast)[name = tensor("zero_mean_363_cast")]; + tensor zero_mean_sq_363_cast = mul(x = zero_mean_363_cast, y = zero_mean_363_cast)[name = tensor("zero_mean_sq_363_cast")]; + tensor var_11757 = const()[name = tensor("op_11757"), val = tensor([1])]; + 
tensor var_11758_cast = reduce_mean(axes = var_11757, keep_dims = var_23, x = zero_mean_sq_363_cast)[name = tensor("op_11758_cast")]; + tensor var_11759_to_fp16 = const()[name = tensor("op_11759_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_11760_cast = add(x = var_11758_cast, y = var_11759_to_fp16)[name = tensor("op_11760_cast")]; + tensor denom_363_epsilon_0_to_fp16 = const()[name = tensor("denom_363_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_363_cast = rsqrt(epsilon = denom_363_epsilon_0_to_fp16, x = var_11760_cast)[name = tensor("denom_363_cast")]; + tensor out_363_cast = mul(x = zero_mean_363_cast, y = denom_363_cast)[name = tensor("out_363_cast")]; + tensor var_11764_to_fp16 = const()[name = tensor("op_11764_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1727049856)))]; + tensor var_11765_cast = add(x = out_363_cast, y = var_11764_to_fp16)[name = tensor("op_11765_cast")]; + tensor var_11767_to_fp16 = const()[name = tensor("op_11767_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1727052480)))]; + tensor hidden_states_473_cast = mul(x = var_11765_cast, y = var_11767_to_fp16)[name = tensor("hidden_states_473_cast")]; + tensor var_11774 = const()[name = tensor("op_11774"), val = tensor([1, 1])]; + tensor var_11776 = const()[name = tensor("op_11776"), val = tensor([1, 1])]; + tensor q_243_pad_type_0 = const()[name = tensor("q_243_pad_type_0"), val = tensor("custom")]; + tensor q_243_pad_0 = const()[name = tensor("q_243_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1727055104))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1728283968))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_243_cast = conv(dilations = var_11776, groups = var_31, pad = q_243_pad_0, pad_type = q_243_pad_type_0, strides = var_11774, weight = unet_up_blocks_0_attentions_2_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_473_cast)[name = tensor("q_243_cast")]; + tensor var_11780 = const()[name = tensor("op_11780"), val = tensor([1, 1])]; + tensor var_11782 = const()[name = tensor("op_11782"), val = tensor([1, 1])]; + tensor k_243_pad_type_0 = const()[name = tensor("k_243_pad_type_0"), val = tensor("custom")]; + tensor k_243_pad_0 = const()[name = tensor("k_243_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1728284160))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1730250304))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_243_cast = conv(dilations = var_11782, groups = var_31, pad = k_243_pad_0, pad_type = k_243_pad_type_0, strides = var_11780, weight = unet_up_blocks_0_attentions_2_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_243_cast")]; + tensor var_11786 = const()[name = tensor("op_11786"), val = tensor([1, 
1])]; + tensor var_11788 = const()[name = tensor("op_11788"), val = tensor([1, 1])]; + tensor v_243_pad_type_0 = const()[name = tensor("v_243_pad_type_0"), val = tensor("custom")]; + tensor v_243_pad_0 = const()[name = tensor("v_243_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1730250496))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1732216640))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_243_cast = conv(dilations = var_11788, groups = var_31, pad = v_243_pad_0, pad_type = v_243_pad_type_0, strides = var_11786, weight = unet_up_blocks_0_attentions_2_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_243_cast")]; + tensor var_11792 = const()[name = tensor("op_11792"), val = tensor([2, 20, 64, -1])]; + tensor var_11793_cast = reshape(shape = var_11792, x = q_243_cast)[name = tensor("op_11793_cast")]; + tensor var_11794 = const()[name = tensor("op_11794"), val = tensor([2, 20, 64, -1])]; + tensor var_11795_cast = reshape(shape = var_11794, x = k_243_cast)[name = tensor("op_11795_cast")]; + tensor var_11796 = const()[name = tensor("op_11796"), val = tensor([2, 20, 64, -1])]; + tensor var_11797_cast = reshape(shape = var_11796, x = v_243_cast)[name = tensor("op_11797_cast")]; + tensor attn_weights_485_transpose_x_0 = const()[name = tensor("attn_weights_485_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_485_transpose_y_0 = const()[name = tensor("attn_weights_485_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_485_cast = matmul(transpose_x = attn_weights_485_transpose_x_0, transpose_y = attn_weights_485_transpose_y_0, x = var_11793_cast, y = var_11795_cast)[name = tensor("attn_weights_485_cast")]; + tensor attn_weights_487_cast = mul(x = attn_weights_485_cast, y = var_12_to_fp16)[name = tensor("attn_weights_487_cast")]; + tensor var_11801_cast = softmax(axis = var_18, x = attn_weights_487_cast)[name = tensor("op_11801_cast")]; + tensor attn_243_transpose_x_0 = const()[name = tensor("attn_243_transpose_x_0"), val = tensor(false)]; + tensor attn_243_transpose_y_0 = const()[name = tensor("attn_243_transpose_y_0"), val = tensor(true)]; + tensor attn_243_cast = matmul(transpose_x = attn_243_transpose_x_0, transpose_y = attn_243_transpose_y_0, x = var_11797_cast, y = var_11801_cast)[name = tensor("attn_243_cast")]; + tensor var_11805 = const()[name = tensor("op_11805"), val = tensor([2, 1280, 1, -1])]; + tensor input_685_cast = reshape(shape = var_11805, x = attn_243_cast)[name = tensor("input_685_cast")]; + tensor var_11810 = const()[name = tensor("op_11810"), val = tensor([1, 1])]; + tensor var_11812 = const()[name = tensor("op_11812"), val = tensor([1, 1])]; + tensor var_11814_pad_type_0 = const()[name = tensor("op_11814_pad_type_0"), val = tensor("custom")]; + tensor var_11814_pad_0 = const()[name = tensor("op_11814_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1732216832))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(1733445696))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_6_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_6_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1733445888)))]; + tensor var_11814_cast = conv(bias = unet_up_blocks_0_attentions_2_transformer_blocks_6_attn2_to_out_0_bias_to_fp16, dilations = var_11812, groups = var_31, pad = var_11814_pad_0, pad_type = var_11814_pad_type_0, strides = var_11810, weight = unet_up_blocks_0_attentions_2_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized, x = input_685_cast)[name = tensor("op_11814_cast")]; + tensor inputs_365_cast = add(x = var_11814_cast, y = inputs_363_cast)[name = tensor("inputs_365_cast")]; + tensor var_11818 = const()[name = tensor("op_11818"), val = tensor([1])]; + tensor channels_mean_365_cast = reduce_mean(axes = var_11818, keep_dims = var_23, x = inputs_365_cast)[name = tensor("channels_mean_365_cast")]; + tensor zero_mean_365_cast = sub(x = inputs_365_cast, y = channels_mean_365_cast)[name = tensor("zero_mean_365_cast")]; + tensor zero_mean_sq_365_cast = mul(x = zero_mean_365_cast, y = zero_mean_365_cast)[name = tensor("zero_mean_sq_365_cast")]; + tensor var_11822 = const()[name = tensor("op_11822"), val = tensor([1])]; + tensor var_11823_cast = reduce_mean(axes = var_11822, keep_dims = var_23, x = zero_mean_sq_365_cast)[name = tensor("op_11823_cast")]; + tensor var_11824_to_fp16 = const()[name = tensor("op_11824_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_11825_cast = add(x = var_11823_cast, y = var_11824_to_fp16)[name = tensor("op_11825_cast")]; + tensor denom_365_epsilon_0_to_fp16 = const()[name = tensor("denom_365_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_365_cast = rsqrt(epsilon = denom_365_epsilon_0_to_fp16, x = var_11825_cast)[name = tensor("denom_365_cast")]; + tensor out_365_cast = mul(x = zero_mean_365_cast, y = denom_365_cast)[name = tensor("out_365_cast")]; + tensor var_11829_to_fp16 = const()[name = tensor("op_11829_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1733448512)))]; + tensor var_11830_cast = add(x = out_365_cast, y = var_11829_to_fp16)[name = tensor("op_11830_cast")]; + tensor var_11832_to_fp16 = const()[name = tensor("op_11832_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1733451136)))]; + tensor input_687_cast = mul(x = var_11830_cast, y = var_11832_to_fp16)[name = tensor("input_687_cast")]; + tensor var_11840 = const()[name = tensor("op_11840"), val = tensor([1, 1])]; + tensor var_11842 = const()[name = tensor("op_11842"), val = tensor([1, 1])]; + tensor var_11844_pad_type_0 = const()[name = tensor("op_11844_pad_type_0"), val = tensor("custom")]; + tensor var_11844_pad_0 = const()[name = tensor("op_11844_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1733453760))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1743284224))), name = 
tensor("unet_up_blocks_0_attentions_2_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_6_ff_net_0_proj_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1743284416))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1743292160))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_6_ff_net_0_proj_bias_to_fp16_palettized"), shape = tensor([10240])]; + tensor var_11844_cast = conv(bias = unet_up_blocks_0_attentions_2_transformer_blocks_6_ff_net_0_proj_bias_to_fp16_palettized, dilations = var_11842, groups = var_31, pad = var_11844_pad_0, pad_type = var_11844_pad_type_0, strides = var_11840, weight = unet_up_blocks_0_attentions_2_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized, x = input_687_cast)[name = tensor("op_11844_cast")]; + tensor var_11845_split_sizes_0 = const()[name = tensor("op_11845_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_11845_axis_0 = const()[name = tensor("op_11845_axis_0"), val = tensor(1)]; + tensor var_11845_cast_0, tensor var_11845_cast_1 = split(axis = var_11845_axis_0, split_sizes = var_11845_split_sizes_0, x = var_11844_cast)[name = tensor("op_11845_cast")]; + tensor var_11847_mode_0 = const()[name = tensor("op_11847_mode_0"), val = tensor("EXACT")]; + tensor var_11847_cast = gelu(mode = var_11847_mode_0, x = var_11845_cast_1)[name = tensor("op_11847_cast")]; + tensor input_689_cast = mul(x = var_11845_cast_0, y = var_11847_cast)[name = tensor("input_689_cast")]; + tensor var_11851 = const()[name = tensor("op_11851"), val = tensor([1, 1])]; + tensor var_11853 = const()[name = tensor("op_11853"), val = tensor([1, 1])]; + tensor var_11855_pad_type_0 = const()[name = tensor("op_11855_pad_type_0"), val = tensor("custom")]; + tensor var_11855_pad_0 = const()[name = tensor("op_11855_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1743292352))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1748207616))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_6_ff_net_2_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_6_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1748207808)))]; + tensor var_11855_cast = conv(bias = unet_up_blocks_0_attentions_2_transformer_blocks_6_ff_net_2_bias_to_fp16, dilations = var_11853, groups = var_31, pad = var_11855_pad_0, pad_type = var_11855_pad_type_0, strides = var_11851, weight = unet_up_blocks_0_attentions_2_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized, x = input_689_cast)[name = tensor("op_11855_cast")]; + tensor inputs_367_cast = add(x = var_11855_cast, y = inputs_365_cast)[name = tensor("inputs_367_cast")]; + tensor var_11865 = const()[name = tensor("op_11865"), val = tensor([1])]; + tensor channels_mean_367_cast = reduce_mean(axes = var_11865, keep_dims = var_23, x = inputs_367_cast)[name = tensor("channels_mean_367_cast")]; + tensor zero_mean_367_cast = sub(x = 
inputs_367_cast, y = channels_mean_367_cast)[name = tensor("zero_mean_367_cast")]; + tensor zero_mean_sq_367_cast = mul(x = zero_mean_367_cast, y = zero_mean_367_cast)[name = tensor("zero_mean_sq_367_cast")]; + tensor var_11869 = const()[name = tensor("op_11869"), val = tensor([1])]; + tensor var_11870_cast = reduce_mean(axes = var_11869, keep_dims = var_23, x = zero_mean_sq_367_cast)[name = tensor("op_11870_cast")]; + tensor var_11871_to_fp16 = const()[name = tensor("op_11871_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_11872_cast = add(x = var_11870_cast, y = var_11871_to_fp16)[name = tensor("op_11872_cast")]; + tensor denom_367_epsilon_0_to_fp16 = const()[name = tensor("denom_367_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_367_cast = rsqrt(epsilon = denom_367_epsilon_0_to_fp16, x = var_11872_cast)[name = tensor("denom_367_cast")]; + tensor out_367_cast = mul(x = zero_mean_367_cast, y = denom_367_cast)[name = tensor("out_367_cast")]; + tensor var_11876_to_fp16 = const()[name = tensor("op_11876_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1748210432)))]; + tensor var_11877_cast = add(x = out_367_cast, y = var_11876_to_fp16)[name = tensor("op_11877_cast")]; + tensor var_11879_to_fp16 = const()[name = tensor("op_11879_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1748213056)))]; + tensor hidden_states_477_cast = mul(x = var_11877_cast, y = var_11879_to_fp16)[name = tensor("hidden_states_477_cast")]; + tensor var_11886 = const()[name = tensor("op_11886"), val = tensor([1, 1])]; + tensor var_11888 = const()[name = tensor("op_11888"), val = tensor([1, 1])]; + tensor q_245_pad_type_0 = const()[name = tensor("q_245_pad_type_0"), val = tensor("custom")]; + tensor q_245_pad_0 = const()[name = tensor("q_245_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1748215680))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1749444544))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_245_cast = conv(dilations = var_11888, groups = var_31, pad = q_245_pad_0, pad_type = q_245_pad_type_0, strides = var_11886, weight = unet_up_blocks_0_attentions_2_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_477_cast)[name = tensor("q_245_cast")]; + tensor var_11892 = const()[name = tensor("op_11892"), val = tensor([1, 1])]; + tensor var_11894 = const()[name = tensor("op_11894"), val = tensor([1, 1])]; + tensor k_245_pad_type_0 = const()[name = tensor("k_245_pad_type_0"), val = tensor("custom")]; + tensor k_245_pad_0 = const()[name = tensor("k_245_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1749444736))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1750673600))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_245_cast = conv(dilations = var_11894, groups = var_31, pad = 
k_245_pad_0, pad_type = k_245_pad_type_0, strides = var_11892, weight = unet_up_blocks_0_attentions_2_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_477_cast)[name = tensor("k_245_cast")]; + tensor var_11898 = const()[name = tensor("op_11898"), val = tensor([1, 1])]; + tensor var_11900 = const()[name = tensor("op_11900"), val = tensor([1, 1])]; + tensor v_245_pad_type_0 = const()[name = tensor("v_245_pad_type_0"), val = tensor("custom")]; + tensor v_245_pad_0 = const()[name = tensor("v_245_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1750673792))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1751902656))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_245_cast = conv(dilations = var_11900, groups = var_31, pad = v_245_pad_0, pad_type = v_245_pad_type_0, strides = var_11898, weight = unet_up_blocks_0_attentions_2_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_477_cast)[name = tensor("v_245_cast")]; + tensor var_11904 = const()[name = tensor("op_11904"), val = tensor([2, 20, 64, -1])]; + tensor var_11905_cast = reshape(shape = var_11904, x = q_245_cast)[name = tensor("op_11905_cast")]; + tensor var_11906 = const()[name = tensor("op_11906"), val = tensor([2, 20, 64, -1])]; + tensor var_11907_cast = reshape(shape = var_11906, x = k_245_cast)[name = tensor("op_11907_cast")]; + tensor var_11908 = const()[name = tensor("op_11908"), val = tensor([2, 20, 64, -1])]; + tensor var_11909_cast = reshape(shape = var_11908, x = v_245_cast)[name = tensor("op_11909_cast")]; + tensor attn_weights_489_transpose_x_0 = const()[name = tensor("attn_weights_489_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_489_transpose_y_0 = const()[name = tensor("attn_weights_489_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_489_cast = matmul(transpose_x = attn_weights_489_transpose_x_0, transpose_y = attn_weights_489_transpose_y_0, x = var_11905_cast, y = var_11907_cast)[name = tensor("attn_weights_489_cast")]; + tensor attn_weights_491_cast = mul(x = attn_weights_489_cast, y = var_12_to_fp16)[name = tensor("attn_weights_491_cast")]; + tensor var_11913_cast = softmax(axis = var_18, x = attn_weights_491_cast)[name = tensor("op_11913_cast")]; + tensor attn_245_transpose_x_0 = const()[name = tensor("attn_245_transpose_x_0"), val = tensor(false)]; + tensor attn_245_transpose_y_0 = const()[name = tensor("attn_245_transpose_y_0"), val = tensor(true)]; + tensor attn_245_cast = matmul(transpose_x = attn_245_transpose_x_0, transpose_y = attn_245_transpose_y_0, x = var_11909_cast, y = var_11913_cast)[name = tensor("attn_245_cast")]; + tensor var_11917 = const()[name = tensor("op_11917"), val = tensor([2, 1280, 1, -1])]; + tensor input_691_cast = reshape(shape = var_11917, x = attn_245_cast)[name = tensor("input_691_cast")]; + tensor var_11922 = const()[name = tensor("op_11922"), val = tensor([1, 1])]; + tensor var_11924 = const()[name = tensor("op_11924"), val = tensor([1, 1])]; + tensor var_11926_pad_type_0 = const()[name = tensor("op_11926_pad_type_0"), val = tensor("custom")]; + tensor var_11926_pad_0 = const()[name = tensor("op_11926_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
unet_up_blocks_0_attentions_2_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1751902848))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1753131712))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_7_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_7_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1753131904)))]; + tensor var_11926_cast = conv(bias = unet_up_blocks_0_attentions_2_transformer_blocks_7_attn1_to_out_0_bias_to_fp16, dilations = var_11924, groups = var_31, pad = var_11926_pad_0, pad_type = var_11926_pad_type_0, strides = var_11922, weight = unet_up_blocks_0_attentions_2_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized, x = input_691_cast)[name = tensor("op_11926_cast")]; + tensor inputs_369_cast = add(x = var_11926_cast, y = inputs_367_cast)[name = tensor("inputs_369_cast")]; + tensor var_11930 = const()[name = tensor("op_11930"), val = tensor([1])]; + tensor channels_mean_369_cast = reduce_mean(axes = var_11930, keep_dims = var_23, x = inputs_369_cast)[name = tensor("channels_mean_369_cast")]; + tensor zero_mean_369_cast = sub(x = inputs_369_cast, y = channels_mean_369_cast)[name = tensor("zero_mean_369_cast")]; + tensor zero_mean_sq_369_cast = mul(x = zero_mean_369_cast, y = zero_mean_369_cast)[name = tensor("zero_mean_sq_369_cast")]; + tensor var_11934 = const()[name = tensor("op_11934"), val = tensor([1])]; + tensor var_11935_cast = reduce_mean(axes = var_11934, keep_dims = var_23, x = zero_mean_sq_369_cast)[name = tensor("op_11935_cast")]; + tensor var_11936_to_fp16 = const()[name = tensor("op_11936_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_11937_cast = add(x = var_11935_cast, y = var_11936_to_fp16)[name = tensor("op_11937_cast")]; + tensor denom_369_epsilon_0_to_fp16 = const()[name = tensor("denom_369_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_369_cast = rsqrt(epsilon = denom_369_epsilon_0_to_fp16, x = var_11937_cast)[name = tensor("denom_369_cast")]; + tensor out_369_cast = mul(x = zero_mean_369_cast, y = denom_369_cast)[name = tensor("out_369_cast")]; + tensor var_11941_to_fp16 = const()[name = tensor("op_11941_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1753134528)))]; + tensor var_11942_cast = add(x = out_369_cast, y = var_11941_to_fp16)[name = tensor("op_11942_cast")]; + tensor var_11944_to_fp16 = const()[name = tensor("op_11944_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1753137152)))]; + tensor hidden_states_479_cast = mul(x = var_11942_cast, y = var_11944_to_fp16)[name = tensor("hidden_states_479_cast")]; + tensor var_11951 = const()[name = tensor("op_11951"), val = tensor([1, 1])]; + tensor var_11953 = const()[name = tensor("op_11953"), val = tensor([1, 1])]; + tensor q_247_pad_type_0 = const()[name = tensor("q_247_pad_type_0"), val = tensor("custom")]; + tensor q_247_pad_0 = const()[name = tensor("q_247_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices 
= tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1753139776))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1754368640))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_247_cast = conv(dilations = var_11953, groups = var_31, pad = q_247_pad_0, pad_type = q_247_pad_type_0, strides = var_11951, weight = unet_up_blocks_0_attentions_2_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_479_cast)[name = tensor("q_247_cast")]; + tensor var_11957 = const()[name = tensor("op_11957"), val = tensor([1, 1])]; + tensor var_11959 = const()[name = tensor("op_11959"), val = tensor([1, 1])]; + tensor k_247_pad_type_0 = const()[name = tensor("k_247_pad_type_0"), val = tensor("custom")]; + tensor k_247_pad_0 = const()[name = tensor("k_247_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1754368832))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1756334976))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_247_cast = conv(dilations = var_11959, groups = var_31, pad = k_247_pad_0, pad_type = k_247_pad_type_0, strides = var_11957, weight = unet_up_blocks_0_attentions_2_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_247_cast")]; + tensor var_11963 = const()[name = tensor("op_11963"), val = tensor([1, 1])]; + tensor var_11965 = const()[name = tensor("op_11965"), val = tensor([1, 1])]; + tensor v_247_pad_type_0 = const()[name = tensor("v_247_pad_type_0"), val = tensor("custom")]; + tensor v_247_pad_0 = const()[name = tensor("v_247_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1756335168))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1758301312))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_247_cast = conv(dilations = var_11965, groups = var_31, pad = v_247_pad_0, pad_type = v_247_pad_type_0, strides = var_11963, weight = unet_up_blocks_0_attentions_2_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_247_cast")]; + tensor var_11969 = const()[name = tensor("op_11969"), val = tensor([2, 20, 64, -1])]; + tensor var_11970_cast = reshape(shape = var_11969, x = q_247_cast)[name = tensor("op_11970_cast")]; + tensor var_11971 = const()[name = tensor("op_11971"), val = tensor([2, 20, 64, -1])]; + tensor var_11972_cast = reshape(shape = var_11971, x = k_247_cast)[name = tensor("op_11972_cast")]; + tensor var_11973 = const()[name = tensor("op_11973"), val = tensor([2, 20, 64, -1])]; + tensor var_11974_cast = reshape(shape = var_11973, x = v_247_cast)[name = tensor("op_11974_cast")]; + tensor attn_weights_493_transpose_x_0 = const()[name = tensor("attn_weights_493_transpose_x_0"), val = tensor(true)]; + tensor 
attn_weights_493_transpose_y_0 = const()[name = tensor("attn_weights_493_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_493_cast = matmul(transpose_x = attn_weights_493_transpose_x_0, transpose_y = attn_weights_493_transpose_y_0, x = var_11970_cast, y = var_11972_cast)[name = tensor("attn_weights_493_cast")]; + tensor attn_weights_495_cast = mul(x = attn_weights_493_cast, y = var_12_to_fp16)[name = tensor("attn_weights_495_cast")]; + tensor var_11978_cast = softmax(axis = var_18, x = attn_weights_495_cast)[name = tensor("op_11978_cast")]; + tensor attn_247_transpose_x_0 = const()[name = tensor("attn_247_transpose_x_0"), val = tensor(false)]; + tensor attn_247_transpose_y_0 = const()[name = tensor("attn_247_transpose_y_0"), val = tensor(true)]; + tensor attn_247_cast = matmul(transpose_x = attn_247_transpose_x_0, transpose_y = attn_247_transpose_y_0, x = var_11974_cast, y = var_11978_cast)[name = tensor("attn_247_cast")]; + tensor var_11982 = const()[name = tensor("op_11982"), val = tensor([2, 1280, 1, -1])]; + tensor input_693_cast = reshape(shape = var_11982, x = attn_247_cast)[name = tensor("input_693_cast")]; + tensor var_11987 = const()[name = tensor("op_11987"), val = tensor([1, 1])]; + tensor var_11989 = const()[name = tensor("op_11989"), val = tensor([1, 1])]; + tensor var_11991_pad_type_0 = const()[name = tensor("op_11991_pad_type_0"), val = tensor("custom")]; + tensor var_11991_pad_0 = const()[name = tensor("op_11991_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1758301504))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1759530368))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_7_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_7_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1759530560)))]; + tensor var_11991_cast = conv(bias = unet_up_blocks_0_attentions_2_transformer_blocks_7_attn2_to_out_0_bias_to_fp16, dilations = var_11989, groups = var_31, pad = var_11991_pad_0, pad_type = var_11991_pad_type_0, strides = var_11987, weight = unet_up_blocks_0_attentions_2_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized, x = input_693_cast)[name = tensor("op_11991_cast")]; + tensor inputs_371_cast = add(x = var_11991_cast, y = inputs_369_cast)[name = tensor("inputs_371_cast")]; + tensor var_11995 = const()[name = tensor("op_11995"), val = tensor([1])]; + tensor channels_mean_371_cast = reduce_mean(axes = var_11995, keep_dims = var_23, x = inputs_371_cast)[name = tensor("channels_mean_371_cast")]; + tensor zero_mean_371_cast = sub(x = inputs_371_cast, y = channels_mean_371_cast)[name = tensor("zero_mean_371_cast")]; + tensor zero_mean_sq_371_cast = mul(x = zero_mean_371_cast, y = zero_mean_371_cast)[name = tensor("zero_mean_sq_371_cast")]; + tensor var_11999 = const()[name = tensor("op_11999"), val = tensor([1])]; + tensor var_12000_cast = reduce_mean(axes = var_11999, keep_dims = var_23, x = zero_mean_sq_371_cast)[name = tensor("op_12000_cast")]; + tensor var_12001_to_fp16 = const()[name = tensor("op_12001_to_fp16"), val = 
tensor(0x1.5p-17)]; + tensor var_12002_cast = add(x = var_12000_cast, y = var_12001_to_fp16)[name = tensor("op_12002_cast")]; + tensor denom_371_epsilon_0_to_fp16 = const()[name = tensor("denom_371_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_371_cast = rsqrt(epsilon = denom_371_epsilon_0_to_fp16, x = var_12002_cast)[name = tensor("denom_371_cast")]; + tensor out_371_cast = mul(x = zero_mean_371_cast, y = denom_371_cast)[name = tensor("out_371_cast")]; + tensor var_12006_to_fp16 = const()[name = tensor("op_12006_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1759533184)))]; + tensor var_12007_cast = add(x = out_371_cast, y = var_12006_to_fp16)[name = tensor("op_12007_cast")]; + tensor var_12009_to_fp16 = const()[name = tensor("op_12009_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1759535808)))]; + tensor input_695_cast = mul(x = var_12007_cast, y = var_12009_to_fp16)[name = tensor("input_695_cast")]; + tensor var_12017 = const()[name = tensor("op_12017"), val = tensor([1, 1])]; + tensor var_12019 = const()[name = tensor("op_12019"), val = tensor([1, 1])]; + tensor var_12021_pad_type_0 = const()[name = tensor("op_12021_pad_type_0"), val = tensor("custom")]; + tensor var_12021_pad_0 = const()[name = tensor("op_12021_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1759538432))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1769368896))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_7_ff_net_0_proj_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1769369088))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1769376832))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_7_ff_net_0_proj_bias_to_fp16_palettized"), shape = tensor([10240])]; + tensor var_12021_cast = conv(bias = unet_up_blocks_0_attentions_2_transformer_blocks_7_ff_net_0_proj_bias_to_fp16_palettized, dilations = var_12019, groups = var_31, pad = var_12021_pad_0, pad_type = var_12021_pad_type_0, strides = var_12017, weight = unet_up_blocks_0_attentions_2_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized, x = input_695_cast)[name = tensor("op_12021_cast")]; + tensor var_12022_split_sizes_0 = const()[name = tensor("op_12022_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_12022_axis_0 = const()[name = tensor("op_12022_axis_0"), val = tensor(1)]; + tensor var_12022_cast_0, tensor var_12022_cast_1 = split(axis = var_12022_axis_0, split_sizes = var_12022_split_sizes_0, x = var_12021_cast)[name = tensor("op_12022_cast")]; + tensor var_12024_mode_0 = const()[name = tensor("op_12024_mode_0"), val = tensor("EXACT")]; + tensor var_12024_cast = gelu(mode = var_12024_mode_0, x = var_12022_cast_1)[name = tensor("op_12024_cast")]; + tensor input_697_cast = mul(x = var_12022_cast_0, y = var_12024_cast)[name = tensor("input_697_cast")]; + tensor var_12028 = const()[name = tensor("op_12028"), val = tensor([1, 1])]; + tensor var_12030 = const()[name = 
tensor("op_12030"), val = tensor([1, 1])]; + tensor var_12032_pad_type_0 = const()[name = tensor("op_12032_pad_type_0"), val = tensor("custom")]; + tensor var_12032_pad_0 = const()[name = tensor("op_12032_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1769377024))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1774292288))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_7_ff_net_2_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_7_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1774292480)))]; + tensor var_12032_cast = conv(bias = unet_up_blocks_0_attentions_2_transformer_blocks_7_ff_net_2_bias_to_fp16, dilations = var_12030, groups = var_31, pad = var_12032_pad_0, pad_type = var_12032_pad_type_0, strides = var_12028, weight = unet_up_blocks_0_attentions_2_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized, x = input_697_cast)[name = tensor("op_12032_cast")]; + tensor inputs_373_cast = add(x = var_12032_cast, y = inputs_371_cast)[name = tensor("inputs_373_cast")]; + tensor var_12042 = const()[name = tensor("op_12042"), val = tensor([1])]; + tensor channels_mean_373_cast = reduce_mean(axes = var_12042, keep_dims = var_23, x = inputs_373_cast)[name = tensor("channels_mean_373_cast")]; + tensor zero_mean_373_cast = sub(x = inputs_373_cast, y = channels_mean_373_cast)[name = tensor("zero_mean_373_cast")]; + tensor zero_mean_sq_373_cast = mul(x = zero_mean_373_cast, y = zero_mean_373_cast)[name = tensor("zero_mean_sq_373_cast")]; + tensor var_12046 = const()[name = tensor("op_12046"), val = tensor([1])]; + tensor var_12047_cast = reduce_mean(axes = var_12046, keep_dims = var_23, x = zero_mean_sq_373_cast)[name = tensor("op_12047_cast")]; + tensor var_12048_to_fp16 = const()[name = tensor("op_12048_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_12049_cast = add(x = var_12047_cast, y = var_12048_to_fp16)[name = tensor("op_12049_cast")]; + tensor denom_373_epsilon_0_to_fp16 = const()[name = tensor("denom_373_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_373_cast = rsqrt(epsilon = denom_373_epsilon_0_to_fp16, x = var_12049_cast)[name = tensor("denom_373_cast")]; + tensor out_373_cast = mul(x = zero_mean_373_cast, y = denom_373_cast)[name = tensor("out_373_cast")]; + tensor var_12053_to_fp16 = const()[name = tensor("op_12053_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1774295104)))]; + tensor var_12054_cast = add(x = out_373_cast, y = var_12053_to_fp16)[name = tensor("op_12054_cast")]; + tensor var_12056_to_fp16 = const()[name = tensor("op_12056_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1774297728)))]; + tensor hidden_states_483_cast = mul(x = var_12054_cast, y = var_12056_to_fp16)[name = tensor("hidden_states_483_cast")]; + tensor var_12063 = const()[name = tensor("op_12063"), val = tensor([1, 1])]; + tensor var_12065 = const()[name = tensor("op_12065"), val = tensor([1, 1])]; + tensor q_249_pad_type_0 = const()[name = tensor("q_249_pad_type_0"), val = tensor("custom")]; + 
tensor q_249_pad_0 = const()[name = tensor("q_249_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1774300352))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1775529216))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_249_cast = conv(dilations = var_12065, groups = var_31, pad = q_249_pad_0, pad_type = q_249_pad_type_0, strides = var_12063, weight = unet_up_blocks_0_attentions_2_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_483_cast)[name = tensor("q_249_cast")]; + tensor var_12069 = const()[name = tensor("op_12069"), val = tensor([1, 1])]; + tensor var_12071 = const()[name = tensor("op_12071"), val = tensor([1, 1])]; + tensor k_249_pad_type_0 = const()[name = tensor("k_249_pad_type_0"), val = tensor("custom")]; + tensor k_249_pad_0 = const()[name = tensor("k_249_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1775529408))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1776758272))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_249_cast = conv(dilations = var_12071, groups = var_31, pad = k_249_pad_0, pad_type = k_249_pad_type_0, strides = var_12069, weight = unet_up_blocks_0_attentions_2_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_483_cast)[name = tensor("k_249_cast")]; + tensor var_12075 = const()[name = tensor("op_12075"), val = tensor([1, 1])]; + tensor var_12077 = const()[name = tensor("op_12077"), val = tensor([1, 1])]; + tensor v_249_pad_type_0 = const()[name = tensor("v_249_pad_type_0"), val = tensor("custom")]; + tensor v_249_pad_0 = const()[name = tensor("v_249_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1776758464))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1777987328))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_249_cast = conv(dilations = var_12077, groups = var_31, pad = v_249_pad_0, pad_type = v_249_pad_type_0, strides = var_12075, weight = unet_up_blocks_0_attentions_2_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_483_cast)[name = tensor("v_249_cast")]; + tensor var_12081 = const()[name = tensor("op_12081"), val = tensor([2, 20, 64, -1])]; + tensor var_12082_cast = reshape(shape = var_12081, x = q_249_cast)[name = tensor("op_12082_cast")]; + tensor var_12083 = const()[name = tensor("op_12083"), val = tensor([2, 20, 64, -1])]; + tensor var_12084_cast = reshape(shape = var_12083, x = k_249_cast)[name = tensor("op_12084_cast")]; + tensor var_12085 = const()[name = tensor("op_12085"), val = tensor([2, 20, 64, -1])]; + tensor 
var_12086_cast = reshape(shape = var_12085, x = v_249_cast)[name = tensor("op_12086_cast")]; + tensor attn_weights_497_transpose_x_0 = const()[name = tensor("attn_weights_497_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_497_transpose_y_0 = const()[name = tensor("attn_weights_497_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_497_cast = matmul(transpose_x = attn_weights_497_transpose_x_0, transpose_y = attn_weights_497_transpose_y_0, x = var_12082_cast, y = var_12084_cast)[name = tensor("attn_weights_497_cast")]; + tensor attn_weights_499_cast = mul(x = attn_weights_497_cast, y = var_12_to_fp16)[name = tensor("attn_weights_499_cast")]; + tensor var_12090_cast = softmax(axis = var_18, x = attn_weights_499_cast)[name = tensor("op_12090_cast")]; + tensor attn_249_transpose_x_0 = const()[name = tensor("attn_249_transpose_x_0"), val = tensor(false)]; + tensor attn_249_transpose_y_0 = const()[name = tensor("attn_249_transpose_y_0"), val = tensor(true)]; + tensor attn_249_cast = matmul(transpose_x = attn_249_transpose_x_0, transpose_y = attn_249_transpose_y_0, x = var_12086_cast, y = var_12090_cast)[name = tensor("attn_249_cast")]; + tensor var_12094 = const()[name = tensor("op_12094"), val = tensor([2, 1280, 1, -1])]; + tensor input_699_cast = reshape(shape = var_12094, x = attn_249_cast)[name = tensor("input_699_cast")]; + tensor var_12099 = const()[name = tensor("op_12099"), val = tensor([1, 1])]; + tensor var_12101 = const()[name = tensor("op_12101"), val = tensor([1, 1])]; + tensor var_12103_pad_type_0 = const()[name = tensor("op_12103_pad_type_0"), val = tensor("custom")]; + tensor var_12103_pad_0 = const()[name = tensor("op_12103_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1777987520))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1779216384))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_8_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_8_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1779216576)))]; + tensor var_12103_cast = conv(bias = unet_up_blocks_0_attentions_2_transformer_blocks_8_attn1_to_out_0_bias_to_fp16, dilations = var_12101, groups = var_31, pad = var_12103_pad_0, pad_type = var_12103_pad_type_0, strides = var_12099, weight = unet_up_blocks_0_attentions_2_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized, x = input_699_cast)[name = tensor("op_12103_cast")]; + tensor inputs_375_cast = add(x = var_12103_cast, y = inputs_373_cast)[name = tensor("inputs_375_cast")]; + tensor var_12107 = const()[name = tensor("op_12107"), val = tensor([1])]; + tensor channels_mean_375_cast = reduce_mean(axes = var_12107, keep_dims = var_23, x = inputs_375_cast)[name = tensor("channels_mean_375_cast")]; + tensor zero_mean_375_cast = sub(x = inputs_375_cast, y = channels_mean_375_cast)[name = tensor("zero_mean_375_cast")]; + tensor zero_mean_sq_375_cast = mul(x = zero_mean_375_cast, y = zero_mean_375_cast)[name = tensor("zero_mean_sq_375_cast")]; + tensor var_12111 = const()[name = tensor("op_12111"), val = tensor([1])]; + 
tensor var_12112_cast = reduce_mean(axes = var_12111, keep_dims = var_23, x = zero_mean_sq_375_cast)[name = tensor("op_12112_cast")]; + tensor var_12113_to_fp16 = const()[name = tensor("op_12113_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_12114_cast = add(x = var_12112_cast, y = var_12113_to_fp16)[name = tensor("op_12114_cast")]; + tensor denom_375_epsilon_0_to_fp16 = const()[name = tensor("denom_375_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_375_cast = rsqrt(epsilon = denom_375_epsilon_0_to_fp16, x = var_12114_cast)[name = tensor("denom_375_cast")]; + tensor out_375_cast = mul(x = zero_mean_375_cast, y = denom_375_cast)[name = tensor("out_375_cast")]; + tensor var_12118_to_fp16 = const()[name = tensor("op_12118_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1779219200)))]; + tensor var_12119_cast = add(x = out_375_cast, y = var_12118_to_fp16)[name = tensor("op_12119_cast")]; + tensor var_12121_to_fp16 = const()[name = tensor("op_12121_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1779221824)))]; + tensor hidden_states_485_cast = mul(x = var_12119_cast, y = var_12121_to_fp16)[name = tensor("hidden_states_485_cast")]; + tensor var_12128 = const()[name = tensor("op_12128"), val = tensor([1, 1])]; + tensor var_12130 = const()[name = tensor("op_12130"), val = tensor([1, 1])]; + tensor q_251_pad_type_0 = const()[name = tensor("q_251_pad_type_0"), val = tensor("custom")]; + tensor q_251_pad_0 = const()[name = tensor("q_251_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1779224448))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1780453312))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_251_cast = conv(dilations = var_12130, groups = var_31, pad = q_251_pad_0, pad_type = q_251_pad_type_0, strides = var_12128, weight = unet_up_blocks_0_attentions_2_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_485_cast)[name = tensor("q_251_cast")]; + tensor var_12134 = const()[name = tensor("op_12134"), val = tensor([1, 1])]; + tensor var_12136 = const()[name = tensor("op_12136"), val = tensor([1, 1])]; + tensor k_251_pad_type_0 = const()[name = tensor("k_251_pad_type_0"), val = tensor("custom")]; + tensor k_251_pad_0 = const()[name = tensor("k_251_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1780453504))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1782419648))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_251_cast = conv(dilations = var_12136, groups = var_31, pad = k_251_pad_0, pad_type = k_251_pad_type_0, strides = var_12134, weight = unet_up_blocks_0_attentions_2_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_251_cast")]; + tensor var_12140 = const()[name = tensor("op_12140"), val = tensor([1, 
1])]; + tensor var_12142 = const()[name = tensor("op_12142"), val = tensor([1, 1])]; + tensor v_251_pad_type_0 = const()[name = tensor("v_251_pad_type_0"), val = tensor("custom")]; + tensor v_251_pad_0 = const()[name = tensor("v_251_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1782419840))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1784385984))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_251_cast = conv(dilations = var_12142, groups = var_31, pad = v_251_pad_0, pad_type = v_251_pad_type_0, strides = var_12140, weight = unet_up_blocks_0_attentions_2_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_251_cast")]; + tensor var_12146 = const()[name = tensor("op_12146"), val = tensor([2, 20, 64, -1])]; + tensor var_12147_cast = reshape(shape = var_12146, x = q_251_cast)[name = tensor("op_12147_cast")]; + tensor var_12148 = const()[name = tensor("op_12148"), val = tensor([2, 20, 64, -1])]; + tensor var_12149_cast = reshape(shape = var_12148, x = k_251_cast)[name = tensor("op_12149_cast")]; + tensor var_12150 = const()[name = tensor("op_12150"), val = tensor([2, 20, 64, -1])]; + tensor var_12151_cast = reshape(shape = var_12150, x = v_251_cast)[name = tensor("op_12151_cast")]; + tensor attn_weights_501_transpose_x_0 = const()[name = tensor("attn_weights_501_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_501_transpose_y_0 = const()[name = tensor("attn_weights_501_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_501_cast = matmul(transpose_x = attn_weights_501_transpose_x_0, transpose_y = attn_weights_501_transpose_y_0, x = var_12147_cast, y = var_12149_cast)[name = tensor("attn_weights_501_cast")]; + tensor attn_weights_503_cast = mul(x = attn_weights_501_cast, y = var_12_to_fp16)[name = tensor("attn_weights_503_cast")]; + tensor var_12155_cast = softmax(axis = var_18, x = attn_weights_503_cast)[name = tensor("op_12155_cast")]; + tensor attn_251_transpose_x_0 = const()[name = tensor("attn_251_transpose_x_0"), val = tensor(false)]; + tensor attn_251_transpose_y_0 = const()[name = tensor("attn_251_transpose_y_0"), val = tensor(true)]; + tensor attn_251_cast = matmul(transpose_x = attn_251_transpose_x_0, transpose_y = attn_251_transpose_y_0, x = var_12151_cast, y = var_12155_cast)[name = tensor("attn_251_cast")]; + tensor var_12159 = const()[name = tensor("op_12159"), val = tensor([2, 1280, 1, -1])]; + tensor input_701_cast = reshape(shape = var_12159, x = attn_251_cast)[name = tensor("input_701_cast")]; + tensor var_12164 = const()[name = tensor("op_12164"), val = tensor([1, 1])]; + tensor var_12166 = const()[name = tensor("op_12166"), val = tensor([1, 1])]; + tensor var_12168_pad_type_0 = const()[name = tensor("op_12168_pad_type_0"), val = tensor("custom")]; + tensor var_12168_pad_0 = const()[name = tensor("op_12168_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1784386176))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(1785615040))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_8_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_8_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1785615232)))]; + tensor var_12168_cast = conv(bias = unet_up_blocks_0_attentions_2_transformer_blocks_8_attn2_to_out_0_bias_to_fp16, dilations = var_12166, groups = var_31, pad = var_12168_pad_0, pad_type = var_12168_pad_type_0, strides = var_12164, weight = unet_up_blocks_0_attentions_2_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized, x = input_701_cast)[name = tensor("op_12168_cast")]; + tensor inputs_377_cast = add(x = var_12168_cast, y = inputs_375_cast)[name = tensor("inputs_377_cast")]; + tensor var_12172 = const()[name = tensor("op_12172"), val = tensor([1])]; + tensor channels_mean_377_cast = reduce_mean(axes = var_12172, keep_dims = var_23, x = inputs_377_cast)[name = tensor("channels_mean_377_cast")]; + tensor zero_mean_377_cast = sub(x = inputs_377_cast, y = channels_mean_377_cast)[name = tensor("zero_mean_377_cast")]; + tensor zero_mean_sq_377_cast = mul(x = zero_mean_377_cast, y = zero_mean_377_cast)[name = tensor("zero_mean_sq_377_cast")]; + tensor var_12176 = const()[name = tensor("op_12176"), val = tensor([1])]; + tensor var_12177_cast = reduce_mean(axes = var_12176, keep_dims = var_23, x = zero_mean_sq_377_cast)[name = tensor("op_12177_cast")]; + tensor var_12178_to_fp16 = const()[name = tensor("op_12178_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_12179_cast = add(x = var_12177_cast, y = var_12178_to_fp16)[name = tensor("op_12179_cast")]; + tensor denom_377_epsilon_0_to_fp16 = const()[name = tensor("denom_377_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_377_cast = rsqrt(epsilon = denom_377_epsilon_0_to_fp16, x = var_12179_cast)[name = tensor("denom_377_cast")]; + tensor out_377_cast = mul(x = zero_mean_377_cast, y = denom_377_cast)[name = tensor("out_377_cast")]; + tensor var_12183_to_fp16 = const()[name = tensor("op_12183_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1785617856)))]; + tensor var_12184_cast = add(x = out_377_cast, y = var_12183_to_fp16)[name = tensor("op_12184_cast")]; + tensor var_12186_to_fp16 = const()[name = tensor("op_12186_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1785620480)))]; + tensor input_703_cast = mul(x = var_12184_cast, y = var_12186_to_fp16)[name = tensor("input_703_cast")]; + tensor var_12194 = const()[name = tensor("op_12194"), val = tensor([1, 1])]; + tensor var_12196 = const()[name = tensor("op_12196"), val = tensor([1, 1])]; + tensor var_12198_pad_type_0 = const()[name = tensor("op_12198_pad_type_0"), val = tensor("custom")]; + tensor var_12198_pad_0 = const()[name = tensor("op_12198_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1785623104))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1795453568))), name = 
tensor("unet_up_blocks_0_attentions_2_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_8_ff_net_0_proj_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1795453760))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1795461504))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_8_ff_net_0_proj_bias_to_fp16_palettized"), shape = tensor([10240])]; + tensor var_12198_cast = conv(bias = unet_up_blocks_0_attentions_2_transformer_blocks_8_ff_net_0_proj_bias_to_fp16_palettized, dilations = var_12196, groups = var_31, pad = var_12198_pad_0, pad_type = var_12198_pad_type_0, strides = var_12194, weight = unet_up_blocks_0_attentions_2_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized, x = input_703_cast)[name = tensor("op_12198_cast")]; + tensor var_12199_split_sizes_0 = const()[name = tensor("op_12199_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_12199_axis_0 = const()[name = tensor("op_12199_axis_0"), val = tensor(1)]; + tensor var_12199_cast_0, tensor var_12199_cast_1 = split(axis = var_12199_axis_0, split_sizes = var_12199_split_sizes_0, x = var_12198_cast)[name = tensor("op_12199_cast")]; + tensor var_12201_mode_0 = const()[name = tensor("op_12201_mode_0"), val = tensor("EXACT")]; + tensor var_12201_cast = gelu(mode = var_12201_mode_0, x = var_12199_cast_1)[name = tensor("op_12201_cast")]; + tensor input_705_cast = mul(x = var_12199_cast_0, y = var_12201_cast)[name = tensor("input_705_cast")]; + tensor var_12205 = const()[name = tensor("op_12205"), val = tensor([1, 1])]; + tensor var_12207 = const()[name = tensor("op_12207"), val = tensor([1, 1])]; + tensor var_12209_pad_type_0 = const()[name = tensor("op_12209_pad_type_0"), val = tensor("custom")]; + tensor var_12209_pad_0 = const()[name = tensor("op_12209_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1795461696))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1800376960))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_8_ff_net_2_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_8_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1800377152)))]; + tensor var_12209_cast = conv(bias = unet_up_blocks_0_attentions_2_transformer_blocks_8_ff_net_2_bias_to_fp16, dilations = var_12207, groups = var_31, pad = var_12209_pad_0, pad_type = var_12209_pad_type_0, strides = var_12205, weight = unet_up_blocks_0_attentions_2_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized, x = input_705_cast)[name = tensor("op_12209_cast")]; + tensor inputs_379_cast = add(x = var_12209_cast, y = inputs_377_cast)[name = tensor("inputs_379_cast")]; + tensor var_12219 = const()[name = tensor("op_12219"), val = tensor([1])]; + tensor channels_mean_379_cast = reduce_mean(axes = var_12219, keep_dims = var_23, x = inputs_379_cast)[name = tensor("channels_mean_379_cast")]; + tensor zero_mean_379_cast = sub(x = 
inputs_379_cast, y = channels_mean_379_cast)[name = tensor("zero_mean_379_cast")]; + tensor zero_mean_sq_379_cast = mul(x = zero_mean_379_cast, y = zero_mean_379_cast)[name = tensor("zero_mean_sq_379_cast")]; + tensor var_12223 = const()[name = tensor("op_12223"), val = tensor([1])]; + tensor var_12224_cast = reduce_mean(axes = var_12223, keep_dims = var_23, x = zero_mean_sq_379_cast)[name = tensor("op_12224_cast")]; + tensor var_12225_to_fp16 = const()[name = tensor("op_12225_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_12226_cast = add(x = var_12224_cast, y = var_12225_to_fp16)[name = tensor("op_12226_cast")]; + tensor denom_379_epsilon_0_to_fp16 = const()[name = tensor("denom_379_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_379_cast = rsqrt(epsilon = denom_379_epsilon_0_to_fp16, x = var_12226_cast)[name = tensor("denom_379_cast")]; + tensor out_379_cast = mul(x = zero_mean_379_cast, y = denom_379_cast)[name = tensor("out_379_cast")]; + tensor var_12230_to_fp16 = const()[name = tensor("op_12230_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1800379776)))]; + tensor var_12231_cast = add(x = out_379_cast, y = var_12230_to_fp16)[name = tensor("op_12231_cast")]; + tensor var_12233_to_fp16 = const()[name = tensor("op_12233_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1800382400)))]; + tensor hidden_states_489_cast = mul(x = var_12231_cast, y = var_12233_to_fp16)[name = tensor("hidden_states_489_cast")]; + tensor var_12240 = const()[name = tensor("op_12240"), val = tensor([1, 1])]; + tensor var_12242 = const()[name = tensor("op_12242"), val = tensor([1, 1])]; + tensor q_253_pad_type_0 = const()[name = tensor("q_253_pad_type_0"), val = tensor("custom")]; + tensor q_253_pad_0 = const()[name = tensor("q_253_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1800385024))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1801613888))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_253_cast = conv(dilations = var_12242, groups = var_31, pad = q_253_pad_0, pad_type = q_253_pad_type_0, strides = var_12240, weight = unet_up_blocks_0_attentions_2_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_489_cast)[name = tensor("q_253_cast")]; + tensor var_12246 = const()[name = tensor("op_12246"), val = tensor([1, 1])]; + tensor var_12248 = const()[name = tensor("op_12248"), val = tensor([1, 1])]; + tensor k_253_pad_type_0 = const()[name = tensor("k_253_pad_type_0"), val = tensor("custom")]; + tensor k_253_pad_0 = const()[name = tensor("k_253_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1801614080))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1802842944))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_253_cast = conv(dilations = var_12248, groups = var_31, pad = 
k_253_pad_0, pad_type = k_253_pad_type_0, strides = var_12246, weight = unet_up_blocks_0_attentions_2_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_489_cast)[name = tensor("k_253_cast")]; + tensor var_12252 = const()[name = tensor("op_12252"), val = tensor([1, 1])]; + tensor var_12254 = const()[name = tensor("op_12254"), val = tensor([1, 1])]; + tensor v_253_pad_type_0 = const()[name = tensor("v_253_pad_type_0"), val = tensor("custom")]; + tensor v_253_pad_0 = const()[name = tensor("v_253_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1802843136))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1804072000))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_253_cast = conv(dilations = var_12254, groups = var_31, pad = v_253_pad_0, pad_type = v_253_pad_type_0, strides = var_12252, weight = unet_up_blocks_0_attentions_2_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_489_cast)[name = tensor("v_253_cast")]; + tensor var_12258 = const()[name = tensor("op_12258"), val = tensor([2, 20, 64, -1])]; + tensor var_12259_cast = reshape(shape = var_12258, x = q_253_cast)[name = tensor("op_12259_cast")]; + tensor var_12260 = const()[name = tensor("op_12260"), val = tensor([2, 20, 64, -1])]; + tensor var_12261_cast = reshape(shape = var_12260, x = k_253_cast)[name = tensor("op_12261_cast")]; + tensor var_12262 = const()[name = tensor("op_12262"), val = tensor([2, 20, 64, -1])]; + tensor var_12263_cast = reshape(shape = var_12262, x = v_253_cast)[name = tensor("op_12263_cast")]; + tensor attn_weights_505_transpose_x_0 = const()[name = tensor("attn_weights_505_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_505_transpose_y_0 = const()[name = tensor("attn_weights_505_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_505_cast = matmul(transpose_x = attn_weights_505_transpose_x_0, transpose_y = attn_weights_505_transpose_y_0, x = var_12259_cast, y = var_12261_cast)[name = tensor("attn_weights_505_cast")]; + tensor attn_weights_507_cast = mul(x = attn_weights_505_cast, y = var_12_to_fp16)[name = tensor("attn_weights_507_cast")]; + tensor var_12267_cast = softmax(axis = var_18, x = attn_weights_507_cast)[name = tensor("op_12267_cast")]; + tensor attn_253_transpose_x_0 = const()[name = tensor("attn_253_transpose_x_0"), val = tensor(false)]; + tensor attn_253_transpose_y_0 = const()[name = tensor("attn_253_transpose_y_0"), val = tensor(true)]; + tensor attn_253_cast = matmul(transpose_x = attn_253_transpose_x_0, transpose_y = attn_253_transpose_y_0, x = var_12263_cast, y = var_12267_cast)[name = tensor("attn_253_cast")]; + tensor var_12271 = const()[name = tensor("op_12271"), val = tensor([2, 1280, 1, -1])]; + tensor input_707_cast = reshape(shape = var_12271, x = attn_253_cast)[name = tensor("input_707_cast")]; + tensor var_12276 = const()[name = tensor("op_12276"), val = tensor([1, 1])]; + tensor var_12278 = const()[name = tensor("op_12278"), val = tensor([1, 1])]; + tensor var_12280_pad_type_0 = const()[name = tensor("op_12280_pad_type_0"), val = tensor("custom")]; + tensor var_12280_pad_0 = const()[name = tensor("op_12280_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
unet_up_blocks_0_attentions_2_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1804072192))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1805301056))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_9_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_9_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1805301248)))]; + tensor var_12280_cast = conv(bias = unet_up_blocks_0_attentions_2_transformer_blocks_9_attn1_to_out_0_bias_to_fp16, dilations = var_12278, groups = var_31, pad = var_12280_pad_0, pad_type = var_12280_pad_type_0, strides = var_12276, weight = unet_up_blocks_0_attentions_2_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized, x = input_707_cast)[name = tensor("op_12280_cast")]; + tensor inputs_381_cast = add(x = var_12280_cast, y = inputs_379_cast)[name = tensor("inputs_381_cast")]; + tensor var_12284 = const()[name = tensor("op_12284"), val = tensor([1])]; + tensor channels_mean_381_cast = reduce_mean(axes = var_12284, keep_dims = var_23, x = inputs_381_cast)[name = tensor("channels_mean_381_cast")]; + tensor zero_mean_381_cast = sub(x = inputs_381_cast, y = channels_mean_381_cast)[name = tensor("zero_mean_381_cast")]; + tensor zero_mean_sq_381_cast = mul(x = zero_mean_381_cast, y = zero_mean_381_cast)[name = tensor("zero_mean_sq_381_cast")]; + tensor var_12288 = const()[name = tensor("op_12288"), val = tensor([1])]; + tensor var_12289_cast = reduce_mean(axes = var_12288, keep_dims = var_23, x = zero_mean_sq_381_cast)[name = tensor("op_12289_cast")]; + tensor var_12290_to_fp16 = const()[name = tensor("op_12290_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_12291_cast = add(x = var_12289_cast, y = var_12290_to_fp16)[name = tensor("op_12291_cast")]; + tensor denom_381_epsilon_0_to_fp16 = const()[name = tensor("denom_381_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_381_cast = rsqrt(epsilon = denom_381_epsilon_0_to_fp16, x = var_12291_cast)[name = tensor("denom_381_cast")]; + tensor out_381_cast = mul(x = zero_mean_381_cast, y = denom_381_cast)[name = tensor("out_381_cast")]; + tensor var_12295_to_fp16 = const()[name = tensor("op_12295_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1805303872)))]; + tensor var_12296_cast = add(x = out_381_cast, y = var_12295_to_fp16)[name = tensor("op_12296_cast")]; + tensor var_12298_to_fp16 = const()[name = tensor("op_12298_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1805306496)))]; + tensor hidden_states_491_cast = mul(x = var_12296_cast, y = var_12298_to_fp16)[name = tensor("hidden_states_491_cast")]; + tensor var_12305 = const()[name = tensor("op_12305"), val = tensor([1, 1])]; + tensor var_12307 = const()[name = tensor("op_12307"), val = tensor([1, 1])]; + tensor q_255_pad_type_0 = const()[name = tensor("q_255_pad_type_0"), val = tensor("custom")]; + tensor q_255_pad_0 = const()[name = tensor("q_255_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices 
= tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1805309120))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1806537984))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_255_cast = conv(dilations = var_12307, groups = var_31, pad = q_255_pad_0, pad_type = q_255_pad_type_0, strides = var_12305, weight = unet_up_blocks_0_attentions_2_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_491_cast)[name = tensor("q_255_cast")]; + tensor var_12311 = const()[name = tensor("op_12311"), val = tensor([1, 1])]; + tensor var_12313 = const()[name = tensor("op_12313"), val = tensor([1, 1])]; + tensor k_255_pad_type_0 = const()[name = tensor("k_255_pad_type_0"), val = tensor("custom")]; + tensor k_255_pad_0 = const()[name = tensor("k_255_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1806538176))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1808504320))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_255_cast = conv(dilations = var_12313, groups = var_31, pad = k_255_pad_0, pad_type = k_255_pad_type_0, strides = var_12311, weight = unet_up_blocks_0_attentions_2_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_255_cast")]; + tensor var_12317 = const()[name = tensor("op_12317"), val = tensor([1, 1])]; + tensor var_12319 = const()[name = tensor("op_12319"), val = tensor([1, 1])]; + tensor v_255_pad_type_0 = const()[name = tensor("v_255_pad_type_0"), val = tensor("custom")]; + tensor v_255_pad_0 = const()[name = tensor("v_255_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1808504512))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1810470656))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_255_cast = conv(dilations = var_12319, groups = var_31, pad = v_255_pad_0, pad_type = v_255_pad_type_0, strides = var_12317, weight = unet_up_blocks_0_attentions_2_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_255_cast")]; + tensor var_12323 = const()[name = tensor("op_12323"), val = tensor([2, 20, 64, -1])]; + tensor var_12324_cast = reshape(shape = var_12323, x = q_255_cast)[name = tensor("op_12324_cast")]; + tensor var_12325 = const()[name = tensor("op_12325"), val = tensor([2, 20, 64, -1])]; + tensor var_12326_cast = reshape(shape = var_12325, x = k_255_cast)[name = tensor("op_12326_cast")]; + tensor var_12327 = const()[name = tensor("op_12327"), val = tensor([2, 20, 64, -1])]; + tensor var_12328_cast = reshape(shape = var_12327, x = v_255_cast)[name = tensor("op_12328_cast")]; + tensor attn_weights_509_transpose_x_0 = const()[name = tensor("attn_weights_509_transpose_x_0"), val = tensor(true)]; + tensor 
attn_weights_509_transpose_y_0 = const()[name = tensor("attn_weights_509_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_509_cast = matmul(transpose_x = attn_weights_509_transpose_x_0, transpose_y = attn_weights_509_transpose_y_0, x = var_12324_cast, y = var_12326_cast)[name = tensor("attn_weights_509_cast")]; + tensor attn_weights_511_cast = mul(x = attn_weights_509_cast, y = var_12_to_fp16)[name = tensor("attn_weights_511_cast")]; + tensor var_12332_cast = softmax(axis = var_18, x = attn_weights_511_cast)[name = tensor("op_12332_cast")]; + tensor attn_255_transpose_x_0 = const()[name = tensor("attn_255_transpose_x_0"), val = tensor(false)]; + tensor attn_255_transpose_y_0 = const()[name = tensor("attn_255_transpose_y_0"), val = tensor(true)]; + tensor attn_255_cast = matmul(transpose_x = attn_255_transpose_x_0, transpose_y = attn_255_transpose_y_0, x = var_12328_cast, y = var_12332_cast)[name = tensor("attn_255_cast")]; + tensor var_12336 = const()[name = tensor("op_12336"), val = tensor([2, 1280, 1, -1])]; + tensor input_709_cast = reshape(shape = var_12336, x = attn_255_cast)[name = tensor("input_709_cast")]; + tensor var_12341 = const()[name = tensor("op_12341"), val = tensor([1, 1])]; + tensor var_12343 = const()[name = tensor("op_12343"), val = tensor([1, 1])]; + tensor var_12345_pad_type_0 = const()[name = tensor("op_12345_pad_type_0"), val = tensor("custom")]; + tensor var_12345_pad_0 = const()[name = tensor("op_12345_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1810470848))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1811699712))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_9_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_9_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1811699904)))]; + tensor var_12345_cast = conv(bias = unet_up_blocks_0_attentions_2_transformer_blocks_9_attn2_to_out_0_bias_to_fp16, dilations = var_12343, groups = var_31, pad = var_12345_pad_0, pad_type = var_12345_pad_type_0, strides = var_12341, weight = unet_up_blocks_0_attentions_2_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized, x = input_709_cast)[name = tensor("op_12345_cast")]; + tensor inputs_383_cast = add(x = var_12345_cast, y = inputs_381_cast)[name = tensor("inputs_383_cast")]; + tensor var_12349 = const()[name = tensor("op_12349"), val = tensor([1])]; + tensor channels_mean_383_cast = reduce_mean(axes = var_12349, keep_dims = var_23, x = inputs_383_cast)[name = tensor("channels_mean_383_cast")]; + tensor zero_mean_383_cast = sub(x = inputs_383_cast, y = channels_mean_383_cast)[name = tensor("zero_mean_383_cast")]; + tensor zero_mean_sq_383_cast = mul(x = zero_mean_383_cast, y = zero_mean_383_cast)[name = tensor("zero_mean_sq_383_cast")]; + tensor var_12353 = const()[name = tensor("op_12353"), val = tensor([1])]; + tensor var_12354_cast = reduce_mean(axes = var_12353, keep_dims = var_23, x = zero_mean_sq_383_cast)[name = tensor("op_12354_cast")]; + tensor var_12355_to_fp16 = const()[name = tensor("op_12355_to_fp16"), val = 
tensor(0x1.5p-17)]; + tensor var_12356_cast = add(x = var_12354_cast, y = var_12355_to_fp16)[name = tensor("op_12356_cast")]; + tensor denom_383_epsilon_0_to_fp16 = const()[name = tensor("denom_383_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_383_cast = rsqrt(epsilon = denom_383_epsilon_0_to_fp16, x = var_12356_cast)[name = tensor("denom_383_cast")]; + tensor out_383_cast = mul(x = zero_mean_383_cast, y = denom_383_cast)[name = tensor("out_383_cast")]; + tensor var_12360_to_fp16 = const()[name = tensor("op_12360_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1811702528)))]; + tensor var_12361_cast = add(x = out_383_cast, y = var_12360_to_fp16)[name = tensor("op_12361_cast")]; + tensor var_12363_to_fp16 = const()[name = tensor("op_12363_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1811705152)))]; + tensor input_711_cast = mul(x = var_12361_cast, y = var_12363_to_fp16)[name = tensor("input_711_cast")]; + tensor var_12371 = const()[name = tensor("op_12371"), val = tensor([1, 1])]; + tensor var_12373 = const()[name = tensor("op_12373"), val = tensor([1, 1])]; + tensor var_12375_pad_type_0 = const()[name = tensor("op_12375_pad_type_0"), val = tensor("custom")]; + tensor var_12375_pad_0 = const()[name = tensor("op_12375_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1811707776))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1821538240))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_9_ff_net_0_proj_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1821538432))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1821546176))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_9_ff_net_0_proj_bias_to_fp16_palettized"), shape = tensor([10240])]; + tensor var_12375_cast = conv(bias = unet_up_blocks_0_attentions_2_transformer_blocks_9_ff_net_0_proj_bias_to_fp16_palettized, dilations = var_12373, groups = var_31, pad = var_12375_pad_0, pad_type = var_12375_pad_type_0, strides = var_12371, weight = unet_up_blocks_0_attentions_2_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized, x = input_711_cast)[name = tensor("op_12375_cast")]; + tensor var_12376_split_sizes_0 = const()[name = tensor("op_12376_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_12376_axis_0 = const()[name = tensor("op_12376_axis_0"), val = tensor(1)]; + tensor var_12376_cast_0, tensor var_12376_cast_1 = split(axis = var_12376_axis_0, split_sizes = var_12376_split_sizes_0, x = var_12375_cast)[name = tensor("op_12376_cast")]; + tensor var_12378_mode_0 = const()[name = tensor("op_12378_mode_0"), val = tensor("EXACT")]; + tensor var_12378_cast = gelu(mode = var_12378_mode_0, x = var_12376_cast_1)[name = tensor("op_12378_cast")]; + tensor input_713_cast = mul(x = var_12376_cast_0, y = var_12378_cast)[name = tensor("input_713_cast")]; + tensor var_12382 = const()[name = tensor("op_12382"), val = tensor([1, 1])]; + tensor var_12384 = const()[name = 
tensor("op_12384"), val = tensor([1, 1])]; + tensor var_12386_pad_type_0 = const()[name = tensor("op_12386_pad_type_0"), val = tensor("custom")]; + tensor var_12386_pad_0 = const()[name = tensor("op_12386_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1821546368))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1826461632))), name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor unet_up_blocks_0_attentions_2_transformer_blocks_9_ff_net_2_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_2_transformer_blocks_9_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1826461824)))]; + tensor var_12386_cast = conv(bias = unet_up_blocks_0_attentions_2_transformer_blocks_9_ff_net_2_bias_to_fp16, dilations = var_12384, groups = var_31, pad = var_12386_pad_0, pad_type = var_12386_pad_type_0, strides = var_12382, weight = unet_up_blocks_0_attentions_2_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized, x = input_713_cast)[name = tensor("op_12386_cast")]; + tensor hidden_states_495_cast = add(x = var_12386_cast, y = inputs_383_cast)[name = tensor("hidden_states_495_cast")]; + tensor var_12388 = const()[name = tensor("op_12388"), val = tensor([2, 1280, 32, 32])]; + tensor input_715_cast = reshape(shape = var_12388, x = hidden_states_495_cast)[name = tensor("input_715_cast")]; + tensor var_12392 = const()[name = tensor("op_12392"), val = tensor([1, 1])]; + tensor var_12394 = const()[name = tensor("op_12394"), val = tensor([1, 1])]; + tensor hidden_states_497_pad_type_0 = const()[name = tensor("hidden_states_497_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_497_pad_0 = const()[name = tensor("hidden_states_497_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_0_attentions_2_proj_out_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1826464448))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1827693312))), name = tensor("unet_up_blocks_0_attentions_2_proj_out_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor unet_up_blocks_0_attentions_2_proj_out_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_attentions_2_proj_out_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1827693504)))]; + tensor hidden_states_497_cast = conv(bias = unet_up_blocks_0_attentions_2_proj_out_bias_to_fp16, dilations = var_12394, groups = var_31, pad = hidden_states_497_pad_0, pad_type = hidden_states_497_pad_type_0, strides = var_12392, weight = unet_up_blocks_0_attentions_2_proj_out_weight_to_fp16_palettized, x = input_715_cast)[name = tensor("hidden_states_497_cast")]; + tensor input_717_cast = add(x = hidden_states_497_cast, y = hidden_states_431_cast)[name = tensor("input_717_cast")]; + tensor input_719_scale_factor_height_0 = const()[name = tensor("input_719_scale_factor_height_0"), val = tensor(0x1p+1)]; + tensor input_719_scale_factor_width_0 = const()[name = tensor("input_719_scale_factor_width_0"), val = tensor(0x1p+1)]; + tensor input_719_cast = 
upsample_nearest_neighbor(scale_factor_height = input_719_scale_factor_height_0, scale_factor_width = input_719_scale_factor_width_0, x = input_717_cast)[name = tensor("input_719_cast")]; + tensor var_12403 = const()[name = tensor("op_12403"), val = tensor([1, 1])]; + tensor var_12405 = const()[name = tensor("op_12405"), val = tensor([1, 1])]; + tensor hidden_states_499_pad_type_0 = const()[name = tensor("hidden_states_499_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_499_pad_0 = const()[name = tensor("hidden_states_499_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor unet_up_blocks_0_upsamplers_0_conv_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1827696128))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1838755392))), name = tensor("unet_up_blocks_0_upsamplers_0_conv_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 3, 3])]; + tensor unet_up_blocks_0_upsamplers_0_conv_bias_to_fp16 = const()[name = tensor("unet_up_blocks_0_upsamplers_0_conv_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1838755584)))]; + tensor hidden_states_499_cast = conv(bias = unet_up_blocks_0_upsamplers_0_conv_bias_to_fp16, dilations = var_12405, groups = var_31, pad = hidden_states_499_pad_0, pad_type = hidden_states_499_pad_type_0, strides = var_12403, weight = unet_up_blocks_0_upsamplers_0_conv_weight_to_fp16_palettized, x = input_719_cast)[name = tensor("hidden_states_499_cast")]; + tensor input_721_interleave_0 = const()[name = tensor("input_721_interleave_0"), val = tensor(false)]; + tensor input_721_cast = concat(axis = var_31, interleave = input_721_interleave_0, values = (hidden_states_499_cast, input_113_cast))[name = tensor("input_721_cast")]; + tensor reshape_120_shape_0 = const()[name = tensor("reshape_120_shape_0"), val = tensor([2, 32, 60, 64, 64])]; + tensor reshape_120_cast = reshape(shape = reshape_120_shape_0, x = input_721_cast)[name = tensor("reshape_120_cast")]; + tensor reduce_mean_90_axes_0 = const()[name = tensor("reduce_mean_90_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_90_keep_dims_0 = const()[name = tensor("reduce_mean_90_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_90_cast = reduce_mean(axes = reduce_mean_90_axes_0, keep_dims = reduce_mean_90_keep_dims_0, x = reshape_120_cast)[name = tensor("reduce_mean_90_cast")]; + tensor sub_60_cast = sub(x = reshape_120_cast, y = reduce_mean_90_cast)[name = tensor("sub_60_cast")]; + tensor square_30_cast = square(x = sub_60_cast)[name = tensor("square_30_cast")]; + tensor reduce_mean_92_axes_0 = const()[name = tensor("reduce_mean_92_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_92_keep_dims_0 = const()[name = tensor("reduce_mean_92_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_92_cast = reduce_mean(axes = reduce_mean_92_axes_0, keep_dims = reduce_mean_92_keep_dims_0, x = square_30_cast)[name = tensor("reduce_mean_92_cast")]; + tensor add_60_y_0_to_fp16 = const()[name = tensor("add_60_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_60_cast = add(x = reduce_mean_92_cast, y = add_60_y_0_to_fp16)[name = tensor("add_60_cast")]; + tensor sqrt_30_cast = sqrt(x = add_60_cast)[name = tensor("sqrt_30_cast")]; + tensor real_div_30_cast = real_div(x = sub_60_cast, y = sqrt_30_cast)[name = tensor("real_div_30_cast")]; + tensor reshape_121_shape_0 = const()[name = tensor("reshape_121_shape_0"), val 
= tensor([2, 1920, 64, 64])]; + tensor reshape_121_cast = reshape(shape = reshape_121_shape_0, x = real_div_30_cast)[name = tensor("reshape_121_cast")]; + tensor add_61_gamma_0_to_fp16 = const()[name = tensor("add_61_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1838758208)))]; + tensor add_61_beta_0_to_fp16 = const()[name = tensor("add_61_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1838762112)))]; + tensor add_61_epsilon_0_to_fp16 = const()[name = tensor("add_61_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_61_cast = batch_norm(beta = add_61_beta_0_to_fp16, epsilon = add_61_epsilon_0_to_fp16, gamma = add_61_gamma_0_to_fp16, mean = add_55_mean_0_to_fp16, variance = add_55_variance_0_to_fp16, x = reshape_121_cast)[name = tensor("add_61_cast")]; + tensor input_725_cast = silu(x = add_61_cast)[name = tensor("input_725_cast")]; + tensor var_12436 = const()[name = tensor("op_12436"), val = tensor([1, 1])]; + tensor var_12438 = const()[name = tensor("op_12438"), val = tensor([1, 1])]; + tensor hidden_states_501_pad_type_0 = const()[name = tensor("hidden_states_501_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_501_pad_0 = const()[name = tensor("hidden_states_501_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor unet_up_blocks_1_resnets_0_conv1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1838766016))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1847060480))), name = tensor("unet_up_blocks_1_resnets_0_conv1_weight_to_fp16_palettized"), shape = tensor([640, 1920, 3, 3])]; + tensor unet_up_blocks_1_resnets_0_conv1_bias_to_fp16 = const()[name = tensor("unet_up_blocks_1_resnets_0_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1847060672)))]; + tensor hidden_states_501_cast = conv(bias = unet_up_blocks_1_resnets_0_conv1_bias_to_fp16, dilations = var_12438, groups = var_31, pad = hidden_states_501_pad_0, pad_type = hidden_states_501_pad_type_0, strides = var_12436, weight = unet_up_blocks_1_resnets_0_conv1_weight_to_fp16_palettized, x = input_725_cast)[name = tensor("hidden_states_501_cast")]; + tensor var_12444 = const()[name = tensor("op_12444"), val = tensor([1, 1])]; + tensor var_12446 = const()[name = tensor("op_12446"), val = tensor([1, 1])]; + tensor temb_23_pad_type_0 = const()[name = tensor("temb_23_pad_type_0"), val = tensor("custom")]; + tensor temb_23_pad_0 = const()[name = tensor("temb_23_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_1_resnets_0_time_emb_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1847062016))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1847676480))), name = tensor("unet_up_blocks_1_resnets_0_time_emb_proj_weight_to_fp16_palettized"), shape = tensor([640, 1280, 1, 1])]; + tensor unet_up_blocks_1_resnets_0_time_emb_proj_bias_to_fp16 = const()[name = tensor("unet_up_blocks_1_resnets_0_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1847676672)))]; + tensor temb_23_cast = conv(bias = unet_up_blocks_1_resnets_0_time_emb_proj_bias_to_fp16, dilations = var_12446, groups = var_31, pad = temb_23_pad_0, 
pad_type = temb_23_pad_type_0, strides = var_12444, weight = unet_up_blocks_1_resnets_0_time_emb_proj_weight_to_fp16_palettized, x = input_21_cast)[name = tensor("temb_23_cast")]; + tensor input_729_cast = add(x = hidden_states_501_cast, y = temb_23_cast)[name = tensor("input_729_cast")]; + tensor reshape_124_shape_0 = const()[name = tensor("reshape_124_shape_0"), val = tensor([2, 32, 20, 64, 64])]; + tensor reshape_124_cast = reshape(shape = reshape_124_shape_0, x = input_729_cast)[name = tensor("reshape_124_cast")]; + tensor reduce_mean_93_axes_0 = const()[name = tensor("reduce_mean_93_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_93_keep_dims_0 = const()[name = tensor("reduce_mean_93_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_93_cast = reduce_mean(axes = reduce_mean_93_axes_0, keep_dims = reduce_mean_93_keep_dims_0, x = reshape_124_cast)[name = tensor("reduce_mean_93_cast")]; + tensor sub_62_cast = sub(x = reshape_124_cast, y = reduce_mean_93_cast)[name = tensor("sub_62_cast")]; + tensor square_31_cast = square(x = sub_62_cast)[name = tensor("square_31_cast")]; + tensor reduce_mean_95_axes_0 = const()[name = tensor("reduce_mean_95_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_95_keep_dims_0 = const()[name = tensor("reduce_mean_95_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_95_cast = reduce_mean(axes = reduce_mean_95_axes_0, keep_dims = reduce_mean_95_keep_dims_0, x = square_31_cast)[name = tensor("reduce_mean_95_cast")]; + tensor add_62_y_0_to_fp16 = const()[name = tensor("add_62_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_62_cast = add(x = reduce_mean_95_cast, y = add_62_y_0_to_fp16)[name = tensor("add_62_cast")]; + tensor sqrt_31_cast = sqrt(x = add_62_cast)[name = tensor("sqrt_31_cast")]; + tensor real_div_31_cast = real_div(x = sub_62_cast, y = sqrt_31_cast)[name = tensor("real_div_31_cast")]; + tensor reshape_125_shape_0 = const()[name = tensor("reshape_125_shape_0"), val = tensor([2, 640, 64, 64])]; + tensor reshape_125_cast = reshape(shape = reshape_125_shape_0, x = real_div_31_cast)[name = tensor("reshape_125_cast")]; + tensor add_63_gamma_0_to_fp16 = const()[name = tensor("add_63_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1847678016)))]; + tensor add_63_beta_0_to_fp16 = const()[name = tensor("add_63_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1847679360)))]; + tensor add_63_epsilon_0_to_fp16 = const()[name = tensor("add_63_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_63_cast = batch_norm(beta = add_63_beta_0_to_fp16, epsilon = add_63_epsilon_0_to_fp16, gamma = add_63_gamma_0_to_fp16, mean = add_11_mean_0_to_fp16, variance = add_11_variance_0_to_fp16, x = reshape_125_cast)[name = tensor("add_63_cast")]; + tensor input_733_cast = silu(x = add_63_cast)[name = tensor("input_733_cast")]; + tensor var_12456 = const()[name = tensor("op_12456"), val = tensor([1, 1])]; + tensor var_12458 = const()[name = tensor("op_12458"), val = tensor([1, 1])]; + tensor hidden_states_503_pad_type_0 = const()[name = tensor("hidden_states_503_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_503_pad_0 = const()[name = tensor("hidden_states_503_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor unet_up_blocks_1_resnets_0_conv2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1847680704))), lut = 
tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1850445568))), name = tensor("unet_up_blocks_1_resnets_0_conv2_weight_to_fp16_palettized"), shape = tensor([640, 640, 3, 3])]; + tensor unet_up_blocks_1_resnets_0_conv2_bias_to_fp16 = const()[name = tensor("unet_up_blocks_1_resnets_0_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1850445760)))]; + tensor hidden_states_503_cast = conv(bias = unet_up_blocks_1_resnets_0_conv2_bias_to_fp16, dilations = var_12458, groups = var_31, pad = hidden_states_503_pad_0, pad_type = hidden_states_503_pad_type_0, strides = var_12456, weight = unet_up_blocks_1_resnets_0_conv2_weight_to_fp16_palettized, x = input_733_cast)[name = tensor("hidden_states_503_cast")]; + tensor var_12463 = const()[name = tensor("op_12463"), val = tensor([1, 1])]; + tensor var_12465 = const()[name = tensor("op_12465"), val = tensor([1, 1])]; + tensor x_11_pad_type_0 = const()[name = tensor("x_11_pad_type_0"), val = tensor("custom")]; + tensor x_11_pad_0 = const()[name = tensor("x_11_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_1_resnets_0_conv_shortcut_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1850447104))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1851368768))), name = tensor("unet_up_blocks_1_resnets_0_conv_shortcut_weight_to_fp16_palettized"), shape = tensor([640, 1920, 1, 1])]; + tensor unet_up_blocks_1_resnets_0_conv_shortcut_bias_to_fp16 = const()[name = tensor("unet_up_blocks_1_resnets_0_conv_shortcut_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1851368960)))]; + tensor x_11_cast = conv(bias = unet_up_blocks_1_resnets_0_conv_shortcut_bias_to_fp16, dilations = var_12465, groups = var_31, pad = x_11_pad_0, pad_type = x_11_pad_type_0, strides = var_12463, weight = unet_up_blocks_1_resnets_0_conv_shortcut_weight_to_fp16_palettized, x = input_721_cast)[name = tensor("x_11_cast")]; + tensor hidden_states_505_cast = add(x = x_11_cast, y = hidden_states_503_cast)[name = tensor("hidden_states_505_cast")]; + tensor reshape_128_shape_0 = const()[name = tensor("reshape_128_shape_0"), val = tensor([2, 32, 20, 64, 64])]; + tensor reshape_128_cast = reshape(shape = reshape_128_shape_0, x = hidden_states_505_cast)[name = tensor("reshape_128_cast")]; + tensor reduce_mean_96_axes_0 = const()[name = tensor("reduce_mean_96_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_96_keep_dims_0 = const()[name = tensor("reduce_mean_96_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_96_cast = reduce_mean(axes = reduce_mean_96_axes_0, keep_dims = reduce_mean_96_keep_dims_0, x = reshape_128_cast)[name = tensor("reduce_mean_96_cast")]; + tensor sub_64_cast = sub(x = reshape_128_cast, y = reduce_mean_96_cast)[name = tensor("sub_64_cast")]; + tensor square_32_cast = square(x = sub_64_cast)[name = tensor("square_32_cast")]; + tensor reduce_mean_98_axes_0 = const()[name = tensor("reduce_mean_98_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_98_keep_dims_0 = const()[name = tensor("reduce_mean_98_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_98_cast = reduce_mean(axes = reduce_mean_98_axes_0, keep_dims = reduce_mean_98_keep_dims_0, x = square_32_cast)[name = tensor("reduce_mean_98_cast")]; + tensor add_64_y_0_to_fp16 = const()[name = tensor("add_64_y_0_to_fp16"), val 
= tensor(0x1.1p-20)]; + tensor add_64_cast = add(x = reduce_mean_98_cast, y = add_64_y_0_to_fp16)[name = tensor("add_64_cast")]; + tensor sqrt_32_cast = sqrt(x = add_64_cast)[name = tensor("sqrt_32_cast")]; + tensor real_div_32_cast = real_div(x = sub_64_cast, y = sqrt_32_cast)[name = tensor("real_div_32_cast")]; + tensor reshape_129_shape_0 = const()[name = tensor("reshape_129_shape_0"), val = tensor([2, 640, 64, 64])]; + tensor reshape_129_cast = reshape(shape = reshape_129_shape_0, x = real_div_32_cast)[name = tensor("reshape_129_cast")]; + tensor add_65_gamma_0_to_fp16 = const()[name = tensor("add_65_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1851370304)))]; + tensor add_65_beta_0_to_fp16 = const()[name = tensor("add_65_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1851371648)))]; + tensor add_65_epsilon_0_to_fp16 = const()[name = tensor("add_65_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_65_cast = batch_norm(beta = add_65_beta_0_to_fp16, epsilon = add_65_epsilon_0_to_fp16, gamma = add_65_gamma_0_to_fp16, mean = add_11_mean_0_to_fp16, variance = add_11_variance_0_to_fp16, x = reshape_129_cast)[name = tensor("add_65_cast")]; + tensor var_12487 = const()[name = tensor("op_12487"), val = tensor([1, 1])]; + tensor var_12489 = const()[name = tensor("op_12489"), val = tensor([1, 1])]; + tensor hidden_states_507_pad_type_0 = const()[name = tensor("hidden_states_507_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_507_pad_0 = const()[name = tensor("hidden_states_507_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_1_attentions_0_proj_in_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1851372992))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1851680256))), name = tensor("unet_up_blocks_1_attentions_0_proj_in_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor unet_up_blocks_1_attentions_0_proj_in_bias_to_fp16 = const()[name = tensor("unet_up_blocks_1_attentions_0_proj_in_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1851680448)))]; + tensor hidden_states_507_cast = conv(bias = unet_up_blocks_1_attentions_0_proj_in_bias_to_fp16, dilations = var_12489, groups = var_31, pad = hidden_states_507_pad_0, pad_type = hidden_states_507_pad_type_0, strides = var_12487, weight = unet_up_blocks_1_attentions_0_proj_in_weight_to_fp16_palettized, x = add_65_cast)[name = tensor("hidden_states_507_cast")]; + tensor var_12494 = const()[name = tensor("op_12494"), val = tensor([2, 640, 1, 4096])]; + tensor inputs_385_cast = reshape(shape = var_12494, x = hidden_states_507_cast)[name = tensor("inputs_385_cast")]; + tensor var_12504 = const()[name = tensor("op_12504"), val = tensor([1])]; + tensor channels_mean_385_cast = reduce_mean(axes = var_12504, keep_dims = var_23, x = inputs_385_cast)[name = tensor("channels_mean_385_cast")]; + tensor zero_mean_385_cast = sub(x = inputs_385_cast, y = channels_mean_385_cast)[name = tensor("zero_mean_385_cast")]; + tensor zero_mean_sq_385_cast = mul(x = zero_mean_385_cast, y = zero_mean_385_cast)[name = tensor("zero_mean_sq_385_cast")]; + tensor var_12508 = const()[name = tensor("op_12508"), val = tensor([1])]; + tensor var_12509_cast = reduce_mean(axes = var_12508, keep_dims = var_23, x = 
zero_mean_sq_385_cast)[name = tensor("op_12509_cast")]; + tensor var_12510_to_fp16 = const()[name = tensor("op_12510_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_12511_cast = add(x = var_12509_cast, y = var_12510_to_fp16)[name = tensor("op_12511_cast")]; + tensor denom_385_epsilon_0_to_fp16 = const()[name = tensor("denom_385_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_385_cast = rsqrt(epsilon = denom_385_epsilon_0_to_fp16, x = var_12511_cast)[name = tensor("denom_385_cast")]; + tensor out_385_cast = mul(x = zero_mean_385_cast, y = denom_385_cast)[name = tensor("out_385_cast")]; + tensor var_12515_to_fp16 = const()[name = tensor("op_12515_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1851681792)))]; + tensor var_12516_cast = add(x = out_385_cast, y = var_12515_to_fp16)[name = tensor("op_12516_cast")]; + tensor var_12518_to_fp16 = const()[name = tensor("op_12518_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1851683136)))]; + tensor hidden_states_509_cast = mul(x = var_12516_cast, y = var_12518_to_fp16)[name = tensor("hidden_states_509_cast")]; + tensor var_12525 = const()[name = tensor("op_12525"), val = tensor([1, 1])]; + tensor var_12527 = const()[name = tensor("op_12527"), val = tensor([1, 1])]; + tensor q_257_pad_type_0 = const()[name = tensor("q_257_pad_type_0"), val = tensor("custom")]; + tensor q_257_pad_0 = const()[name = tensor("q_257_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_1_attentions_0_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1851684480))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1851991744))), name = tensor("unet_up_blocks_1_attentions_0_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_257_cast = conv(dilations = var_12527, groups = var_31, pad = q_257_pad_0, pad_type = q_257_pad_type_0, strides = var_12525, weight = unet_up_blocks_1_attentions_0_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_509_cast)[name = tensor("q_257_cast")]; + tensor var_12531 = const()[name = tensor("op_12531"), val = tensor([1, 1])]; + tensor var_12533 = const()[name = tensor("op_12533"), val = tensor([1, 1])]; + tensor k_257_pad_type_0 = const()[name = tensor("k_257_pad_type_0"), val = tensor("custom")]; + tensor k_257_pad_0 = const()[name = tensor("k_257_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_1_attentions_0_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1851991936))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1852299200))), name = tensor("unet_up_blocks_1_attentions_0_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor k_257_cast = conv(dilations = var_12533, groups = var_31, pad = k_257_pad_0, pad_type = k_257_pad_type_0, strides = var_12531, weight = unet_up_blocks_1_attentions_0_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_509_cast)[name = tensor("k_257_cast")]; + tensor var_12537 = const()[name = tensor("op_12537"), val = tensor([1, 1])]; + tensor var_12539 = const()[name = tensor("op_12539"), val = tensor([1, 
1])]; + tensor v_257_pad_type_0 = const()[name = tensor("v_257_pad_type_0"), val = tensor("custom")]; + tensor v_257_pad_0 = const()[name = tensor("v_257_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_1_attentions_0_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1852299392))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1852606656))), name = tensor("unet_up_blocks_1_attentions_0_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor v_257_cast = conv(dilations = var_12539, groups = var_31, pad = v_257_pad_0, pad_type = v_257_pad_type_0, strides = var_12537, weight = unet_up_blocks_1_attentions_0_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_509_cast)[name = tensor("v_257_cast")]; + tensor var_12543 = const()[name = tensor("op_12543"), val = tensor([2, 10, 64, -1])]; + tensor var_12544_cast = reshape(shape = var_12543, x = q_257_cast)[name = tensor("op_12544_cast")]; + tensor var_12545 = const()[name = tensor("op_12545"), val = tensor([2, 10, 64, -1])]; + tensor var_12546_cast = reshape(shape = var_12545, x = k_257_cast)[name = tensor("op_12546_cast")]; + tensor var_12547 = const()[name = tensor("op_12547"), val = tensor([2, 10, 64, -1])]; + tensor var_12548_cast = reshape(shape = var_12547, x = v_257_cast)[name = tensor("op_12548_cast")]; + tensor attn_weights_513_transpose_x_0 = const()[name = tensor("attn_weights_513_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_513_transpose_y_0 = const()[name = tensor("attn_weights_513_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_513_cast = matmul(transpose_x = attn_weights_513_transpose_x_0, transpose_y = attn_weights_513_transpose_y_0, x = var_12544_cast, y = var_12546_cast)[name = tensor("attn_weights_513_cast")]; + tensor attn_weights_515_cast = mul(x = attn_weights_513_cast, y = var_12_to_fp16)[name = tensor("attn_weights_515_cast")]; + tensor var_12552_cast = softmax(axis = var_18, x = attn_weights_515_cast)[name = tensor("op_12552_cast")]; + tensor attn_257_transpose_x_0 = const()[name = tensor("attn_257_transpose_x_0"), val = tensor(false)]; + tensor attn_257_transpose_y_0 = const()[name = tensor("attn_257_transpose_y_0"), val = tensor(true)]; + tensor attn_257_cast = matmul(transpose_x = attn_257_transpose_x_0, transpose_y = attn_257_transpose_y_0, x = var_12548_cast, y = var_12552_cast)[name = tensor("attn_257_cast")]; + tensor var_12556 = const()[name = tensor("op_12556"), val = tensor([2, 640, 1, -1])]; + tensor input_737_cast = reshape(shape = var_12556, x = attn_257_cast)[name = tensor("input_737_cast")]; + tensor var_12561 = const()[name = tensor("op_12561"), val = tensor([1, 1])]; + tensor var_12563 = const()[name = tensor("op_12563"), val = tensor([1, 1])]; + tensor var_12565_pad_type_0 = const()[name = tensor("op_12565_pad_type_0"), val = tensor("custom")]; + tensor var_12565_pad_0 = const()[name = tensor("op_12565_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_1_attentions_0_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1852606848))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1852914112))), name = 
tensor("unet_up_blocks_1_attentions_0_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor unet_up_blocks_1_attentions_0_transformer_blocks_0_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("unet_up_blocks_1_attentions_0_transformer_blocks_0_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1852914304)))]; + tensor var_12565_cast = conv(bias = unet_up_blocks_1_attentions_0_transformer_blocks_0_attn1_to_out_0_bias_to_fp16, dilations = var_12563, groups = var_31, pad = var_12565_pad_0, pad_type = var_12565_pad_type_0, strides = var_12561, weight = unet_up_blocks_1_attentions_0_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized, x = input_737_cast)[name = tensor("op_12565_cast")]; + tensor inputs_387_cast = add(x = var_12565_cast, y = inputs_385_cast)[name = tensor("inputs_387_cast")]; + tensor var_12569 = const()[name = tensor("op_12569"), val = tensor([1])]; + tensor channels_mean_387_cast = reduce_mean(axes = var_12569, keep_dims = var_23, x = inputs_387_cast)[name = tensor("channels_mean_387_cast")]; + tensor zero_mean_387_cast = sub(x = inputs_387_cast, y = channels_mean_387_cast)[name = tensor("zero_mean_387_cast")]; + tensor zero_mean_sq_387_cast = mul(x = zero_mean_387_cast, y = zero_mean_387_cast)[name = tensor("zero_mean_sq_387_cast")]; + tensor var_12573 = const()[name = tensor("op_12573"), val = tensor([1])]; + tensor var_12574_cast = reduce_mean(axes = var_12573, keep_dims = var_23, x = zero_mean_sq_387_cast)[name = tensor("op_12574_cast")]; + tensor var_12575_to_fp16 = const()[name = tensor("op_12575_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_12576_cast = add(x = var_12574_cast, y = var_12575_to_fp16)[name = tensor("op_12576_cast")]; + tensor denom_387_epsilon_0_to_fp16 = const()[name = tensor("denom_387_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_387_cast = rsqrt(epsilon = denom_387_epsilon_0_to_fp16, x = var_12576_cast)[name = tensor("denom_387_cast")]; + tensor out_387_cast = mul(x = zero_mean_387_cast, y = denom_387_cast)[name = tensor("out_387_cast")]; + tensor var_12580_to_fp16 = const()[name = tensor("op_12580_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1852915648)))]; + tensor var_12581_cast = add(x = out_387_cast, y = var_12580_to_fp16)[name = tensor("op_12581_cast")]; + tensor var_12583_to_fp16 = const()[name = tensor("op_12583_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1852916992)))]; + tensor hidden_states_511_cast = mul(x = var_12581_cast, y = var_12583_to_fp16)[name = tensor("hidden_states_511_cast")]; + tensor var_12590 = const()[name = tensor("op_12590"), val = tensor([1, 1])]; + tensor var_12592 = const()[name = tensor("op_12592"), val = tensor([1, 1])]; + tensor q_259_pad_type_0 = const()[name = tensor("q_259_pad_type_0"), val = tensor("custom")]; + tensor q_259_pad_0 = const()[name = tensor("q_259_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_1_attentions_0_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1852918336))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1853225600))), name = tensor("unet_up_blocks_1_attentions_0_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 
1])]; + tensor q_259_cast = conv(dilations = var_12592, groups = var_31, pad = q_259_pad_0, pad_type = q_259_pad_type_0, strides = var_12590, weight = unet_up_blocks_1_attentions_0_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_511_cast)[name = tensor("q_259_cast")]; + tensor var_12596 = const()[name = tensor("op_12596"), val = tensor([1, 1])]; + tensor var_12598 = const()[name = tensor("op_12598"), val = tensor([1, 1])]; + tensor k_259_pad_type_0 = const()[name = tensor("k_259_pad_type_0"), val = tensor("custom")]; + tensor k_259_pad_0 = const()[name = tensor("k_259_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_1_attentions_0_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1853225792))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1854208896))), name = tensor("unet_up_blocks_1_attentions_0_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor k_259_cast = conv(dilations = var_12598, groups = var_31, pad = k_259_pad_0, pad_type = k_259_pad_type_0, strides = var_12596, weight = unet_up_blocks_1_attentions_0_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_259_cast")]; + tensor var_12602 = const()[name = tensor("op_12602"), val = tensor([1, 1])]; + tensor var_12604 = const()[name = tensor("op_12604"), val = tensor([1, 1])]; + tensor v_259_pad_type_0 = const()[name = tensor("v_259_pad_type_0"), val = tensor("custom")]; + tensor v_259_pad_0 = const()[name = tensor("v_259_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_1_attentions_0_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1854209088))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1855192192))), name = tensor("unet_up_blocks_1_attentions_0_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor v_259_cast = conv(dilations = var_12604, groups = var_31, pad = v_259_pad_0, pad_type = v_259_pad_type_0, strides = var_12602, weight = unet_up_blocks_1_attentions_0_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_259_cast")]; + tensor var_12608 = const()[name = tensor("op_12608"), val = tensor([2, 10, 64, -1])]; + tensor var_12609_cast = reshape(shape = var_12608, x = q_259_cast)[name = tensor("op_12609_cast")]; + tensor var_12610 = const()[name = tensor("op_12610"), val = tensor([2, 10, 64, -1])]; + tensor var_12611_cast = reshape(shape = var_12610, x = k_259_cast)[name = tensor("op_12611_cast")]; + tensor var_12612 = const()[name = tensor("op_12612"), val = tensor([2, 10, 64, -1])]; + tensor var_12613_cast = reshape(shape = var_12612, x = v_259_cast)[name = tensor("op_12613_cast")]; + tensor attn_weights_517_transpose_x_0 = const()[name = tensor("attn_weights_517_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_517_transpose_y_0 = const()[name = tensor("attn_weights_517_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_517_cast = matmul(transpose_x = attn_weights_517_transpose_x_0, transpose_y = attn_weights_517_transpose_y_0, x = var_12609_cast, y = var_12611_cast)[name = tensor("attn_weights_517_cast")]; + tensor 
attn_weights_519_cast = mul(x = attn_weights_517_cast, y = var_12_to_fp16)[name = tensor("attn_weights_519_cast")]; + tensor var_12617_cast = softmax(axis = var_18, x = attn_weights_519_cast)[name = tensor("op_12617_cast")]; + tensor attn_259_transpose_x_0 = const()[name = tensor("attn_259_transpose_x_0"), val = tensor(false)]; + tensor attn_259_transpose_y_0 = const()[name = tensor("attn_259_transpose_y_0"), val = tensor(true)]; + tensor attn_259_cast = matmul(transpose_x = attn_259_transpose_x_0, transpose_y = attn_259_transpose_y_0, x = var_12613_cast, y = var_12617_cast)[name = tensor("attn_259_cast")]; + tensor var_12621 = const()[name = tensor("op_12621"), val = tensor([2, 640, 1, -1])]; + tensor input_739_cast = reshape(shape = var_12621, x = attn_259_cast)[name = tensor("input_739_cast")]; + tensor var_12626 = const()[name = tensor("op_12626"), val = tensor([1, 1])]; + tensor var_12628 = const()[name = tensor("op_12628"), val = tensor([1, 1])]; + tensor var_12630_pad_type_0 = const()[name = tensor("op_12630_pad_type_0"), val = tensor("custom")]; + tensor var_12630_pad_0 = const()[name = tensor("op_12630_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_1_attentions_0_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1855192384))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1855499648))), name = tensor("unet_up_blocks_1_attentions_0_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor unet_up_blocks_1_attentions_0_transformer_blocks_0_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("unet_up_blocks_1_attentions_0_transformer_blocks_0_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1855499840)))]; + tensor var_12630_cast = conv(bias = unet_up_blocks_1_attentions_0_transformer_blocks_0_attn2_to_out_0_bias_to_fp16, dilations = var_12628, groups = var_31, pad = var_12630_pad_0, pad_type = var_12630_pad_type_0, strides = var_12626, weight = unet_up_blocks_1_attentions_0_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized, x = input_739_cast)[name = tensor("op_12630_cast")]; + tensor inputs_389_cast = add(x = var_12630_cast, y = inputs_387_cast)[name = tensor("inputs_389_cast")]; + tensor var_12634 = const()[name = tensor("op_12634"), val = tensor([1])]; + tensor channels_mean_389_cast = reduce_mean(axes = var_12634, keep_dims = var_23, x = inputs_389_cast)[name = tensor("channels_mean_389_cast")]; + tensor zero_mean_389_cast = sub(x = inputs_389_cast, y = channels_mean_389_cast)[name = tensor("zero_mean_389_cast")]; + tensor zero_mean_sq_389_cast = mul(x = zero_mean_389_cast, y = zero_mean_389_cast)[name = tensor("zero_mean_sq_389_cast")]; + tensor var_12638 = const()[name = tensor("op_12638"), val = tensor([1])]; + tensor var_12639_cast = reduce_mean(axes = var_12638, keep_dims = var_23, x = zero_mean_sq_389_cast)[name = tensor("op_12639_cast")]; + tensor var_12640_to_fp16 = const()[name = tensor("op_12640_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_12641_cast = add(x = var_12639_cast, y = var_12640_to_fp16)[name = tensor("op_12641_cast")]; + tensor denom_389_epsilon_0_to_fp16 = const()[name = tensor("denom_389_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_389_cast = rsqrt(epsilon = denom_389_epsilon_0_to_fp16, x = var_12641_cast)[name = 
tensor("denom_389_cast")]; + tensor out_389_cast = mul(x = zero_mean_389_cast, y = denom_389_cast)[name = tensor("out_389_cast")]; + tensor var_12645_to_fp16 = const()[name = tensor("op_12645_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1855501184)))]; + tensor var_12646_cast = add(x = out_389_cast, y = var_12645_to_fp16)[name = tensor("op_12646_cast")]; + tensor var_12648_to_fp16 = const()[name = tensor("op_12648_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1855502528)))]; + tensor input_741_cast = mul(x = var_12646_cast, y = var_12648_to_fp16)[name = tensor("input_741_cast")]; + tensor var_12656 = const()[name = tensor("op_12656"), val = tensor([1, 1])]; + tensor var_12658 = const()[name = tensor("op_12658"), val = tensor([1, 1])]; + tensor var_12660_pad_type_0 = const()[name = tensor("op_12660_pad_type_0"), val = tensor("custom")]; + tensor var_12660_pad_0 = const()[name = tensor("op_12660_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_1_attentions_0_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1855503872))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1857961536))), name = tensor("unet_up_blocks_1_attentions_0_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([5120, 640, 1, 1])]; + tensor unet_up_blocks_1_attentions_0_transformer_blocks_0_ff_net_0_proj_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1857961728))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1857965632))), name = tensor("unet_up_blocks_1_attentions_0_transformer_blocks_0_ff_net_0_proj_bias_to_fp16_palettized"), shape = tensor([5120])]; + tensor var_12660_cast = conv(bias = unet_up_blocks_1_attentions_0_transformer_blocks_0_ff_net_0_proj_bias_to_fp16_palettized, dilations = var_12658, groups = var_31, pad = var_12660_pad_0, pad_type = var_12660_pad_type_0, strides = var_12656, weight = unet_up_blocks_1_attentions_0_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized, x = input_741_cast)[name = tensor("op_12660_cast")]; + tensor var_12661_split_sizes_0 = const()[name = tensor("op_12661_split_sizes_0"), val = tensor([2560, 2560])]; + tensor var_12661_axis_0 = const()[name = tensor("op_12661_axis_0"), val = tensor(1)]; + tensor var_12661_cast_0, tensor var_12661_cast_1 = split(axis = var_12661_axis_0, split_sizes = var_12661_split_sizes_0, x = var_12660_cast)[name = tensor("op_12661_cast")]; + tensor var_12663_mode_0 = const()[name = tensor("op_12663_mode_0"), val = tensor("EXACT")]; + tensor var_12663_cast = gelu(mode = var_12663_mode_0, x = var_12661_cast_1)[name = tensor("op_12663_cast")]; + tensor input_743_cast = mul(x = var_12661_cast_0, y = var_12663_cast)[name = tensor("input_743_cast")]; + tensor var_12667 = const()[name = tensor("op_12667"), val = tensor([1, 1])]; + tensor var_12669 = const()[name = tensor("op_12669"), val = tensor([1, 1])]; + tensor var_12671_pad_type_0 = const()[name = tensor("op_12671_pad_type_0"), val = tensor("custom")]; + tensor var_12671_pad_0 = const()[name = tensor("op_12671_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_1_attentions_0_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized = 
constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1857965824))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1859194688))), name = tensor("unet_up_blocks_1_attentions_0_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized"), shape = tensor([640, 2560, 1, 1])]; + tensor unet_up_blocks_1_attentions_0_transformer_blocks_0_ff_net_2_bias_to_fp16 = const()[name = tensor("unet_up_blocks_1_attentions_0_transformer_blocks_0_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1859194880)))]; + tensor var_12671_cast = conv(bias = unet_up_blocks_1_attentions_0_transformer_blocks_0_ff_net_2_bias_to_fp16, dilations = var_12669, groups = var_31, pad = var_12671_pad_0, pad_type = var_12671_pad_type_0, strides = var_12667, weight = unet_up_blocks_1_attentions_0_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized, x = input_743_cast)[name = tensor("op_12671_cast")]; + tensor inputs_391_cast = add(x = var_12671_cast, y = inputs_389_cast)[name = tensor("inputs_391_cast")]; + tensor var_12681 = const()[name = tensor("op_12681"), val = tensor([1])]; + tensor channels_mean_391_cast = reduce_mean(axes = var_12681, keep_dims = var_23, x = inputs_391_cast)[name = tensor("channels_mean_391_cast")]; + tensor zero_mean_391_cast = sub(x = inputs_391_cast, y = channels_mean_391_cast)[name = tensor("zero_mean_391_cast")]; + tensor zero_mean_sq_391_cast = mul(x = zero_mean_391_cast, y = zero_mean_391_cast)[name = tensor("zero_mean_sq_391_cast")]; + tensor var_12685 = const()[name = tensor("op_12685"), val = tensor([1])]; + tensor var_12686_cast = reduce_mean(axes = var_12685, keep_dims = var_23, x = zero_mean_sq_391_cast)[name = tensor("op_12686_cast")]; + tensor var_12687_to_fp16 = const()[name = tensor("op_12687_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_12688_cast = add(x = var_12686_cast, y = var_12687_to_fp16)[name = tensor("op_12688_cast")]; + tensor denom_391_epsilon_0_to_fp16 = const()[name = tensor("denom_391_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_391_cast = rsqrt(epsilon = denom_391_epsilon_0_to_fp16, x = var_12688_cast)[name = tensor("denom_391_cast")]; + tensor out_391_cast = mul(x = zero_mean_391_cast, y = denom_391_cast)[name = tensor("out_391_cast")]; + tensor var_12692_to_fp16 = const()[name = tensor("op_12692_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1859196224)))]; + tensor var_12693_cast = add(x = out_391_cast, y = var_12692_to_fp16)[name = tensor("op_12693_cast")]; + tensor var_12695_to_fp16 = const()[name = tensor("op_12695_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1859197568)))]; + tensor hidden_states_515_cast = mul(x = var_12693_cast, y = var_12695_to_fp16)[name = tensor("hidden_states_515_cast")]; + tensor var_12702 = const()[name = tensor("op_12702"), val = tensor([1, 1])]; + tensor var_12704 = const()[name = tensor("op_12704"), val = tensor([1, 1])]; + tensor q_261_pad_type_0 = const()[name = tensor("q_261_pad_type_0"), val = tensor("custom")]; + tensor q_261_pad_0 = const()[name = tensor("q_261_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_1_attentions_0_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1859198912))), lut = tensor(BLOBFILE(path 
= tensor("@model_path/weights/weight.bin"), offset = tensor(1859506176))), name = tensor("unet_up_blocks_1_attentions_0_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_261_cast = conv(dilations = var_12704, groups = var_31, pad = q_261_pad_0, pad_type = q_261_pad_type_0, strides = var_12702, weight = unet_up_blocks_1_attentions_0_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_515_cast)[name = tensor("q_261_cast")]; + tensor var_12708 = const()[name = tensor("op_12708"), val = tensor([1, 1])]; + tensor var_12710 = const()[name = tensor("op_12710"), val = tensor([1, 1])]; + tensor k_261_pad_type_0 = const()[name = tensor("k_261_pad_type_0"), val = tensor("custom")]; + tensor k_261_pad_0 = const()[name = tensor("k_261_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_1_attentions_0_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1859506368))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1859813632))), name = tensor("unet_up_blocks_1_attentions_0_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor k_261_cast = conv(dilations = var_12710, groups = var_31, pad = k_261_pad_0, pad_type = k_261_pad_type_0, strides = var_12708, weight = unet_up_blocks_1_attentions_0_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_515_cast)[name = tensor("k_261_cast")]; + tensor var_12714 = const()[name = tensor("op_12714"), val = tensor([1, 1])]; + tensor var_12716 = const()[name = tensor("op_12716"), val = tensor([1, 1])]; + tensor v_261_pad_type_0 = const()[name = tensor("v_261_pad_type_0"), val = tensor("custom")]; + tensor v_261_pad_0 = const()[name = tensor("v_261_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_1_attentions_0_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1859813824))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1860121088))), name = tensor("unet_up_blocks_1_attentions_0_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor v_261_cast = conv(dilations = var_12716, groups = var_31, pad = v_261_pad_0, pad_type = v_261_pad_type_0, strides = var_12714, weight = unet_up_blocks_1_attentions_0_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_515_cast)[name = tensor("v_261_cast")]; + tensor var_12720 = const()[name = tensor("op_12720"), val = tensor([2, 10, 64, -1])]; + tensor var_12721_cast = reshape(shape = var_12720, x = q_261_cast)[name = tensor("op_12721_cast")]; + tensor var_12722 = const()[name = tensor("op_12722"), val = tensor([2, 10, 64, -1])]; + tensor var_12723_cast = reshape(shape = var_12722, x = k_261_cast)[name = tensor("op_12723_cast")]; + tensor var_12724 = const()[name = tensor("op_12724"), val = tensor([2, 10, 64, -1])]; + tensor var_12725_cast = reshape(shape = var_12724, x = v_261_cast)[name = tensor("op_12725_cast")]; + tensor attn_weights_521_transpose_x_0 = const()[name = tensor("attn_weights_521_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_521_transpose_y_0 = const()[name = tensor("attn_weights_521_transpose_y_0"), val = tensor(false)]; + tensor 
attn_weights_521_cast = matmul(transpose_x = attn_weights_521_transpose_x_0, transpose_y = attn_weights_521_transpose_y_0, x = var_12721_cast, y = var_12723_cast)[name = tensor("attn_weights_521_cast")]; + tensor attn_weights_523_cast = mul(x = attn_weights_521_cast, y = var_12_to_fp16)[name = tensor("attn_weights_523_cast")]; + tensor var_12729_cast = softmax(axis = var_18, x = attn_weights_523_cast)[name = tensor("op_12729_cast")]; + tensor attn_261_transpose_x_0 = const()[name = tensor("attn_261_transpose_x_0"), val = tensor(false)]; + tensor attn_261_transpose_y_0 = const()[name = tensor("attn_261_transpose_y_0"), val = tensor(true)]; + tensor attn_261_cast = matmul(transpose_x = attn_261_transpose_x_0, transpose_y = attn_261_transpose_y_0, x = var_12725_cast, y = var_12729_cast)[name = tensor("attn_261_cast")]; + tensor var_12733 = const()[name = tensor("op_12733"), val = tensor([2, 640, 1, -1])]; + tensor input_745_cast = reshape(shape = var_12733, x = attn_261_cast)[name = tensor("input_745_cast")]; + tensor var_12738 = const()[name = tensor("op_12738"), val = tensor([1, 1])]; + tensor var_12740 = const()[name = tensor("op_12740"), val = tensor([1, 1])]; + tensor var_12742_pad_type_0 = const()[name = tensor("op_12742_pad_type_0"), val = tensor("custom")]; + tensor var_12742_pad_0 = const()[name = tensor("op_12742_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_1_attentions_0_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1860121280))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1860428544))), name = tensor("unet_up_blocks_1_attentions_0_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor unet_up_blocks_1_attentions_0_transformer_blocks_1_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("unet_up_blocks_1_attentions_0_transformer_blocks_1_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1860428736)))]; + tensor var_12742_cast = conv(bias = unet_up_blocks_1_attentions_0_transformer_blocks_1_attn1_to_out_0_bias_to_fp16, dilations = var_12740, groups = var_31, pad = var_12742_pad_0, pad_type = var_12742_pad_type_0, strides = var_12738, weight = unet_up_blocks_1_attentions_0_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized, x = input_745_cast)[name = tensor("op_12742_cast")]; + tensor inputs_393_cast = add(x = var_12742_cast, y = inputs_391_cast)[name = tensor("inputs_393_cast")]; + tensor var_12746 = const()[name = tensor("op_12746"), val = tensor([1])]; + tensor channels_mean_393_cast = reduce_mean(axes = var_12746, keep_dims = var_23, x = inputs_393_cast)[name = tensor("channels_mean_393_cast")]; + tensor zero_mean_393_cast = sub(x = inputs_393_cast, y = channels_mean_393_cast)[name = tensor("zero_mean_393_cast")]; + tensor zero_mean_sq_393_cast = mul(x = zero_mean_393_cast, y = zero_mean_393_cast)[name = tensor("zero_mean_sq_393_cast")]; + tensor var_12750 = const()[name = tensor("op_12750"), val = tensor([1])]; + tensor var_12751_cast = reduce_mean(axes = var_12750, keep_dims = var_23, x = zero_mean_sq_393_cast)[name = tensor("op_12751_cast")]; + tensor var_12752_to_fp16 = const()[name = tensor("op_12752_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_12753_cast = add(x = var_12751_cast, y = var_12752_to_fp16)[name = tensor("op_12753_cast")]; + tensor 
denom_393_epsilon_0_to_fp16 = const()[name = tensor("denom_393_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_393_cast = rsqrt(epsilon = denom_393_epsilon_0_to_fp16, x = var_12753_cast)[name = tensor("denom_393_cast")]; + tensor out_393_cast = mul(x = zero_mean_393_cast, y = denom_393_cast)[name = tensor("out_393_cast")]; + tensor var_12757_to_fp16 = const()[name = tensor("op_12757_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1860430080)))]; + tensor var_12758_cast = add(x = out_393_cast, y = var_12757_to_fp16)[name = tensor("op_12758_cast")]; + tensor var_12760_to_fp16 = const()[name = tensor("op_12760_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1860431424)))]; + tensor hidden_states_517_cast = mul(x = var_12758_cast, y = var_12760_to_fp16)[name = tensor("hidden_states_517_cast")]; + tensor var_12767 = const()[name = tensor("op_12767"), val = tensor([1, 1])]; + tensor var_12769 = const()[name = tensor("op_12769"), val = tensor([1, 1])]; + tensor q_263_pad_type_0 = const()[name = tensor("q_263_pad_type_0"), val = tensor("custom")]; + tensor q_263_pad_0 = const()[name = tensor("q_263_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_1_attentions_0_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1860432768))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1860740032))), name = tensor("unet_up_blocks_1_attentions_0_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_263_cast = conv(dilations = var_12769, groups = var_31, pad = q_263_pad_0, pad_type = q_263_pad_type_0, strides = var_12767, weight = unet_up_blocks_1_attentions_0_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_517_cast)[name = tensor("q_263_cast")]; + tensor var_12773 = const()[name = tensor("op_12773"), val = tensor([1, 1])]; + tensor var_12775 = const()[name = tensor("op_12775"), val = tensor([1, 1])]; + tensor k_263_pad_type_0 = const()[name = tensor("k_263_pad_type_0"), val = tensor("custom")]; + tensor k_263_pad_0 = const()[name = tensor("k_263_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_1_attentions_0_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1860740224))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1861723328))), name = tensor("unet_up_blocks_1_attentions_0_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor k_263_cast = conv(dilations = var_12775, groups = var_31, pad = k_263_pad_0, pad_type = k_263_pad_type_0, strides = var_12773, weight = unet_up_blocks_1_attentions_0_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_263_cast")]; + tensor var_12779 = const()[name = tensor("op_12779"), val = tensor([1, 1])]; + tensor var_12781 = const()[name = tensor("op_12781"), val = tensor([1, 1])]; + tensor v_263_pad_type_0 = const()[name = tensor("v_263_pad_type_0"), val = tensor("custom")]; + tensor v_263_pad_0 = const()[name = tensor("v_263_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
unet_up_blocks_1_attentions_0_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1861723520))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1862706624))), name = tensor("unet_up_blocks_1_attentions_0_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor v_263_cast = conv(dilations = var_12781, groups = var_31, pad = v_263_pad_0, pad_type = v_263_pad_type_0, strides = var_12779, weight = unet_up_blocks_1_attentions_0_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_263_cast")]; + tensor var_12785 = const()[name = tensor("op_12785"), val = tensor([2, 10, 64, -1])]; + tensor var_12786_cast = reshape(shape = var_12785, x = q_263_cast)[name = tensor("op_12786_cast")]; + tensor var_12787 = const()[name = tensor("op_12787"), val = tensor([2, 10, 64, -1])]; + tensor var_12788_cast = reshape(shape = var_12787, x = k_263_cast)[name = tensor("op_12788_cast")]; + tensor var_12789 = const()[name = tensor("op_12789"), val = tensor([2, 10, 64, -1])]; + tensor var_12790_cast = reshape(shape = var_12789, x = v_263_cast)[name = tensor("op_12790_cast")]; + tensor attn_weights_525_transpose_x_0 = const()[name = tensor("attn_weights_525_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_525_transpose_y_0 = const()[name = tensor("attn_weights_525_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_525_cast = matmul(transpose_x = attn_weights_525_transpose_x_0, transpose_y = attn_weights_525_transpose_y_0, x = var_12786_cast, y = var_12788_cast)[name = tensor("attn_weights_525_cast")]; + tensor attn_weights_527_cast = mul(x = attn_weights_525_cast, y = var_12_to_fp16)[name = tensor("attn_weights_527_cast")]; + tensor var_12794_cast = softmax(axis = var_18, x = attn_weights_527_cast)[name = tensor("op_12794_cast")]; + tensor attn_263_transpose_x_0 = const()[name = tensor("attn_263_transpose_x_0"), val = tensor(false)]; + tensor attn_263_transpose_y_0 = const()[name = tensor("attn_263_transpose_y_0"), val = tensor(true)]; + tensor attn_263_cast = matmul(transpose_x = attn_263_transpose_x_0, transpose_y = attn_263_transpose_y_0, x = var_12790_cast, y = var_12794_cast)[name = tensor("attn_263_cast")]; + tensor var_12798 = const()[name = tensor("op_12798"), val = tensor([2, 640, 1, -1])]; + tensor input_747_cast = reshape(shape = var_12798, x = attn_263_cast)[name = tensor("input_747_cast")]; + tensor var_12803 = const()[name = tensor("op_12803"), val = tensor([1, 1])]; + tensor var_12805 = const()[name = tensor("op_12805"), val = tensor([1, 1])]; + tensor var_12807_pad_type_0 = const()[name = tensor("op_12807_pad_type_0"), val = tensor("custom")]; + tensor var_12807_pad_0 = const()[name = tensor("op_12807_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_1_attentions_0_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1862706816))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1863014080))), name = tensor("unet_up_blocks_1_attentions_0_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor unet_up_blocks_1_attentions_0_transformer_blocks_1_attn2_to_out_0_bias_to_fp16 = const()[name = 
tensor("unet_up_blocks_1_attentions_0_transformer_blocks_1_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1863014272)))]; + tensor var_12807_cast = conv(bias = unet_up_blocks_1_attentions_0_transformer_blocks_1_attn2_to_out_0_bias_to_fp16, dilations = var_12805, groups = var_31, pad = var_12807_pad_0, pad_type = var_12807_pad_type_0, strides = var_12803, weight = unet_up_blocks_1_attentions_0_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized, x = input_747_cast)[name = tensor("op_12807_cast")]; + tensor inputs_395_cast = add(x = var_12807_cast, y = inputs_393_cast)[name = tensor("inputs_395_cast")]; + tensor var_12811 = const()[name = tensor("op_12811"), val = tensor([1])]; + tensor channels_mean_395_cast = reduce_mean(axes = var_12811, keep_dims = var_23, x = inputs_395_cast)[name = tensor("channels_mean_395_cast")]; + tensor zero_mean_395_cast = sub(x = inputs_395_cast, y = channels_mean_395_cast)[name = tensor("zero_mean_395_cast")]; + tensor zero_mean_sq_395_cast = mul(x = zero_mean_395_cast, y = zero_mean_395_cast)[name = tensor("zero_mean_sq_395_cast")]; + tensor var_12815 = const()[name = tensor("op_12815"), val = tensor([1])]; + tensor var_12816_cast = reduce_mean(axes = var_12815, keep_dims = var_23, x = zero_mean_sq_395_cast)[name = tensor("op_12816_cast")]; + tensor var_12817_to_fp16 = const()[name = tensor("op_12817_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_12818_cast = add(x = var_12816_cast, y = var_12817_to_fp16)[name = tensor("op_12818_cast")]; + tensor denom_395_epsilon_0_to_fp16 = const()[name = tensor("denom_395_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_395_cast = rsqrt(epsilon = denom_395_epsilon_0_to_fp16, x = var_12818_cast)[name = tensor("denom_395_cast")]; + tensor out_395_cast = mul(x = zero_mean_395_cast, y = denom_395_cast)[name = tensor("out_395_cast")]; + tensor var_12822_to_fp16 = const()[name = tensor("op_12822_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1863015616)))]; + tensor var_12823_cast = add(x = out_395_cast, y = var_12822_to_fp16)[name = tensor("op_12823_cast")]; + tensor var_12825_to_fp16 = const()[name = tensor("op_12825_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1863016960)))]; + tensor input_749_cast = mul(x = var_12823_cast, y = var_12825_to_fp16)[name = tensor("input_749_cast")]; + tensor var_12833 = const()[name = tensor("op_12833"), val = tensor([1, 1])]; + tensor var_12835 = const()[name = tensor("op_12835"), val = tensor([1, 1])]; + tensor var_12837_pad_type_0 = const()[name = tensor("op_12837_pad_type_0"), val = tensor("custom")]; + tensor var_12837_pad_0 = const()[name = tensor("op_12837_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_1_attentions_0_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1863018304))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1865475968))), name = tensor("unet_up_blocks_1_attentions_0_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([5120, 640, 1, 1])]; + tensor unet_up_blocks_1_attentions_0_transformer_blocks_1_ff_net_0_proj_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1865476160))), 
lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1865480064))), name = tensor("unet_up_blocks_1_attentions_0_transformer_blocks_1_ff_net_0_proj_bias_to_fp16_palettized"), shape = tensor([5120])]; + tensor var_12837_cast = conv(bias = unet_up_blocks_1_attentions_0_transformer_blocks_1_ff_net_0_proj_bias_to_fp16_palettized, dilations = var_12835, groups = var_31, pad = var_12837_pad_0, pad_type = var_12837_pad_type_0, strides = var_12833, weight = unet_up_blocks_1_attentions_0_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized, x = input_749_cast)[name = tensor("op_12837_cast")]; + tensor var_12838_split_sizes_0 = const()[name = tensor("op_12838_split_sizes_0"), val = tensor([2560, 2560])]; + tensor var_12838_axis_0 = const()[name = tensor("op_12838_axis_0"), val = tensor(1)]; + tensor var_12838_cast_0, tensor var_12838_cast_1 = split(axis = var_12838_axis_0, split_sizes = var_12838_split_sizes_0, x = var_12837_cast)[name = tensor("op_12838_cast")]; + tensor var_12840_mode_0 = const()[name = tensor("op_12840_mode_0"), val = tensor("EXACT")]; + tensor var_12840_cast = gelu(mode = var_12840_mode_0, x = var_12838_cast_1)[name = tensor("op_12840_cast")]; + tensor input_751_cast = mul(x = var_12838_cast_0, y = var_12840_cast)[name = tensor("input_751_cast")]; + tensor var_12844 = const()[name = tensor("op_12844"), val = tensor([1, 1])]; + tensor var_12846 = const()[name = tensor("op_12846"), val = tensor([1, 1])]; + tensor var_12848_pad_type_0 = const()[name = tensor("op_12848_pad_type_0"), val = tensor("custom")]; + tensor var_12848_pad_0 = const()[name = tensor("op_12848_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_1_attentions_0_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1865480256))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1866709120))), name = tensor("unet_up_blocks_1_attentions_0_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized"), shape = tensor([640, 2560, 1, 1])]; + tensor unet_up_blocks_1_attentions_0_transformer_blocks_1_ff_net_2_bias_to_fp16 = const()[name = tensor("unet_up_blocks_1_attentions_0_transformer_blocks_1_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1866709312)))]; + tensor var_12848_cast = conv(bias = unet_up_blocks_1_attentions_0_transformer_blocks_1_ff_net_2_bias_to_fp16, dilations = var_12846, groups = var_31, pad = var_12848_pad_0, pad_type = var_12848_pad_type_0, strides = var_12844, weight = unet_up_blocks_1_attentions_0_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized, x = input_751_cast)[name = tensor("op_12848_cast")]; + tensor hidden_states_521_cast = add(x = var_12848_cast, y = inputs_395_cast)[name = tensor("hidden_states_521_cast")]; + tensor var_12850 = const()[name = tensor("op_12850"), val = tensor([2, 640, 64, 64])]; + tensor input_753_cast = reshape(shape = var_12850, x = hidden_states_521_cast)[name = tensor("input_753_cast")]; + tensor var_12854 = const()[name = tensor("op_12854"), val = tensor([1, 1])]; + tensor var_12856 = const()[name = tensor("op_12856"), val = tensor([1, 1])]; + tensor hidden_states_523_pad_type_0 = const()[name = tensor("hidden_states_523_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_523_pad_0 = const()[name = tensor("hidden_states_523_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
unet_up_blocks_1_attentions_0_proj_out_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1866710656))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1867017920))), name = tensor("unet_up_blocks_1_attentions_0_proj_out_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor unet_up_blocks_1_attentions_0_proj_out_bias_to_fp16 = const()[name = tensor("unet_up_blocks_1_attentions_0_proj_out_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1867018112)))]; + tensor hidden_states_523_cast = conv(bias = unet_up_blocks_1_attentions_0_proj_out_bias_to_fp16, dilations = var_12856, groups = var_31, pad = hidden_states_523_pad_0, pad_type = hidden_states_523_pad_type_0, strides = var_12854, weight = unet_up_blocks_1_attentions_0_proj_out_weight_to_fp16_palettized, x = input_753_cast)[name = tensor("hidden_states_523_cast")]; + tensor hidden_states_525_cast = add(x = hidden_states_523_cast, y = hidden_states_505_cast)[name = tensor("hidden_states_525_cast")]; + tensor input_755_interleave_0 = const()[name = tensor("input_755_interleave_0"), val = tensor(false)]; + tensor input_755_cast = concat(axis = var_31, interleave = input_755_interleave_0, values = (hidden_states_525_cast, input_79_cast))[name = tensor("input_755_cast")]; + tensor reshape_132_shape_0 = const()[name = tensor("reshape_132_shape_0"), val = tensor([2, 32, 40, 64, 64])]; + tensor reshape_132_cast = reshape(shape = reshape_132_shape_0, x = input_755_cast)[name = tensor("reshape_132_cast")]; + tensor reduce_mean_99_axes_0 = const()[name = tensor("reduce_mean_99_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_99_keep_dims_0 = const()[name = tensor("reduce_mean_99_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_99_cast = reduce_mean(axes = reduce_mean_99_axes_0, keep_dims = reduce_mean_99_keep_dims_0, x = reshape_132_cast)[name = tensor("reduce_mean_99_cast")]; + tensor sub_66_cast = sub(x = reshape_132_cast, y = reduce_mean_99_cast)[name = tensor("sub_66_cast")]; + tensor square_33_cast = square(x = sub_66_cast)[name = tensor("square_33_cast")]; + tensor reduce_mean_101_axes_0 = const()[name = tensor("reduce_mean_101_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_101_keep_dims_0 = const()[name = tensor("reduce_mean_101_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_101_cast = reduce_mean(axes = reduce_mean_101_axes_0, keep_dims = reduce_mean_101_keep_dims_0, x = square_33_cast)[name = tensor("reduce_mean_101_cast")]; + tensor add_66_y_0_to_fp16 = const()[name = tensor("add_66_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_66_cast = add(x = reduce_mean_101_cast, y = add_66_y_0_to_fp16)[name = tensor("add_66_cast")]; + tensor sqrt_33_cast = sqrt(x = add_66_cast)[name = tensor("sqrt_33_cast")]; + tensor real_div_33_cast = real_div(x = sub_66_cast, y = sqrt_33_cast)[name = tensor("real_div_33_cast")]; + tensor reshape_133_shape_0 = const()[name = tensor("reshape_133_shape_0"), val = tensor([2, 1280, 64, 64])]; + tensor reshape_133_cast = reshape(shape = reshape_133_shape_0, x = real_div_33_cast)[name = tensor("reshape_133_cast")]; + tensor add_67_gamma_0_to_fp16 = const()[name = tensor("add_67_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1867019456)))]; + tensor add_67_beta_0_to_fp16 = const()[name = tensor("add_67_beta_0_to_fp16"), 
val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1867022080)))]; + tensor add_67_epsilon_0_to_fp16 = const()[name = tensor("add_67_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_67_cast = batch_norm(beta = add_67_beta_0_to_fp16, epsilon = add_67_epsilon_0_to_fp16, gamma = add_67_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_133_cast)[name = tensor("add_67_cast")]; + tensor input_759_cast = silu(x = add_67_cast)[name = tensor("input_759_cast")]; + tensor var_12874 = const()[name = tensor("op_12874"), val = tensor([1, 1])]; + tensor var_12876 = const()[name = tensor("op_12876"), val = tensor([1, 1])]; + tensor hidden_states_527_pad_type_0 = const()[name = tensor("hidden_states_527_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_527_pad_0 = const()[name = tensor("hidden_states_527_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor unet_up_blocks_1_resnets_1_conv1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1867024704))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1872554368))), name = tensor("unet_up_blocks_1_resnets_1_conv1_weight_to_fp16_palettized"), shape = tensor([640, 1280, 3, 3])]; + tensor unet_up_blocks_1_resnets_1_conv1_bias_to_fp16 = const()[name = tensor("unet_up_blocks_1_resnets_1_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1872554560)))]; + tensor hidden_states_527_cast = conv(bias = unet_up_blocks_1_resnets_1_conv1_bias_to_fp16, dilations = var_12876, groups = var_31, pad = hidden_states_527_pad_0, pad_type = hidden_states_527_pad_type_0, strides = var_12874, weight = unet_up_blocks_1_resnets_1_conv1_weight_to_fp16_palettized, x = input_759_cast)[name = tensor("hidden_states_527_cast")]; + tensor var_12882 = const()[name = tensor("op_12882"), val = tensor([1, 1])]; + tensor var_12884 = const()[name = tensor("op_12884"), val = tensor([1, 1])]; + tensor temb_25_pad_type_0 = const()[name = tensor("temb_25_pad_type_0"), val = tensor("custom")]; + tensor temb_25_pad_0 = const()[name = tensor("temb_25_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_1_resnets_1_time_emb_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1872555904))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1873170368))), name = tensor("unet_up_blocks_1_resnets_1_time_emb_proj_weight_to_fp16_palettized"), shape = tensor([640, 1280, 1, 1])]; + tensor unet_up_blocks_1_resnets_1_time_emb_proj_bias_to_fp16 = const()[name = tensor("unet_up_blocks_1_resnets_1_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1873170560)))]; + tensor temb_25_cast = conv(bias = unet_up_blocks_1_resnets_1_time_emb_proj_bias_to_fp16, dilations = var_12884, groups = var_31, pad = temb_25_pad_0, pad_type = temb_25_pad_type_0, strides = var_12882, weight = unet_up_blocks_1_resnets_1_time_emb_proj_weight_to_fp16_palettized, x = input_21_cast)[name = tensor("temb_25_cast")]; + tensor input_763_cast = add(x = hidden_states_527_cast, y = temb_25_cast)[name = tensor("input_763_cast")]; + tensor reshape_136_shape_0 = const()[name = tensor("reshape_136_shape_0"), val = tensor([2, 32, 20, 64, 64])]; + tensor 
reshape_136_cast = reshape(shape = reshape_136_shape_0, x = input_763_cast)[name = tensor("reshape_136_cast")]; + tensor reduce_mean_102_axes_0 = const()[name = tensor("reduce_mean_102_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_102_keep_dims_0 = const()[name = tensor("reduce_mean_102_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_102_cast = reduce_mean(axes = reduce_mean_102_axes_0, keep_dims = reduce_mean_102_keep_dims_0, x = reshape_136_cast)[name = tensor("reduce_mean_102_cast")]; + tensor sub_68_cast = sub(x = reshape_136_cast, y = reduce_mean_102_cast)[name = tensor("sub_68_cast")]; + tensor square_34_cast = square(x = sub_68_cast)[name = tensor("square_34_cast")]; + tensor reduce_mean_104_axes_0 = const()[name = tensor("reduce_mean_104_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_104_keep_dims_0 = const()[name = tensor("reduce_mean_104_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_104_cast = reduce_mean(axes = reduce_mean_104_axes_0, keep_dims = reduce_mean_104_keep_dims_0, x = square_34_cast)[name = tensor("reduce_mean_104_cast")]; + tensor add_68_y_0_to_fp16 = const()[name = tensor("add_68_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_68_cast = add(x = reduce_mean_104_cast, y = add_68_y_0_to_fp16)[name = tensor("add_68_cast")]; + tensor sqrt_34_cast = sqrt(x = add_68_cast)[name = tensor("sqrt_34_cast")]; + tensor real_div_34_cast = real_div(x = sub_68_cast, y = sqrt_34_cast)[name = tensor("real_div_34_cast")]; + tensor reshape_137_shape_0 = const()[name = tensor("reshape_137_shape_0"), val = tensor([2, 640, 64, 64])]; + tensor reshape_137_cast = reshape(shape = reshape_137_shape_0, x = real_div_34_cast)[name = tensor("reshape_137_cast")]; + tensor add_69_gamma_0_to_fp16 = const()[name = tensor("add_69_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1873171904)))]; + tensor add_69_beta_0_to_fp16 = const()[name = tensor("add_69_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1873173248)))]; + tensor add_69_epsilon_0_to_fp16 = const()[name = tensor("add_69_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_69_cast = batch_norm(beta = add_69_beta_0_to_fp16, epsilon = add_69_epsilon_0_to_fp16, gamma = add_69_gamma_0_to_fp16, mean = add_11_mean_0_to_fp16, variance = add_11_variance_0_to_fp16, x = reshape_137_cast)[name = tensor("add_69_cast")]; + tensor input_767_cast = silu(x = add_69_cast)[name = tensor("input_767_cast")]; + tensor var_12894 = const()[name = tensor("op_12894"), val = tensor([1, 1])]; + tensor var_12896 = const()[name = tensor("op_12896"), val = tensor([1, 1])]; + tensor hidden_states_529_pad_type_0 = const()[name = tensor("hidden_states_529_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_529_pad_0 = const()[name = tensor("hidden_states_529_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor unet_up_blocks_1_resnets_1_conv2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1873174592))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1875939456))), name = tensor("unet_up_blocks_1_resnets_1_conv2_weight_to_fp16_palettized"), shape = tensor([640, 640, 3, 3])]; + tensor unet_up_blocks_1_resnets_1_conv2_bias_to_fp16 = const()[name = tensor("unet_up_blocks_1_resnets_1_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), 
offset = tensor(1875939648)))]; + tensor hidden_states_529_cast = conv(bias = unet_up_blocks_1_resnets_1_conv2_bias_to_fp16, dilations = var_12896, groups = var_31, pad = hidden_states_529_pad_0, pad_type = hidden_states_529_pad_type_0, strides = var_12894, weight = unet_up_blocks_1_resnets_1_conv2_weight_to_fp16_palettized, x = input_767_cast)[name = tensor("hidden_states_529_cast")]; + tensor var_12901 = const()[name = tensor("op_12901"), val = tensor([1, 1])]; + tensor var_12903 = const()[name = tensor("op_12903"), val = tensor([1, 1])]; + tensor x_13_pad_type_0 = const()[name = tensor("x_13_pad_type_0"), val = tensor("custom")]; + tensor x_13_pad_0 = const()[name = tensor("x_13_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_1_resnets_1_conv_shortcut_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1875940992))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1876555456))), name = tensor("unet_up_blocks_1_resnets_1_conv_shortcut_weight_to_fp16_palettized"), shape = tensor([640, 1280, 1, 1])]; + tensor unet_up_blocks_1_resnets_1_conv_shortcut_bias_to_fp16 = const()[name = tensor("unet_up_blocks_1_resnets_1_conv_shortcut_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1876555648)))]; + tensor x_13_cast = conv(bias = unet_up_blocks_1_resnets_1_conv_shortcut_bias_to_fp16, dilations = var_12903, groups = var_31, pad = x_13_pad_0, pad_type = x_13_pad_type_0, strides = var_12901, weight = unet_up_blocks_1_resnets_1_conv_shortcut_weight_to_fp16_palettized, x = input_755_cast)[name = tensor("x_13_cast")]; + tensor hidden_states_531_cast = add(x = x_13_cast, y = hidden_states_529_cast)[name = tensor("hidden_states_531_cast")]; + tensor reshape_140_shape_0 = const()[name = tensor("reshape_140_shape_0"), val = tensor([2, 32, 20, 64, 64])]; + tensor reshape_140_cast = reshape(shape = reshape_140_shape_0, x = hidden_states_531_cast)[name = tensor("reshape_140_cast")]; + tensor reduce_mean_105_axes_0 = const()[name = tensor("reduce_mean_105_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_105_keep_dims_0 = const()[name = tensor("reduce_mean_105_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_105_cast = reduce_mean(axes = reduce_mean_105_axes_0, keep_dims = reduce_mean_105_keep_dims_0, x = reshape_140_cast)[name = tensor("reduce_mean_105_cast")]; + tensor sub_70_cast = sub(x = reshape_140_cast, y = reduce_mean_105_cast)[name = tensor("sub_70_cast")]; + tensor square_35_cast = square(x = sub_70_cast)[name = tensor("square_35_cast")]; + tensor reduce_mean_107_axes_0 = const()[name = tensor("reduce_mean_107_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_107_keep_dims_0 = const()[name = tensor("reduce_mean_107_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_107_cast = reduce_mean(axes = reduce_mean_107_axes_0, keep_dims = reduce_mean_107_keep_dims_0, x = square_35_cast)[name = tensor("reduce_mean_107_cast")]; + tensor add_70_y_0_to_fp16 = const()[name = tensor("add_70_y_0_to_fp16"), val = tensor(0x1.1p-20)]; + tensor add_70_cast = add(x = reduce_mean_107_cast, y = add_70_y_0_to_fp16)[name = tensor("add_70_cast")]; + tensor sqrt_35_cast = sqrt(x = add_70_cast)[name = tensor("sqrt_35_cast")]; + tensor real_div_35_cast = real_div(x = sub_70_cast, y = sqrt_35_cast)[name = tensor("real_div_35_cast")]; + tensor reshape_141_shape_0 = const()[name = 
tensor("reshape_141_shape_0"), val = tensor([2, 640, 64, 64])]; + tensor reshape_141_cast = reshape(shape = reshape_141_shape_0, x = real_div_35_cast)[name = tensor("reshape_141_cast")]; + tensor add_71_gamma_0_to_fp16 = const()[name = tensor("add_71_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1876556992)))]; + tensor add_71_beta_0_to_fp16 = const()[name = tensor("add_71_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1876558336)))]; + tensor add_71_epsilon_0_to_fp16 = const()[name = tensor("add_71_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_71_cast = batch_norm(beta = add_71_beta_0_to_fp16, epsilon = add_71_epsilon_0_to_fp16, gamma = add_71_gamma_0_to_fp16, mean = add_11_mean_0_to_fp16, variance = add_11_variance_0_to_fp16, x = reshape_141_cast)[name = tensor("add_71_cast")]; + tensor var_12925 = const()[name = tensor("op_12925"), val = tensor([1, 1])]; + tensor var_12927 = const()[name = tensor("op_12927"), val = tensor([1, 1])]; + tensor hidden_states_533_pad_type_0 = const()[name = tensor("hidden_states_533_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_533_pad_0 = const()[name = tensor("hidden_states_533_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_1_attentions_1_proj_in_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1876559680))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1876866944))), name = tensor("unet_up_blocks_1_attentions_1_proj_in_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor unet_up_blocks_1_attentions_1_proj_in_bias_to_fp16 = const()[name = tensor("unet_up_blocks_1_attentions_1_proj_in_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1876867136)))]; + tensor hidden_states_533_cast = conv(bias = unet_up_blocks_1_attentions_1_proj_in_bias_to_fp16, dilations = var_12927, groups = var_31, pad = hidden_states_533_pad_0, pad_type = hidden_states_533_pad_type_0, strides = var_12925, weight = unet_up_blocks_1_attentions_1_proj_in_weight_to_fp16_palettized, x = add_71_cast)[name = tensor("hidden_states_533_cast")]; + tensor var_12932 = const()[name = tensor("op_12932"), val = tensor([2, 640, 1, 4096])]; + tensor inputs_397_cast = reshape(shape = var_12932, x = hidden_states_533_cast)[name = tensor("inputs_397_cast")]; + tensor var_12942 = const()[name = tensor("op_12942"), val = tensor([1])]; + tensor channels_mean_397_cast = reduce_mean(axes = var_12942, keep_dims = var_23, x = inputs_397_cast)[name = tensor("channels_mean_397_cast")]; + tensor zero_mean_397_cast = sub(x = inputs_397_cast, y = channels_mean_397_cast)[name = tensor("zero_mean_397_cast")]; + tensor zero_mean_sq_397_cast = mul(x = zero_mean_397_cast, y = zero_mean_397_cast)[name = tensor("zero_mean_sq_397_cast")]; + tensor var_12946 = const()[name = tensor("op_12946"), val = tensor([1])]; + tensor var_12947_cast = reduce_mean(axes = var_12946, keep_dims = var_23, x = zero_mean_sq_397_cast)[name = tensor("op_12947_cast")]; + tensor var_12948_to_fp16 = const()[name = tensor("op_12948_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_12949_cast = add(x = var_12947_cast, y = var_12948_to_fp16)[name = tensor("op_12949_cast")]; + tensor denom_397_epsilon_0_to_fp16 = const()[name = tensor("denom_397_epsilon_0_to_fp16"), val = 
tensor(0x1p-24)]; + tensor denom_397_cast = rsqrt(epsilon = denom_397_epsilon_0_to_fp16, x = var_12949_cast)[name = tensor("denom_397_cast")]; + tensor out_397_cast = mul(x = zero_mean_397_cast, y = denom_397_cast)[name = tensor("out_397_cast")]; + tensor var_12953_to_fp16 = const()[name = tensor("op_12953_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1876868480)))]; + tensor var_12954_cast = add(x = out_397_cast, y = var_12953_to_fp16)[name = tensor("op_12954_cast")]; + tensor var_12956_to_fp16 = const()[name = tensor("op_12956_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1876869824)))]; + tensor hidden_states_535_cast = mul(x = var_12954_cast, y = var_12956_to_fp16)[name = tensor("hidden_states_535_cast")]; + tensor var_12963 = const()[name = tensor("op_12963"), val = tensor([1, 1])]; + tensor var_12965 = const()[name = tensor("op_12965"), val = tensor([1, 1])]; + tensor q_265_pad_type_0 = const()[name = tensor("q_265_pad_type_0"), val = tensor("custom")]; + tensor q_265_pad_0 = const()[name = tensor("q_265_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_1_attentions_1_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1876871168))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1877178432))), name = tensor("unet_up_blocks_1_attentions_1_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_265_cast = conv(dilations = var_12965, groups = var_31, pad = q_265_pad_0, pad_type = q_265_pad_type_0, strides = var_12963, weight = unet_up_blocks_1_attentions_1_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_535_cast)[name = tensor("q_265_cast")]; + tensor var_12969 = const()[name = tensor("op_12969"), val = tensor([1, 1])]; + tensor var_12971 = const()[name = tensor("op_12971"), val = tensor([1, 1])]; + tensor k_265_pad_type_0 = const()[name = tensor("k_265_pad_type_0"), val = tensor("custom")]; + tensor k_265_pad_0 = const()[name = tensor("k_265_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_1_attentions_1_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1877178624))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1877485888))), name = tensor("unet_up_blocks_1_attentions_1_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor k_265_cast = conv(dilations = var_12971, groups = var_31, pad = k_265_pad_0, pad_type = k_265_pad_type_0, strides = var_12969, weight = unet_up_blocks_1_attentions_1_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_535_cast)[name = tensor("k_265_cast")]; + tensor var_12975 = const()[name = tensor("op_12975"), val = tensor([1, 1])]; + tensor var_12977 = const()[name = tensor("op_12977"), val = tensor([1, 1])]; + tensor v_265_pad_type_0 = const()[name = tensor("v_265_pad_type_0"), val = tensor("custom")]; + tensor v_265_pad_0 = const()[name = tensor("v_265_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_1_attentions_1_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(1877486080))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1877793344))), name = tensor("unet_up_blocks_1_attentions_1_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor v_265_cast = conv(dilations = var_12977, groups = var_31, pad = v_265_pad_0, pad_type = v_265_pad_type_0, strides = var_12975, weight = unet_up_blocks_1_attentions_1_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_535_cast)[name = tensor("v_265_cast")]; + tensor var_12981 = const()[name = tensor("op_12981"), val = tensor([2, 10, 64, -1])]; + tensor var_12982_cast = reshape(shape = var_12981, x = q_265_cast)[name = tensor("op_12982_cast")]; + tensor var_12983 = const()[name = tensor("op_12983"), val = tensor([2, 10, 64, -1])]; + tensor var_12984_cast = reshape(shape = var_12983, x = k_265_cast)[name = tensor("op_12984_cast")]; + tensor var_12985 = const()[name = tensor("op_12985"), val = tensor([2, 10, 64, -1])]; + tensor var_12986_cast = reshape(shape = var_12985, x = v_265_cast)[name = tensor("op_12986_cast")]; + tensor attn_weights_529_transpose_x_0 = const()[name = tensor("attn_weights_529_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_529_transpose_y_0 = const()[name = tensor("attn_weights_529_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_529_cast = matmul(transpose_x = attn_weights_529_transpose_x_0, transpose_y = attn_weights_529_transpose_y_0, x = var_12982_cast, y = var_12984_cast)[name = tensor("attn_weights_529_cast")]; + tensor attn_weights_531_cast = mul(x = attn_weights_529_cast, y = var_12_to_fp16)[name = tensor("attn_weights_531_cast")]; + tensor var_12990_cast = softmax(axis = var_18, x = attn_weights_531_cast)[name = tensor("op_12990_cast")]; + tensor attn_265_transpose_x_0 = const()[name = tensor("attn_265_transpose_x_0"), val = tensor(false)]; + tensor attn_265_transpose_y_0 = const()[name = tensor("attn_265_transpose_y_0"), val = tensor(true)]; + tensor attn_265_cast = matmul(transpose_x = attn_265_transpose_x_0, transpose_y = attn_265_transpose_y_0, x = var_12986_cast, y = var_12990_cast)[name = tensor("attn_265_cast")]; + tensor var_12994 = const()[name = tensor("op_12994"), val = tensor([2, 640, 1, -1])]; + tensor input_771_cast = reshape(shape = var_12994, x = attn_265_cast)[name = tensor("input_771_cast")]; + tensor var_12999 = const()[name = tensor("op_12999"), val = tensor([1, 1])]; + tensor var_13001 = const()[name = tensor("op_13001"), val = tensor([1, 1])]; + tensor var_13003_pad_type_0 = const()[name = tensor("op_13003_pad_type_0"), val = tensor("custom")]; + tensor var_13003_pad_0 = const()[name = tensor("op_13003_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_1_attentions_1_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1877793536))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1878100800))), name = tensor("unet_up_blocks_1_attentions_1_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor unet_up_blocks_1_attentions_1_transformer_blocks_0_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("unet_up_blocks_1_attentions_1_transformer_blocks_0_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), 
offset = tensor(1878100992)))]; + tensor var_13003_cast = conv(bias = unet_up_blocks_1_attentions_1_transformer_blocks_0_attn1_to_out_0_bias_to_fp16, dilations = var_13001, groups = var_31, pad = var_13003_pad_0, pad_type = var_13003_pad_type_0, strides = var_12999, weight = unet_up_blocks_1_attentions_1_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized, x = input_771_cast)[name = tensor("op_13003_cast")]; + tensor inputs_399_cast = add(x = var_13003_cast, y = inputs_397_cast)[name = tensor("inputs_399_cast")]; + tensor var_13007 = const()[name = tensor("op_13007"), val = tensor([1])]; + tensor channels_mean_399_cast = reduce_mean(axes = var_13007, keep_dims = var_23, x = inputs_399_cast)[name = tensor("channels_mean_399_cast")]; + tensor zero_mean_399_cast = sub(x = inputs_399_cast, y = channels_mean_399_cast)[name = tensor("zero_mean_399_cast")]; + tensor zero_mean_sq_399_cast = mul(x = zero_mean_399_cast, y = zero_mean_399_cast)[name = tensor("zero_mean_sq_399_cast")]; + tensor var_13011 = const()[name = tensor("op_13011"), val = tensor([1])]; + tensor var_13012_cast = reduce_mean(axes = var_13011, keep_dims = var_23, x = zero_mean_sq_399_cast)[name = tensor("op_13012_cast")]; + tensor var_13013_to_fp16 = const()[name = tensor("op_13013_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_13014_cast = add(x = var_13012_cast, y = var_13013_to_fp16)[name = tensor("op_13014_cast")]; + tensor denom_399_epsilon_0_to_fp16 = const()[name = tensor("denom_399_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_399_cast = rsqrt(epsilon = denom_399_epsilon_0_to_fp16, x = var_13014_cast)[name = tensor("denom_399_cast")]; + tensor out_399_cast = mul(x = zero_mean_399_cast, y = denom_399_cast)[name = tensor("out_399_cast")]; + tensor var_13018_to_fp16 = const()[name = tensor("op_13018_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1878102336)))]; + tensor var_13019_cast = add(x = out_399_cast, y = var_13018_to_fp16)[name = tensor("op_13019_cast")]; + tensor var_13021_to_fp16 = const()[name = tensor("op_13021_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1878103680)))]; + tensor hidden_states_537_cast = mul(x = var_13019_cast, y = var_13021_to_fp16)[name = tensor("hidden_states_537_cast")]; + tensor var_13028 = const()[name = tensor("op_13028"), val = tensor([1, 1])]; + tensor var_13030 = const()[name = tensor("op_13030"), val = tensor([1, 1])]; + tensor q_267_pad_type_0 = const()[name = tensor("q_267_pad_type_0"), val = tensor("custom")]; + tensor q_267_pad_0 = const()[name = tensor("q_267_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_1_attentions_1_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1878105024))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1878412288))), name = tensor("unet_up_blocks_1_attentions_1_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_267_cast = conv(dilations = var_13030, groups = var_31, pad = q_267_pad_0, pad_type = q_267_pad_type_0, strides = var_13028, weight = unet_up_blocks_1_attentions_1_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_537_cast)[name = tensor("q_267_cast")]; + tensor var_13034 = const()[name = tensor("op_13034"), val = tensor([1, 1])]; + tensor var_13036 = 
const()[name = tensor("op_13036"), val = tensor([1, 1])]; + tensor k_267_pad_type_0 = const()[name = tensor("k_267_pad_type_0"), val = tensor("custom")]; + tensor k_267_pad_0 = const()[name = tensor("k_267_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_1_attentions_1_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1878412480))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1879395584))), name = tensor("unet_up_blocks_1_attentions_1_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor k_267_cast = conv(dilations = var_13036, groups = var_31, pad = k_267_pad_0, pad_type = k_267_pad_type_0, strides = var_13034, weight = unet_up_blocks_1_attentions_1_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_267_cast")]; + tensor var_13040 = const()[name = tensor("op_13040"), val = tensor([1, 1])]; + tensor var_13042 = const()[name = tensor("op_13042"), val = tensor([1, 1])]; + tensor v_267_pad_type_0 = const()[name = tensor("v_267_pad_type_0"), val = tensor("custom")]; + tensor v_267_pad_0 = const()[name = tensor("v_267_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_1_attentions_1_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1879395776))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1880378880))), name = tensor("unet_up_blocks_1_attentions_1_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor v_267_cast = conv(dilations = var_13042, groups = var_31, pad = v_267_pad_0, pad_type = v_267_pad_type_0, strides = var_13040, weight = unet_up_blocks_1_attentions_1_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_267_cast")]; + tensor var_13046 = const()[name = tensor("op_13046"), val = tensor([2, 10, 64, -1])]; + tensor var_13047_cast = reshape(shape = var_13046, x = q_267_cast)[name = tensor("op_13047_cast")]; + tensor var_13048 = const()[name = tensor("op_13048"), val = tensor([2, 10, 64, -1])]; + tensor var_13049_cast = reshape(shape = var_13048, x = k_267_cast)[name = tensor("op_13049_cast")]; + tensor var_13050 = const()[name = tensor("op_13050"), val = tensor([2, 10, 64, -1])]; + tensor var_13051_cast = reshape(shape = var_13050, x = v_267_cast)[name = tensor("op_13051_cast")]; + tensor attn_weights_533_transpose_x_0 = const()[name = tensor("attn_weights_533_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_533_transpose_y_0 = const()[name = tensor("attn_weights_533_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_533_cast = matmul(transpose_x = attn_weights_533_transpose_x_0, transpose_y = attn_weights_533_transpose_y_0, x = var_13047_cast, y = var_13049_cast)[name = tensor("attn_weights_533_cast")]; + tensor attn_weights_535_cast = mul(x = attn_weights_533_cast, y = var_12_to_fp16)[name = tensor("attn_weights_535_cast")]; + tensor var_13055_cast = softmax(axis = var_18, x = attn_weights_535_cast)[name = tensor("op_13055_cast")]; + tensor attn_267_transpose_x_0 = const()[name = tensor("attn_267_transpose_x_0"), val = tensor(false)]; + tensor attn_267_transpose_y_0 = const()[name = 
tensor("attn_267_transpose_y_0"), val = tensor(true)]; + tensor attn_267_cast = matmul(transpose_x = attn_267_transpose_x_0, transpose_y = attn_267_transpose_y_0, x = var_13051_cast, y = var_13055_cast)[name = tensor("attn_267_cast")]; + tensor var_13059 = const()[name = tensor("op_13059"), val = tensor([2, 640, 1, -1])]; + tensor input_773_cast = reshape(shape = var_13059, x = attn_267_cast)[name = tensor("input_773_cast")]; + tensor var_13064 = const()[name = tensor("op_13064"), val = tensor([1, 1])]; + tensor var_13066 = const()[name = tensor("op_13066"), val = tensor([1, 1])]; + tensor var_13068_pad_type_0 = const()[name = tensor("op_13068_pad_type_0"), val = tensor("custom")]; + tensor var_13068_pad_0 = const()[name = tensor("op_13068_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_1_attentions_1_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1880379072))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1880686336))), name = tensor("unet_up_blocks_1_attentions_1_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor unet_up_blocks_1_attentions_1_transformer_blocks_0_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("unet_up_blocks_1_attentions_1_transformer_blocks_0_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1880686528)))]; + tensor var_13068_cast = conv(bias = unet_up_blocks_1_attentions_1_transformer_blocks_0_attn2_to_out_0_bias_to_fp16, dilations = var_13066, groups = var_31, pad = var_13068_pad_0, pad_type = var_13068_pad_type_0, strides = var_13064, weight = unet_up_blocks_1_attentions_1_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized, x = input_773_cast)[name = tensor("op_13068_cast")]; + tensor inputs_401_cast = add(x = var_13068_cast, y = inputs_399_cast)[name = tensor("inputs_401_cast")]; + tensor var_13072 = const()[name = tensor("op_13072"), val = tensor([1])]; + tensor channels_mean_401_cast = reduce_mean(axes = var_13072, keep_dims = var_23, x = inputs_401_cast)[name = tensor("channels_mean_401_cast")]; + tensor zero_mean_401_cast = sub(x = inputs_401_cast, y = channels_mean_401_cast)[name = tensor("zero_mean_401_cast")]; + tensor zero_mean_sq_401_cast = mul(x = zero_mean_401_cast, y = zero_mean_401_cast)[name = tensor("zero_mean_sq_401_cast")]; + tensor var_13076 = const()[name = tensor("op_13076"), val = tensor([1])]; + tensor var_13077_cast = reduce_mean(axes = var_13076, keep_dims = var_23, x = zero_mean_sq_401_cast)[name = tensor("op_13077_cast")]; + tensor var_13078_to_fp16 = const()[name = tensor("op_13078_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_13079_cast = add(x = var_13077_cast, y = var_13078_to_fp16)[name = tensor("op_13079_cast")]; + tensor denom_401_epsilon_0_to_fp16 = const()[name = tensor("denom_401_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_401_cast = rsqrt(epsilon = denom_401_epsilon_0_to_fp16, x = var_13079_cast)[name = tensor("denom_401_cast")]; + tensor out_401_cast = mul(x = zero_mean_401_cast, y = denom_401_cast)[name = tensor("out_401_cast")]; + tensor var_13083_to_fp16 = const()[name = tensor("op_13083_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1880687872)))]; + tensor var_13084_cast = add(x = out_401_cast, y = var_13083_to_fp16)[name = 
tensor("op_13084_cast")]; + tensor var_13086_to_fp16 = const()[name = tensor("op_13086_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1880689216)))]; + tensor input_775_cast = mul(x = var_13084_cast, y = var_13086_to_fp16)[name = tensor("input_775_cast")]; + tensor var_13094 = const()[name = tensor("op_13094"), val = tensor([1, 1])]; + tensor var_13096 = const()[name = tensor("op_13096"), val = tensor([1, 1])]; + tensor var_13098_pad_type_0 = const()[name = tensor("op_13098_pad_type_0"), val = tensor("custom")]; + tensor var_13098_pad_0 = const()[name = tensor("op_13098_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_1_attentions_1_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1880690560))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1883148224))), name = tensor("unet_up_blocks_1_attentions_1_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([5120, 640, 1, 1])]; + tensor unet_up_blocks_1_attentions_1_transformer_blocks_0_ff_net_0_proj_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1883148416))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1883152320))), name = tensor("unet_up_blocks_1_attentions_1_transformer_blocks_0_ff_net_0_proj_bias_to_fp16_palettized"), shape = tensor([5120])]; + tensor var_13098_cast = conv(bias = unet_up_blocks_1_attentions_1_transformer_blocks_0_ff_net_0_proj_bias_to_fp16_palettized, dilations = var_13096, groups = var_31, pad = var_13098_pad_0, pad_type = var_13098_pad_type_0, strides = var_13094, weight = unet_up_blocks_1_attentions_1_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized, x = input_775_cast)[name = tensor("op_13098_cast")]; + tensor var_13099_split_sizes_0 = const()[name = tensor("op_13099_split_sizes_0"), val = tensor([2560, 2560])]; + tensor var_13099_axis_0 = const()[name = tensor("op_13099_axis_0"), val = tensor(1)]; + tensor var_13099_cast_0, tensor var_13099_cast_1 = split(axis = var_13099_axis_0, split_sizes = var_13099_split_sizes_0, x = var_13098_cast)[name = tensor("op_13099_cast")]; + tensor var_13101_mode_0 = const()[name = tensor("op_13101_mode_0"), val = tensor("EXACT")]; + tensor var_13101_cast = gelu(mode = var_13101_mode_0, x = var_13099_cast_1)[name = tensor("op_13101_cast")]; + tensor input_777_cast = mul(x = var_13099_cast_0, y = var_13101_cast)[name = tensor("input_777_cast")]; + tensor var_13105 = const()[name = tensor("op_13105"), val = tensor([1, 1])]; + tensor var_13107 = const()[name = tensor("op_13107"), val = tensor([1, 1])]; + tensor var_13109_pad_type_0 = const()[name = tensor("op_13109_pad_type_0"), val = tensor("custom")]; + tensor var_13109_pad_0 = const()[name = tensor("op_13109_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_1_attentions_1_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1883152512))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1884381376))), name = tensor("unet_up_blocks_1_attentions_1_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized"), shape = tensor([640, 2560, 1, 1])]; + tensor 
unet_up_blocks_1_attentions_1_transformer_blocks_0_ff_net_2_bias_to_fp16 = const()[name = tensor("unet_up_blocks_1_attentions_1_transformer_blocks_0_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1884381568)))]; + tensor var_13109_cast = conv(bias = unet_up_blocks_1_attentions_1_transformer_blocks_0_ff_net_2_bias_to_fp16, dilations = var_13107, groups = var_31, pad = var_13109_pad_0, pad_type = var_13109_pad_type_0, strides = var_13105, weight = unet_up_blocks_1_attentions_1_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized, x = input_777_cast)[name = tensor("op_13109_cast")]; + tensor inputs_403_cast = add(x = var_13109_cast, y = inputs_401_cast)[name = tensor("inputs_403_cast")]; + tensor var_13119 = const()[name = tensor("op_13119"), val = tensor([1])]; + tensor channels_mean_403_cast = reduce_mean(axes = var_13119, keep_dims = var_23, x = inputs_403_cast)[name = tensor("channels_mean_403_cast")]; + tensor zero_mean_403_cast = sub(x = inputs_403_cast, y = channels_mean_403_cast)[name = tensor("zero_mean_403_cast")]; + tensor zero_mean_sq_403_cast = mul(x = zero_mean_403_cast, y = zero_mean_403_cast)[name = tensor("zero_mean_sq_403_cast")]; + tensor var_13123 = const()[name = tensor("op_13123"), val = tensor([1])]; + tensor var_13124_cast = reduce_mean(axes = var_13123, keep_dims = var_23, x = zero_mean_sq_403_cast)[name = tensor("op_13124_cast")]; + tensor var_13125_to_fp16 = const()[name = tensor("op_13125_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_13126_cast = add(x = var_13124_cast, y = var_13125_to_fp16)[name = tensor("op_13126_cast")]; + tensor denom_403_epsilon_0_to_fp16 = const()[name = tensor("denom_403_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_403_cast = rsqrt(epsilon = denom_403_epsilon_0_to_fp16, x = var_13126_cast)[name = tensor("denom_403_cast")]; + tensor out_403_cast = mul(x = zero_mean_403_cast, y = denom_403_cast)[name = tensor("out_403_cast")]; + tensor var_13130_to_fp16 = const()[name = tensor("op_13130_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1884382912)))]; + tensor var_13131_cast = add(x = out_403_cast, y = var_13130_to_fp16)[name = tensor("op_13131_cast")]; + tensor var_13133_to_fp16 = const()[name = tensor("op_13133_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1884384256)))]; + tensor hidden_states_541_cast = mul(x = var_13131_cast, y = var_13133_to_fp16)[name = tensor("hidden_states_541_cast")]; + tensor var_13140 = const()[name = tensor("op_13140"), val = tensor([1, 1])]; + tensor var_13142 = const()[name = tensor("op_13142"), val = tensor([1, 1])]; + tensor q_269_pad_type_0 = const()[name = tensor("q_269_pad_type_0"), val = tensor("custom")]; + tensor q_269_pad_0 = const()[name = tensor("q_269_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_1_attentions_1_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1884385600))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1884692864))), name = tensor("unet_up_blocks_1_attentions_1_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_269_cast = conv(dilations = var_13142, groups = var_31, pad = q_269_pad_0, pad_type = q_269_pad_type_0, strides = var_13140, weight = 
unet_up_blocks_1_attentions_1_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_541_cast)[name = tensor("q_269_cast")]; + tensor var_13146 = const()[name = tensor("op_13146"), val = tensor([1, 1])]; + tensor var_13148 = const()[name = tensor("op_13148"), val = tensor([1, 1])]; + tensor k_269_pad_type_0 = const()[name = tensor("k_269_pad_type_0"), val = tensor("custom")]; + tensor k_269_pad_0 = const()[name = tensor("k_269_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_1_attentions_1_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1884693056))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1885000320))), name = tensor("unet_up_blocks_1_attentions_1_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor k_269_cast = conv(dilations = var_13148, groups = var_31, pad = k_269_pad_0, pad_type = k_269_pad_type_0, strides = var_13146, weight = unet_up_blocks_1_attentions_1_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_541_cast)[name = tensor("k_269_cast")]; + tensor var_13152 = const()[name = tensor("op_13152"), val = tensor([1, 1])]; + tensor var_13154 = const()[name = tensor("op_13154"), val = tensor([1, 1])]; + tensor v_269_pad_type_0 = const()[name = tensor("v_269_pad_type_0"), val = tensor("custom")]; + tensor v_269_pad_0 = const()[name = tensor("v_269_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_1_attentions_1_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1885000512))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1885307776))), name = tensor("unet_up_blocks_1_attentions_1_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor v_269_cast = conv(dilations = var_13154, groups = var_31, pad = v_269_pad_0, pad_type = v_269_pad_type_0, strides = var_13152, weight = unet_up_blocks_1_attentions_1_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_541_cast)[name = tensor("v_269_cast")]; + tensor var_13158 = const()[name = tensor("op_13158"), val = tensor([2, 10, 64, -1])]; + tensor var_13159_cast = reshape(shape = var_13158, x = q_269_cast)[name = tensor("op_13159_cast")]; + tensor var_13160 = const()[name = tensor("op_13160"), val = tensor([2, 10, 64, -1])]; + tensor var_13161_cast = reshape(shape = var_13160, x = k_269_cast)[name = tensor("op_13161_cast")]; + tensor var_13162 = const()[name = tensor("op_13162"), val = tensor([2, 10, 64, -1])]; + tensor var_13163_cast = reshape(shape = var_13162, x = v_269_cast)[name = tensor("op_13163_cast")]; + tensor attn_weights_537_transpose_x_0 = const()[name = tensor("attn_weights_537_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_537_transpose_y_0 = const()[name = tensor("attn_weights_537_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_537_cast = matmul(transpose_x = attn_weights_537_transpose_x_0, transpose_y = attn_weights_537_transpose_y_0, x = var_13159_cast, y = var_13161_cast)[name = tensor("attn_weights_537_cast")]; + tensor attn_weights_539_cast = mul(x = attn_weights_537_cast, y = var_12_to_fp16)[name = tensor("attn_weights_539_cast")]; + tensor var_13167_cast = softmax(axis = 
var_18, x = attn_weights_539_cast)[name = tensor("op_13167_cast")]; + tensor attn_269_transpose_x_0 = const()[name = tensor("attn_269_transpose_x_0"), val = tensor(false)]; + tensor attn_269_transpose_y_0 = const()[name = tensor("attn_269_transpose_y_0"), val = tensor(true)]; + tensor attn_269_cast = matmul(transpose_x = attn_269_transpose_x_0, transpose_y = attn_269_transpose_y_0, x = var_13163_cast, y = var_13167_cast)[name = tensor("attn_269_cast")]; + tensor var_13171 = const()[name = tensor("op_13171"), val = tensor([2, 640, 1, -1])]; + tensor input_779_cast = reshape(shape = var_13171, x = attn_269_cast)[name = tensor("input_779_cast")]; + tensor var_13176 = const()[name = tensor("op_13176"), val = tensor([1, 1])]; + tensor var_13178 = const()[name = tensor("op_13178"), val = tensor([1, 1])]; + tensor var_13180_pad_type_0 = const()[name = tensor("op_13180_pad_type_0"), val = tensor("custom")]; + tensor var_13180_pad_0 = const()[name = tensor("op_13180_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_1_attentions_1_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1885307968))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1885615232))), name = tensor("unet_up_blocks_1_attentions_1_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor unet_up_blocks_1_attentions_1_transformer_blocks_1_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("unet_up_blocks_1_attentions_1_transformer_blocks_1_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1885615424)))]; + tensor var_13180_cast = conv(bias = unet_up_blocks_1_attentions_1_transformer_blocks_1_attn1_to_out_0_bias_to_fp16, dilations = var_13178, groups = var_31, pad = var_13180_pad_0, pad_type = var_13180_pad_type_0, strides = var_13176, weight = unet_up_blocks_1_attentions_1_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized, x = input_779_cast)[name = tensor("op_13180_cast")]; + tensor inputs_405_cast = add(x = var_13180_cast, y = inputs_403_cast)[name = tensor("inputs_405_cast")]; + tensor var_13184 = const()[name = tensor("op_13184"), val = tensor([1])]; + tensor channels_mean_405_cast = reduce_mean(axes = var_13184, keep_dims = var_23, x = inputs_405_cast)[name = tensor("channels_mean_405_cast")]; + tensor zero_mean_405_cast = sub(x = inputs_405_cast, y = channels_mean_405_cast)[name = tensor("zero_mean_405_cast")]; + tensor zero_mean_sq_405_cast = mul(x = zero_mean_405_cast, y = zero_mean_405_cast)[name = tensor("zero_mean_sq_405_cast")]; + tensor var_13188 = const()[name = tensor("op_13188"), val = tensor([1])]; + tensor var_13189_cast = reduce_mean(axes = var_13188, keep_dims = var_23, x = zero_mean_sq_405_cast)[name = tensor("op_13189_cast")]; + tensor var_13190_to_fp16 = const()[name = tensor("op_13190_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_13191_cast = add(x = var_13189_cast, y = var_13190_to_fp16)[name = tensor("op_13191_cast")]; + tensor denom_405_epsilon_0_to_fp16 = const()[name = tensor("denom_405_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_405_cast = rsqrt(epsilon = denom_405_epsilon_0_to_fp16, x = var_13191_cast)[name = tensor("denom_405_cast")]; + tensor out_405_cast = mul(x = zero_mean_405_cast, y = denom_405_cast)[name = tensor("out_405_cast")]; + tensor var_13195_to_fp16 = 
const()[name = tensor("op_13195_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1885616768)))]; + tensor var_13196_cast = add(x = out_405_cast, y = var_13195_to_fp16)[name = tensor("op_13196_cast")]; + tensor var_13198_to_fp16 = const()[name = tensor("op_13198_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1885618112)))]; + tensor hidden_states_543_cast = mul(x = var_13196_cast, y = var_13198_to_fp16)[name = tensor("hidden_states_543_cast")]; + tensor var_13205 = const()[name = tensor("op_13205"), val = tensor([1, 1])]; + tensor var_13207 = const()[name = tensor("op_13207"), val = tensor([1, 1])]; + tensor q_271_pad_type_0 = const()[name = tensor("q_271_pad_type_0"), val = tensor("custom")]; + tensor q_271_pad_0 = const()[name = tensor("q_271_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_1_attentions_1_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1885619456))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1885926720))), name = tensor("unet_up_blocks_1_attentions_1_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_271_cast = conv(dilations = var_13207, groups = var_31, pad = q_271_pad_0, pad_type = q_271_pad_type_0, strides = var_13205, weight = unet_up_blocks_1_attentions_1_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_543_cast)[name = tensor("q_271_cast")]; + tensor var_13211 = const()[name = tensor("op_13211"), val = tensor([1, 1])]; + tensor var_13213 = const()[name = tensor("op_13213"), val = tensor([1, 1])]; + tensor k_271_pad_type_0 = const()[name = tensor("k_271_pad_type_0"), val = tensor("custom")]; + tensor k_271_pad_0 = const()[name = tensor("k_271_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_1_attentions_1_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1885926912))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1886910016))), name = tensor("unet_up_blocks_1_attentions_1_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor k_271_cast = conv(dilations = var_13213, groups = var_31, pad = k_271_pad_0, pad_type = k_271_pad_type_0, strides = var_13211, weight = unet_up_blocks_1_attentions_1_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_271_cast")]; + tensor var_13217 = const()[name = tensor("op_13217"), val = tensor([1, 1])]; + tensor var_13219 = const()[name = tensor("op_13219"), val = tensor([1, 1])]; + tensor v_271_pad_type_0 = const()[name = tensor("v_271_pad_type_0"), val = tensor("custom")]; + tensor v_271_pad_0 = const()[name = tensor("v_271_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_1_attentions_1_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1886910208))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1887893312))), name = tensor("unet_up_blocks_1_attentions_1_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized"), shape 
= tensor([640, 2048, 1, 1])]; + tensor v_271_cast = conv(dilations = var_13219, groups = var_31, pad = v_271_pad_0, pad_type = v_271_pad_type_0, strides = var_13217, weight = unet_up_blocks_1_attentions_1_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_271_cast")]; + tensor var_13223 = const()[name = tensor("op_13223"), val = tensor([2, 10, 64, -1])]; + tensor var_13224_cast = reshape(shape = var_13223, x = q_271_cast)[name = tensor("op_13224_cast")]; + tensor var_13225 = const()[name = tensor("op_13225"), val = tensor([2, 10, 64, -1])]; + tensor var_13226_cast = reshape(shape = var_13225, x = k_271_cast)[name = tensor("op_13226_cast")]; + tensor var_13227 = const()[name = tensor("op_13227"), val = tensor([2, 10, 64, -1])]; + tensor var_13228_cast = reshape(shape = var_13227, x = v_271_cast)[name = tensor("op_13228_cast")]; + tensor attn_weights_541_transpose_x_0 = const()[name = tensor("attn_weights_541_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_541_transpose_y_0 = const()[name = tensor("attn_weights_541_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_541_cast = matmul(transpose_x = attn_weights_541_transpose_x_0, transpose_y = attn_weights_541_transpose_y_0, x = var_13224_cast, y = var_13226_cast)[name = tensor("attn_weights_541_cast")]; + tensor attn_weights_543_cast = mul(x = attn_weights_541_cast, y = var_12_to_fp16)[name = tensor("attn_weights_543_cast")]; + tensor var_13232_cast = softmax(axis = var_18, x = attn_weights_543_cast)[name = tensor("op_13232_cast")]; + tensor attn_271_transpose_x_0 = const()[name = tensor("attn_271_transpose_x_0"), val = tensor(false)]; + tensor attn_271_transpose_y_0 = const()[name = tensor("attn_271_transpose_y_0"), val = tensor(true)]; + tensor attn_271_cast = matmul(transpose_x = attn_271_transpose_x_0, transpose_y = attn_271_transpose_y_0, x = var_13228_cast, y = var_13232_cast)[name = tensor("attn_271_cast")]; + tensor var_13236 = const()[name = tensor("op_13236"), val = tensor([2, 640, 1, -1])]; + tensor input_781_cast = reshape(shape = var_13236, x = attn_271_cast)[name = tensor("input_781_cast")]; + tensor var_13241 = const()[name = tensor("op_13241"), val = tensor([1, 1])]; + tensor var_13243 = const()[name = tensor("op_13243"), val = tensor([1, 1])]; + tensor var_13245_pad_type_0 = const()[name = tensor("op_13245_pad_type_0"), val = tensor("custom")]; + tensor var_13245_pad_0 = const()[name = tensor("op_13245_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_1_attentions_1_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1887893504))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1888200768))), name = tensor("unet_up_blocks_1_attentions_1_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor unet_up_blocks_1_attentions_1_transformer_blocks_1_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("unet_up_blocks_1_attentions_1_transformer_blocks_1_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1888200960)))]; + tensor var_13245_cast = conv(bias = unet_up_blocks_1_attentions_1_transformer_blocks_1_attn2_to_out_0_bias_to_fp16, dilations = var_13243, groups = var_31, pad = var_13245_pad_0, pad_type = var_13245_pad_type_0, strides = var_13241, weight = 
unet_up_blocks_1_attentions_1_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized, x = input_781_cast)[name = tensor("op_13245_cast")]; + tensor inputs_407_cast = add(x = var_13245_cast, y = inputs_405_cast)[name = tensor("inputs_407_cast")]; + tensor var_13249 = const()[name = tensor("op_13249"), val = tensor([1])]; + tensor channels_mean_407_cast = reduce_mean(axes = var_13249, keep_dims = var_23, x = inputs_407_cast)[name = tensor("channels_mean_407_cast")]; + tensor zero_mean_407_cast = sub(x = inputs_407_cast, y = channels_mean_407_cast)[name = tensor("zero_mean_407_cast")]; + tensor zero_mean_sq_407_cast = mul(x = zero_mean_407_cast, y = zero_mean_407_cast)[name = tensor("zero_mean_sq_407_cast")]; + tensor var_13253 = const()[name = tensor("op_13253"), val = tensor([1])]; + tensor var_13254_cast = reduce_mean(axes = var_13253, keep_dims = var_23, x = zero_mean_sq_407_cast)[name = tensor("op_13254_cast")]; + tensor var_13255_to_fp16 = const()[name = tensor("op_13255_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_13256_cast = add(x = var_13254_cast, y = var_13255_to_fp16)[name = tensor("op_13256_cast")]; + tensor denom_407_epsilon_0_to_fp16 = const()[name = tensor("denom_407_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_407_cast = rsqrt(epsilon = denom_407_epsilon_0_to_fp16, x = var_13256_cast)[name = tensor("denom_407_cast")]; + tensor out_407_cast = mul(x = zero_mean_407_cast, y = denom_407_cast)[name = tensor("out_407_cast")]; + tensor var_13260_to_fp16 = const()[name = tensor("op_13260_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1888202304)))]; + tensor var_13261_cast = add(x = out_407_cast, y = var_13260_to_fp16)[name = tensor("op_13261_cast")]; + tensor var_13263_to_fp16 = const()[name = tensor("op_13263_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1888203648)))]; + tensor input_783_cast = mul(x = var_13261_cast, y = var_13263_to_fp16)[name = tensor("input_783_cast")]; + tensor var_13271 = const()[name = tensor("op_13271"), val = tensor([1, 1])]; + tensor var_13273 = const()[name = tensor("op_13273"), val = tensor([1, 1])]; + tensor var_13275_pad_type_0 = const()[name = tensor("op_13275_pad_type_0"), val = tensor("custom")]; + tensor var_13275_pad_0 = const()[name = tensor("op_13275_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_1_attentions_1_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1888204992))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1890662656))), name = tensor("unet_up_blocks_1_attentions_1_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([5120, 640, 1, 1])]; + tensor unet_up_blocks_1_attentions_1_transformer_blocks_1_ff_net_0_proj_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1890662848))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1890666752))), name = tensor("unet_up_blocks_1_attentions_1_transformer_blocks_1_ff_net_0_proj_bias_to_fp16_palettized"), shape = tensor([5120])]; + tensor var_13275_cast = conv(bias = unet_up_blocks_1_attentions_1_transformer_blocks_1_ff_net_0_proj_bias_to_fp16_palettized, dilations = var_13273, groups = var_31, pad = var_13275_pad_0, pad_type = 
var_13275_pad_type_0, strides = var_13271, weight = unet_up_blocks_1_attentions_1_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized, x = input_783_cast)[name = tensor("op_13275_cast")]; + tensor var_13276_split_sizes_0 = const()[name = tensor("op_13276_split_sizes_0"), val = tensor([2560, 2560])]; + tensor var_13276_axis_0 = const()[name = tensor("op_13276_axis_0"), val = tensor(1)]; + tensor var_13276_cast_0, tensor var_13276_cast_1 = split(axis = var_13276_axis_0, split_sizes = var_13276_split_sizes_0, x = var_13275_cast)[name = tensor("op_13276_cast")]; + tensor var_13278_mode_0 = const()[name = tensor("op_13278_mode_0"), val = tensor("EXACT")]; + tensor var_13278_cast = gelu(mode = var_13278_mode_0, x = var_13276_cast_1)[name = tensor("op_13278_cast")]; + tensor input_785_cast = mul(x = var_13276_cast_0, y = var_13278_cast)[name = tensor("input_785_cast")]; + tensor var_13282 = const()[name = tensor("op_13282"), val = tensor([1, 1])]; + tensor var_13284 = const()[name = tensor("op_13284"), val = tensor([1, 1])]; + tensor var_13286_pad_type_0 = const()[name = tensor("op_13286_pad_type_0"), val = tensor("custom")]; + tensor var_13286_pad_0 = const()[name = tensor("op_13286_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_1_attentions_1_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1890666944))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1891895808))), name = tensor("unet_up_blocks_1_attentions_1_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized"), shape = tensor([640, 2560, 1, 1])]; + tensor unet_up_blocks_1_attentions_1_transformer_blocks_1_ff_net_2_bias_to_fp16 = const()[name = tensor("unet_up_blocks_1_attentions_1_transformer_blocks_1_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1891896000)))]; + tensor var_13286_cast = conv(bias = unet_up_blocks_1_attentions_1_transformer_blocks_1_ff_net_2_bias_to_fp16, dilations = var_13284, groups = var_31, pad = var_13286_pad_0, pad_type = var_13286_pad_type_0, strides = var_13282, weight = unet_up_blocks_1_attentions_1_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized, x = input_785_cast)[name = tensor("op_13286_cast")]; + tensor hidden_states_547_cast = add(x = var_13286_cast, y = inputs_407_cast)[name = tensor("hidden_states_547_cast")]; + tensor var_13288 = const()[name = tensor("op_13288"), val = tensor([2, 640, 64, 64])]; + tensor input_787_cast = reshape(shape = var_13288, x = hidden_states_547_cast)[name = tensor("input_787_cast")]; + tensor var_13292 = const()[name = tensor("op_13292"), val = tensor([1, 1])]; + tensor var_13294 = const()[name = tensor("op_13294"), val = tensor([1, 1])]; + tensor hidden_states_549_pad_type_0 = const()[name = tensor("hidden_states_549_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_549_pad_0 = const()[name = tensor("hidden_states_549_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_1_attentions_1_proj_out_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1891897344))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1892204608))), name = tensor("unet_up_blocks_1_attentions_1_proj_out_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor 
unet_up_blocks_1_attentions_1_proj_out_bias_to_fp16 = const()[name = tensor("unet_up_blocks_1_attentions_1_proj_out_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1892204800)))]; + tensor hidden_states_549_cast = conv(bias = unet_up_blocks_1_attentions_1_proj_out_bias_to_fp16, dilations = var_13294, groups = var_31, pad = hidden_states_549_pad_0, pad_type = hidden_states_549_pad_type_0, strides = var_13292, weight = unet_up_blocks_1_attentions_1_proj_out_weight_to_fp16_palettized, x = input_787_cast)[name = tensor("hidden_states_549_cast")]; + tensor hidden_states_551_cast = add(x = hidden_states_549_cast, y = hidden_states_531_cast)[name = tensor("hidden_states_551_cast")]; + tensor input_789_interleave_0 = const()[name = tensor("input_789_interleave_0"), val = tensor(false)]; + tensor input_789_cast = concat(axis = var_31, interleave = input_789_interleave_0, values = (hidden_states_551_cast, input_45_cast))[name = tensor("input_789_cast")]; + tensor reshape_144_shape_0 = const()[name = tensor("reshape_144_shape_0"), val = tensor([2, 32, 30, 64, 64])]; + tensor reshape_144_cast = reshape(shape = reshape_144_shape_0, x = input_789_cast)[name = tensor("reshape_144_cast")]; + tensor reduce_mean_108_axes_0 = const()[name = tensor("reduce_mean_108_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_108_keep_dims_0 = const()[name = tensor("reduce_mean_108_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_108_cast = reduce_mean(axes = reduce_mean_108_axes_0, keep_dims = reduce_mean_108_keep_dims_0, x = reshape_144_cast)[name = tensor("reduce_mean_108_cast")]; + tensor sub_72_cast = sub(x = reshape_144_cast, y = reduce_mean_108_cast)[name = tensor("sub_72_cast")]; + tensor square_36_cast = square(x = sub_72_cast)[name = tensor("square_36_cast")]; + tensor reduce_mean_110_axes_0 = const()[name = tensor("reduce_mean_110_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_110_keep_dims_0 = const()[name = tensor("reduce_mean_110_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_110_cast = reduce_mean(axes = reduce_mean_110_axes_0, keep_dims = reduce_mean_110_keep_dims_0, x = square_36_cast)[name = tensor("reduce_mean_110_cast")]; + tensor add_72_y_0_to_fp16 = const()[name = tensor("add_72_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_72_cast = add(x = reduce_mean_110_cast, y = add_72_y_0_to_fp16)[name = tensor("add_72_cast")]; + tensor sqrt_36_cast = sqrt(x = add_72_cast)[name = tensor("sqrt_36_cast")]; + tensor real_div_36_cast = real_div(x = sub_72_cast, y = sqrt_36_cast)[name = tensor("real_div_36_cast")]; + tensor reshape_145_shape_0 = const()[name = tensor("reshape_145_shape_0"), val = tensor([2, 960, 64, 64])]; + tensor reshape_145_cast = reshape(shape = reshape_145_shape_0, x = real_div_36_cast)[name = tensor("reshape_145_cast")]; + tensor add_73_mean_0_to_fp16 = const()[name = tensor("add_73_mean_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1892206144)))]; + tensor add_73_variance_0_to_fp16 = const()[name = tensor("add_73_variance_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1892208128)))]; + tensor add_73_gamma_0_to_fp16 = const()[name = tensor("add_73_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1892210112)))]; + tensor add_73_beta_0_to_fp16 = const()[name = tensor("add_73_beta_0_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(1892212096)))]; + tensor add_73_epsilon_0_to_fp16 = const()[name = tensor("add_73_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_73_cast = batch_norm(beta = add_73_beta_0_to_fp16, epsilon = add_73_epsilon_0_to_fp16, gamma = add_73_gamma_0_to_fp16, mean = add_73_mean_0_to_fp16, variance = add_73_variance_0_to_fp16, x = reshape_145_cast)[name = tensor("add_73_cast")]; + tensor input_793_cast = silu(x = add_73_cast)[name = tensor("input_793_cast")]; + tensor var_13312 = const()[name = tensor("op_13312"), val = tensor([1, 1])]; + tensor var_13314 = const()[name = tensor("op_13314"), val = tensor([1, 1])]; + tensor hidden_states_553_pad_type_0 = const()[name = tensor("hidden_states_553_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_553_pad_0 = const()[name = tensor("hidden_states_553_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor unet_up_blocks_1_resnets_2_conv1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1892214080))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1896361344))), name = tensor("unet_up_blocks_1_resnets_2_conv1_weight_to_fp16_palettized"), shape = tensor([640, 960, 3, 3])]; + tensor unet_up_blocks_1_resnets_2_conv1_bias_to_fp16 = const()[name = tensor("unet_up_blocks_1_resnets_2_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1896361536)))]; + tensor hidden_states_553_cast = conv(bias = unet_up_blocks_1_resnets_2_conv1_bias_to_fp16, dilations = var_13314, groups = var_31, pad = hidden_states_553_pad_0, pad_type = hidden_states_553_pad_type_0, strides = var_13312, weight = unet_up_blocks_1_resnets_2_conv1_weight_to_fp16_palettized, x = input_793_cast)[name = tensor("hidden_states_553_cast")]; + tensor var_13320 = const()[name = tensor("op_13320"), val = tensor([1, 1])]; + tensor var_13322 = const()[name = tensor("op_13322"), val = tensor([1, 1])]; + tensor temb_27_pad_type_0 = const()[name = tensor("temb_27_pad_type_0"), val = tensor("custom")]; + tensor temb_27_pad_0 = const()[name = tensor("temb_27_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_1_resnets_2_time_emb_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1896362880))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1896977344))), name = tensor("unet_up_blocks_1_resnets_2_time_emb_proj_weight_to_fp16_palettized"), shape = tensor([640, 1280, 1, 1])]; + tensor unet_up_blocks_1_resnets_2_time_emb_proj_bias_to_fp16 = const()[name = tensor("unet_up_blocks_1_resnets_2_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1896977536)))]; + tensor temb_27_cast = conv(bias = unet_up_blocks_1_resnets_2_time_emb_proj_bias_to_fp16, dilations = var_13322, groups = var_31, pad = temb_27_pad_0, pad_type = temb_27_pad_type_0, strides = var_13320, weight = unet_up_blocks_1_resnets_2_time_emb_proj_weight_to_fp16_palettized, x = input_21_cast)[name = tensor("temb_27_cast")]; + tensor input_797_cast = add(x = hidden_states_553_cast, y = temb_27_cast)[name = tensor("input_797_cast")]; + tensor reshape_148_shape_0 = const()[name = tensor("reshape_148_shape_0"), val = tensor([2, 32, 20, 64, 64])]; + tensor reshape_148_cast = reshape(shape = 
reshape_148_shape_0, x = input_797_cast)[name = tensor("reshape_148_cast")]; + tensor reduce_mean_111_axes_0 = const()[name = tensor("reduce_mean_111_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_111_keep_dims_0 = const()[name = tensor("reduce_mean_111_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_111_cast = reduce_mean(axes = reduce_mean_111_axes_0, keep_dims = reduce_mean_111_keep_dims_0, x = reshape_148_cast)[name = tensor("reduce_mean_111_cast")]; + tensor sub_74_cast = sub(x = reshape_148_cast, y = reduce_mean_111_cast)[name = tensor("sub_74_cast")]; + tensor square_37_cast = square(x = sub_74_cast)[name = tensor("square_37_cast")]; + tensor reduce_mean_113_axes_0 = const()[name = tensor("reduce_mean_113_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_113_keep_dims_0 = const()[name = tensor("reduce_mean_113_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_113_cast = reduce_mean(axes = reduce_mean_113_axes_0, keep_dims = reduce_mean_113_keep_dims_0, x = square_37_cast)[name = tensor("reduce_mean_113_cast")]; + tensor add_74_y_0_to_fp16 = const()[name = tensor("add_74_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_74_cast = add(x = reduce_mean_113_cast, y = add_74_y_0_to_fp16)[name = tensor("add_74_cast")]; + tensor sqrt_37_cast = sqrt(x = add_74_cast)[name = tensor("sqrt_37_cast")]; + tensor real_div_37_cast = real_div(x = sub_74_cast, y = sqrt_37_cast)[name = tensor("real_div_37_cast")]; + tensor reshape_149_shape_0 = const()[name = tensor("reshape_149_shape_0"), val = tensor([2, 640, 64, 64])]; + tensor reshape_149_cast = reshape(shape = reshape_149_shape_0, x = real_div_37_cast)[name = tensor("reshape_149_cast")]; + tensor add_75_gamma_0_to_fp16 = const()[name = tensor("add_75_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1896978880)))]; + tensor add_75_beta_0_to_fp16 = const()[name = tensor("add_75_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1896980224)))]; + tensor add_75_epsilon_0_to_fp16 = const()[name = tensor("add_75_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_75_cast = batch_norm(beta = add_75_beta_0_to_fp16, epsilon = add_75_epsilon_0_to_fp16, gamma = add_75_gamma_0_to_fp16, mean = add_11_mean_0_to_fp16, variance = add_11_variance_0_to_fp16, x = reshape_149_cast)[name = tensor("add_75_cast")]; + tensor input_801_cast = silu(x = add_75_cast)[name = tensor("input_801_cast")]; + tensor var_13332 = const()[name = tensor("op_13332"), val = tensor([1, 1])]; + tensor var_13334 = const()[name = tensor("op_13334"), val = tensor([1, 1])]; + tensor hidden_states_555_pad_type_0 = const()[name = tensor("hidden_states_555_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_555_pad_0 = const()[name = tensor("hidden_states_555_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor unet_up_blocks_1_resnets_2_conv2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1896981568))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1899746432))), name = tensor("unet_up_blocks_1_resnets_2_conv2_weight_to_fp16_palettized"), shape = tensor([640, 640, 3, 3])]; + tensor unet_up_blocks_1_resnets_2_conv2_bias_to_fp16 = const()[name = tensor("unet_up_blocks_1_resnets_2_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1899746624)))]; + 
tensor hidden_states_555_cast = conv(bias = unet_up_blocks_1_resnets_2_conv2_bias_to_fp16, dilations = var_13334, groups = var_31, pad = hidden_states_555_pad_0, pad_type = hidden_states_555_pad_type_0, strides = var_13332, weight = unet_up_blocks_1_resnets_2_conv2_weight_to_fp16_palettized, x = input_801_cast)[name = tensor("hidden_states_555_cast")]; + tensor var_13339 = const()[name = tensor("op_13339"), val = tensor([1, 1])]; + tensor var_13341 = const()[name = tensor("op_13341"), val = tensor([1, 1])]; + tensor x_15_pad_type_0 = const()[name = tensor("x_15_pad_type_0"), val = tensor("custom")]; + tensor x_15_pad_0 = const()[name = tensor("x_15_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_1_resnets_2_conv_shortcut_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1899747968))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1900208832))), name = tensor("unet_up_blocks_1_resnets_2_conv_shortcut_weight_to_fp16_palettized"), shape = tensor([640, 960, 1, 1])]; + tensor unet_up_blocks_1_resnets_2_conv_shortcut_bias_to_fp16 = const()[name = tensor("unet_up_blocks_1_resnets_2_conv_shortcut_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1900209024)))]; + tensor x_15_cast = conv(bias = unet_up_blocks_1_resnets_2_conv_shortcut_bias_to_fp16, dilations = var_13341, groups = var_31, pad = x_15_pad_0, pad_type = x_15_pad_type_0, strides = var_13339, weight = unet_up_blocks_1_resnets_2_conv_shortcut_weight_to_fp16_palettized, x = input_789_cast)[name = tensor("x_15_cast")]; + tensor hidden_states_557_cast = add(x = x_15_cast, y = hidden_states_555_cast)[name = tensor("hidden_states_557_cast")]; + tensor reshape_152_shape_0 = const()[name = tensor("reshape_152_shape_0"), val = tensor([2, 32, 20, 64, 64])]; + tensor reshape_152_cast = reshape(shape = reshape_152_shape_0, x = hidden_states_557_cast)[name = tensor("reshape_152_cast")]; + tensor reduce_mean_114_axes_0 = const()[name = tensor("reduce_mean_114_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_114_keep_dims_0 = const()[name = tensor("reduce_mean_114_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_114_cast = reduce_mean(axes = reduce_mean_114_axes_0, keep_dims = reduce_mean_114_keep_dims_0, x = reshape_152_cast)[name = tensor("reduce_mean_114_cast")]; + tensor sub_76_cast = sub(x = reshape_152_cast, y = reduce_mean_114_cast)[name = tensor("sub_76_cast")]; + tensor square_38_cast = square(x = sub_76_cast)[name = tensor("square_38_cast")]; + tensor reduce_mean_116_axes_0 = const()[name = tensor("reduce_mean_116_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_116_keep_dims_0 = const()[name = tensor("reduce_mean_116_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_116_cast = reduce_mean(axes = reduce_mean_116_axes_0, keep_dims = reduce_mean_116_keep_dims_0, x = square_38_cast)[name = tensor("reduce_mean_116_cast")]; + tensor add_76_y_0_to_fp16 = const()[name = tensor("add_76_y_0_to_fp16"), val = tensor(0x1.1p-20)]; + tensor add_76_cast = add(x = reduce_mean_116_cast, y = add_76_y_0_to_fp16)[name = tensor("add_76_cast")]; + tensor sqrt_38_cast = sqrt(x = add_76_cast)[name = tensor("sqrt_38_cast")]; + tensor real_div_38_cast = real_div(x = sub_76_cast, y = sqrt_38_cast)[name = tensor("real_div_38_cast")]; + tensor reshape_153_shape_0 = const()[name = tensor("reshape_153_shape_0"), val = tensor([2, 640, 64, 64])]; 
+ tensor reshape_153_cast = reshape(shape = reshape_153_shape_0, x = real_div_38_cast)[name = tensor("reshape_153_cast")]; + tensor add_77_gamma_0_to_fp16 = const()[name = tensor("add_77_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1900210368)))]; + tensor add_77_beta_0_to_fp16 = const()[name = tensor("add_77_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1900211712)))]; + tensor add_77_epsilon_0_to_fp16 = const()[name = tensor("add_77_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_77_cast = batch_norm(beta = add_77_beta_0_to_fp16, epsilon = add_77_epsilon_0_to_fp16, gamma = add_77_gamma_0_to_fp16, mean = add_11_mean_0_to_fp16, variance = add_11_variance_0_to_fp16, x = reshape_153_cast)[name = tensor("add_77_cast")]; + tensor var_13363 = const()[name = tensor("op_13363"), val = tensor([1, 1])]; + tensor var_13365 = const()[name = tensor("op_13365"), val = tensor([1, 1])]; + tensor hidden_states_559_pad_type_0 = const()[name = tensor("hidden_states_559_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_559_pad_0 = const()[name = tensor("hidden_states_559_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_1_attentions_2_proj_in_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1900213056))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1900520320))), name = tensor("unet_up_blocks_1_attentions_2_proj_in_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor unet_up_blocks_1_attentions_2_proj_in_bias_to_fp16 = const()[name = tensor("unet_up_blocks_1_attentions_2_proj_in_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1900520512)))]; + tensor hidden_states_559_cast = conv(bias = unet_up_blocks_1_attentions_2_proj_in_bias_to_fp16, dilations = var_13365, groups = var_31, pad = hidden_states_559_pad_0, pad_type = hidden_states_559_pad_type_0, strides = var_13363, weight = unet_up_blocks_1_attentions_2_proj_in_weight_to_fp16_palettized, x = add_77_cast)[name = tensor("hidden_states_559_cast")]; + tensor var_13370 = const()[name = tensor("op_13370"), val = tensor([2, 640, 1, 4096])]; + tensor inputs_409_cast = reshape(shape = var_13370, x = hidden_states_559_cast)[name = tensor("inputs_409_cast")]; + tensor var_13380 = const()[name = tensor("op_13380"), val = tensor([1])]; + tensor channels_mean_409_cast = reduce_mean(axes = var_13380, keep_dims = var_23, x = inputs_409_cast)[name = tensor("channels_mean_409_cast")]; + tensor zero_mean_409_cast = sub(x = inputs_409_cast, y = channels_mean_409_cast)[name = tensor("zero_mean_409_cast")]; + tensor zero_mean_sq_409_cast = mul(x = zero_mean_409_cast, y = zero_mean_409_cast)[name = tensor("zero_mean_sq_409_cast")]; + tensor var_13384 = const()[name = tensor("op_13384"), val = tensor([1])]; + tensor var_13385_cast = reduce_mean(axes = var_13384, keep_dims = var_23, x = zero_mean_sq_409_cast)[name = tensor("op_13385_cast")]; + tensor var_13386_to_fp16 = const()[name = tensor("op_13386_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_13387_cast = add(x = var_13385_cast, y = var_13386_to_fp16)[name = tensor("op_13387_cast")]; + tensor denom_409_epsilon_0_to_fp16 = const()[name = tensor("denom_409_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_409_cast = rsqrt(epsilon = 
denom_409_epsilon_0_to_fp16, x = var_13387_cast)[name = tensor("denom_409_cast")]; + tensor out_409_cast = mul(x = zero_mean_409_cast, y = denom_409_cast)[name = tensor("out_409_cast")]; + tensor var_13391_to_fp16 = const()[name = tensor("op_13391_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1900521856)))]; + tensor var_13392_cast = add(x = out_409_cast, y = var_13391_to_fp16)[name = tensor("op_13392_cast")]; + tensor var_13394_to_fp16 = const()[name = tensor("op_13394_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1900523200)))]; + tensor hidden_states_561_cast = mul(x = var_13392_cast, y = var_13394_to_fp16)[name = tensor("hidden_states_561_cast")]; + tensor var_13401 = const()[name = tensor("op_13401"), val = tensor([1, 1])]; + tensor var_13403 = const()[name = tensor("op_13403"), val = tensor([1, 1])]; + tensor q_273_pad_type_0 = const()[name = tensor("q_273_pad_type_0"), val = tensor("custom")]; + tensor q_273_pad_0 = const()[name = tensor("q_273_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_1_attentions_2_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1900524544))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1900831808))), name = tensor("unet_up_blocks_1_attentions_2_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_273_cast = conv(dilations = var_13403, groups = var_31, pad = q_273_pad_0, pad_type = q_273_pad_type_0, strides = var_13401, weight = unet_up_blocks_1_attentions_2_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_561_cast)[name = tensor("q_273_cast")]; + tensor var_13407 = const()[name = tensor("op_13407"), val = tensor([1, 1])]; + tensor var_13409 = const()[name = tensor("op_13409"), val = tensor([1, 1])]; + tensor k_273_pad_type_0 = const()[name = tensor("k_273_pad_type_0"), val = tensor("custom")]; + tensor k_273_pad_0 = const()[name = tensor("k_273_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_1_attentions_2_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1900832000))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1901139264))), name = tensor("unet_up_blocks_1_attentions_2_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor k_273_cast = conv(dilations = var_13409, groups = var_31, pad = k_273_pad_0, pad_type = k_273_pad_type_0, strides = var_13407, weight = unet_up_blocks_1_attentions_2_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_561_cast)[name = tensor("k_273_cast")]; + tensor var_13413 = const()[name = tensor("op_13413"), val = tensor([1, 1])]; + tensor var_13415 = const()[name = tensor("op_13415"), val = tensor([1, 1])]; + tensor v_273_pad_type_0 = const()[name = tensor("v_273_pad_type_0"), val = tensor("custom")]; + tensor v_273_pad_0 = const()[name = tensor("v_273_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_1_attentions_2_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(1901139456))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1901446720))), name = tensor("unet_up_blocks_1_attentions_2_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor v_273_cast = conv(dilations = var_13415, groups = var_31, pad = v_273_pad_0, pad_type = v_273_pad_type_0, strides = var_13413, weight = unet_up_blocks_1_attentions_2_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_561_cast)[name = tensor("v_273_cast")]; + tensor var_13419 = const()[name = tensor("op_13419"), val = tensor([2, 10, 64, -1])]; + tensor var_13420_cast = reshape(shape = var_13419, x = q_273_cast)[name = tensor("op_13420_cast")]; + tensor var_13421 = const()[name = tensor("op_13421"), val = tensor([2, 10, 64, -1])]; + tensor var_13422_cast = reshape(shape = var_13421, x = k_273_cast)[name = tensor("op_13422_cast")]; + tensor var_13423 = const()[name = tensor("op_13423"), val = tensor([2, 10, 64, -1])]; + tensor var_13424_cast = reshape(shape = var_13423, x = v_273_cast)[name = tensor("op_13424_cast")]; + tensor attn_weights_545_transpose_x_0 = const()[name = tensor("attn_weights_545_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_545_transpose_y_0 = const()[name = tensor("attn_weights_545_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_545_cast = matmul(transpose_x = attn_weights_545_transpose_x_0, transpose_y = attn_weights_545_transpose_y_0, x = var_13420_cast, y = var_13422_cast)[name = tensor("attn_weights_545_cast")]; + tensor attn_weights_547_cast = mul(x = attn_weights_545_cast, y = var_12_to_fp16)[name = tensor("attn_weights_547_cast")]; + tensor var_13428_cast = softmax(axis = var_18, x = attn_weights_547_cast)[name = tensor("op_13428_cast")]; + tensor attn_273_transpose_x_0 = const()[name = tensor("attn_273_transpose_x_0"), val = tensor(false)]; + tensor attn_273_transpose_y_0 = const()[name = tensor("attn_273_transpose_y_0"), val = tensor(true)]; + tensor attn_273_cast = matmul(transpose_x = attn_273_transpose_x_0, transpose_y = attn_273_transpose_y_0, x = var_13424_cast, y = var_13428_cast)[name = tensor("attn_273_cast")]; + tensor var_13432 = const()[name = tensor("op_13432"), val = tensor([2, 640, 1, -1])]; + tensor input_805_cast = reshape(shape = var_13432, x = attn_273_cast)[name = tensor("input_805_cast")]; + tensor var_13437 = const()[name = tensor("op_13437"), val = tensor([1, 1])]; + tensor var_13439 = const()[name = tensor("op_13439"), val = tensor([1, 1])]; + tensor var_13441_pad_type_0 = const()[name = tensor("op_13441_pad_type_0"), val = tensor("custom")]; + tensor var_13441_pad_0 = const()[name = tensor("op_13441_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_1_attentions_2_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1901446912))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1901754176))), name = tensor("unet_up_blocks_1_attentions_2_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor unet_up_blocks_1_attentions_2_transformer_blocks_0_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("unet_up_blocks_1_attentions_2_transformer_blocks_0_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1901754368)))]; + tensor 
var_13441_cast = conv(bias = unet_up_blocks_1_attentions_2_transformer_blocks_0_attn1_to_out_0_bias_to_fp16, dilations = var_13439, groups = var_31, pad = var_13441_pad_0, pad_type = var_13441_pad_type_0, strides = var_13437, weight = unet_up_blocks_1_attentions_2_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized, x = input_805_cast)[name = tensor("op_13441_cast")]; + tensor inputs_411_cast = add(x = var_13441_cast, y = inputs_409_cast)[name = tensor("inputs_411_cast")]; + tensor var_13445 = const()[name = tensor("op_13445"), val = tensor([1])]; + tensor channels_mean_411_cast = reduce_mean(axes = var_13445, keep_dims = var_23, x = inputs_411_cast)[name = tensor("channels_mean_411_cast")]; + tensor zero_mean_411_cast = sub(x = inputs_411_cast, y = channels_mean_411_cast)[name = tensor("zero_mean_411_cast")]; + tensor zero_mean_sq_411_cast = mul(x = zero_mean_411_cast, y = zero_mean_411_cast)[name = tensor("zero_mean_sq_411_cast")]; + tensor var_13449 = const()[name = tensor("op_13449"), val = tensor([1])]; + tensor var_13450_cast = reduce_mean(axes = var_13449, keep_dims = var_23, x = zero_mean_sq_411_cast)[name = tensor("op_13450_cast")]; + tensor var_13451_to_fp16 = const()[name = tensor("op_13451_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_13452_cast = add(x = var_13450_cast, y = var_13451_to_fp16)[name = tensor("op_13452_cast")]; + tensor denom_411_epsilon_0_to_fp16 = const()[name = tensor("denom_411_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_411_cast = rsqrt(epsilon = denom_411_epsilon_0_to_fp16, x = var_13452_cast)[name = tensor("denom_411_cast")]; + tensor out_411_cast = mul(x = zero_mean_411_cast, y = denom_411_cast)[name = tensor("out_411_cast")]; + tensor var_13456_to_fp16 = const()[name = tensor("op_13456_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1901755712)))]; + tensor var_13457_cast = add(x = out_411_cast, y = var_13456_to_fp16)[name = tensor("op_13457_cast")]; + tensor var_13459_to_fp16 = const()[name = tensor("op_13459_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1901757056)))]; + tensor hidden_states_563_cast = mul(x = var_13457_cast, y = var_13459_to_fp16)[name = tensor("hidden_states_563_cast")]; + tensor var_13466 = const()[name = tensor("op_13466"), val = tensor([1, 1])]; + tensor var_13468 = const()[name = tensor("op_13468"), val = tensor([1, 1])]; + tensor q_275_pad_type_0 = const()[name = tensor("q_275_pad_type_0"), val = tensor("custom")]; + tensor q_275_pad_0 = const()[name = tensor("q_275_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_1_attentions_2_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1901758400))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1902065664))), name = tensor("unet_up_blocks_1_attentions_2_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_275_cast = conv(dilations = var_13468, groups = var_31, pad = q_275_pad_0, pad_type = q_275_pad_type_0, strides = var_13466, weight = unet_up_blocks_1_attentions_2_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_563_cast)[name = tensor("q_275_cast")]; + tensor var_13472 = const()[name = tensor("op_13472"), val = tensor([1, 1])]; + tensor var_13474 = const()[name = tensor("op_13474"), val = 
tensor([1, 1])]; + tensor k_275_pad_type_0 = const()[name = tensor("k_275_pad_type_0"), val = tensor("custom")]; + tensor k_275_pad_0 = const()[name = tensor("k_275_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_1_attentions_2_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1902065856))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1903048960))), name = tensor("unet_up_blocks_1_attentions_2_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor k_275_cast = conv(dilations = var_13474, groups = var_31, pad = k_275_pad_0, pad_type = k_275_pad_type_0, strides = var_13472, weight = unet_up_blocks_1_attentions_2_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_275_cast")]; + tensor var_13478 = const()[name = tensor("op_13478"), val = tensor([1, 1])]; + tensor var_13480 = const()[name = tensor("op_13480"), val = tensor([1, 1])]; + tensor v_275_pad_type_0 = const()[name = tensor("v_275_pad_type_0"), val = tensor("custom")]; + tensor v_275_pad_0 = const()[name = tensor("v_275_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_1_attentions_2_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1903049152))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1904032256))), name = tensor("unet_up_blocks_1_attentions_2_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor v_275_cast = conv(dilations = var_13480, groups = var_31, pad = v_275_pad_0, pad_type = v_275_pad_type_0, strides = var_13478, weight = unet_up_blocks_1_attentions_2_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_275_cast")]; + tensor var_13484 = const()[name = tensor("op_13484"), val = tensor([2, 10, 64, -1])]; + tensor var_13485_cast = reshape(shape = var_13484, x = q_275_cast)[name = tensor("op_13485_cast")]; + tensor var_13486 = const()[name = tensor("op_13486"), val = tensor([2, 10, 64, -1])]; + tensor var_13487_cast = reshape(shape = var_13486, x = k_275_cast)[name = tensor("op_13487_cast")]; + tensor var_13488 = const()[name = tensor("op_13488"), val = tensor([2, 10, 64, -1])]; + tensor var_13489_cast = reshape(shape = var_13488, x = v_275_cast)[name = tensor("op_13489_cast")]; + tensor attn_weights_549_transpose_x_0 = const()[name = tensor("attn_weights_549_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_549_transpose_y_0 = const()[name = tensor("attn_weights_549_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_549_cast = matmul(transpose_x = attn_weights_549_transpose_x_0, transpose_y = attn_weights_549_transpose_y_0, x = var_13485_cast, y = var_13487_cast)[name = tensor("attn_weights_549_cast")]; + tensor attn_weights_551_cast = mul(x = attn_weights_549_cast, y = var_12_to_fp16)[name = tensor("attn_weights_551_cast")]; + tensor var_13493_cast = softmax(axis = var_18, x = attn_weights_551_cast)[name = tensor("op_13493_cast")]; + tensor attn_275_transpose_x_0 = const()[name = tensor("attn_275_transpose_x_0"), val = tensor(false)]; + tensor attn_275_transpose_y_0 = const()[name = tensor("attn_275_transpose_y_0"), val = tensor(true)]; + tensor 
attn_275_cast = matmul(transpose_x = attn_275_transpose_x_0, transpose_y = attn_275_transpose_y_0, x = var_13489_cast, y = var_13493_cast)[name = tensor("attn_275_cast")]; + tensor var_13497 = const()[name = tensor("op_13497"), val = tensor([2, 640, 1, -1])]; + tensor input_807_cast = reshape(shape = var_13497, x = attn_275_cast)[name = tensor("input_807_cast")]; + tensor var_13502 = const()[name = tensor("op_13502"), val = tensor([1, 1])]; + tensor var_13504 = const()[name = tensor("op_13504"), val = tensor([1, 1])]; + tensor var_13506_pad_type_0 = const()[name = tensor("op_13506_pad_type_0"), val = tensor("custom")]; + tensor var_13506_pad_0 = const()[name = tensor("op_13506_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_1_attentions_2_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1904032448))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1904339712))), name = tensor("unet_up_blocks_1_attentions_2_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor unet_up_blocks_1_attentions_2_transformer_blocks_0_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("unet_up_blocks_1_attentions_2_transformer_blocks_0_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1904339904)))]; + tensor var_13506_cast = conv(bias = unet_up_blocks_1_attentions_2_transformer_blocks_0_attn2_to_out_0_bias_to_fp16, dilations = var_13504, groups = var_31, pad = var_13506_pad_0, pad_type = var_13506_pad_type_0, strides = var_13502, weight = unet_up_blocks_1_attentions_2_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized, x = input_807_cast)[name = tensor("op_13506_cast")]; + tensor inputs_413_cast = add(x = var_13506_cast, y = inputs_411_cast)[name = tensor("inputs_413_cast")]; + tensor var_13510 = const()[name = tensor("op_13510"), val = tensor([1])]; + tensor channels_mean_413_cast = reduce_mean(axes = var_13510, keep_dims = var_23, x = inputs_413_cast)[name = tensor("channels_mean_413_cast")]; + tensor zero_mean_413_cast = sub(x = inputs_413_cast, y = channels_mean_413_cast)[name = tensor("zero_mean_413_cast")]; + tensor zero_mean_sq_413_cast = mul(x = zero_mean_413_cast, y = zero_mean_413_cast)[name = tensor("zero_mean_sq_413_cast")]; + tensor var_13514 = const()[name = tensor("op_13514"), val = tensor([1])]; + tensor var_13515_cast = reduce_mean(axes = var_13514, keep_dims = var_23, x = zero_mean_sq_413_cast)[name = tensor("op_13515_cast")]; + tensor var_13516_to_fp16 = const()[name = tensor("op_13516_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_13517_cast = add(x = var_13515_cast, y = var_13516_to_fp16)[name = tensor("op_13517_cast")]; + tensor denom_413_epsilon_0_to_fp16 = const()[name = tensor("denom_413_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_413_cast = rsqrt(epsilon = denom_413_epsilon_0_to_fp16, x = var_13517_cast)[name = tensor("denom_413_cast")]; + tensor out_413_cast = mul(x = zero_mean_413_cast, y = denom_413_cast)[name = tensor("out_413_cast")]; + tensor var_13521_to_fp16 = const()[name = tensor("op_13521_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1904341248)))]; + tensor var_13522_cast = add(x = out_413_cast, y = var_13521_to_fp16)[name = tensor("op_13522_cast")]; + tensor var_13524_to_fp16 = const()[name = 
tensor("op_13524_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1904342592)))]; + tensor input_809_cast = mul(x = var_13522_cast, y = var_13524_to_fp16)[name = tensor("input_809_cast")]; + tensor var_13532 = const()[name = tensor("op_13532"), val = tensor([1, 1])]; + tensor var_13534 = const()[name = tensor("op_13534"), val = tensor([1, 1])]; + tensor var_13536_pad_type_0 = const()[name = tensor("op_13536_pad_type_0"), val = tensor("custom")]; + tensor var_13536_pad_0 = const()[name = tensor("op_13536_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_1_attentions_2_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1904343936))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1906801600))), name = tensor("unet_up_blocks_1_attentions_2_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([5120, 640, 1, 1])]; + tensor unet_up_blocks_1_attentions_2_transformer_blocks_0_ff_net_0_proj_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1906801792))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1906805696))), name = tensor("unet_up_blocks_1_attentions_2_transformer_blocks_0_ff_net_0_proj_bias_to_fp16_palettized"), shape = tensor([5120])]; + tensor var_13536_cast = conv(bias = unet_up_blocks_1_attentions_2_transformer_blocks_0_ff_net_0_proj_bias_to_fp16_palettized, dilations = var_13534, groups = var_31, pad = var_13536_pad_0, pad_type = var_13536_pad_type_0, strides = var_13532, weight = unet_up_blocks_1_attentions_2_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized, x = input_809_cast)[name = tensor("op_13536_cast")]; + tensor var_13537_split_sizes_0 = const()[name = tensor("op_13537_split_sizes_0"), val = tensor([2560, 2560])]; + tensor var_13537_axis_0 = const()[name = tensor("op_13537_axis_0"), val = tensor(1)]; + tensor var_13537_cast_0, tensor var_13537_cast_1 = split(axis = var_13537_axis_0, split_sizes = var_13537_split_sizes_0, x = var_13536_cast)[name = tensor("op_13537_cast")]; + tensor var_13539_mode_0 = const()[name = tensor("op_13539_mode_0"), val = tensor("EXACT")]; + tensor var_13539_cast = gelu(mode = var_13539_mode_0, x = var_13537_cast_1)[name = tensor("op_13539_cast")]; + tensor input_811_cast = mul(x = var_13537_cast_0, y = var_13539_cast)[name = tensor("input_811_cast")]; + tensor var_13543 = const()[name = tensor("op_13543"), val = tensor([1, 1])]; + tensor var_13545 = const()[name = tensor("op_13545"), val = tensor([1, 1])]; + tensor var_13547_pad_type_0 = const()[name = tensor("op_13547_pad_type_0"), val = tensor("custom")]; + tensor var_13547_pad_0 = const()[name = tensor("op_13547_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_1_attentions_2_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1906805888))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1908034752))), name = tensor("unet_up_blocks_1_attentions_2_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized"), shape = tensor([640, 2560, 1, 1])]; + tensor unet_up_blocks_1_attentions_2_transformer_blocks_0_ff_net_2_bias_to_fp16 = const()[name = 
tensor("unet_up_blocks_1_attentions_2_transformer_blocks_0_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1908034944)))]; + tensor var_13547_cast = conv(bias = unet_up_blocks_1_attentions_2_transformer_blocks_0_ff_net_2_bias_to_fp16, dilations = var_13545, groups = var_31, pad = var_13547_pad_0, pad_type = var_13547_pad_type_0, strides = var_13543, weight = unet_up_blocks_1_attentions_2_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized, x = input_811_cast)[name = tensor("op_13547_cast")]; + tensor inputs_415_cast = add(x = var_13547_cast, y = inputs_413_cast)[name = tensor("inputs_415_cast")]; + tensor var_13557 = const()[name = tensor("op_13557"), val = tensor([1])]; + tensor channels_mean_415_cast = reduce_mean(axes = var_13557, keep_dims = var_23, x = inputs_415_cast)[name = tensor("channels_mean_415_cast")]; + tensor zero_mean_415_cast = sub(x = inputs_415_cast, y = channels_mean_415_cast)[name = tensor("zero_mean_415_cast")]; + tensor zero_mean_sq_415_cast = mul(x = zero_mean_415_cast, y = zero_mean_415_cast)[name = tensor("zero_mean_sq_415_cast")]; + tensor var_13561 = const()[name = tensor("op_13561"), val = tensor([1])]; + tensor var_13562_cast = reduce_mean(axes = var_13561, keep_dims = var_23, x = zero_mean_sq_415_cast)[name = tensor("op_13562_cast")]; + tensor var_13563_to_fp16 = const()[name = tensor("op_13563_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_13564_cast = add(x = var_13562_cast, y = var_13563_to_fp16)[name = tensor("op_13564_cast")]; + tensor denom_415_epsilon_0_to_fp16 = const()[name = tensor("denom_415_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_415_cast = rsqrt(epsilon = denom_415_epsilon_0_to_fp16, x = var_13564_cast)[name = tensor("denom_415_cast")]; + tensor out_415_cast = mul(x = zero_mean_415_cast, y = denom_415_cast)[name = tensor("out_415_cast")]; + tensor var_13568_to_fp16 = const()[name = tensor("op_13568_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1908036288)))]; + tensor var_13569_cast = add(x = out_415_cast, y = var_13568_to_fp16)[name = tensor("op_13569_cast")]; + tensor var_13571_to_fp16 = const()[name = tensor("op_13571_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1908037632)))]; + tensor hidden_states_567_cast = mul(x = var_13569_cast, y = var_13571_to_fp16)[name = tensor("hidden_states_567_cast")]; + tensor var_13578 = const()[name = tensor("op_13578"), val = tensor([1, 1])]; + tensor var_13580 = const()[name = tensor("op_13580"), val = tensor([1, 1])]; + tensor q_277_pad_type_0 = const()[name = tensor("q_277_pad_type_0"), val = tensor("custom")]; + tensor q_277_pad_0 = const()[name = tensor("q_277_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_1_attentions_2_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1908038976))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1908346240))), name = tensor("unet_up_blocks_1_attentions_2_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_277_cast = conv(dilations = var_13580, groups = var_31, pad = q_277_pad_0, pad_type = q_277_pad_type_0, strides = var_13578, weight = unet_up_blocks_1_attentions_2_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized, x = 
hidden_states_567_cast)[name = tensor("q_277_cast")]; + tensor var_13584 = const()[name = tensor("op_13584"), val = tensor([1, 1])]; + tensor var_13586 = const()[name = tensor("op_13586"), val = tensor([1, 1])]; + tensor k_277_pad_type_0 = const()[name = tensor("k_277_pad_type_0"), val = tensor("custom")]; + tensor k_277_pad_0 = const()[name = tensor("k_277_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_1_attentions_2_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1908346432))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1908653696))), name = tensor("unet_up_blocks_1_attentions_2_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor k_277_cast = conv(dilations = var_13586, groups = var_31, pad = k_277_pad_0, pad_type = k_277_pad_type_0, strides = var_13584, weight = unet_up_blocks_1_attentions_2_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_567_cast)[name = tensor("k_277_cast")]; + tensor var_13590 = const()[name = tensor("op_13590"), val = tensor([1, 1])]; + tensor var_13592 = const()[name = tensor("op_13592"), val = tensor([1, 1])]; + tensor v_277_pad_type_0 = const()[name = tensor("v_277_pad_type_0"), val = tensor("custom")]; + tensor v_277_pad_0 = const()[name = tensor("v_277_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_1_attentions_2_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1908653888))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1908961152))), name = tensor("unet_up_blocks_1_attentions_2_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor v_277_cast = conv(dilations = var_13592, groups = var_31, pad = v_277_pad_0, pad_type = v_277_pad_type_0, strides = var_13590, weight = unet_up_blocks_1_attentions_2_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_567_cast)[name = tensor("v_277_cast")]; + tensor var_13596 = const()[name = tensor("op_13596"), val = tensor([2, 10, 64, -1])]; + tensor var_13597_cast = reshape(shape = var_13596, x = q_277_cast)[name = tensor("op_13597_cast")]; + tensor var_13598 = const()[name = tensor("op_13598"), val = tensor([2, 10, 64, -1])]; + tensor var_13599_cast = reshape(shape = var_13598, x = k_277_cast)[name = tensor("op_13599_cast")]; + tensor var_13600 = const()[name = tensor("op_13600"), val = tensor([2, 10, 64, -1])]; + tensor var_13601_cast = reshape(shape = var_13600, x = v_277_cast)[name = tensor("op_13601_cast")]; + tensor attn_weights_553_transpose_x_0 = const()[name = tensor("attn_weights_553_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_553_transpose_y_0 = const()[name = tensor("attn_weights_553_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_553_cast = matmul(transpose_x = attn_weights_553_transpose_x_0, transpose_y = attn_weights_553_transpose_y_0, x = var_13597_cast, y = var_13599_cast)[name = tensor("attn_weights_553_cast")]; + tensor attn_weights_555_cast = mul(x = attn_weights_553_cast, y = var_12_to_fp16)[name = tensor("attn_weights_555_cast")]; + tensor var_13605_cast = softmax(axis = var_18, x = attn_weights_555_cast)[name = tensor("op_13605_cast")]; + tensor 
attn_277_transpose_x_0 = const()[name = tensor("attn_277_transpose_x_0"), val = tensor(false)]; + tensor attn_277_transpose_y_0 = const()[name = tensor("attn_277_transpose_y_0"), val = tensor(true)]; + tensor attn_277_cast = matmul(transpose_x = attn_277_transpose_x_0, transpose_y = attn_277_transpose_y_0, x = var_13601_cast, y = var_13605_cast)[name = tensor("attn_277_cast")]; + tensor var_13609 = const()[name = tensor("op_13609"), val = tensor([2, 640, 1, -1])]; + tensor input_813_cast = reshape(shape = var_13609, x = attn_277_cast)[name = tensor("input_813_cast")]; + tensor var_13614 = const()[name = tensor("op_13614"), val = tensor([1, 1])]; + tensor var_13616 = const()[name = tensor("op_13616"), val = tensor([1, 1])]; + tensor var_13618_pad_type_0 = const()[name = tensor("op_13618_pad_type_0"), val = tensor("custom")]; + tensor var_13618_pad_0 = const()[name = tensor("op_13618_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_1_attentions_2_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1908961344))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1909268608))), name = tensor("unet_up_blocks_1_attentions_2_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor unet_up_blocks_1_attentions_2_transformer_blocks_1_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("unet_up_blocks_1_attentions_2_transformer_blocks_1_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1909268800)))]; + tensor var_13618_cast = conv(bias = unet_up_blocks_1_attentions_2_transformer_blocks_1_attn1_to_out_0_bias_to_fp16, dilations = var_13616, groups = var_31, pad = var_13618_pad_0, pad_type = var_13618_pad_type_0, strides = var_13614, weight = unet_up_blocks_1_attentions_2_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized, x = input_813_cast)[name = tensor("op_13618_cast")]; + tensor inputs_417_cast = add(x = var_13618_cast, y = inputs_415_cast)[name = tensor("inputs_417_cast")]; + tensor var_13622 = const()[name = tensor("op_13622"), val = tensor([1])]; + tensor channels_mean_417_cast = reduce_mean(axes = var_13622, keep_dims = var_23, x = inputs_417_cast)[name = tensor("channels_mean_417_cast")]; + tensor zero_mean_417_cast = sub(x = inputs_417_cast, y = channels_mean_417_cast)[name = tensor("zero_mean_417_cast")]; + tensor zero_mean_sq_417_cast = mul(x = zero_mean_417_cast, y = zero_mean_417_cast)[name = tensor("zero_mean_sq_417_cast")]; + tensor var_13626 = const()[name = tensor("op_13626"), val = tensor([1])]; + tensor var_13627_cast = reduce_mean(axes = var_13626, keep_dims = var_23, x = zero_mean_sq_417_cast)[name = tensor("op_13627_cast")]; + tensor var_13628_to_fp16 = const()[name = tensor("op_13628_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_13629_cast = add(x = var_13627_cast, y = var_13628_to_fp16)[name = tensor("op_13629_cast")]; + tensor denom_417_epsilon_0_to_fp16 = const()[name = tensor("denom_417_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_417_cast = rsqrt(epsilon = denom_417_epsilon_0_to_fp16, x = var_13629_cast)[name = tensor("denom_417_cast")]; + tensor out_417_cast = mul(x = zero_mean_417_cast, y = denom_417_cast)[name = tensor("out_417_cast")]; + tensor var_13633_to_fp16 = const()[name = tensor("op_13633_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(1909270144)))]; + tensor var_13634_cast = add(x = out_417_cast, y = var_13633_to_fp16)[name = tensor("op_13634_cast")]; + tensor var_13636_to_fp16 = const()[name = tensor("op_13636_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1909271488)))]; + tensor hidden_states_569_cast = mul(x = var_13634_cast, y = var_13636_to_fp16)[name = tensor("hidden_states_569_cast")]; + tensor var_13643 = const()[name = tensor("op_13643"), val = tensor([1, 1])]; + tensor var_13645 = const()[name = tensor("op_13645"), val = tensor([1, 1])]; + tensor q_pad_type_0 = const()[name = tensor("q_pad_type_0"), val = tensor("custom")]; + tensor q_pad_0 = const()[name = tensor("q_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_1_attentions_2_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1909272832))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1909580096))), name = tensor("unet_up_blocks_1_attentions_2_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_cast = conv(dilations = var_13645, groups = var_31, pad = q_pad_0, pad_type = q_pad_type_0, strides = var_13643, weight = unet_up_blocks_1_attentions_2_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_569_cast)[name = tensor("q_cast")]; + tensor var_13649 = const()[name = tensor("op_13649"), val = tensor([1, 1])]; + tensor var_13651 = const()[name = tensor("op_13651"), val = tensor([1, 1])]; + tensor k_pad_type_0 = const()[name = tensor("k_pad_type_0"), val = tensor("custom")]; + tensor k_pad_0 = const()[name = tensor("k_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_1_attentions_2_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1909580288))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1910563392))), name = tensor("unet_up_blocks_1_attentions_2_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor k_cast = conv(dilations = var_13651, groups = var_31, pad = k_pad_0, pad_type = k_pad_type_0, strides = var_13649, weight = unet_up_blocks_1_attentions_2_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_cast")]; + tensor var_13655 = const()[name = tensor("op_13655"), val = tensor([1, 1])]; + tensor var_13657 = const()[name = tensor("op_13657"), val = tensor([1, 1])]; + tensor v_pad_type_0 = const()[name = tensor("v_pad_type_0"), val = tensor("custom")]; + tensor v_pad_0 = const()[name = tensor("v_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_1_attentions_2_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1910563584))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1911546688))), name = tensor("unet_up_blocks_1_attentions_2_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor v_cast = conv(dilations = var_13657, groups = var_31, pad = v_pad_0, pad_type = v_pad_type_0, strides = 
var_13655, weight = unet_up_blocks_1_attentions_2_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_cast")]; + tensor var_13661 = const()[name = tensor("op_13661"), val = tensor([2, 10, 64, -1])]; + tensor var_13662_cast = reshape(shape = var_13661, x = q_cast)[name = tensor("op_13662_cast")]; + tensor var_13663 = const()[name = tensor("op_13663"), val = tensor([2, 10, 64, -1])]; + tensor var_13664_cast = reshape(shape = var_13663, x = k_cast)[name = tensor("op_13664_cast")]; + tensor var_13665 = const()[name = tensor("op_13665"), val = tensor([2, 10, 64, -1])]; + tensor var_13666_cast = reshape(shape = var_13665, x = v_cast)[name = tensor("op_13666_cast")]; + tensor attn_weights_557_transpose_x_0 = const()[name = tensor("attn_weights_557_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_557_transpose_y_0 = const()[name = tensor("attn_weights_557_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_557_cast = matmul(transpose_x = attn_weights_557_transpose_x_0, transpose_y = attn_weights_557_transpose_y_0, x = var_13662_cast, y = var_13664_cast)[name = tensor("attn_weights_557_cast")]; + tensor attn_weights_cast = mul(x = attn_weights_557_cast, y = var_12_to_fp16)[name = tensor("attn_weights_cast")]; + tensor var_13670_cast = softmax(axis = var_18, x = attn_weights_cast)[name = tensor("op_13670_cast")]; + tensor attn_transpose_x_0 = const()[name = tensor("attn_transpose_x_0"), val = tensor(false)]; + tensor attn_transpose_y_0 = const()[name = tensor("attn_transpose_y_0"), val = tensor(true)]; + tensor attn_cast = matmul(transpose_x = attn_transpose_x_0, transpose_y = attn_transpose_y_0, x = var_13666_cast, y = var_13670_cast)[name = tensor("attn_cast")]; + tensor var_13674 = const()[name = tensor("op_13674"), val = tensor([2, 640, 1, -1])]; + tensor input_815_cast = reshape(shape = var_13674, x = attn_cast)[name = tensor("input_815_cast")]; + tensor var_13679 = const()[name = tensor("op_13679"), val = tensor([1, 1])]; + tensor var_13681 = const()[name = tensor("op_13681"), val = tensor([1, 1])]; + tensor var_13683_pad_type_0 = const()[name = tensor("op_13683_pad_type_0"), val = tensor("custom")]; + tensor var_13683_pad_0 = const()[name = tensor("op_13683_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_1_attentions_2_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1911546880))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1911854144))), name = tensor("unet_up_blocks_1_attentions_2_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor unet_up_blocks_1_attentions_2_transformer_blocks_1_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("unet_up_blocks_1_attentions_2_transformer_blocks_1_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1911854336)))]; + tensor var_13683_cast = conv(bias = unet_up_blocks_1_attentions_2_transformer_blocks_1_attn2_to_out_0_bias_to_fp16, dilations = var_13681, groups = var_31, pad = var_13683_pad_0, pad_type = var_13683_pad_type_0, strides = var_13679, weight = unet_up_blocks_1_attentions_2_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized, x = input_815_cast)[name = tensor("op_13683_cast")]; + tensor inputs_cast = add(x = var_13683_cast, y = inputs_417_cast)[name = 
tensor("inputs_cast")]; + tensor var_13687 = const()[name = tensor("op_13687"), val = tensor([1])]; + tensor channels_mean_cast = reduce_mean(axes = var_13687, keep_dims = var_23, x = inputs_cast)[name = tensor("channels_mean_cast")]; + tensor zero_mean_cast = sub(x = inputs_cast, y = channels_mean_cast)[name = tensor("zero_mean_cast")]; + tensor zero_mean_sq_cast = mul(x = zero_mean_cast, y = zero_mean_cast)[name = tensor("zero_mean_sq_cast")]; + tensor var_13691 = const()[name = tensor("op_13691"), val = tensor([1])]; + tensor var_13692_cast = reduce_mean(axes = var_13691, keep_dims = var_23, x = zero_mean_sq_cast)[name = tensor("op_13692_cast")]; + tensor var_13693_to_fp16 = const()[name = tensor("op_13693_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_13694_cast = add(x = var_13692_cast, y = var_13693_to_fp16)[name = tensor("op_13694_cast")]; + tensor denom_epsilon_0_to_fp16 = const()[name = tensor("denom_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_cast = rsqrt(epsilon = denom_epsilon_0_to_fp16, x = var_13694_cast)[name = tensor("denom_cast")]; + tensor out_cast = mul(x = zero_mean_cast, y = denom_cast)[name = tensor("out_cast")]; + tensor var_13698_to_fp16 = const()[name = tensor("op_13698_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1911855680)))]; + tensor var_13699_cast = add(x = out_cast, y = var_13698_to_fp16)[name = tensor("op_13699_cast")]; + tensor var_13701_to_fp16 = const()[name = tensor("op_13701_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1911857024)))]; + tensor input_817_cast = mul(x = var_13699_cast, y = var_13701_to_fp16)[name = tensor("input_817_cast")]; + tensor var_13709 = const()[name = tensor("op_13709"), val = tensor([1, 1])]; + tensor var_13711 = const()[name = tensor("op_13711"), val = tensor([1, 1])]; + tensor var_13713_pad_type_0 = const()[name = tensor("op_13713_pad_type_0"), val = tensor("custom")]; + tensor var_13713_pad_0 = const()[name = tensor("op_13713_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_1_attentions_2_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1911858368))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1914316032))), name = tensor("unet_up_blocks_1_attentions_2_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([5120, 640, 1, 1])]; + tensor unet_up_blocks_1_attentions_2_transformer_blocks_1_ff_net_0_proj_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1914316224))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1914320128))), name = tensor("unet_up_blocks_1_attentions_2_transformer_blocks_1_ff_net_0_proj_bias_to_fp16_palettized"), shape = tensor([5120])]; + tensor var_13713_cast = conv(bias = unet_up_blocks_1_attentions_2_transformer_blocks_1_ff_net_0_proj_bias_to_fp16_palettized, dilations = var_13711, groups = var_31, pad = var_13713_pad_0, pad_type = var_13713_pad_type_0, strides = var_13709, weight = unet_up_blocks_1_attentions_2_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized, x = input_817_cast)[name = tensor("op_13713_cast")]; + tensor var_13714_split_sizes_0 = const()[name = tensor("op_13714_split_sizes_0"), val = tensor([2560, 2560])]; + tensor 
var_13714_axis_0 = const()[name = tensor("op_13714_axis_0"), val = tensor(1)]; + tensor var_13714_cast_0, tensor var_13714_cast_1 = split(axis = var_13714_axis_0, split_sizes = var_13714_split_sizes_0, x = var_13713_cast)[name = tensor("op_13714_cast")]; + tensor var_13716_mode_0 = const()[name = tensor("op_13716_mode_0"), val = tensor("EXACT")]; + tensor var_13716_cast = gelu(mode = var_13716_mode_0, x = var_13714_cast_1)[name = tensor("op_13716_cast")]; + tensor input_819_cast = mul(x = var_13714_cast_0, y = var_13716_cast)[name = tensor("input_819_cast")]; + tensor var_13720 = const()[name = tensor("op_13720"), val = tensor([1, 1])]; + tensor var_13722 = const()[name = tensor("op_13722"), val = tensor([1, 1])]; + tensor var_13724_pad_type_0 = const()[name = tensor("op_13724_pad_type_0"), val = tensor("custom")]; + tensor var_13724_pad_0 = const()[name = tensor("op_13724_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_1_attentions_2_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1914320320))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1915549184))), name = tensor("unet_up_blocks_1_attentions_2_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized"), shape = tensor([640, 2560, 1, 1])]; + tensor unet_up_blocks_1_attentions_2_transformer_blocks_1_ff_net_2_bias_to_fp16 = const()[name = tensor("unet_up_blocks_1_attentions_2_transformer_blocks_1_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1915549376)))]; + tensor var_13724_cast = conv(bias = unet_up_blocks_1_attentions_2_transformer_blocks_1_ff_net_2_bias_to_fp16, dilations = var_13722, groups = var_31, pad = var_13724_pad_0, pad_type = var_13724_pad_type_0, strides = var_13720, weight = unet_up_blocks_1_attentions_2_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized, x = input_819_cast)[name = tensor("op_13724_cast")]; + tensor hidden_states_573_cast = add(x = var_13724_cast, y = inputs_cast)[name = tensor("hidden_states_573_cast")]; + tensor var_13726 = const()[name = tensor("op_13726"), val = tensor([2, 640, 64, 64])]; + tensor input_821_cast = reshape(shape = var_13726, x = hidden_states_573_cast)[name = tensor("input_821_cast")]; + tensor var_13730 = const()[name = tensor("op_13730"), val = tensor([1, 1])]; + tensor var_13732 = const()[name = tensor("op_13732"), val = tensor([1, 1])]; + tensor hidden_states_575_pad_type_0 = const()[name = tensor("hidden_states_575_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_575_pad_0 = const()[name = tensor("hidden_states_575_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_1_attentions_2_proj_out_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1915550720))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1915857984))), name = tensor("unet_up_blocks_1_attentions_2_proj_out_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor unet_up_blocks_1_attentions_2_proj_out_bias_to_fp16 = const()[name = tensor("unet_up_blocks_1_attentions_2_proj_out_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1915858176)))]; + tensor hidden_states_575_cast = conv(bias = unet_up_blocks_1_attentions_2_proj_out_bias_to_fp16, dilations = 
var_13732, groups = var_31, pad = hidden_states_575_pad_0, pad_type = hidden_states_575_pad_type_0, strides = var_13730, weight = unet_up_blocks_1_attentions_2_proj_out_weight_to_fp16_palettized, x = input_821_cast)[name = tensor("hidden_states_575_cast")]; + tensor input_823_cast = add(x = hidden_states_575_cast, y = hidden_states_557_cast)[name = tensor("input_823_cast")]; + tensor input_825_scale_factor_height_0 = const()[name = tensor("input_825_scale_factor_height_0"), val = tensor(0x1p+1)]; + tensor input_825_scale_factor_width_0 = const()[name = tensor("input_825_scale_factor_width_0"), val = tensor(0x1p+1)]; + tensor input_825_cast = upsample_nearest_neighbor(scale_factor_height = input_825_scale_factor_height_0, scale_factor_width = input_825_scale_factor_width_0, x = input_823_cast)[name = tensor("input_825_cast")]; + tensor var_13741 = const()[name = tensor("op_13741"), val = tensor([1, 1])]; + tensor var_13743 = const()[name = tensor("op_13743"), val = tensor([1, 1])]; + tensor hidden_states_577_pad_type_0 = const()[name = tensor("hidden_states_577_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_577_pad_0 = const()[name = tensor("hidden_states_577_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor unet_up_blocks_1_upsamplers_0_conv_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1915859520))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1918624384))), name = tensor("unet_up_blocks_1_upsamplers_0_conv_weight_to_fp16_palettized"), shape = tensor([640, 640, 3, 3])]; + tensor unet_up_blocks_1_upsamplers_0_conv_bias_to_fp16 = const()[name = tensor("unet_up_blocks_1_upsamplers_0_conv_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1918624576)))]; + tensor hidden_states_577_cast = conv(bias = unet_up_blocks_1_upsamplers_0_conv_bias_to_fp16, dilations = var_13743, groups = var_31, pad = hidden_states_577_pad_0, pad_type = hidden_states_577_pad_type_0, strides = var_13741, weight = unet_up_blocks_1_upsamplers_0_conv_weight_to_fp16_palettized, x = input_825_cast)[name = tensor("hidden_states_577_cast")]; + tensor input_827_interleave_0 = const()[name = tensor("input_827_interleave_0"), val = tensor(false)]; + tensor input_827_cast = concat(axis = var_31, interleave = input_827_interleave_0, values = (hidden_states_577_cast, input_43_cast))[name = tensor("input_827_cast")]; + tensor reshape_156_shape_0 = const()[name = tensor("reshape_156_shape_0"), val = tensor([2, 32, 30, 128, 128])]; + tensor reshape_156_cast = reshape(shape = reshape_156_shape_0, x = input_827_cast)[name = tensor("reshape_156_cast")]; + tensor reduce_mean_117_axes_0 = const()[name = tensor("reduce_mean_117_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_117_keep_dims_0 = const()[name = tensor("reduce_mean_117_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_117_cast = reduce_mean(axes = reduce_mean_117_axes_0, keep_dims = reduce_mean_117_keep_dims_0, x = reshape_156_cast)[name = tensor("reduce_mean_117_cast")]; + tensor sub_78_cast = sub(x = reshape_156_cast, y = reduce_mean_117_cast)[name = tensor("sub_78_cast")]; + tensor square_39_cast = square(x = sub_78_cast)[name = tensor("square_39_cast")]; + tensor reduce_mean_119_axes_0 = const()[name = tensor("reduce_mean_119_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_119_keep_dims_0 = const()[name = tensor("reduce_mean_119_keep_dims_0"), val = 
tensor(true)]; + tensor reduce_mean_119_cast = reduce_mean(axes = reduce_mean_119_axes_0, keep_dims = reduce_mean_119_keep_dims_0, x = square_39_cast)[name = tensor("reduce_mean_119_cast")]; + tensor add_78_y_0_to_fp16 = const()[name = tensor("add_78_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_78_cast = add(x = reduce_mean_119_cast, y = add_78_y_0_to_fp16)[name = tensor("add_78_cast")]; + tensor sqrt_39_cast = sqrt(x = add_78_cast)[name = tensor("sqrt_39_cast")]; + tensor real_div_39_cast = real_div(x = sub_78_cast, y = sqrt_39_cast)[name = tensor("real_div_39_cast")]; + tensor reshape_157_shape_0 = const()[name = tensor("reshape_157_shape_0"), val = tensor([2, 960, 128, 128])]; + tensor reshape_157_cast = reshape(shape = reshape_157_shape_0, x = real_div_39_cast)[name = tensor("reshape_157_cast")]; + tensor add_79_gamma_0_to_fp16 = const()[name = tensor("add_79_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1918625920)))]; + tensor add_79_beta_0_to_fp16 = const()[name = tensor("add_79_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1918627904)))]; + tensor add_79_epsilon_0_to_fp16 = const()[name = tensor("add_79_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_79_cast = batch_norm(beta = add_79_beta_0_to_fp16, epsilon = add_79_epsilon_0_to_fp16, gamma = add_79_gamma_0_to_fp16, mean = add_73_mean_0_to_fp16, variance = add_73_variance_0_to_fp16, x = reshape_157_cast)[name = tensor("add_79_cast")]; + tensor input_831_cast = silu(x = add_79_cast)[name = tensor("input_831_cast")]; + tensor var_13766 = const()[name = tensor("op_13766"), val = tensor([1, 1])]; + tensor var_13768 = const()[name = tensor("op_13768"), val = tensor([1, 1])]; + tensor hidden_states_579_pad_type_0 = const()[name = tensor("hidden_states_579_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_579_pad_0 = const()[name = tensor("hidden_states_579_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor unet_up_blocks_2_resnets_0_conv1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1918629888))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1920703552))), name = tensor("unet_up_blocks_2_resnets_0_conv1_weight_to_fp16_palettized"), shape = tensor([320, 960, 3, 3])]; + tensor unet_up_blocks_2_resnets_0_conv1_bias_to_fp16 = const()[name = tensor("unet_up_blocks_2_resnets_0_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1920703744)))]; + tensor hidden_states_579_cast = conv(bias = unet_up_blocks_2_resnets_0_conv1_bias_to_fp16, dilations = var_13768, groups = var_31, pad = hidden_states_579_pad_0, pad_type = hidden_states_579_pad_type_0, strides = var_13766, weight = unet_up_blocks_2_resnets_0_conv1_weight_to_fp16_palettized, x = input_831_cast)[name = tensor("hidden_states_579_cast")]; + tensor var_13774 = const()[name = tensor("op_13774"), val = tensor([1, 1])]; + tensor var_13776 = const()[name = tensor("op_13776"), val = tensor([1, 1])]; + tensor temb_29_pad_type_0 = const()[name = tensor("temb_29_pad_type_0"), val = tensor("custom")]; + tensor temb_29_pad_0 = const()[name = tensor("temb_29_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_2_resnets_0_time_emb_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(1920704448))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1921011712))), name = tensor("unet_up_blocks_2_resnets_0_time_emb_proj_weight_to_fp16_palettized"), shape = tensor([320, 1280, 1, 1])]; + tensor unet_up_blocks_2_resnets_0_time_emb_proj_bias_to_fp16 = const()[name = tensor("unet_up_blocks_2_resnets_0_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1921011904)))]; + tensor temb_29_cast = conv(bias = unet_up_blocks_2_resnets_0_time_emb_proj_bias_to_fp16, dilations = var_13776, groups = var_31, pad = temb_29_pad_0, pad_type = temb_29_pad_type_0, strides = var_13774, weight = unet_up_blocks_2_resnets_0_time_emb_proj_weight_to_fp16_palettized, x = input_21_cast)[name = tensor("temb_29_cast")]; + tensor input_835_cast = add(x = hidden_states_579_cast, y = temb_29_cast)[name = tensor("input_835_cast")]; + tensor reshape_160_shape_0 = const()[name = tensor("reshape_160_shape_0"), val = tensor([2, 32, 10, 128, 128])]; + tensor reshape_160_cast = reshape(shape = reshape_160_shape_0, x = input_835_cast)[name = tensor("reshape_160_cast")]; + tensor reduce_mean_120_axes_0 = const()[name = tensor("reduce_mean_120_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_120_keep_dims_0 = const()[name = tensor("reduce_mean_120_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_120_cast = reduce_mean(axes = reduce_mean_120_axes_0, keep_dims = reduce_mean_120_keep_dims_0, x = reshape_160_cast)[name = tensor("reduce_mean_120_cast")]; + tensor sub_80_cast = sub(x = reshape_160_cast, y = reduce_mean_120_cast)[name = tensor("sub_80_cast")]; + tensor square_40_cast = square(x = sub_80_cast)[name = tensor("square_40_cast")]; + tensor reduce_mean_122_axes_0 = const()[name = tensor("reduce_mean_122_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_122_keep_dims_0 = const()[name = tensor("reduce_mean_122_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_122_cast = reduce_mean(axes = reduce_mean_122_axes_0, keep_dims = reduce_mean_122_keep_dims_0, x = square_40_cast)[name = tensor("reduce_mean_122_cast")]; + tensor add_80_y_0_to_fp16 = const()[name = tensor("add_80_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_80_cast = add(x = reduce_mean_122_cast, y = add_80_y_0_to_fp16)[name = tensor("add_80_cast")]; + tensor sqrt_40_cast = sqrt(x = add_80_cast)[name = tensor("sqrt_40_cast")]; + tensor real_div_40_cast = real_div(x = sub_80_cast, y = sqrt_40_cast)[name = tensor("real_div_40_cast")]; + tensor reshape_161_shape_0 = const()[name = tensor("reshape_161_shape_0"), val = tensor([2, 320, 128, 128])]; + tensor reshape_161_cast = reshape(shape = reshape_161_shape_0, x = real_div_40_cast)[name = tensor("reshape_161_cast")]; + tensor add_81_gamma_0_to_fp16 = const()[name = tensor("add_81_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1921012608)))]; + tensor add_81_beta_0_to_fp16 = const()[name = tensor("add_81_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1921013312)))]; + tensor add_81_epsilon_0_to_fp16 = const()[name = tensor("add_81_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_81_cast = batch_norm(beta = add_81_beta_0_to_fp16, epsilon = add_81_epsilon_0_to_fp16, gamma = add_81_gamma_0_to_fp16, mean = add_1_mean_0_to_fp16, variance = add_1_variance_0_to_fp16, x = reshape_161_cast)[name = 
tensor("add_81_cast")]; + tensor input_839_cast = silu(x = add_81_cast)[name = tensor("input_839_cast")]; + tensor var_13786 = const()[name = tensor("op_13786"), val = tensor([1, 1])]; + tensor var_13788 = const()[name = tensor("op_13788"), val = tensor([1, 1])]; + tensor hidden_states_581_pad_type_0 = const()[name = tensor("hidden_states_581_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_581_pad_0 = const()[name = tensor("hidden_states_581_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor unet_up_blocks_2_resnets_0_conv2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1921014016))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1921705280))), name = tensor("unet_up_blocks_2_resnets_0_conv2_weight_to_fp16_palettized"), shape = tensor([320, 320, 3, 3])]; + tensor unet_up_blocks_2_resnets_0_conv2_bias_to_fp16 = const()[name = tensor("unet_up_blocks_2_resnets_0_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1921705472)))]; + tensor hidden_states_581_cast = conv(bias = unet_up_blocks_2_resnets_0_conv2_bias_to_fp16, dilations = var_13788, groups = var_31, pad = hidden_states_581_pad_0, pad_type = hidden_states_581_pad_type_0, strides = var_13786, weight = unet_up_blocks_2_resnets_0_conv2_weight_to_fp16_palettized, x = input_839_cast)[name = tensor("hidden_states_581_cast")]; + tensor var_13793 = const()[name = tensor("op_13793"), val = tensor([1, 1])]; + tensor var_13795 = const()[name = tensor("op_13795"), val = tensor([1, 1])]; + tensor x_17_pad_type_0 = const()[name = tensor("x_17_pad_type_0"), val = tensor("custom")]; + tensor x_17_pad_0 = const()[name = tensor("x_17_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_2_resnets_0_conv_shortcut_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1921706176))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1921936640))), name = tensor("unet_up_blocks_2_resnets_0_conv_shortcut_weight_to_fp16_palettized"), shape = tensor([320, 960, 1, 1])]; + tensor unet_up_blocks_2_resnets_0_conv_shortcut_bias_to_fp16 = const()[name = tensor("unet_up_blocks_2_resnets_0_conv_shortcut_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1921936832)))]; + tensor x_17_cast = conv(bias = unet_up_blocks_2_resnets_0_conv_shortcut_bias_to_fp16, dilations = var_13795, groups = var_31, pad = x_17_pad_0, pad_type = x_17_pad_type_0, strides = var_13793, weight = unet_up_blocks_2_resnets_0_conv_shortcut_weight_to_fp16_palettized, x = input_827_cast)[name = tensor("x_17_cast")]; + tensor hidden_states_583_cast = add(x = x_17_cast, y = hidden_states_581_cast)[name = tensor("hidden_states_583_cast")]; + tensor input_841_interleave_0 = const()[name = tensor("input_841_interleave_0"), val = tensor(false)]; + tensor input_841_cast = concat(axis = var_31, interleave = input_841_interleave_0, values = (hidden_states_583_cast, input_29_cast))[name = tensor("input_841_cast")]; + tensor reshape_164_shape_0 = const()[name = tensor("reshape_164_shape_0"), val = tensor([2, 32, 20, 128, 128])]; + tensor reshape_164_cast = reshape(shape = reshape_164_shape_0, x = input_841_cast)[name = tensor("reshape_164_cast")]; + tensor reduce_mean_123_axes_0 = const()[name = 
tensor("reduce_mean_123_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_123_keep_dims_0 = const()[name = tensor("reduce_mean_123_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_123_cast = reduce_mean(axes = reduce_mean_123_axes_0, keep_dims = reduce_mean_123_keep_dims_0, x = reshape_164_cast)[name = tensor("reduce_mean_123_cast")]; + tensor sub_82_cast = sub(x = reshape_164_cast, y = reduce_mean_123_cast)[name = tensor("sub_82_cast")]; + tensor square_41_cast = square(x = sub_82_cast)[name = tensor("square_41_cast")]; + tensor reduce_mean_125_axes_0 = const()[name = tensor("reduce_mean_125_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_125_keep_dims_0 = const()[name = tensor("reduce_mean_125_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_125_cast = reduce_mean(axes = reduce_mean_125_axes_0, keep_dims = reduce_mean_125_keep_dims_0, x = square_41_cast)[name = tensor("reduce_mean_125_cast")]; + tensor add_82_y_0_to_fp16 = const()[name = tensor("add_82_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_82_cast = add(x = reduce_mean_125_cast, y = add_82_y_0_to_fp16)[name = tensor("add_82_cast")]; + tensor sqrt_41_cast = sqrt(x = add_82_cast)[name = tensor("sqrt_41_cast")]; + tensor real_div_41_cast = real_div(x = sub_82_cast, y = sqrt_41_cast)[name = tensor("real_div_41_cast")]; + tensor reshape_165_shape_0 = const()[name = tensor("reshape_165_shape_0"), val = tensor([2, 640, 128, 128])]; + tensor reshape_165_cast = reshape(shape = reshape_165_shape_0, x = real_div_41_cast)[name = tensor("reshape_165_cast")]; + tensor add_83_gamma_0_to_fp16 = const()[name = tensor("add_83_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1921937536)))]; + tensor add_83_beta_0_to_fp16 = const()[name = tensor("add_83_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1921938880)))]; + tensor add_83_epsilon_0_to_fp16 = const()[name = tensor("add_83_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_83_cast = batch_norm(beta = add_83_beta_0_to_fp16, epsilon = add_83_epsilon_0_to_fp16, gamma = add_83_gamma_0_to_fp16, mean = add_11_mean_0_to_fp16, variance = add_11_variance_0_to_fp16, x = reshape_165_cast)[name = tensor("add_83_cast")]; + tensor input_845_cast = silu(x = add_83_cast)[name = tensor("input_845_cast")]; + tensor var_13813 = const()[name = tensor("op_13813"), val = tensor([1, 1])]; + tensor var_13815 = const()[name = tensor("op_13815"), val = tensor([1, 1])]; + tensor hidden_states_585_pad_type_0 = const()[name = tensor("hidden_states_585_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_585_pad_0 = const()[name = tensor("hidden_states_585_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor unet_up_blocks_2_resnets_1_conv1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1921940224))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1923322688))), name = tensor("unet_up_blocks_2_resnets_1_conv1_weight_to_fp16_palettized"), shape = tensor([320, 640, 3, 3])]; + tensor unet_up_blocks_2_resnets_1_conv1_bias_to_fp16 = const()[name = tensor("unet_up_blocks_2_resnets_1_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1923322880)))]; + tensor hidden_states_585_cast = conv(bias = unet_up_blocks_2_resnets_1_conv1_bias_to_fp16, dilations = var_13815, groups = 
var_31, pad = hidden_states_585_pad_0, pad_type = hidden_states_585_pad_type_0, strides = var_13813, weight = unet_up_blocks_2_resnets_1_conv1_weight_to_fp16_palettized, x = input_845_cast)[name = tensor("hidden_states_585_cast")]; + tensor var_13821 = const()[name = tensor("op_13821"), val = tensor([1, 1])]; + tensor var_13823 = const()[name = tensor("op_13823"), val = tensor([1, 1])]; + tensor temb_31_pad_type_0 = const()[name = tensor("temb_31_pad_type_0"), val = tensor("custom")]; + tensor temb_31_pad_0 = const()[name = tensor("temb_31_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_2_resnets_1_time_emb_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1923323584))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1923630848))), name = tensor("unet_up_blocks_2_resnets_1_time_emb_proj_weight_to_fp16_palettized"), shape = tensor([320, 1280, 1, 1])]; + tensor unet_up_blocks_2_resnets_1_time_emb_proj_bias_to_fp16 = const()[name = tensor("unet_up_blocks_2_resnets_1_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1923631040)))]; + tensor temb_31_cast = conv(bias = unet_up_blocks_2_resnets_1_time_emb_proj_bias_to_fp16, dilations = var_13823, groups = var_31, pad = temb_31_pad_0, pad_type = temb_31_pad_type_0, strides = var_13821, weight = unet_up_blocks_2_resnets_1_time_emb_proj_weight_to_fp16_palettized, x = input_21_cast)[name = tensor("temb_31_cast")]; + tensor input_849_cast = add(x = hidden_states_585_cast, y = temb_31_cast)[name = tensor("input_849_cast")]; + tensor reshape_168_shape_0 = const()[name = tensor("reshape_168_shape_0"), val = tensor([2, 32, 10, 128, 128])]; + tensor reshape_168_cast = reshape(shape = reshape_168_shape_0, x = input_849_cast)[name = tensor("reshape_168_cast")]; + tensor reduce_mean_126_axes_0 = const()[name = tensor("reduce_mean_126_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_126_keep_dims_0 = const()[name = tensor("reduce_mean_126_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_126_cast = reduce_mean(axes = reduce_mean_126_axes_0, keep_dims = reduce_mean_126_keep_dims_0, x = reshape_168_cast)[name = tensor("reduce_mean_126_cast")]; + tensor sub_84_cast = sub(x = reshape_168_cast, y = reduce_mean_126_cast)[name = tensor("sub_84_cast")]; + tensor square_42_cast = square(x = sub_84_cast)[name = tensor("square_42_cast")]; + tensor reduce_mean_128_axes_0 = const()[name = tensor("reduce_mean_128_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_128_keep_dims_0 = const()[name = tensor("reduce_mean_128_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_128_cast = reduce_mean(axes = reduce_mean_128_axes_0, keep_dims = reduce_mean_128_keep_dims_0, x = square_42_cast)[name = tensor("reduce_mean_128_cast")]; + tensor add_84_y_0_to_fp16 = const()[name = tensor("add_84_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_84_cast = add(x = reduce_mean_128_cast, y = add_84_y_0_to_fp16)[name = tensor("add_84_cast")]; + tensor sqrt_42_cast = sqrt(x = add_84_cast)[name = tensor("sqrt_42_cast")]; + tensor real_div_42_cast = real_div(x = sub_84_cast, y = sqrt_42_cast)[name = tensor("real_div_42_cast")]; + tensor reshape_169_shape_0 = const()[name = tensor("reshape_169_shape_0"), val = tensor([2, 320, 128, 128])]; + tensor reshape_169_cast = reshape(shape = reshape_169_shape_0, x = real_div_42_cast)[name = 
tensor("reshape_169_cast")]; + tensor add_85_gamma_0_to_fp16 = const()[name = tensor("add_85_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1923631744)))]; + tensor add_85_beta_0_to_fp16 = const()[name = tensor("add_85_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1923632448)))]; + tensor add_85_epsilon_0_to_fp16 = const()[name = tensor("add_85_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_85_cast = batch_norm(beta = add_85_beta_0_to_fp16, epsilon = add_85_epsilon_0_to_fp16, gamma = add_85_gamma_0_to_fp16, mean = add_1_mean_0_to_fp16, variance = add_1_variance_0_to_fp16, x = reshape_169_cast)[name = tensor("add_85_cast")]; + tensor input_853_cast = silu(x = add_85_cast)[name = tensor("input_853_cast")]; + tensor var_13833 = const()[name = tensor("op_13833"), val = tensor([1, 1])]; + tensor var_13835 = const()[name = tensor("op_13835"), val = tensor([1, 1])]; + tensor hidden_states_587_pad_type_0 = const()[name = tensor("hidden_states_587_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_587_pad_0 = const()[name = tensor("hidden_states_587_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor unet_up_blocks_2_resnets_1_conv2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1923633152))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1924324416))), name = tensor("unet_up_blocks_2_resnets_1_conv2_weight_to_fp16_palettized"), shape = tensor([320, 320, 3, 3])]; + tensor unet_up_blocks_2_resnets_1_conv2_bias_to_fp16 = const()[name = tensor("unet_up_blocks_2_resnets_1_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1924324608)))]; + tensor hidden_states_587_cast = conv(bias = unet_up_blocks_2_resnets_1_conv2_bias_to_fp16, dilations = var_13835, groups = var_31, pad = hidden_states_587_pad_0, pad_type = hidden_states_587_pad_type_0, strides = var_13833, weight = unet_up_blocks_2_resnets_1_conv2_weight_to_fp16_palettized, x = input_853_cast)[name = tensor("hidden_states_587_cast")]; + tensor var_13840 = const()[name = tensor("op_13840"), val = tensor([1, 1])]; + tensor var_13842 = const()[name = tensor("op_13842"), val = tensor([1, 1])]; + tensor x_19_pad_type_0 = const()[name = tensor("x_19_pad_type_0"), val = tensor("custom")]; + tensor x_19_pad_0 = const()[name = tensor("x_19_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_2_resnets_1_conv_shortcut_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1924325312))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1924478976))), name = tensor("unet_up_blocks_2_resnets_1_conv_shortcut_weight_to_fp16_palettized"), shape = tensor([320, 640, 1, 1])]; + tensor unet_up_blocks_2_resnets_1_conv_shortcut_bias_to_fp16 = const()[name = tensor("unet_up_blocks_2_resnets_1_conv_shortcut_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1924479168)))]; + tensor x_19_cast = conv(bias = unet_up_blocks_2_resnets_1_conv_shortcut_bias_to_fp16, dilations = var_13842, groups = var_31, pad = x_19_pad_0, pad_type = x_19_pad_type_0, strides = var_13840, weight = unet_up_blocks_2_resnets_1_conv_shortcut_weight_to_fp16_palettized, x = 
input_841_cast)[name = tensor("x_19_cast")]; + tensor hidden_states_589_cast = add(x = x_19_cast, y = hidden_states_587_cast)[name = tensor("hidden_states_589_cast")]; + tensor input_855_interleave_0 = const()[name = tensor("input_855_interleave_0"), val = tensor(false)]; + tensor input_855_cast = concat(axis = var_31, interleave = input_855_interleave_0, values = (hidden_states_589_cast, input_13_cast))[name = tensor("input_855_cast")]; + tensor reshape_172_shape_0 = const()[name = tensor("reshape_172_shape_0"), val = tensor([2, 32, 20, 128, 128])]; + tensor reshape_172_cast = reshape(shape = reshape_172_shape_0, x = input_855_cast)[name = tensor("reshape_172_cast")]; + tensor reduce_mean_129_axes_0 = const()[name = tensor("reduce_mean_129_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_129_keep_dims_0 = const()[name = tensor("reduce_mean_129_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_129_cast = reduce_mean(axes = reduce_mean_129_axes_0, keep_dims = reduce_mean_129_keep_dims_0, x = reshape_172_cast)[name = tensor("reduce_mean_129_cast")]; + tensor sub_86_cast = sub(x = reshape_172_cast, y = reduce_mean_129_cast)[name = tensor("sub_86_cast")]; + tensor square_43_cast = square(x = sub_86_cast)[name = tensor("square_43_cast")]; + tensor reduce_mean_131_axes_0 = const()[name = tensor("reduce_mean_131_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_131_keep_dims_0 = const()[name = tensor("reduce_mean_131_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_131_cast = reduce_mean(axes = reduce_mean_131_axes_0, keep_dims = reduce_mean_131_keep_dims_0, x = square_43_cast)[name = tensor("reduce_mean_131_cast")]; + tensor add_86_y_0_to_fp16 = const()[name = tensor("add_86_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_86_cast = add(x = reduce_mean_131_cast, y = add_86_y_0_to_fp16)[name = tensor("add_86_cast")]; + tensor sqrt_43_cast = sqrt(x = add_86_cast)[name = tensor("sqrt_43_cast")]; + tensor real_div_43_cast = real_div(x = sub_86_cast, y = sqrt_43_cast)[name = tensor("real_div_43_cast")]; + tensor reshape_173_shape_0 = const()[name = tensor("reshape_173_shape_0"), val = tensor([2, 640, 128, 128])]; + tensor reshape_173_cast = reshape(shape = reshape_173_shape_0, x = real_div_43_cast)[name = tensor("reshape_173_cast")]; + tensor add_87_gamma_0_to_fp16 = const()[name = tensor("add_87_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1924479872)))]; + tensor add_87_beta_0_to_fp16 = const()[name = tensor("add_87_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1924481216)))]; + tensor add_87_epsilon_0_to_fp16 = const()[name = tensor("add_87_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_87_cast = batch_norm(beta = add_87_beta_0_to_fp16, epsilon = add_87_epsilon_0_to_fp16, gamma = add_87_gamma_0_to_fp16, mean = add_11_mean_0_to_fp16, variance = add_11_variance_0_to_fp16, x = reshape_173_cast)[name = tensor("add_87_cast")]; + tensor input_859_cast = silu(x = add_87_cast)[name = tensor("input_859_cast")]; + tensor var_13860 = const()[name = tensor("op_13860"), val = tensor([1, 1])]; + tensor var_13862 = const()[name = tensor("op_13862"), val = tensor([1, 1])]; + tensor hidden_states_591_pad_type_0 = const()[name = tensor("hidden_states_591_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_591_pad_0 = const()[name = tensor("hidden_states_591_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor 
unet_up_blocks_2_resnets_2_conv1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1924482560))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1925865024))), name = tensor("unet_up_blocks_2_resnets_2_conv1_weight_to_fp16_palettized"), shape = tensor([320, 640, 3, 3])]; + tensor unet_up_blocks_2_resnets_2_conv1_bias_to_fp16 = const()[name = tensor("unet_up_blocks_2_resnets_2_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1925865216)))]; + tensor hidden_states_591_cast = conv(bias = unet_up_blocks_2_resnets_2_conv1_bias_to_fp16, dilations = var_13862, groups = var_31, pad = hidden_states_591_pad_0, pad_type = hidden_states_591_pad_type_0, strides = var_13860, weight = unet_up_blocks_2_resnets_2_conv1_weight_to_fp16_palettized, x = input_859_cast)[name = tensor("hidden_states_591_cast")]; + tensor var_13868 = const()[name = tensor("op_13868"), val = tensor([1, 1])]; + tensor var_13870 = const()[name = tensor("op_13870"), val = tensor([1, 1])]; + tensor temb_pad_type_0 = const()[name = tensor("temb_pad_type_0"), val = tensor("custom")]; + tensor temb_pad_0 = const()[name = tensor("temb_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_2_resnets_2_time_emb_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1925865920))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1926173184))), name = tensor("unet_up_blocks_2_resnets_2_time_emb_proj_weight_to_fp16_palettized"), shape = tensor([320, 1280, 1, 1])]; + tensor unet_up_blocks_2_resnets_2_time_emb_proj_bias_to_fp16 = const()[name = tensor("unet_up_blocks_2_resnets_2_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1926173376)))]; + tensor temb_cast = conv(bias = unet_up_blocks_2_resnets_2_time_emb_proj_bias_to_fp16, dilations = var_13870, groups = var_31, pad = temb_pad_0, pad_type = temb_pad_type_0, strides = var_13868, weight = unet_up_blocks_2_resnets_2_time_emb_proj_weight_to_fp16_palettized, x = input_21_cast)[name = tensor("temb_cast")]; + tensor input_863_cast = add(x = hidden_states_591_cast, y = temb_cast)[name = tensor("input_863_cast")]; + tensor reshape_176_shape_0 = const()[name = tensor("reshape_176_shape_0"), val = tensor([2, 32, 10, 128, 128])]; + tensor reshape_176_cast = reshape(shape = reshape_176_shape_0, x = input_863_cast)[name = tensor("reshape_176_cast")]; + tensor reduce_mean_132_axes_0 = const()[name = tensor("reduce_mean_132_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_132_keep_dims_0 = const()[name = tensor("reduce_mean_132_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_132_cast = reduce_mean(axes = reduce_mean_132_axes_0, keep_dims = reduce_mean_132_keep_dims_0, x = reshape_176_cast)[name = tensor("reduce_mean_132_cast")]; + tensor sub_88_cast = sub(x = reshape_176_cast, y = reduce_mean_132_cast)[name = tensor("sub_88_cast")]; + tensor square_44_cast = square(x = sub_88_cast)[name = tensor("square_44_cast")]; + tensor reduce_mean_134_axes_0 = const()[name = tensor("reduce_mean_134_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_134_keep_dims_0 = const()[name = tensor("reduce_mean_134_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_134_cast = reduce_mean(axes = 
reduce_mean_134_axes_0, keep_dims = reduce_mean_134_keep_dims_0, x = square_44_cast)[name = tensor("reduce_mean_134_cast")]; + tensor add_88_y_0_to_fp16 = const()[name = tensor("add_88_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_88_cast = add(x = reduce_mean_134_cast, y = add_88_y_0_to_fp16)[name = tensor("add_88_cast")]; + tensor sqrt_44_cast = sqrt(x = add_88_cast)[name = tensor("sqrt_44_cast")]; + tensor real_div_44_cast = real_div(x = sub_88_cast, y = sqrt_44_cast)[name = tensor("real_div_44_cast")]; + tensor reshape_177_shape_0 = const()[name = tensor("reshape_177_shape_0"), val = tensor([2, 320, 128, 128])]; + tensor reshape_177_cast = reshape(shape = reshape_177_shape_0, x = real_div_44_cast)[name = tensor("reshape_177_cast")]; + tensor add_89_gamma_0_to_fp16 = const()[name = tensor("add_89_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1926174080)))]; + tensor add_89_beta_0_to_fp16 = const()[name = tensor("add_89_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1926174784)))]; + tensor add_89_epsilon_0_to_fp16 = const()[name = tensor("add_89_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_89_cast = batch_norm(beta = add_89_beta_0_to_fp16, epsilon = add_89_epsilon_0_to_fp16, gamma = add_89_gamma_0_to_fp16, mean = add_1_mean_0_to_fp16, variance = add_1_variance_0_to_fp16, x = reshape_177_cast)[name = tensor("add_89_cast")]; + tensor input_867_cast = silu(x = add_89_cast)[name = tensor("input_867_cast")]; + tensor var_13880 = const()[name = tensor("op_13880"), val = tensor([1, 1])]; + tensor var_13882 = const()[name = tensor("op_13882"), val = tensor([1, 1])]; + tensor hidden_states_pad_type_0 = const()[name = tensor("hidden_states_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_pad_0 = const()[name = tensor("hidden_states_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor unet_up_blocks_2_resnets_2_conv2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1926175488))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1926866752))), name = tensor("unet_up_blocks_2_resnets_2_conv2_weight_to_fp16_palettized"), shape = tensor([320, 320, 3, 3])]; + tensor unet_up_blocks_2_resnets_2_conv2_bias_to_fp16 = const()[name = tensor("unet_up_blocks_2_resnets_2_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1926866944)))]; + tensor hidden_states_cast = conv(bias = unet_up_blocks_2_resnets_2_conv2_bias_to_fp16, dilations = var_13882, groups = var_31, pad = hidden_states_pad_0, pad_type = hidden_states_pad_type_0, strides = var_13880, weight = unet_up_blocks_2_resnets_2_conv2_weight_to_fp16_palettized, x = input_867_cast)[name = tensor("hidden_states_cast")]; + tensor var_13887 = const()[name = tensor("op_13887"), val = tensor([1, 1])]; + tensor var_13889 = const()[name = tensor("op_13889"), val = tensor([1, 1])]; + tensor x_pad_type_0 = const()[name = tensor("x_pad_type_0"), val = tensor("custom")]; + tensor x_pad_0 = const()[name = tensor("x_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor unet_up_blocks_2_resnets_2_conv_shortcut_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1926867648))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(1927021312))), name = tensor("unet_up_blocks_2_resnets_2_conv_shortcut_weight_to_fp16_palettized"), shape = tensor([320, 640, 1, 1])]; + tensor unet_up_blocks_2_resnets_2_conv_shortcut_bias_to_fp16 = const()[name = tensor("unet_up_blocks_2_resnets_2_conv_shortcut_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1927021504)))]; + tensor x_cast = conv(bias = unet_up_blocks_2_resnets_2_conv_shortcut_bias_to_fp16, dilations = var_13889, groups = var_31, pad = x_pad_0, pad_type = x_pad_type_0, strides = var_13887, weight = unet_up_blocks_2_resnets_2_conv_shortcut_weight_to_fp16_palettized, x = input_855_cast)[name = tensor("x_cast")]; + tensor input_869_cast = add(x = x_cast, y = hidden_states_cast)[name = tensor("input_869_cast")]; + tensor reshape_180_shape_0 = const()[name = tensor("reshape_180_shape_0"), val = tensor([2, 32, 10, 128, 128])]; + tensor reshape_180_cast = reshape(shape = reshape_180_shape_0, x = input_869_cast)[name = tensor("reshape_180_cast")]; + tensor reduce_mean_135_axes_0 = const()[name = tensor("reduce_mean_135_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_135_keep_dims_0 = const()[name = tensor("reduce_mean_135_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_135_cast = reduce_mean(axes = reduce_mean_135_axes_0, keep_dims = reduce_mean_135_keep_dims_0, x = reshape_180_cast)[name = tensor("reduce_mean_135_cast")]; + tensor sub_90_cast = sub(x = reshape_180_cast, y = reduce_mean_135_cast)[name = tensor("sub_90_cast")]; + tensor square_45_cast = square(x = sub_90_cast)[name = tensor("square_45_cast")]; + tensor reduce_mean_137_axes_0 = const()[name = tensor("reduce_mean_137_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_137_keep_dims_0 = const()[name = tensor("reduce_mean_137_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_137_cast = reduce_mean(axes = reduce_mean_137_axes_0, keep_dims = reduce_mean_137_keep_dims_0, x = square_45_cast)[name = tensor("reduce_mean_137_cast")]; + tensor add_90_y_0_to_fp16 = const()[name = tensor("add_90_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_90_cast = add(x = reduce_mean_137_cast, y = add_90_y_0_to_fp16)[name = tensor("add_90_cast")]; + tensor sqrt_45_cast = sqrt(x = add_90_cast)[name = tensor("sqrt_45_cast")]; + tensor real_div_45_cast = real_div(x = sub_90_cast, y = sqrt_45_cast)[name = tensor("real_div_45_cast")]; + tensor reshape_181_shape_0 = const()[name = tensor("reshape_181_shape_0"), val = tensor([2, 320, 128, 128])]; + tensor reshape_181_cast = reshape(shape = reshape_181_shape_0, x = real_div_45_cast)[name = tensor("reshape_181_cast")]; + tensor add_91_gamma_0_to_fp16 = const()[name = tensor("add_91_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1927022208)))]; + tensor add_91_beta_0_to_fp16 = const()[name = tensor("add_91_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1927022912)))]; + tensor add_91_epsilon_0_to_fp16 = const()[name = tensor("add_91_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_91_cast = batch_norm(beta = add_91_beta_0_to_fp16, epsilon = add_91_epsilon_0_to_fp16, gamma = add_91_gamma_0_to_fp16, mean = add_1_mean_0_to_fp16, variance = add_1_variance_0_to_fp16, x = reshape_181_cast)[name = tensor("add_91_cast")]; + tensor input_cast = silu(x = add_91_cast)[name = tensor("input_cast")]; + tensor var_13899 = const()[name = tensor("op_13899"), val = tensor([1, 1])]; + tensor 
var_13901 = const()[name = tensor("op_13901"), val = tensor([1, 1])]; + tensor var_13903_pad_type_0 = const()[name = tensor("op_13903_pad_type_0"), val = tensor("custom")]; + tensor var_13903_pad_0 = const()[name = tensor("op_13903_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor unet_conv_out_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1927023616))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1927032320))), name = tensor("unet_conv_out_weight_to_fp16_palettized"), shape = tensor([4, 320, 3, 3])]; + tensor unet_conv_out_bias_to_fp16 = const()[name = tensor("unet_conv_out_bias_to_fp16"), val = tensor([0x1.6e8p-9, -0x1.6ccp-10, 0x1.ff8p-10, -0x1.9dp-9])]; + tensor var_13903_cast = conv(bias = unet_conv_out_bias_to_fp16, dilations = var_13901, groups = var_31, pad = var_13903_pad_0, pad_type = var_13903_pad_type_0, strides = var_13899, weight = unet_conv_out_weight_to_fp16_palettized, x = input_cast)[name = tensor("op_13903_cast")]; + tensor var_13903_cast_to_fp32_dtype_0 = const()[name = tensor("op_13903_cast_to_fp32_dtype_0"), val = tensor("fp32")]; + tensor noise_pred = cast(dtype = var_13903_cast_to_fp32_dtype_0, x = var_13903_cast)[name = tensor("cast_0")]; + } -> (noise_pred); +} \ No newline at end of file
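For readers who want to inspect the compiled bundle this model.mil belongs to, the short Swift sketch below loads Unet.mlmodelc with Core ML and prints its input and output feature names (the five arguments of func main above, and noise_pred). The file path and compute-unit choice are assumptions for illustration, not part of this repository.

import CoreML
import Foundation

// Minimal sketch: load the compiled UNet bundle containing this model.mil
// and list its declared inputs/outputs. The path is an assumed example.
let unetURL = URL(fileURLWithPath: "original/compiled/Unet.mlmodelc")

let config = MLModelConfiguration()
config.computeUnits = .all  // let Core ML pick CPU / GPU / Neural Engine

do {
    let unet = try MLModel(contentsOf: unetURL, configuration: config)
    // Expected inputs: sample, timestep, encoder_hidden_states, text_embeds, time_ids
    print(Array(unet.modelDescription.inputDescriptionsByName.keys))
    // Expected output: noise_pred
    print(Array(unet.modelDescription.outputDescriptionsByName.keys))
} catch {
    print("Failed to load Unet.mlmodelc: \(error)")
}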