Datasets:

| Column | Type | Lengths / values |
|---|---|---|
| entry_point | string | lengths 1–65 |
| original_triton_python_code | string | lengths 208–619k |
| optimised_triton_code | string | lengths 1.15k–275k |
| repo_name | string | lengths 7–115 |
| module_name | string | lengths 1–65 |
| synthetic | bool | 1 class |
| uuid | int64 | 0–18.5k |
| licenses | sequence | lengths 1–6 |
| stars | int64 | 0–19.8k |
| sha | string | length 40 |
| repo_link | string | lengths 72–180 |
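Each row pairs a module's Inductor-generated Triton code (`original_triton_python_code`) with an equivalent cleaned-up version (`optimised_triton_code`), plus provenance metadata. Below is a minimal loading sketch using the Hugging Face `datasets` library; the dataset identifier is a placeholder, since the actual hub path is not given here:

```python
from datasets import load_dataset

# "user/dataset-name" is a placeholder id, not the real hub path.
ds = load_dataset("user/dataset-name", split="train")

row = ds[0]
print(row["entry_point"])   # e.g. "ResidualDenseBlock"
print(row["repo_link"])     # source repository pinned to the recorded sha
print(len(row["optimised_triton_code"]))
```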
entry_point: ResidualDenseBlock

original_triton_python_code:

# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/ry/cryiobypkawkmdldevamc2xgthshbnrm6is6ppwpjekkiy25lx7z.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %where], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16) % 8
x0 = xindex % 16
x2 = (xindex // 128)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + (64*x2)), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + (x0 + (16*((-4) + x1)) + (64*x2)), tmp6 & xmask, other=0.0)
tmp10 = tl.load(in_ptr2 + ((-4) + x1), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = 0.0
tmp13 = tmp11 > tmp12
tmp14 = 0.2
tmp15 = tmp11 * tmp14
tmp16 = tl.where(tmp13, tmp11, tmp15)
tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
tmp18 = tl.where(tmp6, tmp16, tmp17)
tmp19 = tl.where(tmp4, tmp5, tmp18)
tl.store(out_ptr0 + (x3), tmp19, xmask)
''', device_str='cuda')
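# A minimal eager-mode sketch of what triton_poi_fused_cat_0 computes (shapes
# as asserted in call() below); the kernel fuses the channel concat with the
# bias add and LeakyReLU(0.2) applied to the conv output:
#   out1 = torch.nn.functional.leaky_relu(buf0 + bias[None, :, None, None], 0.2)
#   buf1 = torch.cat([x, out1], dim=1)  # (4, 4, 4, 4) + (4, 4, 4, 4) -> (4, 8, 4, 4)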
# kernel path: runs/run_shard_7/inductor_cache/pd/cpd4meipbe2zv2hwyhb6cjpvxof6tzs5i6jeu6dqinxcb26whgcv.py
# Topologically Sorted Source Nodes: [cat_1], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat_1 => cat_1
# Graph fragment:
# %cat_1 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %where, %where_1], 1), kwargs = {})
triton_poi_fused_cat_1 = async_compile.triton('triton_poi_fused_cat_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16) % 12
x0 = xindex % 16
x2 = (xindex // 192)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + (64*x2)), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x0 + (16*((-4) + x1)) + (64*x2)), tmp9 & xmask, other=0.0)
tmp11 = tl.load(in_ptr2 + ((-4) + x1), tmp9 & xmask, eviction_policy='evict_last', other=0.0)
tmp12 = tmp10 + tmp11
tmp13 = 0.0
tmp14 = tmp12 > tmp13
tmp15 = 0.2
tmp16 = tmp12 * tmp15
tmp17 = tl.where(tmp14, tmp12, tmp16)
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp9, tmp17, tmp18)
tmp20 = tmp0 >= tmp7
tmp21 = tl.full([1], 12, tl.int64)
tmp22 = tmp0 < tmp21
tmp23 = tl.load(in_ptr3 + (x0 + (16*((-8) + x1)) + (64*x2)), tmp20 & xmask, other=0.0)
tmp24 = tl.load(in_ptr4 + ((-8) + x1), tmp20 & xmask, eviction_policy='evict_last', other=0.0)
tmp25 = tmp23 + tmp24
tmp26 = tmp25 > tmp13
tmp27 = tmp25 * tmp15
tmp28 = tl.where(tmp26, tmp25, tmp27)
tmp29 = tl.full(tmp28.shape, 0.0, tmp28.dtype)
tmp30 = tl.where(tmp20, tmp28, tmp29)
tmp31 = tl.where(tmp9, tmp19, tmp30)
tmp32 = tl.where(tmp4, tmp5, tmp31)
tl.store(out_ptr0 + (x3), tmp32, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/3c/c3cglho5wyr4b4bwpkwiar5a4l3ewyyzusu3gf2a2qlomqutylik.py
# Topologically Sorted Source Nodes: [cat_2], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat_2 => cat_2
# Graph fragment:
# %cat_2 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %where, %where_1, %where_2], 1), kwargs = {})
triton_poi_fused_cat_2 = async_compile.triton('triton_poi_fused_cat_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16) % 16
x0 = xindex % 16
x2 = (xindex // 256)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + (64*x2)), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x0 + (16*((-4) + x1)) + (64*x2)), tmp9 & xmask, other=0.0)
tmp11 = tl.load(in_ptr2 + ((-4) + x1), tmp9 & xmask, eviction_policy='evict_last', other=0.0)
tmp12 = tmp10 + tmp11
tmp13 = 0.0
tmp14 = tmp12 > tmp13
tmp15 = 0.2
tmp16 = tmp12 * tmp15
tmp17 = tl.where(tmp14, tmp12, tmp16)
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp9, tmp17, tmp18)
tmp20 = tmp0 >= tmp7
tmp21 = tl.full([1], 12, tl.int64)
tmp22 = tmp0 < tmp21
tmp23 = tmp20 & tmp22
tmp24 = tl.load(in_ptr3 + (x0 + (16*((-8) + x1)) + (64*x2)), tmp23 & xmask, other=0.0)
tmp25 = tl.load(in_ptr4 + ((-8) + x1), tmp23 & xmask, eviction_policy='evict_last', other=0.0)
tmp26 = tmp24 + tmp25
tmp27 = tmp26 > tmp13
tmp28 = tmp26 * tmp15
tmp29 = tl.where(tmp27, tmp26, tmp28)
tmp30 = tl.full(tmp29.shape, 0.0, tmp29.dtype)
tmp31 = tl.where(tmp23, tmp29, tmp30)
tmp32 = tmp0 >= tmp21
tmp33 = tl.full([1], 16, tl.int64)
tmp34 = tmp0 < tmp33
tmp35 = tl.load(in_ptr5 + (x0 + (16*((-12) + x1)) + (64*x2)), tmp32 & xmask, other=0.0)
tmp36 = tl.load(in_ptr6 + ((-12) + x1), tmp32 & xmask, eviction_policy='evict_last', other=0.0)
tmp37 = tmp35 + tmp36
tmp38 = tmp37 > tmp13
tmp39 = tmp37 * tmp15
tmp40 = tl.where(tmp38, tmp37, tmp39)
tmp41 = tl.full(tmp40.shape, 0.0, tmp40.dtype)
tmp42 = tl.where(tmp32, tmp40, tmp41)
tmp43 = tl.where(tmp23, tmp31, tmp42)
tmp44 = tl.where(tmp9, tmp19, tmp43)
tmp45 = tl.where(tmp4, tmp5, tmp44)
tl.store(out_ptr0 + (x3), tmp45, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/la/clagauvrhsnsmt2s2mrzhhfmw6omixx6owqe7iwdjdtls52tkc5v.py
# Topologically Sorted Source Nodes: [cat_3], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat_3 => cat_3
# Graph fragment:
# %cat_3 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %where, %where_1, %where_2, %where_3], 1), kwargs = {})
triton_poi_fused_cat_3 = async_compile.triton('triton_poi_fused_cat_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1280
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16) % 20
x0 = xindex % 16
x2 = (xindex // 320)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + (64*x2)), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x0 + (16*((-4) + x1)) + (64*x2)), tmp9 & xmask, other=0.0)
tmp11 = tl.load(in_ptr2 + ((-4) + x1), tmp9 & xmask, eviction_policy='evict_last', other=0.0)
tmp12 = tmp10 + tmp11
tmp13 = 0.0
tmp14 = tmp12 > tmp13
tmp15 = 0.2
tmp16 = tmp12 * tmp15
tmp17 = tl.where(tmp14, tmp12, tmp16)
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp9, tmp17, tmp18)
tmp20 = tmp0 >= tmp7
tmp21 = tl.full([1], 12, tl.int64)
tmp22 = tmp0 < tmp21
tmp23 = tmp20 & tmp22
tmp24 = tl.load(in_ptr3 + (x0 + (16*((-8) + x1)) + (64*x2)), tmp23 & xmask, other=0.0)
tmp25 = tl.load(in_ptr4 + ((-8) + x1), tmp23 & xmask, eviction_policy='evict_last', other=0.0)
tmp26 = tmp24 + tmp25
tmp27 = tmp26 > tmp13
tmp28 = tmp26 * tmp15
tmp29 = tl.where(tmp27, tmp26, tmp28)
tmp30 = tl.full(tmp29.shape, 0.0, tmp29.dtype)
tmp31 = tl.where(tmp23, tmp29, tmp30)
tmp32 = tmp0 >= tmp21
tmp33 = tl.full([1], 16, tl.int64)
tmp34 = tmp0 < tmp33
tmp35 = tmp32 & tmp34
tmp36 = tl.load(in_ptr5 + (x0 + (16*((-12) + x1)) + (64*x2)), tmp35 & xmask, other=0.0)
tmp37 = tl.load(in_ptr6 + ((-12) + x1), tmp35 & xmask, eviction_policy='evict_last', other=0.0)
tmp38 = tmp36 + tmp37
tmp39 = tmp38 > tmp13
tmp40 = tmp38 * tmp15
tmp41 = tl.where(tmp39, tmp38, tmp40)
tmp42 = tl.full(tmp41.shape, 0.0, tmp41.dtype)
tmp43 = tl.where(tmp35, tmp41, tmp42)
tmp44 = tmp0 >= tmp33
tmp45 = tl.full([1], 20, tl.int64)
tmp46 = tmp0 < tmp45
tmp47 = tl.load(in_ptr7 + (x0 + (16*((-16) + x1)) + (64*x2)), tmp44 & xmask, other=0.0)
tmp48 = tl.load(in_ptr8 + ((-16) + x1), tmp44 & xmask, eviction_policy='evict_last', other=0.0)
tmp49 = tmp47 + tmp48
tmp50 = tmp49 > tmp13
tmp51 = tmp49 * tmp15
tmp52 = tl.where(tmp50, tmp49, tmp51)
tmp53 = tl.full(tmp52.shape, 0.0, tmp52.dtype)
tmp54 = tl.where(tmp44, tmp52, tmp53)
tmp55 = tl.where(tmp35, tmp43, tmp54)
tmp56 = tl.where(tmp23, tmp31, tmp55)
tmp57 = tl.where(tmp9, tmp19, tmp56)
tmp58 = tl.where(tmp4, tmp5, tmp57)
tl.store(out_ptr0 + (x3), tmp58, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/iu/ciubqy3qr45bvfhdurk7hjkhteasgy5mlezftic2idhmf7qyh6ar.py
# Topologically Sorted Source Nodes: [out5, mul, out], Original ATen: [aten.convolution, aten.mul, aten.add]
# Source node to ATen node mapping:
# mul => mul_4
# out => add
# out5 => convolution_4
# Graph fragment:
# %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_3, %primals_10, %primals_11, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_4, 0.2), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, %primals_1), kwargs = {})
triton_poi_fused_add_convolution_mul_4 = async_compile.triton('triton_poi_fused_add_convolution_mul_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_mul_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_mul_4(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (x3), xmask)
tmp2 = tmp0 + tmp1
tmp3 = 0.2
tmp4 = tmp2 * tmp3
tmp6 = tmp4 + tmp5
tl.store(in_out_ptr0 + (x3), tmp6, xmask)
''', device_str='cuda')
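# A minimal eager-mode sketch of this fused epilogue (shapes as asserted in
# call() below): the fifth conv's bias add, the 0.2 residual scaling, and the
# skip connection are done in one pass:
#   buf9 = (buf8 + bias5[None, :, None, None]) * 0.2 + x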
# kernel path: runs/run_shard_7/inductor_cache/s5/cs5o65bd5qls62jyofgsrvlsxw6ku55z7wudf4ziucds3lbj4c7j.py
# Topologically Sorted Source Nodes: [conv2d_3, out4], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
# Source node to ATen node mapping:
# conv2d_3 => convolution_3
# out4 => gt_3, mul_3, where_3
# Graph fragment:
# %convolution_3 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_2, %primals_8, %primals_9, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_3 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_3, 0), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_3, 0.2), kwargs = {})
# %where_3 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_3, %convolution_3, %mul_3), kwargs = {})
# %gt_4 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%where_3, 0), kwargs = {})
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_5 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp8 = tmp7 > tmp3
tl.store(out_ptr0 + (x3), tmp8, xmask)
''', device_str='cuda')
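# A minimal eager-mode sketch: this kernel recomputes LeakyReLU(conv + bias)
# and stores only the boolean mask autograd needs for the leaky_relu gradient:
#   y = torch.nn.functional.leaky_relu(conv_out + bias[None, :, None, None], 0.2)
#   mask = y > 0  # stored as torch.bool in buf10..buf13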
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 8, 3, 3), (72, 9, 3, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 12, 3, 3), (108, 9, 3, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_9, (4, ), (1, ))
assert_size_stride(primals_10, (4, 20, 3, 3), (180, 9, 3, 1))
assert_size_stride(primals_11, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, buf0, primals_3, buf1, 512, grid=grid(512), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_1], Original ATen: [aten.cat]
triton_poi_fused_cat_1.run(primals_1, buf0, primals_3, buf2, primals_5, buf3, 768, grid=grid(768), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1))
buf5 = empty_strided_cuda((4, 16, 4, 4), (256, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_2], Original ATen: [aten.cat]
triton_poi_fused_cat_2.run(primals_1, buf0, primals_3, buf2, primals_5, buf4, primals_7, buf5, 1024, grid=grid(1024), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 4, 4, 4), (64, 16, 4, 1))
buf7 = empty_strided_cuda((4, 20, 4, 4), (320, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_3], Original ATen: [aten.cat]
triton_poi_fused_cat_3.run(primals_1, buf0, primals_3, buf2, primals_5, buf4, primals_7, buf6, primals_9, buf7, 1280, grid=grid(1280), stream=stream0)
# Topologically Sorted Source Nodes: [out5], Original ATen: [aten.convolution]
buf8 = extern_kernels.convolution(buf7, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 4, 4, 4), (64, 16, 4, 1))
buf9 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [out5, mul, out], Original ATen: [aten.convolution, aten.mul, aten.add]
triton_poi_fused_add_convolution_mul_4.run(buf9, primals_11, primals_1, 256, grid=grid(256), stream=stream0)
del primals_11
buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_3, out4], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_5.run(buf6, primals_9, buf10, 256, grid=grid(256), stream=stream0)
del buf6
del primals_9
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_2, out3], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_5.run(buf4, primals_7, buf11, 256, grid=grid(256), stream=stream0)
del buf4
del primals_7
buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_1, out2], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_5.run(buf2, primals_5, buf12, 256, grid=grid(256), stream=stream0)
del buf2
del primals_5
buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d, out1], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_5.run(buf0, primals_3, buf13, 256, grid=grid(256), stream=stream0)
del buf0
del primals_3
return (buf9, primals_1, primals_2, primals_4, primals_6, primals_8, primals_10, buf1, buf3, buf5, buf7, buf10, buf11, buf12, buf13, )
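# Note: the first returned tensor (buf9) is the block output; the remaining
# tensors are saved for the backward pass (the input, conv weights, the concat
# buffers buf1/buf3/buf5/buf7, and the leaky_relu masks buf10-buf13).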
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 8, 3, 3), (72, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 12, 3, 3), (108, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 20, 3, 3), (180, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
optimised_triton_code:

import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 8
x0 = xindex % 16
x2 = xindex // 128
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp6 & xmask,
other=0.0)
tmp10 = tl.load(in_ptr2 + (-4 + x1), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = 0.0
tmp13 = tmp11 > tmp12
tmp14 = 0.2
tmp15 = tmp11 * tmp14
tmp16 = tl.where(tmp13, tmp11, tmp15)
tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
tmp18 = tl.where(tmp6, tmp16, tmp17)
tmp19 = tl.where(tmp4, tmp5, tmp18)
tl.store(out_ptr0 + x3, tmp19, xmask)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 12
x0 = xindex % 16
x2 = xindex // 192
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp9 & xmask,
other=0.0)
tmp11 = tl.load(in_ptr2 + (-4 + x1), tmp9 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp12 = tmp10 + tmp11
tmp13 = 0.0
tmp14 = tmp12 > tmp13
tmp15 = 0.2
tmp16 = tmp12 * tmp15
tmp17 = tl.where(tmp14, tmp12, tmp16)
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp9, tmp17, tmp18)
tmp20 = tmp0 >= tmp7
tl.full([1], 12, tl.int64)
tmp23 = tl.load(in_ptr3 + (x0 + 16 * (-8 + x1) + 64 * x2), tmp20 &
xmask, other=0.0)
tmp24 = tl.load(in_ptr4 + (-8 + x1), tmp20 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp25 = tmp23 + tmp24
tmp26 = tmp25 > tmp13
tmp27 = tmp25 * tmp15
tmp28 = tl.where(tmp26, tmp25, tmp27)
tmp29 = tl.full(tmp28.shape, 0.0, tmp28.dtype)
tmp30 = tl.where(tmp20, tmp28, tmp29)
tmp31 = tl.where(tmp9, tmp19, tmp30)
tmp32 = tl.where(tmp4, tmp5, tmp31)
tl.store(out_ptr0 + x3, tmp32, xmask)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 16
x0 = xindex % 16
x2 = xindex // 256
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp9 & xmask,
other=0.0)
tmp11 = tl.load(in_ptr2 + (-4 + x1), tmp9 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp12 = tmp10 + tmp11
tmp13 = 0.0
tmp14 = tmp12 > tmp13
tmp15 = 0.2
tmp16 = tmp12 * tmp15
tmp17 = tl.where(tmp14, tmp12, tmp16)
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp9, tmp17, tmp18)
tmp20 = tmp0 >= tmp7
tmp21 = tl.full([1], 12, tl.int64)
tmp22 = tmp0 < tmp21
tmp23 = tmp20 & tmp22
tmp24 = tl.load(in_ptr3 + (x0 + 16 * (-8 + x1) + 64 * x2), tmp23 &
xmask, other=0.0)
tmp25 = tl.load(in_ptr4 + (-8 + x1), tmp23 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp26 = tmp24 + tmp25
tmp27 = tmp26 > tmp13
tmp28 = tmp26 * tmp15
tmp29 = tl.where(tmp27, tmp26, tmp28)
tmp30 = tl.full(tmp29.shape, 0.0, tmp29.dtype)
tmp31 = tl.where(tmp23, tmp29, tmp30)
tmp32 = tmp0 >= tmp21
tl.full([1], 16, tl.int64)
tmp35 = tl.load(in_ptr5 + (x0 + 16 * (-12 + x1) + 64 * x2), tmp32 &
xmask, other=0.0)
tmp36 = tl.load(in_ptr6 + (-12 + x1), tmp32 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp37 = tmp35 + tmp36
tmp38 = tmp37 > tmp13
tmp39 = tmp37 * tmp15
tmp40 = tl.where(tmp38, tmp37, tmp39)
tmp41 = tl.full(tmp40.shape, 0.0, tmp40.dtype)
tmp42 = tl.where(tmp32, tmp40, tmp41)
tmp43 = tl.where(tmp23, tmp31, tmp42)
tmp44 = tl.where(tmp9, tmp19, tmp43)
tmp45 = tl.where(tmp4, tmp5, tmp44)
tl.store(out_ptr0 + x3, tmp45, xmask)
@triton.jit
def triton_poi_fused_cat_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
in_ptr5, in_ptr6, in_ptr7, in_ptr8, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 1280
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 20
x0 = xindex % 16
x2 = xindex // 320
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp9 & xmask,
other=0.0)
tmp11 = tl.load(in_ptr2 + (-4 + x1), tmp9 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp12 = tmp10 + tmp11
tmp13 = 0.0
tmp14 = tmp12 > tmp13
tmp15 = 0.2
tmp16 = tmp12 * tmp15
tmp17 = tl.where(tmp14, tmp12, tmp16)
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp9, tmp17, tmp18)
tmp20 = tmp0 >= tmp7
tmp21 = tl.full([1], 12, tl.int64)
tmp22 = tmp0 < tmp21
tmp23 = tmp20 & tmp22
tmp24 = tl.load(in_ptr3 + (x0 + 16 * (-8 + x1) + 64 * x2), tmp23 &
xmask, other=0.0)
tmp25 = tl.load(in_ptr4 + (-8 + x1), tmp23 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp26 = tmp24 + tmp25
tmp27 = tmp26 > tmp13
tmp28 = tmp26 * tmp15
tmp29 = tl.where(tmp27, tmp26, tmp28)
tmp30 = tl.full(tmp29.shape, 0.0, tmp29.dtype)
tmp31 = tl.where(tmp23, tmp29, tmp30)
tmp32 = tmp0 >= tmp21
tmp33 = tl.full([1], 16, tl.int64)
tmp34 = tmp0 < tmp33
tmp35 = tmp32 & tmp34
tmp36 = tl.load(in_ptr5 + (x0 + 16 * (-12 + x1) + 64 * x2), tmp35 &
xmask, other=0.0)
tmp37 = tl.load(in_ptr6 + (-12 + x1), tmp35 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp38 = tmp36 + tmp37
tmp39 = tmp38 > tmp13
tmp40 = tmp38 * tmp15
tmp41 = tl.where(tmp39, tmp38, tmp40)
tmp42 = tl.full(tmp41.shape, 0.0, tmp41.dtype)
tmp43 = tl.where(tmp35, tmp41, tmp42)
tmp44 = tmp0 >= tmp33
tl.full([1], 20, tl.int64)
tmp47 = tl.load(in_ptr7 + (x0 + 16 * (-16 + x1) + 64 * x2), tmp44 &
xmask, other=0.0)
tmp48 = tl.load(in_ptr8 + (-16 + x1), tmp44 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp49 = tmp47 + tmp48
tmp50 = tmp49 > tmp13
tmp51 = tmp49 * tmp15
tmp52 = tl.where(tmp50, tmp49, tmp51)
tmp53 = tl.full(tmp52.shape, 0.0, tmp52.dtype)
tmp54 = tl.where(tmp44, tmp52, tmp53)
tmp55 = tl.where(tmp35, tmp43, tmp54)
tmp56 = tl.where(tmp23, tmp31, tmp55)
tmp57 = tl.where(tmp9, tmp19, tmp56)
tmp58 = tl.where(tmp4, tmp5, tmp57)
tl.store(out_ptr0 + x3, tmp58, xmask)
@triton.jit
def triton_poi_fused_add_convolution_mul_4(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x3, xmask)
tmp2 = tmp0 + tmp1
tmp3 = 0.2
tmp4 = tmp2 * tmp3
tmp6 = tmp4 + tmp5
tl.store(in_out_ptr0 + x3, tmp6, xmask)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_5(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp8 = tmp7 > tmp3
tl.store(out_ptr0 + x3, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 8, 3, 3), (72, 9, 3, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 12, 3, 3), (108, 9, 3, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4, 20, 3, 3), (180, 9, 3, 1))
assert_size_stride(primals_11, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(512)](primals_1, buf0, primals_3, buf1,
512, XBLOCK=128, num_warps=4, num_stages=1)
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch.float32
)
triton_poi_fused_cat_1[grid(768)](primals_1, buf0, primals_3, buf2,
primals_5, buf3, 768, XBLOCK=128, num_warps=4, num_stages=1)
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1))
buf5 = empty_strided_cuda((4, 16, 4, 4), (256, 16, 4, 1), torch.float32
)
triton_poi_fused_cat_2[grid(1024)](primals_1, buf0, primals_3, buf2,
primals_5, buf4, primals_7, buf5, 1024, XBLOCK=256, num_warps=4,
num_stages=1)
buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 4, 4, 4), (64, 16, 4, 1))
buf7 = empty_strided_cuda((4, 20, 4, 4), (320, 16, 4, 1), torch.float32
)
triton_poi_fused_cat_3[grid(1280)](primals_1, buf0, primals_3, buf2,
primals_5, buf4, primals_7, buf6, primals_9, buf7, 1280, XBLOCK
=256, num_warps=4, num_stages=1)
buf8 = extern_kernels.convolution(buf7, primals_10, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 4, 4, 4), (64, 16, 4, 1))
buf9 = buf8
del buf8
triton_poi_fused_add_convolution_mul_4[grid(256)](buf9, primals_11,
primals_1, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_11
buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_5[grid(256)
](buf6, primals_9, buf10, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del buf6
del primals_9
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_5[grid(256)
](buf4, primals_7, buf11, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del buf4
del primals_7
buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_5[grid(256)
](buf2, primals_5, buf12, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del buf2
del primals_5
buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_5[grid(256)
](buf0, primals_3, buf13, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del buf0
del primals_3
return (buf9, primals_1, primals_2, primals_4, primals_6, primals_8,
primals_10, buf1, buf3, buf5, buf7, buf10, buf11, buf12, buf13)
class ResidualDenseBlockNew(nn.Module):
"""Achieves densely connected convolutional layers.
`Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993v5.pdf>` paper.
Args:
channels (int): The number of channels in the input image.
growths (int): The number of channels that increase in each layer of convolution.
"""
    def __init__(self, channels: int, growths: int) -> None:
super(ResidualDenseBlockNew, self).__init__()
self.conv1 = nn.Conv2d(channels + growths * 0, growths, (3, 3), (1,
1), (1, 1))
self.conv2 = nn.Conv2d(channels + growths * 1, growths, (3, 3), (1,
1), (1, 1))
self.conv3 = nn.Conv2d(channels + growths * 2, growths, (3, 3), (1,
1), (1, 1))
self.conv4 = nn.Conv2d(channels + growths * 3, growths, (3, 3), (1,
1), (1, 1))
self.conv5 = nn.Conv2d(channels + growths * 4, channels, (3, 3), (1,
1), (1, 1))
self.leaky_relu = nn.LeakyReLU(0.2, True)
self.identity = nn.Identity()
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.conv4.weight
primals_9 = self.conv4.bias
primals_10 = self.conv5.weight
primals_11 = self.conv5.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
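# A minimal usage sketch (assumes a CUDA device; channels=4 and growths=4
# match the shapes asserted in call() above):
if __name__ == "__main__" and torch.cuda.is_available():
    block = ResidualDenseBlockNew(channels=4, growths=4).cuda()
    x = torch.randn(4, 4, 4, 4, device="cuda")
    y = block(x)  # residual block: output keeps the input shape
    assert y.shape == x.shape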
repo_name: cyun-404/PieESRGAN
module_name: ResidualDenseBlock
synthetic: false
uuid: 3,381
licenses: ["Apache-2.0"]
stars: 0
sha: 22ffe683bf2389b646429494d1bc88e61a9d72c5
repo_link: https://github.com/cyun-404/PieESRGAN/tree/22ffe683bf2389b646429494d1bc88e61a9d72c5
entry_point: TanhGaussianDistParams

original_triton_python_code:

# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/r3/cr3febcwm3t44fuoitsx3ou2p6xg4sk4f7unagmmrvffasxf47te.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
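# A minimal eager-mode sketch of this fused kernel: the bias add and ReLU are
# applied in place, and the (x <= 0) mask is saved for the backward pass:
#   x = torch.relu(linear_out + bias)
#   mask = x <= 0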
# kernel path: runs/run_shard_4/inductor_cache/s5/cs5zzpz6m6kai3axr7c2ij4mngrlsrjl6xdg3ukojun5qp6ky3r5.py
# Topologically Sorted Source Nodes: [log_std, add, mul, log_std_1, std, mul_1, z, action, sub], Original ATen: [aten.tanh, aten.add, aten.mul, aten.exp, aten.sub]
# Source node to ATen node mapping:
# action => tanh_1
# add => add
# log_std => tanh
# log_std_1 => add_1
# mul => mul
# mul_1 => mul_1
# std => exp
# sub => sub
# z => add_2
# Graph fragment:
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%view_7,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%tanh, 1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 11.0), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, -20), kwargs = {})
# %exp : [num_users=4] = call_function[target=torch.ops.aten.exp.default](args = (%add_1,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%normal_functional, %exp), kwargs = {})
# %add_2 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_5, %mul_1), kwargs = {})
# %tanh_1 : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%add_2,), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_2, %view_5), kwargs = {})
triton_poi_fused_add_exp_mul_sub_tanh_1 = async_compile.triton('triton_poi_fused_add_exp_mul_sub_tanh_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_exp_mul_sub_tanh_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_exp_mul_sub_tanh_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp9 = tl.load(in_ptr1 + (x0), xmask)
tmp10 = tl.load(in_ptr2 + (x0), xmask)
tmp1 = libdevice.tanh(tmp0)
tmp2 = 1.0
tmp3 = tmp1 + tmp2
tmp4 = 11.0
tmp5 = tmp3 * tmp4
tmp6 = -20.0
tmp7 = tmp5 + tmp6
tmp8 = tl_math.exp(tmp7)
tmp11 = tmp10 * tmp8
tmp12 = tmp9 + tmp11
tmp13 = tmp12 - tmp9
tmp14 = libdevice.tanh(tmp12)
tl.store(out_ptr0 + (x0), tmp8, xmask)
tl.store(out_ptr1 + (x0), tmp12, xmask)
tl.store(out_ptr2 + (x0), tmp13, xmask)
tl.store(out_ptr3 + (x0), tmp14, xmask)
''', device_str='cuda')
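# A minimal eager-mode sketch of this fused kernel. The constants implement a
# log-std squashing: (tanh(h) + 1) * 11.0 - 20.0 maps tanh's [-1, 1] range to
# [-20, 2] (i.e. log_std_min = -20, log_std_max = 2), followed by the
# reparameterization trick:
#   log_std = (torch.tanh(h) + 1.0) * 11.0 - 20.0
#   std = log_std.exp()
#   z = mu + eps * std      # eps ~ Normal(0, 1)
#   action = torch.tanh(z)  # sub = z - mu is also kept for the log-prob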
# kernel path: runs/run_shard_4/inductor_cache/6r/c6rdrkmlczh5g4bcyxaso75xz5huarnchrjyckq264jw7cieitql.py
# Topologically Sorted Source Nodes: [var, log_scale, pow_2, neg, mul_2, truediv, sub_1, sub_2, pow_3, sub_3, add_3, log_1, log_prob, log_prob_1], Original ATen: [aten.pow, aten.log, aten.neg, aten.mul, aten.div, aten.sub, aten.rsub, aten.add, aten.sum]
# Source node to ATen node mapping:
# add_3 => add_3
# log_1 => log_1
# log_prob => sub_4
# log_prob_1 => sum_1
# log_scale => log
# mul_2 => mul_2
# neg => neg
# pow_2 => pow_2
# pow_3 => pow_3
# sub_1 => sub_1
# sub_2 => sub_2
# sub_3 => sub_3
# truediv => div
# var => pow_1
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%exp, 2), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%exp,), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%pow_2,), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, 2), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%neg, %mul_2), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div, %log), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_1, 0.9189385332046727), kwargs = {})
# %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%tanh_1, 2), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %pow_3), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_3, 1e-06), kwargs = {})
# %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_3,), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_2, %log_1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%sub_4, [-1], True), kwargs = {})
triton_poi_fused_add_div_log_mul_neg_pow_rsub_sub_sum_2 = async_compile.triton('triton_poi_fused_add_div_log_mul_neg_pow_rsub_sub_sum_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_log_mul_neg_pow_rsub_sub_sum_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_log_mul_neg_pow_rsub_sub_sum_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr2 + (4*x0), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr2 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp37 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp40 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp47 = tl.load(in_ptr2 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp54 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp57 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp64 = tl.load(in_ptr2 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp2 = -tmp1
tmp4 = tmp3 * tmp3
tmp5 = 2.0
tmp6 = tmp4 * tmp5
tmp7 = tmp2 / tmp6
tmp8 = tl_math.log(tmp3)
tmp9 = tmp7 - tmp8
tmp10 = 0.9189385332046727
tmp11 = tmp9 - tmp10
tmp13 = tmp12 * tmp12
tmp14 = 1.0
tmp15 = tmp14 - tmp13
tmp16 = 1e-06
tmp17 = tmp15 + tmp16
tmp18 = tl_math.log(tmp17)
tmp19 = tmp11 - tmp18
tmp21 = tmp20 * tmp20
tmp22 = -tmp21
tmp24 = tmp23 * tmp23
tmp25 = tmp24 * tmp5
tmp26 = tmp22 / tmp25
tmp27 = tl_math.log(tmp23)
tmp28 = tmp26 - tmp27
tmp29 = tmp28 - tmp10
tmp31 = tmp30 * tmp30
tmp32 = tmp14 - tmp31
tmp33 = tmp32 + tmp16
tmp34 = tl_math.log(tmp33)
tmp35 = tmp29 - tmp34
tmp36 = tmp19 + tmp35
tmp38 = tmp37 * tmp37
tmp39 = -tmp38
tmp41 = tmp40 * tmp40
tmp42 = tmp41 * tmp5
tmp43 = tmp39 / tmp42
tmp44 = tl_math.log(tmp40)
tmp45 = tmp43 - tmp44
tmp46 = tmp45 - tmp10
tmp48 = tmp47 * tmp47
tmp49 = tmp14 - tmp48
tmp50 = tmp49 + tmp16
tmp51 = tl_math.log(tmp50)
tmp52 = tmp46 - tmp51
tmp53 = tmp36 + tmp52
tmp55 = tmp54 * tmp54
tmp56 = -tmp55
tmp58 = tmp57 * tmp57
tmp59 = tmp58 * tmp5
tmp60 = tmp56 / tmp59
tmp61 = tl_math.log(tmp57)
tmp62 = tmp60 - tmp61
tmp63 = tmp62 - tmp10
tmp65 = tmp64 * tmp64
tmp66 = tmp14 - tmp65
tmp67 = tmp66 + tmp16
tmp68 = tl_math.log(tmp67)
tmp69 = tmp63 - tmp68
tmp70 = tmp53 + tmp69
tl.store(out_ptr0 + (x0), tmp70, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
buf15 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf15, 256, grid=grid(256), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse
buf14 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_0.run(buf3, primals_5, buf14, 256, grid=grid(256), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mu], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_3], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_9, reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf5)
del primals_9
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [eps], Original ATen: [aten.normal_functional]
buf8 = torch.ops.aten.normal_functional.default(buf7)
buf9 = buf8
del buf8
buf6 = buf7; del buf7 # reuse
buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [log_std, add, mul, log_std_1, std, mul_1, z, action, sub], Original ATen: [aten.tanh, aten.add, aten.mul, aten.exp, aten.sub]
triton_poi_fused_add_exp_mul_sub_tanh_1.run(buf5, buf4, buf9, buf6, buf10, buf12, buf11, 256, grid=grid(256), stream=stream0)
buf13 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [var, log_scale, pow_2, neg, mul_2, truediv, sub_1, sub_2, pow_3, sub_3, add_3, log_1, log_prob, log_prob_1], Original ATen: [aten.pow, aten.log, aten.neg, aten.mul, aten.div, aten.sub, aten.rsub, aten.add, aten.sum]
triton_poi_fused_add_div_log_mul_neg_pow_rsub_sub_sum_2.run(buf12, buf6, buf11, buf13, 64, grid=grid(64), stream=stream0)
return (buf11, buf13, buf10, reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(buf3, (64, 4), (4, 1), 0), buf5, buf6, buf9, buf11, buf12, primals_8, primals_6, buf14, primals_4, buf15, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from typing import Tuple
import torch.nn as nn
import torch.nn.functional as F
from typing import Callable
from torch.distributions import Normal
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
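    # Fused bias-add + ReLU: the activation is written back in place, and the
    # boolean mask of zeroed positions (x + bias <= 0) is stored separately
    # for reuse by the backward pass (threshold_backward).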
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_exp_mul_sub_tanh_1(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp9 = tl.load(in_ptr1 + x0, xmask)
tmp10 = tl.load(in_ptr2 + x0, xmask)
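    # in_ptr0 holds the raw log-std logits, in_ptr1 the mean, in_ptr2 the
    # standard-normal noise. tanh squashes log_std into [-20, 2]:
    # log_std = -20 + 0.5 * (2 - (-20)) * (tanh(x) + 1), hence the constants
    # 11.0 and -20.0 below; then std = exp(log_std), z = mean + noise * std
    # (the reparameterization trick), and the action is tanh(z).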
tmp1 = libdevice.tanh(tmp0)
tmp2 = 1.0
tmp3 = tmp1 + tmp2
tmp4 = 11.0
tmp5 = tmp3 * tmp4
tmp6 = -20.0
tmp7 = tmp5 + tmp6
tmp8 = tl_math.exp(tmp7)
tmp11 = tmp10 * tmp8
tmp12 = tmp9 + tmp11
tmp13 = tmp12 - tmp9
tmp14 = libdevice.tanh(tmp12)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp12, xmask)
tl.store(out_ptr2 + x0, tmp13, xmask)
tl.store(out_ptr3 + x0, tmp14, xmask)
@triton.jit
def triton_poi_fused_add_div_log_mul_neg_pow_rsub_sub_sum_2(in_ptr0,
in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
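    # Each program reduces one row of 4 action dims. Per element it forms the
    # diagonal-Gaussian log-density of z, -(z - mu)^2 / (2 * std^2) - log(std)
    # - 0.5 * log(2 * pi) (0.9189385332046727 = 0.5 * log(2 * pi)), subtracts
    # the tanh-squash correction log(1 - tanh(z)^2 + 1e-06), and sums the four
    # terms into out_ptr0.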
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp20 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp23 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp30 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp37 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp40 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp47 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp54 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp57 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp64 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp2 = -tmp1
tmp4 = tmp3 * tmp3
tmp5 = 2.0
tmp6 = tmp4 * tmp5
tmp7 = tmp2 / tmp6
tmp8 = tl_math.log(tmp3)
tmp9 = tmp7 - tmp8
tmp10 = 0.9189385332046727
tmp11 = tmp9 - tmp10
tmp13 = tmp12 * tmp12
tmp14 = 1.0
tmp15 = tmp14 - tmp13
tmp16 = 1e-06
tmp17 = tmp15 + tmp16
tmp18 = tl_math.log(tmp17)
tmp19 = tmp11 - tmp18
tmp21 = tmp20 * tmp20
tmp22 = -tmp21
tmp24 = tmp23 * tmp23
tmp25 = tmp24 * tmp5
tmp26 = tmp22 / tmp25
tmp27 = tl_math.log(tmp23)
tmp28 = tmp26 - tmp27
tmp29 = tmp28 - tmp10
tmp31 = tmp30 * tmp30
tmp32 = tmp14 - tmp31
tmp33 = tmp32 + tmp16
tmp34 = tl_math.log(tmp33)
tmp35 = tmp29 - tmp34
tmp36 = tmp19 + tmp35
tmp38 = tmp37 * tmp37
tmp39 = -tmp38
tmp41 = tmp40 * tmp40
tmp42 = tmp41 * tmp5
tmp43 = tmp39 / tmp42
tmp44 = tl_math.log(tmp40)
tmp45 = tmp43 - tmp44
tmp46 = tmp45 - tmp10
tmp48 = tmp47 * tmp47
tmp49 = tmp14 - tmp48
tmp50 = tmp49 + tmp16
tmp51 = tl_math.log(tmp50)
tmp52 = tmp46 - tmp51
tmp53 = tmp36 + tmp52
tmp55 = tmp54 * tmp54
tmp56 = -tmp55
tmp58 = tmp57 * tmp57
tmp59 = tmp58 * tmp5
tmp60 = tmp56 / tmp59
tmp61 = tl_math.log(tmp57)
tmp62 = tmp60 - tmp61
tmp63 = tmp62 - tmp10
tmp65 = tmp64 * tmp64
tmp66 = tmp14 - tmp65
tmp67 = tmp66 + tmp16
tmp68 = tl_math.log(tmp67)
tmp69 = tmp63 - tmp68
tmp70 = tmp53 + tmp69
tl.store(out_ptr0 + x0, tmp70, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf15 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_2, buf15, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
buf14 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf3,
primals_5, buf14, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_9, reinterpret_tensor(buf3, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf5)
del primals_9
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf8 = torch.ops.aten.normal_functional.default(buf7)
buf9 = buf8
del buf8
buf6 = buf7
del buf7
buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_exp_mul_sub_tanh_1[grid(256)](buf5, buf4, buf9,
buf6, buf10, buf12, buf11, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf13 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
        triton_poi_fused_add_div_log_mul_neg_pow_rsub_sub_sum_2[grid(64)](
            buf12, buf6, buf11, buf13, 64, XBLOCK=64, num_warps=1,
            num_stages=1)
return (buf11, buf13, buf10, reinterpret_tensor(buf4, (4, 4, 4, 4), (64,
16, 4, 1), 0), buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1),
0), reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
reinterpret_tensor(buf3, (64, 4), (4, 1), 0), buf5, buf6, buf9,
buf11, buf12, primals_8, primals_6, buf14, primals_4, buf15)
def identity(x: 'torch.Tensor') ->torch.Tensor:
"""Return input without any change."""
return x
def init_layer_uniform(layer: 'nn.Linear', init_w: 'float'=0.003) ->nn.Linear:
"""Init uniform parameters on the single layer"""
layer.weight.data.uniform_(-init_w, init_w)
layer.bias.data.uniform_(-init_w, init_w)
return layer
class MLP(nn.Module):
"""Baseline of Multilayer perceptron.
Attributes:
input_size (int): size of input
output_size (int): size of output layer
hidden_sizes (list): sizes of hidden layers
hidden_activation (function): activation function of hidden layers
output_activation (function): activation function of output layer
hidden_layers (list): list containing linear layers
use_output_layer (bool): whether or not to use the last layer
n_category (int): category number (-1 if the action is continuous)
"""
def __init__(self, input_size: 'int', output_size: 'int', hidden_sizes:
'list', hidden_activation: 'Callable'=F.relu, output_activation:
'Callable'=identity, linear_layer: 'nn.Module'=nn.Linear,
use_output_layer: 'bool'=True, n_category: 'int'=-1, init_fn:
'Callable'=init_layer_uniform):
"""Initialize.
Args:
input_size (int): size of input
output_size (int): size of output layer
            hidden_sizes (list): sizes of hidden layers
hidden_activation (function): activation function of hidden layers
output_activation (function): activation function of output layer
linear_layer (nn.Module): linear layer of mlp
use_output_layer (bool): whether or not to use the last layer
n_category (int): category number (-1 if the action is continuous)
            init_fn (Callable): weight initialization function applied to the last layer
"""
super(MLP, self).__init__()
self.hidden_sizes = hidden_sizes
self.input_size = input_size
self.output_size = output_size
self.hidden_activation = hidden_activation
self.output_activation = output_activation
self.linear_layer = linear_layer
self.use_output_layer = use_output_layer
self.n_category = n_category
self.hidden_layers: 'list' = []
in_size = self.input_size
for i, next_size in enumerate(hidden_sizes):
fc = self.linear_layer(in_size, next_size)
in_size = next_size
self.__setattr__('hidden_fc{}'.format(i), fc)
self.hidden_layers.append(fc)
if self.use_output_layer:
self.output_layer = self.linear_layer(in_size, output_size)
self.output_layer = init_fn(self.output_layer)
else:
self.output_layer = identity
self.output_activation = identity
def forward(self, x: 'torch.Tensor') ->torch.Tensor:
"""Forward method implementation."""
for hidden_layer in self.hidden_layers:
x = self.hidden_activation(hidden_layer(x))
x = self.output_activation(self.output_layer(x))
return x
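# Illustrative usage (not part of the original source):
#   mlp = MLP(input_size=4, output_size=4, hidden_sizes=[4, 4])
#   out = mlp(torch.randn(8, 4))  # -> shape (8, 4)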
class GaussianDist(MLP):
"""Multilayer perceptron with Gaussian distribution output.
Attributes:
mu_activation (function): bounding function for mean
log_std_min (float): lower bound of log std
log_std_max (float): upper bound of log std
mu_layer (nn.Linear): output layer for mean
log_std_layer (nn.Linear): output layer for log std
"""
def __init__(self, input_size: 'int', output_size: 'int', hidden_sizes:
'list', hidden_activation: 'Callable'=F.relu, mu_activation:
'Callable'=torch.tanh, log_std_min: 'float'=-20, log_std_max:
'float'=2, init_fn: 'Callable'=init_layer_uniform):
"""Initialize."""
super(GaussianDist, self).__init__(input_size=input_size,
output_size=output_size, hidden_sizes=hidden_sizes,
hidden_activation=hidden_activation, use_output_layer=False)
self.mu_activation = mu_activation
self.log_std_min = log_std_min
self.log_std_max = log_std_max
in_size = hidden_sizes[-1]
self.log_std_layer = nn.Linear(in_size, output_size)
self.log_std_layer = init_fn(self.log_std_layer)
self.mu_layer = nn.Linear(in_size, output_size)
self.mu_layer = init_fn(self.mu_layer)
def get_dist_params(self, x: 'torch.Tensor') ->Tuple[torch.Tensor, ...]:
"""Return gausian distribution parameters."""
hidden = super(GaussianDist, self).forward(x)
mu = self.mu_activation(self.mu_layer(hidden))
log_std = torch.tanh(self.log_std_layer(hidden))
        log_std = self.log_std_min + 0.5 * (
            self.log_std_max - self.log_std_min) * (log_std + 1)
std = torch.exp(log_std)
return mu, log_std, std
def forward(self, x: 'torch.Tensor') ->Tuple[torch.Tensor, ...]:
"""Forward method implementation."""
mu, _, std = self.get_dist_params(x)
dist = Normal(mu, std)
action = dist.sample()
return action, dist
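# A minimal eager-mode sketch (not part of the original module) of the
# tanh-squashed log-probability that the fused Triton kernels above compute.
# The function name is hypothetical; eps matches the 1e-06 constant used in
# the fused kernel.
def tanh_gaussian_log_prob(mu, std, z, eps=1e-06):
    """Diagonal-Gaussian log-density of z with a tanh change-of-variables term."""
    base = Normal(mu, std).log_prob(z)
    correction = torch.log(1 - torch.tanh(z).pow(2) + eps)
    return (base - correction).sum(dim=-1, keepdim=True)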
class TanhGaussianDistParamsNew(GaussianDist):
"""Multilayer perceptron with Gaussian distribution output."""
def __init__(self, **kwargs):
"""Initialize."""
super(TanhGaussianDistParamsNew, self).__init__(**kwargs,
mu_activation=identity)
def forward(self, input_0):
primals_1 = self.hidden_fc0.weight
primals_2 = self.hidden_fc0.bias
primals_4 = self.hidden_fc1.weight
primals_5 = self.hidden_fc1.bias
primals_6 = self.log_std_layer.weight
primals_7 = self.log_std_layer.bias
primals_8 = self.mu_layer.weight
primals_9 = self.mu_layer.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
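        # Illustrative note (inferred from the kernels above): the first five
        # graph outputs appear to be action = tanh(z), the log-prob summed
        # over the last dim, the pre-squash sample z, the mean, and std.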
return output[0], output[1], output[2], output[3], output[4]
| MrSyee/rl_algorithms | TanhGaussianDistParams | false | 5,618 | ["MIT"] | 1 | 5b5276982032f8a8a614b9466849b7b3ef245b3e | https://github.com/MrSyee/rl_algorithms/tree/5b5276982032f8a8a614b9466849b7b3ef245b3e |
GaussianFocalLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_1/inductor_cache/ux/cux3rdvmurnb775fshvmb35y2xr5glzkvviyrjmg7ygqg2dz3lsx.py
# Topologically Sorted Source Nodes: [add, log, neg, sub_1, pow_2, mul, pos_weights, pos_loss, sub_2, add_1, log_1, neg_1, pow_3, mul_2, sub, neg_weights, neg_loss, loss, loss_1, loss_reg], Original ATen: [aten.add, aten.log, aten.neg, aten.rsub, aten.pow, aten.mul, aten.eq, aten.mean]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# log => log
# log_1 => log_1
# loss => add_2
# loss_1 => mean
# loss_reg => mul_4
# mul => mul
# mul_2 => mul_2
# neg => neg
# neg_1 => neg_1
# neg_loss => mul_3
# neg_weights => pow_1
# pos_loss => mul_1
# pos_weights => eq
# pow_2 => pow_2
# pow_3 => pow_3
# sub => sub
# sub_1 => sub_1
# sub_2 => sub_2
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, 1e-12), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add,), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%log,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg0_1), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_1, 2.0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%neg, %pow_2), kwargs = {})
# %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%arg1_1, 1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %eq), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg0_1), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_2, 1e-12), kwargs = {})
# %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_1,), kwargs = {})
# %neg_1 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%log_1,), kwargs = {})
# %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 2.0), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%neg_1, %pow_3), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg1_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 4.0), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %pow_1), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %mul_3), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%add_2,), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 1.0), kwargs = {})
triton_per_fused_add_eq_log_mean_mul_neg_pow_rsub_0 = async_compile.triton('triton_per_fused_add_eq_log_mean_mul_neg_pow_rsub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_eq_log_mean_mul_neg_pow_rsub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_eq_log_mean_mul_neg_pow_rsub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp9 = tl.load(in_ptr1 + (r0), None)
tmp1 = 1e-12
tmp2 = tmp0 + tmp1
tmp3 = tl_math.log(tmp2)
tmp4 = -tmp3
tmp5 = 1.0
tmp6 = tmp5 - tmp0
tmp7 = tmp6 * tmp6
tmp8 = tmp4 * tmp7
tmp10 = tmp9 == tmp5
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp8 * tmp11
tmp13 = tmp6 + tmp1
tmp14 = tl_math.log(tmp13)
tmp15 = -tmp14
tmp16 = tmp0 * tmp0
tmp17 = tmp15 * tmp16
tmp18 = tmp5 - tmp9
tmp19 = tmp18 * tmp18
tmp20 = tmp19 * tmp19
tmp21 = tmp17 * tmp20
tmp22 = tmp12 + tmp21
tmp23 = tl.broadcast_to(tmp22, [RBLOCK])
tmp25 = triton_helpers.promote_to_tensor(tl.sum(tmp23, 0))
tmp26 = 256.0
tmp27 = tmp25 / tmp26
tmp28 = tmp27 * tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp28, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [add, log, neg, sub_1, pow_2, mul, pos_weights, pos_loss, sub_2, add_1, log_1, neg_1, pow_3, mul_2, sub, neg_weights, neg_loss, loss, loss_1, loss_reg], Original ATen: [aten.add, aten.log, aten.neg, aten.rsub, aten.pow, aten.mul, aten.eq, aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_add_eq_log_mean_mul_neg_pow_rsub_0.run(buf1, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import functools
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_eq_log_mean_mul_neg_pow_rsub_0(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp9 = tl.load(in_ptr1 + r0, None)
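    # in_ptr0 = pred, in_ptr1 = gaussian_target. Per element this fuses
    # pos_loss = -log(pred + 1e-12) * (1 - pred)^2 * (target == 1) and
    # neg_loss = -log(1 - pred + 1e-12) * pred^2 * (1 - target)^4, then takes
    # the mean over all 256 elements and applies loss_weight = 1.0.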
tmp1 = 1e-12
tmp2 = tmp0 + tmp1
tmp3 = tl_math.log(tmp2)
tmp4 = -tmp3
tmp5 = 1.0
tmp6 = tmp5 - tmp0
tmp7 = tmp6 * tmp6
tmp8 = tmp4 * tmp7
tmp10 = tmp9 == tmp5
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp8 * tmp11
tmp13 = tmp6 + tmp1
tmp14 = tl_math.log(tmp13)
tmp15 = -tmp14
tmp16 = tmp0 * tmp0
tmp17 = tmp15 * tmp16
tmp18 = tmp5 - tmp9
tmp19 = tmp18 * tmp18
tmp20 = tmp19 * tmp19
tmp21 = tmp17 * tmp20
tmp22 = tmp12 + tmp21
tmp23 = tl.broadcast_to(tmp22, [RBLOCK])
tmp25 = triton_helpers.promote_to_tensor(tl.sum(tmp23, 0))
tmp26 = 256.0
tmp27 = tmp25 / tmp26
tmp28 = tmp27 * tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp28, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_eq_log_mean_mul_neg_pow_rsub_0[grid(1)](buf1,
arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
def reduce_loss(loss, reduction):
"""Reduce loss as specified.
Args:
loss (Tensor): Elementwise loss tensor.
reduction (str): Options are "none", "mean" and "sum".
Return:
Tensor: Reduced loss tensor.
"""
reduction_enum = F._Reduction.get_enum(reduction)
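    # F._Reduction.get_enum maps 'none' -> 0, 'mean' -> 1, 'sum' -> 2.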
if reduction_enum == 0:
return loss
elif reduction_enum == 1:
return loss.mean()
elif reduction_enum == 2:
return loss.sum()
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
"""Apply element-wise weight and reduce loss.
Args:
loss (Tensor): Element-wise loss.
weight (Tensor): Element-wise weights.
reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Average factor when computing the mean of losses.
Returns:
Tensor: Processed loss values.
"""
if weight is not None:
loss = loss * weight
if avg_factor is None:
loss = reduce_loss(loss, reduction)
elif reduction == 'mean':
loss = loss.sum() / avg_factor
elif reduction != 'none':
raise ValueError('avg_factor can not be used with reduction="sum"')
return loss
def weighted_loss(loss_func):
"""Create a weighted version of a given loss function.
To use this decorator, the loss function must have the signature like
`loss_func(pred, target, **kwargs)`. The function only needs to compute
element-wise loss without any reduction. This decorator will add weight
and reduction arguments to the function. The decorated function will have
the signature like `loss_func(pred, target, weight=None, reduction='mean',
avg_factor=None, **kwargs)`.
:Example:
>>> import torch
>>> @weighted_loss
>>> def l1_loss(pred, target):
>>> return (pred - target).abs()
>>> pred = torch.Tensor([0, 2, 3])
>>> target = torch.Tensor([1, 1, 1])
>>> weight = torch.Tensor([1, 0, 1])
>>> l1_loss(pred, target)
tensor(1.3333)
>>> l1_loss(pred, target, weight)
tensor(1.)
>>> l1_loss(pred, target, reduction='none')
tensor([1., 1., 2.])
>>> l1_loss(pred, target, weight, avg_factor=2)
tensor(1.5000)
"""
@functools.wraps(loss_func)
def wrapper(pred, target, weight=None, reduction='mean', avg_factor=
None, **kwargs):
loss = loss_func(pred, target, **kwargs)
loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
return loss
return wrapper
@weighted_loss
def gaussian_focal_loss(pred, gaussian_target, alpha=2.0, gamma=4.0):
"""`Focal Loss <https://arxiv.org/abs/1708.02002>`_ for targets in gaussian
distribution.
Args:
pred (torch.Tensor): The prediction.
gaussian_target (torch.Tensor): The learning target of the prediction
in gaussian distribution.
alpha (float, optional): A balanced form for Focal Loss.
Defaults to 2.0.
gamma (float, optional): The gamma for calculating the modulating
factor. Defaults to 4.0.
"""
eps = 1e-12
pos_weights = gaussian_target.eq(1)
neg_weights = (1 - gaussian_target).pow(gamma)
pos_loss = -(pred + eps).log() * (1 - pred).pow(alpha) * pos_weights
neg_loss = -(1 - pred + eps).log() * pred.pow(alpha) * neg_weights
return pos_loss + neg_loss
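# Illustrative usage (not part of the original source): thanks to the
# @weighted_loss decorator, the function also accepts weight/reduction/
# avg_factor keyword arguments.
#   pred = torch.rand(4, 4, 4, 4)
#   target = torch.rand(4, 4, 4, 4)
#   loss = gaussian_focal_loss(pred, target)  # scalar mean, as in the kernel
#   per_elem = gaussian_focal_loss(pred, target, reduction='none')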
class GaussianFocalLossNew(nn.Module):
"""GaussianFocalLoss is a variant of focal loss.
More details can be found in the `paper
<https://arxiv.org/abs/1808.01244>`_
Code is modified from `kp_utils.py
<https://github.com/princeton-vl/CornerNet/blob/master/models/py_utils/kp_utils.py#L152>`_ # noqa: E501
    Please note that the target in GaussianFocalLoss is a Gaussian heatmap,
    not a 0/1 binary target.
Args:
alpha (float): Power of prediction.
        gamma (float): Power of target for negative samples.
reduction (str): Options are "none", "mean" and "sum".
loss_weight (float): Loss weight of current loss.
"""
    def __init__(self, alpha=2.0, gamma=4.0, reduction='mean',
            loss_weight=1.0):
super(GaussianFocalLossNew, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| Andrew-Zhu/DyFPN | GaussianFocalLoss | false | 7,729 | ["Apache-2.0"] | 32 | a74463b59c4ce28253c2449a07c0f6692a0147a1 | https://github.com/Andrew-Zhu/DyFPN/tree/a74463b59c4ce28253c2449a07c0f6692a0147a1 |
DecoderLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/ue/cueewzxuy4vxtpvu3vvsk7dj77iejxhffcljrvuqynhlke7j7x72.py
# Topologically Sorted Source Nodes: [truediv, attn], Original ATen: [aten.div, aten.clone]
# Source node to ATen node mapping:
# attn => clone
# truediv => div
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%permute_3, 2.0), kwargs = {})
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_div_0 = async_compile.triton('triton_poi_fused_clone_div_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16) % 4
x3 = (xindex // 64)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (16*x1) + (64*x3)), xmask)
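    # Contiguous copy of the permuted query heads scaled by 0.5, i.e. the
    # graph's division by the temperature 2.0 (= sqrt(d_k) for d_k = 4).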
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x4), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/ri/cricgdtr5c24l63g746gjtdd45qor3pkzmi7qmyygyd24ejrijb7.py
# Topologically Sorted Source Nodes: [attn], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# attn => clone_1
# Graph fragment:
# %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = (yindex // 16)
y3 = yindex
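    # Tiled contiguous copy of the permuted key tensor that feeds the batched
    # attention matmul.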
tmp0 = tl.load(in_ptr0 + (y0 + (16*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/tt/cttmvktt3m2x2nl56afa7l3abaxt7wlehowakdzngkhgs35f3n7u.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_11, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_11, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
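    # Softmax pass 1 over rows of length 4: subtract the row max before
    # exponentiating, for numerical stability.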
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/ry/cryn7ntc2gpkbfzbre3xh7lffx7zkbskw6oihbzsekkgajmdbki6.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => div_1, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
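    # Softmax pass 2: normalize each exponentiated element by its row sum.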
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/6b/c6busvilz5nn36jjet3bmw7cqddirh4sgalamjr3fsrp3sbsacfi.py
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# output => clone_3
# Graph fragment:
# %clone_3 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_3,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_4 = async_compile.triton('triton_poi_fused_clone_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16) % 4
x3 = (xindex // 64)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (16*x1) + (64*x3)), xmask)
tl.store(out_ptr0 + (x4), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/s7/cs7p2dyxlesdvuyx4owztmqg5sapsarlgzaivin7okeoe6lxygw7.py
# Topologically Sorted Source Nodes: [q_5], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# q_5 => var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_19, [2]), kwargs = {correction: 0, keepdim: True})
triton_poi_fused_native_layer_norm_5 = async_compile.triton('triton_poi_fused_native_layer_norm_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
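    # One pass over the 4 features of each row: mean and biased variance
    # (correction=0) of the residual sum in_ptr0 + in_ptr1, for LayerNorm.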
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + (x0), tmp16, xmask)
tl.store(out_ptr1 + (x0), tmp28, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/r7/cr7rb6culc7hicmne2a4ips5pg6g22yb6ww46h6efrp4i2of5v4k.py
# Topologically Sorted Source Nodes: [q_5], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# q_5 => add_1, add_2, mul, mul_1, rsqrt, sub_1
# Graph fragment:
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-06), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_19, %getitem_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_6), kwargs = {})
# %add_2 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_7), kwargs = {})
triton_poi_fused_native_layer_norm_6 = async_compile.triton('triton_poi_fused_native_layer_norm_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last')
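    # LayerNorm normalize step: ((in_ptr0 + in_ptr1) - mean) * rsqrt(var +
    # 1e-06), then scale by the weight (in_ptr4) and shift by the bias
    # (in_ptr5).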
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-06
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/fr/cfrvmolzdvsekgv4ejh4rokymzyx2ewy7rgqbypf7vpbjl3l2peb.py
# Topologically Sorted Source Nodes: [q_10], Original ATen: [aten.add]
# Source node to ATen node mapping:
# q_10 => add_3
# Graph fragment:
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_38, %add_2), kwargs = {})
triton_poi_fused_add_7 = async_compile.triton('triton_poi_fused_add_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
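    # Residual connection (q_10): accumulate in_ptr0 into in_out_ptr0 in place.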
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/za/czafm2jgycg472xvixvwihaduwf6hk7i4vs4m7lhryedtui37wqi.py
# Topologically Sorted Source Nodes: [q_11], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# q_11 => add_4, rsqrt_1, var_mean_1
# Graph fragment:
# %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_40, [2]), kwargs = {correction: 0, keepdim: True})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-06), kwargs = {})
# %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_4,), kwargs = {})
triton_poi_fused_native_layer_norm_8 = async_compile.triton('triton_poi_fused_native_layer_norm_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_8(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-06
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + (x0), tmp8, xmask)
tl.store(out_ptr1 + (x0), tmp23, xmask)
''', device_str='cuda')
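# Reference sketch (an illustrative equivalent, not generated code): this kernel
# reduces each row of four values to its mean and rsqrt(var + eps), i.e. the
# statistics half of the var_mean(correction=0) call named in the graph fragment.
def _norm_stats_ref(x, eps=1e-06):
    # hypothetical helper for illustration only
    var, mean = torch.var_mean(x, dim=-1, correction=0, keepdim=True)
    return mean, torch.rsqrt(var + eps)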
# kernel path: runs/run_shard_8/inductor_cache/4d/c4dqmytly5zolisxgude22db5p3pbommvxualmvn6ercwtoppkm7.py
# Topologically Sorted Source Nodes: [q_11], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# q_11 => add_4, add_5, mul_2, mul_3, rsqrt_1, sub_3, var_mean_1
# Graph fragment:
# %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_40, [2]), kwargs = {correction: 0, keepdim: True})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-06), kwargs = {})
# %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_4,), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_40, %getitem_3), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, %rsqrt_1), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %primals_13), kwargs = {})
# %add_5 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, %primals_14), kwargs = {})
triton_poi_fused_native_layer_norm_9 = async_compile.triton('triton_poi_fused_native_layer_norm_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
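# Reference sketch (illustrative): this kernel consumes the statistics produced by
# triton_poi_fused_native_layer_norm_8 and applies the affine step, so the pair
# realizes q_11's native_layer_norm without re-reducing once per element.
def _apply_layer_norm_ref(x, mean, rstd, weight, bias):
    # hypothetical helper; rstd is the rsqrt(var + eps) written by the stats kernel
    return (x - mean) * rstd * weight + bias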
# kernel path: runs/run_shard_8/inductor_cache/ol/coljc3cs7q6hio3b326qftbftlnedhni7mopq6hnmq3reufgrywy.py
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# relu => relu
# Graph fragment:
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_43,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%view_48, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_10 = async_compile.triton('triton_poi_fused_relu_threshold_backward_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_10', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_10(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
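# Reference sketch (illustrative): the kernel above fuses the bias add with ReLU and
# also materializes the (output <= 0) mask that aten.threshold_backward consumes, so
# the backward pass never recomputes the activation.
def _relu_with_mask_ref(x, bias):
    # hypothetical helper for illustration only
    y = torch.relu(x + bias)
    return y, y <= 0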
# kernel path: runs/run_shard_8/inductor_cache/bg/cbg7owgmfi2mnbyuzqnl62xpa7l324g7fysqnvnh2yuplhxd5hpv.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.view]
# Source node to ATen node mapping:
# x => view_49
# Graph fragment:
# %view_49 : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%view_48, [16, 4]), kwargs = {})
triton_poi_fused_view_11 = async_compile.triton('triton_poi_fused_view_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_view_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_view_11(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x1) + (16*((x1 % 4) // 4))), xmask)
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/ly/clych72evfl43h75debtcdhychoab7iw7f2ch2b5ebe3jjglsndg.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.add]
# Source node to ATen node mapping:
# x_2 => add_6
# Graph fragment:
# %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_50, %add_5), kwargs = {})
triton_poi_fused_add_12 = async_compile.triton('triton_poi_fused_add_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_12', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_12(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
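# Reference sketch (illustrative): the kernel above fuses the second linear's bias
# with the feed-forward residual connection (x_2 = matmul_out + bias + residual).
def _bias_residual_ref(x, bias, residual):
    # hypothetical helper for illustration only
    return x + bias + residual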
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (16, 4), (4, 1))
assert_size_stride(primals_3, (16, 4), (4, 1))
assert_size_stride(primals_4, (16, 4), (4, 1))
assert_size_stride(primals_5, (4, 16), (16, 1))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_9, (16, 4), (4, 1))
assert_size_stride(primals_10, (16, 4), (4, 1))
assert_size_stride(primals_11, (16, 4), (4, 1))
assert_size_stride(primals_12, (4, 16), (16, 1))
assert_size_stride(primals_13, (4, ), (1, ))
assert_size_stride(primals_14, (4, ), (1, ))
assert_size_stride(primals_15, (4, 4), (4, 1))
assert_size_stride(primals_16, (4, ), (1, ))
assert_size_stride(primals_17, (4, 4), (4, 1))
assert_size_stride(primals_18, (4, ), (1, ))
assert_size_stride(primals_19, (4, ), (1, ))
assert_size_stride(primals_20, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 16), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 16), (1, 4), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), out=buf2)
del primals_4
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [truediv, attn], Original ATen: [aten.div, aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_div_0.run(buf0, buf3, 256, grid=grid(256), stream=stream0)
buf4 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [attn], Original ATen: [aten.clone]
triton_poi_fused_clone_1.run(buf1, buf4, 64, 4, grid=grid(64, 4), stream=stream0)
buf5 = reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [attn], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf5, buf6, 256, grid=grid(256), stream=stream0)
buf7 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf5 # reuse
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
triton_poi_fused__softmax_3.run(buf6, buf7, 256, grid=grid(256), stream=stream0)
buf8 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.clone]
triton_poi_fused_clone_4.run(buf2, buf8, 256, grid=grid(256), stream=stream0)
buf9 = reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
triton_poi_fused_clone_4.run(buf9, buf10, 256, grid=grid(256), stream=stream0)
buf11 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_3], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf10, (16, 16), (16, 1), 0), reinterpret_tensor(primals_5, (16, 4), (1, 16), 0), out=buf11)
buf12 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf13 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [q_5], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_5.run(buf11, primals_1, buf12, buf13, 16, grid=grid(16), stream=stream0)
buf14 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [q_5], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_6.run(buf11, primals_1, buf12, buf13, primals_6, primals_7, buf14, 64, grid=grid(64), stream=stream0)
del primals_7
buf15 = reinterpret_tensor(buf9, (16, 16), (16, 1), 0); del buf9 # reuse
# Topologically Sorted Source Nodes: [linear_4], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf14, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 16), (1, 4), 0), out=buf15)
buf16 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_5], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_8, (16, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 16), (1, 4), 0), out=buf16)
del primals_10
buf17 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_6], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_8, (16, 4), (4, 1), 0), reinterpret_tensor(primals_11, (4, 16), (1, 4), 0), out=buf17)
del primals_11
buf18 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [truediv_1, attn_2], Original ATen: [aten.div, aten.clone]
triton_poi_fused_clone_div_0.run(buf15, buf18, 256, grid=grid(256), stream=stream0)
buf19 = reinterpret_tensor(buf15, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf15 # reuse
# Topologically Sorted Source Nodes: [attn_2], Original ATen: [aten.clone]
triton_poi_fused_clone_1.run(buf16, buf19, 64, 4, grid=grid(64, 4), stream=stream0)
buf20 = reinterpret_tensor(buf16, (16, 4, 4), (16, 4, 1), 0); del buf16 # reuse
# Topologically Sorted Source Nodes: [attn_2], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf18, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf19, (16, 4, 4), (16, 4, 1), 0), out=buf20)
buf21 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax_1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf20, buf21, 256, grid=grid(256), stream=stream0)
buf22 = reinterpret_tensor(buf20, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf20 # reuse
# Topologically Sorted Source Nodes: [softmax_1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_3.run(buf21, buf22, 256, grid=grid(256), stream=stream0)
buf23 = buf21; del buf21 # reuse
# Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.clone]
triton_poi_fused_clone_4.run(buf17, buf23, 256, grid=grid(256), stream=stream0)
buf24 = reinterpret_tensor(buf17, (16, 4, 4), (16, 4, 1), 0); del buf17 # reuse
# Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf22, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf23, (16, 4, 4), (16, 4, 1), 0), out=buf24)
buf25 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [contiguous_1], Original ATen: [aten.clone]
triton_poi_fused_clone_4.run(buf24, buf25, 256, grid=grid(256), stream=stream0)
del buf24
buf26 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_7], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf25, (16, 16), (16, 1), 0), reinterpret_tensor(primals_12, (16, 4), (1, 16), 0), out=buf26)
buf27 = reinterpret_tensor(buf26, (4, 4, 4), (16, 4, 1), 0); del buf26 # reuse
# Topologically Sorted Source Nodes: [q_10], Original ATen: [aten.add]
triton_poi_fused_add_7.run(buf27, buf14, 64, grid=grid(64), stream=stream0)
buf28 = buf13; del buf13 # reuse
buf29 = buf12; del buf12 # reuse
# Topologically Sorted Source Nodes: [q_11], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_8.run(buf27, buf28, buf29, 16, grid=grid(16), stream=stream0)
buf30 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [q_11], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_9.run(buf27, buf28, buf29, primals_13, primals_14, buf30, 64, grid=grid(64), stream=stream0)
del primals_14
buf31 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf30, (16, 4), (4, 1), 0), reinterpret_tensor(primals_15, (4, 4), (1, 4), 0), out=buf31)
buf32 = reinterpret_tensor(buf31, (4, 4, 4), (16, 4, 1), 0); del buf31 # reuse
buf39 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_10.run(buf32, primals_16, buf39, 64, grid=grid(64), stream=stream0)
del primals_16
buf33 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.view]
triton_poi_fused_view_11.run(buf32, buf33, 64, grid=grid(64), stream=stream0)
buf34 = reinterpret_tensor(buf32, (16, 4), (4, 1), 0); del buf32 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf33, reinterpret_tensor(primals_17, (4, 4), (1, 4), 0), out=buf34)
buf35 = reinterpret_tensor(buf34, (4, 4, 4), (16, 4, 1), 0); del buf34 # reuse
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.add]
triton_poi_fused_add_12.run(buf35, primals_18, buf30, 64, grid=grid(64), stream=stream0)
del primals_18
buf36 = buf29; del buf29 # reuse
buf37 = buf28; del buf28 # reuse
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_8.run(buf35, buf36, buf37, 16, grid=grid(16), stream=stream0)
buf38 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_9.run(buf35, buf36, buf37, primals_19, primals_20, buf38, 64, grid=grid(64), stream=stream0)
del buf36
del buf37
del primals_20
return (buf38, buf7, buf22, primals_1, primals_6, primals_13, primals_19, buf7, reinterpret_tensor(buf10, (16, 16), (16, 1), 0), buf11, reinterpret_tensor(buf14, (16, 4), (4, 1), 0), reinterpret_tensor(primals_8, (16, 4), (4, 1), 0), buf22, reinterpret_tensor(buf25, (16, 16), (16, 1), 0), buf27, reinterpret_tensor(buf30, (16, 4), (4, 1), 0), buf33, buf35, primals_17, buf39, primals_15, primals_12, reinterpret_tensor(buf23, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf18, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf19, (16, 4, 4), (16, 1, 4), 0), primals_9, primals_5, reinterpret_tensor(buf8, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf3, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf4, (16, 4, 4), (16, 1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_19 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_20 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from abc import ABC
import torch.nn as nn
from torch import matmul
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x4, tmp2, xmask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = yindex // 16
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
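# Reference sketch (illustrative, not generated code): kernels _softmax_2 and
# _softmax_3 above split a numerically stable softmax over the size-4 last axis into
# exp(x - rowmax) followed by division by the row sum of those exponentials.
def _softmax_ref(scores):
    # hypothetical helper for illustration only
    m = scores.max(dim=-1, keepdim=True).values
    e = torch.exp(scores - m)
    return e / e.sum(dim=-1, keepdim=True)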
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
tl.store(out_ptr0 + x4, tmp0, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_5(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-06
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
@triton.jit
def triton_poi_fused_add_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_8(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-06
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_10(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_view_11(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * (x1 % 4 // 4)), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_add_12(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (16, 4), (4, 1))
assert_size_stride(primals_3, (16, 4), (4, 1))
assert_size_stride(primals_4, (16, 4), (4, 1))
assert_size_stride(primals_5, (4, 16), (16, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_9, (16, 4), (4, 1))
assert_size_stride(primals_10, (16, 4), (4, 1))
assert_size_stride(primals_11, (16, 4), (4, 1))
assert_size_stride(primals_12, (4, 16), (16, 1))
assert_size_stride(primals_13, (4,), (1,))
assert_size_stride(primals_14, (4,), (1,))
assert_size_stride(primals_15, (4, 4), (4, 1))
assert_size_stride(primals_16, (4,), (1,))
assert_size_stride(primals_17, (4, 4), (4, 1))
assert_size_stride(primals_18, (4,), (1,))
assert_size_stride(primals_19, (4,), (1,))
assert_size_stride(primals_20, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 16), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 16), (1, 4), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), out=buf2)
del primals_4
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_div_0[grid(256)](buf0, buf3, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf4 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
triton_poi_fused_clone_1[grid(64, 4)](buf1, buf4, 64, 4, XBLOCK=4,
YBLOCK=32, num_warps=4, num_stages=1)
buf5 = reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0)
del buf1
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_2[grid(256)](buf5, buf6, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf7 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused__softmax_3[grid(256)](buf6, buf7, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf8 = buf6
del buf6
triton_poi_fused_clone_4[grid(256)](buf2, buf8, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf9 = reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_4[grid(256)](buf9, buf10, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf11 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf10, (16, 16), (16, 1), 0),
reinterpret_tensor(primals_5, (16, 4), (1, 16), 0), out=buf11)
buf12 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf13 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_native_layer_norm_5[grid(16)](buf11, primals_1,
buf12, buf13, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf14 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_6[grid(64)](buf11, primals_1,
buf12, buf13, primals_6, primals_7, buf14, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_7
buf15 = reinterpret_tensor(buf9, (16, 16), (16, 1), 0)
del buf9
extern_kernels.mm(reinterpret_tensor(buf14, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_9, (4, 16), (1, 4), 0), out=buf15)
buf16 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_8, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_10, (4, 16), (1, 4), 0), out=buf16)
del primals_10
buf17 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_8, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_11, (4, 16), (1, 4), 0), out=buf17)
del primals_11
buf18 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_div_0[grid(256)](buf15, buf18, 256, XBLOCK=
128, num_warps=4, num_stages=1)
buf19 = reinterpret_tensor(buf15, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf15
triton_poi_fused_clone_1[grid(64, 4)](buf16, buf19, 64, 4, XBLOCK=4,
YBLOCK=32, num_warps=4, num_stages=1)
buf20 = reinterpret_tensor(buf16, (16, 4, 4), (16, 4, 1), 0)
del buf16
extern_kernels.bmm(reinterpret_tensor(buf18, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf19, (16, 4, 4), (16, 4, 1), 0), out=buf20
)
buf21 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_2[grid(256)](buf20, buf21, 256, XBLOCK=
256, num_warps=4, num_stages=1)
buf22 = reinterpret_tensor(buf20, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf20
triton_poi_fused__softmax_3[grid(256)](buf21, buf22, 256, XBLOCK=
256, num_warps=4, num_stages=1)
buf23 = buf21
del buf21
triton_poi_fused_clone_4[grid(256)](buf17, buf23, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf24 = reinterpret_tensor(buf17, (16, 4, 4), (16, 4, 1), 0)
del buf17
extern_kernels.bmm(reinterpret_tensor(buf22, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf23, (16, 4, 4), (16, 4, 1), 0), out=buf24
)
buf25 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_4[grid(256)](buf24, buf25, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del buf24
buf26 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf25, (16, 16), (16, 1), 0),
reinterpret_tensor(primals_12, (16, 4), (1, 16), 0), out=buf26)
buf27 = reinterpret_tensor(buf26, (4, 4, 4), (16, 4, 1), 0)
del buf26
triton_poi_fused_add_7[grid(64)](buf27, buf14, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf28 = buf13
del buf13
buf29 = buf12
del buf12
triton_poi_fused_native_layer_norm_8[grid(16)](buf27, buf28, buf29,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf30 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_9[grid(64)](buf27, buf28, buf29,
primals_13, primals_14, buf30, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del primals_14
buf31 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf30, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_15, (4, 4), (1, 4), 0), out=buf31)
buf32 = reinterpret_tensor(buf31, (4, 4, 4), (16, 4, 1), 0)
del buf31
buf39 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_10[grid(64)](buf32,
primals_16, buf39, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_16
buf33 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
triton_poi_fused_view_11[grid(64)](buf32, buf33, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf34 = reinterpret_tensor(buf32, (16, 4), (4, 1), 0)
del buf32
extern_kernels.mm(buf33, reinterpret_tensor(primals_17, (4, 4), (1,
4), 0), out=buf34)
buf35 = reinterpret_tensor(buf34, (4, 4, 4), (16, 4, 1), 0)
del buf34
triton_poi_fused_add_12[grid(64)](buf35, primals_18, buf30, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_18
buf36 = buf29
del buf29
buf37 = buf28
del buf28
triton_poi_fused_native_layer_norm_8[grid(16)](buf35, buf36, buf37,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf38 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_9[grid(64)](buf35, buf36, buf37,
primals_19, primals_20, buf38, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del buf36
del buf37
del primals_20
return (buf38, buf7, buf22, primals_1, primals_6, primals_13,
primals_19, buf7, reinterpret_tensor(buf10, (16, 16), (16, 1), 0),
buf11, reinterpret_tensor(buf14, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_8, (16, 4), (4, 1), 0), buf22,
reinterpret_tensor(buf25, (16, 16), (16, 1), 0), buf27,
reinterpret_tensor(buf30, (16, 4), (4, 1), 0), buf33, buf35,
primals_17, buf39, primals_15, primals_12, reinterpret_tensor(buf23,
(16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf18, (16, 4, 4), (
16, 1, 4), 0), reinterpret_tensor(buf19, (16, 4, 4), (16, 1, 4), 0),
primals_9, primals_5, reinterpret_tensor(buf8, (16, 4, 4), (16, 1,
4), 0), reinterpret_tensor(buf3, (16, 4, 4), (16, 1, 4), 0),
reinterpret_tensor(buf4, (16, 4, 4), (16, 1, 4), 0))
class ScaledDotProductAttention(nn.Module, ABC):
""" Scaled Dot-Product Attention """
def __init__(self, temperature, attn_dropout=0.1):
super().__init__()
self.temperature = temperature
self.dropout = nn.Dropout(attn_dropout, inplace=False)
self.softmax = nn.Softmax(dim=-1)
def forward(self, q, k, v, mask=None):
attn = matmul(q / self.temperature, k.transpose(2, 3))
if mask is not None:
attn = attn.masked_fill(mask == 0, -1000000000.0)
attn = self.dropout(self.softmax(attn))
output = matmul(attn, v)
return output, attn
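# Minimal usage sketch (illustrative; the _demo_* helper and its shapes are
# assumptions chosen to match the (batch, n_head, len, d_k) layout produced by
# MultiHeadAttention below, not code from the source repository).
def _demo_scaled_dot_product_attention():
    attn_layer = ScaledDotProductAttention(temperature=4 ** 0.5)
    q = k = v = torch.rand(4, 4, 4, 4)
    output, attn = attn_layer(q, k, v)
    return output.shape, attn.shape  # both torch.Size([4, 4, 4, 4])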
class MultiHeadAttention(nn.Module, ABC):
def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1):
super().__init__()
self.d_v = d_v
self.d_k = d_k
self.n_head = n_head
self.w_qs = nn.Linear(d_model, n_head * d_k, bias=False)
self.w_ks = nn.Linear(d_model, n_head * d_k, bias=False)
self.w_vs = nn.Linear(d_model, n_head * d_v, bias=False)
self.fc = nn.Linear(n_head * d_v, d_model, bias=False)
self.attention = ScaledDotProductAttention(temperature=d_k ** 0.5)
self.dropout = nn.Dropout(dropout, inplace=True)
self.layer_norm = nn.LayerNorm(d_model, eps=1e-06)
def forward(self, q, k, v, mask=None):
sz_b, len_q, len_k, len_v = q.size(0), q.size(1), k.size(1), v.size(1)
d_k, d_v, n_head = self.d_k, self.d_v, self.n_head
residual = q
q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)
k = self.w_ks(k).view(sz_b, len_k, n_head, d_k)
v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)
q, k, v = q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2)
if mask is not None:
mask = mask.unsqueeze(1)
q, attn = self.attention(q, k, v, mask)
q = q.transpose(1, 2).contiguous().view(sz_b, len_q, -1)
q = self.dropout(self.fc(q))
q += residual
q = self.layer_norm(q)
return q, attn
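# Shape walk-through sketch (illustrative; d_model=4, n_head=4, d_k=d_v=4 mirror the
# (4, 4, 4) guards in call() above, and the _demo_* helper is an assumption).
def _demo_multi_head_attention():
    mha = MultiHeadAttention(n_head=4, d_model=4, d_k=4, d_v=4)
    x = torch.rand(4, 4, 4)  # (sz_b, len_q, d_model)
    q, attn = mha(x, x, x)  # self-attention: q == k == v
    return q.shape, attn.shape  # torch.Size([4, 4, 4]), torch.Size([4, 4, 4, 4])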
class PositionwiseFeedForward(nn.Module, ABC):
def __init__(self, d_in, d_hidden, dropout=0.1):
super().__init__()
self.w_1 = nn.Linear(d_in, d_hidden)
self.w_2 = nn.Linear(d_hidden, d_in)
self.layer_norm = nn.LayerNorm(d_in, eps=1e-06)
self.relu = nn.ReLU(inplace=True)
self.dropout = nn.Dropout(dropout, inplace=True)
def forward(self, x):
residual = x
x = self.w_2(self.relu(self.w_1(x)))
x = self.dropout(x)
x += residual
x = self.layer_norm(x)
return x
class DecoderLayerNew(nn.Module, ABC):
""" Compose with three layers """
def __init__(self, d_model, d_inner, n_head, d_k, d_v, dropout=0.1):
super(DecoderLayerNew, self).__init__()
self.slf_attn = MultiHeadAttention(n_head, d_model, d_k, d_v,
dropout=dropout)
self.enc_attn = MultiHeadAttention(n_head, d_model, d_k, d_v,
dropout=dropout)
self.pos_ffn = PositionwiseFeedForward(d_model, d_inner, dropout=
dropout)
def forward(self, input_0, input_1):
primals_2 = self.slf_attn.w_qs.weight
primals_3 = self.slf_attn.w_ks.weight
primals_4 = self.slf_attn.w_vs.weight
primals_5 = self.slf_attn.fc.weight
primals_6 = self.slf_attn.layer_norm.weight
primals_7 = self.slf_attn.layer_norm.bias
primals_9 = self.enc_attn.w_qs.weight
primals_10 = self.enc_attn.w_ks.weight
primals_11 = self.enc_attn.w_vs.weight
primals_12 = self.enc_attn.fc.weight
primals_13 = self.enc_attn.layer_norm.weight
primals_14 = self.enc_attn.layer_norm.bias
primals_15 = self.pos_ffn.w_1.weight
primals_16 = self.pos_ffn.w_1.bias
primals_17 = self.pos_ffn.w_2.weight
primals_18 = self.pos_ffn.w_2.bias
primals_19 = self.pos_ffn.layer_norm.weight
primals_20 = self.pos_ffn.layer_norm.bias
primals_1 = input_0
primals_8 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20])
return output[0], output[1], output[2]
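# Hedged usage sketch: the input shapes mirror the assert_size_stride guards in
# call() ((4, 4, 4) CUDA tensors); the _demo_* helper is illustrative, not source.
def _demo_decoder_layer_new():
    layer = DecoderLayerNew(d_model=4, d_inner=4, n_head=4, d_k=4, d_v=4).cuda()
    dec_input = torch.rand(4, 4, 4, device='cuda')
    enc_output = torch.rand(4, 4, 4, device='cuda')
    out, slf_attn, enc_attn = layer(dec_input, enc_output)
    return out.shape  # torch.Size([4, 4, 4])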
| superMC5657/transformer | DecoderLayer | false | 10,892 | ["MIT"] | 0 | b9d9ca3a5f307f6587330a8235e8d5a2a3650510 | https://github.com/superMC5657/transformer/tree/b9d9ca3a5f307f6587330a8235e8d5a2a3650510 |
FrameMaxPool | import torch
import torch.nn as nn
class FrameMaxPool(nn.Module):
def __init__(self, input_size, hidden_size, stride):
super(FrameMaxPool, self).__init__()
self.vis_conv = nn.Conv1d(input_size, hidden_size, 1, 1)
self.max_pool = nn.MaxPool1d(stride)
def forward(self, visual_input):
vis_h = torch.relu(self.vis_conv(visual_input))
vis_h = self.max_pool(vis_h)
return vis_h
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'hidden_size': 4, 'stride': 1}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_0(
        in_out_ptr0, in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = tl.full([1], 0, tl.int8)
tmp6 = 0.0
tmp7 = tmp4 <= tmp6
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp5, xmask)
tl.store(out_ptr1 + x2, tmp4, xmask)
tl.store(out_ptr2 + x2, tmp7, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1,
4, 4), (16, 4, 1), 0), primals_1, stride=(1,), padding=(0,),
dilation=(1,), transposed=False, output_padding=(0,), groups=1,
bias=None)
assert_size_stride(buf0, (1, 4, 4), (16, 4, 1))
buf1 = reinterpret_tensor(buf0, (4, 4), (4, 1), 0)
del buf0
buf2 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.int8)
buf3 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_0[grid
(16)](buf1, primals_2, buf2, buf3, buf4, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_2
return reinterpret_tensor(buf3, (4, 4), (4, 1), 0
), primals_1, reinterpret_tensor(primals_3, (1, 4, 4), (16, 4, 1), 0
), reinterpret_tensor(buf1, (4, 1, 4), (4, 4, 1), 0), buf2, buf4
class FrameMaxPoolNew(nn.Module):
def __init__(self, input_size, hidden_size, stride):
super(FrameMaxPoolNew, self).__init__()
self.vis_conv = nn.Conv1d(input_size, hidden_size, 1, 1)
self.max_pool = nn.MaxPool1d(stride)
def forward(self, input_0):
primals_1 = self.vis_conv.weight
primals_2 = self.vis_conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
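# Hedged usage sketch (the _demo_* helper is an assumption; shapes follow
# get_inputs()/get_init_inputs() in the eager source above): a (4, 4) CUDA input is
# treated by call() as an unbatched (channels, length) sequence and, with stride=1,
# the max pool is an identity window, so the output keeps the (4, 4) shape.
def _demo_frame_max_pool_new():
    module = FrameMaxPoolNew(input_size=4, hidden_size=4, stride=1).cuda()
    visual_input = torch.rand(4, 4, device='cuda')
    return module(visual_input).shape  # torch.Size([4, 4])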
| CFM-MSG/Code_LEORN | FrameMaxPool | false | 4,922 | ["MIT"] | 1 | fabea1e1ded973a4db692e51e2df442bde55f626 | https://github.com/CFM-MSG/Code_LEORN/tree/fabea1e1ded973a4db692e51e2df442bde55f626 |
ConvEncoder | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
TEEN-D/Code_Opt_Triton_Shuffled
Overview
This dataset, TEEN-D/Code_Opt_Triton_Shuffled, is a shuffled version of the extended TEEN-D/Code_Opt_Triton dataset (which itself is an extension of GPUMODE/Inductor_Created_Data_Permissive). It provides a collection of pairs of original (PyTorch or Triton) programs and their corresponding optimized Triton code, designed for training machine learning models for code translation and optimization tasks targeting GPUs.
The key characteristic of this dataset is that the order of the data points has been randomly shuffled. This randomization is crucial for training robust machine learning models as it helps to prevent the model from learning spurious correlations based on the order of the training examples.
This dataset is part of an effort to improve the ability of LLMs to generate efficient GPU kernels by providing them with a diverse and appropriately structured training dataset.
License: This dataset is derived from the GPUMODE/Inductor_Created_Data_Permissive dataset, which is released under the MIT License. Consequently, this extended and shuffled version is also made available under the MIT License. You are free to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of this dataset, subject to the terms of the MIT License.
Rationale
This extended dataset, pairing optimized Triton with both original Triton and Python code, enhances training for preference-based methods like GRPO/DPO by:
- Providing richer preference signals through diverse (original, optimized) pairs.
- Improving the LLM's understanding of semantic equivalence between PyTorch and Triton.
- Reducing bias towards a specific input format (Triton vs. Python).
- Potentially creating more meaningful comparison groups for GRPO.
- Offering more diverse "rejected" samples for DPO against the "accepted" optimized Triton (a minimal pairing sketch follows this list).
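As one illustration of the DPO framing above, each row can be mapped to a (prompt, chosen, rejected) record, with the optimized Triton code as the preferred completion. This is a minimal sketch under assumptions of ours: the prompt template and the choice of the unoptimized code as the rejected sample are illustrative, not part of the dataset.
from datasets import load_dataset
dataset = load_dataset("TEEN-D/Code_Opt_Triton_Shuffled", split="train")
def to_dpo_record(example):
    # Hypothetical prompt template: the original (PyTorch or Triton) code is
    # the context, the optimized Triton code is "chosen", and the
    # unoptimized input stands in as "rejected".
    return {
        "prompt": "Optimize the following code into efficient Triton:\n"
        + example["original_triton_python_code"],
        "chosen": example["optimised_triton_code"],
        "rejected": example["original_triton_python_code"],
    }
dpo_dataset = dataset.map(to_dpo_record)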
Dataset Creation
This dataset was created through the following process:
Extension of the Source Dataset: The GPUMODE/Inductor_Created_Data_Permissive dataset was first extended to create the TEEN-D/Code_Opt_Triton dataset. This involved pairing each optimized Triton code snippet with both its original Triton code (if available) and its corresponding Python code.
Shuffling: The resulting extended dataset was then shuffled using the shuffle() method provided by the datasets library. A fixed random seed was used to ensure reproducibility of the shuffling process.
Data Structure
The data structure of this dataset is identical to that of the TEEN-D/Code_Opt_Triton dataset. Each entry includes the following fields:
- entry_point: The entry point of the code.
- original_triton_python_code: Contains either the original Triton code or the Python code from the source dataset.
- optimised_triton_code: The optimized Triton code generated by torch.compile.
- repo_name: The name of the repository in the format username/repository.
- module_name: The name of the PyTorch module.
- synthetic: A boolean indicating if the data is synthetic.
- uuid: A unique identifier for the entry.
- licenses: List of licenses associated with the repository.
- stars: Number of GitHub stars the repository has.
- sha: The commit SHA hash used for version reference.
- repo_link: Direct link to the repository at the specific commit (GitHub URL).
- stringlengths: (Please refer to the README.md of the TEEN-D/Code_Opt_Triton dataset for details on this field.)
The sole difference between this dataset and the original extended version is the randomized order of the examples.
Usage Examples
You can load this shuffled dataset using the datasets library in Python:
from datasets import load_dataset
shuffled_dataset = load_dataset("TEEN-D/Code_Opt_Triton_Shuffled")
# Example: Print the first 5 examples
print(shuffled_dataset['train'][:5])
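To inspect an individual record, you can index a single row and read the fields described under Data Structure; the character-length comparison below is just an illustrative sanity check, not a measure of optimization quality.
# Look at the metadata and code sizes of one example
example = shuffled_dataset['train'][0]
print(example['module_name'], example['repo_name'], example['stars'])
print(len(example['original_triton_python_code']), '->',
      len(example['optimised_triton_code']), 'characters')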