/*
* Copyright (c) 2022-2024, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once

#include "cutlass_extensions/include/cutlass_extensions/gemm/kernel/mixed_gemm_B_layout.h"

#include "common.h"

#include <cassert>
#include <cuda_runtime.h>
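
// Queries the compute capability of the current CUDA device and encodes it
// as major * 10 + minor (e.g. 80 for SM80/Ampere). Errors from the CUDA
// runtime calls are not checked here.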
inline int getSMVersion()
{
    int device{-1};
    cudaGetDevice(&device);
    int sm_major = 0;
    int sm_minor = 0;
    cudaDeviceGetAttribute(&sm_major, cudaDevAttrComputeCapabilityMajor, device);
    cudaDeviceGetAttribute(&sm_minor, cudaDevAttrComputeCapabilityMinor, device);
    return sm_major * 10 + sm_minor;
}

namespace tensorrt_llm
{
namespace kernels
{
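
// Compile-time trait marking which (TypeB, Layout) combinations have a
// weight-only batched GEMV implementation. The primary template defaults to
// false; the explicit specializations below opt in the supported pairs.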
template <typename TypeB, typename Layout>
struct SupportedLayout
{
    static constexpr bool value = false;
};

template <>
struct SupportedLayout<uint8_t, cutlass::layout::ColumnMajorTileInterleave<64, 2>>
{
    static constexpr bool value = true;
};

template <>
struct SupportedLayout<cutlass::uint4b_t, cutlass::layout::ColumnMajorTileInterleave<64, 4>>
{
    static constexpr bool value = true;
};
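
// Resolves the B-operand layout that LayoutDetailsB selects for TypeB on the
// given CUTLASS architecture tag, and reports whether it is supported.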
template <typename TypeB, typename Arch>
bool isEnabled()
{
    using Layout = typename cutlass::gemm::kernel::LayoutDetailsB<TypeB, Arch>::Layout;
    return SupportedLayout<TypeB, Layout>::value;
}
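
// Maps a runtime SM version onto the matching CUTLASS architecture tag. Note
// that everything from SM80 up to and including SM90 is routed through the
// Sm80 layout details.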
template <typename TypeB>
bool isEnabledForArch(int arch)
{
    if (arch >= 70 && arch < 75)
    {
        return isEnabled<TypeB, cutlass::arch::Sm70>();
    }
    else if (arch >= 75 && arch < 80)
    {
        return isEnabled<TypeB, cutlass::arch::Sm75>();
    }
    else if (arch >= 80 && arch <= 90)
    {
        return isEnabled<TypeB, cutlass::arch::Sm80>();
    }
    else
    {
        // TLLM_CHECK_WITH_INFO(false, "Unsupported Arch");
        assert(0);
        return false;
    }
}
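
// Entry point used at dispatch time: reports whether the weight-only batched
// GEMV path is available for the given quantization type on the current
// device.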
inline bool isWeightOnlyBatchedGemvEnabled(WeightOnlyQuantType qtype)
{
    const int arch = getSMVersion();
    if (qtype == WeightOnlyQuantType::Int4b)
    {
        return isEnabledForArch<cutlass::uint4b_t>(arch);
    }
    else if (qtype == WeightOnlyQuantType::Int8b)
    {
        return isEnabledForArch<uint8_t>(arch);
    }
    else
    {
        // TLLM_CHECK_WITH_INFO(false, "Unsupported WeightOnlyQuantType");
        assert(0);
        return false;
    }
}
} // namespace kernels
} // namespace tensorrt_llm
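
// A minimal call-site sketch (hypothetical; the guard around an actual kernel
// launch lives elsewhere in TensorRT-LLM):
//
//   using namespace tensorrt_llm::kernels;
//   if (isWeightOnlyBatchedGemvEnabled(WeightOnlyQuantType::Int4b))
//   {
//       // safe to take the batched GEMV path for int4 weights
//   }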