Compare commits

...

4 Commits

Author SHA1 Message Date
longfei li
a1aa7fd0d6 Merge branch 'main' of http://192.168.0.100:3000/squall/torch_ext 2025-03-27 03:49:33 +08:00
longfei li
c77f9602ea test triton, seems to work very well. 2025-03-27 03:44:28 +08:00
longfei li
58093d7a71 Tried writing a softmax kernel and learned a bit more. It works now. 2024-12-29 01:23:00 +08:00
longfei li
acdacc2592 Just a quick test. 2024-12-27 21:55:12 +08:00
10 changed files with 974 additions and 8 deletions

View File

@@ -1,6 +1,15 @@
#ifndef CORE_H
#define CORE_H
#include <torch/extension.h>
#include <ATen/cuda/CUDAContext.h>
#include <c10/cuda/CUDAGuard.h>
#define VLLM_DISPATCH_CASE_FLOATING_TYPES(...) \
AT_DISPATCH_CASE(at::ScalarType::Float, __VA_ARGS__) \
AT_DISPATCH_CASE(at::ScalarType::Half, __VA_ARGS__) \
AT_DISPATCH_CASE(at::ScalarType::BFloat16, __VA_ARGS__)
#define VLLM_DISPATCH_FLOATING_TYPES(TYPE, NAME, ...) \
AT_DISPATCH_SWITCH(TYPE, NAME, VLLM_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__))
#define TYPING_DISPATCH(scalar_t, ...) \
switch (scalar_t) \
@@ -49,5 +58,6 @@ void test_cute_tensor();
void md_mm(const torch::Tensor &src);
void block_sum(const torch::Tensor &src, torch::Tensor &dest);
void md_block_sum(const torch::Tensor &src, torch::Tensor &dest);
void softmax(const torch::Tensor &src, torch::Tensor &dest);
void rms_norm(torch::Tensor &states, float eps, float gamma);
#endif

View File

@@ -18,5 +18,6 @@ PYBIND11_MODULE(TORCH_EXTENSION_NAME, m)
m.def("md_mm", &md_mm, "just a test of multi dimension mm");
m.def("block_sum", &block_sum, "test block sum");
m.def("md_block_sum", &md_block_sum, "multi dimension block sum");
m.def("softmax", &softmax, "test softmax example");
m.def("rms_norm", &rms_norm, "rms noram");
}
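
These bindings are called from Python through the built extension. A minimal usage sketch for the new softmax binding, assuming the module builds as torch_cuda_ext.core (the name the tests below import); shapes and tolerance are illustrative:

import torch
import torch_cuda_ext.core as core  # assumed extension name, as imported in the tests below

a = torch.randn(8, 2048).half().cuda()
b = torch.empty_like(a)
core.softmax(a, b)  # row-wise softmax written into the preallocated output
print(torch.allclose(b, torch.softmax(a, dim=1), atol=1e-3))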

View File

@@ -117,15 +117,133 @@ __global__ void md_row_sum_kernel(const float *src, float *dest, int stride_a, i
void md_block_sum(const torch::Tensor &src, torch::Tensor &dest)
{
int block_size = 1024;
dim3 grid(src.size(0), src.size(1), (src.size(2) + block_size - 1) / block_size);
dim3 block(block_size);
md_row_sum_kernel<<<grid, block>>>(src.data_ptr<float>(),
dest.data_ptr<float>(),
src.stride(0),
src.stride(1),
src.size(0),
src.size(1),
src.size(2));
printf("this is the device num:%d\n", src.get_device());
int dev = src.get_device();
cudaStream_t stream = at::cuda::getCurrentCUDAStream(dev);
md_row_sum_kernel<<<grid, block, 0, stream>>>(src.data_ptr<float>(),
dest.data_ptr<float>(),
src.stride(0),
src.stride(1),
src.size(0),
src.size(1),
src.size(2));
}
void interaction(const torch::Tensor &src)
{
int block_size = 1024;
dim3 grid(src.size(0), src.size(1), (src.size(2) + block_size - 1) / block_size);
dim3 block(block_size);
printf("this is the device num:%d\n", src.get_device());
int dev = src.get_device();
cudaStream_t stream = at::cuda::getCurrentCUDAStream(dev);
// placeholder: other stream-side work could go here.
}
template <typename s_scalar>
__device__ s_scalar exp(s_scalar a)
{
return expf(a);
}
template <>
__device__ __nv_bfloat16 exp(__nv_bfloat16 a)
{
float tmp = __bfloat162float(a);
float tmp_score = expf(tmp);
return __float2bfloat16(tmp_score);
}
template <>
__device__ __half exp(__half a)
{
float tmp = __half2float(a);
float tmp_score = expf(tmp);
return __float2half(tmp_score);
}
template <>
__device__ float exp(float a)
{
return expf(a);
}
template <typename scalar_t>
__device__ float fi_cast(scalar_t a)
{
return a;
}
template <>
__device__ float fi_cast(__nv_bfloat16 a)
{
return __bfloat162float(a);
}
template <>
__device__ float fi_cast(__half a)
{
return __half2float(a);
}
template <int BLOCK_SIZE, typename scalar_t>
__global__ void softmax_kernel(const scalar_t *src, scalar_t *dest, int hidden_dim)
{
int tid = threadIdx.x;
int offset = blockIdx.x * hidden_dim;
__shared__ scalar_t smem[BLOCK_SIZE];
float local_sum = 0.0f;
for (int i = tid; i < hidden_dim; i += blockDim.x)
{
// exponentiate and accumulate this thread's partial sum
int tmp_index = offset + i;
scalar_t tmp_score = exp(src[tmp_index]);
dest[tmp_index] = tmp_score;
local_sum += tmp_score;
}
// blockDim.x == BLOCK_SIZE here, so every thread owns exactly one smem slot.
smem[tid] = local_sum;
__syncthreads();
typedef cub::BlockReduce<scalar_t, BLOCK_SIZE> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
scalar_t sum = BlockReduce(temp_storage).Sum(smem[tid]);
// cub::BlockReduce returns the full sum only to thread 0, so broadcast it via shared memory.
if (tid == 0)
smem[0] = sum;
__syncthreads();
for (int i = tid; i < hidden_dim; i += blockDim.x)
{
int tmp_index = offset + i;
scalar_t tmp_score = dest[tmp_index] / smem[0];
dest[tmp_index] = tmp_score;
}
}
void softmax(const torch::Tensor &src, torch::Tensor &dest)
{
int batch_num = src.size(0);
int hidden_dim = src.size(1);
int block_size = 1024;
dim3 grid(batch_num);
dim3 block(block_size);
VLLM_DISPATCH_FLOATING_TYPES(
src.scalar_type(), "softmax",
[&]
{
int dev = src.get_device();
cudaStream_t stream = at::cuda::getCurrentCUDAStream(dev);
softmax_kernel<1024, scalar_t><<<grid, block, 0, stream>>>(
src.data_ptr<scalar_t>(),
dest.data_ptr<scalar_t>(),
hidden_dim); });
}
template <int head_num = 8>
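
One note on the softmax above: the kernel exponentiates the raw inputs without first subtracting the row max, so large-magnitude half/bfloat16 inputs can overflow, whereas torch.softmax and the Triton kernel in tests/test_triton.py subtract the max first. A minimal PyTorch sketch of that numerically stable form, for comparison only (function name is illustrative):

import torch

def stable_softmax(x: torch.Tensor) -> torch.Tensor:
    # Subtract the row max before exponentiating, then normalize.
    x_max = x.max(dim=-1, keepdim=True).values
    e = torch.exp(x - x_max)
    return e / e.sum(dim=-1, keepdim=True)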

36
csrc/quantize.cu Normal file
View File

@@ -0,0 +1,36 @@
#include <cuda_fp16.h>
#include <cuda_fp8.h>
__global__ void quantize(const half *src, __nv_fp8_storage_t *dest, int x_len, int y_len)
{
int x_start = threadIdx.x * blockDim.x;
int y_start = threadIdx.y * blockDim.y;
__shared__ half max_value;
max_value = __float2half(-10000.0f);
for (int i = 0; i < blockDim.x; i++)
{
for (int j = 0; j < blockDim.y; j++)
{
if (x_start + i < x_len && y_start + j < y_len)
{
int real_offset = (y_start + j) * x_len + x_start + i;
max_value = __hmax(src[real_offset], max_value);
}
}
}
for (int i = 0; i < blockDim.x; i++)
{
for (int j = 0; j < blockDim.y; j++)
{
if (x_start + i < x_len && y_start + j < y_len)
{
int real_offset = (y_start + j) * x_len + x_start + i;
half tmp = __hdiv(src[real_offset], max_value);
dest[real_offset] = __nv_cvt_halfraw_to_fp8(__nv_half_raw(tmp), __NV_SATFINITE, __NV_E5M2);
}
}
}
}
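
A rough PyTorch sketch of what this new kernel appears to compute, namely dividing by a max value and casting to FP8 E5M2; this reading of the intent is an assumption, and the sketch uses a tensor-wide max where the kernel works tile by tile (torch.float8_e5m2 is the same dtype used in tests/test_mma.py below):

import torch

def quantize_e5m2_reference(src: torch.Tensor) -> torch.Tensor:
    # Scale by the maximum value, then cast to 8-bit float (E5M2).
    max_value = src.max()
    return (src / max_value).to(torch.float8_e5m2)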

View File

@@ -11,6 +11,7 @@ files = [
"csrc/core_bind.cpp",
"csrc/max.cu",
"csrc/md.cu",
"csrc/quantize.cu",
"csrc/layernorm.cu",
]
extension = CUDAExtension(

View File

@@ -2,7 +2,7 @@ import torch
import torch_cuda_ext.core as core
n = 1000000
for i in range(1000):
for i in range(100):
src = torch.randn(size=(n,)).float().cuda()
dest_n = int((n + 1024 - 1) / 1024)
dest = torch.zeros(size=(dest_n,)).float().cuda()
@@ -31,3 +31,48 @@ core.md_block_sum(src, dest)
real_sum = src.sum(dim=-1)
diff = real_sum - dest
print(diff)
for k in range(128, 4096, 128):
for j in range(1024, 4096, 1024):
a = torch.randn(size=(k, j)).half().cuda()
b = torch.empty_like(a)
num_runs = 100
times = []
for _ in range(num_runs):
start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
start.record()
core.softmax(a, b)
end.record()
torch.cuda.synchronize() # wait for the CUDA ops to finish
elapsed_time = start.elapsed_time(end) / 1000 # convert ms to seconds
times.append(elapsed_time)
own_avg_time = sum(times) / num_runs
own_std_time = (sum((t - own_avg_time) ** 2 for t in times) / num_runs) ** 0.5
print(f"own softmax cost time: {own_avg_time}, {own_std_time}")
times = []
for _ in range(num_runs):
start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
start.record()
res = torch.softmax(a, dim=1)
end.record()
torch.cuda.synchronize() # wait for the CUDA ops to finish
elapsed_time = start.elapsed_time(end) / 1000 # convert ms to seconds
times.append(elapsed_time)
avg_time = sum(times) / num_runs
std_time = (sum((t - avg_time) ** 2 for t in times) / num_runs) ** 0.5
print(f"torch softmax cost time: {avg_time}, {std_time}")
# print("this is b", b)
diff = (res - b).abs().max()
if diff < 1e-4:
print("softmax is good")
time_diff_rate = (own_avg_time - avg_time) / avg_time
print(f"{k}, {j} matrix result {time_diff_rate}")
else:
print("softmax is not equal")

468
tests/test_mma.py Normal file
View File

@@ -0,0 +1,468 @@
import torch
import triton
import triton.language as tl
DEVICE = "cuda"
def is_cuda():
return triton.runtime.driver.active.get_current_target().backend == "cuda"
def is_hip_cdna2():
target = triton.runtime.driver.active.get_current_target()
return target.backend == "hip" and target.arch == "gfx90a"
def get_cuda_autotune_config():
return [
triton.Config(
{
"BLOCK_SIZE_M": 128,
"BLOCK_SIZE_N": 256,
"BLOCK_SIZE_K": 64,
"GROUP_SIZE_M": 8,
},
num_stages=3,
num_warps=8,
),
triton.Config(
{
"BLOCK_SIZE_M": 64,
"BLOCK_SIZE_N": 256,
"BLOCK_SIZE_K": 32,
"GROUP_SIZE_M": 8,
},
num_stages=4,
num_warps=4,
),
triton.Config(
{
"BLOCK_SIZE_M": 128,
"BLOCK_SIZE_N": 128,
"BLOCK_SIZE_K": 32,
"GROUP_SIZE_M": 8,
},
num_stages=4,
num_warps=4,
),
triton.Config(
{
"BLOCK_SIZE_M": 128,
"BLOCK_SIZE_N": 64,
"BLOCK_SIZE_K": 32,
"GROUP_SIZE_M": 8,
},
num_stages=4,
num_warps=4,
),
triton.Config(
{
"BLOCK_SIZE_M": 64,
"BLOCK_SIZE_N": 128,
"BLOCK_SIZE_K": 32,
"GROUP_SIZE_M": 8,
},
num_stages=4,
num_warps=4,
),
triton.Config(
{
"BLOCK_SIZE_M": 128,
"BLOCK_SIZE_N": 32,
"BLOCK_SIZE_K": 32,
"GROUP_SIZE_M": 8,
},
num_stages=4,
num_warps=4,
),
triton.Config(
{
"BLOCK_SIZE_M": 64,
"BLOCK_SIZE_N": 32,
"BLOCK_SIZE_K": 32,
"GROUP_SIZE_M": 8,
},
num_stages=5,
num_warps=2,
),
triton.Config(
{
"BLOCK_SIZE_M": 32,
"BLOCK_SIZE_N": 64,
"BLOCK_SIZE_K": 32,
"GROUP_SIZE_M": 8,
},
num_stages=5,
num_warps=2,
),
# Good config for fp8 inputs.
triton.Config(
{
"BLOCK_SIZE_M": 128,
"BLOCK_SIZE_N": 256,
"BLOCK_SIZE_K": 128,
"GROUP_SIZE_M": 8,
},
num_stages=3,
num_warps=8,
),
triton.Config(
{
"BLOCK_SIZE_M": 256,
"BLOCK_SIZE_N": 128,
"BLOCK_SIZE_K": 128,
"GROUP_SIZE_M": 8,
},
num_stages=3,
num_warps=8,
),
triton.Config(
{
"BLOCK_SIZE_M": 256,
"BLOCK_SIZE_N": 64,
"BLOCK_SIZE_K": 128,
"GROUP_SIZE_M": 8,
},
num_stages=4,
num_warps=4,
),
triton.Config(
{
"BLOCK_SIZE_M": 64,
"BLOCK_SIZE_N": 256,
"BLOCK_SIZE_K": 128,
"GROUP_SIZE_M": 8,
},
num_stages=4,
num_warps=4,
),
triton.Config(
{
"BLOCK_SIZE_M": 128,
"BLOCK_SIZE_N": 128,
"BLOCK_SIZE_K": 128,
"GROUP_SIZE_M": 8,
},
num_stages=4,
num_warps=4,
),
triton.Config(
{
"BLOCK_SIZE_M": 128,
"BLOCK_SIZE_N": 64,
"BLOCK_SIZE_K": 64,
"GROUP_SIZE_M": 8,
},
num_stages=4,
num_warps=4,
),
triton.Config(
{
"BLOCK_SIZE_M": 64,
"BLOCK_SIZE_N": 128,
"BLOCK_SIZE_K": 64,
"GROUP_SIZE_M": 8,
},
num_stages=4,
num_warps=4,
),
triton.Config(
{
"BLOCK_SIZE_M": 128,
"BLOCK_SIZE_N": 32,
"BLOCK_SIZE_K": 64,
"GROUP_SIZE_M": 8,
},
num_stages=4,
num_warps=4,
),
]
def get_hip_autotune_config():
return [
triton.Config(
{
"BLOCK_SIZE_M": 128,
"BLOCK_SIZE_N": 256,
"BLOCK_SIZE_K": 16,
"GROUP_SIZE_M": 1,
"waves_per_eu": 2,
},
num_warps=4,
num_stages=2,
),
triton.Config(
{
"BLOCK_SIZE_M": 256,
"BLOCK_SIZE_N": 256,
"BLOCK_SIZE_K": 16,
"GROUP_SIZE_M": 4,
"waves_per_eu": 2,
},
num_warps=8,
num_stages=2,
),
triton.Config(
{
"BLOCK_SIZE_M": 128,
"BLOCK_SIZE_N": 128,
"BLOCK_SIZE_K": 32,
"GROUP_SIZE_M": 1,
"waves_per_eu": 2,
},
num_warps=8,
num_stages=2,
),
triton.Config(
{
"BLOCK_SIZE_M": 64,
"BLOCK_SIZE_N": 128,
"BLOCK_SIZE_K": 32,
"GROUP_SIZE_M": 8,
"waves_per_eu": 3,
},
num_warps=4,
num_stages=2,
),
triton.Config(
{
"BLOCK_SIZE_M": 64,
"BLOCK_SIZE_N": 64,
"BLOCK_SIZE_K": 32,
"GROUP_SIZE_M": 1,
"waves_per_eu": 8,
},
num_warps=4,
num_stages=2,
),
]
def get_autotune_config():
if is_cuda():
return get_cuda_autotune_config()
else:
return get_hip_autotune_config()
# `triton.jit`'ed functions can be auto-tuned by using the `triton.autotune` decorator, which consumes:
# - A list of `triton.Config` objects that define different configurations of
# meta-parameters (e.g., `BLOCK_SIZE_M`) and compilation options (e.g., `num_warps`) to try
# - An auto-tuning *key* whose change in values will trigger evaluation of all the
# provided configs
@triton.autotune(
configs=get_autotune_config(),
key=["M", "N", "K"],
)
@triton.jit
def matmul_kernel(
# Pointers to matrices
a_ptr,
b_ptr,
c_ptr,
# Matrix dimensions
M,
N,
K,
# The stride variables represent how much to increase the ptr by when moving by 1
# element in a particular dimension. E.g. `stride_am` is how much to increase `a_ptr`
# by to get the element one row down (A has M rows).
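# For example, a contiguous (M, K) tensor has stride_am == K and stride_ak == 1.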
stride_am,
stride_ak, #
stride_bk,
stride_bn, #
stride_cm,
stride_cn,
# Meta-parameters
BLOCK_SIZE_M: tl.constexpr,
BLOCK_SIZE_N: tl.constexpr,
BLOCK_SIZE_K: tl.constexpr, #
GROUP_SIZE_M: tl.constexpr, #
ACTIVATION: tl.constexpr, #
):
"""Kernel for computing the matmul C = A x B.
A has shape (M, K), B has shape (K, N) and C has shape (M, N)
"""
# -----------------------------------------------------------
# Map program ids `pid` to the block of C it should compute.
# This is done in a grouped ordering to promote L2 data reuse.
# See above `L2 Cache Optimizations` section for details.
pid = tl.program_id(axis=0)
num_pid_m = tl.cdiv(M, BLOCK_SIZE_M)
num_pid_n = tl.cdiv(N, BLOCK_SIZE_N)
num_pid_in_group = GROUP_SIZE_M * num_pid_n
group_id = pid // num_pid_in_group
first_pid_m = group_id * GROUP_SIZE_M
group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M)
pid_m = first_pid_m + ((pid % num_pid_in_group) % group_size_m)
pid_n = (pid % num_pid_in_group) // group_size_m
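# Worked example: with num_pid_m = 4, num_pid_n = 4, GROUP_SIZE_M = 2, programs 0..7 map to
# (pid_m, pid_n) = (0,0), (1,0), (0,1), (1,1), (0,2), (1,2), (0,3), (1,3):
# each group of 8 programs sweeps a 2-row band of C column by column, reusing the same rows of A.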
# ----------------------------------------------------------
# Create pointers for the first blocks of A and B.
# We will advance this pointer as we move in the K direction
# and accumulate
# `a_ptrs` is a block of [BLOCK_SIZE_M, BLOCK_SIZE_K] pointers
# `b_ptrs` is a block of [BLOCK_SIZE_K, BLOCK_SIZE_N] pointers
# See above `Pointer Arithmetic` section for details
offs_am = (pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)) % M
offs_bn = (pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)) % N
offs_k = tl.arange(0, BLOCK_SIZE_K)
a_ptrs = a_ptr + (offs_am[:, None] * stride_am + offs_k[None, :] * stride_ak)
b_ptrs = b_ptr + (offs_k[:, None] * stride_bk + offs_bn[None, :] * stride_bn)
# -----------------------------------------------------------
# Iterate to compute a block of the C matrix.
# We accumulate into a `[BLOCK_SIZE_M, BLOCK_SIZE_N]` block
# of fp32 values for higher accuracy.
# `accumulator` will be converted back to fp16 after the loop.
accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
for k in range(0, tl.cdiv(K, BLOCK_SIZE_K)):
# Load the next block of A and B, generate a mask by checking the K dimension.
# If it is out of bounds, set it to 0.
a = tl.load(a_ptrs, mask=offs_k[None, :] < K - k * BLOCK_SIZE_K, other=0.0)
b = tl.load(b_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K, other=0.0)
# We accumulate along the K dimension.
accumulator = tl.dot(a, b, accumulator)
# Advance the ptrs to the next K block.
a_ptrs += BLOCK_SIZE_K * stride_ak
b_ptrs += BLOCK_SIZE_K * stride_bk
# You can fuse arbitrary activation functions here
# while the accumulator is still in FP32!
if ACTIVATION == "leaky_relu":
accumulator = leaky_relu(accumulator)
c = accumulator.to(tl.float16)
# -----------------------------------------------------------
# Write back the block of the output matrix C with masks.
offs_cm = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
offs_cn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
c_ptrs = c_ptr + stride_cm * offs_cm[:, None] + stride_cn * offs_cn[None, :]
c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < N)
tl.store(c_ptrs, c, mask=c_mask)
# We can fuse `leaky_relu` by providing it as an `ACTIVATION` meta-parameter in `matmul_kernel`.
@triton.jit
def leaky_relu(x):
return tl.where(x >= 0, x, 0.01 * x)
def matmul(a, b, activation=""):
# Check constraints.
assert a.shape[1] == b.shape[0], "Incompatible dimensions"
assert a.is_contiguous(), "Matrix A must be contiguous"
M, K = a.shape
K, N = b.shape
# Allocates output.
c = torch.empty((M, N), device=a.device, dtype=torch.float16)
# 1D launch kernel where each block gets its own program.
grid = lambda META: (
triton.cdiv(M, META["BLOCK_SIZE_M"]) * triton.cdiv(N, META["BLOCK_SIZE_N"]),
)
matmul_kernel[grid](
a,
b,
c, #
M,
N,
K, #
a.stride(0),
a.stride(1), #
b.stride(0),
b.stride(1), #
c.stride(0),
c.stride(1), #
ACTIVATION=activation, #
)
return c
torch.manual_seed(0)
a = torch.randn((512, 512), device=DEVICE, dtype=torch.float16)
b = torch.randn((512, 512), device=DEVICE, dtype=torch.float16)
triton_output = matmul(a, b)
torch_output = torch.matmul(a, b)
print(f"triton_output_with_fp16_inputs={triton_output}")
print(f"torch_output_with_fp16_inputs={torch_output}")
# Bigger tolerance for AMD CDNA2 devices.
# CDNA2 devices use reduced precision fp16 and bf16 and flush input and
# output denormal values to zero. Detailed info is at: https://pytorch.org/docs/stable/notes/numerical_accuracy.html#reduced-precision-fp16-and-bf16-gemms-and-convolutions-on-amd-instinct-mi200-devices
rtol = 1e-2 if is_hip_cdna2() else 0
if torch.allclose(triton_output, torch_output, atol=1e-2, rtol=rtol):
print("✅ Triton and Torch match")
else:
print("❌ Triton and Torch differ")
TORCH_HAS_FP8 = hasattr(torch, "float8_e5m2")
if TORCH_HAS_FP8 and is_cuda():
torch.manual_seed(0)
a = torch.randn((512, 512), device=DEVICE, dtype=torch.float16)
b = torch.randn((512, 512), device=DEVICE, dtype=torch.float16)
a = a.to(torch.float8_e5m2)
# pre-transpose b for efficiency.
b = b.T
b = b.to(torch.float8_e5m2)
triton_output = matmul(a, b)
torch_output = torch.matmul(a.to(torch.float16), b.to(torch.float16))
print(f"triton_output_with_fp8_inputs={triton_output}")
print(f"torch_output_with_fp8_inputs={torch_output}")
if torch.allclose(triton_output, torch_output, atol=0.125, rtol=0):
print("✅ Triton and Torch match")
else:
print("❌ Triton and Torch differ")
ref_lib = "cuBLAS" if is_cuda() else "rocBLAS"
configs = []
for fp8_inputs in [False, True]:
if fp8_inputs and (not TORCH_HAS_FP8 or not is_cuda()):
continue
configs.append(
triton.testing.Benchmark(
x_names=["M", "N", "K"], # Argument names to use as an x-axis for the plot
x_vals=[
128 * i for i in range(2, 33)
], # Different possible values for `x_name`
line_arg="provider", # Argument name whose value corresponds to a different line in the plot
# Possible values for `line_arg`
# Don't compare to cublas for fp8 cases as torch.matmul doesn't support fp8 at the moment.
line_vals=(
["triton"] if fp8_inputs else [ref_lib.lower(), "triton"]
),
line_names=["Triton"] if fp8_inputs else [ref_lib, "Triton"], # Label name for the lines
styles=[("green", "-"), ("blue", "-")], # Line styles
ylabel="TFLOPS", # Label name for the y-axis
plot_name="matmul-performance-"
+ (
"fp16" if not fp8_inputs else "fp8"
), # Name for the plot, used also as a file name for saving the plot.
args={"fp8_inputs": fp8_inputs},
)
)
@triton.testing.perf_report(configs)
def benchmark(M, N, K, provider, fp8_inputs):
a = torch.randn((M, K), device=DEVICE, dtype=torch.float16)
b = torch.randn((K, N), device=DEVICE, dtype=torch.float16)
if TORCH_HAS_FP8 and fp8_inputs:
a = a.to(torch.float8_e5m2)
b = b.T
b = b.to(torch.float8_e5m2)
quantiles = [0.5, 0.2, 0.8]
if provider == ref_lib.lower():
ms, min_ms, max_ms = triton.testing.do_bench(
lambda: torch.matmul(a, b), quantiles=quantiles
)
if provider == "triton":
ms, min_ms, max_ms = triton.testing.do_bench(
lambda: matmul(a, b), quantiles=quantiles
)
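# A matmul performs 2 * M * N * K floating-point operations; convert ms to seconds and report TFLOPS.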
perf = lambda ms: 2 * M * N * K * 1e-12 / (ms * 1e-3)
return perf(ms), perf(max_ms), perf(min_ms)
benchmark.run(show_plots=True, print_data=True)

27
tests/test_profille.py Normal file
View File

@@ -0,0 +1,27 @@
import torch
import torch.nn as nn
import torch.optim as optim
from torch.profiler import profile, record_function, ProfilerActivity
# Define the model and optimizer
model = nn.Linear(100, 10).cuda()
optimizer = optim.SGD(model.parameters(), lr=0.01)
# Start the profiler
with profile(
activities=[ProfilerActivity.CUDA, ProfilerActivity.CPU], # profile both GPU and CPU
record_shapes=True, # record tensor shapes
profile_memory=True, # track memory usage
with_stack=True, # record call stacks
) as prof:
for _ in range(10):
x = torch.randn(64, 100).cuda()
y = model(x)
loss = y.sum()
loss.backward()
optimizer.step()
optimizer.zero_grad()
# Print the profiling results
print(prof.key_averages().table(sort_by="cuda_time_total", row_limit=10))
prof.export_chrome_trace("./trace.json")

84
tests/test_triton.py Normal file
View File

@@ -0,0 +1,84 @@
import torch
import triton
import triton.language as tl
@triton.jit
def softmax_kernel(
output_ptr,
input_ptr,
input_row_stride,
output_row_stride,
n_cols,
BLOCK_SIZE: tl.constexpr,
):
# Row index handled by this program
row_idx = tl.program_id(0)
# Compute the start pointers of the input and output rows
row_start_ptr = input_ptr + row_idx * input_row_stride
output_row_start_ptr = output_ptr + row_idx * output_row_stride
# Load the row into local memory
row_offsets = tl.arange(0, BLOCK_SIZE)
input_ptrs = row_start_ptr + row_offsets
row = tl.load(input_ptrs, mask=row_offsets < n_cols, other=-float("inf"))
# Compute the softmax
row_minus_max = row - tl.max(row, axis=0) # numerical stability: subtract the row max
numerator = tl.exp(row_minus_max)
denominator = tl.sum(numerator, axis=0)
softmax_output = numerator / denominator
# Write the result back to the output
output_ptrs = output_row_start_ptr + row_offsets
tl.store(output_ptrs, softmax_output, mask=row_offsets < n_cols)
def softmax(x):
n_rows, n_cols = x.shape
# Allocate the output tensor
output = torch.empty_like(x)
# Choose the block size and number of warps for the kernel
BLOCK_SIZE = triton.next_power_of_2(n_cols)
num_warps = 4
if BLOCK_SIZE >= 2048:
num_warps = 8
if BLOCK_SIZE >= 4096:
num_warps = 16
# Launch the Triton kernel
softmax_kernel[(n_rows,)](
output,
x,
x.stride(0),
output.stride(0),
n_cols,
BLOCK_SIZE=BLOCK_SIZE,
num_warps=num_warps,
)
return output
# Test the softmax
if __name__ == "__main__":
# Create a random matrix
x = torch.randn(4, 16, device="cuda")
# Compute softmax with Triton
output_triton = softmax(x)
# Compute softmax with PyTorch as a reference
output_torch = torch.softmax(x, dim=1)
# Check that the results match
print("Input:")
print(x)
print("Triton Softmax:")
print(output_triton)
print("PyTorch Softmax:")
print(output_torch)
print(f"Are close: {torch.allclose(output_triton, output_torch, atol=1e-5)}")

176
tests/test_triton_mma.py Normal file
View File

@@ -0,0 +1,176 @@
# coding=utf-8
import torch
import triton
import triton.language as tl
@triton.jit
def add_kernel(a_ptr, b_ptr, c_ptr, numel, BLOCK_SIZE: tl.constexpr):
xidx = tl.program_id(0)
index = xidx * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
mask = index < numel
a = tl.load(a_ptr + index, mask=mask)
b = tl.load(b_ptr + index, mask=mask)
c = a + b
tl.store(c_ptr + index, c, mask=mask)
@triton.jit
def add_mat_kernel(
a_ptr,
b_ptr,
c_ptr,
stride_m,
stride_n,
m,
n,
BLOCK_SIZE_M: tl.constexpr,
BLOCK_SIZE_N: tl.constexpr,
):
midx = tl.program_id(0)
nidx = tl.program_id(1)
m_offset = midx * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
n_offset = nidx * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
mask = (m_offset[:, None] < m) & (n_offset[None, :] < n)
index = m_offset[:, None] * stride_m + n_offset[None, :] * stride_n
a = tl.load(a_ptr + index, mask=mask)
b = tl.load(b_ptr + index, mask=mask)
c = a + b
tl.store(c_ptr + index, c, mask=mask)
@triton.jit
def threed_mat_kernel(
a_ptr,
b_ptr,
c_ptr,
stride_1,
stride_m,
stride_n,
num_token,
m,
n,
TOKEN_BLOCK: tl.constexpr,
BLOCK_SIZE_M: tl.constexpr,
BLOCK_SIZE_N: tl.constexpr,
):
token_idx = tl.program_id(0)
midx = tl.program_id(1)
nidx = tl.program_id(2)
# tl.device_print("token idx:", token_idx)
m_offset = midx * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
n_offset = nidx * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
mask = (m_offset[:, None] < m) & (n_offset[None, :] < n)
index = (
token_idx * stride_1
+ m_offset[:, None] * stride_m
+ n_offset[None, :] * stride_n
)
a = tl.load(a_ptr + index, mask=mask)
b = tl.load(b_ptr + index, mask=mask)
c = a + b
tl.store(c_ptr + index, c, mask=mask)
@triton.jit
def mma_kernel(
a_ptr,
b_ptr,
c_ptr,
m,
n,
k,
stride_am,
stride_an,
stride_bm,
stride_bk,
stride_cm,
stride_ck,
BLOCK_SIZE_M: tl.constexpr,
BLOCK_SIZE_N: tl.constexpr,
):
midx = tl.program_id(0)
nidx = tl.program_id(1)
a_m_offset = midx * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
a_n_offset = nidx * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
a_mask = (a_m_offset[:, None] < m) & (a_n_offset[None, :] < n)
a_index = a_m_offset[:, None] * stride_am + a_n_offset[None, :] * stride_an
a = tl.load(a_ptr + a_index, mask=a_mask)
acc = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
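# NOTE: mma_kernel is left unfinished: it loads one tile of A and allocates an fp32 accumulator,
# but the K loop, the B-tile loads, tl.dot, and the store of C are not implemented,
# and none of the tests below call it.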
def test_add_kernel():
a = torch.randn(size=(1024,), device="cuda")
b = torch.randn(size=(1024,), device="cuda")
c = torch.empty_like(a)
BLOCK_SIZE = 32
grid = lambda meta: (triton.cdiv(a.numel(), meta["BLOCK_SIZE"]),)
add_kernel[grid](a, b, c, a.numel(), BLOCK_SIZE)
real_c = a + b
assert torch.allclose(real_c, c), "not equal"
print("all right")
def test_add_mat_kernel():
a = torch.randn(size=(127, 255), device="cuda")
b = torch.randn(size=(127, 255), device="cuda")
c = torch.empty_like(a)
BLOCK_SIZE_M = 32
BLOCK_SIZE_N = 16
grid = lambda meta: (
triton.cdiv(a.size(0), meta["BLOCK_SIZE_M"]),
triton.cdiv(a.size(1), meta["BLOCK_SIZE_N"]),
)
add_mat_kernel[grid](
a,
b,
c,
a.stride(0),
a.stride(1),
a.size(0),
a.size(1),
BLOCK_SIZE_M,
BLOCK_SIZE_N,
)
real_c = a + b
assert torch.allclose(c, real_c), "not equal"
print("all right")
def test_three_dimension():
num_token = 128
a = torch.randn(size=(num_token, 127, 255), device="cuda")
b = torch.randn(size=(num_token, 127, 255), device="cuda")
c = torch.empty_like(a)
BLOCK_SIZE_M = 32
BLOCK_SIZE_N = 16
TOKEN_BLOCK = a.size(0)
grid = lambda meta: (
a.size(0),
triton.cdiv(a.size(1), meta["BLOCK_SIZE_M"]),
triton.cdiv(a.size(2), meta["BLOCK_SIZE_N"]),
)
threed_mat_kernel[grid](
a,
b,
c,
a.stride(0),
a.stride(1),
a.stride(2),
a.size(0),
a.size(1),
a.size(2),
TOKEN_BLOCK,
BLOCK_SIZE_M,
BLOCK_SIZE_N,
)
real_c = a + b
assert torch.allclose(c, real_c), "not equal"
print("all right")
if __name__ == "__main__":
test_add_kernel()
test_add_mat_kernel()
test_three_dimension()