
/*
  CUTLASS 2.0 — substantially refactored for:
    - Better performance, particularly for native Turing Tensor Cores
    - Robust and durable templates spanning the design space
    - Encapsulated functionality embodying modern C++11 programming techniques
    - Optimized containers and data types for efficient, generic, portable device code

  Updates to: quick-start guide, documentation, utilities, and the CUTLASS Profiler.

  Native Turing Tensor Cores:
    - Efficient GEMM kernels targeting Turing Tensor Cores
    - Mixed-precision floating point, 8-bit integer, 4-bit integer, and binarized operands

  Coverage of existing CUTLASS functionality:
    - GEMM kernels targeting CUDA and Tensor Cores in NVIDIA GPUs
    - Volta Tensor Cores through native mma.sync and through the WMMA API
    - Optimizations such as parallel reductions, threadblock rasterization,
      and intra-threadblock reductions
    - Batched GEMM operations
    - Complex-valued GEMMs

  Note: this commit and all that follow require a host compiler supporting
  C++11 or greater.
*/
#pragma once
#include <cstdlib>   // exit, EXIT_FAILURE
#include <iostream>  // std::cerr, std::endl

#include "cuda_runtime.h"
// Checks a cutlass::Status value; on any status other than kSuccess, prints
// the human-readable status string and the invoking line number to stderr,
// then terminates the process.
//
// The body is wrapped in `do { ... } while (0)` so the macro expands to a
// single statement — safe inside an unbraced `if`/`else` and requiring the
// usual trailing semicolon at the call site. The argument is parenthesized
// when expanded so expressions such as comma-containing calls evaluate as
// the caller intended.
#define CUTLASS_CHECK(status)                                                 \
  do {                                                                        \
    cutlass::Status error = (status);                                         \
    if (error != cutlass::Status::kSuccess) {                                 \
      std::cerr << "Got cutlass error: " << cutlassGetStatusString(error)     \
                << " at: " << __LINE__ << std::endl;                          \
      exit(EXIT_FAILURE);                                                     \
    }                                                                         \
  } while (0)
// Checks a cudaError_t returned by a CUDA runtime API call; on any error,
// prints the runtime's error string and the invoking line number to stderr,
// then terminates the process.
//
// Wrapped in `do { ... } while (0)` so the macro behaves as a single
// statement (safe in unbraced `if`/`else`; call sites end with `;`).
// The argument is parenthesized when expanded to preserve the caller's
// expression grouping. Note: kernel launches do not return cudaError_t —
// follow a launch with CUDA_CHECK(cudaGetLastError()).
#define CUDA_CHECK(status)                                                    \
  do {                                                                        \
    cudaError_t error = (status);                                             \
    if (error != cudaSuccess) {                                               \
      std::cerr << "Got bad cuda status: " << cudaGetErrorString(error)       \
                << " at line: " << __LINE__ << std::endl;                     \
      exit(EXIT_FAILURE);                                                     \
    }                                                                         \
  } while (0)