
CUTLASS 2.0

Substantially refactored for:
- Better performance, particularly for native Turing Tensor Cores
- Robust and durable templates spanning the design space
- Encapsulated functionality embodying modern C++11 programming techniques
- Optimized containers and data types for efficient, generic, portable device code

Updates to:
- Quick start guide
- Documentation
- Utilities
- CUTLASS Profiler

Native Turing Tensor Cores:
- Efficient GEMM kernels targeting Turing Tensor Cores
- Mixed-precision floating point, 8-bit integer, 4-bit integer, and binarized operands

Coverage of existing CUTLASS functionality:
- GEMM kernels targeting CUDA and Tensor Cores in NVIDIA GPUs
- Volta Tensor Cores through native mma.sync and through the WMMA API
- Optimizations such as parallel reductions, threadblock rasterization, and intra-threadblock reductions
- Batched GEMM operations
- Complex-valued GEMMs

Note: this commit and all that follow require a host compiler supporting C++11 or greater.
10 lines
790 B
JavaScript
10 lines
790 B
JavaScript
// Doxygen-generated search index data (class-name search, entries starting
// with 'b'). Each entry is:
//   [searchKey, [displayName, [targetHtmlPage, isDirectMatch, scopeLabel]]]
// where searchKey is the lowercased, URL-escaped identifier ('_5f' encodes
// '_'), targetHtmlPage is the relative doc page, and scopeLabel is the
// enclosing C++ namespace shown in the result list.
// NOTE(review): stray '|' artifact lines from the original extraction were
// removed — they made the array literal a syntax error; the string payloads
// below are byte-identical to the original entries.
var searchData=
[
  ['batchedgemmcoord',['BatchedGemmCoord',['../structcutlass_1_1gemm_1_1BatchedGemmCoord.html',1,'cutlass::gemm']]],
  ['batchedreduction',['BatchedReduction',['../structcutlass_1_1reduction_1_1BatchedReduction.html',1,'cutlass::reduction']]],
  ['batchedreductiontraits',['BatchedReductionTraits',['../structcutlass_1_1reduction_1_1BatchedReductionTraits.html',1,'cutlass::reduction']]],
  ['blockforeach',['BlockForEach',['../structcutlass_1_1reference_1_1device_1_1BlockForEach.html',1,'cutlass::reference::device']]],
  ['blockforeach',['BlockForEach',['../structcutlass_1_1reference_1_1host_1_1BlockForEach.html',1,'cutlass::reference::host']]],
  ['bool_5fconstant',['bool_constant',['../structcutlass_1_1platform_1_1bool__constant.html',1,'cutlass::platform']]]
];