
CUTLASS 2.0

Substantially refactored for
- Better performance, particularly for native Turing Tensor Cores
- Robust and durable templates spanning the design space
- Encapsulated functionality embodying modern C++11 programming techniques
- Optimized containers and data types for efficient, generic, portable device code

Updates to:
- Quick start guide
- Documentation
- Utilities
- CUTLASS Profiler

Native Turing Tensor Cores
- Efficient GEMM kernels targeting Turing Tensor Cores
- Mixed-precision floating point, 8-bit integer, 4-bit integer, and binarized operands

Coverage of existing CUTLASS functionality:
- GEMM kernels targeting CUDA and Tensor Cores in NVIDIA GPUs (see the usage sketch after this list)
- Volta Tensor Cores through native mma.sync and through the WMMA API
- Optimizations such as parallel reductions, threadblock rasterization, and intra-threadblock reductions
- Batched GEMM operations
- Complex-valued GEMMs

Note: this commit and all that follow require a host compiler supporting C++11 or greater.
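To make the GEMM-related items above concrete, the sketch below shows how a device-wide GEMM is typically instantiated and launched through the 2.x `cutlass::gemm::device::Gemm` template. This is a minimal illustration rather than part of this commit: the helper name `run_sgemm` is invented for the example, and the Turing typedef assumes the template's optional accumulator, operator-class, and architecture parameters.

```cpp
// Minimal sketch, assuming the CUTLASS 2.x device-level GEMM interface.
#include <cuda_runtime.h>

#include "cutlass/gemm/device/gemm.h"
#include "cutlass/numeric_types.h"

using ColumnMajor = cutlass::layout::ColumnMajor;

// Default configuration: single-precision GEMM running on CUDA cores (SIMT).
using GemmSimt = cutlass::gemm::device::Gemm<
    float, ColumnMajor,    // A
    float, ColumnMajor,    // B
    float, ColumnMajor>;   // C / D

// Illustrative Turing Tensor Core variant: half-precision operands with
// single-precision accumulation. The extra template arguments choose the
// accumulator type, the math-instruction class, and the target architecture.
using GemmTuringTensorOp = cutlass::gemm::device::Gemm<
    cutlass::half_t, ColumnMajor,     // A
    cutlass::half_t, ColumnMajor,     // B
    float, ColumnMajor,               // C / D
    float,                            // accumulator
    cutlass::arch::OpClassTensorOp,   // use Tensor Cores
    cutlass::arch::Sm75>;             // Turing (SM75)

// Computes C = alpha * A * B + beta * C on the default CUDA stream.
// (run_sgemm is a hypothetical helper name used only for this sketch.)
cudaError_t run_sgemm(int M, int N, int K,
                      float alpha, float const *A, int lda,
                      float const *B, int ldb,
                      float beta, float *C, int ldc) {
  GemmSimt gemm_op;

  GemmSimt::Arguments args({M, N, K},      // problem size (GemmCoord)
                           {A, lda},       // TensorRef to A
                           {B, ldb},       // TensorRef to B
                           {C, ldc},       // TensorRef to C (source)
                           {C, ldc},       // TensorRef to D (destination)
                           {alpha, beta}); // linear-scaling epilogue

  cutlass::Status status = gemm_op(args);
  return status == cutlass::Status::kSuccess ? cudaSuccess : cudaErrorUnknown;
}
```

The default template arguments select a single-precision SIMT kernel. The Turing variant issues Tensor Core mma instructions and, like other Tensor Core kernels, expects operand pointers and leading dimensions to satisfy the kernel's alignment requirements (typically multiples of 8 elements for half-precision operands); a Volta target would substitute `cutlass::arch::Sm70`.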
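// Doxygen-generated search index (searchData) mapping the WMMA-related
// header names to their documentation pages.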
var searchData=
[
  ['wmma_2eh',['wmma.h',['../wmma_8h.html',1,'']]],
  ['wmma_5farray_2eh',['wmma_array.h',['../wmma__array_8h.html',1,'']]],
  ['wmma_5fptx_2eh',['wmma_ptx.h',['../wmma__ptx_8h.html',1,'']]],
  ['wmma_5fsm70_2eh',['wmma_sm70.h',['../wmma__sm70_8h.html',1,'']]],
  ['wmma_5fsm72_2eh',['wmma_sm72.h',['../wmma__sm72_8h.html',1,'']]],
  ['wmma_5fsm75_2eh',['wmma_sm75.h',['../wmma__sm75_8h.html',1,'']]],
  ['wmma_5ftensor_5fop_5fpolicy_2eh',['wmma_tensor_op_policy.h',['../wmma__tensor__op__policy_8h.html',1,'']]]
];