/***************************************************************************************************
 * Copyright (c) 2017-2021, NVIDIA CORPORATION.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification, are permitted
 * provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright notice, this list of
 *       conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright notice, this list of
 *       conditions and the following disclaimer in the documentation and/or other materials
 *       provided with the distribution.
 *     * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used
 *       to endorse or promote products derived from this software without specific prior written
 *       permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
 * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 **************************************************************************************************/
/*! \file
    \brief Tests for device-wide GEMM interface
*/

#pragma once

#include <iostream>
#include <fstream>
#include <sstream>

#include "../../common/cutlass_unit_test.h"

#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/distribution.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_norm.h"
#include "cutlass/util/reference/host/gemm.h"

#include "testbed_utils.h"

namespace test {
namespace gemm {
namespace device {

/////////////////////////////////////////////////////////////////////////////////////////////////

template <typename Gemm>
struct Testbed {

  using ElementAccumulator = typename Gemm::ElementAccumulator;
  using ElementCompute = typename Gemm::GemmKernel::Epilogue::OutputOp::ElementCompute;

  /// Initialization
  cutlass::Distribution::Kind init_A;
  cutlass::Distribution::Kind init_B;
  cutlass::Distribution::Kind init_C;
  uint64_t seed;

  cutlass::HostTensor<typename Gemm::ElementA, typename Gemm::LayoutA> tensor_A;
  cutlass::HostTensor<typename Gemm::ElementB, typename Gemm::LayoutB> tensor_B;
  cutlass::HostTensor<typename Gemm::ElementC, typename Gemm::LayoutC> tensor_C;
  cutlass::HostTensor<typename Gemm::ElementC, typename Gemm::LayoutC> tensor_D;
  cutlass::HostTensor<typename Gemm::ElementC, typename Gemm::LayoutC> reference_D;

  //
  // Methods
  //

  Testbed(
    cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform,
    cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform,
    cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform,
    uint64_t seed_ = 2080
  ):
    init_A(init_A_), init_B(init_B_), init_C(init_C_), seed(seed_) { }

  /// Helper to initialize a tensor view
  template <typename Element, typename Layout>
  bool initialize_tensor(
    cutlass::TensorView<Element, Layout> view,
    cutlass::Distribution::Kind dist_kind,
    uint64_t seed) {

    if (dist_kind == cutlass::Distribution::Uniform) {

      double scope_max, scope_min;
      int bits_input = cutlass::sizeof_bits<Element>::value;
      int bits_output = cutlass::sizeof_bits<typename Gemm::ElementC>::value;

      if (bits_input == 1) {
        scope_max = 2;
        scope_min = 0;
      } else if (bits_input <= 8) {
        scope_max = 2;
        scope_min = -2;
      } else if (bits_output == 16) {
        scope_max = 5;
        scope_min = -5;
      } else {
        scope_max = 8;
        scope_min = -8;
      }

      cutlass::reference::host::TensorFillRandomUniform(
        view, seed, scope_max, scope_min, 0);
    }
    else if (dist_kind == cutlass::Distribution::Identity) {

      cutlass::reference::host::TensorFillIdentity(view);
    }
    else if (dist_kind == cutlass::Distribution::Gaussian) {

      cutlass::reference::host::TensorFillRandomGaussian(view, seed, 0, 0.5);
    }
    else if (dist_kind == cutlass::Distribution::Sequential) {

      cutlass::reference::host::BlockFillSequential(
        view.data(), view.capacity());
    }
    else {
      // TODO: Implement the rest
      EXPECT_TRUE(false) << "Not implemented";
      return false;
    }

    return true;
  }

  /// Initializes data structures
  void initialize(cutlass::gemm::GemmCoord problem_size) {
    //
    // Allocate the GEMM workspace
    //

    tensor_A.resize(problem_size.mk());
    tensor_B.resize(problem_size.kn());
    tensor_C.resize(problem_size.mn());
    tensor_D.resize(problem_size.mn());
    reference_D.resize(problem_size.mn(), false);

    EXPECT_TRUE(initialize_tensor(tensor_A.host_view(), init_A, seed + 2019));
    EXPECT_TRUE(initialize_tensor(tensor_B.host_view(), init_B, seed + 2018));
    EXPECT_TRUE(initialize_tensor(tensor_C.host_view(), init_C, seed + 2017));

    // It is possible to randomly initialize to all zeros, so override this with non-zeros
    // in the upper left corner of each operand.
    tensor_A.host_view().at({0, 0}) = typename Gemm::ElementA(1);
    tensor_B.host_view().at({0, 0}) = typename Gemm::ElementB(1);
    tensor_C.host_view().at({0, 0}) = typename Gemm::ElementC(1);

    cutlass::reference::host::TensorCopy(reference_D.host_view(), tensor_C.host_view());

    tensor_A.sync_device();
    tensor_B.sync_device();
    tensor_C.sync_device();
    tensor_D.sync_device();
  }

  /// Compares the device-computed result against the host reference and outputs to a file if incorrect
  bool compare_reference(
    cutlass::gemm::GemmCoord problem_size,
    ElementCompute alpha,
    ElementCompute beta) {

    tensor_D.sync_host();

    EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_A.host_view()), 0);
    EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_B.host_view()), 0);
    EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_C.host_view()), 0);

    if (tensor_D.size() > 1)
      EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_D.host_view()), 0);

    if (reference_D.size() > 1)
      EXPECT_GT(cutlass::reference::host::TensorNorm(reference_D.host_view()), 0);

    bool passed = cutlass::reference::host::TensorEquals(reference_D.host_view(), tensor_D.host_view());

    EXPECT_TRUE(passed);

    if (!passed) {

      std::stringstream fname;

      fname << "error_Gemm_device_"
        << problem_size.m() << "x"
        << problem_size.n() << "x"
        << problem_size.k() << "_"
        << Gemm::ThreadblockShape::kM << "x"
        << Gemm::ThreadblockShape::kN << "x"
        << Gemm::ThreadblockShape::kK << "_"
        << Gemm::WarpShape::kM << "x"
        << Gemm::WarpShape::kN << "x"
        << Gemm::WarpShape::kK << ".txt";

      std::ofstream file(fname.str());

      file
        << "problem: " << problem_size
        << ", alpha: " << alpha << ", beta: " << beta << "\n\n";

      file
        << "A =\n" << tensor_A.host_view()
        << "\nB =\n" << tensor_B.host_view()
        << "\nC =\n" << tensor_C.host_view()
        << "\n\nReference =\n" << reference_D.host_view()
        << "\nComputed =\n" << tensor_D.host_view();
    }

    return passed;
  }

  /// Verifies the result against a host reference GEMM
  bool verify(
    cutlass::gemm::GemmCoord problem_size,
    ElementCompute alpha,
    ElementCompute beta) {

    //
    // Verify
    //

    cutlass::reference::host::Gemm<
        typename Gemm::ElementA, typename Gemm::LayoutA,
        typename Gemm::ElementB, typename Gemm::LayoutB,
        typename Gemm::ElementC, typename Gemm::LayoutC, ElementCompute,
        ElementAccumulator, typename Gemm::Operator>
        reference_gemm;

    reference_gemm(
      problem_size,
      alpha,
      tensor_A.host_ref(),
      tensor_B.host_ref(),
      beta,
      reference_D.host_ref(),
      ElementAccumulator(0)
    );

    return compare_reference(problem_size, alpha, beta);
  }

  /// Determines if the CUDA device is sufficient to run the kernel
  bool sufficient() const {
    //
    // Determine SMEM requirements and waive if not satisfied
    //

    int smem_size = int(sizeof(typename Gemm::GemmKernel::SharedStorage));

    cudaDeviceProp properties;
    int device_idx;
    cudaError_t result = cudaGetDevice(&device_idx);

    if (result != cudaSuccess) {
      throw std::runtime_error("cudaGetDevice() API call failed.");
    }

    result = cudaGetDeviceProperties(&properties, device_idx);

    if (result != cudaSuccess) {
      throw std::runtime_error("cudaGetDeviceProperties() failed");
    }

    if (properties.sharedMemPerMultiprocessor < smem_size) {
      return false;
    }

    return true;
  }

  /// Executes one test
  bool run(
    cutlass::gemm::GemmCoord problem_size,
    int split_k_slices = 1,
    ElementCompute alpha = ElementCompute(1),
    ElementCompute beta = ElementCompute(0)) {

    // Waive test if insufficient CUDA device
    if (!sufficient()) {
      if (CUTLASS_TEST_UNIT_ENABLE_WARNINGS) {
        std::cerr << "Test waived due to insufficient CUDA device." << std::endl;
      }
      return true;
    }

    this->initialize(problem_size);

    //
    // Initialize the GEMM operator
    //

    typename Gemm::Arguments arguments{
      problem_size,
      tensor_A.device_ref(),
      tensor_B.device_ref(),
      tensor_C.device_ref(),
      tensor_D.device_ref(),
      {alpha, beta},
      split_k_slices
    };

    Gemm gemm_op;

    size_t workspace_size = Gemm::get_workspace_size(arguments);

    cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);

    cutlass::Status status = gemm_op.initialize(arguments, workspace.get());

    if (status != cutlass::Status::kSuccess) {
      cudaError_t error = cudaGetLastError();
      std::cerr << "This test is not supported: " << cudaGetErrorString(error) << "\n";
      return true;
    }

    //
    // Run the GEMM
    //

    status = gemm_op();

    EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status);

    //
    // Verify
    //

    bool passed = this->verify(problem_size, alpha, beta);

    if (!passed) {
      std::cout << "Error with split_k_slices = " << split_k_slices << ", alpha: " << alpha << std::endl;
    }

    return passed;
  }
};
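
// Illustrative usage sketch (not part of the original test suite): the Testbed can be driven
// directly with any concrete device-level GEMM type. The SIMT SGEMM configuration below is an
// assumption chosen for illustration and requires "cutlass/gemm/device/gemm.h" to be included
// by the test translation unit.
//
//   using GemmOp = cutlass::gemm::device::Gemm<
//       float, cutlass::layout::ColumnMajor,   // ElementA, LayoutA
//       float, cutlass::layout::ColumnMajor,   // ElementB, LayoutB
//       float, cutlass::layout::ColumnMajor>;  // ElementC, LayoutC
//
//   Testbed<GemmOp> testbed;
//
//   // Runs one 128x128x64 problem with the default alpha = 1, beta = 0 and no split-K.
//   bool passed = testbed.run(cutlass::gemm::GemmCoord(128, 128, 64));
//   EXPECT_TRUE(passed);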

/////////////////////////////////////////////////////////////////////////////////////////////////

template <typename Gemm>
bool TestAllGemm() {
  bool passed = true;

  int const kMinimumOperandElementSize =
    std::min(
      int(cutlass::sizeof_bits<typename Gemm::ElementA>::value),
      int(cutlass::sizeof_bits<typename Gemm::ElementB>::value));

  int const kAlignment = cutlass::platform::is_same<
                              typename Gemm::OperatorClass,
                              cutlass::arch::OpClassSimt>::value ? 1 : 128 / kMinimumOperandElementSize;

  // int8_t gemm alignment constraints
  int const kAlignmentM = cutlass::platform::is_same<typename Gemm::OperatorClass, cutlass::arch::OpClassSimt>::value &&
                          cutlass::platform::is_same<typename Gemm::ElementA, int8_t>::value &&
                          cutlass::platform::is_same<typename Gemm::LayoutA, cutlass::layout::ColumnMajor>::value ? 4 : kAlignment;

  int const kAlignmentN = cutlass::platform::is_same<typename Gemm::OperatorClass, cutlass::arch::OpClassSimt>::value &&
                          cutlass::platform::is_same<typename Gemm::ElementB, int8_t>::value &&
                          cutlass::platform::is_same<typename Gemm::LayoutB, cutlass::layout::RowMajor>::value ? 4 : kAlignment;

  int const kAlignmentK = cutlass::platform::is_same<typename Gemm::OperatorClass, cutlass::arch::OpClassSimt>::value &&
                          cutlass::platform::is_same<typename Gemm::ElementA, int8_t>::value &&
                          cutlass::platform::is_same<typename Gemm::ElementB, int8_t>::value &&
                          (cutlass::platform::is_same<typename Gemm::LayoutA, cutlass::layout::RowMajor>::value ||
                           cutlass::platform::is_same<typename Gemm::LayoutB, cutlass::layout::ColumnMajor>::value) ? 4 : kAlignment;

  int problem_size_m[] = {kAlignmentM, 512 - 3 * kAlignmentM};

  int problem_size_n[] = {kAlignmentN, 512 - 2 * kAlignmentN};

  int problem_size_k[] = {
      kAlignmentK, Gemm::ThreadblockShape::kK * (Gemm::kStages + 1) - kAlignmentK};

  int split_k_slices[] = {
    1, 2, 3
  };

  double problem_alpha[] = {
    1
  };

  double problem_beta[] = {
    2.0
  };

  Testbed<Gemm> testbed;

  using ElementCompute = typename Gemm::EpilogueOutputOp::ElementCompute;

  for (int m : problem_size_m) {
    for (int n : problem_size_n) {
      for (int k : problem_size_k) {
        for (int split_k : split_k_slices) {

          if (!Gemm::kSplitKSerial && split_k > 1) {
            continue;
          }

          if (split_k > 1 && k / Gemm::ThreadblockShape::kK < split_k) {
            continue;
          }

          for (auto alpha : problem_alpha) {
            for (auto beta : problem_beta) {

              cutlass::gemm::GemmCoord problem_size(m, n, k);

              passed = testbed.run(
                problem_size,
                split_k,
                cutlass::from_real<ElementCompute>(alpha),
                cutlass::from_real<ElementCompute>(beta)
              );

              if (!passed) {
                return false;
              }
            }
          }
        }
      }
    }
  }

  return passed;
}
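
// Illustrative sketch of how TestAllGemm is typically invoked from a GTEST case in the
// accompanying test translation units. The test name and the SIMT SGEMM configuration are
// assumptions chosen for illustration, not definitions made by this header.
//
//   #include "cutlass/gemm/device/gemm.h"
//
//   TEST(Device_Gemm_example, simt_sgemm_nn) {
//     using Gemm = cutlass::gemm::device::Gemm<
//         float, cutlass::layout::ColumnMajor,
//         float, cutlass::layout::ColumnMajor,
//         float, cutlass::layout::ColumnMajor>;
//
//     EXPECT_TRUE(test::gemm::device::TestAllGemm<Gemm>());
//   }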

/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Gemm>
bool TestGemmPerf(int iterations = 1) {
  bool passed = true;

  int problem_size_m[] = { 2048 };

  int problem_size_n[] = { 4352 };

  int problem_size_k[] = { 4096 };

  int split_k_slices[] = { 1 };
  double problem_alpha[] = { 1 };
  double problem_beta[] = { 0.0 };

  Testbed<Gemm> testbed;

  using ElementCompute = typename Gemm::EpilogueOutputOp::ElementCompute;

  for (int m : problem_size_m) {
    for (int n : problem_size_n) {
      for (int k : problem_size_k) {
        for (int split_k : split_k_slices) {

          if (!Gemm::kSplitKSerial && split_k > 1) {
            continue;
          }

          for (auto alpha : problem_alpha) {
            for (auto beta : problem_beta) {

              cutlass::gemm::GemmCoord problem_size(m, n, k);

              for (int i = 0; i < iterations; i++) {
                passed = testbed.run(
                  problem_size,
                  split_k,
                  cutlass::from_real<ElementCompute>(alpha),
                  cutlass::from_real<ElementCompute>(beta)
                );
              }

              if (!passed) {
                return false;
              }
            }
          }
        }
      }
    }
  }

  return passed;
}

} // namespace device
} // namespace gemm
} // namespace test

/////////////////////////////////////////////////////////////////////////////////////////////////