cutlass/test/unit/core/fast_numeric_conversion.cu

/***************************************************************************************************
* Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Unit tests for fast numeric conversion operators.
*/
#include "../common/cutlass_unit_test.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/util/host_tensor.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace test {
namespace core {
namespace kernel {
/// Simple conversion kernel: each thread converts one packed Array<Source, Count> into an
/// Array<Destination, Count> using cutlass::FastNumericArrayConverter. The tests below launch
/// it with a single thread.
template <typename Destination, typename Source, int Count>
__global__ void convert(
    cutlass::Array<Destination, Count> *destination,
    cutlass::Array<Source, Count> const *source) {

  cutlass::FastNumericArrayConverter<Destination, Source, Count> convert;

  *destination = convert(*source);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
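
/// Converts a short sequence of small non-negative values (i % 4) and checks that every
/// converted element compares equal to its source when both are widened to float.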
template <typename Destination, typename Source, int Count>
void run_test_integer_range_limited() {
  const int kN = Count;

  dim3 grid(1, 1);
  dim3 block(1, 1);

  cutlass::HostTensor<Destination, cutlass::layout::RowMajor> destination({1, kN});
  cutlass::HostTensor<Source, cutlass::layout::RowMajor> source({1, kN});

  for (int i = 0; i < kN; ++i) {
    source.host_data()[i] = Source(i % 4);
  }

  source.sync_device();

  convert<Destination, Source, kN><<< grid, block >>>(
    reinterpret_cast<cutlass::Array<Destination, kN> *>(destination.device_data()),
    reinterpret_cast<cutlass::Array<Source, kN> const *>(source.device_data())
  );

  destination.sync_host();

  for (int i = 0; i < kN; ++i) {
    EXPECT_TRUE(float(destination.host_data()[i]) == float(source.host_data()[i]));
  }
}
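
/// Sweeps the full representable range of the integer Source type (repeated as needed to fill
/// Count elements) and verifies that every converted element matches its source value.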
template <typename Destination, typename Source, int Count>
void run_test_integer_range_all() {
  const int kN = Count;

  dim3 grid(1, 1);
  dim3 block(1, 1);

  cutlass::HostTensor<Destination, cutlass::layout::RowMajor> destination({1, kN});
  cutlass::HostTensor<Source, cutlass::layout::RowMajor> source({1, kN});

  int const kIntSourceMin = std::numeric_limits<Source>::min();
  int const kIntSourceMax = std::numeric_limits<Source>::max();
  int const kIntRange = kIntSourceMax - kIntSourceMin + 1;

  for (int i = 0; i < kN; ++i) {
    source.host_data()[i] = Source(kIntSourceMin + (i % kIntRange));
  }

  source.sync_device();

  convert<Destination, Source, kN><<< grid, block >>>(
    reinterpret_cast<cutlass::Array<Destination, kN> *>(destination.device_data()),
    reinterpret_cast<cutlass::Array<Source, kN> const *>(source.device_data())
  );

  destination.sync_host();

  // Verify conversion
  bool passed = true;
  for (int i = 0; i < kN; ++i) {
    if (!(float(destination.host_data()[i]) == float(source.host_data()[i]))) {
      passed = false;
      break;
    }
  }

  EXPECT_TRUE(passed) << "FastNumericArrayConverter failed";

  // Print out results for the failed conversion.
  if (!passed) {
    for (int i = 0; i < kN; ++i) {
      std::cout << "source(" << float(source.host_data()[i]) << ") -> "
                << "destination (" << float(destination.host_data()[i]) << ")" << std::endl;
    }
  }

  std::cout << std::flush;
}
} // namespace kernel
} // namespace core
} // namespace test
/////////////////////////////////////////////////////////////////////////////////////////////////
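
//
// The cases below instantiate the test harness for the packed conversions used by
// mixed-input TensorOp GEMM (8-bit integers to half_t and bfloat16_t), plus a basic
// s32 -> f32 check over a limited value range.
//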
TEST(FastNumericConversion, s32_to_f32) {
  int const kN = 4;
  using Source = int;
  using Destination = float;
  test::core::kernel::run_test_integer_range_limited<Destination, Source, kN>();
}

TEST(FastNumericConversion, s8_to_f16_array) {
  int const kN = 256;
  using Source = int8_t;
  using Destination = cutlass::half_t;
  test::core::kernel::run_test_integer_range_all<Destination, Source, kN>();
}

TEST(FastNumericConversion, u8_to_f16_array) {
  int const kN = 256;
  using Source = uint8_t;
  using Destination = cutlass::half_t;
  test::core::kernel::run_test_integer_range_all<Destination, Source, kN>();
}

TEST(FastNumericConversion, u8_to_bf16_array) {
  int const kN = 256;
  using Source = uint8_t;
  using Destination = cutlass::bfloat16_t;
  test::core::kernel::run_test_integer_range_all<Destination, Source, kN>();
}

TEST(FastNumericConversion, s8_to_bf16_array) {
  int const kN = 256;
  using Source = int8_t;
  using Destination = cutlass::bfloat16_t;
  test::core::kernel::run_test_integer_range_all<Destination, Source, kN>();
}