/***************************************************************************************************
 * Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 **************************************************************************************************/
#pragma once

#include <cute/config.hpp>

#include <cute/util/type_traits.hpp>

#include <cute/numeric/integral_constant.hpp>
#include <cute/numeric/integer_sequence.hpp>
#include <cute/numeric/arithmetic_tuple.hpp>

#include <cute/container/tuple.hpp>
#include <cute/container/array_aligned.hpp>
#include <cute/container/array_subbyte.hpp>

#include <cute/pointer.hpp>
#include <cute/layout.hpp>

namespace cute
{

//
// Engine -- owning or non-owning data store
//

// concept Engine {
//   using value_type = ;
//   iterator begin();
// };

template <class T, int N>
using ArrayEngine = typename conditional<(sizeof_bits<T>::value % 8 == 0),
                                          array_aligned<T,N>,
                                          array_subbyte<T,N>>::type;

template <class Iterator>
struct ViewEngine
{
  using value_type = typename cute::remove_cvref<decltype(*declval<Iterator>())>::type;
  using iterator   = Iterator;
  iterator storage_;

  CUTE_HOST_DEVICE constexpr
  iterator const& begin() const { return storage_; }
  CUTE_HOST_DEVICE constexpr
  iterator      & begin()       { return storage_; }
};

template <class Iterator>
struct is_rmem<ViewEngine<Iterator>> : is_rmem<Iterator> {};
template <class Iterator>
struct is_smem<ViewEngine<Iterator>> : is_smem<Iterator> {};
template <class Iterator>
struct is_gmem<ViewEngine<Iterator>> : is_gmem<Iterator> {};

template <class Iterator>
struct ConstViewEngine
{
  using value_type = typename cute::remove_cvref<decltype(*declval<Iterator>())>::type;
  using iterator   = Iterator;
  iterator storage_;

  CUTE_HOST_DEVICE constexpr
  iterator const& begin() const { return storage_; }
};

template <class Iterator>
struct is_rmem<ConstViewEngine<Iterator>> : is_rmem<Iterator> {};
template <class Iterator>
struct is_smem<ConstViewEngine<Iterator>> : is_smem<Iterator> {};
template <class Iterator>
struct is_gmem<ConstViewEngine<Iterator>> : is_gmem<Iterator> {};

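// Illustrative sketch only (not part of the library): any type that exposes a
// value_type, an iterator typedef, and begin() satisfies the Engine concept above.
// A hypothetical engine wrapping a raw float pointer could look like
//
//   struct MyPtrEngine {
//     using value_type = float;
//     using iterator   = float*;
//     iterator storage_;
//     CUTE_HOST_DEVICE constexpr iterator const& begin() const { return storage_; }
//     CUTE_HOST_DEVICE constexpr iterator      & begin()       { return storage_; }
//   };
//
// ViewEngine and ConstViewEngine above are the non-owning engines used for views over
// existing iterators, while ArrayEngine is the owning engine used for register-backed
// tensors.
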
//
// Tensor
//

template <class Engine, class Layout>
struct Tensor
{
  using value_type = typename Engine::value_type;
  //using pointer         = typename engine_traits<Engine>::pointer;
  //using const_pointer   = typename engine_traits<Engine>::const_pointer;
  //using reference       = typename engine_traits<Engine>::reference;
  //using const_reference = typename engine_traits<Engine>::const_reference;

  using engine_type = Engine;
  using layout_type = Layout;

  CUTE_HOST_DEVICE constexpr
  Tensor() {}

  template <class Ptr>
  CUTE_HOST_DEVICE constexpr
  Tensor(Ptr const& ptr, Layout const& layout)
      : rep_(layout, ptr) {
  }

  //
  // Accessors
  //

  static constexpr int rank = Layout::rank;

  CUTE_HOST_DEVICE constexpr
  decltype(auto) tensor() const { return *this; }

  CUTE_HOST_DEVICE constexpr
  decltype(auto) layout() const { return get<0>(rep_); }

  CUTE_HOST_DEVICE constexpr
  decltype(auto) engine() const { return get<1>(rep_); }

  CUTE_HOST_DEVICE constexpr
  decltype(auto) engine()       { return get<1>(rep_); }

  CUTE_HOST_DEVICE constexpr
  decltype(auto) data() const { return engine().begin(); }

  CUTE_HOST_DEVICE constexpr
  decltype(auto) data()       { return engine().begin(); }

  CUTE_HOST_DEVICE constexpr
  decltype(auto) shape() const { return layout().shape(); }

  CUTE_HOST_DEVICE constexpr
  auto size() const { return cute::size(shape()); }

  CUTE_HOST_DEVICE constexpr
  decltype(auto) stride() const { return layout().stride(); }

  //
  // Indexing op() and op[]
  //

  // Index into this tensor like an array by computing the offset via layout()
  template <class Coord>
  CUTE_HOST_DEVICE constexpr
  decltype(auto) operator[](Coord const& coord) {
    return data()[layout()(coord)];
  }

  template <class Coord>
  CUTE_HOST_DEVICE constexpr
  decltype(auto) operator[](Coord const& coord) const {
    return data()[layout()(coord)];
  }

  template <class Coord>
  CUTE_HOST_DEVICE constexpr
  decltype(auto) operator()(Coord const& coord) {
    if constexpr (has_underscore<Coord>::value) {
      auto const& [sliced_layout,offset] = slice_and_offset(coord, layout());
      return make_tensor(data() + offset, sliced_layout);
    } else {
      return data()[layout()(coord)];
    }

    CUTE_GCC_UNREACHABLE;
  }

  template <class Coord>
  CUTE_HOST_DEVICE constexpr
  decltype(auto) operator()(Coord const& coord) const {
    if constexpr (has_underscore<Coord>::value) {
      auto const& [sliced_layout,offset] = slice_and_offset(coord, layout());
      return make_tensor(data() + offset, sliced_layout);
    } else {
      return data()[layout()(coord)];
    }

    CUTE_GCC_UNREACHABLE;
  }

  // op() convenience function for multi-dimensional coordinates
  template <class Coord0, class Coord1, class... Coords>
  CUTE_HOST_DEVICE constexpr
  decltype(auto) operator()(Coord0 const& c0, Coord1 const& c1, Coords const&... cs) {
    return operator()(make_coord(c0,c1,cs...));
  }

  template <class Coord0, class Coord1, class... Coords>
  CUTE_HOST_DEVICE constexpr
  decltype(auto) operator()(Coord0 const& c0, Coord1 const& c1, Coords const&... cs) const {
    return operator()(make_coord(c0,c1,cs...));
  }

  //
  // Compose
  //

  template <class... Layouts>
  CUTE_HOST_DEVICE constexpr
  auto compose(Layouts const&... layouts) {
    return make_tensor(data(), layout().compose(layouts...));
  }

  template <class... Layouts>
  CUTE_HOST_DEVICE constexpr
  auto compose(Layouts const&... layouts) const {
    return make_tensor(data(), layout().compose(layouts...));
  }

  //
  // Tile
  //

  template <class... Layouts>
  CUTE_HOST_DEVICE constexpr
  auto tile(Layouts const&... layouts) {
    return make_tensor(data(), layout().tile(layouts...));
  }

  template <class... Layouts>
  CUTE_HOST_DEVICE constexpr
  auto tile(Layouts const&... layouts) const {
    return make_tensor(data(), layout().tile(layouts...));
  }

  //
  // Utility
  //

  template <class Int,
            __CUTE_REQUIRES(is_integral<Int>::value)>
  CUTE_HOST_DEVICE constexpr
  auto get_1d_coord(Int const& linear_idx) const {
    return layout().get_1d_coord(linear_idx);
  }

  template <class Int,
            __CUTE_REQUIRES(is_integral<Int>::value)>
  CUTE_HOST_DEVICE constexpr
  auto get_hier_coord(Int const& linear_idx) const {
    return layout().get_hier_coord(linear_idx);
  }

  template <class Int,
            __CUTE_REQUIRES(is_integral<Int>::value)>
  CUTE_HOST_DEVICE constexpr
  auto get_flat_coord(Int const& linear_idx) const {
    return layout().get_flat_coord(linear_idx);
  }

  cute::tuple<layout_type, engine_type> rep_;
};

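// Usage sketch for the indexing and slicing operators above (illustrative only;
// the tensor and its extents are hypothetical):
//
//   auto t = make_tensor<float>(make_shape(Int<4>{}, Int<8>{}));  // owning 4x8 tensor
//   t(2,3) = 1.0f;                 // op() with a complete coordinate returns a reference
//   float x = t[make_coord(2,3)];  // op[] computes the same offset through the layout
//   auto col = t(_,3);             // an Underscore (_) slices: col is a non-owning
//                                  // rank-1 view of column 3 that aliases t's data
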
template <class T>
struct is_tensor : false_type {};
template <class Engine, class Layout>
struct is_tensor<Tensor<Engine,Layout>> : true_type {};

template <class Engine, class Layout>
struct is_rmem<Tensor<Engine,Layout>> : is_rmem<Engine> {};
template <class Engine, class Layout>
struct is_smem<Tensor<Engine,Layout>> : is_smem<Engine> {};
template <class Engine, class Layout>
struct is_gmem<Tensor<Engine,Layout>> : is_gmem<Engine> {};

// Customization point for creation of owning and non-owning Tensors
template <class T>
struct MakeTensor
{
  template <class Layout,
            __CUTE_REQUIRES(not has_dereference<T>::value &&
                            is_layout<Layout>::value)>
  CUTE_HOST_DEVICE constexpr auto
  operator()(Layout const& layout) const
  {
    static_assert(is_static<Layout>::value, "Dynamic owning tensors not supported");
    using Engine = ArrayEngine<T, cosize_v<Layout>>;
    return Tensor<Engine, Layout>();
  }

  template <class Layout,
            __CUTE_REQUIRES(has_dereference<T>::value &&
                            is_layout<Layout>::value)>
  CUTE_HOST_DEVICE constexpr auto
  operator()(T const& iter, Layout const& layout)
  {
    using Engine = ViewEngine<T>;
    return Tensor<Engine, Layout>(iter, layout);
  }

  template <class LayoutArg, class... LayoutArgs,
            __CUTE_REQUIRES(not is_layout<LayoutArg>::value)>
  CUTE_HOST_DEVICE constexpr auto
  operator()(LayoutArg const& arg, LayoutArgs const&... args) const
  {
    return operator()(make_layout(arg, args...));
  }

  template <class LayoutArg, class... LayoutArgs,
            __CUTE_REQUIRES(not is_layout<LayoutArg>::value)>
  CUTE_HOST_DEVICE constexpr auto
  operator()(T const& iter, LayoutArg const& arg, LayoutArgs const&... args)
  {
    return operator()(iter, make_layout(arg, args...));
  }
};

//
// make_tensor
//

// Make an owning Tensor that will allocate a static array
// e.g. make_tensor<float>(Int<12>{})
template <class T, class... Args>
CUTE_HOST_DEVICE constexpr auto
make_tensor(Args const&... args)
{
  return MakeTensor<T>{}(args...);
}

// Make a non-owning Tensor that will use a pointer (view)
// e.g. make_tensor(vec.data(), 12)
template <class Iterator, class... Args>
CUTE_HOST_DEVICE constexpr auto
make_tensor(Iterator const& iter, Args const&... args)
{
  return MakeTensor<Iterator>{}(iter, args...);
}

//
// make_tensor_like
//   Make a register tensor the same type and shape and (if possible) order as another tensor
//

template <class NewT, class Layout>
CUTE_HOST_DEVICE constexpr auto
make_tensor_like(Layout const& layout)
{
  if constexpr (is_static<Layout>::value) {
    return make_tensor<NewT>(make_ordered_layout(layout));
  } else {
    return make_tensor<NewT>(make_layout(layout.shape()));
  }
}

template <class NewT, class Engine, class Layout>
CUTE_HOST_DEVICE constexpr auto
make_tensor_like(Tensor<Engine,Layout> const& tensor)
{
  return make_tensor_like<NewT>(tensor.layout());
}

template <class Engine, class Layout>
CUTE_HOST_DEVICE constexpr auto
make_tensor_like(Tensor<Engine,Layout> const& tensor)
{
  return make_tensor_like<typename Engine::value_type>(tensor.layout());
}

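// Usage sketch for make_tensor and make_tensor_like (illustrative only; the pointer
// and extents are hypothetical):
//
//   // Owning, register-backed tensor: the layout must be static
//   auto rA = make_tensor<float>(Shape<_4,_2>{});
//
//   // Non-owning view over an existing pointer: dynamic extents are fine
//   // (ptr, m, n are assumed to exist; make_gmem_ptr tags the pointer as global memory)
//   auto gA = make_tensor(make_gmem_ptr(ptr), make_shape(m, n), make_stride(n, Int<1>{}));
//
//   // Same static shape and order as rA, but with a different element type
//   auto rB = make_tensor_like<half_t>(rA);
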
//
// make_fragment_like --
//   Make a tensor the same shape and (if possible) order as another tensor, with special
//   consideration of the 0th mode. The 0th mode is commonly used for MMA_Atoms or Copy_Atoms
//   so this allocates the 0th mode with LayoutLeft regardless of the reference layout.
//

template <class NewT, class Layout>
CUTE_HOST_DEVICE constexpr auto
make_fragment_like(Layout const& layout)
{
  return make_tensor<NewT>(make_fragment_like(layout));
}

template <class NewT, class Engine, class Layout>
CUTE_HOST_DEVICE constexpr auto
make_fragment_like(Tensor<Engine,Layout> const& tensor)
{
  return make_fragment_like<NewT>(tensor.layout());
}

template <class Engine, class Layout>
CUTE_HOST_DEVICE constexpr auto
make_fragment_like(Tensor<Engine,Layout> const& tensor)
{
  return make_fragment_like<typename Engine::value_type>(tensor.layout());
}

//
// make_identity_tensor
//

template <class Shape>
CUTE_HOST_DEVICE constexpr auto
make_identity_tensor(Shape const& shape)
{
  return make_tensor(ArithmeticTupleIterator(as_arithmetic_tuple(repeat_like(shape, Int<0>{}))),
                     make_identity_layout(shape));
}

//
// Utilities
//

// Return the subtensor of a mode
template <class Tensor,
          __CUTE_REQUIRES(is_tensor<remove_cvref_t<Tensor>>::value)>
CUTE_HOST_DEVICE constexpr
decltype(auto)
tensor(Tensor&& tensor)
{
  return std::forward<Tensor>(tensor);
}

template <int I, int... Is, class Tensor,
          __CUTE_REQUIRES(is_tensor<remove_cvref_t<Tensor>>::value)>
CUTE_HOST_DEVICE constexpr
decltype(auto)
tensor(Tensor&& tensor)
{
  return make_tensor(std::forward<Tensor>(tensor).data(), get<I,Is...>(tensor.layout()));
}

// Return the subtensor of a range of modes
template <int B, int E, class Tensor,
          __CUTE_REQUIRES(is_tensor<remove_cvref_t<Tensor>>::value)>
CUTE_HOST_DEVICE constexpr
decltype(auto)
take(Tensor&& tensor)
{
  return make_tensor(std::forward<Tensor>(tensor).data(), take<B,E>(tensor.layout()));
}

// Return the layout of a mode
template <int... Is, class Engine, class Layout>
CUTE_HOST_DEVICE constexpr
decltype(auto)
layout(Tensor<Engine,Layout> const& tensor)
{
  return layout<Is...>(tensor.layout());
}

// Return the shape of a mode
template <int... Is, class Engine, class Layout>
CUTE_HOST_DEVICE constexpr
decltype(auto)
shape(Tensor<Engine,Layout> const& tensor)
{
  return shape<Is...>(tensor.layout());
}

// Return the stride of a mode
template <int... Is, class Engine, class Layout>
CUTE_HOST_DEVICE constexpr
decltype(auto)
stride(Tensor<Engine,Layout> const& tensor)
{
  return stride<Is...>(tensor.layout());
}

// Return the number of elements in a mode
template <int... Is, class Engine, class Layout>
CUTE_HOST_DEVICE constexpr
decltype(auto)
size(Tensor<Engine,Layout> const& tensor)
{
  return size<Is...>(tensor.layout());
}

// Return the rank of a mode
template <int... Is, class Engine, class Layout>
CUTE_HOST_DEVICE constexpr
auto
rank(Tensor<Engine,Layout> const& tensor)
{
  return rank<Is...>(tensor.layout());
}

// Return the depth of a mode
template <int... Is, class Engine, class Layout>
CUTE_HOST_DEVICE constexpr
auto
depth(Tensor<Engine,Layout> const& tensor)
{
  return depth<Is...>(tensor.layout());
}

//
// Operations to manipulate Tensors like a Layout
//

template <class Tensor,
          __CUTE_REQUIRES(is_tensor<remove_cvref_t<Tensor>>::value)>
CUTE_HOST_DEVICE constexpr auto
flatten(Tensor&& tensor)
{
  return make_tensor(std::forward<Tensor>(tensor).data(), flatten(tensor.layout()));
}

template <class Tensor,
          __CUTE_REQUIRES(is_tensor<remove_cvref_t<Tensor>>::value)>
CUTE_HOST_DEVICE constexpr auto
coalesce(Tensor&& tensor)
{
  return make_tensor(std::forward<Tensor>(tensor).data(), coalesce(tensor.layout()));
}

template <class Tensor, class Profile,
          __CUTE_REQUIRES(is_tensor<remove_cvref_t<Tensor>>::value)>
CUTE_HOST_DEVICE constexpr auto
coalesce(Tensor&& tensor, Profile const& profile)
{
  return make_tensor(std::forward<Tensor>(tensor).data(), coalesce(tensor.layout(), profile));
}

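// Usage sketch for make_identity_tensor and the free-function mode accessors above
// (illustrative only; the extents are hypothetical):
//
//   auto cA = make_identity_tensor(make_shape(Int<4>{}, Int<8>{}));
//   // cA(i,j) evaluates to the coordinate (i,j) itself, which is convenient for
//   // building predicate tensors that record which elements are in bounds.
//
//   auto t = make_tensor<float>(Shape<_4,Shape<_2,_8>>{});
//   size<1>(t);     // 16 : number of elements in mode-1
//   shape<1,0>(t);  // _2 : shape of the 0th submode of mode-1
//   rank(t);        // 2  : number of top-level modes
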
// Group the modes [B,E) into a single mode
// e.g. group<2,4>(make_tensor<int>(Layout<Shape<_1,_2,_3,_4,_5,_6>>{}))
//      => make_tensor<int>(Layout<Shape<_1,_2,Shape<_3,_4>,_5,_6>>{})
template <int B, int E, class Tensor,
          __CUTE_REQUIRES(is_tensor<remove_cvref_t<Tensor>>::value)>
CUTE_HOST_DEVICE constexpr auto
group_modes(Tensor&& tensor)
{
  return make_tensor(std::forward<Tensor>(tensor).data(), group<B,E>(tensor.layout()));
}

//
// Recast
//

// NOTE: This is very dangerous to do
//   -- doesn't check dynamic integer divisibility
//   -- doesn't check alignment

// A tagged version for dispatching
template <class NewType, class Tensor,
          __CUTE_REQUIRES(is_tensor<remove_cvref_t<Tensor>>::value)>
CUTE_HOST_DEVICE constexpr auto
recast(Tensor&& tensor, type_list<NewType>)
{
  using OldType = typename remove_cvref_t<Tensor>::value_type;

  auto old_layout = tensor.layout();
  auto new_layout = recast<OldType,NewType>(old_layout);

  // If this is an upcast of a normal Layout with static negative strides, then offset as well
  if constexpr (sizeof(OldType) < sizeof(NewType) && not is_composed_layout<decltype(new_layout)>::value) {
    auto shape_diff  = transform(flatten(old_layout.shape()), flatten(new_layout.shape()), minus{});
    auto extent_diff = transform(shape_diff, flatten(old_layout.stride()), multiplies{});
    auto offset      = fold(extent_diff, Int<0>{}, [](auto const& i, auto const& a) { return i + cute::min(a,Int<0>{}); });

    return make_tensor(recast<NewType>(std::forward<Tensor>(tensor).data() + offset), new_layout);
  } else {
    return make_tensor(recast<NewType>(std::forward<Tensor>(tensor).data()         ), new_layout);
  }

  CUTE_GCC_UNREACHABLE;
}

template <class NewType, class Tensor,
          __CUTE_REQUIRES(is_tensor<remove_cvref_t<Tensor>>::value)>
CUTE_HOST_DEVICE constexpr auto
recast(Tensor&& tensor)
{
  return recast(std::forward<Tensor>(tensor), type_list<NewType>{});
}

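// Usage sketch for recast (illustrative only). Recast reinterprets both the iterator
// and the layout, so divisibility and alignment are the caller's responsibility as
// noted above:
//
//   auto rA     = make_tensor<half_t>(Shape<_8,_4>{});  // 32 half_t values
//   auto rA_u32 = recast<uint32_t>(rA);                 // the same storage viewed as
//                                                       // 16 uint32_t values, shape (_4,_4)
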
//
// max_common_vector
//

/* Return Int<N> such that N is the maximum number of contiguous elements
 * that logically correspond in the tensors of @a a and @a b. That is,
 * the number of elements that could reasonably be vectorized into a single load/store.
 *
 * @returns Int<N> with N >= 0
 *
 * A return value of Int<0> indicates that no such conclusion can be made and no
 * vectorization should be attempted.
 */
template <class SrcEngine, class SrcLayout,
          class DstEngine, class DstLayout>
CUTE_HOST_DEVICE constexpr auto
max_common_vector(Tensor<SrcEngine,SrcLayout> const& a,
                  Tensor<DstEngine,DstLayout> const& b)
{
  using SrcType = typename Tensor<SrcEngine,SrcLayout>::value_type;
  using DstType = typename Tensor<DstEngine,DstLayout>::value_type;

  using SrcRef = decltype(*(a.data()));
  using DstRef = decltype(*(b.data()));

  // Determine if the tensors are vectorization candidates at all
  if constexpr (// Should be the same value_types, else the copy is also performing a cast
                sizeof(SrcType) == sizeof(DstType) &&
                // The types should be trivially copyable so that vectorization is valid
                is_trivially_copyable<SrcType>::value &&
                is_trivially_copyable<DstType>::value &&
                // Should be load/storing real data, rather than implicit iterators or such
                is_reference<SrcRef>::value &&
                is_reference<DstRef>::value)
  {
    return max_common_vector(a.layout(), b.layout());
  } else {
    return Int<0>{};
  }

  CUTE_GCC_UNREACHABLE;
}

//
// Key algebraic operations
//

template <class Tensor, class Tile,
          __CUTE_REQUIRES(is_tensor<remove_cvref_t<Tensor>>::value)>
CUTE_HOST_DEVICE constexpr auto
logical_divide(Tensor    && tensor,
               Tile const&  tile)
{
  return make_tensor(std::forward<Tensor>(tensor).data(),
                     logical_divide(tensor.layout(), tile));
}

// zipped_divide is logical_divide with modes gathered into standard form ((BLK_A,BLK_B),(a,b))
template <class Tensor, class Tile,
          __CUTE_REQUIRES(is_tensor<remove_cvref_t<Tensor>>::value)>
CUTE_HOST_DEVICE constexpr auto
zipped_divide(Tensor    && tensor,
              Tile const&  tile)   // Layout or Tile<Layout...>
{
  return make_tensor(std::forward<Tensor>(tensor).data(),
                     zipped_divide(tensor.layout(), tile));
}

// tiled_divide is logical_divide with the second output mode flattened ((BLK_A,BLK_B),a,b)
template <class Tensor, class Tile,
          __CUTE_REQUIRES(is_tensor<remove_cvref_t<Tensor>>::value)>
CUTE_HOST_DEVICE constexpr auto
tiled_divide(Tensor    && tensor,
             Tile const&  tile)   // Layout or Tile<Layout...>
{
  return make_tensor(std::forward<Tensor>(tensor).data(),
                     tiled_divide(tensor.layout(), tile));
}

// logical_product on a Tensor doesn't make sense since it often increases cosize

//
// Logical Divide utilities: local_partition and local_tile
//

template <class Tensor, class Tile, class Coord,
          __CUTE_REQUIRES(is_tensor<remove_cvref_t<Tensor>>::value)>
CUTE_HOST_DEVICE constexpr auto
local_partition(Tensor    && tensor,
                Tile  const& tile,
                Coord const& coord)
{
  constexpr int R1 = decltype(rank(tensor))::value;

  // Split the modes of tensor according to the modes of tile
  // zipped_divide returns something like ((VEC_A,VEC_B,...),(a,b,...))

  // The coord indexes into the first mode, flatten the rest
  return zipped_divide(std::forward<Tensor>(tensor), tile)(coord, repeat<R1>(_));
}

template <class Tensor, class Tile, class Coord, class Projection,
          __CUTE_REQUIRES(is_tensor<remove_cvref_t<Tensor>>::value)>
CUTE_HOST_DEVICE constexpr auto
local_partition(Tensor          && tensor,
                Tile       const& tile,
                Coord      const& coord,
                Projection const& proj)
{
  return local_partition(std::forward<Tensor>(tensor), dice(proj, tile), dice(proj, coord));
}

// Special case with Layout and Integral that extracts the coord first
// e.g. local_partition(tensor, ThrLayout, threadIdx.x)
template <class Tensor, class LShape, class LStride, class Index,
          __CUTE_REQUIRES(is_tensor<remove_cvref_t<Tensor>>::value &&
                          is_integral<Index>::value)>
CUTE_HOST_DEVICE
auto
local_partition(Tensor                      && tensor,
                Layout<LShape,LStride> const&  tile,
                Index                  const&  index)
{
  return local_partition(std::forward<Tensor>(tensor),
                         product_each(shape(tile)),
                         tile.get_flat_coord(index));
}

// Special case with Layout and Integral that extracts the coord first
// e.g. local_partition(tensor, ThrLayout, threadIdx.x, Step<_1,X,_1>{})
template <class Tensor, class LShape, class LStride, class Index, class Projection,
          __CUTE_REQUIRES(is_tensor<remove_cvref_t<Tensor>>::value &&
                          is_integral<Index>::value)>
CUTE_HOST_DEVICE
auto
local_partition(Tensor                      && tensor,
                Layout<LShape,LStride> const&  tile,
                Index                  const&  index,
                Projection             const&  proj)
{
  return local_partition(std::forward<Tensor>(tensor),
                         dice(proj, product_each(shape(tile))),
                         dice(proj, tile).get_flat_coord(index));
}

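// Usage sketch for local_partition (illustrative only; smem and the shapes are
// hypothetical). Each thread obtains the non-owning subtensor of sA that it owns
// under the given thread arrangement:
//
//   auto sA   = make_tensor(make_smem_ptr(smem), Shape<_128,_8>{});
//   auto tAsA = local_partition(sA, make_layout(Shape<_32,_8>{}), threadIdx.x);
//   // tAsA covers the (128/32) x (8/8) = 4 x 1 elements assigned to this thread
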
template <class Tensor, class Tile, class Coord,
          __CUTE_REQUIRES(is_tensor<remove_cvref_t<Tensor>>::value)>
CUTE_HOST_DEVICE constexpr auto
local_tile(Tensor    && tensor,
           Tile  const& tile,
           Coord const& coord)
{
  constexpr int R0 = decltype(rank(tile))::value;
  constexpr int R1 = decltype(rank(tensor))::value;

  // Split the modes of tensor according to the modes of tile
  // zipped_divide returns something like ((VEC_A,VEC_B,...),(a,b,...))

  // The padded_coord indexes into the second mode, flatten the rest
  return zipped_divide(std::forward<Tensor>(tensor), tile)(repeat<R0>(_), append<R1>(coord,_));
}

template <class Tensor, class Tile, class Coord, class Proj,
          __CUTE_REQUIRES(is_tensor<remove_cvref_t<Tensor>>::value)>
CUTE_HOST_DEVICE
auto
local_tile(Tensor    && tensor,
           Tile  const& tile,
           Coord const& coord,
           Proj  const& proj)
{
  return local_tile(std::forward<Tensor>(tensor), dice(proj, tile), dice(proj, coord));
}

//
// Display utilities
//

template <class Engine, class Layout>
CUTE_HOST_DEVICE
void
print_tensor(Tensor<Engine,Layout> const& tensor)
{
  auto format = get_format(tensor(0));
  using type = typename decltype(format)::type;

  if constexpr (Layout::rank == 1)
  {
    for (int m = 0; m < size(tensor); ++m) {
      printf(format.format, format.digits, type(tensor(m)));
      printf("\n");
    }
  } else
  if constexpr (Layout::rank == 2)
  {
    for (int m = 0; m < size<0>(tensor); ++m) {
      for (int n = 0; n < size<1>(tensor); ++n) {
        printf(format.format, format.digits, type(tensor(m,n)));
      }
      printf("\n");
    }
  } else
  if constexpr (Layout::rank == 3)
  {
    print_tensor(tensor(_,_,0));
    for (int k = 1; k < size<2>(tensor); ++k) {
      for (int i = 0; i < format.digits*size<1>(tensor); ++i) { print("-"); } print("\n");
      print_tensor(tensor(_,_,k));
    }
  } else
  if constexpr (Layout::rank == 4)
  {
    print_tensor(tensor(_,_,_,0));
    for (int p = 1; p < size<3>(tensor); ++p) {
      for (int i = 0; i < format.digits*size<1>(tensor); ++i) { print("="); } print("\n");
      print_tensor(tensor(_,_,_,p));
    }
  }
}

template <class Engine, class Layout>
CUTE_HOST_DEVICE
void
print(Tensor<Engine,Layout> const& tensor)
{
  print(tensor.layout()); print("\n");
  print_tensor(tensor);
}

#if !defined(__CUDACC_RTC__)
template <class Engine, class Layout>
CUTE_HOST
std::ostream&
print_tensor_os(std::ostream& os, Tensor<Engine,Layout> const& tensor)
{
  int digits = 9;

  if constexpr (Layout::rank == 1)
  {
    for (int m = 0; m < size(tensor); ++m) {
      os << std::setw(digits) << tensor(m) << std::endl;
    }
  } else
  if constexpr (Layout::rank == 2)
  {
    for (int m = 0; m < size<0>(tensor); ++m) {
      for (int n = 0; n < size<1>(tensor); ++n) {
        os << std::setw(digits) << tensor(m,n);
      }
      os << std::endl;
    }
  } else
  if constexpr (Layout::rank == 3)
  {
    print_tensor_os(os, tensor(_,_,0));
    for (int k = 1; k < size<2>(tensor); ++k) {
      for (int i = 0; i < digits*size<1>(tensor); ++i) { os << "-"; } os << std::endl;
      print_tensor_os(os, tensor(_,_,k));
    }
  } else
  if constexpr (Layout::rank == 4)
  {
    print_tensor_os(os, tensor(_,_,_,0));
    for (int p = 1; p < size<3>(tensor); ++p) {
      for (int i = 0; i < digits*size<1>(tensor); ++i) { os << "="; } os << std::endl;
      print_tensor_os(os, tensor(_,_,_,p));
    }
  }

  return os;
}

template <class Engine, class Layout>
CUTE_HOST
std::ostream&
operator<<(std::ostream& os, Tensor<Engine,Layout> const& tensor)
{
  os << tensor.layout() << std::endl;
  return print_tensor_os(os, tensor);
}
#endif // !defined(__CUDACC_RTC__)

} // end namespace cute

//
// Extended Engines
//

#include <cute/swizzle_ptr.hpp>

//
// Tensor Algorithms
//

#include <cute/algorithm/tensor_algorithms.hpp>
#include <cute/algorithm/copy.hpp>
#include <cute/algorithm/fill.hpp>
#include <cute/algorithm/clear.hpp>
#include <cute/algorithm/axpby.hpp>
#include <cute/algorithm/gemm.hpp>
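
// End-to-end usage sketch (illustrative only; ptrA, M, K, and the tile shape are
// hypothetical). local_tile extracts one block's tile of a global-memory tensor, and
// print/print_tensor can be used to inspect tensors and layouts:
//
//   auto mA = make_tensor(make_gmem_ptr(ptrA), make_shape(M, K), make_stride(K, Int<1>{}));
//   auto gA = local_tile(mA, make_shape(Int<128>{}, Int<64>{}), make_coord(blockIdx.x, 0));
//   // gA references the 128 x 64 tile of mA selected by blockIdx.x along M and k-tile 0 along K
//   if (thread0()) { print(gA.layout()); }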