From ed93df7bf5a7fe1898a9a67db006deaf98bc4577 Mon Sep 17 00:00:00 2001
From: Cem Bassoy
Date: Sun, 27 Oct 2024 21:51:35 +0100
Subject: [PATCH] refac: update readme.md

---
 README.md                    |  6 +++---
 include/tlib/detail/layout.h | 10 +++++-----
 2 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/README.md b/README.md
index 647a00d..e6d1221 100644
--- a/README.md
+++ b/README.md
@@ -3,7 +3,7 @@ High-Performance Tensor-Matrix Multiplication Library - TLIB(TTM)
 [![Language](https://img.shields.io/badge/C%2B%2B-17-blue.svg)](https://en.wikipedia.org/wiki/C%2B%2B#Standardization)
 [![License](https://img.shields.io/badge/license-GPL-blue.svg)](https://github.com/bassoy/ttm/blob/master/LICENSE)
 [![Wiki](https://img.shields.io/badge/ttm-wiki-blue.svg)](https://github.com/bassoy/ttm/wiki)
-[![Gitter](https://img.shields.io/badge/ttm-chat%20on%20gitter-4eb899.svg)](https://gitter.im/bassoy)
+[![Discussions](https://img.shields.io/badge/ttm-discussions-blue.svg)](https://github.com/bassoy/ttm/discussions)
 [![Build Status](https://github.com/bassoy/ttm/actions/workflows/test.yml/badge.svg)](https://github.com/bassoy/ttm/actions)
 
 ## Summary
@@ -37,8 +37,8 @@ Please have a look at the [wiki](https://github.com/bassoy/ttm/wiki) page for mo
 * For large tensors reaches peak matrix-times-matrix performance
 
 ### Requirements
-* Requires the tensor elements to be contiguously stored in memory.
-* Element types must be either `float` or `double`.
+* Requires the tensor elements to be contiguously stored in memory
+* Element types must be either `float` or `double`
 
 ## Python Example
 ```python
diff --git a/include/tlib/detail/layout.h b/include/tlib/detail/layout.h
index a1dc045..0b63d41 100644
--- a/include/tlib/detail/layout.h
+++ b/include/tlib/detail/layout.h
@@ -120,7 +120,7 @@ inline void compute_last_order_layout(OutputIt begin, OutputIt end)
 template<class InputIt, class SizeType>
 inline auto inverse_mode(InputIt layout_begin, InputIt layout_end, SizeType mode )
 {
-    using value_type = typename std::iterator_traits<InputIt>::value_type;
+    using value_type = typename std::iterator_traits<InputIt>::value_type;
 
     if(!is_valid_layout(layout_begin,layout_end))
         throw std::runtime_error("Error in tlib::detail::inverse_mode(): input layout is not valid.");
@@ -130,13 +130,13 @@ inline auto inverse_mode(InputIt layout_begin, InputIt layout_end, SizeType mode
 
     auto const p = static_cast<value_type>(p_);
 
-    if(mode==0u || mode > SizeType(p))
-        throw std::runtime_error("Error in tlib::detail::inverse_mode(): mode should be one-based and equal to or less than layout size.");
+    if(mode==0u || mode > SizeType(p))
+        throw std::runtime_error("Error in tlib::detail::inverse_mode(): mode should be one-based and equal to or less than layout size.");
 
     auto inverse_mode = value_type{0u};
     for(; inverse_mode < p; ++inverse_mode)
-        if(layout_begin[inverse_mode] == value_type(mode))
-            break;
+        if(layout_begin[inverse_mode] == value_type(mode))
+            break;
 
     assert(inverse_mode < p);
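
For context on the function touched by the layout.h hunks: tlib::detail::inverse_mode returns the zero-based position at which a one-based mode appears in a layout (permutation) tuple. The standalone sketch below mirrors the validation and search shown in the hunk; it is only illustrative, assuming a std::vector<unsigned> layout, and the name inverse_mode_sketch is not part of the library's API.

```cpp
#include <cassert>
#include <cstddef>
#include <stdexcept>
#include <vector>

// Illustrative re-implementation of the search in the hunk above:
// given a one-based layout permutation, return the zero-based position
// at which `mode` occurs (i.e. the inverse permutation evaluated at `mode`).
std::size_t inverse_mode_sketch(std::vector<unsigned> const& layout, unsigned mode)
{
    auto const p = layout.size();
    if (mode == 0u || mode > p)
        throw std::runtime_error("mode should be one-based and equal to or less than layout size.");

    std::size_t k = 0;
    for (; k < p; ++k)
        if (layout[k] == mode)   // layout[k] holds the one-based mode stored at position k
            break;

    assert(k < p);               // always holds for a valid (bijective) layout
    return k;
}

int main()
{
    // First-order (column-major) layout {1,2,3}: mode 2 sits at position 1.
    assert(inverse_mode_sketch({1u, 2u, 3u}, 2u) == 1u);

    // Permuted layout {2,3,1}: mode 1 is found at position 2.
    assert(inverse_mode_sketch({2u, 3u, 1u}, 1u) == 2u);
}
```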