diff --git a/examples/android/android-simple/app/src/main/cpp/KomputeJniNative.cpp b/examples/android/android-simple/app/src/main/cpp/KomputeJniNative.cpp
index ecbc70c4..b8b162d4 100644
--- a/examples/android/android-simple/app/src/main/cpp/KomputeJniNative.cpp
+++ b/examples/android/android-simple/app/src/main/cpp/KomputeJniNative.cpp
@@ -23,8 +23,8 @@
 
 // Allows us to use the C++ sleep function to wait when loading the
 // Vulkan library in android
-#include <thread>
 #include <chrono>
+#include <thread>
 
 static std::vector<float>
 jfloatArrayToVector(JNIEnv* env, const jfloatArray& fromArray)
diff --git a/examples/android/android-simple/app/src/main/cpp/KomputeModelML.cpp b/examples/android/android-simple/app/src/main/cpp/KomputeModelML.cpp
index 1ff15855..e5a9c237 100644
--- a/examples/android/android-simple/app/src/main/cpp/KomputeModelML.cpp
+++ b/examples/android/android-simple/app/src/main/cpp/KomputeModelML.cpp
@@ -3,7 +3,6 @@
 #include "my_shader.hpp"
 
 #include
-
 KomputeModelML::KomputeModelML() {}
 
 KomputeModelML::~KomputeModelML() {}
diff --git a/examples/array_multiplication/src/main.cpp b/examples/array_multiplication/src/main.cpp
index 28b7d49b..43f9e080 100644
--- a/examples/array_multiplication/src/main.cpp
+++ b/examples/array_multiplication/src/main.cpp
@@ -3,8 +3,8 @@
 
 #include
 #include
-#include
 #include
+#include
 
 int main()
diff --git a/examples/godot_examples/custom_module/kompute_summator/KomputeSummatorNode.cpp b/examples/godot_examples/custom_module/kompute_summator/KomputeSummatorNode.cpp
index cef2d26d..0afb100c 100644
--- a/examples/godot_examples/custom_module/kompute_summator/KomputeSummatorNode.cpp
+++ b/examples/godot_examples/custom_module/kompute_summator/KomputeSummatorNode.cpp
@@ -43,7 +43,8 @@ KomputeSummatorNode::add(float value)
 
 void
 KomputeSummatorNode::reset()
-{}
+{
+}
 
 float
 KomputeSummatorNode::get_total() const
@@ -97,7 +98,8 @@ KomputeSummatorNode::_init()
 
 void
 KomputeSummatorNode::_process(float delta)
-{}
+{
+}
 
 void
 KomputeSummatorNode::_bind_methods()
diff --git a/examples/godot_examples/gdnative_shared/src/KomputeSummator.cpp b/examples/godot_examples/gdnative_shared/src/KomputeSummator.cpp
index 277fb69d..3e9f81d7 100644
--- a/examples/godot_examples/gdnative_shared/src/KomputeSummator.cpp
+++ b/examples/godot_examples/gdnative_shared/src/KomputeSummator.cpp
@@ -43,7 +43,8 @@ KomputeSummator::add(float value)
 
 void
 KomputeSummator::reset()
-{}
+{
+}
 
 float
 KomputeSummator::get_total() const
@@ -97,7 +98,8 @@ KomputeSummator::_init()
 
 void
 KomputeSummator::_process(float delta)
-{}
+{
+}
 
 void
 KomputeSummator::_register_methods()
diff --git a/examples/godot_logistic_regression/custom_module/kompute_model_ml/KomputeModelMLNode.cpp b/examples/godot_logistic_regression/custom_module/kompute_model_ml/KomputeModelMLNode.cpp
index bf9b9ef8..2da54323 100644
--- a/examples/godot_logistic_regression/custom_module/kompute_model_ml/KomputeModelMLNode.cpp
+++ b/examples/godot_logistic_regression/custom_module/kompute_model_ml/KomputeModelMLNode.cpp
@@ -147,7 +147,8 @@ KomputeModelMLNode::_init()
 
 void
 KomputeModelMLNode::_process(float delta)
-{}
+{
+}
 
 void
 KomputeModelMLNode::_bind_methods()
diff --git a/examples/godot_logistic_regression/gdnative_shared/src/KomputeModelML.cpp b/examples/godot_logistic_regression/gdnative_shared/src/KomputeModelML.cpp
index 2efbbfdd..e25acad4 100644
--- a/examples/godot_logistic_regression/gdnative_shared/src/KomputeModelML.cpp
+++ b/examples/godot_logistic_regression/gdnative_shared/src/KomputeModelML.cpp
@@ -151,7 +151,8 @@ KomputeModelML::_init()
 
 void
 KomputeModelML::_process(float delta)
-{}
+{
+}
 
 void
 KomputeModelML::_register_methods()
diff --git a/examples/logistic_regression/src/main.cpp b/examples/logistic_regression/src/main.cpp
index e149a0bd..7279bb10 100644
--- a/examples/logistic_regression/src/main.cpp
+++ b/examples/logistic_regression/src/main.cpp
@@ -20,17 +20,13 @@ main()
     std::shared_ptr<kp::TensorT<float>> y = mgr.tensor({ 0, 0, 0, 1, 1 });
     std::shared_ptr<kp::TensorT<float>> wIn = mgr.tensor({ 0.001, 0.001 });
 
-    std::shared_ptr<kp::TensorT<float>> wOutI =
-      mgr.tensor({ 0, 0, 0, 0, 0 });
-    std::shared_ptr<kp::TensorT<float>> wOutJ =
-      mgr.tensor({ 0, 0, 0, 0, 0 });
+    std::shared_ptr<kp::TensorT<float>> wOutI = mgr.tensor({ 0, 0, 0, 0, 0 });
+    std::shared_ptr<kp::TensorT<float>> wOutJ = mgr.tensor({ 0, 0, 0, 0, 0 });
 
     std::shared_ptr<kp::TensorT<float>> bIn = mgr.tensor({ 0 });
-    std::shared_ptr<kp::TensorT<float>> bOut =
-      mgr.tensor({ 0, 0, 0, 0, 0 });
+    std::shared_ptr<kp::TensorT<float>> bOut = mgr.tensor({ 0, 0, 0, 0, 0 });
 
-    std::shared_ptr<kp::TensorT<float>> lOut =
-      mgr.tensor({ 0, 0, 0, 0, 0 });
+    std::shared_ptr<kp::TensorT<float>> lOut = mgr.tensor({ 0, 0, 0, 0, 0 });
 
     std::vector<std::shared_ptr<kp::Tensor>> params = { xI,  xJ,    y,
                                                         wIn, wOutI, wOutJ,
@@ -40,9 +36,8 @@ main()
 
     std::vector<uint32_t> spirv2{ 0x1, 0x2 };
 
-    std::vector<uint32_t> spirv(
-      shader::MY_SHADER_COMP_SPV.begin(),
-      shader::MY_SHADER_COMP_SPV.end());
+    std::vector<uint32_t> spirv(shader::MY_SHADER_COMP_SPV.begin(),
+                                shader::MY_SHADER_COMP_SPV.end());
 
     std::shared_ptr<kp::Algorithm> algorithm = mgr.algorithm(
       params, spirv, kp::Workgroup({ 5 }), std::vector<float>({ 5.0 }));
@@ -69,9 +64,7 @@ main()
                 wIn->data()[1],
                 bIn->data()[0]);
 
-    if (wIn->data()[0] > 0.01 ||
-        wIn->data()[1] < 1.0 ||
-        bIn->data()[0] > 0.0) {
+    if (wIn->data()[0] > 0.01 || wIn->data()[1] < 1.0 || bIn->data()[0] > 0.0) {
         throw std::runtime_error("Result does not match");
     }
 }
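The logistic_regression hunks above are whitespace-only, but in passing they show the whole Kompute workflow: tensors come from the manager, SPIR-V plus tensors form an algorithm, and a sequence evaluates the sync and dispatch ops. Below is a minimal sketch of that flow, assuming the v0.8-style API used throughout this diff (`kp::Manager`, `mgr.tensor`, `mgr.algorithm`, the `OpTensorSync*`/`OpAlgoDispatch` ops); the `spirv` contents are a placeholder, not a working shader.

```cpp
// Minimal sketch of the flow exercised by examples/logistic_regression above.
// Assumes the v0.8-style Kompute API seen in this diff; `spirv` is left as a
// placeholder where a real program would supply a compiled compute shader.
#include <kompute/Kompute.hpp>
#include <memory>
#include <vector>

int main()
{
    kp::Manager mgr;

    std::shared_ptr<kp::TensorT<float>> in = mgr.tensor({ 1.0, 2.0, 3.0 });
    std::shared_ptr<kp::TensorT<float>> out = mgr.tensor({ 0.0, 0.0, 0.0 });
    std::vector<std::shared_ptr<kp::Tensor>> params = { in, out };

    std::vector<uint32_t> spirv = { /* compiled SPIR-V words go here */ };

    // Three workgroups, one per element, mirroring the example's
    // kp::Workgroup({ 5 }) for its five-element tensors.
    std::shared_ptr<kp::Algorithm> algorithm =
      mgr.algorithm(params, spirv, kp::Workgroup({ 3 }));

    mgr.sequence()
      ->eval<kp::OpTensorSyncDevice>(params) // host -> device
      ->eval<kp::OpAlgoDispatch>(algorithm)  // run the shader
      ->eval<kp::OpTensorSyncLocal>(params); // device -> host

    // out->vector() now holds whatever the shader wrote.
    return 0;
}
```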
diff --git a/src/Manager.cpp b/src/Manager.cpp
index a8acc114..301b4a6f 100644
--- a/src/Manager.cpp
+++ b/src/Manager.cpp
@@ -176,8 +176,10 @@ Manager::createInstance()
 #ifdef __APPLE__
     // Required for backwards compatibility for MacOS M1 devices
     // https://stackoverflow.com/questions/72374316/validation-error-on-device-extension-on-m1-mac
-    applicationExtensions.push_back(VK_KHR_PORTABILITY_ENUMERATION_EXTENSION_NAME);
-    computeInstanceCreateInfo.flags |= vk::InstanceCreateFlagBits::eEnumeratePortabilityKHR;
+    applicationExtensions.push_back(
+      VK_KHR_PORTABILITY_ENUMERATION_EXTENSION_NAME);
+    computeInstanceCreateInfo.flags |=
+      vk::InstanceCreateFlagBits::eEnumeratePortabilityKHR;
 #endif
 
     if (!applicationExtensions.empty()) {
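For context on the Manager.cpp hunk: on newer Vulkan loaders, MoltenVK counts as a portability-subset implementation, so an instance that does not opt in to portability enumeration simply will not see the device on Apple silicon. A standalone sketch of the same pattern outside Kompute follows; `makePortableInstance` is an illustrative helper, not Kompute API.

```cpp
// Standalone sketch of the portability-enumeration pattern the Manager.cpp
// hunk above reformats; the helper name is illustrative, not Kompute API.
#include <vulkan/vulkan.hpp>
#include <vector>

vk::Instance makePortableInstance(const vk::ApplicationInfo& appInfo)
{
    std::vector<const char*> extensions;
    vk::InstanceCreateInfo createInfo;
    createInfo.pApplicationInfo = &appInfo;

#ifdef __APPLE__
    // Same two lines the hunk wraps: enable the extension and set the flag
    // so portability (MoltenVK) devices are enumerated at all.
    extensions.push_back(VK_KHR_PORTABILITY_ENUMERATION_EXTENSION_NAME);
    createInfo.flags |= vk::InstanceCreateFlagBits::eEnumeratePortabilityKHR;
#endif

    createInfo.enabledExtensionCount =
      static_cast<uint32_t>(extensions.size());
    createInfo.ppEnabledExtensionNames = extensions.data();
    return vk::createInstance(createInfo);
}
```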
"given it's of eStorage type"); continue; } this->mTensors[i]->setRawData(data); diff --git a/src/Sequence.cpp b/src/Sequence.cpp index 25b72f6a..d5d127ff 100644 --- a/src/Sequence.cpp +++ b/src/Sequence.cpp @@ -214,8 +214,9 @@ Sequence::destroy() return; } - if(this->mFence) { - this->mDevice->destroy(this->mFence, (vk::Optional)nullptr); + if (this->mFence) { + this->mDevice->destroy( + this->mFence, (vk::Optional)nullptr); } if (this->mFreeCommandBuffer) { diff --git a/src/Tensor.cpp b/src/Tensor.cpp index ad5cac9a..5a906578 100644 --- a/src/Tensor.cpp +++ b/src/Tensor.cpp @@ -156,8 +156,8 @@ Tensor::mapRawData() } else if (this->mTensorType == TensorTypes::eDevice) { hostVisibleMemory = this->mStagingMemory; } else { - KP_LOG_WARN( - "Kompute Tensor mapping data not supported on {} tensor", toString(this->tensorType())); + KP_LOG_WARN("Kompute Tensor mapping data not supported on {} tensor", + toString(this->tensorType())); return; } @@ -167,7 +167,6 @@ Tensor::mapRawData() // flush this->mRawData = this->mDevice->mapMemory( *hostVisibleMemory, 0, bufferSize, vk::MemoryMapFlags()); - } void @@ -183,8 +182,8 @@ Tensor::unmapRawData() } else if (this->mTensorType == TensorTypes::eDevice) { hostVisibleMemory = this->mStagingMemory; } else { - KP_LOG_WARN( - "Kompute Tensor mapping data not supported on {} tensor", toString(this->tensorType())); + KP_LOG_WARN("Kompute Tensor mapping data not supported on {} tensor", + toString(this->tensorType())); return; } diff --git a/src/include/kompute/logger/Logger.hpp b/src/include/kompute/logger/Logger.hpp index 64468cbe..6e06918a 100644 --- a/src/include/kompute/logger/Logger.hpp +++ b/src/include/kompute/logger/Logger.hpp @@ -24,8 +24,8 @@ static const char* KOMPUTE_LOG_TAG = "KomputeLog"; #else #if KOMPUTE_BUILD_PYTHON -#include #include +#include namespace py = pybind11; // from python/src/main.cpp extern py::object kp_trace, kp_debug, kp_info, kp_warning, kp_error; @@ -88,7 +88,7 @@ setupLogger(); fmt::print("[{} {}] [debug] [{}:{}] {}\n", \ __DATE__, \ __TIME__, \ - __FILE_NAME__, \ + __FILE_NAME__, \ __LINE__, \ fmt::format(__VA_ARGS__)) #else diff --git a/src/logger/Logger.cpp b/src/logger/Logger.cpp index 69df2b60..58bd84c1 100644 --- a/src/logger/Logger.cpp +++ b/src/logger/Logger.cpp @@ -52,17 +52,14 @@ setupLogger() // TODO: Add flag in compile flags std::shared_ptr logger = #if KOMPUTE_SPDLOG_ASYNC_LOGGING - std::make_shared( - "", - sinks.begin(), - sinks.end(), - spdlog::thread_pool(), - spdlog::async_overflow_policy::block); + std::make_shared( + "", + sinks.begin(), + sinks.end(), + spdlog::thread_pool(), + spdlog::async_overflow_policy::block); #else - std::make_shared( - "", - sinks.begin(), - sinks.end()); + std::make_shared("", sinks.begin(), sinks.end()); #endif logger->set_level(getLogLevel()); diff --git a/test/TestOpShadersFromStringAndFile.cpp b/test/TestOpShadersFromStringAndFile.cpp index ab29b5f9..dc2a0ecb 100644 --- a/test/TestOpShadersFromStringAndFile.cpp +++ b/test/TestOpShadersFromStringAndFile.cpp @@ -10,13 +10,15 @@ #include "test_shader.hpp" // Introducing custom struct that can be used for tensors -struct TestStruct { +struct TestStruct +{ float x; uint32_t y; int32_t z; // Creating an == operator overload for the comparison below - bool operator==(const TestStruct rhs) const { + bool operator==(const TestStruct rhs) const + { return this->x == rhs.x && this->y == rhs.y && this->z == rhs.z; } }; @@ -55,8 +57,10 @@ TEST(TestShader, ShaderRawDataFromConstructorCustomDataType) kp::Manager mgr; - std::shared_ptr> 
diff --git a/test/TestOpShadersFromStringAndFile.cpp b/test/TestOpShadersFromStringAndFile.cpp
index ab29b5f9..dc2a0ecb 100644
--- a/test/TestOpShadersFromStringAndFile.cpp
+++ b/test/TestOpShadersFromStringAndFile.cpp
@@ -10,13 +10,15 @@
 #include "test_shader.hpp"
 
 // Introducing custom struct that can be used for tensors
-struct TestStruct {
+struct TestStruct
+{
     float x;
     uint32_t y;
     int32_t z;
 
     // Creating an == operator overload for the comparison below
-    bool operator==(const TestStruct rhs) const {
+    bool operator==(const TestStruct rhs) const
+    {
         return this->x == rhs.x && this->y == rhs.y && this->z == rhs.z;
     }
 };
@@ -55,8 +57,10 @@ TEST(TestShader, ShaderRawDataFromConstructorCustomDataType)
 
     kp::Manager mgr;
 
-    std::shared_ptr<kp::TensorT<TestStruct>> tensorA = mgr.tensorT<TestStruct>({ { 0.1, 2, 3} });
-    std::shared_ptr<kp::TensorT<TestStruct>> tensorB = mgr.tensorT<TestStruct>({ { 0.0, 0, 0} });
+    std::shared_ptr<kp::TensorT<TestStruct>> tensorA =
+      mgr.tensorT<TestStruct>({ { 0.1, 2, 3 } });
+    std::shared_ptr<kp::TensorT<TestStruct>> tensorB =
+      mgr.tensorT<TestStruct>({ { 0.0, 0, 0 } });
 
     std::vector<uint32_t> spirv = compileSource(shader);
 
@@ -67,8 +71,10 @@ TEST(TestShader, ShaderRawDataFromConstructorCustomDataType)
       ->eval<kp::OpAlgoDispatch>(mgr.algorithm(params, spirv))
      ->eval<kp::OpTensorSyncLocal>(params);
 
-    EXPECT_EQ(tensorA->vector(), std::vector<TestStruct>({ TestStruct{0.1, 2, 3} }));
-    EXPECT_EQ(tensorB->vector(), std::vector<TestStruct>({ TestStruct{0.1, 2, 3} }));
+    EXPECT_EQ(tensorA->vector(),
+              std::vector<TestStruct>({ TestStruct{ 0.1, 2, 3 } }));
+    EXPECT_EQ(tensorB->vector(),
+              std::vector<TestStruct>({ TestStruct{ 0.1, 2, 3 } }));
 }
 
 TEST(TestShaderEndianness, ShaderRawDataFromConstructor)
@@ -151,4 +157,3 @@ TEST(TestOpAlgoCreate, ShaderCompiledDataFromConstructor)
     EXPECT_EQ(tensorA->vector(), std::vector<float>({ 0, 1, 2 }));
     EXPECT_EQ(tensorB->vector(), std::vector<float>({ 3, 4, 5 }));
 }
-
diff --git a/test/TestOpTensorCopy.cpp b/test/TestOpTensorCopy.cpp
index 9f8de608..60e0c485 100644
--- a/test/TestOpTensorCopy.cpp
+++ b/test/TestOpTensorCopy.cpp
@@ -169,13 +169,13 @@ TEST(TestOpTensorCopy, CopyThroughStorageTensor)
     std::shared_ptr<kp::TensorT<float>> tensorOut = mgr.tensor(testVecOut);
     // Tensor storage requires a vector to be passed only to reflect size
     std::shared_ptr<kp::TensorT<float>> tensorStorage =
-        mgr.tensor({ 0, 0, 0 }, kp::Tensor::TensorTypes::eStorage);
+      mgr.tensor({ 0, 0, 0 }, kp::Tensor::TensorTypes::eStorage);
 
     mgr.sequence()
-        ->eval<kp::OpTensorSyncDevice>({ tensorIn, tensorOut })
-        ->eval<kp::OpTensorCopy>({ tensorIn, tensorStorage })
-        ->eval<kp::OpTensorCopy>({ tensorStorage, tensorOut })
-        ->eval<kp::OpTensorSyncLocal>({ tensorIn, tensorOut });
+      ->eval<kp::OpTensorSyncDevice>({ tensorIn, tensorOut })
+      ->eval<kp::OpTensorCopy>({ tensorIn, tensorStorage })
+      ->eval<kp::OpTensorCopy>({ tensorStorage, tensorOut })
+      ->eval<kp::OpTensorSyncLocal>({ tensorIn, tensorOut });
 
     // Making sure the GPU holds the same vector
     EXPECT_EQ(tensorIn->vector(), tensorOut->vector());
@@ -192,7 +192,7 @@ TEST(TestOpTensorCopy, CopyTensorThroughStorageViaAlgorithms)
     std::shared_ptr<kp::TensorT<float>> tensorOut = mgr.tensor(testVecOut);
     // Tensor storage requires a vector to be passed only to reflect size
     std::shared_ptr<kp::TensorT<float>> tensorStorage =
-        mgr.tensor({ 0, 0, 0 }, kp::Tensor::TensorTypes::eStorage);
+      mgr.tensor({ 0, 0, 0 }, kp::Tensor::TensorTypes::eStorage);
 
     EXPECT_TRUE(tensorIn->isInit());
     EXPECT_TRUE(tensorOut->isInit());
@@ -213,9 +213,8 @@ TEST(TestOpTensorCopy, CopyTensorThroughStorageViaAlgorithms)
     }
 )");
 
-    auto algoA = mgr.algorithm(
-        { tensorIn, tensorStorage },
-        compileSource(shaderA));
+    auto algoA =
+      mgr.algorithm({ tensorIn, tensorStorage }, compileSource(shaderA));
 
     // Copy from storage tensor to output tensor
     std::string shaderB = (R"(
@@ -233,15 +232,14 @@ TEST(TestOpTensorCopy, CopyTensorThroughStorageViaAlgorithms)
     }
 )");
 
-    auto algoB = mgr.algorithm(
-        { tensorStorage, tensorOut },
-        compileSource(shaderB));
+    auto algoB =
+      mgr.algorithm({ tensorStorage, tensorOut }, compileSource(shaderB));
 
     mgr.sequence()
-        ->eval<kp::OpTensorSyncDevice>({ tensorIn })
-        ->eval<kp::OpAlgoDispatch>(algoA)
-        ->eval<kp::OpAlgoDispatch>(algoB)
-        ->eval<kp::OpTensorSyncLocal>({ tensorOut });
+      ->eval<kp::OpTensorSyncDevice>({ tensorIn })
+      ->eval<kp::OpAlgoDispatch>(algoA)
+      ->eval<kp::OpAlgoDispatch>(algoB)
+      ->eval<kp::OpTensorSyncLocal>({ tensorOut });
 
     // Making sure the GPU holds the same vector
     EXPECT_EQ(tensorIn->vector(), tensorOut->vector());
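The TestOpTensorCopy changes are formatting-only, but the eStorage handling they touch (see the OpTensorCopy.cpp hunk earlier) is worth calling out: storage tensors have no host-visible memory, so `OpTensorCopy::postEval` skips them on the CPU side and their data only moves on the GPU. A condensed sketch of the pattern the CopyThroughStorageTensor test exercises, with values chosen arbitrarily for illustration:

```cpp
// Condensed sketch of the CopyThroughStorageTensor pattern tested above;
// it mirrors the test code, with arbitrary illustrative values.
#include <kompute/Kompute.hpp>

void copyThroughStorage()
{
    kp::Manager mgr;

    auto tensorIn = mgr.tensor({ 1.0, 2.0, 3.0 });
    auto tensorOut = mgr.tensor({ 0.0, 0.0, 0.0 });
    // The vector only fixes the size; eStorage data never touches the host.
    auto tensorStorage =
      mgr.tensor({ 0, 0, 0 }, kp::Tensor::TensorTypes::eStorage);

    mgr.sequence()
      ->eval<kp::OpTensorSyncDevice>({ tensorIn, tensorOut })
      ->eval<kp::OpTensorCopy>({ tensorIn, tensorStorage }) // GPU-side only
      ->eval<kp::OpTensorCopy>({ tensorStorage, tensorOut })
      ->eval<kp::OpTensorSyncLocal>({ tensorIn, tensorOut });

    // tensorOut now holds { 1.0, 2.0, 3.0 }.
}
```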