From fd3e2de04f19be8c70268f4c5e24a3b4bdcb3e56 Mon Sep 17 00:00:00 2001
From: benjamin
Date: Tue, 28 Jan 2020 18:35:44 -0600
Subject: [PATCH 1/2] Added multiply_with_talsh to contraction_utils

---
 src/contraction_utils.cpp | 96 ++++++++++++++++++++++++++++++++++++++-
 src/contraction_utils.h   |  4 ++
 2 files changed, 99 insertions(+), 1 deletion(-)

diff --git a/src/contraction_utils.cpp b/src/contraction_utils.cpp
index 49ad4eda..55fc3a2b 100644
--- a/src/contraction_utils.cpp
+++ b/src/contraction_utils.cpp
@@ -8,6 +8,9 @@
 #include <vector>
 
 #include "errors.h"
+// NEW
+#include "talshxx.hpp"
+// NEW UP TO HERE
 
 namespace qflex {
 
@@ -233,7 +236,12 @@ void ContractionData::ContractGrid(
       std::string result_id = result_space(patch_max_size_[op.expand.id]);
       Tensor& result = get_scratch(result_id);
       try {
-        multiply(prev, next, result, get_scratch(kGeneralSpace).data());
+        // NEW REMOVE
+        //multiply(prev, next, result, get_scratch(kGeneralSpace).data());
+        // NEW REMOVE UP TO HERE
+        // NEW
+        multiply_with_talsh(prev, next, result);
+        // NEW UP TO HERE
       } catch (const std::string& err_msg) {
         throw ERROR_MSG("Failed to call multiply(). Error:\n\t[", err_msg,
                         "]");
@@ -622,10 +630,96 @@ void ContractGrid(const std::list<ContractionOperation>& ordering,
     active_patches[patch] = false;
   }
   try {
+    // NEW
+    talsh::initialize();
+    std::cout << "Initialized TALSH.\n";
+    // NEW UP TO HERE
     data.ContractGrid(ordering, /*output_index = */ 0, active_patches);
+    // NEW
+    talsh::shutdown();
+    std::cout << "Shut down TALSH.\n";
+    // NEW UP TO HERE
   } catch (const std::string& err_msg) {
    throw ERROR_MSG("Failed to call ContractGrid(). Error:\n\t[", err_msg,
                    "]");
  }
}
+// NEW
+void multiply_with_talsh(Tensor& A, Tensor& B, Tensor& C) {
+  // Remove this and call _ALPHABET from tensor.cpp
+  const std::vector<std::string> _ALPHABET(
+      {"a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m",
+       "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z",
+       "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M",
+       "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z"});
+  if (A.data() == C.data()) {
+    throw ERROR_MSG("A and C cannot be the same tensor: ",
+                    C.tensor_to_string());
+  }
+  if (B.data() == C.data()) {
+    throw ERROR_MSG("B and C cannot be the same tensor: ",
+                    C.tensor_to_string());
+  }
+
+  // Get unique indices.
+  std::unordered_set<std::string> unique_indices;
+  std::vector<Tensor*> tensors({&A, &B, &C});
+  for (auto T : tensors) {
+    for (const auto& index : T->get_indices()) {
+      unique_indices.insert(index);
+    }
+  }
+  std::unordered_map<std::string, std::string> index_letter;
+  {
+    std::size_t alphabet_pos = 0;
+    for (const auto& index : unique_indices) {
+      index_letter[index] = _ALPHABET[alphabet_pos];
+      ++alphabet_pos;
+    }
+  }
+  // Create contraction string, reversing each tensors indexes, so that TALSH
+  // contracts properly, since it follows FORTRAN convention.
+  std::string contraction_string = "D(";
+  std::string index;
+  for (int i = C.get_indices().size() - 1; i >= 0; --i) {
+    index = C.get_indices().at(i);
+    contraction_string += index_letter.at(index) + ",";
+  }
+  contraction_string[contraction_string.size() - 1] = ')';
+  contraction_string += "+=L(";
+  for (int i = A.get_indices().size() - 1; i >= 0; --i) {
+    index = A.get_indices().at(i);
+    contraction_string += index_letter.at(index) + ",";
+  }
+  contraction_string[contraction_string.size() - 1] = ')';
+  contraction_string += "*R(";
+  for (int i = B.get_indices().size() - 1; i >= 0; --i) {
+    index = B.get_indices().at(i);
+    contraction_string += index_letter.at(index) + ",";
+  }
+  contraction_string[contraction_string.size() - 1] = ')';
+
+  bool done;
+  int errc;
+  std::vector<std::size_t> signature_D;
+  for (const auto& dim : C.get_dimensions()) {
+    signature_D.push_back(dim);
+  }
+  talsh::Tensor D(signature_D, C.data());
+  std::vector<std::size_t> signature_L;
+  for (const auto& dim : A.get_dimensions()) {
+    signature_L.push_back(dim);
+  }
+  talsh::Tensor L(signature_L, A.data());
+  std::vector<std::size_t> signature_R;
+  for (const auto& dim : B.get_dimensions()) {
+    signature_R.push_back(dim);
+  }
+  talsh::Tensor R(signature_R, B.data());
+  errc = D.contractAccumulate(nullptr, contraction_string, L, R, DEV_HOST,
+                              0, s_type(1.0), false);
+  done = D.sync();
+}
+// NEW UP TO HERE
+
 }  // namespace qflex
diff --git a/src/contraction_utils.h b/src/contraction_utils.h
index f31b9209..47a514d2 100644
--- a/src/contraction_utils.h
+++ b/src/contraction_utils.h
@@ -219,6 +219,10 @@ void ContractGrid(const std::list<ContractionOperation>& ordering,
                   std::vector<std::vector<Tensor>>* tensor_grid,
                   std::vector<std::complex<double>>* amplitudes);
 
+// NEW
+void multiply_with_talsh(Tensor& A, Tensor& B, Tensor& C);
+// NEW UP TO HERE
+
 }  // namespace qflex
 
 #endif  // CONTRACTION_UTILS_

From c3fdf2a64f3ffdcce2bf9643b2b8b773bd4f5cb3 Mon Sep 17 00:00:00 2001
From: benjamin
Date: Wed, 29 Jan 2020 11:25:19 -0600
Subject: [PATCH 2/2] Linked correctly to TALSH. Added dry flag to qFlex
 native multiply. Called correctly TALSH contractAccumulate when integrated
 within qFlex.

---
 src/contraction_utils.cpp | 67 +++++++++++++++++++++++++--------------
 src/tensor.cpp            | 20 +++++++++++-
 src/tensor.h              |  8 ++++-
 3 files changed, 70 insertions(+), 25 deletions(-)

diff --git a/src/contraction_utils.cpp b/src/contraction_utils.cpp
index 55fc3a2b..decec789 100644
--- a/src/contraction_utils.cpp
+++ b/src/contraction_utils.cpp
@@ -326,7 +326,12 @@
           result_space(patch_max_size_[op.merge.target_id]);
       Tensor& result = get_scratch(result_id);
       try {
-        multiply(patch_1, patch_2, result, get_scratch(kGeneralSpace).data());
+        // NEW REMOVE
+        //multiply(patch_1, patch_2, result, get_scratch(kGeneralSpace).data());
+        // NEW REMOVE UP TO HERE
+        // NEW
+        multiply_with_talsh(patch_1, patch_2, result);
+        // NEW UP TO HERE
       } catch (const std::string& err_msg) {
         throw ERROR_MSG("Failed to call multiply(). Error:\n\t[", err_msg,
                         "]");
@@ -631,7 +636,8 @@ void ContractGrid(const std::list<ContractionOperation>& ordering,
   }
   try {
     // NEW
-    talsh::initialize();
+    std::size_t init_space(10000000000);
+    talsh::initialize(&init_space);
     std::cout << "Initialized TALSH.\n";
     // NEW UP TO HERE
     data.ContractGrid(ordering, /*output_index = */ 0, active_patches);
@@ -645,6 +651,21 @@ void ContractGrid(const std::list<ContractionOperation>& ordering,
  }
}
// NEW
+
+// Concatenate strings separated by commas in the result.
+std::string comma_concatenate_reversed(
+    const std::vector<std::string>& strings,
+    const std::unordered_map<std::string, std::string>& index_letter) {
+  std::string result_string = "";
+  if (strings.size() > 0) {
+    for (size_t i = strings.size() - 1; i > 0; --i) {
+      result_string += index_letter.at(strings[i]) + ",";
+    }
+    result_string += index_letter.at(strings[0]);
+  }
+  return result_string;
+}
+
 void multiply_with_talsh(Tensor& A, Tensor& B, Tensor& C) {
   // Remove this and call _ALPHABET from tensor.cpp
   const std::vector<std::string> _ALPHABET(
@@ -663,7 +684,7 @@ void multiply_with_talsh(Tensor& A, Tensor& B, Tensor& C) {
 
   // Get unique indices.
   std::unordered_set<std::string> unique_indices;
-  std::vector<Tensor*> tensors({&A, &B, &C});
+  std::vector<Tensor*> tensors({&A, &B});
   for (auto T : tensors) {
     for (const auto& index : T->get_indices()) {
       unique_indices.insert(index);
@@ -677,42 +698,42 @@ void multiply_with_talsh(Tensor& A, Tensor& B, Tensor& C) {
       ++alphabet_pos;
     }
   }
+
+  // Update dimensions and indexes on qFlex tensors
+  multiply(A, B, C, nullptr, true);
+
   // Create contraction string, reversing each tensors indexes, so that TALSH
   // contracts properly, since it follows FORTRAN convention.
   std::string contraction_string = "D(";
   std::string index;
-  for (int i = C.get_indices().size() - 1; i >= 0; --i) {
-    index = C.get_indices().at(i);
-    contraction_string += index_letter.at(index) + ",";
-  }
-  contraction_string[contraction_string.size() - 1] = ')';
-  contraction_string += "+=L(";
-  for (int i = A.get_indices().size() - 1; i >= 0; --i) {
-    index = A.get_indices().at(i);
-    contraction_string += index_letter.at(index) + ",";
-  }
-  contraction_string[contraction_string.size() - 1] = ')';
-  contraction_string += "*R(";
-  for (int i = B.get_indices().size() - 1; i >= 0; --i) {
-    index = B.get_indices().at(i);
-    contraction_string += index_letter.at(index) + ",";
-  }
-  contraction_string[contraction_string.size() - 1] = ')';
+
+  contraction_string += comma_concatenate_reversed(C.get_indices(),
+                                                   index_letter);
+  contraction_string += ")+=L(";
+  contraction_string += comma_concatenate_reversed(A.get_indices(),
+                                                   index_letter);
+  contraction_string += ")*R(";
+  contraction_string += comma_concatenate_reversed(B.get_indices(),
+                                                   index_letter);
+  contraction_string += ")";
 
   bool done;
   int errc;
   std::vector<std::size_t> signature_D;
-  for (const auto& dim : C.get_dimensions()) {
+  for (int i = C.get_dimensions().size() - 1; i >= 0; --i) {
+    size_t dim = C.get_dimensions()[i];
     signature_D.push_back(dim);
   }
   talsh::Tensor D(signature_D, C.data());
   std::vector<std::size_t> signature_L;
-  for (const auto& dim : A.get_dimensions()) {
+  for (int i = A.get_dimensions().size() - 1; i >= 0; --i) {
+    size_t dim = A.get_dimensions()[i];
     signature_L.push_back(dim);
   }
   talsh::Tensor L(signature_L, A.data());
   std::vector<std::size_t> signature_R;
-  for (const auto& dim : B.get_dimensions()) {
+  for (int i = B.get_dimensions().size() - 1; i >= 0; --i) {
+    size_t dim = B.get_dimensions()[i];
     signature_R.push_back(dim);
   }
   talsh::Tensor R(signature_R, B.data());
diff --git a/src/tensor.cpp b/src/tensor.cpp
index 9fec18e6..9a89e331 100644
--- a/src/tensor.cpp
+++ b/src/tensor.cpp
@@ -960,10 +960,22 @@ void _multiply_vv(const s_type* A_data, const s_type* B_data, s_type* C_data,
   cblas_cdotu_sub(k, A_data, 1, B_data, 1, C_data);
 }
 
-void multiply(Tensor& A, Tensor& B, Tensor& C, s_type* scratch_copy) {
+// NEW ERASES
+//void multiply(Tensor& A, Tensor& B, Tensor& C, s_type* scratch_copy) {
+// NEW ERASES UP TO HERE
+// NEW
+void multiply(Tensor& A, Tensor& B, Tensor& C, s_type* scratch_copy,
+              bool dry /*= false*/) {
+// NEW UP TO HERE
+  // NEW
+  if (!dry) {
+    // NEW UP TO HERE
   if (scratch_copy == nullptr) {
     throw ERROR_MSG("Scratch copy must be non-null.");
   }
+  // NEW
+  }
+  // NEW UP TO HERE
 
   if (A.data() == C.data()) {
     throw ERROR_MSG("A and C cannot be the same tensor: ",
@@ -1020,6 +1032,9 @@ void multiply(Tensor& A, Tensor& B, Tensor& C, s_type* scratch_copy) {
   if (global::verbose > 1)
     t0 = std::chrono::high_resolution_clock::now();
 
+  // NEW
+  if (!dry) {
+    // NEW UP TO HERE
   // Reorder.
   std::vector<std::string> A_new_ordering =
       _vector_union(left_indices, common_indices);
@@ -1096,6 +1111,9 @@ void multiply(Tensor& A, Tensor& B, Tensor& C, s_type* scratch_copy) {
     std::cerr << "Time multiplying A*B: " << time_span.count() << "s\n";
     t0 = std::chrono::high_resolution_clock::now();
   }
+  // NEW
+  }
+  // NEW UP TO HERE
 
   // Set indices and dimensions of C.
   std::vector<std::string> C_indices =
diff --git a/src/tensor.h b/src/tensor.h
index 219d1b10..37859e7a 100644
--- a/src/tensor.h
+++ b/src/tensor.h
@@ -410,7 +410,13 @@ void _multiply_vv(const s_type* A_data, const s_type* B_data, s_type* C_data,
  * reordering. It has to allocate at least as much max(A.size(), B.size())
  * memory.
  */
-void multiply(Tensor& A, Tensor& B, Tensor& C, s_type* scratch_copy);
+// NEW ERASES
+//void multiply(Tensor& A, Tensor& B, Tensor& C, s_type* scratch_copy);
+// NEW ERASES UP TO HERE
+// NEW
+void multiply(Tensor& A, Tensor& B, Tensor& C, s_type* scratch_copy,
+              bool dry = false);
+// NEW UP TO HERE
 
 /**
  * Returns the size of the tensor product of A and B.