|
| 1 | +#include "jacobi_kernels.cuh" |
| 2 | +#include "jacobi.h" |
| 3 | + |
| 4 | +#include <vector> |
| 5 | +#include <iomanip> |
| 6 | +#include <iostream> |
| 7 | +#include <cuda/api.hpp> |
| 8 | + |
// Computes the final residual/error sum on the GPU and reports it.
//
// Zeroes the device accumulator, launches the finalError reduction kernel
// over the x buffer written by the last Jacobi iteration, copies the scalar
// result back into `sum`, and prints it. Blocks until all of this completes.
static void finalize_error(
	const cuda::stream_t& stream, span<double> d_sum, const cuda::launch_configuration_t& launch_config,
	double& sum, int k, const span<double> x_to_overwrite)
{
	stream.enqueue.memzero(d_sum);

	// Derive the reduction kernel's configuration from the Jacobi one: a grid
	// of one thread per row (rounded up), and (warps-per-block + 1) doubles of
	// dynamic shared memory - presumably per-warp partials plus one extra slot;
	// the kernel itself is not visible here.
	auto reduction_config = launch_config;
	const auto block_threads = reduction_config.dimensions.block.x;
	reduction_config.dimensions.grid.x = N_ROWS / block_threads + 1;
	reduction_config.dynamic_shared_memory_size =
		(block_threads / cuda::warp_size + 1) * sizeof(double);

	// TODO: Double-check the original source to ensure we're using the right x here
	stream.enqueue.kernel_launch(finalError, reduction_config, x_to_overwrite.data(), d_sum.data());
	stream.enqueue.copy(&sum, d_sum);
	stream.synchronize();
	report_error_sum("GPU", k + 1, sum);
}
| 24 | + |
// Jacobi solver driver: builds one CUDA graph (memset -> Jacobi kernel ->
// copy-back of the residual sum), instantiates it once, and re-launches the
// executable instance each iteration, swapping the kernel node's x/x_new
// arguments in place via set_node_parameters instead of rebuilding the graph.
//
// Returns the final residual sum; when it drops to convergence_threshold or
// below, finalize_error computes and reports the final error before breaking.
template<>
double do_jacobi_inner<computation_method_t::graph_with_set_kernel_params>(
	const cuda::device_t &device,
	const cuda::stream_t &stream,
	span<float const> A,
	span<double const> b,
	float convergence_threshold,
	int num_iterations,
	span<double> x,
	span<double> x_new,
	span<double> d_sum)
{
	auto launch_config = cuda::launch_config_builder()
		.block_size(256)
		.grid_dimensions((N_ROWS / ROWS_PER_CTA) + 2, 1, 1)
		.build();

	// Initialized so a non-positive num_iterations returns a defined value
	// rather than an uninitialized double.
	double sum = 0.0;

	auto graph = cuda::graph::create();

	using cuda::graph::node::kind_t;

	// Graph node 1: zero the device-side accumulator before the kernel runs.
	auto memset_node = [&] {
		cuda::graph::node::parameters_t<kind_t::memory_set> params;
		params.value = 0;
		// The element width need not match sizeof(double): writing zeros in
		// 4-byte units clears the 8-byte accumulator just as well.
		params.width_in_bytes = 4;
		params.region = d_sum;
		return graph.insert.node<kind_t::memory_set>(params);
	}();

	auto jacobi_kernel = cuda::kernel::get(device, JacobiMethod);
	// Two argument sets, differing only in which buffer is read and which is
	// written. Iterations are numbered 1-based here: "odd" iterations (even
	// k, starting at k == 0) read x and write x_new; "even" iterations do
	// the reverse.
	struct { cuda::graph::node::parameters_t<kind_t::kernel_launch> odd, even; } kernel_params = {
		{ jacobi_kernel, launch_config, cuda::graph::make_kernel_argument_pointers(A, b, convergence_threshold, x, x_new, d_sum) },
		{ jacobi_kernel, launch_config, cuda::graph::make_kernel_argument_pointers(A, b, convergence_threshold, x_new, x, d_sum) },
	};
	// Graph node 2: the Jacobi iteration kernel. The first iteration (k == 0)
	// reads x and writes x_new - the "odd" parameter set; previously this
	// node was created with kernel_params.even, which made the first two
	// iterations run with identical arguments (no buffer swap).
	auto jacobi_kernel_node = graph.insert.node<kind_t::kernel_launch>(kernel_params.odd);

	graph.insert.edge(memset_node, jacobi_kernel_node);

	// Graph node 3: copy the accumulated residual back into host-side `sum`.
	auto memcpy_node = [&] {
		cuda::memory::copy_parameters_t<3> params;
		params.set_source(d_sum);
		params.set_destination(&sum, 1);
		params.set_extent<double>(1);
		params.clear_offsets();
		params.clear_rest();
		return graph.insert.node<cuda::graph::node::kind_t::memcpy>(params);
	}();

	graph.insert.edge(jacobi_kernel_node, memcpy_node);

	cuda::graph::instance_t instance = graph.instantiate();

	for (int k = 0; k < num_iterations; k++) {
		instance.launch(stream);
		stream.synchronize(); // make the freshly-copied `sum` visible to the host

		if (sum <= convergence_threshold) {
			// Even k just wrote x_new (see kernel_params above), so x_new is
			// the buffer finalize_error may overwrite; odd k wrote x.
			auto x_to_overwrite = ((k & 1) == 0) ? x_new : x;
			finalize_error(stream, d_sum, launch_config, sum, k, x_to_overwrite);
			break;
		}
		// Prepare iteration k+1: after an even k the next iteration must read
		// x_new and write x (the "even" set), and vice versa.
		const auto& next_iteration_params = ((k & 1) == 0) ? kernel_params.even : kernel_params.odd;
		instance.set_node_parameters<kind_t::kernel_launch>(jacobi_kernel_node, next_iteration_params);
	}
	return sum;
}
| 99 | + |
// Jacobi solver driver: stream-captures each iteration's work (memzero,
// kernel launch, copy-back) into a fresh graph, then either instantiates it
// (first pass) or updates the existing executable instance with it, and
// launches that instance. Returns the final residual sum.
template<>
double do_jacobi_inner<computation_method_t::graph_with_exec_update>(
	const cuda::device_t &,
	const cuda::stream_t &stream,
	span<float const> A,
	span<double const> b,
	float convergence_threshold,
	int num_iterations,
	span<double> x,
	span<double> x_new,
	span<double> d_sum)
{
	auto launch_config = cuda::launch_config_builder()
		.block_size(256)
		.grid_dimensions((N_ROWS / ROWS_PER_CTA) + 2, 1, 1)
		.build();

	::std::unique_ptr<cuda::graph::instance_t> instance_ptr{};

	double sum = 0.0;
	for (int k = 0; k < num_iterations; k++) {
		// Alternate the buffers: even k reads x and writes x_new, odd k the reverse.
		const bool even_k = (k % 2 == 0);
		auto x_to_read      = even_k ? x : x_new;
		auto x_to_overwrite = even_k ? x_new : x;

		// Capture this iteration's stream work into a graph.
		stream.begin_capture(cuda::stream::capture::mode_t::global);
		stream.enqueue.memzero(d_sum);
		stream.enqueue.kernel_launch(JacobiMethod, launch_config,
			A.data(), b.data(), convergence_threshold, x_to_read.data(), x_to_overwrite.data(), d_sum.data());
		stream.enqueue.copy(&sum, d_sum);
		auto captured = stream.end_capture();

		if (instance_ptr) {
			instance_ptr->update(captured);
			// Note: The original code tried to re-instantiate if the update
			// of the instance failed, we don't do this.
		}
		else {
			instance_ptr.reset(new cuda::graph::instance_t{captured.instantiate()});
		}
		stream.enqueue.graph_launch(*instance_ptr);
		stream.synchronize(); // ensure `sum` is valid on the host before checking

		if (sum <= convergence_threshold) {
			finalize_error(stream, d_sum, launch_config, sum, k, x_to_overwrite);
			break;
		}
	}

	return sum;
}
| 150 | + |
// Jacobi solver driver, plain-stream baseline: enqueues the memzero, kernel
// launch and copy-back directly on the stream every iteration, with no CUDA
// graph involved. Returns the final residual sum; when it drops to
// convergence_threshold or below, finalize_error reports the final error.
template<>
double do_jacobi_inner<computation_method_t::non_graph_gpu>(
	const cuda::device_t &,
	const cuda::stream_t &stream,
	span<float const> A,
	span<double const> b,
	float convergence_threshold,
	int num_iterations,
	span<double> x,
	span<double> x_new,
	span<double> d_sum)
{
	auto launch_config = cuda::launch_config_builder()
		.block_size(256)
		.grid_dimensions((N_ROWS / ROWS_PER_CTA) + 2, 1, 1)
		.build();

	// Initialized so a non-positive num_iterations returns a defined value
	// instead of an uninitialized double (matches the exec_update variant).
	double sum = 0.0;
	for (int k = 0; k < num_iterations; k++) {
		stream.enqueue.memzero(d_sum);
		// Alternate the buffers: even k reads x and writes x_new, odd k the reverse.
		auto x_to_read = ((k & 1) == 0) ? x : x_new;
		auto x_to_overwrite = ((k & 1) == 0) ? x_new : x;
		stream.enqueue.kernel_launch(JacobiMethod, launch_config,
			A.data(), b.data(), convergence_threshold, x_to_read.data(), x_to_overwrite.data(), d_sum.data());
		stream.enqueue.copy(&sum, d_sum);
		stream.synchronize(); // ensure `sum` is valid on the host before checking

		if (sum <= convergence_threshold) {
			finalize_error(stream, d_sum, launch_config, sum, k, x_to_overwrite);
			break;
		}
	}

	return sum;
}
| 186 | + |
0 commit comments