Compadre 1.6.4
GMLS_Host.cpp
// @HEADER
// *****************************************************************************
// Compadre: COMpatible PArticle Discretization and REmap Toolkit
//
// Copyright 2018 NTESS and the Compadre contributors.
// SPDX-License-Identifier: BSD-2-Clause
// *****************************************************************************
// @HEADER
#include <iostream>
#include <string>
#include <vector>
#include <map>
#include <stdlib.h>
#include <cstdio>
#include <random>

#include <Compadre_Config.h>
#include <Compadre_GMLS.hpp>
#include <Compadre_Evaluator.hpp>
#include "GMLS_Tutorial.hpp"

#ifdef COMPADRE_USE_MPI
#include <mpi.h>
#endif

#include <Kokkos_Timer.hpp>
#include <Kokkos_Core.hpp>

using namespace Compadre;

int main (int argc, char* args[])
{

#ifdef COMPADRE_USE_MPI
    MPI_Init(&argc, &args);
#endif

bool all_passed = true;

{

    CommandLineProcessor clp(argc, args);
    auto order = clp.order;
    auto dimension = clp.dimension;
    auto number_target_coords = clp.number_target_coords;
    auto constraint_name = clp.constraint_name;
    auto solver_name = clp.solver_name;
    auto problem_name = clp.problem_name;

    const double failure_tolerance = 1e-9;

    const int offset = 15;
    std::mt19937 rng(50);
    const int min_neighbors = 1*Compadre::GMLS::getNP(order);
    const int max_neighbors = 1*Compadre::GMLS::getNP(order)*1.15;
    std::cout << min_neighbors << " " << max_neighbors << std::endl;
    std::uniform_int_distribution<int> gen_num_neighbors(min_neighbors, max_neighbors); // uniform, unbiased
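    // GMLS::getNP(order) returns the number of terms in the polynomial basis for this order
    // (the dimension argument defaults to 3), so min_neighbors equals one basis size and
    // max_neighbors is roughly 15% more than that.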


    Kokkos::initialize(argc, args);
    Kokkos::Timer timer;
    Kokkos::Profiling::pushRegion("Setup");


    const int N = 40000;
    std::uniform_int_distribution<int> gen_neighbor_number(offset, N); // the first `offset` entries are junk (part of test)


    Kokkos::View<int**, Kokkos::HostSpace> neighbor_lists("neighbor lists", number_target_coords, max_neighbors+1); // first column is # of neighbors
    Kokkos::View<double**, Kokkos::HostSpace> source_coords("neighbor coordinates", N, dimension);
    Kokkos::View<double*, Kokkos::HostSpace> epsilon("h supports", number_target_coords);

    for (int i=0; i<number_target_coords; i++) {
        epsilon(i) = 0.5;
    }

    // fake coordinates not to be used
    for(int i = 0; i < offset; i++){
        for(int j = 0; j < dimension; j++){
            source_coords(i,j) = 0.1;
        }
    }

    // fill the rest with random coordinates
    for(int i = offset; i < N; i++){ // skip the first `offset` (junk) entries
        double randx = (2.0*(double)rand() / (double) RAND_MAX - 1.0)*epsilon(0)/2.0;
        double randy = (2.0*(double)rand() / (double) RAND_MAX - 1.0)*epsilon(0)/2.0;
        double randz = (2.0*(double)rand() / (double) RAND_MAX - 1.0)*epsilon(0)/2.0;
        source_coords(i,0) = randx;
        if (dimension>1) source_coords(i,1) = randy;
        if (dimension>2) source_coords(i,2) = randz;
    }

    const double target_epsilon = 0.1;
    // fill target coords
    Kokkos::View<double**, Kokkos::HostSpace> target_coords ("target coordinates", number_target_coords, dimension);
    for(int i = 0; i < number_target_coords; i++){
        double randx = (2.0*(double)rand() / (double) RAND_MAX - 1.0)*target_epsilon/2.0;
        double randy = (2.0*(double)rand() / (double) RAND_MAX - 1.0)*target_epsilon/2.0;
        double randz = (2.0*(double)rand() / (double) RAND_MAX - 1.0)*target_epsilon/2.0;
        target_coords(i,0) = randx;
        if (dimension>1) target_coords(i,1) = randy;
        if (dimension>2) target_coords(i,2) = randz;
    }

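    // neighbor list layout: neighbor_lists(i,0) holds the number of neighbors for target i,
    // and columns 1..neighbor_lists(i,0) hold row indices into source_coords.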
    // fill neighbor lists (the randomized selection below is commented out;
    // every target takes the same max_neighbors consecutive source points)
    for (int i=0; i<number_target_coords; i++) {
//        int r = gen_num_neighbors(rng);
//        assert(r<source_coords.extent(0)-offset);
        int r = max_neighbors;
        neighbor_lists(i,0) = r; // number of neighbors for this target

        for(int j=0; j<r; j++){
            neighbor_lists(i,j+1) = offset + j + 1;
//            bool placed = false;
//            while (!placed) {
//                int ind = gen_neighbor_number(rng);
//                bool located = false;
//                for (int k=1; k<j+1; k++) {
//                    if (neighbor_lists(i,k) == ind) {
//                        located = true;
//                        break;
//                    }
//                }
//                if (!located) {
//                    neighbor_lists(i,j+1) = ind;
//                    placed = true;
//                } // neighbors can be from 10,...,N-1
//            }
        }
    }

    Kokkos::Profiling::popRegion();
    timer.reset();

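    // Instantiate the GMLS operator: polynomial order, spatial dimension, dense solver /
    // problem / constraint types (from the command line), and a curvature polynomial order
    // of 2 (only relevant for manifold problems). Then hand it the neighbor lists, source
    // and target coordinates, and support sizes, and set the weighting kernel power p to 10.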
    GMLS my_GMLS(order, dimension,
                 solver_name.c_str(), problem_name.c_str(), constraint_name.c_str(),
                 2 /*manifold order*/);
    my_GMLS.setProblemData(neighbor_lists, source_coords, target_coords, epsilon);
    my_GMLS.setWeightingParameter(10);

    std::vector<TargetOperation> lro(3);
    lro[0] = ScalarPointEvaluation;
    lro[1] = LaplacianOfScalarPointEvaluation;
    lro[2] = GradientOfScalarPointEvaluation;
    my_GMLS.addTargets(lro);
    // add two more targets individually to test addTargets(...) function
    my_GMLS.addTargets(DivergenceOfVectorPointEvaluation);
    my_GMLS.addTargets(CurlOfVectorPointEvaluation);
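    // generateAlphas builds and solves the GMLS problems, producing the "alpha"
    // coefficients for every registered target operation at every target site.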
    my_GMLS.generateAlphas();

    double instantiation_time = timer.seconds();
    std::cout << "Took " << instantiation_time << "s to complete instantiation." << std::endl;

    Kokkos::Profiling::pushRegion("Creating Data");


    // need Kokkos View storing true solution
    Kokkos::View<double*, Kokkos::HostSpace> sampling_data("samples of true solution", source_coords.extent(0));
    Kokkos::View<double**, Kokkos::HostSpace> gradient_sampling_data("samples of true gradient", source_coords.extent(0), dimension);
    Kokkos::View<double**, Kokkos::LayoutLeft, Kokkos::HostSpace> divergence_sampling_data("samples of true solution for divergence test", source_coords.extent(0), dimension);
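    // Sample the manufactured solution, its exact gradient, and the divergence-test vector
    // field at every source point; this parallel_for runs on the host execution space.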
    Kokkos::parallel_for("Sampling Manufactured Solutions", Kokkos::RangePolicy<Kokkos::DefaultHostExecutionSpace>(0,source_coords.extent(0)), KOKKOS_LAMBDA(const int i) {
        double xval = source_coords(i,0);
        double yval = (dimension>1) ? source_coords(i,1) : 0;
        double zval = (dimension>2) ? source_coords(i,2) : 0;
        sampling_data(i) = trueSolution(xval, yval, zval, order, dimension);
        double true_grad[3] = {0,0,0};
        trueGradient(true_grad, xval, yval, zval, order, dimension);
        for (int j=0; j<dimension; ++j) {
            divergence_sampling_data(i,j) = divergenceTestSamples(xval, yval, zval, j, dimension);
            gradient_sampling_data(i,j) = true_grad[j];
        }
    });
    Kokkos::Profiling::popRegion();

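    // The Evaluator applies the precomputed alphas to sampled data. The argument order of
    // applyAlphasToDataSingleComponentSingleTargetSite is: data view, input column, target
    // operation, target index, evaluation-site index, output component axes 1 and 2, and
    // input component axes 1 and 2.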
    Evaluator gmls_evaluator(&my_GMLS);

    for (int i=0; i<number_target_coords; i++) {

        Kokkos::Profiling::pushRegion("Apply Alphas to Data");

        double GMLS_value = gmls_evaluator.applyAlphasToDataSingleComponentSingleTargetSite(sampling_data, 0, ScalarPointEvaluation, i, 0, 0, 0, 0, 0);
        //for (int j = 0; j< neighbor_lists(i,0); j++){
        //    double xval = source_coords(neighbor_lists(i,j+1),0);
        //    double yval = (dimension>1) ? source_coords(neighbor_lists(i,j+1),1) : 0;
        //    double zval = (dimension>2) ? source_coords(neighbor_lists(i,j+1),2) : 0;
        //    GMLS_value += gmls_evaluator.getAlpha0TensorTo0Tensor(ScalarPointEvaluation, i, j)*trueSolution(xval, yval, zval, order, dimension);
        //}

        double GMLS_Laplacian = gmls_evaluator.applyAlphasToDataSingleComponentSingleTargetSite(sampling_data, 0, LaplacianOfScalarPointEvaluation, i, 0, 0, 0, 0, 0);
        //double GMLS_Laplacian = 0.0;
        //for (int j = 0; j< neighbor_lists(i,0); j++){
        //    double xval = source_coords(neighbor_lists(i,j+1),0);
        //    double yval = (dimension>1) ? source_coords(neighbor_lists(i,j+1),1) : 0;
        //    double zval = (dimension>2) ? source_coords(neighbor_lists(i,j+1),2) : 0;
        //    GMLS_Laplacian += gmls_evaluator.getAlpha0TensorTo0Tensor(LaplacianOfScalarPointEvaluation, i, j)*trueSolution(xval, yval, zval, order, dimension);
        //}

        double GMLS_GradX = gmls_evaluator.applyAlphasToDataSingleComponentSingleTargetSite(sampling_data, 0, GradientOfScalarPointEvaluation, i, 0, 0, 0, 0, 0);
        //double GMLS_GradX = 0.0;
        //for (int j = 0; j< neighbor_lists(i,0); j++){
        //    double xval = source_coords(neighbor_lists(i,j+1),0);
        //    double yval = (dimension>1) ? source_coords(neighbor_lists(i,j+1),1) : 0;
        //    double zval = (dimension>2) ? source_coords(neighbor_lists(i,j+1),2) : 0;
        //    GMLS_GradX += gmls_evaluator.getAlpha0TensorTo1Tensor(GradientOfScalarPointEvaluation, i, 0, j)*trueSolution(xval, yval, zval, order, dimension);
        //}

        double GMLS_GradY = (dimension>1) ? gmls_evaluator.applyAlphasToDataSingleComponentSingleTargetSite(sampling_data, 0, GradientOfScalarPointEvaluation, i, 0, 1, 0, 0, 0) : 0;
        //double GMLS_GradY = 0.0;
        //if (dimension>1) {
        //    for (int j = 0; j< neighbor_lists(i,0); j++){
        //        double xval = source_coords(neighbor_lists(i,j+1),0);
        //        double yval = source_coords(neighbor_lists(i,j+1),1);
        //        double zval = (dimension>2) ? source_coords(neighbor_lists(i,j+1),2) : 0;
        //        GMLS_GradY += gmls_evaluator.getAlpha0TensorTo1Tensor(GradientOfScalarPointEvaluation, i, 1, j)*trueSolution(xval, yval, zval, order, dimension);
        //    }
        //}

        double GMLS_GradZ = (dimension>2) ? gmls_evaluator.applyAlphasToDataSingleComponentSingleTargetSite(sampling_data, 0, GradientOfScalarPointEvaluation, i, 0, 2, 0, 0, 0) : 0;
        //double GMLS_GradZ = 0.0;
        //if (dimension>2) {
        //    for (int j = 0; j< neighbor_lists(i,0); j++){
        //        double xval = source_coords(neighbor_lists(i,j+1),0);
        //        double yval = source_coords(neighbor_lists(i,j+1),1);
        //        double zval = source_coords(neighbor_lists(i,j+1),2);
        //        GMLS_GradZ += gmls_evaluator.getAlpha0TensorTo1Tensor(GradientOfScalarPointEvaluation, i, 2, j)*trueSolution(xval, yval, zval, order, dimension);
        //    }
        //}

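        // The divergence is assembled one Cartesian component at a time: column j of
        // gradient_sampling_data enters as input component j, and the scalar contributions
        // are summed.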
        double GMLS_Divergence = gmls_evaluator.applyAlphasToDataSingleComponentSingleTargetSite(gradient_sampling_data, 0, DivergenceOfVectorPointEvaluation, i, 0, 0, 0, 0, 0);
        if (dimension>1) GMLS_Divergence += gmls_evaluator.applyAlphasToDataSingleComponentSingleTargetSite(gradient_sampling_data, 1, DivergenceOfVectorPointEvaluation, i, 0, 0, 0, 1, 0);
        if (dimension>2) GMLS_Divergence += gmls_evaluator.applyAlphasToDataSingleComponentSingleTargetSite(gradient_sampling_data, 2, DivergenceOfVectorPointEvaluation, i, 0, 0, 0, 2, 0);

        //double GMLS_Divergence = gmls_evaluator.applyAlphasToDataSingleComponentSingleTargetSite(Kokkos::subview(gradient_sampling_data,Kokkos::ALL,0), 0, DivergenceOfVectorPointEvaluation, i, 0, 0, 0, 0);
        //if (dimension>1) GMLS_Divergence += gmls_evaluator.applyAlphasToDataSingleComponentSingleTargetSite(Kokkos::subview(gradient_sampling_data,Kokkos::ALL,1), 1, DivergenceOfVectorPointEvaluation, i, 0, 0, 1, 0);
        //if (dimension>2) GMLS_Divergence += gmls_evaluator.applyAlphasToDataSingleComponentSingleTargetSite(Kokkos::subview(gradient_sampling_data,Kokkos::ALL,2), 2, DivergenceOfVectorPointEvaluation, i, 0, 0, 2, 0);
        //double GMLS_Divergence = 0.0;
        //for (int j = 0; j< neighbor_lists(i,0); j++){
        //    double xval = source_coords(neighbor_lists(i,j+1),0);
        //    double yval = (dimension>1) ? source_coords(neighbor_lists(i,j+1),1) : 0;
        //    double zval = (dimension>2) ? source_coords(neighbor_lists(i,j+1),2) : 0;
        //    // TODO: use different functions for the vector components
        //    if (use_arbitrary_order_divergence) {
        //        GMLS_Divergence += gmls_evaluator.getAlpha1TensorTo0Tensor(DivergenceOfVectorPointEvaluation, i, j, 0)*trueSolution(xval, yval, zval, order, dimension);
        //        if (dimension>1) GMLS_Divergence += gmls_evaluator.getAlpha1TensorTo0Tensor(DivergenceOfVectorPointEvaluation, i, j, 1)*trueSolution(xval, yval, zval, order, dimension);
        //        if (dimension>2) GMLS_Divergence += gmls_evaluator.getAlpha1TensorTo0Tensor(DivergenceOfVectorPointEvaluation, i, j, 2)*trueSolution(xval, yval, zval, order, dimension);
        //    } else {
        //        for (int k=0; k<dimension; ++k) {
        //            GMLS_Divergence += gmls_evaluator.getAlpha1TensorTo0Tensor(DivergenceOfVectorPointEvaluation, i, j, k)*divergenceTestSamples(xval, yval, zval, k, dimension);
        //        }
        //    }
        //}

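        // Each curl component is assembled the same way: the output component index selects
        // the X/Y/Z component of the curl, and contributions from every input component of
        // divergence_sampling_data are summed.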
        double GMLS_CurlX = 0.0;
        double GMLS_CurlY = 0.0;
        double GMLS_CurlZ = 0.0;
        if (dimension>1) {
            for (int j=0; j<dimension; ++j) {
                GMLS_CurlX += gmls_evaluator.applyAlphasToDataSingleComponentSingleTargetSite(divergence_sampling_data, j, CurlOfVectorPointEvaluation, i, 0, 0, 0, j, 0);
                GMLS_CurlY += gmls_evaluator.applyAlphasToDataSingleComponentSingleTargetSite(divergence_sampling_data, j, CurlOfVectorPointEvaluation, i, 0, 1, 0, j, 0);
            }
        }

        if (dimension>2) {
            for (int j=0; j<dimension; ++j) {
                GMLS_CurlZ += gmls_evaluator.applyAlphasToDataSingleComponentSingleTargetSite(divergence_sampling_data, j, CurlOfVectorPointEvaluation, i, 0, 2, 0, j, 0);
            }
        }

        Kokkos::Profiling::popRegion();
        //if (dimension>1) {
        //    for (int j = 0; j< neighbor_lists(i,0); j++){
        //        double xval = source_coords(neighbor_lists(i,j+1),0);
        //        double yval = source_coords(neighbor_lists(i,j+1),1);
        //        double zval = (dimension>2) ? source_coords(neighbor_lists(i,j+1),2) : 0;
        //        for (int k=0; k<dimension; ++k) {
        //            GMLS_CurlX += my_GMLS.getAlpha1TensorTo1Tensor(CurlOfVectorPointEvaluation, i, 0, j, k)*divergenceTestSamples(xval, yval, zval, k, dimension);
        //        }
        //    }

        //    for (int j = 0; j< neighbor_lists(i,0); j++){
        //        double xval = source_coords(neighbor_lists(i,j+1),0);
        //        double yval = source_coords(neighbor_lists(i,j+1),1);
        //        double zval = (dimension>2) ? source_coords(neighbor_lists(i,j+1),2) : 0;
        //        for (int k=0; k<dimension; ++k) {
        //            GMLS_CurlY += my_GMLS.getAlpha1TensorTo1Tensor(CurlOfVectorPointEvaluation, i, 1, j, k)*divergenceTestSamples(xval, yval, zval, k, dimension);
        //        }
        //    }
        //}

        //if (dimension>2) {
        //    for (int j = 0; j< neighbor_lists(i,0); j++){
        //        double xval = source_coords(neighbor_lists(i,j+1),0);
        //        double yval = source_coords(neighbor_lists(i,j+1),1);
        //        double zval = source_coords(neighbor_lists(i,j+1),2);
        //        for (int k=0; k<dimension; ++k) {
        //            GMLS_CurlZ += my_GMLS.getAlpha1TensorTo1Tensor(CurlOfVectorPointEvaluation, i, 2, j, k)*divergenceTestSamples(xval, yval, zval, k, dimension);
        //        }
        //    }
        //}

        Kokkos::Profiling::pushRegion("Comparison");

        double xval = target_coords(i,0);
        double yval = (dimension>1) ? target_coords(i,1) : 0;
        double zval = (dimension>2) ? target_coords(i,2) : 0;

        double actual_value = trueSolution(xval, yval, zval, order, dimension);
        double actual_Laplacian = trueLaplacian(xval, yval, zval, order, dimension);
        double actual_Gradient[3] = {0,0,0};
        trueGradient(actual_Gradient, xval, yval, zval, order, dimension);
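        // The vector field sampled for the divergence test is the exact gradient, so its
        // exact divergence is the Laplacian of the true solution.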
        double actual_Divergence = trueLaplacian(xval, yval, zval, order, dimension);

        double actual_CurlX = 0;
        double actual_CurlY = 0;
        double actual_CurlZ = 0;
        if (dimension>1) {
            actual_CurlX = curlTestSolution(xval, yval, zval, 0, dimension);
            actual_CurlY = curlTestSolution(xval, yval, zval, 1, dimension);
        }
        if (dimension>2) {
            actual_CurlZ = curlTestSolution(xval, yval, zval, 2, dimension);
        }

//        fprintf(stdout, "Reconstructed value: %f \n", GMLS_value);
//        fprintf(stdout, "Actual value: %f \n", actual_value);
//        fprintf(stdout, "Reconstructed Laplacian: %f \n", GMLS_Laplacian);
//        fprintf(stdout, "Actual Laplacian: %f \n", actual_Laplacian);

        if(GMLS_value!=GMLS_value || std::abs(actual_value - GMLS_value) > failure_tolerance) {
            all_passed = false;
            std::cout << "Failed Actual by: " << std::abs(actual_value - GMLS_value) << std::endl;
        }

        if(std::abs(actual_Laplacian - GMLS_Laplacian) > failure_tolerance) {
            all_passed = false;
            std::cout << "Failed Laplacian by: " << std::abs(actual_Laplacian - GMLS_Laplacian) << std::endl;
        }

        if(std::abs(actual_Gradient[0] - GMLS_GradX) > failure_tolerance) {
            all_passed = false;
            std::cout << "Failed GradX by: " << std::abs(actual_Gradient[0] - GMLS_GradX) << std::endl;
        }

        if (dimension>1) {
            if(std::abs(actual_Gradient[1] - GMLS_GradY) > failure_tolerance) {
                all_passed = false;
                std::cout << "Failed GradY by: " << std::abs(actual_Gradient[1] - GMLS_GradY) << std::endl;
            }
        }

        if (dimension>2) {
            if(std::abs(actual_Gradient[2] - GMLS_GradZ) > failure_tolerance) {
                all_passed = false;
                std::cout << "Failed GradZ by: " << std::abs(actual_Gradient[2] - GMLS_GradZ) << std::endl;
            }
        }

        if(std::abs(actual_Divergence - GMLS_Divergence) > failure_tolerance) {
            all_passed = false;
            std::cout << "Failed Divergence by: " << std::abs(actual_Divergence - GMLS_Divergence) << std::endl;
        }

        double tmp_diff = 0;
        if (dimension>1)
            tmp_diff += std::abs(actual_CurlX - GMLS_CurlX) + std::abs(actual_CurlY - GMLS_CurlY);
        if (dimension>2)
            tmp_diff += std::abs(actual_CurlZ - GMLS_CurlZ);
        if(std::abs(tmp_diff) > failure_tolerance) {
            all_passed = false;
            std::cout << "Failed Curl by: " << std::abs(tmp_diff) << std::endl;
        }
        Kokkos::Profiling::popRegion();
    }

}

    Kokkos::finalize();
#ifdef COMPADRE_USE_MPI
    MPI_Finalize();
#endif

if(all_passed) {
    fprintf(stdout, "Passed test \n");
    return 0;
} else {
    fprintf(stdout, "Failed test \n");
    return -1;
}
}