Panzer_STK_PeriodicBC_Search.cpp
// @HEADER
// *****************************************************************************
// Panzer: A partial differential equation assembly
// engine for strongly coupled complex multiphysics systems
//
// Copyright 2011 NTESS and the Panzer contributors.
// SPDX-License-Identifier: BSD-3-Clause
// *****************************************************************************
// @HEADER

#include "Panzer_STK_PeriodicBC_Search.hpp" // assumed include: declarations for the periodic search helpers defined below

#include <stk_mesh/base/GetEntities.hpp>
#include <stk_mesh/base/FieldBase.hpp>

#include "Panzer_NodeType.hpp"
#include "Tpetra_Map.hpp"
#include "Tpetra_Import.hpp"
#include "Tpetra_Vector.hpp"

#include "Teuchos_FancyOStream.hpp"

#ifdef PANZER_HAVE_STKSEARCH
namespace panzer_stk {

namespace periodic_helpers {

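// Helpers for the STK-search based periodic boundary condition matching:
//  * fillLocalSearchVector  - wraps each requested node/edge/face of a sideset in a
//    stk::search sphere tagged with its entity id and owning rank
//  * computeGlobalCentroid  - parallel average of a sideset's locally owned node coordinates
//  * updateMapping / appendMapping - merge matches from the current periodic pairing with
//    matches produced by earlier pairings
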
void fillLocalSearchVector(const STK_Interface & mesh, SphereIdVector & searchVector, const double & error,
                           const std::string & sideName, const std::string & type_, const bool & getGhostedIDs)
{

  // Empty optional arguments for the full call below.
  // This is partly for backwards compatibility, but it also reflects that these vectors
  // are not needed for the first pairing, and are not used for side B in every case.

  std::vector<std::string> matchedSides;
  std::vector<SearchId> potentialIDsToRemap;

  fillLocalSearchVector(mesh,searchVector,error,sideName,type_,getGhostedIDs,matchedSides,potentialIDsToRemap);

  return;

}

void fillLocalSearchVector(const STK_Interface & mesh, SphereIdVector & searchVector, const double & error,
                           const std::string & sideName, const std::string & type_, const bool & getGhostedIDs,
                           const std::vector<std::string> & matchedSides, std::vector<SearchId> & potentialIDsToRemap)
{
  unsigned physicalDim = mesh.getDimension();

  Teuchos::RCP<stk::mesh::MetaData> metaData = mesh.getMetaData();
  Teuchos::RCP<stk::mesh::BulkData> bulkData = mesh.getBulkData();

  const unsigned parallelRank = bulkData->parallel_rank();

  // grab entities owned by requested side
  std::stringstream ss;
  ss << "Can't find a sideset named \"" << sideName << "\" in the mesh" << std::endl;
  stk::mesh::Part * side = metaData->get_part(sideName,ss.str().c_str());

  // if ghosted IDs are requested, add in the shared portion
  stk::mesh::Selector mySides = getGhostedIDs ?
    (*side) & (metaData->locally_owned_part() | metaData->globally_shared_part()) :
    (*side) & metaData->locally_owned_part();

  stk::mesh::EntityRank rank;
  const STK_Interface::VectorFieldType * field = nullptr; // assumed declaration; holds the coordinates/edges/faces field selected below
  // the sorting further downstream only uses ids which are offset
  // based on the entity type
  stk::mesh::EntityId offset = 0;
  if(type_ == "coord"){
    rank = mesh.getNodeRank();
    field = & mesh.getCoordinatesField();
    // no offset
  } else if(type_ == "edge"){
    rank = mesh.getEdgeRank();
    field = & mesh.getEdgesField();
    offset = mesh.getMaxEntityId(mesh.getNodeRank());
  } else if(type_ == "face"){
    rank = mesh.getFaceRank();
    field = & mesh.getFacesField();
    offset = mesh.getMaxEntityId(mesh.getNodeRank())+mesh.getMaxEntityId(mesh.getEdgeRank());
  } else {
    ss << "Can't do BCs of type " << type_ << std::endl;
    TEUCHOS_TEST_FOR_EXCEPTION(true,std::runtime_error, ss.str())
  }

  // remove previously matched entities

  stk::mesh::Selector intersection; // empty set to start

  if (matchedSides.size()>0) {
    for (size_t j=0; j<matchedSides.size(); ++j) {
      auto previouslyMatched = matchedSides[j];
      // add in the overlap between the requested side and the previously matched side
      intersection = intersection | (mySides & *(metaData->get_part(previouslyMatched)));
    }
    // remove all overlaps
    mySides = mySides - intersection;
  }

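  // Each entity on the side becomes a search sphere of radius "error" centered at its
  // coordinates, paired with a SearchId that carries the (offset) entity key and the
  // owning rank; the radius is the absolute tolerance used when the two sides are matched later.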
  // get buckets
  std::vector<stk::mesh::Bucket*> const& entityBuckets =
    bulkData->get_buckets(rank, mySides);

  // loop over entity buckets

  for(size_t b=0;b<entityBuckets.size();b++) {
    stk::mesh::Bucket & bucket = *entityBuckets[b];
    double const* array = stk::mesh::field_data(*field, bucket);

    // loop over entities
    for(size_t n=0;n<bucket.size();n++) {

      double coord[3]; // coordinates
      // copy this entity's coordinates into a local array
      for(size_t d=0;d<physicalDim;d++)
        coord[d] = array[physicalDim*n + d];

      // need to ensure that higher dimensions are properly zeroed
      // required for 1D periodic boundary conditions
      for(size_t d=physicalDim;d<3;d++)
        coord[d] = 0;

      // add the coordinate and id to the search vector
      // a tolerance can be specified
      // TODO allow for relative tolerances...
      stk::search::Point<double> center(coord[0],coord[1],coord[2]);
      stk::mesh::EntityKey trueKey = bulkData->entity_key(bucket[n]);
      stk::mesh::EntityKey shiftedKey(trueKey.rank(), trueKey.id()+offset);
      SearchId search_id(shiftedKey, parallelRank);

      searchVector.emplace_back( Sphere(center, error), search_id);
    }
  }

  // for the multiperiodic case, populate the potentialIDsToRemap vector with the IDs
  // that have already been matched and fall on this side

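  // These entities were excluded from the search above because an earlier pairing already
  // matched them; record their (offset) keys here so the previous match can be redirected
  // to its final target by updateMapping.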
  if (matchedSides.size()>0) {
    TEUCHOS_ASSERT(potentialIDsToRemap.size()==0);
    // reset mySides
    mySides = getGhostedIDs ?
      (*side) & (metaData->locally_owned_part() | metaData->globally_shared_part()) :
      (*side) & metaData->locally_owned_part();

    std::vector<stk::mesh::Bucket*> const & intersectionEntityBuckets =
      bulkData->get_buckets(rank, mySides & intersection);

    // loop over entity buckets

    for(size_t b=0;b<intersectionEntityBuckets.size();b++) {
      stk::mesh::Bucket & bucket = *intersectionEntityBuckets[b];
      // loop over entities
      for(size_t n=0;n<bucket.size();n++) {
        stk::mesh::EntityKey trueKey = bulkData->entity_key(bucket[n]);
        stk::mesh::EntityKey shiftedKey(trueKey.rank(), trueKey.id()+offset);
        potentialIDsToRemap.emplace_back(shiftedKey, parallelRank);
      }
    }
  }

  return;
}

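// Returns the arithmetic mean of the locally owned node coordinates on the named sideset,
// reduced across all ranks; unused spatial dimensions are left at zero.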
const std::vector<double> computeGlobalCentroid(const STK_Interface & mesh, const std::string & sideName)
{
  // TODO too much replicated code here
  unsigned physicalDim = mesh.getDimension();

  Teuchos::RCP<stk::mesh::MetaData> metaData = mesh.getMetaData();
  Teuchos::RCP<stk::mesh::BulkData> bulkData = mesh.getBulkData();

  // grab requested side
  std::stringstream ss;
  ss << "Can't find part=\"" << sideName << "\"" << std::endl;
  stk::mesh::Part * side = metaData->get_part(sideName,ss.str().c_str());
  stk::mesh::Selector mySides = (*side) & metaData->locally_owned_part();

  // get node buckets
  std::vector<stk::mesh::Bucket*> const& entityBuckets =
    bulkData->get_buckets(mesh.getNodeRank(), mySides);

  // loop over node buckets
  double localCentroid[3] = {0.,0.,0.};
  int localNodeCount = 0;
  for(size_t b=0;b<entityBuckets.size();b++) {
    stk::mesh::Bucket & bucket = *entityBuckets[b];
    double const* array = stk::mesh::field_data(mesh.getCoordinatesField(), bucket);

    // loop over nodes
    for(size_t n=0;n<bucket.size();n++) {

      ++localNodeCount;
      // sum (note that unused dimensions are skipped)
      for(size_t d=0;d<physicalDim;d++)
        localCentroid[d] += array[physicalDim*n + d];

    }
  }
  int globalNodeCount = 0;

  auto comm = mesh.getComm();

  double globalCentroid[3] = { };

  Teuchos::reduceAll(*comm,Teuchos::REDUCE_SUM,1,&localNodeCount,&globalNodeCount);
  Teuchos::reduceAll(*comm,Teuchos::REDUCE_SUM,3,&localCentroid[0],&globalCentroid[0]);

  std::vector<double> result = {0.,0.,0.};
  for (size_t d=0;d<physicalDim;d++)
    result[d] = globalCentroid[d]/globalNodeCount;

  return result;
}

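// For multiply periodic meshes an entity can lie on more than one paired side. Its match
// from a previous pairing (A -> B) may point at a B id that the current pairing itself maps
// elsewhere. updateMapping imports, from whichever rank owns the new A -> B entry, the final
// B id for each such entity, rewrites the previous map accordingly, and then appends the
// (updated) previous pairs to currentMatches.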
void updateMapping(Teuchos::RCP<std::vector<std::pair<size_t,size_t> > > & currentMatches,
                   const std::vector<std::pair<size_t,size_t> > & previousMatches,
                   const std::vector<SearchId> & IDsToRemap, const STK_Interface & mesh)
{

  using LO = panzer::LocalOrdinal;
  using GO = panzer::GlobalOrdinal;
  using NODE = panzer::TpetraNodeType; // assumed alias; the Tpetra types below require a node template parameter
  using Map = Tpetra::Map<LO,GO,NODE>;
  using Importer = Tpetra::Import<LO,GO,NODE>;

  auto comm = mesh.getComm();

  // store maps
  // these are needed to satisfy the uniqueness requirements below
  // and make updating the map convenient

  std::map<size_t,size_t> myPreviousAtoB,myCurrentAtoB;
  std::map<size_t,std::vector<size_t> > myPreviousBtoA;
  for (size_t i=0;i<previousMatches.size();++i) {
    myPreviousAtoB[previousMatches[i].first] = previousMatches[i].second;
    // may not be one-to-one
    myPreviousBtoA[previousMatches[i].second].push_back(previousMatches[i].first);
  }
  for (size_t i=0;i<currentMatches->size();++i)
    myCurrentAtoB[(*currentMatches)[i].first] = (*currentMatches)[i].second;

  // find which IDs we need to query to get THEIR A to B map
  // this means we need the B id of our previous match for the IDsToRemap
  std::vector<GO> requestedAIDs;

  for (auto & id : IDsToRemap)
    requestedAIDs.push_back(myPreviousAtoB[id.id().id()]);

  // quick and dirty way to get rid of repeated entries on each process
  std::set<GO> uniqueAIDs(requestedAIDs.begin(),requestedAIDs.end());
  requestedAIDs = std::vector<GO>(uniqueAIDs.begin(),uniqueAIDs.end());

  // find which A ids we have in our new mapping
  std::vector<GO> newAIDs(currentMatches->size());

  for (size_t i=0;i<currentMatches->size();++i) {
    newAIDs[i] = (*currentMatches)[i].first;
  }

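  // Tpetra::Import requires a one-to-one (uniquely owned) source map. The same new A id can
  // show up on more than one rank (for instance when ghosted/shared IDs were gathered), so
  // fall back to createOneToOne to assign each id a single owner when necessary.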
  // create Tpetra maps
  auto computeInternally = Teuchos::OrdinalTraits<Tpetra::global_size_t>::invalid();
  Teuchos::RCP<const Map> testMap = Teuchos::rcp(new Map(computeInternally,&newAIDs[0],newAIDs.size(),0,comm));
  Teuchos::RCP<const Map> newAIDsMap;
  // source must be unique across communicator
  if (!testMap->isOneToOne()){
    newAIDsMap = Tpetra::createOneToOne<LO,GO,NODE>(testMap);
  } else {
    newAIDsMap = testMap;
  }

  Teuchos::RCP<Map> requestedAIDsMap = Teuchos::rcp(new Map(computeInternally,&requestedAIDs[0],requestedAIDs.size(),0,comm));

  Importer importer(newAIDsMap,requestedAIDsMap);

  // send out my A to B map
  Tpetra::Vector<GO,LO,GO,NODE> newBIDs(newAIDsMap);
  auto newBIDsHost = newBIDs.getLocalViewHost(Tpetra::Access::OverwriteAll);
  auto myGIDs = newAIDsMap->getMyGlobalIndices();
  for (size_t i=0;i<myGIDs.size();++i)
    newBIDsHost(i,0) = myCurrentAtoB[myGIDs[i]];

  Tpetra::Vector<GO,LO,GO,NODE> requestedBIDs(requestedAIDsMap);
  requestedBIDs.doImport(newBIDs,importer,Tpetra::INSERT);
  auto requestedBIDsHost = requestedBIDs.getLocalViewHost(Tpetra::Access::ReadOnly);

  // now overwrite previous map where necessary...
  // what about error checking? what is the default if something is requested but not there?
  // TODO this assumes that the Tpetra maps and communication do not
  // alter the ordering in any way, so that the AIDs and IDsToRemap correspond appropriately
  size_t ind = 0;
  for (const auto & id : requestedAIDs) {
    // get the corresponding ids to update in the previous map
    for (const auto & idToUpdate : myPreviousBtoA[id])
      // update with the final B id
      myPreviousAtoB[idToUpdate] = requestedBIDsHost(ind,0);
    ++ind;
  }

  // and add to new map...
  // needs to respect the previous ordering or type_vec in getPeriodicNodePairing will be wrong
  for (const auto & AB : previousMatches) {
    // so we get the A ids in order
    auto id = AB.first;
    // and use the updated previous A to B map
    (*currentMatches).emplace_back(std::pair<size_t,size_t>(id,myPreviousAtoB[id]));
  }

  return;
}

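// Straight concatenation of a previous pairing's matches onto the current ones, used when
// none of the previously matched ids need to be redirected.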
void appendMapping(Teuchos::RCP<std::vector<std::pair<size_t,size_t> > > & currentMatches,
                   const std::vector<std::pair<size_t,size_t> > & previousMatches)
{
  // add previous mapping to new map
  for (const auto & AB : previousMatches)
    (*currentMatches).push_back(AB);

  return;
}

} // end periodic_helpers

} // end panzer_stk
#endif