meshRefinement.C
1 /*---------------------------------------------------------------------------*\
2  ========= |
3  \\ / F ield | OpenFOAM: The Open Source CFD Toolbox
4  \\ / O peration |
5  \\ / A nd | Copyright (C) 1991-2010 OpenCFD Ltd.
6  \\/ M anipulation |
7 -------------------------------------------------------------------------------
8 License
9  This file is part of OpenFOAM.
10 
11  OpenFOAM is free software: you can redistribute it and/or modify it
12  under the terms of the GNU General Public License as published by
13  the Free Software Foundation, either version 3 of the License, or
14  (at your option) any later version.
15 
16  OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
17  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19  for more details.
20 
21  You should have received a copy of the GNU General Public License
22  along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
23 
24 \*----------------------------------------------------------------------------*/
25 
26 #include "meshRefinement.H"
27 #include <finiteVolume/volMesh.H>
28 #include <finiteVolume/volFields.H>
30 #include <OpenFOAM/syncTools.H>
31 #include <OpenFOAM/Time.H>
35 #include <meshTools/regionSplit.H>
42 #include <OpenFOAM/pointMesh.H>
43 #include <OpenFOAM/pointFields.H>
49 #include <OpenFOAM/globalIndex.H>
50 #include <meshTools/meshTools.H>
51 #include <OpenFOAM/OFstream.H>
53 #include <OpenFOAM/Random.H>
55 #include <meshTools/treeBoundBox.H>
57 
58 // * * * * * * * * * * * * * * Static Data Members * * * * * * * * * * * * * //
59 
60 namespace Foam
61 {
62  defineTypeNameAndDebug(meshRefinement, 0);
63 }
64 
65 // * * * * * * * * * * * * * Private Member Functions * * * * * * * * * * * //
66 
67 void Foam::meshRefinement::calcNeighbourData
68 (
69  labelList& neiLevel,
70  pointField& neiCc
71 ) const
72 {
73  const labelList& cellLevel = meshCutter_.cellLevel();
74  const pointField& cellCentres = mesh_.cellCentres();
75 
76  label nBoundaryFaces = mesh_.nFaces() - mesh_.nInternalFaces();
77 
78  if (neiLevel.size() != nBoundaryFaces || neiCc.size() != nBoundaryFaces)
79  {
80  FatalErrorIn("meshRefinement::calcNeighbourData(..)") << "nBoundaries:"
81  << nBoundaryFaces << " neiLevel:" << neiLevel.size()
82  << abort(FatalError);
83  }
84 
85  const polyBoundaryMesh& patches = mesh_.boundaryMesh();
86 
87  labelHashSet addedPatchIDSet = meshedPatches();
88 
89  forAll(patches, patchI)
90  {
91  const polyPatch& pp = patches[patchI];
92 
93  const unallocLabelList& faceCells = pp.faceCells();
94  const vectorField::subField faceCentres = pp.faceCentres();
95  const vectorField::subField faceAreas = pp.faceAreas();
96 
97  label bFaceI = pp.start()-mesh_.nInternalFaces();
98 
99  if (pp.coupled())
100  {
101  forAll(faceCells, i)
102  {
103  neiLevel[bFaceI] = cellLevel[faceCells[i]];
104  neiCc[bFaceI] = cellCentres[faceCells[i]];
105  bFaceI++;
106  }
107  }
108  else if (addedPatchIDSet.found(patchI))
109  {
110  // Face was introduced from cell-cell intersection. Try to
111  // reconstruct other side cell(centre). Three possibilities:
112  // - cells same size.
113  // - preserved cell smaller. Not handled.
114  // - preserved cell larger.
115  forAll(faceCells, i)
116  {
117  // Extrapolate the face centre.
118  vector fn = faceAreas[i];
119  fn /= mag(fn)+VSMALL;
120 
121  label own = faceCells[i];
122  label ownLevel = cellLevel[own];
123  label faceLevel = meshCutter_.getAnchorLevel(pp.start()+i);
124 
125  // Normal distance from face centre to cell centre
126  scalar d = ((faceCentres[i] - cellCentres[own]) & fn);
127  if (faceLevel > ownLevel)
128  {
129  // Other cell more refined. Adjust normal distance
130  d *= 0.5;
131  }
132  neiLevel[bFaceI] = faceLevel;
133  // Calculate other cell centre by extrapolation
134  neiCc[bFaceI] = faceCentres[i] + d*fn;
135  bFaceI++;
136  }
137  }
138  else
139  {
140  forAll(faceCells, i)
141  {
142  neiLevel[bFaceI] = cellLevel[faceCells[i]];
143  neiCc[bFaceI] = faceCentres[i];
144  bFaceI++;
145  }
146  }
147  }
148 
149  // Swap coupled boundaries. Apply separation to neiCc since it is a coordinate.
150  syncTools::swapBoundaryFaceList(mesh_, neiCc, true);
151  syncTools::swapBoundaryFaceList(mesh_, neiLevel, false);
152 }
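// Usage sketch: callers size both lists to the number of boundary faces
// before calling, e.g.
//
//     labelList neiLevel(mesh_.nFaces()-mesh_.nInternalFaces());
//     pointField neiCc(mesh_.nFaces()-mesh_.nInternalFaces());
//     calcNeighbourData(neiLevel, neiCc);
//
// as done in updateIntersections() and checkData() below.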
153 
154 
155 // Find intersections of edges (given by their two endpoints) with surfaces.
156 // Keeps only the first intersection if there is more than one.
157 void Foam::meshRefinement::updateIntersections(const labelList& changedFaces)
158 {
159  const pointField& cellCentres = mesh_.cellCentres();
160 
161  // Stats on edges to test. Count proc faces only once.
162  PackedBoolList isMasterFace(syncTools::getMasterFaces(mesh_));
163 
164  {
165  label nMasterFaces = 0;
166  forAll(isMasterFace, faceI)
167  {
168  if (isMasterFace.get(faceI) == 1)
169  {
170  nMasterFaces++;
171  }
172  }
173  reduce(nMasterFaces, sumOp<label>());
174 
175  label nChangedFaces = 0;
176  forAll(changedFaces, i)
177  {
178  if (isMasterFace.get(changedFaces[i]) == 1)
179  {
180  nChangedFaces++;
181  }
182  }
183  reduce(nChangedFaces, sumOp<label>());
184 
185  Info<< "Edge intersection testing:" << nl
186  << " Number of edges : " << nMasterFaces << nl
187  << " Number of edges to retest : " << nChangedFaces
188  << endl;
189  }
190 
191 
192  // Get boundary face centre and level. Coupled aware.
193  labelList neiLevel(mesh_.nFaces()-mesh_.nInternalFaces());
194  pointField neiCc(mesh_.nFaces()-mesh_.nInternalFaces());
195  calcNeighbourData(neiLevel, neiCc);
196 
197  // Collect segments we want to test for
198  pointField start(changedFaces.size());
199  pointField end(changedFaces.size());
200 
201  forAll(changedFaces, i)
202  {
203  label faceI = changedFaces[i];
204  label own = mesh_.faceOwner()[faceI];
205 
206  start[i] = cellCentres[own];
207  if (mesh_.isInternalFace(faceI))
208  {
209  end[i] = cellCentres[mesh_.faceNeighbour()[faceI]];
210  }
211  else
212  {
213  end[i] = neiCc[faceI-mesh_.nInternalFaces()];
214  }
215  }
216 
217  // Do tests in one go
218  labelList surfaceHit;
219  {
220  labelList surfaceLevel;
221  surfaces_.findHigherIntersection
222  (
223  start,
224  end,
225  labelList(start.size(), -1), // accept any intersection
226  surfaceHit,
227  surfaceLevel
228  );
229  }
230 
231  // Keep just surface hit
232  forAll(surfaceHit, i)
233  {
234  surfaceIndex_[changedFaces[i]] = surfaceHit[i];
235  }
236 
237  // Make sure both sides have the same information. This should be the case
238  // in general since the same segments are tested from both sides, but sync to make sure.
239  syncTools::syncFaceList(mesh_, surfaceIndex_, maxEqOp<label>(), false);
240 
241  label nHits = countHits();
242  label nTotHits = returnReduce(nHits, sumOp<label>());
243 
244  Info<< " Number of intersected edges : " << nTotHits << endl;
245 
246  // Set files to same time as mesh
247  setInstance(mesh_.facesInstance());
248 }
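// Note: after updateIntersections() the cached surfaceIndex_[faceI] holds the
// index of the first surface intersected by the owner-to-neighbour
// cell-centre segment of faceI, or -1 if no surface is hit; for example,
// intersectedFaces() below simply collects all faces with
// surfaceIndex_[faceI] != -1.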
249 
250 
251 void Foam::meshRefinement::checkData()
252 {
253  Pout<< "meshRefinement::checkData() : Checking refinement structure."
254  << endl;
255  meshCutter_.checkMesh();
256 
257  Pout<< "meshRefinement::checkData() : Checking refinement levels."
258  << endl;
259  meshCutter_.checkRefinementLevels(1, labelList(0));
260 
261 
262  label nBnd = mesh_.nFaces()-mesh_.nInternalFaces();
263 
264  Pout<< "meshRefinement::checkData() : Checking synchronization."
265  << endl;
266 
267  // Check face centres
268  {
269  // Boundary face centres
270  pointField::subList boundaryFc
271  (
272  mesh_.faceCentres(),
273  nBnd,
274  mesh_.nInternalFaces()
275  );
276 
277  // Get neighbouring face centres
278  pointField neiBoundaryFc(boundaryFc);
279  syncTools::swapBoundaryFaceList
280  (
281  mesh_,
282  neiBoundaryFc,
283  true
284  );
285 
286  // Compare
287  testSyncBoundaryFaceList
288  (
289  mergeDistance_,
290  "testing faceCentres : ",
291  boundaryFc,
292  neiBoundaryFc
293  );
294  }
295  // Check meshRefinement
296  {
297  // Get boundary face centre and level. Coupled aware.
298  labelList neiLevel(nBnd);
299  pointField neiCc(nBnd);
300  calcNeighbourData(neiLevel, neiCc);
301 
302  // Collect segments we want to test for
303  pointField start(mesh_.nFaces());
304  pointField end(mesh_.nFaces());
305 
306  forAll(start, faceI)
307  {
308  start[faceI] = mesh_.cellCentres()[mesh_.faceOwner()[faceI]];
309 
310  if (mesh_.isInternalFace(faceI))
311  {
312  end[faceI] = mesh_.cellCentres()[mesh_.faceNeighbour()[faceI]];
313  }
314  else
315  {
316  end[faceI] = neiCc[faceI-mesh_.nInternalFaces()];
317  }
318  }
319 
320  // Do tests in one go
321  labelList surfaceHit;
322  {
323  labelList surfaceLevel;
324  surfaces_.findHigherIntersection
325  (
326  start,
327  end,
328  labelList(start.size(), -1), // accept any intersection
329  surfaceHit,
330  surfaceLevel
331  );
332  }
333  // Get the coupled hit
334  labelList neiHit
335  (
336  labelList::subList
337  (
338  surfaceHit,
339  nBnd,
340  mesh_.nInternalFaces()
341  )
342  );
343  syncTools::swapBoundaryFaceList(mesh_, neiHit, false);
344 
345  // Check
346  forAll(surfaceHit, faceI)
347  {
348  if (surfaceIndex_[faceI] != surfaceHit[faceI])
349  {
350  if (mesh_.isInternalFace(faceI))
351  {
352  WarningIn("meshRefinement::checkData()")
353  << "Internal face:" << faceI
354  << " fc:" << mesh_.faceCentres()[faceI]
355  << " cached surfaceIndex_:" << surfaceIndex_[faceI]
356  << " current:" << surfaceHit[faceI]
357  << " ownCc:"
358  << mesh_.cellCentres()[mesh_.faceOwner()[faceI]]
359  << " neiCc:"
360  << mesh_.cellCentres()[mesh_.faceNeighbour()[faceI]]
361  << endl;
362  }
363  else if
364  (
365  surfaceIndex_[faceI]
366  != neiHit[faceI-mesh_.nInternalFaces()]
367  )
368  {
369  WarningIn("meshRefinement::checkData()")
370  << "Boundary face:" << faceI
371  << " fc:" << mesh_.faceCentres()[faceI]
372  << " cached surfaceIndex_:" << surfaceIndex_[faceI]
373  << " current:" << surfaceHit[faceI]
374  << " ownCc:"
375  << mesh_.cellCentres()[mesh_.faceOwner()[faceI]]
376  << " end:" << end[faceI]
377  << endl;
378  }
379  }
380  }
381  }
382  {
383  labelList::subList boundarySurface
384  (
385  surfaceIndex_,
386  mesh_.nFaces()-mesh_.nInternalFaces(),
387  mesh_.nInternalFaces()
388  );
389 
390  labelList neiBoundarySurface(boundarySurface);
391  syncTools::swapBoundaryFaceList
392  (
393  mesh_,
394  neiBoundarySurface,
395  false
396  );
397 
398  // Compare
399  testSyncBoundaryFaceList
400  (
401  0, // tolerance
402  "testing surfaceIndex() : ",
403  boundarySurface,
404  neiBoundarySurface
405  );
406  }
407 
408 
409  // Find duplicate faces
410  Pout<< "meshRefinement::checkData() : Counting duplicate faces."
411  << endl;
412 
413  labelList duplicateFace
414  (
415  localPointRegion::findDuplicateFaces
416  (
417  mesh_,
418  identity(mesh_.nFaces()-mesh_.nInternalFaces())
419  + mesh_.nInternalFaces()
420  )
421  );
422 
423  // Count
424  {
425  label nDup = 0;
426 
427  forAll(duplicateFace, i)
428  {
429  if (duplicateFace[i] != -1)
430  {
431  nDup++;
432  }
433  }
434  nDup /= 2; // will have counted both faces of duplicate
435  Pout<< "meshRefinement::checkData() : Found " << nDup
436  << " duplicate pairs of faces." << endl;
437  }
438 }
439 
440 
441 void Foam::meshRefinement::setInstance(const fileName& inst)
442 {
443  meshCutter_.setInstance(inst);
444  surfaceIndex_.instance() = inst;
445 }
446 
447 
448 // Remove cells. Puts the exposed faces (output of
449 // cellRemover.getExposedFaces(cellsToRemove)) into the patches given by exposedPatchIDs.
450 Foam::autoPtr<Foam::mapPolyMesh> Foam::meshRefinement::doRemoveCells
451 (
452  const labelList& cellsToRemove,
453  const labelList& exposedFaces,
454  const labelList& exposedPatchIDs,
455  removeCells& cellRemover
456 )
457 {
458  polyTopoChange meshMod(mesh_);
459 
460  // Arbitrary: put exposed faces into last patch.
461  cellRemover.setRefinement
462  (
463  cellsToRemove,
464  exposedFaces,
465  exposedPatchIDs,
466  meshMod
467  );
468 
469  // Change the mesh (no inflation)
470  autoPtr<mapPolyMesh> map = meshMod.changeMesh(mesh_, false, true);
471 
472  // Update fields
473  mesh_.updateMesh(map);
474 
475  // Move mesh (since morphing might not do this)
476  if (map().hasMotionPoints())
477  {
478  mesh_.movePoints(map().preMotionPoints());
479  }
480  else
481  {
482  // Delete mesh volumes. No other way to do this?
483  mesh_.clearOut();
484  }
485 
486  if (overwrite_)
487  {
488  mesh_.setInstance(oldInstance_);
489  }
490 
491  // Update local mesh data
492  cellRemover.updateMesh(map);
493 
494  // Update intersections. Recalculate intersections for exposed faces.
495  labelList newExposedFaces = renumber
496  (
497  map().reverseFaceMap(),
498  exposedFaces
499  );
500 
501  //Pout<< "removeCells : updating intersections for "
502  // << newExposedFaces.size() << " newly exposed faces." << endl;
503 
504  updateMesh(map, newExposedFaces);
505 
506  return map;
507 }
508 
509 
510 // Determine for multi-processor regions the lowest numbered cell on the lowest
511 // numbered processor.
512 void Foam::meshRefinement::getCoupledRegionMaster
513 (
514  const globalIndex& globalCells,
515  const boolList& blockedFace,
516  const regionSplit& globalRegion,
517  Map<label>& regionToMaster
518 ) const
519 {
520  const polyBoundaryMesh& patches = mesh_.boundaryMesh();
521 
522  forAll(patches, patchI)
523  {
524  const polyPatch& pp = patches[patchI];
525 
526  if (isA<processorPolyPatch>(pp))
527  {
528  forAll(pp, i)
529  {
530  label faceI = pp.start()+i;
531 
532  if (!blockedFace[faceI])
533  {
534  // Only if there is a connection to the neighbour
535  // will there be a multi-domain region; if not through
536  // this face then through another.
537 
538  label cellI = mesh_.faceOwner()[faceI];
539  label globalCellI = globalCells.toGlobal(cellI);
540 
541  Map<label>::iterator iter =
542  regionToMaster.find(globalRegion[cellI]);
543 
544  if (iter != regionToMaster.end())
545  {
546  label master = iter();
547  iter() = min(master, globalCellI);
548  }
549  else
550  {
551  regionToMaster.insert
552  (
553  globalRegion[cellI],
554  globalCellI
555  );
556  }
557  }
558  }
559  }
560  }
561 
562  // Do reduction
563  Pstream::mapCombineGather(regionToMaster, minEqOp<label>());
564  Pstream::mapCombineScatter(regionToMaster);
565 }
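// Worked example (assumed numbers, for illustration): suppose region 7 spans
// processors 2 and 5, with cells of global index {310, 480} on processor 2
// and {1020} on processor 5. Every unblocked coupled face contributes its
// owner cell, so after the mapCombineGather/mapCombineScatter reduction
// regionToMaster[7] == 310, the lowest global cell index, which by the
// globalIndex numbering lives on the lowest-numbered processor of the region.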
566 
567 
568 void Foam::meshRefinement::calcLocalRegions
569 (
570  const globalIndex& globalCells,
571  const labelList& globalRegion,
572  const Map<label>& coupledRegionToMaster,
573  const scalarField& cellWeights,
574 
575  Map<label>& globalToLocalRegion,
576  pointField& localPoints,
577  scalarField& localWeights
578 ) const
579 {
580  globalToLocalRegion.resize(globalRegion.size());
581  DynamicList<point> localCc(globalRegion.size()/2);
582  DynamicList<scalar> localWts(globalRegion.size()/2);
583 
584  forAll(globalRegion, cellI)
585  {
586  Map<label>::const_iterator fndMaster =
587  coupledRegionToMaster.find(globalRegion[cellI]);
588 
589  if (fndMaster != coupledRegionToMaster.end())
590  {
591  // Multi-processor region.
592  if (globalCells.toGlobal(cellI) == fndMaster())
593  {
594  // I am master. Allocate region for me.
595  globalToLocalRegion.insert(globalRegion[cellI], localCc.size());
596  localCc.append(mesh_.cellCentres()[cellI]);
597  localWts.append(cellWeights[cellI]);
598  }
599  }
600  else
601  {
602  // Single processor region.
603  if (globalToLocalRegion.insert(globalRegion[cellI], localCc.size()))
604  {
605  localCc.append(mesh_.cellCentres()[cellI]);
606  localWts.append(cellWeights[cellI]);
607  }
608  }
609  }
610 
611  localPoints.transfer(localCc);
612  localWeights.transfer(localWts);
613 
614  if (localPoints.size() != globalToLocalRegion.size())
615  {
616  FatalErrorIn("calcLocalRegions(..)")
617  << "localPoints:" << localPoints.size()
618  << " globalToLocalRegion:" << globalToLocalRegion.size()
619  << exit(FatalError);
620  }
621 }
622 
623 
624 Foam::label Foam::meshRefinement::getShiftedRegion
625 (
626  const globalIndex& indexer,
627  const Map<label>& globalToLocalRegion,
628  const Map<label>& coupledRegionToShifted,
629  const label globalRegion
630 )
631 {
632  Map<label>::const_iterator iter =
633  globalToLocalRegion.find(globalRegion);
634 
635  if (iter != globalToLocalRegion.end())
636  {
637  // Region is 'owned' locally. Convert local region index into global.
638  return indexer.toGlobal(iter());
639  }
640  else
641  {
642  return coupledRegionToShifted[globalRegion];
643  }
644 }
645 
646 
647 // Add if not yet present
648 void Foam::meshRefinement::addUnique(const label elem, labelList& lst)
649 {
650  if (findIndex(lst, elem) == -1)
651  {
652  label sz = lst.size();
653  lst.setSize(sz+1);
654  lst[sz] = elem;
655  }
656 }
657 
658 
659 void Foam::meshRefinement::calcRegionRegions
660 (
661  const labelList& globalRegion,
662  const Map<label>& globalToLocalRegion,
663  const Map<label>& coupledRegionToMaster,
664  labelListList& regionRegions
665 ) const
666 {
667  // Global region indexing since we now know the shifted regions.
668  globalIndex shiftIndexer(globalToLocalRegion.size());
669 
670  // Redo the coupledRegionToMaster to be in shifted region indexing.
671  Map<label> coupledRegionToShifted(coupledRegionToMaster.size());
672  forAllConstIter(Map<label>, coupledRegionToMaster, iter)
673  {
674  label region = iter.key();
675 
676  Map<label>::const_iterator fndRegion = globalToLocalRegion.find(region);
677 
678  if (fndRegion != globalToLocalRegion.end())
679  {
680  // A local cell is master of this region. Get its shifted region.
681  coupledRegionToShifted.insert
682  (
683  region,
684  shiftIndexer.toGlobal(fndRegion())
685  );
686  }
687  Pstream::mapCombineGather(coupledRegionToShifted, minEqOp<label>());
688  Pstream::mapCombineScatter(coupledRegionToShifted);
689  }
690 
691 
692  // Determine region-region connectivity.
693  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
694  // This is for all my regions (so my local ones or the ones I am master
695  // of) the neighbouring region indices.
696 
697 
698  // Transfer lists.
699  PtrList<HashSet<edge, Hash<edge> > > regionConnectivity(Pstream::nProcs());
700  forAll(regionConnectivity, procI)
701  {
702  if (procI != Pstream::myProcNo())
703  {
704  regionConnectivity.set
705  (
706  procI,
707  new HashSet<edge, Hash<edge> >
708  (
709  coupledRegionToShifted.size()
710  / Pstream::nProcs()
711  )
712  );
713  }
714  }
715 
716 
717  // Connectivity. For all my local regions the connected regions.
718  regionRegions.setSize(globalToLocalRegion.size());
719 
720  // Add all local connectivity to regionRegions; add all non-local
721  // to the transferlists.
722  for (label faceI = 0; faceI < mesh_.nInternalFaces(); faceI++)
723  {
724  label ownRegion = globalRegion[mesh_.faceOwner()[faceI]];
725  label neiRegion = globalRegion[mesh_.faceNeighbour()[faceI]];
726 
727  if (ownRegion != neiRegion)
728  {
729  label shiftOwnRegion = getShiftedRegion
730  (
731  shiftIndexer,
732  globalToLocalRegion,
733  coupledRegionToShifted,
734  ownRegion
735  );
736  label shiftNeiRegion = getShiftedRegion
737  (
738  shiftIndexer,
739  globalToLocalRegion,
740  coupledRegionToShifted,
741  neiRegion
742  );
743 
744 
745  // Connection between two regions. Send to owner of region.
746  // - is ownRegion 'owned' by me
747  // - is neiRegion 'owned' by me
748 
749  if (shiftIndexer.isLocal(shiftOwnRegion))
750  {
751  label localI = shiftIndexer.toLocal(shiftOwnRegion);
752  addUnique(shiftNeiRegion, regionRegions[localI]);
753  }
754  else
755  {
756  label masterProc = shiftIndexer.whichProcID(shiftOwnRegion);
757  regionConnectivity[masterProc].insert
758  (
759  edge(shiftOwnRegion, shiftNeiRegion)
760  );
761  }
762 
763  if (shiftIndexer.isLocal(shiftNeiRegion))
764  {
765  label localI = shiftIndexer.toLocal(shiftNeiRegion);
766  addUnique(shiftOwnRegion, regionRegions[localI]);
767  }
768  else
769  {
770  label masterProc = shiftIndexer.whichProcID(shiftNeiRegion);
771  regionConnectivity[masterProc].insert
772  (
773  edge(shiftOwnRegion, shiftNeiRegion)
774  );
775  }
776  }
777  }
778 
779 
780  // Send
781  forAll(regionConnectivity, procI)
782  {
783  if (procI != Pstream::myProcNo())
784  {
785  OPstream str(Pstream::blocking, procI);
786  str << regionConnectivity[procI];
787  }
788  }
789  // Receive
790  forAll(regionConnectivity, procI)
791  {
792  if (procI != Pstream::myProcNo())
793  {
794  IPstream str(Pstream::blocking, procI);
795  str >> regionConnectivity[procI];
796  }
797  }
798 
799  // Add to addressing.
800  forAll(regionConnectivity, procI)
801  {
802  if (procI != Pstream::myProcNo())
803  {
804  for
805  (
806  HashSet<edge, Hash<edge> >::const_iterator iter =
807  regionConnectivity[procI].begin();
808  iter != regionConnectivity[procI].end();
809  ++iter
810  )
811  {
812  const edge& e = iter.key();
813 
814  bool someLocal = false;
815  if (shiftIndexer.isLocal(e[0]))
816  {
817  label localI = shiftIndexer.toLocal(e[0]);
818  addUnique(e[1], regionRegions[localI]);
819  someLocal = true;
820  }
821  if (shiftIndexer.isLocal(e[1]))
822  {
823  label localI = shiftIndexer.toLocal(e[1]);
824  addUnique(e[0], regionRegions[localI]);
825  someLocal = true;
826  }
827 
828  if (!someLocal)
829  {
830  FatalErrorIn("calcRegionRegions(..)")
831  << "Received from processor " << procI
832  << " connection " << e
833  << " where none of the elements is local to me."
834  << abort(FatalError);
835  }
836  }
837  }
838  }
839 }
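// Note: on return regionRegions[localI] holds, for every region owned locally
// (see calcLocalRegions above), the shifted global indices of the regions it
// shares at least one internal face with. decomposeCombineRegions() passes
// this adjacency, together with the region centres and weights, to
// decomposer.decompose(regionRegions, localPoints, localWeights) for
// connectivity-aware (graph based) decomposition.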
840 
841 
842 // * * * * * * * * * * * * * * * * Constructors * * * * * * * * * * * * * * //
843 
844 // Construct from components
845 Foam::meshRefinement::meshRefinement
846 (
847  fvMesh& mesh,
848  const scalar mergeDistance,
849  const bool overwrite,
850  const refinementSurfaces& surfaces,
851  const shellSurfaces& shells
852 )
853 :
854  mesh_(mesh),
855  mergeDistance_(mergeDistance),
856  overwrite_(overwrite),
857  oldInstance_(mesh.pointsInstance()),
858  surfaces_(surfaces),
859  shells_(shells),
860  meshCutter_
861  (
862  mesh,
863  labelIOList
864  (
865  IOobject
866  (
867  "cellLevel",
868  mesh_.facesInstance(),
869  polyMesh::meshSubDir,
870  mesh,
871  IOobject::READ_IF_PRESENT,
872  IOobject::NO_WRITE,
873  false
874  ),
875  labelList(mesh_.nCells(), 0)
876  ),
877  labelIOList
878  (
879  IOobject
880  (
881  "pointLevel",
882  mesh_.facesInstance(),
883  polyMesh::meshSubDir,
884  mesh,
885  IOobject::READ_IF_PRESENT,
886  IOobject::NO_WRITE,
887  false
888  ),
889  labelList(mesh_.nPoints(), 0)
890  ),
891  refinementHistory
892  (
893  IOobject
894  (
895  "refinementHistory",
896  mesh_.facesInstance(),
897  polyMesh::meshSubDir,
898  mesh_,
899  IOobject::NO_READ,
900  IOobject::NO_WRITE,
901  false
902  ),
903  List<refinementHistory::splitCell8>(0),
904  labelList(0)
905  ) // no unrefinement
906  ),
907  surfaceIndex_
908  (
909  IOobject
910  (
911  "surfaceIndex",
912  mesh_.facesInstance(),
913  fvMesh::meshSubDir,
914  mesh_,
915  IOobject::NO_READ,
916  IOobject::NO_WRITE,
917  false
918  ),
919  labelList(mesh_.nFaces(), -1)
920  ),
921  userFaceData_(0)
922 {
923  // recalculate intersections for all faces
924  updateIntersections(identity(mesh_.nFaces()));
925 }
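// Construction sketch (illustrative; the mergeDistance expression below is an
// assumption, the calling application normally derives it from a merge
// tolerance and the mesh bounding box):
//
//     meshRefinement meshRefiner
//     (
//         mesh,                       // fvMesh to work on
//         1e-6*mesh.bounds().mag(),   // mergeDistance (assumed expression)
//         false,                      // overwrite mesh instead of writing a new time?
//         surfaces,                   // refinementSurfaces
//         shells                      // shellSurfaces
//     );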
926 
927 
928 // * * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * //
929 
930 Foam::label Foam::meshRefinement::countHits() const
931 {
932  // Stats on edges to test. Count proc faces only once.
933  PackedBoolList isMasterFace(syncTools::getMasterFaces(mesh_));
934 
935  label nHits = 0;
936 
937  forAll(surfaceIndex_, faceI)
938  {
939  if (surfaceIndex_[faceI] >= 0 && isMasterFace.get(faceI) == 1)
940  {
941  nHits++;
942  }
943  }
944  return nHits;
945 }
946 
947 
948 // Determine distribution to move connected regions onto one processor.
949 Foam::labelList Foam::meshRefinement::decomposeCombineRegions
950 (
951  const scalarField& cellWeights,
952  const boolList& blockedFace,
953  const List<labelPair>& explicitConnections,
954  decompositionMethod& decomposer
955 ) const
956 {
957  // Determine global regions, separated by blockedFaces
958  regionSplit globalRegion(mesh_, blockedFace, explicitConnections);
959 
960  // Now globalRegion has global region per cell. Problem is that
961  // the region might span multiple domains so we want to get
962  // a "region master" per domain. Note that multi-processor
963  // regions can only occur on cells on coupled patches.
964  // Note: since the number of regions does not change by this the
965  // process can be seen as just a shift of a region onto a single
966  // processor.
967 
968 
969  // Global cell numbering engine
970  globalIndex globalCells(mesh_.nCells());
971 
972 
973  // Determine per coupled region the master cell (lowest numbered cell
974  // on lowest numbered processor)
975  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
976  // (does not determine master for non-coupled=fully-local regions)
977 
978  Map<label> coupledRegionToMaster(mesh_.nFaces()-mesh_.nInternalFaces());
979  getCoupledRegionMaster
980  (
981  globalCells,
982  blockedFace,
983  globalRegion,
984  coupledRegionToMaster
985  );
986 
987  // Determine my regions
988  // ~~~~~~~~~~~~~~~~~~~~
989  // These are the fully local ones or the coupled ones of which I am master.
990 
991  Map<label> globalToLocalRegion;
992  pointField localPoints;
993  scalarField localWeights;
994  calcLocalRegions
995  (
996  globalCells,
997  globalRegion,
998  coupledRegionToMaster,
999  cellWeights,
1000 
1001  globalToLocalRegion,
1002  localPoints,
1003  localWeights
1004  );
1005 
1006 
1007 
1008  // Find distribution for regions
1009  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
1010 
1011  labelList regionDistribution;
1012 
1013  if (isA<geomDecomp>(decomposer))
1014  {
1015  regionDistribution = decomposer.decompose(localPoints, localWeights);
1016  }
1017  else
1018  {
1019  labelListList regionRegions;
1020  calcRegionRegions
1021  (
1022  globalRegion,
1023  globalToLocalRegion,
1024  coupledRegionToMaster,
1025 
1026  regionRegions
1027  );
1028 
1029  regionDistribution = decomposer.decompose
1030  (
1031  regionRegions,
1032  localPoints,
1033  localWeights
1034  );
1035  }
1036 
1037 
1038 
1039  // Convert region-based decomposition back to cell-based one
1040  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
1041 
1042  // Transfer destination processor back to all. Use global reduce for now.
1043  Map<label> regionToDist(coupledRegionToMaster.size());
1044  forAllConstIter(Map<label>, coupledRegionToMaster, iter)
1045  {
1046  label region = iter.key();
1047 
1048  Map<label>::const_iterator regionFnd = globalToLocalRegion.find(region);
1049 
1050  if (regionFnd != globalToLocalRegion.end())
1051  {
1052  // Master cell is local. Store my distribution.
1053  regionToDist.insert(iter.key(), regionDistribution[regionFnd()]);
1054  }
1055  else
1056  {
1057  // Master cell is not on this processor. Make sure it is overridden.
1058  regionToDist.insert(iter.key(), labelMax);
1059  }
1060  }
1061  Pstream::mapCombineGather(regionToDist, minEqOp<label>());
1062  Pstream::mapCombineScatter(regionToDist);
1063 
1064 
1065  // Determine destination for all cells
1066  labelList distribution(mesh_.nCells());
1067 
1068  forAll(globalRegion, cellI)
1069  {
1070  Map<label>::const_iterator fndRegion =
1071  regionToDist.find(globalRegion[cellI]);
1072 
1073  if (fndRegion != regionToDist.end())
1074  {
1075  distribution[cellI] = fndRegion();
1076  }
1077  else
1078  {
1079  // region is local to the processor.
1080  label localRegionI = globalToLocalRegion[globalRegion[cellI]];
1081 
1082  distribution[cellI] = regionDistribution[localRegionI];
1083  }
1084  }
1085 
1086  return distribution;
1087 }
1088 
1089 
1090 Foam::autoPtr<Foam::mapDistributePolyMesh> Foam::meshRefinement::balance
1091 (
1092  const bool keepZoneFaces,
1093  const bool keepBaffles,
1094  const scalarField& cellWeights,
1095  decompositionMethod& decomposer,
1096  fvMeshDistribute& distributor
1097 )
1098 {
1099  autoPtr<mapDistributePolyMesh> map;
1100 
1101  if (Pstream::parRun())
1102  {
1103  //if (debug_)
1104  //{
1105  // const_cast<Time&>(mesh_.time())++;
1106  //}
1107 
1108  // Wanted distribution
1109  labelList distribution;
1110 
1111  if (keepZoneFaces || keepBaffles)
1112  {
1113  // Faces where owner and neighbour are not 'connected' so can
1114  // go to different processors.
1115  boolList blockedFace(mesh_.nFaces(), true);
1116  label nUnblocked = 0;
1117  // Pairs of baffles
1118  List<labelPair> couples;
1119 
1120  if (keepZoneFaces)
1121  {
1122  // Determine decomposition to keep/move surface zones
1123  // on one processor. The reason is that snapping will convert these
1124  // into baffles, move them and convert them back; if they were processor
1125  // boundaries, the points might no longer be synchronised after the
1126  // baffling and moving, so recoupling would fail. To prevent this,
1127  // keep the owner and neighbour of such a surface zone on the same
1128  // processor.
1129 
1130  const wordList& fzNames = surfaces().faceZoneNames();
1131  const faceZoneMesh& fZones = mesh_.faceZones();
1132 
1133  // Get faces whose owner and neighbour should stay together,
1134  // i.e. they are not 'blocked'.
1135 
1136  forAll(fzNames, surfI)
1137  {
1138  if (fzNames[surfI].size())
1139  {
1140  // Get zone
1141  label zoneI = fZones.findZoneID(fzNames[surfI]);
1142 
1143  const faceZone& fZone = fZones[zoneI];
1144 
1145  forAll(fZone, i)
1146  {
1147  if (blockedFace[fZone[i]])
1148  {
1149  blockedFace[fZone[i]] = false;
1150  nUnblocked++;
1151  }
1152  }
1153  }
1154  }
1155 
1156 
1157  // If the faceZones are not synchronised, the blockedFace list
1158  // might not be synchronised either. If you are sure the faceZones
1159  // are synchronised, remove the check below.
1160  syncTools::syncFaceList
1161  (
1162  mesh_,
1163  blockedFace,
1164  andEqOp<bool>(), // combine operator
1165  false // separation
1166  );
1167  }
1168  reduce(nUnblocked, sumOp<label>());
1169 
1170  if (keepZoneFaces)
1171  {
1172  Info<< "Found " << nUnblocked
1173  << " zoned faces to keep together." << endl;
1174  }
1175 
1176  if (keepBaffles)
1177  {
1178  // Get boundary baffles that need to stay together.
1179  couples = getDuplicateFaces // all baffles
1180  (
1181  identity(mesh_.nFaces()-mesh_.nInternalFaces())
1182  +mesh_.nInternalFaces()
1183  );
1184  }
1185  label nCouples = returnReduce(couples.size(), sumOp<label>());
1186 
1187  if (keepBaffles)
1188  {
1189  Info<< "Found " << nCouples << " baffles to keep together."
1190  << endl;
1191  }
1192 
1193  if (nUnblocked > 0 || nCouples > 0)
1194  {
1195  Info<< "Applying special decomposition to keep baffles"
1196  << " and zoned faces together." << endl;
1197 
1198  distribution = decomposeCombineRegions
1199  (
1200  cellWeights,
1201  blockedFace,
1202  couples,
1203  decomposer
1204  );
1205 
1206  labelList nProcCells(distributor.countCells(distribution));
1207  Pstream::listCombineGather(nProcCells, plusEqOp<label>());
1208  Pstream::listCombineScatter(nProcCells);
1209 
1210  Info<< "Calculated decomposition:" << endl;
1211  forAll(nProcCells, procI)
1212  {
1213  Info<< " " << procI << '\t' << nProcCells[procI] << endl;
1214  }
1215  Info<< endl;
1216  }
1217  else
1218  {
1219  // Normal decomposition
1220  distribution = decomposer.decompose
1221  (
1222  mesh_.cellCentres(),
1223  cellWeights
1224  );
1225  }
1226  }
1227  else
1228  {
1229  // Normal decomposition
1230  distribution = decomposer.decompose
1231  (
1232  mesh_.cellCentres(),
1233  cellWeights
1234  );
1235  }
1236 
1237  if (debug)
1238  {
1239  labelList nProcCells(distributor.countCells(distribution));
1240  Pout<< "Wanted distribution:" << nProcCells << endl;
1241 
1242  Pstream::listCombineGather(nProcCells, plusEqOp<label>());
1243  Pstream::listCombineScatter(nProcCells);
1244 
1245  Pout<< "Wanted resulting decomposition:" << endl;
1246  forAll(nProcCells, procI)
1247  {
1248  Pout<< " " << procI << '\t' << nProcCells[procI] << endl;
1249  }
1250  Pout<< endl;
1251  }
1252  // Do actual sending/receiving of mesh
1253  map = distributor.distribute(distribution);
1254 
1255  // Update numbering of meshRefiner
1256  distribute(map);
1257  }
1258  return map;
1259 }
1260 
1261 
1262 // Helper function to get intersected faces
1263 Foam::labelList Foam::meshRefinement::intersectedFaces() const
1264 {
1265  label nBoundaryFaces = 0;
1266 
1267  forAll(surfaceIndex_, faceI)
1268  {
1269  if (surfaceIndex_[faceI] != -1)
1270  {
1271  nBoundaryFaces++;
1272  }
1273  }
1274 
1275  labelList surfaceFaces(nBoundaryFaces);
1276  nBoundaryFaces = 0;
1277 
1278  forAll(surfaceIndex_, faceI)
1279  {
1280  if (surfaceIndex_[faceI] != -1)
1281  {
1282  surfaceFaces[nBoundaryFaces++] = faceI;
1283  }
1284  }
1285  return surfaceFaces;
1286 }
1287 
1288 
1289 // Helper function to get points used by faces
1290 Foam::labelList Foam::meshRefinement::intersectedPoints() const
1291 {
1292  const faceList& faces = mesh_.faces();
1293 
1294  // Mark all points on faces that will become baffles
1295  PackedBoolList isBoundaryPoint(mesh_.nPoints());
1296  label nBoundaryPoints = 0;
1297 
1298  forAll(surfaceIndex_, faceI)
1299  {
1300  if (surfaceIndex_[faceI] != -1)
1301  {
1302  const face& f = faces[faceI];
1303 
1304  forAll(f, fp)
1305  {
1306  if (isBoundaryPoint.set(f[fp], 1u))
1307  {
1308  nBoundaryPoints++;
1309  }
1310  }
1311  }
1312  }
1313 
1315  //labelList adaptPatchIDs(meshedPatches());
1316  //forAll(adaptPatchIDs, i)
1317  //{
1318  // label patchI = adaptPatchIDs[i];
1319  //
1320  // if (patchI != -1)
1321  // {
1322  // const polyPatch& pp = mesh_.boundaryMesh()[patchI];
1323  //
1324  // label faceI = pp.start();
1325  //
1326  // forAll(pp, i)
1327  // {
1328  // const face& f = faces[faceI];
1329  //
1330  // forAll(f, fp)
1331  // {
1332  // if (isBoundaryPoint.set(f[fp], 1u))
1333  // nBoundaryPoints++;
1334  // }
1335  // }
1336  // faceI++;
1337  // }
1338  // }
1339  //}
1340 
1341 
1342  // Pack
1343  labelList boundaryPoints(nBoundaryPoints);
1344  nBoundaryPoints = 0;
1345  forAll(isBoundaryPoint, pointI)
1346  {
1347  if (isBoundaryPoint.get(pointI) == 1u)
1348  {
1349  boundaryPoints[nBoundaryPoints++] = pointI;
1350  }
1351  }
1352 
1353  return boundaryPoints;
1354 }
1355 
1356 
1357 //- Create patch from set of patches
1358 Foam::autoPtr<Foam::indirectPrimitivePatch> Foam::meshRefinement::makePatch
1359 (
1360  const polyMesh& mesh,
1361  const labelList& patchIDs
1362 )
1363 {
1364  const polyBoundaryMesh& patches = mesh.boundaryMesh();
1365 
1366  // Count faces.
1367  label nFaces = 0;
1368 
1369  forAll(patchIDs, i)
1370  {
1371  const polyPatch& pp = patches[patchIDs[i]];
1372 
1373  nFaces += pp.size();
1374  }
1375 
1376  // Collect faces.
1377  labelList addressing(nFaces);
1378  nFaces = 0;
1379 
1380  forAll(patchIDs, i)
1381  {
1382  const polyPatch& pp = patches[patchIDs[i]];
1383 
1384  label meshFaceI = pp.start();
1385 
1386  forAll(pp, i)
1387  {
1388  addressing[nFaces++] = meshFaceI++;
1389  }
1390  }
1391 
1392  return autoPtr<indirectPrimitivePatch>
1393  (
1394  new indirectPrimitivePatch
1395  (
1396  IndirectList<face>(mesh.faces(), addressing),
1397  mesh.points()
1398  )
1399  );
1400 }
1401 
1402 
1403 // Construct pointVectorField with correct boundary conditions
1404 Foam::tmp<Foam::pointVectorField> Foam::meshRefinement::makeDisplacementField
1405 (
1406  const pointMesh& pMesh,
1407  const labelList& adaptPatchIDs
1408 )
1409 {
1410  const polyMesh& mesh = pMesh();
1411 
1412  // Construct displacement field.
1413  const pointBoundaryMesh& pointPatches = pMesh.boundary();
1414 
1415  wordList patchFieldTypes
1416  (
1417  pointPatches.size(),
1418  slipPointPatchVectorField::typeName
1419  );
1420 
1421  forAll(adaptPatchIDs, i)
1422  {
1423  patchFieldTypes[adaptPatchIDs[i]] =
1424  fixedValuePointPatchVectorField::typeName;
1425  }
1426 
1427  forAll(pointPatches, patchI)
1428  {
1429  if (isA<globalPointPatch>(pointPatches[patchI]))
1430  {
1431  patchFieldTypes[patchI] = globalPointPatchVectorField::typeName;
1432  }
1433  else if (isA<processorPointPatch>(pointPatches[patchI]))
1434  {
1435  patchFieldTypes[patchI] = calculatedPointPatchVectorField::typeName;
1436  }
1437  }
1438 
1439  tmp<pointVectorField> tfld
1440  (
1441  new pointVectorField
1442  (
1443  IOobject
1444  (
1445  "pointDisplacement",
1446  mesh.time().timeName(), //timeName(),
1447  mesh,
1448  IOobject::NO_READ,
1449  IOobject::NO_WRITE
1450  ),
1451  pMesh,
1452  dimensionedVector("displacement", dimLength, vector::zero),
1453  patchFieldTypes
1454  )
1455  );
1456 
1457  return tfld;
1458 }
1459 
1460 
1461 void Foam::meshRefinement::checkCoupledFaceZones(const polyMesh& mesh)
1462 {
1463  const faceZoneMesh& fZones = mesh.faceZones();
1464 
1465  // Check that zones are synchronised: present on all processors and in the same order.
1466 
1467  {
1468  List<wordList> zoneNames(Pstream::nProcs());
1469  zoneNames[Pstream::myProcNo()] = fZones.names();
1470  Pstream::gatherList(zoneNames);
1471  Pstream::scatterList(zoneNames);
1472  // All have same data now. Check.
1473  forAll(zoneNames, procI)
1474  {
1475  if (procI != Pstream::myProcNo())
1476  {
1477  if (zoneNames[procI] != zoneNames[Pstream::myProcNo()])
1478  {
1479  FatalErrorIn
1480  (
1481  "meshRefinement::checkCoupledFaceZones(const polyMesh&)"
1482  ) << "faceZones are not synchronised on processors." << nl
1483  << "Processor " << procI << " has faceZones "
1484  << zoneNames[procI] << nl
1485  << "Processor " << Pstream::myProcNo()
1486  << " has faceZones "
1487  << zoneNames[Pstream::myProcNo()] << nl
1488  << exit(FatalError);
1489  }
1490  }
1491  }
1492  }
1493 
1494  // Check that coupled faces are present on both sides.
1495 
1496  labelList faceToZone(mesh.nFaces()-mesh.nInternalFaces(), -1);
1497 
1498  forAll(fZones, zoneI)
1499  {
1500  const faceZone& fZone = fZones[zoneI];
1501 
1502  forAll(fZone, i)
1503  {
1504  label bFaceI = fZone[i]-mesh.nInternalFaces();
1505 
1506  if (bFaceI >= 0)
1507  {
1508  if (faceToZone[bFaceI] == -1)
1509  {
1510  faceToZone[bFaceI] = zoneI;
1511  }
1512  else if (faceToZone[bFaceI] == zoneI)
1513  {
1514  FatalErrorIn
1515  (
1516  "meshRefinement::checkCoupledFaceZones(const polyMesh&)"
1517  ) << "Face " << fZone[i] << " in zone "
1518  << fZone.name()
1519  << " is twice in zone!"
1520  << abort(FatalError);
1521  }
1522  else
1523  {
1524  FatalErrorIn
1525  (
1526  "meshRefinement::checkCoupledFaceZones(const polyMesh&)"
1527  ) << "Face " << fZone[i] << " in zone "
1528  << fZone.name()
1529  << " is also in zone "
1530  << fZones[faceToZone[bFaceI]].name()
1531  << abort(FatalError);
1532  }
1533  }
1534  }
1535  }
1536 
1537  labelList neiFaceToZone(faceToZone);
1538  syncTools::swapBoundaryFaceList(mesh, neiFaceToZone, false);
1539 
1540  forAll(faceToZone, i)
1541  {
1542  if (faceToZone[i] != neiFaceToZone[i])
1543  {
1544  FatalErrorIn
1545  (
1546  "meshRefinement::checkCoupledFaceZones(const polyMesh&)"
1547  ) << "Face " << mesh.nInternalFaces()+i
1548  << " is in zone " << faceToZone[i]
1549  << ", its coupled face is in zone " << neiFaceToZone[i]
1550  << abort(FatalError);
1551  }
1552  }
1553 }
1554 
1555 
1556 // Adds patch if not yet there. Returns patchID.
1557 Foam::label Foam::meshRefinement::addPatch
1558 (
1559  fvMesh& mesh,
1560  const word& patchName,
1561  const word& patchType
1562 )
1563 {
1564  polyBoundaryMesh& polyPatches =
1565  const_cast<polyBoundaryMesh&>(mesh.boundaryMesh());
1566 
1567  label patchI = polyPatches.findPatchID(patchName);
1568  if (patchI != -1)
1569  {
1570  if (polyPatches[patchI].type() == patchType)
1571  {
1572  // Already there
1573  return patchI;
1574  }
1575  //else
1576  //{
1577  // FatalErrorIn
1578  // (
1579  // "meshRefinement::addPatch(fvMesh&, const word&, const word&)"
1580  // ) << "Patch " << patchName << " already exists but with type "
1581  // << patchType << nl
1582  // << "Current patch names:" << polyPatches.names()
1583  // << exit(FatalError);
1584  //}
1585  }
1586 
1587 
1588  label insertPatchI = polyPatches.size();
1589  label startFaceI = mesh.nFaces();
1590 
1591  forAll(polyPatches, patchI)
1592  {
1593  const polyPatch& pp = polyPatches[patchI];
1594 
1595  if (isA<processorPolyPatch>(pp))
1596  {
1597  insertPatchI = patchI;
1598  startFaceI = pp.start();
1599  break;
1600  }
1601  }
1602 
1603 
1604  // Below is all quite a hack. Feel free to change once there is a better
1605  // mechanism to insert and reorder patches.
1606 
1607  // Clear local fields and e.g. polyMesh parallelInfo.
1608  mesh.clearOut();
1609 
1610  label sz = polyPatches.size();
1611 
1612  fvBoundaryMesh& fvPatches = const_cast<fvBoundaryMesh&>(mesh.boundary());
1613 
1614  // Add polyPatch at the end
1615  polyPatches.setSize(sz+1);
1616  polyPatches.set
1617  (
1618  sz,
1619  polyPatch::New
1620  (
1621  patchType,
1622  patchName,
1623  0, // size
1624  startFaceI,
1625  insertPatchI,
1626  polyPatches
1627  )
1628  );
1629  fvPatches.setSize(sz+1);
1630  fvPatches.set
1631  (
1632  sz,
1633  fvPatch::New
1634  (
1635  polyPatches[sz], // point to newly added polyPatch
1636  mesh.boundary()
1637  )
1638  );
1639 
1640  addPatchFields<volScalarField>
1641  (
1642  mesh,
1643  calculatedFvPatchField<scalar>::typeName
1644  );
1645  addPatchFields<volVectorField>
1646  (
1647  mesh,
1648  calculatedFvPatchField<vector>::typeName
1649  );
1650  addPatchFields<volSphericalTensorField>
1651  (
1652  mesh,
1653  calculatedFvPatchField<sphericalTensor>::typeName
1654  );
1655  addPatchFields<volSymmTensorField>
1656  (
1657  mesh,
1658  calculatedFvPatchField<symmTensor>::typeName
1659  );
1660  addPatchFields<volTensorField>
1661  (
1662  mesh,
1663  calculatedFvPatchField<tensor>::typeName
1664  );
1665 
1666  // Surface fields
1667 
1668  addPatchFields<surfaceScalarField>
1669  (
1670  mesh,
1671  calculatedFvsPatchField<scalar>::typeName
1672  );
1673  addPatchFields<surfaceVectorField>
1674  (
1675  mesh,
1676  calculatedFvsPatchField<vector>::typeName
1677  );
1678  addPatchFields<surfaceSphericalTensorField>
1679  (
1680  mesh,
1681  calculatedFvsPatchField<sphericalTensor>::typeName
1682  );
1683  addPatchFields<surfaceSymmTensorField>
1684  (
1685  mesh,
1686  calculatedFvsPatchField<symmTensor>::typeName
1687  );
1688  addPatchFields<surfaceTensorField>
1689  (
1690  mesh,
1691  calculatedFvsPatchField<tensor>::typeName
1692  );
1693 
1694  // Create reordering list
1695  // patches before insert position stay as is
1696  labelList oldToNew(sz+1);
1697  for (label i = 0; i < insertPatchI; i++)
1698  {
1699  oldToNew[i] = i;
1700  }
1701  // patches after insert position move one up
1702  for (label i = insertPatchI; i < sz; i++)
1703  {
1704  oldToNew[i] = i+1;
1705  }
1706  // appended patch gets moved to insert position
1707  oldToNew[sz] = insertPatchI;
1708 
1709  // Shuffle into place
1710  polyPatches.reorder(oldToNew);
1711  fvPatches.reorder(oldToNew);
1712 
1713  reorderPatchFields<volScalarField>(mesh, oldToNew);
1714  reorderPatchFields<volVectorField>(mesh, oldToNew);
1715  reorderPatchFields<volSphericalTensorField>(mesh, oldToNew);
1716  reorderPatchFields<volSymmTensorField>(mesh, oldToNew);
1717  reorderPatchFields<volTensorField>(mesh, oldToNew);
1718  reorderPatchFields<surfaceScalarField>(mesh, oldToNew);
1719  reorderPatchFields<surfaceVectorField>(mesh, oldToNew);
1720  reorderPatchFields<surfaceSphericalTensorField>(mesh, oldToNew);
1721  reorderPatchFields<surfaceSymmTensorField>(mesh, oldToNew);
1722  reorderPatchFields<surfaceTensorField>(mesh, oldToNew);
1723 
1724  return insertPatchI;
1725 }
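// Usage sketch (illustrative; the patch name and wallPolyPatch type are just
// examples): addMeshedPatch() below calls this as
//
//     label patchI = addPatch(mesh_, name, type);
//
// e.g. addPatch(mesh, "frozenWall", wallPolyPatch::typeName) would insert a
// zero-sized wall patch before any processor patches and return its index.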
1726 
1727 
1728 Foam::label Foam::meshRefinement::addMeshedPatch
1729 (
1730  const word& name,
1731  const word& type
1732 )
1733 {
1734  label meshedI = findIndex(meshedPatches_, name);
1735 
1736  if (meshedI != -1)
1737  {
1738  // Already there. Get corresponding polypatch
1739  return mesh_.boundaryMesh().findPatchID(name);
1740  }
1741  else
1742  {
1743  // Add patch
1744  label patchI = addPatch(mesh_, name, type);
1745 
1746  // Store
1747  label sz = meshedPatches_.size();
1748  meshedPatches_.setSize(sz+1);
1749  meshedPatches_[sz] = name;
1750 
1751  return patchI;
1752  }
1753 }
1754 
1755 
1756 Foam::labelList Foam::meshRefinement::meshedPatches() const
1757 {
1758  labelList patchIDs(meshedPatches_.size());
1759  forAll(meshedPatches_, i)
1760  {
1761  patchIDs[i] = mesh_.boundaryMesh().findPatchID(meshedPatches_[i]);
1762 
1763  if (patchIDs[i] == -1)
1764  {
1765  FatalErrorIn("meshRefinement::meshedPatches() const")
1766  << "Problem : did not find patch " << meshedPatches_[i]
1767  << abort(FatalError);
1768  }
1769  }
1770 
1771  return patchIDs;
1772 }
1773 
1774 
1775 Foam::autoPtr<Foam::mapPolyMesh> Foam::meshRefinement::splitMeshRegions
1776 (
1777  const point& keepPoint
1778 )
1779 {
1780  // Determine connected regions. regionSplit is the labelList with the
1781  // region per cell.
1782  regionSplit cellRegion(mesh_);
1783 
1784  label regionI = -1;
1785 
1786  label cellI = mesh_.findCell(keepPoint);
1787 
1788  if (cellI != -1)
1789  {
1790  regionI = cellRegion[cellI];
1791  }
1792 
1793  reduce(regionI, maxOp<label>());
1794 
1795  if (regionI == -1)
1796  {
1797  FatalErrorIn
1798  (
1799  "meshRefinement::splitMeshRegions(const point&)"
1800  ) << "Point " << keepPoint
1801  << " is not inside the mesh." << nl
1802  << "Bounding box of the mesh:" << mesh_.globalData().bb()
1803  << exit(FatalError);
1804  }
1805 
1806  // Subset
1807  // ~~~~~~
1808 
1809  // Get cells to remove
1810  DynamicList<label> cellsToRemove(mesh_.nCells());
1811  forAll(cellRegion, cellI)
1812  {
1813  if (cellRegion[cellI] != regionI)
1814  {
1815  cellsToRemove.append(cellI);
1816  }
1817  }
1818  cellsToRemove.shrink();
1819 
1820  label nCellsToKeep = mesh_.nCells() - cellsToRemove.size();
1821  reduce(nCellsToKeep, sumOp<label>());
1822 
1823  Info<< "Keeping all cells in region " << regionI
1824  << " containing point " << keepPoint << endl
1825  << "Selected for keeping : "
1826  << nCellsToKeep
1827  << " cells." << endl;
1828 
1829 
1830  // Remove cells
1831  removeCells cellRemover(mesh_);
1832 
1833  labelList exposedFaces(cellRemover.getExposedFaces(cellsToRemove));
1834 
1835  if (exposedFaces.size())
1836  {
1837  FatalErrorIn
1838  (
1839  "meshRefinement::splitMeshRegions(const point&)"
1840  ) << "Removing non-reachable cells should only expose boundary faces"
1841  << nl
1842  << "ExposedFaces:" << exposedFaces << abort(FatalError);
1843  }
1844 
1845  return doRemoveCells
1846  (
1847  cellsToRemove,
1848  exposedFaces,
1849  labelList(exposedFaces.size(),-1), // irrelevant since 0 size.
1850  cellRemover
1851  );
1852 }
1853 
1854 
1855 void Foam::meshRefinement::distribute(const mapDistributePolyMesh& map)
1856 {
1857  // mesh_ already distributed; distribute my member data
1858 
1859  // surfaceQueries_ ok.
1860 
1861  // refinement
1862  meshCutter_.distribute(map);
1863 
1864  // surfaceIndex is face data.
1865  map.distributeFaceData(surfaceIndex_);
1866 
1867  // maintainedFaces are indices of faces.
1868  forAll(userFaceData_, i)
1869  {
1870  map.distributeFaceData(userFaceData_[i].second());
1871  }
1872 
1873  // Redistribute surface and any fields on it.
1874  {
1875  Random rndGen(653213);
1876 
1877  // Get local mesh bounding box. Single box for now.
1878  List<treeBoundBox> meshBb(1);
1879  treeBoundBox& bb = meshBb[0];
1880  bb = treeBoundBox(mesh_.points());
1881  bb = bb.extend(rndGen, 1E-4);
1882 
1883  // Distribute all geometry (so refinementSurfaces and shellSurfaces)
1884  searchableSurfaces& geometry =
1885  const_cast<searchableSurfaces&>(surfaces_.geometry());
1886 
1887  forAll(geometry, i)
1888  {
1889  autoPtr<mapDistribute> faceMap;
1890  autoPtr<mapDistribute> pointMap;
1891  geometry[i].distribute
1892  (
1893  meshBb,
1894  false, // do not keep outside triangles
1895  faceMap,
1896  pointMap
1897  );
1898 
1899  if (faceMap.valid())
1900  {
1901  // (ab)use the instance() to signal current modification time
1902  geometry[i].instance() = geometry[i].time().timeName();
1903  }
1904 
1905  faceMap.clear();
1906  pointMap.clear();
1907  }
1908  }
1909 }
1910 
1911 
1912 void Foam::meshRefinement::updateMesh
1913 (
1914  const mapPolyMesh& map,
1915  const labelList& changedFaces
1916 )
1917 {
1918  Map<label> dummyMap(0);
1919 
1920  updateMesh(map, changedFaces, dummyMap, dummyMap, dummyMap);
1921 }
1922 
1923 
1924 void Foam::meshRefinement::storeData
1925 (
1926  const labelList& pointsToStore,
1927  const labelList& facesToStore,
1928  const labelList& cellsToStore
1929 )
1930 {
1931  // For now only meshCutter has storable/retrievable data.
1932  meshCutter_.storeData
1933  (
1934  pointsToStore,
1935  facesToStore,
1936  cellsToStore
1937  );
1938 }
1939 
1940 
1941 void Foam::meshRefinement::updateMesh
1942 (
1943  const mapPolyMesh& map,
1944  const labelList& changedFaces,
1945  const Map<label>& pointsToRestore,
1946  const Map<label>& facesToRestore,
1947  const Map<label>& cellsToRestore
1948 )
1949 {
1950  // For now only meshCutter has storable/retrievable data.
1951 
1952  // Update numbering of cells/vertices.
1953  meshCutter_.updateMesh
1954  (
1955  map,
1956  pointsToRestore,
1957  facesToRestore,
1958  cellsToRestore
1959  );
1960 
1961  // Update surfaceIndex
1962  updateList(map.faceMap(), -1, surfaceIndex_);
1963 
1964  // Update cached intersection information
1965  updateIntersections(changedFaces);
1966 
1967  // Update maintained faces
1968  forAll(userFaceData_, i)
1969  {
1970  labelList& data = userFaceData_[i].second();
1971 
1972  if (userFaceData_[i].first() == KEEPALL)
1973  {
1974  // extend list with face-from-face data
1975  updateList(map.faceMap(), -1, data);
1976  }
1977  else if (userFaceData_[i].first() == MASTERONLY)
1978  {
1979  // keep master only
1980  labelList newFaceData(map.faceMap().size(), -1);
1981 
1982  forAll(newFaceData, faceI)
1983  {
1984  label oldFaceI = map.faceMap()[faceI];
1985 
1986  if (oldFaceI >= 0 && map.reverseFaceMap()[oldFaceI] == faceI)
1987  {
1988  newFaceData[faceI] = data[oldFaceI];
1989  }
1990  }
1991  data.transfer(newFaceData);
1992  }
1993  else
1994  {
1995  // remove any face that has been refined i.e. referenced more than
1996  // once.
1997 
1998  // 1. Determine all old faces that get referenced more than once.
1999  // These get marked with -1 in reverseFaceMap
2000  labelList reverseFaceMap(map.reverseFaceMap());
2001 
2002  forAll(map.faceMap(), faceI)
2003  {
2004  label oldFaceI = map.faceMap()[faceI];
2005 
2006  if (oldFaceI >= 0)
2007  {
2008  if (reverseFaceMap[oldFaceI] != faceI)
2009  {
2010  // faceI is slave face. Mark old face.
2011  reverseFaceMap[oldFaceI] = -1;
2012  }
2013  }
2014  }
2015 
2016  // 2. Map only faces with intact reverseFaceMap
2017  labelList newFaceData(map.faceMap().size(), -1);
2018  forAll(newFaceData, faceI)
2019  {
2020  label oldFaceI = map.faceMap()[faceI];
2021 
2022  if (oldFaceI >= 0)
2023  {
2024  if (reverseFaceMap[oldFaceI] == faceI)
2025  {
2026  newFaceData[faceI] = data[oldFaceI];
2027  }
2028  }
2029  }
2030  data.transfer(newFaceData);
2031  }
2032  }
2033 }
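// Note: userFaceData_ entries pair a keep-mode with a face label list. As the
// code above shows, KEEPALL maps the data face-to-face onto the new mesh,
// MASTERONLY keeps data only for the master face of a split, and any other
// mode drops the data of faces that the faceMap references more than once,
// i.e. faces that have been refined.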
2034 
2035 
2036 bool Foam::meshRefinement::write() const
2037 {
2038  bool writeOk =
2039  mesh_.write()
2040  && meshCutter_.write()
2041  && surfaceIndex_.write();
2042 
2043 
2044  // Make sure that any distributed surfaces (so ones which probably have
2045  // been changed) get written as well.
2046  // Note: should ideally have some 'modified' flag to say whether it
2047  // has been changed or not.
2048  searchableSurfaces& geometry =
2049  const_cast<searchableSurfaces&>(surfaces_.geometry());
2050 
2051  forAll(geometry, i)
2052  {
2053  searchableSurface& s = geometry[i];
2054 
2055  // If the instance() of the surface is neither constant nor system,
2056  // that is a good hint that the surface has been distributed.
2057  if
2058  (
2059  s.instance() != s.time().system()
2060  && s.instance() != s.time().caseSystem()
2061  && s.instance() != s.time().constant()
2062  && s.instance() != s.time().caseConstant()
2063  )
2064  {
2065  // Make sure it gets written to current time, not constant.
2066  s.instance() = s.time().timeName();
2067  writeOk = writeOk && s.write();
2068  }
2069  }
2070 
2071  return writeOk;
2072 }
2073 
2074 
2075 void Foam::meshRefinement::printMeshInfo(const bool debug, const string& msg)
2076  const
2077 {
2078  const globalMeshData& pData = mesh_.globalData();
2079 
2080  if (debug)
2081  {
2082  Pout<< msg.c_str()
2083  << " : cells(local):" << mesh_.nCells()
2084  << " faces(local):" << mesh_.nFaces()
2085  << " points(local):" << mesh_.nPoints()
2086  << endl;
2087  }
2088  else
2089  {
2090  Info<< msg.c_str()
2091  << " : cells:" << pData.nTotalCells()
2092  << " faces:" << pData.nTotalFaces()
2093  << " points:" << pData.nTotalPoints()
2094  << endl;
2095  }
2096 
2097 
2098  //if (debug)
2099  {
2100  const labelList& cellLevel = meshCutter_.cellLevel();
2101 
2102  labelList nCells(gMax(cellLevel)+1, 0);
2103 
2104  forAll(cellLevel, cellI)
2105  {
2106  nCells[cellLevel[cellI]]++;
2107  }
2108 
2109  Pstream::listCombineGather(nCells, plusEqOp<label>());
2110  Pstream::listCombineScatter(nCells);
2111 
2112  Info<< "Cells per refinement level:" << endl;
2113  forAll(nCells, levelI)
2114  {
2115  Info<< " " << levelI << '\t' << nCells[levelI]
2116  << endl;
2117  }
2118  }
2119 }
2120 
2121 
2122 //- Return either oldInstance_ (when overwriting the initial mesh) or the current time name
2123 Foam::word Foam::meshRefinement::timeName() const
2124 {
2125  if (overwrite_ && mesh_.time().timeIndex() == 0)
2126  {
2127  return oldInstance_;
2128  }
2129  else
2130  {
2131  return mesh_.time().timeName();
2132  }
2133 }
2134 
2135 
2136 void Foam::meshRefinement::dumpRefinementLevel() const
2137 {
2138  volScalarField volRefLevel
2139  (
2140  IOobject
2141  (
2142  "cellLevel",
2143  timeName(),
2144  mesh_,
2145  IOobject::NO_READ,
2147  false
2148  ),
2149  mesh_,
2150  dimensionedScalar("zero", dimless, 0),
2151  zeroGradientFvPatchScalarField::typeName
2152  );
2153 
2154  const labelList& cellLevel = meshCutter_.cellLevel();
2155 
2156  forAll(volRefLevel, cellI)
2157  {
2158  volRefLevel[cellI] = cellLevel[cellI];
2159  }
2160 
2161  volRefLevel.write();
2162 
2163 
2164  const pointMesh& pMesh = pointMesh::New(mesh_);
2165 
2166  pointScalarField pointRefLevel
2167  (
2168  IOobject
2169  (
2170  "pointLevel",
2171  timeName(),
2172  mesh_,
2173  IOobject::NO_READ,
2174  IOobject::NO_WRITE,
2175  false
2176  ),
2177  pMesh,
2178  dimensionedScalar("zero", dimless, 0)
2179  );
2180 
2181  const labelList& pointLevel = meshCutter_.pointLevel();
2182 
2183  forAll(pointRefLevel, pointI)
2184  {
2185  pointRefLevel[pointI] = pointLevel[pointI];
2186  }
2187 
2188  pointRefLevel.write();
2189 }
2190 
2191 
2192 void Foam::meshRefinement::dumpIntersections(const fileName& prefix) const
2193 {
2194  {
2195  const pointField& cellCentres = mesh_.cellCentres();
2196 
2197  OFstream str(prefix + "_edges.obj");
2198  label vertI = 0;
2199  Pout<< "meshRefinement::dumpIntersections :"
2200  << " Writing cellcentre-cellcentre intersections to file "
2201  << str.name() << endl;
2202 
2203 
2204  // Redo all intersections
2205  // ~~~~~~~~~~~~~~~~~~~~~~
2206 
2207  // Get boundary face centre and level. Coupled aware.
2208  labelList neiLevel(mesh_.nFaces()-mesh_.nInternalFaces());
2209  pointField neiCc(mesh_.nFaces()-mesh_.nInternalFaces());
2210  calcNeighbourData(neiLevel, neiCc);
2211 
2212  labelList intersectionFaces(intersectedFaces());
2213 
2214  // Collect segments we want to test for
2215  pointField start(intersectionFaces.size());
2216  pointField end(intersectionFaces.size());
2217 
2218  forAll(intersectionFaces, i)
2219  {
2220  label faceI = intersectionFaces[i];
2221  start[i] = cellCentres[mesh_.faceOwner()[faceI]];
2222 
2223  if (mesh_.isInternalFace(faceI))
2224  {
2225  end[i] = cellCentres[mesh_.faceNeighbour()[faceI]];
2226  }
2227  else
2228  {
2229  end[i] = neiCc[faceI-mesh_.nInternalFaces()];
2230  }
2231  }
2232 
2233  // Do tests in one go
2234  labelList surfaceHit;
2235  List<pointIndexHit> surfaceHitInfo;
2236  surfaces_.findAnyIntersection
2237  (
2238  start,
2239  end,
2240  surfaceHit,
2241  surfaceHitInfo
2242  );
2243 
2244  forAll(intersectionFaces, i)
2245  {
2246  if (surfaceHit[i] != -1)
2247  {
2248  meshTools::writeOBJ(str, start[i]);
2249  vertI++;
2250  meshTools::writeOBJ(str, surfaceHitInfo[i].hitPoint());
2251  vertI++;
2252  meshTools::writeOBJ(str, end[i]);
2253  vertI++;
2254  str << "l " << vertI-2 << ' ' << vertI-1 << nl
2255  << "l " << vertI-1 << ' ' << vertI << nl;
2256  }
2257  }
2258  }
2259 
2260  // Convert to vtk format
2261  string cmd
2262  (
2263  "objToVTK " + prefix + "_edges.obj " + prefix + "_edges.vtk > /dev/null"
2264  );
2265  system(cmd.c_str());
2266 
2267  Pout<< endl;
2268 }
2269 
2270 
2271 void Foam::meshRefinement::write
2272 (
2273  const label flag,
2274  const fileName& prefix
2275 ) const
2276 {
2277  if (flag & MESH)
2278  {
2279  write();
2280  }
2281  if (flag & SCALARLEVELS)
2282  {
2283  dumpRefinementLevel();
2284  }
2285  if (flag & OBJINTERSECTIONS && prefix.size())
2286  {
2287  dumpIntersections(prefix);
2288  }
2289 }
2290 
2291 
2292 // ************************ vim: set sw=4 sts=4 et: ************************ //