FreeFOAM The Cross-Platform CFD Toolkit
globalMeshData.C
/*---------------------------------------------------------------------------*\
  =========                 |
  \\      /  F ield         | OpenFOAM: The Open Source CFD Toolbox
   \\    /   O peration     |
    \\  /    A nd           | Copyright (C) 1991-2010 OpenCFD Ltd.
     \\/     M anipulation  |
-------------------------------------------------------------------------------
License
    This file is part of OpenFOAM.

    OpenFOAM is free software: you can redistribute it and/or modify it
    under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
    ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
    for more details.

    You should have received a copy of the GNU General Public License
    along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.

\*---------------------------------------------------------------------------*/

#include "globalMeshData.H"
#include <OpenFOAM/Time.H>
#include <OpenFOAM/Pstream.H>
#include <OpenFOAM/PstreamCombineReduceOps.H>
#include <OpenFOAM/processorPolyPatch.H>
#include <OpenFOAM/demandDrivenData.H>
#include "globalPoints.H"
//#include "geomGlobalPoints.H"
#include <OpenFOAM/labelIOList.H>
#include <OpenFOAM/PackedList.H>
#include <OpenFOAM/mergePoints.H>
#include <OpenFOAM/matchPoints.H>
#include <OpenFOAM/OFstream.H>

// * * * * * * * * * * * * * * Static Data Members  * * * * * * * * * * * * //

defineTypeNameAndDebug(Foam::globalMeshData, 0);

// Geometric matching tolerance. Factor of mesh bounding box.
const Foam::scalar Foam::globalMeshData::matchTol_ = 1E-8;
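// Example: for a cubic mesh with sides of 1 m, bb_.mag() (the bounding-box
// diagonal) is sqrt(3) ~ 1.73 m, so the absolute merge distance used below
// is tolDim = matchTol_*bb_.mag() ~ 1.7e-8 m.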


// * * * * * * * * * * * * * Private Member Functions  * * * * * * * * * * //

// Collect processor patch addressing.
void Foam::globalMeshData::initProcAddr()
{
    processorPatchIndices_.setSize(mesh_.boundaryMesh().size());
    processorPatchIndices_ = -1;

    processorPatchNeighbours_.setSize(mesh_.boundaryMesh().size());
    processorPatchNeighbours_ = -1;

    // Construct processor patch indexing. processorPatchNeighbours_ only
    // gets set if running in parallel!
    processorPatches_.setSize(mesh_.boundaryMesh().size());

    label nNeighbours = 0;

    forAll (mesh_.boundaryMesh(), patchi)
    {
        if (isA<processorPolyPatch>(mesh_.boundaryMesh()[patchi]))
        {
            processorPatches_[nNeighbours] = patchi;
            processorPatchIndices_[patchi] = nNeighbours++;
        }
    }
    processorPatches_.setSize(nNeighbours);


    if (Pstream::parRun())
    {
        // Send indices of my processor patches to my neighbours
        forAll (processorPatches_, i)
        {
            label patchi = processorPatches_[i];

            OPstream toNeighbour
            (
                Pstream::blocking,
                refCast<const processorPolyPatch>
                (
                    mesh_.boundaryMesh()[patchi]
                ).neighbProcNo()
            );

            toNeighbour << processorPatchIndices_[patchi];
        }

        forAll(processorPatches_, i)
        {
            label patchi = processorPatches_[i];

            IPstream fromNeighbour
            (
                Pstream::blocking,
                refCast<const processorPolyPatch>
                (
                    mesh_.boundaryMesh()[patchi]
                ).neighbProcNo()
            );

            fromNeighbour >> processorPatchNeighbours_[patchi];
        }
    }
}
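
// Example (hypothetical patch layout, for illustration): for boundary
// patches (inlet outlet procBoundary1to2) the loop above gives
//
//     processorPatches_      = (2)
//     processorPatchIndices_ = (-1 -1 0)
//
// and after the exchange processorPatchNeighbours_[2] holds the index that
// the neighbouring processor assigned to its side of procBoundary1to2.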


// Given information about locally used edges allocate global shared edges.
void Foam::globalMeshData::countSharedEdges
(
    const HashTable<labelList, edge, Hash<edge> >& procSharedEdges,
    HashTable<label, edge, Hash<edge> >& globalShared,
    label& sharedEdgeI
)
{
    // Count occurrences of procSharedEdges in the global shared-edges table.
    for
    (
        HashTable<labelList, edge, Hash<edge> >::const_iterator iter =
            procSharedEdges.begin();
        iter != procSharedEdges.end();
        ++iter
    )
    {
        const edge& e = iter.key();

        HashTable<label, edge, Hash<edge> >::iterator globalFnd =
            globalShared.find(e);

        if (globalFnd == globalShared.end())
        {
            // First occurrence of this edge. Check how many we are adding.
            if (iter().size() == 1)
            {
                // Only one edge. Mark with special value.
                globalShared.insert(e, -1);
            }
            else
            {
                // Edge used more than once (even by local shared edges
                // alone) so allocate a proper shared edge label.
                globalShared.insert(e, sharedEdgeI++);
            }
        }
        else
        {
            if (globalFnd() == -1)
            {
                // Second occurrence of this edge. Assign a proper
                // edge label.
                globalFnd() = sharedEdgeI++;
            }
        }
    }
}
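
// Worked example (labels made up for illustration): if processors 0 and 1
// each report edge (3 7) exactly once, the first report inserts
// (3 7) -> -1 and the second upgrades it to the next sharedEdgeI, i.e. a
// real shared-edge label. An edge reported only once in total keeps the
// -1 marker and is filtered out later in calcSharedEdges().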


// Shared edges are shared between multiple processors. By their nature both
// of their endpoints are shared points (but not all edges using two shared
// points are shared edges! There might e.g. be an edge between two unrelated
// clusters of shared points).
void Foam::globalMeshData::calcSharedEdges() const
{
    if (nGlobalEdges_ != -1 || sharedEdgeLabelsPtr_ || sharedEdgeAddrPtr_)
    {
        FatalErrorIn("globalMeshData::calcSharedEdges()")
            << "Shared edge addressing already done" << abort(FatalError);
    }


    const labelList& sharedPtAddr = sharedPointAddr();
    const labelList& sharedPtLabels = sharedPointLabels();

    // Since we don't want to construct pointEdges for the whole mesh,
    // create a Map for all shared points.
    Map<label> meshToShared(2*sharedPtLabels.size());
    forAll(sharedPtLabels, i)
    {
        meshToShared.insert(sharedPtLabels[i], i);
    }

    // Find edges using shared points. Store correspondence to local edge
    // numbering. Note that multiple local edges can have the same shared
    // points! (for cyclics or separated processor patches)
    HashTable<labelList, edge, Hash<edge> > localShared
    (
        2*sharedPtAddr.size()
    );

    const edgeList& edges = mesh_.edges();

    forAll(edges, edgeI)
    {
        const edge& e = edges[edgeI];

        Map<label>::const_iterator e0Fnd = meshToShared.find(e[0]);

        if (e0Fnd != meshToShared.end())
        {
            Map<label>::const_iterator e1Fnd = meshToShared.find(e[1]);

            if (e1Fnd != meshToShared.end())
            {
                // Found edge which uses shared points. Probably shared.

                // Construct the edge in shared points (or rather global
                // indices of the shared points)
                edge sharedEdge
                (
                    sharedPtAddr[e0Fnd()],
                    sharedPtAddr[e1Fnd()]
                );

                HashTable<labelList, edge, Hash<edge> >::iterator iter =
                    localShared.find(sharedEdge);

                if (iter == localShared.end())
                {
                    // First occurrence of this point combination. Store.
                    localShared.insert(sharedEdge, labelList(1, edgeI));
                }
                else
                {
                    // Add this edge to the list of edge labels.
                    labelList& edgeLabels = iter();

                    label sz = edgeLabels.size();
                    edgeLabels.setSize(sz+1);
                    edgeLabels[sz] = edgeI;
                }
            }
        }
    }


    // Now we have on every processor a table which gives its edges that use
    // shared points. Send this all to the master and have it allocate
    // global edge numbers, but only allocate a global edge number for an
    // edge if it is used more than once!
    // Note that we are sending the whole localShared to the master whereas
    // we only need the local count (i.e. the number of times a global edge
    // is used). But this only gets done once, so the extra global
    // communication is acceptable.

    HashTable<label, edge, Hash<edge> > globalShared(nGlobalPoints());

    if (Pstream::master())
    {
        label sharedEdgeI = 0;

        // Merge my shared edges into the global list
        if (debug)
        {
            Pout<< "globalMeshData::calcSharedEdges : Merging in from proc0 : "
                << localShared.size() << endl;
        }
        countSharedEdges(localShared, globalShared, sharedEdgeI);

        // Receive data from slaves and insert
        if (Pstream::parRun())
        {
            for
            (
                int slave=Pstream::firstSlave();
                slave<=Pstream::lastSlave();
                slave++
            )
            {
                // Receive the edges using shared points from the slave.
                IPstream fromSlave(Pstream::blocking, slave);
                HashTable<labelList, edge, Hash<edge> > procSharedEdges
                (
                    fromSlave
                );

                if (debug)
                {
                    Pout<< "globalMeshData::calcSharedEdges : "
                        << "Merging in from proc"
                        << Foam::name(slave) << " : " << procSharedEdges.size()
                        << endl;
                }
                countSharedEdges(procSharedEdges, globalShared, sharedEdgeI);
            }
        }

        // Now our globalShared should have some edges with -1 as edge label.
        // These were only used once so are not proper shared edges.
        // Remove them.
        {
            HashTable<label, edge, Hash<edge> > oldSharedEdges(globalShared);

            globalShared.clear();

            for
            (
                HashTable<label, edge, Hash<edge> >::const_iterator iter =
                    oldSharedEdges.begin();
                iter != oldSharedEdges.end();
                ++iter
            )
            {
                if (iter() != -1)
                {
                    globalShared.insert(iter.key(), iter());
                }
            }
            if (debug)
            {
                Pout<< "globalMeshData::calcSharedEdges : Filtered "
                    << oldSharedEdges.size()
                    << " down to " << globalShared.size() << endl;
            }
        }


        // Send back to slaves.
        if (Pstream::parRun())
        {
            for
            (
                int slave=Pstream::firstSlave();
                slave<=Pstream::lastSlave();
                slave++
            )
            {
                // Send the merged edges to the slave.
                OPstream toSlave(Pstream::blocking, slave);
                toSlave << globalShared;
            }
        }
    }
    else
    {
        // Send local edges to master
        {
            OPstream toMaster(Pstream::blocking, Pstream::masterNo());

            toMaster << localShared;
        }
        // Receive merged edges from master.
        {
            IPstream fromMaster(Pstream::blocking, Pstream::masterNo());

            fromMaster >> globalShared;
        }
    }

    // Now use the global shared edges list (globalShared) to classify my
    // local ones (localShared)

    nGlobalEdges_ = globalShared.size();

    DynamicList<label> dynSharedEdgeLabels(globalShared.size());
    DynamicList<label> dynSharedEdgeAddr(globalShared.size());

    for
    (
        HashTable<labelList, edge, Hash<edge> >::const_iterator iter =
            localShared.begin();
        iter != localShared.end();
        ++iter
    )
    {
        const edge& e = iter.key();

        HashTable<label, edge, Hash<edge> >::const_iterator edgeFnd =
            globalShared.find(e);

        if (edgeFnd != globalShared.end())
        {
            // My local edge is indeed a shared one. Go through all local
            // edge labels with this point combination.
            const labelList& edgeLabels = iter();

            forAll(edgeLabels, i)
            {
                // Store label of local mesh edge
                dynSharedEdgeLabels.append(edgeLabels[i]);

                // Store label of shared edge
                dynSharedEdgeAddr.append(edgeFnd());
            }
        }
    }

    sharedEdgeLabelsPtr_ = new labelList();
    labelList& sharedEdgeLabels = *sharedEdgeLabelsPtr_;
    sharedEdgeLabels.transfer(dynSharedEdgeLabels);

    sharedEdgeAddrPtr_ = new labelList();
    labelList& sharedEdgeAddr = *sharedEdgeAddrPtr_;
    sharedEdgeAddr.transfer(dynSharedEdgeAddr);

    if (debug)
    {
        Pout<< "globalMeshData : nGlobalEdges_:" << nGlobalEdges_ << nl
            << "globalMeshData : sharedEdgeLabels:" << sharedEdgeLabels.size()
            << nl
            << "globalMeshData : sharedEdgeAddr:" << sharedEdgeAddr.size()
            << endl;
    }
}
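
// Usage sketch (illustrative): the two lists built above pair up, so for
// every index i
//
//     sharedEdgeLabels()[i]  -- label of a local mesh edge
//     sharedEdgeAddr()[i]    -- global shared-edge label it maps to
//
// i.e. mesh_.edges()[sharedEdgeLabels()[i]] is this processor's instance
// of global shared edge sharedEdgeAddr()[i].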


// Helper function to count coincident faces. This part used to be
// in updateMesh but I've had to move it to a separate function
// because of aliasing optimisation errors in icc9.1 on the
// Itanium.
Foam::label Foam::globalMeshData::countCoincidentFaces
(
    const scalar tolDim,
    const vectorField& separationDist
)
{
    label nCoincident = 0;

    forAll(separationDist, faceI)
    {
        if (mag(separationDist[faceI]) < tolDim)
        {
            // Faces coincide
            nCoincident++;
        }
    }
    return nCoincident;
}
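
// Note on usage: separationDist is the coupled patch's separation(), the
// offset vector(s) from one half of a separated coupled patch to the other;
// a face whose separation magnitude is below tolDim coincides with its
// remote counterpart and should be counted only once.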


// * * * * * * * * * * * * * * * * Constructors  * * * * * * * * * * * * * //

// Construct from polyMesh
Foam::globalMeshData::globalMeshData(const polyMesh& mesh)
:
    processorTopology(mesh.boundaryMesh()),
    mesh_(mesh),
    bb_(vector::zero, vector::zero),
    nTotalPoints_(-1),
    nTotalFaces_(-1),
    nTotalCells_(-1),
    processorPatches_(0),
    processorPatchIndices_(0),
    processorPatchNeighbours_(0),
    nGlobalPoints_(-1),
    sharedPointLabels_(0),
    sharedPointAddr_(0),
    sharedPointGlobalLabelsPtr_(NULL),
    nGlobalEdges_(-1),
    sharedEdgeLabelsPtr_(NULL),
    sharedEdgeAddrPtr_(NULL)
{
    updateMesh();
}


// Read constructor given IOobject and a polyMesh reference
Foam::globalMeshData::globalMeshData(const IOobject& io, const polyMesh& mesh)
:
    processorTopology(mesh.boundaryMesh()),
    mesh_(mesh),
    bb_(mesh.points()),
    nTotalPoints_(-1),
    nTotalFaces_(-1),
    nTotalCells_(-1),
    processorPatches_(0),
    processorPatchIndices_(0),
    processorPatchNeighbours_(0),
    nGlobalPoints_(-1),
    sharedPointLabels_(0),
    sharedPointAddr_(0),
    sharedPointGlobalLabelsPtr_(NULL),
    nGlobalEdges_(-1),
    sharedEdgeLabelsPtr_(NULL),
    sharedEdgeAddrPtr_(NULL)
{
    initProcAddr();

    IOdictionary dict(io);

    dict.lookup("nTotalPoints") >> nTotalPoints_;
    dict.lookup("nTotalFaces") >> nTotalFaces_;
    dict.lookup("nTotalCells") >> nTotalCells_;
    dict.lookup("nGlobalPoints") >> nGlobalPoints_;
    dict.lookup("sharedPointLabels") >> sharedPointLabels_;
    dict.lookup("sharedPointAddr") >> sharedPointAddr_;
    labelList sharedPointGlobalLabels(dict.lookup("sharedPointGlobalLabels"));

    sharedPointGlobalLabelsPtr_ = new labelList(sharedPointGlobalLabels);
}


// * * * * * * * * * * * * * * * * Destructor  * * * * * * * * * * * * * * //

Foam::globalMeshData::~globalMeshData()
{
    clearOut();
}


void Foam::globalMeshData::clearOut()
{
    // Point
    deleteDemandDrivenData(sharedPointGlobalLabelsPtr_);
    // Edge
    nGlobalEdges_ = -1;
    deleteDemandDrivenData(sharedEdgeLabelsPtr_);
    deleteDemandDrivenData(sharedEdgeAddrPtr_);
}


// * * * * * * * * * * * * * * * Member Functions  * * * * * * * * * * * * //

// Return shared point global labels.
const Foam::labelList& Foam::globalMeshData::sharedPointGlobalLabels() const
{
    if (!sharedPointGlobalLabelsPtr_)
    {
        sharedPointGlobalLabelsPtr_ = new labelList(sharedPointLabels_.size());
        labelList& sharedPointGlobalLabels = *sharedPointGlobalLabelsPtr_;

        IOobject addrHeader
        (
            "pointProcAddressing",
            mesh_.facesInstance()/mesh_.meshSubDir,
            mesh_,
            IOobject::MUST_READ
        );

        if (addrHeader.headerOk())
        {
            // There is a pointProcAddressing file so use it to get labels
            // on the original mesh
            Pout<< "globalMeshData::sharedPointGlobalLabels : "
                << "Reading pointProcAddressing" << endl;

            labelIOList pointProcAddressing(addrHeader);

            forAll(sharedPointLabels_, i)
            {
                // Get my mesh point
                label pointI = sharedPointLabels_[i];

                // Map to mesh point of original mesh
                sharedPointGlobalLabels[i] = pointProcAddressing[pointI];
            }
        }
        else
        {
            Pout<< "globalMeshData::sharedPointGlobalLabels :"
                << " Setting pointProcAddressing to -1" << endl;

            sharedPointGlobalLabels = -1;
        }
    }
    return *sharedPointGlobalLabelsPtr_;
}


// Collect coordinates of shared points. (does parallel communication!)
Foam::pointField Foam::globalMeshData::sharedPoints() const
{
    // Get all processors to send their shared points to the master.
    // (not very efficient)

    pointField sharedPoints(nGlobalPoints_);

    if (Pstream::master())
    {
        // Master:
        // insert my own data first
        forAll(sharedPointLabels_, i)
        {
            label sharedPointI = sharedPointAddr_[i];

            sharedPoints[sharedPointI] = mesh_.points()[sharedPointLabels_[i]];
        }

        // Receive data from slaves and insert
        for
        (
            int slave=Pstream::firstSlave();
            slave<=Pstream::lastSlave();
            slave++
        )
        {
            IPstream fromSlave(Pstream::blocking, slave);

            labelList nbrSharedPointAddr;
            pointField nbrSharedPoints;
            fromSlave >> nbrSharedPointAddr >> nbrSharedPoints;

            forAll(nbrSharedPointAddr, i)
            {
                label sharedPointI = nbrSharedPointAddr[i];

                sharedPoints[sharedPointI] = nbrSharedPoints[i];
            }
        }

        // Send back
        for
        (
            int slave=Pstream::firstSlave();
            slave<=Pstream::lastSlave();
            slave++
        )
        {
            OPstream toSlave
            (
                Pstream::blocking,
                slave,
                sharedPoints.size()*sizeof(vector::zero)
            );
            toSlave << sharedPoints;
        }
    }
    else
    {
        // Slave:
        // send points
        {
            OPstream toMaster(Pstream::blocking, Pstream::masterNo());

            toMaster
                << sharedPointAddr_
                << UIndirectList<point>(mesh_.points(), sharedPointLabels_)();
        }

        // Receive sharedPoints
        {
            IPstream fromMaster(Pstream::blocking, Pstream::masterNo());

            fromMaster >> sharedPoints;
        }
    }

    return sharedPoints;
}


// Collect coordinates of shared points. (does parallel communication!)
Foam::pointField Foam::globalMeshData::geometricSharedPoints() const
{
    // Get coords of my shared points
    pointField sharedPoints(sharedPointLabels_.size());

    forAll(sharedPointLabels_, i)
    {
        label meshPointI = sharedPointLabels_[i];

        sharedPoints[i] = mesh_.points()[meshPointI];
    }

    // Append from all processors
    combineReduce(sharedPoints, plusEqOp<pointField>());

    // Merge tolerance
    scalar tolDim = matchTol_ * bb_.mag();

    // And see how many are unique
    labelList pMap;
    pointField mergedPoints;

    mergePoints
    (
        sharedPoints,   // coordinates to merge
        tolDim,         // tolerance
        false,          // verbosity
        pMap,
        mergedPoints
    );

    return mergedPoints;
}


Foam::label Foam::globalMeshData::nGlobalEdges() const
{
    if (nGlobalEdges_ == -1)
    {
        calcSharedEdges();
    }
    return nGlobalEdges_;
}


const Foam::labelList& Foam::globalMeshData::sharedEdgeLabels() const
{
    if (!sharedEdgeLabelsPtr_)
    {
        calcSharedEdges();
    }
    return *sharedEdgeLabelsPtr_;
}


const Foam::labelList& Foam::globalMeshData::sharedEdgeAddr() const
{
    if (!sharedEdgeAddrPtr_)
    {
        calcSharedEdges();
    }
    return *sharedEdgeAddrPtr_;
}


void Foam::globalMeshData::movePoints(const pointField& newPoints)
{
    // Topology does not change and we don't store any geometry so nothing
    // needs to be done.
}


// Update all data after morph
void Foam::globalMeshData::updateMesh()
{
    // Clear out old data
    clearOut();

    // Do processor patch addressing
    initProcAddr();

    // Note: the boundBox constructor does a parallel reduce
    bb_ = boundBox(mesh_.points());

    scalar tolDim = matchTol_ * bb_.mag();

    if (debug)
    {
        Pout<< "globalMeshData : bb_:" << bb_
            << " merge dist:" << tolDim << endl;
    }


    // Option 1. Topological
    {
        // Calculate all shared points. This does all the hard work.
        globalPoints parallelPoints(mesh_);

        // Copy data out.
        nGlobalPoints_ = parallelPoints.nGlobalPoints();
        sharedPointLabels_ = parallelPoints.sharedPointLabels();
        sharedPointAddr_ = parallelPoints.sharedPointAddr();
    }

    // Option 2. Geometric
    //{
    //    // Calculate all shared points. This does all the hard work.
    //    geomGlobalPoints parallelPoints(mesh_, tolDim);
    //
    //    // Copy data out.
    //    nGlobalPoints_ = parallelPoints.nGlobalPoints();
    //    sharedPointLabels_ = parallelPoints.sharedPointLabels();
    //    sharedPointAddr_ = parallelPoints.sharedPointAddr();
    //
    //    nGlobalEdges_ = parallelPoints.nGlobalEdges();
    //    sharedEdgeLabels_ = parallelPoints.sharedEdgeLabels();
    //    sharedEdgeAddr_ = parallelPoints.sharedEdgeAddr();
    //}

    // Total number of faces. Start off from all faces. Remove coincident
    // processor faces (on highest numbered processor) before summing.
    nTotalFaces_ = mesh_.nFaces();

    // Do not count processor-patch faces that are coincident.
    forAll(processorPatches_, i)
    {
        label patchI = processorPatches_[i];

        const processorPolyPatch& procPatch =
            refCast<const processorPolyPatch>(mesh_.boundaryMesh()[patchI]);

        if (Pstream::myProcNo() > procPatch.neighbProcNo())
        {
            // Uncount my faces. Handle cyclics separately.

            if (procPatch.separated())
            {
                const vectorField& separationDist = procPatch.separation();

                nTotalFaces_ -= countCoincidentFaces(tolDim, separationDist);
            }
            else
            {
                // Normal, unseparated processor patch. Remove duplicates.
                nTotalFaces_ -= procPatch.size();
            }
        }
    }
    reduce(nTotalFaces_, sumOp<label>());

    if (debug)
    {
        Pout<< "globalMeshData : nTotalFaces_:" << nTotalFaces_ << endl;
    }


    nTotalCells_ = mesh_.nCells();
    reduce(nTotalCells_, sumOp<label>());

    if (debug)
    {
        Pout<< "globalMeshData : nTotalCells_:" << nTotalCells_ << endl;
    }

    nTotalPoints_ = mesh_.nPoints();

    // Correct points for duplicate ones. We have
    // - points shared between 2 processor patches only. Count only on the
    //   lower numbered processor. Make sure to count only once since points
    //   can be on multiple patches on the same processor.
    // - globally shared points.

    if (Pstream::parRun())
    {
        const label UNSET = 0;      // not set
        const label SHARED = 1;     // globally shared
        const label VISITED = 2;    // corrected for
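
        // (A PackedList<2> stores one 2-bit entry per mesh point, so the
        // three states above fit without allocating a full labelList.)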

        // Mark globally shared points
        PackedList<2> pointStatus(mesh_.nPoints(), UNSET);

        forAll(sharedPointLabels_, i)
        {
            label meshPointI = sharedPointLabels_[i];

            pointStatus.set(meshPointI, SHARED);
        }

        // Send patch local points
        forAll(processorPatches_, i)
        {
            label patchI = processorPatches_[i];

            const processorPolyPatch& procPatch =
                refCast<const processorPolyPatch>(mesh_.boundaryMesh()[patchI]);

            OPstream toNeighbour(Pstream::blocking, procPatch.neighbProcNo());

            toNeighbour << procPatch.localPoints();
        }

        // Receive patch local points and uncount if coincident (and not
        // shared)
        forAll(processorPatches_, i)
        {
            label patchI = processorPatches_[i];

            const processorPolyPatch& procPatch =
                refCast<const processorPolyPatch>(mesh_.boundaryMesh()[patchI]);

            IPstream fromNeighbour(Pstream::blocking, procPatch.neighbProcNo());

            pointField nbrPoints(fromNeighbour);

            if (Pstream::myProcNo() > procPatch.neighbProcNo())
            {
                labelList pMap;
                matchPoints
                (
                    procPatch.localPoints(),
                    nbrPoints,
                    scalarField(procPatch.nPoints(), tolDim),   // tolerance
                    false,      // verbosity
                    pMap        // map from my points to nbrPoints
                );

                forAll(pMap, patchPointI)
                {
                    label meshPointI = procPatch.meshPoints()[patchPointI];

                    label stat = pointStatus.get(meshPointI);

                    if (stat == UNSET)
                    {
                        // Mark point as visited so if the point is on
                        // multiple processor patches it only gets uncounted
                        // once.
                        pointStatus.set(meshPointI, VISITED);

                        if (pMap[patchPointI] != -1)
                        {
                            // Points share the same coordinate so uncount.
                            nTotalPoints_--;
                        }
                    }
                }
            }
        }
        // Sum all points
        reduce(nTotalPoints_, sumOp<label>());
    }

    // nTotalPoints has not been corrected yet for globally shared points.
    // For these just collect all their coordinates and count the unique
    // ones.

    label mySharedPoints = sharedPointLabels_.size();
    reduce(mySharedPoints, sumOp<label>());

    // Collect and merge shared points (does parallel communication)
    pointField geomSharedPoints(geometricSharedPoints());
    label nGeomSharedPoints = geomSharedPoints.size();

    // Shared points merged down to mergedPoints size.
    nTotalPoints_ -= mySharedPoints - nGeomSharedPoints;
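
    // Illustration (made-up numbers): if 12 shared-point instances were
    // counted across all processors but they merge to 4 unique locations,
    // the sums above overcount by 12 - 4 = 8, which is subtracted here.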

    if (debug)
    {
        Pout<< "globalMeshData : nTotalPoints_:" << nTotalPoints_ << endl;
    }

    //
    // Now we have all the info we wanted.
    // Do some checking (if debug is set)
    //

    if (debug)
    {
        if (Pstream::master())
        {
            // We have the geometricSharedPoints already so write them.
            // Ideally we would like to write the networks of connected
            // points as well but this is harder. (Todo)
            Pout<< "globalMeshData : writing geometrically separated shared"
                << " points to geomSharedPoints.obj" << endl;

            OFstream str("geomSharedPoints.obj");

            forAll(geomSharedPoints, i)
            {
                const point& pt = geomSharedPoints[i];

                str << "v " << pt.x() << ' ' << pt.y() << ' ' << pt.z()
                    << nl;
            }
        }
    }
}


// Write data
bool Foam::globalMeshData::write() const
{
    IOdictionary dict
    (
        IOobject
        (
            "parallelData",
            mesh_.facesInstance(),
            mesh_.meshSubDir,
            mesh_
        )
    );

    dict.add("nTotalPoints", nTotalPoints());
    dict.add("nTotalFaces", nTotalFaces());
    dict.add("nTotalCells", nTotalCells());

    dict.add("nGlobalPoints", nGlobalPoints());
    dict.add("sharedPointLabels", sharedPointLabels());
    dict.add("sharedPointAddr", sharedPointAddr());
    dict.add("sharedPointGlobalLabels", sharedPointGlobalLabels());

    return dict.writeObject
    (
        IOstream::ASCII,
        IOstream::currentVersion,
        IOstream::UNCOMPRESSED
    );
}


// * * * * * * * * * * * * * * * Ostream Operators  * * * * * * * * * * * * //

Foam::Ostream& Foam::operator<<(Ostream& os, const globalMeshData& p)
{
    os  << "nTotalPoints " << p.nTotalPoints() << token::END_STATEMENT << nl
        << "nTotalFaces " << p.nTotalFaces() << token::END_STATEMENT << nl
        << "nTotalCells " << p.nTotalCells() << token::END_STATEMENT << nl
        << "nGlobalPoints " << p.nGlobalPoints() << token::END_STATEMENT << nl
        << "sharedPointLabels " << p.sharedPointLabels()
        << token::END_STATEMENT << nl
        << "sharedPointAddr " << p.sharedPointAddr()
        << token::END_STATEMENT << endl;

    return os;
}


// ************************ vim: set sw=4 sts=4 et: ************************ //