1 // -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2 // vi: set et ts=4 sw=2 sts=2:
3 #ifndef DUNE_GRID_YASPGRID_HH
4 #define DUNE_GRID_YASPGRID_HH
5 
6 #include <cstdint>
7 #include <iostream>
8 #include <vector>
9 #include <algorithm>
10 #include <stack>
11 #include <type_traits>
12 
13 #include <dune/grid/common/backuprestore.hh>
14 #include <dune/grid/common/grid.hh>     // the grid base classes
15 #include <dune/grid/common/capabilities.hh> // the capabilities
16 #include <dune/common/hybridutilities.hh>
17 #include <dune/common/power.hh>
18 #include <dune/common/bigunsignedint.hh>
19 #include <dune/common/typetraits.hh>
20 #include <dune/common/reservedvector.hh>
21 #include <dune/common/parallel/communication.hh>
22 #include <dune/common/parallel/mpihelper.hh>
23 #include <dune/geometry/axisalignedcubegeometry.hh>
24 #include <dune/geometry/type.hh>
25 #include <dune/grid/common/indexidset.hh>
26 #include <dune/grid/common/datahandleif.hh>
27 
28 
29 #if HAVE_MPI
30 #include <dune/common/parallel/mpicommunication.hh>
31 #endif
32 
33 /*! \file yaspgrid.hh
34  * YaspGrid stands for yet another structured parallel grid.
35  * It implements the dune grid interface for structured grids
36  * with arbitrary overlap, parallel features with two overlap models,
37  * periodic boundaries, and a fast implementation allowing on-the-fly computations.
38  */
39 
40 namespace Dune {
41 
42   /* some sizes for building global ids
43    */
44   const int yaspgrid_dim_bits = 24; // bits for encoding each dimension
45   const int yaspgrid_level_bits = 5; // bits for encoding level number
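  // For example, with dim == 3 the persistent index type defined further below,
  // bigunsignedint<dim*yaspgrid_dim_bits + yaspgrid_level_bits + dim>, uses 3*24 + 5 + 3 = 80 bits.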
46 
47 
48   //************************************************************************
49   // forward declaration of templates
50 
51   template<int dim, class Coordinates>                             class YaspGrid;
52   template<int mydim, int cdim, class GridImp>  class YaspGeometry;
53   template<int codim, int dim, class GridImp>   class YaspEntity;
54   template<int codim, class GridImp>            class YaspEntitySeed;
55   template<int codim, PartitionIteratorType pitype, class GridImp> class YaspLevelIterator;
56   template<class GridImp>            class YaspIntersectionIterator;
57   template<class GridImp>            class YaspIntersection;
58   template<class GridImp>            class YaspHierarchicIterator;
59   template<class GridImp, bool isLeafIndexSet>                     class YaspIndexSet;
60   template<class GridImp>            class YaspGlobalIdSet;
61   template<class GridImp>            class YaspPersistentContainerIndex;
62 
63 } // namespace Dune
64 
65 #include <dune/grid/yaspgrid/coordinates.hh>
66 #include <dune/grid/yaspgrid/torus.hh>
67 #include <dune/grid/yaspgrid/ygrid.hh>
68 #include <dune/grid/yaspgrid/yaspgridgeometry.hh>
69 #include <dune/grid/yaspgrid/yaspgridentity.hh>
70 #include <dune/grid/yaspgrid/yaspgridintersection.hh>
71 #include <dune/grid/yaspgrid/yaspgridintersectioniterator.hh>
72 #include <dune/grid/yaspgrid/yaspgridhierarchiciterator.hh>
73 #include <dune/grid/yaspgrid/yaspgridentityseed.hh>
74 #include <dune/grid/yaspgrid/yaspgridleveliterator.hh>
75 #include <dune/grid/yaspgrid/yaspgridindexsets.hh>
76 #include <dune/grid/yaspgrid/yaspgrididset.hh>
77 #include <dune/grid/yaspgrid/yaspgridpersistentcontainer.hh>
78 
79 namespace Dune {
80 
81 #if HAVE_MPI
82   using YaspCollectiveCommunication = CollectiveCommunication<MPI_Comm>;
83 #else
84   using YaspCollectiveCommunication = CollectiveCommunication<No_Comm>;
85 #endif
86 
87   template<int dim, class Coordinates>
88   struct YaspGridFamily
89   {
90     typedef YaspCollectiveCommunication CCType;
91 
92     typedef GridTraits<dim,                                     // dimension of the grid
93         dim,                                                    // dimension of the world space
94         Dune::YaspGrid<dim, Coordinates>,
95         YaspGeometry,YaspEntity,
96         YaspLevelIterator,                                      // type used for the level iterator
97         YaspIntersection,              // leaf  intersection
98         YaspIntersection,              // level intersection
99         YaspIntersectionIterator,              // leaf  intersection iter
100         YaspIntersectionIterator,              // level intersection iter
101         YaspHierarchicIterator,
102         YaspLevelIterator,                                      // type used for the leaf(!) iterator
103         YaspIndexSet< const YaspGrid< dim, Coordinates >, false >,                  // level index set
104         YaspIndexSet< const YaspGrid< dim, Coordinates >, true >,                  // leaf index set
105         YaspGlobalIdSet<const YaspGrid<dim, Coordinates> >,
106         bigunsignedint<dim*yaspgrid_dim_bits+yaspgrid_level_bits+dim>,
107         YaspGlobalIdSet<const YaspGrid<dim, Coordinates> >,
108         bigunsignedint<dim*yaspgrid_dim_bits+yaspgrid_level_bits+dim>,
109         CCType,
110         DefaultLevelGridViewTraits, DefaultLeafGridViewTraits,
111         YaspEntitySeed>
112     Traits;
113   };
114 
115 #ifndef DOXYGEN
116   template<int dim, int codim>
117   struct YaspCommunicateMeta {
118     template<class G, class DataHandle>
119     static void comm (const G& g, DataHandle& data, InterfaceType iftype, CommunicationDirection dir, int level)
120     {
121       if (data.contains(dim,codim))
122       {
123         g.template communicateCodim<DataHandle,codim>(data,iftype,dir,level);
124       }
125       YaspCommunicateMeta<dim,codim-1>::comm(g,data,iftype,dir,level);
126     }
127   };
128 
129   template<int dim>
130   struct YaspCommunicateMeta<dim,0> {
131     template<class G, class DataHandle>
132     static void comm (const G& g, DataHandle& data, InterfaceType iftype, CommunicationDirection dir, int level)
133     {
134       if (data.contains(dim,0))
135         g.template communicateCodim<DataHandle,0>(data,iftype,dir,level);
136     }
137   };
138 #endif
139 
140   //************************************************************************
141   /*!
142    * \brief [<em> provides \ref Dune::Grid </em>]
143    * \brief Provides a distributed structured cube mesh.
144    * \ingroup GridImplementations
145    *
146    * YaspGrid stands for yet another structured parallel grid.
147    * It implements the dune grid interface for structured grids
148    * with arbitrary overlap (including zero),
149    * periodic boundaries, and a fast implementation allowing on-the-fly computations.
150    *
151    * YaspGrid supports three coordinate modes: \ref EquidistantCoordinates,
152    * \ref EquidistantOffsetCoordinates, and \ref Dune::TensorProductCoordinates.
153    *
154    * \tparam dim The dimension of the grid and its surrounding world
155    * \tparam Coordinates The coordinate mode of the grid.
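   *
   * A minimal usage sketch for the default (equidistant) coordinate mode,
   * assuming MPI has been set up, e.g. via Dune::MPIHelper:
   * \code
   * Dune::FieldVector<double,2> upperRight(1.0);  // unit square
   * std::array<int,2> cells{{8,8}};               // 8x8 cells on the coarsest level
   * Dune::YaspGrid<2> grid(upperRight, cells);    // non-periodic, overlap 1 (defaults)
   * grid.globalRefine(1);
   * \endcode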
156    */
157   template<int dim, class Coordinates = EquidistantCoordinates<double, dim> >
158   class YaspGrid
159     : public GridDefaultImplementation<dim,dim,typename Coordinates::ctype,YaspGridFamily<dim, Coordinates> >
160   {
161 
162     template<int, PartitionIteratorType, typename>
163     friend class YaspLevelIterator;
164 
165     template<typename>
166     friend class YaspHierarchicIterator;
167 
168   protected:
169 
170   public:
171     //! Type used for coordinates
172     typedef typename Coordinates::ctype ctype;
173     typedef YaspCollectiveCommunication CollectiveCommunicationType;
174 
175 #ifndef DOXYGEN
176     typedef typename Dune::YGrid<Coordinates> YGrid;
177     typedef typename Dune::YGridList<Coordinates>::Intersection Intersection;
178 
179     /** \brief A single grid level within a YaspGrid
180      */
181     struct YGridLevel {
182 
183       /** \brief Level number of this level grid */
184       int level() const
185       {
186         return level_;
187       }
188 
189       Coordinates coords;
190 
191       std::array<YGrid, dim+1> overlapfront;
192       std::array<YGridComponent<Coordinates>, StaticPower<2,dim>::power> overlapfront_data;
193       std::array<YGrid, dim+1> overlap;
194       std::array<YGridComponent<Coordinates>, StaticPower<2,dim>::power> overlap_data;
195       std::array<YGrid, dim+1> interiorborder;
196       std::array<YGridComponent<Coordinates>, StaticPower<2,dim>::power> interiorborder_data;
197       std::array<YGrid, dim+1> interior;
198       std::array<YGridComponent<Coordinates>, StaticPower<2,dim>::power> interior_data;
199 
200       std::array<YGridList<Coordinates>,dim+1> send_overlapfront_overlapfront;
201       std::array<std::deque<Intersection>, StaticPower<2,dim>::power>  send_overlapfront_overlapfront_data;
202       std::array<YGridList<Coordinates>,dim+1> recv_overlapfront_overlapfront;
203       std::array<std::deque<Intersection>, StaticPower<2,dim>::power>  recv_overlapfront_overlapfront_data;
204 
205       std::array<YGridList<Coordinates>,dim+1> send_overlap_overlapfront;
206       std::array<std::deque<Intersection>, StaticPower<2,dim>::power>  send_overlap_overlapfront_data;
207       std::array<YGridList<Coordinates>,dim+1> recv_overlapfront_overlap;
208       std::array<std::deque<Intersection>, StaticPower<2,dim>::power>  recv_overlapfront_overlap_data;
209 
210       std::array<YGridList<Coordinates>,dim+1> send_interiorborder_interiorborder;
211       std::array<std::deque<Intersection>, StaticPower<2,dim>::power>  send_interiorborder_interiorborder_data;
212       std::array<YGridList<Coordinates>,dim+1> recv_interiorborder_interiorborder;
213       std::array<std::deque<Intersection>, StaticPower<2,dim>::power>  recv_interiorborder_interiorborder_data;
214 
215       std::array<YGridList<Coordinates>,dim+1> send_interiorborder_overlapfront;
216       std::array<std::deque<Intersection>, StaticPower<2,dim>::power>  send_interiorborder_overlapfront_data;
217       std::array<YGridList<Coordinates>,dim+1> recv_overlapfront_interiorborder;
218       std::array<std::deque<Intersection>, StaticPower<2,dim>::power>  recv_overlapfront_interiorborder_data;
219 
220       // general
221       YaspGrid<dim,Coordinates>* mg;  // each grid level knows its multigrid
222       int overlapSize;           // in mesh cells on this level
223       bool keepOverlap;
224 
225       /** \brief The level number within the YaspGrid level hierarchy */
226       int level_;
227     };
228 
229     //! define types used for arguments
230     typedef std::array<int, dim> iTupel;
231     typedef FieldVector<ctype, dim> fTupel;
232 
233     // communication tag used by multigrid
234     enum { tag = 17 };
235 #endif
236 
237     //! return reference to torus
238     const Torus<CollectiveCommunicationType, dim>& torus () const
239     {
240       return _torus;
241     }
242 
243     //! return number of cells on finest level in given direction on all processors
244     int globalSize(int i) const
245     {
246       return levelSize(maxLevel(),i);
247     }
248 
249     //! return number of cells on finest level on all processors
250     iTupel globalSize() const
251     {
252       return levelSize(maxLevel());
253     }
254 
255     //! return size of the grid (in cells) on level l in direction i
256     int levelSize(int l, int i) const
257     {
258       return _coarseSize[i] * (1 << l);
259     }
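    // Example: with a coarse size of 8 cells in direction i, level l = 2 has 8 * 2^2 = 32 cells.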
260 
261     //! return size vector of the grid (in cells) on level l
262     iTupel levelSize(int l) const
263     {
264       iTupel s;
265       for (int i=0; i<dim; ++i)
266         s[i] = levelSize(l,i);
267       return s;
268     }
269 
270     //! return whether the grid is periodic in direction i
271     bool isPeriodic(int i) const
272     {
273       return _periodic[i];
274     }
275 
    //! return whether the physical overlap size is kept under refinement (see refineOptions)
276     bool getRefineOption() const
277     {
278       return keep_ovlp;
279     }
280 
281     //! Iterator over the grid levels
282     typedef typename ReservedVector<YGridLevel,32>::const_iterator YGridLevelIterator;
283 
284     //! return iterator pointing to coarsest level
285     YGridLevelIterator begin () const
286     {
287       return YGridLevelIterator(_levels,0);
288     }
289 
290     //! return iterator pointing to given level
291     YGridLevelIterator begin (int i) const
292     {
293       if (i<0 || i>maxLevel())
294         DUNE_THROW(GridError, "level not existing");
295       return YGridLevelIterator(_levels,i);
296     }
297 
298     //! return iterator pointing to one past the finest level
299     YGridLevelIterator end () const
300     {
301       return YGridLevelIterator(_levels,maxLevel()+1);
302     }
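
    // Illustrative sketch (assuming some YaspGrid object `grid`): visit all level
    // structures from coarsest to finest and print their level numbers.
    //
    //   for (auto lit = grid.begin(); lit != grid.end(); ++lit)
    //     std::cout << "level " << lit->level() << std::endl;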
303 
304     // static method to create the default load balance strategy
305     static const YLoadBalanceDefault<dim>* defaultLoadbalancer()
306     {
307       static YLoadBalanceDefault<dim> lb;
308       return & lb;
309     }
310 
311   protected:
312     /** \brief Make a new YGridLevel structure
313      *
314      * \param coords      the coordinate container
315      * \param periodic    indicate periodicity for each direction
316      * \param o_interior  origin of interior (non-overlapping) cell decomposition
317      * \param overlap     to be used on this grid level
318      */
319     void makelevel (const Coordinates& coords, std::bitset<dim> periodic, iTupel o_interior, int overlap)
320     {
321       YGridLevel& g = _levels.back();
322       g.overlapSize = overlap;
323       g.mg = this;
324       g.level_ = maxLevel();
325       g.coords = coords;
326       g.keepOverlap = keep_ovlp;
327 
328       // set the inserting positions in the corresponding arrays of the YGridLevel structure
329       typename std::array<YGridComponent<Coordinates>, StaticPower<2,dim>::power>::iterator overlapfront_it = g.overlapfront_data.begin();
330       typename std::array<YGridComponent<Coordinates>, StaticPower<2,dim>::power>::iterator overlap_it = g.overlap_data.begin();
331       typename std::array<YGridComponent<Coordinates>, StaticPower<2,dim>::power>::iterator interiorborder_it = g.interiorborder_data.begin();
332       typename std::array<YGridComponent<Coordinates>, StaticPower<2,dim>::power>::iterator interior_it = g.interior_data.begin();
333 
334       typename std::array<std::deque<Intersection>, StaticPower<2,dim>::power>::iterator
335         send_overlapfront_overlapfront_it = g.send_overlapfront_overlapfront_data.begin();
336       typename std::array<std::deque<Intersection>, StaticPower<2,dim>::power>::iterator
337         recv_overlapfront_overlapfront_it = g.recv_overlapfront_overlapfront_data.begin();
338 
339       typename std::array<std::deque<Intersection>, StaticPower<2,dim>::power>::iterator
340         send_overlap_overlapfront_it = g.send_overlap_overlapfront_data.begin();
341       typename std::array<std::deque<Intersection>, StaticPower<2,dim>::power>::iterator
342         recv_overlapfront_overlap_it = g.recv_overlapfront_overlap_data.begin();
343 
344       typename std::array<std::deque<Intersection>, StaticPower<2,dim>::power>::iterator
345         send_interiorborder_interiorborder_it = g.send_interiorborder_interiorborder_data.begin();
346       typename std::array<std::deque<Intersection>, StaticPower<2,dim>::power>::iterator
347         recv_interiorborder_interiorborder_it = g.recv_interiorborder_interiorborder_data.begin();
348 
349       typename std::array<std::deque<Intersection>, StaticPower<2,dim>::power>::iterator
350         send_interiorborder_overlapfront_it = g.send_interiorborder_overlapfront_data.begin();
351       typename std::array<std::deque<Intersection>, StaticPower<2,dim>::power>::iterator
352         recv_overlapfront_interiorborder_it = g.recv_overlapfront_interiorborder_data.begin();
353 
354       // have a null array for constructor calls around
355       std::array<int,dim> n;
356       std::fill(n.begin(), n.end(), 0);
357 
358       // determine origin of the grid with overlap and store whether an overlap area exists in direction i.
359       std::bitset<dim> ovlp_low(0ULL);
360       std::bitset<dim> ovlp_up(0ULL);
361 
362       iTupel o_overlap;
363       iTupel s_overlap;
364 
365       // determine where we have overlap and how big the overlap partition is
366       for (int i=0; i<dim; i++)
367       {
368         // the coordinate container has been constructed to hold the entire grid on
369         // this processor, including overlap. This is the element size.
370         s_overlap[i] = g.coords.size(i);
371 
372         //in the periodic case there is always overlap
373         if (periodic[i])
374         {
375           o_overlap[i] = o_interior[i]-overlap;
376           ovlp_low[i] = true;
377           ovlp_up[i] = true;
378         }
379         else
380         {
381           //check lower boundary
382           if (o_interior[i] - overlap < 0)
383             o_overlap[i] = 0;
384           else
385           {
386             o_overlap[i] = o_interior[i] - overlap;
387             ovlp_low[i] = true;
388           }
389 
390           //check upper boundary
391           if (o_overlap[i] + g.coords.size(i) < globalSize(i))
392             ovlp_up[i] = true;
393         }
394       }
395 
396       for (unsigned int codim = 0; codim < dim + 1; codim++)
397       {
398         // set the begin iterator for the corresponding ygrids
399         g.overlapfront[codim].setBegin(overlapfront_it);
400         g.overlap[codim].setBegin(overlap_it);
401         g.interiorborder[codim].setBegin(interiorborder_it);
402         g.interior[codim].setBegin(interior_it);
403         g.send_overlapfront_overlapfront[codim].setBegin(send_overlapfront_overlapfront_it);
404         g.recv_overlapfront_overlapfront[codim].setBegin(recv_overlapfront_overlapfront_it);
405         g.send_overlap_overlapfront[codim].setBegin(send_overlap_overlapfront_it);
406         g.recv_overlapfront_overlap[codim].setBegin(recv_overlapfront_overlap_it);
407         g.send_interiorborder_interiorborder[codim].setBegin(send_interiorborder_interiorborder_it);
408         g.recv_interiorborder_interiorborder[codim].setBegin(recv_interiorborder_interiorborder_it);
409         g.send_interiorborder_overlapfront[codim].setBegin(send_interiorborder_overlapfront_it);
410         g.recv_overlapfront_interiorborder[codim].setBegin(recv_overlapfront_interiorborder_it);
411 
412         // find all combinations of unit vectors that span entities of the given codimension
413         for (unsigned int index = 0; index < (1<<dim); index++)
414         {
415           // check whether the given shift is of our codimension
416           std::bitset<dim> r(index);
417           if (r.count() != dim-codim)
418             continue;
419 
420           // get an origin and a size array for subsequent modification
421           std::array<int,dim> origin(o_overlap);
422           std::array<int,dim> size(s_overlap);
423 
424           // build overlapfront
425           // we have to extend the element size by one in all directions without shift.
426           for (int i=0; i<dim; i++)
427             if (!r[i])
428               size[i]++;
429           *overlapfront_it = YGridComponent<Coordinates>(origin, r, &g.coords, size, n, size);
430 
431           // build overlap
432           for (int i=0; i<dim; i++)
433           {
434             if (!r[i])
435             {
436               if (ovlp_low[i])
437               {
438                 origin[i]++;
439                 size[i]--;
440               }
441               if (ovlp_up[i])
442                 size[i]--;
443             }
444           }
445           *overlap_it = YGridComponent<Coordinates>(origin,size,*overlapfront_it);
446 
447           // build interiorborder
448           for (int i=0; i<dim; i++)
449           {
450             if (ovlp_low[i])
451             {
452               origin[i] += overlap;
453               size[i] -= overlap;
454               if (!r[i])
455               {
456                 origin[i]--;
457                 size[i]++;
458               }
459             }
460             if (ovlp_up[i])
461             {
462               size[i] -= overlap;
463               if (!r[i])
464                 size[i]++;
465             }
466           }
467           *interiorborder_it = YGridComponent<Coordinates>(origin,size,*overlapfront_it);
468 
469           // build interior
470           for (int i=0; i<dim; i++)
471           {
472             if (!r[i])
473             {
474               if (ovlp_low[i])
475               {
476                 origin[i]++;
477                 size[i]--;
478               }
479               if (ovlp_up[i])
480                 size[i]--;
481             }
482           }
483           *interior_it = YGridComponent<Coordinates>(origin, size, *overlapfront_it);
484 
485           intersections(*overlapfront_it,*overlapfront_it,*send_overlapfront_overlapfront_it, *recv_overlapfront_overlapfront_it);
486           intersections(*overlap_it,*overlapfront_it,*send_overlap_overlapfront_it, *recv_overlapfront_overlap_it);
487           intersections(*interiorborder_it,*interiorborder_it,*send_interiorborder_interiorborder_it,*recv_interiorborder_interiorborder_it);
488           intersections(*interiorborder_it,*overlapfront_it,*send_interiorborder_overlapfront_it,*recv_overlapfront_interiorborder_it);
489 
490           // advance all iterators pointing to the next insertion point
491           ++overlapfront_it;
492           ++overlap_it;
493           ++interiorborder_it;
494           ++interior_it;
495           ++send_overlapfront_overlapfront_it;
496           ++recv_overlapfront_overlapfront_it;
497           ++send_overlap_overlapfront_it;
498           ++recv_overlapfront_overlap_it;
499           ++send_interiorborder_interiorborder_it;
500           ++recv_interiorborder_interiorborder_it;
501           ++send_interiorborder_overlapfront_it;
502           ++recv_overlapfront_interiorborder_it;
503         }
504 
505         // set end iterators in the corresponding ygrids
506         g.overlapfront[codim].finalize(overlapfront_it);
507         g.overlap[codim].finalize(overlap_it);
508         g.interiorborder[codim].finalize(interiorborder_it);
509         g.interior[codim].finalize(interior_it);
510         g.send_overlapfront_overlapfront[codim].finalize(send_overlapfront_overlapfront_it,g.overlapfront[codim]);
511         g.recv_overlapfront_overlapfront[codim].finalize(recv_overlapfront_overlapfront_it,g.overlapfront[codim]);
512         g.send_overlap_overlapfront[codim].finalize(send_overlap_overlapfront_it,g.overlapfront[codim]);
513         g.recv_overlapfront_overlap[codim].finalize(recv_overlapfront_overlap_it,g.overlapfront[codim]);
514         g.send_interiorborder_interiorborder[codim].finalize(send_interiorborder_interiorborder_it,g.overlapfront[codim]);
515         g.recv_interiorborder_interiorborder[codim].finalize(recv_interiorborder_interiorborder_it,g.overlapfront[codim]);
516         g.send_interiorborder_overlapfront[codim].finalize(send_interiorborder_overlapfront_it,g.overlapfront[codim]);
517         g.recv_overlapfront_interiorborder[codim].finalize(recv_overlapfront_interiorborder_it,g.overlapfront[codim]);
518       }
519     }
520 
521 #ifndef DOXYGEN
522     /** \brief special data structure to communicate ygrids
523      * Historically, this was needed because YGrids had virtual functions, and
524      * communicating the virtual function table pointer introduced a bug. After the
525      * change to the tensor-product grid, the dynamic polymorphism was removed; the
526      * struct is still kept because it allows communicating YGrids that carry only
527      * index, but no coordinate, information. This is sufficient, because all communicated YGrids are
528      * intersected with a local grid, which has coordinate information.
529      */
530     struct mpifriendly_ygrid {
531       mpifriendly_ygrid ()
532       {
533         std::fill(origin.begin(), origin.end(), 0);
534         std::fill(size.begin(), size.end(), 0);
535       }
536       mpifriendly_ygrid (const YGridComponent<Coordinates>& grid)
537         : origin(grid.origin()), size(grid.size())
538       {}
539       iTupel origin;
540       iTupel size;
541     };
542 #endif
543 
544     /** \brief Construct list of intersections with neighboring processors
545      *
546      * \param sendgrid the subgrid to be sent to neighboring processors
547      * \param recvgrid the grid stored in this processor
548      * \param sendlist the deque to fill with send intersections
549      * \param recvlist the deque to fill with recv intersections
550      * The send and receive lists are returned through the deque arguments.
551      */
552     void intersections(const YGridComponent<Coordinates>& sendgrid, const YGridComponent<Coordinates>& recvgrid,
553                         std::deque<Intersection>& sendlist, std::deque<Intersection>& recvlist)
554     {
555       iTupel size = globalSize();
556 
557       // the exchange buffers
558       std::vector<YGridComponent<Coordinates> > send_recvgrid(_torus.neighbors());
559       std::vector<YGridComponent<Coordinates> > recv_recvgrid(_torus.neighbors());
560       std::vector<YGridComponent<Coordinates> > send_sendgrid(_torus.neighbors());
561       std::vector<YGridComponent<Coordinates> > recv_sendgrid(_torus.neighbors());
562 
563       // new exchange buffers to send simple struct without virtual functions
564       std::vector<mpifriendly_ygrid> mpifriendly_send_recvgrid(_torus.neighbors());
565       std::vector<mpifriendly_ygrid> mpifriendly_recv_recvgrid(_torus.neighbors());
566       std::vector<mpifriendly_ygrid> mpifriendly_send_sendgrid(_torus.neighbors());
567       std::vector<mpifriendly_ygrid> mpifriendly_recv_sendgrid(_torus.neighbors());
568 
569       // fill send buffers; iterate over all neighboring processes
570       // non-periodic case is handled automatically because intersection will be zero
571       for (typename Torus<CollectiveCommunicationType,dim>::ProcListIterator i=_torus.sendbegin(); i!=_torus.sendend(); ++i)
572       {
573         // determine if we communicate with this neighbor (and what)
574         bool skip = false;
575         iTupel coord = _torus.coord();   // my coordinates
576         iTupel delta = i.delta();        // delta to neighbor
577         iTupel nb = coord;               // the neighbor
578         for (int k=0; k<dim; k++) nb[k] += delta[k];
579         iTupel v;                    // grid movement
580         std::fill(v.begin(), v.end(), 0);
581 
582         for (int k=0; k<dim; k++)
583         {
584           if (nb[k]<0)
585           {
586             if (_periodic[k])
587               v[k] += size[k];
588             else
589               skip = true;
590           }
591           if (nb[k]>=_torus.dims(k))
592           {
593             if (_periodic[k])
594               v[k] -= size[k];
595             else
596               skip = true;
597           }
598           // if neither case applies, v[k] stays 0
599         }
600 
601         // store moved grids in send buffers
602         if (!skip)
603         {
604           send_sendgrid[i.index()] = sendgrid.move(v);
605           send_recvgrid[i.index()] = recvgrid.move(v);
606         }
607         else
608         {
609           send_sendgrid[i.index()] = YGridComponent<Coordinates>();
610           send_recvgrid[i.index()] = YGridComponent<Coordinates>();
611         }
612       }
613 
614       // issue send requests for sendgrid being sent to all neighbors
615       for (typename Torus<CollectiveCommunicationType,dim>::ProcListIterator i=_torus.sendbegin(); i!=_torus.sendend(); ++i)
616       {
617         mpifriendly_send_sendgrid[i.index()] = mpifriendly_ygrid(send_sendgrid[i.index()]);
618         _torus.send(i.rank(), &mpifriendly_send_sendgrid[i.index()], sizeof(mpifriendly_ygrid));
619       }
620 
621       // issue recv requests for sendgrids of neighbors
622       for (typename Torus<CollectiveCommunicationType,dim>::ProcListIterator i=_torus.recvbegin(); i!=_torus.recvend(); ++i)
623         _torus.recv(i.rank(), &mpifriendly_recv_sendgrid[i.index()], sizeof(mpifriendly_ygrid));
624 
625       // exchange the sendgrids
626       _torus.exchange();
627 
628       // issue send requests for recvgrid being sent to all neighbors
629       for (typename Torus<CollectiveCommunicationType,dim>::ProcListIterator i=_torus.sendbegin(); i!=_torus.sendend(); ++i)
630       {
631         mpifriendly_send_recvgrid[i.index()] = mpifriendly_ygrid(send_recvgrid[i.index()]);
632         _torus.send(i.rank(), &mpifriendly_send_recvgrid[i.index()], sizeof(mpifriendly_ygrid));
633       }
634 
635       // issue recv requests for recvgrid of neighbors
636       for (typename Torus<CollectiveCommunicationType,dim>::ProcListIterator i=_torus.recvbegin(); i!=_torus.recvend(); ++i)
637         _torus.recv(i.rank(), &mpifriendly_recv_recvgrid[i.index()], sizeof(mpifriendly_ygrid));
638 
639       // exchange the recvgrid
640       _torus.exchange();
641 
642       // process receive buffers and compute intersections
643       for (typename Torus<CollectiveCommunicationType,dim>::ProcListIterator i=_torus.recvbegin(); i!=_torus.recvend(); ++i)
644       {
645         // what must be sent to this neighbor
646         Intersection send_intersection;
647         mpifriendly_ygrid yg = mpifriendly_recv_recvgrid[i.index()];
648         recv_recvgrid[i.index()] = YGridComponent<Coordinates>(yg.origin,yg.size);
649         send_intersection.grid = sendgrid.intersection(recv_recvgrid[i.index()]);
650         send_intersection.rank = i.rank();
651         send_intersection.distance = i.distance();
652         if (!send_intersection.grid.empty()) sendlist.push_front(send_intersection);
653 
654         Intersection recv_intersection;
655         yg = mpifriendly_recv_sendgrid[i.index()];
656         recv_sendgrid[i.index()] = YGridComponent<Coordinates>(yg.origin,yg.size);
657         recv_intersection.grid = recvgrid.intersection(recv_sendgrid[i.index()]);
658         recv_intersection.rank = i.rank();
659         recv_intersection.distance = i.distance();
660         if(!recv_intersection.grid.empty()) recvlist.push_back(recv_intersection);
661       }
662     }
663 
664   protected:
665 
666     typedef const YaspGrid<dim,Coordinates> GridImp;
667 
668     void init()
669     {
670       indexsets.push_back( std::make_shared< YaspIndexSet<const YaspGrid<dim, Coordinates>, false > >(*this,0) );
671       boundarysegmentssize();
672     }
673 
674     void boundarysegmentssize()
675     {
676       // sizes of local macro grid
677       std::array<int, dim> sides;
678       {
679         for (int i=0; i<dim; i++)
680         {
681           sides[i] =
682             ((begin()->overlap[0].dataBegin()->origin(i) == 0)+
683              (begin()->overlap[0].dataBegin()->origin(i) + begin()->overlap[0].dataBegin()->size(i)
684                     == levelSize(0,i)));
685         }
686       }
687       nBSegments = 0;
688       for (int k=0; k<dim; k++)
689       {
690         int offset = 1;
691         for (int l=0; l<dim; l++)
692         {
693           if (l==k) continue;
694           offset *= begin()->overlap[0].dataBegin()->size(l);
695         }
696         nBSegments += sides[k]*offset;
697       }
698     }
699 
700   public:
701 
702     // define the persistent index type
703     typedef bigunsignedint<dim*yaspgrid_dim_bits+yaspgrid_level_bits+dim> PersistentIndexType;
704 
705     //! the GridFamily of this grid
706     typedef YaspGridFamily<dim, Coordinates> GridFamily;
707     // the Traits
708     typedef typename YaspGridFamily<dim, Coordinates>::Traits Traits;
709 
710     // need for friend declarations in entity
711     typedef YaspIndexSet<YaspGrid<dim, Coordinates>, false > LevelIndexSetType;
712     typedef YaspIndexSet<YaspGrid<dim, Coordinates>, true > LeafIndexSetType;
713     typedef YaspGlobalIdSet<YaspGrid<dim, Coordinates> > GlobalIdSetType;
714 
715     /** Standard constructor for a YaspGrid with a given Coordinates object
716      *  @param coordinates Object that stores or computes the vertex coordinates
717      *  @param periodic tells if direction is periodic or not
718      *  @param overlap size of overlap on coarsest grid (same in all directions)
719      *  @param comm the collective communication object for this grid. An MPI communicator can be given here.
720      *  @param lb pointer to an overloaded YLoadBalance instance
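     *
     *  Usage sketch (the offset-coordinate mode is an example choice; any of the three
     *  coordinate containers can be passed, matching the Coordinates template parameter):
     *  \code
     *  using Coords = Dune::EquidistantOffsetCoordinates<double,2>;
     *  Dune::FieldVector<double,2> lower(0.0), upper(1.0);
     *  std::array<int,2> cells{{16,16}};
     *  Coords coords(lower, upper, cells);
     *  Dune::YaspGrid<2,Coords> grid(coords);
     *  \endcode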
721      */
722     YaspGrid (const Coordinates& coordinates,
723               std::bitset<dim> periodic = std::bitset<dim>(0ULL),
724               int overlap = 1,
725               CollectiveCommunicationType comm = CollectiveCommunicationType(),
726               const YLoadBalance<dim>* lb = defaultLoadbalancer())
727       : ccobj(comm)
728       , leafIndexSet_(*this)
729       , _periodic(periodic)
730       , _overlap(overlap)
731       , keep_ovlp(true)
732       , adaptRefCount(0)
733       , adaptActive(false)
734     {
735       _levels.resize(1);
736 
737       // Number of elements per coordinate direction on the coarsest level
738       for (std::size_t i=0; i<dim; i++)
739         _coarseSize[i] = coordinates.size(i);
740 
741       // Construct the communication torus
742       _torus = decltype(_torus)(comm,tag,_coarseSize,lb);
743 
744       iTupel o;
745       std::fill(o.begin(), o.end(), 0);
746       iTupel o_interior(o);
747       iTupel s_interior(_coarseSize);
748 
749       _torus.partition(_torus.rank(),o,_coarseSize,o_interior,s_interior);
750 
751       // Set domain size
752       if (std::is_same<Coordinates,EquidistantCoordinates<ctype,dim> >::value
753         || std::is_same<Coordinates,EquidistantOffsetCoordinates<ctype,dim> >::value)
754       {
755         for (std::size_t i=0; i<dim; i++)
756           _L[i] = coordinates.size(i) * coordinates.meshsize(i,0);
757       }
758       if (std::is_same<Coordinates,TensorProductCoordinates<ctype,dim> >::value)
759       {
760         // compute the domain extent from the coordinate range and store it for later size requests
761         for (int i=0; i<dim; i++)
762           _L[i] = coordinates.coordinate(i,_coarseSize[i]) - coordinates.coordinate(i,0);
763       }
764 
765 #if HAVE_MPI
766       // TODO: Settle on a single value for all coordinate types
767       int mysteryFactor = (std::is_same<Coordinates,EquidistantCoordinates<ctype,dim> >::value) ? 1 : 2;
768 
769       // check whether the grid is large enough to be overlapping
770       for (int i=0; i<dim; i++)
771       {
772         // find out whether the grid is too small to be overlapping in this direction
773         int toosmall = (s_interior[i] <= mysteryFactor * overlap) &&    // interior is very small
774             (periodic[i] || (s_interior[i] != _coarseSize[i]));    // there is an overlap in that direction
775         // communicate the result so that all processes error out if one of them failed.
776         int global = 0;
777         MPI_Allreduce(&toosmall, &global, 1, MPI_INT, MPI_LOR, comm);
778         if (global)
779           DUNE_THROW(Dune::GridError,"YaspGrid does not support degrees of freedom shared by more than immediately neighboring subdomains."
780                                      " Note that this also holds for DOFs on subdomain boundaries."
781                                      " Increase grid elements or decrease overlap accordingly.");
782       }
783 #endif // #if HAVE_MPI
784 
785       if (std::is_same<Coordinates,EquidistantCoordinates<ctype,dim> >::value
786         || std::is_same<Coordinates,EquidistantOffsetCoordinates<ctype,dim> >::value)
787       {
788         iTupel s_overlap(s_interior);
789         for (int i=0; i<dim; i++)
790         {
791           if ((o_interior[i] - overlap > 0) || (periodic[i]))
792             s_overlap[i] += overlap;
793           if ((o_interior[i] + s_interior[i] + overlap <= _coarseSize[i]) || (periodic[i]))
794             s_overlap[i] += overlap;
795         }
796 
797         FieldVector<ctype,dim> upperRightWithOverlap;
798         for (int i=0; i<dim; i++)
799           upperRightWithOverlap[i] = coordinates.coordinate(i,0) + coordinates.meshsize(i,0) * s_overlap[i];
800 
801         if constexpr (std::is_same_v<Coordinates,EquidistantCoordinates<ctype,dim>>)
802         {
803           // New coordinate object that additionally contains the overlap elements
804           EquidistantCoordinates<ctype,dim> coordinatesWithOverlap(upperRightWithOverlap,s_overlap);
805 
806           // add level (the this-> is needed to make g++-6 happy)
807           this->makelevel(coordinatesWithOverlap,periodic,o_interior,overlap);
808         }
809 
810         if constexpr (std::is_same_v<Coordinates,EquidistantOffsetCoordinates<ctype,dim>>)
811         {
812           Dune::FieldVector<ctype,dim> lowerleft;
813           for (int i=0; i<dim; i++)
814             lowerleft[i] = coordinates.origin(i);
815 
816           // New coordinate object that additionally contains the overlap elements
817           EquidistantOffsetCoordinates<ctype,dim> coordinatesWithOverlap(lowerleft,upperRightWithOverlap,s_overlap);
818 
819           // add level (the this-> is needed to make g++-6 happy)
820           this->makelevel(coordinatesWithOverlap,periodic,o_interior,overlap);
821         }
822       }
823 
824       if constexpr (std::is_same_v<Coordinates,TensorProductCoordinates<ctype,dim>>)
825       {
826         std::array<std::vector<ctype>,dim> newCoords;
827         std::array<int, dim> offset(o_interior);
828 
829         // find the relevant part of the coords vector for this processor and copy it to newCoords
830         for (int i=0; i<dim; ++i)
831         {
832           //define the coordinate range to be used
833           std::size_t begin = o_interior[i];
834           std::size_t end   = begin + s_interior[i] + 1;
835 
836           // check whether we are not at the physical boundary. In that case overlap is a simple
837           // extension of the coordinate range to be used
838           if (o_interior[i] - overlap > 0)
839           {
840             begin = begin - overlap;
841             offset[i] -= overlap;
842           }
843           if (o_interior[i] + s_interior[i] + overlap < _coarseSize[i])
844             end = end + overlap;
845 
846           //copy the selected part in the new coord vector
847           newCoords[i].resize(end-begin);
848           auto newCoordsIt = newCoords[i].begin();
849           for (std::size_t j=begin; j<end; j++)
850           {
851             *newCoordsIt = coordinates.coordinate(i, j);
852             newCoordsIt++;
853           }
854 
855           // Check whether we are at the physical boundary and have a periodic grid.
856           // In this case the coordinate vector has to be tweaked manually.
857           if ((periodic[i]) && (o_interior[i] + s_interior[i] + overlap >= _coarseSize[i]))
858           {
859             // we need to add the first <overlap> cells to the end of newcoords
860             for (int j=0; j<overlap; ++j)
861               newCoords[i].push_back(newCoords[i].back() - coordinates.coordinate(i,j) + coordinates.coordinate(i,j+1));
862           }
863 
864           if ((periodic[i]) && (o_interior[i] - overlap <= 0))
865           {
866             offset[i] -= overlap;
867 
868             // we need to add the last <overlap> cells to the begin of newcoords
869             std::size_t reverseCounter = coordinates.size(i);
870             for (int j=0; j<overlap; ++j, --reverseCounter)
871               newCoords[i].insert(newCoords[i].begin(), newCoords[i].front()
872                                   - coordinates.coordinate(i,reverseCounter) + coordinates.coordinate(i,reverseCounter-1));
873           }
874         }
875 
876         TensorProductCoordinates<ctype,dim> coordinatesWithOverlap(newCoords, offset);
877 
878         // add level (the this-> is needed to make g++-6 happy)
879         this->makelevel(coordinatesWithOverlap,periodic,o_interior,overlap);
880       }
881 
882       init();
883     }
884 
885     /** Standard constructor for an equidistant YaspGrid
886      *  @param L extension of the domain
887      *  @param s number of cells on coarse mesh in each direction
888      *  @param periodic tells if direction is periodic or not
889      *  @param overlap size of overlap on coarsest grid (same in all directions)
890      *  @param comm the collective communication object for this grid. An MPI communicator can be given here.
891      *  @param lb pointer to an overloaded YLoadBalance instance
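     *
     *  Sketch of a typical call (unit square, 32x32 coarse cells, defaults otherwise):
     *  \code
     *  Dune::FieldVector<double,2> L(1.0);
     *  std::array<int,2> s{{32,32}};
     *  Dune::YaspGrid<2> grid(L, s);
     *  \endcode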
892      */
893     template<class C = Coordinates,
894              typename std::enable_if_t< std::is_same_v<C, EquidistantCoordinates<ctype,dim> >, int> = 0>
895     YaspGrid (Dune::FieldVector<ctype, dim> L,
896               std::array<int, std::size_t{dim}> s,
897               std::bitset<std::size_t{dim}> periodic = std::bitset<std::size_t{dim}>{0ULL},
898               int overlap = 1,
899               CollectiveCommunicationType comm = CollectiveCommunicationType(),
900               const YLoadBalance<dim>* lb = defaultLoadbalancer())
901       : ccobj(comm), _torus(comm,tag,s,lb), leafIndexSet_(*this),
902         _L(L), _periodic(periodic), _coarseSize(s), _overlap(overlap),
903         keep_ovlp(true), adaptRefCount(0), adaptActive(false)
904     {
905       _levels.resize(1);
906 
907       iTupel o;
908       std::fill(o.begin(), o.end(), 0);
909       iTupel o_interior(o);
910       iTupel s_interior(s);
911 
912       _torus.partition(_torus.rank(),o,s,o_interior,s_interior);
913 
914 #if HAVE_MPI
915       // check whether the grid is large enough to be overlapping
916       for (int i=0; i<dim; i++)
917       {
918         // find out whether the grid is too small to be overlapping in this direction
919         int toosmall = (s_interior[i] / 2 <= overlap) &&    // interior is very small
920             (periodic[i] || (s_interior[i] != s[i]));    // there is an overlap in that direction
921         // communicate the result so that all processes error out if one of them failed.
922         int global = 0;
923         MPI_Allreduce(&toosmall, &global, 1, MPI_INT, MPI_LOR, comm);
924         if (global)
925           DUNE_THROW(Dune::GridError,"YaspGrid does not support degrees of freedom shared by more than immediately neighboring subdomains."
926                                      " Note that this also holds for DOFs on subdomain boundaries."
927                                      " Increase grid elements or decrease overlap accordingly.");
928       }
929 #endif // #if HAVE_MPI
930 
931       iTupel s_overlap(s_interior);
932       for (int i=0; i<dim; i++)
933       {
934         if ((o_interior[i] - overlap > 0) || (periodic[i]))
935           s_overlap[i] += overlap;
936         if ((o_interior[i] + s_interior[i] + overlap <= _coarseSize[i]) || (periodic[i]))
937           s_overlap[i] += overlap;
938       }
939 
940       FieldVector<ctype,dim> upperRightWithOverlap;
941 
942       for (int i=0; i<dim; i++)
943         upperRightWithOverlap[i] = (L[i] / s[i]) * s_overlap[i];
944 
945       // New coordinate object that additionally contains the overlap elements
946       EquidistantCoordinates<ctype,dim> cc(upperRightWithOverlap,s_overlap);
947 
948       // add level
949       makelevel(cc,periodic,o_interior,overlap);
950 
951       init();
952     }
953 
954     /** Constructor for an equidistant YaspGrid with non-trivial origin
955      *  @param lowerleft Lower left corner of the domain
956      *  @param upperright Upper right corner of the domain
957      *  @param s number of cells on coarse mesh in each direction
958      *  @param periodic tells if direction is periodic or not
959      *  @param overlap size of overlap on coarsest grid (same in all directions)
960      *  @param comm the collective communication object for this grid. An MPI communicator can be given here.
961      *  @param lb pointer to an overloaded YLoadBalance instance
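     *
     *  Sketch: a grid on [-1,1] x [0,2] with 20 x 40 coarse cells:
     *  \code
     *  Dune::FieldVector<double,2> lowerleft{-1.0, 0.0};
     *  Dune::FieldVector<double,2> upperright{ 1.0, 2.0};
     *  std::array<int,2> s{{20,40}};
     *  Dune::YaspGrid<2, Dune::EquidistantOffsetCoordinates<double,2> >
     *    grid(lowerleft, upperright, s);
     *  \endcode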
962      */
963     template<class C = Coordinates,
964              typename std::enable_if_t< std::is_same_v<C, EquidistantOffsetCoordinates<ctype,dim> >, int> = 0>
965     YaspGrid (Dune::FieldVector<ctype, dim> lowerleft,
966               Dune::FieldVector<ctype, dim> upperright,
967               std::array<int, std::size_t{dim}> s,
968               std::bitset<std::size_t{dim}> periodic = std::bitset<std::size_t{dim}>(0ULL),
969               int overlap = 1,
970               CollectiveCommunicationType comm = CollectiveCommunicationType(),
971               const YLoadBalance<dim>* lb = defaultLoadbalancer())
972       : ccobj(comm), _torus(comm,tag,s,lb), leafIndexSet_(*this),
973         _L(upperright - lowerleft),
974         _periodic(periodic), _coarseSize(s), _overlap(overlap),
975         keep_ovlp(true), adaptRefCount(0), adaptActive(false)
976     {
977       _levels.resize(1);
978 
979       iTupel o;
980       std::fill(o.begin(), o.end(), 0);
981       iTupel o_interior(o);
982       iTupel s_interior(s);
983 
984       _torus.partition(_torus.rank(),o,s,o_interior,s_interior);
985 
986 #if HAVE_MPI
987       // check whether the grid is large enough to be overlapping
988       for (int i=0; i<dim; i++)
989       {
990         // find out whether the grid is too small to be overlapping in this direction
991         int toosmall = (s_interior[i] / 2 <= overlap) &&    // interior is very small
992             (periodic[i] || (s_interior[i] != s[i]));    // there is an overlap in that direction
993         // communicate the result so that all processes error out if one of them failed.
994         int global = 0;
995         MPI_Allreduce(&toosmall, &global, 1, MPI_INT, MPI_LOR, comm);
996         if (global)
997           DUNE_THROW(Dune::GridError,"YaspGrid does not support degrees of freedom shared by more than immediately neighboring subdomains."
998                                      " Note that this also holds for DOFs on subdomain boundaries."
999                                      " Increase grid elements or decrease overlap accordingly.");
1000       }
1001 #endif // #if HAVE_MPI
1002 
1003       iTupel s_overlap(s_interior);
1004       for (int i=0; i<dim; i++)
1005       {
1006         if ((o_interior[i] - overlap > 0) || (periodic[i]))
1007           s_overlap[i] += overlap;
1008         if ((o_interior[i] + s_interior[i] + overlap <= _coarseSize[i]) || (periodic[i]))
1009           s_overlap[i] += overlap;
1010       }
1011 
1012       FieldVector<ctype,dim> upperRightWithOverlap;
1013       for (int i=0; i<dim; i++)
1014         upperRightWithOverlap[i] = lowerleft[i]
1015                                  + s_overlap[i] * (upperright[i]-lowerleft[i]) / s[i];
1016 
1017       EquidistantOffsetCoordinates<ctype,dim> cc(lowerleft,upperRightWithOverlap,s_overlap);
1018 
1019       // add level
1020       makelevel(cc,periodic,o_interior,overlap);
1021 
1022       init();
1023     }
1024 
1025     /** @brief Standard constructor for a tensorproduct YaspGrid
1026      *  @param coords coordinate vectors to be used for coarse grid
1027      *  @param periodic tells if direction is periodic or not
1028      *  @param overlap size of overlap on coarsest grid (same in all directions)
1029      *  @param comm the collective communication object for this grid. An MPI communicator can be given here.
1030      *  @param lb pointer to an overloaded YLoadBalance instance
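     *
     *  Sketch: a 2d grid that is graded in the x-direction:
     *  \code
     *  std::vector<double> x = {0.0, 0.1, 0.3, 0.6, 1.0};
     *  std::vector<double> y = {0.0, 0.5, 1.0};
     *  std::array<std::vector<double>,2> coords = {{x, y}};
     *  Dune::YaspGrid<2, Dune::TensorProductCoordinates<double,2> > grid(coords);
     *  \endcode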
1031      */
1032     template<class C = Coordinates,
1033              typename std::enable_if_t< std::is_same_v<C, TensorProductCoordinates<ctype,dim> >, int> = 0>
1034     YaspGrid (std::array<std::vector<ctype>, std::size_t{dim}> coords,
1035               std::bitset<std::size_t{dim}> periodic = std::bitset<std::size_t{dim}>(0ULL),
1036               int overlap = 1,
1037               CollectiveCommunicationType comm = CollectiveCommunicationType(),
1038               const YLoadBalance<dim>* lb = defaultLoadbalancer())
1039       : ccobj(comm), _torus(comm,tag,Dune::Yasp::sizeArray<dim>(coords),lb),
1040         leafIndexSet_(*this), _periodic(periodic), _overlap(overlap),
1041         keep_ovlp(true), adaptRefCount(0), adaptActive(false)
1042     {
1043       if (!Dune::Yasp::checkIfMonotonous(coords))
1044         DUNE_THROW(Dune::GridError,"Setup of a tensorproduct grid requires monotonous sequences of coordinates.");
1045 
1046       _levels.resize(1);
1047 
1048       //determine sizes of vector to correctly construct torus structure and store for later size requests
1049       for (int i=0; i<dim; i++) {
1050         _coarseSize[i] = coords[i].size() - 1;
1051         _L[i] = coords[i][_coarseSize[i]] - coords[i][0];
1052       }
1053 
1054       iTupel o;
1055       std::fill(o.begin(), o.end(), 0);
1056       iTupel o_interior(o);
1057       iTupel s_interior(_coarseSize);
1058 
1059       _torus.partition(_torus.rank(),o,_coarseSize,o_interior,s_interior);
1060 
1061 #if HAVE_MPI
1062       // check whether the grid is large enough to be overlapping
1063       for (int i=0; i<dim; i++)
1064       {
1065         // find out whether the grid is too small to be overlapping in this direction
1066         int toosmall = (s_interior[i] / 2 <= overlap) &&               // interior is very small
1067              (periodic[i] || (s_interior[i] != _coarseSize[i]));    // there is an overlap in that direction
1068         // communicate the result so that all processes error out if one of them failed.
1069         int global = 0;
1070         MPI_Allreduce(&toosmall, &global, 1, MPI_INT, MPI_LOR, comm);
1071         if (global)
1072           DUNE_THROW(Dune::GridError,"YaspGrid does not support degrees of freedom shared by more than immediately neighboring subdomains."
1073                                      " Note that this also holds for DOFs on subdomain boundaries."
1074                                      " Increase grid elements or decrease overlap accordingly.");
1075       }
1076 #endif // #if HAVE_MPI
1077 
1078 
1079       std::array<std::vector<ctype>,dim> newcoords;
1080       std::array<int, dim> offset(o_interior);
1081 
1082       // find the relevant part of the coords vector for this processor and copy it to newcoords
1083       for (int i=0; i<dim; ++i)
1084       {
1085         //define iterators on coords that specify the coordinate range to be used
1086         typename std::vector<ctype>::iterator begin = coords[i].begin() + o_interior[i];
1087         typename std::vector<ctype>::iterator end = begin + s_interior[i] + 1;
1088 
1089         // check whether we are not at the physical boundary. In that case overlap is a simple
1090         // extension of the coordinate range to be used
1091         if (o_interior[i] - overlap > 0)
1092         {
1093           begin = begin - overlap;
1094           offset[i] -= overlap;
1095         }
1096         if (o_interior[i] + s_interior[i] + overlap < _coarseSize[i])
1097           end = end + overlap;
1098 
1099         //copy the selected part in the new coord vector
1100         newcoords[i].resize(end-begin);
1101         std::copy(begin, end, newcoords[i].begin());
1102 
1103         // check whether we are at the physical boundary and have a periodic grid.
1104         // In this case the coordinate vector has to be tweaked manually.
1105         if ((periodic[i]) && (o_interior[i] + s_interior[i] + overlap >= _coarseSize[i]))
1106         {
1107           // we need to add the first <overlap> cells to the end of newcoords
1108           typename std::vector<ctype>::iterator it = coords[i].begin();
1109           for (int j=0; j<overlap; ++j, ++it)
1110             newcoords[i].push_back(newcoords[i].back() + (*(it+1) - *it));
1111         }
1112 
1113         if ((periodic[i]) && (o_interior[i] - overlap <= 0))
1114         {
1115           offset[i] -= overlap;
1116 
1117           // we need to add the last <overlap> cells to the begin of newcoords
1118           typename std::vector<ctype>::iterator it = coords[i].end() - 1;
1119           for (int j=0; j<overlap; ++j, --it)
1120             newcoords[i].insert(newcoords[i].begin(), newcoords[i].front() - (*it - *(it-1)));
1121         }
1122       }
1123 
1124       TensorProductCoordinates<ctype,dim> cc(newcoords, offset);
1125 
1126       // add level
1127       makelevel(cc,periodic,o_interior,overlap);
1128       init();
1129     }
1130 
1131   private:
1132 
1133     /** @brief Constructor for a tensorproduct YaspGrid with only coordinate
1134      *         information on this processor
1135      *  @param comm MPI communicator where this mesh is distributed to
1136      *  @param coords coordinate vectors to be used for coarse grid
1137      *  @param periodic tells if direction is periodic or not
1138      *  @param overlap size of overlap on coarsest grid (same in all directions)
1139      *  @param coarseSize the coarse size of the global grid
1140      *  @param lb pointer to an overloaded YLoadBalance instance
1141      *
1142      *  @warning The construction of overlapping coordinate ranges is
1143      *           an error-prone procedure. For this reason, it is kept private.
1144      *           You can safely use it through BackupRestoreFacility. All other
1145      *           use is not supported for the moment.
1146      */
1147     YaspGrid (std::array<std::vector<ctype>, std::size_t{dim}> coords,
1148               std::bitset<std::size_t{dim}> periodic,
1149               int overlap,
1150               CollectiveCommunicationType comm,
1151               std::array<int,dim> coarseSize,
1152               const YLoadBalance<dim>* lb = defaultLoadbalancer())
1153       : ccobj(comm), _torus(comm,tag,coarseSize,lb), leafIndexSet_(*this),
1154         _periodic(periodic), _coarseSize(coarseSize), _overlap(overlap),
1155         keep_ovlp(true), adaptRefCount(0), adaptActive(false)
1156     {
1157       // check whether YaspGrid has been given the correct template parameter
1158       static_assert(std::is_same<Coordinates,TensorProductCoordinates<ctype,dim> >::value,
1159                   "YaspGrid coordinate container template parameter and given constructor values do not match!");
1160 
1161       if (!Dune::Yasp::checkIfMonotonous(coords))
1162         DUNE_THROW(Dune::GridError,"Setup of a tensorproduct grid requires monotonous sequences of coordinates.");
1163 
1164       for (int i=0; i<dim; i++)
1165         _L[i] = coords[i][coords[i].size() - 1] - coords[i][0];
1166 
1167       _levels.resize(1);
1168 
1169       std::array<int,dim> o;
1170       std::fill(o.begin(), o.end(), 0);
1171       std::array<int,dim> o_interior(o);
1172       std::array<int,dim> s_interior(coarseSize);
1173 
1174       _torus.partition(_torus.rank(),o,coarseSize,o_interior,s_interior);
1175 
1176       // get offset by modifying o_interior according to overlap
1177       std::array<int,dim> offset(o_interior);
1178       for (int i=0; i<dim; i++)
1179         if ((periodic[i]) || (o_interior[i] > 0))
1180           offset[i] -= overlap;
1181 
1182       TensorProductCoordinates<ctype,dim> cc(coords, offset);
1183 
1184       // add level
1185       makelevel(cc,periodic,o_interior,overlap);
1186 
1187       init();
1188     }
1189 
1190     // the backup-restore facility needs access to the above constructor
1191     friend struct BackupRestoreFacility<YaspGrid<dim,Coordinates> >;
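
    /* A minimal usage sketch (not a definitive recipe): the private
     * tensorproduct constructor above is exercised indirectly through
     * Dune::BackupRestoreFacility.  "grid" denotes an existing grid object and
     * the file name "yaspgrid.backup" is an illustrative choice only.
     *
     * \code
     * using Grid = Dune::YaspGrid<2, Dune::TensorProductCoordinates<double,2> >;
     * // write the grid to a file ...
     * Dune::BackupRestoreFacility<Grid>::backup(grid, "yaspgrid.backup");
     * // ... and recreate it later; the caller takes ownership of the result
     * std::unique_ptr<Grid> restored(Dune::BackupRestoreFacility<Grid>::restore("yaspgrid.backup"));
     * \endcode
     */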
1192 
1193     // do not copy this class
1194     YaspGrid(const YaspGrid&);
1195 
1196   public:
1197 
1198     /*! Return maximum level defined in this grid. Levels are numbered
1199           0 ... maxlevel with 0 the coarsest level.
1200      */
1201     int maxLevel() const
1202     {
1203       return _levels.size()-1;
1204     }
1205 
1206     //! refine the grid refCount times.
1207     void globalRefine (int refCount)
1208     {
1209       if (refCount < -maxLevel())
1210         DUNE_THROW(GridError, "Only " << maxLevel() << " levels left. " <<
1211                    "Coarsening " << -refCount << " levels requested!");
1212 
1213       // If refCount is negative then coarsen the grid
1214       for (int k=refCount; k<0; k++)
1215       {
1216         // create an empty grid level
1217         YGridLevel empty;
1218         _levels.back() = empty;
1219         // reduce maxlevel
1220         _levels.pop_back();
1221 
1222         indexsets.pop_back();
1223       }
1224 
1225       // If refCount is positive refine the grid
1226       for (int k=0; k<refCount; k++)
1227       {
1228         // access to coarser grid level
1229         YGridLevel& cg = _levels[maxLevel()];
1230 
1231         std::bitset<dim> ovlp_low(0ULL), ovlp_up(0ULL);
1232         for (int i=0; i<dim; i++)
1233         {
1234           if (cg.overlap[0].dataBegin()->origin(i) > 0 || _periodic[i])
1235             ovlp_low[i] = true;
1236           if (cg.overlap[0].dataBegin()->max(i) + 1 < globalSize(i) || _periodic[i])
1237             ovlp_up[i] = true;
1238         }
1239 
1240         Coordinates newcont(cg.coords.refine(ovlp_low, ovlp_up, cg.overlapSize, keep_ovlp));
1241 
1242         int overlap = (keep_ovlp) ? 2*cg.overlapSize : cg.overlapSize;
1243 
1244         // determine new origin
1245         iTupel o_interior;
1246         for (int i=0; i<dim; i++)
1247           o_interior[i] = 2*cg.interior[0].dataBegin()->origin(i);
1248 
1249         // add level
1250         _levels.resize(_levels.size() + 1);
1251         makelevel(newcont,_periodic,o_interior,overlap);
1252 
1253         indexsets.push_back( std::make_shared<YaspIndexSet<const YaspGrid<dim,Coordinates>, false > >(*this,maxLevel()) );
1254       }
1255     }
1256 
1257     /**
1258        \brief set options for refinement
1259        @param keepPhysicalOverlap [true] keep the physical size of the overlap, [false] keep the number of cells in the overlap.  Default is [true].
1260      */
1261     void refineOptions (bool keepPhysicalOverlap)
1262     {
1263       keep_ovlp = keepPhysicalOverlap;
1264     }
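
    /* Usage sketch (assuming the equidistant constructor declared earlier in
     * this file): choose the overlap policy before refining.  With
     * keepPhysicalOverlap==true the overlap keeps its physical width, so the
     * number of overlap cells doubles on each finer level; with false the
     * number of overlap cells stays fixed.
     *
     * \code
     * Dune::YaspGrid<2> grid({1.0, 1.0}, {8, 8});   // unit square, 8x8 cells
     * grid.refineOptions(false);                    // keep the number of overlap cells
     * grid.globalRefine(2);                         // two levels of uniform refinement
     * \endcode
     */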
1265 
1266     /** \brief Marks an entity to be refined/coarsened in a subsequent adapt.
1267 
1268        \param[in] refCount Number of subdivisions that should be applied. Negative value means coarsening.
1269        \param[in] e        Entity that should be refined
1270 
1271        \return true if Entity was marked, false otherwise.
1272 
1273        \note
1274           -  On YaspGrid, marking one element marks all other elements of that level as well
1275           -  If refCount is lower than the refCount of a previous mark call, nothing is changed
1276      */
1277     bool mark( int refCount, const typename Traits::template Codim<0>::Entity & e )
1278     {
1279       assert(adaptActive == false);
1280       if (e.level() != maxLevel()) return false;
1281       adaptRefCount = std::max(adaptRefCount, refCount);
1282       return true;
1283     }
1284 
1285     /** \brief returns adaptation mark for given entity
1286 
1287        \param[in] e   Entity for which adaptation mark should be determined
1288 
1289        \return int adaptation mark, here the default value 0 is returned
1290      */
1291     int getMark ( const typename Traits::template Codim<0>::Entity &e ) const
1292     {
1293       return ( e.level() == maxLevel() ) ? adaptRefCount : 0;
1294     }
1295 
1296     //! map adapt to global refine
1297     bool adapt ()
1298     {
1299       globalRefine(adaptRefCount);
1300       return (adaptRefCount > 0);
1301     }
1302 
1303     //! returns true, if the grid will be coarsened
1304     bool preAdapt ()
1305     {
1306       adaptActive = true;
1307       adaptRefCount = comm().max(adaptRefCount);
1308       return (adaptRefCount < 0);
1309     }
1310 
1311     //! clean up some markers
1312     void postAdapt()
1313     {
1314       adaptActive = false;
1315       adaptRefCount = 0;
1316     }
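
    /* Usage sketch of the adaptation interface: since YaspGrid only refines
     * uniformly, adapt() maps to globalRefine().  Here "element" stands for
     * any codim-0 entity on the current maxLevel().
     *
     * \code
     * grid.mark(1, element);   // implicitly marks the whole finest level
     * grid.preAdapt();         // synchronizes the marks across all processes
     * grid.adapt();            // performs the refinement
     * grid.postAdapt();        // clears the marks
     * \endcode
     */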
1317 
1318     //! Iterator to first entity of given codim on level for partition type
1319     template<int cd, PartitionIteratorType pitype>
1320     typename Traits::template Codim<cd>::template Partition<pitype>::LevelIterator lbegin (int level) const
1321     {
1322       return levelbegin<cd,pitype>(level);
1323     }
1324 
1325     //! Iterator to one past the last entity of given codim on level for partition type
1326     template<int cd, PartitionIteratorType pitype>
1327     typename Traits::template Codim<cd>::template Partition<pitype>::LevelIterator lend (int level) const
1328     {
1329       return levelend<cd,pitype>(level);
1330     }
1331 
1332     //! version without second template parameter for convenience
1333     template<int cd>
1334     typename Traits::template Codim<cd>::template Partition<All_Partition>::LevelIterator lbegin (int level) const
1335     {
1336       return levelbegin<cd,All_Partition>(level);
1337     }
1338 
1339     //! version without second template parameter for convenience
1340     template<int cd>
1341     typename Traits::template Codim<cd>::template Partition<All_Partition>::LevelIterator lend (int level) const
1342     {
1343       return levelend<cd,All_Partition>(level);
1344     }
1345 
1346     //! return LeafIterator which points to the first entity in maxLevel
1347     template<int cd, PartitionIteratorType pitype>
1348     typename Traits::template Codim<cd>::template Partition<pitype>::LeafIterator leafbegin () const
1349     {
1350       return levelbegin<cd,pitype>(maxLevel());
1351     }
1352 
1353     //! return LeafIterator which points behind the last entity in maxLevel
1354     template<int cd, PartitionIteratorType pitype>
1355     typename Traits::template Codim<cd>::template Partition<pitype>::LeafIterator leafend () const
1356     {
1357       return levelend<cd,pitype>(maxLevel());
1358     }
1359 
1360     //! return LeafIterator which points to the first entity in maxLevel
1361     template<int cd>
1362     typename Traits::template Codim<cd>::template Partition<All_Partition>::LeafIterator leafbegin () const
1363     {
1364       return levelbegin<cd,All_Partition>(maxLevel());
1365     }
1366 
1367     //! return LeafIterator which points behind the last entity in maxLevel
1368     template<int cd>
1369     typename Traits::template Codim<cd>::template Partition<All_Partition>::LeafIterator leafend () const
1370     {
1371       return levelend<cd,All_Partition>(maxLevel());
1372     }
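
    /* Usage sketch: direct use of the level and leaf iterators declared above.
     * In application code the grid view interface is usually preferred; the
     * raw iterators are shown here for illustration on a 2d grid.
     * doSomethingWith() is a placeholder for user code.
     *
     * \code
     * // loop over the interior cells (codim 0) of level 0
     * for (auto it = grid.lbegin<0, Dune::Interior_Partition>(0),
     *           end = grid.lend<0, Dune::Interior_Partition>(0); it != end; ++it)
     *   doSomethingWith(*it);
     *
     * // loop over all leaf vertices (codim == dim == 2)
     * for (auto it = grid.leafbegin<2>(), end = grid.leafend<2>(); it != end; ++it)
     *   doSomethingWith(*it);
     * \endcode
     */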
1373 
1374     /** \brief Obtain an Entity from an EntitySeed. */
1375     template <typename Seed>
1376     typename Traits::template Codim<Seed::codimension>::Entity
1377     entity(const Seed& seed) const
1378     {
1379       const int codim = Seed::codimension;
1380       YGridLevelIterator g = begin(seed.impl().level());
1381 
1382       typedef typename Traits::template Codim<Seed::codimension>::Entity Entity;
1383       typedef YaspEntity<codim,dim,const YaspGrid> EntityImp;
1384       typedef typename YGrid::Iterator YIterator;
1385 
1386       return Entity(EntityImp(g,YIterator(g->overlapfront[codim],seed.impl().coord(),seed.impl().offset())));
1387     }
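
    /* Usage sketch: an EntitySeed is a minimal handle that can be stored
     * cheaply and turned back into a full Entity through the grid.
     *
     * \code
     * auto it = grid.leafbegin<0>();        // some codim-0 entity
     * auto seed = it->seed();               // cheap to store
     * auto element = grid.entity(seed);     // recover the full Entity later
     * \endcode
     */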
1388 
1389     //! return size (= distance in graph) of overlap region
1390     int overlapSize (int level, [[maybe_unused]] int codim) const
1391     {
1392       YGridLevelIterator g = begin(level);
1393       return g->overlapSize;
1394     }
1395 
1396     //! return size (= distance in graph) of overlap region
1397     int overlapSize ([[maybe_unused]] int odim) const
1398     {
1399       YGridLevelIterator g = begin(maxLevel());
1400       return g->overlapSize;
1401     }
1402 
1403     //! return size (= distance in graph) of ghost region
1404     int ghostSize ([[maybe_unused]] int level, [[maybe_unused]] int codim) const
1405     {
1406       return 0;
1407     }
1408 
1409     //! return size (= distance in graph) of ghost region
1410     int ghostSize ([[maybe_unused]] int codim) const
1411     {
1412       return 0;
1413     }
1414 
1415     //! number of entities per level and codim in this process
1416     int size (int level, int codim) const
1417     {
1418       YGridLevelIterator g = begin(level);
1419 
1420       // sum over all components of the codimension
1421       int count = 0;
1422       typedef typename std::array<YGridComponent<Coordinates>, StaticPower<2,dim>::power>::iterator DAI;
1423       for (DAI it = g->overlapfront[codim].dataBegin(); it != g->overlapfront[codim].dataEnd(); ++it)
1424         count += it->totalsize();
1425 
1426       return count;
1427     }
1428 
1429     //! number of leaf entities per codim in this process
1430     int size (int codim) const
1431     {
1432       return size(maxLevel(),codim);
1433     }
1434 
1435     //! number of entities per level and geometry type in this process
1436     int size (int level, GeometryType type) const
1437     {
1438       return (type.isCube()) ? size(level,dim-type.dim()) : 0;
1439     }
1440 
1441     //! number of leaf entities per geometry type in this process
1442     int size (GeometryType type) const
1443     {
1444       return size(maxLevel(),type);
1445     }
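
    /* Usage sketch of the per-process size queries, assuming a 2d grid:
     * codim 0 are cells, codim dim are vertices.
     *
     * \code
     * int leafCells    = grid.size(0);                            // leaf, codim 0
     * int level0Cells  = grid.size(0, 0);                         // level 0, codim 0
     * int leafVertices = grid.size(Dune::GeometryTypes::vertex);  // leaf, by geometry type
     * \endcode
     */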
1446 
1447     //! \brief returns the number of boundary segments within the macro grid
1448     size_t numBoundarySegments () const
1449     {
1450       return nBSegments;
1451     }
1452 
1453     //! \brief returns the size of the physical domain
1454     const Dune::FieldVector<ctype, dim>& domainSize () const {
1455       return _L;
1456     }
1457 
1458     /*! The new communication interface
1459 
1460        communicate objects for all codims on a given level
1461      */
1462     template<class DataHandleImp, class DataType>
1463     void communicate (CommDataHandleIF<DataHandleImp,DataType> & data, InterfaceType iftype, CommunicationDirection dir, int level) const
1464     {
1465       YaspCommunicateMeta<dim,dim>::comm(*this,data,iftype,dir,level);
1466     }
1467 
1468     /*! The new communication interface
1469 
1470        communicate objects for all codims on the leaf grid
1471      */
1472     template<class DataHandleImp, class DataType>
1473     void communicate (CommDataHandleIF<DataHandleImp,DataType> & data, InterfaceType iftype, CommunicationDirection dir) const
1474     {
1475       YaspCommunicateMeta<dim,dim>::comm(*this,data,iftype,dir,this->maxLevel());
1476     }
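
    /* Sketch of a data handle for the communicate() methods above.  The class
     * RankExchange is purely illustrative; any model of CommDataHandleIF is
     * used the same way.  It sends one int (the sender's rank) per codim-0
     * entity and discards the received values.
     *
     * \code
     * struct RankExchange
     *   : Dune::CommDataHandleIF<RankExchange, int>
     * {
     *   int myRank;
     *   explicit RankExchange(int rank) : myRank(rank) {}
     *
     *   bool contains (int dim, int codim) const { return codim == 0; }
     *   bool fixedSize (int dim, int codim) const { return true; }
     *
     *   template<class Entity>
     *   std::size_t size (const Entity&) const { return 1; }
     *
     *   template<class Buffer, class Entity>
     *   void gather (Buffer& buf, const Entity&) const { buf.write(myRank); }
     *
     *   template<class Buffer, class Entity>
     *   void scatter (Buffer& buf, const Entity&, std::size_t) { int r; buf.read(r); }
     * };
     *
     * RankExchange handle(grid.comm().rank());
     * grid.communicate(handle, Dune::InteriorBorder_All_Interface,
     *                  Dune::ForwardCommunication);
     * \endcode
     */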
1477 
1478     /*! The new communication interface
1479 
1480        communicate objects for one codim
1481      */
1482     template<class DataHandle, int codim>
1483     void communicateCodim (DataHandle& data, InterfaceType iftype, CommunicationDirection dir, int level) const
1484     {
1485       // check input
1486       if (!data.contains(dim,codim)) return; // should have been checked outside
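
      // Overview: the exchange proceeds in two phases.
      //  1) If the data handle reports variable size, the per-entity sizes are
      //     gathered on the sender side and exchanged first, so that both sides
      //     know how large the data buffers have to be.
      //  2) The actual data is packed via gather(), exchanged through the torus
      //     communication pattern, and unpacked via scatter().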
1487 
1488       // data types
1489       typedef typename DataHandle::DataType DataType;
1490 
1491       // access to grid level
1492       YGridLevelIterator g = begin(level);
1493 
1494       // find send/recv lists or throw error
1495       const YGridList<Coordinates>* sendlist = 0;
1496       const YGridList<Coordinates>* recvlist = 0;
1497 
1498       if (iftype==InteriorBorder_InteriorBorder_Interface)
1499       {
1500         sendlist = &g->send_interiorborder_interiorborder[codim];
1501         recvlist = &g->recv_interiorborder_interiorborder[codim];
1502       }
1503       if (iftype==InteriorBorder_All_Interface)
1504       {
1505         sendlist = &g->send_interiorborder_overlapfront[codim];
1506         recvlist = &g->recv_overlapfront_interiorborder[codim];
1507       }
1508       if (iftype==Overlap_OverlapFront_Interface || iftype==Overlap_All_Interface)
1509       {
1510         sendlist = &g->send_overlap_overlapfront[codim];
1511         recvlist = &g->recv_overlapfront_overlap[codim];
1512       }
1513       if (iftype==All_All_Interface)
1514       {
1515         sendlist = &g->send_overlapfront_overlapfront[codim];
1516         recvlist = &g->recv_overlapfront_overlapfront[codim];
1517       }
1518 
1519       // change communication direction?
1520       if (dir==BackwardCommunication)
1521         std::swap(sendlist,recvlist);
1522 
1523       int cnt;
1524 
1525       // Size computation (requires communication if variable size)
1526       std::vector<int> send_size(sendlist->size(),-1);    // map rank to total number of objects (of type DataType) to be sent
1527       std::vector<int> recv_size(recvlist->size(),-1);    // map rank to total number of objects (of type DataType) to be recvd
1528       std::vector<size_t*> send_sizes(sendlist->size(),static_cast<size_t*>(0)); // map rank to array giving number of objects per entity to be sent
1529       std::vector<size_t*> recv_sizes(recvlist->size(),static_cast<size_t*>(0)); // map rank to array giving number of objects per entity to be recvd
1530 
1531       // define type to iterate over send and recv lists
1532       typedef typename YGridList<Coordinates>::Iterator ListIt;
1533 
1534       if (data.fixedSize(dim,codim))
1535       {
1536         // fixed size: just take a dummy entity, size can be computed without communication
1537         cnt=0;
1538         for (ListIt is=sendlist->begin(); is!=sendlist->end(); ++is)
1539         {
1540           typename Traits::template Codim<codim>::template Partition<All_Partition>::LevelIterator
1541           it(YaspLevelIterator<codim,All_Partition,GridImp>(g, typename YGrid::Iterator(is->yg)));
1542           send_size[cnt] = is->grid.totalsize() * data.size(*it);
1543           cnt++;
1544         }
1545         cnt=0;
1546         for (ListIt is=recvlist->begin(); is!=recvlist->end(); ++is)
1547         {
1548           typename Traits::template Codim<codim>::template Partition<All_Partition>::LevelIterator
1549           it(YaspLevelIterator<codim,All_Partition,GridImp>(g, typename YGrid::Iterator(is->yg)));
1550           recv_size[cnt] = is->grid.totalsize() * data.size(*it);
1551           cnt++;
1552         }
1553       }
1554       else
1555       {
1556         // variable size case: sender side determines the size
1557         cnt=0;
1558         for (ListIt is=sendlist->begin(); is!=sendlist->end(); ++is)
1559         {
1560           // allocate send buffer for sizes per entity
1561           size_t *buf = new size_t[is->grid.totalsize()];
1562           send_sizes[cnt] = buf;
1563 
1564           // loop over entities and ask for size
1565           int i=0; size_t n=0;
1566           typename Traits::template Codim<codim>::template Partition<All_Partition>::LevelIterator
1567           it(YaspLevelIterator<codim,All_Partition,GridImp>(g, typename YGrid::Iterator(is->yg)));
1568           typename Traits::template Codim<codim>::template Partition<All_Partition>::LevelIterator
1569           itend(YaspLevelIterator<codim,All_Partition,GridImp>(g, typename YGrid::Iterator(is->yg,true)));
1570           for ( ; it!=itend; ++it)
1571           {
1572             buf[i] = data.size(*it);
1573             n += buf[i];
1574             i++;
1575           }
1576 
1577           // now we know the size for this rank
1578           send_size[cnt] = n;
1579 
1580           // hand over send request to torus class
1581           torus().send(is->rank,buf,is->grid.totalsize()*sizeof(size_t));
1582           cnt++;
1583         }
1584 
1585         // allocate recv buffers for sizes and store receive request
1586         cnt=0;
1587         for (ListIt is=recvlist->begin(); is!=recvlist->end(); ++is)
1588         {
1589           // allocate recv buffer
1590           size_t *buf = new size_t[is->grid.totalsize()];
1591           recv_sizes[cnt] = buf;
1592 
1593           // hand over recv request to torus class
1594           torus().recv(is->rank,buf,is->grid.totalsize()*sizeof(size_t));
1595           cnt++;
1596         }
1597 
1598         // exchange all size buffers now
1599         torus().exchange();
1600 
1601         // release send size buffers
1602         cnt=0;
1603         for (ListIt is=sendlist->begin(); is!=sendlist->end(); ++is)
1604         {
1605           delete[] send_sizes[cnt];
1606           send_sizes[cnt] = 0;
1607           cnt++;
1608         }
1609 
1610         // process receive size buffers
1611         cnt=0;
1612         for (ListIt is=recvlist->begin(); is!=recvlist->end(); ++is)
1613         {
1614           // get recv buffer
1615           size_t *buf = recv_sizes[cnt];
1616 
1617           // compute total size
1618           size_t n=0;
1619           for (int i=0; i<is->grid.totalsize(); ++i)
1620             n += buf[i];
1621 
1622           // ... and store it
1623           recv_size[cnt] = n;
1624           ++cnt;
1625         }
1626       }
1627 
1628 
1629       // allocate & fill the send buffers & store send request
1630       std::vector<DataType*> sends(sendlist->size(), static_cast<DataType*>(0)); // store pointers to send buffers
1631       cnt=0;
1632       for (ListIt is=sendlist->begin(); is!=sendlist->end(); ++is)
1633       {
1634         // allocate send buffer
1635         DataType *buf = new DataType[send_size[cnt]];
1636 
1637         // remember send buffer
1638         sends[cnt] = buf;
1639 
1640         // make a message buffer
1641         MessageBuffer<DataType> mb(buf);
1642 
1643         // fill send buffer; iterate over cells in intersection
1644         typename Traits::template Codim<codim>::template Partition<All_Partition>::LevelIterator
1645         it(YaspLevelIterator<codim,All_Partition,GridImp>(g, typename YGrid::Iterator(is->yg)));
1646         typename Traits::template Codim<codim>::template Partition<All_Partition>::LevelIterator
1647         itend(YaspLevelIterator<codim,All_Partition,GridImp>(g, typename YGrid::Iterator(is->yg,true)));
1648         for ( ; it!=itend; ++it)
1649           data.gather(mb,*it);
1650 
1651         // hand over send request to torus class
1652         torus().send(is->rank,buf,send_size[cnt]*sizeof(DataType));
1653         cnt++;
1654       }
1655 
1656       // allocate recv buffers and store receive request
1657       std::vector<DataType*> recvs(recvlist->size(),static_cast<DataType*>(0)); // store pointers to receive buffers
1658       cnt=0;
1659       for (ListIt is=recvlist->begin(); is!=recvlist->end(); ++is)
1660       {
1661         // allocate recv buffer
1662         DataType *buf = new DataType[recv_size[cnt]];
1663 
1664         // remember recv buffer
1665         recvs[cnt] = buf;
1666 
1667         // hand over recv request to torus class
1668         torus().recv(is->rank,buf,recv_size[cnt]*sizeof(DataType));
1669         cnt++;
1670       }
1671 
1672       // exchange all buffers now
1673       torus().exchange();
1674 
1675       // release send buffers
1676       cnt=0;
1677       for (ListIt is=sendlist->begin(); is!=sendlist->end(); ++is)
1678       {
1679         delete[] sends[cnt];
1680         sends[cnt] = 0;
1681         cnt++;
1682       }
1683 
1684       // process receive buffers and delete them
1685       cnt=0;
1686       for (ListIt is=recvlist->begin(); is!=recvlist->end(); ++is)
1687       {
1688         // get recv buffer
1689         DataType *buf = recvs[cnt];
1690 
1691         // make a message buffer
1692         MessageBuffer<DataType> mb(buf);
1693 
1694         // copy data from receive buffer; iterate over cells in intersection
1695         if (data.fixedSize(dim,codim))
1696         {
1697           typename Traits::template Codim<codim>::template Partition<All_Partition>::LevelIterator
1698           it(YaspLevelIterator<codim,All_Partition,GridImp>(g, typename YGrid::Iterator(is->yg)));
1699           size_t n=data.size(*it);
1700           typename Traits::template Codim<codim>::template Partition<All_Partition>::LevelIterator
1701           itend(YaspLevelIterator<codim,All_Partition,GridImp>(g, typename YGrid::Iterator(is->yg,true)));
1702           for ( ; it!=itend; ++it)
1703             data.scatter(mb,*it,n);
1704         }
1705         else
1706         {
1707           int i=0;
1708           size_t *sbuf = recv_sizes[cnt];
1709           typename Traits::template Codim<codim>::template Partition<All_Partition>::LevelIterator
1710           it(YaspLevelIterator<codim,All_Partition,GridImp>(g, typename YGrid::Iterator(is->yg)));
1711           typename Traits::template Codim<codim>::template Partition<All_Partition>::LevelIterator
1712           itend(YaspLevelIterator<codim,All_Partition,GridImp>(g, typename YGrid::Iterator(is->yg,true)));
1713           for ( ; it!=itend; ++it)
1714             data.scatter(mb,*it,sbuf[i++]);
1715           delete[] sbuf;
1716         }
1717 
1718         // delete buffer
1719         delete[] buf; // it crashes here!
1720         cnt++;
1721       }
1722     }
1723 
1724     // The new index sets from DDM 11.07.2005
1725     const typename Traits::GlobalIdSet& globalIdSet() const
1726     {
1727       return theglobalidset;
1728     }
1729 
1730     const typename Traits::LocalIdSet& localIdSet() const
1731     {
1732       return theglobalidset;
1733     }
1734 
1735     const typename Traits::LevelIndexSet& levelIndexSet(int level) const
1736     {
1737       if (level<0 || level>maxLevel()) DUNE_THROW(RangeError, "level out of range");
1738       return *(indexsets[level]);
1739     }
1740 
1741     const typename Traits::LeafIndexSet& leafIndexSet() const
1742     {
1743       return leafIndexSet_;
1744     }
1745 
1746     /*! @brief return a collective communication object
1747      */
1748     const CollectiveCommunicationType& comm () const
1749     {
1750       return ccobj;
1751     }
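
    /* Usage sketch: consecutive indices (suitable for array storage) come from
     * the index sets, persistent identifiers from the id sets.
     *
     * \code
     * const auto& indexSet = grid.leafIndexSet();
     * const auto& idSet    = grid.globalIdSet();
     * auto it = grid.leafbegin<0>();
     * auto index = indexSet.index(*it);   // consecutive within the leaf grid
     * auto id    = idSet.id(*it);         // persistent across refinement
     * \endcode
     */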
1752 
1753   private:
1754 
1755     // number of boundary segments of the level 0 grid
1756     int nBSegments;
1757 
1758     // Index classes need access to the real entity
1759     friend class Dune::YaspIndexSet<const Dune::YaspGrid<dim, Coordinates>, true >;
1760     friend class Dune::YaspIndexSet<const Dune::YaspGrid<dim, Coordinates>, false >;
1761     friend class Dune::YaspGlobalIdSet<const Dune::YaspGrid<dim, Coordinates> >;
1762     friend class Dune::YaspPersistentContainerIndex<const Dune::YaspGrid<dim, Coordinates> >;
1763 
1764     friend class Dune::YaspIntersectionIterator<const Dune::YaspGrid<dim, Coordinates> >;
1765     friend class Dune::YaspIntersection<const Dune::YaspGrid<dim, Coordinates> >;
1766     friend class Dune::YaspEntity<0, dim, const Dune::YaspGrid<dim, Coordinates> >;
1767 
1768     template<int codim_, int dim_, class GridImp_, template<int,int,class> class EntityImp_>
1769     friend class Entity;
1770 
1771     template<class DT>
1772     class MessageBuffer {
1773     public:
1774       // Constructor
1775       MessageBuffer (DT *p)
1776       {
1777         a=p;
1778         i=0;
1779         j=0;
1780       }
1781 
1782       // write data to message buffer; acts like a stream
1783       template<class Y>
1784       void write (const Y& data)
1785       {
1786         static_assert(( std::is_same<DT,Y>::value ), "DataType mismatch");
1787         a[i++] = data;
1788       }
1789 
1790       // read data from message buffer; acts like a stream
1791       template<class Y>
1792       void read (Y& data) const
1793       {
1794         static_assert(( std::is_same<DT,Y>::value ), "DataType mismatch");
1795         data = a[j++];
1796       }
1797 
1798     private:
1799       DT *a;
1800       int i;
1801       mutable int j;
1802     };
1803 
1804     //! Iterator to first entity of given codim on level for partition type
1805     template<int cd, PartitionIteratorType pitype>
1806     YaspLevelIterator<cd,pitype,GridImp> levelbegin (int level) const
1807     {
1808       if (level<0 || level>maxLevel()) DUNE_THROW(RangeError, "level out of range");
1809       YGridLevelIterator g = begin(level);
1810 
1811       if (pitype==Interior_Partition)
1812         return YaspLevelIterator<cd,pitype,GridImp>(g,g->interior[cd].begin());
1813       if (pitype==InteriorBorder_Partition)
1814         return YaspLevelIterator<cd,pitype,GridImp>(g,g->interiorborder[cd].begin());
1815       if (pitype==Overlap_Partition)
1816         return YaspLevelIterator<cd,pitype,GridImp>(g,g->overlap[cd].begin());
1817       if (pitype<=All_Partition)
1818         return YaspLevelIterator<cd,pitype,GridImp>(g,g->overlapfront[cd].begin());
1819       if (pitype==Ghost_Partition)
1820         return levelend <cd, pitype> (level);
1821 
1822       DUNE_THROW(GridError, "YaspLevelIterator with this codim or partition type not implemented");
1823     }
1824 
1825     //! Iterator to one past the last entity of given codim on level for partition type
1826     template<int cd, PartitionIteratorType pitype>
1827     YaspLevelIterator<cd,pitype,GridImp> levelend (int level) const
1828     {
1829       if (level<0 || level>maxLevel()) DUNE_THROW(RangeError, "level out of range");
1830       YGridLevelIterator g = begin(level);
1831 
1832       if (pitype==Interior_Partition)
1833         return YaspLevelIterator<cd,pitype,GridImp>(g,g->interior[cd].end());
1834       if (pitype==InteriorBorder_Partition)
1835         return YaspLevelIterator<cd,pitype,GridImp>(g,g->interiorborder[cd].end());
1836       if (pitype==Overlap_Partition)
1837         return YaspLevelIterator<cd,pitype,GridImp>(g,g->overlap[cd].end());
1838       if (pitype<=All_Partition || pitype == Ghost_Partition)
1839         return YaspLevelIterator<cd,pitype,GridImp>(g,g->overlapfront[cd].end());
1840 
1841       DUNE_THROW(GridError, "YaspLevelIterator with this codim or partition type not implemented");
1842     }
1843 
1844     CollectiveCommunicationType ccobj;
1845 
1846     Torus<CollectiveCommunicationType,dim> _torus;
1847 
1848     std::vector< std::shared_ptr< YaspIndexSet<const YaspGrid<dim,Coordinates>, false > > > indexsets;
1849     YaspIndexSet<const YaspGrid<dim,Coordinates>, true> leafIndexSet_;
1850     YaspGlobalIdSet<const YaspGrid<dim,Coordinates> > theglobalidset;
1851 
1852     Dune::FieldVector<ctype, dim> _L;
1853     iTupel _s;
1854     std::bitset<dim> _periodic;
1855     iTupel _coarseSize;
1856     ReservedVector<YGridLevel,32> _levels;
1857     int _overlap;
1858     bool keep_ovlp;
1859     int adaptRefCount;
1860     bool adaptActive;
1861   };
1862 
1863 #if __cpp_deduction_guides >= 201611
1864   // Class template deduction guides
1865   template<typename ctype, int dim>
1866   YaspGrid(FieldVector<ctype, dim>,
1867            std::array<int, std::size_t{dim}>,
1868            std::bitset<std::size_t{dim}> = std::bitset<std::size_t{dim}>{0ULL},
1869            int = 1,
1870            YaspCollectiveCommunication = YaspCollectiveCommunication(),
1871            const YLoadBalance<dim>* = YaspGrid< dim, EquidistantCoordinates<ctype, dim> >::defaultLoadbalancer())
1872     -> YaspGrid< dim, EquidistantCoordinates<ctype, dim> >;
1873 
1874   template<typename ctype, int dim>
1875   YaspGrid(FieldVector<ctype, dim>,
1876            FieldVector<ctype, dim>,
1877            std::array<int, std::size_t{dim}>,
1878            std::bitset<std::size_t{dim}> = std::bitset<std::size_t{dim}>{0ULL},
1879            int = 1,
1880            YaspCollectiveCommunication = YaspCollectiveCommunication(),
1881            const YLoadBalance<dim>* = YaspGrid< dim, EquidistantOffsetCoordinates<ctype, dim> >::defaultLoadbalancer())
1882     -> YaspGrid< dim, EquidistantOffsetCoordinates<ctype, dim> >;
1883 
1884   template<typename ctype, std::size_t dim>
1885   YaspGrid(std::array<std::vector<ctype>, dim>,
1886            std::bitset<dim> = std::bitset<dim>{0ULL},
1887            int = 1,
1888            YaspCollectiveCommunication = YaspCollectiveCommunication(),
1889            const YLoadBalance<int{dim}>* = YaspGrid< int{dim}, TensorProductCoordinates<ctype, int{dim}> >::defaultLoadbalancer())
1890     -> YaspGrid< int{dim}, TensorProductCoordinates<ctype, int{dim}> >;
1891 #endif
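
  /* With the deduction guides above (and a C++17 compiler) the coordinate
   * container template parameter can be deduced from the constructor
   * arguments, e.g.:
   *
   * \code
   * Dune::FieldVector<double, 2> upperRight = {1.0, 1.0};
   * std::array<int, 2>           cells      = {8, 8};
   * Dune::YaspGrid grid(upperRight, cells);   // YaspGrid<2, EquidistantCoordinates<double,2> >
   * \endcode
   */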
1892 
1893   //! Output operator for multigrids
1894   template <int d, class CC>
1895   std::ostream& operator<< (std::ostream& s, const YaspGrid<d,CC>& grid)
1896   {
1897     int rank = grid.torus().rank();
1898 
1899     s << "[" << rank << "]:" << " YaspGrid maxlevel=" << grid.maxLevel() << std::endl;
1900 
1901     s << "Printing the torus: " <<std::endl;
1902     s << grid.torus() << std::endl;
1903 
1904     for (typename YaspGrid<d,CC>::YGridLevelIterator g=grid.begin(); g!=grid.end(); ++g)
1905     {
1906       s << "[" << rank << "]:   " << std::endl;
1907       s << "[" << rank << "]:   " << "==========================================" << std::endl;
1908       s << "[" << rank << "]:   " << "level=" << g->level() << std::endl;
1909 
1910       for (int codim = 0; codim < d + 1; ++codim)
1911       {
1912         s << "[" << rank << "]:   " << "overlapfront[" << codim << "]:    " << g->overlapfront[codim] << std::endl;
1913         s << "[" << rank << "]:   " << "overlap[" << codim << "]:    " << g->overlap[codim] << std::endl;
1914         s << "[" << rank << "]:   " << "interiorborder[" << codim << "]:    " << g->interiorborder[codim] << std::endl;
1915         s << "[" << rank << "]:   " << "interior[" << codim << "]:    " << g->interior[codim] << std::endl;
1916 
1917         typedef typename YGridList<CC>::Iterator I;
1918         for (I i=g->send_overlapfront_overlapfront[codim].begin();
1919                  i!=g->send_overlapfront_overlapfront[codim].end(); ++i)
1920           s << "[" << rank << "]:    " << " s_of_of[" << codim << "] to rank "
1921                    << i->rank << " " << i->grid << std::endl;
1922 
1923         for (I i=g->recv_overlapfront_overlapfront[codim].begin();
1924                  i!=g->recv_overlapfront_overlapfront[codim].end(); ++i)
1925           s << "[" << rank << "]:    " << " r_of_of[" << codim << "] to rank "
1926                    << i->rank << " " << i->grid << std::endl;
1927 
1928         for (I i=g->send_overlap_overlapfront[codim].begin();
1929                  i!=g->send_overlap_overlapfront[codim].end(); ++i)
1930           s << "[" << rank << "]:    " << " s_o_of[" << codim << "] to rank "
1931                    << i->rank << " " << i->grid << std::endl;
1932 
1933         for (I i=g->recv_overlapfront_overlap[codim].begin();
1934                  i!=g->recv_overlapfront_overlap[codim].end(); ++i)
1935           s << "[" << rank << "]:    " << " r_of_o[" << codim << "] to rank "
1936                    << i->rank << " " << i->grid << std::endl;
1937 
1938         for (I i=g->send_interiorborder_interiorborder[codim].begin();
1939                  i!=g->send_interiorborder_interiorborder[codim].end(); ++i)
1940           s << "[" << rank << "]:    " << " s_ib_ib[" << codim << "] to rank "
1941           << i->rank << " " << i->grid << std::endl;
1942 
1943         for (I i=g->recv_interiorborder_interiorborder[codim].begin();
1944                  i!=g->recv_interiorborder_interiorborder[codim].end(); ++i)
1945              s << "[" << rank << "]:    " << " r_ib_ib[" << codim << "] to rank "
1946              << i->rank << " " << i->grid << std::endl;
1947 
1948         for (I i=g->send_interiorborder_overlapfront[codim].begin();
1949                  i!=g->send_interiorborder_overlapfront[codim].end(); ++i)
1950              s << "[" << rank << "]:    " << " s_ib_of[" << codim << "] to rank "
1951              << i->rank << " " << i->grid << std::endl;
1952 
1953         for (I i=g->recv_overlapfront_interiorborder[codim].begin();
1954                  i!=g->recv_overlapfront_interiorborder[codim].end(); ++i)
1955              s << "[" << rank << "]:    " << " r_of_ib[" << codim << "] to rank "
1956              << i->rank << " " << i->grid << std::endl;
1957       }
1958     }
1959 
1960     s << std::endl;
1961 
1962     return s;
1963   }
1964 
1965   namespace Capabilities
1966   {
1967 
1968     /** \struct hasEntity
1969        \ingroup YaspGrid
1970      */
1971 
1972     /** \struct hasBackupRestoreFacilities
1973        \ingroup YaspGrid
1974      */
1975     template<int dim, class Coordinates>
1976     struct hasBackupRestoreFacilities< YaspGrid<dim, Coordinates> >
1977     {
1978       static const bool v = true;
1979     };
1980 
1981     /** \brief YaspGrid has only one geometry type for codim 0 entities
1982        \ingroup YaspGrid
1983      */
1984     template<int dim, class Coordinates>
1985     struct hasSingleGeometryType< YaspGrid<dim, Coordinates> >
1986     {
1987       static const bool v = true;
1988       static const unsigned int topologyId = GeometryTypes::cube(dim).id();
1989     };
1990 
1991     /** \brief YaspGrid is a Cartesian grid
1992         \ingroup YaspGrid
1993      */
1994     template<int dim, class Coordinates>
1995     struct isCartesian< YaspGrid<dim, Coordinates> >
1996     {
1997       static const bool v = true;
1998     };
1999 
2000     /** \brief YaspGrid has entities for all codimensions
2001        \ingroup YaspGrid
2002      */
2003     template<int dim, class Coordinates, int codim>
2004     struct hasEntity< YaspGrid<dim, Coordinates>, codim>
2005     {
2006       static const bool v = true;
2007     };
2008 
2009     /**
2010      * \brief YaspGrid can iterate over all codimensions
2011      * \ingroup YaspGrid
2012      **/
2013     template<int dim, class Coordinates, int codim>
2014     struct hasEntityIterator<YaspGrid<dim, Coordinates>, codim>
2015     {
2016       static const bool v = true;
2017     };
2018 
2019     /** \brief YaspGrid can communicate on all codimensions
2020      *  \ingroup YaspGrid
2021      */
2022     template<int dim, int codim, class Coordinates>
2023     struct canCommunicate< YaspGrid< dim, Coordinates>, codim >
2024     {
2025       static const bool v = true;
2026     };
2027 
2028     /** \brief YaspGrid is levelwise conforming
2029        \ingroup YaspGrid
2030      */
2031     template<int dim, class Coordinates>
2032     struct isLevelwiseConforming< YaspGrid<dim, Coordinates> >
2033     {
2034       static const bool v = true;
2035     };
2036 
2037     /** \brief YaspGrid is leafwise conforming
2038        \ingroup YaspGrid
2039      */
2040     template<int dim, class Coordinates>
2041     struct isLeafwiseConforming< YaspGrid<dim, Coordinates> >
2042     {
2043       static const bool v = true;
2044     };
2045 
2046   }
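
  /* The capability specializations above can be queried at compile time, for
   * example:
   *
   * \code
   * using Grid = Dune::YaspGrid<3>;
   * static_assert(Dune::Capabilities::isCartesian<Grid>::v,
   *               "YaspGrid is a Cartesian grid");
   * static_assert(Dune::Capabilities::hasEntity<Grid, 1>::v,
   *               "YaspGrid provides codim-1 entities (faces)");
   * \endcode
   */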
2047 
2048 } // end namespace Dune
2049 
2050 // Include the specialization of the StructuredGridFactory class for YaspGrid
2051 #include <dune/grid/yaspgrid/structuredyaspgridfactory.hh>
2052 // Include the specialization of the BackupRestoreFacility class for YaspGrid
2053 #include <dune/grid/yaspgrid/backuprestore.hh>
2054 
2055 #endif
2056