CpGridData.hpp
1//===========================================================================
2//
3// File: CpGridData.hpp
4//
5// Created: Sep 17 21:11:41 2013
6//
7// Author(s): Atgeirr F Rasmussen <atgeirr@sintef.no>
8// Bård Skaflestad <bard.skaflestad@sintef.no>
9// Markus Blatt <markus@dr-blatt.de>
10// Antonella Ritorto <antonella.ritorto@opm-op.com>
11//
12// Comment: Major parts of this file originated in dune/grid/CpGrid.hpp
13//            and got transferred here during refactoring for the parallelization.
14//
15// $Date$
16//
17// $Revision$
18//
19//===========================================================================
20
21/*
22 Copyright 2009, 2010 SINTEF ICT, Applied Mathematics.
23 Copyright 2009, 2010, 2013, 2022-2023 Equinor ASA.
24 Copyright 2013 Dr. Blatt - HPC-Simulation-Software & Services
25
26 This file is part of The Open Porous Media project (OPM).
27
28 OPM is free software: you can redistribute it and/or modify
29 it under the terms of the GNU General Public License as published by
30 the Free Software Foundation, either version 3 of the License, or
31 (at your option) any later version.
32
33 OPM is distributed in the hope that it will be useful,
34 but WITHOUT ANY WARRANTY; without even the implied warranty of
35 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
36 GNU General Public License for more details.
37
38 You should have received a copy of the GNU General Public License
39 along with OPM. If not, see <http://www.gnu.org/licenses/>.
40*/
48#ifndef OPM_CPGRIDDATA_HEADER
49#define OPM_CPGRIDDATA_HEADER
50
51
52#include <dune/common/parallel/mpihelper.hh>
53#ifdef HAVE_DUNE_ISTL
54#include <dune/istl/owneroverlapcopy.hh>
55#endif
56
57#include <dune/common/parallel/communication.hh>
58#include <dune/common/parallel/variablesizecommunicator.hh>
59#include <dune/grid/common/gridenums.hh>
60
61#if HAVE_ECL_INPUT
62#include <opm/input/eclipse/EclipseState/Grid/NNC.hpp>
63#endif
64
66
67#include "Entity2IndexDataHandle.hpp"
68#include "CpGridDataTraits.hpp"
69//#include "DataHandleWrappers.hpp"
70//#include "GlobalIdMapping.hpp"
71#include "Geometry.hpp"
72
73#include <array>
74#include <initializer_list>
75#include <set>
76#include <vector>
77
78namespace Opm
79{
80class EclipseState;
81}
82namespace Dune
83{
84class CpGrid;
85
86namespace cpgrid
87{
88
89class IndexSet;
90class IdSet;
91class LevelGlobalIdSet;
92class PartitionTypeIndicator;
93template<int,int> class Geometry;
94template<int> class Entity;
95template<int> class EntityRep;
96}
97}
98
99
100void markAndAdapt_check(Dune::CpGrid&,
101 const std::array<int,3>&,
102 const std::vector<int>&,
104 bool,
105 bool,
106 bool);
107
108void refine_and_check(const Dune::cpgrid::Geometry<3, 3>&,
109 const std::array<int, 3>&,
110 bool);
111
112void refinePatch_and_check(const std::array<int,3>&,
113 const std::array<int,3>&,
114 const std::array<int,3>&);
115
116void refinePatch_and_check(Dune::CpGrid&,
117 const std::vector<std::array<int,3>>&,
118 const std::vector<std::array<int,3>>&,
119 const std::vector<std::array<int,3>>&,
120 const std::vector<std::string>&);
121
122void check_global_refine(const Dune::CpGrid&,
123 const Dune::CpGrid&);
124
125void fieldProp_check(const Dune::CpGrid& grid, Opm::EclipseGrid eclGrid, std::string deck_string);
126
127void testInactiveCellsLgrs(const std::string&,
128 const std::vector<std::array<int,3>>&,
129 const std::vector<std::array<int,3>>&,
130 const std::vector<std::array<int,3>>&,
131 const std::vector<std::string>&);
132
133namespace Dune
134{
135namespace cpgrid
136{
137namespace mover
138{
139template<class T, int i> struct Mover;
140}
141
146class CpGridData
147{
148 template<class T, int i> friend struct mover::Mover;
149 friend class GlobalIdSet;
150 friend class HierarchicIterator;
151 friend class Dune::cpgrid::IndexSet;
152 friend class Dune::cpgrid::IdSet;
154
155 friend
156 void ::markAndAdapt_check(Dune::CpGrid&,
157 const std::array<int,3>&,
158 const std::vector<int>&,
160 bool,
161 bool,
162 bool);
163
164 friend
165 void ::refine_and_check(const Dune::cpgrid::Geometry<3, 3>&,
166 const std::array<int, 3>&,
167 bool);
168 friend
169 void ::refinePatch_and_check(const std::array<int,3>&,
170 const std::array<int,3>&,
171 const std::array<int,3>&);
172
173 friend
174 void ::refinePatch_and_check(Dune::CpGrid&,
175 const std::vector<std::array<int,3>>&,
176 const std::vector<std::array<int,3>>&,
177 const std::vector<std::array<int,3>>&,
178 const std::vector<std::string>&);
179
180 friend
181 void ::check_global_refine(const Dune::CpGrid&,
182 const Dune::CpGrid&);
183 friend
184 void ::fieldProp_check(const Dune::CpGrid& grid, Opm::EclipseGrid eclGrid, std::string deck_string);
185 friend
186 void ::testInactiveCellsLgrs(const std::string&,
187 const std::vector<std::array<int,3>>&,
188 const std::vector<std::array<int,3>>&,
189 const std::vector<std::array<int,3>>&,
190 const std::vector<std::string>&);
191
192private:
193 CpGridData(const CpGridData& g);
194
195public:
196 enum{
197#ifndef MAX_DATA_COMMUNICATED_PER_ENTITY
205#else
210 MAX_DATA_PER_CELL = MAX_DATA_COMMUNICATED_PER_ENTITY
211#endif
212 };
213
217 explicit CpGridData(MPIHelper::MPICommunicator comm, std::vector<std::shared_ptr<CpGridData>>& data);
218
219
220
222 explicit CpGridData(std::vector<std::shared_ptr<CpGridData>>& data);
224 ~CpGridData();
225
226
227
228
230 int size(int codim) const;
231
233 int size (GeometryType type) const
234 {
235 if (type.isCube()) {
236 return size(3 - type.dim());
237 } else {
238 return 0;
239 }
240 }
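    // Illustration: cube reference elements map codimension = 3 - dim, so a
    // hexahedral cell (dim 3) corresponds to codim 0 and a vertex (dim 0) to
    // codim 3; non-cube geometry types do not occur in a corner-point grid,
    // hence the count 0.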
241
247 void readEclipseFormat(const std::string& filename, bool periodic_extension, bool turn_normals = false);
248
249#if HAVE_ECL_INPUT
258 void processEclipseFormat(const Opm::Deck& deck, bool periodic_extension, bool turn_normals = false, bool clip_z = false,
259 const std::vector<double>& poreVolume = std::vector<double>());
260
274 std::vector<std::size_t>
275 processEclipseFormat(const Opm::EclipseGrid* ecl_grid, Opm::EclipseState* ecl_state,
276 bool periodic_extension, bool turn_normals = false, bool clip_z = false,
277 bool pinchActive = true);
278#endif
279
291 void processEclipseFormat(const grdecl& input_data,
292#if HAVE_ECL_INPUT
293 Opm::EclipseState* ecl_state,
294#endif
295 std::array<std::set<std::pair<int, int>>, 2>& nnc,
296 bool remove_ij_boundary, bool turn_normals, bool pinchActive,
297 double tolerance_unique_points);
298
306 void getIJK(int c, std::array<int,3>& ijk) const
307 {
308 ijk = getIJK(global_cell_[c], logical_cartesian_size_);
309 }
310
317 std::array<int,3> getIJK(int idx_in_parent_cell, const std::array<int,3>& cells_per_dim) const
318 {
319        // idx = k*cells_per_dim[0]*cells_per_dim[1] + j*cells_per_dim[0] + i,
320        // with 0 <= i < cells_per_dim[0], 0 <= j < cells_per_dim[1], 0 <= k < cells_per_dim[2].
321 assert(cells_per_dim[0]);
322 assert(cells_per_dim[1]);
323 assert(cells_per_dim[2]);
324
325 std::array<int,3> ijk = {0,0,0};
326 ijk[0] = idx_in_parent_cell % cells_per_dim[0]; idx_in_parent_cell /= cells_per_dim[0];
327 ijk[1] = idx_in_parent_cell % cells_per_dim[1];
328 ijk[2] = idx_in_parent_cell /cells_per_dim[1];
329 return ijk;
330 }
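    // Worked example: with cells_per_dim = {4,3,5} and idx_in_parent_cell = 37
    // (= 3*4*3 + 0*4 + 1), the statements above give i = 37 % 4 = 1, then
    // idx_in_parent_cell = 37/4 = 9, j = 9 % 3 = 0, k = 9/3 = 3, i.e. ijk = {1,0,3}.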
331
337 bool disjointPatches(const std::vector<std::array<int,3>>& startIJK_vec, const std::vector<std::array<int,3>>& endIJK_vec) const;
338
346 std::vector<int>
347 getPatchesCells(const std::vector<std::array<int,3>>& startIJK_vec, const std::vector<std::array<int,3>>& endIJK_vec) const;
348
351 bool hasNNCs(const std::vector<int>& cellIndices) const;
352
359 void validStartEndIJKs(const std::vector<std::array<int,3>>& startIJK_vec, const std::vector<std::array<int,3>>& endIJK_vec) const;
360
362 void checkCuboidShape(const std::vector<int>& cellIdx_vec) const;
363
369 bool patchesShareFace(const std::vector<std::array<int,3>>& startIJK_vec, const std::vector<std::array<int,3>>& endIJK_vec) const;
370
371 int sharedFaceTag(const std::vector<std::array<int,3>>& startIJK_2Patches, const std::vector<std::array<int,3>>& endIJK_2Patches) const;
372
373
386 bool mark(int refCount, const cpgrid::Entity<0>& element);
387
391 int getMark(const cpgrid::Entity<0>& element) const;
392
395 bool preAdapt();
396
398 bool adapt();
399
401 void postAdapt();
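    // Illustrative sketch of the usual Dune adaptation cycle driving these
    // methods through the CpGrid facade (not prescribed by this header):
    //
    //   for (const auto& element : elements(grid.leafGridView()))
    //       grid.mark(1, element); // >0 refine, 0 keep, <0 coarsen
    //   grid.preAdapt();
    //   grid.adapt();
    //   grid.postAdapt(); // resets all marks to 0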
402
403private:
404 std::array<Dune::FieldVector<double,3>,8> getReferenceRefinedCorners(int idx_in_parent_cell, const std::array<int,3>& cells_per_dim) const;
405
413 std::array<int,3> getPatchDim(const std::array<int,3>& startIJK, const std::array<int,3>& endIJK) const;
414
422 std::vector<int> getPatchCorners(const std::array<int,3>& startIJK, const std::array<int,3>& endIJK) const;
423
431 std::vector<int> getPatchFaces(const std::array<int,3>& startIJK, const std::array<int,3>& endIJK) const;
432
440 std::vector<int> getPatchCells(const std::array<int,3>& startIJK, const std::array<int,3>& endIJK) const;
441
449 std::vector<int> getPatchBoundaryCorners(const std::array<int,3>& startIJK, const std::array<int,3>& endIJK) const;
450
458 std::array<std::vector<int>,6> getBoundaryPatchFaces(const std::array<int,3>& startIJK, const std::array<int,3>& endIJK) const;
459
461 std::array<std::vector<double>,3> getWidthsLengthsHeights(const std::array<int,3>& startIJK, const std::array<int,3>& endIJK) const;
462
477 Geometry<3,3> cellifyPatch(const std::array<int,3>& startIJK, const std::array<int,3>& endIJK,
478 const std::vector<int>& patch_cells, DefaultGeometryPolicy& cellifiedPatch_geometry,
479 std::array<int,8>& cellifiedPatch_to_point,
480 std::array<int,8>& allcorners_cellifiedPatch) const;
481
482 // @brief Compute the average of array<double,3>.
483 //
484 // @param [in] vector of array<double,3>
485 // @return array<double,3> (average of the entries of the given vector).
486 std::array<double,3> getAverageArr(const std::vector<std::array<double,3>>& vec) const;
487
488public:
490 int getGridIdx() const {
491 // Not the nicest way of checking if "this" points at the leaf grid view of a mixed grid (with coarse and refined cells).
492        // 1. When the grid has been refined at least once, level_data_ptr_->size() > 1. Therefore, "this" might point at the leaf grid view.
493        // 2. Unfortunately, level_ is default-initialized to 0. This implies, in particular, that if someone checks the value of
494        //    "this->level_" when "this" points at the leaf grid view of a grid that has been refined, that value is also 0.
495        // 3. Due to 2., an extra check is needed to distinguish the actual level 0 grid from such a leaf grid view (with incorrect level_ == 0). For this
496        //    reason we check whether child_to_parent_cells_ is empty [true for the actual level 0 grid, false for the leaf grid view].
497 // --- TO BE IMPROVED ---
498        if ((level_data_ptr_->size() > 1) && (level_ == 0) && (!child_to_parent_cells_.empty())) {
499            return level_data_ptr_->size() - 1;
500 }
501 return level_;
502 }
504 std::vector<std::shared_ptr<Dune::cpgrid::CpGridData>> levelData() const
505 {
506 return *level_data_ptr_;
507 }
508
509
531 std::tuple< const std::shared_ptr<CpGridData>,
532 const std::vector<std::array<int,2>>, // parent_to_refined_corners(~boundary_old_to_new_corners)
533 const std::vector<std::tuple<int,std::vector<int>>>, // parent_to_children_faces (~boundary_old_to_new_faces)
534 const std::tuple<int, std::vector<int>>, // parent_to_children_cells
535 const std::vector<std::array<int,2>>, // child_to_parent_faces
536 const std::vector<std::array<int,2>>> // child_to_parent_cells
537 refineSingleCell(const std::array<int,3>& cells_per_dim, const int& parent_idx) const;
538
551 std::tuple< std::shared_ptr<CpGridData>,
552 const std::vector<std::array<int,2>>, // boundary_old_to_new_corners
553 const std::vector<std::tuple<int,std::vector<int>>>, // boundary_old_to_new_faces
554 const std::vector<std::tuple<int,std::vector<int>>>, // parent_to_children_faces
555 const std::vector<std::tuple<int,std::vector<int>>>, // parent_to_children_cell
556 const std::vector<std::array<int,2>>, // child_to_parent_faces
557 const std::vector<std::array<int,2>>> // child_to_parent_cells
558 refinePatch(const std::array<int,3>& cells_per_dim, const std::array<int,3>& startIJK, const std::array<int,3>& endIJK) const;
559
560    // @brief Compute the center of an entity/element/cell in the Eclipse way:
561 // - Average of the 4 corners of the bottom face.
562 // - Average of the 4 corners of the top face.
563 // Return average of the previous computations.
564 // @param [in] int Index of a cell.
565 // @return 'eclipse centroid'
566 std::array<double,3> computeEclCentroid(const int idx) const;
567
568    // @brief Compute the center of an entity/element/cell in the Eclipse way:
569 // - Average of the 4 corners of the bottom face.
570 // - Average of the 4 corners of the top face.
571 // Return average of the previous computations.
572 // @param [in] Entity<0> Entity
573 // @return 'eclipse centroid'
574 std::array<double,3> computeEclCentroid(const Entity<0>& elem) const;
575
576 // Make unique boundary ids for all intersections.
577 void computeUniqueBoundaryIds();
578
582 bool uniqueBoundaryIds() const
583 {
584 return use_unique_boundary_ids_;
585 }
586
589 void setUniqueBoundaryIds(bool uids)
590 {
591 use_unique_boundary_ids_ = uids;
592 if (use_unique_boundary_ids_ && unique_boundary_ids_.empty()) {
593 computeUniqueBoundaryIds();
594 }
595 }
596
600 const std::vector<double>& zcornData() const {
601 return zcorn;
602 }
603
604
607 const IndexSet& indexSet() const
608 {
609 return *index_set_;
610 }
611
613    const cpgrid::IdSet& localIdSet() const
614    {
615 return *local_id_set_;
616 }
617
619    const cpgrid::LevelGlobalIdSet& globalIdSet() const
620    {
621 return *global_id_set_;
622 }
623
627 const std::array<int, 3>& logicalCartesianSize() const
628 {
629 return logical_cartesian_size_;
630 }
631
635 void distributeGlobalGrid(CpGrid& grid,
636 const CpGridData& view_data,
637 const std::vector<int>& cell_part);
638
644 template<class DataHandle>
645 void communicate(DataHandle& data, InterfaceType iftype, CommunicationDirection dir);
646
647 void computeCellPartitionType();
648
649 void computePointPartitionType();
650
651 void computeCommunicationInterfaces(int noexistingPoints);
652
656 using Communication = CpGridDataTraits::Communication;
657 using CollectiveCommunication = CpGridDataTraits::CollectiveCommunication;
658#if HAVE_MPI
660 using AttributeSet = CpGridDataTraits::AttributeSet;
661
663 using Communicator = CpGridDataTraits::Communicator;
664
666 using InterfaceMap = CpGridDataTraits::InterfaceMap;
667
669 using CommunicationType = CpGridDataTraits::CommunicationType;
670
672 using ParallelIndexSet = CpGridDataTraits::ParallelIndexSet;
673
675 using RemoteIndices = CpGridDataTraits::RemoteIndices;
676
680 CommunicationType& cellCommunication()
681 {
682 return cell_comm_;
683 }
684
688 const CommunicationType& cellCommunication() const
689 {
690 return cell_comm_;
691 }
692
693 ParallelIndexSet& cellIndexSet()
694 {
695 return cellCommunication().indexSet();
696 }
697
698 const ParallelIndexSet& cellIndexSet() const
699 {
700 return cellCommunication().indexSet();
701 }
702
703 RemoteIndices& cellRemoteIndices()
704 {
705 return cellCommunication().remoteIndices();
706 }
707
708 const RemoteIndices& cellRemoteIndices() const
709 {
710 return cellCommunication().remoteIndices();
711 }
712#endif
713
715 const std::vector<int>& sortedNumAquiferCells() const
716 {
717 return aquifer_cells_;
718 }
719
720private:
721
723 void populateGlobalCellIndexSet();
724
725#if HAVE_MPI
726
732 template<class DataHandle>
733 void gatherData(DataHandle& data, CpGridData* global_view,
734 CpGridData* distributed_view);
735
736
743 template<int codim, class DataHandle>
744 void gatherCodimData(DataHandle& data, CpGridData* global_data,
745 CpGridData* distributed_data);
746
753 template<class DataHandle>
754 void scatterData(DataHandle& data, CpGridData* global_data,
755 CpGridData* distributed_data, const InterfaceMap& cell_inf,
756 const InterfaceMap& point_inf);
757
765 template<int codim, class DataHandle>
766 void scatterCodimData(DataHandle& data, CpGridData* global_data,
767 CpGridData* distributed_data);
768
777 template<int codim, class DataHandle>
778 void communicateCodim(Entity2IndexDataHandle<DataHandle, codim>& data, CommunicationDirection dir,
779 const Interface& interface);
780
789 template<int codim, class DataHandle>
790 void communicateCodim(Entity2IndexDataHandle<DataHandle, codim>& data, CommunicationDirection dir,
791 const InterfaceMap& interface);
792
793#endif
794
795 void computeGeometry(CpGrid& grid,
796 const DefaultGeometryPolicy& globalGeometry,
797 const std::vector<int>& globalAquiferCells,
798 const OrientedEntityTable<0, 1>& globalCell2Faces,
799 DefaultGeometryPolicy& geometry,
800 std::vector<int>& aquiferCells,
801 const OrientedEntityTable<0, 1>& cell2Faces,
802 const std::vector< std::array<int,8> >& cell2Points);
803
804 // Representing the topology
816 Opm::SparseTable<int> face_to_point_;
818 std::vector< std::array<int,8> > cell_to_point_;
825 std::array<int, 3> logical_cartesian_size_{};
832 std::vector<int> global_cell_;
838 typedef FieldVector<double, 3> PointType;
842 cpgrid::EntityVariable<int, 1> unique_boundary_ids_;
844 std::unique_ptr<cpgrid::IndexSet> index_set_;
846 std::shared_ptr<const cpgrid::IdSet> local_id_set_;
848 std::shared_ptr<LevelGlobalIdSet> global_id_set_;
850 std::shared_ptr<PartitionTypeIndicator> partition_type_indicator_;
852 std::vector<int> mark_;
854 int level_{0};
856 std::vector<std::shared_ptr<CpGridData>>* level_data_ptr_;
857 // SUITABLE FOR ALL LEVELS EXCEPT FOR LEAFVIEW
859 std::vector<int> level_to_leaf_cells_; // In entry 'level cell index', we store 'leafview cell index'
860 // {level LGR, {child0, child1, ...}}
861 std::vector<std::tuple<int,std::vector<int>>> parent_to_children_cells_;
862 // {# children in x-direction, ... y-, ... z-}
863 std::array<int,3> cells_per_dim_;
864 // SUITABLE ONLY FOR LEAFVIEW
865 // {level, cell index in that level}
866 std::vector<std::array<int,2>> leaf_to_level_cells_;
868 std::vector<std::array<int,2>> corner_history_;
869 // SUITABLE FOR ALL LEVELS INCLUDING LEAFVIEW
870 // {level parent cell, parent cell index}
871 std::vector<std::array<int,2>> child_to_parent_cells_;
874 std::vector<int> cell_to_idxInParentCell_;
875
876
877
879 Communication ccobj_;
880
881 // Boundary information (optional).
882 bool use_unique_boundary_ids_;
883
889 std::vector<double> zcorn;
890
892 std::vector<int> aquifer_cells_;
893
894#if HAVE_MPI
895
897 CommunicationType cell_comm_;
898
900 std::tuple<Interface,Interface,Interface,Interface,Interface> cell_interfaces_;
901 /*
902 // code deactivated, because users cannot access face indices and therefore
903 // communication on faces makes no sense!
905 std::tuple<InterfaceMap,InterfaceMap,InterfaceMap,InterfaceMap,InterfaceMap>
906 face_interfaces_;
907 */
909 std::tuple<InterfaceMap,InterfaceMap,InterfaceMap,InterfaceMap,InterfaceMap>
910 point_interfaces_;
911
912#endif
913
914 // Return the geometry vector corresponding to the given codim.
915 template <int codim>
916 const EntityVariable<Geometry<3 - codim, 3>, codim>& geomVector() const
917 {
918 return geometry_.geomVector<codim>();
919 }
920
921 friend class Dune::CpGrid;
922 template<int> friend class Entity;
923 template<int> friend class EntityRep;
924 friend class Intersection;
925 friend class PartitionTypeIndicator;
926};
927
928
929
930#if HAVE_MPI
931
932namespace
933{
938template<class T>
939T& getInterface(InterfaceType iftype,
940 std::tuple<T,T,T,T,T>& interfaces)
941{
942 switch(iftype)
943 {
944 case 0:
945 return std::get<0>(interfaces);
946 case 1:
947 return std::get<1>(interfaces);
948 case 2:
949 return std::get<2>(interfaces);
950 case 3:
951 return std::get<3>(interfaces);
952 case 4:
953 return std::get<4>(interfaces);
954 }
955 OPM_THROW(std::runtime_error, "Invalid Interface type was used during communication");
956}
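// The integer cases above mirror Dune::InterfaceType from
// dune/grid/common/gridenums.hh: 0 = InteriorBorder_InteriorBorder_Interface,
// 1 = InteriorBorder_All_Interface, 2 = Overlap_OverlapFront_Interface,
// 3 = Overlap_All_Interface, 4 = All_All_Interface.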
957
958} // end unnamed namespace
959
960template<int codim, class DataHandle>
961void CpGridData::communicateCodim(Entity2IndexDataHandle<DataHandle, codim>& data, CommunicationDirection dir,
962 const Interface& interface)
963{
964 this->template communicateCodim<codim>(data, dir, interface.interfaces());
965}
966
967template<int codim, class DataHandle>
968void CpGridData::communicateCodim(Entity2IndexDataHandle<DataHandle, codim>& data_wrapper, CommunicationDirection dir,
969 const InterfaceMap& interface)
970{
971 Communicator comm(ccobj_, interface);
972
973 if(dir==ForwardCommunication)
974 comm.forward(data_wrapper);
975 else
976 comm.backward(data_wrapper);
977}
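// Communicator (see CpGridDataTraits) is expected to be a Dune
// VariableSizeCommunicator built over ccobj_ and the given interface map:
// forward() moves data from the source to the target side of each interface
// entry, backward() in the opposite direction.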
978#endif
979
980template<class DataHandle>
981void CpGridData::communicate(DataHandle& data, InterfaceType iftype,
982 CommunicationDirection dir)
983{
984#if HAVE_MPI
985 if(data.contains(3,0))
986 {
987 Entity2IndexDataHandle<DataHandle, 0> data_wrapper(*this, data);
988 communicateCodim<0>(data_wrapper, dir, getInterface(iftype, cell_interfaces_));
989 }
990 if(data.contains(3,3))
991 {
992 Entity2IndexDataHandle<DataHandle, 3> data_wrapper(*this, data);
993 communicateCodim<3>(data_wrapper, dir, getInterface(iftype, point_interfaces_));
994 }
995#else
996 // Suppress warnings for unused arguments.
997 (void) data;
998 (void) iftype;
999 (void) dir;
1000#endif
1001}
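// A minimal, illustrative data handle sketch for the method above, assuming
// the usual Dune::CommDataHandleIF contract; the handle name and the
// cellIndex() helper are hypothetical and not part of this header:
//
//   struct CellValueHandle
//       : Dune::CommDataHandleIF<CellValueHandle, double>
//   {
//       std::vector<double>& values_; // one value per cell
//       explicit CellValueHandle(std::vector<double>& v) : values_(v) {}
//       bool contains(int /*dim*/, int codim) const { return codim == 0; }
//       bool fixedSize(int /*dim*/, int /*codim*/) const { return true; }
//       template<class Entity>
//       std::size_t size(const Entity&) const { return 1; }
//       template<class Buffer, class Entity>
//       void gather(Buffer& buf, const Entity& e) const
//       { buf.write(values_[cellIndex(e)]); }
//       template<class Buffer, class Entity>
//       void scatter(Buffer& buf, const Entity& e, std::size_t /*n*/)
//       { buf.read(values_[cellIndex(e)]); }
//   };
//
// Passing such a handle with InteriorBorder_All_Interface and
// ForwardCommunication copies owner values onto the overlap/copy cells.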
1002}}
1003
1004#if HAVE_MPI
1005#include <opm/grid/cpgrid/Entity.hpp>
1006#include <opm/grid/cpgrid/Indexsets.hpp>
1007
1008namespace Dune {
1009namespace cpgrid {
1010
1011namespace mover
1012{
1013template<class T>
1014class MoveBuffer
1015{
1016 friend class Dune::cpgrid::CpGridData;
1017public:
1018 void read(T& data)
1019 {
1020 data=buffer_[index_++];
1021 }
1022 void write(const T& data)
1023 {
1024 buffer_[index_++]=data;
1025 }
1026 void reset()
1027 {
1028 index_=0;
1029 }
1030 void resize(std::size_t size)
1031 {
1032 buffer_.resize(size);
1033 index_=0;
1034 }
1035private:
1036 std::vector<T> buffer_;
1037 typename std::vector<T>::size_type index_;
1038};
1039template<class DataHandle,int codim>
1040struct Mover
1041{
1042};
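// Mover is specialized below (after the shared BaseMover helper) to transfer
// the user data attached to one cell between the gather (global) and scatter
// (distributed) views: codim 0 moves the data of the cell itself, codim 1 the
// data of the faces listed in cell_to_face_, and codim 3 the data of the
// cell's eight corner points from cell_to_point_.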
1043
1044template<class DataHandle>
1045struct BaseMover
1046{
1047 BaseMover(DataHandle& data)
1048 : data_(data)
1049 {}
1050 template<class E>
1051 void moveData(const E& from, const E& to)
1052 {
1053 std::size_t size=data_.size(from);
1054 buffer.resize(size);
1055 data_.gather(buffer, from);
1056 buffer.reset();
1057 data_.scatter(buffer, to, size);
1058 }
1059 DataHandle& data_;
1060 MoveBuffer<typename DataHandle::DataType> buffer;
1061};
1062
1063
1064template<class DataHandle>
1065struct Mover<DataHandle,0> : public BaseMover<DataHandle>
1066{
1067 Mover<DataHandle,0>(DataHandle& data, CpGridData* gatherView,
1068 CpGridData* scatterView)
1069 : BaseMover<DataHandle>(data), gatherView_(gatherView), scatterView_(scatterView)
1070 {}
1071
1072 void operator()(std::size_t from_cell_index,std::size_t to_cell_index)
1073 {
1074 Entity<0> from_entity=Entity<0>(*gatherView_, from_cell_index, true);
1075 Entity<0> to_entity=Entity<0>(*scatterView_, to_cell_index, true);
1076 this->moveData(from_entity, to_entity);
1077 }
1078 CpGridData* gatherView_;
1079 CpGridData* scatterView_;
1080};
1081
1082template<class DataHandle>
1083struct Mover<DataHandle,1> : public BaseMover<DataHandle>
1084{
1085 Mover<DataHandle,1>(DataHandle& data, CpGridData* gatherView,
1086 CpGridData* scatterView)
1087 : BaseMover<DataHandle>(data), gatherView_(gatherView), scatterView_(scatterView)
1088 {}
1089
1090 void operator()(std::size_t from_cell_index,std::size_t to_cell_index)
1091 {
1092 typedef typename OrientedEntityTable<0,1>::row_type row_type;
1093 EntityRep<0> from_cell=EntityRep<0>(from_cell_index, true);
1094 EntityRep<0> to_cell=EntityRep<0>(to_cell_index, true);
1095 OrientedEntityTable<0,1>& table = gatherView_->cell_to_face_;
1096 row_type from_faces=table.operator[](from_cell);
1097 row_type to_faces=scatterView_->cell_to_face_[to_cell];
1098
1099 for(int i=0; i<from_faces.size(); ++i)
1100 this->moveData(from_faces[i], to_faces[i]);
1101 }
1102 CpGridData *gatherView_;
1103 CpGridData *scatterView_;
1104};
1105
1106template<class DataHandle>
1107struct Mover<DataHandle,3> : public BaseMover<DataHandle>
1108{
1109 Mover<DataHandle,3>(DataHandle& data, CpGridData* gatherView,
1110 CpGridData* scatterView)
1111 : BaseMover<DataHandle>(data), gatherView_(gatherView), scatterView_(scatterView)
1112 {}
1113 void operator()(std::size_t from_cell_index,std::size_t to_cell_index)
1114 {
1115 const std::array<int,8>& from_cell_points=
1116 gatherView_->cell_to_point_[from_cell_index];
1117 const std::array<int,8>& to_cell_points=
1118 scatterView_->cell_to_point_[to_cell_index];
1119 for(std::size_t i=0; i<8; ++i)
1120 {
1121 this->moveData(Entity<3>(*gatherView_, from_cell_points[i], true),
1122 Entity<3>(*scatterView_, to_cell_points[i], true));
1123 }
1124 }
1125 CpGridData* gatherView_;
1126 CpGridData* scatterView_;
1127};
1128
1129} // end mover namespace
1130
1131template<class DataHandle>
1132void CpGridData::scatterData(DataHandle& data, CpGridData* global_data,
1133 CpGridData* distributed_data, const InterfaceMap& cell_inf,
1134 const InterfaceMap& point_inf)
1135{
1136#if HAVE_MPI
1137 if(data.contains(3,0))
1138 {
1139 Entity2IndexDataHandle<DataHandle, 0> data_wrapper(*global_data, *distributed_data, data);
1140 communicateCodim<0>(data_wrapper, ForwardCommunication, cell_inf);
1141 }
1142 if(data.contains(3,3))
1143 {
1144 Entity2IndexDataHandle<DataHandle, 3> data_wrapper(*global_data, *distributed_data, data);
1145 communicateCodim<3>(data_wrapper, ForwardCommunication, point_inf);
1146 }
1147#endif
1148}
1149
1150template<int codim, class DataHandle>
1151void CpGridData::scatterCodimData(DataHandle& data, CpGridData* global_data,
1152 CpGridData* distributed_data)
1153{
1154 CpGridData *gather_view, *scatter_view;
1155 gather_view=global_data;
1156 scatter_view=distributed_data;
1157
1158 mover::Mover<DataHandle,codim> mover(data, gather_view, scatter_view);
1159
1160
1161 for(auto index=distributed_data->cellIndexSet().begin(),
1162 end = distributed_data->cellIndexSet().end();
1163 index!=end; ++index)
1164 {
1165 std::size_t from=index->global();
1166 std::size_t to=index->local();
1167 mover(from,to);
1168 }
1169}
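// Note: in the loop of scatterCodimData above, index->global() is the cell
// index in the global (pre-distribution) view and index->local() the cell
// index in the distributed view; the Mover then copies the data attached to
// that cell (or to its faces/points, depending on codim) from the global grid
// into the distributed grid.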
1170
1171namespace
1172{
1173
1174template<int codim, class T, class F>
1175void visitInterior(CpGridData& distributed_data, T begin, T endit, F& func)
1176{
1177 for(T it=begin; it!=endit; ++it)
1178 {
1179 Entity<codim> entity(distributed_data, it-begin, true);
1180 PartitionType pt = entity.partitionType();
1181 if(pt==Dune::InteriorEntity)
1182 {
1183 func(*it, entity);
1184 }
1185 }
1186}
1187
1188template<class DataHandle>
1189struct GlobalIndexSizeGatherer
1190{
1191 GlobalIndexSizeGatherer(DataHandle& data_,
1192 std::vector<int>& ownedGlobalIndices_,
1193 std::vector<int>& ownedSizes_)
1194 : data(data_), ownedGlobalIndices(ownedGlobalIndices_), ownedSizes(ownedSizes_)
1195 {}
1196
1197 template<class T, class E>
1198 void operator()(T& i, E& entity)
1199 {
1200 ownedGlobalIndices.push_back(i);
1201 ownedSizes.push_back(data.size(entity));
1202 }
1203 DataHandle& data;
1204 std::vector<int>& ownedGlobalIndices;
1205 std::vector<int>& ownedSizes;
1206};
1207
1208template<class DataHandle>
1209struct DataGatherer
1210{
1211 DataGatherer(mover::MoveBuffer<typename DataHandle::DataType>& buffer_,
1212 DataHandle& data_)
1213 : buffer(buffer_), data(data_)
1214 {}
1215
1216 template<class T, class E>
1217 void operator()(T& /* it */, E& entity)
1218 {
1219 data.gather(buffer, entity);
1220 }
1221 mover::MoveBuffer<typename DataHandle::DataType>& buffer;
1222 DataHandle& data;
1223};
1224
1225}
1226
1227template<class DataHandle>
1228void CpGridData::gatherData(DataHandle& data, CpGridData* global_data,
1229 CpGridData* distributed_data)
1230{
1231#if HAVE_MPI
1232 if(data.contains(3,0))
1233 gatherCodimData<0>(data, global_data, distributed_data);
1234 if(data.contains(3,3))
1235 gatherCodimData<3>(data, global_data, distributed_data);
1236#endif
1237}
1238
1239template<int codim, class DataHandle>
1240void CpGridData::gatherCodimData(DataHandle& data, CpGridData* global_data,
1241 CpGridData* distributed_data)
1242{
1243#if HAVE_MPI
1244 // Get the mapping to global index from the global id set
1245 const std::vector<int>& mapping =
1246 distributed_data->global_id_set_->getMapping<codim>();
1247
1248 // Get the global indices and data size for the entities whose data is
1249 // to be sent, i.e. the ones that we own.
1250 std::vector<int> owned_global_indices;
1251 std::vector<int> owned_sizes;
1252 owned_global_indices.reserve(mapping.size());
1253 owned_sizes.reserve(mapping.size());
1254
1255 GlobalIndexSizeGatherer<DataHandle> gisg(data, owned_global_indices, owned_sizes);
1256 visitInterior<codim>(*distributed_data, mapping.begin(), mapping.end(), gisg);
1257
1258 // communicate the number of indices that each processor sends
1259 int no_indices=owned_sizes.size();
1260    // We will take the address of the first element for MPI_Allgather below.
1261 // Make sure the containers have such an element.
1262 if ( owned_global_indices.empty() )
1263 owned_global_indices.resize(1);
1264 if ( owned_sizes.empty() )
1265 owned_sizes.resize(1);
1266 std::vector<int> no_indices_to_recv(distributed_data->ccobj_.size());
1267 distributed_data->ccobj_.allgather(&no_indices, 1, &(no_indices_to_recv[0]));
1268 // compute size of the vector capable for receiving all indices
1269 // and allgather the global indices and the sizes.
1270 // calculate displacements
1271 std::vector<int> displ(distributed_data->ccobj_.size()+1, 0);
1272 std::transform(displ.begin(), displ.end()-1, no_indices_to_recv.begin(), displ.begin()+1,
1273 std::plus<int>());
1274 int global_size=displ[displ.size()-1];//+no_indices_to_recv[displ.size()-1];
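    // The transform above accumulates a running prefix sum of the receive
    // counts, e.g. for three ranks with no_indices_to_recv = {2,3,1} it yields
    // displ = {0,2,5,6} and global_size = 6.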
1275 std::vector<int> global_indices(global_size);
1276 std::vector<int> global_sizes(global_size);
1277 MPI_Allgatherv(&(owned_global_indices[0]), no_indices, MPITraits<int>::getType(),
1278 &(global_indices[0]), &(no_indices_to_recv[0]), &(displ[0]),
1279 MPITraits<int>::getType(),
1280 distributed_data->ccobj_);
1281 MPI_Allgatherv(&(owned_sizes[0]), no_indices, MPITraits<int>::getType(),
1282 &(global_sizes[0]), &(no_indices_to_recv[0]), &(displ[0]),
1283 MPITraits<int>::getType(),
1284 distributed_data->ccobj_);
1285 std::vector<int>().swap(owned_global_indices); // free data for reuse.
1286 // Compute the number of data items to send
1287 std::vector<int> no_data_send(distributed_data->ccobj_.size());
1288 for(typename std::vector<int>::iterator begin=no_data_send.begin(),
1289 i=begin, end=no_data_send.end(); i!=end; ++i)
1290 *i = std::accumulate(global_sizes.begin()+displ[i-begin],
1291 global_sizes.begin()+displ[i-begin+1], std::size_t());
1292 // free at least some memory that can be reused.
1293 std::vector<int>().swap(owned_sizes);
1294 // compute the displacements for receiving with allgatherv
1295 displ[0]=0;
1296 std::transform(displ.begin(), displ.end()-1, no_data_send.begin(), displ.begin()+1,
1297 std::plus<std::size_t>());
1298 // Compute the number of data items we will receive
1299 int no_data_recv = displ[displ.size()-1];//+global_sizes[displ.size()-1];
1300
1301 // Collect the data to send, gather it
1302 mover::MoveBuffer<typename DataHandle::DataType> local_data_buffer, global_data_buffer;
1303 if ( no_data_send[distributed_data->ccobj_.rank()] )
1304 {
1305 local_data_buffer.resize(no_data_send[distributed_data->ccobj_.rank()]);
1306 }
1307 else
1308 {
1309 local_data_buffer.resize(1);
1310 }
1311 global_data_buffer.resize(no_data_recv);
1312
1313 DataGatherer<DataHandle> gatherer(local_data_buffer, data);
1314 visitInterior<codim>(*distributed_data, mapping.begin(), mapping.end(), gatherer);
1315 MPI_Allgatherv(&(local_data_buffer.buffer_[0]), no_data_send[distributed_data->ccobj_.rank()],
1316 MPITraits<typename DataHandle::DataType>::getType(),
1317 &(global_data_buffer.buffer_[0]), &(no_data_send[0]), &(displ[0]),
1318 MPITraits<typename DataHandle::DataType>::getType(),
1319 distributed_data->ccobj_);
1320 Entity2IndexDataHandle<DataHandle, codim> edata(*global_data, data);
1321 int offset=0;
1322 for(int i=0; i< codim; ++i)
1323 offset+=global_data->size(i);
1324
1325 typename std::vector<int>::const_iterator s=global_sizes.begin();
1326 for(typename std::vector<int>::const_iterator i=global_indices.begin(),
1327 end=global_indices.end();
1328 i!=end; ++s, ++i)
1329 {
1330 edata.scatter(global_data_buffer, *i-offset, *s);
1331 }
1332#endif
1333}
1334
1335} // end namespace cpgrid
1336} // end namespace Dune
1337
1338#endif
1339
1340#endif
[ provides Dune::Grid ]
Definition CpGrid.hpp:201
Struct that holds all the data needed to represent a CpGrid.
Definition CpGridData.hpp:147
void postAdapt()
Clean up refinement/coarsening markers - set every element to the mark 0, which represents 'doing nothing'.
Definition CpGridData.cpp:2703
const cpgrid::LevelGlobalIdSet & globalIdSet() const
Get the global index set.
Definition CpGridData.hpp:619
@ MAX_DATA_PER_CELL
The maximum number of data items allowed per cell (DUNE < 2.5.2)
Definition CpGridData.hpp:204
std::vector< int > getPatchesCells(const std::vector< std::array< int, 3 > > &startIJK_vec, const std::vector< std::array< int, 3 > > &endIJK_vec) const
Compute cell indices of selected patches of cells (Cartesian grid required).
Definition CpGridData.cpp:2186
int size(GeometryType type) const
number of leaf entities per geometry type in this process
Definition CpGridData.hpp:233
const std::array< int, 3 > & logicalCartesianSize() const
The logical cartesian size of the grid.
Definition CpGridData.hpp:627
void communicate(DataHandle &data, InterfaceType iftype, CommunicationDirection dir)
communicate objects for all codims on a given level
Definition CpGridData.hpp:981
bool uniqueBoundaryIds() const
Is the grid currently using unique boundary ids?
Definition CpGridData.hpp:582
std::array< int, 3 > getIJK(int idx_in_parent_cell, const std::array< int, 3 > &cells_per_dim) const
Extract Cartesian index triplet (i,j,k) given an index between 0 and NXxNYxNZ -1 where NX,...
Definition CpGridData.hpp:317
bool patchesShareFace(const std::vector< std::array< int, 3 > > &startIJK_vec, const std::vector< std::array< int, 3 > > &endIJK_vec) const
Determine if a finite number of patches (of cells) share a face.
Definition CpGridData.cpp:2023
int size(int codim) const
number of leaf entities per codim in this process
Definition CpGridData.cpp:146
void readEclipseFormat(const std::string &filename, bool periodic_extension, bool turn_normals=false)
Read the Eclipse grid format ('grdecl').
CpGridDataTraits::Communication Communication
The type of the collective communication.
Definition CpGridData.hpp:656
~CpGridData()
Destructor.
Definition CpGridData.cpp:99
void getIJK(int c, std::array< int, 3 > &ijk) const
Extract Cartesian index triplet (i,j,k) of an active cell.
Definition CpGridData.hpp:306
const IndexSet & indexSet() const
Get the index set.
Definition CpGridData.hpp:607
std::tuple< std::shared_ptr< CpGridData >, const std::vector< std::array< int, 2 > >, const std::vector< std::tuple< int, std::vector< int > > >, const std::vector< std::tuple< int, std::vector< int > > >, const std::vector< std::tuple< int, std::vector< int > > >, const std::vector< std::array< int, 2 > >, const std::vector< std::array< int, 2 > > > refinePatch(const std::array< int, 3 > &cells_per_dim, const std::array< int, 3 > &startIJK, const std::array< int, 3 > &endIJK) const
Refine a (connected block-shaped) patch of cells.
Definition CpGridData.cpp:2443
int getMark(const cpgrid::Entity< 0 > &element) const
Return refinement mark for entity.
Definition CpGridData.cpp:2677
CpGridDataTraits::MPICommunicator MPICommunicator
The type of the MPI communicator.
Definition CpGridData.hpp:654
void processEclipseFormat(const grdecl &input_data, std::array< std::set< std::pair< int, int > >, 2 > &nnc, bool remove_ij_boundary, bool turn_normals, bool pinchActive, double tolerance_unique_points)
Read the Eclipse grid format ('grdecl').
Definition processEclipseFormat.cpp:409
std::tuple< const std::shared_ptr< CpGridData >, const std::vector< std::array< int, 2 > >, const std::vector< std::tuple< int, std::vector< int > > >, const std::tuple< int, std::vector< int > >, const std::vector< std::array< int, 2 > >, const std::vector< std::array< int, 2 > > > refineSingleCell(const std::array< int, 3 > &cells_per_dim, const int &parent_idx) const
Refine a single cell and return a shared pointer of CpGridData type.
Definition CpGridData.cpp:2312
void distributeGlobalGrid(CpGrid &grid, const CpGridData &view_data, const std::vector< int > &cell_part)
Redistribute a global grid.
Definition CpGridData.cpp:1515
const std::vector< double > & zcornData() const
Return the internalized zcorn copy from the grid processing, if no cells were adjusted during the min...
Definition CpGridData.hpp:600
void setUniqueBoundaryIds(bool uids)
Set whether we want to have unique boundary ids.
Definition CpGridData.hpp:589
bool preAdapt()
Set mightVanish flags for elements that will be refined in the next adapt() call Need to be called af...
Definition CpGridData.cpp:2682
int getGridIdx() const
Add documentation, or remove this method and replace it with a better approach.
Definition CpGridData.hpp:490
bool hasNNCs(const std::vector< int > &cellIndices) const
Check that all cells selected for refinement have no NNCs (non-neighbor connections).
Definition CpGridData.cpp:2197
bool mark(int refCount, const cpgrid::Entity< 0 > &element)
Mark entity for refinement or coarsening.
Definition CpGridData.cpp:2660
std::vector< std::shared_ptr< Dune::cpgrid::CpGridData > > levelData() const
Add documentation, or remove this method and replace it with a better approach.
Definition CpGridData.hpp:504
const cpgrid::IdSet & localIdSet() const
Get the local index set.
Definition CpGridData.hpp:613
bool adapt()
TO DO: Documentation. Triggers the grid refinement process - Currently, returns preAdapt()
Definition CpGridData.cpp:2698
void checkCuboidShape(const std::vector< int > &cellIdx_vec) const
Check that every cell to be refined has cuboid shape.
Definition CpGridData.cpp:1827
bool disjointPatches(const std::vector< std::array< int, 3 > > &startIJK_vec, const std::vector< std::array< int, 3 > > &endIJK_vec) const
Determine if a finite number of patches (of cells) are disjoint, namely, they do not share any corner...
Definition CpGridData.cpp:1974
const std::vector< int > & sortedNumAquiferCells() const
Get sorted active cell indices of numerical aquifer.
Definition CpGridData.hpp:715
void validStartEndIJKs(const std::vector< std::array< int, 3 > > &startIJK_vec, const std::vector< std::array< int, 3 > > &endIJK_vec) const
Check startIJK and endIJK of each patch of cells to be refined are valid, i.e.
Definition CpGridData.cpp:2221
Definition DefaultGeometryPolicy.hpp:53
const EntityVariable< cpgrid::Geometry< 3 - codim, 3 >, codim > & geomVector() const
Definition DefaultGeometryPolicy.hpp:86
Wrapper that turns a data handle suitable for dune-grid into one based on integers instead of entities.
Definition Entity2IndexDataHandle.hpp:56
A class designed to hold a variable with a value for each entity of the given codimension,...
Definition EntityRep.hpp:272
Definition PartitionTypeIndicator.hpp:46
This class encapsulates geometry for vertices, intersections, and cells.
Definition Geometry.hpp:75
The global id set for Dune.
Definition Indexsets.hpp:431
Only needs to provide interface for doing nothing.
Definition Iterators.hpp:108
Definition Indexsets.hpp:196
Definition Indexsets.hpp:53
Definition Indexsets.hpp:349
Represents the topological relationships between sets of entities, for example cells and faces.
Definition OrientedEntityTable.hpp:139
A class designed to hold a variable with a value for each entity of the given codimension,...
Definition EntityRep.hpp:307
A SparseTable stores a table with rows of varying size as efficiently as possible.
Definition SparseTable.hpp:55
Copyright 2019 Equinor AS.
Definition CartesianIndexMapper.hpp:10
Holds the implementation of the CpGrid as a pimpl (pointer to implementation).
Definition CellQuadrature.cpp:68
Low-level corner-point processing routines and supporting data structures.
MPIHelper::MPICommunicator MPICommunicator
The type of the collective communication.
Definition CpGridDataTraits.hpp:55
AttributeSet
The type of the set of the attributes.
Definition CpGridDataTraits.hpp:65
Definition CpGridData.hpp:139
Raw corner-point specification of a particular geological model.
Definition preprocess.h:56