My Project
Loading...
Searching...
No Matches
overlappingbcrsmatrix.hh
Go to the documentation of this file.
1// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2// vi: set et ts=4 sw=4 sts=4:
3/*
4 This file is part of the Open Porous Media project (OPM).
5
6 OPM is free software: you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation, either version 2 of the License, or
9 (at your option) any later version.
10
11 OPM is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with OPM. If not, see <http://www.gnu.org/licenses/>.
18
19 Consult the COPYING file in the top-level source directory of this
20 module for the precise wording of the license and the list of
21 copyright holders.
22*/
27#ifndef EWOMS_OVERLAPPING_BCRS_MATRIX_HH
28#define EWOMS_OVERLAPPING_BCRS_MATRIX_HH
29
34
35#include <opm/material/common/Valgrind.hpp>
36
37#include <dune/istl/scalarproducts.hh>
38#include <dune/istl/io.hh>
39#include <algorithm>
40#include <set>
41#include <map>
42#include <iostream>
43#include <vector>
44#include <memory>
45
46namespace Opm {
47namespace Linear {
48
/*!
 * \brief An overlap aware block-compressed row storage (BCRS) matrix.
 *
 * Derives from the non-overlapping Dune BCRSMatrix and extends it by the
 * rows of the algebraic overlap, which are kept in sync with the peer
 * processes via MPI.
 */
template <class BCRSMatrix>
class OverlappingBCRSMatrix : public BCRSMatrix
{
    using ParentType = BCRSMatrix;

public:
    // NOTE(review): this rendering hides one or more declarations here --
    // presumably the `Overlap` and `Index` typedefs used throughout this
    // class. Consult the original header to confirm.

private:
    // for each domestic row, the set of domestic column indices of its entries
    using Entries = std::vector<std::set<Index> >;

public:
    using ColIterator = typename ParentType::ColIterator;
    using ConstColIterator = typename ParentType::ConstColIterator;
    using block_type = typename ParentType::block_type;
    using field_type = typename ParentType::field_type;
68
    // Copy constructor (its signature line is hidden in this rendering).
    // No real copying is done at the moment: only the BCRSMatrix base is
    // copied; overlap_ and the MPI buffer maps are NOT copied, which the
    // destructor guards against via `overlap_.use_count() == 0`.
    // NOTE(review): calling overlap() on such a copy would dereference a
    // null shared_ptr -- confirm this is the intended contract.
    : ParentType(other)
    {}
73
74 template <class NativeBCRSMatrix>
75 OverlappingBCRSMatrix(const NativeBCRSMatrix& nativeMatrix,
76 const BorderList& borderList,
77 const BlackList& blackList,
78 unsigned overlapSize)
79 {
80 overlap_ = std::make_shared<Overlap>(nativeMatrix, borderList, blackList, overlapSize);
81 myRank_ = 0;
82#if HAVE_MPI
83 MPI_Comm_rank(MPI_COMM_WORLD, &myRank_);
84#endif // HAVE_MPI
85
86 // build the overlapping matrix from the non-overlapping
87 // matrix and the overlap
88 build_(nativeMatrix);
89 }
90
    // This constructor is required to make the class compatible with the
    // SeqILU class of Dune >= 2.7. Overlapping matrices can only be created
    // meaningfully via the constructor above, so this one unconditionally
    // throws. (Its first signature line is hidden in this rendering.)
    size_t,
    typename BCRSMatrix::BuildMode)
    { throw std::logic_error("OverlappingBCRSMatrix objects cannot be build from scratch!"); }
97
    {
        // destructor body (the `~OverlappingBCRSMatrix()` signature line is
        // hidden in this rendering): releases all heap-allocated MPI buffers.

        // if no overlap object was ever created (copy constructor or the
        // bogus SeqILU-compatibility constructor), no buffers were
        // allocated either
        if (overlap_.use_count() == 0)
            return;

        // delete all MPI buffers
        const PeerSet& peerSet = overlap_->peerSet();
        typename PeerSet::const_iterator peerIt = peerSet.begin();
        typename PeerSet::const_iterator peerEndIt = peerSet.end();
        for (; peerIt != peerEndIt; ++peerIt) {
            ProcessRank peerRank = *peerIt;

            // receive buffers (numRowsRecvBuff_ is stored by value and thus
            // needs no explicit delete)
            delete rowSizesRecvBuff_[peerRank];
            delete rowIndicesRecvBuff_[peerRank];
            delete entryColIndicesRecvBuff_[peerRank];
            delete entryValuesRecvBuff_[peerRank];

            // send buffers
            delete numRowsSendBuff_[peerRank];
            delete rowSizesSendBuff_[peerRank];
            delete rowIndicesSendBuff_[peerRank];
            delete entryColIndicesSendBuff_[peerRank];
            delete entryValuesSendBuff_[peerRank];
        }
    }
122
123 ParentType& asParent()
124 { return *this; }
125
126 const ParentType& asParent() const
127 { return *this; }
128
132 const Overlap& overlap() const
133 { return *overlap_; }
134
    /*!
     * \brief Assign and synchronize the overlapping matrix from a
     *        non-overlapping one.
     *
     * The values of overlap rows are summed up over all processes which
     * know them (see syncAdd()).
     */
    void assignAdd(const ParentType& nativeMatrix)
    {
        // copy the native entries
        assignFromNative(nativeMatrix);

        // communicate and add the contents of overlapping rows
        syncAdd();
    }
146
    /*!
     * \brief Assign and synchronize the overlapping matrix from a
     *        non-overlapping one.
     *
     * Each overlap row is overwritten with the values of the process which
     * is its master (see syncCopy()).
     */
    template <class NativeBCRSMatrix>
    void assignCopy(const NativeBCRSMatrix& nativeMatrix)
    {
        // copy the native entries
        assignFromNative(nativeMatrix);

        // communicate and copy the contents of overlapping rows
        syncCopy();
    }
162
    {
        // resetFront() body (signature line hidden in this rendering):
        // sets an identity row on the main diagonal of all "front" rows,
        // i.e. overlap rows at the outer boundary of the algebraic overlap.

        // create an identity matrix
        block_type idMatrix(0.0);
        for (unsigned i = 0; i < idMatrix.size(); ++i)
            idMatrix[i][i] = 1.0;

        // front rows can only appear among the non-local domestic rows,
        // so the purely local range [0, numLocal) is skipped
        int numLocal = overlap_->numLocal();
        int numDomestic = overlap_->numDomestic();
        for (int domRowIdx = numLocal; domRowIdx < numDomestic; ++domRowIdx) {
            if (overlap_->isFront(domRowIdx)) {
                // set the front rows to a diagonal 1
                (*this)[domRowIdx] = 0.0;
                (*this)[domRowIdx][domRowIdx] = idMatrix;
            }
        }
    }
183
184 void print() const
185 {
186 overlap_->print();
187
188 for (int i = 0; i < this->N(); ++i) {
189 if (overlap_->isLocal(i))
190 std::cout << " ";
191 else
192 std::cout << "*";
193 std::cout << "row " << i << " ";
194
195 using ColIt = typename BCRSMatrix::ConstColIterator;
196 ColIt colIt = (*this)[i].begin();
197 ColIt colEndIt = (*this)[i].end();
198 for (int j = 0; j < this->M(); ++j) {
199 if (colIt != colEndIt && j == colIt.index()) {
200 ++colIt;
201 if (overlap_->isBorder(j))
202 std::cout << "|";
203 else if (overlap_->isLocal(j))
204 std::cout << "X";
205 else
206 std::cout << "*";
207 }
208 else
209 std::cout << " ";
210 }
211 std::cout << "\n" << std::flush;
212 }
213 Dune::printSparseMatrix(std::cout,
214 *static_cast<const BCRSMatrix *>(this),
215 "M",
216 "row");
217 }
218
    /*!
     * \brief Copy the entries of a non-overlapping ("native") matrix into
     *        the domestic rows of this overlapping matrix.
     *
     * All entries are zeroed first, then the native values are written to
     * their domestic positions. Overlap rows mastered by peer processes
     * remain zero until syncAdd() or syncCopy() is called.
     */
    template <class NativeBCRSMatrix>
    void assignFromNative(const NativeBCRSMatrix& nativeMatrix)
    {
        // first, set everything to 0,
        BCRSMatrix::operator=(0.0);

        // then copy the domestic entries of the native matrix to the overlapping matrix
        for (unsigned nativeRowIdx = 0; nativeRowIdx < nativeMatrix.N(); ++nativeRowIdx) {
            Index domesticRowIdx = overlap_->nativeToDomestic(static_cast<Index>(nativeRowIdx));
            if (domesticRowIdx < 0) {
                continue; // row corresponds to a black-listed entry
            }

            auto nativeColIt = nativeMatrix[nativeRowIdx].begin();
            const auto& nativeColEndIt = nativeMatrix[nativeRowIdx].end();
            for (; nativeColIt != nativeColEndIt; ++nativeColIt) {
                Index domesticColIdx = overlap_->nativeToDomestic(static_cast<Index>(nativeColIt.index()));

                // make sure to include all off-diagonal entries, even those which belong
                // to DOFs which are managed by a peer process. For this, we have to
                // re-map the column index of the black-listed index to a native one.
                if (domesticColIdx < 0)
                    domesticColIdx = overlap_->blackList().nativeToDomestic(static_cast<Index>(nativeColIt.index()));

                if (domesticColIdx < 0)
                    // there is no domestic index which corresponds to a black-listed
                    // one. this can happen if the grid overlap is larger than the
                    // algebraic one...
                    continue;

                // we need to copy the block matrices manually since it seems that (at
                // least some versions of) Dune have an endless recursion bug when
                // assigning dense matrices of different field type
                const auto& src = *nativeColIt;
                auto& dest = (*this)[static_cast<unsigned>(domesticRowIdx)][static_cast<unsigned>(domesticColIdx)];
                for (unsigned i = 0; i < src.rows; ++i) {
                    for (unsigned j = 0; j < src.cols; ++j) {
                        dest[i][j] = static_cast<field_type>(src[i][j]);
                    }
                }
            }
        }
    }
262
263 // communicates and adds up the contents of overlapping rows
264 void syncAdd()
265 {
266 // first, send all entries to the peers
267 const PeerSet& peerSet = overlap_->peerSet();
268 typename PeerSet::const_iterator peerIt = peerSet.begin();
269 typename PeerSet::const_iterator peerEndIt = peerSet.end();
270 for (; peerIt != peerEndIt; ++peerIt) {
271 ProcessRank peerRank = *peerIt;
272
273 sendEntries_(peerRank);
274 }
275
276 // then, receive entries from the peers
277 peerIt = peerSet.begin();
278 for (; peerIt != peerEndIt; ++peerIt) {
279 ProcessRank peerRank = *peerIt;
280
281 receiveAddEntries_(peerRank);
282 }
283
284 // finally, make sure that everything which we send was
285 // received by the peers
286 peerIt = peerSet.begin();
287 for (; peerIt != peerEndIt; ++peerIt) {
288 ProcessRank peerRank = *peerIt;
289 entryValuesSendBuff_[peerRank]->wait();
290 }
291 }
292
293 // communicates and copies the contents of overlapping rows from
294 // the master
295 void syncCopy()
296 {
297 // first, send all entries to the peers
298 const PeerSet& peerSet = overlap_->peerSet();
299 typename PeerSet::const_iterator peerIt = peerSet.begin();
300 typename PeerSet::const_iterator peerEndIt = peerSet.end();
301 for (; peerIt != peerEndIt; ++peerIt) {
302 ProcessRank peerRank = *peerIt;
303
304 sendEntries_(peerRank);
305 }
306
307 // then, receive entries from the peers
308 peerIt = peerSet.begin();
309 for (; peerIt != peerEndIt; ++peerIt) {
310 ProcessRank peerRank = *peerIt;
311
312 receiveCopyEntries_(peerRank);
313 }
314
315 // finally, make sure that everything which we send was
316 // received by the peers
317 peerIt = peerSet.begin();
318 for (; peerIt != peerEndIt; ++peerIt) {
319 ProcessRank peerRank = *peerIt;
320 entryValuesSendBuff_[peerRank]->wait();
321 }
322 }
323
324private:
    // Set up the size, build mode and sparsity pattern of the overlapping
    // matrix from the native matrix plus the overlap information.
    template <class NativeBCRSMatrix>
    void build_(const NativeBCRSMatrix& nativeMatrix)
    {
        size_t numDomestic = overlap_->numDomestic();

        // allocate the rows
        this->setSize(numDomestic, numDomestic);
        this->setBuildMode(ParentType::random);

        // communicate the entries
        buildIndices_(nativeMatrix);
    }
337
    // Determine the sparsity pattern of the overlapping matrix: the entries
    // of the local (native) matrix plus all entries of the overlap rows
    // which are communicated by the peer processes. Finally initializes the
    // BCRS structure (row sizes and column indices) from that pattern.
    template <class NativeBCRSMatrix>
    void buildIndices_(const NativeBCRSMatrix& nativeMatrix)
    {
        // first, add all local matrix entries
        entries_.resize(overlap_->numDomestic());
        for (unsigned nativeRowIdx = 0; nativeRowIdx < nativeMatrix.N(); ++nativeRowIdx) {
            int domesticRowIdx = overlap_->nativeToDomestic(static_cast<Index>(nativeRowIdx));
            if (domesticRowIdx < 0)
                continue; // row corresponds to a black-listed entry

            auto nativeColIt = nativeMatrix[nativeRowIdx].begin();
            const auto& nativeColEndIt = nativeMatrix[nativeRowIdx].end();
            for (; nativeColIt != nativeColEndIt; ++nativeColIt) {
                int domesticColIdx = overlap_->nativeToDomestic(static_cast<Index>(nativeColIt.index()));

                // make sure to include all off-diagonal entries, even those which belong
                // to DOFs which are managed by a peer process. For this, we have to
                // re-map the column index of the black-listed index to a native one.
                if (domesticColIdx < 0) {
                    domesticColIdx = overlap_->blackList().nativeToDomestic(static_cast<Index>(nativeColIt.index()));
                }

                if (domesticColIdx < 0)
                    continue;

                entries_[static_cast<unsigned>(domesticRowIdx)].insert(domesticColIdx);
            }
        }

        // add the indices for all additional entries

        // first, send all our indices to all peers
        const PeerSet& peerSet = overlap_->peerSet();
        typename PeerSet::const_iterator peerIt = peerSet.begin();
        typename PeerSet::const_iterator peerEndIt = peerSet.end();
        for (; peerIt != peerEndIt; ++peerIt) {
            ProcessRank peerRank = *peerIt;
            sendIndices_(nativeMatrix, peerRank);
        }

        // then receive all indices from the peers
        peerIt = peerSet.begin();
        for (; peerIt != peerEndIt; ++peerIt) {
            ProcessRank peerRank = *peerIt;
            receiveIndices_(peerRank);
        }

        // wait until all send operations are completed
        peerIt = peerSet.begin();
        for (; peerIt != peerEndIt; ++peerIt) {
            ProcessRank peerRank = *peerIt;

            numRowsSendBuff_[peerRank]->wait();
            rowSizesSendBuff_[peerRank]->wait();
            rowIndicesSendBuff_[peerRank]->wait();
            entryColIndicesSendBuff_[peerRank]->wait();

            // convert the global indices in the send buffers to domestic
            // ones
            globalToDomesticBuff_(*rowIndicesSendBuff_[peerRank]);
            globalToDomesticBuff_(*entryColIndicesSendBuff_[peerRank]);
        }

        // actually initialize the BCRS matrix structure

        // set the row sizes
        size_t numDomestic = overlap_->numDomestic();
        for (unsigned rowIdx = 0; rowIdx < numDomestic; ++rowIdx) {
            unsigned numCols = 0;
            const auto& colIndices = entries_[rowIdx];
            auto colIdxIt = colIndices.begin();
            const auto& colIdxEndIt = colIndices.end();
            for (; colIdxIt != colIdxEndIt; ++colIdxIt) {
                if (*colIdxIt < 0)
                    // the matrix for the local process does not know about this DOF
                    continue;

                ++numCols;
            }

            this->setrowsize(rowIdx, numCols);
        }
        this->endrowsizes();

        // set the indices
        for (unsigned rowIdx = 0; rowIdx < numDomestic; ++rowIdx) {
            const auto& colIndices = entries_[rowIdx];

            auto colIdxIt = colIndices.begin();
            const auto& colIdxEndIt = colIndices.end();
            for (; colIdxIt != colIdxEndIt; ++colIdxIt) {
                if (*colIdxIt < 0)
                    // the matrix for the local process does not know about this DOF
                    continue;

                this->addindex(rowIdx, static_cast<unsigned>(*colIdxIt));
            }
        }
        this->endindices();

        // free the memory occupied by the array of the matrix entries
        entries_.clear();
    }
447
448 // send the overlap indices to a peer
449 template <class NativeBCRSMatrix>
450 void sendIndices_([[maybe_unused]] const NativeBCRSMatrix& nativeMatrix,
451 [[maybe_unused]] ProcessRank peerRank)
452 {
453#if HAVE_MPI
454 // send size of foreign overlap to peer
455 size_t numOverlapRows = overlap_->foreignOverlapSize(peerRank);
456 numRowsSendBuff_[peerRank] = new MpiBuffer<unsigned>(1);
457 (*numRowsSendBuff_[peerRank])[0] = static_cast<unsigned>(numOverlapRows);
458 numRowsSendBuff_[peerRank]->send(peerRank);
459
460 // allocate the buffers which hold the global indices of each row and the number
461 // of entries which need to be communicated by the respective row
462 rowIndicesSendBuff_[peerRank] = new MpiBuffer<Index>(numOverlapRows);
463 rowSizesSendBuff_[peerRank] = new MpiBuffer<unsigned>(numOverlapRows);
464
465 // compute the sets of the indices of the entries which need to be send to the peer
466 using ColumnIndexSet = std::set<int>;
467 using EntryTuples = std::map<int, ColumnIndexSet>;
468
469 EntryTuples entryIndices;
470 unsigned numEntries = 0; // <- total number of matrix entries to be send to the peer
471 for (unsigned overlapOffset = 0; overlapOffset < numOverlapRows; ++overlapOffset) {
472 Index domesticRowIdx = overlap_->foreignOverlapOffsetToDomesticIdx(peerRank, overlapOffset);
473 Index nativeRowIdx = overlap_->domesticToNative(domesticRowIdx);
474 Index globalRowIdx = overlap_->domesticToGlobal(domesticRowIdx);
475
476 ColumnIndexSet& colIndices = entryIndices[globalRowIdx];
477
478 auto nativeColIt = nativeMatrix[static_cast<unsigned>(nativeRowIdx)].begin();
479 const auto& nativeColEndIt = nativeMatrix[static_cast<unsigned>(nativeRowIdx)].end();
480 for (; nativeColIt != nativeColEndIt; ++nativeColIt) {
481 unsigned nativeColIdx = static_cast<unsigned>(nativeColIt.index());
482 Index domesticColIdx = overlap_->nativeToDomestic(static_cast<Index>(nativeColIdx));
483
484 if (domesticColIdx < 0)
485 // the native column index may be blacklisted, use the corresponding
486 // index in the domestic overlap.
487 domesticColIdx = overlap_->blackList().nativeToDomestic(static_cast<Index>(nativeColIdx));
488
489 if (domesticColIdx < 0)
490 // the column may still not be known locally, i.e. the corresponding
491 // DOF of the row is at the process's front. we don't need this
492 // entry.
493 continue;
494
495 Index globalColIdx = overlap_->domesticToGlobal(domesticColIdx);
496 colIndices.insert(globalColIdx);
497 ++numEntries;
498 }
499 };
500
501 // fill the send buffers
502 entryColIndicesSendBuff_[peerRank] = new MpiBuffer<Index>(numEntries);
503 Index overlapEntryIdx = 0;
504 for (unsigned overlapOffset = 0; overlapOffset < numOverlapRows; ++overlapOffset) {
505 Index domesticRowIdx = overlap_->foreignOverlapOffsetToDomesticIdx(peerRank, overlapOffset);
506 Index globalRowIdx = overlap_->domesticToGlobal(domesticRowIdx);
507
508 (*rowIndicesSendBuff_[peerRank])[overlapOffset] = globalRowIdx;
509
510 const ColumnIndexSet& colIndexSet = entryIndices[globalRowIdx];
511 auto* rssb = rowSizesSendBuff_[peerRank];
512 (*rssb)[overlapOffset] = static_cast<unsigned>(colIndexSet.size());
513 for (auto it = colIndexSet.begin(); it != colIndexSet.end(); ++it) {
514 int globalColIdx = *it;
515
516 (*entryColIndicesSendBuff_[peerRank])[static_cast<unsigned>(overlapEntryIdx)] = globalColIdx;
517 ++ overlapEntryIdx;
518 }
519 }
520
521 // actually communicate with the peer
522 rowSizesSendBuff_[peerRank]->send(peerRank);
523 rowIndicesSendBuff_[peerRank]->send(peerRank);
524 entryColIndicesSendBuff_[peerRank]->send(peerRank);
525
526 // create the send buffers for the values of the matrix
527 // entries
528 entryValuesSendBuff_[peerRank] = new MpiBuffer<block_type>(numEntries);
529#endif // HAVE_MPI
530 }
531
    // Receive the sparsity pattern of the overlap rows from a peer process
    // (posted by sendIndices_() on the peer) and merge it into entries_.
    void receiveIndices_([[maybe_unused]] ProcessRank peerRank)
    {
#if HAVE_MPI
        // receive size of foreign overlap to peer
        unsigned numOverlapRows;
        auto& numRowsRecvBuff = numRowsRecvBuff_[peerRank];
        numRowsRecvBuff.resize(1);
        numRowsRecvBuff.receive(peerRank);
        numOverlapRows = numRowsRecvBuff[0];

        // create receive buffer for the row sizes and receive them
        // from the peer
        rowSizesRecvBuff_[peerRank] = new MpiBuffer<unsigned>(numOverlapRows);
        rowIndicesRecvBuff_[peerRank] = new MpiBuffer<Index>(numOverlapRows);
        rowSizesRecvBuff_[peerRank]->receive(peerRank);
        rowIndicesRecvBuff_[peerRank]->receive(peerRank);

        // calculate the total number of indices which are send by the
        // peer
        unsigned totalIndices = 0;
        for (unsigned i = 0; i < numOverlapRows; ++i)
            totalIndices += (*rowSizesRecvBuff_[peerRank])[i];

        // create the buffer to store the column indices of the matrix entries;
        // the values buffer is only allocated here -- it is filled by
        // receiveAddEntries_()/receiveCopyEntries_() during each sync
        entryColIndicesRecvBuff_[peerRank] = new MpiBuffer<Index>(totalIndices);
        entryValuesRecvBuff_[peerRank] = new MpiBuffer<block_type>(totalIndices);

        // communicate with the peer
        entryColIndicesRecvBuff_[peerRank]->receive(peerRank);

        // convert the global indices in the receive buffers to
        // domestic ones
        globalToDomesticBuff_(*rowIndicesRecvBuff_[peerRank]);
        globalToDomesticBuff_(*entryColIndicesRecvBuff_[peerRank]);

        // add the entries to the global entry map
        unsigned k = 0;
        for (unsigned i = 0; i < numOverlapRows; ++i) {
            Index domRowIdx = (*rowIndicesRecvBuff_[peerRank])[i];
            for (unsigned j = 0; j < (*rowSizesRecvBuff_[peerRank])[i]; ++j) {
                Index domColIdx = (*entryColIndicesRecvBuff_[peerRank])[k];
                entries_[static_cast<unsigned>(domRowIdx)].insert(domColIdx);
                ++k;
            }
        }
#endif // HAVE_MPI
    }
580
581 void sendEntries_([[maybe_unused]] ProcessRank peerRank)
582 {
583#if HAVE_MPI
584 auto &mpiSendBuff = *entryValuesSendBuff_[peerRank];
585
586 auto &mpiRowIndicesSendBuff = *rowIndicesSendBuff_[peerRank];
587 auto &mpiRowSizesSendBuff = *rowSizesSendBuff_[peerRank];
588 auto &mpiColIndicesSendBuff = *entryColIndicesSendBuff_[peerRank];
589
590 // fill the send buffer
591 unsigned k = 0;
592 for (unsigned i = 0; i < mpiRowIndicesSendBuff.size(); ++i) {
593 Index domRowIdx = mpiRowIndicesSendBuff[i];
594
595 for (Index j = 0; j < static_cast<Index>(mpiRowSizesSendBuff[i]); ++j)
596 {
597 // move to the next column which is in the overlap
598 Index domColIdx = mpiColIndicesSendBuff[k];
599
600 // add the values of this column to the send buffer
601 mpiSendBuff[k] = (*this)[static_cast<unsigned>(domRowIdx)][static_cast<unsigned>(domColIdx)];
602 ++k;
603 }
604 }
605
606 mpiSendBuff.send(peerRank);
607#endif // HAVE_MPI
608 }
609
    // Receive the values of the overlap rows from a peer process and add
    // them to the corresponding local entries (used by syncAdd()).
    void receiveAddEntries_([[maybe_unused]] ProcessRank peerRank)
    {
#if HAVE_MPI
        auto &mpiRecvBuff = *entryValuesRecvBuff_[peerRank];

        auto &mpiRowIndicesRecvBuff = *rowIndicesRecvBuff_[peerRank];
        auto &mpiRowSizesRecvBuff = *rowSizesRecvBuff_[peerRank];
        auto &mpiColIndicesRecvBuff = *entryColIndicesRecvBuff_[peerRank];

        mpiRecvBuff.receive(peerRank);

        // retrieve the values from the receive buffer
        unsigned k = 0;
        for (unsigned i = 0; i < mpiRowIndicesRecvBuff.size(); ++i) {
            Index domRowIdx = mpiRowIndicesRecvBuff[i];
            for (unsigned j = 0; j < mpiRowSizesRecvBuff[i]; ++j, ++k) {
                Index domColIdx = mpiColIndicesRecvBuff[k];

                if (domColIdx < 0)
                    // the matrix for the current process does not know about this DOF
                    continue;

                (*this)[static_cast<unsigned>(domRowIdx)][static_cast<unsigned>(domColIdx)] += mpiRecvBuff[k];
            }
        }
#endif // HAVE_MPI
    }
637
638 void receiveCopyEntries_([[maybe_unused]] int peerRank)
639 {
640#if HAVE_MPI
641 MpiBuffer<block_type> &mpiRecvBuff = *entryValuesRecvBuff_[peerRank];
642
643 MpiBuffer<Index> &mpiRowIndicesRecvBuff = *rowIndicesRecvBuff_[peerRank];
644 MpiBuffer<unsigned> &mpiRowSizesRecvBuff = *rowSizesRecvBuff_[peerRank];
645 MpiBuffer<Index> &mpiColIndicesRecvBuff = *entryColIndicesRecvBuff_[peerRank];
646
647 mpiRecvBuff.receive(peerRank);
648
649 // retrieve the values from the receive buffer
650 unsigned k = 0;
651 for (unsigned i = 0; i < mpiRowIndicesRecvBuff.size(); ++i) {
652 Index domRowIdx = mpiRowIndicesRecvBuff[i];
653 for (unsigned j = 0; j < mpiRowSizesRecvBuff[i]; ++j, ++k) {
654 Index domColIdx = mpiColIndicesRecvBuff[k];
655
656 if (domColIdx < 0)
657 // the matrix for the current process does not know about this DOF
658 continue;
659
660 (*this)[static_cast<unsigned>(domRowIdx)][static_cast<unsigned>(domColIdx)] = mpiRecvBuff[k];
661 }
662 }
663#endif // HAVE_MPI
664 }
665
666 void globalToDomesticBuff_(MpiBuffer<Index>& idxBuff)
667 {
668 for (unsigned i = 0; i < idxBuff.size(); ++i)
669 idxBuff[i] = overlap_->globalToDomestic(idxBuff[i]);
670 }
671
    // rank of this process in MPI_COMM_WORLD (always 0 when built without MPI)
    int myRank_;
    // scratch storage for the sparsity pattern; filled by buildIndices_()
    // and cleared again once the BCRS structure has been created
    Entries entries_;
    // the algebraic overlap information; shared_ptr so that the use_count()
    // check in the destructor can detect never-initialized instances
    std::shared_ptr<Overlap> overlap_;

    // send buffers, one per peer process; the raw pointers are owned by
    // this class and released in the destructor
    std::map<ProcessRank, MpiBuffer<unsigned> *> numRowsSendBuff_;
    std::map<ProcessRank, MpiBuffer<unsigned> *> rowSizesSendBuff_;
    std::map<ProcessRank, MpiBuffer<Index> *> rowIndicesSendBuff_;
    std::map<ProcessRank, MpiBuffer<Index> *> entryColIndicesSendBuff_;
    std::map<ProcessRank, MpiBuffer<block_type> *> entryValuesSendBuff_;

    // receive buffers, one per peer process; note that numRowsRecvBuff_ is
    // (inconsistently) stored by value while the other buffers are heap
    // allocated and deleted in the destructor
    std::map<ProcessRank, MpiBuffer<unsigned> > numRowsRecvBuff_;
    std::map<ProcessRank, MpiBuffer<unsigned> *> rowSizesRecvBuff_;
    std::map<ProcessRank, MpiBuffer<Index> *> rowIndicesRecvBuff_;
    std::map<ProcessRank, MpiBuffer<Index> *> entryColIndicesRecvBuff_;
    std::map<ProcessRank, MpiBuffer<block_type> *> entryValuesRecvBuff_;
687};
688
689} // namespace Linear
690} // namespace Opm
691
692#endif
Expresses which degrees of freedom are blacklisted for the parallel linear solvers and which domestic...
Expresses which degrees of freedom are blacklisted for the parallel linear solvers and which domestic...
Definition blacklist.hh:49
This class creates and manages the foreign overlap given an initial list of border indices and a BCRS...
Definition domesticoverlapfrombcrsmatrix.hh:53
An overlap aware block-compressed row storage (BCRS) matrix.
Definition overlappingbcrsmatrix.hh:54
void resetFront()
Set the identity matrix on the main diagonal of front indices.
Definition overlappingbcrsmatrix.hh:166
void assignAdd(const ParentType &nativeMatrix)
Assign and synchronize the overlapping matrix from a non-overlapping one.
Definition overlappingbcrsmatrix.hh:138
void assignCopy(const NativeBCRSMatrix &nativeMatrix)
Assign and synchronize the overlapping matrix from a non-overlapping one.
Definition overlappingbcrsmatrix.hh:154
const Overlap & overlap() const
Returns the domestic overlap for the process.
Definition overlappingbcrsmatrix.hh:132
A set of process ranks.
Definition overlaptypes.hh:149
This class creates and manages the foreign overlap given an initial list of border indices and a BCRS...
This class maps domestic row indices to and from "global" indices which is used to construct an algeb...
Simplifies handling of buffers to be used in conjunction with MPI.
This file contains a set of helper functions used by VFPProd / VFPInj.
Definition blackoilboundaryratevector.hh:37
unsigned ProcessRank
The type of the rank of a process.
Definition overlaptypes.hh:49
int Index
The type of an index of a degree of freedom.
Definition overlaptypes.hh:44
std::list< BorderIndex > BorderList
This class manages a list of indices which are on the border of a process' partition of the grid.
Definition overlaptypes.hh:120