Tpetra_Experimental_BlockCrsMatrix_Helpers_def.hpp
// @HEADER
// ***********************************************************************
//
// Tpetra: Templated Linear Algebra Services Package
// Copyright (2008) Sandia Corporation
//
// Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
// the U.S. Government retains certain rights in this software.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the Corporation nor the names of the
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Questions? Contact Michael A. Heroux (maherou@sandia.gov)
//
// ************************************************************************
// @HEADER

#ifndef TPETRA_EXPERIMENTAL_BLOCKCRSMATRIX_HELPERS_DEF_HPP
#define TPETRA_EXPERIMENTAL_BLOCKCRSMATRIX_HELPERS_DEF_HPP


#include <Tpetra_HashTable.hpp>


namespace Tpetra {
namespace Experimental {

  template<class Scalar, class LO, class GO, class Node>
  void blockCrsMatrixWriter(BlockCrsMatrix<Scalar,LO,GO,Node> const &A, std::string const &fileName) {
    Teuchos::ParameterList pl;
    std::ofstream out;
    out.open(fileName.c_str());
    blockCrsMatrixWriter(A, out, pl);
  }

  template<class Scalar, class LO, class GO, class Node>
  void blockCrsMatrixWriter(BlockCrsMatrix<Scalar,LO,GO,Node> const &A, std::string const &fileName, Teuchos::ParameterList const &params) {
    std::ofstream out;
    out.open(fileName.c_str());
    blockCrsMatrixWriter(A, out, params);
  }

  template<class Scalar, class LO, class GO, class Node>
  void blockCrsMatrixWriter(BlockCrsMatrix<Scalar,LO,GO,Node> const &A, std::ostream &os) {
    Teuchos::ParameterList pl;
    blockCrsMatrixWriter(A, os, pl);
  }

  template<class Scalar, class LO, class GO, class Node>
  void blockCrsMatrixWriter(BlockCrsMatrix<Scalar,LO,GO,Node> const &A, std::ostream &os, Teuchos::ParameterList const &params) {

    using Teuchos::RCP;
    using Teuchos::rcp;

    typedef Teuchos::OrdinalTraits<GO> TOT;
    typedef BlockCrsMatrix<Scalar, LO, GO, Node> block_crs_matrix_type;
    typedef Tpetra::Import<LO, GO, Node> import_type;
    typedef Tpetra::Map<LO, GO, Node> map_type;
    typedef Tpetra::MultiVector<GO, LO, GO, Node> mv_type; // multivector of GIDs; needed by the uses of mv_type below
    typedef Tpetra::CrsGraph<LO, GO, Node> crs_graph_type;

    RCP<const map_type> const rowMap = A.getRowMap(); //"mesh" map
    RCP<const Teuchos::Comm<int> > comm = rowMap->getComm();
    const int myRank = comm->getRank();
    const size_t numProcs = comm->getSize();

    // If true, force use of the import strip-mining infrastructure. This is useful for debugging on one process.
    bool alwaysUseParallelAlgorithm = false;
    if (params.isParameter("always use parallel algorithm"))
      alwaysUseParallelAlgorithm = params.get<bool>("always use parallel algorithm");
    bool printMatrixMarketHeader = true;
    if (params.isParameter("print MatrixMarket header"))
      printMatrixMarketHeader = params.get<bool>("print MatrixMarket header");

    if (printMatrixMarketHeader && myRank == 0) {
      std::time_t now = std::time(NULL);

      const std::string dataTypeStr =
        Teuchos::ScalarTraits<Scalar>::isComplex ? "complex" : "real";

      // Explanation of first line of file:
      // - "%%MatrixMarket" is the tag for Matrix Market format.
      // - "matrix" is what we're printing.
      // - "coordinate" means sparse (triplet format), rather than dense.
      // - "real" / "complex" is the type (in an output format sense,
      //   not in a C++ sense) of each value of the matrix.  (The
      //   other two possibilities are "integer" (not really necessary
      //   here) and "pattern" (no values, just graph).)
      os << "%%MatrixMarket matrix coordinate " << dataTypeStr << " general" << std::endl;
      os << "% time stamp: " << ctime(&now);
      os << "% written from " << numProcs << " processes" << std::endl;
      os << "% point representation of Tpetra::Experimental::BlockCrsMatrix" << std::endl;
      size_t numRows = A.getGlobalNumRows();
      size_t numCols = A.getGlobalNumCols();
      os << "% " << numRows << " block rows, " << numCols << " block columns" << std::endl;
      const LO blockSize = A.getBlockSize();
      os << "% block size " << blockSize << std::endl;
      os << numRows*blockSize << " " << numCols*blockSize << " " << A.getGlobalNumEntries()*blockSize*blockSize << std::endl;
    }

    if (numProcs == 1 && !alwaysUseParallelAlgorithm) {
      writeMatrixStrip(A, os, params);
    } else {
      size_t numRows = rowMap->getNodeNumElements();

      //Create source map
      RCP<const map_type> allMeshGidsMap = rcp(new map_type(TOT::invalid(), numRows, A.getIndexBase(), comm));
      //Create and populate vector of mesh GIDs corresponding to this pid's rows.
      //This vector will be imported one pid's worth of information at a time to pid 0.
      mv_type allMeshGids(allMeshGidsMap, 1);
      Teuchos::ArrayRCP<GO> allMeshGidsData = allMeshGids.getDataNonConst(0);

      for (size_t i=0; i<numRows; i++)
        allMeshGidsData[i] = rowMap->getGlobalElement(i);
      allMeshGidsData = Teuchos::null;

      // Now construct a RowMatrix on PE 0 by strip-mining the rows of the input matrix A.
      size_t stripSize = allMeshGids.getGlobalLength() / numProcs;
      size_t remainder = allMeshGids.getGlobalLength() % numProcs;
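      // For example (illustration only, not part of the original source): with
      // 10 global mesh rows on 4 processes, stripSize = 2 and remainder = 2, so
      // the strips handled below have sizes 3, 3, 2, and 2.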
      size_t curStart = 0;
      size_t curStripSize = 0;
      Teuchos::Array<GO> importMeshGidList;
      for (size_t i=0; i<numProcs; i++) {
        if (myRank == 0) { // Only PE 0 does this part
          curStripSize = stripSize;
          if (i < remainder) curStripSize++; // handle leftovers
          importMeshGidList.resize(curStripSize); // Set size of vector to max needed
          for (size_t j=0; j<curStripSize; j++) importMeshGidList[j] = j + curStart + A.getIndexBase();
          curStart += curStripSize;
        }
        // The following import map should be non-trivial only on PE 0.
        TEUCHOS_TEST_FOR_EXCEPTION(myRank > 0 && curStripSize != 0,
          std::runtime_error, "Tpetra::Experimental::blockCrsMatrixWriter: (pid "
          << myRank << ") map size should be zero, but is " << curStripSize);
        RCP<map_type> importMeshGidMap = rcp(new map_type(TOT::invalid(), importMeshGidList(), A.getIndexBase(), comm));
        import_type gidImporter(allMeshGidsMap, importMeshGidMap);
        mv_type importMeshGids(importMeshGidMap, 1);
        importMeshGids.doImport(allMeshGids, gidImporter, INSERT);

        // importMeshGids now has a list of GIDs for the current strip of matrix rows.
        // Use these values to build another importer that will get rows of the matrix.

        // The following import map will be non-trivial only on PE 0.
        Teuchos::ArrayRCP<const GO> importMeshGidsData = importMeshGids.getData(0);
        Teuchos::Array<GO> importMeshGidsGO;
        importMeshGidsGO.reserve(importMeshGidsData.size());
        for (typename Teuchos::ArrayRCP<const GO>::size_type j=0; j<importMeshGidsData.size(); ++j)
          importMeshGidsGO.push_back(importMeshGidsData[j]);
        RCP<const map_type> importMap = rcp(new map_type(TOT::invalid(), importMeshGidsGO(), rowMap->getIndexBase(), comm));

        import_type importer(rowMap, importMap);
        size_t numEntriesPerRow = A.getCrsGraph().getGlobalMaxNumRowEntries();
        RCP<crs_graph_type> graph = createCrsGraph(importMap, numEntriesPerRow);
        RCP<const map_type> domainMap = A.getCrsGraph().getDomainMap();
        graph->doImport(A.getCrsGraph(), importer, INSERT);
        graph->fillComplete(domainMap, importMap);

        block_crs_matrix_type importA(*graph, A.getBlockSize());
        importA.doImport(A, importer, INSERT);

        // Finally we are ready to write this strip of the matrix
        writeMatrixStrip(importA, os, params);
      }
    }
  }
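  // Example usage (a sketch, not part of the original source): write a
  // previously constructed BlockCrsMatrix A to a Matrix Market file. The file
  // name "A.mm" is a placeholder; the parameter names below are the ones
  // queried by blockCrsMatrixWriter above and writeMatrixStrip below.
  //
  //   Teuchos::ParameterList params;
  //   params.set("precision", 16);              // optional output precision
  //   params.set("zero-based indexing", false); // optional; default is one-based point indices
  //   Tpetra::Experimental::blockCrsMatrixWriter(A, "A.mm", params);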

  template<class Scalar, class LO, class GO, class Node>
  void writeMatrixStrip(BlockCrsMatrix<Scalar,LO,GO,Node> const &A, std::ostream &os, Teuchos::ParameterList const &params) {
    using Teuchos::RCP;
    typedef Tpetra::Map<LO, GO, Node> map_type;

    size_t numRows = A.getGlobalNumRows();
    RCP<const map_type> rowMap = A.getRowMap();
    RCP<const map_type> colMap = A.getColMap();
    RCP<const Teuchos::Comm<int> > comm = rowMap->getComm();
    const int myRank = comm->getRank();

    const size_t meshRowOffset = rowMap->getIndexBase();
    const size_t meshColOffset = colMap->getIndexBase();
    TEUCHOS_TEST_FOR_EXCEPTION(meshRowOffset != meshColOffset,
      std::runtime_error, "Tpetra::Experimental::writeMatrixStrip: "
      "mesh row index base != mesh column index base");

    if (myRank != 0) {

      TEUCHOS_TEST_FOR_EXCEPTION(A.getNodeNumRows() != 0,
        std::runtime_error, "Tpetra::Experimental::writeMatrixStrip: pid "
        << myRank << " should have 0 rows but has " << A.getNodeNumRows());
      TEUCHOS_TEST_FOR_EXCEPTION(A.getNodeNumCols() != 0,
        std::runtime_error, "Tpetra::Experimental::writeMatrixStrip: pid "
        << myRank << " should have 0 columns but has " << A.getNodeNumCols());

    } else {

      TEUCHOS_TEST_FOR_EXCEPTION(numRows != A.getNodeNumRows(),
        std::runtime_error, "Tpetra::Experimental::writeMatrixStrip: "
        "number of rows on pid 0 does not match global number of rows");


      int err = 0;
      const LO blockSize = A.getBlockSize();
      const size_t numLocalRows = A.getNodeNumRows();
      bool precisionChanged = false;
      int oldPrecision = 0; // avoid "unused variable" warning
      if (params.isParameter("precision")) {
        oldPrecision = os.precision(params.get<int>("precision"));
        precisionChanged = true;
      }
      int pointOffset = 1;
      if (params.isParameter("zero-based indexing")) {
        if (params.get<bool>("zero-based indexing") == true)
          pointOffset = 0;
      }

      size_t localRowInd;
      for (localRowInd = 0; localRowInd < numLocalRows; ++localRowInd) {

        // Get a view of the current row.
        const LO* localColInds;
        Scalar* vals;
        LO numEntries;
        err = A.getLocalRowView (localRowInd, localColInds, vals, numEntries);
        if (err != 0)
          break;
        GO globalMeshRowID = rowMap->getGlobalElement(localRowInd) - meshRowOffset;

        for (LO k = 0; k < numEntries; ++k) {
          GO globalMeshColID = colMap->getGlobalElement(localColInds[k]) - meshColOffset;
          Scalar* const curBlock = vals + blockSize * blockSize * k;
          // Blocks are stored in row-major format.
          for (LO j = 0; j < blockSize; ++j) {
            GO globalPointRowID = globalMeshRowID * blockSize + j + pointOffset;
            for (LO i = 0; i < blockSize; ++i) {
              GO globalPointColID = globalMeshColID * blockSize + i + pointOffset;
              const Scalar curVal = curBlock[i + j * blockSize];

              os << globalPointRowID << " " << globalPointColID << " ";
              if (Teuchos::ScalarTraits<Scalar>::isComplex) {
                // Matrix Market format wants complex values to be
                // written as space-delimited pairs. See Bug 6469.
                os << Teuchos::ScalarTraits<Scalar>::real (curVal) << " "
                   << Teuchos::ScalarTraits<Scalar>::imag (curVal);
              }
              else {
                os << curVal;
              }
              os << std::endl;
            }
          }
        }
      }
      if (precisionChanged)
        os.precision(oldPrecision);
      TEUCHOS_TEST_FOR_EXCEPTION(err != 0,
        std::runtime_error, "Tpetra::Experimental::writeMatrixStrip: "
        "error getting view of local row " << localRowInd);

    }

  }
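  // For illustration only (not part of the original source): with blockSize = 2,
  // one-based output (pointOffset = 1), mesh row GID 3, and mesh column GID 5,
  // a block stored row-major as curBlock = {a, b, c, d}, i.e.
  //
  //   [a b]
  //   [c d]
  //
  // is written by the loops above as the point entries
  //
  //   7 11 a
  //   7 12 b
  //   8 11 c
  //   8 12 d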

  template<class Scalar, class LO, class GO, class Node>
  Teuchos::RCP<BlockCrsMatrix<Scalar, LO, GO, Node> >
  convertToBlockCrsMatrix(const Tpetra::CrsMatrix<Scalar, LO, GO, Node>& pointMatrix, const LO &blockSize)
  {

    /*
      ASSUMPTIONS:

        1) In point matrix, all entries associated with a little block are present (even if they are zero).
        2) For given mesh DOF, point DOFs appear consecutively and in ascending order in row & column maps.
        3) Point column map and block column map are ordered consistently.
    */
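    // For illustration only (not part of the original source): assumption (2)
    // means that, with blockSize = 3 and index base 0, the point DOFs for mesh
    // GID g are exactly the consecutive point GIDs 3*g, 3*g+1, and 3*g+2.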

    using Teuchos::Array;
    using Teuchos::ArrayView;
    using Teuchos::RCP;

    typedef Tpetra::Experimental::BlockCrsMatrix<Scalar,LO,GO,Node> block_crs_matrix_type;
    typedef Tpetra::Map<LO,GO,Node> map_type;
    typedef Tpetra::CrsGraph<LO,GO,Node> crs_graph_type;

    const map_type &pointRowMap = *(pointMatrix.getRowMap());
    RCP<const map_type> meshRowMap = createMeshMap<LO,GO,Node>(blockSize, pointRowMap);

    const map_type &pointColMap = *(pointMatrix.getColMap());
    RCP<const map_type> meshColMap = createMeshMap<LO,GO,Node>(blockSize, pointColMap);

    const map_type &pointDomainMap = *(pointMatrix.getDomainMap());
    RCP<const map_type> meshDomainMap = createMeshMap<LO,GO,Node>(blockSize, pointDomainMap);

    const map_type &pointRangeMap = *(pointMatrix.getRangeMap());
    RCP<const map_type> meshRangeMap = createMeshMap<LO,GO,Node>(blockSize, pointRangeMap);

    // Use graph ctor that provides column map and upper bound on nonzeros per row.
    // We can use static profile because the point graph should have at least as many entries per
    // row as the mesh graph.
    RCP<crs_graph_type> meshCrsGraph = rcp(new crs_graph_type(meshRowMap, meshColMap,
                                           pointMatrix.getGlobalMaxNumRowEntries(), Tpetra::StaticProfile));
    // Fill the graph by walking through the matrix. For each mesh row, we query the collection of point
    // rows associated with it. The point column ids are converted to mesh column ids and put into an array.
    // As each point row collection is finished, the mesh column ids are sorted, made unique, and inserted
    // into the mesh graph.
    ArrayView<const LO> pointColInds;
    ArrayView<const Scalar> pointVals;
    Array<GO> meshColGids;
    meshColGids.reserve(pointMatrix.getGlobalMaxNumRowEntries());
    //again, I assume that point GIDs associated with a mesh GID are consecutive.
    //if they are not, this will break!!
    for (size_t i=0; i<pointMatrix.getNodeNumRows()/blockSize; i++) {
      for (int j=0; j<blockSize; ++j) {
        LO rowLid = i*blockSize+j;
        pointMatrix.getLocalRowView(rowLid, pointColInds, pointVals); //TODO optimization: Since I don't care about values,
                                                                      //TODO I should use the graph instead.
        for (int k=0; k<pointColInds.size(); ++k) {
          GO meshColInd = pointColMap.getGlobalElement(pointColInds[k]) / blockSize;
          meshColGids.push_back(meshColInd);
        }
      }
      //List of mesh GIDs probably contains duplicates because we looped over all point rows in the block.
      //Sort and make unique.
      std::sort(meshColGids.begin(), meshColGids.end());
      meshColGids.erase(std::unique(meshColGids.begin(), meshColGids.end()), meshColGids.end());
      meshCrsGraph->insertGlobalIndices(meshRowMap->getGlobalElement(i), meshColGids());
      meshColGids.clear();
    }
    meshCrsGraph->fillComplete(meshDomainMap, meshRangeMap);

    //create and populate the block matrix
    RCP<block_crs_matrix_type> blockMatrix = rcp(new block_crs_matrix_type(*meshCrsGraph, blockSize));

    //preallocate the maximum number of (dense) block entries needed by any row
    int maxBlockEntries = blockMatrix->getNodeMaxNumRowEntries();
    Array<Array<Scalar>> blocks(maxBlockEntries);
    for (int i=0; i<maxBlockEntries; ++i)
      blocks[i].reserve(blockSize*blockSize);
    std::map<int,int> bcol2bentry; //maps block column index to dense block entries
    std::map<int,int>::iterator iter;
    //Fill the block matrix. We must do this in local index space.
    //TODO: Optimization: We assume the blocks are fully populated in the point matrix. This means
    //TODO: on the first point row in the block row, we know that we're hitting new block col indices.
    //TODO: on other rows, we know the block col indices have all been seen before
    //int offset;
    //if (pointMatrix.getIndexBase()) offset = 0;
    //else offset = 1;
    for (size_t i=0; i<pointMatrix.getNodeNumRows()/blockSize; i++) {
      int blkCnt=0; //how many unique block entries encountered so far in current block row
      for (int j=0; j<blockSize; ++j) {
        LO rowLid = i*blockSize+j;
        pointMatrix.getLocalRowView(rowLid, pointColInds, pointVals);
        for (int k=0; k<pointColInds.size(); ++k) {
          //convert point column to block col
          LO meshColInd = pointColInds[k] / blockSize;
          iter = bcol2bentry.find(meshColInd);
          if (iter == bcol2bentry.end()) {
            //new block column
            bcol2bentry[meshColInd] = blkCnt;
            blocks[blkCnt].push_back(pointVals[k]);
            blkCnt++;
          } else {
            //block column found previously
            int littleBlock = iter->second;
            blocks[littleBlock].push_back(pointVals[k]);
          }
        }
      }
      // TODO This inserts the blocks one block entry at a time. It is probably more efficient to
      // TODO store all the blocks in a block row contiguously so they can be inserted with a single call.
      for (iter=bcol2bentry.begin(); iter != bcol2bentry.end(); ++iter) {
        LO localBlockCol = iter->first;
        Scalar *vals = (blocks[iter->second]).getRawPtr();
        blockMatrix->replaceLocalValues(i, &localBlockCol, vals, 1);
      }

      //Done with block row. Zero everything out.
      for (int j=0; j<maxBlockEntries; ++j)
        blocks[j].clear();
      blkCnt = 0;
      bcol2bentry.clear();
    }

    return blockMatrix;

  }
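  // Example usage (a sketch, not part of the original source): convert an
  // existing, fill-complete point CrsMatrix that satisfies the assumptions
  // above into a BlockCrsMatrix with 3x3 blocks. The name "pointA" is a
  // placeholder for such a matrix.
  //
  //   Teuchos::RCP<Tpetra::Experimental::BlockCrsMatrix<Scalar,LO,GO,Node> > blockA =
  //     Tpetra::Experimental::convertToBlockCrsMatrix(*pointA, 3);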

  template<class LO, class GO, class Node>
  Teuchos::RCP<const Tpetra::Map<LO,GO,Node> >
  createMeshMap (const LO& blockSize, const Tpetra::Map<LO,GO,Node>& pointMap)
  {
    typedef Teuchos::OrdinalTraits<Tpetra::global_size_t> TOT;
    typedef Tpetra::Map<LO,GO,Node> map_type;

    //calculate mesh GIDs
    Teuchos::ArrayView<const GO> pointGids = pointMap.getNodeElementList();
    Teuchos::Array<GO> meshGids;
    GO indexBase = pointMap.getIndexBase();

    // Use hash table to track whether we've encountered this GID previously. This will happen
    // when striding through the point DOFs in a block. It should not happen otherwise.
    // I don't use sort/make unique because I don't want to change the ordering.
    meshGids.reserve(pointGids.size());
    Tpetra::Details::HashTable<GO,int> hashTable(pointGids.size());
    for (int i=0; i<pointGids.size(); ++i) {
      GO meshGid = (pointGids[i]-indexBase) / blockSize + indexBase;
      if (hashTable.get(meshGid) == -1) {
        hashTable.add(meshGid, 1); //(key,value)
        meshGids.push_back(meshGid);
      }
    }

    Teuchos::RCP<const map_type> meshMap = Teuchos::rcp(new map_type(TOT::invalid(), meshGids(), 0, pointMap.getComm()));
    return meshMap;

  }
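  // For example (illustration only, not part of the original source): with
  // blockSize = 2, index base 0, and a process whose point map owns GIDs
  // {0, 1, 2, 3, 6, 7}, the mesh GIDs produced above, in encounter order,
  // are {0, 1, 3}.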

} // namespace Experimental
} // namespace Tpetra

//
// Explicit instantiation macro for blockCrsMatrixWriter (various
// overloads), writeMatrixStrip, and convertToBlockCrsMatrix.
//
// Must be expanded from within the Tpetra namespace!
//
#define TPETRA_EXPERIMENTAL_BLOCKCRSMATRIX_HELPERS_INSTANT(S,LO,GO,NODE) \
  template void Experimental::blockCrsMatrixWriter(Experimental::BlockCrsMatrix<S,LO,GO,NODE> const &A, std::string const &fileName); \
  template void Experimental::blockCrsMatrixWriter(Experimental::BlockCrsMatrix<S,LO,GO,NODE> const &A, std::string const &fileName, Teuchos::ParameterList const &params); \
  template void Experimental::blockCrsMatrixWriter(Experimental::BlockCrsMatrix<S,LO,GO,NODE> const &A, std::ostream &os); \
  template void Experimental::blockCrsMatrixWriter(Experimental::BlockCrsMatrix<S,LO,GO,NODE> const &A, std::ostream &os, Teuchos::ParameterList const &params); \
  template void Experimental::writeMatrixStrip(Experimental::BlockCrsMatrix<S,LO,GO,NODE> const &A, std::ostream &os, Teuchos::ParameterList const &params); \
  template Teuchos::RCP<Experimental::BlockCrsMatrix<S, LO, GO, NODE> > Experimental::convertToBlockCrsMatrix(const CrsMatrix<S, LO, GO, NODE>& pointMatrix, const LO &blockSize);

//
// Explicit instantiation macro for createMeshMap.
//
// Must be expanded from within the Tpetra::Experimental namespace!
//
#define TPETRA_EXPERIMENTAL_CREATEMESHMAP_INSTANT(LO,GO,NODE) \
  template Teuchos::RCP<const Map<LO,GO,NODE> > createMeshMap (const LO& blockSize, const Map<LO,GO,NODE>& pointMap);
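
// For illustration only (not from this file): with explicit template
// instantiation, a translation unit might expand these macros roughly as
//
//   namespace Tpetra {
//     TPETRA_EXPERIMENTAL_BLOCKCRSMATRIX_HELPERS_INSTANT(double, int, long long, some_node_type)
//     namespace Experimental {
//       TPETRA_EXPERIMENTAL_CREATEMESHMAP_INSTANT(int, long long, some_node_type)
//     }
//   }
//
// where "some_node_type" is a placeholder for the Kokkos node type chosen by
// the build configuration.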

#endif // TPETRA_EXPERIMENTAL_BLOCKCRSMATRIX_HELPERS_DEF_HPP