// @HEADER
// ***********************************************************************
// 
//            Amesos: An Interface to Direct Solvers
//                 Copyright (2004) Sandia Corporation
// 
// Under terms of Contract DE-AC04-94AL85000, there is a non-exclusive
// license for use of this work by or on behalf of the U.S. Government.
// 
// This library is free software; you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as
// published by the Free Software Foundation; either version 2.1 of the
// License, or (at your option) any later version.
//  
// This library is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
// Lesser General Public License for more details.
//  
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
// USA
// Questions? Contact Michael A. Heroux ([email protected]) 
// 
// ***********************************************************************
// @HEADER

//
// Example using Amesos' interface to KLU to solve a sparse linear
// system, with Epetra_CrsMatrix as the sparse matrix type.
//

#include "Amesos_ConfigDefs.h"
#ifdef HAVE_MPI
#  include "mpi.h"
#  include "Epetra_MpiComm.h"
#else
#  include "Epetra_SerialComm.h"
#endif
#include "Amesos.h"
#include "Epetra_RowMatrix.h"
#include "Epetra_MultiVector.h"
#include "Epetra_LinearProblem.h"

// This example code uses Trilinos' Galeri package to generate the
// linear system to solve.  You don't need Galeri to use Amesos.  We
// just use Galeri here because it provides a prepackaged example
// problem to solve.
#include "Galeri_Maps.h"
#include "Galeri_CrsMatrices.h"

// ==================== //
// M A I N  D R I V E R //
// ==================== //
//
// This example will:
//
// 1. Create a linear system, stored as an Epetra_LinearProblem.  The
//    matrix in this example corresponds to a 5-point Laplacian on a
//    2D Cartesian grid.  The user can change the global size of the
//    problem by modifying the variables nx and ny (the number of
//    internal grid points along the x and y dimensions of the grid,
//    respectively).  This problem happens to be symmetric, but Amesos
//    solvers don't necessarily assume symmetry.
//
// 2. The linear system's matrix, solution, and right-hand side are
//    distributed among the available processors, using a linear
//    distribution.  This is for simplicity only!  Amesos can support
//    any distribution (i.e., any Epetra_Map).
//
// 3. Once the linear problem is created, we create an Amesos Factory
//    object.
//
// 4. Using the Factory, we create the required Amesos_BaseSolver
//    solver.  Any supported (and compiled) Amesos solver can be
//    used.  If the selected solver is not available (that is, if
//    Amesos has *not* been configured with support for this solver),
//    the factory returns null.  Amesos_Klu is almost always
//    available.  KLU provides a sparse LU factorization without
//    supernode or multifrontal optimizations.
//
// 5. At this point we can factor the matrix and solve the linear
//    system.  The three methods that do the work for any
//    Amesos_BaseSolver object are, in the order they are called:
//
//    a. SymbolicFactorization();
//    b. NumericFactorization();
//    c. Solve();
//
// 6. The header files of libraries supported by Amesos are *not*
//    required in this file. They are only needed when building
//    Amesos from source.
//
// NOTE: this example can be run with any number of processors.
//
// Original author: Marzio Sala, SNL 9214.  Most recent modifications
// due to Mark Hoemmen ([email protected]), Aug 2011.
//
int 
main(int argc, char *argv[]) 
{
  using Teuchos::getParameter;
  using Teuchos::ParameterList;
  using std::cerr;
  using std::cout;
  using std::endl;

#ifdef HAVE_MPI
  MPI_Init (&argc, &argv);
  Epetra_MpiComm Comm (MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif

  // Number of grid points in the x direction.  In this example, nx is
  // fixed with respect to the number of MPI processes.
  int nx = 100;
  // Number of grid points in the y direction.  We scale this with the
  // number of MPI processes.
  int ny = 100 * Comm.NumProc();
  // Number of right-hand sides.  Amesos solvers support single or
  // multiple right-hand sides.
  int NumVectors = 1;

  // Create the example linear system using Galeri.
  //
  // NOTE: this example uses the Trilinos package Galeri, which
  // includes many example problems.  This makes our example code
  // shorter.  The user can easily change the matrix type; consult the
  // Galeri documentation for more details.
  //
  // Here the problem has size nx x ny, and the 2D Cartesian
  // grid is divided into mx x my subdomains.
  ParameterList GaleriList;
  GaleriList.set ("nx", nx);
  GaleriList.set ("ny", ny);
  GaleriList.set ("mx", 1);
  GaleriList.set ("my", Comm.NumProc());

  Epetra_Map* Map = Galeri::CreateMap ("Cartesian2D", Comm, GaleriList);
  Epetra_CrsMatrix* Matrix = Galeri::CreateCrsMatrix ("Laplace2D", Map, GaleriList);

  // Create solution and right-hand side vectors.
  Epetra_MultiVector LHS (*Map, NumVectors); 
  LHS.PutScalar (0.0); // Fill the solution vector with zeros
  Epetra_MultiVector RHS (*Map, NumVectors); 
  RHS.Random ();       // Fill the right-hand side with random data

  // The linear problem for Amesos to solve.
  Epetra_LinearProblem Problem (Matrix, &LHS, &RHS);

  // ====================================================== //
  // B E G I N N I N G   O F  T H E   A M E S O S   P A R T //
  // ====================================================== //

  // Amesos_BaseSolver is the interface for all Amesos solvers. It is
  // a pure virtual class.  Hence, objects of this class cannot be
  // allocated, and can exist only as pointers or references.  
  //
  // If you prefer, you may also use a "smart pointer" such as
  // Teuchos::RCP or shared_ptr in the new C++ standard (C++0x) to
  // handle deallocation automatically.  However, see the cautionary
  // note below.  (In brief: the destructor may make MPI calls, so you
  // should be sure that the smart pointer invokes the destructor
  // _before_ calling MPI_Finalize().)
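  //
  // A minimal sketch of the smart-pointer approach (not used in this
  // example; it refers to the Factory, SolverType, and Problem
  // objects that appear below):
  //
  //   Teuchos::RCP<Amesos_BaseSolver> solver =
  //     Teuchos::rcp (Factory.Create (SolverType, Problem));
  //   // ... use solver as you would the raw pointer ...
  //   solver = Teuchos::null; // destroys the solver before MPI_Finalize()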
  Amesos_BaseSolver* Solver = NULL;

  // Initialize the Factory.  Amesos is a class that contains only
  // methods, no data.  The Factory will be used to create objects
  // that implement the Amesos_BaseSolver interface.
  Amesos Factory;

  // Specify the solver.  The string SolverType can take one of the
  // following values:
  // - Lapack
  // - Klu
  // - Umfpack
  // - Pardiso
  // - Taucs
  // - Superlu
  // - Superludist
  // - Mumps
  // - Dscpack
  // 
  // We use KLU because it is a sparse solver and is always
  // available.  "Lapack" is a dense solver (it converts the matrix to
  // dense format and invokes LAPACK's LU factorization).  The other
  // solvers in the list are interfaces to third-party libraries, with
  // which Trilinos may or may not have been built.  You can check
  // whether they are available by asking Amesos' Factory for them: if
  // the Factory returns a NULL pointer, they are not available.
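  //
  // If your version of Amesos provides the Factory's Query() method,
  // you can also test availability up front without creating a
  // solver.  A minimal sketch (the solver name is just an example):
  //
  //   if (! Factory.Query ("Superludist")) {
  //     // SuperLU_DIST support was not compiled into Trilinos.
  //   }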
  std::string SolverType = "Klu";
  Solver = Factory.Create (SolverType, Problem);

  // Factory.Create() returns NULL if the requested solver is not
  // available.
  if (Solver == NULL) {
    cerr << "Specified solver \"" << SolverType << "\" is not available." << endl;
#ifdef HAVE_MPI
    MPI_Finalize();
#endif
    return EXIT_FAILURE;
  }

  // Parameters for all Amesos solvers are set through a call to
  // SetParameters(List). List is a Teuchos ParameterList (Amesos must
  // be built with Trilinos' Teuchos package).  If you do not call
  // SetParameters, the solver will use default parameters.  This will
  // work fine in most cases.  Please consult the Amesos Users' Guide
  // for more details.
  //
  // Parameters in the list are set using List.set ("parameter-name",
  // ParameterValue).  In this example, we specify that we want more
  // output.
  ParameterList List;
  List.set ("PrintTiming", true);
  List.set ("PrintStatus", true);

  Solver->SetParameters (List);
  
  // Now we are ready to solve. Generally, users will call
  // SymbolicFactorization(), then NumericFactorization(), and finally
  // Solve(). Note that:
  // - the numerical values of the linear system matrix are *not*
  //   required before NumericFactorization();
  // - the solution and right-hand side are *not* required before
  //   calling Solve().
  if (Comm.MyPID() == 0)
    cout << "Starting symbolic factorization..." << endl;
  Solver->SymbolicFactorization();

  // You can change the matrix values at any point before calling
  // Solver->NumericFactorization().
  if (Comm.MyPID() == 0)
    cout << "Starting numeric factorization..." << endl;
  Solver->NumericFactorization();
  
  // You can change the solution vector LHS and the right-hand side
  // vector RHS at any time before calling Solver->Solve().
  if (Comm.MyPID() == 0)
    cout << "Starting solution phase..." << endl;
  Solver->Solve();
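
  // Each of the three calls above returns an int error code (zero
  // means success).  This example ignores the return values for
  // brevity; a more careful driver might check them, for example:
  //
  //   const int info = Solver->Solve ();
  //   if (info != 0) {
  //     cerr << "Solve() returned error code " << info << endl;
  //   }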
	
  // Amesos has solved the linear system.  Now you can get the
  // timings.  Amesos stores the timings for different components of
  // the solve in a ParameterList.
  ParameterList TimingsList;
  Solver->GetTiming (TimingsList);
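
  // If you simply want to see everything Amesos recorded, you can
  // print the whole list (Teuchos::ParameterList has a print()
  // method).  For example:
  //
  //   if (Comm.MyPID () == 0) {
  //     TimingsList.print (cout);
  //   }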
  
  //
  // You can find out how much time was spent in ...
  //

  // 1) The symbolic factorization 
  //    (this parameter doesn't always exist; if it doesn't, the
  //    two-argument version of get() will return the value of the
  //    second argument, which here is zero)
  const double sfact_time = 
    TimingsList.get ("Total symbolic factorization time", 0.0);

  // 2) The numeric factorization 
  //    (always exists if NumericFactorization() is called)
  const double nfact_time = 
    getParameter<double> (TimingsList, "Total numeric factorization time");

  // 3) Solving the linear system 
  //    (always exists if Solve() is called)
  const double solve_time = 
    getParameter<double> (TimingsList, "Total solve time");

  // 4) Converting the matrix to the specific solver's input format
  //    (always exists if SymbolicFactorization() is called)
  const double mtx_conv_time = 
    getParameter<double> (TimingsList, "Total matrix conversion time");

  // 5) Redistributing the matrix for each solve to the accepted
  //    format for the solver.  (If the matrix to factor is
  //    distributed over processors, and the specific solver does not
  //    know how to operate on distributed data, then Amesos has to
  //    gather the matrix onto one node for the factorization.)  
  //
  //    This may not exist in TimingsList if the matrix didn't need to
  //    be redistributed, which is why we use the two-argument form of
  //    get() that returns the second argument if the parameter with
  //    the given name doesn't exist.
  const double mtx_redist_time = 
    TimingsList.get ("Total matrix redistribution time", 0.0);

  // 6) Redistributing the vector(s) for each solve to the accepted
  //    format for the solver.
  //
  //    This may not exist in TimingsList if the vector(s) didn't need
  //    to be redistributed, which is why we use the two-argument form
  //    of get() that returns the second argument if the parameter
  //    with the given name doesn't exist.
  const double vec_redist_time = 
    TimingsList.get ("Total vector redistribution time", 0.0);

  // Just for fun, print out the timings here.  Recall, though, that
  // we've already asked Amesos to print out both timings (via the
  // "PrintTiming" solver parameter) and "status" (via the
  // "PrintStatus" solver parameter).  "Status" in this case means
  // interesting details about the factorization.
  if (Comm.MyPID() == 0)
    {
      cout << endl 
	   << "Solver timings (in seconds):" << endl
	   << "- Symbolic factorization:       " << sfact_time << endl
	   << "- Numeric factorization:        " << nfact_time << endl
	   << "- Solving the linear system:    " << solve_time << endl
	   << "- Converting the matrix:        " << mtx_conv_time << endl
	   << "- Redistributing the matrix:    " << mtx_redist_time << endl
	   << "- Redistributing the vector(s): " << vec_redist_time << endl
	   << endl;
    }
  // =========================================== //
  // E N D   O F   T H E   A M E S O S   P A R T //
  // =========================================== //

  // Invoke the solver's destructor and deallocate it.  The solver's
  // destructor might make MPI calls, so this should be done before
  // calling MPI_Finalize().
  //
  // Take note of this if you are using a reference-counted "smart
  // pointer" like Teuchos::RCP, Boost's shared_ptr, or the shared_ptr
  // in the new C++ standard (C++0x).  In that case, you should make
  // sure that all the smart pointers have either fallen out of scope
  // or had null assigned to them, before calling MPI_Finalize().
  // Also be careful of this if you are handling Amesos solvers in a
  // garbage-collected language (such as Java or Python), since such
  // languages often do not promise when objects will be finalized or
  // even if they will be finalized at all.
  delete Solver;
    
  // Deallocate the objects created by Galeri.
  delete Matrix;
  delete Map;

#ifdef HAVE_MPI
  MPI_Finalize();
#endif
  return EXIT_SUCCESS;
}
