[PPL-devel] [GIT] ppl/ppl(MPI): Distributed_Sparse_Matrix: add an exact_entering_index() method.

Marco Poletti poletti.marco at gmail.com
Sun Sep 12 10:55:50 CEST 2010


Module: ppl/ppl
Branch: MPI
Commit: 248a43ded5052d330693a15edf18e0651fe005f2
URL:    http://www.cs.unipr.it/git/gitweb.cgi?p=ppl/ppl.git;a=commit;h=248a43ded5052d330693a15edf18e0651fe005f2

Author: Marco Poletti <poletti.marco at gmail.com>
Date:   Sun Sep 12 08:58:14 2010 +0200

Distributed_Sparse_Matrix: add an exact_entering_index() method.

---

 src/Distributed_Sparse_Matrix.cc      |  189 +++++++++++++++++++++++++++++++++
 src/Distributed_Sparse_Matrix.defs.hh |   11 ++
 2 files changed, 200 insertions(+), 0 deletions(-)
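
In short, the new entry point is root-driven: exact_entering_index() broadcasts EXACT_ENTERING_INDEX_OPERATION with the matrix id, then broadcasts the candidate column indexes (the columns whose cost coefficient has the same sign as the last entry of `working_cost'); every node, the root included, scores its own row chunk through the shared exact_entering_index__common() helper, and a reduction accumulates the partial challenger values at the root, which finally picks the entering column. A minimal sketch of the intended call pattern on the root, assuming a Distributed_Sparse_Matrix named `tableau' and a cost row `working_cost' (neither the caller nor these names appear in this patch); as in the code below, 0 is returned when no column qualifies:

  // Sketch only: `tableau' and `working_cost' are assumed to be set up
  // elsewhere; this is not code from the patch.
  PPL::dimension_type entering = tableau.exact_entering_index(working_cost);
  if (entering == 0) {
    // No cost coefficient has the required sign: there is no challenger,
    // so no entering column exists.
  }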

diff --git a/src/Distributed_Sparse_Matrix.cc b/src/Distributed_Sparse_Matrix.cc
index fa19973..ddf67fe 100644
--- a/src/Distributed_Sparse_Matrix.cc
+++ b/src/Distributed_Sparse_Matrix.cc
@@ -72,6 +72,7 @@ PPL::Distributed_Sparse_Matrix::num_operation_params[] = {
   3, // REMOVE_ROW_FROM_BASE_OPERATION: id, rank, row_index
   1, // SET_BASE_OPERATION: id
   1, // GET_BASE_OPERATION: id
+  1, // EXACT_ENTERING_INDEX_OPERATION: id
 };
 
 const mpi::communicator*
@@ -234,6 +235,10 @@ PPL::Distributed_Sparse_Matrix
       worker.get_base(op.params[0]);
       break;
 
+    case EXACT_ENTERING_INDEX_OPERATION:
+      worker.exact_entering_index(op.params[0]);
+      break;
+
     case QUIT_OPERATION:
       PPL_ASSERT(false);
 
@@ -897,6 +902,85 @@ PPL::Distributed_Sparse_Matrix
   }
 }
 
+namespace {
+
+class exact_entering_index_reducer_functor {
+public:
+  const PPL::Coefficient&
+  operator()(PPL::Coefficient& x, const PPL::Coefficient& y) const {
+    PPL::lcm_assign(x, x, y);
+    return x;
+  }
+};
+
+} // namespace
+
+void
+PPL::Distributed_Sparse_Matrix
+::exact_entering_index__common(const std::vector<dimension_type>& columns,
+                               std::vector<Coefficient>& challenger_values,
+                               const std::vector<Sparse_Row>& rows,
+                               const std::vector<dimension_type>& base,
+                               Coefficient& squared_lcm_basis) {
+  // The normalization factor for each coefficient in the tableau.
+  std::vector<Coefficient> norm_factor(rows.size());
+  {
+    // Compute the lcm of all the coefficients of variables in base.
+    PPL_DIRTY_TEMP_COEFFICIENT(local_lcm_basis);
+    local_lcm_basis = 1;
+    for (dimension_type i = rows.size(); i-- > 0; )
+      lcm_assign(local_lcm_basis, local_lcm_basis, rows[i].get(base[i]));
+
+    PPL_DIRTY_TEMP_COEFFICIENT(global_lcm_basis);
+    mpi::all_reduce(comm(), local_lcm_basis, global_lcm_basis,
+                    exact_entering_index_reducer_functor());
+
+    // Compute normalization factors for local rows.
+    for (dimension_type i = rows.size(); i-- > 0; )
+      exact_div_assign(norm_factor[i], global_lcm_basis,
+                       rows[i].get(base[i]));
+
+    // Compute the square of `global_lcm_basis', exploiting the fact that
+    // `global_lcm_basis' will no longer be needed.
+    global_lcm_basis *= global_lcm_basis;
+    std::swap(squared_lcm_basis, global_lcm_basis);
+  }
+
+  PPL_DIRTY_TEMP_COEFFICIENT(scalar_value);
+
+  const dimension_type columns_size = columns.size();
+
+  for (dimension_type i = rows.size(); i-- > 0; ) {
+    const Sparse_Row& row = rows[i];
+    Sparse_Row::const_iterator j = row.begin();
+    Sparse_Row::const_iterator j_end = row.end();
+    // This will be used to index the columns[] and challenger_values[]
+    // vectors.
+    dimension_type k = 0;
+    while (j != j_end) {
+      while (k != columns_size && j.index() > columns[k])
+        ++k;
+      if (k == columns_size)
+        break;
+      PPL_ASSERT(j.index() <= columns[k]);
+      if (j.index() < columns[k])
+        j = row.lower_bound(j, columns[k]);
+      else {
+        Coefficient_traits::const_reference tableau_ij = *j;
+        // FIXME: check whether the test against zero also speeds up the
+        // sparse version; in the dense version it gave a consistent speed up:
+        // http://www.cs.unipr.it/pipermail/ppl-devel/2009-February/014000.html
+        if (tableau_ij != 0) {
+          scalar_value = tableau_ij * norm_factor[i];
+          add_mul_assign(challenger_values[k], scalar_value, scalar_value);
+        }
+        ++k;
+        ++j;
+      }
+    }
+  }
+}
+
 void
 PPL::Distributed_Sparse_Matrix::init(dimension_type num_rows1,
                                      dimension_type num_cols1) {
@@ -1577,6 +1661,83 @@ PPL::Distributed_Sparse_Matrix
   return entering_index;
 }
 
+namespace {
+
+class exact_entering_index_reducer_functor2 {
+public:
+  const std::vector<PPL::Coefficient>&
+  operator()(std::vector<PPL::Coefficient>& x,
+             const std::vector<PPL::Coefficient>& y) const {
+    PPL_ASSERT(x.size() == y.size());
+    for (PPL::dimension_type i = x.size(); i-- > 0; )
+      x[i] += y[i];
+    return x;
+  }
+};
+
+} // namespace
+
+PPL::dimension_type
+PPL::Distributed_Sparse_Matrix
+::exact_entering_index(const Dense_Row& working_cost) const {
+  broadcast_operation(EXACT_ENTERING_INDEX_OPERATION, id);
+
+  // Contains the list of the (column) indexes of challengers.
+  std::vector<dimension_type> columns;
+  // This is only an upper bound.
+  columns.reserve(num_columns() - 1);
+
+  const int cost_sign = sgn(working_cost[working_cost.size() - 1]);
+  for (dimension_type column = 1; column < num_columns() - 1; ++column)
+    if (sgn(working_cost[column]) == cost_sign) {
+      columns.push_back(column);
+    }
+
+  mpi::broadcast(comm(), columns, 0);
+
+  // For each i, challenger_values[i] contains the challenger value for the
+  // columns[i] column.
+  std::vector<Coefficient> challenger_values(columns.size());
+  PPL_DIRTY_TEMP_COEFFICIENT(squared_lcm_basis);
+  exact_entering_index__common(columns, challenger_values, local_rows, base,
+                               squared_lcm_basis);
+
+  std::vector<Coefficient> global_challenger_values;
+  mpi::reduce(comm(), challenger_values, global_challenger_values,
+              exact_entering_index_reducer_functor2(), 0);
+
+  PPL_DIRTY_TEMP_COEFFICIENT(challenger_num);
+  PPL_DIRTY_TEMP_COEFFICIENT(current_num);
+  PPL_DIRTY_TEMP_COEFFICIENT(current_den);
+  PPL_DIRTY_TEMP_COEFFICIENT(challenger_value);
+  PPL_DIRTY_TEMP_COEFFICIENT(current_value);
+  dimension_type entering_index = 0;
+  for (dimension_type k = 0; k < columns.size(); ++k) {
+    global_challenger_values[k] += squared_lcm_basis;
+    Coefficient_traits::const_reference cost_j = working_cost[columns[k]];
+    // We cannot compute the (exact) square root of abs(\Delta x_j).
+    // The workaround is to compute the square of `cost[j]'.
+    challenger_num = cost_j * cost_j;
+    // Initialization during the first loop.
+    if (entering_index == 0) {
+      std::swap(current_num, challenger_num);
+      std::swap(current_den, global_challenger_values[k]);
+      entering_index = columns[k];
+      continue;
+    }
+    challenger_value = challenger_num * current_den;
+    current_value = current_num * global_challenger_values[k];
+    // Update the values, if the challenger wins.
+    if (challenger_value > current_value) {
+      std::swap(current_num, challenger_num);
+      std::swap(current_den, global_challenger_values[k]);
+      entering_index = columns[k];
+    }
+  }
+
+  return entering_index;
+}
+
 void
 PPL::Distributed_Sparse_Matrix
 ::set_artificial_indexes_for_new_rows(dimension_type old_num_rows,
@@ -2279,6 +2440,34 @@ PPL::Distributed_Sparse_Matrix::Worker::get_base(dimension_type id) const {
     mpi::gather(comm(), itr->second.base, 0);
 }
 
+void
+PPL::Distributed_Sparse_Matrix::Worker
+::exact_entering_index(dimension_type id) const {
+  // Contains the list of the (column) indexes of challengers.
+  std::vector<dimension_type> columns;
+
+  mpi::broadcast(comm(), columns, 0);
+
+  // For each i, challenger_values[i] contains the challenger value for the
+  // columns[i] column.
+  std::vector<Coefficient> challenger_values(columns.size());
+
+  row_chunks_const_itr_type itr = row_chunks.find(id);
+
+  PPL_DIRTY_TEMP_COEFFICIENT(squared_lcm_basis);
+  if (itr == row_chunks.end()) {
+    std::vector<Sparse_Row> rows;
+    std::vector<dimension_type> base;
+    exact_entering_index__common(columns, challenger_values, rows, base,
+                                 squared_lcm_basis);
+  } else
+    exact_entering_index__common(columns, challenger_values, itr->second.rows,
+                                 itr->second.base, squared_lcm_basis);
+
+  mpi::reduce(comm(), challenger_values,
+              exact_entering_index_reducer_functor2(), 0);
+}
+
 template <typename Archive>
 void
 PPL::Distributed_Sparse_Matrix::Operation
diff --git a/src/Distributed_Sparse_Matrix.defs.hh b/src/Distributed_Sparse_Matrix.defs.hh
index f714c65..72f42eb 100644
--- a/src/Distributed_Sparse_Matrix.defs.hh
+++ b/src/Distributed_Sparse_Matrix.defs.hh
@@ -133,6 +133,7 @@ public:
   void remove_row_from_base(dimension_type row_index);
   void set_base(const std::vector<dimension_type>& base);
   void get_base(std::vector<dimension_type>& base) const;
+  dimension_type exact_entering_index(const Dense_Row& working_cost) const;
 
   bool OK() const;
 
@@ -175,6 +176,13 @@ private:
       const std::vector<Sparse_Row>& rows,
       std::vector<double>& results);
 
+  static void exact_entering_index__common(
+      const std::vector<dimension_type>& columns,
+      std::vector<Coefficient>& challenger_values,
+      const std::vector<Sparse_Row>& rows,
+      const std::vector<dimension_type>& base,
+      Coefficient& squared_lcm_basis);
+
   void init(dimension_type num_rows, dimension_type num_columns);
 
   static dimension_type get_unique_id();
@@ -239,6 +247,7 @@ private:
                               dimension_type local_row_index);
     void set_base(dimension_type id);
     void get_base(dimension_type id) const;
+    void exact_entering_index(dimension_type id) const;
 
   private:
     struct Row_Chunk {
@@ -325,6 +334,8 @@ private:
     SET_BASE_OPERATION,
     //! Parameters: id
     GET_BASE_OPERATION,
+    //! Parameters: id
+    EXACT_ENTERING_INDEX_OPERATION,
   };
 
   // This associates to each operation code the number of dimension_type
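
For reference, the selection loop at the end of exact_entering_index() maximizes, over the candidate columns j, the ratio

  cost_j^2 / (squared_lcm_basis + sum_i (norm_factor_i * tableau_ij)^2)

without ever dividing: `challenger_num' and `global_challenger_values[k]' hold the challenger's numerator and denominator, `current_num' and `current_den' those of the incumbent, and the two fractions are compared by cross-multiplication (both denominators are positive: each is `squared_lcm_basis', a nonzero square, plus a sum of squares). A self-contained sketch of that comparison, with `long long' standing in for the unbounded PPL::Coefficient (the real code cannot use a machine integer here, since the products would overflow):

  #include <cassert>

  typedef long long Int;  // stand-in for the arbitrary-precision PPL::Coefficient

  // Returns true if num_a/den_a > num_b/den_b, assuming den_a > 0 and
  // den_b > 0, without performing any division.
  bool ratio_greater(Int num_a, Int den_a, Int num_b, Int den_b) {
    assert(den_a > 0 && den_b > 0);
    return num_a * den_b > num_b * den_a;
  }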

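The two reducer functors are worth a remark: Boost.MPI accepts any binary function object as the reduction operation, so exact_entering_index_reducer_functor folds the per-node basis lcms with lcm_assign() inside an all_reduce (every node needs the global lcm to normalize its rows: e.g. with basis coefficients 2, 3 and 4 spread over two nodes, the global lcm is 12, the normalization factors are 6, 4 and 3, and `squared_lcm_basis' is 144), while exact_entering_index_reducer_functor2 adds the challenger-value vectors elementwise in a reduce whose result is needed only at the root. A compact, self-contained illustration of the same two patterns, assuming Boost.MPI and C++17; `long' replaces Coefficient and the functor names are placeholders:

  #include <boost/mpi.hpp>
  #include <boost/serialization/vector.hpp>
  #include <numeric>
  #include <vector>

  namespace mpi = boost::mpi;

  // Binary reduction operation: least common multiple of two values.
  struct lcm_reducer {
    long operator()(long x, long y) const { return std::lcm(x, y); }
  };

  // Binary reduction operation: elementwise sum of two equally-sized vectors.
  struct vector_sum_reducer {
    std::vector<long> operator()(const std::vector<long>& x,
                                 const std::vector<long>& y) const {
      std::vector<long> r(x.size());
      for (std::size_t i = 0; i < x.size(); ++i)
        r[i] = x[i] + y[i];
      return r;
    }
  };

  int main(int argc, char** argv) {
    mpi::environment env(argc, argv);
    mpi::communicator comm;

    // Every process obtains the lcm of all the local values
    // (cf. `global_lcm_basis' in exact_entering_index__common()).
    long local_value = comm.rank() + 2;
    long global_lcm = 1;
    mpi::all_reduce(comm, local_value, global_lcm, lcm_reducer());

    // Only the root receives the elementwise sum
    // (cf. `global_challenger_values' in exact_entering_index()).
    std::vector<long> partial(4, comm.rank());
    if (comm.rank() == 0) {
      std::vector<long> total;
      mpi::reduce(comm, partial, total, vector_sum_reducer(), 0);
    }
    else
      mpi::reduce(comm, partial, vector_sum_reducer(), 0);

    return 0;
  }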