diff --git a/ortools/glop/lu_factorization.cc b/ortools/glop/lu_factorization.cc
index de30ea0f22..8deed0b61c 100644
--- a/ortools/glop/lu_factorization.cc
+++ b/ortools/glop/lu_factorization.cc
@@ -405,7 +405,8 @@ bool LuFactorization::LeftSolveLWithNonZeros(
     ClearAndResizeVectorWithNonZeros(x->size(), result_before_permutation);
     x->swap(result_before_permutation->values);
     if (nz->empty()) {
-      for (RowIndex row(0); row < inverse_row_perm_.size(); ++row) {
+      const RowIndex num_rows = inverse_row_perm_.size();
+      for (RowIndex row(0); row < num_rows; ++row) {
         const Fractional value = (*result_before_permutation)[row];
         if (value != 0.0) {
           const RowIndex permuted_row = inverse_row_perm_[row];
diff --git a/ortools/glop/revised_simplex.cc b/ortools/glop/revised_simplex.cc
index 28ef73d4c9..88549afa30 100644
--- a/ortools/glop/revised_simplex.cc
+++ b/ortools/glop/revised_simplex.cc
@@ -451,7 +451,7 @@ Status RevisedSimplex::Solve(const LinearProgram& lp, TimeLimit* time_limit) {
                  "PRIMAL_UNBOUNDED was reported, but the tolerance are good "
                  "and the unbounded ray is not great.");
       SOLVER_LOG(logger_,
-                 "The difference between unbounded and optimal can depends "
+                 "The difference between unbounded and optimal can depend "
                  "on a slight change of tolerance, trying to see if we are "
                  "at OPTIMAL after postsolve.");
       problem_status_ = ProblemStatus::OPTIMAL;
@@ -1087,7 +1087,9 @@ bool RevisedSimplex::InitializeObjectiveAndTestIfUnchanged(
   // This function work whether the lp is in equation form (with slack) or
   // without, since the objective of the slacks are always zero.
   DCHECK_GE(num_cols_, lp.num_variables());
-  for (ColIndex col(lp.num_variables()); col < num_cols_; ++col) {
+
+  const auto obj_coeffs = lp.objective_coefficients().const_view();
+  for (ColIndex col(obj_coeffs.size()); col < num_cols_; ++col) {
     if (objective_[col] != 0.0) {
       objective_is_unchanged = false;
       objective_[col] = 0.0;
@@ -1096,8 +1098,8 @@ bool RevisedSimplex::InitializeObjectiveAndTestIfUnchanged(
 
   if (lp.IsMaximizationProblem()) {
     // Note that we use the minimization version of the objective internally.
-    for (ColIndex col(0); col < lp.num_variables(); ++col) {
-      const Fractional coeff = -lp.objective_coefficients()[col];
+    for (ColIndex col(0); col < obj_coeffs.size(); ++col) {
+      const Fractional coeff = -obj_coeffs[col];
       if (objective_[col] != coeff) {
         objective_is_unchanged = false;
         objective_[col] = coeff;
@@ -1106,8 +1108,8 @@ bool RevisedSimplex::InitializeObjectiveAndTestIfUnchanged(
     objective_offset_ = -lp.objective_offset();
     objective_scaling_factor_ = -lp.objective_scaling_factor();
   } else {
-    for (ColIndex col(0); col < lp.num_variables(); ++col) {
-      const Fractional coeff = lp.objective_coefficients()[col];
+    for (ColIndex col(0); col < obj_coeffs.size(); ++col) {
+      const Fractional coeff = obj_coeffs[col];
       if (objective_[col] != coeff) {
         objective_is_unchanged = false;
         objective_[col] = coeff;
@@ -1120,7 +1122,7 @@ bool RevisedSimplex::InitializeObjectiveAndTestIfUnchanged(
   return objective_is_unchanged;
 }
 
-void RevisedSimplex::InitializeObjectiveLimit(const LinearProgram& lp) {
+void RevisedSimplex::InitializeObjectiveLimit() {
   objective_limit_reached_ = false;
   DCHECK(std::isfinite(objective_offset_));
   DCHECK(std::isfinite(objective_scaling_factor_));
@@ -1418,7 +1420,7 @@ Status RevisedSimplex::Initialize(const LinearProgram& lp) {
     }
   }
 
-  InitializeObjectiveLimit(lp);
+  InitializeObjectiveLimit();
 
   // Computes the variable name as soon as possible for logging.
   // TODO(user): do we really need to store them? we could just compute them
diff --git a/ortools/glop/revised_simplex.h b/ortools/glop/revised_simplex.h
index 3f8d9c8b4e..a4fa4b51ad 100644
--- a/ortools/glop/revised_simplex.h
+++ b/ortools/glop/revised_simplex.h
@@ -414,7 +414,7 @@ class RevisedSimplex {
   bool InitializeObjectiveAndTestIfUnchanged(const LinearProgram& lp);
 
   // Computes the stopping criterion on the problem objective value.
-  void InitializeObjectiveLimit(const LinearProgram& lp);
+  void InitializeObjectiveLimit();
 
   // Initializes the starting basis. In most cases it starts by the all slack
   // basis and tries to apply some heuristics to replace fixed variables.
diff --git a/ortools/glop/variables_info.cc b/ortools/glop/variables_info.cc
index 3830178e80..d100f15eea 100644
--- a/ortools/glop/variables_info.cc
+++ b/ortools/glop/variables_info.cc
@@ -120,13 +120,13 @@ void VariablesInfo::InitializeFromBasisState(ColIndex first_slack_col,
 
   // Compute the status for all the columns (note that the slack variables are
   // already added at the end of the matrix at this stage).
+  const int state_size = state.statuses.size().value();
   for (ColIndex col(0); col < num_cols; ++col) {
     // Start with the given "warm" status from the BasisState if it exists.
     VariableStatus status;
-    if (col < first_new_col && col < state.statuses.size()) {
+    if (col < first_new_col && col < state_size) {
       status = state.statuses[col];
-    } else if (col >= first_slack_col &&
-               col - num_new_cols < state.statuses.size()) {
+    } else if (col >= first_slack_col && col - num_new_cols < state_size) {
       status = state.statuses[col - num_new_cols];
     } else {
       UpdateToNonBasicStatus(col, DefaultVariableStatus(col));
diff --git a/ortools/lp_data/sparse.cc b/ortools/lp_data/sparse.cc
index 41ff043ee6..1928aa7d55 100644
--- a/ortools/lp_data/sparse.cc
+++ b/ortools/lp_data/sparse.cc
@@ -463,15 +463,16 @@ void CompactSparseMatrix::PopulateFromMatrixView(const MatrixView& input) {
 
 void CompactSparseMatrix::PopulateFromSparseMatrixAndAddSlacks(
     const SparseMatrix& input) {
-  num_cols_ = input.num_cols() + RowToColIndex(input.num_rows());
+  const int input_num_cols = input.num_cols().value();
+  num_cols_ = input_num_cols + RowToColIndex(input.num_rows());
   num_rows_ = input.num_rows();
   const EntryIndex num_entries =
       input.num_entries() + EntryIndex(num_rows_.value());
   starts_.assign(num_cols_ + 1, EntryIndex(0));
-  coefficients_.assign(num_entries, 0.0);
-  rows_.assign(num_entries, RowIndex(0));
+  coefficients_.resize(num_entries, 0.0);
+  rows_.resize(num_entries, RowIndex(0));
   EntryIndex index(0);
-  for (ColIndex col(0); col < input.num_cols(); ++col) {
+  for (ColIndex col(0); col < input_num_cols; ++col) {
     starts_[col] = index;
     for (const SparseColumn::Entry e : input.column(col)) {
       coefficients_[index] = e.coefficient();
@@ -480,11 +481,12 @@ void CompactSparseMatrix::PopulateFromSparseMatrixAndAddSlacks(
     }
   }
   for (RowIndex row(0); row < num_rows_; ++row) {
-    starts_[input.num_cols() + RowToColIndex(row)] = index;
+    starts_[input_num_cols + RowToColIndex(row)] = index;
     coefficients_[index] = 1.0;
     rows_[index] = row;
     ++index;
   }
+  DCHECK_EQ(index, num_entries);
   starts_[num_cols_] = index;
 }
 
@@ -496,11 +498,12 @@ void CompactSparseMatrix::PopulateFromTranspose(
   // Fill the starts_ vector by computing the number of entries of each rows and
   // then doing a cumulative sum. After this step starts_[col + 1] will be the
   // actual start of the column col when we are done.
-  starts_.assign(num_cols_ + 2, EntryIndex(0));
+  const ColIndex start_size = num_cols_ + 2;
+  starts_.assign(start_size, EntryIndex(0));
   for (const RowIndex row : input.rows_) {
     ++starts_[RowToColIndex(row) + 2];
   }
-  for (ColIndex col(2); col < starts_.size(); ++col) {
+  for (ColIndex col(2); col < start_size; ++col) {
     starts_[col] += starts_[col - 1];
   }
   coefficients_.resize(starts_.back(), 0.0);
@@ -662,12 +665,13 @@ void TriangularMatrix::CloseCurrentColumn(Fractional diagonal_value) {
   // TODO(user): This is currently not used by all matrices. It will be good
   // to fill it only when needed.
   DCHECK_LT(num_cols_, pruned_ends_.size());
-  pruned_ends_[num_cols_] = coefficients_.size();
+  const EntryIndex num_entries = coefficients_.size();
+  pruned_ends_[num_cols_] = num_entries;
   ++num_cols_;
   DCHECK_LT(num_cols_, starts_.size());
-  starts_[num_cols_] = coefficients_.size();
-  if (first_non_identity_column_ == num_cols_ - 1 && coefficients_.empty() &&
-      diagonal_value == 1.0) {
+  starts_[num_cols_] = num_entries;
+  if (first_non_identity_column_ == num_cols_ - 1 && diagonal_value == 1.0 &&
+      num_entries == 0) {
     first_non_identity_column_ = num_cols_;
   }
   all_diagonal_coefficients_are_one_ =
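
Reviewer note, not part of the patch: most hunks above apply the same micro-optimization of hoisting a container size (inverse_row_perm_.size(), state.statuses.size(), input.num_cols(), coefficients_.size()) into a local constant before the loop, plus reading the objective through a single const_view(). When the bound comes from a member and the loop body makes calls the optimizer cannot see through, the compiler has to assume the container might change and re-reads its size every iteration; a local const makes the bound provably loop-invariant. Below is a minimal standalone C++ sketch of that idea under stated assumptions; the class, member names, Consume(), and the Fractional alias are invented for illustration and are not OR-Tools code.

// Illustrative sketch only: shows the "hoist size() out of the hot loop"
// pattern used throughout this patch. All names here are invented.
#include <cstddef>
#include <vector>

using Fractional = double;  // Assumption: stand-in for glop's Fractional.

// Imagine this is defined in another translation unit, so the optimizer
// cannot prove it leaves the caller's containers untouched.
void Consume(Fractional value);

class ColumnScanner {
 public:
  // Before: because Consume() is opaque and the object may be reachable from
  // elsewhere, the compiler must assume values_ could change across the call
  // and re-read values_.size() on every iteration.
  void ScanSlow() const {
    for (std::size_t i = 0; i < values_.size(); ++i) {
      Consume(values_[i]);
    }
  }

  // After (the patch's pattern): the author knows the loop never resizes
  // values_, so the size is read once into a local constant. The bound is now
  // loop-invariant by construction and can stay in a register.
  void ScanFast() const {
    const std::size_t num_values = values_.size();
    for (std::size_t i = 0; i < num_values; ++i) {
      Consume(values_[i]);
    }
  }

 private:
  std::vector<Fractional> values_;
};

// Trivial definition only so the sketch links when built as one file; the
// "compiler cannot see it" argument assumes it lives in a separate TU.
void Consume(Fractional) {}

The DCHECK_EQ(index, num_entries) added in PopulateFromSparseMatrixAndAddSlacks serves the same documenting purpose: it asserts that the hoisted entry count really matches what the fill loops produced.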