// Copyright (c) Sleipnir contributors

#pragma once

#include <algorithm>
#include <chrono>
#include <cmath>
#include <functional>
#include <limits>
#include <span>

#include <Eigen/Core>
#include <Eigen/SparseCore>
#include <gch/small_vector.hpp>

#include "sleipnir/optimization/solver/exit_status.hpp"
#include "sleipnir/optimization/solver/interior_point_matrix_callbacks.hpp"
#include "sleipnir/optimization/solver/iteration_info.hpp"
#include "sleipnir/optimization/solver/options.hpp"
#include "sleipnir/optimization/solver/util/all_finite.hpp"
#include "sleipnir/optimization/solver/util/append_as_triplets.hpp"
#include "sleipnir/optimization/solver/util/feasibility_restoration.hpp"
#include "sleipnir/optimization/solver/util/filter.hpp"
#include "sleipnir/optimization/solver/util/fraction_to_the_boundary_rule.hpp"
#include "sleipnir/optimization/solver/util/is_locally_infeasible.hpp"
#include "sleipnir/optimization/solver/util/kkt_error.hpp"
#include "sleipnir/optimization/solver/util/regularized_ldlt.hpp"
#include "sleipnir/util/assert.hpp"
#include "sleipnir/util/print_diagnostics.hpp"
#include "sleipnir/util/profiler.hpp"
#include "sleipnir/util/scope_exit.hpp"
#include "sleipnir/util/symbol_exports.hpp"

// See docs/algorithms.md#Works_cited for citation definitions.
//
// See docs/algorithms.md#Interior-point_method for a derivation of the
// interior-point method formulation being used.

namespace slp {

63template <typename Scalar>
64ExitStatus interior_point(
65 const InteriorPointMatrixCallbacks<Scalar>& matrix_callbacks,
66 std::span<std::function<bool(const IterationInfo<Scalar>& info)>>
67 iteration_callbacks,
68 const Options& options,
69#ifdef SLEIPNIR_ENABLE_BOUND_PROJECTION
70 const Eigen::ArrayX<bool>& bound_constraint_mask,
71#endif
72 Eigen::Vector<Scalar, Eigen::Dynamic>& x) {
73 using DenseVector = Eigen::Vector<Scalar, Eigen::Dynamic>;
74
75 DenseVector s =
76 DenseVector::Ones(matrix_callbacks.num_inequality_constraints);
77 DenseVector y = DenseVector::Zero(matrix_callbacks.num_equality_constraints);
78 DenseVector z =
79 DenseVector::Ones(matrix_callbacks.num_inequality_constraints);
80 Scalar μ(0.1);
81
82 return interior_point(matrix_callbacks, iteration_callbacks, options, false,
83#ifdef SLEIPNIR_ENABLE_BOUND_PROJECTION
84 bound_constraint_mask,
85#endif
86 x, s, y, z, μ);
87}
121template <typename Scalar>
122ExitStatus interior_point(
123 const InteriorPointMatrixCallbacks<Scalar>& matrix_callbacks,
124 std::span<std::function<bool(const IterationInfo<Scalar>& info)>>
125 iteration_callbacks,
126 const Options& options, bool in_feasibility_restoration,
127#ifdef SLEIPNIR_ENABLE_BOUND_PROJECTION
128 const Eigen::ArrayX<bool>& bound_constraint_mask,
129#endif
130 Eigen::Vector<Scalar, Eigen::Dynamic>& x,
131 Eigen::Vector<Scalar, Eigen::Dynamic>& s,
132 Eigen::Vector<Scalar, Eigen::Dynamic>& y,
133 Eigen::Vector<Scalar, Eigen::Dynamic>& z, Scalar& μ) {
134 using DenseVector = Eigen::Vector<Scalar, Eigen::Dynamic>;
135 using SparseMatrix = Eigen::SparseMatrix<Scalar>;
136 using SparseVector = Eigen::SparseVector<Scalar>;
137
139 struct Step {
141 DenseVector p_x;
143 DenseVector p_s;
145 DenseVector p_y;
147 DenseVector p_z;
148 };
149
150 using std::isfinite;
151
152 const auto solve_start_time = std::chrono::steady_clock::now();
153
154 gch::small_vector<SolveProfiler> solve_profilers;
155 solve_profilers.emplace_back("solver");
156 solve_profilers.emplace_back("↳ setup");
157 solve_profilers.emplace_back("↳ iteration");
158 solve_profilers.emplace_back(" ↳ feasibility check");
159 solve_profilers.emplace_back(" ↳ callbacks");
160 solve_profilers.emplace_back(" ↳ KKT matrix build");
161 solve_profilers.emplace_back(" ↳ KKT matrix decomp");
162 solve_profilers.emplace_back(" ↳ KKT system solve");
163 solve_profilers.emplace_back(" ↳ line search");
164 solve_profilers.emplace_back(" ↳ SOC");
165 solve_profilers.emplace_back(" ↳ next iter prep");
166 solve_profilers.emplace_back(" ↳ f(x)");
167 solve_profilers.emplace_back(" ↳ ∇f(x)");
168 solve_profilers.emplace_back(" ↳ ∇²ₓₓL");
169 solve_profilers.emplace_back(" ↳ ∇²ₓₓL_c");
170 solve_profilers.emplace_back(" ↳ cₑ(x)");
171 solve_profilers.emplace_back(" ↳ ∂cₑ/∂x");
172 solve_profilers.emplace_back(" ↳ cᵢ(x)");
173 solve_profilers.emplace_back(" ↳ ∂cᵢ/∂x");
174
175 auto& solver_prof = solve_profilers[0];
176 auto& setup_prof = solve_profilers[1];
177 auto& inner_iter_prof = solve_profilers[2];
178 auto& feasibility_check_prof = solve_profilers[3];
179 auto& iter_callbacks_prof = solve_profilers[4];
180 auto& kkt_matrix_build_prof = solve_profilers[5];
181 auto& kkt_matrix_decomp_prof = solve_profilers[6];
182 auto& kkt_system_solve_prof = solve_profilers[7];
183 auto& line_search_prof = solve_profilers[8];
184 auto& soc_prof = solve_profilers[9];
185 auto& next_iter_prep_prof = solve_profilers[10];
186
187 // Set up profiled matrix callbacks
188#ifndef SLEIPNIR_DISABLE_DIAGNOSTICS
189 auto& f_prof = solve_profilers[11];
190 auto& g_prof = solve_profilers[12];
191 auto& H_prof = solve_profilers[13];
192 auto& H_c_prof = solve_profilers[14];
193 auto& c_e_prof = solve_profilers[15];
194 auto& A_e_prof = solve_profilers[16];
195 auto& c_i_prof = solve_profilers[17];
196 auto& A_i_prof = solve_profilers[18];
197
198 InteriorPointMatrixCallbacks<Scalar> matrices{
199 matrix_callbacks.num_decision_variables,
200 matrix_callbacks.num_equality_constraints,
201 matrix_callbacks.num_inequality_constraints,
202 [&](const DenseVector& x) -> Scalar {
203 ScopedProfiler prof{f_prof};
204 return matrix_callbacks.f(x);
205 },
206 [&](const DenseVector& x) -> SparseVector {
207 ScopedProfiler prof{g_prof};
208 return matrix_callbacks.g(x);
209 },
210 [&](const DenseVector& x, const DenseVector& y,
211 const DenseVector& z) -> SparseMatrix {
212 ScopedProfiler prof{H_prof};
213 return matrix_callbacks.H(x, y, z);
214 },
215 [&](const DenseVector& x, const DenseVector& y,
216 const DenseVector& z) -> SparseMatrix {
217 ScopedProfiler prof{H_c_prof};
218 return matrix_callbacks.H_c(x, y, z);
219 },
220 [&](const DenseVector& x) -> DenseVector {
221 ScopedProfiler prof{c_e_prof};
222 return matrix_callbacks.c_e(x);
223 },
224 [&](const DenseVector& x) -> SparseMatrix {
225 ScopedProfiler prof{A_e_prof};
226 return matrix_callbacks.A_e(x);
227 },
228 [&](const DenseVector& x) -> DenseVector {
229 ScopedProfiler prof{c_i_prof};
230 return matrix_callbacks.c_i(x);
231 },
232 [&](const DenseVector& x) -> SparseMatrix {
233 ScopedProfiler prof{A_i_prof};
234 return matrix_callbacks.A_i(x);
235 }};
236#else
237 const auto& matrices = matrix_callbacks;
238#endif
239
240 solver_prof.start();
241 setup_prof.start();
242
243 Scalar f = matrices.f(x);
244 SparseVector g = matrices.g(x);
245 SparseMatrix H = matrices.H(x, y, z);
246 DenseVector c_e = matrices.c_e(x);
247 SparseMatrix A_e = matrices.A_e(x);
248 DenseVector c_i = matrices.c_i(x);
249 SparseMatrix A_i = matrices.A_i(x);
250
251 // Ensure matrix callback dimensions are consistent
252 slp_assert(g.rows() == matrices.num_decision_variables);
253 slp_assert(H.rows() == matrices.num_decision_variables);
254 slp_assert(H.cols() == matrices.num_decision_variables);
255 slp_assert(c_e.rows() == matrices.num_equality_constraints);
256 slp_assert(A_e.rows() == matrices.num_equality_constraints);
257 slp_assert(A_e.cols() == matrices.num_decision_variables);
258 slp_assert(c_i.rows() == matrices.num_inequality_constraints);
259 slp_assert(A_i.rows() == matrices.num_inequality_constraints);
260 slp_assert(A_i.cols() == matrices.num_decision_variables);
261
262 // Check for overconstrained problem
263 if (matrices.num_equality_constraints > matrices.num_decision_variables) {
264 if (options.diagnostics) {
265 print_too_few_dofs_error(c_e);
266 }
267
268 return ExitStatus::TOO_FEW_DOFS;
269 }
270
271 // Check whether initial guess has finite cost, constraints, and derivatives
272 if (!isfinite(f) || !all_finite(g) || !all_finite(H) || !c_e.allFinite() ||
273 !all_finite(A_e) || !c_i.allFinite() || !all_finite(A_i)) {
274 return ExitStatus::NONFINITE_INITIAL_GUESS;
275 }
276
277#ifdef SLEIPNIR_ENABLE_BOUND_PROJECTION
278 // We set sʲ = cᵢʲ(x) for each bound inequality constraint index j
279 s = bound_constraint_mask.select(c_i, s);
280#endif
281
282 int iterations = 0;
283
284 // Barrier parameter minimum
285 const Scalar μ_min = Scalar(options.tolerance) / Scalar(10);
286
287 // Fraction-to-the-boundary rule scale factor minimum
288 constexpr Scalar τ_min(0.99);
289
290 // Fraction-to-the-boundary rule scale factor τ
291 Scalar τ = τ_min;
292
293 Filter<Scalar> filter{c_e.template lpNorm<1>() +
294 (c_i - s).template lpNorm<1>()};
295
296 // This should be run when the error is below a desired threshold for the
297 // current barrier parameter
298 auto update_barrier_parameter_and_reset_filter = [&] {
299 // Barrier parameter linear decrease power in "κ_μ μ". Range of (0, 1).
300 constexpr Scalar κ_μ(0.2);
301
302 // Barrier parameter superlinear decrease power in "μ^(θ_μ)". Range of (1,
303 // 2).
304 constexpr Scalar θ_μ(1.5);
305
306 // Update the barrier parameter.
307 //
308 // μⱼ₊₁ = max(εₜₒₗ/10, min(κ_μ μⱼ, μⱼ^θ_μ))
309 //
310 // See equation (7) of [2].
311 using std::pow;
312 μ = std::max(μ_min, std::min(κ_μ * μ, pow(μ, θ_μ)));
313
314 // Update the fraction-to-the-boundary rule scaling factor.
315 //
316 // τⱼ = max(τₘᵢₙ, 1 − μⱼ)
317 //
318 // See equation (8) of [2].
319 τ = std::max(τ_min, Scalar(1) - μ);
320
321 // Reset the filter when the barrier parameter is updated
322 filter.reset();
323 };
324
325 // Kept outside the loop so its storage can be reused
326 gch::small_vector<Eigen::Triplet<Scalar>> triplets;
327
328 // Constraint regularization is forced to zero in feasibility restoration
329 // because the equality constraint Jacobian cannot be rank-deficient
330 RegularizedLDLT<Scalar> solver{
331 matrices.num_decision_variables, matrices.num_equality_constraints,
332 in_feasibility_restoration ? Scalar(0) : Scalar(1e-10)};
333
334 // Variables for determining when a step is acceptable
335 constexpr Scalar α_reduction_factor(0.5);
336 constexpr Scalar α_min(1e-7);
337
338 int full_step_rejected_counter = 0;
339
340 // Error
341 Scalar E_0 = std::numeric_limits<Scalar>::infinity();
342
343 setup_prof.stop();
344
345 // Prints final solver diagnostics when the solver exits
346 scope_exit exit{[&] {
347 if (options.diagnostics) {
348 solver_prof.stop();
349
350 if (in_feasibility_restoration) {
351 return;
352 }
353
354 if (iterations > 0) {
355 print_bottom_iteration_diagnostics();
356 }
357 print_solver_diagnostics(solve_profilers);
358 }
359 }};
360
361 while (E_0 > Scalar(options.tolerance)) {
362 ScopedProfiler inner_iter_profiler{inner_iter_prof};
363 ScopedProfiler feasibility_check_profiler{feasibility_check_prof};
364
365 // Check for local equality constraint infeasibility
366 if (is_equality_locally_infeasible(A_e, c_e)) {
367 if (options.diagnostics) {
368 print_c_e_local_infeasibility_error(c_e);
369 }
370
371 return ExitStatus::LOCALLY_INFEASIBLE;
372 }
373
374 // Check for local inequality constraint infeasibility
375 if (is_inequality_locally_infeasible(A_i, c_i)) {
376 if (options.diagnostics) {
377 print_c_i_local_infeasibility_error(c_i);
378 }
379
380 return ExitStatus::LOCALLY_INFEASIBLE;
381 }
382
383 // Check for diverging iterates
384 if (x.template lpNorm<Eigen::Infinity>() > Scalar(1e10) || !x.allFinite() ||
385 s.template lpNorm<Eigen::Infinity>() > Scalar(1e10) || !s.allFinite()) {
386 return ExitStatus::DIVERGING_ITERATES;
387 }
388
389 feasibility_check_profiler.stop();
390 ScopedProfiler iter_callbacks_profiler{iter_callbacks_prof};
391
392 // Call iteration callbacks
393 for (const auto& callback : iteration_callbacks) {
394 if (callback({iterations, x, s, y, z, g, H, A_e, A_i})) {
395 return ExitStatus::CALLBACK_REQUESTED_STOP;
396 }
397 }
398
399 iter_callbacks_profiler.stop();
400 ScopedProfiler kkt_matrix_build_profiler{kkt_matrix_build_prof};
401
402 // S = diag(s)
403 // Z = diag(z)
404 // Σ = S⁻¹Z
405 const SparseMatrix Σ{s.cwiseInverse().asDiagonal() * z.asDiagonal()};
406
407 // lhs = [H + AᵢᵀΣAᵢ Aₑᵀ]
408 // [ Aₑ 0 ]
409 //
410 // Don't assign upper triangle because solver only uses lower triangle.
411 const SparseMatrix top_left =
412 H + (A_i.transpose() * Σ * A_i).template triangularView<Eigen::Lower>();
413 triplets.clear();
414 triplets.reserve(top_left.nonZeros() + A_e.nonZeros());
415 append_as_triplets(triplets, 0, 0, {top_left, A_e});
416 SparseMatrix lhs(
417 matrices.num_decision_variables + matrices.num_equality_constraints,
418 matrices.num_decision_variables + matrices.num_equality_constraints);
419 lhs.setFromSortedTriplets(triplets.begin(), triplets.end());
420
421 // rhs = −[∇f − Aₑᵀy − Aᵢᵀ(−Σcᵢ + μS⁻¹e + z)]
422 // [ cₑ ]
423 DenseVector rhs{x.rows() + y.rows()};
424 rhs.segment(0, x.rows()) =
425 -g + A_e.transpose() * y +
426 A_i.transpose() * (-Σ * c_i + μ * s.cwiseInverse() + z);
427 rhs.segment(x.rows(), y.rows()) = -c_e;
428
429 kkt_matrix_build_profiler.stop();
430 ScopedProfiler kkt_matrix_decomp_profiler{kkt_matrix_decomp_prof};
431
432 Step step;
433 Scalar α_max(1);
434 Scalar α(1);
435 Scalar α_z(1);
436 bool call_feasibility_restoration = false;
437
438 // Solve the Newton-KKT system
439 //
440 // [H + AᵢᵀΣAᵢ Aₑᵀ][ pˣ] = −[∇f − Aₑᵀy − Aᵢᵀ(−Σcᵢ + μS⁻¹e + z)]
441 // [ Aₑ 0 ][−pʸ] [ cₑ ]
442 if (solver.compute(lhs).info() != Eigen::Success) [[unlikely]] {
443 return ExitStatus::FACTORIZATION_FAILED;
444 }
445
446 kkt_matrix_decomp_profiler.stop();
447 ScopedProfiler kkt_system_solve_profiler{kkt_system_solve_prof};
448
449 auto compute_step = [&](Step& step) {
450 // p = [ pˣ]
451 // [−pʸ]
452 DenseVector p = solver.solve(rhs);
453 step.p_x = p.segment(0, x.rows());
454 step.p_y = -p.segment(x.rows(), y.rows());
455
456 // pˢ = cᵢ − s + Aᵢpˣ
457 // pᶻ = −Σcᵢ + μS⁻¹e − ΣAᵢpˣ
458 step.p_s = c_i - s + A_i * step.p_x;
459 step.p_z = -Σ * c_i + μ * s.cwiseInverse() - Σ * A_i * step.p_x;
460 };
461 compute_step(step);
462
463 kkt_system_solve_profiler.stop();
464 ScopedProfiler line_search_profiler{line_search_prof};
465
466 // αᵐᵃˣ = max(α ∈ (0, 1] : sₖ + αpₖˢ ≥ (1−τⱼ)sₖ)
467 α_max = fraction_to_the_boundary_rule<Scalar>(s, step.p_s, τ);
468 α = α_max;
469
470 // If maximum step size is below minimum, invoke feasibility restoration
471 if (α < α_min) {
472 call_feasibility_restoration = true;
473 }
474
475 // αₖᶻ = max(α ∈ (0, 1] : zₖ + αpₖᶻ ≥ (1−τⱼ)zₖ)
476 α_z = fraction_to_the_boundary_rule<Scalar>(z, step.p_z, τ);
477
478 const FilterEntry<Scalar> current_entry{f, s, c_e, c_i, μ};
479
480 // Loop until a step is accepted
481 while (1) {
482 DenseVector trial_x = x + α * step.p_x;
483 DenseVector trial_y = y + α_z * step.p_y;
484 DenseVector trial_z = z + α_z * step.p_z;
485
486 Scalar trial_f = matrices.f(trial_x);
487 DenseVector trial_c_e = matrices.c_e(trial_x);
488 DenseVector trial_c_i = matrices.c_i(trial_x);
489
490 // If f(xₖ + αpₖˣ), cₑ(xₖ + αpₖˣ), or cᵢ(xₖ + αpₖˣ) aren't finite, reduce
491 // step size immediately
492 if (!isfinite(trial_f) || !trial_c_e.allFinite() ||
493 !trial_c_i.allFinite()) {
494 // Reduce step size
495 α *= α_reduction_factor;
496
497 if (α < α_min) {
498 call_feasibility_restoration = true;
499 break;
500 }
501 continue;
502 }
503
504 DenseVector trial_s;
505 if (options.feasible_ipm && c_i.cwiseGreater(Scalar(0)).all()) {
506 // If the inequality constraints are all feasible, prevent them from
507 // becoming infeasible again.
508 //
509 // See equation (19.30) in [1].
510 trial_s = trial_c_i;
511 } else {
512 trial_s = s + α * step.p_s;
513 }
514
515 // Check whether filter accepts trial iterate
516 FilterEntry trial_entry{trial_f, trial_s, trial_c_e, trial_c_i, μ};
517 if (filter.try_add(current_entry, trial_entry, step.p_x, g, α)) {
518 // Accept step
519 break;
520 }
521
522 Scalar prev_constraint_violation =
523 c_e.template lpNorm<1>() + (c_i - s).template lpNorm<1>();
524 Scalar next_constraint_violation =
525 trial_c_e.template lpNorm<1>() +
526 (trial_c_i - trial_s).template lpNorm<1>();
527
528 // Second-order corrections
529 //
530 // If first trial point was rejected and constraint violation stayed the
531 // same or went up, apply second-order corrections
532 if (α == α_max &&
533 next_constraint_violation >= prev_constraint_violation) {
534 // Apply second-order corrections. See section 2.4 of [2].
535 auto soc_step = step;
536
537 Scalar α_soc = α;
538 Scalar α_z_soc = α_z;
539 DenseVector c_e_soc = c_e;
540
541 bool step_acceptable = false;
542 for (int soc_iteration = 0; soc_iteration < 5 && !step_acceptable;
543 ++soc_iteration) {
544 ScopedProfiler soc_profiler{soc_prof};
545
546 scope_exit soc_exit{[&] {
547 soc_profiler.stop();
548
549 if (options.diagnostics) {
550 print_iteration_diagnostics(
551 iterations,
552 step_acceptable ? IterationType::ACCEPTED_SOC
553 : IterationType::REJECTED_SOC,
554 soc_profiler.current_duration(),
555 kkt_error<Scalar, KKTErrorType::INF_NORM_SCALED>(
556 g, A_e, trial_c_e, A_i, trial_c_i, trial_s, trial_y,
557 trial_z, Scalar(0)),
558 trial_f,
559 trial_c_e.template lpNorm<1>() +
560 (trial_c_i - trial_s).template lpNorm<1>(),
561 trial_s.dot(trial_z), μ, solver.hessian_regularization(),
562 α_soc, Scalar(1), α_reduction_factor, α_z_soc);
563 }
564 }};
565
566 // Rebuild Newton-KKT rhs with updated constraint values.
567 //
568 // rhs = −[∇f − Aₑᵀy − Aᵢᵀ(−Σcᵢ + μS⁻¹e + z)]
569 // [ cₑˢᵒᶜ ]
570 //
571 // where cₑˢᵒᶜ = αc(xₖ) + c(xₖ + αpₖˣ)
572 c_e_soc = α_soc * c_e_soc + trial_c_e;
573 rhs.bottomRows(y.rows()) = -c_e_soc;
574
575 // Solve the Newton-KKT system
576 compute_step(soc_step);
577
578 // αˢᵒᶜ = max(α ∈ (0, 1] : sₖ + αpₖˢ ≥ (1−τⱼ)sₖ)
579 α_soc = fraction_to_the_boundary_rule<Scalar>(s, soc_step.p_s, τ);
580 trial_x = x + α_soc * soc_step.p_x;
581 trial_s = s + α_soc * soc_step.p_s;
582
583 // αₖᶻ = max(α ∈ (0, 1] : zₖ + αpₖᶻ ≥ (1−τⱼ)zₖ)
584 α_z_soc = fraction_to_the_boundary_rule<Scalar>(z, soc_step.p_z, τ);
585 trial_y = y + α_z_soc * soc_step.p_y;
586 trial_z = z + α_z_soc * soc_step.p_z;
587
588 trial_f = matrices.f(trial_x);
589 trial_c_e = matrices.c_e(trial_x);
590 trial_c_i = matrices.c_i(trial_x);
591
592 // Constraint violation scale factor for second-order corrections
593 constexpr Scalar κ_soc(0.99);
594
595 // If constraint violation hasn't been sufficiently reduced, stop
596 // making second-order corrections
597 next_constraint_violation =
598 trial_c_e.template lpNorm<1>() +
599 (trial_c_i - trial_s).template lpNorm<1>();
600 if (next_constraint_violation > κ_soc * prev_constraint_violation) {
601 break;
602 }
603
604 // Check whether filter accepts trial iterate
605 FilterEntry trial_entry{trial_f, trial_s, trial_c_e, trial_c_i, μ};
606 if (filter.try_add(current_entry, trial_entry, step.p_x, g, α)) {
607 step = soc_step;
608 α = α_soc;
609 α_z = α_z_soc;
610 step_acceptable = true;
611 }
612 }
613
614 if (step_acceptable) {
615 // Accept step
616 break;
617 }
618 }
619
620 // If we got here and α is the full step, the full step was rejected.
621 // Increment the full-step rejected counter to keep track of how many full
622 // steps have been rejected in a row.
623 if (α == α_max) {
624 ++full_step_rejected_counter;
625 }
626
627 // If the full step was rejected enough times in a row, reset the filter
628 // because it may be impeding progress.
629 //
630 // See section 3.2 case I of [2].
631 if (full_step_rejected_counter >= 4 &&
632 filter.max_constraint_violation >
633 current_entry.constraint_violation / Scalar(10) &&
634 filter.last_rejection_due_to_filter()) {
635 filter.max_constraint_violation *= Scalar(0.1);
636 filter.reset();
637 continue;
638 }
639
640 // Reduce step size
641 α *= α_reduction_factor;
642
643 // If step size hit a minimum, check if the KKT error was reduced. If it
644 // wasn't, invoke feasibility restoration.
645 if (α < α_min) {
646 Scalar current_kkt_error = kkt_error<Scalar, KKTErrorType::ONE_NORM>(
647 g, A_e, c_e, A_i, c_i, s, y, z, μ);
648
649 trial_x = x + α_max * step.p_x;
650 trial_s = s + α_max * step.p_s;
651
652 trial_y = y + α_z * step.p_y;
653 trial_z = z + α_z * step.p_z;
654
655 trial_c_e = matrices.c_e(trial_x);
656 trial_c_i = matrices.c_i(trial_x);
657
658 Scalar next_kkt_error = kkt_error<Scalar, KKTErrorType::ONE_NORM>(
659 matrices.g(trial_x), matrices.A_e(trial_x), matrices.c_e(trial_x),
660 matrices.A_i(trial_x), trial_c_i, trial_s, trial_y, trial_z, μ);
661
662 // If the step using αᵐᵃˣ reduced the KKT error, accept it anyway
663 if (next_kkt_error <= Scalar(0.999) * current_kkt_error) {
664 α = α_max;
665
666 // Accept step
667 break;
668 }
669
670 call_feasibility_restoration = true;
671 break;
672 }
673 }
674
675 line_search_profiler.stop();
676
677 if (call_feasibility_restoration) {
678 // If already in feasibility restoration mode, running it again won't help
679 if (in_feasibility_restoration) {
680 return ExitStatus::FEASIBILITY_RESTORATION_FAILED;
681 }
682
683 FilterEntry initial_entry{matrices.f(x), s, c_e, c_i, μ};
684
685 // Feasibility restoration phase
686 gch::small_vector<std::function<bool(const IterationInfo<Scalar>& info)>>
687 callbacks;
688 for (auto& callback : iteration_callbacks) {
689 callbacks.emplace_back(callback);
690 }
691 callbacks.emplace_back([&](const IterationInfo<Scalar>& info) {
692 DenseVector trial_x =
693 info.x.segment(0, matrices.num_decision_variables);
694 DenseVector trial_s =
695 info.s.segment(0, matrices.num_inequality_constraints);
696
697 DenseVector trial_c_e = matrices.c_e(trial_x);
698 DenseVector trial_c_i = matrices.c_i(trial_x);
699
700 // If the current iterate sufficiently reduces constraint violation and
701 // is accepted by the normal filter, stop feasibility restoration
702 FilterEntry trial_entry{matrices.f(trial_x), trial_s, trial_c_e,
703 trial_c_i, μ};
704 return trial_entry.constraint_violation <
705 Scalar(0.9) * initial_entry.constraint_violation &&
706 filter.try_add(initial_entry, trial_entry, trial_x - x, g, α);
707 });
708 auto status = feasibility_restoration<Scalar>(matrices, callbacks,
709 options, x, s, y, z, μ);
710
711 if (status != ExitStatus::SUCCESS) {
712 // Report failure
713 return status;
714 }
715 } else {
716 // If full step was accepted, reset full-step rejected counter
717 if (α == α_max) {
718 full_step_rejected_counter = 0;
719 }
720
721 // xₖ₊₁ = xₖ + αₖpₖˣ
722 // sₖ₊₁ = sₖ + αₖpₖˢ
723 // yₖ₊₁ = yₖ + αₖᶻpₖʸ
724 // zₖ₊₁ = zₖ + αₖᶻpₖᶻ
725 x += α * step.p_x;
726 s += α * step.p_s;
727 y += α_z * step.p_y;
728 z += α_z * step.p_z;
729
730 // A requirement for the convergence proof is that the primal-dual barrier
731 // term Hessian Σₖ₊₁ does not deviate arbitrarily much from the primal
732 // barrier term Hessian μSₖ₊₁⁻².
733 //
734 // Σₖ₊₁ = μSₖ₊₁⁻²
735 // Sₖ₊₁⁻¹Zₖ₊₁ = μSₖ₊₁⁻²
736 // Zₖ₊₁ = μSₖ₊₁⁻¹
737 //
738 // We ensure this by resetting
739 //
740 // zₖ₊₁ = clamp(zₖ₊₁, 1/κ_Σ μ/sₖ₊₁, κ_Σ μ/sₖ₊₁)
741 //
742 // for some fixed κ_Σ ≥ 1 after each step. See equation (16) of [2].
743 for (int row = 0; row < z.rows(); ++row) {
744 constexpr Scalar κ_Σ(1e10);
745 z[row] =
746 std::clamp(z[row], Scalar(1) / κ_Σ * μ / s[row], κ_Σ * μ / s[row]);
747 }
748 }
749
750 // Update autodiff for Jacobians and Hessian
751 f = matrices.f(x);
752 A_e = matrices.A_e(x);
753 A_i = matrices.A_i(x);
754 g = matrices.g(x);
755 H = matrices.H(x, y, z);
756
757 ScopedProfiler next_iter_prep_profiler{next_iter_prep_prof};
758
759 c_e = matrices.c_e(x);
760 c_i = matrices.c_i(x);
761
762 // Update the error
763 E_0 = kkt_error<Scalar, KKTErrorType::INF_NORM_SCALED>(
764 g, A_e, c_e, A_i, c_i, s, y, z, Scalar(0));
765
766 // Update the barrier parameter if necessary
767 if (E_0 > Scalar(options.tolerance)) {
768 // Barrier parameter scale factor for tolerance checks
769 constexpr Scalar κ_ε(10);
770
771 // While the error is below the desired threshold for this barrier
772 // parameter value, decrease the barrier parameter further
773 Scalar E_μ = kkt_error<Scalar, KKTErrorType::INF_NORM_SCALED>(
774 g, A_e, c_e, A_i, c_i, s, y, z, μ);
775 while (μ > μ_min && E_μ <= κ_ε * μ) {
776 update_barrier_parameter_and_reset_filter();
777 E_μ = kkt_error<Scalar, KKTErrorType::INF_NORM_SCALED>(g, A_e, c_e, A_i,
778 c_i, s, y, z, μ);
779 }
780 }
781
782 next_iter_prep_profiler.stop();
783 inner_iter_profiler.stop();
784
785 if (options.diagnostics) {
786 print_iteration_diagnostics(
787 iterations,
788 in_feasibility_restoration ? IterationType::FEASIBILITY_RESTORATION
789 : IterationType::NORMAL,
790 inner_iter_profiler.current_duration(), E_0, f,
791 c_e.template lpNorm<1>() + (c_i - s).template lpNorm<1>(), s.dot(z),
792 μ, solver.hessian_regularization(), α, α_max, α_reduction_factor,
793 α_z);
794 }
795
796 ++iterations;
797
798 // Check for max iterations
799 if (iterations >= options.max_iterations) {
800 return ExitStatus::MAX_ITERATIONS_EXCEEDED;
801 }
802
803 // Check for max wall clock time
804 if (std::chrono::steady_clock::now() - solve_start_time > options.timeout) {
805 return ExitStatus::TIMEOUT;
806 }
807 }
808
809 return ExitStatus::SUCCESS;
810}
812extern template SLEIPNIR_DLLEXPORT ExitStatus
813interior_point(const InteriorPointMatrixCallbacks<double>& matrix_callbacks,
814 std::span<std::function<bool(const IterationInfo<double>& info)>>
815 iteration_callbacks,
816 const Options& options,
817#ifdef SLEIPNIR_ENABLE_BOUND_PROJECTION
818 const Eigen::ArrayX<bool>& bound_constraint_mask,
819#endif
820 Eigen::Vector<double, Eigen::Dynamic>& x);
821
} // namespace slp