From e0af721a3f1859e5c9f393ff12e73654aa471b3e Mon Sep 17 00:00:00 2001
From: Pantelis Sopasakis
Date: Fri, 27 Mar 2026 18:41:15 +0000
Subject: [PATCH 01/12] TCP errors include more information
- wrong number of parameters specified
- errors 1600, 1700, 3003 updated
---
.../opengen/templates/tcp/tcp_server.rs | 21 ++++++++++++++++---
open-codegen/test/test.py | 18 ++++++++++++++++
2 files changed, 36 insertions(+), 3 deletions(-)
diff --git a/open-codegen/opengen/templates/tcp/tcp_server.rs b/open-codegen/opengen/templates/tcp/tcp_server.rs
index 042e176f..d3c98f04 100644
--- a/open-codegen/opengen/templates/tcp/tcp_server.rs
+++ b/open-codegen/opengen/templates/tcp/tcp_server.rs
@@ -150,7 +150,12 @@ fn execution_handler(
Some(u0) => {
if u0.len() != {{meta.optimizer_name|upper}}_NUM_DECISION_VARIABLES {
warn!("initial guess has incompatible dimensions");
- write_error_message(stream, 1600, "Initial guess has incompatible dimensions");
+ let error_message = format!(
+ "initial guess has incompatible dimensions: provided {}, expected {}",
+ u0.len(),
+ {{meta.optimizer_name|upper}}_NUM_DECISION_VARIABLES
+ );
+ write_error_message(stream, 1600, &error_message);
return;
}
u.copy_from_slice(u0);
@@ -162,7 +167,12 @@ fn execution_handler(
// ----------------------------------------------------
if let Some(y0) = &execution_parameter.initial_lagrange_multipliers {
if y0.len() != {{meta.optimizer_name|upper}}_N1 {
- write_error_message(stream, 1700, "wrong dimension of Langrange multipliers");
+ let error_message = format!(
+ "wrong dimension of Langrange multipliers: provided {}, expected {}",
+ y0.len(),
+ {{meta.optimizer_name|upper}}_N1
+ );
+ write_error_message(stream, 1700, &error_message);
return;
}
}
@@ -172,7 +182,12 @@ fn execution_handler(
// ----------------------------------------------------
let parameter = &execution_parameter.parameter;
if parameter.len() != {{meta.optimizer_name|upper}}_NUM_PARAMETERS {
- write_error_message(stream, 3003, "wrong number of parameters");
+ let error_message = format!(
+ "wrong number of parameters: provided {}, expected {}",
+ parameter.len(),
+ {{meta.optimizer_name|upper}}_NUM_PARAMETERS
+ );
+ write_error_message(stream, 3003, &error_message);
return;
}
p.copy_from_slice(parameter);
diff --git a/open-codegen/test/test.py b/open-codegen/test/test.py
index d56f3075..c451df8d 100644
--- a/open-codegen/test/test.py
+++ b/open-codegen/test/test.py
@@ -351,18 +351,27 @@ def test_rust_build_only_f1(self):
self.assertFalse(response.is_ok())
self.assertEqual(True, isinstance(status, og.tcp.SolverError))
self.assertEqual(3003, status.code)
+ self.assertEqual(
+ "wrong number of parameters: provided 3, expected 2",
+ status.message)
response = mng.call(p=[2.0, 10.0], initial_guess=[0.1, 0.2])
self.assertFalse(response.is_ok())
status = response.get()
self.assertEqual(True, isinstance(status, og.tcp.SolverError))
self.assertEqual(1600, status.code)
+ self.assertEqual(
+ "initial guess has incompatible dimensions: provided 2, expected 5",
+ status.message)
response = mng.call(p=[2.0, 10.0], initial_y=[0.1])
status = response.get()
self.assertFalse(response.is_ok())
self.assertEqual(True, isinstance(status, og.tcp.SolverError))
self.assertEqual(1700, status.code)
+ self.assertEqual(
+ "wrong dimension of Langrange multipliers: provided 1, expected 2",
+ status.message)
mng.kill()
@@ -405,18 +414,27 @@ def test_rust_build_only_f2_preconditioned(self):
status = response.get()
self.assertEqual(True, isinstance(status, og.tcp.SolverError))
self.assertEqual(3003, status.code)
+ self.assertEqual(
+ "wrong number of parameters: provided 3, expected 2",
+ status.message)
response = mng1.call(p=[2.0, 10.0], initial_guess=[0.1, 0.2])
self.assertFalse(response.is_ok())
status = response.get()
self.assertEqual(True, isinstance(status, og.tcp.SolverError))
self.assertEqual(1600, status.code)
+ self.assertEqual(
+ "initial guess has incompatible dimensions: provided 2, expected 5",
+ status.message)
response = mng1.call(p=[2.0, 10.0], initial_y=[0.1])
self.assertFalse(response.is_ok())
status = response.get()
self.assertEqual(True, isinstance(status, og.tcp.SolverError))
self.assertEqual(1700, status.code)
+ self.assertEqual(
+ "wrong dimension of Langrange multipliers: provided 1, expected 0",
+ status.message)
finally:
mng1.kill()
mng2.kill()
From 99813688124ac22e34d3c9e4911fa3ea1834610d Mon Sep 17 00:00:00 2001
From: Pantelis Sopasakis
Date: Fri, 27 Mar 2026 19:08:29 +0000
Subject: [PATCH 02/12] improve TCP error reporting
- better error handling
- tighter unit tests
- unit tests safely kill managers at the end
---
open-codegen/opengen/tcp/solver_error.py | 4 +-
.../opengen/templates/tcp/tcp_server.rs | 44 ++-
open-codegen/test/test.py | 258 ++++++++++++------
3 files changed, 207 insertions(+), 99 deletions(-)
diff --git a/open-codegen/opengen/tcp/solver_error.py b/open-codegen/opengen/tcp/solver_error.py
index 61ab7fc8..e2d2d72c 100644
--- a/open-codegen/opengen/tcp/solver_error.py
+++ b/open-codegen/opengen/tcp/solver_error.py
@@ -19,10 +19,10 @@ def code(self):
Possible error codes are:
- - **1000**: Invalid request: Malformed or invalid JSON
+ - **1000**: Invalid request: malformed JSON or invalid UTF-8 payload
- **1600**: Initial guess has incomplete dimensions
- **1700**: Wrong dimension of Lagrange multipliers
- - **2000**: Problem solution failed (solver error)
+ - **2000**: Problem solution failed (message may include the solver reason)
- **3003**: Parameter vector has wrong length
:return: Error code
diff --git a/open-codegen/opengen/templates/tcp/tcp_server.rs b/open-codegen/opengen/templates/tcp/tcp_server.rs
index d3c98f04..0200e3df 100644
--- a/open-codegen/opengen/templates/tcp/tcp_server.rs
+++ b/open-codegen/opengen/templates/tcp/tcp_server.rs
@@ -91,13 +91,22 @@ fn pong(stream: &mut std::net::TcpStream, code: i32) {
}
/// Writes an error to the communication stream
+#[derive(Serialize)]
+struct ErrorResponse<'a> {
+ #[serde(rename = "type")]
+ response_type: &'a str,
+ code: i32,
+ message: &'a str,
+}
+
fn write_error_message(stream: &mut std::net::TcpStream, code: i32, error_msg: &str) {
- let error_message = format!(
- {% raw %}"{{\n\t\"type\" : \"Error\", \n\t\"code\" : {}, \n\t\"message\" : \"{}\"\n}}\n"{% endraw %},
+ let error_response = ErrorResponse {
+ response_type: "Error",
code,
- error_msg
- );
- warn!("Invalid request {:?}", code);
+ message: error_msg,
+ };
+ let error_message = serde_json::to_string_pretty(&error_response).unwrap();
+ warn!("TCP error {}: {}", code, error_msg);
stream
.write_all(error_message.as_bytes())
.expect("cannot write to stream");
@@ -200,8 +209,9 @@ fn execution_handler(
Ok(ok_status) => {
return_solution_to_client(ok_status, u, stream);
}
- Err(_) => {
- write_error_message(stream, 2000, "Problem solution failed (solver error)");
+ Err(err) => {
+ let error_message = format!("problem solution failed: {:?}", err);
+ write_error_message(stream, 2000, &error_message);
}
}
}
@@ -214,7 +224,7 @@ fn run_server(tcp_config: &TcpServerConfiguration) {
let listener = TcpListener::bind(format!("{}:{}", tcp_config.ip, tcp_config.port)).unwrap();
let mut u = [0.0; {{meta.optimizer_name|upper}}_NUM_DECISION_VARIABLES];
info!("listening started, ready to accept connections at {}:{}", tcp_config.ip, tcp_config.port);
- for stream in listener.incoming() {
+ 'incoming: for stream in listener.incoming() {
let mut stream = stream.unwrap();
//The following is more robust compared to `read_to_string`
@@ -225,8 +235,17 @@ fn run_server(tcp_config: &TcpServerConfiguration) {
read_data_length = stream
.read(&mut bytes_buffer)
.expect("could not read stream");
- let new_string = String::from_utf8(bytes_buffer[0..read_data_length].to_vec())
- .expect("sent data is not UFT-8");
+ let new_string = match String::from_utf8(bytes_buffer[0..read_data_length].to_vec()) {
+ Ok(new_string) => new_string,
+ Err(err) => {
+ let error_message = format!(
+ "invalid request: request body is not valid UTF-8 ({})",
+ err.utf8_error()
+ );
+ write_error_message(&mut stream, 1000, &error_message);
+ continue 'incoming;
+ }
+ };
buffer.push_str(&new_string);
}
@@ -251,8 +270,9 @@ fn run_server(tcp_config: &TcpServerConfiguration) {
pong(&mut stream, ping_code);
}
},
- Err(_) => {
- write_error_message(&mut stream, 1000, "Invalid request");
+ Err(err) => {
+ let error_message = format!("invalid request: {}", err);
+ write_error_message(&mut stream, 1000, &error_message);
}
}
}
diff --git a/open-codegen/test/test.py b/open-codegen/test/test.py
index c451df8d..852b49f6 100644
--- a/open-codegen/test/test.py
+++ b/open-codegen/test/test.py
@@ -1,5 +1,7 @@
import os
import unittest
+import json
+import socket
import casadi.casadi as cs
import opengen as og
import subprocess
@@ -227,6 +229,53 @@ def setUpHalfspace(cls):
cls.solverConfig())
builder.build()
+ @classmethod
+ def setUpSolverError(cls):
+ u = cs.MX.sym("u", 1)
+ p = cs.MX.sym("p", 1)
+ phi = cs.dot(u, u)
+ bounds = og.constraints.Rectangle(xmin=[-1.0], xmax=[1.0])
+ tcp_config = og.config.TcpServerConfiguration(bind_port=3310)
+ meta = og.config.OptimizerMeta() \
+ .with_optimizer_name("solver_error")
+ problem = og.builder.Problem(u, p, phi) \
+ .with_constraints(bounds)
+ build_config = og.config.BuildConfiguration() \
+ .with_open_version(local_path=RustBuildTestCase.get_open_local_absolute_path()) \
+ .with_build_directory(RustBuildTestCase.TEST_DIR) \
+ .with_build_mode(og.config.BuildConfiguration.DEBUG_MODE) \
+ .with_tcp_interface_config(tcp_interface_config=tcp_config)
+ og.builder.OpEnOptimizerBuilder(problem,
+ metadata=meta,
+ build_configuration=build_config,
+ solver_configuration=cls.solverConfig()) \
+ .build()
+
+ target_lib = os.path.join(
+ RustBuildTestCase.TEST_DIR, "solver_error", "src", "lib.rs")
+ with open(target_lib, "r", encoding="utf-8") as fh:
+ solver_lib = fh.read()
+
+ # Look for this excerpt inside lib.rs (in the auto-generated solver)...
+ anchor = (
+ ' assert_eq!(u.len(), SOLVER_ERROR_NUM_DECISION_VARIABLES, '
+ '"Wrong number of decision variables (u)");\n'
+ )
+ # Replace the anchor with this so that if p[0] < 0, the function `solve`
+ # will return an error of type SolverError::Cost
+ injected_guard = (
+ anchor +
+ '\n'
+ ' if p[0] < 0.0 {\n'
+ ' return Err(SolverError::Cost);\n'
+ ' }\n'
+ )
+ if anchor not in solver_lib:
+ raise RuntimeError("Could not inject deterministic solver error")
+
+ with open(target_lib, "w", encoding="utf-8") as fh:
+ fh.write(solver_lib.replace(anchor, injected_guard, 1))
+
@classmethod
def setUpClass(cls):
cls.setUpPythonBindings()
@@ -237,6 +286,30 @@ def setUpClass(cls):
cls.setUpPlain()
cls.setUpOnlyParametricF2()
cls.setUpHalfspace()
+ cls.setUpSolverError()
+
+ @staticmethod
+ def raw_tcp_request(ip, port, payload, buffer_size=4096):
+ with socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0) as conn_socket:
+ conn_socket.connect((ip, port))
+ if isinstance(payload, str):
+ payload = payload.encode()
+ conn_socket.sendall(payload)
+ conn_socket.shutdown(socket.SHUT_WR)
+
+ data = b''
+ while True:
+ data_chunk = conn_socket.recv(buffer_size)
+ if not data_chunk:
+ break
+ data += data_chunk
+
+ return json.loads(data.decode())
+
+ def start_tcp_manager(self, manager):
+ manager.start()
+ self.addCleanup(manager.kill) # at the end, kill the TCP manager
+ return manager
def test_python_bindings(self):
import sys
@@ -314,22 +387,18 @@ def test_start_multiple_servers(self):
# Start all servers
for m in all_managers:
- m.start()
+ self.start_tcp_manager(m)
# Ping all
for m in all_managers:
m.ping()
- # Kill all
- for m in all_managers:
- m.kill()
-
def test_rust_build_only_f1(self):
# Start the server using a custom bind IP and port
- mng = og.tcp.OptimizerTcpManager(RustBuildTestCase.TEST_DIR + '/only_f1',
- ip='0.0.0.0',
- port=13757)
- mng.start()
+ mng = self.start_tcp_manager(
+ og.tcp.OptimizerTcpManager(RustBuildTestCase.TEST_DIR + '/only_f1',
+ ip='0.0.0.0',
+ port=13757))
pong = mng.ping() # check if the server is alive
self.assertEqual(1, pong["Pong"])
@@ -373,75 +442,67 @@ def test_rust_build_only_f1(self):
"wrong dimension of Langrange multipliers: provided 1, expected 2",
status.message)
- mng.kill()
-
def test_rust_build_only_f2_preconditioned(self):
- mng1 = og.tcp.OptimizerTcpManager(
- RustBuildTestCase.TEST_DIR + '/only_f2')
- mng2 = og.tcp.OptimizerTcpManager(
- RustBuildTestCase.TEST_DIR + '/only_f2_precond')
- mng1.start()
- mng2.start()
-
- try:
- response1 = mng1.call(p=[0.5, 8.5], initial_guess=[
- 1, 2, 3, 4, 0]).get()
- response2 = mng2.call(p=[0.5, 8.5], initial_guess=[
- 1, 2, 3, 4, 0]).get()
-
- self.assertEqual("Converged", response1.exit_status)
- self.assertEqual("Converged", response2.exit_status)
-
- # Further testing
- slv_cfg = RustBuildTestCase.solverConfig()
- # check that the solution is (near-) feasible
- self.assertTrue(response1.f2_norm < slv_cfg.constraints_tolerance)
- self.assertTrue(response2.f2_norm < slv_cfg.constraints_tolerance)
- # check the nrom of the FPR
- self.assertTrue(response1.last_problem_norm_fpr <
- slv_cfg.tolerance)
- self.assertTrue(response2.last_problem_norm_fpr <
- slv_cfg.tolerance)
- # compare the costs
- self.assertAlmostEqual(response1.cost, response2.cost, 4)
-
- x1, x2 = response1.solution, response2.solution
- for i in range(len(x1)):
- self.assertAlmostEqual(x1[i], x2[i], delta=5e-4)
-
- response = mng1.call(p=[2.0, 10.0, 50.0])
- self.assertFalse(response.is_ok())
- status = response.get()
- self.assertEqual(True, isinstance(status, og.tcp.SolverError))
- self.assertEqual(3003, status.code)
- self.assertEqual(
- "wrong number of parameters: provided 3, expected 2",
- status.message)
-
- response = mng1.call(p=[2.0, 10.0], initial_guess=[0.1, 0.2])
- self.assertFalse(response.is_ok())
- status = response.get()
- self.assertEqual(True, isinstance(status, og.tcp.SolverError))
- self.assertEqual(1600, status.code)
- self.assertEqual(
- "initial guess has incompatible dimensions: provided 2, expected 5",
- status.message)
-
- response = mng1.call(p=[2.0, 10.0], initial_y=[0.1])
- self.assertFalse(response.is_ok())
- status = response.get()
- self.assertEqual(True, isinstance(status, og.tcp.SolverError))
- self.assertEqual(1700, status.code)
- self.assertEqual(
- "wrong dimension of Langrange multipliers: provided 1, expected 0",
- status.message)
- finally:
- mng1.kill()
- mng2.kill()
+ mng1 = self.start_tcp_manager(og.tcp.OptimizerTcpManager(
+ RustBuildTestCase.TEST_DIR + '/only_f2'))
+ mng2 = self.start_tcp_manager(og.tcp.OptimizerTcpManager(
+ RustBuildTestCase.TEST_DIR + '/only_f2_precond'))
+
+ response1 = mng1.call(p=[0.5, 8.5], initial_guess=[
+ 1, 2, 3, 4, 0]).get()
+ response2 = mng2.call(p=[0.5, 8.5], initial_guess=[
+ 1, 2, 3, 4, 0]).get()
+
+ self.assertEqual("Converged", response1.exit_status)
+ self.assertEqual("Converged", response2.exit_status)
+
+ # Further testing
+ slv_cfg = RustBuildTestCase.solverConfig()
+ # check that the solution is (near-) feasible
+ self.assertTrue(response1.f2_norm < slv_cfg.constraints_tolerance)
+ self.assertTrue(response2.f2_norm < slv_cfg.constraints_tolerance)
+ # check the norm of the FPR
+ self.assertTrue(response1.last_problem_norm_fpr <
+ slv_cfg.tolerance)
+ self.assertTrue(response2.last_problem_norm_fpr <
+ slv_cfg.tolerance)
+ # compare the costs
+ self.assertAlmostEqual(response1.cost, response2.cost, 4)
+
+ x1, x2 = response1.solution, response2.solution
+ for i in range(len(x1)):
+ self.assertAlmostEqual(x1[i], x2[i], delta=5e-4)
+
+ response = mng1.call(p=[2.0, 10.0, 50.0])
+ self.assertFalse(response.is_ok())
+ status = response.get()
+ self.assertEqual(True, isinstance(status, og.tcp.SolverError))
+ self.assertEqual(3003, status.code)
+ self.assertEqual(
+ "wrong number of parameters: provided 3, expected 2",
+ status.message)
+
+ response = mng1.call(p=[2.0, 10.0], initial_guess=[0.1, 0.2])
+ self.assertFalse(response.is_ok())
+ status = response.get()
+ self.assertEqual(True, isinstance(status, og.tcp.SolverError))
+ self.assertEqual(1600, status.code)
+ self.assertEqual(
+ "initial guess has incompatible dimensions: provided 2, expected 5",
+ status.message)
+
+ response = mng1.call(p=[2.0, 10.0], initial_y=[0.1])
+ self.assertFalse(response.is_ok())
+ status = response.get()
+ self.assertEqual(True, isinstance(status, og.tcp.SolverError))
+ self.assertEqual(1700, status.code)
+ self.assertEqual(
+ "wrong dimension of Langrange multipliers: provided 1, expected 0",
+ status.message)
def test_rust_build_plain(self):
- mng = og.tcp.OptimizerTcpManager(RustBuildTestCase.TEST_DIR + '/plain')
- mng.start()
+ mng = self.start_tcp_manager(
+ og.tcp.OptimizerTcpManager(RustBuildTestCase.TEST_DIR + '/plain'))
pong = mng.ping() # check if the server is alive
self.assertEqual(1, pong["Pong"])
@@ -451,13 +512,44 @@ def test_rust_build_plain(self):
status = response.get()
self.assertEqual("Converged", status.exit_status)
- mng.kill()
+ def test_rust_build_plain_invalid_request_details(self):
+ self.start_tcp_manager(og.tcp.OptimizerTcpManager(
+ RustBuildTestCase.TEST_DIR + '/plain',
+ ip='127.0.0.1',
+ port=13758))
+
+ malformed_response = og.tcp.SolverResponse(
+ RustBuildTestCase.raw_tcp_request('127.0.0.1', 13758, '{"Run":'))
+ self.assertFalse(malformed_response.is_ok())
+ malformed_status = malformed_response.get()
+ self.assertEqual(1000, malformed_status.code)
+ self.assertTrue(
+ malformed_status.message.startswith("invalid request:"))
+ self.assertIn("line 1 column", malformed_status.message)
+
+ utf8_response = og.tcp.SolverResponse(
+ RustBuildTestCase.raw_tcp_request('127.0.0.1', 13758, b'\xff\xfe'))
+ self.assertFalse(utf8_response.is_ok())
+ utf8_status = utf8_response.get()
+ self.assertEqual(1000, utf8_status.code)
+ self.assertTrue(
+ utf8_status.message.startswith(
+ "invalid request: request body is not valid UTF-8"))
+
+ def test_rust_build_solver_error_details(self):
+ mng = self.start_tcp_manager(og.tcp.OptimizerTcpManager(
+ RustBuildTestCase.TEST_DIR + '/solver_error'))
+
+ response = mng.call(p=[-1.0])
+ self.assertFalse(response.is_ok())
+ status = response.get()
+ self.assertEqual(2000, status.code)
+ self.assertEqual("problem solution failed: Cost", status.message)
def test_rust_build_parametric_f2(self):
# introduced to tackle issue #123
- mng = og.tcp.OptimizerTcpManager(
- RustBuildTestCase.TEST_DIR + '/parametric_f2')
- mng.start()
+ mng = self.start_tcp_manager(og.tcp.OptimizerTcpManager(
+ RustBuildTestCase.TEST_DIR + '/parametric_f2'))
pong = mng.ping() # check if the server is alive
self.assertEqual(1, pong["Pong"])
@@ -467,12 +559,10 @@ def test_rust_build_parametric_f2(self):
status = response.get()
self.assertEqual("Converged", status.exit_status)
self.assertTrue(status.f2_norm < 1e-4)
- mng.kill()
def test_rust_build_parametric_halfspace(self):
- mng = og.tcp.OptimizerTcpManager(
- RustBuildTestCase.TEST_DIR + '/halfspace_optimizer')
- mng.start()
+ mng = self.start_tcp_manager(og.tcp.OptimizerTcpManager(
+ RustBuildTestCase.TEST_DIR + '/halfspace_optimizer'))
pong = mng.ping() # check if the server is alive
self.assertEqual(1, pong["Pong"])
@@ -488,8 +578,6 @@ def test_rust_build_parametric_halfspace(self):
self.assertTrue(sum([u[i] * c[i] for i in range(5)]) - b <= eps)
self.assertTrue(-sum([u[i] * c[i] for i in range(5)]) + b <= eps)
- mng.kill()
-
@staticmethod
def c_bindings_helper(optimizer_name):
p = subprocess.Popen(["/usr/bin/gcc",
From f6b80de5e7f9048da7bfd906fd6795cd09ee5679 Mon Sep 17 00:00:00 2001
From: Pantelis Sopasakis
Date: Fri, 27 Mar 2026 19:39:27 +0000
Subject: [PATCH 03/12] OpEn: good error handling
---
examples/panoc_ex1.rs | 8 +-
.../templates/c/optimizer_cinterface.rs.jinja | 9 +-
.../opengen/templates/tcp/tcp_server.rs | 2 +-
open-codegen/test/test.py | 6 +-
src/alm/alm_factory.rs | 8 +-
src/alm/alm_optimizer.rs | 55 +++++---
src/constraints/affine_space.rs | 18 ++-
src/constraints/ball1.rs | 13 +-
src/constraints/ball2.rs | 4 +-
src/constraints/ballinf.rs | 4 +-
src/constraints/ballp.rs | 17 ++-
src/constraints/cartesian_product.rs | 15 +--
src/constraints/epigraph_squared_norm.rs | 35 +++--
src/constraints/finite.rs | 4 +-
src/constraints/halfspace.rs | 4 +-
src/constraints/hyperplane.rs | 4 +-
src/constraints/mod.rs | 4 +-
src/constraints/no_constraints.rs | 5 +-
src/constraints/rectangle.rs | 15 ++-
src/constraints/simplex.rs | 4 +-
src/constraints/soc.rs | 4 +-
src/constraints/sphere2.rs | 8 +-
src/constraints/tests.rs | 124 +++++++++---------
src/constraints/zero.rs | 4 +-
src/core/fbs/fbs_engine.rs | 27 ++--
src/core/fbs/fbs_optimizer.rs | 4 +-
src/core/fbs/tests.rs | 2 +-
src/core/panoc/panoc_engine.rs | 49 +++++--
src/core/panoc/panoc_optimizer.rs | 6 +-
src/lib.rs | 46 ++++++-
30 files changed, 334 insertions(+), 174 deletions(-)
diff --git a/examples/panoc_ex1.rs b/examples/panoc_ex1.rs
index 01f3e497..c50d3301 100644
--- a/examples/panoc_ex1.rs
+++ b/examples/panoc_ex1.rs
@@ -29,7 +29,9 @@ fn main() {
// define the cost function and its gradient
let df = |u: &[f64], grad: &mut [f64]| -> Result<(), SolverError> {
if a < 0.0 || b < 0.0 {
- Err(SolverError::Cost)
+ Err(SolverError::Cost(
+ "Rosenbrock parameters must be nonnegative",
+ ))
} else {
rosenbrock_grad(a, b, u, grad);
Ok(())
@@ -38,7 +40,9 @@ fn main() {
let f = |u: &[f64], c: &mut f64| -> Result<(), SolverError> {
if a < 0.0 || b < 0.0 {
- Err(SolverError::Cost)
+ Err(SolverError::Cost(
+ "Rosenbrock parameters must be nonnegative",
+ ))
} else {
*c = rosenbrock_cost(a, b, u);
Ok(())
diff --git a/open-codegen/opengen/templates/c/optimizer_cinterface.rs.jinja b/open-codegen/opengen/templates/c/optimizer_cinterface.rs.jinja
index 039dfe0f..c27c356e 100644
--- a/open-codegen/opengen/templates/c/optimizer_cinterface.rs.jinja
+++ b/open-codegen/opengen/templates/c/optimizer_cinterface.rs.jinja
@@ -179,8 +179,11 @@ pub unsafe extern "C" fn {{meta.optimizer_name|lower}}_solve(
},
Err(e) => {{meta.optimizer_name}}SolverStatus {
exit_status: match e {
- SolverError::Cost => {{meta.optimizer_name}}ExitStatus::{{meta.optimizer_name}}NotConvergedCost,
- SolverError::NotFiniteComputation => {{meta.optimizer_name}}ExitStatus::{{meta.optimizer_name}}NotConvergedNotFiniteComputation,
+ SolverError::Cost(_)
+ | SolverError::ProjectionFailed(_)
+ | SolverError::LinearAlgebraFailure(_)
+ | SolverError::InvalidProblemState(_) => {{meta.optimizer_name}}ExitStatus::{{meta.optimizer_name}}NotConvergedCost,
+ SolverError::NotFiniteComputation(_) => {{meta.optimizer_name}}ExitStatus::{{meta.optimizer_name}}NotConvergedNotFiniteComputation,
},
num_outer_iterations: u64::MAX as c_ulong,
num_inner_iterations: u64::MAX as c_ulong,
@@ -209,4 +212,4 @@ pub unsafe extern "C" fn {{meta.optimizer_name|lower}}_free(instance: *mut {{met
assert!(!instance.is_null());
drop(Box::from_raw(instance));
}
-{% endif %}
\ No newline at end of file
+{% endif %}
diff --git a/open-codegen/opengen/templates/tcp/tcp_server.rs b/open-codegen/opengen/templates/tcp/tcp_server.rs
index 0200e3df..14823532 100644
--- a/open-codegen/opengen/templates/tcp/tcp_server.rs
+++ b/open-codegen/opengen/templates/tcp/tcp_server.rs
@@ -210,7 +210,7 @@ fn execution_handler(
return_solution_to_client(ok_status, u, stream);
}
Err(err) => {
- let error_message = format!("problem solution failed: {:?}", err);
+ let error_message = format!("problem solution failed: {}", err);
write_error_message(stream, 2000, &error_message);
}
}
diff --git a/open-codegen/test/test.py b/open-codegen/test/test.py
index 852b49f6..3f69413f 100644
--- a/open-codegen/test/test.py
+++ b/open-codegen/test/test.py
@@ -267,7 +267,7 @@ def setUpSolverError(cls):
anchor +
'\n'
' if p[0] < 0.0 {\n'
- ' return Err(SolverError::Cost);\n'
+ ' return Err(SolverError::Cost("forced solver error for TCP test"));\n'
' }\n'
)
if anchor not in solver_lib:
@@ -544,7 +544,9 @@ def test_rust_build_solver_error_details(self):
self.assertFalse(response.is_ok())
status = response.get()
self.assertEqual(2000, status.code)
- self.assertEqual("problem solution failed: Cost", status.message)
+ self.assertEqual(
+ "problem solution failed: cost or gradient evaluation failed: forced solver error for TCP test",
+ status.message)
def test_rust_build_parametric_f2(self):
# introduced to tackle issue #123
diff --git a/src/alm/alm_factory.rs b/src/alm/alm_factory.rs
index 21776453..b94cd104 100644
--- a/src/alm/alm_factory.rs
+++ b/src/alm/alm_factory.rs
@@ -233,7 +233,7 @@ where
.zip(y_lagrange_mult.iter())
.for_each(|(ti, yi)| *ti += yi / f64::max(penalty_parameter, 1.0));
s.copy_from_slice(&f1_u_plus_y_over_c);
- set_c.project(&mut s);
+ set_c.project(&mut s)?;
*cost += 0.5
* penalty_parameter
* matrix_operations::norm2_squared_diff(&f1_u_plus_y_over_c, &s);
@@ -296,7 +296,7 @@ where
.zip(y_lagrange_mult.iter())
.for_each(|(ti, yi)| *ti += yi / c_penalty_parameter);
s_aux_var.copy_from_slice(&f1_u_plus_y_over_c); // s = t
- set_c.project(&mut s_aux_var); // s = Proj_C(F1(u) + y/c)
+ set_c.project(&mut s_aux_var)?; // s = Proj_C(F1(u) + y/c)
// t = F1(u) + y/c - Proj_C(F1(u) + y/c)
f1_u_plus_y_over_c
@@ -412,7 +412,9 @@ mod tests {
let f2 = mapping_f2;
let jac_f2_tr =
|_u: &[f64], _d: &[f64], _res: &mut [f64]| -> Result<(), crate::SolverError> {
- Err(SolverError::NotFiniteComputation)
+ Err(SolverError::NotFiniteComputation(
+ "mock Jacobian-transpose product returned a non-finite result",
+ ))
};
let factory = AlmFactory::new(
mocks::f0,
diff --git a/src/alm/alm_optimizer.rs b/src/alm/alm_optimizer.rs
index c7d9ec08..c7a99281 100644
--- a/src/alm/alm_optimizer.rs
+++ b/src/alm/alm_optimizer.rs
@@ -650,7 +650,7 @@ where
.for_each(|((y_plus_i, y_i), w_alm_aux_i)| *y_plus_i = w_alm_aux_i + y_i / c);
// Step #3: y_plus := Proj_C(y_plus)
- alm_set_c.project(y_plus);
+ alm_set_c.project(y_plus)?;
// Step #4
y_plus
@@ -667,7 +667,7 @@ where
}
/// Project y on set Y
- fn project_on_set_y(&mut self) {
+ fn project_on_set_y(&mut self) -> FunctionCallResult {
let problem = &self.alm_problem;
if let Some(y_set) = &problem.alm_set_y {
// NOTE: as_mut() converts from &mut Option to Option<&mut T>
@@ -676,9 +676,10 @@ where
// * which can be treated as Option<&mut [f64]>
// * y_vec is &mut [f64]
if let Some(xi_vec) = self.alm_cache.xi.as_mut() {
- y_set.project(&mut xi_vec[1..]);
+ y_set.project(&mut xi_vec[1..])?;
}
}
+ Ok(())
}
/// Solve inner problem
@@ -740,7 +741,7 @@ where
inner_solver.solve(u)
}
- fn is_exit_criterion_satisfied(&self) -> bool {
+ fn is_exit_criterion_satisfied(&self) -> Result {
let cache = &self.alm_cache;
let problem = &self.alm_problem;
// Criterion 1: ||Delta y|| <= c * delta
@@ -763,9 +764,14 @@ where
// This function will panic is there is no akkt_tolerance
// This should never happen because we set the AKKT tolerance
// in the constructor and can never become `None` again
- let criterion_3 =
- cache.panoc_cache.akkt_tolerance.unwrap() <= self.epsilon_tolerance + SMALL_EPSILON;
- criterion_1 && criterion_2 && criterion_3
+ let criterion_3 = cache
+ .panoc_cache
+ .akkt_tolerance
+ .ok_or(SolverError::InvalidProblemState(
+ "missing inner AKKT tolerance while checking the exit criterion",
+ ))?
+ <= self.epsilon_tolerance + SMALL_EPSILON;
+ Ok(criterion_1 && criterion_2 && criterion_3)
}
/// Whether the penalty parameter should not be updated
@@ -802,13 +808,20 @@ where
}
}
- fn update_inner_akkt_tolerance(&mut self) {
+ fn update_inner_akkt_tolerance(&mut self) -> FunctionCallResult {
let cache = &mut self.alm_cache;
// epsilon_{nu+1} := max(epsilon, beta*epsilon_nu)
+ let akkt_tolerance = cache
+ .panoc_cache
+ .akkt_tolerance
+ .ok_or(SolverError::InvalidProblemState(
+ "missing inner AKKT tolerance while updating it",
+ ))?;
cache.panoc_cache.set_akkt_tolerance(f64::max(
- cache.panoc_cache.akkt_tolerance.unwrap() * self.epsilon_update_factor,
+ akkt_tolerance * self.epsilon_update_factor,
self.epsilon_tolerance,
));
+ Ok(())
}
fn final_cache_update(&mut self) {
@@ -843,7 +856,7 @@ where
let mut inner_exit_status: ExitStatus = ExitStatus::Converged;
// Project y on Y
- self.project_on_set_y();
+ self.project_on_set_y()?;
// If the inner problem fails miserably, the failure should be propagated
// upstream (using `?`). If the inner problem has not converged, that is fine,
@@ -867,7 +880,7 @@ where
self.compute_alm_infeasibility()?; // ALM: ||y_plus - y||
// Check exit criterion
- if self.is_exit_criterion_satisfied() {
+ if self.is_exit_criterion_satisfied()? {
// Do not continue the outer iteration
// An (epsilon, delta)-AKKT point has been found
return Ok(InnerProblemStatus::new(false, inner_exit_status));
@@ -876,7 +889,7 @@ where
}
// Update inner problem tolerance
- self.update_inner_akkt_tolerance();
+ self.update_inner_akkt_tolerance()?;
// conclusive step: updated iteration count, resets PANOC cache,
// sets f2_norm = f2_norm_plus etc
@@ -983,7 +996,9 @@ where
self.alm_cache
.y_plus
.as_ref()
- .expect("Although n1 > 0, there is no vector y (Lagrange multipliers)"),
+ .ok_or(SolverError::InvalidProblemState(
+ "missing Lagrange multipliers at the ALM solution",
+ ))?,
);
Ok(status)
} else {
@@ -1129,7 +1144,7 @@ mod tests {
.with_initial_penalty(25.0)
.with_initial_lagrange_multipliers(&[2., 3., 4., 10.]);
- alm_optimizer.project_on_set_y();
+ alm_optimizer.project_on_set_y().unwrap();
if let Some(xi_after_proj) = &alm_optimizer.alm_cache.xi {
println!("xi = {:#?}", xi_after_proj);
let y_projected_correct = [
@@ -1282,7 +1297,7 @@ mod tests {
.with_initial_inner_tolerance(1e-1)
.with_inner_tolerance_update_factor(0.2);
- alm_optimizer.update_inner_akkt_tolerance();
+ alm_optimizer.update_inner_akkt_tolerance().unwrap();
unit_test_utils::assert_nearly_equal(
0.1,
@@ -1305,7 +1320,7 @@ mod tests {
);
for _i in 1..=5 {
- alm_optimizer.update_inner_akkt_tolerance();
+ alm_optimizer.update_inner_akkt_tolerance().unwrap();
}
unit_test_utils::assert_nearly_equal(
2e-5,
@@ -1411,20 +1426,20 @@ mod tests {
// should not exit yet...
assert!(
- !alm_optimizer.is_exit_criterion_satisfied(),
+ !alm_optimizer.is_exit_criterion_satisfied().unwrap(),
"exists right away"
);
let alm_optimizer = alm_optimizer
.with_initial_inner_tolerance(1e-3)
.with_epsilon_tolerance(1e-3);
- assert!(!alm_optimizer.is_exit_criterion_satisfied());
+ assert!(!alm_optimizer.is_exit_criterion_satisfied().unwrap());
alm_optimizer.alm_cache.delta_y_norm_plus = 1e-3;
- assert!(!alm_optimizer.is_exit_criterion_satisfied());
+ assert!(!alm_optimizer.is_exit_criterion_satisfied().unwrap());
alm_optimizer.alm_cache.f2_norm_plus = 1e-3;
- assert!(alm_optimizer.is_exit_criterion_satisfied());
+ assert!(alm_optimizer.is_exit_criterion_satisfied().unwrap());
}
#[test]
diff --git a/src/constraints/affine_space.rs b/src/constraints/affine_space.rs
index 2f25ceba..7ca78608 100644
--- a/src/constraints/affine_space.rs
+++ b/src/constraints/affine_space.rs
@@ -1,6 +1,5 @@
use super::Constraint;
-use crate::matrix_operations;
-use crate::CholeskyFactorizer;
+use crate::{matrix_operations, CholeskyFactorizer, FunctionCallResult, SolverError};
use ndarray::{ArrayView1, ArrayView2};
@@ -82,26 +81,33 @@ impl Constraint for AffineSpace {
/// ```
///
/// The result is stored in `x` and it can be verified that $Ax = b$.
- fn project(&self, x: &mut [f64]) {
+ fn project(&self, x: &mut [f64]) -> FunctionCallResult {
let n = self.n_cols;
assert!(x.len() == n, "x has wrong dimension");
// Step 1: Compute e = Ax - b
let a = ArrayView2::from_shape((self.n_rows, self.n_cols), &self.a_mat)
- .expect("invalid A shape");
+ .map_err(|_| {
+ SolverError::InvalidProblemState(
+ "failed to construct the affine-space matrix view",
+ )
+ })?;
let x_view = ArrayView1::from(&x[..]);
let b = ArrayView1::from(&self.b_vec[..]);
let e = a.dot(&x_view) - b;
- let e_slice: &[f64] = e.as_slice().unwrap();
+ let e_slice: &[f64] = e.as_slice().ok_or(SolverError::InvalidProblemState(
+ "affine-space residual vector is not stored contiguously",
+ ))?;
// Step 2: Solve AA' z = e and compute z
- let z = self.factorizer.solve(e_slice).unwrap();
+ let z = self.factorizer.solve(e_slice)?;
// Step 3: Compute x = x - A'z
let at_z = a.t().dot(&ArrayView1::from(&z[..]));
for (xi, corr) in x.iter_mut().zip(at_z.iter()) {
*xi -= *corr;
}
+ Ok(())
}
/// Affine sets are convex.
diff --git a/src/constraints/ball1.rs b/src/constraints/ball1.rs
index eb1ee77f..3a4fd921 100644
--- a/src/constraints/ball1.rs
+++ b/src/constraints/ball1.rs
@@ -1,5 +1,6 @@
use super::Constraint;
use super::Simplex;
+use crate::FunctionCallResult;
#[derive(Copy, Clone)]
/// A norm-1 ball, that is, a set given by $B_1^r = \\{x \in \mathbb{R}^n {}:{} \Vert{}x{}\Vert_1 \leq r\\}$
@@ -23,7 +24,7 @@ impl<'a> Ball1<'a> {
}
}
- fn project_on_ball1_centered_at_origin(&self, x: &mut [f64]) {
+ fn project_on_ball1_centered_at_origin(&self, x: &mut [f64]) -> FunctionCallResult {
if crate::matrix_operations::norm1(x) > self.radius {
// u = |x| (copied)
let mut u = vec![0.0; x.len()];
@@ -31,16 +32,17 @@ impl<'a> Ball1<'a> {
.zip(x.iter())
.for_each(|(ui, &xi)| *ui = f64::abs(xi));
// u = P_simplex(u)
- self.simplex.project(&mut u);
+ self.simplex.project(&mut u)?;
x.iter_mut()
.zip(u.iter())
.for_each(|(xi, &ui)| *xi = f64::signum(*xi) * ui);
}
+ Ok(())
}
}
impl<'a> Constraint for Ball1<'a> {
- fn project(&self, x: &mut [f64]) {
+ fn project(&self, x: &mut [f64]) -> FunctionCallResult {
if let Some(center) = &self.center {
assert_eq!(
x.len(),
@@ -50,13 +52,14 @@ impl<'a> Constraint for Ball1<'a> {
x.iter_mut()
.zip(center.iter())
.for_each(|(xi, &ci)| *xi -= ci);
- self.project_on_ball1_centered_at_origin(x);
+ self.project_on_ball1_centered_at_origin(x)?;
x.iter_mut()
.zip(center.iter())
.for_each(|(xi, &ci)| *xi += ci);
} else {
- self.project_on_ball1_centered_at_origin(x);
+ self.project_on_ball1_centered_at_origin(x)?;
}
+ Ok(())
}
fn is_convex(&self) -> bool {
diff --git a/src/constraints/ball2.rs b/src/constraints/ball2.rs
index c4475cde..1cbd8e8d 100644
--- a/src/constraints/ball2.rs
+++ b/src/constraints/ball2.rs
@@ -1,4 +1,5 @@
use super::Constraint;
+use crate::FunctionCallResult;
#[derive(Copy, Clone)]
/// A Euclidean ball, that is, a set given by $B_2^r = \\{x \in \mathbb{R}^n {}:{} \Vert{}x{}\Vert \leq r\\}$
@@ -19,7 +20,7 @@ impl<'a> Ball2<'a> {
}
impl<'a> Constraint for Ball2<'a> {
- fn project(&self, x: &mut [f64]) {
+ fn project(&self, x: &mut [f64]) -> FunctionCallResult {
if let Some(center) = &self.center {
assert_eq!(
x.len(),
@@ -46,6 +47,7 @@ impl<'a> Constraint for Ball2<'a> {
x.iter_mut().for_each(|x_| *x_ /= norm_over_radius);
}
}
+ Ok(())
}
fn is_convex(&self) -> bool {
diff --git a/src/constraints/ballinf.rs b/src/constraints/ballinf.rs
index 8b87c688..ddae792d 100644
--- a/src/constraints/ballinf.rs
+++ b/src/constraints/ballinf.rs
@@ -1,4 +1,5 @@
use super::Constraint;
+use crate::FunctionCallResult;
#[derive(Copy, Clone)]
/// An infinity ball defined as $B_\infty^r = \\{x\in\mathbb{R}^n {}:{} \Vert{}x{}\Vert_{\infty} \leq r\\}$,
@@ -42,7 +43,7 @@ impl<'a> Constraint for BallInf<'a> {
///
/// for all $i=1,\ldots, n$.
///
- fn project(&self, x: &mut [f64]) {
+ fn project(&self, x: &mut [f64]) -> FunctionCallResult {
if let Some(center) = &self.center {
assert_eq!(
x.len(),
@@ -58,6 +59,7 @@ impl<'a> Constraint for BallInf<'a> {
.filter(|xi| xi.abs() > self.radius)
.for_each(|xi| *xi = xi.signum() * self.radius);
}
+ Ok(())
}
fn is_convex(&self) -> bool {
diff --git a/src/constraints/ballp.rs b/src/constraints/ballp.rs
index 3a0d893b..7abbb22f 100644
--- a/src/constraints/ballp.rs
+++ b/src/constraints/ballp.rs
@@ -1,4 +1,5 @@
use super::Constraint;
+use crate::{FunctionCallResult, SolverError};
#[derive(Copy, Clone)]
/// An $\\ell_p$ ball, that is,
@@ -157,7 +158,7 @@ impl<'a> BallP<'a> {
.powf(1.0 / self.p)
}
- fn project_lp_ball(&self, x: &mut [f64]) {
+ fn project_lp_ball(&self, x: &mut [f64]) -> FunctionCallResult {
let p = self.p;
let r = self.radius;
let tol = self.tolerance;
@@ -165,7 +166,7 @@ impl<'a> BallP<'a> {
let current_norm = self.lp_norm(x);
if current_norm <= r {
- return;
+ return Ok(());
}
     let abs_x: Vec<f64> = x.iter().map(|xi| xi.abs()).collect();
@@ -188,7 +189,9 @@ impl<'a> BallP<'a> {
while radius_error(lambda_hi) > 0.0 {
lambda_hi *= 2.0;
if lambda_hi > 1e20 {
- panic!("Failed to bracket the Lagrange multiplier");
+ return Err(SolverError::ProjectionFailed(
+ "failed to bracket the Lagrange multiplier",
+ ));
}
}
@@ -215,6 +218,7 @@ impl<'a> BallP<'a> {
let u = Self::solve_coordinate_newton(a, lambda_star, p, tol, max_iter);
*xi = xi.signum() * u;
});
+ Ok(())
}
/// Solve for u >= 0 the equation u + lambda * p * u^(p-1) = a
@@ -271,7 +275,7 @@ impl<'a> BallP<'a> {
}
impl<'a> Constraint for BallP<'a> {
- fn project(&self, x: &mut [f64]) {
+ fn project(&self, x: &mut [f64]) -> FunctionCallResult {
if let Some(center) = &self.center {
assert_eq!(
x.len(),
@@ -285,14 +289,15 @@ impl<'a> Constraint for BallP<'a> {
.zip(x.iter().zip(center.iter()))
.for_each(|(s, (xi, ci))| *s = *xi - *ci);
- self.project_lp_ball(&mut shifted);
+ self.project_lp_ball(&mut shifted)?;
x.iter_mut()
.zip(shifted.iter().zip(center.iter()))
.for_each(|(xi, (si, ci))| *xi = *ci + *si);
} else {
- self.project_lp_ball(x);
+ self.project_lp_ball(x)?;
}
+ Ok(())
}
fn is_convex(&self) -> bool {
diff --git a/src/constraints/cartesian_product.rs b/src/constraints/cartesian_product.rs
index edf15b5f..83d2eacc 100644
--- a/src/constraints/cartesian_product.rs
+++ b/src/constraints/cartesian_product.rs
@@ -1,4 +1,5 @@
use super::Constraint;
+use crate::FunctionCallResult;
/// Cartesian product of constraints.
///
@@ -143,16 +144,14 @@ impl<'a> Constraint for CartesianProduct<'a> {
///
/// The method will panic if the dimension of `x` is not equal to the
/// dimension of the Cartesian product (see `dimension()`)
- fn project(&self, x: &mut [f64]) {
+ fn project(&self, x: &mut [f64]) -> FunctionCallResult {
assert!(x.len() == self.dimension(), "x has wrong size");
let mut j = 0;
- self.idx
- .iter()
- .zip(self.constraints.iter())
- .for_each(|(&i, c)| {
- c.project(&mut x[j..i]);
- j = i;
- });
+ for (&i, c) in self.idx.iter().zip(self.constraints.iter()) {
+ c.project(&mut x[j..i])?;
+ j = i;
+ }
+ Ok(())
}
fn is_convex(&self) -> bool {
diff --git a/src/constraints/epigraph_squared_norm.rs b/src/constraints/epigraph_squared_norm.rs
index f91617c8..a0032654 100644
--- a/src/constraints/epigraph_squared_norm.rs
+++ b/src/constraints/epigraph_squared_norm.rs
@@ -1,4 +1,4 @@
-use crate::matrix_operations;
+use crate::{matrix_operations, FunctionCallResult, SolverError};
use super::Constraint;
@@ -46,7 +46,10 @@ impl Constraint for EpigraphSquaredNorm {
///
/// Panics if:
///
- /// - `x.len() < 2`,
+ /// - `x.len() < 2`.
+ ///
+ /// Returns an error if:
+ ///
/// - no admissible real root is found,
/// - the Newton derivative becomes too small,
/// - the final scaling factor is numerically singular.
@@ -63,7 +66,7 @@ impl Constraint for EpigraphSquaredNorm {
///
/// epi.project(&mut x);
/// ```
- fn project(&self, x: &mut [f64]) {
+ fn project(&self, x: &mut [f64]) -> FunctionCallResult {
assert!(
x.len() >= 2,
"EpigraphSquaredNorm::project requires x.len() >= 2"
@@ -76,7 +79,7 @@ impl Constraint for EpigraphSquaredNorm {
// Already feasible
if norm_z_sq <= t {
- return;
+ return Ok(());
}
// Cubic:
@@ -106,8 +109,9 @@ impl Constraint for EpigraphSquaredNorm {
}
}
- let mut zsol =
- right_root.expect("EpigraphSquaredNorm::project: no admissible real root found");
+ let mut zsol = right_root.ok_or(SolverError::ProjectionFailed(
+ "no admissible real root found for the cubic projection equation",
+ ))?;
// Newton refinement
let newton_max_iters: usize = 5;
@@ -123,10 +127,11 @@ impl Constraint for EpigraphSquaredNorm {
}
let dp_z = 3.0 * a3 * zsol_sq + 2.0 * a2 * zsol + a1;
- assert!(
- dp_z.abs() > 1e-15,
- "EpigraphSquaredNorm::project: Newton derivative too small"
- );
+ if dp_z.abs() <= 1e-15 {
+ return Err(SolverError::ProjectionFailed(
+ "Newton refinement derivative is too small",
+ ));
+ }
zsol -= p_z / dp_z;
}
@@ -134,16 +139,18 @@ impl Constraint for EpigraphSquaredNorm {
let right_root = zsol;
let scaling = 1.0 + 2.0 * (right_root - t);
- assert!(
- scaling.abs() > 1e-15,
- "EpigraphSquaredNorm::project: scaling factor too small"
- );
+ if scaling.abs() <= 1e-15 {
+ return Err(SolverError::ProjectionFailed(
+ "projection scaling factor is numerically singular",
+ ));
+ }
// Projection
for xi in x.iter_mut().take(nx) {
*xi /= scaling;
}
x[nx] = right_root;
+ Ok(())
}
/// This is a convex set, so this function returns `true`.
diff --git a/src/constraints/finite.rs b/src/constraints/finite.rs
index 3f9393cd..fd0dc9f7 100644
--- a/src/constraints/finite.rs
+++ b/src/constraints/finite.rs
@@ -1,4 +1,5 @@
use super::Constraint;
+use crate::FunctionCallResult;
///
/// A finite set, $X = \\{x_1, x_2, \ldots, x_n\\}\subseteq\mathbb{R}^n$, given vectors
@@ -85,7 +86,7 @@ impl<'a> Constraint for FiniteSet<'a> {
/// This method panics if the dimension of `x` is not equal to the
/// dimension of the points in the finite set.
///
- fn project(&self, x: &mut [f64]) {
+ fn project(&self, x: &mut [f64]) -> FunctionCallResult {
assert_eq!(x.len(), self.data[0].len(), "x has incompatible dimension");
let mut idx: usize = 0;
let mut best_distance: f64 = num::Float::infinity();
@@ -97,6 +98,7 @@ impl<'a> Constraint for FiniteSet<'a> {
}
}
x.copy_from_slice(self.data[idx]);
+ Ok(())
}
fn is_convex(&self) -> bool {
diff --git a/src/constraints/halfspace.rs b/src/constraints/halfspace.rs
index 5442ca54..069b38c6 100644
--- a/src/constraints/halfspace.rs
+++ b/src/constraints/halfspace.rs
@@ -1,5 +1,6 @@
use super::Constraint;
use crate::matrix_operations;
+use crate::FunctionCallResult;
#[derive(Clone)]
/// A halfspace is a set given by $H = \\{x \in \mathbb{R}^n {}:{} \langle c, x\rangle \leq b\\}$.
@@ -79,7 +80,7 @@ impl<'a> Constraint for Halfspace<'a> {
/// This method panics if the length of `x` is not equal to the dimension
/// of the halfspace.
///
- fn project(&self, x: &mut [f64]) {
+ fn project(&self, x: &mut [f64]) -> FunctionCallResult {
let inner_product = matrix_operations::inner_product(x, self.normal_vector);
if inner_product > self.offset {
let factor = (inner_product - self.offset) / self.normal_vector_squared_norm;
@@ -87,6 +88,7 @@ impl<'a> Constraint for Halfspace<'a> {
.zip(self.normal_vector.iter())
.for_each(|(x, normal_vector_i)| *x -= factor * normal_vector_i);
}
+ Ok(())
}
/// Halfspaces are convex sets
diff --git a/src/constraints/hyperplane.rs b/src/constraints/hyperplane.rs
index 886fd494..01362f11 100644
--- a/src/constraints/hyperplane.rs
+++ b/src/constraints/hyperplane.rs
@@ -1,5 +1,6 @@
use super::Constraint;
use crate::matrix_operations;
+use crate::FunctionCallResult;
#[derive(Clone)]
/// A hyperplane is a set given by $H = \\{x \in \mathbb{R}^n {}:{} \langle c, x\rangle = b\\}$.
@@ -79,13 +80,14 @@ impl<'a> Constraint for Hyperplane<'a> {
/// This method panics if the length of `x` is not equal to the dimension
/// of the hyperplane.
///
- fn project(&self, x: &mut [f64]) {
+ fn project(&self, x: &mut [f64]) -> FunctionCallResult {
assert_eq!(x.len(), self.normal_vector.len(), "x has wrong dimension");
let inner_product = matrix_operations::inner_product(x, self.normal_vector);
let factor = (inner_product - self.offset) / self.normal_vector_squared_norm;
x.iter_mut()
.zip(self.normal_vector.iter())
.for_each(|(x, nrm_vct)| *x -= factor * nrm_vct);
+ Ok(())
}
/// Hyperplanes are convex sets
diff --git a/src/constraints/mod.rs b/src/constraints/mod.rs
index d4ce2462..a4cf3c22 100644
--- a/src/constraints/mod.rs
+++ b/src/constraints/mod.rs
@@ -8,6 +8,8 @@
//!
//! [`Constraint`]: trait.Constraint.html
+use crate::FunctionCallResult;
+
mod affine_space;
mod ball1;
mod ball2;
@@ -57,7 +59,7 @@ pub trait Constraint {
///
/// - `x`: The given vector $x$ is updated with the projection on the set
///
- fn project(&self, x: &mut [f64]);
+ fn project(&self, x: &mut [f64]) -> FunctionCallResult;
/// Returns true if and only if the set is convex
fn is_convex(&self) -> bool;
diff --git a/src/constraints/no_constraints.rs b/src/constraints/no_constraints.rs
index 88df8845..cebb0057 100644
--- a/src/constraints/no_constraints.rs
+++ b/src/constraints/no_constraints.rs
@@ -1,4 +1,5 @@
use super::Constraint;
+use crate::FunctionCallResult;
/// The whole space, no constraints
#[derive(Default, Clone, Copy)]
@@ -13,7 +14,9 @@ impl NoConstraints {
}
impl Constraint for NoConstraints {
- fn project(&self, _x: &mut [f64]) {}
+ fn project(&self, _x: &mut [f64]) -> FunctionCallResult {
+ Ok(())
+ }
fn is_convex(&self) -> bool {
true
diff --git a/src/constraints/rectangle.rs b/src/constraints/rectangle.rs
index 76b52ceb..a4511b59 100644
--- a/src/constraints/rectangle.rs
+++ b/src/constraints/rectangle.rs
@@ -1,4 +1,5 @@
use super::Constraint;
+use crate::FunctionCallResult;
#[derive(Clone, Copy)]
///
@@ -36,10 +37,13 @@ impl<'a> Rectangle<'a> {
///
pub fn new(xmin: Option<&'a [f64]>, xmax: Option<&'a [f64]>) -> Self {
assert!(xmin.is_some() || xmax.is_some()); // xmin or xmax must be Some
- assert!(
- xmin.is_none() || xmax.is_none() || xmin.unwrap().len() == xmax.unwrap().len(),
- "incompatible dimensions of xmin and xmax"
- );
+ if let (Some(xmin), Some(xmax)) = (xmin, xmax) {
+ assert_eq!(
+ xmin.len(),
+ xmax.len(),
+ "incompatible dimensions of xmin and xmax"
+ );
+ }
if let (Some(xmin), Some(xmax)) = (xmin, xmax) {
assert!(
xmin.iter()
@@ -54,7 +58,7 @@ impl<'a> Rectangle<'a> {
}
impl<'a> Constraint for Rectangle<'a> {
- fn project(&self, x: &mut [f64]) {
+ fn project(&self, x: &mut [f64]) -> FunctionCallResult {
if let Some(xmin) = &self.xmin {
assert_eq!(
x.len(),
@@ -80,6 +84,7 @@ impl<'a> Constraint for Rectangle<'a> {
};
});
}
+ Ok(())
}
fn is_convex(&self) -> bool {
diff --git a/src/constraints/simplex.rs b/src/constraints/simplex.rs
index 061b390e..2963cbc4 100644
--- a/src/constraints/simplex.rs
+++ b/src/constraints/simplex.rs
@@ -1,4 +1,5 @@
use super::Constraint;
+use crate::FunctionCallResult;
#[derive(Copy, Clone)]
/// A simplex with level $\alpha$ is a set of the form
@@ -24,7 +25,7 @@ impl Constraint for Simplex {
/// See: Laurent Condat. Fast Projection onto the Simplex and the $\ell_1$ Ball.
/// Mathematical Programming, Series A, Springer, 2016, 158 (1), pp.575-585.
/// ⟨10.1007/s10107-015-0946-6⟩.
- fn project(&self, x: &mut [f64]) {
+ fn project(&self, x: &mut [f64]) -> FunctionCallResult {
assert!(!x.is_empty(), "x must be nonempty");
let a = &self.alpha;
@@ -82,6 +83,7 @@ impl Constraint for Simplex {
// ---- step 6
let zero: f64 = 0.0;
x.iter_mut().for_each(|x_n| *x_n = zero.max(*x_n - rho));
+ Ok(())
}
fn is_convex(&self) -> bool {
diff --git a/src/constraints/soc.rs b/src/constraints/soc.rs
index 8ff0759b..91fce945 100644
--- a/src/constraints/soc.rs
+++ b/src/constraints/soc.rs
@@ -1,5 +1,6 @@
use super::Constraint;
use crate::matrix_operations;
+use crate::FunctionCallResult;
#[derive(Clone, Copy)]
///
@@ -56,7 +57,7 @@ impl Constraint for SecondOrderCone {
///
/// This method panics if the length of `x` is less than 2.
///
- fn project(&self, x: &mut [f64]) {
+ fn project(&self, x: &mut [f64]) -> FunctionCallResult {
// x = (z, r)
let n = x.len();
assert!(n >= 2, "x must be of dimension at least 2");
@@ -72,6 +73,7 @@ impl Constraint for SecondOrderCone {
.for_each(|v| *v *= self.alpha * beta / norm_z);
x[n - 1] = beta;
}
+ Ok(())
}
fn is_convex(&self) -> bool {
diff --git a/src/constraints/sphere2.rs b/src/constraints/sphere2.rs
index 86433855..8cb38695 100644
--- a/src/constraints/sphere2.rs
+++ b/src/constraints/sphere2.rs
@@ -1,4 +1,5 @@
use super::Constraint;
+use crate::FunctionCallResult;
#[derive(Copy, Clone)]
/// A Euclidean sphere, that is, a set given by $S_2^r = \\{x \in \mathbb{R}^n {}:{} \Vert{}x{}\Vert = r\\}$
@@ -38,7 +39,7 @@ impl<'a> Constraint for Sphere2<'a> {
/// Panics if `x` is empty or, when a center is provided, if `x` and
/// `center` have incompatible dimensions.
///
- fn project(&self, x: &mut [f64]) {
+ fn project(&self, x: &mut [f64]) -> FunctionCallResult {
let epsilon = 1e-12;
assert!(!x.is_empty(), "x must be nonempty");
if let Some(center) = &self.center {
@@ -51,7 +52,7 @@ impl<'a> Constraint for Sphere2<'a> {
if norm_difference <= epsilon {
x.copy_from_slice(center);
x[0] += self.radius;
- return;
+ return Ok(());
}
x.iter_mut().zip(center.iter()).for_each(|(x, c)| {
*x = *c + self.radius * (*x - *c) / norm_difference;
@@ -60,11 +61,12 @@ impl<'a> Constraint for Sphere2<'a> {
let norm_x = crate::matrix_operations::norm2(x);
if norm_x <= epsilon {
x[0] += self.radius;
- return;
+ return Ok(());
}
let norm_over_radius = self.radius / norm_x;
x.iter_mut().for_each(|x_| *x_ *= norm_over_radius);
}
+ Ok(())
}
/// Returns false (the sphere is not a convex set)
diff --git a/src/constraints/tests.rs b/src/constraints/tests.rs
index 58f88f25..a6734b8b 100644
--- a/src/constraints/tests.rs
+++ b/src/constraints/tests.rs
@@ -10,7 +10,7 @@ fn t_zero_set() {
let zero = Zero::new();
let mut x = [1.0, 2.0, 3.0];
let x_projection = [0.0; 3];
- zero.project(&mut x);
+ zero.project(&mut x).unwrap();
unit_test_utils::assert_nearly_equal_array(
&x_projection,
&x,
@@ -31,7 +31,7 @@ fn t_hyperplane() {
0.285_714_285_714_286,
0.928_571_428_571_429,
];
- hyperplane.project(&mut x);
+ hyperplane.project(&mut x).unwrap();
unit_test_utils::assert_nearly_equal_array(
&x,
&x_proj_expected,
@@ -54,7 +54,7 @@ fn t_hyperplane_wrong_dimension() {
let normal_vector = [1.0, 2.0, 3.0];
let hyperplane = Hyperplane::new(&normal_vector, 1.0);
let mut x = [1.0, 2.0];
- hyperplane.project(&mut x);
+ hyperplane.project(&mut x).unwrap();
}
#[test]
@@ -64,7 +64,7 @@ fn t_halfspace_project_inside() {
let halfspace = Halfspace::new(&normal_vector, offset);
let mut x = [-1., 3.];
let x_expected = [-1., 3.];
- halfspace.project(&mut x);
+ halfspace.project(&mut x).unwrap();
unit_test_utils::assert_nearly_equal_array(
&x,
&x_expected,
@@ -81,7 +81,7 @@ fn t_halfspace_project_outside() {
let halfspace = Halfspace::new(&normal_vector, offset);
let mut x = [-1., 3.];
let x_expected = [-1.8, 1.4];
- halfspace.project(&mut x);
+ halfspace.project(&mut x).unwrap();
unit_test_utils::assert_nearly_equal_array(
&x,
&x_expected,
@@ -112,7 +112,7 @@ fn t_finite_set() {
let data: &[&[f64]] = &[&[0.0, 0.0], &[1.0, 1.0], &[0.0, 1.0], &[1.0, 0.0]];
let finite_set = FiniteSet::new(data);
let mut x = [0.7, 0.6];
- finite_set.project(&mut x);
+ finite_set.project(&mut x).unwrap();
unit_test_utils::assert_nearly_equal_array(
&[1.0, 1.0],
&x,
@@ -121,7 +121,7 @@ fn t_finite_set() {
"projection is wrong (should be [1,1])",
);
x = [-0.1, 0.2];
- finite_set.project(&mut x);
+ finite_set.project(&mut x).unwrap();
unit_test_utils::assert_nearly_equal_array(
&[0.0, 0.0],
&x,
@@ -130,7 +130,7 @@ fn t_finite_set() {
"projection is wrong (should be [0,0])",
);
x = [0.48, 0.501];
- finite_set.project(&mut x);
+ finite_set.project(&mut x).unwrap();
unit_test_utils::assert_nearly_equal_array(
&[0.0, 1.0],
&x,
@@ -139,7 +139,7 @@ fn t_finite_set() {
"projection is wrong (should be [0,1])",
);
x = [0.7, 0.2];
- finite_set.project(&mut x);
+ finite_set.project(&mut x).unwrap();
unit_test_utils::assert_nearly_equal_array(
&[1.0, 0.0],
&x,
@@ -155,7 +155,7 @@ fn t_finite_set_project_wrong_dimension() {
let data: &[&[f64]] = &[&[0.0, 0.0], &[1.0, 1.0]];
let finite_set = FiniteSet::new(data);
let mut x = [0.5, 0.5, 0.5];
- finite_set.project(&mut x);
+ finite_set.project(&mut x).unwrap();
}
#[test]
@@ -165,7 +165,7 @@ fn t_rectangle_bounded() {
let rectangle = Rectangle::new(Some(&xmin[..]), Some(&xmax[..]));
let mut x = [1.0, 2.0, 3.0, 4.0, 5.0];
- rectangle.project(&mut x);
+ rectangle.project(&mut x).unwrap();
unit_test_utils::assert_nearly_equal_array(
&[2.0, 2.0, 3.0, 4.0, 4.5],
@@ -183,7 +183,7 @@ fn t_rectangle_infinite_bounds() {
let rectangle = Rectangle::new(Some(&xmin[..]), Some(&xmax[..]));
let mut x = [-2.0, 3.0, 1.0];
- rectangle.project(&mut x);
+ rectangle.project(&mut x).unwrap();
unit_test_utils::assert_nearly_equal_array(
&[-1.0, 3.0, 1.0],
@@ -217,7 +217,7 @@ fn t_rectangle_bounded_negative_entries() {
let rectangle = Rectangle::new(Some(&xmin[..]), Some(&xmax[..]));
let mut x = [-6.0, -3.0, 0.0, 3.0, -5.0, 1.0, 2.0, 3.0, -1.0, 0.0, 0.0];
- rectangle.project(&mut x);
+ rectangle.project(&mut x).unwrap();
unit_test_utils::assert_nearly_equal_array(
&[-5.0, -3.0, -1.0, 2.0, -1.0, 0.0, 2.0, 3.0, 3.0, 4.0, 5.0],
@@ -234,7 +234,7 @@ fn t_rectangle_only_xmin() {
let rectangle = Rectangle::new(Some(&xmin[..]), None);
let mut x = [1.0, 2.0, 3.0, 4.0, 5.0];
- rectangle.project(&mut x);
+ rectangle.project(&mut x).unwrap();
unit_test_utils::assert_nearly_equal_array(
&[2.0, 2.0, 3.0, 4.0, 5.0],
@@ -251,7 +251,7 @@ fn t_rectangle_only_xmax() {
let rectangle = Rectangle::new(None, Some(&xmax[..]));
let mut x = [-10.0, -20.0, 0.0, 5.0, 3.0];
- rectangle.project(&mut x);
+ rectangle.project(&mut x).unwrap();
unit_test_utils::assert_nearly_equal_array(
&[-10.0, -20.0, -3.0, -3.0, -3.0],
@@ -268,7 +268,7 @@ fn t_ball2_at_origin() {
let mut x = [1.0, 1.0];
let ball = Ball2::new(None, radius);
- ball.project(&mut x);
+ ball.project(&mut x).unwrap();
unit_test_utils::assert_nearly_equal_array(
&[
@@ -287,7 +287,7 @@ fn t_ball2_at_origin_different_radius_outside() {
let radius = 0.8;
let mut x = [1.0, 1.0];
let ball = Ball2::new(None, radius);
- ball.project(&mut x);
+ ball.project(&mut x).unwrap();
let norm_proj_x = crate::matrix_operations::norm2(&x);
unit_test_utils::assert_nearly_equal(radius, norm_proj_x, 1e-10, 1e-12, "wrong norm");
}
@@ -297,7 +297,7 @@ fn t_ball2_at_origin_different_radius_inside() {
let radius = 0.8;
let mut x = [-0.2, 0.15];
let ball = Ball2::new(None, radius);
- ball.project(&mut x);
+ ball.project(&mut x).unwrap();
unit_test_utils::assert_nearly_equal_array(&x, &[-0.2, 0.15], 1e-10, 1e-12, "wrong");
}
@@ -307,7 +307,7 @@ fn t_ball2_at_center_different_radius_outside() {
let mut x = [1.0, 1.0];
let center = [-0.8, -1.1];
     let ball = Ball2::new(Some(&center), radius);
- ball.project(&mut x);
+ ball.project(&mut x).unwrap();
     let norm_x_minus_c = crate::matrix_operations::norm2_squared_diff(&x, &center).sqrt();
unit_test_utils::assert_nearly_equal(radius, norm_x_minus_c, 1e-10, 1e-12, "wrong norm");
}
@@ -318,7 +318,7 @@ fn t_ball2_at_center_different_radius_inside() {
let mut x = [-0.9, -0.85];
let center = [-0.8, -1.1];
     let ball = Ball2::new(Some(&center), radius);
- ball.project(&mut x);
+ ball.project(&mut x).unwrap();
unit_test_utils::assert_nearly_equal_array(&[-0.9, -0.85], &x, 1e-10, 1e-12, "wrong result");
}
@@ -329,7 +329,7 @@ fn t_ball2_elsewhere() {
let mut x = [2.0, 2.0];
     let ball = Ball2::new(Some(&center[..]), radius);
- ball.project(&mut x);
+ ball.project(&mut x).unwrap();
let expected_proj_element = std::f64::consts::FRAC_1_SQRT_2 + 1.;
unit_test_utils::assert_nearly_equal_array(
@@ -346,7 +346,7 @@ fn t_no_constraints() {
let mut x = [1.0, 2.0, 3.0];
let whole_space = NoConstraints::new();
- whole_space.project(&mut x);
+ whole_space.project(&mut x).unwrap();
unit_test_utils::assert_nearly_equal_array(&[1., 2., 3.], &x, 1e-10, 1e-15, "x is wrong");
}
@@ -370,7 +370,7 @@ fn t_cartesian_product_constraints_wrong_vector_dim() {
.add_constraint(3, ball1)
.add_constraint(10, ball2);
let mut x = [0.0; 30];
- cart_prod.project(&mut x);
+ cart_prod.project(&mut x).unwrap();
}
#[test]
@@ -385,7 +385,7 @@ fn t_cartesian_product_constraints() {
.add_constraint(idx1, ball1)
.add_constraint(idx2, ball2);
let mut x = [3.0, 4.0, 5.0, 2.0, 1.0];
- cart_prod.project(&mut x);
+ cart_prod.project(&mut x).unwrap();
let r1 = crate::matrix_operations::norm2(&x[0..idx1]);
let r2 = crate::matrix_operations::norm2(&x[idx1..idx2]);
unit_test_utils::assert_nearly_equal(r1, radius1, 1e-8, 1e-12, "r1 is wrong");
@@ -416,7 +416,7 @@ fn t_cartesian_product_ball_and_rectangle() {
/* Projection */
let mut x = [-10.0, 0.5, 10.0, 0.01, -0.01, 0.1, 10.0, -1.0, 1.0];
- cart_prod.project(&mut x);
+ cart_prod.project(&mut x).unwrap();
unit_test_utils::assert_nearly_equal_array(
&x[0..3],
@@ -443,7 +443,7 @@ fn t_second_order_cone_case_i() {
let soc = SecondOrderCone::new(1.0);
let mut x = vec![1.0, 1.0, 1.42];
let x_copy = x.clone();
- soc.project(&mut x);
+ soc.project(&mut x).unwrap();
unit_test_utils::assert_nearly_equal_array(&x, &x_copy, 1e-10, 1e-12, "x has been modified");
}
@@ -452,7 +452,7 @@ fn t_second_order_cone_case_ii() {
let alpha = 0.5;
let soc = SecondOrderCone::new(alpha);
let mut x = vec![1.0, 1.0, -0.71];
- soc.project(&mut x);
+ soc.project(&mut x).unwrap();
let expected = vec![0.0; 3];
unit_test_utils::assert_nearly_equal_array(
&x,
@@ -468,7 +468,7 @@ fn t_second_order_cone_case_iii() {
let alpha = 1.5;
let soc = SecondOrderCone::new(alpha);
let mut x = vec![1.0, 1.0, 0.1];
- soc.project(&mut x);
+ soc.project(&mut x).unwrap();
// make sure the new `x` is in the cone
let norm_z = crate::matrix_operations::norm2(&x[..=1]);
assert!(norm_z <= alpha * x[2]);
@@ -496,7 +496,7 @@ fn t_second_order_cone_short_vector() {
let alpha = 1.0;
let soc = SecondOrderCone::new(alpha);
let mut _x = vec![1.0];
- soc.project(&mut _x);
+ soc.project(&mut _x).unwrap();
}
#[test]
@@ -516,7 +516,7 @@ fn t_cartesian_product_dimension() {
// let's do a projection to make sure this works
// Note: we've used the same set (finite_set), twice
let mut x = [-0.5, 1.1, 0.45, 0.55, 10.0, 10.0, -500.0, 1.0, 1.0, 1.0];
- cartesian.project(&mut x);
+ cartesian.project(&mut x).unwrap();
println!("X = {:#?}", x);
let sqrt_3_over_3 = 3.0_f64.sqrt() / 3.;
unit_test_utils::assert_nearly_equal_array(
@@ -552,7 +552,7 @@ fn t_cartesian_ball_no_constraint() {
.add_constraint(9, no_constraints);
assert_eq!(9, cartesian.dimension());
let mut x = [100., -200., 0.5, 1.5, 3.5, 1000., 5., -500., 2_000_000.];
- cartesian.project(&mut x);
+ cartesian.project(&mut x).unwrap();
let x_proj_ball = [0.869811089019176, 0.390566732942472, 0.911322376865767];
unit_test_utils::assert_nearly_equal_array(
&x[0..=1],
@@ -576,7 +576,7 @@ fn t_ball_inf_origin() {
let ball_inf = BallInf::new(None, 1.0);
let mut x = [0.0, -0.5, 0.5, 1.5, 3.5, 0.8, 1.1, -5.0, -10.0];
let x_correct = [0.0, -0.5, 0.5, 1.0, 1.0, 0.8, 1.0, -1.0, -1.0];
- ball_inf.project(&mut x);
+ ball_inf.project(&mut x).unwrap();
unit_test_utils::assert_nearly_equal_array(
&x_correct,
&x,
@@ -592,31 +592,31 @@ fn t_ball_inf_center() {
let xc = [5.0, -6.0];
let ball_inf = BallInf::new(Some(&xc), 1.5);
let mut x = [11.0, -0.5];
- ball_inf.project(&mut x);
+ ball_inf.project(&mut x).unwrap();
unit_test_utils::assert_nearly_equal_array(&[6.5, -4.5], &x, 1e-10, 1e-12, "upper right");
let mut x = [3.0, -7.0];
- ball_inf.project(&mut x);
+ ball_inf.project(&mut x).unwrap();
unit_test_utils::assert_nearly_equal_array(&[3.5, -7.0], &x, 1e-10, 1e-12, "left");
let mut x = [800.0, -5.0];
- ball_inf.project(&mut x);
+ ball_inf.project(&mut x).unwrap();
unit_test_utils::assert_nearly_equal_array(&[6.5, -5.0], &x, 1e-10, 1e-12, "right");
let mut x = [9.0, -10.0];
- ball_inf.project(&mut x);
+ ball_inf.project(&mut x).unwrap();
unit_test_utils::assert_nearly_equal_array(&[6.5, -7.5], &x, 1e-10, 1e-12, "down right");
let mut x = [3.0, 0.0];
- ball_inf.project(&mut x);
+ ball_inf.project(&mut x).unwrap();
unit_test_utils::assert_nearly_equal_array(&[3.5, -4.5], &x, 1e-10, 1e-12, "top left");
let mut x = [6.0, -5.0];
- ball_inf.project(&mut x);
+ ball_inf.project(&mut x).unwrap();
unit_test_utils::assert_nearly_equal_array(&[6.0, -5.0], &x, 1e-10, 1e-12, "inside");
let mut x = [5.0, -6.0];
- ball_inf.project(&mut x);
+ ball_inf.project(&mut x).unwrap();
unit_test_utils::assert_nearly_equal_array(&[5.0, -6.0], &x, 1e-10, 1e-12, "centre");
}
@@ -690,7 +690,7 @@ fn t_simplex_projection() {
let mut x = [1.0, 2.0, 3.0];
let alpha = 3.0;
let my_simplex = Simplex::new(alpha);
- my_simplex.project(&mut x);
+ my_simplex.project(&mut x).unwrap();
unit_test_utils::assert_nearly_equal(
crate::matrix_operations::sum(&x),
alpha,
@@ -712,7 +712,7 @@ fn t_simplex_projection_random_spam() {
let alpha_scale = 20.;
         let alpha = alpha_scale * rand::random::<f64>();
let simplex = Simplex::new(alpha);
- simplex.project(&mut x);
+ simplex.project(&mut x).unwrap();
println!("x = {:?}", x);
assert!(x.iter().all(|&xi| xi >= -1e-12));
unit_test_utils::assert_nearly_equal(
@@ -737,7 +737,7 @@ fn t_simplex_projection_random_optimality() {
         let alpha = alpha_scale * rand::random::<f64>();
let simplex = Simplex::new(alpha);
let y = z.clone();
- simplex.project(&mut z);
+ simplex.project(&mut z).unwrap();
for j in 0..n {
let w = alpha * (y[j] - z[j]) - crate::matrix_operations::inner_product(&z, &y)
+ crate::matrix_operations::norm2_squared(&z);
@@ -769,7 +769,7 @@ fn t_simplex_alpha_negative() {
fn t_simplex_empty_vector() {
let simplex = Simplex::new(1.0);
let mut x = [];
- simplex.project(&mut x);
+ simplex.project(&mut x).unwrap();
}
#[test]
@@ -786,7 +786,7 @@ fn t_ball1_random_optimality_conditions() {
x.copy_from_slice(&x_star);
         let radius = 5. * rand::random::<f64>();
let ball1 = Ball1::new(None, radius);
- ball1.project(&mut x_star);
+ ball1.project(&mut x_star).unwrap();
// make sure |x|_1 <= radius
assert!(
crate::matrix_operations::norm1(&x_star) <= radius * (1. + 1e-9),
@@ -835,7 +835,7 @@ fn t_ball1_random_optimality_conditions_centered() {
             .for_each(|xi| *xi = scale_xc * (2. * rand::random::<f64>() - 1.));
         let radius = 5. * rand::random::<f64>();
let ball1 = Ball1::new(Some(&xc), radius);
- ball1.project(&mut x);
+ ball1.project(&mut x).unwrap();
// x = x - xc
x.iter_mut()
.zip(xc.iter())
@@ -855,7 +855,7 @@ fn t_ball1_wrong_dimensions() {
let mut x = vec![3.0, 4.0, 5.0];
let radius = 1.0;
let ball1 = Ball1::new(Some(&xc), radius);
- ball1.project(&mut x);
+ ball1.project(&mut x).unwrap();
}
#[test]
@@ -864,8 +864,8 @@ fn t_sphere2_no_center() {
let mut x_out = [1.0, 1.0];
let mut x_in = [-0.3, -0.2];
let unit_sphere = Sphere2::new(None, radius);
- unit_sphere.project(&mut x_out);
- unit_sphere.project(&mut x_in);
+ unit_sphere.project(&mut x_out).unwrap();
+ unit_sphere.project(&mut x_in).unwrap();
let norm_out = crate::matrix_operations::norm2(&x_out);
let norm_in = crate::matrix_operations::norm2(&x_in);
unit_test_utils::assert_nearly_equal(radius, norm_out, 1e-10, 1e-12, "norm_out is not 1.0");
@@ -877,7 +877,7 @@ fn t_sphere2_no_center_projection_of_zero() {
let radius = 0.9;
let mut x = [0.0, 0.0];
let unit_sphere = Sphere2::new(None, radius);
- unit_sphere.project(&mut x);
+ unit_sphere.project(&mut x).unwrap();
let norm_result = crate::matrix_operations::norm2(&x);
unit_test_utils::assert_nearly_equal(radius, norm_result, 1e-10, 1e-12, "norm_out is not 1.0");
}
@@ -889,7 +889,7 @@ fn t_sphere2_center() {
let mut x = [1.0, 1.0];
     let unit_sphere = Sphere2::new(Some(&center), radius);
- unit_sphere.project(&mut x);
+ unit_sphere.project(&mut x).unwrap();
let mut x_minus_c = [0.0; 2];
x.iter()
.zip(center.iter())
@@ -909,7 +909,7 @@ fn t_sphere2_center_projection_of_center() {
let mut x = [-3.0, 5.0];
     let unit_sphere = Sphere2::new(Some(&center), radius);
- unit_sphere.project(&mut x);
+ unit_sphere.project(&mut x).unwrap();
let mut x_minus_c = [0.0; 2];
x.iter()
.zip(center.iter())
@@ -928,7 +928,7 @@ fn t_sphere2_empty_vector() {
let radius = 1.0;
let unit_sphere = Sphere2::new(None, radius);
let mut x = [];
- unit_sphere.project(&mut x);
+ unit_sphere.project(&mut x).unwrap();
}
#[test]
@@ -938,7 +938,7 @@ fn t_sphere2_center_wrong_dimension() {
let center = [1.0, 2.0, 3.0];
     let unit_sphere = Sphere2::new(Some(&center), radius);
let mut x = [1.0, 2.0];
- unit_sphere.project(&mut x);
+ unit_sphere.project(&mut x).unwrap();
}
#[test]
@@ -952,7 +952,7 @@ fn t_epigraph_squared_norm_inside() {
let epi = EpigraphSquaredNorm::new();
let mut x = [1., 2., 10.];
let x_correct = x.clone();
- epi.project(&mut x);
+ epi.project(&mut x).unwrap();
unit_test_utils::assert_nearly_equal_array(
&x_correct,
&x,
@@ -968,7 +968,7 @@ fn t_epigraph_squared_norm() {
for i in 0..100 {
let t = 0.01 * i as f64;
let mut x = [1., 2., 3., t];
- epi.project(&mut x);
+ epi.project(&mut x).unwrap();
let err = (matrix_operations::norm2_squared(&x[..3]) - x[3]).abs();
assert!(err < 1e-10, "wrong projection on epigraph of squared norm");
}
@@ -984,7 +984,7 @@ fn t_epigraph_squared_norm_correctness() {
1.680426686710711,
4.392630432414829,
];
- epi.project(&mut x);
+ epi.project(&mut x).unwrap();
unit_test_utils::assert_nearly_equal_array(
&x_correct,
&x,
@@ -1002,7 +1002,7 @@ fn t_affine_space() {
let b = vec![1., 2., -0.5];
let affine_set = AffineSpace::new(a, b);
let mut x = [1., -2., -0.3, 0.5];
- affine_set.project(&mut x);
+ affine_set.project(&mut x).unwrap();
let x_correct = [
1.888564346697095,
5.629857182200888,
@@ -1026,7 +1026,7 @@ fn t_affine_space_larger() {
let b = vec![1., -2., 3., 4.];
let affine_set = AffineSpace::new(a, b);
let mut x = [10., 11., -9., 4., 5.];
- affine_set.project(&mut x);
+ affine_set.project(&mut x).unwrap();
let x_correct = [
9.238095238095237,
-0.714285714285714,
@@ -1049,7 +1049,7 @@ fn t_affine_space_single_row() {
let b = vec![1.];
let affine_set = AffineSpace::new(a, b);
let mut x = [5., 6., 10., 25.];
- affine_set.project(&mut x);
+ affine_set.project(&mut x).unwrap();
let s = x.iter().sum();
unit_test_utils::assert_nearly_equal(1., s, 1e-12, 1e-14, "wrong sum");
}
@@ -1197,7 +1197,7 @@ fn t_ballp_at_origin_projection() {
let tol = 1e-16;
let max_iters: usize = 200;
let ball = BallP::new(None, radius, p, tol, max_iters);
- ball.project(&mut x);
+ ball.project(&mut x).unwrap();
assert!(is_norm_p_projection(&x0, &x, p, radius, 10_000));
}
@@ -1210,7 +1210,7 @@ fn t_ballp_at_origin_x_already_inside() {
let tol = 1e-16;
let max_iters: usize = 1200;
let ball = BallP::new(None, radius, p, tol, max_iters);
- ball.project(&mut x);
+ ball.project(&mut x).unwrap();
unit_test_utils::assert_nearly_equal_array(
&x0,
&x,
@@ -1229,7 +1229,7 @@ fn t_ballp_at_xc_projection() {
let tol = 1e-16;
let max_iters: usize = 200;
let ball = BallP::new(Some(&x_center), radius, p, tol, max_iters);
- ball.project(&mut x);
+ ball.project(&mut x).unwrap();
let nrm = (x
.iter()
diff --git a/src/constraints/zero.rs b/src/constraints/zero.rs
index 81064d33..fe9a1114 100644
--- a/src/constraints/zero.rs
+++ b/src/constraints/zero.rs
@@ -1,4 +1,5 @@
use super::Constraint;
+use crate::FunctionCallResult;
#[derive(Clone, Copy, Default)]
/// Set Zero, $\\{0\\}$
@@ -14,8 +15,9 @@ impl Zero {
impl Constraint for Zero {
/// Computes the projection on $\\{0\\}$, that is, $\Pi_{\\{0\\}}(x) = 0$
/// for all $x$
- fn project(&self, x: &mut [f64]) {
+ fn project(&self, x: &mut [f64]) -> FunctionCallResult {
x.iter_mut().for_each(|xi| *xi = 0.0);
+ Ok(())
}
fn is_convex(&self) -> bool {
diff --git a/src/core/fbs/fbs_engine.rs b/src/core/fbs/fbs_engine.rs
index 717e70c6..fc7392c4 100644
--- a/src/core/fbs/fbs_engine.rs
+++ b/src/core/fbs/fbs_engine.rs
@@ -42,22 +42,24 @@ where
FBSEngine { problem, cache }
}
- fn gradient_step(&mut self, u_current: &mut [f64]) {
- assert_eq!(
- Ok(()),
- (self.problem.gradf)(u_current, &mut self.cache.work_gradient_u),
- "The computation of the gradient of the cost failed miserably"
- );
+ fn gradient_step(&mut self, u_current: &mut [f64]) -> FunctionCallResult {
+ (self.problem.gradf)(u_current, &mut self.cache.work_gradient_u)?;
+ if !crate::matrix_operations::is_finite(&self.cache.work_gradient_u) {
+ return Err(SolverError::NotFiniteComputation(
+ "gradient evaluation returned a non-finite value during an FBS step",
+ ));
+ }
// take a gradient step: u_currect -= gamma * gradient
u_current
.iter_mut()
.zip(self.cache.work_gradient_u.iter())
.for_each(|(u, w)| *u -= self.cache.gamma * *w);
+ Ok(())
}
- fn projection_step(&mut self, u_current: &mut [f64]) {
- self.problem.constraints.project(u_current);
+ fn projection_step(&mut self, u_current: &mut [f64]) -> FunctionCallResult {
+ self.problem.constraints.project(u_current)
}
}
@@ -85,8 +87,13 @@ where
/// or the cost function panics.
fn step(&mut self, u_current: &mut [f64]) -> Result {
self.cache.work_u_previous.copy_from_slice(u_current); // cache the previous step
- self.gradient_step(u_current); // compute the gradient
- self.projection_step(u_current); // project
+ self.gradient_step(u_current)?; // compute the gradient
+ self.projection_step(u_current)?; // project
+ if !crate::matrix_operations::is_finite(u_current) {
+ return Err(SolverError::NotFiniteComputation(
+ "projected iterate contains a non-finite value during an FBS step",
+ ));
+ }
self.cache.norm_fpr =
matrix_operations::norm_inf_diff(u_current, &self.cache.work_u_previous);
diff --git a/src/core/fbs/fbs_optimizer.rs b/src/core/fbs/fbs_optimizer.rs
index d714ab67..fbdb62e6 100644
--- a/src/core/fbs/fbs_optimizer.rs
+++ b/src/core/fbs/fbs_optimizer.rs
@@ -124,7 +124,9 @@ where
(self.fbs_engine.problem.cost)(u, &mut cost_value)?;
if !matrix_operations::is_finite(u) || !cost_value.is_finite() {
- return Err(SolverError::NotFiniteComputation);
+ return Err(SolverError::NotFiniteComputation(
+ "final FBS iterate or cost is non-finite",
+ ));
}
// export solution status
diff --git a/src/core/fbs/tests.rs b/src/core/fbs/tests.rs
index b94425b1..7b8f49fb 100644
--- a/src/core/fbs/tests.rs
+++ b/src/core/fbs/tests.rs
@@ -48,7 +48,7 @@ fn t_solve_fbs_hard_failure_nan() {
let mut u = [-12., -160., 55.];
let mut optimizer = FBSOptimizer::new(problem, &mut fbs_cache).with_max_iter(10000);
let status = optimizer.solve(&mut u);
- assert_eq!(Err(SolverError::NotFiniteComputation), status);
+ assert!(matches!(status, Err(SolverError::NotFiniteComputation(_))));
}
#[test]
diff --git a/src/core/panoc/panoc_engine.rs b/src/core/panoc/panoc_engine.rs
index 63ad862d..385dd9d9 100644
--- a/src/core/panoc/panoc_engine.rs
+++ b/src/core/panoc/panoc_engine.rs
@@ -135,13 +135,14 @@ where
}
/// Computes a projection on `gradient_step`
- fn half_step(&mut self) {
+ fn half_step(&mut self) -> FunctionCallResult {
let cache = &mut self.cache;
// u_half_step ← projection(gradient_step)
cache.u_half_step.copy_from_slice(&cache.gradient_step);
- self.problem.constraints.project(&mut cache.u_half_step);
+ self.problem.constraints.project(&mut cache.u_half_step)?;
cache.gradient_step_u_half_step_diff_norm_sq =
matrix_operations::norm2_squared_diff(&cache.gradient_step, &cache.u_half_step);
+ Ok(())
}
/// Computes an LBFGS direction; updates `cache.direction_lbfgs`
@@ -179,7 +180,11 @@ where
// Compute the cost at the half step
(self.problem.cost)(&self.cache.u_half_step, &mut cost_u_half_step)?;
- debug_assert!(matrix_operations::is_finite(&[self.cache.cost_value]));
+ if !matrix_operations::is_finite(&[self.cache.cost_value, cost_u_half_step]) {
+ return Err(SolverError::NotFiniteComputation(
+ "cost evaluation returned a non-finite value during Lipschitz estimation",
+ ));
+ }
let mut it_lipschitz_search = 0;
@@ -195,11 +200,16 @@ where
// recompute the half step...
self.gradient_step(u_current); // updates self.cache.gradient_step
- self.half_step(); // updates self.cache.u_half_step
+ self.half_step()?; // updates self.cache.u_half_step
// recompute the cost at the half step
// update `cost_u_half_step`
(self.problem.cost)(&self.cache.u_half_step, &mut cost_u_half_step)?;
+ if !cost_u_half_step.is_finite() {
+ return Err(SolverError::NotFiniteComputation(
+ "half-step cost became non-finite during Lipschitz backtracking",
+ ));
+ }
// recompute the FPR and the square of its norm
self.compute_fpr(u_current);
@@ -254,10 +264,16 @@ where
// point `u_plus`
(self.problem.cost)(&self.cache.u_plus, &mut self.cache.cost_value)?;
(self.problem.gradf)(&self.cache.u_plus, &mut self.cache.gradient_u)?;
+ if !self.cache.cost_value.is_finite() || !matrix_operations::is_finite(&self.cache.gradient_u)
+ {
+ return Err(SolverError::NotFiniteComputation(
+ "line-search candidate produced a non-finite cost or gradient",
+ ));
+ }
self.cache_gradient_norm();
self.gradient_step_uplus(); // gradient_step ← u_plus - gamma * gradient_u
- self.half_step(); // u_half_step ← project(gradient_step)
+ self.half_step()?; // u_half_step ← project(gradient_step)
// Update the LHS of the line search condition
self.cache.lhs_ls = self.cache.cost_value - 0.5 * gamma * self.cache.gradient_u_norm_sq
@@ -271,9 +287,15 @@ where
u_current.copy_from_slice(&self.cache.u_half_step); // set u_current ← u_half_step
(self.problem.cost)(u_current, &mut self.cache.cost_value)?; // cost value
(self.problem.gradf)(u_current, &mut self.cache.gradient_u)?; // compute gradient
+ if !self.cache.cost_value.is_finite() || !matrix_operations::is_finite(&self.cache.gradient_u)
+ {
+ return Err(SolverError::NotFiniteComputation(
+ "first PANOC iterate produced a non-finite cost or gradient",
+ ));
+ }
self.cache_gradient_norm();
self.gradient_step(u_current); // updated self.cache.gradient_step
- self.half_step(); // updates self.cache.u_half_step
+ self.half_step()?; // updates self.cache.u_half_step
Ok(())
}
@@ -302,6 +324,11 @@ where
pub(crate) fn cost_value_at_best_half_step(&mut self) -> Result {
let mut cost = 0.0;
(self.problem.cost)(&self.cache.best_u_half_step, &mut cost)?;
+ if !cost.is_finite() {
+ return Err(SolverError::NotFiniteComputation(
+ "best cached half-step cost is non-finite",
+ ));
+ }
Ok(cost)
}
}
@@ -362,11 +389,17 @@ where
self.cache.reset();
(self.problem.cost)(u_current, &mut self.cache.cost_value)?; // cost value
self.estimate_loc_lip(u_current)?; // computes the gradient as well! (self.cache.gradient_u)
+ if !self.cache.cost_value.is_finite() || !matrix_operations::is_finite(&self.cache.gradient_u)
+ {
+ return Err(SolverError::NotFiniteComputation(
+ "initial PANOC cost or gradient is non-finite",
+ ));
+ }
self.cache_gradient_norm();
self.cache.gamma = GAMMA_L_COEFF / f64::max(self.cache.lipschitz_constant, MIN_L_ESTIMATE);
self.cache.sigma = (1.0 - GAMMA_L_COEFF) / (4.0 * self.cache.gamma);
self.gradient_step(u_current); // updated self.cache.gradient_step
- self.half_step(); // updates self.cache.u_half_step
+ self.half_step()?; // updates self.cache.u_half_step
Ok(())
}
@@ -478,7 +511,7 @@ mod tests {
.gradient_step
.copy_from_slice(&[40., 50.]);
- panoc_engine.half_step(); // u_half_step ← projection(gradient_step)
+ panoc_engine.half_step().unwrap(); // u_half_step ← projection(gradient_step)
unit_test_utils::assert_nearly_equal_array(
&[0.312_347_523_777_212, 0.390_434_404_721_515],
diff --git a/src/core/panoc/panoc_optimizer.rs b/src/core/panoc/panoc_optimizer.rs
index 98f87667..95f1a919 100644
--- a/src/core/panoc/panoc_optimizer.rs
+++ b/src/core/panoc/panoc_optimizer.rs
@@ -159,7 +159,9 @@ where
// check for possible NaN/inf
if !matrix_operations::is_finite(u) {
- return Err(SolverError::NotFiniteComputation);
+ return Err(SolverError::NotFiniteComputation(
+ "final PANOC iterate contains a non-finite value",
+ ));
}
// exit status
@@ -239,7 +241,7 @@ mod tests {
/* CHECK FEASIBILITY */
let mut u_project = [0.0; 2];
u_project.copy_from_slice(&u_solution);
- bounds.project(&mut u_project);
+ bounds.project(&mut u_project).unwrap();
unit_test_utils::assert_nearly_equal_array(
&u_solution,
&u_project,
diff --git a/src/lib.rs b/src/lib.rs
index e0f61bb0..01362d68 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -40,13 +40,55 @@
extern crate num;
+use std::fmt;
+
/// Exceptions/Errors that may arise while solving a problem
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum SolverError {
/// If the gradient or cost function cannot be evaluated
- Cost,
+ Cost(&'static str),
/// Computation failed and NaN/Infinite value was obtained
- NotFiniteComputation,
+ NotFiniteComputation(&'static str),
+ /// A projection could not be computed numerically
+ ProjectionFailed(&'static str),
+ /// A linear algebra operation failed
+ LinearAlgebraFailure(&'static str),
+ /// The solver reached an unexpected internal state
+ InvalidProblemState(&'static str),
+}
+
+impl fmt::Display for SolverError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ SolverError::Cost(reason) => {
+ write!(f, "cost or gradient evaluation failed: {}", reason)
+ }
+ SolverError::NotFiniteComputation(reason) => {
+ write!(f, "non-finite computation: {}", reason)
+ }
+ SolverError::ProjectionFailed(reason) => write!(f, "projection failed: {}", reason),
+ SolverError::LinearAlgebraFailure(reason) => {
+ write!(f, "linear algebra failure: {}", reason)
+ }
+ SolverError::InvalidProblemState(reason) => {
+ write!(f, "invalid internal problem state: {}", reason)
+ }
+ }
+ }
+}
+
+impl std::error::Error for SolverError {}
+
+impl From<crate::matrix_operations::MatrixError> for SolverError {
+ fn from(_: crate::matrix_operations::MatrixError) -> Self {
+ SolverError::LinearAlgebraFailure("matrix operation failed")
+ }
+}
+
+impl From<crate::cholesky_factorizer::CholeskyError> for SolverError {
+ fn from(_: crate::cholesky_factorizer::CholeskyError) -> Self {
+ SolverError::LinearAlgebraFailure("Cholesky factorization or solve failed")
+ }
}
/// Result of a function call (status)
From bfa66241133c45caa102397daab5dcf59b9f204e Mon Sep 17 00:00:00 2001
From: Pantelis Sopasakis
Date: Fri, 27 Mar 2026 19:42:22 +0000
Subject: [PATCH 04/12] cargo fmt
---
src/alm/alm_optimizer.rs | 41 ++++++++++++++++-----------------
src/constraints/affine_space.rs | 9 +++-----
src/core/panoc/panoc_engine.rs | 9 +++++---
3 files changed, 29 insertions(+), 30 deletions(-)
diff --git a/src/alm/alm_optimizer.rs b/src/alm/alm_optimizer.rs
index c7a99281..29cd1f34 100644
--- a/src/alm/alm_optimizer.rs
+++ b/src/alm/alm_optimizer.rs
@@ -764,13 +764,14 @@ where
// This function will panic if there is no akkt_tolerance
// This should never happen because we set the AKKT tolerance
// in the constructor and can never become `None` again
- let criterion_3 = cache
- .panoc_cache
- .akkt_tolerance
- .ok_or(SolverError::InvalidProblemState(
- "missing inner AKKT tolerance while checking the exit criterion",
- ))?
- <= self.epsilon_tolerance + SMALL_EPSILON;
+ let criterion_3 =
+ cache
+ .panoc_cache
+ .akkt_tolerance
+ .ok_or(SolverError::InvalidProblemState(
+ "missing inner AKKT tolerance while checking the exit criterion",
+ ))?
+ <= self.epsilon_tolerance + SMALL_EPSILON;
Ok(criterion_1 && criterion_2 && criterion_3)
}
@@ -811,12 +812,13 @@ where
fn update_inner_akkt_tolerance(&mut self) -> FunctionCallResult {
let cache = &mut self.alm_cache;
// epsilon_{nu+1} := max(epsilon, beta*epsilon_nu)
- let akkt_tolerance = cache
- .panoc_cache
- .akkt_tolerance
- .ok_or(SolverError::InvalidProblemState(
- "missing inner AKKT tolerance while updating it",
- ))?;
+ let akkt_tolerance =
+ cache
+ .panoc_cache
+ .akkt_tolerance
+ .ok_or(SolverError::InvalidProblemState(
+ "missing inner AKKT tolerance while updating it",
+ ))?;
cache.panoc_cache.set_akkt_tolerance(f64::max(
akkt_tolerance * self.epsilon_update_factor,
self.epsilon_tolerance,
@@ -992,14 +994,11 @@ where
.with_penalty(c)
.with_cost(cost);
if self.alm_problem.n1 > 0 {
- let status = status.with_lagrange_multipliers(
- self.alm_cache
- .y_plus
- .as_ref()
- .ok_or(SolverError::InvalidProblemState(
- "missing Lagrange multipliers at the ALM solution",
- ))?,
- );
+ let status = status.with_lagrange_multipliers(self.alm_cache.y_plus.as_ref().ok_or(
+ SolverError::InvalidProblemState(
+ "missing Lagrange multipliers at the ALM solution",
+ ),
+ )?);
Ok(status)
} else {
Ok(status)
diff --git a/src/constraints/affine_space.rs b/src/constraints/affine_space.rs
index 7ca78608..e783c1ee 100644
--- a/src/constraints/affine_space.rs
+++ b/src/constraints/affine_space.rs
@@ -86,12 +86,9 @@ impl Constraint for AffineSpace {
assert!(x.len() == n, "x has wrong dimension");
// Step 1: Compute e = Ax - b
- let a = ArrayView2::from_shape((self.n_rows, self.n_cols), &self.a_mat)
- .map_err(|_| {
- SolverError::InvalidProblemState(
- "failed to construct the affine-space matrix view",
- )
- })?;
+ let a = ArrayView2::from_shape((self.n_rows, self.n_cols), &self.a_mat).map_err(|_| {
+ SolverError::InvalidProblemState("failed to construct the affine-space matrix view")
+ })?;
let x_view = ArrayView1::from(&x[..]);
let b = ArrayView1::from(&self.b_vec[..]);
let e = a.dot(&x_view) - b;
diff --git a/src/core/panoc/panoc_engine.rs b/src/core/panoc/panoc_engine.rs
index 385dd9d9..554ca439 100644
--- a/src/core/panoc/panoc_engine.rs
+++ b/src/core/panoc/panoc_engine.rs
@@ -264,7 +264,8 @@ where
// point `u_plus`
(self.problem.cost)(&self.cache.u_plus, &mut self.cache.cost_value)?;
(self.problem.gradf)(&self.cache.u_plus, &mut self.cache.gradient_u)?;
- if !self.cache.cost_value.is_finite() || !matrix_operations::is_finite(&self.cache.gradient_u)
+ if !self.cache.cost_value.is_finite()
+ || !matrix_operations::is_finite(&self.cache.gradient_u)
{
return Err(SolverError::NotFiniteComputation(
"line-search candidate produced a non-finite cost or gradient",
@@ -287,7 +288,8 @@ where
u_current.copy_from_slice(&self.cache.u_half_step); // set u_current ← u_half_step
(self.problem.cost)(u_current, &mut self.cache.cost_value)?; // cost value
(self.problem.gradf)(u_current, &mut self.cache.gradient_u)?; // compute gradient
- if !self.cache.cost_value.is_finite() || !matrix_operations::is_finite(&self.cache.gradient_u)
+ if !self.cache.cost_value.is_finite()
+ || !matrix_operations::is_finite(&self.cache.gradient_u)
{
return Err(SolverError::NotFiniteComputation(
"first PANOC iterate produced a non-finite cost or gradient",
@@ -389,7 +391,8 @@ where
self.cache.reset();
(self.problem.cost)(u_current, &mut self.cache.cost_value)?; // cost value
self.estimate_loc_lip(u_current)?; // computes the gradient as well! (self.cache.gradient_u)
- if !self.cache.cost_value.is_finite() || !matrix_operations::is_finite(&self.cache.gradient_u)
+ if !self.cache.cost_value.is_finite()
+ || !matrix_operations::is_finite(&self.cache.gradient_u)
{
return Err(SolverError::NotFiniteComputation(
"initial PANOC cost or gradient is non-finite",
From 02c3d5043ebe41e75b16f5c43a70ccbfd23dd31f Mon Sep 17 00:00:00 2001
From: Pantelis Sopasakis
Date: Fri, 27 Mar 2026 21:01:40 +0000
Subject: [PATCH 05/12] [ci skip] update changelog files
---
CHANGELOG.md | 11 +++++++++--
open-codegen/CHANGELOG.md | 2 ++
2 files changed, 11 insertions(+), 2 deletions(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 03ccc2a9..5c09ca90 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -13,12 +13,19 @@ Note: This is the main Changelog file for the Rust solver. The Changelog file fo
--------------------- -->
## [v0.12.0] - Unreleased
+### Added
+
+- Richer Rust-side solver errors with human-readable reasons for projection failures, non-finite computations, linear algebra failures, and invalid internal solver states
+- Fallible constraint projections via `Constraint::project(...) -> FunctionCallResult`, with error propagation through FBS, PANOC, and ALM
+- Checked affine-space construction through `AffineSpace::try_new(...)` and `AffineSpaceError`
### Changed
- Rust solver supports generic float types
-- Expanded Rust constraint test coverage with constructor validation, boundary/idempotence checks, and additional `BallP` / epigraph projection cases
-- Swap the cross-platform timer dependency to web-time, remove instant-specific wasm feature wiring, update optimizer timing call sites to use `web_time::Instant`, keep existing native and wasm timing behavior without stdweb risk
+- Expanded Rust constraint and solver test coverage with constructor validation, boundary/idempotence checks, additional `BallP` / epigraph projection cases, and broader `f32`/`f64` coverage
+- Swapped the cross-platform timer dependency to `web-time`, removed the old `instant`-specific wasm feature wiring, and updated optimizer timing call sites to use `web_time::Instant`
+- Improved Rust-side error handling across constraints and core solvers so projection failures and invalid numerical states are reported explicitly instead of being silently flattened
+- Refined `BallP`, `EpigraphSquaredNorm`, and related constraint implementations and docs for stronger numerical robustness and clearer behavior