Merged
81 changes: 76 additions & 5 deletions src/easyscience/fitting/minimizers/minimizer_bumps.py
@@ -2,6 +2,9 @@
# SPDX-License-Identifier: BSD-3-Clause

import copy
import functools
import inspect
import warnings
from typing import Callable
from typing import List
from typing import Optional
@@ -28,6 +31,19 @@
FIT_AVAILABLE_IDS_FILTERED.remove('pt')


class _EvalCounter:
    def __init__(self, fn: Callable):
        self._fn = fn
        self.count = 0
        self.__name__ = getattr(fn, '__name__', self.__class__.__name__)
        self.__signature__ = inspect.signature(fn)
        functools.update_wrapper(self, fn)

    def __call__(self, *args, **kwargs):
        self.count += 1
        return self._fn(*args, **kwargs)
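For illustration, a minimal sketch of how such a wrapper counts objective calls; the model function and the loop below are stand-ins, not EasyScience API:

# Hypothetical usage of _EvalCounter (illustrative only).
def model(x, a):
    return a * x  # stand-in model function

counted = _EvalCounter(model)
for step in range(3):  # pretend optimizer loop: several trial evaluations per step
    counted(2.0, a=1.5)
    counted(2.0, a=1.6)

print(counted.count)  # 6 objective evaluations, although only 3 'steps' ran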
Contributor:

Thinking about this now, but you're tracking fit iterations by tracking how many times the model function is evaluated. This is not a correct representation of the number of fit iterations, as some fitting routines evaluate the model function multiple times per fitting step in order to best choose the next fitting step.
Optimizers like BUMPS, for example, run multiple model evaluations at the start of each fit.

This might be the best we CAN do, if we can't get the actual fit iterations from the backend engine, but it's worth mentioning.

Contributor:

You might want to add to the integration tests where we actually run the fits and check the iteration counters.

Member Author:

This is the best we can currently do.

The actual iteration counters might differ from machine to machine, from OS to OS, I don't think it's going to be easy to catch the right situation, but I'll give it a try

Contributor:

I just checked, the number of iterations used by Bumps is saved in the engine_result.nit:

[image: screenshot of the BUMPS engine_result showing the nit attribute]

Member Author:

I guess we may use nit, assuming BUMPS is consistent with it.
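For reference, a rough sketch of how `nit` could be folded in; `engine_result` and `eval_counter` are stand-in names here, and the `nit` attribute is assumed from the screenshot above rather than verified against the BUMPS API:

# Prefer the engine-reported iteration count when present, falling back to
# the objective-evaluation counter as a coarse proxy.
n_iterations = getattr(engine_result, 'nit', None)
if n_iterations is None:
    n_iterations = eval_counter.count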



class Bumps(MinimizerBase):
    """
    This is a wrapper to Bumps: https://bumps.readthedocs.io/
@@ -54,6 +70,7 @@
        """
        super().__init__(obj=obj, fit_function=fit_function, minimizer_enum=minimizer_enum)
        self._p_0 = {}
        self._eval_counter: Optional[_EvalCounter] = None

    @staticmethod
    def all_methods() -> List[str]:
@@ -148,7 +165,11 @@
        try:
            model_results = bumps_fit(problem, **method_dict, **minimizer_kwargs, **kwargs)
            self._set_parameter_fit_result(model_results, stack_status, problem._parameters)
            results = self._gen_fit_results(model_results)
            results = self._gen_fit_results(
                model_results,
                max_evaluations=max_evaluations,
                tolerance=tolerance,
            )
        except Exception as e:
            for key in self._cached_pars.keys():
                self._cached_pars[key].value = self._cached_pars_vals[key][0]
@@ -200,7 +221,8 @@
        :return: Callable to make a bumps Curve model
        :rtype: Callable
        """
        fit_func = self._generate_fit_function()
        fit_func = _EvalCounter(self._generate_fit_function())
        self._eval_counter = fit_func

        def _outer(obj):
            def _make_func(x, y, weights):
@@ -249,19 +271,45 @@
        if stack_status:
            global_object.stack.endMacro()

    def _gen_fit_results(self, fit_results, **kwargs) -> FitResults:
    def _gen_fit_results(
        self,
        fit_results,
        max_evaluations: Optional[int] = None,
        tolerance: Optional[float] = None,
        **kwargs,
    ) -> FitResults:
        """Convert fit results into the unified `FitResults` format.

        :param fit_results: Fit object which contains info on the fit
        :return: fit results container
        :rtype: FitResults
        """

        results = FitResults()

        for name, value in kwargs.items():
            if getattr(results, name, False):
                setattr(results, name, value)
        results.success = fit_results.success
        n_evaluations = None if self._eval_counter is None else self._eval_counter.count
        # BUMPS exposes `nit` as the last reported optimizer step index rather than the
        # total number of objective calls. We keep `n_evaluations` as objective-call
        # count for cross-backend consistency with LMFit (`nfev`) and DFO-LS (`nf`).
        n_iterations = getattr(fit_results, 'nit', None)
        # Convert the zero-based step index into the number of optimizer steps that have
        # actually been consumed against the configured BUMPS `steps` budget.
        n_steps_used = None if n_iterations is None else n_iterations + 1
        stopped_on_budget = max_evaluations is not None and (
            # For BUMPS, `max_evaluations` is forwarded as `steps`, so budget
            # exhaustion must be checked against consumed optimizer steps, not raw
            # objective evaluations, which can legitimately exceed the step budget.
            (n_steps_used is not None and n_steps_used >= max_evaluations)
            or (
                n_iterations is None
                and n_evaluations is not None
                and n_evaluations >= max_evaluations
            )
        )

        results.success = fit_results.success and not stopped_on_budget
        pars = self._cached_pars
        item = {}
        for index, name in enumerate(self._cached_model.pars.keys()):
@@ -275,6 +323,29 @@
        results.y_obs = self._cached_model.y
        results.y_calc = self.evaluate(results.x, minimizer_parameters=results.p)
        results.y_err = self._cached_model.dy
        results.n_evaluations = n_evaluations
        results.message = ''
        if stopped_on_budget:
            results.message = (
                f'Fit stopped: reached maximum optimizer steps ({max_evaluations}); '
                f'objective evaluated {n_evaluations} times'
            )
            if tolerance is None:
                warnings.warn(
                    f'Fit did not converge within the maximum optimizer steps of {max_evaluations} '
                    f'({n_evaluations} objective evaluations). '
                    'Consider increasing the maximum number of evaluations or adjusting the tolerance.',
                    UserWarning,
                )
            else:
                warnings.warn(
                    f'Fit did not reach the desired tolerance of {tolerance} within the maximum optimizer steps of {max_evaluations} '
                    f'({n_evaluations} objective evaluations). '
                    'Consider increasing the maximum number of evaluations or adjusting the tolerance.',
                    UserWarning,
                )

        # results.residual = results.y_obs - results.y_calc
        # results.goodness_of_fit = np.sum(results.residual**2)
        results.minimizer_engine = self.__class__
22 changes: 18 additions & 4 deletions src/easyscience/fitting/minimizers/minimizer_dfo.py
@@ -1,6 +1,7 @@
# SPDX-FileCopyrightText: 2026 EasyScience contributors <https://github.com/easyscience>
# SPDX-License-Identifier: BSD-3-Clause

import warnings
from typing import Callable
from typing import Dict
from typing import List
@@ -122,6 +123,10 @@ def fit(
            model_results = self._dfo_fit(self._cached_pars, model, **kwargs)
            self._set_parameter_fit_result(model_results, stack_status)
            results = self._gen_fit_results(model_results, weights)
        except FitError:
            for key in self._cached_pars.keys():
                self._cached_pars[key].value = self._cached_pars_vals[key][0]
            raise
Contributor:

Codecov says this misses a test :)

Member Author:

Test added. You know we shouldn't really aim for 100% coverage? :)

        except Exception as e:
            for key in self._cached_pars.keys():
                self._cached_pars[key].value = self._cached_pars_vals[key][0]
@@ -208,7 +213,11 @@ def _gen_fit_results(self, fit_results, weights, **kwargs) -> FitResults:
        for name, value in kwargs.items():
            if getattr(results, name, False):
                setattr(results, name, value)
        results.success = not bool(fit_results.flag)
        # DFO-LS stores fixed exit-code constants on each result object;
        # EXIT_SUCCESS is 0 and EXIT_MAXFUN_WARNING keeps a different flag value.
        results.success = fit_results.flag == fit_results.EXIT_SUCCESS
Contributor:

Can you explain this? What is fit_results.EXIT_SUCCESS?

Contributor:

And should we raise a warning if the fit did not succeed?

Member Author:

In DFO-LS, EXIT_SUCCESS is 0 when “objective is sufficiently small” or “rho has reached rhoend”. Positive codes are warning exits, and negative codes are hard errors.

Whether to raise a warning: not for every non-success. Hard failures already become an exception in minimizer_dfo, so warning on those would be redundant. But for warning-style exits that still return results, especially EXIT_MAXFUN_WARNING, adding a UserWarning would be reasonable if we want consistency with BUMPS, which warns when it hits the evaluation budget.

Member Author:

Added warning and unit test

Contributor:

Okay, but what about the fit_results.flag? I assume this can be other values than 0. Do we risk this returning True when the flag and EXIT_SUCCESS are identical values different from 0? What is EXIT_SUCCESS value when we hit a EXIT_MAXFUN_WARNING?

Member Author:

No, there is no risk from the value being nonzero by itself.
This is a straight equality check:

results.success = fit_results.flag == fit_results.EXIT_SUCCESS

It only returns True when the flag exactly matches the success constant. It does not depend on success being 0 specifically. Even if a library used 137 for success, this comparison would still be correct as long as fit_results.flag and fit_results.EXIT_SUCCESS were both 137 for a real success.

For DFO we have:

EXIT_SUCCESS = 0
EXIT_MAXFUN_WARNING = 1
EXIT_SLOW_WARNING = 2
EXIT_FALSE_SUCCESS_WARNING = 3

hard errors have negative values

The line does what we want:
normal success -> True
maxfun warning -> False
any other non-success flag -> False

I added a comment above the assignment to spell it out.

Contributor:

But what about fit_results.flag, what values can that take? Only 0 and 1?

Member Author:

> But what about fit_results.flag, what values can that take? Only 0 and 1?

Line 271: if results.flag is either 0 (success) or 1 (MAXFUN_WARNING), the DFO results object is returned. Otherwise the fit is assumed to have failed and a FitError is raised.

So in line 219, where this is actually being checked, fit_results.flag is only 0 or 1.

Contributor (@damskii9992, Apr 22, 2026):

Ahh, now I think I get it. DFO sets fit_results.flag to some integer depending on if the fit failed (negative), succeeded cleanly (0) or succeeded with a warning (positive). To remember what each of these integers means, they're also assigned to the attributes EXIT_SUCCESS, EXIT_MAXFUN_WARNING etc.
In other words, the flag attribute is a variable and the EXIT attributes are static constants.
So when you compare, you're just checking the flag against the static attribute.
I guess this is done so that they can change the flag integers for different fit terminations without breaking code, or for more easily readable syntax... Hmm, interesting.
Could you boil my realization down and add as a comment? Either instead of your comment or as an addendum :)
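To make that realization concrete, a small self-contained sketch of the pattern; `FakeDfolsResult` is invented here, with the exit-code values taken from the discussion above rather than from the DFO-LS source:

class FakeDfolsResult:
    # Named constants document what each integer means; callers compare
    # the variable `flag` against them instead of hard-coding integers.
    EXIT_SUCCESS = 0  # clean convergence
    EXIT_MAXFUN_WARNING = 1  # stopped on the evaluation budget
    EXIT_SLOW_WARNING = 2  # progress too slow
    EXIT_FALSE_SUCCESS_WARNING = 3  # suspected false convergence
    # negative values are hard errors

    def __init__(self, flag: int):
        self.flag = flag

res = FakeDfolsResult(flag=1)
print(res.flag == res.EXIT_SUCCESS)  # False: this run hit the budget
print(res.flag == res.EXIT_MAXFUN_WARNING)  # True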

        if fit_results.flag == fit_results.EXIT_MAXFUN_WARNING:
            warnings.warn(str(fit_results.msg), UserWarning)

        pars = {}
        for p_name, par in self._cached_pars.items():
@@ -220,11 +229,14 @@ def _gen_fit_results(self, fit_results, weights, **kwargs) -> FitResults:
        results.y_obs = self._cached_model.y
        results.y_calc = self.evaluate(results.x, minimizer_parameters=results.p)
        results.y_err = weights
        results.n_evaluations = int(fit_results.nf)
        results.message = str(fit_results.msg)
        # results.residual = results.y_obs - results.y_calc
        # results.goodness_of_fit = fit_results.f

        results.minimizer_engine = self.__class__
        results.fit_args = None
        results.engine_result = fit_results
        # results.check_sanity()

        return results
@@ -258,10 +270,12 @@ def _dfo_fit(

        results = dfols.solve(model, pars_values, bounds=bounds, **kwargs)

        if 'Success' not in results.msg:
            raise FitError(f'Fit failed with message: {results.msg}')
        # DFO-LS uses EXIT_MAXFUN_WARNING when it stops on the evaluation budget;
        # we still return the partial fit result and let the unified result mark it as non-success.
        if results.flag in {results.EXIT_SUCCESS, results.EXIT_MAXFUN_WARNING}:
Contributor:

Could you also explain this? Maybe add a comment?

Member Author:

Added comment

            return results

        return results
        raise FitError(f'Fit failed with message: {results.msg}')

    @staticmethod
    def _prepare_kwargs(
5 changes: 5 additions & 0 deletions src/easyscience/fitting/minimizers/minimizer_lmfit.py
@@ -1,6 +1,7 @@
# SPDX-FileCopyrightText: 2026 EasyScience contributors <https://github.com/easyscience>
# SPDX-License-Identifier: BSD-3-Clause

import warnings
from typing import Callable
from typing import List
from typing import Optional
@@ -298,6 +299,10 @@ def _gen_fit_results(self, fit_results: ModelResult, **kwargs) -> FitResults:
        # results.goodness_of_fit = fit_results.chisqr
        results.y_calc = fit_results.best_fit
        results.y_err = 1 / fit_results.weights
        results.n_evaluations = fit_results.nfev
        results.message = fit_results.message
Contributor:

We should probably either raise an error or a warning when max_evaluations have been reached.

Member Author:

As mentioned earlier, I would prefer to let the caller decide on what to do with this information. Let's spit a warning and be gracious wrt. max_evaluations.

        if fit_results.success is False and fit_results.message:
            warnings.warn(str(fit_results.message), UserWarning)
        results.minimizer_engine = self.__class__
        results.fit_args = None

39 changes: 38 additions & 1 deletion src/easyscience/fitting/minimizers/utils.py
@@ -20,6 +20,8 @@ class FitResults:
        'y_obs',
        'y_calc',
        'y_err',
        'n_evaluations',
        'message',
        'engine_result',
        'total_results',
    ]
@@ -35,9 +37,44 @@ def __init__(self):
        self.y_obs = np.ndarray([])
        self.y_calc = np.ndarray([])
        self.y_err = np.ndarray([])
        self.n_evaluations = None
        self.message = ''
        self.engine_result = None
        self.total_results = None

    def __repr__(self) -> str:
        engine_name = self.minimizer_engine.__name__ if self.minimizer_engine else None
        try:
            chi2_val = self.chi2
            reduced_val = self.reduced_chi2
            if not np.isfinite(chi2_val) or not np.isfinite(reduced_val):
                raise ValueError
            chi2 = f'{chi2_val:.4g}'
            reduced = f'{reduced_val:.4g}'
        except Exception:
            chi2 = 'N/A'
            reduced = 'N/A'

        try:
            n_points = len(self.x)
        except TypeError:
            n_points = 0

        lines = [
            f'FitResults(success={self.success}',
            f' n_pars={self.n_pars}, n_points={n_points}',
            f' chi2={chi2}, reduced_chi2={reduced}',
            f' n_evaluations={self.n_evaluations}',
            f' minimizer={engine_name}',
        ]
        if self.message:
            lines.append(f" message='{self.message}'")
        if self.p:
            par_str = ', '.join(f'{k}={v:.4g}' for k, v in self.p.items())
            lines.append(f' parameters={{{par_str}}}')
        lines.append(')')
        return '\n'.join(lines)

    @property
    def n_pars(self):
        return len(self.p)
@@ -51,7 +88,7 @@ def chi2(self):
        return ((self.residual / self.y_err) ** 2).sum()

    @property
    def reduced_chi(self):
    def reduced_chi2(self):
        return self.chi2 / (len(self.x) - self.n_pars)
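As a quick worked check of the two statistics above, on made-up numbers rather than any real fit:

import numpy as np

residual = np.array([0.1, -0.2, 0.05, 0.15])  # y_obs - y_calc (invented)
y_err = np.array([0.1, 0.1, 0.1, 0.1])
n_pars = 2

chi2 = ((residual / y_err) ** 2).sum()  # 1 + 4 + 0.25 + 2.25 = 7.5
reduced_chi2 = chi2 / (len(residual) - n_pars)  # 7.5 / (4 - 2) = 3.75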


2 changes: 2 additions & 0 deletions src/easyscience/fitting/multi_fitter.py
@@ -127,6 +127,8 @@ def _post_compute_reshaping(
            current_results.minimizer_engine = fit_result_obj.minimizer_engine
            current_results.p = fit_result_obj.p
            current_results.p0 = fit_result_obj.p0
            current_results.n_evaluations = fit_result_obj.n_evaluations
            current_results.message = fit_result_obj.message
            current_results.x = this_x
            current_results.y_obs = y[idx]
            current_results.y_calc = np.reshape(