pyerrors.fits

import gc
from collections.abc import Sequence
import warnings
import numpy as np
import autograd.numpy as anp
import scipy.optimize
import scipy.stats
import scipy.linalg
import matplotlib.pyplot as plt
from matplotlib import gridspec
from scipy.odr import ODR, Model, RealData
import iminuit
from autograd import jacobian as auto_jacobian
from autograd import hessian as auto_hessian
from autograd import elementwise_grad as egrad
from numdifftools import Jacobian as num_jacobian
from numdifftools import Hessian as num_hessian
from .obs import Obs, derived_observable, covariance, cov_Obs


class Fit_result(Sequence):
    """Represents fit results.

    Attributes
    ----------
    fit_parameters : list
        results for the individual fit parameters,
        also accessible via indices.
    chisquare_by_dof : float
        reduced chisquare.
    p_value : float
        p-value of the fit
    t2_p_value : float
        Hotelling t-squared p-value for correlated fits.
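
    Examples
    --------
    A minimal, illustrative sketch of how a `Fit_result` is typically used
    (assuming `import pyerrors as pe`; `x`, `y` and `func` are placeholders):

    ```python
    fit_res = pe.fits.least_squares(x, y, func)
    fit_res.gm()       # error estimation for all fit parameters
    print(fit_res)     # goodness-of-fit summary and parameter list
    print(fit_res[0])  # individual parameters are accessible via indices
    ```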
    """

    def __init__(self):
        self.fit_parameters = None

    def __getitem__(self, idx):
        return self.fit_parameters[idx]

    def __len__(self):
        return len(self.fit_parameters)

    def gamma_method(self, **kwargs):
        """Apply the gamma method to all fit parameters"""
        [o.gamma_method(**kwargs) for o in self.fit_parameters]

    gm = gamma_method

    def __str__(self):
        my_str = 'Goodness of fit:\n'
        if hasattr(self, 'chisquare_by_dof'):
            my_str += '\u03C7\u00b2/d.o.f. = ' + f'{self.chisquare_by_dof:2.6f}' + '\n'
        elif hasattr(self, 'residual_variance'):
            my_str += 'residual variance = ' + f'{self.residual_variance:2.6f}' + '\n'
        if hasattr(self, 'chisquare_by_expected_chisquare'):
            my_str += '\u03C7\u00b2/\u03C7\u00b2exp  = ' + f'{self.chisquare_by_expected_chisquare:2.6f}' + '\n'
        if hasattr(self, 'p_value'):
            my_str += 'p-value   = ' + f'{self.p_value:2.4f}' + '\n'
        if hasattr(self, 't2_p_value'):
            my_str += 't\u00B2p-value = ' + f'{self.t2_p_value:2.4f}' + '\n'
        my_str += 'Fit parameters:\n'
        for i_par, par in enumerate(self.fit_parameters):
            my_str += str(i_par) + '\t' + ' ' * int(par >= 0) + str(par).rjust(int(par < 0.0)) + '\n'
        return my_str

    def __repr__(self):
        m = max(map(len, list(self.__dict__.keys()))) + 1
        return '\n'.join([key.rjust(m) + ': ' + repr(value) for key, value in sorted(self.__dict__.items())])


def least_squares(x, y, func, priors=None, silent=False, **kwargs):
    r'''Performs a non-linear fit to y = func(x).

    Parameters
    ----------
    For an uncombined fit:

    x : list
        list of floats.
    y : list
        list of Obs.
    func : object
        fit function, has to be of the form

        ```python
        import autograd.numpy as anp

        def func(a, x):
            return a[0] + a[1] * x + a[2] * anp.sinh(x)
        ```

        For multiple x values func can be of the form

        ```python
        def func(a, x):
            (x1, x2) = x
            return a[0] * x1 ** 2 + a[1] * x2
        ```
        It is important that all numpy functions refer to autograd.numpy, otherwise the differentiation
        will not work.

    OR For a combined fit:

    x : dict
        dict of lists.
    y : dict
        dict of lists of Obs.
    funcs : dict
        dict of objects
        fit functions have to be of the form (here a[0] is the common fit parameter)
        ```python
        import autograd.numpy as anp
        funcs = {"a": func_a,
                 "b": func_b}

        def func_a(a, x):
            return a[1] * anp.exp(-a[0] * x)

        def func_b(a, x):
            return a[2] * anp.exp(-a[0] * x)
        ```

        It is important that all numpy functions refer to autograd.numpy, otherwise the differentiation
        will not work.

    priors : dict or list, optional
        priors can either be a dictionary with integer keys and the corresponding priors as values or
        a list with an entry for every parameter in the fit. The entries can either be
        Obs (e.g. results from a previous fit) or strings containing a value and an error formatted like
        0.548(23), 500(40) or 0.5(0.4)
    silent : bool, optional
        If true all output to the console is omitted (default False).
    initial_guess : list
        can provide an initial guess for the input parameters. Relevant for
        non-linear fits with many parameters. In case of correlated fits the guess is used to perform
        an uncorrelated fit which then serves as guess for the correlated fit.
    method : str, optional
        can be used to choose an alternative method for the minimization of chisquare.
        The possible methods are the ones which can be used for scipy.optimize.minimize and
        migrad of iminuit. If no method is specified, Levenberg-Marquardt is used.
        Reliable alternatives are migrad, Powell and Nelder-Mead.
    tol : float, optional
        can be used (only for combined fits and methods other than Levenberg-Marquardt) to set the tolerance for convergence
        to a different value. Larger values speed up convergence at the cost of a larger error on the fitted parameters
        (and possibly invalid estimates for parameter uncertainties), smaller values yield more accurate parameter values.
        The stopping criterion depends on the method, e.g. migrad: edm_max = 0.002 * tol * errordef (EDM criterion: edm < edm_max)
    correlated_fit : bool
        If True, use the full inverse covariance matrix in the definition of the chisquare cost function.
        For details about how the covariance matrix is estimated see `pyerrors.obs.covariance`.
        In practice the correlation matrix is Cholesky decomposed and inverted (instead of the covariance matrix).
        This procedure should be numerically more stable as the correlation matrix is typically better conditioned (Jacobi preconditioning).
        At the moment this option only works for `priors=None` and when no `method` is given.
    expected_chisquare : bool
        If True estimates the expected chisquare which is
        corrected by effects caused by correlated input data (default False).
    resplot : bool
        If True, a plot which displays fit, data and residuals is generated (default False).
    qqplot : bool
        If True, a quantile-quantile plot of the fit result is generated (default False).
    num_grad : bool
        Use numerical differentiation instead of automatic differentiation to perform the error propagation (default False).

    Returns
    -------
    output : Fit_result
        Parameters and information on the fitted result.
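
    Examples
    --------
    A minimal, illustrative sketch (assuming `import pyerrors as pe`; the
    ensemble name and the data values are made up):

    ```python
    import numpy as np
    import autograd.numpy as anp
    import pyerrors as pe

    def func(a, x):
        return a[0] * anp.exp(-a[1] * x)

    x = np.arange(1, 6)
    y = [pe.pseudo_Obs(2.0 * np.exp(-0.3 * xi), 0.02, 'ens') for xi in x]

    fit_res = pe.fits.least_squares(x, y, func)
    fit_res.gm()  # estimate the errors of the fit parameters
    print(fit_res)
    ```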
    '''
    output = Fit_result()

    if (type(x) == dict and type(y) == dict and type(func) == dict):
        xd = {key: anp.asarray(x[key]) for key in x}
        yd = y
        funcd = func
        output.fit_function = func
    elif (type(x) == dict or type(y) == dict or type(func) == dict):
        raise TypeError("All arguments have to be dictionaries in order to perform a combined fit.")
    else:
        x = np.asarray(x)
        xd = {"": x}
        yd = {"": y}
        funcd = {"": func}
        output.fit_function = func

    if kwargs.get('num_grad') is True:
        jacobian = num_jacobian
        hessian = num_hessian
    else:
        jacobian = auto_jacobian
        hessian = auto_hessian

    key_ls = sorted(list(xd.keys()))

    if sorted(list(yd.keys())) != key_ls:
        raise Exception('x and y dictionaries do not contain the same keys.')

    if sorted(list(funcd.keys())) != key_ls:
        raise Exception('x and func dictionaries do not contain the same keys.')

    x_all = np.concatenate([np.array(xd[key]) for key in key_ls])
    y_all = np.concatenate([np.array(yd[key]) for key in key_ls])

    y_f = [o.value for o in y_all]
    dy_f = [o.dvalue for o in y_all]

    if len(x_all.shape) > 2:
        raise Exception('Unknown format for x values')

    if np.any(np.asarray(dy_f) <= 0.0):
        raise Exception('No y errors available, run the gamma method first.')

    # number of fit parameters
214    n_parms_ls = []
215    for key in key_ls:
216        if not callable(funcd[key]):
217            raise TypeError('func (key=' + key + ') is not a function.')
218        if np.asarray(xd[key]).shape[-1] != len(yd[key]):
219            raise Exception('x and y input (key=' + key + ') do not have the same length')
220        for i in range(100):
221            try:
222                funcd[key](np.arange(i), x_all.T[0])
223            except TypeError:
224                continue
225            except IndexError:
226                continue
227            else:
228                break
229        else:
230            raise RuntimeError("Fit function (key=" + key + ") is not valid.")
231        n_parms = i
232        n_parms_ls.append(n_parms)
233    n_parms = max(n_parms_ls)
234    if not silent:
235        print('Fit with', n_parms, 'parameter' + 's' * (n_parms > 1))
236
237    if priors is not None:
238        if isinstance(priors, (list, np.ndarray)):
239            if n_parms != len(priors):
240                raise ValueError("'priors' does not have the correct length.")
241
242            loc_priors = []
243            for i_n, i_prior in enumerate(priors):
244                if isinstance(i_prior, Obs):
245                    loc_priors.append(i_prior)
246                elif isinstance(i_prior, str):
247                    loc_val, loc_dval = _extract_val_and_dval(i_prior)
248                    loc_priors.append(cov_Obs(loc_val, loc_dval ** 2, '#prior' + str(i_n) + f"_{np.random.randint(2147483647):010d}"))
249                else:
250                    raise TypeError("Prior entries need to be 'Obs' or 'str'.")
251
252            prior_mask = np.arange(len(priors))
253            output.priors = loc_priors
254
255        elif isinstance(priors, dict):
256            loc_priors = []
257            prior_mask = []
258            output.priors = {}
259            for pos, prior in priors.items():
260                if isinstance(pos, int):
261                    prior_mask.append(pos)
262                else:
263                    raise TypeError("Prior position needs to be an integer.")
264                if isinstance(prior, Obs):
265                    loc_priors.append(prior)
266                elif isinstance(prior, str):
267                    loc_val, loc_dval = _extract_val_and_dval(prior)
268                    loc_priors.append(cov_Obs(loc_val, loc_dval ** 2, '#prior' + str(pos) + f"_{np.random.randint(2147483647):010d}"))
269                else:
270                    raise TypeError("Prior entries need to be 'Obs' or 'str'.")
271                output.priors[pos] = loc_priors[-1]
272            if max(prior_mask) >= n_parms:
273                raise ValueError("Prior position out of range.")
274        else:
            raise TypeError("Unknown type for `priors`.")

        p_f = [o.value for o in loc_priors]
        dp_f = [o.dvalue for o in loc_priors]
        if np.any(np.asarray(dp_f) <= 0.0):
            raise Exception('No prior errors available, run the gamma method first.')
    else:
        p_f = dp_f = np.array([])
        prior_mask = []
        loc_priors = []

    if 'initial_guess' in kwargs:
        x0 = kwargs.get('initial_guess')
        if len(x0) != n_parms:
            raise Exception('Initial guess does not have the correct length: %d vs. %d' % (len(x0), n_parms))
    else:
        x0 = [0.1] * n_parms

    if priors is None:
        def general_chisqfunc_uncorr(p, ivars, pr):
            model = anp.concatenate([anp.array(funcd[key](p, xd[key])).reshape(-1) for key in key_ls])
            return (ivars - model) / dy_f
    else:
        def general_chisqfunc_uncorr(p, ivars, pr):
            model = anp.concatenate([anp.array(funcd[key](p, xd[key])).reshape(-1) for key in key_ls])
            return anp.concatenate(((ivars - model) / dy_f, (p[prior_mask] - pr) / dp_f))

    def chisqfunc_uncorr(p):
        return anp.sum(general_chisqfunc_uncorr(p, y_f, p_f) ** 2)

    if kwargs.get('correlated_fit') is True:
        corr = covariance(y_all, correlation=True, **kwargs)
        covdiag = np.diag(1 / np.asarray(dy_f))
        condn = np.linalg.cond(corr)
        if condn > 0.1 / np.finfo(float).eps:
            raise Exception(f"Cannot invert correlation matrix as its condition number exceeds machine precision ({condn:1.2e})")
            warnings.warn("Correlation matrix may be ill-conditioned, condition number: %1.2e" % (condn), RuntimeWarning)
        chol = np.linalg.cholesky(corr)
        chol_inv = scipy.linalg.solve_triangular(chol, covdiag, lower=True)
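        # chol_inv = L^{-1} @ D^{-1} with Corr = L @ L.T and D = diag(dy_f), so
        # summing the squares of chol_inv @ (y - model) gives the correlated
        # chi^2 = r.T @ Cov^{-1} @ r, since Cov = D @ Corr @ D.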

        def general_chisqfunc(p, ivars, pr):
            model = anp.concatenate([anp.array(funcd[key](p, xd[key])).reshape(-1) for key in key_ls])
            return anp.concatenate((anp.dot(chol_inv, (ivars - model)), (p[prior_mask] - pr) / dp_f))

        def chisqfunc(p):
            return anp.sum(general_chisqfunc(p, y_f, p_f) ** 2)
    else:
        general_chisqfunc = general_chisqfunc_uncorr
        chisqfunc = chisqfunc_uncorr

    output.method = kwargs.get('method', 'Levenberg-Marquardt')
    if not silent:
        print('Method:', output.method)

    if output.method != 'Levenberg-Marquardt':
        if output.method == 'migrad':
            tolerance = 1e-4  # default value of 1e-1 set by iminuit can be problematic
            if 'tol' in kwargs:
                tolerance = kwargs.get('tol')
            fit_result = iminuit.minimize(chisqfunc_uncorr, x0, tol=tolerance)  # Stopping criterion 0.002 * tol * errordef
            if kwargs.get('correlated_fit') is True:
                fit_result = iminuit.minimize(chisqfunc, fit_result.x, tol=tolerance)
            output.iterations = fit_result.nfev
        else:
            tolerance = 1e-12
            if 'tol' in kwargs:
                tolerance = kwargs.get('tol')
            fit_result = scipy.optimize.minimize(chisqfunc_uncorr, x0, method=kwargs.get('method'), tol=tolerance)
            if kwargs.get('correlated_fit') is True:
                fit_result = scipy.optimize.minimize(chisqfunc, fit_result.x, method=kwargs.get('method'), tol=tolerance)
            output.iterations = fit_result.nit

        chisquare = fit_result.fun

    else:
        if 'tol' in kwargs:
            print('tol cannot be set for Levenberg-Marquardt')

        def chisqfunc_residuals_uncorr(p):
            return general_chisqfunc_uncorr(p, y_f, p_f)

        fit_result = scipy.optimize.least_squares(chisqfunc_residuals_uncorr, x0, method='lm', ftol=1e-15, gtol=1e-15, xtol=1e-15)
        if kwargs.get('correlated_fit') is True:

            def chisqfunc_residuals(p):
                return general_chisqfunc(p, y_f, p_f)

            fit_result = scipy.optimize.least_squares(chisqfunc_residuals, fit_result.x, method='lm', ftol=1e-15, gtol=1e-15, xtol=1e-15)

        chisquare = np.sum(fit_result.fun ** 2)
        assert np.isclose(chisquare, chisqfunc(fit_result.x), atol=1e-14)

        output.iterations = fit_result.nfev

    if not fit_result.success:
        raise Exception('The minimization procedure did not converge.')

    if x_all.shape[-1] - n_parms > 0:
        output.chisquare = chisquare
        output.dof = x_all.shape[-1] - n_parms + len(loc_priors)
        output.chisquare_by_dof = output.chisquare / output.dof
        output.p_value = 1 - scipy.stats.chi2.cdf(output.chisquare, output.dof)
    else:
        output.chisquare_by_dof = float('nan')

    output.message = fit_result.message
    if not silent:
        print(fit_result.message)
        print('chisquare/d.o.f.:', output.chisquare_by_dof)
        print('fit parameters', fit_result.x)

    def prepare_hat_matrix():
        hat_vector = []
        for key in key_ls:
            if (len(xd[key]) != 0):
                hat_vector.append(jacobian(funcd[key])(fit_result.x, xd[key]))
        hat_vector = [item for sublist in hat_vector for item in sublist]
        return hat_vector

    if kwargs.get('expected_chisquare') is True:
        if kwargs.get('correlated_fit') is not True:
            W = np.diag(1 / np.asarray(dy_f))
            cov = covariance(y_all)
            hat_vector = prepare_hat_matrix()
            A = W @ hat_vector
            P_phi = A @ np.linalg.pinv(A.T @ A) @ A.T
            expected_chisquare = np.trace((np.identity(x_all.shape[-1]) - P_phi) @ W @ cov @ W)
            output.chisquare_by_expected_chisquare = output.chisquare / expected_chisquare
            if not silent:
                print('chisquare/expected_chisquare:', output.chisquare_by_expected_chisquare)

    fitp = fit_result.x
    if np.any(np.asarray(dy_f) <= 0.0):
        raise Exception('No y errors available, run the gamma method first.')

    try:
        hess = hessian(chisqfunc)(fitp)
    except TypeError:
        raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None

    len_y = len(y_f)

    def chisqfunc_compact(d):
        return anp.sum(general_chisqfunc(d[:n_parms], d[n_parms: n_parms + len_y], d[n_parms + len_y:]) ** 2)

    jac_jac_y = hessian(chisqfunc_compact)(np.concatenate((fitp, y_f, p_f)))

    # Compute hess^{-1} @ jac_jac_y[:n_parms, n_parms:] using LAPACK dgesv
    try:
        deriv_y = -scipy.linalg.solve(hess, jac_jac_y[:n_parms, n_parms:])
    except np.linalg.LinAlgError:
        raise Exception("Cannot invert hessian matrix.")

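    # The factor (x_all[0] + eps) / (y_all[0].value + eps) evaluates to one but
    # makes the derived observable formally depend on its inputs; the actual
    # derivatives are supplied explicitly via man_grad.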
    result = []
    for i in range(n_parms):
        result.append(derived_observable(lambda x_all, **kwargs: (x_all[0] + np.finfo(np.float64).eps) / (y_all[0].value + np.finfo(np.float64).eps) * fitp[i], list(y_all) + loc_priors, man_grad=list(deriv_y[i])))

    output.fit_parameters = result

    # Hotelling t-squared p-value for correlated fits.
    if kwargs.get('correlated_fit') is True:
        n_cov = np.min(np.vectorize(lambda x_all: x_all.N)(y_all))
        output.t2_p_value = 1 - scipy.stats.f.cdf((n_cov - output.dof) / (output.dof * (n_cov - 1)) * output.chisquare,
                                                  output.dof, n_cov - output.dof)

    if kwargs.get('resplot') is True:
        for key in key_ls:
            residual_plot(xd[key], yd[key], funcd[key], result, title=key)

    if kwargs.get('qqplot') is True:
        for key in key_ls:
            qqplot(xd[key], yd[key], funcd[key], result, title=key)

    return output


def total_least_squares(x, y, func, silent=False, **kwargs):
    r'''Performs a non-linear fit to y = func(x) and returns a list of Obs corresponding to the fit parameters.

    Parameters
    ----------
    x : list
        list of Obs, or a tuple of lists of Obs
    y : list
        list of Obs. The dvalues of the Obs are used as x- and yerror for the fit.
    func : object
        func has to be of the form

        ```python
        import autograd.numpy as anp

        def func(a, x):
            return a[0] + a[1] * x + a[2] * anp.sinh(x)
        ```

        For multiple x values func can be of the form

        ```python
        def func(a, x):
            (x1, x2) = x
            return a[0] * x1 ** 2 + a[1] * x2
        ```

        It is important that all numpy functions refer to autograd.numpy, otherwise the differentiation
        will not work.
    silent : bool, optional
        If true all output to the console is omitted (default False).
    initial_guess : list
        can provide an initial guess for the input parameters. Relevant for non-linear
        fits with many parameters.
    expected_chisquare : bool
        If true prints the expected chisquare which is
        corrected by effects caused by correlated input data.
        This can take a while as the full correlation matrix
        has to be calculated (default False).
    num_grad : bool
        Use numerical differentiation instead of automatic differentiation to perform the error propagation (default False).

    Notes
    -----
    Based on the orthogonal distance regression module of scipy.

    Returns
    -------
    output : Fit_result
        Parameters and information on the fitted result.
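
    Examples
    --------
    A minimal, illustrative sketch (assuming `import pyerrors as pe`; the
    ensemble name and the data values are made up):

    ```python
    import pyerrors as pe

    ox = [pe.pseudo_Obs(xi, 0.01, 'ens') for xi in [1.0, 2.0, 3.0, 4.0]]
    oy = [pe.pseudo_Obs(0.5 * xi.value + 1.0, 0.02, 'ens') for xi in ox]

    def func(a, x):
        return a[0] + a[1] * x

    out = pe.fits.total_least_squares(ox, oy, func)
    out.gm()  # estimate the errors of the fit parameters
    ```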
    '''

    output = Fit_result()

    output.fit_function = func

    x = np.array(x)

    x_shape = x.shape

    if kwargs.get('num_grad') is True:
        jacobian = num_jacobian
        hessian = num_hessian
    else:
        jacobian = auto_jacobian
        hessian = auto_hessian

    if not callable(func):
        raise TypeError('func has to be a function.')

    for i in range(42):
        try:
            func(np.arange(i), x.T[0])
        except TypeError:
            continue
        except IndexError:
            continue
        else:
            break
    else:
        raise RuntimeError("Fit function is not valid.")

    n_parms = i
    if not silent:
        print('Fit with', n_parms, 'parameter' + 's' * (n_parms > 1))

    x_f = np.vectorize(lambda o: o.value)(x)
    dx_f = np.vectorize(lambda o: o.dvalue)(x)
    y_f = np.array([o.value for o in y])
    dy_f = np.array([o.dvalue for o in y])

    if np.any(np.asarray(dx_f) <= 0.0):
        raise Exception('No x errors available, run the gamma method first.')

    if np.any(np.asarray(dy_f) <= 0.0):
        raise Exception('No y errors available, run the gamma method first.')

    if 'initial_guess' in kwargs:
        x0 = kwargs.get('initial_guess')
        if len(x0) != n_parms:
            raise Exception('Initial guess does not have the correct length: %d vs. %d' % (len(x0), n_parms))
    else:
        x0 = [1] * n_parms

    data = RealData(x_f, y_f, sx=dx_f, sy=dy_f)
    model = Model(func)
    odr = ODR(data, model, x0, partol=np.finfo(np.float64).eps)
    odr.set_job(fit_type=0, deriv=1)
    out = odr.run()

    output.residual_variance = out.res_var

    output.method = 'ODR'

    output.message = out.stopreason

    output.xplus = out.xplus

    if not silent:
        print('Method: ODR')
        print(*out.stopreason)
        print('Residual variance:', output.residual_variance)

    if out.info > 3:
        raise Exception('The minimization procedure did not converge.')

    m = x_f.size

    def odr_chisquare(p):
        model = func(p[:n_parms], p[n_parms:].reshape(x_shape))
        chisq = anp.sum(((y_f - model) / dy_f) ** 2) + anp.sum(((x_f - p[n_parms:].reshape(x_shape)) / dx_f) ** 2)
        return chisq

    if kwargs.get('expected_chisquare') is True:
        W = np.diag(1 / np.asarray(np.concatenate((dy_f.ravel(), dx_f.ravel()))))

        if kwargs.get('covariance') is not None:
            cov = kwargs.get('covariance')
        else:
            cov = covariance(np.concatenate((y, x.ravel())))

        number_of_x_parameters = int(m / x_f.shape[-1])

        old_jac = jacobian(func)(out.beta, out.xplus)
        fused_row1 = np.concatenate((old_jac, np.concatenate((number_of_x_parameters * [np.zeros(old_jac.shape)]), axis=0)))
        fused_row2 = np.concatenate((jacobian(lambda x, y: func(y, x))(out.xplus, out.beta).reshape(x_f.shape[-1], x_f.shape[-1] * number_of_x_parameters), np.identity(number_of_x_parameters * old_jac.shape[0])))
        new_jac = np.concatenate((fused_row1, fused_row2), axis=1)

        A = W @ new_jac
        P_phi = A @ np.linalg.pinv(A.T @ A) @ A.T
        expected_chisquare = np.trace((np.identity(P_phi.shape[0]) - P_phi) @ W @ cov @ W)
        if expected_chisquare <= 0.0:
            warnings.warn("Negative expected_chisquare.", RuntimeWarning)
            expected_chisquare = np.abs(expected_chisquare)
        output.chisquare_by_expected_chisquare = odr_chisquare(np.concatenate((out.beta, out.xplus.ravel()))) / expected_chisquare
        if not silent:
            print('chisquare/expected_chisquare:',
                  output.chisquare_by_expected_chisquare)

    fitp = out.beta
    try:
        hess = hessian(odr_chisquare)(np.concatenate((fitp, out.xplus.ravel())))
    except TypeError:
        raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None

    def odr_chisquare_compact_x(d):
        model = func(d[:n_parms], d[n_parms:n_parms + m].reshape(x_shape))
        chisq = anp.sum(((y_f - model) / dy_f) ** 2) + anp.sum(((d[n_parms + m:].reshape(x_shape) - d[n_parms:n_parms + m].reshape(x_shape)) / dx_f) ** 2)
        return chisq

    jac_jac_x = hessian(odr_chisquare_compact_x)(np.concatenate((fitp, out.xplus.ravel(), x_f.ravel())))

    # Compute hess^{-1} @ jac_jac_x[:n_parms + m, n_parms + m:] using LAPACK dgesv
    try:
        deriv_x = -scipy.linalg.solve(hess, jac_jac_x[:n_parms + m, n_parms + m:])
    except np.linalg.LinAlgError:
        raise Exception("Cannot invert hessian matrix.")

    def odr_chisquare_compact_y(d):
        model = func(d[:n_parms], d[n_parms:n_parms + m].reshape(x_shape))
        chisq = anp.sum(((d[n_parms + m:] - model) / dy_f) ** 2) + anp.sum(((x_f - d[n_parms:n_parms + m].reshape(x_shape)) / dx_f) ** 2)
        return chisq

    jac_jac_y = hessian(odr_chisquare_compact_y)(np.concatenate((fitp, out.xplus.ravel(), y_f)))

    # Compute hess^{-1} @ jac_jac_y[:n_parms + m, n_parms + m:] using LAPACK dgesv
    try:
        deriv_y = -scipy.linalg.solve(hess, jac_jac_y[:n_parms + m, n_parms + m:])
    except np.linalg.LinAlgError:
        raise Exception("Cannot invert hessian matrix.")

    result = []
    for i in range(n_parms):
        result.append(derived_observable(lambda my_var, **kwargs: (my_var[0] + np.finfo(np.float64).eps) / (x.ravel()[0].value + np.finfo(np.float64).eps) * out.beta[i], list(x.ravel()) + list(y), man_grad=list(deriv_x[i]) + list(deriv_y[i])))

    output.fit_parameters = result

    output.odr_chisquare = odr_chisquare(np.concatenate((out.beta, out.xplus.ravel())))
    output.dof = x.shape[-1] - n_parms
    output.p_value = 1 - scipy.stats.chi2.cdf(output.odr_chisquare, output.dof)

    return output


def fit_lin(x, y, **kwargs):
    """Performs a linear fit to y = n + m * x and returns two Obs n, m.

    Parameters
    ----------
    x : list
        Can either be a list of floats in which case no xerror is assumed, or
        a list of Obs, where the dvalues of the Obs are used as xerror for the fit.
    y : list
        List of Obs, the dvalues of the Obs are used as yerror for the fit.

    Returns
    -------
    fit_parameters : list[Obs]
        List of fitted observables.
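
    Examples
    --------
    A minimal sketch (assuming `import pyerrors as pe`; data values are made up):

    ```python
    import pyerrors as pe

    x = [1.0, 2.0, 3.0, 4.0]
    y = [pe.pseudo_Obs(0.9 * xi + 0.1, 0.05, 'ens') for xi in x]
    n, m = pe.fits.fit_lin(x, y)  # intercept n and slope m as Obs
    n.gm(); m.gm()
    ```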
    """

    def f(a, x):
        y = a[0] + a[1] * x
        return y

    if all(isinstance(n, Obs) for n in x):
        out = total_least_squares(x, y, f, **kwargs)
        return out.fit_parameters
    elif all(isinstance(n, float) or isinstance(n, int) for n in x) or isinstance(x, np.ndarray):
        out = least_squares(x, y, f, **kwargs)
        return out.fit_parameters
    else:
        raise Exception('Unsupported types for x')


def qqplot(x, o_y, func, p, title=""):
    """Generates a quantile-quantile plot of the fit result which can be used to
       check if the residuals of the fit are Gaussian distributed.

    Returns
    -------
    None
    """

    residuals = []
    for i_x, i_y in zip(x, o_y):
        residuals.append((i_y - func(p, i_x)) / i_y.dvalue)
    residuals = sorted(residuals)
    my_y = [o.value for o in residuals]
    probplot = scipy.stats.probplot(my_y)
    my_x = probplot[0][0]
    plt.figure(figsize=(8, 8 / 1.618))
    plt.errorbar(my_x, my_y, fmt='o')
    fit_start = my_x[0]
    fit_stop = my_x[-1]
    samples = np.arange(fit_start, fit_stop, 0.01)
    plt.plot(samples, samples, 'k--', zorder=11, label='Standard normal distribution')
    plt.plot(samples, probplot[1][0] * samples + probplot[1][1], zorder=10, label='Least squares fit, r=' + str(np.around(probplot[1][2], 3)), marker='', ls='-')

    plt.xlabel('Theoretical quantiles')
    plt.ylabel('Ordered Values')
    plt.legend(title=title)
    plt.draw()


def residual_plot(x, y, func, fit_res, title=""):
    """Generates a plot which compares the fit to the data and displays the corresponding residuals

    For uncorrelated data the residuals are expected to be distributed ~N(0,1).

    Returns
    -------
    None
    """
    sorted_x = sorted(x)
    xstart = sorted_x[0] - 0.5 * (sorted_x[1] - sorted_x[0])
    xstop = sorted_x[-1] + 0.5 * (sorted_x[-1] - sorted_x[-2])
    x_samples = np.arange(xstart, xstop + 0.01, 0.01)

    plt.figure(figsize=(8, 8 / 1.618))
    gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1], wspace=0.0, hspace=0.0)
    ax0 = plt.subplot(gs[0])
    ax0.errorbar(x, [o.value for o in y], yerr=[o.dvalue for o in y], ls='none', fmt='o', capsize=3, markersize=5, label='Data')
    ax0.plot(x_samples, func([o.value for o in fit_res], x_samples), label='Fit', zorder=10, ls='-', ms=0)
    ax0.set_xticklabels([])
    ax0.set_xlim([xstart, xstop])
    ax0.legend(title=title)

    residuals = (np.asarray([o.value for o in y]) - func([o.value for o in fit_res], np.asarray(x))) / np.asarray([o.dvalue for o in y])
    ax1 = plt.subplot(gs[1])
    ax1.plot(x, residuals, 'ko', ls='none', markersize=5)
    ax1.tick_params(direction='out')
    ax1.tick_params(axis="x", bottom=True, top=True, labelbottom=True)
    ax1.axhline(y=0.0, ls='--', color='k', marker=" ")
    ax1.fill_between(x_samples, -1.0, 1.0, alpha=0.1, facecolor='k')
    ax1.set_xlim([xstart, xstop])
    ax1.set_ylabel('Residuals')
    plt.subplots_adjust(wspace=None, hspace=None)
    plt.draw()


def error_band(x, func, beta):
    """Calculate the error band for an array of sample values x, for given fit function func with optimized parameters beta.

    Returns
    -------
    err : np.array(Obs)
        Error band for an array of sample values x
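
    Examples
    --------
    A sketch of drawing a one-sigma band around a fit; `fit_res` is assumed
    to come from a previous `least_squares` call with fit function `func`:

    ```python
    x_samples = np.linspace(0, 4, 100)
    central = func([p.value for p in fit_res.fit_parameters], x_samples)
    band = error_band(x_samples, func, fit_res.fit_parameters)
    plt.fill_between(x_samples, central - band, central + band, alpha=0.3)
    ```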
    """
    cov = covariance(beta)
    if np.any(np.abs(cov - cov.T) > 1000 * np.finfo(np.float64).eps):
        warnings.warn("Covariance matrix is not symmetric within floating point precision", RuntimeWarning)

    deriv = []
    for i, item in enumerate(x):
        deriv.append(np.array(egrad(func)([o.value for o in beta], item)))

    err = []
    for i, item in enumerate(x):
        err.append(np.sqrt(deriv[i] @ cov @ deriv[i]))
    err = np.array(err)

    return err


def ks_test(objects=None):
    """Performs a Kolmogorov–Smirnov test for the p-values of all fit objects.

    Parameters
    ----------
    objects : list
        List of fit results to include in the analysis (optional).

    Returns
    -------
    None
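
    Examples
    --------
    An illustrative sketch; `res1` and `res2` stand for previously computed
    `Fit_result` objects:

    ```python
    ks_test()              # collects all Fit_result instances via gc
    ks_test([res1, res2])  # or restrict the test to an explicit list
    ```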
    """

    if objects is None:
        obs_list = []
        for obj in gc.get_objects():
            if isinstance(obj, Fit_result):
                obs_list.append(obj)
    else:
        obs_list = objects

    p_values = [o.p_value for o in obs_list]

    bins = len(p_values)
    x = np.arange(0, 1.001, 0.001)
    plt.plot(x, x, 'k', zorder=1)
    plt.xlim(0, 1)
    plt.ylim(0, 1)
    plt.xlabel('p-value')
    plt.ylabel('Cumulative probability')
    plt.title(str(bins) + ' p-values')

    n = np.arange(1, bins + 1) / np.float64(bins)
    Xs = np.sort(p_values)
    plt.step(Xs, n)
    diffs = n - Xs
    loc_max_diff = np.argmax(np.abs(diffs))
    loc = Xs[loc_max_diff]
    plt.annotate('', xy=(loc, loc), xytext=(loc, loc + diffs[loc_max_diff]), arrowprops=dict(arrowstyle='<->', shrinkA=0, shrinkB=0))
    plt.draw()

    print(scipy.stats.kstest(p_values, 'uniform'))


def _extract_val_and_dval(string):
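    """Parse a value-error string into a (value, dvalue) tuple.

    Illustrative examples of the format handled here:
    '0.548(23)' -> (0.548, 0.023)  # error refers to the last digits
    '500(40)'   -> (500.0, 40.0)
    '0.5(0.4)'  -> (0.5, 0.4)      # error given with an explicit decimal point
    """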
    split_string = string.split('(')
    if '.' in split_string[0] and '.' not in split_string[1][:-1]:
        factor = 10 ** -len(split_string[0].partition('.')[2])
    else:
        factor = 1
    return float(split_string[0]), float(split_string[1][:-1]) * factor
class Fit_result(collections.abc.Sequence):
21class Fit_result(Sequence):
22    """Represents fit results.
23
24    Attributes
25    ----------
26    fit_parameters : list
27        results for the individual fit parameters,
28        also accessible via indices.
29    chisquare_by_dof : float
30        reduced chisquare.
31    p_value : float
32        p-value of the fit
33    t2_p_value : float
34        Hotelling t-squared p-value for correlated fits.
35    """
36
37    def __init__(self):
38        self.fit_parameters = None
39
40    def __getitem__(self, idx):
41        return self.fit_parameters[idx]
42
43    def __len__(self):
44        return len(self.fit_parameters)
45
46    def gamma_method(self, **kwargs):
47        """Apply the gamma method to all fit parameters"""
48        [o.gamma_method(**kwargs) for o in self.fit_parameters]
49
50    gm = gamma_method
51
52    def __str__(self):
53        my_str = 'Goodness of fit:\n'
54        if hasattr(self, 'chisquare_by_dof'):
55            my_str += '\u03C7\u00b2/d.o.f. = ' + f'{self.chisquare_by_dof:2.6f}' + '\n'
56        elif hasattr(self, 'residual_variance'):
57            my_str += 'residual variance = ' + f'{self.residual_variance:2.6f}' + '\n'
58        if hasattr(self, 'chisquare_by_expected_chisquare'):
59            my_str += '\u03C7\u00b2/\u03C7\u00b2exp  = ' + f'{self.chisquare_by_expected_chisquare:2.6f}' + '\n'
60        if hasattr(self, 'p_value'):
61            my_str += 'p-value   = ' + f'{self.p_value:2.4f}' + '\n'
62        if hasattr(self, 't2_p_value'):
63            my_str += 't\u00B2p-value = ' + f'{self.t2_p_value:2.4f}' + '\n'
64        my_str += 'Fit parameters:\n'
65        for i_par, par in enumerate(self.fit_parameters):
66            my_str += str(i_par) + '\t' + ' ' * int(par >= 0) + str(par).rjust(int(par < 0.0)) + '\n'
67        return my_str
68
69    def __repr__(self):
70        m = max(map(len, list(self.__dict__.keys()))) + 1
71        return '\n'.join([key.rjust(m) + ': ' + repr(value) for key, value in sorted(self.__dict__.items())])

Represents fit results.

Attributes
  • fit_parameters (list): results for the individual fit parameters, also accessible via indices.
  • chisquare_by_dof (float): reduced chisquare.
  • p_value (float): p-value of the fit
  • t2_p_value (float): Hotelling t-squared p-value for correlated fits.
def gamma_method(self, **kwargs):
46    def gamma_method(self, **kwargs):
47        """Apply the gamma method to all fit parameters"""
48        [o.gamma_method(**kwargs) for o in self.fit_parameters]

Apply the gamma method to all fit parameters

def gm(self, **kwargs):
46    def gamma_method(self, **kwargs):
47        """Apply the gamma method to all fit parameters"""
48        [o.gamma_method(**kwargs) for o in self.fit_parameters]

Apply the gamma method to all fit parameters

Inherited Members
collections.abc.Sequence
index
count
def least_squares(x, y, func, priors=None, silent=False, **kwargs):
 74def least_squares(x, y, func, priors=None, silent=False, **kwargs):
 75    r'''Performs a non-linear fit to y = func(x).
 76        ```
 77
 78    Parameters
 79    ----------
 80    For an uncombined fit:
 81
 82    x : list
 83        list of floats.
 84    y : list
 85        list of Obs.
 86    func : object
 87        fit function, has to be of the form
 88
 89        ```python
 90        import autograd.numpy as anp
 91
 92        def func(a, x):
 93            return a[0] + a[1] * x + a[2] * anp.sinh(x)
 94        ```
 95
 96        For multiple x values func can be of the form
 97
 98        ```python
 99        def func(a, x):
100            (x1, x2) = x
101            return a[0] * x1 ** 2 + a[1] * x2
102        ```
103        It is important that all numpy functions refer to autograd.numpy, otherwise the differentiation
104        will not work.
105
106    OR For a combined fit:
107
108    x : dict
109        dict of lists.
110    y : dict
111        dict of lists of Obs.
112    funcs : dict
113        dict of objects
114        fit functions have to be of the form (here a[0] is the common fit parameter)
115        ```python
116        import autograd.numpy as anp
117        funcs = {"a": func_a,
118                "b": func_b}
119
120        def func_a(a, x):
121            return a[1] * anp.exp(-a[0] * x)
122
123        def func_b(a, x):
124            return a[2] * anp.exp(-a[0] * x)
125
126        It is important that all numpy functions refer to autograd.numpy, otherwise the differentiation
127        will not work.
128
129    priors : dict or list, optional
130        priors can either be a dictionary with integer keys and the corresponding priors as values or
131        a list with an entry for every parameter in the fit. The entries can either be
132        Obs (e.g. results from a previous fit) or strings containing a value and an error formatted like
133        0.548(23), 500(40) or 0.5(0.4)
134    silent : bool, optional
135        If true all output to the console is omitted (default False).
136    initial_guess : list
137        can provide an initial guess for the input parameters. Relevant for
138        non-linear fits with many parameters. In case of correlated fits the guess is used to perform
139        an uncorrelated fit which then serves as guess for the correlated fit.
140    method : str, optional
141        can be used to choose an alternative method for the minimization of chisquare.
142        The possible methods are the ones which can be used for scipy.optimize.minimize and
143        migrad of iminuit. If no method is specified, Levenberg-Marquard is used.
144        Reliable alternatives are migrad, Powell and Nelder-Mead.
145    tol: float, optional
146        can be used (only for combined fits and methods other than Levenberg-Marquard) to set the tolerance for convergence
147        to a different value to either speed up convergence at the cost of a larger error on the fitted parameters (and possibly
148        invalid estimates for parameter uncertainties) or smaller values to get more accurate parameter values
149        The stopping criterion depends on the method, e.g. migrad: edm_max = 0.002 * tol * errordef (EDM criterion: edm < edm_max)
150    correlated_fit : bool
151        If True, use the full inverse covariance matrix in the definition of the chisquare cost function.
152        For details about how the covariance matrix is estimated see `pyerrors.obs.covariance`.
153        In practice the correlation matrix is Cholesky decomposed and inverted (instead of the covariance matrix).
154        This procedure should be numerically more stable as the correlation matrix is typically better conditioned (Jacobi preconditioning).
155        At the moment this option only works for `prior==None` and when no `method` is given.
156    expected_chisquare : bool
157        If True estimates the expected chisquare which is
158        corrected by effects caused by correlated input data (default False).
159    resplot : bool
160        If True, a plot which displays fit, data and residuals is generated (default False).
161    qqplot : bool
162        If True, a quantile-quantile plot of the fit result is generated (default False).
163    num_grad : bool
164        Use numerical differentation instead of automatic differentiation to perform the error propagation (default False).
165
166    Returns
167    -------
168    output : Fit_result
169        Parameters and information on the fitted result.
170    '''
171    output = Fit_result()
172
173    if (type(x) == dict and type(y) == dict and type(func) == dict):
174        xd = {key: anp.asarray(x[key]) for key in x}
175        yd = y
176        funcd = func
177        output.fit_function = func
178    elif (type(x) == dict or type(y) == dict or type(func) == dict):
179        raise TypeError("All arguments have to be dictionaries in order to perform a combined fit.")
180    else:
181        x = np.asarray(x)
182        xd = {"": x}
183        yd = {"": y}
184        funcd = {"": func}
185        output.fit_function = func
186
187    if kwargs.get('num_grad') is True:
188        jacobian = num_jacobian
189        hessian = num_hessian
190    else:
191        jacobian = auto_jacobian
192        hessian = auto_hessian
193
194    key_ls = sorted(list(xd.keys()))
195
196    if sorted(list(yd.keys())) != key_ls:
197        raise Exception('x and y dictionaries do not contain the same keys.')
198
199    if sorted(list(funcd.keys())) != key_ls:
200        raise Exception('x and func dictionaries do not contain the same keys.')
201
202    x_all = np.concatenate([np.array(xd[key]) for key in key_ls])
203    y_all = np.concatenate([np.array(yd[key]) for key in key_ls])
204
205    y_f = [o.value for o in y_all]
206    dy_f = [o.dvalue for o in y_all]
207
208    if len(x_all.shape) > 2:
209        raise Exception('Unknown format for x values')
210
211    if np.any(np.asarray(dy_f) <= 0.0):
212        raise Exception('No y errors available, run the gamma method first.')
213
214    # number of fit parameters
215    n_parms_ls = []
216    for key in key_ls:
217        if not callable(funcd[key]):
218            raise TypeError('func (key=' + key + ') is not a function.')
219        if np.asarray(xd[key]).shape[-1] != len(yd[key]):
220            raise Exception('x and y input (key=' + key + ') do not have the same length')
221        for i in range(100):
222            try:
223                funcd[key](np.arange(i), x_all.T[0])
224            except TypeError:
225                continue
226            except IndexError:
227                continue
228            else:
229                break
230        else:
231            raise RuntimeError("Fit function (key=" + key + ") is not valid.")
232        n_parms = i
233        n_parms_ls.append(n_parms)
234    n_parms = max(n_parms_ls)
235    if not silent:
236        print('Fit with', n_parms, 'parameter' + 's' * (n_parms > 1))
237
238    if priors is not None:
239        if isinstance(priors, (list, np.ndarray)):
240            if n_parms != len(priors):
241                raise ValueError("'priors' does not have the correct length.")
242
243            loc_priors = []
244            for i_n, i_prior in enumerate(priors):
245                if isinstance(i_prior, Obs):
246                    loc_priors.append(i_prior)
247                elif isinstance(i_prior, str):
248                    loc_val, loc_dval = _extract_val_and_dval(i_prior)
249                    loc_priors.append(cov_Obs(loc_val, loc_dval ** 2, '#prior' + str(i_n) + f"_{np.random.randint(2147483647):010d}"))
250                else:
251                    raise TypeError("Prior entries need to be 'Obs' or 'str'.")
252
253            prior_mask = np.arange(len(priors))
254            output.priors = loc_priors
255
256        elif isinstance(priors, dict):
257            loc_priors = []
258            prior_mask = []
259            output.priors = {}
260            for pos, prior in priors.items():
261                if isinstance(pos, int):
262                    prior_mask.append(pos)
263                else:
264                    raise TypeError("Prior position needs to be an integer.")
265                if isinstance(prior, Obs):
266                    loc_priors.append(prior)
267                elif isinstance(prior, str):
268                    loc_val, loc_dval = _extract_val_and_dval(prior)
269                    loc_priors.append(cov_Obs(loc_val, loc_dval ** 2, '#prior' + str(pos) + f"_{np.random.randint(2147483647):010d}"))
270                else:
271                    raise TypeError("Prior entries need to be 'Obs' or 'str'.")
272                output.priors[pos] = loc_priors[-1]
273            if max(prior_mask) >= n_parms:
274                raise ValueError("Prior position out of range.")
275        else:
276            raise TypeError("Unkown type for `priors`.")
277
278        p_f = [o.value for o in loc_priors]
279        dp_f = [o.dvalue for o in loc_priors]
280        if np.any(np.asarray(dp_f) <= 0.0):
281            raise Exception('No prior errors available, run the gamma method first.')
282    else:
283        p_f = dp_f = np.array([])
284        prior_mask = []
285        loc_priors = []
286
287    if 'initial_guess' in kwargs:
288        x0 = kwargs.get('initial_guess')
289        if len(x0) != n_parms:
290            raise Exception('Initial guess does not have the correct length: %d vs. %d' % (len(x0), n_parms))
291    else:
292        x0 = [0.1] * n_parms
293
294    if priors is None:
295        def general_chisqfunc_uncorr(p, ivars, pr):
296            model = anp.concatenate([anp.array(funcd[key](p, xd[key])).reshape(-1) for key in key_ls])
297            return (ivars - model) / dy_f
298    else:
299        def general_chisqfunc_uncorr(p, ivars, pr):
300            model = anp.concatenate([anp.array(funcd[key](p, xd[key])).reshape(-1) for key in key_ls])
301            return anp.concatenate(((ivars - model) / dy_f, (p[prior_mask] - pr) / dp_f))
302
303    def chisqfunc_uncorr(p):
304        return anp.sum(general_chisqfunc_uncorr(p, y_f, p_f) ** 2)
305
306    if kwargs.get('correlated_fit') is True:
307        corr = covariance(y_all, correlation=True, **kwargs)
308        covdiag = np.diag(1 / np.asarray(dy_f))
309        condn = np.linalg.cond(corr)
310        if condn > 0.1 / np.finfo(float).eps:
311            raise Exception(f"Cannot invert correlation matrix as its condition number exceeds machine precision ({condn:1.2e})")
312        if condn > 1e13:
313            warnings.warn("Correlation matrix may be ill-conditioned, condition number: {%1.2e}" % (condn), RuntimeWarning)
314        chol = np.linalg.cholesky(corr)
315        chol_inv = scipy.linalg.solve_triangular(chol, covdiag, lower=True)
316
317        def general_chisqfunc(p, ivars, pr):
318            model = anp.concatenate([anp.array(funcd[key](p, xd[key])).reshape(-1) for key in key_ls])
319            return anp.concatenate((anp.dot(chol_inv, (ivars - model)), (p[prior_mask] - pr) / dp_f))
320
321        def chisqfunc(p):
322            return anp.sum(general_chisqfunc(p, y_f, p_f) ** 2)
323    else:
324        general_chisqfunc = general_chisqfunc_uncorr
325        chisqfunc = chisqfunc_uncorr
326
327    output.method = kwargs.get('method', 'Levenberg-Marquardt')
328    if not silent:
329        print('Method:', output.method)
330
331    if output.method != 'Levenberg-Marquardt':
332        if output.method == 'migrad':
333            tolerance = 1e-4  # default value of 1e-1 set by iminuit can be problematic
334            if 'tol' in kwargs:
335                tolerance = kwargs.get('tol')
336            fit_result = iminuit.minimize(chisqfunc_uncorr, x0, tol=tolerance)  # Stopping criterion 0.002 * tol * errordef
337            if kwargs.get('correlated_fit') is True:
338                fit_result = iminuit.minimize(chisqfunc, fit_result.x, tol=tolerance)
339            output.iterations = fit_result.nfev
340        else:
341            tolerance = 1e-12
342            if 'tol' in kwargs:
343                tolerance = kwargs.get('tol')
344            fit_result = scipy.optimize.minimize(chisqfunc_uncorr, x0, method=kwargs.get('method'), tol=tolerance)
345            if kwargs.get('correlated_fit') is True:
346                fit_result = scipy.optimize.minimize(chisqfunc, fit_result.x, method=kwargs.get('method'), tol=tolerance)
347            output.iterations = fit_result.nit
348
349        chisquare = fit_result.fun
350
351    else:
352        if 'tol' in kwargs:
353            print('tol cannot be set for Levenberg-Marquardt')
354
355        def chisqfunc_residuals_uncorr(p):
356            return general_chisqfunc_uncorr(p, y_f, p_f)
357
358        fit_result = scipy.optimize.least_squares(chisqfunc_residuals_uncorr, x0, method='lm', ftol=1e-15, gtol=1e-15, xtol=1e-15)
359        if kwargs.get('correlated_fit') is True:
360
361            def chisqfunc_residuals(p):
362                return general_chisqfunc(p, y_f, p_f)
363
364            fit_result = scipy.optimize.least_squares(chisqfunc_residuals, fit_result.x, method='lm', ftol=1e-15, gtol=1e-15, xtol=1e-15)
365
366        chisquare = np.sum(fit_result.fun ** 2)
367        assert np.isclose(chisquare, chisqfunc(fit_result.x), atol=1e-14)
368
369        output.iterations = fit_result.nfev
370
371    if not fit_result.success:
372        raise Exception('The minimization procedure did not converge.')
373
374    if x_all.shape[-1] - n_parms > 0:
375        output.chisquare = chisquare
376        output.dof = x_all.shape[-1] - n_parms + len(loc_priors)
377        output.chisquare_by_dof = output.chisquare / output.dof
378        output.p_value = 1 - scipy.stats.chi2.cdf(output.chisquare, output.dof)
379    else:
380        output.chisquare_by_dof = float('nan')
381
382    output.message = fit_result.message
383    if not silent:
384        print(fit_result.message)
385        print('chisquare/d.o.f.:', output.chisquare_by_dof)
386        print('fit parameters', fit_result.x)
387
388    def prepare_hat_matrix():
389        hat_vector = []
390        for key in key_ls:
391            if (len(xd[key]) != 0):
392                hat_vector.append(jacobian(funcd[key])(fit_result.x, xd[key]))
393        hat_vector = [item for sublist in hat_vector for item in sublist]
394        return hat_vector
395
396    if kwargs.get('expected_chisquare') is True:
397        if kwargs.get('correlated_fit') is not True:
398            W = np.diag(1 / np.asarray(dy_f))
399            cov = covariance(y_all)
400            hat_vector = prepare_hat_matrix()
401            A = W @ hat_vector
402            P_phi = A @ np.linalg.pinv(A.T @ A) @ A.T
403            expected_chisquare = np.trace((np.identity(x_all.shape[-1]) - P_phi) @ W @ cov @ W)
404            output.chisquare_by_expected_chisquare = output.chisquare / expected_chisquare
405            if not silent:
406                print('chisquare/expected_chisquare:', output.chisquare_by_expected_chisquare)
407
408    fitp = fit_result.x
409    if np.any(np.asarray(dy_f) <= 0.0):
410        raise Exception('No y errors available, run the gamma method first.')
411
412    try:
413        hess = hessian(chisqfunc)(fitp)
414    except TypeError:
415        raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None
416
417    len_y = len(y_f)
418
419    def chisqfunc_compact(d):
420        return anp.sum(general_chisqfunc(d[:n_parms], d[n_parms: n_parms + len_y], d[n_parms + len_y:]) ** 2)
421
422    jac_jac_y = hessian(chisqfunc_compact)(np.concatenate((fitp, y_f, p_f)))
423
424    # Compute hess^{-1} @ jac_jac_y[:n_parms + m, n_parms + m:] using LAPACK dgesv
425    try:
426        deriv_y = -scipy.linalg.solve(hess, jac_jac_y[:n_parms, n_parms:])
427    except np.linalg.LinAlgError:
428        raise Exception("Cannot invert hessian matrix.")
429
430    result = []
431    for i in range(n_parms):
432        result.append(derived_observable(lambda x_all, **kwargs: (x_all[0] + np.finfo(np.float64).eps) / (y_all[0].value + np.finfo(np.float64).eps) * fitp[i], list(y_all) + loc_priors, man_grad=list(deriv_y[i])))
433
434    output.fit_parameters = result
435
436    # Hotelling t-squared p-value for correlated fits.
437    if kwargs.get('correlated_fit') is True:
438        n_cov = np.min(np.vectorize(lambda x_all: x_all.N)(y_all))
439        output.t2_p_value = 1 - scipy.stats.f.cdf((n_cov - output.dof) / (output.dof * (n_cov - 1)) * output.chisquare,
440                                                  output.dof, n_cov - output.dof)
441
442    if kwargs.get('resplot') is True:
443        for key in key_ls:
444            residual_plot(xd[key], yd[key], funcd[key], result, title=key)
445
446    if kwargs.get('qqplot') is True:
447        for key in key_ls:
448            qqplot(xd[key], yd[key], funcd[key], result, title=key)
449
450    return output

Performs a non-linear fit to y = func(x). ```

Parameters
  • For an uncombined fit:
  • x (list): list of floats.
  • y (list): list of Obs.
  • func (object): fit function, has to be of the form

    import autograd.numpy as anp
    
    def func(a, x):
       return a[0] + a[1] * x + a[2] * anp.sinh(x)
    

    For multiple x values func can be of the form

    def func(a, x):
       (x1, x2) = x
       return a[0] * x1 ** 2 + a[1] * x2
    

    It is important that all numpy functions refer to autograd.numpy, otherwise the differentiation will not work.

  • OR For a combined fit:
  • x (dict): dict of lists.
  • y (dict): dict of lists of Obs.
  • funcs (dict): dict of objects fit functions have to be of the form (here a[0] is the common fit parameter) ```python import autograd.numpy as anp funcs = {"a": func_a, "b": func_b}

    def func_a(a, x): return a[1] * anp.exp(-a[0] * x)

    def func_b(a, x): return a[2] * anp.exp(-a[0] * x)

    It is important that all numpy functions refer to autograd.numpy, otherwise the differentiation will not work.

  • priors (dict or list, optional): priors can either be a dictionary with integer keys and the corresponding priors as values or a list with an entry for every parameter in the fit. The entries can either be Obs (e.g. results from a previous fit) or strings containing a value and an error formatted like 0.548(23), 500(40) or 0.5(0.4)
  • silent (bool, optional): If true all output to the console is omitted (default False).
  • initial_guess (list): can provide an initial guess for the input parameters. Relevant for non-linear fits with many parameters. In case of correlated fits the guess is used to perform an uncorrelated fit which then serves as guess for the correlated fit.
  • method (str, optional): can be used to choose an alternative method for the minimization of chisquare. The possible methods are the ones which can be used for scipy.optimize.minimize and migrad of iminuit. If no method is specified, Levenberg-Marquard is used. Reliable alternatives are migrad, Powell and Nelder-Mead.
  • tol (float, optional): can be used (only for combined fits and methods other than Levenberg-Marquard) to set the tolerance for convergence to a different value to either speed up convergence at the cost of a larger error on the fitted parameters (and possibly invalid estimates for parameter uncertainties) or smaller values to get more accurate parameter values The stopping criterion depends on the method, e.g. migrad: edm_max = 0.002 * tol * errordef (EDM criterion: edm < edm_max)
  • correlated_fit (bool): If True, use the full inverse covariance matrix in the definition of the chisquare cost function. For details about how the covariance matrix is estimated see pyerrors.obs.covariance. In practice the correlation matrix is Cholesky decomposed and inverted (instead of the covariance matrix). This procedure should be numerically more stable as the correlation matrix is typically better conditioned (Jacobi preconditioning). At the moment this option only works for prior==None and when no method is given.
  • expected_chisquare (bool): If True estimates the expected chisquare which is corrected by effects caused by correlated input data (default False).
  • resplot (bool): If True, a plot which displays fit, data and residuals is generated (default False).
  • qqplot (bool): If True, a quantile-quantile plot of the fit result is generated (default False).
  • num_grad (bool): Use numerical differentiation instead of automatic differentiation to perform the error propagation (default False).
Returns
  • output (Fit_result): Parameters and information on the fitted result.
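A minimal, self-contained usage sketch (not part of the upstream docstring; the data are synthetic and the ensemble name 'ens1' is purely illustrative):

```python
import numpy as np
import autograd.numpy as anp  # fit functions must use autograd-wrapped numpy
import pyerrors as pe


def func(a, x):
    return a[0] * anp.exp(-a[1] * x)


x = np.arange(1, 6)
# pseudo_Obs(value, dvalue, ensemble_name) creates a synthetic observable
y = [pe.pseudo_Obs(2.0 * np.exp(-0.3 * xi), 0.05, 'ens1') for xi in x]
for o in y:
    o.gamma_method()  # error estimates have to be available before fitting

fit_res = pe.fits.least_squares(x, y, func)
fit_res.gm()  # propagate uncertainties to the fit parameters
print(fit_res)
```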
def total_least_squares(x, y, func, silent=False, **kwargs):
453def total_least_squares(x, y, func, silent=False, **kwargs):
454    r'''Performs a non-linear fit to y = func(x) and returns a list of Obs corresponding to the fit parameters.
455
456    Parameters
457    ----------
458    x : list
459        list of Obs, or a tuple of lists of Obs
460    y : list
461        list of Obs. The dvalues of the Obs are used as x- and yerror for the fit.
462    func : object
463        func has to be of the form
464
465        ```python
466        import autograd.numpy as anp
467
468        def func(a, x):
469            return a[0] + a[1] * x + a[2] * anp.sinh(x)
470        ```
471
472        For multiple x values func can be of the form
473
474        ```python
475        def func(a, x):
476            (x1, x2) = x
477            return a[0] * x1 ** 2 + a[1] * x2
478        ```
479
480        It is important that all numpy functions refer to autograd.numpy, otherwise the differentiation
481        will not work.
482    silent : bool, optional
483        If true all output to the console is omitted (default False).
484    initial_guess : list
485        can provide an initial guess for the input parameters. Relevant for non-linear
486        fits with many parameters.
487    expected_chisquare : bool
488        If true prints the expected chisquare which is
489        corrected by effects caused by correlated input data.
490        This can take a while as the full correlation matrix
491        has to be calculated (default False).
492    num_grad : bool
493        Use numerical differentiation instead of automatic differentiation to perform the error propagation (default False).
494
495    Notes
496    -----
497    Based on the orthogonal distance regression module of scipy.
498
499    Returns
500    -------
501    output : Fit_result
502        Parameters and information on the fitted result.
503    '''
504
505    output = Fit_result()
506
507    output.fit_function = func
508
509    x = np.array(x)
510
511    x_shape = x.shape
512
513    if kwargs.get('num_grad') is True:
514        jacobian = num_jacobian
515        hessian = num_hessian
516    else:
517        jacobian = auto_jacobian
518        hessian = auto_hessian
519
520    if not callable(func):
521        raise TypeError('func has to be a function.')
522
523    for i in range(42):
524        try:
525            func(np.arange(i), x.T[0])
526        except TypeError:
527            continue
528        except IndexError:
529            continue
530        else:
531            break
532    else:
533        raise RuntimeError("Fit function is not valid.")
534
535    n_parms = i
536    if not silent:
537        print('Fit with', n_parms, 'parameter' + 's' * (n_parms > 1))
538
539    x_f = np.vectorize(lambda o: o.value)(x)
540    dx_f = np.vectorize(lambda o: o.dvalue)(x)
541    y_f = np.array([o.value for o in y])
542    dy_f = np.array([o.dvalue for o in y])
543
544    if np.any(np.asarray(dx_f) <= 0.0):
545        raise Exception('No x errors available, run the gamma method first.')
546
547    if np.any(np.asarray(dy_f) <= 0.0):
548        raise Exception('No y errors available, run the gamma method first.')
549
550    if 'initial_guess' in kwargs:
551        x0 = kwargs.get('initial_guess')
552        if len(x0) != n_parms:
553            raise Exception('Initial guess does not have the correct length: %d vs. %d' % (len(x0), n_parms))
554    else:
555        x0 = [1] * n_parms
556
557    data = RealData(x_f, y_f, sx=dx_f, sy=dy_f)
558    model = Model(func)
559    odr = ODR(data, model, x0, partol=np.finfo(np.float64).eps)
560    odr.set_job(fit_type=0, deriv=1)
561    out = odr.run()
562
563    output.residual_variance = out.res_var
564
565    output.method = 'ODR'
566
567    output.message = out.stopreason
568
569    output.xplus = out.xplus
570
571    if not silent:
572        print('Method: ODR')
573        print(*out.stopreason)
574        print('Residual variance:', output.residual_variance)
575
576    if out.info > 3:
577        raise Exception('The minimization procedure did not converge.')
578
579    m = x_f.size
580
581    def odr_chisquare(p):
582        model = func(p[:n_parms], p[n_parms:].reshape(x_shape))
583        chisq = anp.sum(((y_f - model) / dy_f) ** 2) + anp.sum(((x_f - p[n_parms:].reshape(x_shape)) / dx_f) ** 2)
584        return chisq
585
586    if kwargs.get('expected_chisquare') is True:
587        W = np.diag(1 / np.asarray(np.concatenate((dy_f.ravel(), dx_f.ravel()))))
588
589        if kwargs.get('covariance') is not None:
590            cov = kwargs.get('covariance')
591        else:
592            cov = covariance(np.concatenate((y, x.ravel())))
593
594        number_of_x_parameters = int(m / x_f.shape[-1])
595
596        old_jac = jacobian(func)(out.beta, out.xplus)
597        fused_row1 = np.concatenate((old_jac, np.concatenate((number_of_x_parameters * [np.zeros(old_jac.shape)]), axis=0)))
598        fused_row2 = np.concatenate((jacobian(lambda x, y: func(y, x))(out.xplus, out.beta).reshape(x_f.shape[-1], x_f.shape[-1] * number_of_x_parameters), np.identity(number_of_x_parameters * old_jac.shape[0])))
599        new_jac = np.concatenate((fused_row1, fused_row2), axis=1)
600
601        A = W @ new_jac
602        P_phi = A @ np.linalg.pinv(A.T @ A) @ A.T
603        expected_chisquare = np.trace((np.identity(P_phi.shape[0]) - P_phi) @ W @ cov @ W)
604        if expected_chisquare <= 0.0:
605            warnings.warn("Negative expected_chisquare.", RuntimeWarning)
606            expected_chisquare = np.abs(expected_chisquare)
607        output.chisquare_by_expected_chisquare = odr_chisquare(np.concatenate((out.beta, out.xplus.ravel()))) / expected_chisquare
608        if not silent:
609            print('chisquare/expected_chisquare:',
610                  output.chisquare_by_expected_chisquare)
611
612    fitp = out.beta
613    try:
614        hess = hessian(odr_chisquare)(np.concatenate((fitp, out.xplus.ravel())))
615    except TypeError:
616        raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None
617
618    def odr_chisquare_compact_x(d):
619        model = func(d[:n_parms], d[n_parms:n_parms + m].reshape(x_shape))
620        chisq = anp.sum(((y_f - model) / dy_f) ** 2) + anp.sum(((d[n_parms + m:].reshape(x_shape) - d[n_parms:n_parms + m].reshape(x_shape)) / dx_f) ** 2)
621        return chisq
622
623    jac_jac_x = hessian(odr_chisquare_compact_x)(np.concatenate((fitp, out.xplus.ravel(), x_f.ravel())))
624
625    # Compute hess^{-1} @ jac_jac_x[:n_parms + m, n_parms + m:] using LAPACK dgesv
626    try:
627        deriv_x = -scipy.linalg.solve(hess, jac_jac_x[:n_parms + m, n_parms + m:])
628    except np.linalg.LinAlgError:
629        raise Exception("Cannot invert hessian matrix.")
630
631    def odr_chisquare_compact_y(d):
632        model = func(d[:n_parms], d[n_parms:n_parms + m].reshape(x_shape))
633        chisq = anp.sum(((d[n_parms + m:] - model) / dy_f) ** 2) + anp.sum(((x_f - d[n_parms:n_parms + m].reshape(x_shape)) / dx_f) ** 2)
634        return chisq
635
636    jac_jac_y = hessian(odr_chisquare_compact_y)(np.concatenate((fitp, out.xplus.ravel(), y_f)))
637
638    # Compute hess^{-1} @ jac_jac_y[:n_parms + m, n_parms + m:] using LAPACK dgesv
639    try:
640        deriv_y = -scipy.linalg.solve(hess, jac_jac_y[:n_parms + m, n_parms + m:])
641    except np.linalg.LinAlgError:
642        raise Exception("Cannot invert hessian matrix.")
643
644    result = []
645    for i in range(n_parms):
646        result.append(derived_observable(lambda my_var, **kwargs: (my_var[0] + np.finfo(np.float64).eps) / (x.ravel()[0].value + np.finfo(np.float64).eps) * out.beta[i], list(x.ravel()) + list(y), man_grad=list(deriv_x[i]) + list(deriv_y[i])))
647
648    output.fit_parameters = result
649
650    output.odr_chisquare = odr_chisquare(np.concatenate((out.beta, out.xplus.ravel())))
651    output.dof = x.shape[-1] - n_parms
652    output.p_value = 1 - scipy.stats.chi2.cdf(output.odr_chisquare, output.dof)
653
654    return output

Performs a non-linear fit to y = func(x) and returns a list of Obs corresponding to the fit parameters.

Parameters
  • x (list): list of Obs, or a tuple of lists of Obs
  • y (list): list of Obs. The dvalues of the Obs are used as x- and yerror for the fit.
  • func (object): func has to be of the form

    import autograd.numpy as anp

    def func(a, x):
        return a[0] + a[1] * x + a[2] * anp.sinh(x)

    For multiple x values func can be of the form

    def func(a, x):
        (x1, x2) = x
        return a[0] * x1 ** 2 + a[1] * x2


    It is important that all numpy functions refer to autograd.numpy, otherwise the differentiation will not work.

  • silent (bool, optional): If true all output to the console is omitted (default False).
  • initial_guess (list): can provide an initial guess for the input parameters. Relevant for non-linear fits with many parameters.
  • expected_chisquare (bool): If true prints the expected chisquare which is corrected by effects caused by correlated input data. This can take a while as the full correlation matrix has to be calculated (default False).
  • num_grad (bool): Use numerical differentiation instead of automatic differentiation to perform the error propagation (default False).
Notes

Based on the orthogonal distance regression module of scipy.

Returns
  • output (Fit_result): Parameters and information on the fitted result.
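A usage sketch with errors on both x and y (synthetic, illustrative data; not part of the upstream docstring):

```python
import pyerrors as pe


def func(a, x):
    return a[0] + a[1] * x


xvals = [1.0, 2.0, 3.0, 4.0]
x = [pe.pseudo_Obs(xi, 0.01, 'ens1') for xi in xvals]  # x values carry errors too
y = [pe.pseudo_Obs(0.5 + 0.3 * xi, 0.02, 'ens1') for xi in xvals]
for o in x + y:
    o.gamma_method()  # both x and y errors have to be available

fit_res = pe.fits.total_least_squares(x, y, func)
fit_res.gm()
print(fit_res)
```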
def fit_lin(x, y, **kwargs):
657def fit_lin(x, y, **kwargs):
658    """Performs a linear fit to y = n + m * x and returns two Obs n, m.
659
660    Parameters
661    ----------
662    x : list
663        Can either be a list of floats in which case no xerror is assumed, or
664        a list of Obs, where the dvalues of the Obs are used as xerror for the fit.
665    y : list
666        List of Obs, the dvalues of the Obs are used as yerror for the fit.
667
668    Returns
669    -------
670    fit_parameters : list[Obs]
671        List of fitted observables.
672    """
673
674    def f(a, x):
675        y = a[0] + a[1] * x
676        return y
677
678    if all(isinstance(n, Obs) for n in x):
679        out = total_least_squares(x, y, f, **kwargs)
680        return out.fit_parameters
681    elif all(isinstance(n, float) or isinstance(n, int) for n in x) or isinstance(x, np.ndarray):
682        out = least_squares(x, y, f, **kwargs)
683        return out.fit_parameters
684    else:
685        raise Exception('Unsupported types for x')

Performs a linear fit to y = n + m * x and returns two Obs n, m.

Parameters
  • x (list): Can either be a list of floats in which case no xerror is assumed, or a list of Obs, where the dvalues of the Obs are used as xerror for the fit.
  • y (list): List of Obs, the dvalues of the Obs are used as yerror for the fit.
Returns
  • fit_parameters (list[Obs]): List of fitted observables.
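A short illustration (synthetic data; with plain floats for x the call dispatches to least_squares, with Obs to total_least_squares):

```python
import pyerrors as pe

x = [1.0, 2.0, 3.0, 4.0]  # plain floats: no x-errors are assumed
y = [pe.pseudo_Obs(0.5 + 0.3 * xi, 0.02, 'ens1') for xi in x]
for o in y:
    o.gamma_method()

n, m = pe.fits.fit_lin(x, y)  # intercept and slope as Obs
```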
def qqplot(x, o_y, func, p, title=''):
688def qqplot(x, o_y, func, p, title=""):
689    """Generates a quantile-quantile plot of the fit result which can be used to
690       check if the residuals of the fit are gaussian distributed.
691
692    Returns
693    -------
694    None
695    """
696
697    residuals = []
698    for i_x, i_y in zip(x, o_y):
699        residuals.append((i_y - func(p, i_x)) / i_y.dvalue)
700    residuals = sorted(residuals)
701    my_y = [o.value for o in residuals]
702    probplot = scipy.stats.probplot(my_y)
703    my_x = probplot[0][0]
704    plt.figure(figsize=(8, 8 / 1.618))
705    plt.errorbar(my_x, my_y, fmt='o')
706    fit_start = my_x[0]
707    fit_stop = my_x[-1]
708    samples = np.arange(fit_start, fit_stop, 0.01)
709    plt.plot(samples, samples, 'k--', zorder=11, label='Standard normal distribution')
710    plt.plot(samples, probplot[1][0] * samples + probplot[1][1], zorder=10, label='Least squares fit, r=' + str(np.around(probplot[1][2], 3)), marker='', ls='-')
711
712    plt.xlabel('Theoretical quantiles')
713    plt.ylabel('Ordered Values')
714    plt.legend(title=title)
715    plt.draw()

Generates a quantile-quantile plot of the fit result which can be used to check if the residuals of the fit are gaussian distributed.

Returns
  • None
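An illustrative call, reusing x, y, func and fit_res from the least_squares sketch above; equivalently, qqplot=True can be passed to least_squares directly:

```python
import matplotlib.pyplot as plt
import pyerrors as pe

# x, y, func and fit_res as in the least_squares sketch above
pe.fits.qqplot(x, y, func, fit_res.fit_parameters)
plt.show()  # qqplot only calls plt.draw()
```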
def residual_plot(x, y, func, fit_res, title=''):
718def residual_plot(x, y, func, fit_res, title=""):
719    """Generates a plot which compares the fit to the data and displays the corresponding residuals
720
721    For uncorrelated data the residuals are expected to be distributed ~N(0,1).
722
723    Returns
724    -------
725    None
726    """
727    sorted_x = sorted(x)
728    xstart = sorted_x[0] - 0.5 * (sorted_x[1] - sorted_x[0])
729    xstop = sorted_x[-1] + 0.5 * (sorted_x[-1] - sorted_x[-2])
730    x_samples = np.arange(xstart, xstop + 0.01, 0.01)
731
732    plt.figure(figsize=(8, 8 / 1.618))
733    gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1], wspace=0.0, hspace=0.0)
734    ax0 = plt.subplot(gs[0])
735    ax0.errorbar(x, [o.value for o in y], yerr=[o.dvalue for o in y], ls='none', fmt='o', capsize=3, markersize=5, label='Data')
736    ax0.plot(x_samples, func([o.value for o in fit_res], x_samples), label='Fit', zorder=10, ls='-', ms=0)
737    ax0.set_xticklabels([])
738    ax0.set_xlim([xstart, xstop])
739    ax0.set_xticklabels([])
740    ax0.legend(title=title)
741
742    residuals = (np.asarray([o.value for o in y]) - func([o.value for o in fit_res], np.asarray(x))) / np.asarray([o.dvalue for o in y])
743    ax1 = plt.subplot(gs[1])
744    ax1.plot(x, residuals, 'ko', ls='none', markersize=5)
745    ax1.tick_params(direction='out')
746    ax1.tick_params(axis="x", bottom=True, top=True, labelbottom=True)
747    ax1.axhline(y=0.0, ls='--', color='k', marker=" ")
748    ax1.fill_between(x_samples, -1.0, 1.0, alpha=0.1, facecolor='k')
749    ax1.set_xlim([xstart, xstop])
750    ax1.set_ylabel('Residuals')
751    plt.subplots_adjust(wspace=None, hspace=None)
752    plt.draw()

Generates a plot which compares the fit to the data and displays the corresponding residuals

For uncorrelated data the residuals are expected to be distributed ~N(0,1).

Returns
  • None
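An illustrative call in the same spirit; equivalently, resplot=True can be passed to least_squares directly:

```python
import matplotlib.pyplot as plt
import pyerrors as pe

# x, y, func and fit_res as in the least_squares sketch above
pe.fits.residual_plot(x, y, func, fit_res.fit_parameters)
plt.show()  # residual_plot only calls plt.draw()
```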
def error_band(x, func, beta):
755def error_band(x, func, beta):
756    """Calculate the error band for an array of sample values x, for given fit function func with optimized parameters beta.
757
758    Returns
759    -------
760    err : np.array(Obs)
761        Error band for an array of sample values x
762    """
763    cov = covariance(beta)
764    if np.any(np.abs(cov - cov.T) > 1000 * np.finfo(np.float64).eps):
765        warnings.warn("Covariance matrix is not symmetric within floating point precision", RuntimeWarning)
766
767    deriv = []
768    for i, item in enumerate(x):
769        deriv.append(np.array(egrad(func)([o.value for o in beta], item)))
770
771    err = []
772    for i, item in enumerate(x):
773        err.append(np.sqrt(deriv[i] @ cov @ deriv[i]))
774    err = np.array(err)
775
776    return err

Calculate the error band for an array of sample values x, for given fit function func with optimized parameters beta.

Returns
  • err (np.array(Obs)): Error band for an array of sample values x
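A sketch of evaluating the band on a dense grid, reusing func and fit_res from the least_squares sketch above (fit_res.gm() must have been called so that the parameter covariance is available):

```python
import numpy as np
import pyerrors as pe

# func and fit_res as in the least_squares sketch above
x_samples = np.linspace(1.0, 5.0, 100)
band = pe.fits.error_band(x_samples, func, fit_res.fit_parameters)
# band[i] is the propagated 1-sigma error of func at x_samples[i]
```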
def ks_test(objects=None):
779def ks_test(objects=None):
780    """Performs a Kolmogorov–Smirnov test for the p-values of all fit objects.
781
782    Parameters
783    ----------
784    objects : list
785        List of fit results to include in the analysis (optional).
786
787    Returns
788    -------
789    None
790    """
791
792    if objects is None:
793        obs_list = []
794        for obj in gc.get_objects():
795            if isinstance(obj, Fit_result):
796                obs_list.append(obj)
797    else:
798        obs_list = objects
799
800    p_values = [o.p_value for o in obs_list]
801
802    bins = len(p_values)
803    x = np.arange(0, 1.001, 0.001)
804    plt.plot(x, x, 'k', zorder=1)
805    plt.xlim(0, 1)
806    plt.ylim(0, 1)
807    plt.xlabel('p-value')
808    plt.ylabel('Cumulative probability')
809    plt.title(str(bins) + ' p-values')
810
811    n = np.arange(1, bins + 1) / np.float64(bins)
812    Xs = np.sort(p_values)
813    plt.step(Xs, n)
814    diffs = n - Xs
815    loc_max_diff = np.argmax(np.abs(diffs))
816    loc = Xs[loc_max_diff]
817    plt.annotate('', xy=(loc, loc), xytext=(loc, loc + diffs[loc_max_diff]), arrowprops=dict(arrowstyle='<->', shrinkA=0, shrinkB=0))
818    plt.draw()
819
820    print(scipy.stats.kstest(p_values, 'uniform'))

Performs a Kolmogorov–Smirnov test for the p-values of all fit objects.

Parameters
  • objects (list): List of fit results to include in the analysis (optional).
Returns
  • None
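A usage sketch, reusing fit_res from the least_squares sketch above; in practice one would collect the p-values of many comparable fits:

```python
import pyerrors as pe

pe.fits.ks_test([fit_res])  # test the collected p-values for uniformity

# without arguments, every Fit_result instance alive in memory is collected
pe.fits.ks_test()
```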