diff --git a/docs/pyerrors/fits.html b/docs/pyerrors/fits.html index 640c6476..fe5844bf 100644 --- a/docs/pyerrors/fits.html +++ b/docs/pyerrors/fits.html @@ -341,598 +341,604 @@ 230 n_parms_ls.append(n_loc) 231 232 n_parms = max(n_parms_ls) -233 if not silent: -234 print('Fit with', n_parms, 'parameter' + 's' * (n_parms > 1)) -235 -236 if priors is not None: -237 if isinstance(priors, (list, np.ndarray)): -238 if n_parms != len(priors): -239 raise ValueError("'priors' does not have the correct length.") -240 -241 loc_priors = [] -242 for i_n, i_prior in enumerate(priors): -243 loc_priors.append(_construct_prior_obs(i_prior, i_n)) -244 -245 prior_mask = np.arange(len(priors)) -246 output.priors = loc_priors -247 -248 elif isinstance(priors, dict): -249 loc_priors = [] -250 prior_mask = [] -251 output.priors = {} -252 for pos, prior in priors.items(): -253 if isinstance(pos, int): -254 prior_mask.append(pos) -255 else: -256 raise TypeError("Prior position needs to be an integer.") -257 loc_priors.append(_construct_prior_obs(prior, pos)) -258 -259 output.priors[pos] = loc_priors[-1] -260 if max(prior_mask) >= n_parms: -261 raise ValueError("Prior position out of range.") -262 else: -263 raise TypeError("Unkown type for `priors`.") +233 +234 if len(key_ls) > 1: +235 for key in key_ls: +236 if np.asarray(yd[key]).shape != funcd[key](np.arange(n_parms), xd[key]).shape: +237 raise ValueError(f"Fit function {key} returns the wrong shape ({funcd[key](np.arange(n_parms), xd[key]).shape} instead of {xd[key].shape})\nIf the fit function is just a constant you could try adding x*0 to get the correct shape.") +238 +239 if not silent: +240 print('Fit with', n_parms, 'parameter' + 's' * (n_parms > 1)) +241 +242 if priors is not None: +243 if isinstance(priors, (list, np.ndarray)): +244 if n_parms != len(priors): +245 raise ValueError("'priors' does not have the correct length.") +246 +247 loc_priors = [] +248 for i_n, i_prior in enumerate(priors): +249 loc_priors.append(_construct_prior_obs(i_prior, i_n)) +250 +251 prior_mask = np.arange(len(priors)) +252 output.priors = loc_priors +253 +254 elif isinstance(priors, dict): +255 loc_priors = [] +256 prior_mask = [] +257 output.priors = {} +258 for pos, prior in priors.items(): +259 if isinstance(pos, int): +260 prior_mask.append(pos) +261 else: +262 raise TypeError("Prior position needs to be an integer.") +263 loc_priors.append(_construct_prior_obs(prior, pos)) 264 -265 p_f = [o.value for o in loc_priors] -266 dp_f = [o.dvalue for o in loc_priors] -267 if np.any(np.asarray(dp_f) <= 0.0): -268 raise Exception("No prior errors available, run the gamma method first.") -269 else: -270 p_f = dp_f = np.array([]) -271 prior_mask = [] -272 loc_priors = [] -273 -274 if 'initial_guess' in kwargs: -275 x0 = kwargs.get('initial_guess') -276 if len(x0) != n_parms: -277 raise ValueError('Initial guess does not have the correct length: %d vs. 
%d' % (len(x0), n_parms)) -278 else: -279 x0 = [0.1] * n_parms -280 -281 if priors is None: -282 def general_chisqfunc_uncorr(p, ivars, pr): -283 model = anp.concatenate([anp.array(funcd[key](p, xd[key])).reshape(-1) for key in key_ls]) -284 return (ivars - model) / dy_f -285 else: -286 def general_chisqfunc_uncorr(p, ivars, pr): -287 model = anp.concatenate([anp.array(funcd[key](p, xd[key])).reshape(-1) for key in key_ls]) -288 return anp.concatenate(((ivars - model) / dy_f, (p[prior_mask] - pr) / dp_f)) -289 -290 def chisqfunc_uncorr(p): -291 return anp.sum(general_chisqfunc_uncorr(p, y_f, p_f) ** 2) -292 -293 if kwargs.get('correlated_fit') is True: -294 corr = covariance(y_all, correlation=True, **kwargs) -295 covdiag = np.diag(1 / np.asarray(dy_f)) -296 condn = np.linalg.cond(corr) -297 if condn > 0.1 / np.finfo(float).eps: -298 raise Exception(f"Cannot invert correlation matrix as its condition number exceeds machine precision ({condn:1.2e})") -299 if condn > 1e13: -300 warnings.warn("Correlation matrix may be ill-conditioned, condition number: {%1.2e}" % (condn), RuntimeWarning) -301 chol = np.linalg.cholesky(corr) -302 chol_inv = scipy.linalg.solve_triangular(chol, covdiag, lower=True) -303 -304 def general_chisqfunc(p, ivars, pr): -305 model = anp.concatenate([anp.array(funcd[key](p, xd[key])).reshape(-1) for key in key_ls]) -306 return anp.concatenate((anp.dot(chol_inv, (ivars - model)), (p[prior_mask] - pr) / dp_f)) -307 -308 def chisqfunc(p): -309 return anp.sum(general_chisqfunc(p, y_f, p_f) ** 2) -310 else: -311 general_chisqfunc = general_chisqfunc_uncorr -312 chisqfunc = chisqfunc_uncorr +265 output.priors[pos] = loc_priors[-1] +266 if max(prior_mask) >= n_parms: +267 raise ValueError("Prior position out of range.") +268 else: +269 raise TypeError("Unkown type for `priors`.") +270 +271 p_f = [o.value for o in loc_priors] +272 dp_f = [o.dvalue for o in loc_priors] +273 if np.any(np.asarray(dp_f) <= 0.0): +274 raise Exception("No prior errors available, run the gamma method first.") +275 else: +276 p_f = dp_f = np.array([]) +277 prior_mask = [] +278 loc_priors = [] +279 +280 if 'initial_guess' in kwargs: +281 x0 = kwargs.get('initial_guess') +282 if len(x0) != n_parms: +283 raise ValueError('Initial guess does not have the correct length: %d vs. 
%d' % (len(x0), n_parms)) +284 else: +285 x0 = [0.1] * n_parms +286 +287 if priors is None: +288 def general_chisqfunc_uncorr(p, ivars, pr): +289 model = anp.concatenate([anp.array(funcd[key](p, xd[key])).reshape(-1) for key in key_ls]) +290 return (ivars - model) / dy_f +291 else: +292 def general_chisqfunc_uncorr(p, ivars, pr): +293 model = anp.concatenate([anp.array(funcd[key](p, xd[key])).reshape(-1) for key in key_ls]) +294 return anp.concatenate(((ivars - model) / dy_f, (p[prior_mask] - pr) / dp_f)) +295 +296 def chisqfunc_uncorr(p): +297 return anp.sum(general_chisqfunc_uncorr(p, y_f, p_f) ** 2) +298 +299 if kwargs.get('correlated_fit') is True: +300 corr = covariance(y_all, correlation=True, **kwargs) +301 covdiag = np.diag(1 / np.asarray(dy_f)) +302 condn = np.linalg.cond(corr) +303 if condn > 0.1 / np.finfo(float).eps: +304 raise Exception(f"Cannot invert correlation matrix as its condition number exceeds machine precision ({condn:1.2e})") +305 if condn > 1e13: +306 warnings.warn("Correlation matrix may be ill-conditioned, condition number: {%1.2e}" % (condn), RuntimeWarning) +307 chol = np.linalg.cholesky(corr) +308 chol_inv = scipy.linalg.solve_triangular(chol, covdiag, lower=True) +309 +310 def general_chisqfunc(p, ivars, pr): +311 model = anp.concatenate([anp.array(funcd[key](p, xd[key])).reshape(-1) for key in key_ls]) +312 return anp.concatenate((anp.dot(chol_inv, (ivars - model)), (p[prior_mask] - pr) / dp_f)) 313 -314 output.method = kwargs.get('method', 'Levenberg-Marquardt') -315 if not silent: -316 print('Method:', output.method) -317 -318 if output.method != 'Levenberg-Marquardt': -319 if output.method == 'migrad': -320 tolerance = 1e-4 # default value of 1e-1 set by iminuit can be problematic -321 if 'tol' in kwargs: -322 tolerance = kwargs.get('tol') -323 fit_result = iminuit.minimize(chisqfunc_uncorr, x0, tol=tolerance) # Stopping criterion 0.002 * tol * errordef -324 if kwargs.get('correlated_fit') is True: -325 fit_result = iminuit.minimize(chisqfunc, fit_result.x, tol=tolerance) -326 output.iterations = fit_result.nfev -327 else: -328 tolerance = 1e-12 -329 if 'tol' in kwargs: -330 tolerance = kwargs.get('tol') -331 fit_result = scipy.optimize.minimize(chisqfunc_uncorr, x0, method=kwargs.get('method'), tol=tolerance) -332 if kwargs.get('correlated_fit') is True: -333 fit_result = scipy.optimize.minimize(chisqfunc, fit_result.x, method=kwargs.get('method'), tol=tolerance) -334 output.iterations = fit_result.nit -335 -336 chisquare = fit_result.fun -337 -338 else: -339 if 'tol' in kwargs: -340 print('tol cannot be set for Levenberg-Marquardt') +314 def chisqfunc(p): +315 return anp.sum(general_chisqfunc(p, y_f, p_f) ** 2) +316 else: +317 general_chisqfunc = general_chisqfunc_uncorr +318 chisqfunc = chisqfunc_uncorr +319 +320 output.method = kwargs.get('method', 'Levenberg-Marquardt') +321 if not silent: +322 print('Method:', output.method) +323 +324 if output.method != 'Levenberg-Marquardt': +325 if output.method == 'migrad': +326 tolerance = 1e-4 # default value of 1e-1 set by iminuit can be problematic +327 if 'tol' in kwargs: +328 tolerance = kwargs.get('tol') +329 fit_result = iminuit.minimize(chisqfunc_uncorr, x0, tol=tolerance) # Stopping criterion 0.002 * tol * errordef +330 if kwargs.get('correlated_fit') is True: +331 fit_result = iminuit.minimize(chisqfunc, fit_result.x, tol=tolerance) +332 output.iterations = fit_result.nfev +333 else: +334 tolerance = 1e-12 +335 if 'tol' in kwargs: +336 tolerance = kwargs.get('tol') +337 fit_result = 
scipy.optimize.minimize(chisqfunc_uncorr, x0, method=kwargs.get('method'), tol=tolerance) +338 if kwargs.get('correlated_fit') is True: +339 fit_result = scipy.optimize.minimize(chisqfunc, fit_result.x, method=kwargs.get('method'), tol=tolerance) +340 output.iterations = fit_result.nit 341 -342 def chisqfunc_residuals_uncorr(p): -343 return general_chisqfunc_uncorr(p, y_f, p_f) -344 -345 fit_result = scipy.optimize.least_squares(chisqfunc_residuals_uncorr, x0, method='lm', ftol=1e-15, gtol=1e-15, xtol=1e-15) -346 if kwargs.get('correlated_fit') is True: +342 chisquare = fit_result.fun +343 +344 else: +345 if 'tol' in kwargs: +346 print('tol cannot be set for Levenberg-Marquardt') 347 -348 def chisqfunc_residuals(p): -349 return general_chisqfunc(p, y_f, p_f) +348 def chisqfunc_residuals_uncorr(p): +349 return general_chisqfunc_uncorr(p, y_f, p_f) 350 -351 fit_result = scipy.optimize.least_squares(chisqfunc_residuals, fit_result.x, method='lm', ftol=1e-15, gtol=1e-15, xtol=1e-15) -352 -353 chisquare = np.sum(fit_result.fun ** 2) -354 assert np.isclose(chisquare, chisqfunc(fit_result.x), atol=1e-14) -355 -356 output.iterations = fit_result.nfev -357 -358 if not fit_result.success: -359 raise Exception('The minimization procedure did not converge.') -360 -361 output.chisquare = chisquare -362 output.dof = x_all.shape[-1] - n_parms + len(loc_priors) -363 output.p_value = 1 - scipy.stats.chi2.cdf(output.chisquare, output.dof) -364 if output.dof > 0: -365 output.chisquare_by_dof = output.chisquare / output.dof -366 else: -367 output.chisquare_by_dof = float('nan') -368 -369 output.message = fit_result.message -370 if not silent: -371 print(fit_result.message) -372 print('chisquare/d.o.f.:', output.chisquare_by_dof) -373 print('fit parameters', fit_result.x) +351 fit_result = scipy.optimize.least_squares(chisqfunc_residuals_uncorr, x0, method='lm', ftol=1e-15, gtol=1e-15, xtol=1e-15) +352 if kwargs.get('correlated_fit') is True: +353 +354 def chisqfunc_residuals(p): +355 return general_chisqfunc(p, y_f, p_f) +356 +357 fit_result = scipy.optimize.least_squares(chisqfunc_residuals, fit_result.x, method='lm', ftol=1e-15, gtol=1e-15, xtol=1e-15) +358 +359 chisquare = np.sum(fit_result.fun ** 2) +360 assert np.isclose(chisquare, chisqfunc(fit_result.x), atol=1e-14) +361 +362 output.iterations = fit_result.nfev +363 +364 if not fit_result.success: +365 raise Exception('The minimization procedure did not converge.') +366 +367 output.chisquare = chisquare +368 output.dof = x_all.shape[-1] - n_parms + len(loc_priors) +369 output.p_value = 1 - scipy.stats.chi2.cdf(output.chisquare, output.dof) +370 if output.dof > 0: +371 output.chisquare_by_dof = output.chisquare / output.dof +372 else: +373 output.chisquare_by_dof = float('nan') 374 -375 def prepare_hat_matrix(): -376 hat_vector = [] -377 for key in key_ls: -378 if (len(xd[key]) != 0): -379 hat_vector.append(jacobian(funcd[key])(fit_result.x, xd[key])) -380 hat_vector = [item for sublist in hat_vector for item in sublist] -381 return hat_vector -382 -383 if kwargs.get('expected_chisquare') is True: -384 if kwargs.get('correlated_fit') is not True: -385 W = np.diag(1 / np.asarray(dy_f)) -386 cov = covariance(y_all) -387 hat_vector = prepare_hat_matrix() -388 A = W @ hat_vector -389 P_phi = A @ np.linalg.pinv(A.T @ A) @ A.T -390 expected_chisquare = np.trace((np.identity(x_all.shape[-1]) - P_phi) @ W @ cov @ W) -391 output.chisquare_by_expected_chisquare = output.chisquare / expected_chisquare -392 if not silent: -393 print('chisquare/expected_chisquare:', 
output.chisquare_by_expected_chisquare) -394 -395 fitp = fit_result.x -396 -397 try: -398 hess = hessian(chisqfunc)(fitp) -399 except TypeError: -400 raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None -401 -402 len_y = len(y_f) -403 -404 def chisqfunc_compact(d): -405 return anp.sum(general_chisqfunc(d[:n_parms], d[n_parms: n_parms + len_y], d[n_parms + len_y:]) ** 2) -406 -407 jac_jac_y = hessian(chisqfunc_compact)(np.concatenate((fitp, y_f, p_f))) -408 -409 # Compute hess^{-1} @ jac_jac_y[:n_parms + m, n_parms + m:] using LAPACK dgesv -410 try: -411 deriv_y = -scipy.linalg.solve(hess, jac_jac_y[:n_parms, n_parms:]) -412 except np.linalg.LinAlgError: -413 raise Exception("Cannot invert hessian matrix.") +375 output.message = fit_result.message +376 if not silent: +377 print(fit_result.message) +378 print('chisquare/d.o.f.:', output.chisquare_by_dof) +379 print('fit parameters', fit_result.x) +380 +381 def prepare_hat_matrix(): +382 hat_vector = [] +383 for key in key_ls: +384 if (len(xd[key]) != 0): +385 hat_vector.append(jacobian(funcd[key])(fit_result.x, xd[key])) +386 hat_vector = [item for sublist in hat_vector for item in sublist] +387 return hat_vector +388 +389 if kwargs.get('expected_chisquare') is True: +390 if kwargs.get('correlated_fit') is not True: +391 W = np.diag(1 / np.asarray(dy_f)) +392 cov = covariance(y_all) +393 hat_vector = prepare_hat_matrix() +394 A = W @ hat_vector +395 P_phi = A @ np.linalg.pinv(A.T @ A) @ A.T +396 expected_chisquare = np.trace((np.identity(x_all.shape[-1]) - P_phi) @ W @ cov @ W) +397 output.chisquare_by_expected_chisquare = output.chisquare / expected_chisquare +398 if not silent: +399 print('chisquare/expected_chisquare:', output.chisquare_by_expected_chisquare) +400 +401 fitp = fit_result.x +402 +403 try: +404 hess = hessian(chisqfunc)(fitp) +405 except TypeError: +406 raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None +407 +408 len_y = len(y_f) +409 +410 def chisqfunc_compact(d): +411 return anp.sum(general_chisqfunc(d[:n_parms], d[n_parms: n_parms + len_y], d[n_parms + len_y:]) ** 2) +412 +413 jac_jac_y = hessian(chisqfunc_compact)(np.concatenate((fitp, y_f, p_f))) 414 -415 result = [] -416 for i in range(n_parms): -417 result.append(derived_observable(lambda x_all, **kwargs: (x_all[0] + np.finfo(np.float64).eps) / (y_all[0].value + np.finfo(np.float64).eps) * fitp[i], list(y_all) + loc_priors, man_grad=list(deriv_y[i]))) -418 -419 output.fit_parameters = result +415 # Compute hess^{-1} @ jac_jac_y[:n_parms + m, n_parms + m:] using LAPACK dgesv +416 try: +417 deriv_y = -scipy.linalg.solve(hess, jac_jac_y[:n_parms, n_parms:]) +418 except np.linalg.LinAlgError: +419 raise Exception("Cannot invert hessian matrix.") 420 -421 # Hotelling t-squared p-value for correlated fits. 
-422 if kwargs.get('correlated_fit') is True: -423 n_cov = np.min(np.vectorize(lambda x_all: x_all.N)(y_all)) -424 output.t2_p_value = 1 - scipy.stats.f.cdf((n_cov - output.dof) / (output.dof * (n_cov - 1)) * output.chisquare, -425 output.dof, n_cov - output.dof) +421 result = [] +422 for i in range(n_parms): +423 result.append(derived_observable(lambda x_all, **kwargs: (x_all[0] + np.finfo(np.float64).eps) / (y_all[0].value + np.finfo(np.float64).eps) * fitp[i], list(y_all) + loc_priors, man_grad=list(deriv_y[i]))) +424 +425 output.fit_parameters = result 426 -427 if kwargs.get('resplot') is True: -428 for key in key_ls: -429 residual_plot(xd[key], yd[key], funcd[key], result, title=key) -430 -431 if kwargs.get('qqplot') is True: -432 for key in key_ls: -433 qqplot(xd[key], yd[key], funcd[key], result, title=key) -434 -435 return output +427 # Hotelling t-squared p-value for correlated fits. +428 if kwargs.get('correlated_fit') is True: +429 n_cov = np.min(np.vectorize(lambda x_all: x_all.N)(y_all)) +430 output.t2_p_value = 1 - scipy.stats.f.cdf((n_cov - output.dof) / (output.dof * (n_cov - 1)) * output.chisquare, +431 output.dof, n_cov - output.dof) +432 +433 if kwargs.get('resplot') is True: +434 for key in key_ls: +435 residual_plot(xd[key], yd[key], funcd[key], result, title=key) 436 -437 -438def total_least_squares(x, y, func, silent=False, **kwargs): -439 r'''Performs a non-linear fit to y = func(x) and returns a list of Obs corresponding to the fit parameters. +437 if kwargs.get('qqplot') is True: +438 for key in key_ls: +439 qqplot(xd[key], yd[key], funcd[key], result, title=key) 440 -441 Parameters -442 ---------- -443 x : list -444 list of Obs, or a tuple of lists of Obs -445 y : list -446 list of Obs. The dvalues of the Obs are used as x- and yerror for the fit. -447 func : object -448 func has to be of the form -449 -450 ```python -451 import autograd.numpy as anp -452 -453 def func(a, x): -454 return a[0] + a[1] * x + a[2] * anp.sinh(x) -455 ``` -456 -457 For multiple x values func can be of the form +441 return output +442 +443 +444def total_least_squares(x, y, func, silent=False, **kwargs): +445 r'''Performs a non-linear fit to y = func(x) and returns a list of Obs corresponding to the fit parameters. +446 +447 Parameters +448 ---------- +449 x : list +450 list of Obs, or a tuple of lists of Obs +451 y : list +452 list of Obs. The dvalues of the Obs are used as x- and yerror for the fit. +453 func : object +454 func has to be of the form +455 +456 ```python +457 import autograd.numpy as anp 458 -459 ```python -460 def func(a, x): -461 (x1, x2) = x -462 return a[0] * x1 ** 2 + a[1] * x2 -463 ``` +459 def func(a, x): +460 return a[0] + a[1] * x + a[2] * anp.sinh(x) +461 ``` +462 +463 For multiple x values func can be of the form 464 -465 It is important that all numpy functions refer to autograd.numpy, otherwise the differentiation -466 will not work. -467 silent : bool, optional -468 If true all output to the console is omitted (default False). -469 initial_guess : list -470 can provide an initial guess for the input parameters. Relevant for non-linear -471 fits with many parameters. -472 expected_chisquare : bool -473 If true prints the expected chisquare which is -474 corrected by effects caused by correlated input data. -475 This can take a while as the full correlation matrix -476 has to be calculated (default False). -477 num_grad : bool -478 Use numerical differentation instead of automatic differentiation to perform the error propagation (default False). 
-479 -480 Notes -481 ----- -482 Based on the orthogonal distance regression module of scipy. -483 -484 Returns -485 ------- -486 output : Fit_result -487 Parameters and information on the fitted result. -488 ''' +465 ```python +466 def func(a, x): +467 (x1, x2) = x +468 return a[0] * x1 ** 2 + a[1] * x2 +469 ``` +470 +471 It is important that all numpy functions refer to autograd.numpy, otherwise the differentiation +472 will not work. +473 silent : bool, optional +474 If true all output to the console is omitted (default False). +475 initial_guess : list +476 can provide an initial guess for the input parameters. Relevant for non-linear +477 fits with many parameters. +478 expected_chisquare : bool +479 If true prints the expected chisquare which is +480 corrected by effects caused by correlated input data. +481 This can take a while as the full correlation matrix +482 has to be calculated (default False). +483 num_grad : bool +484 Use numerical differentation instead of automatic differentiation to perform the error propagation (default False). +485 +486 Notes +487 ----- +488 Based on the orthogonal distance regression module of scipy. 489 -490 output = Fit_result() -491 -492 output.fit_function = func -493 -494 x = np.array(x) +490 Returns +491 ------- +492 output : Fit_result +493 Parameters and information on the fitted result. +494 ''' 495 -496 x_shape = x.shape +496 output = Fit_result() 497 -498 if kwargs.get('num_grad') is True: -499 jacobian = num_jacobian -500 hessian = num_hessian -501 else: -502 jacobian = auto_jacobian -503 hessian = auto_hessian -504 -505 if not callable(func): -506 raise TypeError('func has to be a function.') -507 -508 for i in range(42): -509 try: -510 func(np.arange(i), x.T[0]) -511 except TypeError: -512 continue -513 except IndexError: -514 continue -515 else: -516 break -517 else: -518 raise RuntimeError("Fit function is not valid.") -519 -520 n_parms = i -521 if not silent: -522 print('Fit with', n_parms, 'parameter' + 's' * (n_parms > 1)) -523 -524 x_f = np.vectorize(lambda o: o.value)(x) -525 dx_f = np.vectorize(lambda o: o.dvalue)(x) -526 y_f = np.array([o.value for o in y]) -527 dy_f = np.array([o.dvalue for o in y]) -528 -529 if np.any(np.asarray(dx_f) <= 0.0): -530 raise Exception('No x errors available, run the gamma method first.') -531 -532 if np.any(np.asarray(dy_f) <= 0.0): -533 raise Exception('No y errors available, run the gamma method first.') +498 output.fit_function = func +499 +500 x = np.array(x) +501 +502 x_shape = x.shape +503 +504 if kwargs.get('num_grad') is True: +505 jacobian = num_jacobian +506 hessian = num_hessian +507 else: +508 jacobian = auto_jacobian +509 hessian = auto_hessian +510 +511 if not callable(func): +512 raise TypeError('func has to be a function.') +513 +514 for i in range(42): +515 try: +516 func(np.arange(i), x.T[0]) +517 except TypeError: +518 continue +519 except IndexError: +520 continue +521 else: +522 break +523 else: +524 raise RuntimeError("Fit function is not valid.") +525 +526 n_parms = i +527 if not silent: +528 print('Fit with', n_parms, 'parameter' + 's' * (n_parms > 1)) +529 +530 x_f = np.vectorize(lambda o: o.value)(x) +531 dx_f = np.vectorize(lambda o: o.dvalue)(x) +532 y_f = np.array([o.value for o in y]) +533 dy_f = np.array([o.dvalue for o in y]) 534 -535 if 'initial_guess' in kwargs: -536 x0 = kwargs.get('initial_guess') -537 if len(x0) != n_parms: -538 raise Exception('Initial guess does not have the correct length: %d vs. 
%d' % (len(x0), n_parms)) -539 else: -540 x0 = [1] * n_parms -541 -542 data = RealData(x_f, y_f, sx=dx_f, sy=dy_f) -543 model = Model(func) -544 odr = ODR(data, model, x0, partol=np.finfo(np.float64).eps) -545 odr.set_job(fit_type=0, deriv=1) -546 out = odr.run() +535 if np.any(np.asarray(dx_f) <= 0.0): +536 raise Exception('No x errors available, run the gamma method first.') +537 +538 if np.any(np.asarray(dy_f) <= 0.0): +539 raise Exception('No y errors available, run the gamma method first.') +540 +541 if 'initial_guess' in kwargs: +542 x0 = kwargs.get('initial_guess') +543 if len(x0) != n_parms: +544 raise Exception('Initial guess does not have the correct length: %d vs. %d' % (len(x0), n_parms)) +545 else: +546 x0 = [1] * n_parms 547 -548 output.residual_variance = out.res_var -549 -550 output.method = 'ODR' -551 -552 output.message = out.stopreason +548 data = RealData(x_f, y_f, sx=dx_f, sy=dy_f) +549 model = Model(func) +550 odr = ODR(data, model, x0, partol=np.finfo(np.float64).eps) +551 odr.set_job(fit_type=0, deriv=1) +552 out = odr.run() 553 -554 output.xplus = out.xplus +554 output.residual_variance = out.res_var 555 -556 if not silent: -557 print('Method: ODR') -558 print(*out.stopreason) -559 print('Residual variance:', output.residual_variance) -560 -561 if out.info > 3: -562 raise Exception('The minimization procedure did not converge.') -563 -564 m = x_f.size -565 -566 def odr_chisquare(p): -567 model = func(p[:n_parms], p[n_parms:].reshape(x_shape)) -568 chisq = anp.sum(((y_f - model) / dy_f) ** 2) + anp.sum(((x_f - p[n_parms:].reshape(x_shape)) / dx_f) ** 2) -569 return chisq -570 -571 if kwargs.get('expected_chisquare') is True: -572 W = np.diag(1 / np.asarray(np.concatenate((dy_f.ravel(), dx_f.ravel())))) -573 -574 if kwargs.get('covariance') is not None: -575 cov = kwargs.get('covariance') -576 else: -577 cov = covariance(np.concatenate((y, x.ravel()))) -578 -579 number_of_x_parameters = int(m / x_f.shape[-1]) -580 -581 old_jac = jacobian(func)(out.beta, out.xplus) -582 fused_row1 = np.concatenate((old_jac, np.concatenate((number_of_x_parameters * [np.zeros(old_jac.shape)]), axis=0))) -583 fused_row2 = np.concatenate((jacobian(lambda x, y: func(y, x))(out.xplus, out.beta).reshape(x_f.shape[-1], x_f.shape[-1] * number_of_x_parameters), np.identity(number_of_x_parameters * old_jac.shape[0]))) -584 new_jac = np.concatenate((fused_row1, fused_row2), axis=1) -585 -586 A = W @ new_jac -587 P_phi = A @ np.linalg.pinv(A.T @ A) @ A.T -588 expected_chisquare = np.trace((np.identity(P_phi.shape[0]) - P_phi) @ W @ cov @ W) -589 if expected_chisquare <= 0.0: -590 warnings.warn("Negative expected_chisquare.", RuntimeWarning) -591 expected_chisquare = np.abs(expected_chisquare) -592 output.chisquare_by_expected_chisquare = odr_chisquare(np.concatenate((out.beta, out.xplus.ravel()))) / expected_chisquare -593 if not silent: -594 print('chisquare/expected_chisquare:', -595 output.chisquare_by_expected_chisquare) -596 -597 fitp = out.beta -598 try: -599 hess = hessian(odr_chisquare)(np.concatenate((fitp, out.xplus.ravel()))) -600 except TypeError: -601 raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None +556 output.method = 'ODR' +557 +558 output.message = out.stopreason +559 +560 output.xplus = out.xplus +561 +562 if not silent: +563 print('Method: ODR') +564 print(*out.stopreason) +565 print('Residual variance:', output.residual_variance) +566 +567 if out.info > 3: +568 raise Exception('The 
minimization procedure did not converge.') +569 +570 m = x_f.size +571 +572 def odr_chisquare(p): +573 model = func(p[:n_parms], p[n_parms:].reshape(x_shape)) +574 chisq = anp.sum(((y_f - model) / dy_f) ** 2) + anp.sum(((x_f - p[n_parms:].reshape(x_shape)) / dx_f) ** 2) +575 return chisq +576 +577 if kwargs.get('expected_chisquare') is True: +578 W = np.diag(1 / np.asarray(np.concatenate((dy_f.ravel(), dx_f.ravel())))) +579 +580 if kwargs.get('covariance') is not None: +581 cov = kwargs.get('covariance') +582 else: +583 cov = covariance(np.concatenate((y, x.ravel()))) +584 +585 number_of_x_parameters = int(m / x_f.shape[-1]) +586 +587 old_jac = jacobian(func)(out.beta, out.xplus) +588 fused_row1 = np.concatenate((old_jac, np.concatenate((number_of_x_parameters * [np.zeros(old_jac.shape)]), axis=0))) +589 fused_row2 = np.concatenate((jacobian(lambda x, y: func(y, x))(out.xplus, out.beta).reshape(x_f.shape[-1], x_f.shape[-1] * number_of_x_parameters), np.identity(number_of_x_parameters * old_jac.shape[0]))) +590 new_jac = np.concatenate((fused_row1, fused_row2), axis=1) +591 +592 A = W @ new_jac +593 P_phi = A @ np.linalg.pinv(A.T @ A) @ A.T +594 expected_chisquare = np.trace((np.identity(P_phi.shape[0]) - P_phi) @ W @ cov @ W) +595 if expected_chisquare <= 0.0: +596 warnings.warn("Negative expected_chisquare.", RuntimeWarning) +597 expected_chisquare = np.abs(expected_chisquare) +598 output.chisquare_by_expected_chisquare = odr_chisquare(np.concatenate((out.beta, out.xplus.ravel()))) / expected_chisquare +599 if not silent: +600 print('chisquare/expected_chisquare:', +601 output.chisquare_by_expected_chisquare) 602 -603 def odr_chisquare_compact_x(d): -604 model = func(d[:n_parms], d[n_parms:n_parms + m].reshape(x_shape)) -605 chisq = anp.sum(((y_f - model) / dy_f) ** 2) + anp.sum(((d[n_parms + m:].reshape(x_shape) - d[n_parms:n_parms + m].reshape(x_shape)) / dx_f) ** 2) -606 return chisq -607 -608 jac_jac_x = hessian(odr_chisquare_compact_x)(np.concatenate((fitp, out.xplus.ravel(), x_f.ravel()))) -609 -610 # Compute hess^{-1} @ jac_jac_x[:n_parms + m, n_parms + m:] using LAPACK dgesv -611 try: -612 deriv_x = -scipy.linalg.solve(hess, jac_jac_x[:n_parms + m, n_parms + m:]) -613 except np.linalg.LinAlgError: -614 raise Exception("Cannot invert hessian matrix.") +603 fitp = out.beta +604 try: +605 hess = hessian(odr_chisquare)(np.concatenate((fitp, out.xplus.ravel()))) +606 except TypeError: +607 raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None +608 +609 def odr_chisquare_compact_x(d): +610 model = func(d[:n_parms], d[n_parms:n_parms + m].reshape(x_shape)) +611 chisq = anp.sum(((y_f - model) / dy_f) ** 2) + anp.sum(((d[n_parms + m:].reshape(x_shape) - d[n_parms:n_parms + m].reshape(x_shape)) / dx_f) ** 2) +612 return chisq +613 +614 jac_jac_x = hessian(odr_chisquare_compact_x)(np.concatenate((fitp, out.xplus.ravel(), x_f.ravel()))) 615 -616 def odr_chisquare_compact_y(d): -617 model = func(d[:n_parms], d[n_parms:n_parms + m].reshape(x_shape)) -618 chisq = anp.sum(((d[n_parms + m:] - model) / dy_f) ** 2) + anp.sum(((x_f - d[n_parms:n_parms + m].reshape(x_shape)) / dx_f) ** 2) -619 return chisq -620 -621 jac_jac_y = hessian(odr_chisquare_compact_y)(np.concatenate((fitp, out.xplus.ravel(), y_f))) -622 -623 # Compute hess^{-1} @ jac_jac_y[:n_parms + m, n_parms + m:] using LAPACK dgesv -624 try: -625 deriv_y = -scipy.linalg.solve(hess, jac_jac_y[:n_parms + m, n_parms + m:]) -626 except np.linalg.LinAlgError: 
-627 raise Exception("Cannot invert hessian matrix.") +616 # Compute hess^{-1} @ jac_jac_x[:n_parms + m, n_parms + m:] using LAPACK dgesv +617 try: +618 deriv_x = -scipy.linalg.solve(hess, jac_jac_x[:n_parms + m, n_parms + m:]) +619 except np.linalg.LinAlgError: +620 raise Exception("Cannot invert hessian matrix.") +621 +622 def odr_chisquare_compact_y(d): +623 model = func(d[:n_parms], d[n_parms:n_parms + m].reshape(x_shape)) +624 chisq = anp.sum(((d[n_parms + m:] - model) / dy_f) ** 2) + anp.sum(((x_f - d[n_parms:n_parms + m].reshape(x_shape)) / dx_f) ** 2) +625 return chisq +626 +627 jac_jac_y = hessian(odr_chisquare_compact_y)(np.concatenate((fitp, out.xplus.ravel(), y_f))) 628 -629 result = [] -630 for i in range(n_parms): -631 result.append(derived_observable(lambda my_var, **kwargs: (my_var[0] + np.finfo(np.float64).eps) / (x.ravel()[0].value + np.finfo(np.float64).eps) * out.beta[i], list(x.ravel()) + list(y), man_grad=list(deriv_x[i]) + list(deriv_y[i]))) -632 -633 output.fit_parameters = result +629 # Compute hess^{-1} @ jac_jac_y[:n_parms + m, n_parms + m:] using LAPACK dgesv +630 try: +631 deriv_y = -scipy.linalg.solve(hess, jac_jac_y[:n_parms + m, n_parms + m:]) +632 except np.linalg.LinAlgError: +633 raise Exception("Cannot invert hessian matrix.") 634 -635 output.odr_chisquare = odr_chisquare(np.concatenate((out.beta, out.xplus.ravel()))) -636 output.dof = x.shape[-1] - n_parms -637 output.p_value = 1 - scipy.stats.chi2.cdf(output.odr_chisquare, output.dof) +635 result = [] +636 for i in range(n_parms): +637 result.append(derived_observable(lambda my_var, **kwargs: (my_var[0] + np.finfo(np.float64).eps) / (x.ravel()[0].value + np.finfo(np.float64).eps) * out.beta[i], list(x.ravel()) + list(y), man_grad=list(deriv_x[i]) + list(deriv_y[i]))) 638 -639 return output +639 output.fit_parameters = result 640 -641 -642def fit_lin(x, y, **kwargs): -643 """Performs a linear fit to y = n + m * x and returns two Obs n, m. +641 output.odr_chisquare = odr_chisquare(np.concatenate((out.beta, out.xplus.ravel()))) +642 output.dof = x.shape[-1] - n_parms +643 output.p_value = 1 - scipy.stats.chi2.cdf(output.odr_chisquare, output.dof) 644 -645 Parameters -646 ---------- -647 x : list -648 Can either be a list of floats in which case no xerror is assumed, or -649 a list of Obs, where the dvalues of the Obs are used as xerror for the fit. -650 y : list -651 List of Obs, the dvalues of the Obs are used as yerror for the fit. -652 -653 Returns -654 ------- -655 fit_parameters : list[Obs] -656 LIist of fitted observables. -657 """ +645 return output +646 +647 +648def fit_lin(x, y, **kwargs): +649 """Performs a linear fit to y = n + m * x and returns two Obs n, m. +650 +651 Parameters +652 ---------- +653 x : list +654 Can either be a list of floats in which case no xerror is assumed, or +655 a list of Obs, where the dvalues of the Obs are used as xerror for the fit. +656 y : list +657 List of Obs, the dvalues of the Obs are used as yerror for the fit. 
658 -659 def f(a, x): -660 y = a[0] + a[1] * x -661 return y -662 -663 if all(isinstance(n, Obs) for n in x): -664 out = total_least_squares(x, y, f, **kwargs) -665 return out.fit_parameters -666 elif all(isinstance(n, float) or isinstance(n, int) for n in x) or isinstance(x, np.ndarray): -667 out = least_squares(x, y, f, **kwargs) -668 return out.fit_parameters -669 else: -670 raise TypeError('Unsupported types for x') -671 -672 -673def qqplot(x, o_y, func, p, title=""): -674 """Generates a quantile-quantile plot of the fit result which can be used to -675 check if the residuals of the fit are gaussian distributed. -676 -677 Returns -678 ------- -679 None -680 """ -681 -682 residuals = [] -683 for i_x, i_y in zip(x, o_y): -684 residuals.append((i_y - func(p, i_x)) / i_y.dvalue) -685 residuals = sorted(residuals) -686 my_y = [o.value for o in residuals] -687 probplot = scipy.stats.probplot(my_y) -688 my_x = probplot[0][0] -689 plt.figure(figsize=(8, 8 / 1.618)) -690 plt.errorbar(my_x, my_y, fmt='o') -691 fit_start = my_x[0] -692 fit_stop = my_x[-1] -693 samples = np.arange(fit_start, fit_stop, 0.01) -694 plt.plot(samples, samples, 'k--', zorder=11, label='Standard normal distribution') -695 plt.plot(samples, probplot[1][0] * samples + probplot[1][1], zorder=10, label='Least squares fit, r=' + str(np.around(probplot[1][2], 3)), marker='', ls='-') -696 -697 plt.xlabel('Theoretical quantiles') -698 plt.ylabel('Ordered Values') -699 plt.legend(title=title) -700 plt.draw() -701 +659 Returns +660 ------- +661 fit_parameters : list[Obs] +662 LIist of fitted observables. +663 """ +664 +665 def f(a, x): +666 y = a[0] + a[1] * x +667 return y +668 +669 if all(isinstance(n, Obs) for n in x): +670 out = total_least_squares(x, y, f, **kwargs) +671 return out.fit_parameters +672 elif all(isinstance(n, float) or isinstance(n, int) for n in x) or isinstance(x, np.ndarray): +673 out = least_squares(x, y, f, **kwargs) +674 return out.fit_parameters +675 else: +676 raise TypeError('Unsupported types for x') +677 +678 +679def qqplot(x, o_y, func, p, title=""): +680 """Generates a quantile-quantile plot of the fit result which can be used to +681 check if the residuals of the fit are gaussian distributed. +682 +683 Returns +684 ------- +685 None +686 """ +687 +688 residuals = [] +689 for i_x, i_y in zip(x, o_y): +690 residuals.append((i_y - func(p, i_x)) / i_y.dvalue) +691 residuals = sorted(residuals) +692 my_y = [o.value for o in residuals] +693 probplot = scipy.stats.probplot(my_y) +694 my_x = probplot[0][0] +695 plt.figure(figsize=(8, 8 / 1.618)) +696 plt.errorbar(my_x, my_y, fmt='o') +697 fit_start = my_x[0] +698 fit_stop = my_x[-1] +699 samples = np.arange(fit_start, fit_stop, 0.01) +700 plt.plot(samples, samples, 'k--', zorder=11, label='Standard normal distribution') +701 plt.plot(samples, probplot[1][0] * samples + probplot[1][1], zorder=10, label='Least squares fit, r=' + str(np.around(probplot[1][2], 3)), marker='', ls='-') 702 -703def residual_plot(x, y, func, fit_res, title=""): -704 """Generates a plot which compares the fit to the data and displays the corresponding residuals -705 -706 For uncorrelated data the residuals are expected to be distributed ~N(0,1). 
+703 plt.xlabel('Theoretical quantiles') +704 plt.ylabel('Ordered Values') +705 plt.legend(title=title) +706 plt.draw() 707 -708 Returns -709 ------- -710 None -711 """ -712 sorted_x = sorted(x) -713 xstart = sorted_x[0] - 0.5 * (sorted_x[1] - sorted_x[0]) -714 xstop = sorted_x[-1] + 0.5 * (sorted_x[-1] - sorted_x[-2]) -715 x_samples = np.arange(xstart, xstop + 0.01, 0.01) -716 -717 plt.figure(figsize=(8, 8 / 1.618)) -718 gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1], wspace=0.0, hspace=0.0) -719 ax0 = plt.subplot(gs[0]) -720 ax0.errorbar(x, [o.value for o in y], yerr=[o.dvalue for o in y], ls='none', fmt='o', capsize=3, markersize=5, label='Data') -721 ax0.plot(x_samples, func([o.value for o in fit_res], x_samples), label='Fit', zorder=10, ls='-', ms=0) -722 ax0.set_xticklabels([]) -723 ax0.set_xlim([xstart, xstop]) -724 ax0.set_xticklabels([]) -725 ax0.legend(title=title) -726 -727 residuals = (np.asarray([o.value for o in y]) - func([o.value for o in fit_res], np.asarray(x))) / np.asarray([o.dvalue for o in y]) -728 ax1 = plt.subplot(gs[1]) -729 ax1.plot(x, residuals, 'ko', ls='none', markersize=5) -730 ax1.tick_params(direction='out') -731 ax1.tick_params(axis="x", bottom=True, top=True, labelbottom=True) -732 ax1.axhline(y=0.0, ls='--', color='k', marker=" ") -733 ax1.fill_between(x_samples, -1.0, 1.0, alpha=0.1, facecolor='k') -734 ax1.set_xlim([xstart, xstop]) -735 ax1.set_ylabel('Residuals') -736 plt.subplots_adjust(wspace=None, hspace=None) -737 plt.draw() -738 -739 -740def error_band(x, func, beta): -741 """Calculate the error band for an array of sample values x, for given fit function func with optimized parameters beta. -742 -743 Returns -744 ------- -745 err : np.array(Obs) -746 Error band for an array of sample values x -747 """ -748 cov = covariance(beta) -749 if np.any(np.abs(cov - cov.T) > 1000 * np.finfo(np.float64).eps): -750 warnings.warn("Covariance matrix is not symmetric within floating point precision", RuntimeWarning) -751 -752 deriv = [] -753 for i, item in enumerate(x): -754 deriv.append(np.array(egrad(func)([o.value for o in beta], item))) -755 -756 err = [] -757 for i, item in enumerate(x): -758 err.append(np.sqrt(deriv[i] @ cov @ deriv[i])) -759 err = np.array(err) -760 -761 return err -762 -763 -764def ks_test(objects=None): -765 """Performs a Kolmogorov–Smirnov test for the p-values of all fit object. +708 +709def residual_plot(x, y, func, fit_res, title=""): +710 """Generates a plot which compares the fit to the data and displays the corresponding residuals +711 +712 For uncorrelated data the residuals are expected to be distributed ~N(0,1). 
+713 +714 Returns +715 ------- +716 None +717 """ +718 sorted_x = sorted(x) +719 xstart = sorted_x[0] - 0.5 * (sorted_x[1] - sorted_x[0]) +720 xstop = sorted_x[-1] + 0.5 * (sorted_x[-1] - sorted_x[-2]) +721 x_samples = np.arange(xstart, xstop + 0.01, 0.01) +722 +723 plt.figure(figsize=(8, 8 / 1.618)) +724 gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1], wspace=0.0, hspace=0.0) +725 ax0 = plt.subplot(gs[0]) +726 ax0.errorbar(x, [o.value for o in y], yerr=[o.dvalue for o in y], ls='none', fmt='o', capsize=3, markersize=5, label='Data') +727 ax0.plot(x_samples, func([o.value for o in fit_res], x_samples), label='Fit', zorder=10, ls='-', ms=0) +728 ax0.set_xticklabels([]) +729 ax0.set_xlim([xstart, xstop]) +730 ax0.set_xticklabels([]) +731 ax0.legend(title=title) +732 +733 residuals = (np.asarray([o.value for o in y]) - func([o.value for o in fit_res], np.asarray(x))) / np.asarray([o.dvalue for o in y]) +734 ax1 = plt.subplot(gs[1]) +735 ax1.plot(x, residuals, 'ko', ls='none', markersize=5) +736 ax1.tick_params(direction='out') +737 ax1.tick_params(axis="x", bottom=True, top=True, labelbottom=True) +738 ax1.axhline(y=0.0, ls='--', color='k', marker=" ") +739 ax1.fill_between(x_samples, -1.0, 1.0, alpha=0.1, facecolor='k') +740 ax1.set_xlim([xstart, xstop]) +741 ax1.set_ylabel('Residuals') +742 plt.subplots_adjust(wspace=None, hspace=None) +743 plt.draw() +744 +745 +746def error_band(x, func, beta): +747 """Calculate the error band for an array of sample values x, for given fit function func with optimized parameters beta. +748 +749 Returns +750 ------- +751 err : np.array(Obs) +752 Error band for an array of sample values x +753 """ +754 cov = covariance(beta) +755 if np.any(np.abs(cov - cov.T) > 1000 * np.finfo(np.float64).eps): +756 warnings.warn("Covariance matrix is not symmetric within floating point precision", RuntimeWarning) +757 +758 deriv = [] +759 for i, item in enumerate(x): +760 deriv.append(np.array(egrad(func)([o.value for o in beta], item))) +761 +762 err = [] +763 for i, item in enumerate(x): +764 err.append(np.sqrt(deriv[i] @ cov @ deriv[i])) +765 err = np.array(err) 766 -767 Parameters -768 ---------- -769 objects : list -770 List of fit results to include in the analysis (optional). -771 -772 Returns -773 ------- -774 None -775 """ -776 -777 if objects is None: -778 obs_list = [] -779 for obj in gc.get_objects(): -780 if isinstance(obj, Fit_result): -781 obs_list.append(obj) -782 else: -783 obs_list = objects -784 -785 p_values = [o.p_value for o in obs_list] -786 -787 bins = len(p_values) -788 x = np.arange(0, 1.001, 0.001) -789 plt.plot(x, x, 'k', zorder=1) -790 plt.xlim(0, 1) -791 plt.ylim(0, 1) -792 plt.xlabel('p-value') -793 plt.ylabel('Cumulative probability') -794 plt.title(str(bins) + ' p-values') -795 -796 n = np.arange(1, bins + 1) / np.float64(bins) -797 Xs = np.sort(p_values) -798 plt.step(Xs, n) -799 diffs = n - Xs -800 loc_max_diff = np.argmax(np.abs(diffs)) -801 loc = Xs[loc_max_diff] -802 plt.annotate('', xy=(loc, loc), xytext=(loc, loc + diffs[loc_max_diff]), arrowprops=dict(arrowstyle='<->', shrinkA=0, shrinkB=0)) -803 plt.draw() -804 -805 print(scipy.stats.kstest(p_values, 'uniform')) -806 -807 -808def _extract_val_and_dval(string): -809 split_string = string.split('(') -810 if '.' in split_string[0] and '.' 
not in split_string[1][:-1]: -811 factor = 10 ** -len(split_string[0].partition('.')[2]) -812 else: -813 factor = 1 -814 return float(split_string[0]), float(split_string[1][:-1]) * factor -815 -816 -817def _construct_prior_obs(i_prior, i_n): -818 if isinstance(i_prior, Obs): -819 return i_prior -820 elif isinstance(i_prior, str): -821 loc_val, loc_dval = _extract_val_and_dval(i_prior) -822 return cov_Obs(loc_val, loc_dval ** 2, '#prior' + str(i_n) + f"_{np.random.randint(2147483647):010d}") -823 else: -824 raise TypeError("Prior entries need to be 'Obs' or 'str'.") +767 return err +768 +769 +770def ks_test(objects=None): +771 """Performs a Kolmogorov–Smirnov test for the p-values of all fit object. +772 +773 Parameters +774 ---------- +775 objects : list +776 List of fit results to include in the analysis (optional). +777 +778 Returns +779 ------- +780 None +781 """ +782 +783 if objects is None: +784 obs_list = [] +785 for obj in gc.get_objects(): +786 if isinstance(obj, Fit_result): +787 obs_list.append(obj) +788 else: +789 obs_list = objects +790 +791 p_values = [o.p_value for o in obs_list] +792 +793 bins = len(p_values) +794 x = np.arange(0, 1.001, 0.001) +795 plt.plot(x, x, 'k', zorder=1) +796 plt.xlim(0, 1) +797 plt.ylim(0, 1) +798 plt.xlabel('p-value') +799 plt.ylabel('Cumulative probability') +800 plt.title(str(bins) + ' p-values') +801 +802 n = np.arange(1, bins + 1) / np.float64(bins) +803 Xs = np.sort(p_values) +804 plt.step(Xs, n) +805 diffs = n - Xs +806 loc_max_diff = np.argmax(np.abs(diffs)) +807 loc = Xs[loc_max_diff] +808 plt.annotate('', xy=(loc, loc), xytext=(loc, loc + diffs[loc_max_diff]), arrowprops=dict(arrowstyle='<->', shrinkA=0, shrinkB=0)) +809 plt.draw() +810 +811 print(scipy.stats.kstest(p_values, 'uniform')) +812 +813 +814def _extract_val_and_dval(string): +815 split_string = string.split('(') +816 if '.' in split_string[0] and '.' not in split_string[1][:-1]: +817 factor = 10 ** -len(split_string[0].partition('.')[2]) +818 else: +819 factor = 1 +820 return float(split_string[0]), float(split_string[1][:-1]) * factor +821 +822 +823def _construct_prior_obs(i_prior, i_n): +824 if isinstance(i_prior, Obs): +825 return i_prior +826 elif isinstance(i_prior, str): +827 loc_val, loc_dval = _extract_val_and_dval(i_prior) +828 return cov_Obs(loc_val, loc_dval ** 2, '#prior' + str(i_n) + f"_{np.random.randint(2147483647):010d}") +829 else: +830 raise TypeError("Prior entries need to be 'Obs' or 'str'.") @@ -1257,209 +1263,215 @@ Hotelling t-squared p-value for correlated fits. 
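(Editor's note, not part of the diff: the hunk above covers the prior handling, the Cholesky-whitened chi-square for correlated fits and the error propagation of `least_squares`. A minimal usage sketch, assuming the usual pyerrors entry points `pe.pseudo_Obs`, `Obs.gamma_method` and `pe.fits.least_squares` and made-up data, could look like this:)

```python
import autograd.numpy as anp  # fit functions have to use autograd.numpy, see the docstrings above
import pyerrors as pe


def func(a, x):
    # simple exponential model; a[0] is the amplitude, a[1] the decay rate
    return a[0] * anp.exp(-a[1] * x)


x = [1.0, 2.0, 3.0, 4.0]
y = [pe.pseudo_Obs(v, 0.05, 'ens1') for v in [0.90, 0.42, 0.18, 0.09]]
for o in y:
    o.gamma_method()  # y errors have to be available before fitting

# priors can be passed as a dict {parameter position: prior}; strings like
# '0.80(20)' are parsed by _construct_prior_obs shown above (values are illustrative)
res = pe.fits.least_squares(x, y, func, priors={1: '0.80(20)'})
res.gamma_method()
print(res)
```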
231 n_parms_ls.append(n_loc) 232 233 n_parms = max(n_parms_ls) -234 if not silent: -235 print('Fit with', n_parms, 'parameter' + 's' * (n_parms > 1)) -236 -237 if priors is not None: -238 if isinstance(priors, (list, np.ndarray)): -239 if n_parms != len(priors): -240 raise ValueError("'priors' does not have the correct length.") -241 -242 loc_priors = [] -243 for i_n, i_prior in enumerate(priors): -244 loc_priors.append(_construct_prior_obs(i_prior, i_n)) -245 -246 prior_mask = np.arange(len(priors)) -247 output.priors = loc_priors -248 -249 elif isinstance(priors, dict): -250 loc_priors = [] -251 prior_mask = [] -252 output.priors = {} -253 for pos, prior in priors.items(): -254 if isinstance(pos, int): -255 prior_mask.append(pos) -256 else: -257 raise TypeError("Prior position needs to be an integer.") -258 loc_priors.append(_construct_prior_obs(prior, pos)) -259 -260 output.priors[pos] = loc_priors[-1] -261 if max(prior_mask) >= n_parms: -262 raise ValueError("Prior position out of range.") -263 else: -264 raise TypeError("Unkown type for `priors`.") +234 +235 if len(key_ls) > 1: +236 for key in key_ls: +237 if np.asarray(yd[key]).shape != funcd[key](np.arange(n_parms), xd[key]).shape: +238 raise ValueError(f"Fit function {key} returns the wrong shape ({funcd[key](np.arange(n_parms), xd[key]).shape} instead of {xd[key].shape})\nIf the fit function is just a constant you could try adding x*0 to get the correct shape.") +239 +240 if not silent: +241 print('Fit with', n_parms, 'parameter' + 's' * (n_parms > 1)) +242 +243 if priors is not None: +244 if isinstance(priors, (list, np.ndarray)): +245 if n_parms != len(priors): +246 raise ValueError("'priors' does not have the correct length.") +247 +248 loc_priors = [] +249 for i_n, i_prior in enumerate(priors): +250 loc_priors.append(_construct_prior_obs(i_prior, i_n)) +251 +252 prior_mask = np.arange(len(priors)) +253 output.priors = loc_priors +254 +255 elif isinstance(priors, dict): +256 loc_priors = [] +257 prior_mask = [] +258 output.priors = {} +259 for pos, prior in priors.items(): +260 if isinstance(pos, int): +261 prior_mask.append(pos) +262 else: +263 raise TypeError("Prior position needs to be an integer.") +264 loc_priors.append(_construct_prior_obs(prior, pos)) 265 -266 p_f = [o.value for o in loc_priors] -267 dp_f = [o.dvalue for o in loc_priors] -268 if np.any(np.asarray(dp_f) <= 0.0): -269 raise Exception("No prior errors available, run the gamma method first.") -270 else: -271 p_f = dp_f = np.array([]) -272 prior_mask = [] -273 loc_priors = [] -274 -275 if 'initial_guess' in kwargs: -276 x0 = kwargs.get('initial_guess') -277 if len(x0) != n_parms: -278 raise ValueError('Initial guess does not have the correct length: %d vs. 
%d' % (len(x0), n_parms)) -279 else: -280 x0 = [0.1] * n_parms -281 -282 if priors is None: -283 def general_chisqfunc_uncorr(p, ivars, pr): -284 model = anp.concatenate([anp.array(funcd[key](p, xd[key])).reshape(-1) for key in key_ls]) -285 return (ivars - model) / dy_f -286 else: -287 def general_chisqfunc_uncorr(p, ivars, pr): -288 model = anp.concatenate([anp.array(funcd[key](p, xd[key])).reshape(-1) for key in key_ls]) -289 return anp.concatenate(((ivars - model) / dy_f, (p[prior_mask] - pr) / dp_f)) -290 -291 def chisqfunc_uncorr(p): -292 return anp.sum(general_chisqfunc_uncorr(p, y_f, p_f) ** 2) -293 -294 if kwargs.get('correlated_fit') is True: -295 corr = covariance(y_all, correlation=True, **kwargs) -296 covdiag = np.diag(1 / np.asarray(dy_f)) -297 condn = np.linalg.cond(corr) -298 if condn > 0.1 / np.finfo(float).eps: -299 raise Exception(f"Cannot invert correlation matrix as its condition number exceeds machine precision ({condn:1.2e})") -300 if condn > 1e13: -301 warnings.warn("Correlation matrix may be ill-conditioned, condition number: {%1.2e}" % (condn), RuntimeWarning) -302 chol = np.linalg.cholesky(corr) -303 chol_inv = scipy.linalg.solve_triangular(chol, covdiag, lower=True) -304 -305 def general_chisqfunc(p, ivars, pr): -306 model = anp.concatenate([anp.array(funcd[key](p, xd[key])).reshape(-1) for key in key_ls]) -307 return anp.concatenate((anp.dot(chol_inv, (ivars - model)), (p[prior_mask] - pr) / dp_f)) -308 -309 def chisqfunc(p): -310 return anp.sum(general_chisqfunc(p, y_f, p_f) ** 2) -311 else: -312 general_chisqfunc = general_chisqfunc_uncorr -313 chisqfunc = chisqfunc_uncorr +266 output.priors[pos] = loc_priors[-1] +267 if max(prior_mask) >= n_parms: +268 raise ValueError("Prior position out of range.") +269 else: +270 raise TypeError("Unkown type for `priors`.") +271 +272 p_f = [o.value for o in loc_priors] +273 dp_f = [o.dvalue for o in loc_priors] +274 if np.any(np.asarray(dp_f) <= 0.0): +275 raise Exception("No prior errors available, run the gamma method first.") +276 else: +277 p_f = dp_f = np.array([]) +278 prior_mask = [] +279 loc_priors = [] +280 +281 if 'initial_guess' in kwargs: +282 x0 = kwargs.get('initial_guess') +283 if len(x0) != n_parms: +284 raise ValueError('Initial guess does not have the correct length: %d vs. 
%d' % (len(x0), n_parms)) +285 else: +286 x0 = [0.1] * n_parms +287 +288 if priors is None: +289 def general_chisqfunc_uncorr(p, ivars, pr): +290 model = anp.concatenate([anp.array(funcd[key](p, xd[key])).reshape(-1) for key in key_ls]) +291 return (ivars - model) / dy_f +292 else: +293 def general_chisqfunc_uncorr(p, ivars, pr): +294 model = anp.concatenate([anp.array(funcd[key](p, xd[key])).reshape(-1) for key in key_ls]) +295 return anp.concatenate(((ivars - model) / dy_f, (p[prior_mask] - pr) / dp_f)) +296 +297 def chisqfunc_uncorr(p): +298 return anp.sum(general_chisqfunc_uncorr(p, y_f, p_f) ** 2) +299 +300 if kwargs.get('correlated_fit') is True: +301 corr = covariance(y_all, correlation=True, **kwargs) +302 covdiag = np.diag(1 / np.asarray(dy_f)) +303 condn = np.linalg.cond(corr) +304 if condn > 0.1 / np.finfo(float).eps: +305 raise Exception(f"Cannot invert correlation matrix as its condition number exceeds machine precision ({condn:1.2e})") +306 if condn > 1e13: +307 warnings.warn("Correlation matrix may be ill-conditioned, condition number: {%1.2e}" % (condn), RuntimeWarning) +308 chol = np.linalg.cholesky(corr) +309 chol_inv = scipy.linalg.solve_triangular(chol, covdiag, lower=True) +310 +311 def general_chisqfunc(p, ivars, pr): +312 model = anp.concatenate([anp.array(funcd[key](p, xd[key])).reshape(-1) for key in key_ls]) +313 return anp.concatenate((anp.dot(chol_inv, (ivars - model)), (p[prior_mask] - pr) / dp_f)) 314 -315 output.method = kwargs.get('method', 'Levenberg-Marquardt') -316 if not silent: -317 print('Method:', output.method) -318 -319 if output.method != 'Levenberg-Marquardt': -320 if output.method == 'migrad': -321 tolerance = 1e-4 # default value of 1e-1 set by iminuit can be problematic -322 if 'tol' in kwargs: -323 tolerance = kwargs.get('tol') -324 fit_result = iminuit.minimize(chisqfunc_uncorr, x0, tol=tolerance) # Stopping criterion 0.002 * tol * errordef -325 if kwargs.get('correlated_fit') is True: -326 fit_result = iminuit.minimize(chisqfunc, fit_result.x, tol=tolerance) -327 output.iterations = fit_result.nfev -328 else: -329 tolerance = 1e-12 -330 if 'tol' in kwargs: -331 tolerance = kwargs.get('tol') -332 fit_result = scipy.optimize.minimize(chisqfunc_uncorr, x0, method=kwargs.get('method'), tol=tolerance) -333 if kwargs.get('correlated_fit') is True: -334 fit_result = scipy.optimize.minimize(chisqfunc, fit_result.x, method=kwargs.get('method'), tol=tolerance) -335 output.iterations = fit_result.nit -336 -337 chisquare = fit_result.fun -338 -339 else: -340 if 'tol' in kwargs: -341 print('tol cannot be set for Levenberg-Marquardt') +315 def chisqfunc(p): +316 return anp.sum(general_chisqfunc(p, y_f, p_f) ** 2) +317 else: +318 general_chisqfunc = general_chisqfunc_uncorr +319 chisqfunc = chisqfunc_uncorr +320 +321 output.method = kwargs.get('method', 'Levenberg-Marquardt') +322 if not silent: +323 print('Method:', output.method) +324 +325 if output.method != 'Levenberg-Marquardt': +326 if output.method == 'migrad': +327 tolerance = 1e-4 # default value of 1e-1 set by iminuit can be problematic +328 if 'tol' in kwargs: +329 tolerance = kwargs.get('tol') +330 fit_result = iminuit.minimize(chisqfunc_uncorr, x0, tol=tolerance) # Stopping criterion 0.002 * tol * errordef +331 if kwargs.get('correlated_fit') is True: +332 fit_result = iminuit.minimize(chisqfunc, fit_result.x, tol=tolerance) +333 output.iterations = fit_result.nfev +334 else: +335 tolerance = 1e-12 +336 if 'tol' in kwargs: +337 tolerance = kwargs.get('tol') +338 fit_result = 
scipy.optimize.minimize(chisqfunc_uncorr, x0, method=kwargs.get('method'), tol=tolerance)
+339            if kwargs.get('correlated_fit') is True:
+340                fit_result = scipy.optimize.minimize(chisqfunc, fit_result.x, method=kwargs.get('method'), tol=tolerance)
+341            output.iterations = fit_result.nit
 342
-343        def chisqfunc_residuals_uncorr(p):
-344            return general_chisqfunc_uncorr(p, y_f, p_f)
-345
-346        fit_result = scipy.optimize.least_squares(chisqfunc_residuals_uncorr, x0, method='lm', ftol=1e-15, gtol=1e-15, xtol=1e-15)
-347        if kwargs.get('correlated_fit') is True:
+343        chisquare = fit_result.fun
+344
+345    else:
+346        if 'tol' in kwargs:
+347            print('tol cannot be set for Levenberg-Marquardt')
 348
-349            def chisqfunc_residuals(p):
-350                return general_chisqfunc(p, y_f, p_f)
+349        def chisqfunc_residuals_uncorr(p):
+350            return general_chisqfunc_uncorr(p, y_f, p_f)
 351
-352            fit_result = scipy.optimize.least_squares(chisqfunc_residuals, fit_result.x, method='lm', ftol=1e-15, gtol=1e-15, xtol=1e-15)
-353
-354        chisquare = np.sum(fit_result.fun ** 2)
-355        assert np.isclose(chisquare, chisqfunc(fit_result.x), atol=1e-14)
-356
-357        output.iterations = fit_result.nfev
-358
-359    if not fit_result.success:
-360        raise Exception('The minimization procedure did not converge.')
-361
-362    output.chisquare = chisquare
-363    output.dof = x_all.shape[-1] - n_parms + len(loc_priors)
-364    output.p_value = 1 - scipy.stats.chi2.cdf(output.chisquare, output.dof)
-365    if output.dof > 0:
-366        output.chisquare_by_dof = output.chisquare / output.dof
-367    else:
-368        output.chisquare_by_dof = float('nan')
-369
-370    output.message = fit_result.message
-371    if not silent:
-372        print(fit_result.message)
-373        print('chisquare/d.o.f.:', output.chisquare_by_dof)
-374        print('fit parameters', fit_result.x)
+352        fit_result = scipy.optimize.least_squares(chisqfunc_residuals_uncorr, x0, method='lm', ftol=1e-15, gtol=1e-15, xtol=1e-15)
+353        if kwargs.get('correlated_fit') is True:
+354
+355            def chisqfunc_residuals(p):
+356                return general_chisqfunc(p, y_f, p_f)
+357
+358            fit_result = scipy.optimize.least_squares(chisqfunc_residuals, fit_result.x, method='lm', ftol=1e-15, gtol=1e-15, xtol=1e-15)
+359
+360        chisquare = np.sum(fit_result.fun ** 2)
+361        assert np.isclose(chisquare, chisqfunc(fit_result.x), atol=1e-14)
+362
+363        output.iterations = fit_result.nfev
+364
+365    if not fit_result.success:
+366        raise Exception('The minimization procedure did not converge.')
+367
+368    output.chisquare = chisquare
+369    output.dof = x_all.shape[-1] - n_parms + len(loc_priors)
+370    output.p_value = 1 - scipy.stats.chi2.cdf(output.chisquare, output.dof)
+371    if output.dof > 0:
+372        output.chisquare_by_dof = output.chisquare / output.dof
+373    else:
+374        output.chisquare_by_dof = float('nan')
 375
-376    def prepare_hat_matrix():
-377        hat_vector = []
-378        for key in key_ls:
-379            if (len(xd[key]) != 0):
-380                hat_vector.append(jacobian(funcd[key])(fit_result.x, xd[key]))
-381        hat_vector = [item for sublist in hat_vector for item in sublist]
-382        return hat_vector
-383
-384    if kwargs.get('expected_chisquare') is True:
-385        if kwargs.get('correlated_fit') is not True:
-386            W = np.diag(1 / np.asarray(dy_f))
-387            cov = covariance(y_all)
-388            hat_vector = prepare_hat_matrix()
-389            A = W @ hat_vector
-390            P_phi = A @ np.linalg.pinv(A.T @ A) @ A.T
-391            expected_chisquare = np.trace((np.identity(x_all.shape[-1]) - P_phi) @ W @ cov @ W)
-392            output.chisquare_by_expected_chisquare = output.chisquare / expected_chisquare
-393            if not silent:
-394                print('chisquare/expected_chisquare:', output.chisquare_by_expected_chisquare)
-395
-396    fitp = fit_result.x
-397
-398    try:
-399        hess = hessian(chisqfunc)(fitp)
-400    except TypeError:
-401        raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None
-402
-403    len_y = len(y_f)
-404
-405    def chisqfunc_compact(d):
-406        return anp.sum(general_chisqfunc(d[:n_parms], d[n_parms: n_parms + len_y], d[n_parms + len_y:]) ** 2)
-407
-408    jac_jac_y = hessian(chisqfunc_compact)(np.concatenate((fitp, y_f, p_f)))
-409
-410    # Compute hess^{-1} @ jac_jac_y[:n_parms + m, n_parms + m:] using LAPACK dgesv
-411    try:
-412        deriv_y = -scipy.linalg.solve(hess, jac_jac_y[:n_parms, n_parms:])
-413    except np.linalg.LinAlgError:
-414        raise Exception("Cannot invert hessian matrix.")
+376    output.message = fit_result.message
+377    if not silent:
+378        print(fit_result.message)
+379        print('chisquare/d.o.f.:', output.chisquare_by_dof)
+380        print('fit parameters', fit_result.x)
+381
+382    def prepare_hat_matrix():
+383        hat_vector = []
+384        for key in key_ls:
+385            if (len(xd[key]) != 0):
+386                hat_vector.append(jacobian(funcd[key])(fit_result.x, xd[key]))
+387        hat_vector = [item for sublist in hat_vector for item in sublist]
+388        return hat_vector
+389
+390    if kwargs.get('expected_chisquare') is True:
+391        if kwargs.get('correlated_fit') is not True:
+392            W = np.diag(1 / np.asarray(dy_f))
+393            cov = covariance(y_all)
+394            hat_vector = prepare_hat_matrix()
+395            A = W @ hat_vector
+396            P_phi = A @ np.linalg.pinv(A.T @ A) @ A.T
+397            expected_chisquare = np.trace((np.identity(x_all.shape[-1]) - P_phi) @ W @ cov @ W)
+398            output.chisquare_by_expected_chisquare = output.chisquare / expected_chisquare
+399            if not silent:
+400                print('chisquare/expected_chisquare:', output.chisquare_by_expected_chisquare)
+401
+402    fitp = fit_result.x
+403
+404    try:
+405        hess = hessian(chisqfunc)(fitp)
+406    except TypeError:
+407        raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None
+408
+409    len_y = len(y_f)
+410
+411    def chisqfunc_compact(d):
+412        return anp.sum(general_chisqfunc(d[:n_parms], d[n_parms: n_parms + len_y], d[n_parms + len_y:]) ** 2)
+413
+414    jac_jac_y = hessian(chisqfunc_compact)(np.concatenate((fitp, y_f, p_f)))
 415
-416    result = []
-417    for i in range(n_parms):
-418        result.append(derived_observable(lambda x_all, **kwargs: (x_all[0] + np.finfo(np.float64).eps) / (y_all[0].value + np.finfo(np.float64).eps) * fitp[i], list(y_all) + loc_priors, man_grad=list(deriv_y[i])))
-419
-420    output.fit_parameters = result
+416    # Compute hess^{-1} @ jac_jac_y[:n_parms + m, n_parms + m:] using LAPACK dgesv
+417    try:
+418        deriv_y = -scipy.linalg.solve(hess, jac_jac_y[:n_parms, n_parms:])
+419    except np.linalg.LinAlgError:
+420        raise Exception("Cannot invert hessian matrix.")
 421
-422    # Hotelling t-squared p-value for correlated fits.
-423    if kwargs.get('correlated_fit') is True:
-424        n_cov = np.min(np.vectorize(lambda x_all: x_all.N)(y_all))
-425        output.t2_p_value = 1 - scipy.stats.f.cdf((n_cov - output.dof) / (output.dof * (n_cov - 1)) * output.chisquare,
-426                                                  output.dof, n_cov - output.dof)
+422    result = []
+423    for i in range(n_parms):
+424        result.append(derived_observable(lambda x_all, **kwargs: (x_all[0] + np.finfo(np.float64).eps) / (y_all[0].value + np.finfo(np.float64).eps) * fitp[i], list(y_all) + loc_priors, man_grad=list(deriv_y[i])))
+425
+426    output.fit_parameters = result
 427
-428    if kwargs.get('resplot') is True:
-429        for key in key_ls:
-430            residual_plot(xd[key], yd[key], funcd[key], result, title=key)
-431
-432    if kwargs.get('qqplot') is True:
-433        for key in key_ls:
-434            qqplot(xd[key], yd[key], funcd[key], result, title=key)
-435
-436    return output
+428    # Hotelling t-squared p-value for correlated fits.
+429    if kwargs.get('correlated_fit') is True:
+430        n_cov = np.min(np.vectorize(lambda x_all: x_all.N)(y_all))
+431        output.t2_p_value = 1 - scipy.stats.f.cdf((n_cov - output.dof) / (output.dof * (n_cov - 1)) * output.chisquare,
+432                                                  output.dof, n_cov - output.dof)
+433
+434    if kwargs.get('resplot') is True:
+435        for key in key_ls:
+436            residual_plot(xd[key], yd[key], funcd[key], result, title=key)
+437
+438    if kwargs.get('qqplot') is True:
+439        for key in key_ls:
+440            qqplot(xd[key], yd[key], funcd[key], result, title=key)
+441
+442    return output
@@ -1575,208 +1587,208 @@ Parameters and information on the fitted result.
-
439def total_least_squares(x, y, func, silent=False, **kwargs):
-440    r'''Performs a non-linear fit to y = func(x) and returns a list of Obs corresponding to the fit parameters.
-441
-442    Parameters
-443    ----------
-444    x : list
-445        list of Obs, or a tuple of lists of Obs
-446    y : list
-447        list of Obs. The dvalues of the Obs are used as x- and yerror for the fit.
-448    func : object
-449        func has to be of the form
-450
-451        ```python
-452        import autograd.numpy as anp
-453
-454        def func(a, x):
-455            return a[0] + a[1] * x + a[2] * anp.sinh(x)
-456        ```
-457
-458        For multiple x values func can be of the form
+            
445def total_least_squares(x, y, func, silent=False, **kwargs):
+446    r'''Performs a non-linear fit to y = func(x) and returns a list of Obs corresponding to the fit parameters.
+447
+448    Parameters
+449    ----------
+450    x : list
+451        list of Obs, or a tuple of lists of Obs
+452    y : list
+453        list of Obs. The dvalues of the Obs are used as x- and yerror for the fit.
+454    func : object
+455        func has to be of the form
+456
+457        ```python
+458        import autograd.numpy as anp
 459
-460        ```python
-461        def func(a, x):
-462            (x1, x2) = x
-463            return a[0] * x1 ** 2 + a[1] * x2
-464        ```
+460        def func(a, x):
+461            return a[0] + a[1] * x + a[2] * anp.sinh(x)
+462        ```
+463
+464        For multiple x values func can be of the form
 465
-466        It is important that all numpy functions refer to autograd.numpy, otherwise the differentiation
-467        will not work.
-468    silent : bool, optional
-469        If true all output to the console is omitted (default False).
-470    initial_guess : list
-471        can provide an initial guess for the input parameters. Relevant for non-linear
-472        fits with many parameters.
-473    expected_chisquare : bool
-474        If true prints the expected chisquare which is
-475        corrected by effects caused by correlated input data.
-476        This can take a while as the full correlation matrix
-477        has to be calculated (default False).
-478    num_grad : bool
-479        Use numerical differentiation instead of automatic differentiation to perform the error propagation (default False).
-480
-481    Notes
-482    -----
-483    Based on the orthogonal distance regression module of scipy.
-484
-485    Returns
-486    -------
-487    output : Fit_result
-488        Parameters and information on the fitted result.
-489    '''
+466        ```python
+467        def func(a, x):
+468            (x1, x2) = x
+469            return a[0] * x1 ** 2 + a[1] * x2
+470        ```
+471
+472        It is important that all numpy functions refer to autograd.numpy, otherwise the differentiation
+473        will not work.
+474    silent : bool, optional
+475        If true all output to the console is omitted (default False).
+476    initial_guess : list
+477        can provide an initial guess for the input parameters. Relevant for non-linear
+478        fits with many parameters.
+479    expected_chisquare : bool
+480        If true prints the expected chisquare which is
+481        corrected by effects caused by correlated input data.
+482        This can take a while as the full correlation matrix
+483        has to be calculated (default False).
+484    num_grad : bool
+485        Use numerical differentiation instead of automatic differentiation to perform the error propagation (default False).
+486
+487    Notes
+488    -----
+489    Based on the orthogonal distance regression module of scipy.
 490
-491    output = Fit_result()
-492
-493    output.fit_function = func
-494
-495    x = np.array(x)
+491    Returns
+492    -------
+493    output : Fit_result
+494        Parameters and information on the fitted result.
+495    '''
 496
-497    x_shape = x.shape
+497    output = Fit_result()
 498
-499    if kwargs.get('num_grad') is True:
-500        jacobian = num_jacobian
-501        hessian = num_hessian
-502    else:
-503        jacobian = auto_jacobian
-504        hessian = auto_hessian
-505
-506    if not callable(func):
-507        raise TypeError('func has to be a function.')
-508
-509    for i in range(42):
-510        try:
-511            func(np.arange(i), x.T[0])
-512        except TypeError:
-513            continue
-514        except IndexError:
-515            continue
-516        else:
-517            break
-518    else:
-519        raise RuntimeError("Fit function is not valid.")
-520
-521    n_parms = i
-522    if not silent:
-523        print('Fit with', n_parms, 'parameter' + 's' * (n_parms > 1))
-524
-525    x_f = np.vectorize(lambda o: o.value)(x)
-526    dx_f = np.vectorize(lambda o: o.dvalue)(x)
-527    y_f = np.array([o.value for o in y])
-528    dy_f = np.array([o.dvalue for o in y])
-529
-530    if np.any(np.asarray(dx_f) <= 0.0):
-531        raise Exception('No x errors available, run the gamma method first.')
-532
-533    if np.any(np.asarray(dy_f) <= 0.0):
-534        raise Exception('No y errors available, run the gamma method first.')
+499    output.fit_function = func
+500
+501    x = np.array(x)
+502
+503    x_shape = x.shape
+504
+505    if kwargs.get('num_grad') is True:
+506        jacobian = num_jacobian
+507        hessian = num_hessian
+508    else:
+509        jacobian = auto_jacobian
+510        hessian = auto_hessian
+511
+512    if not callable(func):
+513        raise TypeError('func has to be a function.')
+514
+515    for i in range(42):
+516        try:
+517            func(np.arange(i), x.T[0])
+518        except TypeError:
+519            continue
+520        except IndexError:
+521            continue
+522        else:
+523            break
+524    else:
+525        raise RuntimeError("Fit function is not valid.")
+526
+527    n_parms = i
+528    if not silent:
+529        print('Fit with', n_parms, 'parameter' + 's' * (n_parms > 1))
+530
+531    x_f = np.vectorize(lambda o: o.value)(x)
+532    dx_f = np.vectorize(lambda o: o.dvalue)(x)
+533    y_f = np.array([o.value for o in y])
+534    dy_f = np.array([o.dvalue for o in y])
 535
-536    if 'initial_guess' in kwargs:
-537        x0 = kwargs.get('initial_guess')
-538        if len(x0) != n_parms:
-539            raise Exception('Initial guess does not have the correct length: %d vs. %d' % (len(x0), n_parms))
-540    else:
-541        x0 = [1] * n_parms
-542
-543    data = RealData(x_f, y_f, sx=dx_f, sy=dy_f)
-544    model = Model(func)
-545    odr = ODR(data, model, x0, partol=np.finfo(np.float64).eps)
-546    odr.set_job(fit_type=0, deriv=1)
-547    out = odr.run()
+536    if np.any(np.asarray(dx_f) <= 0.0):
+537        raise Exception('No x errors available, run the gamma method first.')
+538
+539    if np.any(np.asarray(dy_f) <= 0.0):
+540        raise Exception('No y errors available, run the gamma method first.')
+541
+542    if 'initial_guess' in kwargs:
+543        x0 = kwargs.get('initial_guess')
+544        if len(x0) != n_parms:
+545            raise Exception('Initial guess does not have the correct length: %d vs. %d' % (len(x0), n_parms))
+546    else:
+547        x0 = [1] * n_parms
 548
-549    output.residual_variance = out.res_var
-550
-551    output.method = 'ODR'
-552
-553    output.message = out.stopreason
+549    data = RealData(x_f, y_f, sx=dx_f, sy=dy_f)
+550    model = Model(func)
+551    odr = ODR(data, model, x0, partol=np.finfo(np.float64).eps)
+552    odr.set_job(fit_type=0, deriv=1)
+553    out = odr.run()
 554
-555    output.xplus = out.xplus
+555    output.residual_variance = out.res_var
 556
-557    if not silent:
-558        print('Method: ODR')
-559        print(*out.stopreason)
-560        print('Residual variance:', output.residual_variance)
-561
-562    if out.info > 3:
-563        raise Exception('The minimization procedure did not converge.')
-564
-565    m = x_f.size
-566
-567    def odr_chisquare(p):
-568        model = func(p[:n_parms], p[n_parms:].reshape(x_shape))
-569        chisq = anp.sum(((y_f - model) / dy_f) ** 2) + anp.sum(((x_f - p[n_parms:].reshape(x_shape)) / dx_f) ** 2)
-570        return chisq
-571
-572    if kwargs.get('expected_chisquare') is True:
-573        W = np.diag(1 / np.asarray(np.concatenate((dy_f.ravel(), dx_f.ravel()))))
-574
-575        if kwargs.get('covariance') is not None:
-576            cov = kwargs.get('covariance')
-577        else:
-578            cov = covariance(np.concatenate((y, x.ravel())))
-579
-580        number_of_x_parameters = int(m / x_f.shape[-1])
-581
-582        old_jac = jacobian(func)(out.beta, out.xplus)
-583        fused_row1 = np.concatenate((old_jac, np.concatenate((number_of_x_parameters * [np.zeros(old_jac.shape)]), axis=0)))
-584        fused_row2 = np.concatenate((jacobian(lambda x, y: func(y, x))(out.xplus, out.beta).reshape(x_f.shape[-1], x_f.shape[-1] * number_of_x_parameters), np.identity(number_of_x_parameters * old_jac.shape[0])))
-585        new_jac = np.concatenate((fused_row1, fused_row2), axis=1)
-586
-587        A = W @ new_jac
-588        P_phi = A @ np.linalg.pinv(A.T @ A) @ A.T
-589        expected_chisquare = np.trace((np.identity(P_phi.shape[0]) - P_phi) @ W @ cov @ W)
-590        if expected_chisquare <= 0.0:
-591            warnings.warn("Negative expected_chisquare.", RuntimeWarning)
-592            expected_chisquare = np.abs(expected_chisquare)
-593        output.chisquare_by_expected_chisquare = odr_chisquare(np.concatenate((out.beta, out.xplus.ravel()))) / expected_chisquare
-594        if not silent:
-595            print('chisquare/expected_chisquare:',
-596                  output.chisquare_by_expected_chisquare)
-597
-598    fitp = out.beta
-599    try:
-600        hess = hessian(odr_chisquare)(np.concatenate((fitp, out.xplus.ravel())))
-601    except TypeError:
-602        raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None
+557    output.method = 'ODR'
+558
+559    output.message = out.stopreason
+560
+561    output.xplus = out.xplus
+562
+563    if not silent:
+564        print('Method: ODR')
+565        print(*out.stopreason)
+566        print('Residual variance:', output.residual_variance)
+567
+568    if out.info > 3:
+569        raise Exception('The minimization procedure did not converge.')
+570
+571    m = x_f.size
+572
+573    def odr_chisquare(p):
+574        model = func(p[:n_parms], p[n_parms:].reshape(x_shape))
+575        chisq = anp.sum(((y_f - model) / dy_f) ** 2) + anp.sum(((x_f - p[n_parms:].reshape(x_shape)) / dx_f) ** 2)
+576        return chisq
+577
+578    if kwargs.get('expected_chisquare') is True:
+579        W = np.diag(1 / np.asarray(np.concatenate((dy_f.ravel(), dx_f.ravel()))))
+580
+581        if kwargs.get('covariance') is not None:
+582            cov = kwargs.get('covariance')
+583        else:
+584            cov = covariance(np.concatenate((y, x.ravel())))
+585
+586        number_of_x_parameters = int(m / x_f.shape[-1])
+587
+588        old_jac = jacobian(func)(out.beta, out.xplus)
+589        fused_row1 = np.concatenate((old_jac, np.concatenate((number_of_x_parameters * [np.zeros(old_jac.shape)]), axis=0)))
+590        fused_row2 = np.concatenate((jacobian(lambda x, y: func(y, x))(out.xplus, out.beta).reshape(x_f.shape[-1], x_f.shape[-1] * number_of_x_parameters), np.identity(number_of_x_parameters * old_jac.shape[0])))
+591        new_jac = np.concatenate((fused_row1, fused_row2), axis=1)
+592
+593        A = W @ new_jac
+594        P_phi = A @ np.linalg.pinv(A.T @ A) @ A.T
+595        expected_chisquare = np.trace((np.identity(P_phi.shape[0]) - P_phi) @ W @ cov @ W)
+596        if expected_chisquare <= 0.0:
+597            warnings.warn("Negative expected_chisquare.", RuntimeWarning)
+598            expected_chisquare = np.abs(expected_chisquare)
+599        output.chisquare_by_expected_chisquare = odr_chisquare(np.concatenate((out.beta, out.xplus.ravel()))) / expected_chisquare
+600        if not silent:
+601            print('chisquare/expected_chisquare:',
+602                  output.chisquare_by_expected_chisquare)
 603
-604    def odr_chisquare_compact_x(d):
-605        model = func(d[:n_parms], d[n_parms:n_parms + m].reshape(x_shape))
-606        chisq = anp.sum(((y_f - model) / dy_f) ** 2) + anp.sum(((d[n_parms + m:].reshape(x_shape) - d[n_parms:n_parms + m].reshape(x_shape)) / dx_f) ** 2)
-607        return chisq
-608
-609    jac_jac_x = hessian(odr_chisquare_compact_x)(np.concatenate((fitp, out.xplus.ravel(), x_f.ravel())))
-610
-611    # Compute hess^{-1} @ jac_jac_x[:n_parms + m, n_parms + m:] using LAPACK dgesv
-612    try:
-613        deriv_x = -scipy.linalg.solve(hess, jac_jac_x[:n_parms + m, n_parms + m:])
-614    except np.linalg.LinAlgError:
-615        raise Exception("Cannot invert hessian matrix.")
+604    fitp = out.beta
+605    try:
+606        hess = hessian(odr_chisquare)(np.concatenate((fitp, out.xplus.ravel())))
+607    except TypeError:
+608        raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None
+609
+610    def odr_chisquare_compact_x(d):
+611        model = func(d[:n_parms], d[n_parms:n_parms + m].reshape(x_shape))
+612        chisq = anp.sum(((y_f - model) / dy_f) ** 2) + anp.sum(((d[n_parms + m:].reshape(x_shape) - d[n_parms:n_parms + m].reshape(x_shape)) / dx_f) ** 2)
+613        return chisq
+614
+615    jac_jac_x = hessian(odr_chisquare_compact_x)(np.concatenate((fitp, out.xplus.ravel(), x_f.ravel())))
 616
-617    def odr_chisquare_compact_y(d):
-618        model = func(d[:n_parms], d[n_parms:n_parms + m].reshape(x_shape))
-619        chisq = anp.sum(((d[n_parms + m:] - model) / dy_f) ** 2) + anp.sum(((x_f - d[n_parms:n_parms + m].reshape(x_shape)) / dx_f) ** 2)
-620        return chisq
-621
-622    jac_jac_y = hessian(odr_chisquare_compact_y)(np.concatenate((fitp, out.xplus.ravel(), y_f)))
-623
-624    # Compute hess^{-1} @ jac_jac_y[:n_parms + m, n_parms + m:] using LAPACK dgesv
-625    try:
-626        deriv_y = -scipy.linalg.solve(hess, jac_jac_y[:n_parms + m, n_parms + m:])
-627    except np.linalg.LinAlgError:
-628        raise Exception("Cannot invert hessian matrix.")
+617    # Compute hess^{-1} @ jac_jac_x[:n_parms + m, n_parms + m:] using LAPACK dgesv
+618    try:
+619        deriv_x = -scipy.linalg.solve(hess, jac_jac_x[:n_parms + m, n_parms + m:])
+620    except np.linalg.LinAlgError:
+621        raise Exception("Cannot invert hessian matrix.")
+622
+623    def odr_chisquare_compact_y(d):
+624        model = func(d[:n_parms], d[n_parms:n_parms + m].reshape(x_shape))
+625        chisq = anp.sum(((d[n_parms + m:] - model) / dy_f) ** 2) + anp.sum(((x_f - d[n_parms:n_parms + m].reshape(x_shape)) / dx_f) ** 2)
+626        return chisq
+627
+628    jac_jac_y = hessian(odr_chisquare_compact_y)(np.concatenate((fitp, out.xplus.ravel(), y_f)))
 629
-630    result = []
-631    for i in range(n_parms):
-632        result.append(derived_observable(lambda my_var, **kwargs: (my_var[0] + np.finfo(np.float64).eps) / (x.ravel()[0].value + np.finfo(np.float64).eps) * out.beta[i], list(x.ravel()) + list(y), man_grad=list(deriv_x[i]) + list(deriv_y[i])))
-633
-634    output.fit_parameters = result
+630    # Compute hess^{-1} @ jac_jac_y[:n_parms + m, n_parms + m:] using LAPACK dgesv
+631    try:
+632        deriv_y = -scipy.linalg.solve(hess, jac_jac_y[:n_parms + m, n_parms + m:])
+633    except np.linalg.LinAlgError:
+634        raise Exception("Cannot invert hessian matrix.")
 635
-636    output.odr_chisquare = odr_chisquare(np.concatenate((out.beta, out.xplus.ravel())))
-637    output.dof = x.shape[-1] - n_parms
-638    output.p_value = 1 - scipy.stats.chi2.cdf(output.odr_chisquare, output.dof)
+636    result = []
+637    for i in range(n_parms):
+638        result.append(derived_observable(lambda my_var, **kwargs: (my_var[0] + np.finfo(np.float64).eps) / (x.ravel()[0].value + np.finfo(np.float64).eps) * out.beta[i], list(x.ravel()) + list(y), man_grad=list(deriv_x[i]) + list(deriv_y[i])))
 639
-640    return output
+640    output.fit_parameters = result
+641
+642    output.odr_chisquare = odr_chisquare(np.concatenate((out.beta, out.xplus.ravel())))
+643    output.dof = x.shape[-1] - n_parms
+644    output.p_value = 1 - scipy.stats.chi2.cdf(output.odr_chisquare, output.dof)
+645
+646    return output
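For orientation only, a minimal usage sketch of `total_least_squares` with toy data (not part of the diff above; the ensemble name `'ens'`, the use of `pe.pseudo_Obs` for generating test observables and the concrete numbers are illustrative assumptions):

```python
import autograd.numpy as anp  # fit functions have to use autograd.numpy
import pyerrors as pe


def func(a, x):
    return a[0] + a[1] * x + a[2] * anp.sinh(x)


# Toy data in which both x and y carry errors, the case ODR is meant for.
x_vals = [1.0, 2.0, 3.0, 4.0, 5.0]
x = [pe.pseudo_Obs(v, 0.01, 'ens') for v in x_vals]
y = [pe.pseudo_Obs(0.5 + 0.3 * v + 0.1 * anp.sinh(v), 0.05, 'ens') for v in x_vals]
[o.gamma_method() for o in x + y]  # errors are required before fitting

res = pe.fits.total_least_squares(x, y, func, initial_guess=[0.5, 0.3, 0.1])
[p.gamma_method() for p in res.fit_parameters]
print(res.fit_parameters)  # list of Obs for a[0], a[1], a[2]
```

Both `x` and `y` have to carry errors here, which is what distinguishes this ODR-based fitter from `least_squares`.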
 
@@ -1850,35 +1862,35 @@ Parameters and information on the fitted result.
-
643def fit_lin(x, y, **kwargs):
-644    """Performs a linear fit to y = n + m * x and returns two Obs n, m.
-645
-646    Parameters
-647    ----------
-648    x : list
-649        Can either be a list of floats in which case no xerror is assumed, or
-650        a list of Obs, where the dvalues of the Obs are used as xerror for the fit.
-651    y : list
-652        List of Obs, the dvalues of the Obs are used as yerror for the fit.
-653
-654    Returns
-655    -------
-656    fit_parameters : list[Obs]
-657        List of fitted observables.
-658    """
+            
649def fit_lin(x, y, **kwargs):
+650    """Performs a linear fit to y = n + m * x and returns two Obs n, m.
+651
+652    Parameters
+653    ----------
+654    x : list
+655        Can either be a list of floats in which case no xerror is assumed, or
+656        a list of Obs, where the dvalues of the Obs are used as xerror for the fit.
+657    y : list
+658        List of Obs, the dvalues of the Obs are used as yerror for the fit.
 659
-660    def f(a, x):
-661        y = a[0] + a[1] * x
-662        return y
-663
-664    if all(isinstance(n, Obs) for n in x):
-665        out = total_least_squares(x, y, f, **kwargs)
-666        return out.fit_parameters
-667    elif all(isinstance(n, float) or isinstance(n, int) for n in x) or isinstance(x, np.ndarray):
-668        out = least_squares(x, y, f, **kwargs)
-669        return out.fit_parameters
-670    else:
-671        raise TypeError('Unsupported types for x')
+660    Returns
+661    -------
+662    fit_parameters : list[Obs]
+663        List of fitted observables.
+664    """
+665
+666    def f(a, x):
+667        y = a[0] + a[1] * x
+668        return y
+669
+670    if all(isinstance(n, Obs) for n in x):
+671        out = total_least_squares(x, y, f, **kwargs)
+672        return out.fit_parameters
+673    elif all(isinstance(n, float) or isinstance(n, int) for n in x) or isinstance(x, np.ndarray):
+674        out = least_squares(x, y, f, **kwargs)
+675        return out.fit_parameters
+676    else:
+677        raise TypeError('Unsupported types for x')
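A short sketch of the two input modes the type check above distinguishes (the toy data and the ensemble name `'ens'` are assumptions for illustration, not part of the diff):

```python
import pyerrors as pe

x_float = [1.0, 2.0, 3.0, 4.0]
y = [pe.pseudo_Obs(0.7 + 0.25 * xi, 0.02, 'ens') for xi in x_float]
[o.gamma_method() for o in y]

# Plain floats as x: only y errors, least_squares is used under the hood.
n, m = pe.fits.fit_lin(x_float, y)

# Obs as x: x and y errors, total_least_squares is used under the hood.
x_obs = [pe.pseudo_Obs(xi, 0.01, 'ens') for xi in x_float]
[o.gamma_method() for o in x_obs]
n_odr, m_odr = pe.fits.fit_lin(x_obs, y)
```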
 
@@ -1915,34 +1927,34 @@ List of fitted observables.
-
674def qqplot(x, o_y, func, p, title=""):
-675    """Generates a quantile-quantile plot of the fit result which can be used to
-676       check if the residuals of the fit are Gaussian distributed.
-677
-678    Returns
-679    -------
-680    None
-681    """
-682
-683    residuals = []
-684    for i_x, i_y in zip(x, o_y):
-685        residuals.append((i_y - func(p, i_x)) / i_y.dvalue)
-686    residuals = sorted(residuals)
-687    my_y = [o.value for o in residuals]
-688    probplot = scipy.stats.probplot(my_y)
-689    my_x = probplot[0][0]
-690    plt.figure(figsize=(8, 8 / 1.618))
-691    plt.errorbar(my_x, my_y, fmt='o')
-692    fit_start = my_x[0]
-693    fit_stop = my_x[-1]
-694    samples = np.arange(fit_start, fit_stop, 0.01)
-695    plt.plot(samples, samples, 'k--', zorder=11, label='Standard normal distribution')
-696    plt.plot(samples, probplot[1][0] * samples + probplot[1][1], zorder=10, label='Least squares fit, r=' + str(np.around(probplot[1][2], 3)), marker='', ls='-')
-697
-698    plt.xlabel('Theoretical quantiles')
-699    plt.ylabel('Ordered Values')
-700    plt.legend(title=title)
-701    plt.draw()
+            
680def qqplot(x, o_y, func, p, title=""):
+681    """Generates a quantile-quantile plot of the fit result which can be used to
+682       check if the residuals of the fit are Gaussian distributed.
+683
+684    Returns
+685    -------
+686    None
+687    """
+688
+689    residuals = []
+690    for i_x, i_y in zip(x, o_y):
+691        residuals.append((i_y - func(p, i_x)) / i_y.dvalue)
+692    residuals = sorted(residuals)
+693    my_y = [o.value for o in residuals]
+694    probplot = scipy.stats.probplot(my_y)
+695    my_x = probplot[0][0]
+696    plt.figure(figsize=(8, 8 / 1.618))
+697    plt.errorbar(my_x, my_y, fmt='o')
+698    fit_start = my_x[0]
+699    fit_stop = my_x[-1]
+700    samples = np.arange(fit_start, fit_stop, 0.01)
+701    plt.plot(samples, samples, 'k--', zorder=11, label='Standard normal distribution')
+702    plt.plot(samples, probplot[1][0] * samples + probplot[1][1], zorder=10, label='Least squares fit, r=' + str(np.around(probplot[1][2], 3)), marker='', ls='-')
+703
+704    plt.xlabel('Theoretical quantiles')
+705    plt.ylabel('Ordered Values')
+706    plt.legend(title=title)
+707    plt.draw()
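`qqplot` is typically triggered through the `qqplot=True` keyword of `least_squares`, but it can also be called directly on a finished fit. A minimal sketch with made-up data (the linear toy fit and ensemble name are illustrative assumptions):

```python
import numpy as np
import pyerrors as pe
import matplotlib.pyplot as plt


def linear(a, x):
    return a[0] + a[1] * x


rng = np.random.default_rng(1)
x = np.arange(1, 10)
y = [pe.pseudo_Obs(0.5 + 0.4 * xi + rng.normal(0.0, 0.03), 0.03, 'ens') for xi in x]
[o.gamma_method() for o in y]

fit_res = pe.fits.least_squares(x, y, linear)
pe.fits.qqplot(x, y, linear, fit_res.fit_parameters, title='linear toy fit')
plt.show()  # qqplot itself only calls plt.draw()
```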
 
@@ -1969,41 +1981,41 @@ List of fitted observables.
-
704def residual_plot(x, y, func, fit_res, title=""):
-705    """Generates a plot which compares the fit to the data and displays the corresponding residuals
-706
-707    For uncorrelated data the residuals are expected to be distributed ~N(0,1).
-708
-709    Returns
-710    -------
-711    None
-712    """
-713    sorted_x = sorted(x)
-714    xstart = sorted_x[0] - 0.5 * (sorted_x[1] - sorted_x[0])
-715    xstop = sorted_x[-1] + 0.5 * (sorted_x[-1] - sorted_x[-2])
-716    x_samples = np.arange(xstart, xstop + 0.01, 0.01)
-717
-718    plt.figure(figsize=(8, 8 / 1.618))
-719    gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1], wspace=0.0, hspace=0.0)
-720    ax0 = plt.subplot(gs[0])
-721    ax0.errorbar(x, [o.value for o in y], yerr=[o.dvalue for o in y], ls='none', fmt='o', capsize=3, markersize=5, label='Data')
-722    ax0.plot(x_samples, func([o.value for o in fit_res], x_samples), label='Fit', zorder=10, ls='-', ms=0)
-723    ax0.set_xticklabels([])
-724    ax0.set_xlim([xstart, xstop])
-725    ax0.set_xticklabels([])
-726    ax0.legend(title=title)
-727
-728    residuals = (np.asarray([o.value for o in y]) - func([o.value for o in fit_res], np.asarray(x))) / np.asarray([o.dvalue for o in y])
-729    ax1 = plt.subplot(gs[1])
-730    ax1.plot(x, residuals, 'ko', ls='none', markersize=5)
-731    ax1.tick_params(direction='out')
-732    ax1.tick_params(axis="x", bottom=True, top=True, labelbottom=True)
-733    ax1.axhline(y=0.0, ls='--', color='k', marker=" ")
-734    ax1.fill_between(x_samples, -1.0, 1.0, alpha=0.1, facecolor='k')
-735    ax1.set_xlim([xstart, xstop])
-736    ax1.set_ylabel('Residuals')
-737    plt.subplots_adjust(wspace=None, hspace=None)
-738    plt.draw()
+            
710def residual_plot(x, y, func, fit_res, title=""):
+711    """Generates a plot which compares the fit to the data and displays the corresponding residuals
+712
+713    For uncorrelated data the residuals are expected to be distributed ~N(0,1).
+714
+715    Returns
+716    -------
+717    None
+718    """
+719    sorted_x = sorted(x)
+720    xstart = sorted_x[0] - 0.5 * (sorted_x[1] - sorted_x[0])
+721    xstop = sorted_x[-1] + 0.5 * (sorted_x[-1] - sorted_x[-2])
+722    x_samples = np.arange(xstart, xstop + 0.01, 0.01)
+723
+724    plt.figure(figsize=(8, 8 / 1.618))
+725    gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1], wspace=0.0, hspace=0.0)
+726    ax0 = plt.subplot(gs[0])
+727    ax0.errorbar(x, [o.value for o in y], yerr=[o.dvalue for o in y], ls='none', fmt='o', capsize=3, markersize=5, label='Data')
+728    ax0.plot(x_samples, func([o.value for o in fit_res], x_samples), label='Fit', zorder=10, ls='-', ms=0)
+729    ax0.set_xticklabels([])
+730    ax0.set_xlim([xstart, xstop])
+731    ax0.set_xticklabels([])
+732    ax0.legend(title=title)
+733
+734    residuals = (np.asarray([o.value for o in y]) - func([o.value for o in fit_res], np.asarray(x))) / np.asarray([o.dvalue for o in y])
+735    ax1 = plt.subplot(gs[1])
+736    ax1.plot(x, residuals, 'ko', ls='none', markersize=5)
+737    ax1.tick_params(direction='out')
+738    ax1.tick_params(axis="x", bottom=True, top=True, labelbottom=True)
+739    ax1.axhline(y=0.0, ls='--', color='k', marker=" ")
+740    ax1.fill_between(x_samples, -1.0, 1.0, alpha=0.1, facecolor='k')
+741    ax1.set_xlim([xstart, xstop])
+742    ax1.set_ylabel('Residuals')
+743    plt.subplots_adjust(wspace=None, hspace=None)
+744    plt.draw()
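As with `qqplot`, the residual plot can either be requested via `resplot=True` in `least_squares` or produced directly from a finished fit; a sketch under the same kind of toy-data assumptions as above:

```python
import numpy as np
import pyerrors as pe
import matplotlib.pyplot as plt


def linear(a, x):
    return a[0] + a[1] * x


rng = np.random.default_rng(2)
x = np.arange(1, 10)
y = [pe.pseudo_Obs(0.5 + 0.4 * xi + rng.normal(0.0, 0.03), 0.03, 'ens') for xi in x]
[o.gamma_method() for o in y]

fit_res = pe.fits.least_squares(x, y, linear)
pe.fits.residual_plot(x, y, linear, fit_res.fit_parameters, title='linear toy fit')
# Alternatively: pe.fits.least_squares(x, y, linear, resplot=True)
plt.show()
```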
 
@@ -2031,28 +2043,28 @@ List of fitted observables.
-
741def error_band(x, func, beta):
-742    """Calculate the error band for an array of sample values x, for a given fit function func with optimized parameters beta.
-743
-744    Returns
-745    -------
-746    err : np.array(Obs)
-747        Error band for an array of sample values x
-748    """
-749    cov = covariance(beta)
-750    if np.any(np.abs(cov - cov.T) > 1000 * np.finfo(np.float64).eps):
-751        warnings.warn("Covariance matrix is not symmetric within floating point precision", RuntimeWarning)
-752
-753    deriv = []
-754    for i, item in enumerate(x):
-755        deriv.append(np.array(egrad(func)([o.value for o in beta], item)))
-756
-757    err = []
-758    for i, item in enumerate(x):
-759        err.append(np.sqrt(deriv[i] @ cov @ deriv[i]))
-760    err = np.array(err)
-761
-762    return err
+            
747def error_band(x, func, beta):
+748    """Calculate the error band for an array of sample values x, for a given fit function func with optimized parameters beta.
+749
+750    Returns
+751    -------
+752    err : np.array(Obs)
+753        Error band for an array of sample values x
+754    """
+755    cov = covariance(beta)
+756    if np.any(np.abs(cov - cov.T) > 1000 * np.finfo(np.float64).eps):
+757        warnings.warn("Covariance matrix is not symmetric within floating point precision", RuntimeWarning)
+758
+759    deriv = []
+760    for i, item in enumerate(x):
+761        deriv.append(np.array(egrad(func)([o.value for o in beta], item)))
+762
+763    err = []
+764    for i, item in enumerate(x):
+765        err.append(np.sqrt(deriv[i] @ cov @ deriv[i]))
+766    err = np.array(err)
+767
+768    return err
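A sketch of how the returned band might be combined with matplotlib (the linear toy fit, the dense x grid and the ensemble name are illustrative assumptions; `beta` are the parameters of a previous `least_squares` call):

```python
import numpy as np
import pyerrors as pe
import matplotlib.pyplot as plt


def linear(a, x):
    return a[0] + a[1] * x


x = np.arange(1, 8)
y = [pe.pseudo_Obs(0.5 + 0.4 * xi, 0.03, 'ens') for xi in x]
[o.gamma_method() for o in y]

beta = pe.fits.least_squares(x, y, linear).fit_parameters
[b.gamma_method() for b in beta]  # covariance(beta) needs error estimates

x_dense = np.linspace(1.0, 7.0, 200)
band = pe.fits.error_band(x_dense, linear, beta)
central = linear([b.value for b in beta], x_dense)
plt.fill_between(x_dense, central - band, central + band, alpha=0.3, label='error band')
plt.plot(x_dense, central, label='fit')
plt.legend()
plt.show()
```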
 
@@ -2079,48 +2091,48 @@ List of fitted observables.
-
765def ks_test(objects=None):
-766    """Performs a Kolmogorov–Smirnov test for the p-values of all fit objects.
-767
-768    Parameters
-769    ----------
-770    objects : list
-771        List of fit results to include in the analysis (optional).
-772
-773    Returns
-774    -------
-775    None
-776    """
-777
-778    if objects is None:
-779        obs_list = []
-780        for obj in gc.get_objects():
-781            if isinstance(obj, Fit_result):
-782                obs_list.append(obj)
-783    else:
-784        obs_list = objects
-785
-786    p_values = [o.p_value for o in obs_list]
-787
-788    bins = len(p_values)
-789    x = np.arange(0, 1.001, 0.001)
-790    plt.plot(x, x, 'k', zorder=1)
-791    plt.xlim(0, 1)
-792    plt.ylim(0, 1)
-793    plt.xlabel('p-value')
-794    plt.ylabel('Cumulative probability')
-795    plt.title(str(bins) + ' p-values')
-796
-797    n = np.arange(1, bins + 1) / np.float64(bins)
-798    Xs = np.sort(p_values)
-799    plt.step(Xs, n)
-800    diffs = n - Xs
-801    loc_max_diff = np.argmax(np.abs(diffs))
-802    loc = Xs[loc_max_diff]
-803    plt.annotate('', xy=(loc, loc), xytext=(loc, loc + diffs[loc_max_diff]), arrowprops=dict(arrowstyle='<->', shrinkA=0, shrinkB=0))
-804    plt.draw()
-805
-806    print(scipy.stats.kstest(p_values, 'uniform'))
+            
771def ks_test(objects=None):
+772    """Performs a Kolmogorov–Smirnov test for the p-values of all fit objects.
+773
+774    Parameters
+775    ----------
+776    objects : list
+777        List of fit results to include in the analysis (optional).
+778
+779    Returns
+780    -------
+781    None
+782    """
+783
+784    if objects is None:
+785        obs_list = []
+786        for obj in gc.get_objects():
+787            if isinstance(obj, Fit_result):
+788                obs_list.append(obj)
+789    else:
+790        obs_list = objects
+791
+792    p_values = [o.p_value for o in obs_list]
+793
+794    bins = len(p_values)
+795    x = np.arange(0, 1.001, 0.001)
+796    plt.plot(x, x, 'k', zorder=1)
+797    plt.xlim(0, 1)
+798    plt.ylim(0, 1)
+799    plt.xlabel('p-value')
+800    plt.ylabel('Cumulative probability')
+801    plt.title(str(bins) + ' p-values')
+802
+803    n = np.arange(1, bins + 1) / np.float64(bins)
+804    Xs = np.sort(p_values)
+805    plt.step(Xs, n)
+806    diffs = n - Xs
+807    loc_max_diff = np.argmax(np.abs(diffs))
+808    loc = Xs[loc_max_diff]
+809    plt.annotate('', xy=(loc, loc), xytext=(loc, loc + diffs[loc_max_diff]), arrowprops=dict(arrowstyle='<->', shrinkA=0, shrinkB=0))
+810    plt.draw()
+811
+812    print(scipy.stats.kstest(p_values, 'uniform'))
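A sketch of collecting several fit results explicitly and handing them to `ks_test`, instead of letting it scan the garbage collector (the constant toy fits, the scattered central values and the ensemble names are illustrative assumptions):

```python
import numpy as np
import pyerrors as pe
import matplotlib.pyplot as plt


def const(a, x):
    return a[0] + 0 * x  # the 0 * x term keeps the output shape equal to x


rng = np.random.default_rng(3)
fit_results = []
for i in range(10):
    y = [pe.pseudo_Obs(1.0 + rng.normal(0.0, 0.05), 0.05, 'ens%d' % i) for _ in range(6)]
    [o.gamma_method() for o in y]
    fit_results.append(pe.fits.least_squares(np.arange(6), y, const, silent=True))

pe.fits.ks_test(objects=fit_results)  # prints scipy's K-S result and draws the CDF plot
plt.show()
```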