diff --git a/docs/pyerrors/fits.html b/docs/pyerrors/fits.html
index e447989b..4007e417 100644
--- a/docs/pyerrors/fits.html
+++ b/docs/pyerrors/fits.html
@@ -361,7 +361,10 @@
               output.chisquare_by_expected_chisquare)
 
     fitp = out.beta
-    hess_inv = np.linalg.pinv(jacobian(jacobian(odr_chisquare))(np.concatenate((fitp, out.xplus.ravel()))))
+    try:
+        hess_inv = np.linalg.pinv(jacobian(jacobian(odr_chisquare))(np.concatenate((fitp, out.xplus.ravel()))))
+    except TypeError:
+        raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None
 
     def odr_chisquare_compact_x(d):
         model = func(d[:n_parms], d[n_parms:n_parms + m].reshape(x_shape))
@@ -638,7 +641,10 @@
               output.chisquare_by_expected_chisquare)
 
     fitp = fit_result.x
-    hess_inv = np.linalg.pinv(jacobian(jacobian(chisqfunc))(fitp))
+    try:
+        hess_inv = np.linalg.pinv(jacobian(jacobian(chisqfunc))(fitp))
+    except TypeError:
+        raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None
 
     if kwargs.get('correlated_fit') is True:
         def chisqfunc_compact(d):
@@ -1214,7 +1220,10 @@ If true, use the full correlation matrix in the definition of the chisquare
               output.chisquare_by_expected_chisquare)
 
     fitp = out.beta
-    hess_inv = np.linalg.pinv(jacobian(jacobian(odr_chisquare))(np.concatenate((fitp, out.xplus.ravel()))))
+    try:
+        hess_inv = np.linalg.pinv(jacobian(jacobian(odr_chisquare))(np.concatenate((fitp, out.xplus.ravel()))))
+    except TypeError:
+        raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None
 
     def odr_chisquare_compact_x(d):
         model = func(d[:n_parms], d[n_parms:n_parms + m].reshape(x_shape))
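
The three hunks above guard the same pattern in pyerrors' fit routines: autograd's jacobian raises a TypeError when the user-supplied fit function is written with plain numpy, and the new except clause converts that into the explanatory message. A minimal sketch of what the message asks for; the model function and the numbers below are invented for illustration and are not part of this diff:

    # Fit functions must use autograd.numpy so that pyerrors can build
    # jacobian(jacobian(chisquare)) for the Hessian seen in the hunks above.
    import autograd.numpy as anp  # not "import numpy as np" inside the model
    from autograd import jacobian

    def func(p, x):
        # exponential model written with autograd.numpy primitives
        return p[0] * anp.exp(-p[1] * x) + p[2]

    # autograd can differentiate this with respect to the parameters:
    print(jacobian(func)(anp.array([1.0, 0.5, 0.1]), anp.array([0.0, 1.0, 2.0])))
    # Writing the same model with plain numpy makes the call above raise the
    # TypeError that the wrapped hess_inv computations now catch.
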
diff --git a/docs/pyerrors/roots.html b/docs/pyerrors/roots.html
index bd093e30..1d76d700 100644
--- a/docs/pyerrors/roots.html
+++ b/docs/pyerrors/roots.html
@@ -102,7 +102,10 @@
     # Error propagation as detailed in arXiv:1809.01289
     dx = jacobian(func)(root[0], d.value)
-    da = jacobian(lambda u, v: func(v, u))(d.value, root[0])
+    try:
+        da = jacobian(lambda u, v: func(v, u))(d.value, root[0])
+    except TypeError:
+        raise Exception("It is required to use autograd.numpy instead of numpy within root functions, see the documentation for details.") from None
     deriv = - da / dx
 
     res = derived_observable(lambda x, **kwargs: (x[0] + np.finfo(np.float64).eps) / (d.value + np.finfo(np.float64).eps) * root[0], [d], man_grad=[deriv])
@@ -149,7 +152,10 @@
     # Error propagation as detailed in arXiv:1809.01289
     dx = jacobian(func)(root[0], d.value)
-    da = jacobian(lambda u, v: func(v, u))(d.value, root[0])
+    try:
+        da = jacobian(lambda u, v: func(v, u))(d.value, root[0])
+    except TypeError:
+        raise Exception("It is required to use autograd.numpy instead of numpy within root functions, see the documentation for details.") from None
     deriv = - da / dx
 
     res = derived_observable(lambda x, **kwargs: (x[0] + np.finfo(np.float64).eps) / (d.value + np.finfo(np.float64).eps) * root[0], [d], man_grad=[deriv])
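
The same requirement holds for root finding: find_root differentiates the user-supplied function with autograd to propagate the error of the observable through the root (the dx and da lines above). A short sketch assuming the usual pyerrors entry points (pseudo_Obs, roots.find_root); the root function and the numbers are invented for illustration:

    import autograd.numpy as anp  # required inside the root function
    import pyerrors as pe
    from pyerrors.roots import find_root

    def root_function(x, d):
        # f(x, d) = 0 defines the root; anp keeps it differentiable in x and d
        return anp.exp(-x) - d

    d = pe.pseudo_Obs(0.5, 0.01, 'ensemble1')  # toy observable with an error
    root = find_root(d, root_function, guess=1.0)
    root.gamma_method()
    print(root)  # root of exp(-x) = 0.5 with the propagated uncertainty
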