diff --git a/docs/pyerrors/linalg.html b/docs/pyerrors/linalg.html
index 659472b8..d5ca9c87 100644
--- a/docs/pyerrors/linalg.html
+++ b/docs/pyerrors/linalg.html
@@ -311,8 +311,6 @@
"""Computes the matrix to scalar operation op to a given matrix of Obs."""
def _mat(x, **kwargs):
dim = int(np.sqrt(len(x)))
- if np.sqrt(len(x)) != dim:
- raise Exception('Input has to have dim**2 entries')
mat = []
for i in range(dim):
@@ -325,8 +323,6 @@
if isinstance(obs, np.ndarray):
raveled_obs = (1 * (obs.ravel())).tolist()
- elif isinstance(obs, list):
- raveled_obs = obs
else:
raise TypeError('Unproper type of input.')
return derived_observable(_mat, raveled_obs, **kwargs)
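# Hedged usage sketch (not part of this diff): the helper above flattens a
# dim x dim matrix of Obs and applies a scalar-valued operation to it. A
# public wrapper such as pe.linalg.det (name assumed here, not shown in the
# hunk) would be called like this, with errors propagated via derived_observable:
import numpy as np
import pyerrors as pe

obs_a = pe.Obs([np.random.normal(4.0, 0.5, 1000)], ['ens1'])
obs_b = pe.Obs([np.random.normal(1.0, 0.1, 1000)], ['ens1'])
matrix = np.asarray([[obs_a, obs_b], [obs_b, obs_a]])
determinant = pe.linalg.det(matrix)  # a single Obs, not a plain float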
@@ -346,10 +342,7 @@
A[n, m] = entry
B[n, m] = 0.0
big_matrix = np.block([[A, -B], [B, A]])
- if kwargs.get('num_grad') is True:
- op_big_matrix = _num_diff_mat_mat_op(op, big_matrix, **kwargs)
- else:
- op_big_matrix = derived_observable(lambda x, **kwargs: op(x), [big_matrix], array_mode=True)[0]
+ op_big_matrix = derived_observable(lambda x, **kwargs: op(x), [big_matrix], array_mode=True)[0]
dim = op_big_matrix.shape[0]
op_A = op_big_matrix[0: dim // 2, 0: dim // 2]
op_B = op_big_matrix[dim // 2:, 0: dim // 2]
@@ -358,15 +351,11 @@
res[n, m] = CObs(op_A[n, m], op_B[n, m])
return res
else:
- if kwargs.get('num_grad') is True:
- return _num_diff_mat_mat_op(op, obs, **kwargs)
return derived_observable(lambda x, **kwargs: op(x), [obs], array_mode=True)[0]
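# Why the [[A, -B], [B, A]] embedding above works (illustrative sketch with
# plain numpy, not part of this diff): a complex matrix M = A + iB is
# represented by a real 2n x 2n block matrix, and applying a matrix function
# to the block matrix reproduces the real and imaginary parts of the result,
# e.g. for the inverse:
import numpy as np

A = np.array([[2.0, 0.3], [0.3, 1.5]])
B = np.array([[0.0, 0.7], [-0.7, 0.0]])
big = np.block([[A, -B], [B, A]])
inv_big = np.linalg.inv(big)
inv_M = np.linalg.inv(A + 1j * B)
assert np.allclose(inv_big[:2, :2], inv_M.real)  # corresponds to the op_A block
assert np.allclose(inv_big[2:, :2], inv_M.imag)  # corresponds to the op_B block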
def eigh(obs, **kwargs):
"""Computes the eigenvalues and eigenvectors of a given hermitian matrix of Obs according to np.linalg.eigh."""
- if kwargs.get('num_grad') is True:
- return _num_diff_eigh(obs, **kwargs)
w = derived_observable(lambda x, **kwargs: anp.linalg.eigh(x)[0], obs)
v = derived_observable(lambda x, **kwargs: anp.linalg.eigh(x)[1], obs)
return w, v
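# Hedged usage sketch (not part of this diff): eigh on a 2x2 symmetric matrix
# of Obs. The Obs construction follows the usual pyerrors pattern and is an
# assumption of this example, not part of the change.
import numpy as np
import pyerrors as pe

obs_a = pe.Obs([np.random.normal(4.0, 0.5, 1000)], ['ens1'])
obs_b = pe.Obs([np.random.normal(1.0, 0.1, 1000)], ['ens1'])
matrix = np.asarray([[obs_a, obs_b], [obs_b, obs_a]])
w, v = pe.linalg.eigh(matrix)  # eigenvalues and eigenvectors, both Obs valued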
@@ -374,235 +363,21 @@
def eig(obs, **kwargs):
"""Computes the eigenvalues of a given matrix of Obs according to np.linalg.eig."""
- if kwargs.get('num_grad') is True:
- return _num_diff_eig(obs, **kwargs)
- # Note: Automatic differentiation of eig is implemented in the git of autograd
- # but not yet released to PyPi (1.3)
w = derived_observable(lambda x, **kwargs: anp.real(anp.linalg.eig(x)[0]), obs)
return w
def pinv(obs, **kwargs):
"""Computes the Moore-Penrose pseudoinverse of a matrix of Obs."""
- if kwargs.get('num_grad') is True:
- return _num_diff_pinv(obs, **kwargs)
return derived_observable(lambda x, **kwargs: anp.linalg.pinv(x), obs)
def svd(obs, **kwargs):
"""Computes the singular value decomposition of a matrix of Obs."""
- if kwargs.get('num_grad') is True:
- return _num_diff_svd(obs, **kwargs)
u = derived_observable(lambda x, **kwargs: anp.linalg.svd(x, full_matrices=False)[0], obs)
s = derived_observable(lambda x, **kwargs: anp.linalg.svd(x, full_matrices=False)[1], obs)
vh = derived_observable(lambda x, **kwargs: anp.linalg.svd(x, full_matrices=False)[2], obs)
return (u, s, vh)
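# Hedged usage sketch (not part of this diff): svd and pinv also take
# rectangular matrices of Obs; it is assumed here that the autograd-based path
# handles them just as the removed numerical fallback did. Shapes follow
# numpy's conventions with full_matrices=False.
import numpy as np
import pyerrors as pe

entries = [pe.Obs([np.random.normal(v, 0.1, 1000)], ['ens1']) for v in range(1, 7)]
rect = np.asarray(entries).reshape(2, 3)
u, s, vh = pe.linalg.svd(rect)    # u: 2x2, s: length 2, vh: 2x3
rect_pinv = pe.linalg.pinv(rect)  # 3x2 Moore-Penrose pseudoinverse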
-
-
-# Variants for numerical differentiation
-
-def _num_diff_mat_mat_op(op, obs, **kwargs):
- """Computes the matrix to matrix operation op to a given matrix of Obs elementwise
- which is suitable for numerical differentiation."""
- def _mat(x, **kwargs):
- dim = int(np.sqrt(len(x)))
- if np.sqrt(len(x)) != dim:
- raise Exception('Input has to have dim**2 entries')
-
- mat = []
- for i in range(dim):
- row = []
- for j in range(dim):
- row.append(x[j + dim * i])
- mat.append(row)
-
- return op(np.array(mat))[kwargs.get('i')][kwargs.get('j')]
-
- if isinstance(obs, np.ndarray):
- raveled_obs = (1 * (obs.ravel())).tolist()
- elif isinstance(obs, list):
- raveled_obs = obs
- else:
- raise TypeError('Unproper type of input.')
-
- dim = int(np.sqrt(len(raveled_obs)))
-
- res_mat = []
- for i in range(dim):
- row = []
- for j in range(dim):
- row.append(derived_observable(_mat, raveled_obs, i=i, j=j, **kwargs))
- res_mat.append(row)
-
- return np.array(res_mat) @ np.identity(dim)
-
-
-def _num_diff_eigh(obs, **kwargs):
- """Computes the eigenvalues and eigenvectors of a given hermitian matrix of Obs according to np.linalg.eigh
- elementwise which is suitable for numerical differentiation."""
- def _mat(x, **kwargs):
- dim = int(np.sqrt(len(x)))
- if np.sqrt(len(x)) != dim:
- raise Exception('Input has to have dim**2 entries')
-
- mat = []
- for i in range(dim):
- row = []
- for j in range(dim):
- row.append(x[j + dim * i])
- mat.append(row)
-
- n = kwargs.get('n')
- res = np.linalg.eigh(np.array(mat))[n]
-
- if n == 0:
- return res[kwargs.get('i')]
- else:
- return res[kwargs.get('i')][kwargs.get('j')]
-
- if isinstance(obs, np.ndarray):
- raveled_obs = (1 * (obs.ravel())).tolist()
- elif isinstance(obs, list):
- raveled_obs = obs
- else:
- raise TypeError('Unproper type of input.')
-
- dim = int(np.sqrt(len(raveled_obs)))
-
- res_vec = []
- for i in range(dim):
- res_vec.append(derived_observable(_mat, raveled_obs, n=0, i=i, **kwargs))
-
- res_mat = []
- for i in range(dim):
- row = []
- for j in range(dim):
- row.append(derived_observable(_mat, raveled_obs, n=1, i=i, j=j, **kwargs))
- res_mat.append(row)
-
- return (np.array(res_vec) @ np.identity(dim), np.array(res_mat) @ np.identity(dim))
-
-
-def _num_diff_eig(obs, **kwargs):
- """Computes the eigenvalues of a given matrix of Obs according to np.linalg.eig
- elementwise which is suitable for numerical differentiation."""
- def _mat(x, **kwargs):
- dim = int(np.sqrt(len(x)))
- if np.sqrt(len(x)) != dim:
- raise Exception('Input has to have dim**2 entries')
-
- mat = []
- for i in range(dim):
- row = []
- for j in range(dim):
- row.append(x[j + dim * i])
- mat.append(row)
-
- n = kwargs.get('n')
- res = np.linalg.eig(np.array(mat))[n]
-
- if n == 0:
- # Discard imaginary part of eigenvalue here
- return np.real(res[kwargs.get('i')])
- else:
- return res[kwargs.get('i')][kwargs.get('j')]
-
- if isinstance(obs, np.ndarray):
- raveled_obs = (1 * (obs.ravel())).tolist()
- elif isinstance(obs, list):
- raveled_obs = obs
- else:
- raise TypeError('Unproper type of input.')
-
- dim = int(np.sqrt(len(raveled_obs)))
-
- res_vec = []
- for i in range(dim):
- # Note: Automatic differentiation of eig is implemented in the git of autograd
- # but not yet released to PyPi (1.3)
- res_vec.append(derived_observable(_mat, raveled_obs, n=0, i=i, **kwargs))
-
- return np.array(res_vec) @ np.identity(dim)
-
-
-def _num_diff_pinv(obs, **kwargs):
- """Computes the Moore-Penrose pseudoinverse of a matrix of Obs elementwise which is suitable
- for numerical differentiation."""
- def _mat(x, **kwargs):
- shape = kwargs.get('shape')
-
- mat = []
- for i in range(shape[0]):
- row = []
- for j in range(shape[1]):
- row.append(x[j + shape[1] * i])
- mat.append(row)
-
- return np.linalg.pinv(np.array(mat))[kwargs.get('i')][kwargs.get('j')]
-
- if isinstance(obs, np.ndarray):
- shape = obs.shape
- raveled_obs = (1 * (obs.ravel())).tolist()
- else:
- raise TypeError('Unproper type of input.')
-
- res_mat = []
- for i in range(shape[1]):
- row = []
- for j in range(shape[0]):
- row.append(derived_observable(_mat, raveled_obs, shape=shape, i=i, j=j, **kwargs))
- res_mat.append(row)
-
- return np.array(res_mat) @ np.identity(shape[0])
-
-
-def _num_diff_svd(obs, **kwargs):
- """Computes the singular value decomposition of a matrix of Obs elementwise which
- is suitable for numerical differentiation."""
- def _mat(x, **kwargs):
- shape = kwargs.get('shape')
-
- mat = []
- for i in range(shape[0]):
- row = []
- for j in range(shape[1]):
- row.append(x[j + shape[1] * i])
- mat.append(row)
-
- res = np.linalg.svd(np.array(mat), full_matrices=False)
-
- if kwargs.get('n') == 1:
- return res[1][kwargs.get('i')]
- else:
- return res[kwargs.get('n')][kwargs.get('i')][kwargs.get('j')]
-
- if isinstance(obs, np.ndarray):
- shape = obs.shape
- raveled_obs = (1 * (obs.ravel())).tolist()
- else:
- raise TypeError('Unproper type of input.')
-
- mid_index = min(shape[0], shape[1])
-
- res_mat0 = []
- for i in range(shape[0]):
- row = []
- for j in range(mid_index):
- row.append(derived_observable(_mat, raveled_obs, shape=shape, n=0, i=i, j=j, **kwargs))
- res_mat0.append(row)
-
- res_mat1 = []
- for i in range(mid_index):
- res_mat1.append(derived_observable(_mat, raveled_obs, shape=shape, n=1, i=i, **kwargs))
-
- res_mat2 = []
- for i in range(mid_index):
- row = []
- for j in range(shape[1]):
- row.append(derived_observable(_mat, raveled_obs, shape=shape, n=2, i=i, j=j, **kwargs))
- res_mat2.append(row)
-
- return (np.array(res_mat0) @ np.identity(mid_index), np.array(res_mat1) @ np.identity(mid_index), np.array(res_mat2) @ np.identity(shape[1]))
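# Migration note (hedged, not part of this diff): with the _num_diff_* helpers
# removed, passing num_grad=True no longer dispatches to a numerical-
# differentiation fallback; error propagation always goes through autograd.
# Assuming the eigh sketch above:
#   before: w, v = pe.linalg.eigh(matrix, num_grad=True)  # numerical fallback
#   after:  w, v = pe.linalg.eigh(matrix)                  # autograd only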
@@ -956,8 +731,6 @@ Obs valued.
def eigh(obs, **kwargs):
"""Computes the eigenvalues and eigenvectors of a given hermitian matrix of Obs according to np.linalg.eigh."""
- if kwargs.get('num_grad') is True:
- return _num_diff_eigh(obs, **kwargs)
w = derived_observable(lambda x, **kwargs: anp.linalg.eigh(x)[0], obs)
v = derived_observable(lambda x, **kwargs: anp.linalg.eigh(x)[1], obs)
return w, v
@@ -982,10 +755,6 @@ Obs valued.
@@ -1009,8 +778,6 @@ Obs valued.
def eig(obs, **kwargs):
"""Computes the eigenvalues of a given matrix of Obs according to np.linalg.eig."""
- if kwargs.get('num_grad') is True:
- return _num_diff_eig(obs, **kwargs)
- # Note: Automatic differentiation of eig is implemented in the git of autograd
- # but not yet released to PyPi (1.3)
w = derived_observable(lambda x, **kwargs: anp.real(anp.linalg.eig(x)[0]), obs)
return w
@@ -1033,8 +800,6 @@ Obs valued.
def pinv(obs, **kwargs):
"""Computes the Moore-Penrose pseudoinverse of a matrix of Obs."""
- if kwargs.get('num_grad') is True:
- return _num_diff_pinv(obs, **kwargs)
return derived_observable(lambda x, **kwargs: anp.linalg.pinv(x), obs)
def svd(obs, **kwargs):
"""Computes the singular value decomposition of a matrix of Obs."""
- if kwargs.get('num_grad') is True:
- return _num_diff_svd(obs, **kwargs)
u = derived_observable(lambda x, **kwargs: anp.linalg.svd(x, full_matrices=False)[0], obs)
s = derived_observable(lambda x, **kwargs: anp.linalg.svd(x, full_matrices=False)[1], obs)
vh = derived_observable(lambda x, **kwargs: anp.linalg.svd(x, full_matrices=False)[2], obs)