From c3f8b24a2176a96552590a2d6aa466eb5a400002 Mon Sep 17 00:00:00 2001
From: Fabian Joswig
Date: Fri, 22 Oct 2021 14:24:51 +0100
Subject: [PATCH 01/27] matrix valued operations speed up, inv and cholesky
 explicitly added

---
 pyerrors/linalg.py   | 212 +++++++++++++++++++++++++++++++++++--------
 tests/test_linalg.py |  33 ++++++-
 2 files changed, 200 insertions(+), 45 deletions(-)

diff --git a/pyerrors/linalg.py b/pyerrors/linalg.py
index 0394af8e..d47e5dc0 100644
--- a/pyerrors/linalg.py
+++ b/pyerrors/linalg.py
@@ -2,8 +2,9 @@
 # coding: utf-8
 
 import numpy as np
+from autograd import jacobian
 import autograd.numpy as anp  # Thinly-wrapped numpy
-from .pyerrors import derived_observable, CObs
+from .pyerrors import derived_observable, CObs, Obs
 
 
 # This code block is directly taken from the current master branch of autograd and remains
@@ -11,50 +12,135 @@ from .pyerrors import derived_observable, CObs
 from functools import partial
 from autograd.extend import defvjp
 
-_dot = partial(anp.einsum, '...ij,...jk->...ik')
+
+def derived_array(func, data, **kwargs):
+    """Construct a derived Obs according to func(data, **kwargs) using automatic differentiation.
+
+    Parameters
+    ----------
+    func -- arbitrary function of the form func(data, **kwargs). For the
+            automatic differentiation to work, all numpy functions have to have
+            the autograd wrapper (use 'import autograd.numpy as anp').
+    data -- list of Obs, e.g. [obs1, obs2, obs3].
+
+    Keyword arguments
+    -----------------
+    num_grad -- if True, numerical derivatives are used instead of autograd
+                (default False). To control the numerical differentiation the
+                kwargs of numdifftools.step_generators.MaxStepGenerator
+                can be used.
+    man_grad -- manually supply a list or an array which contains the jacobian
+                of func. Use cautiously, supplying the wrong derivative will
+                not be intercepted.
+
+    Notes
+    -----
+    For simple mathematical operations it can be practical to use anonymous
+    functions. For the ratio of two observables one can e.g.
use + + new_obs = derived_observable(lambda x: x[0] / x[1], [obs1, obs2]) + """ + + data = np.asarray(data) + raveled_data = data.ravel() + + # Workaround for matrix operations containing non Obs data + for i_data in raveled_data: + if isinstance(i_data, Obs): + first_name = i_data.names[0] + first_shape = i_data.shape[first_name] + break + + for i in range(len(raveled_data)): + if isinstance(raveled_data[i], (int, float)): + raveled_data[i] = Obs([raveled_data[i] + np.zeros(first_shape)], [first_name]) + + n_obs = len(raveled_data) + new_names = sorted(set([y for x in [o.names for o in raveled_data] for y in x])) + + new_shape = {} + for i_data in raveled_data: + for name in new_names: + tmp = i_data.shape.get(name) + if tmp is not None: + if new_shape.get(name) is None: + new_shape[name] = tmp + else: + if new_shape[name] != tmp: + raise Exception('Shapes of ensemble', name, 'do not match.') + if data.ndim == 1: + values = np.array([o.value for o in data]) + else: + values = np.vectorize(lambda x: x.value)(data) + + new_values = func(values, **kwargs) + + new_r_values = {} + for name in new_names: + tmp_values = np.zeros(n_obs) + for i, item in enumerate(raveled_data): + tmp = item.r_values.get(name) + if tmp is None: + tmp = item.value + tmp_values[i] = tmp + tmp_values = np.array(tmp_values).reshape(data.shape) + new_r_values[name] = func(tmp_values, **kwargs) + + if 'man_grad' in kwargs: + deriv = np.asarray(kwargs.get('man_grad')) + if new_values.shape + data.shape != deriv.shape: + raise Exception('Manual derivative does not have correct shape.') + elif kwargs.get('num_grad') is True: + raise Exception('Multi mode currently not supported for numerical derivative') + else: + deriv = jacobian(func)(values, **kwargs) + + final_result = np.zeros(new_values.shape, dtype=object) + + d_extracted = {} + for name in new_names: + d_extracted[name] = [] + for i_dat, dat in enumerate(data): + ens_length = dat.ravel()[0].shape[name] + d_extracted[name].append(np.array([o.deltas[name] for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (ens_length, ))) + + for i_val, new_val in np.ndenumerate(new_values): + new_deltas = {} + for name in new_names: + ens_length = d_extracted[name][0].shape[-1] + new_deltas[name] = np.zeros(ens_length) + for i_dat, dat in enumerate(d_extracted[name]): + new_deltas[name] += np.tensordot(deriv[i_val + (i_dat, )], dat) + + new_samples = [] + new_means = [] + for name in new_names: + new_samples.append(new_deltas[name]) + new_means.append(new_r_values[name][i_val]) + + final_result[i_val] = Obs(new_samples, new_names, means=new_means) + final_result[i_val]._value = new_val + + return final_result -# batched diag -def _diag(a): - return anp.eye(a.shape[-1]) * a +def matmul(x1, x2): + if isinstance(x1[0, 0], CObs) or isinstance(x2[0, 0], CObs): + Lr, Li = np.vectorize(lambda x: (np.real(x), np.imag(x)))(x1) + Rr, Ri = np.vectorize(lambda x: (np.real(x), np.imag(x)))(x2) + Nr = derived_array(lambda x: x[0] @ x[2] - x[1] @ x[3], [Lr, Li, Rr, Ri]) + Ni = derived_array(lambda x: x[0] @ x[3] + x[1] @ x[2], [Lr, Li, Rr, Ri]) + return (1 + 0j) * Nr + 1j * Ni + else: + return derived_array(lambda x: x[0] @ x[1], [x1, x2]) -# batched diagonal, similar to matrix_diag in tensorflow -def _matrix_diag(a): - reps = anp.array(a.shape) - reps[:-1] = 1 - reps[-1] = a.shape[-1] - newshape = list(a.shape) + [a.shape[-1]] - return _diag(anp.tile(a, reps).reshape(newshape)) - -# https://arxiv.org/pdf/1701.00392.pdf Eq(4.77) -# Note the formula from Sec3.1 in 
https://people.maths.ox.ac.uk/gilesm/files/NA-08-01.pdf is incomplete +def inv(x): + return mat_mat_op(anp.linalg.inv, x) -def grad_eig(ans, x): - """Gradient of a general square (complex valued) matrix""" - e, u = ans # eigenvalues as 1d array, eigenvectors in columns - n = e.shape[-1] - - def vjp(g): - ge, gu = g - ge = _matrix_diag(ge) - f = 1 / (e[..., anp.newaxis, :] - e[..., :, anp.newaxis] + 1.e-20) - f -= _diag(f) - ut = anp.swapaxes(u, -1, -2) - r1 = f * _dot(ut, gu) - r2 = -f * (_dot(_dot(ut, anp.conj(u)), anp.real(_dot(ut, gu)) * anp.eye(n))) - r = _dot(_dot(anp.linalg.inv(ut), ge + r1 + r2), ut) - if not anp.iscomplexobj(x): - r = anp.real(r) - # the derivative is still complex for real input (imaginary delta is allowed), real output - # but the derivative should be real in real input case when imaginary delta is forbidden - return r - return vjp - - -defvjp(anp.linalg.eig, grad_eig) -# End of the code block from autograd.master +def cholesky(x): + return mat_mat_op(anp.linalg.cholesky, x) def scalar_mat_op(op, obs, **kwargs): @@ -107,7 +193,7 @@ def mat_mat_op(op, obs, **kwargs): else: if kwargs.get('num_grad') is True: return _num_diff_mat_mat_op(op, obs, **kwargs) - return derived_observable(lambda x, **kwargs: op(x), obs) + return derived_array(lambda x, **kwargs: op(x), [obs])[0] def eigh(obs, **kwargs): @@ -375,3 +461,49 @@ def _num_diff_svd(obs, **kwargs): res_mat2.append(row) return (np.array(res_mat0) @ np.identity(mid_index), np.array(res_mat1) @ np.identity(mid_index), np.array(res_mat2) @ np.identity(shape[1])) + + +_dot = partial(anp.einsum, '...ij,...jk->...ik') + + +# batched diag +def _diag(a): + return anp.eye(a.shape[-1]) * a + + +# batched diagonal, similar to matrix_diag in tensorflow +def _matrix_diag(a): + reps = anp.array(a.shape) + reps[:-1] = 1 + reps[-1] = a.shape[-1] + newshape = list(a.shape) + [a.shape[-1]] + return _diag(anp.tile(a, reps).reshape(newshape)) + +# https://arxiv.org/pdf/1701.00392.pdf Eq(4.77) +# Note the formula from Sec3.1 in https://people.maths.ox.ac.uk/gilesm/files/NA-08-01.pdf is incomplete + + +def grad_eig(ans, x): + """Gradient of a general square (complex valued) matrix""" + e, u = ans # eigenvalues as 1d array, eigenvectors in columns + n = e.shape[-1] + + def vjp(g): + ge, gu = g + ge = _matrix_diag(ge) + f = 1 / (e[..., anp.newaxis, :] - e[..., :, anp.newaxis] + 1.e-20) + f -= _diag(f) + ut = anp.swapaxes(u, -1, -2) + r1 = f * _dot(ut, gu) + r2 = -f * (_dot(_dot(ut, anp.conj(u)), anp.real(_dot(ut, gu)) * anp.eye(n))) + r = _dot(_dot(anp.linalg.inv(ut), ge + r1 + r2), ut) + if not anp.iscomplexobj(x): + r = anp.real(r) + # the derivative is still complex for real input (imaginary delta is allowed), real output + # but the derivative should be real in real input case when imaginary delta is forbidden + return r + return vjp + + +defvjp(anp.linalg.eig, grad_eig) +# End of the code block from autograd.master diff --git a/tests/test_linalg.py b/tests/test_linalg.py index af121912..b978b35b 100644 --- a/tests/test_linalg.py +++ b/tests/test_linalg.py @@ -1,4 +1,5 @@ -import autograd.numpy as np +import numpy as np +import autograd.numpy as anp import math import pyerrors as pe import pytest @@ -6,6 +7,28 @@ import pytest np.random.seed(0) +def test_matmul(): + for dim in [4, 8]: + my_list = [] + length = 1000 + np.random.randint(200) + for i in range(dim ** 2): + my_list.append(pe.Obs([np.random.rand(length), np.random.rand(length + 1)], ['t1', 't2'])) + my_array = np.array(my_list).reshape((dim, dim)) + tt = 
pe.linalg.matmul(my_array, my_array) - my_array @ my_array + for t, e in np.ndenumerate(tt): + assert e.is_zero(), t + + my_list = [] + length = 1000 + np.random.randint(200) + for i in range(dim ** 2): + my_list.append(pe.CObs(pe.Obs([np.random.rand(length), np.random.rand(length + 1)], ['t1', 't2']), + pe.Obs([np.random.rand(length), np.random.rand(length + 1)], ['t1', 't2']))) + my_array = np.array(my_list).reshape((dim, dim)) + tt = pe.linalg.matmul(my_array, my_array) - my_array @ my_array + for t, e in np.ndenumerate(tt): + assert e.is_zero(), t + + def test_matrix_inverse(): content = [] for t in range(9): @@ -14,7 +37,7 @@ def test_matrix_inverse(): content.append(1.0) # Add 1.0 as a float matrix = np.diag(content) - inverse_matrix = pe.linalg.mat_mat_op(np.linalg.inv, matrix) + inverse_matrix = pe.linalg.inv(matrix) assert all([o.is_zero() for o in np.diag(matrix) * np.diag(inverse_matrix) - 1]) @@ -35,7 +58,7 @@ def test_complex_matrix_inverse(): matrix[n, m] = entry.real.value + 1j * entry.imag.value inverse_matrix = np.linalg.inv(matrix) - inverse_obs_matrix = pe.linalg.mat_mat_op(np.linalg.inv, obs_matrix) + inverse_obs_matrix = pe.linalg.inv(obs_matrix) for (n, m), entry in np.ndenumerate(inverse_matrix): assert np.isclose(inverse_matrix[n, m].real, inverse_obs_matrix[n, m].real.value) assert np.isclose(inverse_matrix[n, m].imag, inverse_obs_matrix[n, m].imag.value) @@ -53,7 +76,7 @@ def test_matrix_functions(): matrix = np.array(matrix) @ np.identity(dim) # Check inverse of matrix - inv = pe.linalg.mat_mat_op(np.linalg.inv, matrix) + inv = pe.linalg.inv(matrix) check_inv = matrix @ inv for (i, j), entry in np.ndenumerate(check_inv): @@ -66,7 +89,7 @@ def test_matrix_functions(): # Check Cholesky decomposition sym = np.dot(matrix, matrix.T) - cholesky = pe.linalg.mat_mat_op(np.linalg.cholesky, sym) + cholesky = pe.linalg.cholesky(sym) check = cholesky @ cholesky.T for (i, j), entry in np.ndenumerate(check): From 89ea9133ccf54379f9e4015dc7f599915b12bebe Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Fri, 22 Oct 2021 14:46:34 +0100 Subject: [PATCH 02/27] npr module now uses new linalg functions --- pyerrors/npr.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/pyerrors/npr.py b/pyerrors/npr.py index 96e35514..c1641d51 100644 --- a/pyerrors/npr.py +++ b/pyerrors/npr.py @@ -1,7 +1,6 @@ import warnings import numpy as np -import autograd.numpy as anp -from .linalg import mat_mat_op +from .linalg import inv, matmul from .dirac import gamma, gamma5 @@ -69,7 +68,7 @@ def inv_propagator(prop): """ Inverts a 12x12 quark propagator""" if prop.shape != (12, 12): raise Exception("Only 12x12 propagators can be inverted.") - return Npr_matrix(mat_mat_op(anp.linalg.inv, prop), prop.mom_in) + return Npr_matrix(inv(prop), prop.mom_in) def Zq(inv_prop, fermion='Wilson'): @@ -90,7 +89,7 @@ def Zq(inv_prop, fermion='Wilson'): else: raise Exception("Fermion type '" + fermion + "' not implemented") - res = 1 / 12. * np.trace(inv_prop @ np.kron(np.eye(3, dtype=int), p_slash)) + res = 1 / 12. 
* np.trace(matmul(inv_prop, np.kron(np.eye(3, dtype=int), p_slash))) res.gamma_method() if not res.imag.is_zero_within_error(5): From 1223676b08cb40a030c1d9eb16df55205b883255 Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Fri, 22 Oct 2021 16:45:40 +0100 Subject: [PATCH 03/27] recombination of complex matrices from real representation optimized --- pyerrors/linalg.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/pyerrors/linalg.py b/pyerrors/linalg.py index d47e5dc0..e05ecdda 100644 --- a/pyerrors/linalg.py +++ b/pyerrors/linalg.py @@ -130,7 +130,10 @@ def matmul(x1, x2): Rr, Ri = np.vectorize(lambda x: (np.real(x), np.imag(x)))(x2) Nr = derived_array(lambda x: x[0] @ x[2] - x[1] @ x[3], [Lr, Li, Rr, Ri]) Ni = derived_array(lambda x: x[0] @ x[3] + x[1] @ x[2], [Lr, Li, Rr, Ri]) - return (1 + 0j) * Nr + 1j * Ni + res = np.empty_like(Nr) + for (n, m), entry in np.ndenumerate(Nr): + res[n, m] = CObs(Nr[n, m], Ni[n, m]) + return res else: return derived_array(lambda x: x[0] @ x[1], [x1, x2]) @@ -185,11 +188,14 @@ def mat_mat_op(op, obs, **kwargs): if kwargs.get('num_grad') is True: op_big_matrix = _num_diff_mat_mat_op(op, big_matrix, **kwargs) else: - op_big_matrix = derived_observable(lambda x, **kwargs: op(x), big_matrix) + op_big_matrix = derived_array(lambda x, **kwargs: op(x), [big_matrix])[0] dim = op_big_matrix.shape[0] op_A = op_big_matrix[0: dim // 2, 0: dim // 2] op_B = op_big_matrix[dim // 2:, 0: dim // 2] - return (1 + 0j) * op_A + 1j * op_B + res = np.empty_like(op_A) + for (n, m), entry in np.ndenumerate(op_A): + res[n, m] = CObs(op_A[n, m], op_B[n, m]) + return res else: if kwargs.get('num_grad') is True: return _num_diff_mat_mat_op(op, obs, **kwargs) From 94e3ebe7f03d949e3162ed32084b957671c75911 Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Fri, 22 Oct 2021 18:29:28 +0100 Subject: [PATCH 04/27] read bilinear added to input/hadrons --- pyerrors/input/hadrons.py | 57 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 57 insertions(+) diff --git a/pyerrors/input/hadrons.py b/pyerrors/input/hadrons.py index 509c0922..2e9b52d4 100644 --- a/pyerrors/input/hadrons.py +++ b/pyerrors/input/hadrons.py @@ -109,3 +109,60 @@ def read_ExternalLeg_hd5(path, filestem, ens_id, order='F'): matrix[si, sj, ci, cj].gamma_method() return Npr_matrix(matrix.swapaxes(1, 2).reshape((12, 12), order=order), mom_in=mom) + + +def read_Bilinear_hd5(path, filestem, ens_id, order='F'): + """Read hadrons Bilinear hdf5 file and output an array of CObs + + Parameters + ----------------- + path -- path to the files to read + filestem -- namestem of the files to read + ens_id -- name of the ensemble, required for internal bookkeeping + order -- order in which the array is to be reshaped, + 'F' for the first index changing fastest (9 4x4 matrices) default. 
+ 'C' for the last index changing fastest (16 3x3 matrices), + """ + + files = _get_files(path, filestem) + + mom_in = None + mom_out = None + + corr_data = {} + for hd5_file in files: + file = h5py.File(path + '/' + hd5_file, "r") + for i in range(16): + name = file['Bilinear/Bilinear_' + str(i) + '/info'].attrs['gamma'][0].decode('UTF-8') + if name not in corr_data: + corr_data[name] = [] + raw_data = file['Bilinear/Bilinear_' + str(i) + '/corr'][0][0].view('complex') + corr_data[name].append(raw_data) + if mom_in is not None: + assert np.allclose(mom_in, np.array(str(file['Bilinear/Bilinear_' + str(i) + '/info'].attrs['pIn'])[3:-2].strip().split(' '), dtype=int)) + else: + mom_in = np.array(str(file['Bilinear/Bilinear_' + str(i) + '/info'].attrs['pIn'])[3:-2].strip().split(' '), dtype=int) + if mom_out is not None: + assert np.allclose(mom_out, np.array(str(file['Bilinear/Bilinear_' + str(i) + '/info'].attrs['pIn'])[3:-2].strip().split(' '), dtype=int)) + else: + mom_out = np.array(str(file['Bilinear/Bilinear_' + str(i) + '/info'].attrs['pIn'])[3:-2].strip().split(' '), dtype=int) + + file.close() + + result_dict = {} + + for key, data in corr_data.items(): + local_data = np.array(data) + + rolled_array = np.rollaxis(local_data, 0, 5) + + matrix = np.empty((rolled_array.shape[:-1]), dtype=object) + for si, sj, ci, cj in np.ndindex(rolled_array.shape[:-1]): + real = Obs([rolled_array[si, sj, ci, cj].real], [ens_id]) + imag = Obs([rolled_array[si, sj, ci, cj].imag], [ens_id]) + matrix[si, sj, ci, cj] = CObs(real, imag) + matrix[si, sj, ci, cj].gamma_method() + + result_dict[key] = Npr_matrix(matrix.swapaxes(1, 2).reshape((12, 12), order=order), mom_in=mom_in, mom_out=mom_out) + + return result_dict From 8aa3fba222ecad11226787324f541d82a3b02cc9 Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Sat, 23 Oct 2021 17:02:50 +0100 Subject: [PATCH 05/27] README updated --- README.md | 10 +++++++--- setup.py | 2 +- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index e67157ea..d1f22b1e 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -[![flake8](https://github.com/fjosw/pyerrors/actions/workflows/flake8.yml/badge.svg)](https://github.com/fjosw/pyerrors/actions/workflows/flake8.yml) [![CI](https://github.com/fjosw/pyerrors/actions/workflows/CI.yml/badge.svg)](https://github.com/fjosw/pyerrors/actions/workflows/CI.yml) [![](https://img.shields.io/badge/python-3.6+-blue.svg)](https://www.python.org/downloads/) +[![flake8](https://github.com/fjosw/pyerrors/actions/workflows/flake8.yml/badge.svg)](https://github.com/fjosw/pyerrors/actions/workflows/flake8.yml) [![Build/Test](https://github.com/fjosw/pyerrors/actions/workflows/CI.yml/badge.svg)](https://github.com/fjosw/pyerrors/actions/workflows/CI.yml) [![](https://img.shields.io/badge/python-3.6+-blue.svg)](https://www.python.org/downloads/) # pyerrors `pyerrors` is a python package for error computation and propagation of Markov chain Monte Carlo data. It is based on the **gamma method** [arXiv:hep-lat/0306017](https://arxiv.org/abs/hep-lat/0306017). 
Some of its features are: @@ -16,7 +16,11 @@ There exist similar implementations of gamma method error analysis suites in ## Installation To install the most recent release of `pyerrors` run ```bash -pip install git+https://github.com/fjosw/pyerrors.git +pip install git+https://github.com/fjosw/pyerrors.git@master +``` +to install the current `develop` version run +```bash +pip install git+https://github.com/fjosw/pyerrors.git@develop ``` ## Usage @@ -31,6 +35,7 @@ obs1.print() Often one is interested in secondary observables which can be arbitrary functions of primary observables. `pyerrors` overloads most basic math operations and `numpy` functions such that the user can work with `Obs` objects as if they were floats ```python import numpy as np + obs3 = 12.0 / obs1 ** 2 - np.exp(-1.0 / obs2) obs3.gamma_method() obs3.print() @@ -44,6 +49,5 @@ More detailed examples can be found in the `examples` folder: * [04_fit_example](examples/04_fit_example.ipynb) * [05_matrix_operations](examples/05_matrix_operations.ipynb) - ## License [MIT](https://choosealicense.com/licenses/mit/) diff --git a/setup.py b/setup.py index df32d698..20e4803b 100644 --- a/setup.py +++ b/setup.py @@ -3,7 +3,7 @@ from setuptools import setup, find_packages setup(name='pyerrors', - version='1.1.0', + version='2.0.0', description='Error analysis for lattice QCD', author='Fabian Joswig', author_email='fabian.joswig@ed.ac.uk', From e1fd4606d3b770bfe1655fc8122b5f0bda36be98 Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Sat, 23 Oct 2021 17:24:39 +0100 Subject: [PATCH 06/27] Tests made more rigorous --- tests/test_linalg.py | 8 ++------ tests/test_pyerrors.py | 2 +- 2 files changed, 3 insertions(+), 7 deletions(-) diff --git a/tests/test_linalg.py b/tests/test_linalg.py index b978b35b..49a1aaa0 100644 --- a/tests/test_linalg.py +++ b/tests/test_linalg.py @@ -94,15 +94,11 @@ def test_matrix_functions(): for (i, j), entry in np.ndenumerate(check): diff = entry - sym[i, j] - diff.gamma_method() - assert math.isclose(diff.value, 0.0, abs_tol=1e-9), 'value ' + str(i) + ',' + str(j) - assert math.isclose(diff.dvalue, 0.0, abs_tol=1e-9), 'dvalue ' + str(i) + ',' + str(j) + assert diff.is_zero() # Check eigh e, v = pe.linalg.eigh(sym) for i in range(dim): tmp = sym @ v[:, i] - v[:, i] * e[i] for j in range(dim): - tmp[j].gamma_method() - assert math.isclose(tmp[j].value, 0.0, abs_tol=1e-9), 'value ' + str(i) + ',' + str(j) - assert math.isclose(tmp[j].dvalue, 0.0, abs_tol=1e-9), 'dvalue ' + str(i) + ',' + str(j) + assert tmp[j].is_zero() diff --git a/tests/test_pyerrors.py b/tests/test_pyerrors.py index 2740d5d9..39192c6f 100644 --- a/tests/test_pyerrors.py +++ b/tests/test_pyerrors.py @@ -245,4 +245,4 @@ def test_cobs(): assert np.allclose(0.0, diff.real.deltas['t']) assert np.allclose(0.0, diff.imag.deltas['t']) - div = my_cobs / other + assert (my_cobs / other * other - my_cobs).is_zero() From 9b023e339f3a70463f19827a301134bd3736286e Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Mon, 25 Oct 2021 09:41:34 +0100 Subject: [PATCH 07/27] Bug in complex mutliplication fixed --- pyerrors/pyerrors.py | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/pyerrors/pyerrors.py b/pyerrors/pyerrors.py index 32500af3..7d6f4257 100644 --- a/pyerrors/pyerrors.py +++ b/pyerrors/pyerrors.py @@ -714,16 +714,17 @@ class CObs: return -1 * (self - other) def __mul__(self, other): - if all(isinstance(i, Obs) for i in [self.real, self.imag, other.real, other.imag]): - return CObs(derived_observable(lambda x, 
**kwargs: x[0] * x[1] - x[2] * x[3], - [self.real, other.real, self.imag, other.imag], - man_grad=[other.real.value, self.real.value, -other.imag.value, -self.imag.value]), - derived_observable(lambda x, **kwargs: x[2] * x[1] + x[0] * x[3], - [self.real, other.real, self.imag, other.imag], - man_grad=[other.imag.value, self.imag.value, other.real.value, self.real.value])) - elif hasattr(other, 'real') and getattr(other, 'imag', 0) != 0: - return CObs(self.real * other.real - self.imag * other.imag, - self.imag * other.real + self.real * other.imag) + if hasattr(other, 'real') and getattr(other, 'imag', 0) != 0: + if all(isinstance(i, Obs) for i in [self.real, self.imag, other.real, other.imag]): + return CObs(derived_observable(lambda x, **kwargs: x[0] * x[1] - x[2] * x[3], + [self.real, other.real, self.imag, other.imag], + man_grad=[other.real.value, self.real.value, -other.imag.value, -self.imag.value]), + derived_observable(lambda x, **kwargs: x[2] * x[1] + x[0] * x[3], + [self.real, other.real, self.imag, other.imag], + man_grad=[other.imag.value, self.imag.value, other.real.value, self.real.value])) + else: + return CObs(self.real * other.real - self.imag * other.imag, + self.imag * other.real + self.real * other.imag) else: return CObs(self.real * np.real(other), self.imag * np.real(other)) From c634d183a125cf5b8db9c2dfd08d1c8f6e9b6fd5 Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Mon, 25 Oct 2021 09:51:30 +0100 Subject: [PATCH 08/27] Another bug in complex mutliplication fixed --- pyerrors/pyerrors.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/pyerrors/pyerrors.py b/pyerrors/pyerrors.py index 7d6f4257..52192026 100644 --- a/pyerrors/pyerrors.py +++ b/pyerrors/pyerrors.py @@ -714,7 +714,7 @@ class CObs: return -1 * (self - other) def __mul__(self, other): - if hasattr(other, 'real') and getattr(other, 'imag', 0) != 0: + if hasattr(other, 'real') and hasattr(other, 'imag'): if all(isinstance(i, Obs) for i in [self.real, self.imag, other.real, other.imag]): return CObs(derived_observable(lambda x, **kwargs: x[0] * x[1] - x[2] * x[3], [self.real, other.real, self.imag, other.imag], @@ -722,11 +722,13 @@ class CObs: derived_observable(lambda x, **kwargs: x[2] * x[1] + x[0] * x[3], [self.real, other.real, self.imag, other.imag], man_grad=[other.imag.value, self.imag.value, other.real.value, self.real.value])) - else: + elif getattr(other, 'imag', 0) != 0: return CObs(self.real * other.real - self.imag * other.imag, self.imag * other.real + self.real * other.imag) + else: + return CObs(self.real * other.real, self.imag * other.real) else: - return CObs(self.real * np.real(other), self.imag * np.real(other)) + return CObs(self.real * other, self.imag * other) def __rmul__(self, other): return self * other From 325293f2b4d9c2ff9e206c2573b7a609f0cdff98 Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Mon, 25 Oct 2021 10:44:51 +0100 Subject: [PATCH 09/27] rtruediv implemented for CObs, tests extended --- pyerrors/npr.py | 3 +++ pyerrors/pyerrors.py | 7 +++++++ tests/test_pyerrors.py | 28 +++++++++++++++++----------- 3 files changed, 27 insertions(+), 11 deletions(-) diff --git a/pyerrors/npr.py b/pyerrors/npr.py index c1641d51..c04813fc 100644 --- a/pyerrors/npr.py +++ b/pyerrors/npr.py @@ -95,4 +95,7 @@ def Zq(inv_prop, fermion='Wilson'): if not res.imag.is_zero_within_error(5): warnings.warn("Imaginary part of Zq is not zero within 5 sigma") return res + if not np.abs(res.imag.value) <= 1e-6: + warnings.warn("Imaginary part of Zq is not smaller than 
1e-6") + return res return res.real diff --git a/pyerrors/pyerrors.py b/pyerrors/pyerrors.py index 52192026..7d0b9f24 100644 --- a/pyerrors/pyerrors.py +++ b/pyerrors/pyerrors.py @@ -740,6 +740,13 @@ class CObs: else: return CObs(self.real / other, self.imag / other) + def __rtruediv__(self, other): + r = self.real ** 2 + self.imag ** 2 + if hasattr(other, 'real') and hasattr(other, 'imag'): + return CObs((self.real * other.real + self.imag * other.imag) / r, (self.real * other.imag - self.imag * other.real) / r) + else: + return CObs(self.real * other / r, -self.imag * other / r) + def __abs__(self): return np.sqrt(self.real**2 + self.imag**2) diff --git a/tests/test_pyerrors.py b/tests/test_pyerrors.py index 39192c6f..9752f093 100644 --- a/tests/test_pyerrors.py +++ b/tests/test_pyerrors.py @@ -54,8 +54,12 @@ def test_function_overloading(): t1 = f([a, b]) t2 = pe.derived_observable(f, [a, b]) c = t2 - t1 - assert c.value == 0.0, str(i) - assert np.all(np.abs(c.deltas['e1']) < 1e-14), str(i) + assert c.is_zero() + + assert np.log(np.exp(b)) == b + assert np.exp(np.log(b)) == b + assert np.sqrt(b ** 2) == b + assert np.sqrt(b) ** 2 == b def test_overloading_vectorization(): @@ -197,6 +201,7 @@ def test_overloaded_functions(): assert np.abs((ad_obs.value - item(val)) / ad_obs.value) < 1e-10, item.__name__ assert np.abs(ad_obs.dvalue - dval * np.abs(deriv[i](val))) < 1e-6, item.__name__ + def test_utils(): my_obs = pe.pseudo_Obs(1.0, 0.5, 't') my_obs.print(0) @@ -211,6 +216,7 @@ def test_utils(): assert my_obs > (my_obs - 1) assert my_obs < (my_obs + 1) + def test_cobs(): obs1 = pe.pseudo_Obs(1.0, 0.1, 't') obs2 = pe.pseudo_Obs(-0.2, 0.03, 't') @@ -227,22 +233,22 @@ def test_cobs(): fs = [[lambda x: x[0] + x[1], lambda x: x[1] + x[0]], [lambda x: x[0] * x[1], lambda x: x[1] * x[0]]] - for other in [1, 1.1, (1.1-0.2j), pe.CObs(obs1), pe.CObs(obs1, obs2)]: + for other in [3, 1.1, (1.1 - 0.2j), (2.3 + 0j), (0.0 + 7.7j), pe.CObs(obs1), pe.CObs(obs1, obs2)]: for funcs in fs: ta = funcs[0]([my_cobs, other]) tb = funcs[1]([my_cobs, other]) diff = ta - tb - assert np.isclose(0.0, float(diff.real)) - assert np.isclose(0.0, float(diff.imag)) - assert np.allclose(0.0, diff.real.deltas['t']) - assert np.allclose(0.0, diff.imag.deltas['t']) + assert diff.is_zero() ta = my_cobs - other tb = other - my_cobs diff = ta + tb - assert np.isclose(0.0, float(diff.real)) - assert np.isclose(0.0, float(diff.imag)) - assert np.allclose(0.0, diff.real.deltas['t']) - assert np.allclose(0.0, diff.imag.deltas['t']) + assert diff.is_zero() + + ta = my_cobs / other + tb = other / my_cobs + diff = ta * tb - 1 + assert diff.is_zero() assert (my_cobs / other * other - my_cobs).is_zero() + assert (other / my_cobs * my_cobs - other).is_zero() From 68b25ba4ca1a097d0dc110cb6fdfe9a506db4b1e Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Mon, 25 Oct 2021 12:24:54 +0100 Subject: [PATCH 10/27] npr.Zq now checks if imag part is at machine precision --- pyerrors/npr.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/pyerrors/npr.py b/pyerrors/npr.py index c04813fc..a8b5ab9a 100644 --- a/pyerrors/npr.py +++ b/pyerrors/npr.py @@ -92,10 +92,7 @@ def Zq(inv_prop, fermion='Wilson'): res = 1 / 12. 
* np.trace(matmul(inv_prop, np.kron(np.eye(3, dtype=int), p_slash))) res.gamma_method() - if not res.imag.is_zero_within_error(5): + if not res.imag.is_zero() and not res.imag.is_zero_within_error(5): warnings.warn("Imaginary part of Zq is not zero within 5 sigma") return res - if not np.abs(res.imag.value) <= 1e-6: - warnings.warn("Imaginary part of Zq is not smaller than 1e-6") - return res return res.real From d4b86f5f732ca7b101373a2416c89c43479c7056 Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Mon, 25 Oct 2021 13:58:16 +0100 Subject: [PATCH 11/27] linalg.matmul now works with any number of operands --- pyerrors/linalg.py | 46 +++++++++++++++++++++++++++++++++++++------- tests/test_linalg.py | 22 +++++++++++++++++++++ 2 files changed, 61 insertions(+), 7 deletions(-) diff --git a/pyerrors/linalg.py b/pyerrors/linalg.py index e05ecdda..99fa0bb3 100644 --- a/pyerrors/linalg.py +++ b/pyerrors/linalg.py @@ -124,18 +124,50 @@ def derived_array(func, data, **kwargs): return final_result -def matmul(x1, x2): - if isinstance(x1[0, 0], CObs) or isinstance(x2[0, 0], CObs): - Lr, Li = np.vectorize(lambda x: (np.real(x), np.imag(x)))(x1) - Rr, Ri = np.vectorize(lambda x: (np.real(x), np.imag(x)))(x2) - Nr = derived_array(lambda x: x[0] @ x[2] - x[1] @ x[3], [Lr, Li, Rr, Ri]) - Ni = derived_array(lambda x: x[0] @ x[3] + x[1] @ x[2], [Lr, Li, Rr, Ri]) +def matmul(*operands): + if any(isinstance(o[0, 0], CObs) for o in operands): + extended_operands = [] + for op in operands: + tmp = np.vectorize(lambda x: (np.real(x), np.imag(x)))(op) + extended_operands.append(tmp[0]) + extended_operands.append(tmp[1]) + + def multi_dot(operands, part): + stack_r = operands[0] + stack_i = operands[1] + for op_r, op_i in zip(operands[2::2], operands[3::2]): + tmp_r = stack_r @ op_r - stack_i @ op_i + tmp_i = stack_r @ op_i + stack_i @ op_r + + stack_r = tmp_r + stack_i = tmp_i + + if part == 'Real': + return stack_r + else: + return stack_i + + def multi_dot_r(operands): + return multi_dot(operands, 'Real') + + def multi_dot_i(operands): + return multi_dot(operands, 'Imag') + + Nr = derived_array(multi_dot_r, extended_operands) + Ni = derived_array(multi_dot_i, extended_operands) + res = np.empty_like(Nr) for (n, m), entry in np.ndenumerate(Nr): res[n, m] = CObs(Nr[n, m], Ni[n, m]) + return res else: - return derived_array(lambda x: x[0] @ x[1], [x1, x2]) + def multi_dot(operands): + stack = operands[0] + for op in operands[1:]: + stack = stack @ op + return stack + return derived_array(multi_dot, operands) def inv(x): diff --git a/tests/test_linalg.py b/tests/test_linalg.py index 49a1aaa0..3350e9c9 100644 --- a/tests/test_linalg.py +++ b/tests/test_linalg.py @@ -29,6 +29,28 @@ def test_matmul(): assert e.is_zero(), t +def test_multi_dot(): + for dim in [4, 8]: + my_list = [] + length = 1000 + np.random.randint(200) + for i in range(dim ** 2): + my_list.append(pe.Obs([np.random.rand(length), np.random.rand(length + 1)], ['t1', 't2'])) + my_array = np.array(my_list).reshape((dim, dim)) + tt = pe.linalg.matmul(my_array, my_array, my_array, my_array) - my_array @ my_array @ my_array @ my_array + for t, e in np.ndenumerate(tt): + assert e.is_zero(), t + + my_list = [] + length = 1000 + np.random.randint(200) + for i in range(dim ** 2): + my_list.append(pe.CObs(pe.Obs([np.random.rand(length), np.random.rand(length + 1)], ['t1', 't2']), + pe.Obs([np.random.rand(length), np.random.rand(length + 1)], ['t1', 't2']))) + my_array = np.array(my_list).reshape((dim, dim)) + tt = pe.linalg.matmul(my_array, my_array, my_array, 
my_array) - my_array @ my_array @ my_array @ my_array + for t, e in np.ndenumerate(tt): + assert e.is_zero(), t + + def test_matrix_inverse(): content = [] for t in range(9): From a24e6084132b9131b7a5179b98fd1b02835d894c Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Mon, 25 Oct 2021 14:26:58 +0100 Subject: [PATCH 12/27] g5H operation of Npr_matrix optimized --- pyerrors/npr.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/pyerrors/npr.py b/pyerrors/npr.py index a8b5ab9a..acd21dff 100644 --- a/pyerrors/npr.py +++ b/pyerrors/npr.py @@ -26,10 +26,9 @@ class Npr_matrix(np.ndarray): if self.shape != (12, 12): raise Exception('g5H only works for 12x12 matrices.') extended_g5 = np.kron(np.eye(3, dtype=int), gamma5) - new_matrix = extended_g5 @ self.conj().T @ extended_g5 - new_matrix.mom_in = self.mom_out - new_matrix.mom_out = self.mom_in - return new_matrix + return Npr_matrix(matmul(extended_g5, self.conj().T, extended_g5), + mom_in=self.mom_out, + mom_out=self.mom_in) def _propagate_mom(self, other, name): s_mom = getattr(self, name, None) From ad53f28e4633ee7e93a4d6b3cee581798e6cb72a Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Mon, 25 Oct 2021 14:32:28 +0100 Subject: [PATCH 13/27] mat_mat_op renamed to _mat_mat_op --- pyerrors/correlators.py | 8 ++++---- pyerrors/linalg.py | 6 +++--- pyerrors/npr.py | 4 ++-- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/pyerrors/correlators.py b/pyerrors/correlators.py index 8ef26ce2..d9f3142b 100644 --- a/pyerrors/correlators.py +++ b/pyerrors/correlators.py @@ -5,7 +5,7 @@ import matplotlib.pyplot as plt import scipy.linalg from .pyerrors import Obs, dump_object from .fits import standard_fit -from .linalg import eigh, mat_mat_op +from .linalg import eigh, inv, cholesky from .roots import find_root @@ -187,10 +187,10 @@ class Corr: def Eigenvalue(self, t0, state=1): G = self.smearing_symmetric() G0 = G.content[t0] - L = mat_mat_op(anp.linalg.cholesky, G0) - Li = mat_mat_op(anp.linalg.inv, L) + L = cholesky(G0) + Li = inv(L) LT = L.T - LTi = mat_mat_op(anp.linalg.inv, LT) + LTi = inv(LT) newcontent = [] for t in range(self.T): Gt = G.content[t] diff --git a/pyerrors/linalg.py b/pyerrors/linalg.py index 99fa0bb3..4d2aea61 100644 --- a/pyerrors/linalg.py +++ b/pyerrors/linalg.py @@ -171,11 +171,11 @@ def matmul(*operands): def inv(x): - return mat_mat_op(anp.linalg.inv, x) + return _mat_mat_op(anp.linalg.inv, x) def cholesky(x): - return mat_mat_op(anp.linalg.cholesky, x) + return _mat_mat_op(anp.linalg.cholesky, x) def scalar_mat_op(op, obs, **kwargs): @@ -203,7 +203,7 @@ def scalar_mat_op(op, obs, **kwargs): return derived_observable(_mat, raveled_obs, **kwargs) -def mat_mat_op(op, obs, **kwargs): +def _mat_mat_op(op, obs, **kwargs): """Computes the matrix to matrix operation op to a given matrix of Obs.""" # Use real representation to calculate matrix operations for complex matrices if isinstance(obs.ravel()[0], CObs): diff --git a/pyerrors/npr.py b/pyerrors/npr.py index acd21dff..16c86442 100644 --- a/pyerrors/npr.py +++ b/pyerrors/npr.py @@ -27,8 +27,8 @@ class Npr_matrix(np.ndarray): raise Exception('g5H only works for 12x12 matrices.') extended_g5 = np.kron(np.eye(3, dtype=int), gamma5) return Npr_matrix(matmul(extended_g5, self.conj().T, extended_g5), - mom_in=self.mom_out, - mom_out=self.mom_in) + mom_in=self.mom_out, + mom_out=self.mom_in) def _propagate_mom(self, other, name): s_mom = getattr(self, name, None) From f395cb3d88d499e0c86bfef0ce19101733e0d94b Mon Sep 17 00:00:00 2001 From: Fabian 
Joswig Date: Mon, 25 Oct 2021 15:10:18 +0100 Subject: [PATCH 14/27] Complex scalar array operations now correctly overloaded --- pyerrors/pyerrors.py | 16 ++++++++++++---- tests/test_linalg.py | 32 ++++++++++++++++++++++++++++++++ 2 files changed, 44 insertions(+), 4 deletions(-) diff --git a/pyerrors/pyerrors.py b/pyerrors/pyerrors.py index 7d0b9f24..1c8699a0 100644 --- a/pyerrors/pyerrors.py +++ b/pyerrors/pyerrors.py @@ -695,7 +695,9 @@ class CObs: return CObs(self.real, -self.imag) def __add__(self, other): - if hasattr(other, 'real') and hasattr(other, 'imag'): + if isinstance(other, np.ndarray): + return other + self + elif hasattr(other, 'real') and hasattr(other, 'imag'): return CObs(self.real + other.real, self.imag + other.imag) else: @@ -705,7 +707,9 @@ class CObs: return self + y def __sub__(self, other): - if hasattr(other, 'real') and hasattr(other, 'imag'): + if isinstance(other, np.ndarray): + return -1 * (other - self) + elif hasattr(other, 'real') and hasattr(other, 'imag'): return CObs(self.real - other.real, self.imag - other.imag) else: return CObs(self.real - other, self.imag) @@ -714,7 +718,9 @@ class CObs: return -1 * (self - other) def __mul__(self, other): - if hasattr(other, 'real') and hasattr(other, 'imag'): + if isinstance(other, np.ndarray): + return other * self + elif hasattr(other, 'real') and hasattr(other, 'imag'): if all(isinstance(i, Obs) for i in [self.real, self.imag, other.real, other.imag]): return CObs(derived_observable(lambda x, **kwargs: x[0] * x[1] - x[2] * x[3], [self.real, other.real, self.imag, other.imag], @@ -734,7 +740,9 @@ class CObs: return self * other def __truediv__(self, other): - if hasattr(other, 'real') and hasattr(other, 'imag'): + if isinstance(other, np.ndarray): + return 1 / (other / self) + elif hasattr(other, 'real') and hasattr(other, 'imag'): r = other.real ** 2 + other.imag ** 2 return CObs((self.real * other.real + self.imag * other.imag) / r, (self.imag * other.real - self.real * other.imag) / r) else: diff --git a/tests/test_linalg.py b/tests/test_linalg.py index 3350e9c9..c441426d 100644 --- a/tests/test_linalg.py +++ b/tests/test_linalg.py @@ -124,3 +124,35 @@ def test_matrix_functions(): tmp = sym @ v[:, i] - v[:, i] * e[i] for j in range(dim): assert tmp[j].is_zero() + + +def test_complex_matrix_operations(): + dimension = 4 + base_matrix = np.empty((dimension, dimension), dtype=object) + for (n, m), entry in np.ndenumerate(base_matrix): + exponent_real = np.random.normal(3, 5) + exponent_imag = np.random.normal(3, 5) + base_matrix[n, m] = pe.CObs(pe.pseudo_Obs(2 + 10 ** exponent_real, 10 ** (exponent_real - 1), 't'), + pe.pseudo_Obs(2 + 10 ** exponent_imag, 10 ** (exponent_imag - 1), 't')) + + for other in [2, 2.3, (1 - 0.1j), (0 + 2.1j)]: + ta = base_matrix * other + tb = other * base_matrix + diff = ta - tb + for (i, j), entry in np.ndenumerate(diff): + assert entry.is_zero() + ta = base_matrix + other + tb = other + base_matrix + diff = ta - tb + for (i, j), entry in np.ndenumerate(diff): + assert entry.is_zero() + ta = base_matrix - other + tb = other - base_matrix + diff = ta + tb + for (i, j), entry in np.ndenumerate(diff): + assert entry.is_zero() + ta = base_matrix / other + tb = other / base_matrix + diff = ta * tb - 1 + for (i, j), entry in np.ndenumerate(diff): + assert entry.is_zero() From f24451090a5371ad57af352042fd2eddb2a482bc Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Mon, 25 Oct 2021 17:34:59 +0100 Subject: [PATCH 15/27] Bug in gamma_method fixed --- pyerrors/input/hadrons.py | 4 ++-- 
pyerrors/pyerrors.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/pyerrors/input/hadrons.py b/pyerrors/input/hadrons.py index 2e9b52d4..b5229f8a 100644 --- a/pyerrors/input/hadrons.py +++ b/pyerrors/input/hadrons.py @@ -143,9 +143,9 @@ def read_Bilinear_hd5(path, filestem, ens_id, order='F'): else: mom_in = np.array(str(file['Bilinear/Bilinear_' + str(i) + '/info'].attrs['pIn'])[3:-2].strip().split(' '), dtype=int) if mom_out is not None: - assert np.allclose(mom_out, np.array(str(file['Bilinear/Bilinear_' + str(i) + '/info'].attrs['pIn'])[3:-2].strip().split(' '), dtype=int)) + assert np.allclose(mom_out, np.array(str(file['Bilinear/Bilinear_' + str(i) + '/info'].attrs['pOut'])[3:-2].strip().split(' '), dtype=int)) else: - mom_out = np.array(str(file['Bilinear/Bilinear_' + str(i) + '/info'].attrs['pIn'])[3:-2].strip().split(' '), dtype=int) + mom_out = np.array(str(file['Bilinear/Bilinear_' + str(i) + '/info'].attrs['pOut'])[3:-2].strip().split(' '), dtype=int) file.close() diff --git a/pyerrors/pyerrors.py b/pyerrors/pyerrors.py index 1c8699a0..9e8388e9 100644 --- a/pyerrors/pyerrors.py +++ b/pyerrors/pyerrors.py @@ -34,7 +34,7 @@ class Obs: ensemble. N_sigma_global -- Standard value for N_sigma (default 1.0) """ - # __slots__ = ['names', 'shape', 'r_values', 'deltas', 'N', 'value', 'dvalue', + # __slots__ = ['names', 'shape', 'r_values', 'deltas', 'N', '_value', '_dvalue', # 'ddvalue', 'reweighted', 'S', 'tau_exp', 'N_sigma', 'e_names', # 'e_content', 'e_dvalue', 'e_ddvalue', 'e_tauint', 'e_dtauint', # 'e_windowsize', 'e_rho', 'e_drho', 'e_n_tauint', 'e_n_dtauint', @@ -152,7 +152,7 @@ class Obs: self.e_rho = {} self.e_drho = {} self._dvalue = 0 - self._ddvalue = 0 + self.ddvalue = 0 self.S = {} self.tau_exp = {} From 14cf1f7d1b6bb9f89604657b032b0012673639e5 Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Mon, 25 Oct 2021 21:35:50 +0100 Subject: [PATCH 16/27] Additional test for multiple calls of gamma_method added --- tests/test_pyerrors.py | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/tests/test_pyerrors.py b/tests/test_pyerrors.py index 9752f093..46580275 100644 --- a/tests/test_pyerrors.py +++ b/tests/test_pyerrors.py @@ -95,6 +95,28 @@ def test_gamma_method(): assert test_obs.e_tauint['t'] - 10.5 <= test_obs.e_dtauint['t'] +def test_gamma_method_persistance(): + my_obs = pe.Obs([np.random.rand(730)], ['t']) + my_obs.gamma_method() + value = my_obs.value + dvalue = my_obs.dvalue + ddvalue = my_obs.ddvalue + my_obs = 1.0 * my_obs + my_obs.gamma_method() + assert value == my_obs.value + assert dvalue == my_obs.dvalue + assert ddvalue == my_obs.ddvalue + my_obs.gamma_method() + assert value == my_obs.value + assert dvalue == my_obs.dvalue + assert ddvalue == my_obs.ddvalue + my_obs.gamma_method(S=3.7) + my_obs.gamma_method() + assert value == my_obs.value + assert dvalue == my_obs.dvalue + assert ddvalue == my_obs.ddvalue + + def test_covariance_is_variance(): value = np.random.normal(5, 10) dvalue = np.abs(np.random.normal(0, 1)) From eedad7dedacbe952900a96b5019201f55fb6e84e Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Mon, 25 Oct 2021 21:59:12 +0100 Subject: [PATCH 17/27] docstrings and tests added to linalg module --- pyerrors/linalg.py | 28 ++++++++++++---------------- tests/test_linalg.py | 10 ++++++++-- tests/test_roots.py | 2 +- 3 files changed, 21 insertions(+), 19 deletions(-) diff --git a/pyerrors/linalg.py b/pyerrors/linalg.py index 4d2aea61..0f08d72b 100644 --- a/pyerrors/linalg.py +++ b/pyerrors/linalg.py @@ 
-6,15 +6,13 @@ from autograd import jacobian import autograd.numpy as anp # Thinly-wrapped numpy from .pyerrors import derived_observable, CObs, Obs - -# This code block is directly taken from the current master branch of autograd and remains -# only until the new version is released on PyPi from functools import partial from autograd.extend import defvjp def derived_array(func, data, **kwargs): - """Construct a derived Obs according to func(data, **kwargs) using automatic differentiation. + """Construct a derived Obs according to func(data, **kwargs) of matrix value data + using automatic differentiation. Parameters ---------- @@ -25,20 +23,9 @@ def derived_array(func, data, **kwargs): Keyword arguments ----------------- - num_grad -- if True, numerical derivatives are used instead of autograd - (default False). To control the numerical differentiation the - kwargs of numdifftools.step_generators.MaxStepGenerator - can be used. man_grad -- manually supply a list or an array which contains the jacobian of func. Use cautiously, supplying the wrong derivative will not be intercepted. - - Notes - ----- - For simple mathematical operations it can be practical to use anonymous - functions. For the ratio of two observables one can e.g. use - - new_obs = derived_observable(lambda x: x[0] / x[1], [obs1, obs2]) """ data = np.asarray(data) @@ -125,6 +112,11 @@ def derived_array(func, data, **kwargs): def matmul(*operands): + """Matrix multiply all operands. + + Supports real and complex valued matrices and is faster compared to + standard multiplication via the @ operator. + """ if any(isinstance(o[0, 0], CObs) for o in operands): extended_operands = [] for op in operands: @@ -171,10 +163,12 @@ def matmul(*operands): def inv(x): + """Inverse of Obs or CObs valued matrices.""" return _mat_mat_op(anp.linalg.inv, x) def cholesky(x): + """Cholesky decompostion of Obs or CObs valued matrices.""" return _mat_mat_op(anp.linalg.cholesky, x) @@ -270,7 +264,7 @@ def svd(obs, **kwargs): return (u, s, vh) -def slog_det(obs, **kwargs): +def slogdet(obs, **kwargs): """Computes the determinant of a matrix of Obs via np.linalg.slogdet.""" def _mat(x): dim = int(np.sqrt(len(x))) @@ -501,6 +495,8 @@ def _num_diff_svd(obs, **kwargs): return (np.array(res_mat0) @ np.identity(mid_index), np.array(res_mat1) @ np.identity(mid_index), np.array(res_mat2) @ np.identity(shape[1])) +# This code block is directly taken from the current master branch of autograd and remains +# only until the new version is released on PyPi _dot = partial(anp.einsum, '...ij,...jk->...ik') diff --git a/tests/test_linalg.py b/tests/test_linalg.py index c441426d..6e3455f6 100644 --- a/tests/test_linalg.py +++ b/tests/test_linalg.py @@ -87,8 +87,7 @@ def test_complex_matrix_inverse(): def test_matrix_functions(): - dim = 3 + int(4 * np.random.rand()) - print(dim) + dim = 4 matrix = [] for i in range(dim): row = [] @@ -125,6 +124,13 @@ def test_matrix_functions(): for j in range(dim): assert tmp[j].is_zero() + # Check svd + u, v, vh = pe.linalg.svd(sym) + diff = sym - u @ np.diag(v) @ vh + + for (i, j), entry in np.ndenumerate(diff): + assert entry.is_zero() + def test_complex_matrix_operations(): dimension = 4 diff --git a/tests/test_roots.py b/tests/test_roots.py index 8d3a8191..d7c4ed1f 100644 --- a/tests/test_roots.py +++ b/tests/test_roots.py @@ -16,4 +16,4 @@ def test_root_linear(): assert np.isclose(my_root.value, value) difference = my_obs - my_root - assert np.allclose(0.0, difference.deltas['t']) + assert difference.is_zero() From 
8655fe2cd134dcd4675cc990aa4fadc1a407ac1f Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Tue, 26 Oct 2021 10:00:18 +0100 Subject: [PATCH 18/27] Obs.is_zero() now correctly detects when an Obs is zero within machine precision --- pyerrors/pyerrors.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pyerrors/pyerrors.py b/pyerrors/pyerrors.py index 9e8388e9..25f1280e 100644 --- a/pyerrors/pyerrors.py +++ b/pyerrors/pyerrors.py @@ -355,11 +355,11 @@ class Obs: print(e_name, ':', self.e_content[e_name]) def is_zero_within_error(self, sigma=1): - """ Checks whether the observable is zero within 'sigma' standard errors. + """Checks whether the observable is zero within 'sigma' standard errors. Works only properly when the gamma method was run. """ - return np.abs(self.value) <= sigma * self.dvalue + return self.is_zero() or np.abs(self.value) <= sigma * self.dvalue def is_zero(self): return np.isclose(0.0, self.value) and all(np.allclose(0.0, delta) for delta in self.deltas.values()) From b5f3dd6ac35dd72623cb01c42d78e12df50cd0ff Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Tue, 26 Oct 2021 10:20:47 +0100 Subject: [PATCH 19/27] Benchmarks added --- .github/workflows/{CI.yml => pytest.yml} | 3 +- .gitignore | 1 + tests/benchmark_test.py | 55 +++++++++++++++++++ ...est_correlators.py => correlators_test.py} | 0 tests/{test_dirac.py => dirac_test.py} | 0 tests/{test_fits.py => fits_test.py} | 0 tests/{test_linalg.py => linalg_test.py} | 0 tests/{test_pyerrors.py => pyerrors_test.py} | 0 tests/{test_roots.py => roots_test.py} | 0 9 files changed, 58 insertions(+), 1 deletion(-) rename .github/workflows/{CI.yml => pytest.yml} (92%) create mode 100644 tests/benchmark_test.py rename tests/{test_correlators.py => correlators_test.py} (100%) rename tests/{test_dirac.py => dirac_test.py} (100%) rename tests/{test_fits.py => fits_test.py} (100%) rename tests/{test_linalg.py => linalg_test.py} (100%) rename tests/{test_pyerrors.py => pyerrors_test.py} (100%) rename tests/{test_roots.py => roots_test.py} (100%) diff --git a/.github/workflows/CI.yml b/.github/workflows/pytest.yml similarity index 92% rename from .github/workflows/CI.yml rename to .github/workflows/pytest.yml index f1fddc58..2340d9d5 100644 --- a/.github/workflows/CI.yml +++ b/.github/workflows/pytest.yml @@ -1,4 +1,4 @@ -name: CI +name: pytest on: push: @@ -30,6 +30,7 @@ jobs: pip install . 
pip install pytest pip install pytest-cov + pip install pytest-benchmark - name: Run tests run: pytest --cov=pyerrors -v diff --git a/.gitignore b/.gitignore index 18c31bfa..77feb98c 100644 --- a/.gitignore +++ b/.gitignore @@ -2,6 +2,7 @@ __pycache__ *.pyc .ipynb_* .coverage +.benchmarks .cache examples/B1k2_pcac_plateau.p examples/Untitled.* diff --git a/tests/benchmark_test.py b/tests/benchmark_test.py new file mode 100644 index 00000000..707b3791 --- /dev/null +++ b/tests/benchmark_test.py @@ -0,0 +1,55 @@ +import numpy as np +import pyerrors as pe +import pytest +import time + +np.random.seed(0) + +def test_bench_mul(benchmark): + length = 1000 + my_obs = pe.Obs([np.random.rand(length)], ['t1']) + + def mul(x, y): + return x * y + + benchmark(mul, my_obs, my_obs) + + +def test_bench_cmul(benchmark): + length = 1000 + my_obs = pe.CObs(pe.Obs([np.random.rand(length)], ['t1']), + pe.Obs([np.random.rand(length)], ['t1'])) + + def mul(x, y): + return x * y + + benchmark(mul, my_obs, my_obs) + + +def test_bench_matmul(benchmark): + dim = 4 + my_list = [] + length = 1000 + for i in range(dim ** 2): + my_list.append(pe.Obs([np.random.rand(length)], ['t1'])) + my_array = np.array(my_list).reshape((dim, dim)) + + benchmark(pe.linalg.matmul, my_array, my_array) + + +def test_bench_cmatmul(benchmark): + dim = 4 + my_list = [] + length = 1000 + for i in range(dim ** 2): + my_list.append(pe.CObs(pe.Obs([np.random.rand(length)], ['t1']), + pe.Obs([np.random.rand(length)], ['t1']))) + my_array = np.array(my_list).reshape((dim, dim)) + + benchmark(pe.linalg.matmul, my_array, my_array) + + +def test_bench_gamma_method(benchmark): + length = 1000 + my_obs = pe.Obs([np.random.rand(length)], ['t1']) + benchmark(my_obs.gamma_method) diff --git a/tests/test_correlators.py b/tests/correlators_test.py similarity index 100% rename from tests/test_correlators.py rename to tests/correlators_test.py diff --git a/tests/test_dirac.py b/tests/dirac_test.py similarity index 100% rename from tests/test_dirac.py rename to tests/dirac_test.py diff --git a/tests/test_fits.py b/tests/fits_test.py similarity index 100% rename from tests/test_fits.py rename to tests/fits_test.py diff --git a/tests/test_linalg.py b/tests/linalg_test.py similarity index 100% rename from tests/test_linalg.py rename to tests/linalg_test.py diff --git a/tests/test_pyerrors.py b/tests/pyerrors_test.py similarity index 100% rename from tests/test_pyerrors.py rename to tests/pyerrors_test.py diff --git a/tests/test_roots.py b/tests/roots_test.py similarity index 100% rename from tests/test_roots.py rename to tests/roots_test.py From f38f86ae323dd4bf28e355a24827dd1d1af363bd Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Tue, 26 Oct 2021 10:21:48 +0100 Subject: [PATCH 20/27] README updated --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index d1f22b1e..6e53ea35 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -[![flake8](https://github.com/fjosw/pyerrors/actions/workflows/flake8.yml/badge.svg)](https://github.com/fjosw/pyerrors/actions/workflows/flake8.yml) [![Build/Test](https://github.com/fjosw/pyerrors/actions/workflows/CI.yml/badge.svg)](https://github.com/fjosw/pyerrors/actions/workflows/CI.yml) [![](https://img.shields.io/badge/python-3.6+-blue.svg)](https://www.python.org/downloads/) +[![flake8](https://github.com/fjosw/pyerrors/actions/workflows/flake8.yml/badge.svg)](https://github.com/fjosw/pyerrors/actions/workflows/flake8.yml) 
[![pytest](https://github.com/fjosw/pyerrors/actions/workflows/pytest.yml/badge.svg)](https://github.com/fjosw/pyerrors/actions/workflows/pytest.yml) [![](https://img.shields.io/badge/python-3.6+-blue.svg)](https://www.python.org/downloads/) # pyerrors `pyerrors` is a python package for error computation and propagation of Markov chain Monte Carlo data. It is based on the **gamma method** [arXiv:hep-lat/0306017](https://arxiv.org/abs/hep-lat/0306017). Some of its features are: From 7874bdb1fdeea49d1e3a00f283c97d5b1af12fe0 Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Tue, 26 Oct 2021 10:41:36 +0100 Subject: [PATCH 21/27] benchmarks simplified --- tests/benchmark_test.py | 28 +++++++++++----------------- 1 file changed, 11 insertions(+), 17 deletions(-) diff --git a/tests/benchmark_test.py b/tests/benchmark_test.py index 707b3791..ebdf4681 100644 --- a/tests/benchmark_test.py +++ b/tests/benchmark_test.py @@ -5,42 +5,37 @@ import time np.random.seed(0) -def test_bench_mul(benchmark): - length = 1000 - my_obs = pe.Obs([np.random.rand(length)], ['t1']) +length = 1000 - def mul(x, y): - return x * y +def mul(x, y): + return x * y + + +def test_b_mul(benchmark): + my_obs = pe.Obs([np.random.rand(length)], ['t1']) benchmark(mul, my_obs, my_obs) -def test_bench_cmul(benchmark): - length = 1000 +def test_b_cmul(benchmark): my_obs = pe.CObs(pe.Obs([np.random.rand(length)], ['t1']), pe.Obs([np.random.rand(length)], ['t1'])) - def mul(x, y): - return x * y - benchmark(mul, my_obs, my_obs) -def test_bench_matmul(benchmark): +def test_b_matmul(benchmark): dim = 4 my_list = [] - length = 1000 for i in range(dim ** 2): my_list.append(pe.Obs([np.random.rand(length)], ['t1'])) my_array = np.array(my_list).reshape((dim, dim)) benchmark(pe.linalg.matmul, my_array, my_array) - -def test_bench_cmatmul(benchmark): +def test_b_cmatmul(benchmark): dim = 4 my_list = [] - length = 1000 for i in range(dim ** 2): my_list.append(pe.CObs(pe.Obs([np.random.rand(length)], ['t1']), pe.Obs([np.random.rand(length)], ['t1']))) @@ -49,7 +44,6 @@ def test_bench_cmatmul(benchmark): benchmark(pe.linalg.matmul, my_array, my_array) -def test_bench_gamma_method(benchmark): - length = 1000 +def test_b_gamma(benchmark): my_obs = pe.Obs([np.random.rand(length)], ['t1']) benchmark(my_obs.gamma_method) From caa5fdb348e362278ae49013cc492313ad36e422 Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Tue, 26 Oct 2021 15:43:30 +0100 Subject: [PATCH 22/27] Most dictionaries removed from Obs.__init__, the dicitonaries are now only initialized when the gamma_method is executed --- pyerrors/input/bdio.py | 2 +- pyerrors/pyerrors.py | 57 ++++++++++++++++-------------------------- 2 files changed, 22 insertions(+), 37 deletions(-) diff --git a/pyerrors/input/bdio.py b/pyerrors/input/bdio.py index 8d14b440..58d4a5e3 100644 --- a/pyerrors/input/bdio.py +++ b/pyerrors/input/bdio.py @@ -175,7 +175,7 @@ def write_ADerrors(obs_list, file_path, bdio_path='./libbdio.so', **kwargs): """ for obs in obs_list: - if not obs.e_names: + if not hasattr(obs, 'e_names'): raise Exception('Run the gamma method first for all obs.') bdio = ctypes.cdll.LoadLibrary(bdio_path) diff --git a/pyerrors/pyerrors.py b/pyerrors/pyerrors.py index 25f1280e..8c03297b 100644 --- a/pyerrors/pyerrors.py +++ b/pyerrors/pyerrors.py @@ -87,23 +87,6 @@ class Obs: self.ddvalue = 0.0 self.reweighted = 0 - self.S = {} - self.tau_exp = {} - self.N_sigma = 0 - - self.e_names = {} - self.e_content = {} - - self.e_dvalue = {} - self.e_ddvalue = {} - self.e_tauint = {} - self.e_dtauint = {} 
- self.e_windowsize = {} - self.e_rho = {} - self.e_drho = {} - self.e_n_tauint = {} - self.e_n_dtauint = {} - self.tag = None @property @@ -340,19 +323,20 @@ class Obs: else: percentage = np.abs(self.dvalue / self.value) * 100 print('Result\t %3.8e +/- %3.8e +/- %3.8e (%3.3f%%)' % (self.value, self.dvalue, self.ddvalue, percentage)) - if len(self.e_names) > 1: - print(' Ensemble errors:') - for e_name in self.e_names: + if hasattr(self, 'e_names'): if len(self.e_names) > 1: - print('', e_name, '\t %3.8e +/- %3.8e' % (self.e_dvalue[e_name], self.e_ddvalue[e_name])) - if self.tau_exp[e_name] > 0: - print(' t_int\t %3.8e +/- %3.8e tau_exp = %3.2f, N_sigma = %1.0i' % (self.e_tauint[e_name], self.e_dtauint[e_name], self.tau_exp[e_name], self.N_sigma)) - else: - print(' t_int\t %3.8e +/- %3.8e S = %3.2f' % (self.e_tauint[e_name], self.e_dtauint[e_name], self.S[e_name])) - if level > 1: - print(self.N, 'samples in', len(self.e_names), 'ensembles:') + print(' Ensemble errors:') for e_name in self.e_names: - print(e_name, ':', self.e_content[e_name]) + if len(self.e_names) > 1: + print('', e_name, '\t %3.8e +/- %3.8e' % (self.e_dvalue[e_name], self.e_ddvalue[e_name])) + if self.tau_exp[e_name] > 0: + print(' t_int\t %3.8e +/- %3.8e tau_exp = %3.2f, N_sigma = %1.0i' % (self.e_tauint[e_name], self.e_dtauint[e_name], self.tau_exp[e_name], self.N_sigma)) + else: + print(' t_int\t %3.8e +/- %3.8e S = %3.2f' % (self.e_tauint[e_name], self.e_dtauint[e_name], self.S[e_name])) + if level > 1: + print(self.N, 'samples in', len(self.e_names), 'ensembles:') + for e_name in self.e_names: + print(e_name, ':', self.e_content[e_name]) def is_zero_within_error(self, sigma=1): """Checks whether the observable is zero within 'sigma' standard errors. @@ -362,11 +346,12 @@ class Obs: return self.is_zero() or np.abs(self.value) <= sigma * self.dvalue def is_zero(self): + """Checks whether the observable is zero within machine precision.""" return np.isclose(0.0, self.value) and all(np.allclose(0.0, delta) for delta in self.deltas.values()) def plot_tauint(self, save=None): """Plot integrated autocorrelation time for each ensemble.""" - if not self.e_names: + if not hasattr(self, 'e_names'): raise Exception('Run the gamma method first.') fig = plt.figure() @@ -399,7 +384,7 @@ class Obs: def plot_rho(self): """Plot normalized autocorrelation function time for each ensemble.""" - if not self.e_names: + if not hasattr(self, 'e_names'): raise Exception('Run the gamma method first.') for e, e_name in enumerate(self.e_names): plt.xlabel('W') @@ -421,7 +406,7 @@ class Obs: def plot_rep_dist(self): """Plot replica distribution for each ensemble with more than one replicum.""" - if not self.e_names: + if not hasattr(self, 'e_names'): raise Exception('Run the gamma method first.') for e, e_name in enumerate(self.e_names): if len(self.e_content[e_name]) == 1: @@ -443,7 +428,7 @@ class Obs: def plot_history(self): """Plot derived Monte Carlo history for each ensemble.""" - if not self.e_names: + if not hasattr(self, 'e_names'): raise Exception('Run the gamma method first.') for e, e_name in enumerate(self.e_names): @@ -465,7 +450,7 @@ class Obs: def plot_piechart(self): """Plot piechart which shows the fractional contribution of each ensemble to the error and returns a dictionary containing the fractions.""" - if not self.e_names: + if not hasattr(self, 'e_names'): raise Exception('Run the gamma method first.') if self.dvalue == 0.0: raise Exception('Error is 0.0') @@ -967,7 +952,7 @@ def covariance(obs1, obs2, correlation=False, 
From 0ad5076b7e348e036dcb890f9fec83b4ca45b2db Mon Sep 17 00:00:00 2001
From: Fabian Joswig
Date: Tue, 26 Oct 2021 17:12:25 +0100
Subject: [PATCH 23/27] docstrings added

---
 pyerrors/npr.py      | 2 +-
 pyerrors/pyerrors.py | 2 ++
 2 files changed, 3 insertions(+), 1 deletion(-)

diff --git a/pyerrors/npr.py b/pyerrors/npr.py
index 16c86442..7ff81cc0 100644
--- a/pyerrors/npr.py
+++ b/pyerrors/npr.py
@@ -91,7 +91,7 @@ def Zq(inv_prop, fermion='Wilson'):
     res = 1 / 12. * np.trace(matmul(inv_prop, np.kron(np.eye(3, dtype=int), p_slash)))
     res.gamma_method()
 
-    if not res.imag.is_zero() and not res.imag.is_zero_within_error(5):
+    if not res.imag.is_zero_within_error(5):
         warnings.warn("Imaginary part of Zq is not zero within 5 sigma")
         return res
     return res.real
diff --git a/pyerrors/pyerrors.py b/pyerrors/pyerrors.py
index 8c03297b..3df41b7a 100644
--- a/pyerrors/pyerrors.py
+++ b/pyerrors/pyerrors.py
@@ -668,12 +668,14 @@ class CObs:
         return self._imag
 
     def gamma_method(self, **kwargs):
+        """Executes the gamma_method for the real and the imaginary part."""
         if isinstance(self.real, Obs):
             self.real.gamma_method(**kwargs)
         if isinstance(self.imag, Obs):
             self.imag.gamma_method(**kwargs)
 
     def is_zero(self):
+        """Checks whether both real and imaginary part are zero within machine precision."""
         return self.real == 0.0 and self.imag == 0.0
 
     def conjugate(self):
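The two docstrings added above are used as in the following short sketch (illustrative only, assuming `pyerrors` is importable as `pe`):

```python
import numpy as np
import pyerrors as pe

c_obs = pe.CObs(pe.Obs([np.random.rand(1000)], ['t1']),
                pe.Obs([np.random.rand(1000)], ['t1']))
c_obs.gamma_method()    # runs the gamma method for the real and the imaginary part
print(c_obs.real.dvalue, c_obs.imag.dvalue)
print(c_obs.is_zero())  # False for generic random input
```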
From 2b5bd7c5ed49bfddcfbdac67437e5304ec34c748 Mon Sep 17 00:00:00 2001
From: Fabian Joswig
Date: Wed, 27 Oct 2021 09:08:37 +0100
Subject: [PATCH 24/27] CONTRIBUTING updated

---
 CONTRIBUTING.md | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index ea780c69..299e5891 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -23,8 +23,9 @@ When implementing a new feature or fixing a bug please add meaningful tests to t
 ### Continous integration
 For all pull requests tests are executed for the most recent python releases via
 ```
-pytest -v
+pytest --cov=pyerrors -v
 ```
+requiring `pytest`, `pytest-cov` and `pytest-benchmark`
 and the linter `flake8` is executed with the command
 ```
 flake8 --ignore=E501,E722 --exclude=__init__.py pyerrors

From 5bdfa3f8bedb33e1a4e8f04a2de8942152eeaadc Mon Sep 17 00:00:00 2001
From: Fabian Joswig
Date: Wed, 27 Oct 2021 18:16:59 +0100
Subject: [PATCH 25/27] DWF p_slash expression added to npr

---
 pyerrors/npr.py | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/pyerrors/npr.py b/pyerrors/npr.py
index 7ff81cc0..6594f03f 100644
--- a/pyerrors/npr.py
+++ b/pyerrors/npr.py
@@ -85,6 +85,11 @@ def Zq(inv_prop, fermion='Wilson'):
 
     if fermion == 'Wilson':
         p_slash = -1j * (sin_mom[0] * gamma[0] + sin_mom[1] * gamma[1] + sin_mom[2] * gamma[2] + sin_mom[3] * gamma[3]) / np.sum(sin_mom ** 2)
+    elif fermion == 'DWF':
+        W = np.sum(1 - np.cos(2 * np.pi / L * mom))
+        s2 = np.sum(sin_mom ** 2)
+        p_slash = -1j * (sin_mom[0] * gamma[0] + sin_mom[1] * gamma[1] + sin_mom[2] * gamma[2] + sin_mom[3] * gamma[3])
+        p_slash /= 2 * (W - 1 + np.sqrt((1 - W) ** 2 + s2))
     else:
         raise Exception("Fermion type '" + fermion + "' not implemented")

From 2d3578508779139e709218245bcad5cc4bd1628d Mon Sep 17 00:00:00 2001
From: Fabian Joswig
Date: Thu, 28 Oct 2021 14:42:17 +0100
Subject: [PATCH 26/27] Continuum tree-level propagator added to npr

---
 pyerrors/npr.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/pyerrors/npr.py b/pyerrors/npr.py
index 6594f03f..b1c32c8a 100644
--- a/pyerrors/npr.py
+++ b/pyerrors/npr.py
@@ -85,6 +85,9 @@ def Zq(inv_prop, fermion='Wilson'):
 
     if fermion == 'Wilson':
         p_slash = -1j * (sin_mom[0] * gamma[0] + sin_mom[1] * gamma[1] + sin_mom[2] * gamma[2] + sin_mom[3] * gamma[3]) / np.sum(sin_mom ** 2)
+    elif fermion == 'Continuum':
+        p_mom = 2 * np.pi / L * mom
+        p_slash = -1j * (p_mom[0] * gamma[0] + p_mom[1] * gamma[1] + p_mom[2] * gamma[2] + p_mom[3] * gamma[3]) / np.sum(p_mom ** 2)
     elif fermion == 'DWF':
         W = np.sum(1 - np.cos(2 * np.pi / L * mom))
         s2 = np.sum(sin_mom ** 2)
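For orientation, the three branches of `Zq` now correspond to the following tree-level expressions, read off directly from the hunks above (writing p_mu = 2*pi*n_mu/L for the lattice momentum built from `mom` and p̄_mu for the sine momentum stored in `sin_mom`; the `\slashed` macro assumes the LaTeX `slashed` package):

```latex
\slashed{p}_{\mathrm{cont}}   = -i\,\frac{\sum_\mu p_\mu \gamma_\mu}{\sum_\mu p_\mu^2}, \qquad
\slashed{p}_{\mathrm{Wilson}} = -i\,\frac{\sum_\mu \bar{p}_\mu \gamma_\mu}{\sum_\mu \bar{p}_\mu^2}, \qquad
\slashed{p}_{\mathrm{DWF}}    = \frac{-i\,\sum_\mu \bar{p}_\mu \gamma_\mu}{2\left(W - 1 + \sqrt{(1 - W)^2 + \sum_\mu \bar{p}_\mu^2}\right)},
\qquad W = \sum_\mu \left(1 - \cos p_\mu\right).
```

All three expressions agree for small momenta, as expected for tree-level lattice propagators.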
From ed8d9f9c5c41249945256344b3e2a4d34ff5e5fe Mon Sep 17 00:00:00 2001
From: Fabian Joswig
Date: Thu, 28 Oct 2021 14:47:13 +0100
Subject: [PATCH 27/27] __slots__ with __dict__ added to Obs

Memory consumption for an average Obs reduced by another 5%
Consider also dropping __dict__ for another 120 bytes per object
---
 pyerrors/pyerrors.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/pyerrors/pyerrors.py b/pyerrors/pyerrors.py
index 3df41b7a..75d1b187 100644
--- a/pyerrors/pyerrors.py
+++ b/pyerrors/pyerrors.py
@@ -34,11 +34,11 @@ class Obs:
                    ensemble.
     N_sigma_global -- Standard value for N_sigma (default 1.0)
     """
-    # __slots__ = ['names', 'shape', 'r_values', 'deltas', 'N', '_value', '_dvalue',
-    #              'ddvalue', 'reweighted', 'S', 'tau_exp', 'N_sigma', 'e_names',
-    #              'e_content', 'e_dvalue', 'e_ddvalue', 'e_tauint', 'e_dtauint',
-    #              'e_windowsize', 'e_rho', 'e_drho', 'e_n_tauint', 'e_n_dtauint',
-    #              'tag']
+    __slots__ = ['names', 'shape', 'r_values', 'deltas', 'N', '_value', '_dvalue',
+                 'ddvalue', 'reweighted', 'S', 'tau_exp', 'N_sigma', 'e_names',
+                 'e_content', 'e_dvalue', 'e_ddvalue', 'e_tauint', 'e_dtauint',
+                 'e_windowsize', 'e_rho', 'e_drho', 'e_n_tauint', 'e_n_dtauint',
+                 'tag', '__dict__']
 
     e_tag_global = 0
     S_global = 2.0
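The memory effect described in the commit message (fixed slots instead of a per-instance dict, with '__dict__' kept so that arbitrary attributes can still be attached) can be illustrated with plain Python, independently of pyerrors; the class names below are made up for the illustration and the exact byte counts depend on the interpreter version:

```python
import sys


class WithDict:
    """Ordinary class: every instance carries a dynamic __dict__."""


class WithSlots:
    """Slots plus '__dict__': fixed storage, new attributes still allowed (the pattern used above)."""
    __slots__ = ['a', 'b', '__dict__']


class SlotsOnly:
    """Pure slots: smallest footprint, no dynamic attributes (the further option the commit message mentions)."""
    __slots__ = ['a', 'b']


for cls in (WithDict, WithSlots, SlotsOnly):
    obj = cls()
    size = sys.getsizeof(obj)
    if hasattr(obj, '__dict__'):
        size += sys.getsizeof(obj.__dict__)  # count the attribute dict as well
    print(cls.__name__, size)
```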