diff --git a/pyerrors/linalg.py b/pyerrors/linalg.py
index bb44870d..237e6713 100644
--- a/pyerrors/linalg.py
+++ b/pyerrors/linalg.py
@@ -1,123 +1,11 @@
 import numpy as np
-from autograd import jacobian
 import autograd.numpy as anp  # Thinly-wrapped numpy
-from .obs import derived_observable, CObs, Obs, _merge_idx, _expand_deltas_for_merge, _filter_zeroes, import_jackknife
+from .obs import derived_observable, CObs, Obs, import_jackknife
 from functools import partial
 from autograd.extend import defvjp
 
 
-def derived_array(func, data, **kwargs):
-    """Construct a derived Obs for a matrix valued function according to func(data, **kwargs) using automatic differentiation.
-
-    Parameters
-    ----------
-    func : object
-        arbitrary function of the form func(data, **kwargs). For the
-        automatic differentiation to work, all numpy functions have to have
-        the autograd wrapper (use 'import autograd.numpy as anp').
-    data : list
-        list of Obs, e.g. [obs1, obs2, obs3].
-    man_grad : list
-        manually supply a list or an array which contains the jacobian
-        of func. Use cautiously, supplying the wrong derivative will
-        not be intercepted.
-    """
-
-    data = np.asarray(data)
-    raveled_data = data.ravel()
-
-    # Workaround for matrix operations containing non Obs data
-    for i_data in raveled_data:
-        if isinstance(i_data, Obs):
-            first_name = i_data.names[0]
-            first_shape = i_data.shape[first_name]
-            first_idl = i_data.idl[first_name]
-            break
-
-    for i in range(len(raveled_data)):
-        if isinstance(raveled_data[i], (int, float)):
-            raveled_data[i] = Obs([raveled_data[i] + np.zeros(first_shape)], [first_name], idl=[first_idl])
-
-    n_obs = len(raveled_data)
-    new_names = sorted(set([y for x in [o.names for o in raveled_data] for y in x]))
-
-    is_merged = {name: (len(list(filter(lambda o: o.is_merged.get(name, False) is True, raveled_data))) > 0) for name in new_names}
-    reweighted = len(list(filter(lambda o: o.reweighted is True, raveled_data))) > 0
-    new_idl_d = {}
-    for name in new_names:
-        idl = []
-        for i_data in raveled_data:
-            tmp = i_data.idl.get(name)
-            if tmp is not None:
-                idl.append(tmp)
-        new_idl_d[name] = _merge_idx(idl)
-        if not is_merged[name]:
-            is_merged[name] = (1 != len(set([len(idx) for idx in [*idl, new_idl_d[name]]])))
-
-    if data.ndim == 1:
-        values = np.array([o.value for o in data])
-    else:
-        values = np.vectorize(lambda x: x.value)(data)
-
-    new_values = func(values, **kwargs)
-
-    new_r_values = {}
-    for name in new_names:
-        tmp_values = np.zeros(n_obs)
-        for i, item in enumerate(raveled_data):
-            tmp = item.r_values.get(name)
-            if tmp is None:
-                tmp = item.value
-            tmp_values[i] = tmp
-        tmp_values = np.array(tmp_values).reshape(data.shape)
-        new_r_values[name] = func(tmp_values, **kwargs)
-
-    if 'man_grad' in kwargs:
-        deriv = np.asarray(kwargs.get('man_grad'))
-        if new_values.shape + data.shape != deriv.shape:
-            raise Exception('Manual derivative does not have correct shape.')
-    else:
-        deriv = jacobian(func)(values, **kwargs)
-
-    final_result = np.zeros(new_values.shape, dtype=object)
-
-    d_extracted = {}
-    for name in new_names:
-        d_extracted[name] = []
-        for i_dat, dat in enumerate(data):
-            ens_length = len(new_idl_d[name])
-            d_extracted[name].append(np.array([_expand_deltas_for_merge(o.deltas[name], o.idl[name], o.shape[name], new_idl_d[name]) for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (ens_length, )))
-
-    for i_val, new_val in np.ndenumerate(new_values):
-        new_deltas = {}
-        for name in new_names:
-            ens_length = d_extracted[name][0].shape[-1]
-            new_deltas[name] = np.zeros(ens_length)
-            for i_dat, dat in enumerate(d_extracted[name]):
-                new_deltas[name] += np.tensordot(deriv[i_val + (i_dat, )], dat)
-
-        new_samples = []
-        new_means = []
-        new_idl = []
-        for name in new_names:
-            if is_merged[name]:
-                filtered_deltas, filtered_idl_d = _filter_zeroes(new_deltas[name], new_idl_d[name])
-            else:
-                filtered_deltas = new_deltas[name]
-                filtered_idl_d = new_idl_d[name]
-
-            new_samples.append(filtered_deltas)
-            new_idl.append(filtered_idl_d)
-            new_means.append(new_r_values[name][i_val])
-        final_result[i_val] = Obs(new_samples, new_names, means=new_means, idl=new_idl)
-        final_result[i_val]._value = new_val
-        final_result[i_val].is_merged = is_merged
-        final_result[i_val].reweighted = reweighted
-
-    return final_result
-
-
 def matmul(*operands):
     """Matrix multiply all operands.
 
@@ -157,8 +45,8 @@ def matmul(*operands):
         def multi_dot_i(operands):
             return multi_dot(operands, 'Imag')
 
-        Nr = derived_array(multi_dot_r, extended_operands)
-        Ni = derived_array(multi_dot_i, extended_operands)
+        Nr = derived_observable(multi_dot_r, extended_operands, array_mode=True)
+        Ni = derived_observable(multi_dot_i, extended_operands, array_mode=True)
 
         res = np.empty_like(Nr)
         for (n, m), entry in np.ndenumerate(Nr):
@@ -171,7 +59,7 @@ def matmul(*operands):
             for op in operands[1:]:
                 stack = stack @ op
             return stack
-        return derived_array(multi_dot, operands)
+        return derived_observable(multi_dot, operands, array_mode=True)
 
 
 def jack_matmul(*operands):
@@ -360,7 +248,7 @@ def _mat_mat_op(op, obs, **kwargs):
         if kwargs.get('num_grad') is True:
            op_big_matrix = _num_diff_mat_mat_op(op, big_matrix, **kwargs)
         else:
-            op_big_matrix = derived_array(lambda x, **kwargs: op(x), [big_matrix])[0]
+            op_big_matrix = derived_observable(lambda x, **kwargs: op(x), [big_matrix], array_mode=True)[0]
         dim = op_big_matrix.shape[0]
         op_A = op_big_matrix[0: dim // 2, 0: dim // 2]
         op_B = op_big_matrix[dim // 2:, 0: dim // 2]
@@ -371,7 +259,7 @@ def _mat_mat_op(op, obs, **kwargs):
     else:
         if kwargs.get('num_grad') is True:
             return _num_diff_mat_mat_op(op, obs, **kwargs)
-        return derived_array(lambda x, **kwargs: op(x), [obs])[0]
+        return derived_observable(lambda x, **kwargs: op(x), [obs], array_mode=True)[0]
 
 
 def eigh(obs, **kwargs):
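Note on the linalg.py change: matmul and the _mat_mat_op based matrix functions keep their public signatures; they now forward to derived_observable with array_mode=True instead of the removed derived_array. A minimal usage sketch of the unchanged entry point (toy random data, not part of the patch; assumes pyerrors is importable as pe):

    import numpy as np
    import pyerrors as pe

    obs_list = [pe.Obs([np.random.rand(100)], ['ens1']) for _ in range(4)]
    mat = np.array(obs_list).reshape((2, 2))

    prod = pe.linalg.matmul(mat, mat)   # internally derived_observable(..., array_mode=True)
    check = mat @ mat                   # element-wise Obs arithmetic as reference
    for idx, entry in np.ndenumerate(prod - check):
        assert entry.is_zero(), idx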
diff --git a/pyerrors/obs.py b/pyerrors/obs.py
index c6c97197..bc60a3a2 100644
--- a/pyerrors/obs.py
+++ b/pyerrors/obs.py
@@ -1020,7 +1020,7 @@ def _filter_zeroes(deltas, idx, eps=Obs.filter_eps):
     return deltas, idx
 
 
-def derived_observable(func, data, **kwargs):
+def derived_observable(func, data, array_mode=False, **kwargs):
     """Construct a derived Obs according to func(data, **kwargs) using automatic differentiation.
 
     Parameters
@@ -1053,6 +1053,7 @@ def derived_observable(func, data, **kwargs):
     raveled_data = data.ravel()
 
     # Workaround for matrix operations containing non Obs data
+    # TODO: Find more elegant solution here.
     for i_data in raveled_data:
         if isinstance(i_data, Obs):
             first_name = i_data.names[0]
@@ -1075,11 +1076,13 @@ def derived_observable(func, data, **kwargs):
 
     n_obs = len(raveled_data)
     new_names = sorted(set([y for x in [o.names for o in raveled_data] for y in x]))
+    new_cov_names = sorted(set([y for x in [o.cov_names for o in raveled_data] for y in x]))
+    new_sample_names = sorted(set(new_names) - set(new_cov_names))
 
-    is_merged = {name: (len(list(filter(lambda o: o.is_merged.get(name, False) is True, raveled_data))) > 0) for name in new_names}
+    is_merged = {name: (len(list(filter(lambda o: o.is_merged.get(name, False) is True, raveled_data))) > 0) for name in new_sample_names}
     reweighted = len(list(filter(lambda o: o.reweighted is True, raveled_data))) > 0
     new_idl_d = {}
-    for name in new_names:
+    for name in new_sample_names:
         idl = []
         for i_data in raveled_data:
             tmp = i_data.idl.get(name)
@@ -1101,7 +1104,7 @@ def derived_observable(func, data, **kwargs):
         multi = 1
 
     new_r_values = {}
-    for name in new_names:
+    for name in new_sample_names:
         tmp_values = np.zeros(n_obs)
         for i, item in enumerate(raveled_data):
             tmp = item.r_values.get(name)
@@ -1143,15 +1146,48 @@ def derived_observable(func, data, **kwargs):
 
     final_result = np.zeros(new_values.shape, dtype=object)
 
+    if array_mode is True:
+
+        new_covobs_lengths = dict(set([y for x in [[(n, o.covobs[n].N) for n in o.cov_names] for o in raveled_data] for y in x]))
+
+        class _Zero_grad():
+            def __init__(self, N):
+                # self.grad = np.zeros(N)
+                self.grad = np.zeros((N, 1))
+
+        d_extracted = {}
+        g_extracted = {}
+        for name in new_sample_names:
+            d_extracted[name] = []
+            ens_length = len(new_idl_d[name])
+            for i_dat, dat in enumerate(data):
+                d_extracted[name].append(np.array([_expand_deltas_for_merge(o.deltas.get(name, np.zeros(ens_length)), o.idl.get(name, new_idl_d[name]), o.shape.get(name, ens_length), new_idl_d[name]) for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (ens_length, )))
+        for name in new_cov_names:
+            g_extracted[name] = []
+            zero_grad = _Zero_grad(new_covobs_lengths[name])
+            for i_dat, dat in enumerate(data):
+                g_extracted[name].append(np.array([o.covobs.get(name, zero_grad).grad for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (new_covobs_lengths[name], 1)))
+
     for i_val, new_val in np.ndenumerate(new_values):
         new_deltas = {}
         new_grad = {}
-        for j_obs, obs in np.ndenumerate(data):
-            for name in obs.names:
-                if name in obs.cov_names:
-                    new_grad[name] = new_grad.get(name, 0) + deriv[i_val + j_obs] * obs.covobs[name].grad
-                else:
-                    new_deltas[name] = new_deltas.get(name, 0) + deriv[i_val + j_obs] * _expand_deltas_for_merge(obs.deltas[name], obs.idl[name], obs.shape[name], new_idl_d[name])
+        if array_mode is True:
+            for name in new_sample_names:
+                ens_length = d_extracted[name][0].shape[-1]
+                new_deltas[name] = np.zeros(ens_length)
+                for i_dat, dat in enumerate(d_extracted[name]):
+                    new_deltas[name] += np.tensordot(deriv[i_val + (i_dat, )], dat)
+            for name in new_cov_names:
+                new_grad[name] = 0
+                for i_dat, dat in enumerate(g_extracted[name]):
+                    new_grad[name] += np.tensordot(deriv[i_val + (i_dat, )], dat)
+        else:
+            for j_obs, obs in np.ndenumerate(data):
+                for name in obs.names:
+                    if name in obs.cov_names:
+                        new_grad[name] = new_grad.get(name, 0) + deriv[i_val + j_obs] * obs.covobs[name].grad
+                    else:
+                        new_deltas[name] = new_deltas.get(name, 0) + deriv[i_val + j_obs] * _expand_deltas_for_merge(obs.deltas[name], obs.idl[name], obs.shape[name], new_idl_d[name])
 
         new_covobs = {name: Covobs(0, allcov[name], name, grad=new_grad[name]) for name in new_grad}
 
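Note on the obs.py change: the array_mode branch expands each operand's deltas onto the merged index list once up front and then, per entry of the result, contracts the corresponding Jacobian block with np.tensordot instead of looping over every element of data. A standalone sketch of that contraction with made-up shapes (illustration only, not code from the patch):

    import numpy as np

    new_shape = (2, 2)        # shape of the result array
    op_shape = (2, 2)         # shape of each operand
    n_operands = 2
    ens_length = 50

    # deriv has shape new_shape + (n_operands,) + op_shape,
    # each delta stack has shape op_shape + (ens_length,)
    deriv = np.random.rand(*new_shape, n_operands, *op_shape)
    delta_stacks = [np.random.rand(*op_shape, ens_length) for _ in range(n_operands)]

    i_val = (0, 1)                       # one entry of the result
    new_delta = np.zeros(ens_length)
    for i_dat, dat in enumerate(delta_stacks):
        # default axes=2: contract the operand's index block of the Jacobian
        # with the first two axes of its delta stack -> vector of length ens_length
        new_delta += np.tensordot(deriv[i_val + (i_dat,)], dat)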
diff --git a/tests/linalg_test.py b/tests/linalg_test.py
index d34da29f..58301814 100644
--- a/tests/linalg_test.py
+++ b/tests/linalg_test.py
@@ -29,25 +29,26 @@ def get_complex_matrix(dimension):
 
 
 def test_matmul():
-    for dim in [4, 8]:
-        my_list = []
-        length = 1000 + np.random.randint(200)
-        for i in range(dim ** 2):
-            my_list.append(pe.Obs([np.random.rand(length), np.random.rand(length + 1)], ['t1', 't2']))
-        my_array = np.array(my_list).reshape((dim, dim))
-        tt = pe.linalg.matmul(my_array, my_array) - my_array @ my_array
-        for t, e in np.ndenumerate(tt):
-            assert e.is_zero(), t
+    for dim in [4, 6]:
+        for const in [1, pe.cov_Obs([1.0, 1.0], [[0.001,0.0001], [0.0001, 0.002]], 'norm')[1]]:
+            my_list = []
+            length = 100 + np.random.randint(200)
+            for i in range(dim ** 2):
+                my_list.append(pe.Obs([np.random.rand(length), np.random.rand(length + 1)], ['t1', 't2']))
+            my_array = const * np.array(my_list).reshape((dim, dim))
+            tt = pe.linalg.matmul(my_array, my_array) - my_array @ my_array
+            for t, e in np.ndenumerate(tt):
+                assert e.is_zero(), t
 
-        my_list = []
-        length = 1000 + np.random.randint(200)
-        for i in range(dim ** 2):
-            my_list.append(pe.CObs(pe.Obs([np.random.rand(length), np.random.rand(length + 1)], ['t1', 't2']),
-                                   pe.Obs([np.random.rand(length), np.random.rand(length + 1)], ['t1', 't2'])))
-        my_array = np.array(my_list).reshape((dim, dim))
-        tt = pe.linalg.matmul(my_array, my_array) - my_array @ my_array
-        for t, e in np.ndenumerate(tt):
-            assert e.is_zero(), t
+            my_list = []
+            length = 100 + np.random.randint(200)
+            for i in range(dim ** 2):
+                my_list.append(pe.CObs(pe.Obs([np.random.rand(length), np.random.rand(length + 1)], ['t1', 't2']),
+                                       pe.Obs([np.random.rand(length), np.random.rand(length + 1)], ['t1', 't2'])))
+            my_array = np.array(my_list).reshape((dim, dim)) * const
+            tt = pe.linalg.matmul(my_array, my_array) - my_array @ my_array
+            for t, e in np.ndenumerate(tt):
+                assert e.is_zero(), t
 
 
 def test_jack_matmul():
@@ -152,7 +153,7 @@ def test_multi_dot():
         length = 1000 + np.random.randint(200)
         for i in range(dim ** 2):
             my_list.append(pe.Obs([np.random.rand(length), np.random.rand(length + 1)], ['t1', 't2']))
-        my_array = np.array(my_list).reshape((dim, dim))
+        my_array = pe.cov_Obs(1.0, 0.002, 'cov') * np.array(my_list).reshape((dim, dim))
         tt = pe.linalg.matmul(my_array, my_array, my_array, my_array) - my_array @ my_array @ my_array @ my_array
         for t, e in np.ndenumerate(tt):
             assert e.is_zero(), t
@@ -162,7 +163,7 @@ def test_multi_dot():
         for i in range(dim ** 2):
             my_list.append(pe.CObs(pe.Obs([np.random.rand(length), np.random.rand(length + 1)], ['t1', 't2']),
                                    pe.Obs([np.random.rand(length), np.random.rand(length + 1)], ['t1', 't2'])))
-        my_array = np.array(my_list).reshape((dim, dim))
+        my_array = np.array(my_list).reshape((dim, dim)) * pe.cov_Obs(1.0, 0.002, 'cov')
         tt = pe.linalg.matmul(my_array, my_array, my_array, my_array) - my_array @ my_array @ my_array @ my_array
         for t, e in np.ndenumerate(tt):
             assert e.is_zero(), t
@@ -188,13 +189,13 @@ def test_matmul_irregular_histories():
     standard_array = []
     for i in range(dim ** 2):
         standard_array.append(pe.Obs([np.random.normal(1.1, 0.2, length)], ['ens1']))
-    standard_matrix = np.array(standard_array).reshape((dim, dim))
+    standard_matrix = np.array(standard_array).reshape((dim, dim)) * pe.cov_Obs(1.0, 0.002, 'cov') * pe.pseudo_Obs(0.1, 0.002, 'qr')
 
     for idl in [range(1, 501, 2), range(250, 273), [2, 8, 19, 20, 78]]:
         irregular_array = []
         for i in range(dim ** 2):
             irregular_array.append(pe.Obs([np.random.normal(1.1, 0.2, len(idl))], ['ens1'], idl=[idl]))
-        irregular_matrix = np.array(irregular_array).reshape((dim, dim))
+        irregular_matrix = np.array(irregular_array).reshape((dim, dim)) * pe.cov_Obs([1.0, 1.0], [[0.001,0.0001], [0.0001, 0.002]], 'norm')[0]
 
         t1 = standard_matrix @ irregular_matrix
         t2 = pe.linalg.matmul(standard_matrix, irregular_matrix)
@@ -212,7 +213,7 @@ def test_irregular_matrix_inverse():
         irregular_array = []
         for i in range(dim ** 2):
             irregular_array.append(pe.Obs([np.random.normal(1.1, 0.2, len(idl)), np.random.normal(0.25, 0.1, 10)], ['ens1', 'ens2'], idl=[idl, range(1, 11)]))
-        irregular_matrix = np.array(irregular_array).reshape((dim, dim))
+        irregular_matrix = np.array(irregular_array).reshape((dim, dim)) * pe.cov_Obs(1.0, 0.002, 'cov') * pe.pseudo_Obs(1.0, 0.002, 'ens2|r23')
 
         invertible_irregular_matrix = np.identity(dim) + irregular_matrix @ irregular_matrix.T
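Note on the test changes: the matrices are now multiplied by cov_Obs / pseudo_Obs factors so that observables carrying an explicit covariance matrix (Covobs) pass through the array_mode path alongside Monte Carlo ensembles. The pattern the updated tests exercise, condensed into a standalone snippet (toy numbers, not part of the patch):

    import numpy as np
    import pyerrors as pe

    mc_list = [pe.Obs([np.random.normal(1.0, 0.1, 200)], ['ens1']) for _ in range(4)]
    norm = pe.cov_Obs(1.0, 0.002, 'cov')        # observable defined via an explicit covariance
    matrix = norm * np.array(mc_list).reshape((2, 2))

    diff = pe.linalg.matmul(matrix, matrix) - matrix @ matrix
    for idx, entry in np.ndenumerate(diff):
        assert entry.is_zero(), idx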