Mirror of https://github.com/fjosw/pyerrors.git (synced 2026-05-13 16:46:52 +02:00)

[chore] Stricter ruff rules

commit c896843664, parent b28c2f0b6f
22 changed files with 400 additions and 311 deletions
@@ -477,16 +477,15 @@ A JSON schema that may be used to verify the correctness of a file with respect
 Julia I/O routines for the json.gz format, compatible with [ADerrors.jl](https://gitlab.ift.uam-csic.es/alberto/aderrors.jl), can be found [here](https://github.com/fjosw/ADjson.jl).
 '''
-from .obs import *
-from .correlators import *
-from .fits import *
-from .misc import *
 from . import dirac as dirac
 from . import input as input
+from . import integrate as integrate
 from . import linalg as linalg
 from . import mpm as mpm
 from . import roots as roots
-from . import integrate as integrate
 from . import special as special

+from .correlators import *
+from .fits import *
+from .misc import *
+from .obs import *
 from .version import __version__ as __version__
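Editor's note: the redundant-looking aliases in the new import block (`from . import dirac as dirac`, `from .version import __version__ as __version__`) follow the explicit re-export idiom: type checkers and linters that disallow implicit re-exports treat `import x as x` in an `__init__.py` as a deliberate part of the public API. A minimal sketch with a hypothetical package `mypkg`:

    # mypkg/__init__.py -- hypothetical layout illustrating the idiom
    from . import core as core                       # public: re-exported submodule
    from .version import __version__ as __version__  # public: re-exported constant
    from . import _impl                              # private: no alias; strict
                                                     # checkers may flag outside use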
@@ -1,14 +1,16 @@
 import warnings
 from itertools import permutations
-import numpy as np
+
 import autograd.numpy as anp
 import matplotlib.pyplot as plt
+import numpy as np
 import scipy.linalg
-from .obs import Obs, reweight, correlate, CObs
-from .misc import dump_object, _assert_equal_properties
-from .fits import least_squares
-from .roots import find_root
+
 from . import linalg
+from .fits import least_squares
+from .misc import _assert_equal_properties, dump_object
+from .obs import CObs, Obs, correlate, reweight
+from .roots import find_root


 class Corr:
@@ -42,7 +44,7 @@ class Corr:

     __slots__ = ["content", "N", "T", "tag", "prange"]

-    def __init__(self, data_input, padding=[0, 0], prange=None):
+    def __init__(self, data_input, padding=None, prange=None):
         """ Initialize a Corr object.

         Parameters
@@ -58,6 +60,9 @@ class Corr:
             region identified for this correlator.
         """

+        if padding is None:
+            padding = [0, 0]
+
         if isinstance(data_input, np.ndarray):
             if data_input.ndim == 1:
                 data_input = list(data_input)
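Editor's note: the `padding=[0, 0]` to `padding=None` rewrite above is the standard None-sentinel fix for mutable default arguments (ruff rule B006): a list literal in the signature is created once at definition time and shared between all calls that mutate it. A minimal sketch of the pitfall, using a hypothetical `Box` class rather than the real `Corr`:

    class Box:
        def __init__(self, items, padding=None):
            # 'padding=[0, 0]' in the signature would be evaluated once and
            # shared by every instance; the None sentinel gives a fresh list.
            if padding is None:
                padding = [0, 0]
            self.items = items
            self.padding = padding

    a = Box([1])
    a.padding[0] += 1           # mutate a's list
    b = Box([2])
    assert b.padding == [0, 0]  # unaffected thanks to the sentinel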
@@ -77,7 +82,7 @@ class Corr:
             for t in range(T):
                 if any([(item.content[t] is None) for item in data_input.flatten()]):
                     if not all([(item.content[t] is None) for item in data_input.flatten()]):
-                        warnings.warn("Input ill-defined at different timeslices. Conversion leads to data loss.!", RuntimeWarning)
+                        warnings.warn("Input ill-defined at different timeslices. Conversion leads to data loss.!", RuntimeWarning, stacklevel=2)
                     input_as_list.append(None)
                 else:
                     array_at_timeslace = np.empty([N, N], dtype="object")
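Editor's note: most `warnings.warn` calls in this commit gain `stacklevel=2` (ruff rule B028). With the default `stacklevel=1` the warning is attributed to the line containing `warn()` itself; `stacklevel=2` points it at the caller, which is where the problematic input actually originates. A small sketch with a hypothetical helper:

    import warnings

    def check_input(data):
        # stacklevel=2: the reported file/line is the caller of check_input,
        # not this warn() call inside the library.
        warnings.warn("Input looks ill-defined.", RuntimeWarning, stacklevel=2)

    check_input([])  # the warning is reported at this call site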
@@ -178,14 +183,14 @@ class Corr:
             if not vector_l.shape == vector_r.shape == (self.N,):
                 raise ValueError("Vectors are of wrong shape!")
             if normalize:
-                vector_l, vector_r = vector_l / np.sqrt((vector_l @ vector_l)), vector_r / np.sqrt(vector_r @ vector_r)
+                vector_l, vector_r = vector_l / np.sqrt(vector_l @ vector_l), vector_r / np.sqrt(vector_r @ vector_r)
             newcontent = [None if _check_for_none(self, item) else np.asarray([vector_l.T @ item @ vector_r]) for item in self.content]

         else:
             # There are no checks here yet. There are so many possible scenarios, where this can go wrong.
             if normalize:
                 for t in range(self.T):
-                    vector_l[t], vector_r[t] = vector_l[t] / np.sqrt((vector_l[t] @ vector_l[t])), vector_r[t] / np.sqrt(vector_r[t] @ vector_r[t])
+                    vector_l[t], vector_r[t] = vector_l[t] / np.sqrt(vector_l[t] @ vector_l[t]), vector_r[t] / np.sqrt(vector_r[t] @ vector_r[t])

             newcontent = [None if (_check_for_none(self, self.content[t]) or vector_l[t] is None or vector_r[t] is None) else np.asarray([vector_l[t].T @ self.content[t] @ vector_r[t]]) for t in range(self.T)]
         return Corr(newcontent)
@@ -228,7 +233,7 @@ class Corr:

         if self.content[0] is not None:
             if np.argmax(np.abs([o[0].value if o is not None else 0 for o in self.content])) != 0:
-                warnings.warn("Correlator does not seem to be symmetric around x0=0.", RuntimeWarning)
+                warnings.warn("Correlator does not seem to be symmetric around x0=0.", RuntimeWarning, stacklevel=2)

         newcontent = [self.content[0]]
         for t in range(1, self.T):
@@ -250,7 +255,7 @@ class Corr:
         test = 1 * self
         test.gamma_method()
         if not all([o.is_zero_within_error(3) for o in test.content[0]]):
-            warnings.warn("Correlator does not seem to be anti-symmetric around x0=0.", RuntimeWarning)
+            warnings.warn("Correlator does not seem to be anti-symmetric around x0=0.", RuntimeWarning, stacklevel=2)

         newcontent = [self.content[0]]
         for t in range(1, self.T):
@@ -342,7 +347,7 @@ class Corr:
             raise ValueError("ts has to be larger than t0.")

         if "sorted_list" in kwargs:
-            warnings.warn("Argument 'sorted_list' is deprecated, use 'sort' instead.", DeprecationWarning)
+            warnings.warn("Argument 'sorted_list' is deprecated, use 'sort' instead.", DeprecationWarning, stacklevel=2)
             sort = kwargs.get("sorted_list")

         if self.is_matrix_symmetric():
@@ -381,7 +386,7 @@ class Corr:

         elif sort in ["Eigenvalue", "Eigenvector"]:
             if sort == "Eigenvalue" and ts is not None:
-                warnings.warn("ts has no effect when sorting by eigenvalue is chosen.", RuntimeWarning)
+                warnings.warn("ts has no effect when sorting by eigenvalue is chosen.", RuntimeWarning, stacklevel=2)
             all_vecs = [None] * (t0 + 1)
             for t in range(t0 + 1, self.T):
                 try:
@@ -439,7 +444,7 @@ class Corr:

         array = np.empty([N, N], dtype="object")
         new_content = []
-        for t in range(self.T):
+        for _t in range(self.T):
             new_content.append(array.copy())

         def wrap(i):
@@ -569,7 +574,7 @@ class Corr:
             if not t_slice[0].is_zero_within_error(5):
                 t_slices.append(x0)
         if t_slices:
-            warnings.warn("T symmetry partners do not agree within 5 sigma on time slices " + str(t_slices) + ".", RuntimeWarning)
+            warnings.warn("T symmetry partners do not agree within 5 sigma on time slices " + str(t_slices) + ".", RuntimeWarning, stacklevel=2)

         return (self + T_partner) / 2
@@ -663,7 +668,7 @@ class Corr:
                 if (self.content[t - 1] is None) or (self.content[t + 1] is None):
                     newcontent.append(None)
                 else:
-                    newcontent.append((self.content[t + 1] - 2 * self.content[t] + self.content[t - 1]))
+                    newcontent.append(self.content[t + 1] - 2 * self.content[t] + self.content[t - 1])
             if (all([x is None for x in newcontent])):
                 raise ValueError("Derivative is undefined at all timeslices")
             return Corr(newcontent, padding=[1, 1])
@@ -1005,7 +1010,7 @@ class Corr:
             raise ValueError("Correlator needs to be projected first.")

         mc_names = list(set([item for sublist in [sum(map(o[0].e_content.get, o[0].mc_names), []) for o in self.content if o is not None] for item in sublist]))
-        x0_vals = [n for (n, o) in zip(np.arange(self.T), self.content) if o is not None]
+        x0_vals = [n for (n, o) in zip(np.arange(self.T), self.content, strict=True) if o is not None]

         for name in mc_names:
             data = np.array([o[0].deltas[name] + o[0].r_values[name] for o in self.content if o is not None]).T
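Editor's note: adding `strict=True` to `zip` (ruff rule B905, Python 3.10+) turns silent truncation into an error when the iterables have different lengths, which would otherwise drop entries without notice. Sketch:

    x_vals = [0, 1, 2]
    y_vals = [10, 20]                 # one entry short

    print(list(zip(x_vals, y_vals)))  # [(0, 10), (1, 20)] -- x_vals[2] silently dropped

    try:
        list(zip(x_vals, y_vals, strict=True))
    except ValueError as err:
        print(err)                    # zip() argument 2 is shorter than argument 1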
@@ -1396,7 +1401,7 @@ class Corr:
         if basematrix is None:
             basematrix = self
         if Ntrunc >= basematrix.N:
-            raise ValueError('Cannot truncate using Ntrunc <= %d' % (basematrix.N))
+            raise ValueError(f'Cannot truncate using Ntrunc <= {basematrix.N}')
         if basematrix.N != self.N:
             raise ValueError('basematrix and targetmatrix have to be of the same size.')
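Editor's note: the many `%`-formatting to f-string rewrites in this commit (ruff UP031/UP032 family) are behavior-preserving; format specifications carry over unchanged after the colon. Sketch:

    N, pos, num = 4, 7, 0.1
    old = 'pos %d too large for covariance matrix with dimension %dx%d!' % (pos, N, N)
    new = f'pos {pos} too large for covariance matrix with dimension {N}x{N}!'
    assert old == new
    assert '%1.16e' % num == f'{num:1.16e}'   # numeric specs map one-to-one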
@@ -32,7 +32,7 @@ class Covobs:
                 raise Exception('Have to specify position of cov-element belonging to mean!')
             else:
                 if pos > self.N:
-                    raise Exception('pos %d too large for covariance matrix with dimension %dx%d!' % (pos, self.N, self.N))
+                    raise Exception(f'pos {pos} too large for covariance matrix with dimension {self.N}x{self.N}!')
             self._grad = np.zeros((self.N, 1))
             self._grad[pos] = 1.
         else:
@@ -72,7 +72,7 @@ class Covobs:
         for i in range(self.N):
             for j in range(i):
                 if not self._cov[i][j] == self._cov[j][i]:
-                    raise Exception('Covariance matrix is non-symmetric for (%d, %d' % (i, j))
+                    raise Exception(f'Covariance matrix is non-symmetric for ({i}, {j}')

         evals = np.linalg.eigvalsh(self._cov)
         for ev in evals:
@@ -1,6 +1,5 @@
 import numpy as np
-

 gammaX = np.array(
     [[0, 0, 0, 1j], [0, 0, 1j, 0], [0, -1j, 0, 0], [-1j, 0, 0, 0]],
     dtype=complex)
@@ -1,20 +1,22 @@
 import gc
-from collections.abc import Sequence
 import warnings
-import numpy as np
+from collections.abc import Sequence
+
 import autograd.numpy as anp
+import iminuit
+import matplotlib.pyplot as plt
+import numpy as np
 import scipy.optimize
 import scipy.stats
-import matplotlib.pyplot as plt
-from matplotlib import gridspec
-from odrpack import odr_fit
-import iminuit
-from autograd import jacobian as auto_jacobian
-from autograd import hessian as auto_hessian
 from autograd import elementwise_grad as egrad
-from numdifftools import Jacobian as num_jacobian
+from autograd import hessian as auto_hessian
+from autograd import jacobian as auto_jacobian
+from matplotlib import gridspec
 from numdifftools import Hessian as num_hessian
-from .obs import Obs, derived_observable, covariance, cov_Obs, invert_corr_cov_cholesky
+from numdifftools import Jacobian as num_jacobian
+from odrpack import odr_fit
+
+from .obs import Obs, cov_Obs, covariance, derived_observable, invert_corr_cov_cholesky


 class Fit_result(Sequence):
@@ -354,7 +356,7 @@ def least_squares(x, y, func, priors=None, silent=False, **kwargs):
    if 'initial_guess' in kwargs:
        x0 = kwargs.get('initial_guess')
        if len(x0) != n_parms:
-            raise ValueError('Initial guess does not have the correct length: %d vs. %d' % (len(x0), n_parms))
+            raise ValueError(f'Initial guess does not have the correct length: {len(x0)} vs. {n_parms}')
    else:
        x0 = [0.1] * n_parms
@@ -494,12 +496,12 @@ def least_squares(x, y, func, priors=None, silent=False, **kwargs):
    # Compute hess^{-1} @ jac_jac_y[:n_parms + m, n_parms + m:] using LAPACK dgesv
    try:
        deriv_y = -scipy.linalg.solve(hess, jac_jac_y[:n_parms, n_parms:])
-    except np.linalg.LinAlgError:
-        raise Exception("Cannot invert hessian matrix.")
+    except np.linalg.LinAlgError as err:
+        raise Exception("Cannot invert hessian matrix.") from err

    result = []
    for i in range(n_parms):
-        result.append(derived_observable(lambda x_all, **kwargs: (x_all[0] + np.finfo(np.float64).eps) / (y_all[0].value + np.finfo(np.float64).eps) * fitp[i], list(y_all) + loc_priors, man_grad=list(deriv_y[i])))
+        result.append(derived_observable(lambda x_all, i=i, **kwargs: (x_all[0] + np.finfo(np.float64).eps) / (y_all[0].value + np.finfo(np.float64).eps) * fitp[i], list(y_all) + loc_priors, man_grad=list(deriv_y[i])))

    output.fit_parameters = result
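Editor's note: the `lambda x_all, **kwargs: ... fitp[i]` to `lambda x_all, i=i, **kwargs: ...` change fixes late binding of the loop variable (ruff rule B023): closures created in a loop all see the final value of `i` when they are eventually called, unless the current value is captured as a default. Minimal sketch:

    late = [lambda: i for i in range(3)]
    bound = [lambda i=i: i for i in range(3)]

    print([f() for f in late])   # [2, 2, 2] -- every closure reads the last i
    print([f() for f in bound])  # [0, 1, 2] -- default captures i per iteration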
@@ -636,7 +638,7 @@ def total_least_squares(x, y, func, silent=False, **kwargs):
    if 'initial_guess' in kwargs:
        x0 = np.asarray(kwargs.get('initial_guess'), dtype=np.float64)
        if len(x0) != n_parms:
-            raise Exception('Initial guess does not have the correct length: %d vs. %d' % (len(x0), n_parms))
+            raise Exception(f'Initial guess does not have the correct length: {len(x0)} vs. {n_parms}')
    else:
        x0 = np.ones(n_parms, dtype=np.float64)
@@ -681,7 +683,7 @@ def total_least_squares(x, y, func, silent=False, **kwargs):
            f"ODR fit is rank deficient (irank={out.irank}, inv_condnum={out.inv_condnum:.2e}). "
            "This may indicate a vanishing chi-squared (n_obs == n_parms). "
            "Results may be unreliable.",
-            RuntimeWarning
+            RuntimeWarning, stacklevel=2
        )
    else:
        raise Exception('The minimization procedure did not converge.')
@@ -712,7 +714,7 @@ def total_least_squares(x, y, func, silent=False, **kwargs):
        P_phi = A @ np.linalg.pinv(A.T @ A) @ A.T
        expected_chisquare = np.trace((np.identity(P_phi.shape[0]) - P_phi) @ W @ cov @ W)
        if expected_chisquare <= 0.0:
-            warnings.warn("Negative expected_chisquare.", RuntimeWarning)
+            warnings.warn("Negative expected_chisquare.", RuntimeWarning, stacklevel=2)
            expected_chisquare = np.abs(expected_chisquare)
        output.chisquare_by_expected_chisquare = odr_chisquare(np.concatenate((out.beta, out.xplusd.ravel()))) / expected_chisquare
        if not silent:
@@ -735,8 +737,8 @@ def total_least_squares(x, y, func, silent=False, **kwargs):
    # Compute hess^{-1} @ jac_jac_x[:n_parms + m, n_parms + m:] using LAPACK dgesv
    try:
        deriv_x = -scipy.linalg.solve(hess, jac_jac_x[:n_parms + m, n_parms + m:])
-    except np.linalg.LinAlgError:
-        raise Exception("Cannot invert hessian matrix.")
+    except np.linalg.LinAlgError as err:
+        raise Exception("Cannot invert hessian matrix.") from err

    def odr_chisquare_compact_y(d):
        model = func(d[:n_parms], d[n_parms:n_parms + m].reshape(x_shape))
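Editor's note: chaining with `raise ... from err` (ruff rule B904) preserves the original exception as the explicit cause, so the traceback shows both the low-level failure and the library-level message. Sketch with a hypothetical wrapper:

    import numpy as np

    def safe_invert(mat):
        try:
            return np.linalg.inv(mat)
        except np.linalg.LinAlgError as err:
            # 'from err' records the LinAlgError as __cause__ of the new error
            raise Exception("Cannot invert hessian matrix.") from err

    try:
        safe_invert(np.zeros((2, 2)))
    except Exception as e:
        print(repr(e.__cause__))  # LinAlgError('Singular matrix')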
@@ -748,12 +750,12 @@ def total_least_squares(x, y, func, silent=False, **kwargs):
    # Compute hess^{-1} @ jac_jac_y[:n_parms + m, n_parms + m:] using LAPACK dgesv
    try:
        deriv_y = -scipy.linalg.solve(hess, jac_jac_y[:n_parms + m, n_parms + m:])
-    except np.linalg.LinAlgError:
-        raise Exception("Cannot invert hessian matrix.")
+    except np.linalg.LinAlgError as err:
+        raise Exception("Cannot invert hessian matrix.") from err

    result = []
    for i in range(n_parms):
-        result.append(derived_observable(lambda my_var, **kwargs: (my_var[0] + np.finfo(np.float64).eps) / (x.ravel()[0].value + np.finfo(np.float64).eps) * out.beta[i], list(x.ravel()) + list(y), man_grad=list(deriv_x[i]) + list(deriv_y[i])))
+        result.append(derived_observable(lambda my_var, i=i, **kwargs: (my_var[0] + np.finfo(np.float64).eps) / (x.ravel()[0].value + np.finfo(np.float64).eps) * out.beta[i], list(x.ravel()) + list(y), man_grad=list(deriv_x[i]) + list(deriv_y[i])))

    output.fit_parameters = result
@@ -805,7 +807,7 @@ def qqplot(x, o_y, func, p, title=""):
    """

    residuals = []
-    for i_x, i_y in zip(x, o_y):
+    for i_x, i_y in zip(x, o_y, strict=True):
        residuals.append((i_y - func(p, i_x)) / i_y.dvalue)
    residuals = sorted(residuals)
    my_y = [o.value for o in residuals]
@@ -872,14 +874,14 @@ def error_band(x, func, beta):
    """
    cov = covariance(beta)
    if np.any(np.abs(cov - cov.T) > 1000 * np.finfo(np.float64).eps):
-        warnings.warn("Covariance matrix is not symmetric within floating point precision", RuntimeWarning)
+        warnings.warn("Covariance matrix is not symmetric within floating point precision", RuntimeWarning, stacklevel=2)

    deriv = []
-    for i, item in enumerate(x):
+    for item in x:
        deriv.append(np.array(egrad(func)([o.value for o in beta], item)))

    err = []
-    for i, item in enumerate(x):
+    for i, _item in enumerate(x):
        err.append(np.sqrt(deriv[i] @ cov @ deriv[i]))
    err = np.array(err)
@@ -1,6 +1,8 @@
 import ctypes
 import hashlib
+
 import autograd.numpy as np  # Thinly-wrapped numpy
+
 from ..obs import Obs
@@ -100,39 +102,39 @@ def read_ADerrors(file_path, bdio_path='./libbdio.so', **kwargs):
    print('neid', neid)

    ndata = []
-    for index in range(neid):
+    for _ in range(neid):
        ndata.append(read_c_size_t())
    print('ndata', ndata)

    nrep = []
-    for index in range(neid):
+    for _ in range(neid):
        nrep.append(read_c_size_t())
    print('nrep', nrep)

    vrep = []
    for index in range(neid):
        vrep.append([])
-        for jndex in range(nrep[index]):
+        for _jndex in range(nrep[index]):
            vrep[-1].append(read_c_size_t())
    print('vrep', vrep)

    ids = []
-    for index in range(neid):
+    for _ in range(neid):
        ids.append(read_c_size_t())
    print('ids', ids)

    nt = []
-    for index in range(neid):
+    for _ in range(neid):
        nt.append(read_c_size_t())
    print('nt', nt)

    zero = []
-    for index in range(neid):
+    for _ in range(neid):
        zero.append(read_c_double())
    print('zero', zero)

    four = []
-    for index in range(neid):
+    for _ in range(neid):
        four.append(read_c_double())
    print('four', four)
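Editor's note: renaming unused loop counters to `_` (ruff rule B007) is purely cosmetic: `range(n)` still drives the iteration count, the underscore only signals that the body never reads the variable. Sketch with a hypothetical reader callback:

    from itertools import count

    def read_n_values(read_one, n):
        values = []
        for _ in range(n):        # counter intentionally unused
            values.append(read_one())
        return values

    counter = count()
    print(read_n_values(lambda: next(counter), 3))  # [0, 1, 2]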
@@ -147,10 +149,10 @@ def read_ADerrors(file_path, bdio_path='./libbdio.so', **kwargs):
        assert len(ids) == len(no_reps)
        tmp_names = []
        ens_length = max([len(str(o)) for o in ids])
-        for loc_id, reps in zip(ids, no_reps):
+        for loc_id, reps in zip(ids, no_reps, strict=True):
            for index in range(reps):
                missing_chars = ens_length - len(str(loc_id))
-                tmp_names.append(str(loc_id) + ' ' * missing_chars + '|r' + '{0:03d}'.format(index))
+                tmp_names.append(str(loc_id) + ' ' * missing_chars + '|r' + f'{index:03d}')

        return_list.append(Obs(samples, tmp_names))
@@ -237,7 +239,7 @@ def write_ADerrors(obs_list, file_path, bdio_path='./libbdio.so', **kwargs):
        ids.append(int(hashlib.sha256(key.encode('utf-8')).hexdigest(), 16) % 10 ** 8)
    print('ids', ids)
    nt = []
-    for e, e_name in enumerate(obs.e_names):
+    for _e, e_name in enumerate(obs.e_names):

        r_length = []
        for r_name in obs.e_content[e_name]:
@@ -451,11 +453,11 @@ def read_mesons(file_path, bdio_path='./libbdio.so', **kwargs):
                if cnfg_no > kwargs.get('stop'):
                    break
            idl.append(cnfg_no)
-            print('\r%s %i' % ('Reading configuration', cnfg_no), end='\r')
+            print(f'\rReading configuration {cnfg_no}', end='\r')
            if len(idl) == 1:
                no_corrs = len(corr_name)
                data = []
-                for c in range(no_corrs):
+                for _ in range(no_corrs):
                    data.append([])

            corr_no = 0
@@ -490,7 +492,7 @@ def read_mesons(file_path, bdio_path='./libbdio.so', **kwargs):
        try:
            indices = [idl.index(i) for i in idl_target]
        except ValueError as err:
-            raise Exception('Configurations in file do no match target list!', err)
+            raise Exception('Configurations in file do no match target list!', err) from err
    else:
        indices = None
@@ -652,11 +654,11 @@ def read_dSdm(file_path, bdio_path='./libbdio.so', **kwargs):
                if cnfg_no > kwargs.get('stop'):
                    break
            idl.append(cnfg_no)
-            print('\r%s %i' % ('Reading configuration', cnfg_no), end='\r')
+            print(f'\rReading configuration {cnfg_no}', end='\r')
            if len(idl) == 1:
                no_corrs = len(corr_name)
                data = []
-                for c in range(no_corrs):
+                for _ in range(no_corrs):
                    data.append([])

            corr_no = 0
@@ -680,7 +682,7 @@ def read_dSdm(file_path, bdio_path='./libbdio.so', **kwargs):
        try:
            indices = [idl.index(i) for i in idl_target]
        except ValueError as err:
-            raise Exception('Configurations in file do no match target list!', err)
+            raise Exception('Configurations in file do no match target list!', err) from err

    result = {}
    for c in range(no_corrs):
@@ -1,16 +1,17 @@
-from collections import defaultdict
-import gzip
-import lxml.etree as et
-import getpass
-import socket
 import datetime
+import getpass
+import gzip
 import json
+import socket
 import warnings
+from collections import defaultdict
+
+import lxml.etree as et
 import numpy as np
-from ..obs import Obs
-from ..obs import _merge_idx
-from ..covobs import Covobs

 from .. import version as pyerrorsversion
+from ..covobs import Covobs
+from ..obs import Obs, _merge_idx


 # Based on https://stackoverflow.com/a/10076823
@@ -45,15 +46,15 @@ def _dict_to_xmlstring(d):
            if k.startswith('#'):
                for la in d[k]:
                    iters += la
-                iters = '<array>\n' + iters + '<%sarray>\n' % ('/')
+                iters = '<array>\n' + iters + '<{}array>\n'.format('/')
                return iters
            if isinstance(d[k], dict):
-                iters += '<%s>\n' % (k) + _dict_to_xmlstring(d[k]) + '<%s%s>\n' % ('/', k)
+                iters += f'<{k}>\n' + _dict_to_xmlstring(d[k]) + '<{}{}>\n'.format('/', k)
            elif isinstance(d[k], str):
                if len(d[k]) > 100:
-                    iters += '<%s>\n ' % (k) + d[k] + ' \n<%s%s>\n' % ('/', k)
+                    iters += f'<{k}>\n ' + d[k] + ' \n<{}{}>\n'.format('/', k)
                else:
-                    iters += '<%s> ' % (k) + d[k] + ' <%s%s>\n' % ('/', k)
+                    iters += f'<{k}> ' + d[k] + ' <{}{}>\n'.format('/', k)
            elif isinstance(d[k], list):
                for i in range(len(d[k])):
                    iters += _dict_to_xmlstring(d[k][i])
@@ -72,20 +73,20 @@ def _dict_to_xmlstring_spaces(d, space=' '):
    c = 0
    cm = False
    for li in s.split('\n'):
-        if li.startswith('<%s' % ('/')):
+        if li.startswith('<{}'.format('/')):
            c -= 1
            cm = True
-        for i in range(c):
+        for _i in range(c):
            o += space
        o += li + '\n'
        if li.startswith('<') and not cm:
-            if '<%s' % ('/') not in li:
+            if '<{}'.format('/') not in li:
                c += 1
        cm = False
    return o


-def create_pobs_string(obsl, name, spec='', origin='', symbol=[], enstag=None):
+def create_pobs_string(obsl, name, spec='', origin='', symbol=None, enstag=None):
    """Export a list of Obs or structures containing Obs to an xml string
    according to the Zeuthen pobs format.
@@ -113,6 +114,9 @@ def create_pobs_string(obsl, name, spec='', origin='', symbol=[], enstag=None):
        XML formatted string of the input data
    """

+    if symbol is None:
+        symbol = []
+
    od = {}
    ename = obsl[0].e_names[0]
    names = list(obsl[0].deltas.keys())
@@ -143,31 +147,31 @@ def create_pobs_string(obsl, name, spec='', origin='', symbol=[], enstag=None):
        pd['enstag'] = enstag
    else:
        pd['enstag'] = ename
-    pd['nr'] = '%d' % (nr)
+    pd['nr'] = f'{nr}'
    pd['array'] = []
    osymbol = 'cfg'
    if not isinstance(symbol, list):
        raise Exception('Symbol has to be a list!')
    if not (len(symbol) == 0 or len(symbol) == len(obsl)):
-        raise Exception('Symbol has to be a list of lenght 0 or %d!' % (len(obsl)))
+        raise Exception(f'Symbol has to be a list of lenght 0 or {len(obsl)}!')
    for s in symbol:
-        osymbol += ' %s' % s
+        osymbol += f' {s}'
    for r in range(nr):
        ad = {}
        ad['id'] = onames[r]
        Nconf = len(obsl[0].deltas[names[r]])
-        layout = '%d i f%d' % (Nconf, len(obsl))
+        layout = f'{Nconf} i f{len(obsl)}'
        ad['layout'] = layout
        ad['symbol'] = osymbol
        data = ''
        for c in range(Nconf):
-            data += '%d ' % obsl[0].idl[names[r]][c]
+            data += f'{obsl[0].idl[names[r]][c]} '
            for o in obsl:
                num = o.deltas[names[r]][c] + o.r_values[names[r]]
                if num == 0:
                    data += '0 '
                else:
-                    data += '%1.16e ' % (num)
+                    data += f'{num:1.16e} '
            data += '\n'
        ad['#data'] = data
        pd['array'].append(ad)
@@ -176,7 +180,7 @@ def create_pobs_string(obsl, name, spec='', origin='', symbol=[], enstag=None):
    return rs


-def write_pobs(obsl, fname, name, spec='', origin='', symbol=[], enstag=None, gz=True):
+def write_pobs(obsl, fname, name, spec='', origin='', symbol=None, enstag=None, gz=True):
    """Export a list of Obs or structures containing Obs to a .xml.gz file
    according to the Zeuthen pobs format.
@@ -236,7 +240,7 @@ class _NoTagInDataError(Exception):
    """Raised when tag is not in data"""
    def __init__(self, tag):
        self.tag = tag
-        super().__init__('Tag %s not in data!' % (self.tag))
+        super().__init__(f'Tag {self.tag} not in data!')


def _find_tag(dat, tag):
@@ -333,8 +337,8 @@ def read_pobs(fname, full_output=False, gz=True, separator_insertion=None):
            content = fin.read()
    else:
        if fname.endswith('.gz'):
-            warnings.warn("Trying to read from %s without unzipping!" % fname, UserWarning)
-        with open(fname, 'r') as fin:
+            warnings.warn(f"Trying to read from {fname} without unzipping!", UserWarning, stacklevel=2)
+        with open(fname) as fin:
            content = fin.read()

    # parse xml file content
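Editor's note: dropping the `'r'` argument from `open` (ruff rule UP015) is a no-op, since text-mode read is the default; the surrounding logic only warns when a `.gz` name is opened without decompression. A condensed sketch of the same pattern with a hypothetical helper:

    import gzip
    import warnings

    def read_text(fname, gz=False):
        if gz:
            with gzip.open(fname, 'rt') as fin:  # transparent decompression
                return fin.read()
        if fname.endswith('.gz'):
            # stacklevel=2 attributes the warning to the caller of read_text
            warnings.warn(f"Trying to read from {fname} without unzipping!",
                          UserWarning, stacklevel=2)
        with open(fname) as fin:                 # identical to open(fname, 'r')
            return fin.read()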
@@ -359,7 +363,7 @@ def read_pobs(fname, full_output=False, gz=True, separator_insertion=None):
        elif isinstance(separator_insertion, int):
            name = name[:separator_insertion] + '|' + name[separator_insertion:]
        elif isinstance(separator_insertion, str):
-            name = name.replace(separator_insertion, "|%s" % (separator_insertion))
+            name = name.replace(separator_insertion, f"|{separator_insertion}")
        else:
            raise Exception("separator_insertion has to be string or int, is ", type(separator_insertion))
        names.append(name)
@@ -479,7 +483,7 @@ def import_dobs_string(content, full_output=False, separator_insertion=True):
            elif isinstance(separator_insertion, int):
                rname = rname[:separator_insertion] + '|' + rname[separator_insertion:]
            elif isinstance(separator_insertion, str):
-                rname = rname.replace(separator_insertion, "|%s" % (separator_insertion))
+                rname = rname.replace(separator_insertion, f"|{separator_insertion}")
            else:
                raise Exception("separator_insertion has to be string or int, is ", type(separator_insertion))
            if '|' in rname:
@@ -612,8 +616,8 @@ def read_dobs(fname, full_output=False, gz=True, separator_insertion=True):
            content = fin.read()
    else:
        if fname.endswith('.gz'):
-            warnings.warn("Trying to read from %s without unzipping!" % fname, UserWarning)
-        with open(fname, 'r') as fin:
+            warnings.warn(f"Trying to read from {fname} without unzipping!", UserWarning, stacklevel=2)
+        with open(fname) as fin:
            content = fin.read()

    return import_dobs_string(content, full_output, separator_insertion=separator_insertion)
@@ -630,26 +634,26 @@ def _dobsdict_to_xmlstring(d):
            elif k.startswith('#'):
                for li in d[k]:
                    iters += li
-                iters = '<array>\n' + iters + '<%sarray>\n' % ('/')
+                iters = '<array>\n' + iters + '<{}array>\n'.format('/')
                return iters
            if isinstance(d[k], dict):
-                iters += '<%s>\n' % (k) + _dobsdict_to_xmlstring(d[k]) + '<%s%s>\n' % ('/', k)
+                iters += f'<{k}>\n' + _dobsdict_to_xmlstring(d[k]) + '<{}{}>\n'.format('/', k)
            elif isinstance(d[k], str):
                if len(d[k]) > 100:
-                    iters += '<%s>\n ' % (k) + d[k] + ' \n<%s%s>\n' % ('/', k)
+                    iters += f'<{k}>\n ' + d[k] + ' \n<{}{}>\n'.format('/', k)
                else:
-                    iters += '<%s> ' % (k) + d[k] + ' <%s%s>\n' % ('/', k)
+                    iters += f'<{k}> ' + d[k] + ' <{}{}>\n'.format('/', k)
            elif isinstance(d[k], list):
                tmps = ''
                if k in ['edata', 'cdata']:
                    for i in range(len(d[k])):
-                        tmps += '<%s>\n' % (k) + _dobsdict_to_xmlstring(d[k][i]) + '</%s>\n' % (k)
+                        tmps += f'<{k}>\n' + _dobsdict_to_xmlstring(d[k][i]) + f'</{k}>\n'
                else:
                    for i in range(len(d[k])):
                        tmps += _dobsdict_to_xmlstring(d[k][i])
                iters += tmps
            elif isinstance(d[k], (int, float)):
-                iters += '<%s> ' % (k) + str(d[k]) + ' <%s%s>\n' % ('/', k)
+                iters += f'<{k}> ' + str(d[k]) + ' <{}{}>\n'.format('/', k)
            elif not d[k]:
                return '\n'
            else:
@@ -665,20 +669,20 @@ def _dobsdict_to_xmlstring_spaces(d, space=' '):
    c = 0
    cm = False
    for li in s.split('\n'):
-        if li.startswith('<%s' % ('/')):
+        if li.startswith('<{}'.format('/')):
            c -= 1
            cm = True
-        for i in range(c):
+        for _i in range(c):
            o += space
        o += li + '\n'
        if li.startswith('<') and not cm:
-            if '<%s' % ('/') not in li:
+            if '<{}'.format('/') not in li:
                c += 1
        cm = False
    return o


-def create_dobs_string(obsl, name, spec='dobs v1.0', origin='', symbol=[], who=None, enstags=None):
+def create_dobs_string(obsl, name, spec='dobs v1.0', origin='', symbol=None, who=None, enstags=None):
    """Generate the string for the export of a list of Obs or structures containing Obs
    to a .xml.gz file according to the Zeuthen dobs format.
@@ -711,6 +715,8 @@ def create_dobs_string(obsl, name, spec='dobs v1.0', origin='', symbol=[], who=None, enstags=None):
    """
    if enstags is None:
        enstags = {}
+    if symbol is None:
+        symbol = []
    od = {}
    r_names = []
    for o in obsl:
@@ -742,21 +748,21 @@ def create_dobs_string(obsl, name, spec='dobs v1.0', origin='', symbol=[], who=None, enstags=None):
    pd['name'] = name
    pd['array'] = {}
    pd['array']['id'] = 'val'
-    pd['array']['layout'] = '1 f%d' % (len(obsl))
+    pd['array']['layout'] = f'1 f{len(obsl)}'
    osymbol = ''
    if symbol:
        if not isinstance(symbol, list):
            raise Exception('Symbol has to be a list!')
        if not (len(symbol) == 0 or len(symbol) == len(obsl)):
-            raise Exception('Symbol has to be a list of lenght 0 or %d!' % (len(obsl)))
+            raise Exception(f'Symbol has to be a list of lenght 0 or {len(obsl)}!')
        osymbol = symbol[0]
        for s in symbol[1:]:
-            osymbol += ' %s' % s
+            osymbol += f' {s}'
        pd['array']['symbol'] = osymbol

-    pd['array']['#values'] = [' '.join(['%1.16e' % o.value for o in obsl])]
-    pd['ne'] = '%d' % (ne)
-    pd['nc'] = '%d' % (nc)
+    pd['array']['#values'] = [' '.join([f'{o.value:1.16e}' for o in obsl])]
+    pd['ne'] = f'{ne}'
+    pd['nc'] = f'{nc}'
    pd['edata'] = []
    for name in mc_names:
        ed = {}
@@ -772,13 +778,13 @@ def create_dobs_string(obsl, name, spec='dobs v1.0', origin='', symbol=[], who=None, enstags=None):
            ad['id'] = repname.replace('|', '')
            idx = _merge_idx([o.idl.get(repname, []) for o in obsl])
            Nconf = len(idx)
-            layout = '%d i f%d' % (Nconf, len(obsl))
+            layout = f'{Nconf} i f{len(obsl)}'
            ad['layout'] = layout
            data = ''
            counters = [0 for o in obsl]
            offsets = [o.r_values[repname] - o.value if repname in o.r_values else 0 for o in obsl]
            for ci in idx:
-                data += '%d ' % ci
+                data += f'{ci} '
                for oi in range(len(obsl)):
                    o = obsl[oi]
                    if repname in o.idl:
@@ -787,14 +793,14 @@ def create_dobs_string(obsl, name, spec='dobs v1.0', origin='', symbol=[], who=None, enstags=None):
                            if num == 0:
                                data += '0 '
                            else:
-                                data += '%1.16e ' % (num)
+                                data += f'{num:1.16e} '
                            continue
                        if o.idl[repname][counters[oi]] == ci:
                            num = o.deltas[repname][counters[oi]] + offsets[oi]
                            if num == 0:
                                data += '0 '
                            else:
-                                data += '%1.16e ' % (num)
+                                data += f'{num:1.16e} '
                            counters[oi] += 1
                            if counters[oi] >= len(o.idl[repname]):
                                counters[oi] = -1
@@ -803,7 +809,7 @@ def create_dobs_string(obsl, name, spec='dobs v1.0', origin='', symbol=[], who=None, enstags=None):
                            if num == 0:
                                data += '0 '
                            else:
-                                data += '%1.16e ' % (num)
+                                data += f'{num:1.16e} '
                    else:
                        data += '0 '
                data += '\n'
@@ -816,7 +822,7 @@ def create_dobs_string(obsl, name, spec='dobs v1.0', origin='', symbol=[], who=None, enstags=None):
        for cname in o.cov_names:
            if cname in allcov:
                if not np.array_equal(allcov[cname], o.covobs[cname].cov):
-                    raise Exception('Inconsistent covariance matrices for %s!' % (cname))
+                    raise Exception(f'Inconsistent covariance matrices for {cname}!')
            else:
                allcov[cname] = o.covobs[cname].cov
    pd['cdata'] = []
@@ -828,12 +834,12 @@ def create_dobs_string(obsl, name, spec='dobs v1.0', origin='', symbol=[], who=None, enstags=None):
        if allcov[cname].shape == ():
            ncov = 1
            covd['layout'] = '1 1 f'
-            covd['#data'] = '%1.14e' % (allcov[cname])
+            covd['#data'] = f'{allcov[cname]:1.14e}'
        else:
            shape = allcov[cname].shape
            assert (shape[0] == shape[1])
            ncov = shape[0]
-            covd['layout'] = '%d %d f' % (ncov, ncov)
+            covd['layout'] = f'{ncov} {ncov} f'
            ds = ''
            for i in range(ncov):
                for j in range(ncov):
@@ -841,19 +847,19 @@ def create_dobs_string(obsl, name, spec='dobs v1.0', origin='', symbol=[], who=None, enstags=None):
                    if val == 0:
                        ds += '0 '
                    else:
-                        ds += '%1.14e ' % (val)
+                        ds += f'{val:1.14e} '
                ds += '\n'
            covd['#data'] = ds

        gradd = {'id': 'grad'}
-        gradd['layout'] = '%d f%d' % (ncov, len(obsl))
+        gradd['layout'] = f'{ncov} f{len(obsl)}'
        ds = ''
        for i in range(ncov):
            for o in obsl:
                if cname in o.covobs:
                    val = o.covobs[cname].grad[i].item()
                    if val != 0:
-                        ds += '%1.14e ' % (val)
+                        ds += f'{val:1.14e} '
                    else:
                        ds += '0 '
                else:
@@ -867,7 +873,7 @@ def create_dobs_string(obsl, name, spec='dobs v1.0', origin='', symbol=[], who=None, enstags=None):
    return rs


-def write_dobs(obsl, fname, name, spec='dobs v1.0', origin='', symbol=[], who=None, enstags=None, gz=True):
+def write_dobs(obsl, fname, name, spec='dobs v1.0', origin='', symbol=None, who=None, enstags=None, gz=True):
    """Export a list of Obs or structures containing Obs to a .xml.gz file
    according to the Zeuthen dobs format.
@@ -1,11 +1,13 @@
 import os
 from collections import Counter
-import h5py
 from pathlib import Path
+
+import h5py
 import numpy as np
-from ..obs import Obs, CObs
+
 from ..correlators import Corr
 from ..dirac import epsilon_tensor_rank4
+from ..obs import CObs, Obs
 from .misc import fit_t0
@@ -239,13 +241,13 @@ def extract_t0_hd5(path, filestem, ens_id, obs='Clover energy density', fit_rang
        raise Exception("Not all flow times were equal.")

    t2E_dict = {}
-    for t2, dat in zip(corr_data["FlowObservables_0"][0], corr_data[obs_key].T):
+    for t2, dat in zip(corr_data["FlowObservables_0"][0], corr_data[obs_key].T, strict=True):
        t2E_dict[t2] = Obs([dat], [ens_id], idl=[idx]) - 0.3

    return fit_t0(t2E_dict, fit_range, plot_fit=kwargs.get('plot_fit'))


-def read_DistillationContraction_hd5(path, ens_id, diagrams=["direct"], idl=None):
+def read_DistillationContraction_hd5(path, ens_id, diagrams=None, idl=None):
    """Read hadrons DistillationContraction hdf5 files in given directory structure

    Parameters
@@ -265,6 +267,9 @@ def read_DistillationContraction_hd5(path, ens_id, diagrams=["direct"], idl=None):
        extracted DistillationContration data
    """

+    if diagrams is None:
+        diagrams = ["direct"]
+
    res_dict = {}

    directories, idx = _get_files(path, "data", idl)
@@ -288,7 +293,7 @@ def read_DistillationContraction_hd5(path, ens_id, diagrams=["direct"], idl=None):
        corr_data[diagram] = []

        try:
-            for n_file, (hd5_file, n_traj) in enumerate(zip(file_list, list(idx))):
+            for n_file, (hd5_file, _n_traj) in enumerate(zip(file_list, list(idx), strict=True)):
                h5file = h5py.File(hd5_file)

                if n_file == 0:
@@ -486,7 +491,7 @@ def read_Bilinear_hd5(path, filestem, ens_id, idl=None):
    return result_dict


-def read_Fourquark_hd5(path, filestem, ens_id, idl=None, vertices=["VA", "AV"]):
+def read_Fourquark_hd5(path, filestem, ens_id, idl=None, vertices=None):
    """Read hadrons FourquarkFullyConnected hdf5 file and output an array of CObs

    Parameters
@@ -508,6 +513,9 @@ def read_Fourquark_hd5(path, filestem, ens_id, idl=None, vertices=["VA", "AV"]):
        extracted fourquark matrizes
    """

+    if vertices is None:
+        vertices = ["VA", "AV"]
+
    files, idx = _get_files(path, filestem, idl)

    mom_in = None
@@ -1,17 +1,19 @@
-import rapidjson as json
-import gzip
-import getpass
-import socket
 import datetime
+import getpass
+import gzip
 import platform
-import warnings
 import re
+import socket
+import warnings
+
 import numpy as np
-from ..obs import Obs
-from ..covobs import Covobs
-from ..correlators import Corr
-from ..misc import _assert_equal_properties
+import rapidjson as json

 from .. import version as pyerrorsversion
+from ..correlators import Corr
+from ..covobs import Covobs
+from ..misc import _assert_equal_properties
+from ..obs import Obs


 def create_json_string(ol, description='', indent=1):
@@ -93,7 +95,7 @@ def create_json_string(ol, description='', indent=1):
        _assert_equal_properties(ol)
        d = {}
        d['type'] = 'List'
-        d['layout'] = '%d' % len(ol)
+        d['layout'] = f'{len(ol)}'
        taglist = [o.tag for o in ol]
        if np.any([tag is not None for tag in taglist]):
            d['tag'] = taglist
@@ -167,7 +169,7 @@ def create_json_string(ol, description='', indent=1):
        ol = [ol]

    d = {}
-    d['program'] = 'pyerrors %s' % (pyerrorsversion.__version__)
+    d['program'] = f'pyerrors {pyerrorsversion.__version__}'
    d['version'] = '1.1'
    d['who'] = getpass.getuser()
    d['date'] = datetime.datetime.now().astimezone().strftime('%Y-%m-%d %H:%M:%S %z')
@@ -209,7 +211,7 @@ def create_json_string(ol, description='', indent=1):
        elif isinstance(obj, np.floating):
            return float(obj)
        else:
-            raise ValueError('%r is not JSON serializable' % (obj,))
+            raise ValueError(f'{obj!r} is not JSON serializable')

    if indent:
        return json.dumps(d, indent=indent, ensure_ascii=False, default=_jsonifier, write_mode=json.WM_SINGLE_LINE_ARRAY)
@@ -325,7 +327,7 @@ def _parse_json_dict(json_dict, verbose=True, full_output=False):
    def get_Obs_from_dict(o):
        layouts = o.get('layout', '1').strip()
        if layouts != '1':
-            raise Exception("layout is %s has to be 1 for type Obs." % (layouts), RuntimeWarning)
+            raise Exception(f"layout is {layouts} has to be 1 for type Obs.", RuntimeWarning)

        values = o['value']
        od = _gen_obsd_from_datad(o.get('data', {}))
@@ -433,11 +435,11 @@ def _parse_json_dict(json_dict, verbose=True, full_output=False):
    date = json_dict.get('date', '')
    host = json_dict.get('host', '')
    if prog and verbose:
-        print('Data has been written using %s.' % (prog))
+        print(f'Data has been written using {prog}.')
    if version and verbose:
-        print('Format version %s' % (version))
+        print(f'Format version {version}')
    if np.any([who, date, host] and verbose):
-        print('Written by %s on %s on host %s' % (who, date, host))
+        print(f'Written by {who} on {date} on host {host}')
    description = json_dict.get('description', '')
    if description and verbose:
        print()
@@ -542,8 +544,8 @@ def load_json(fname, verbose=True, gz=True, full_output=False):
            d = json.load(fin)
    else:
        if fname.endswith('.gz'):
-            warnings.warn("Trying to read from %s without unzipping!" % fname, UserWarning)
-        with open(fname, 'r', encoding='utf-8') as fin:
+            warnings.warn(f"Trying to read from {fname} without unzipping!", UserWarning, stacklevel=2)
+        with open(fname, encoding='utf-8') as fin:
            d = json.loads(fin.read())

    return _parse_json_dict(d, verbose, full_output)
@@ -582,11 +584,11 @@ def _ol_from_dict(ind, reps='DICTOBS'):
                v = list_replace_obs(v)
            elif isinstance(v, obstypes):
                ol.append(v)
-                v = reps + '%d' % (counter)
+                v = reps + f'{counter}'
                counter += 1
            elif isinstance(v, str):
-                if bool(re.match(r'%s[0-9]+' % (reps), v)):
-                    raise Exception('Dict contains string %s that matches the placeholder! %s Cannot be safely exported.' % (v, reps))
+                if bool(re.match(rf'{reps}[0-9]+', v)):
+                    raise Exception(f'Dict contains string {v} that matches the placeholder! {reps} Cannot be safely exported.')
            x[k] = v
        return x
@@ -602,11 +604,11 @@ def _ol_from_dict(ind, reps='DICTOBS'):
                e = dict_replace_obs(e)
            elif isinstance(e, obstypes):
                ol.append(e)
-                e = reps + '%d' % (counter)
+                e = reps + f'{counter}'
                counter += 1
            elif isinstance(e, str):
-                if bool(re.match(r'%s[0-9]+' % (reps), e)):
-                    raise Exception('Dict contains string %s that matches the placeholder! %s Cannot be safely exported.' % (e, reps))
+                if bool(re.match(rf'{reps}[0-9]+', e)):
+                    raise Exception(f'Dict contains string {e} that matches the placeholder! {reps} Cannot be safely exported.')
            x.append(e)
        return x
@@ -617,7 +619,7 @@ def _ol_from_dict(ind, reps='DICTOBS'):
            il.append(e)

        ol.append(il)
-        x = reps + '%d' % (counter)
+        x = reps + f'{counter}'
        counter += 1
        return x
@@ -697,7 +699,7 @@ def _od_from_list_and_dict(ol, ind, reps='DICTOBS'):
                v = dict_replace_string(v)
            elif isinstance(v, list):
                v = list_replace_string(v)
-            elif isinstance(v, str) and bool(re.match(r'%s[0-9]+' % (reps), v)):
+            elif isinstance(v, str) and bool(re.match(rf'{reps}[0-9]+', v)):
                index = int(v[len(reps):])
                v = ol[index]
                counter += 1
@@ -712,7 +714,7 @@ def _od_from_list_and_dict(ol, ind, reps='DICTOBS'):
                e = list_replace_string(e)
            elif isinstance(e, dict):
                e = dict_replace_string(e)
-            elif isinstance(e, str) and bool(re.match(r'%s[0-9]+' % (reps), e)):
+            elif isinstance(e, str) and bool(re.match(rf'{reps}[0-9]+', e)):
                index = int(e[len(reps):])
                e = ol[index]
                counter += 1
@@ -1,13 +1,15 @@
-import os
 import fnmatch
+import os
 import re
 import struct
 import warnings
-import numpy as np  # Thinly-wrapped numpy
+
 import matplotlib.pyplot as plt
+import numpy as np  # Thinly-wrapped numpy
 from matplotlib import gridspec
-from ..obs import Obs
+
 from ..fits import fit_lin
+from ..obs import Obs


 def fit_t0(t2E_dict, fit_range, plot_fit=False, observable='t0'):
@@ -54,7 +56,7 @@ def fit_t0(t2E_dict, fit_range, plot_fit=False, observable='t0'):
    [o.gamma_method() for o in y]

    if len(x) < 2 * fit_range:
-        warnings.warn('Fit range smaller than expected! Fitting from %1.2e to %1.2e' % (x[0], x[-1]))
+        warnings.warn(f'Fit range smaller than expected! Fitting from {x[0]:1.2e} to {x[-1]:1.2e}', stacklevel=2)

    fit_result = fit_lin(x, y)
@@ -114,7 +116,7 @@ def read_pbp(path, prefix, **kwargs):
    """

    ls = []
-    for (dirpath, dirnames, filenames) in os.walk(path):
+    for (_dirpath, _dirnames, filenames) in os.walk(path):
        ls.extend(filenames)
        break
@@ -161,24 +163,24 @@ def read_pbp(path, prefix, **kwargs):
            t = fp.read(4)  # number of reweighting factors
            if rep == 0:
                nrw = struct.unpack('i', t)[0]
-                for k in range(nrw):
+                for _ in range(nrw):
                    deltas.append([])
            else:
                if nrw != struct.unpack('i', t)[0]:
                    raise Exception('Error: different number of factors for replicum', rep)

-            for k in range(nrw):
+            for _ in range(nrw):
                tmp_array.append([])

            # This block is necessary for openQCD1.6 ms1 files
            nfct = []
-            for i in range(nrw):
+            for _ in range(nrw):
                t = fp.read(4)
                nfct.append(struct.unpack('i', t)[0])
            print('nfct: ', nfct)  # Hasenbusch factor, 1 for rat reweighting

            nsrc = []
-            for i in range(nrw):
+            for _ in range(nrw):
                t = fp.read(4)
                nsrc.append(struct.unpack('i', t)[0])
@@ -1,11 +1,12 @@
-import os
 import fnmatch
+import os
 import struct
 import warnings
+
 import numpy as np  # Thinly-wrapped numpy
-from ..obs import Obs
-from ..obs import CObs
+
 from ..correlators import Corr
+from ..obs import CObs, Obs
 from .misc import fit_t0
 from .utils import sort_names
@@ -122,27 +123,27 @@ def read_rwms(path, prefix, version='2.0', names=None, **kwargs):
                nrw = struct.unpack('i', t)[0]
                if version == '2.0':
                    nrw = int(nrw / 2)
-                for k in range(nrw):
+                for _ in range(nrw):
                    deltas.append([])
            else:
                if ((nrw != struct.unpack('i', t)[0] and (not version == '2.0')) or (nrw != struct.unpack('i', t)[0] / 2 and version == '2.0')):
                    raise Exception('Error: different number of reweighting factors for replicum', rep)

-            for k in range(nrw):
+            for _ in range(nrw):
                tmp_array.append([])

            # This block is necessary for openQCD1.6 and openQCD2.0 ms1 files
            nfct = []
            if version in ['1.6', '2.0']:
-                for i in range(nrw):
+                for _ in range(nrw):
                    t = fp.read(4)
                    nfct.append(struct.unpack('i', t)[0])
            else:
-                for i in range(nrw):
+                for _ in range(nrw):
                    nfct.append(1)

            nsrc = []
-            for i in range(nrw):
+            for _ in range(nrw):
                t = fp.read(4)
                nsrc.append(struct.unpack('i', t)[0])
            if version == '2.0':
@@ -189,7 +190,7 @@ def read_rwms(path, prefix, version='2.0', names=None, **kwargs):
                diffmeas = configlist[-1][-1] - configlist[-1][-2]
                configlist[-1] = [item // diffmeas for item in configlist[-1]]
                if configlist[-1][0] > 1 and diffmeas > 1:
-                    warnings.warn('Assume thermalization and that the first measurement belongs to the first config.')
+                    warnings.warn('Assume thermalization and that the first measurement belongs to the first config.', stacklevel=2)
                    offset = configlist[-1][0] - 1
                    configlist[-1] = [item - offset for item in configlist[-1]]
@@ -199,8 +200,9 @@ def read_rwms(path, prefix, version='2.0', names=None, **kwargs):
            try:
                r_start_index.append(configlist[-1].index(r_start[rep]))
            except ValueError:
-                raise Exception('Config %d not in file with range [%d, %d]' % (
-                    r_start[rep], configlist[-1][0], configlist[-1][-1])) from None
+                raise Exception(
+                    f'Config {r_start[rep]} not in file with range [{configlist[-1][0]}, {configlist[-1][-1]}]'
+                ) from None

        if r_stop[rep] is None:
            r_stop_index.append(len(configlist[-1]) - 1)
@@ -208,8 +210,9 @@ def read_rwms(path, prefix, version='2.0', names=None, **kwargs):
            try:
                r_stop_index.append(configlist[-1].index(r_stop[rep]))
            except ValueError:
-                raise Exception('Config %d not in file with range [%d, %d]' % (
-                    r_stop[rep], configlist[-1][0], configlist[-1][-1])) from None
+                raise Exception(
+                    f'Config {r_stop[rep]} not in file with range [{configlist[-1][0]}, {configlist[-1][-1]}]'
+                ) from None

        for k in range(nrw):
            deltas[k].append(tmp_array[k][r_start_index[rep]:r_stop_index[rep] + 1][::r_step])
@@ -218,7 +221,7 @@ def read_rwms(path, prefix, version='2.0', names=None, **kwargs):
        raise Exception('Irregular spaced data in input file!', [len(np.unique(np.diff(cl))) for cl in configlist])
    stepsizes = [list(np.unique(np.diff(cl)))[0] for cl in configlist]
    if np.any([step != 1 for step in stepsizes]):
-        warnings.warn('Stepsize between configurations is greater than one!' + str(stepsizes), RuntimeWarning)
+        warnings.warn('Stepsize between configurations is greater than one!' + str(stepsizes), RuntimeWarning, stacklevel=2)

    print(',', nrw, 'reweighting factors with', nsrc, 'sources')
    result = []
@@ -371,7 +374,7 @@ def _extract_flowed_energy_density(path, prefix, dtr_read, xmin, spatial_extent,
                t = fp.read(8 * tmax * (nn + 1))

            Ysum.append([])
-            for i, item in enumerate(Ysl):
+            for _i, item in enumerate(Ysl):
                Ysum[-1].append([np.mean(item[current + xmin:
                                              current + tmax - xmin])
                                 for current in range(0, len(item), tmax)])
@@ -379,7 +382,7 @@ def _extract_flowed_energy_density(path, prefix, dtr_read, xmin, spatial_extent,
            diffmeas = configlist[-1][-1] - configlist[-1][-2]
            configlist[-1] = [item // diffmeas for item in configlist[-1]]
            if kwargs.get('assume_thermalization', True) and configlist[-1][0] > 1:
-                warnings.warn('Assume thermalization and that the first measurement belongs to the first config.')
+                warnings.warn('Assume thermalization and that the first measurement belongs to the first config.', stacklevel=2)
                offset = configlist[-1][0] - 1
                configlist[-1] = [item - offset for item in configlist[-1]]
@@ -389,8 +392,9 @@ def _extract_flowed_energy_density(path, prefix, dtr_read, xmin, spatial_extent,
            try:
                r_start_index.append(configlist[-1].index(r_start[rep]))
            except ValueError:
-                raise Exception('Config %d not in file with range [%d, %d]' % (
-                    r_start[rep], configlist[-1][0], configlist[-1][-1])) from None
+                raise Exception(
+                    f'Config {r_start[rep]} not in file with range [{configlist[-1][0]}, {configlist[-1][-1]}]'
+                ) from None

        if r_stop[rep] is None:
            r_stop_index.append(len(configlist[-1]) - 1)
@@ -398,14 +402,15 @@ def _extract_flowed_energy_density(path, prefix, dtr_read, xmin, spatial_extent,
            try:
                r_stop_index.append(configlist[-1].index(r_stop[rep]))
            except ValueError:
-                raise Exception('Config %d not in file with range [%d, %d]' % (
-                    r_stop[rep], configlist[-1][0], configlist[-1][-1])) from None
+                raise Exception(
+                    f'Config {r_stop[rep]} not in file with range [{configlist[-1][0]}, {configlist[-1][-1]}]'
+                ) from None

    if np.any([len(np.unique(np.diff(cl))) != 1 for cl in configlist]):
        raise Exception('Irregular spaced data in input file!', [len(np.unique(np.diff(cl))) for cl in configlist])
    stepsizes = [list(np.unique(np.diff(cl)))[0] for cl in configlist]
    if np.any([step != 1 for step in stepsizes]):
-        warnings.warn('Stepsize between configurations is greater than one!' + str(stepsizes), RuntimeWarning)
+        warnings.warn('Stepsize between configurations is greater than one!' + str(stepsizes), RuntimeWarning, stacklevel=2)

    idl = [range(configlist[rep][r_start_index[rep]], configlist[rep][r_stop_index[rep]] + 1, r_step) for rep in range(replica)]
    E_dict = {}
@@ -596,7 +601,9 @@ def _parse_array_openQCD2(d, n, size, wa, quadrupel=False):
    return arr


-def _find_files(path, prefix, postfix, ext, known_files=[]):
+def _find_files(path, prefix, postfix, ext, known_files=None):
+    if known_files is None:
+        known_files = []
    found = []
    files = []
@@ -611,7 +618,7 @@ def _find_files(path, prefix, postfix, ext, known_files=[]):

    pattern = prefix + "*" + postfix + ext

-    for (dirpath, dirnames, filenames) in os.walk(path + "/"):
+    for (_dirpath, _dirnames, filenames) in os.walk(path + "/"):
        found.extend(filenames)
        break
@@ -640,7 +647,7 @@ def _read_array_openQCD2(fp):
    t = fp.read(4)
    d = struct.unpack('i', t)[0]
    t = fp.read(4 * d)
-    n = struct.unpack('%di' % (d), t)
+    n = struct.unpack(f'{d}i', t)
    t = fp.read(4)
    size = struct.unpack('i', t)[0]
    if size == 4:
@@ -656,7 +663,7 @@ def _read_array_openQCD2(fp):
        m *= n[i]

    t = fp.read(m * size)
-    tmp = struct.unpack('%d%s' % (m, types), t)
+    tmp = struct.unpack(f'{m}{types}', t)

    arr = _parse_array_openQCD2(d, n, size, tmp, quadrupel=True)
    return {'d': d, 'n': n, 'size': size, 'arr': arr}
@@ -922,7 +929,7 @@ def _read_flow_obs(path, prefix, c, dtr_cnfg=1, version="openQCD", obspos=0, sum
            cmax = header2[1]  # highest value of c used

            if c > cmax:
-                raise Exception('Flow has been determined between c=0 and c=%lf with tolerance %lf' % (cmax, tol))
+                raise Exception(f'Flow has been determined between c=0 and c={cmax:f} with tolerance {tol:f}')

            if (zthfl == 2):
                nfl = 2  # number of flows
@@ -936,7 +943,7 @@ def _read_flow_obs(path, prefix, c, dtr_cnfg=1, version="openQCD", obspos=0, sum
                    break
                traj_list.append(struct.unpack('i', t)[0])  # trajectory number when measurement was done

-                for j in range(ncs + 1):
+                for _j in range(ncs + 1):
                    for i in range(iobs):
                        t = fp.read(8 * tmax)
                        if (i == obspos):  # determines the flow observable -> i=0 <-> Zeuthen flow
@@ -984,8 +991,7 @@ def _read_flow_obs(path, prefix, c, dtr_cnfg=1, version="openQCD", obspos=0, sum
            configlist.append([tr // steps // dtr_cnfg for tr in traj_list])
            if configlist[-1][0] > 1:
                offset = configlist[-1][0] - 1
-                warnings.warn('Assume thermalization and that the first measurement belongs to the first config. Offset = %d configs (%d trajectories / cycles)' % (
-                    offset, offset * steps))
+                warnings.warn(f'Assume thermalization and that the first measurement belongs to the first config. Offset = {offset} configs ({offset * steps} trajectories / cycles)', stacklevel=2)
                configlist[-1] = [item - offset for item in configlist[-1]]

        if r_start[rep] is None:
@@ -994,8 +1000,9 @@ def _read_flow_obs(path, prefix, c, dtr_cnfg=1, version="openQCD", obspos=0, sum
            try:
                r_start_index.append(configlist[-1].index(r_start[rep]))
            except ValueError:
-                raise Exception('Config %d not in file with range [%d, %d]' % (
-                    r_start[rep], configlist[-1][0], configlist[-1][-1])) from None
+                raise Exception(
+                    f'Config {r_start[rep]} not in file with range [{configlist[-1][0]}, {configlist[-1][-1]}]'
+                ) from None

        if r_stop[rep] is None:
            r_stop_index.append(len(configlist[-1]) - 1)
@@ -1003,8 +1010,9 @@ def _read_flow_obs(path, prefix, c, dtr_cnfg=1, version="openQCD", obspos=0, sum
            try:
                r_stop_index.append(configlist[-1].index(r_stop[rep]))
            except ValueError:
-                raise Exception('Config %d not in file with range [%d, %d]' % (
-                    r_stop[rep], configlist[-1][0], configlist[-1][-1])) from None
+                raise Exception(
+                    f'Config {r_stop[rep]} not in file with range [{configlist[-1][0]}, {configlist[-1][-1]}]'
+                ) from None

        if version in ['sfqcd']:
            cstepsize = cmax / ncs
@ -1014,7 +1022,7 @@ def _read_flow_obs(path, prefix, c, dtr_cnfg=1, version="openQCD", obspos=0, sum
|
|||
index_aim = round(t_aim / eps / dn)
|
||||
|
||||
Q_sum = []
|
||||
for i, item in enumerate(Q):
|
||||
for item in Q:
|
||||
if sum_t is True:
|
||||
Q_sum.append([sum(item[current:current + tmax])
|
||||
for current in range(0, len(item), tmax)])
|
||||
|
|
@ -1038,9 +1046,9 @@ def _read_flow_obs(path, prefix, c, dtr_cnfg=1, version="openQCD", obspos=0, sum
|
|||
if "names" not in kwargs:
|
||||
try:
|
||||
idx = truncated_file.index('r')
|
||||
except Exception:
|
||||
except Exception as err:
|
||||
if "names" not in kwargs:
|
||||
raise Exception("Automatic recognition of replicum failed, please enter the key word 'names'.")
|
||||
raise Exception("Automatic recognition of replicum failed, please enter the key word 'names'.") from err
|
||||
ens_name = truncated_file[:idx]
|
||||
rep_names.append(ens_name + '|' + truncated_file[idx:].split(".")[0])
|
||||
else:
|
||||
|
|
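The `except ... as err` / `raise ... from err` rewrites follow bugbear B904: an exception raised inside an `except` block should state its cause. `from err` keeps the original traceback chained, while `from None` (as in the config-range checks above) deliberately suppresses it. A hypothetical sketch of the chained variant:

    def parse_config_number(name):
        try:
            return int(name[3:])
        except ValueError as err:
            # B904: chain the original ValueError instead of swallowing it
            raise Exception(f"Couldn't parse idl from file {name}") from err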
@@ -1254,7 +1262,7 @@ def read_ms5_xsf(path, prefix, qc, corr, sep="r", **kwargs):
             cnfgs.append([])
             realsamples.append([])
             imagsamples.append([])
-            for t in range(tmax):
+            for _ in range(tmax):
                 realsamples[repnum].append([])
                 imagsamples[repnum].append([])
             if 'idl' in kwargs:
@@ -1289,7 +1297,7 @@ def read_ms5_xsf(path, prefix, qc, corr, sep="r", **kwargs):
             if expected_idl[repnum] == left_idl:
                 raise ValueError("None of the idls searched for were found in replikum of file " + file)
             elif len(left_idl) > 0:
-                warnings.warn('Could not find idls ' + str(left_idl) + ' in replikum of file ' + file, UserWarning)
+                warnings.warn('Could not find idls ' + str(left_idl) + ' in replikum of file ' + file, UserWarning, stacklevel=2)
         repnum += 1
     s = "Read correlator " + corr + " from " + str(repnum) + " replika with idls" + str(realsamples[0][t])
     for rep in range(1, repnum):
@@ -1,12 +1,14 @@
-import warnings
 import gzip
 import sqlite3
+import warnings
 from contextlib import closing
-import pandas as pd
-from ..obs import Obs
-from ..correlators import Corr
-from .json import create_json_string, import_json_string
+
 import numpy as np
+import pandas as pd
+
+from ..correlators import Corr
+from ..obs import Obs
+from .json import create_json_string, import_json_string


 def to_sql(df, table_name, db, if_exists='fail', gz=True, **kwargs):
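The reordered import block above is the work of ruff's isort-style "I" rules: standard-library, third-party and local (relative) imports are separated into blank-line-delimited groups, each sorted alphabetically. Schematically:

    import gzip       # standard library, first group
    import warnings

    import numpy as np    # third-party packages, second group
    import pandas as pd

    # local package imports form the last group, e.g.:
    # from ..obs import Obs  (only resolves inside the package)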
@@ -81,7 +83,7 @@ def dump_df(df, fname, gz=True):
     if not serialize:
         if all(isinstance(entry, (int, np.integer, float, np.floating)) for entry in df[column]):
             if any([np.isnan(entry) for entry in df[column]]):
-                warnings.warn("nan value in column " + column + " will be replaced by None", UserWarning)
+                warnings.warn("nan value in column " + column + " will be replaced by None", UserWarning, stacklevel=2)

     out = _serialize_df(df, gz=False)

@@ -124,7 +126,7 @@ def load_df(fname, auto_gamma=False, gz=True):
             re_import = pd.read_csv(f, keep_default_na=False)
     else:
         if fname.endswith('.gz'):
-            warnings.warn("Trying to read from %s without unzipping!" % fname, UserWarning)
+            warnings.warn(f"Trying to read from {fname} without unzipping!", UserWarning, stacklevel=2)
         re_import = pd.read_csv(fname, keep_default_na=False)

     return _deserialize_df(re_import, auto_gamma=auto_gamma)
@@ -1,12 +1,13 @@
-import os
 import fnmatch
-import re
-import numpy as np  # Thinly-wrapped numpy
-from ..obs import Obs
-from .utils import sort_names, check_idl
 import itertools
+import os
+import re
 import warnings
+
+import numpy as np  # Thinly-wrapped numpy
+
+from ..obs import Obs
+from .utils import check_idl, sort_names

 sep = "/"

@@ -76,7 +77,7 @@ def read_sfcf(path, prefix, name, quarks='.*', corr_type="bi", noffset=0, wf=0,
     return ret[name][quarks][str(noffset)][str(wf)][str(wf2)]


-def read_sfcf_multi(path, prefix, name_list, quarks_list=['.*'], corr_type_list=['bi'], noffset_list=[0], wf_list=[0], wf2_list=[0], version="1.0c", cfg_separator="n", cfg_func=None, silent=False, keyed_out=False, **kwargs):
+def read_sfcf_multi(path, prefix, name_list, quarks_list=None, corr_type_list=None, noffset_list=None, wf_list=None, wf2_list=None, version="1.0c", cfg_separator="n", cfg_func=None, silent=False, keyed_out=False, **kwargs):
     """Read sfcf files from given folder structure.

     Parameters
@@ -141,6 +142,17 @@ def read_sfcf_multi(path, prefix, name_list, quarks_list=['.*'], corr_type_list=
         dict[name][quarks][offset][wf][wf2] = list[Obs]
     """

+    if quarks_list is None:
+        quarks_list = ['.*']
+    if corr_type_list is None:
+        corr_type_list = ['bi']
+    if noffset_list is None:
+        noffset_list = [0]
+    if wf_list is None:
+        wf_list = [0]
+    if wf2_list is None:
+        wf2_list = [0]
+
     if kwargs.get('im'):
         im = 1
         part = 'imaginary'
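Replacing the list defaults with `None` plus an explicit fallback fixes bugbear B006: a mutable default is created once at function-definition time and then shared between all calls. A self-contained sketch (function names are illustrative):

    def bad(values=[]):        # one shared list for every call
        values.append(1)
        return values

    def good(values=None):
        if values is None:     # a fresh list per call
            values = []
        values.append(1)
        return values

    assert bad() == [1] and bad() == [1, 1]   # state leaks between calls
    assert good() == [1] and good() == [1]    # no leakage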
@@ -167,7 +179,7 @@ def read_sfcf_multi(path, prefix, name_list, quarks_list=['.*'], corr_type_list=
     if "replica" in kwargs:
         ls = kwargs.get("replica")
     else:
-        for (dirpath, dirnames, filenames) in os.walk(path):
+        for (_dirpath, dirnames, filenames) in os.walk(path):
             if not appended:
                 ls.extend(dirnames)
             else:
@@ -214,7 +226,7 @@ def read_sfcf_multi(path, prefix, name_list, quarks_list=['.*'], corr_type_list=

     # setup dict structures
     intern = {}
-    for name, corr_type in zip(name_list, corr_type_list):
+    for name, corr_type in zip(name_list, corr_type_list, strict=True):
         intern[name] = {}
         b2b, single = _extract_corr_type(corr_type)
         intern[name]["b2b"] = b2b
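`zip(..., strict=True)` (bugbear B905, available from Python 3.10, which matches the new `target-version` below) turns silent truncation at the shortest iterable into an error:

    names = ["f_A", "f_P"]
    types = ["bi", "bi"]
    assert list(zip(names, types, strict=True)) == [("f_A", "bi"), ("f_P", "bi")]

    try:
        list(zip(names, ["bi"], strict=True))  # length mismatch
    except ValueError:
        pass  # raised instead of silently dropping "f_P"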
@@ -236,7 +248,7 @@ def read_sfcf_multi(path, prefix, name_list, quarks_list=['.*'], corr_type_list=

     internal_ret_dict = {}
     needed_keys = []
-    for name, corr_type in zip(name_list, corr_type_list):
+    for name, corr_type in zip(name_list, corr_type_list, strict=True):
         b2b, single = _extract_corr_type(corr_type)
         if b2b:
             needed_keys.extend(_lists2key([name], quarks_list, noffset_list, wf_list, wf2_list))
@@ -282,8 +294,8 @@ def read_sfcf_multi(path, prefix, name_list, quarks_list=['.*'], corr_type_list=
                         rep_idl.append(cfg_func(cfg, *cfg_func_args))
                     else:
                         rep_idl.append(int(cfg[3:]))
-                except Exception:
-                    raise Exception("Couldn't parse idl from directory, problem with file " + cfg)
+                except Exception as err:
+                    raise Exception("Couldn't parse idl from directory, problem with file " + cfg) from err
             rep_idl.sort()
             # maybe there is a better way to print the idls
             if not silent:
@@ -316,7 +328,7 @@ def read_sfcf_multi(path, prefix, name_list, quarks_list=['.*'], corr_type_list=
             # preparing the datastructure
             # the correlators get parsed into...
             deltas = []
-            for j in range(intern[name]["T"]):
+            for _j in range(intern[name]["T"]):
                 deltas.append([])
             internal_ret_dict[sep.join([name, key])] = deltas

@@ -365,7 +377,7 @@ def read_sfcf_multi(path, prefix, name_list, quarks_list=['.*'], corr_type_list=
                 T, rep_idl, rep_data = _read_append_rep(filename, pattern, intern[name]['b2b'], im, intern[name]['single'], cfg_func, cfg_func_args)
                 if rep == 0:
                     intern[name]['T'] = T
-                    for t in range(intern[name]['T']):
+                    for _ in range(intern[name]['T']):
                         deltas.append([])
                 for t in range(intern[name]['T']):
                     deltas[t].append(rep_data[t])
@@ -395,7 +407,7 @@ def read_sfcf_multi(path, prefix, name_list, quarks_list=['.*'], corr_type_list=
                 result.append(Obs(internal_ret_dict[key][t], new_names, idl=idl))
             result_dict[key] = result
     else:
-        for name, corr_type in zip(name_list, corr_type_list):
+        for name, corr_type in zip(name_list, corr_type_list, strict=True):
             result_dict[name] = {}
             for quarks in quarks_list:
                 result_dict[name][quarks] = {}
@@ -472,12 +484,14 @@ def _extract_corr_type(corr_type):
     return b2b, single


-def _find_files(rep_path, prefix, compact, files=[]):
+def _find_files(rep_path, prefix, compact, files=None):
+    if files is None:
+        files = []
     sub_ls = []
     if not files == []:
         files.sort(key=lambda x: int(re.findall(r'\d+', x)[-1]))
     else:
-        for (dirpath, dirnames, filenames) in os.walk(rep_path):
+        for (_dirpath, dirnames, filenames) in os.walk(rep_path):
             if compact:
                 sub_ls.extend(filenames)
             else:
@@ -516,7 +530,7 @@ def _make_pattern(version, name, noffset, wf, wf2, b2b, quarks):
 def _find_correlator(file_name, version, pattern, b2b, silent=False):
     T = 0

-    with open(file_name, "r") as my_file:
+    with open(file_name) as my_file:

         content = my_file.read()
         match = re.search(pattern, content)
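Dropping the explicit `"r"` is pyupgrade UP015: reading is `open()`'s default mode, so spelling it out is redundant (writing, of course, still needs its mode). A sketch with a throwaway file name:

    with open("example.txt", "w") as fh:   # writing still needs a mode
        fh.write("content\n")
    with open("example.txt") as fh:        # was: open("example.txt", "r")
        assert fh.read() == "content\n"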
@@ -578,7 +592,7 @@ def _read_compact_rep(path, rep, sub_ls, intern, needed_keys, im):
     for key in needed_keys:
         name = _key2specs(key)[0]
         deltas = []
-        for t in range(intern[name]["T"]):
+        for _ in range(intern[name]["T"]):
             deltas.append(np.zeros(no_cfg))
         return_vals[key] = deltas

@@ -598,7 +612,7 @@ def _read_chunk_data(chunk, start_read, T, corr_line, b2b, pattern, im, single):
     for li in chunk[corr_line + 1:corr_line + 6 + b2b]:
         found_pat += li
     if re.search(pattern, found_pat):
-        for t, line in enumerate(chunk[start_read:start_read + T]):
+        for _t, line in enumerate(chunk[start_read:start_read + T]):
             floats = list(map(float, line.split()))
             data.append(floats[im + 1 - single])
     return data
@@ -623,7 +637,7 @@ def _check_append_rep(content, start_list):
         data_len_list.append(len(chunk) - header_len)

     if len(set(header_len_list)) > 1:
-        warnings.warn("Not all headers have the same length. Data parts do.")
+        warnings.warn("Not all headers have the same length. Data parts do.", stacklevel=2)
         has_regular_len_heads = False

     if len(set(data_len_list)) > 1:
@@ -654,7 +668,7 @@ def _read_chunk_structure(chunk, pattern, b2b):


 def _read_append_rep(filename, pattern, b2b, im, single, idl_func, cfg_func_args):
-    with open(filename, 'r') as fp:
+    with open(filename) as fp:
         content = fp.readlines()
     chunk_start_lines = []
     for linenumber, line in enumerate(content):
@@ -665,8 +679,8 @@ def _read_append_rep(filename, pattern, b2b, im, single, idl_func, cfg_func_args
     chunk = content[:chunk_start_lines[1]]
     try:
         gauge_line, corr_line, start_read, T = _read_chunk_structure(chunk, pattern, b2b)
-    except ValueError:
-        raise ValueError("Did not find pattern\n", pattern, "\nin\n", filename, "lines", 1, "to", chunk_start_lines[1] + 1)
+    except ValueError as err:
+        raise ValueError("Did not find pattern\n", pattern, "\nin\n", filename, "lines", 1, "to", chunk_start_lines[1] + 1) from err
     # if has_regular_len_heads is true, all other chunks should follow the same structure
     rep_idl = []
     rep_data = []
@@ -682,8 +696,8 @@ def _read_append_rep(filename, pattern, b2b, im, single, idl_func, cfg_func_args
             gauge_line, corr_line, start_read, T = _read_chunk_structure(chunk, pattern, b2b)
         try:
             idl = idl_func(chunk[gauge_line], *cfg_func_args)
-        except Exception:
-            raise Exception("Couldn't parse idl from file", filename, ", problem with chunk of lines", start + 1, "to", stop + 1)
+        except Exception as err:
+            raise Exception("Couldn't parse idl from file", filename, ", problem with chunk of lines", start + 1, "to", stop + 1) from err
         data = _read_chunk_data(chunk, start_read, T, corr_line, b2b, pattern, im, single)
         rep_idl.append(idl)
         rep_data.append(data)
@@ -702,8 +716,8 @@ def _get_rep_names(ls, ens_name=None, rep_sep='r'):
     for entry in ls:
         try:
             idx = entry.index(rep_sep)
-        except Exception:
-            raise Exception("Automatic recognition of replicum failed, please enter the key word 'names'.")
+        except Exception as err:
+            raise Exception("Automatic recognition of replicum failed, please enter the key word 'names'.") from err

         if ens_name:
             new_names.append(ens_name + '|' + entry[idx:])
@@ -722,8 +736,8 @@ def _get_appended_rep_names(ls, prefix, name, ens_name=None, rep_sep='r'):
         myentry = entry[:-len(name) - 1]
         try:
             idx = myentry.index(rep_sep)
-        except Exception:
-            raise Exception("Automatic recognition of replicum failed, please enter the key word 'names'.")
+        except Exception as err:
+            raise Exception("Automatic recognition of replicum failed, please enter the key word 'names'.") from err

         if ens_name:
             new_names.append(ens_name + '|' + entry[idx:])
@@ -1,8 +1,8 @@
 """Utilities for the input"""

-import re
 import fnmatch
 import os
+import re


 def sort_names(ll):
@@ -104,7 +104,7 @@ def check_params(path, param_hash, prefix, param_prefix="parameters_"):
     """

     ls = []
-    for (dirpath, dirnames, filenames) in os.walk(path):
+    for (_dirpath, dirnames, _filenames) in os.walk(path):
         ls.extend(dirnames)
         break
     if not ls:
@@ -120,7 +120,7 @@ def check_params(path, param_hash, prefix, param_prefix="parameters_"):
         rep_path = path + '/' + rep
         # files of replicum
         sub_ls = []
-        for (dirpath, dirnames, filenames) in os.walk(rep_path):
+        for (_dirpath, _dirnames, filenames) in os.walk(rep_path):
             sub_ls.extend(filenames)

         # filter
@@ -132,8 +132,8 @@ def check_params(path, param_hash, prefix, param_prefix="parameters_"):
         rep_nums = ''
         for file in param_files:
             with open(rep_path + '/' + file) as fp:
+                last_line = ''
                 for line in fp:
-                    pass
-                last_line = line
+                    last_line = line
             if last_line.split()[2] != param_hash:
                 rep_nums += file.split("_")[1] + ','
@@ -1,8 +1,9 @@
 import numpy as np
-from .obs import derived_observable, Obs
 from autograd import jacobian
 from scipy.integrate import quad as squad

+from .obs import Obs, derived_observable


 def quad(func, p, a, b, **kwargs):
     '''Performs a (one-dimensional) numeric integration of f(p, x) from a to b.
@@ -72,7 +73,7 @@ def quad(func, p, a, b, **kwargs):
     derivint = []
     for i in range(Np):
         if isobs[i]:
-            ifunc = np.vectorize(lambda x: jac(pval, x)[i])
+            ifunc = np.vectorize(lambda x, i=i: jac(pval, x)[i])
             derivint.append(squad(ifunc, bounds[0], bounds[1], **ikwargs)[0])

     for i in range(2):
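The `i=i` default argument fixes bugbear B023: a closure created in a loop looks the loop variable up when it is called, not when it is defined, so every lambda would otherwise see the final value of `i`. A self-contained demonstration:

    funcs_late = [lambda x: x + i for i in range(3)]
    funcs_bound = [lambda x, i=i: x + i for i in range(3)]

    assert [f(0) for f in funcs_late] == [2, 2, 2]    # all capture the last i
    assert [f(0) for f in funcs_bound] == [0, 1, 2]   # each keeps its own i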
@@ -1,6 +1,7 @@
-import numpy as np
 import autograd.numpy as anp  # Thinly-wrapped numpy
-from .obs import derived_observable, CObs, Obs, import_jackknife
+import numpy as np
+
+from .obs import CObs, Obs, derived_observable, import_jackknife


 def matmul(*operands):
@@ -24,7 +25,7 @@ def matmul(*operands):
         def multi_dot(operands, part):
             stack_r = operands[0]
             stack_i = operands[1]
-            for op_r, op_i in zip(operands[2::2], operands[3::2]):
+            for op_r, op_i in zip(operands[2::2], operands[3::2], strict=True):
                 tmp_r = stack_r @ op_r - stack_i @ op_i
                 tmp_i = stack_r @ op_i + stack_i @ op_r

@@ -46,7 +47,7 @@ def matmul(*operands):
         Ni = derived_observable(multi_dot_i, extended_operands, array_mode=True)

         res = np.empty_like(Nr)
-        for (n, m), entry in np.ndenumerate(Nr):
+        for (n, m), _entry in np.ndenumerate(Nr):
             res[n, m] = CObs(Nr[n, m], Ni[n, m])

         return res
@@ -134,13 +135,13 @@ def einsum(subscripts, *operands):

     def _exp_to_jack(matrix):
         base_matrix = []
-        for index, entry in np.ndenumerate(matrix):
+        for _index, entry in np.ndenumerate(matrix):
             base_matrix.append(entry.export_jackknife())
         return np.asarray(base_matrix).reshape(matrix.shape + base_matrix[0].shape)

     def _exp_to_jack_c(matrix):
         base_matrix = []
-        for index, entry in np.ndenumerate(matrix):
+        for _index, entry in np.ndenumerate(matrix):
             base_matrix.append(entry.real.export_jackknife() + 1j * entry.imag.export_jackknife())
         return np.asarray(base_matrix).reshape(matrix.shape + base_matrix[0].shape)

@@ -251,7 +252,7 @@ def _mat_mat_op(op, obs, **kwargs):
         op_A = op_big_matrix[0: dim // 2, 0: dim // 2]
         op_B = op_big_matrix[dim // 2:, 0: dim // 2]
         res = np.empty_like(op_A)
-        for (n, m), entry in np.ndenumerate(op_A):
+        for (n, m), _entry in np.ndenumerate(op_A):
             res[n, m] = CObs(op_A[n, m], op_B[n, m])
         return res
     else:
@@ -1,10 +1,12 @@
-import pickle
 import platform
-import numpy as np
-import scipy
+
 import matplotlib
 import matplotlib.pyplot as plt
+import numpy as np
+import pandas as pd
+import pickle
+import scipy

 from .obs import Obs
 from .version import __version__

@@ -37,7 +39,7 @@ def errorbar(x, y, axes=plt, **kwargs):
     """
     val = {}
     err = {}
-    for name, comp in zip(["x", "y"], [x, y]):
+    for name, comp in zip(["x", "y"], [x, y], strict=True):
         if all(isinstance(o, Obs) for o in comp):
             if not all(hasattr(o, 'e_dvalue') for o in comp):
                 [o.gamma_method() for o in comp]
@@ -120,7 +122,7 @@ def pseudo_Obs(value, dvalue, name, samples=1000):
         for _ in range(100):
             deltas = [np.random.normal(0.0, dvalue * np.sqrt(samples), samples)]
             deltas -= np.mean(deltas)
-            deltas *= dvalue / np.sqrt((np.var(deltas) / samples)) / np.sqrt(1 + 3 / samples)
+            deltas *= dvalue / np.sqrt(np.var(deltas) / samples) / np.sqrt(1 + 3 / samples)
             deltas += value
             res = Obs(deltas, [name])
             res.gamma_method(S=2, tau_exp=0)
@@ -1,7 +1,8 @@
 import numpy as np
 import scipy.linalg
+
+from .linalg import eig, svd
 from .obs import Obs
-from .linalg import svd, eig


 def matrix_pencil_method(corrs, k=1, p=None, **kwargs):
@@ -1,14 +1,16 @@
-import warnings
 import hashlib
 import pickle
-import numpy as np
+import warnings
+from itertools import groupby
+
 import autograd.numpy as anp  # Thinly-wrapped numpy
+import matplotlib.pyplot as plt
+import numdifftools as nd
+import numpy as np
 import scipy
 from autograd import jacobian
-import matplotlib.pyplot as plt
-from scipy.stats import skew, skewtest, kurtosis, kurtosistest
-import numdifftools as nd
-from itertools import groupby
+from scipy.stats import kurtosis, kurtosistest, skew, skewtest

 from .covobs import Covobs

 # Improve print output of numpy.ndarrays containing Obs objects.
@@ -100,37 +102,37 @@ class Obs:
         self.N = 0
         self.idl = {}
         if idl is not None:
-            for name, idx in sorted(zip(names, idl)):
+            for name, idx in sorted(zip(names, idl, strict=True)):
                 if isinstance(idx, range):
                     self.idl[name] = idx
                 elif isinstance(idx, (list, np.ndarray)):
                     dc = np.unique(np.diff(idx))
                     if np.any(dc < 0):
-                        raise ValueError("Unsorted idx for idl[%s] at position %s" % (name, ' '.join(['%s' % (pos + 1) for pos in np.where(np.diff(idx) < 0)[0]])))
+                        raise ValueError("Unsorted idx for idl[{}] at position {}".format(name, ' '.join(['%s' % (pos + 1) for pos in np.where(np.diff(idx) < 0)[0]])))
                     elif np.any(dc == 0):
-                        raise ValueError("Duplicate entries in idx for idl[%s] at position %s" % (name, ' '.join(['%s' % (pos + 1) for pos in np.where(np.diff(idx) == 0)[0]])))
+                        raise ValueError("Duplicate entries in idx for idl[{}] at position {}".format(name, ' '.join(['%s' % (pos + 1) for pos in np.where(np.diff(idx) == 0)[0]])))
                     if len(dc) == 1:
                         self.idl[name] = range(idx[0], idx[-1] + dc[0], dc[0])
                     else:
                         self.idl[name] = list(idx)
                 else:
-                    raise TypeError('incompatible type for idl[%s].' % (name))
+                    raise TypeError(f'incompatible type for idl[{name}].')
         else:
-            for name, sample in sorted(zip(names, samples)):
+            for name, sample in sorted(zip(names, samples, strict=True)):
                 self.idl[name] = range(1, len(sample) + 1)

         if kwargs.get("means") is not None:
-            for name, sample, mean in sorted(zip(names, samples, kwargs.get("means"))):
+            for name, sample, mean in sorted(zip(names, samples, kwargs.get("means"), strict=True)):
                 self.shape[name] = len(self.idl[name])
                 self.N += self.shape[name]
                 self.r_values[name] = mean
                 self.deltas[name] = sample
         else:
-            for name, sample in sorted(zip(names, samples)):
+            for name, sample in sorted(zip(names, samples, strict=True)):
                 self.shape[name] = len(self.idl[name])
                 self.N += self.shape[name]
                 if len(sample) != self.shape[name]:
-                    raise ValueError('Incompatible samples and idx for %s: %d vs. %d' % (name, len(sample), self.shape[name]))
+                    raise ValueError(f'Incompatible samples and idx for {name}: {len(sample)} vs. {self.shape[name]}')
                 self.r_values[name] = np.mean(sample)
                 self.deltas[name] = sample - self.r_values[name]
                 self._value += self.shape[name] * self.r_values[name]
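Note the two flavours of pyupgrade fix above: simple interpolations become f-strings, while the two "Unsorted/Duplicate idx" messages use `str.format()` because the inserted value is itself a multi-step expression. A sketch of the `.format` variant with made-up values:

    name, bad_positions = "ens1", [2, 5]
    msg = "Unsorted idx for idl[{}] at position {}".format(
        name, ' '.join(str(pos + 1) for pos in bad_positions))
    assert msg == "Unsorted idx for idl[ens1] at position 3 6"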
@@ -165,7 +167,7 @@ class Obs:
     @property
     def e_content(self):
         res = {}
-        for e, e_name in enumerate(self.e_names):
+        for _e, e_name in enumerate(self.e_names):
             res[e_name] = sorted(filter(lambda x: x.startswith(e_name + '|'), self.names))
             if e_name in self.names:
                 res[e_name].append(e_name)
@@ -225,12 +227,12 @@ class Obs:
                 if isinstance(tmp, (int, float)):
                     if tmp < 0:
                         raise ValueError(kwarg_name + ' has to be larger or equal to 0.')
-                    for e, e_name in enumerate(self.e_names):
+                    for _e, e_name in enumerate(self.e_names):
                         getattr(self, kwarg_name)[e_name] = tmp
                 else:
                     raise TypeError(kwarg_name + ' is not in proper format.')
             else:
-                for e, e_name in enumerate(self.e_names):
+                for _e, e_name in enumerate(self.e_names):
                     if e_name in getattr(Obs, kwarg_name + '_dict'):
                         getattr(self, kwarg_name)[e_name] = getattr(Obs, kwarg_name + '_dict')[e_name]
                     else:
@@ -240,7 +242,7 @@ class Obs:
         _parse_kwarg('tau_exp')
         _parse_kwarg('N_sigma')

-        for e, e_name in enumerate(self.mc_names):
+        for _e, e_name in enumerate(self.mc_names):
             gapsize = _determine_gap(self, e_content, e_name)

             r_length = []
@@ -261,7 +263,7 @@ class Obs:

             gamma_div = np.zeros(w_max)
             for r_name in e_content[e_name]:
-                gamma_div += self._calc_gamma(np.ones((self.shape[r_name])), self.idl[r_name], self.shape[r_name], w_max, fft, gapsize)
+                gamma_div += self._calc_gamma(np.ones(self.shape[r_name]), self.idl[r_name], self.shape[r_name], w_max, fft, gapsize)
             gamma_div[gamma_div < 1] = 1.0
             e_gamma[e_name] /= gamma_div[:w_max]

@@ -281,7 +283,7 @@ class Obs:
             self.e_n_dtauint[e_name] = self.e_n_tauint[e_name] * 2 * np.sqrt(np.abs(np.arange(w_max) + 0.5 - self.e_n_tauint[e_name]) / e_N)
             self.e_n_dtauint[e_name][0] = 0.0

-            def _compute_drho(i):
+            def _compute_drho(i, e_name=e_name, w_max=w_max, e_N=e_N):
                 tmp = (self.e_rho[e_name][i + 1:w_max]
                        + np.concatenate([self.e_rho[e_name][i - 1:None if i - (w_max - 1) // 2 <= 0 else (2 * i - (2 * w_max) // 2):-1],
                                          self.e_rho[e_name][1:max(1, w_max - 2 * i)]])
@@ -390,13 +392,13 @@ class Obs:
         if self.tag is not None:
             print("Description:", self.tag)
         if not hasattr(self, 'e_dvalue'):
-            print('Result\t %3.8e' % (self.value))
+            print(f'Result\t {self.value:3.8e}')
         else:
             if self.value == 0.0:
                 percentage = np.nan
             else:
                 percentage = np.abs(self._dvalue / self.value) * 100
-            print('Result\t %3.8e +/- %3.8e +/- %3.8e (%3.3f%%)' % (self.value, self._dvalue, self.ddvalue, percentage))
+            print(f'Result\t {self.value:3.8e} +/- {self._dvalue:3.8e} +/- {self.ddvalue:3.8e} ({percentage:3.3f}%)')
             if len(self.e_names) > 1:
                 print(' Ensemble errors:')
             e_content = self.e_content
@@ -404,18 +406,18 @@ class Obs:
                 gap = _determine_gap(self, e_content, e_name)

                 if len(self.e_names) > 1:
-                    print('', e_name, '\t %3.6e +/- %3.6e' % (self.e_dvalue[e_name], self.e_ddvalue[e_name]))
+                    print('', e_name, f'\t {self.e_dvalue[e_name]:3.6e} +/- {self.e_ddvalue[e_name]:3.6e}')
                 tau_string = " \N{GREEK SMALL LETTER TAU}_int\t " + _format_uncertainty(self.e_tauint[e_name], self.e_dtauint[e_name])
                 tau_string += f" in units of {gap} config"
                 if gap > 1:
                     tau_string += "s"
                 if self.tau_exp[e_name] > 0:
-                    tau_string = f"{tau_string: <45}" + '\t(\N{GREEK SMALL LETTER TAU}_exp=%3.2f, N_\N{GREEK SMALL LETTER SIGMA}=%1.0i)' % (self.tau_exp[e_name], self.N_sigma[e_name])
+                    tau_string = f"{tau_string: <45}" + f'\t(\N{GREEK SMALL LETTER TAU}_exp={self.tau_exp[e_name]:3.2f}, N_\N{GREEK SMALL LETTER SIGMA}={self.N_sigma[e_name]:g})'
                 else:
-                    tau_string = f"{tau_string: <45}" + '\t(S=%3.2f)' % (self.S[e_name])
+                    tau_string = f"{tau_string: <45}" + f'\t(S={self.S[e_name]:3.2f})'
                 print(tau_string)
             for e_name in self.cov_names:
-                print('', e_name, '\t %3.8e' % (self.e_dvalue[e_name]))
+                print('', e_name, f'\t {self.e_dvalue[e_name]:3.8e}')
         if ens_content is True:
             if len(self.e_names) == 1:
                 print(self.N, 'samples in', len(self.e_names), 'ensemble:')
@@ -560,13 +562,13 @@ class Obs:
         """Plot replica distribution for each ensemble with more than one replicum."""
         if not hasattr(self, 'e_dvalue'):
             raise Exception('Run the gamma method first.')
-        for e, e_name in enumerate(self.mc_names):
+        for _e, e_name in enumerate(self.mc_names):
             if len(self.e_content[e_name]) == 1:
                 print('No replica distribution for a single replicum (', e_name, ')')
                 continue
             r_length = []
             sub_r_mean = 0
-            for r, r_name in enumerate(self.e_content[e_name]):
+            for r_name in self.e_content[e_name]:
                 r_length.append(len(self.deltas[r_name]))
                 sub_r_mean += self.shape[r_name] * self.r_values[r_name]
             e_N = np.sum(r_length)
@@ -586,12 +588,12 @@ class Obs:
         expand : bool
             show expanded history for irregular Monte Carlo chains (default: True).
         """
-        for e, e_name in enumerate(self.mc_names):
+        for _e, e_name in enumerate(self.mc_names):
             plt.figure()
             r_length = []
             tmp = []
             tmp_expanded = []
-            for r, r_name in enumerate(self.e_content[e_name]):
+            for _r, r_name in enumerate(self.e_content[e_name]):
                 tmp.append(self.deltas[r_name] + self.r_values[r_name])
                 if expand:
                     tmp_expanded.append(_expand_deltas(self.deltas[r_name], list(self.idl[r_name]), self.shape[r_name], 1) + self.r_values[r_name])
@@ -632,7 +634,7 @@ class Obs:
         if save:
             fig1.savefig(save)

-        return dict(zip(labels, sizes))
+        return dict(zip(labels, sizes, strict=True))

     def dump(self, filename, datatype="json.gz", description="", **kwargs):
         """Dump the Obs to a file 'name' of chosen format.
@@ -1212,7 +1214,7 @@ def derived_observable(func, data, array_mode=False, **kwargs):
         for name in o.cov_names:
             if name in allcov:
                 if not np.allclose(allcov[name], o.covobs[name].cov):
-                    raise Exception('Inconsistent covariance matrices for %s!' % (name))
+                    raise Exception(f'Inconsistent covariance matrices for {name}!')
             else:
                 allcov[name] = o.covobs[name].cov

@@ -1292,7 +1294,7 @@ def derived_observable(func, data, array_mode=False, **kwargs):

     if array_mode is True:

-        class _Zero_grad():
+        class _Zero_grad:
             def __init__(self, N):
                 self.grad = np.zeros((N, 1))

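`class _Zero_grad:` instead of `class _Zero_grad():` is pyupgrade UP039: in Python 3 the empty parentheses (like an explicit `object` base) are redundant. A sketch with a hypothetical class name:

    class _ZeroGradSketch:          # was: class _ZeroGradSketch():
        def __init__(self, n):
            self.grad = [0.0] * n

    assert _ZeroGradSketch(3).grad == [0.0, 0.0, 0.0]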
@@ -1302,12 +1304,12 @@ def derived_observable(func, data, array_mode=False, **kwargs):
         for name in new_sample_names:
             d_extracted[name] = []
             ens_length = len(new_idl_d[name])
-            for i_dat, dat in enumerate(data):
+            for dat in data:
                 d_extracted[name].append(np.array([_expand_deltas_for_merge(o.deltas.get(name, np.zeros(ens_length)), o.idl.get(name, new_idl_d[name]), o.shape.get(name, ens_length), new_idl_d[name], _compute_scalefactor_missing_rep(o).get(name.split('|')[0], 1)) for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (ens_length, )))
         for name in new_cov_names:
             g_extracted[name] = []
             zero_grad = _Zero_grad(new_covobs_lengths[name])
-            for i_dat, dat in enumerate(data):
+            for dat in data:
                 g_extracted[name].append(np.array([o.covobs.get(name, zero_grad).grad for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (new_covobs_lengths[name], 1)))

     for i_val, new_val in np.ndenumerate(new_values):
@@ -1376,7 +1378,7 @@ def _reduce_deltas(deltas, idx_old, idx_new):
         Has to be a subset of idx_old.
     """
     if not len(deltas) == len(idx_old):
-        raise ValueError('Length of deltas and idx_old have to be the same: %d != %d' % (len(deltas), len(idx_old)))
+        raise ValueError(f'Length of deltas and idx_old have to be the same: {len(deltas)} != {len(idx_old)}')
     if type(idx_old) is range and type(idx_new) is range:
         if idx_old == idx_new:
             return deltas
@@ -1413,7 +1415,7 @@ def reweight(weight, obs, **kwargs):
             raise ValueError('Error: Cannot reweight an Obs that contains multiple ensembles.')
         for name in obs[i].names:
             if not set(obs[i].idl[name]).issubset(weight.idl[name]):
-                raise ValueError('obs[%d] has to be defined on a subset of the configs in weight.idl[%s]!' % (i, name))
+                raise ValueError(f'obs[{i}] has to be defined on a subset of the configs in weight.idl[{name}]!')
         new_samples = []
         w_deltas = {}
         for name in sorted(obs[i].names):
@@ -1463,9 +1465,9 @@ def correlate(obs_a, obs_b):
             raise ValueError('idl of ensemble', name, 'do not fit')

     if obs_a.reweighted is True:
-        warnings.warn("The first observable is already reweighted.", RuntimeWarning)
+        warnings.warn("The first observable is already reweighted.", RuntimeWarning, stacklevel=2)
     if obs_b.reweighted is True:
-        warnings.warn("The second observable is already reweighted.", RuntimeWarning)
+        warnings.warn("The second observable is already reweighted.", RuntimeWarning, stacklevel=2)

     new_samples = []
     new_idl = []
@@ -1516,7 +1518,7 @@ def covariance(obs, visualize=False, correlation=False, smooth=None, **kwargs):

     max_samples = np.max([o.N for o in obs])
     if max_samples <= length and not [item for sublist in [o.cov_names for o in obs] for item in sublist]:
-        warnings.warn(f"The dimension of the covariance matrix ({length}) is larger or equal to the number of samples ({max_samples}). This will result in a rank deficient matrix.", RuntimeWarning)
+        warnings.warn(f"The dimension of the covariance matrix ({length}) is larger or equal to the number of samples ({max_samples}). This will result in a rank deficient matrix.", RuntimeWarning, stacklevel=2)

     cov = np.zeros((length, length))
     for i in range(length):
@@ -1543,7 +1545,7 @@ def covariance(obs, visualize=False, correlation=False, smooth=None, **kwargs):

         eigenvalues = np.linalg.eigh(cov)[0]
         if not np.all(eigenvalues >= 0):
-            warnings.warn("Covariance matrix is not positive semi-definite (Eigenvalues: " + str(eigenvalues) + ")", RuntimeWarning)
+            warnings.warn("Covariance matrix is not positive semi-definite (Eigenvalues: " + str(eigenvalues) + ")", RuntimeWarning, stacklevel=2)

     return cov

@@ -1564,7 +1566,7 @@ def invert_corr_cov_cholesky(corr, inverrdiag):
     if condn > 0.1 / np.finfo(float).eps:
         raise ValueError(f"Cannot invert correlation matrix as its condition number exceeds machine precision ({condn:1.2e})")
     if condn > 1e13:
-        warnings.warn("Correlation matrix may be ill-conditioned, condition number: {%1.2e}" % (condn), RuntimeWarning)
+        warnings.warn(f"Correlation matrix may be ill-conditioned, condition number: {{{condn:1.2e}}}", RuntimeWarning, stacklevel=2)
     chol = np.linalg.cholesky(corr)
     chol_inv = scipy.linalg.solve_triangular(chol, inverrdiag, lower=True)

@@ -1617,7 +1619,7 @@ def sort_corr(corr, kl, yd):

     posd = {}
     ofs = 0
-    for ki, k in enumerate(kl):
+    for _ki, k in enumerate(kl):
         posd[k] = [i + ofs for i in range(len(yd[k]))]
         ofs += len(posd[k])

@@ -1779,7 +1781,7 @@ def merge_obs(list_of_obs):
     """
     replist = [item for obs in list_of_obs for item in obs.names]
     if (len(replist) == len(set(replist))) is False:
-        raise ValueError('list_of_obs contains duplicate replica: %s' % (str(replist)))
+        raise ValueError(f'list_of_obs contains duplicate replica: {str(replist)}')
     if any([len(o.cov_names) for o in list_of_obs]):
         raise ValueError('Not possible to merge data that contains covobs!')
     new_dict = {}
@@ -1832,7 +1834,7 @@ def cov_Obs(means, cov, name, grad=None):
     for i in range(len(means)):
         ol.append(covobs_to_obs(Covobs(means[i], cov, name, pos=i, grad=grad)))
     if ol[0].covobs[name].N != len(means):
-        raise ValueError('You have to provide %d mean values!' % (ol[0].N))
+        raise ValueError(f'You have to provide {ol[0].N} mean values!')
     if len(ol) == 1:
         return ol[0]
     return ol
@@ -1,6 +1,7 @@
 import numpy as np
 import scipy.optimize
 from autograd import jacobian
+
 from .obs import derived_observable

@@ -1,10 +1,38 @@
-import scipy
 import numpy as np
-from autograd.extend import primitive, defvjp
-from autograd.scipy.special import j0, y0, j1, y1, jn, yn, i0, i1, iv, ive, beta, betainc, betaln
-from autograd.scipy.special import polygamma, psi, digamma, gamma, gammaln, gammainc, gammaincc, gammasgn, rgamma, multigammaln
-from autograd.scipy.special import erf, erfc, erfinv, erfcinv, logit, expit, logsumexp
+
+import scipy
+from autograd.extend import defvjp, primitive
+from autograd.scipy.special import (
+    beta,
+    betainc,
+    betaln,
+    digamma,
+    erf,
+    erfc,
+    erfcinv,
+    erfinv,
+    expit,
+    gamma,
+    gammainc,
+    gammaincc,
+    gammaln,
+    gammasgn,
+    i0,
+    i1,
+    iv,
+    ive,
+    j0,
+    j1,
+    jn,
+    logit,
+    logsumexp,
+    multigammaln,
+    polygamma,
+    psi,
+    rgamma,
+    y0,
+    y1,
+    yn,
+)

 __all__ = ["beta", "betainc", "betaln",
            "polygamma", "psi", "digamma", "gamma", "gammaln", "gammainc", "gammaincc", "gammasgn", "rgamma", "multigammaln",
@@ -2,5 +2,9 @@
 requires = ["setuptools >= 63.0.0", "wheel"]
 build-backend = "setuptools.build_meta"

+[tool.ruff]
+target-version = "py310"
+
 [tool.ruff.lint]
-ignore = ["F403"]
+extend-select = ["I", "B", "PLE", "UP"]
+ignore = ["F403", "E501", "PLC0415"]
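Read back, the new lint configuration enables four extra rule families on top of ruff's defaults: "I" (import sorting, responsible for the reshuffled import blocks throughout this commit), "B" (flake8-bugbear checks such as B006, B007, B023, B028, B904 and B905 seen above), "PLE" (pylint errors) and "UP" (pyupgrade rewrites such as the f-string conversions), while F403 (star imports, used deliberately in the package `__init__`), E501 (line length) and PLC0415 (imports not at the top of a file) stay ignored. With `target-version = "py310"` ruff also accepts 3.10+ constructs such as `zip(..., strict=True)`. A typical local run would be `ruff check .` to report violations of the selected rules and `ruff check . --fix` to apply the auto-fixable ones.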