[chore] Further lint rules and removal of flake8

This commit is contained in:
Fabian Joswig 2026-04-20 20:28:15 +02:00
commit d9ffa33b2c
14 changed files with 107 additions and 68 deletions

View file

@ -1,26 +0,0 @@
name: flake8
on:
push:
branches:
- master
- develop
pull_request:
jobs:
flake8-lint:
runs-on: ubuntu-latest
name: Lint
steps:
- name: Check out source repository
uses: actions/checkout@v4
- name: Set up Python environment
uses: actions/setup-python@v5
with:
python-version: "3.12"
- name: flake8 Lint
uses: py-actions/flake8@v2
with:
ignore: "E501,W503"
exclude: "__init__.py, input/__init__.py"
path: "pyerrors"

View file

@ -19,14 +19,14 @@ Please add docstrings to any new function, class or method you implement. The do
### Tests
When implementing a new feature or fixing a bug please add meaningful tests to the files in the `tests` directory which cover the new code.
We follow the [PEP8](https://peps.python.org/pep-0008/) code style which is checked by `flake8`.
We follow the [PEP8](https://peps.python.org/pep-0008/) code style which is checked by `ruff`.
For all pull requests tests are executed for the most recent python releases via
```
pytest -vv -Werror
pytest --nbmake examples/*.ipynb
flake8 --ignore=E501,W503 --exclude=__init__.py pyerrors
ruff check pyerrors
```
The tests require `pytest`, `pytest-cov`, `pytest-benchmark`, `hypothesis`, `nbmake` and `flake8`. To install the test dependencies one can run `pip install pyerrors[test]`.
The tests require `pytest`, `pytest-cov`, `pytest-benchmark`, `hypothesis` and `nbmake`. To install the test dependencies one can run `pip install pyerrors[test]`. Linting is done with `ruff`, which can be installed via `pip install ruff` or run ad-hoc with `uvx ruff check pyerrors`.
Please make sure that all tests pass for new pull requests.
To get a coverage report in html run

View file

@ -1,3 +1,4 @@
import itertools
import warnings
from itertools import permutations
@ -42,7 +43,7 @@ class Corr:
the temporal extent of the correlator and N is the dimension of the matrix.
"""
__slots__ = ["content", "N", "T", "tag", "prange"]
__slots__ = ["N", "T", "content", "prange", "tag"]
def __init__(self, data_input, padding=None, prange=None):
""" Initialize a Corr object.
@ -983,7 +984,7 @@ class Corr:
ax1.set_ylabel(ylabel)
ax1.set_xlim([x_range[0] - 0.5, x_range[1] + 0.5])
handles, labels = ax1.get_legend_handles_labels()
_handles, labels = ax1.get_legend_handles_labels()
if labels:
ax1.legend()
@ -1009,7 +1010,7 @@ class Corr:
if self.N != 1:
raise ValueError("Correlator needs to be projected first.")
mc_names = list(set([item for sublist in [sum(map(o[0].e_content.get, o[0].mc_names), []) for o in self.content if o is not None] for item in sublist]))
mc_names = list(set([item for sublist in [list(itertools.chain.from_iterable(map(o[0].e_content.get, o[0].mc_names))) for o in self.content if o is not None] for item in sublist]))
x0_vals = [n for (n, o) in zip(np.arange(self.T), self.content, strict=True) if o is not None]
for name in mc_names:
@ -1096,6 +1097,8 @@ class Corr:
comp = np.asarray(y)
return np.asarray(self.content, dtype=object) == comp
__hash__ = None
def __add__(self, y):
if isinstance(y, Corr):
if ((self.N != y.N) or (self.T != y.T)):
@ -1425,7 +1428,7 @@ def _sort_vectors(vec_set_in, ts):
"""Helper function used to find a set of Eigenvectors consistent over all timeslices"""
if isinstance(vec_set_in[ts][0][0], Obs):
vec_set = [anp.vectorize(lambda x: float(x))(vi) if vi is not None else vi for vi in vec_set_in]
vec_set = [anp.vectorize(float)(vi) if vi is not None else vi for vi in vec_set_in]
else:
vec_set = vec_set_in
reference_sorting = np.array(vec_set[ts])

View file

@ -18,6 +18,8 @@ from odrpack import odr_fit
from .obs import Obs, cov_Obs, covariance, derived_observable, invert_corr_cov_cholesky
_rng = np.random.default_rng()
class Fit_result(Sequence):
"""Represents fit results.
@ -946,6 +948,6 @@ def _construct_prior_obs(i_prior, i_n):
return i_prior
elif isinstance(i_prior, str):
loc_val, loc_dval = _extract_val_and_dval(i_prior)
return cov_Obs(loc_val, loc_dval ** 2, '#prior' + str(i_n) + f"_{np.random.randint(2147483647):010d}")
return cov_Obs(loc_val, loc_dval ** 2, '#prior' + str(i_n) + f"_{_rng.integers(2147483647):010d}")
else:
raise TypeError("Prior entries need to be 'Obs' or 'str'.")

View file

@ -506,7 +506,7 @@ def read_mesons(file_path, bdio_path='./libbdio.so', **kwargs):
else:
deltas = tmp_data[:, t]
tmp_corr.append(Obs([deltas], [ensemble_name], idl=[idl_target]))
result[(corr_name[c], corr_source[c]) + tuple(corr_kappa[c])] = tmp_corr
result[(corr_name[c], corr_source[c], *corr_kappa[c])] = tmp_corr
# Check that all data entries have the same number of configurations
if len(set([o[0].N for o in list(result.values())])) != 1:

View file

@ -289,7 +289,7 @@ def _import_array(arr):
def _import_rdata(rd):
name, idx, mask, deltas = _import_array(rd)
name, idx, _mask, deltas = _import_array(rd)
return deltas, name, idx

View file

@ -302,7 +302,7 @@ def _parse_json_dict(json_dict, verbose=True, full_output=False):
if len(rep_name) > len(ens["id"]):
if rep_name[len(ens["id"])] != "|":
tmp_list = list(rep_name)
tmp_list = tmp_list[:len(ens["id"])] + ["|"] + tmp_list[len(ens["id"]):]
tmp_list = [*tmp_list[:len(ens["id"])], "|", *tmp_list[len(ens["id"]):]]
rep_name = ''.join(tmp_list)
retd['names'].append(rep_name)
retd['idl'].append([di[0] for di in rep['deltas']])

View file

@ -219,7 +219,7 @@ def read_rwms(path, prefix, version='2.0', names=None, **kwargs):
if np.any([len(np.unique(np.diff(cl))) != 1 for cl in configlist]):
raise Exception('Irregular spaced data in input file!', [len(np.unique(np.diff(cl))) for cl in configlist])
stepsizes = [list(np.unique(np.diff(cl)))[0] for cl in configlist]
stepsizes = [next(iter(np.unique(np.diff(cl)))) for cl in configlist]
if np.any([step != 1 for step in stepsizes]):
warnings.warn('Stepsize between configurations is greater than one!' + str(stepsizes), RuntimeWarning, stacklevel=2)
@ -408,7 +408,7 @@ def _extract_flowed_energy_density(path, prefix, dtr_read, xmin, spatial_extent,
if np.any([len(np.unique(np.diff(cl))) != 1 for cl in configlist]):
raise Exception('Irregular spaced data in input file!', [len(np.unique(np.diff(cl))) for cl in configlist])
stepsizes = [list(np.unique(np.diff(cl)))[0] for cl in configlist]
stepsizes = [next(iter(np.unique(np.diff(cl)))) for cl in configlist]
if np.any([step != 1 for step in stepsizes]):
warnings.warn('Stepsize between configurations is greater than one!' + str(stepsizes), RuntimeWarning, stacklevel=2)

View file

@ -276,9 +276,7 @@ def read_sfcf_multi(path, prefix, name_list, quarks_list=None, corr_type_list=No
if isinstance(files, list):
if all(isinstance(f, list) for f in files):
files = files[i]
elif all(isinstance(f, str) for f in files):
files = files
else:
elif not all(isinstance(f, str) for f in files):
raise TypeError("files has to be of type list[list[str]] or list[str]!")
else:
raise TypeError("files has to be of type list[list[str]] or list[str]!")

View file

@ -10,6 +10,8 @@ import scipy
from .obs import Obs
from .version import __version__
_rng = np.random.default_rng()
def print_config():
"""Print information about version of python, pyerrors and dependencies."""
@ -120,7 +122,7 @@ def pseudo_Obs(value, dvalue, name, samples=1000):
return Obs([np.zeros(samples) + value], [name])
else:
for _ in range(100):
deltas = [np.random.normal(0.0, dvalue * np.sqrt(samples), samples)]
deltas = [_rng.normal(0.0, dvalue * np.sqrt(samples), samples)]
deltas -= np.mean(deltas)
deltas *= dvalue / np.sqrt(np.var(deltas) / samples) / np.sqrt(1 + 3 / samples)
deltas += value
@ -163,7 +165,7 @@ def gen_correlated_data(means, cov, name, tau=0.5, samples=1000):
raise Exception('All integrated autocorrelations have to be >= 0.5.')
a = (2 * tau - 1) / (2 * tau + 1)
rand = np.random.multivariate_normal(np.zeros_like(means), cov * samples, samples)
rand = _rng.multivariate_normal(np.zeros_like(means), cov * samples, samples)
# Normalize samples such that sample variance matches input
norm = np.array([np.var(o, ddof=1) / samples for o in rand.T])

View file

@ -2,6 +2,7 @@ import hashlib
import pickle
import warnings
from itertools import groupby
from typing import ClassVar
import autograd.numpy as anp # Thinly-wrapped numpy
import matplotlib.pyplot as plt
@ -14,7 +15,7 @@ from scipy.stats import kurtosis, kurtosistest, skew, skewtest
from .covobs import Covobs
# Improve print output of numpy.ndarrays containing Obs objects.
np.set_printoptions(formatter={'object': lambda x: str(x)})
np.set_printoptions(formatter={'object': str})
class Obs:
@ -46,18 +47,40 @@ class Obs:
Dictionary for N_sigma values. If an entry for a given ensemble exists
this overwrites the standard value for that ensemble.
"""
__slots__ = ['names', 'shape', 'r_values', 'deltas', 'N', '_value', '_dvalue',
'ddvalue', 'reweighted', 'S', 'tau_exp', 'N_sigma',
'e_dvalue', 'e_ddvalue', 'e_tauint', 'e_dtauint',
'e_windowsize', 'e_rho', 'e_drho', 'e_n_tauint', 'e_n_dtauint',
'idl', 'tag', '_covobs', '__dict__']
__slots__ = [
'N',
'N_sigma',
'S',
'__dict__',
'_covobs',
'_dvalue',
'_value',
'ddvalue',
'deltas',
'e_ddvalue',
'e_drho',
'e_dtauint',
'e_dvalue',
'e_n_dtauint',
'e_n_tauint',
'e_rho',
'e_tauint',
'e_windowsize',
'idl',
'names',
'r_values',
'reweighted',
'shape',
'tag',
'tau_exp',
]
S_global = 2.0
S_dict = {}
S_dict: ClassVar[dict] = {}
tau_exp_global = 0.0
tau_exp_dict = {}
tau_exp_dict: ClassVar[dict] = {}
N_sigma_global = 1.0
N_sigma_dict = {}
N_sigma_dict: ClassVar[dict] = {}
def __init__(self, samples, names, idl=None, **kwargs):
""" Initialize Obs object.
@ -502,7 +525,7 @@ class Obs:
fig = plt.figure()
plt.xlabel(r'$W$')
plt.ylabel(r'$\tau_\mathrm{int}$')
length = int(len(self.e_n_tauint[e_name]))
length = len(self.e_n_tauint[e_name])
if self.tau_exp[e_name] > 0:
base = self.e_n_tauint[e_name][self.e_windowsize[e_name]]
x_help = np.arange(2 * self.tau_exp[e_name])
@ -541,7 +564,7 @@ class Obs:
fig = plt.figure()
plt.xlabel('W')
plt.ylabel('rho')
length = int(len(self.e_drho[e_name]))
length = len(self.e_drho[e_name])
plt.errorbar(np.arange(length), self.e_rho[e_name][:length], yerr=self.e_drho[e_name][:], linewidth=1, capsize=2)
plt.axvline(x=self.e_windowsize[e_name], color='r', alpha=0.25, ls='--', marker=',')
if self.tau_exp[e_name] > 0:
@ -919,7 +942,7 @@ class Obs:
class CObs:
"""Class for a complex valued observable."""
__slots__ = ['_real', '_imag', 'tag']
__slots__ = ['_imag', '_real', 'tag']
def __init__(self, real, imag=0.0):
self._real = real
@ -1021,6 +1044,8 @@ class CObs:
def __eq__(self, other):
return self.real == other.real and self.imag == other.imag
__hash__ = None
def __str__(self):
return '(' + str(self.real) + int(self.imag >= 0.0) * '+' + str(self.imag) + 'j)'
@ -1305,12 +1330,12 @@ def derived_observable(func, data, array_mode=False, **kwargs):
d_extracted[name] = []
ens_length = len(new_idl_d[name])
for dat in data:
d_extracted[name].append(np.array([_expand_deltas_for_merge(o.deltas.get(name, np.zeros(ens_length)), o.idl.get(name, new_idl_d[name]), o.shape.get(name, ens_length), new_idl_d[name], _compute_scalefactor_missing_rep(o).get(name.split('|')[0], 1)) for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (ens_length, )))
d_extracted[name].append(np.array([_expand_deltas_for_merge(o.deltas.get(name, np.zeros(ens_length)), o.idl.get(name, new_idl_d[name]), o.shape.get(name, ens_length), new_idl_d[name], _compute_scalefactor_missing_rep(o).get(name.split('|')[0], 1)) for o in dat.reshape(np.prod(dat.shape))]).reshape((*dat.shape, ens_length)))
for name in new_cov_names:
g_extracted[name] = []
zero_grad = _Zero_grad(new_covobs_lengths[name])
for dat in data:
g_extracted[name].append(np.array([o.covobs.get(name, zero_grad).grad for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (new_covobs_lengths[name], 1)))
g_extracted[name].append(np.array([o.covobs.get(name, zero_grad).grad for o in dat.reshape(np.prod(dat.shape))]).reshape((*dat.shape, new_covobs_lengths[name], 1)))
for i_val, new_val in np.ndenumerate(new_values):
new_deltas = {}
@ -1320,11 +1345,11 @@ def derived_observable(func, data, array_mode=False, **kwargs):
ens_length = d_extracted[name][0].shape[-1]
new_deltas[name] = np.zeros(ens_length)
for i_dat, dat in enumerate(d_extracted[name]):
new_deltas[name] += np.tensordot(deriv[i_val + (i_dat, )], dat)
new_deltas[name] += np.tensordot(deriv[(*i_val, i_dat)], dat)
for name in new_cov_names:
new_grad[name] = 0
for i_dat, dat in enumerate(g_extracted[name]):
new_grad[name] += np.tensordot(deriv[i_val + (i_dat, )], dat)
new_grad[name] += np.tensordot(deriv[(*i_val, i_dat)], dat)
else:
for j_obs, obs in np.ndenumerate(data):
scalef_d = _compute_scalefactor_missing_rep(obs)
@ -1781,7 +1806,7 @@ def merge_obs(list_of_obs):
"""
replist = [item for obs in list_of_obs for item in obs.names]
if (len(replist) == len(set(replist))) is False:
raise ValueError(f'list_of_obs contains duplicate replica: {str(replist)}')
raise ValueError(f'list_of_obs contains duplicate replica: {replist!s}')
if any([len(o.cov_names) for o in list_of_obs]):
raise ValueError('Not possible to merge data that contains covobs!')
new_dict = {}

View file

@ -34,10 +34,39 @@ from autograd.scipy.special import (
yn,
)
__all__ = ["beta", "betainc", "betaln",
"polygamma", "psi", "digamma", "gamma", "gammaln", "gammainc", "gammaincc", "gammasgn", "rgamma", "multigammaln",
"kn", "j0", "y0", "j1", "y1", "jn", "yn", "i0", "i1", "iv", "ive",
"erf", "erfc", "erfinv", "erfcinv", "logit", "expit", "logsumexp"]
__all__ = [
"beta",
"betainc",
"betaln",
"digamma",
"erf",
"erfc",
"erfcinv",
"erfinv",
"expit",
"gamma",
"gammainc",
"gammaincc",
"gammaln",
"gammasgn",
"i0",
"i1",
"iv",
"ive",
"j0",
"j1",
"jn",
"kn",
"logit",
"logsumexp",
"multigammaln",
"polygamma",
"psi",
"rgamma",
"y0",
"y1",
"yn",
]
@primitive

View file

@ -6,5 +6,11 @@ build-backend = "setuptools.build_meta"
target-version = "py310"
[tool.ruff.lint]
extend-select = ["I", "B", "PLE", "UP"]
ignore = ["F403", "E501", "PLC0415"]
extend-select = ["E", "W", "I", "B", "PIE", "PLE", "PLW", "UP", "NPY", "RUF"]
ignore = [
"F403", # star imports in __init__ files are intentional
"E501", # line too long
"PLC0415", # import outside top level
"PLW2901", # redefined loop name (too noisy)
"RUF002", # ambiguous unicode in docstrings (Greek letters)
]

View file

@ -26,7 +26,7 @@ setup(name='pyerrors',
packages=find_packages(),
python_requires='>=3.10.0',
install_requires=['numpy>=2.0', 'autograd>=1.7.0', 'numdifftools>=0.9.41', 'matplotlib>=3.9', 'scipy>=1.13', 'iminuit>=2.28', 'h5py>=3.11', 'lxml>=5.0', 'python-rapidjson>=1.20', 'pandas>=2.2', 'odrpack>=0.5'],
extras_require={'test': ['pytest', 'pytest-cov', 'pytest-benchmark', 'hypothesis', 'nbmake', 'flake8']},
extras_require={'test': ['pytest', 'pytest-cov', 'pytest-benchmark', 'hypothesis', 'nbmake']},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Science/Research',