diff --git a/.github/workflows/examples.yml b/.github/workflows/examples.yml index 51322b2c..b8220691 100644 --- a/.github/workflows/examples.yml +++ b/.github/workflows/examples.yml @@ -27,17 +27,17 @@ jobs: uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} - - name: uv - uses: astral-sh/setup-uv@v5 - name: Install run: | sudo apt-get update sudo apt-get install dvipng texlive-latex-extra texlive-fonts-recommended cm-super - uv pip install wheel --system - uv pip install . --system - uv pip install pytest nbmake --system - uv pip install -U matplotlib!=3.7.0 --system # Exclude version 3.7.0 of matplotlib as this breaks local imports of style files. + python -m pip install --upgrade pip + pip install wheel + pip install . + pip install pytest + pip install nbmake + pip install -U matplotlib!=3.7.0 # Exclude version 3.7.0 of matplotlib as this breaks local imports of style files. - name: Run tests run: pytest -vv --nbmake examples/*.ipynb diff --git a/.github/workflows/pytest.yml b/.github/workflows/pytest.yml index af98e210..36981809 100644 --- a/.github/workflows/pytest.yml +++ b/.github/workflows/pytest.yml @@ -20,9 +20,7 @@ jobs: python-version: ["3.9", "3.10", "3.11", "3.12", "3.13"] include: - os: macos-latest - python-version: "3.12" - - os: ubuntu-24.04-arm - python-version: "3.12" + python-version: "3.10" steps: - name: Checkout source @@ -32,15 +30,17 @@ jobs: uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} - - name: uv - uses: astral-sh/setup-uv@v5 - name: Install run: | - uv pip install wheel --system - uv pip install . --system - uv pip install pytest pytest-cov pytest-benchmark hypothesis --system - uv pip freeze --system + python -m pip install --upgrade pip + pip install wheel + pip install . + pip install pytest + pip install pytest-cov + pip install pytest-benchmark + pip install hypothesis + pip freeze - name: Run tests - run: pytest --cov=pyerrors -vv -Werror + run: pytest --cov=pyerrors -vv diff --git a/.github/workflows/ruff.yml b/.github/workflows/ruff.yml deleted file mode 100644 index 2288bd3c..00000000 --- a/.github/workflows/ruff.yml +++ /dev/null @@ -1,15 +0,0 @@ -name: ruff -on: - push: - branches: - - master - - develop - pull_request: -jobs: - ruff: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - uses: astral-sh/ruff-action@v2 - with: - src: "./pyerrors" diff --git a/CHANGELOG.md b/CHANGELOG.md index 7a61e766..d019608c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,21 +2,6 @@ All notable changes to this project will be documented in this file. -## [2.14.0] - 2025-03-09 - -### Added -- Explicit checks of the provided inverse matrix for correlated fits #259 - -### Changed -- Compute derivative for pow explicitly instead of relying on autograd. 
This results in a ~4x speedup for pow operations #246 -- More explicit exception types #248 - -### Fixed -- Removed the possibility to create an Obs from data on several replica #258 -- Fix range in `set_prange` #247 -- Fix ensemble name handling in sfcf input modules #253 -- Correct error message for fit shape mismatch #257 - ## [2.13.0] - 2024-11-03 ### Added diff --git a/README.md b/README.md index 7937da4d..aa669ad5 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -[![](https://img.shields.io/badge/python-3.9+-blue.svg)](https://www.python.org/downloads/) [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) [![arXiv](https://img.shields.io/badge/arXiv-2209.14371-b31b1b.svg)](https://arxiv.org/abs/2209.14371) [![DOI](https://img.shields.io/badge/DOI-10.1016%2Fj.cpc.2023.108750-blue)](https://doi.org/10.1016/j.cpc.2023.108750) +[![pytest](https://github.com/fjosw/pyerrors/actions/workflows/pytest.yml/badge.svg)](https://github.com/fjosw/pyerrors/actions/workflows/pytest.yml) [![](https://img.shields.io/badge/python-3.9+-blue.svg)](https://www.python.org/downloads/) [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) [![arXiv](https://img.shields.io/badge/arXiv-2209.14371-b31b1b.svg)](https://arxiv.org/abs/2209.14371) [![DOI](https://img.shields.io/badge/DOI-10.1016%2Fj.cpc.2023.108750-blue)](https://doi.org/10.1016/j.cpc.2023.108750) # pyerrors `pyerrors` is a python framework for error computation and propagation of Markov chain Monte Carlo data from lattice field theory and statistical mechanics simulations. @@ -14,6 +14,11 @@ Install the most recent release using pip and [pypi](https://pypi.org/project/py python -m pip install pyerrors # Fresh install python -m pip install -U pyerrors # Update ``` +Install the most recent release using conda and [conda-forge](https://anaconda.org/conda-forge/pyerrors): +```bash +conda install -c conda-forge pyerrors # Fresh install +conda update -c conda-forge pyerrors # Update +``` ## Contributing We appreciate all contributions to the code, the documentation and the examples. If you want to get involved please have a look at our [contribution guideline](https://github.com/fjosw/pyerrors/blob/develop/CONTRIBUTING.md). diff --git a/examples/06_gevp.ipynb b/examples/06_gevp.ipynb index 3de14d5e..3e6c12d5 100644 --- a/examples/06_gevp.ipynb +++ b/examples/06_gevp.ipynb @@ -151,7 +151,7 @@ "\n", "$$C_{\\textrm{projected}}(t)=v_1^T \\underline{C}(t) v_2$$\n", "\n", - "If we choose the vectors to be $v_1=v_2=(1,0,0,0)$, we should get the same correlator as in the cell above. \n", + "If we choose the vectors to be $v_1=v_2=(0,1,0,0)$, we should get the same correlator as in the cell above. \n", "\n", "Thinking about it this way is usefull in the Context of the generalized eigenvalue problem (GEVP), used to find the source-sink combination, which best describes a certain energy eigenstate.\n", "A good introduction is found in https://arxiv.org/abs/0902.1265." diff --git a/pyerrors/__init__.py b/pyerrors/__init__.py index ca05aff4..2bfd688f 100644 --- a/pyerrors/__init__.py +++ b/pyerrors/__init__.py @@ -481,12 +481,12 @@ from .obs import * from .correlators import * from .fits import * from .misc import * -from . import dirac as dirac -from . import input as input -from . import linalg as linalg -from . import mpm as mpm -from . import roots as roots -from . import integrate as integrate -from . import special as special +from . 
import dirac +from . import input +from . import linalg +from . import mpm +from . import roots +from . import integrate +from . import special -from .version import __version__ as __version__ +from .version import __version__ diff --git a/pyerrors/correlators.py b/pyerrors/correlators.py index 0375155f..de1addfd 100644 --- a/pyerrors/correlators.py +++ b/pyerrors/correlators.py @@ -101,7 +101,7 @@ class Corr: self.N = 1 elif all([isinstance(item, np.ndarray) or item is None for item in data_input]) and any([isinstance(item, np.ndarray) for item in data_input]): self.content = data_input - noNull = [a for a in self.content if a is not None] # To check if the matrices are correct for all undefined elements + noNull = [a for a in self.content if not (a is None)] # To check if the matrices are correct for all undefined elements self.N = noNull[0].shape[0] if self.N > 1 and noNull[0].shape[0] != noNull[0].shape[1]: raise ValueError("Smearing matrices are not NxN.") @@ -141,7 +141,7 @@ class Corr: def gamma_method(self, **kwargs): """Apply the gamma method to the content of the Corr.""" for item in self.content: - if item is not None: + if not (item is None): if self.N == 1: item[0].gamma_method(**kwargs) else: @@ -159,7 +159,7 @@ class Corr: By default it will return the lowest source, which usually means unsmeared-unsmeared (0,0), but it does not have to """ if self.N == 1: - raise ValueError("Trying to project a Corr, that already has N=1.") + raise Exception("Trying to project a Corr, that already has N=1.") if vector_l is None: vector_l, vector_r = np.asarray([1.] + (self.N - 1) * [0.]), np.asarray([1.] + (self.N - 1) * [0.]) @@ -167,16 +167,16 @@ class Corr: vector_r = vector_l if isinstance(vector_l, list) and not isinstance(vector_r, list): if len(vector_l) != self.T: - raise ValueError("Length of vector list must be equal to T") + raise Exception("Length of vector list must be equal to T") vector_r = [vector_r] * self.T if isinstance(vector_r, list) and not isinstance(vector_l, list): if len(vector_r) != self.T: - raise ValueError("Length of vector list must be equal to T") + raise Exception("Length of vector list must be equal to T") vector_l = [vector_l] * self.T if not isinstance(vector_l, list): if not vector_l.shape == vector_r.shape == (self.N,): - raise ValueError("Vectors are of wrong shape!") + raise Exception("Vectors are of wrong shape!") if normalize: vector_l, vector_r = vector_l / np.sqrt((vector_l @ vector_l)), vector_r / np.sqrt(vector_r @ vector_r) newcontent = [None if _check_for_none(self, item) else np.asarray([vector_l.T @ item @ vector_r]) for item in self.content] @@ -201,7 +201,7 @@ class Corr: Second index to be picked. """ if self.N == 1: - raise ValueError("Trying to pick item from projected Corr") + raise Exception("Trying to pick item from projected Corr") newcontent = [None if (item is None) else item[i, j] for item in self.content] return Corr(newcontent) @@ -212,8 +212,8 @@ class Corr: timeslice and the error on each timeslice. 
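The `projected` method above contracts an NxN correlator matrix with left and right vectors on every timeslice. A minimal sketch of that usage, assuming toy data built from `pe.pseudo_Obs` (the ensemble name `'ensA'` and all values are made up):

```python
import numpy as np
import pyerrors as pe

T = 8
# One 2x2 matrix of Obs per timeslice, entries with synthetic fluctuations.
content = [np.array([[pe.pseudo_Obs(np.exp(-0.5 * t) * (i + j + 1), 0.01, 'ensA')
                      for j in range(2)] for i in range(2)]) for t in range(T)]
matrix_corr = pe.Corr(content)

# Unit vectors pick out the (0, 0) element, i.e. the same correlator
# that matrix_corr.item(0, 0) returns.
vec = np.asarray([1., 0.])
projected = matrix_corr.projected(vector_l=vec, vector_r=vec)
projected.gamma_method()
print(projected[0])
```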
""" if self.N != 1: - raise ValueError("Can only make Corr[N=1] plottable") - x_list = [x for x in range(self.T) if self.content[x] is not None] + raise Exception("Can only make Corr[N=1] plottable") + x_list = [x for x in range(self.T) if not self.content[x] is None] y_list = [y[0].value for y in self.content if y is not None] y_err_list = [y[0].dvalue for y in self.content if y is not None] @@ -222,9 +222,9 @@ class Corr: def symmetric(self): """ Symmetrize the correlator around x0=0.""" if self.N != 1: - raise ValueError('symmetric cannot be safely applied to multi-dimensional correlators.') + raise Exception('symmetric cannot be safely applied to multi-dimensional correlators.') if self.T % 2 != 0: - raise ValueError("Can not symmetrize odd T") + raise Exception("Can not symmetrize odd T") if self.content[0] is not None: if np.argmax(np.abs([o[0].value if o is not None else 0 for o in self.content])) != 0: @@ -237,7 +237,7 @@ class Corr: else: newcontent.append(0.5 * (self.content[t] + self.content[self.T - t])) if (all([x is None for x in newcontent])): - raise ValueError("Corr could not be symmetrized: No redundant values") + raise Exception("Corr could not be symmetrized: No redundant values") return Corr(newcontent, prange=self.prange) def anti_symmetric(self): @@ -245,7 +245,7 @@ class Corr: if self.N != 1: raise TypeError('anti_symmetric cannot be safely applied to multi-dimensional correlators.') if self.T % 2 != 0: - raise ValueError("Can not symmetrize odd T") + raise Exception("Can not symmetrize odd T") test = 1 * self test.gamma_method() @@ -259,7 +259,7 @@ class Corr: else: newcontent.append(0.5 * (self.content[t] - self.content[self.T - t])) if (all([x is None for x in newcontent])): - raise ValueError("Corr could not be symmetrized: No redundant values") + raise Exception("Corr could not be symmetrized: No redundant values") return Corr(newcontent, prange=self.prange) def is_matrix_symmetric(self): @@ -292,7 +292,7 @@ class Corr: def matrix_symmetric(self): """Symmetrizes the correlator matrices on every timeslice.""" if self.N == 1: - raise ValueError("Trying to symmetrize a correlator matrix, that already has N=1.") + raise Exception("Trying to symmetrize a correlator matrix, that already has N=1.") if self.is_matrix_symmetric(): return 1.0 * self else: @@ -336,10 +336,10 @@ class Corr: ''' if self.N == 1: - raise ValueError("GEVP methods only works on correlator matrices and not single correlators.") + raise Exception("GEVP methods only works on correlator matrices and not single correlators.") if ts is not None: if (ts <= t0): - raise ValueError("ts has to be larger than t0.") + raise Exception("ts has to be larger than t0.") if "sorted_list" in kwargs: warnings.warn("Argument 'sorted_list' is deprecated, use 'sort' instead.", DeprecationWarning) @@ -371,9 +371,9 @@ class Corr: if sort is None: if (ts is None): - raise ValueError("ts is required if sort=None.") + raise Exception("ts is required if sort=None.") if (self.content[t0] is None) or (self.content[ts] is None): - raise ValueError("Corr not defined at t0/ts.") + raise Exception("Corr not defined at t0/ts.") Gt = _get_mat_at_t(ts) reordered_vecs = _GEVP_solver(Gt, G0, method=method, chol_inv=chol_inv) if kwargs.get('auto_gamma', False) and vector_obs: @@ -391,14 +391,14 @@ class Corr: all_vecs.append(None) if sort == "Eigenvector": if ts is None: - raise ValueError("ts is required for the Eigenvector sorting method.") + raise Exception("ts is required for the Eigenvector sorting method.") all_vecs = 
_sort_vectors(all_vecs, ts) reordered_vecs = [[v[s] if v is not None else None for v in all_vecs] for s in range(self.N)] if kwargs.get('auto_gamma', False) and vector_obs: [[[o.gm() for o in evn] for evn in ev if evn is not None] for ev in reordered_vecs] else: - raise ValueError("Unknown value for 'sort'. Choose 'Eigenvalue', 'Eigenvector' or None.") + raise Exception("Unknown value for 'sort'. Choose 'Eigenvalue', 'Eigenvector' or None.") if "state" in kwargs: return reordered_vecs[kwargs.get("state")] @@ -435,7 +435,7 @@ class Corr: """ if self.N != 1: - raise NotImplementedError("Multi-operator Prony not implemented!") + raise Exception("Multi-operator Prony not implemented!") array = np.empty([N, N], dtype="object") new_content = [] @@ -502,7 +502,7 @@ class Corr: correlator or a Corr of same length. """ if self.N != 1: - raise ValueError("Only one-dimensional correlators can be safely correlated.") + raise Exception("Only one-dimensional correlators can be safely correlated.") new_content = [] for x0, t_slice in enumerate(self.content): if _check_for_none(self, t_slice): @@ -516,7 +516,7 @@ class Corr: elif isinstance(partner, Obs): # Should this include CObs? new_content.append(np.array([correlate(o, partner) for o in t_slice])) else: - raise TypeError("Can only correlate with an Obs or a Corr.") + raise Exception("Can only correlate with an Obs or a Corr.") return Corr(new_content) @@ -583,7 +583,7 @@ class Corr: Available choice: symmetric, forward, backward, improved, log, default: symmetric """ if self.N != 1: - raise ValueError("deriv only implemented for one-dimensional correlators.") + raise Exception("deriv only implemented for one-dimensional correlators.") if variant == "symmetric": newcontent = [] for t in range(1, self.T - 1): @@ -592,7 +592,7 @@ class Corr: else: newcontent.append(0.5 * (self.content[t + 1] - self.content[t - 1])) if (all([x is None for x in newcontent])): - raise ValueError('Derivative is undefined at all timeslices') + raise Exception('Derivative is undefined at all timeslices') return Corr(newcontent, padding=[1, 1]) elif variant == "forward": newcontent = [] @@ -602,7 +602,7 @@ class Corr: else: newcontent.append(self.content[t + 1] - self.content[t]) if (all([x is None for x in newcontent])): - raise ValueError("Derivative is undefined at all timeslices") + raise Exception("Derivative is undefined at all timeslices") return Corr(newcontent, padding=[0, 1]) elif variant == "backward": newcontent = [] @@ -612,7 +612,7 @@ class Corr: else: newcontent.append(self.content[t] - self.content[t - 1]) if (all([x is None for x in newcontent])): - raise ValueError("Derivative is undefined at all timeslices") + raise Exception("Derivative is undefined at all timeslices") return Corr(newcontent, padding=[1, 0]) elif variant == "improved": newcontent = [] @@ -622,7 +622,7 @@ class Corr: else: newcontent.append((1 / 12) * (self.content[t - 2] - 8 * self.content[t - 1] + 8 * self.content[t + 1] - self.content[t + 2])) if (all([x is None for x in newcontent])): - raise ValueError('Derivative is undefined at all timeslices') + raise Exception('Derivative is undefined at all timeslices') return Corr(newcontent, padding=[2, 2]) elif variant == 'log': newcontent = [] @@ -632,11 +632,11 @@ class Corr: else: newcontent.append(np.log(self.content[t])) if (all([x is None for x in newcontent])): - raise ValueError("Log is undefined at all timeslices") + raise Exception("Log is undefined at all timeslices") logcorr = Corr(newcontent) return self * logcorr.deriv('symmetric') 
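For reference, the finite-difference stencils behind the `deriv` variants above can be checked directly in plain numpy on a smooth test function (the function and step values here are illustrative only):

```python
import numpy as np

t = np.arange(10, dtype=float)
f = np.exp(-0.3 * t)

# 'symmetric' variant: 0.5 * (f[t+1] - f[t-1])
symmetric = 0.5 * (f[2:] - f[:-2])
# 'improved' variant: (f[t-2] - 8 f[t-1] + 8 f[t+1] - f[t+2]) / 12
improved = (f[:-4] - 8 * f[1:-3] + 8 * f[3:-1] - f[4:]) / 12

exact = -0.3 * np.exp(-0.3 * t)
print(np.max(np.abs(symmetric - exact[1:-1])))  # O(a^2) discretization error
print(np.max(np.abs(improved - exact[2:-2])))   # O(a^4), markedly smaller
```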
else: - raise ValueError("Unknown variant.") + raise Exception("Unknown variant.") def second_deriv(self, variant="symmetric"): r"""Return the second derivative of the correlator with respect to x0. @@ -656,7 +656,7 @@ class Corr: $$f(x) = \tilde{\partial}^2_0 log(f(x_0))+(\tilde{\partial}_0 log(f(x_0)))^2$$ """ if self.N != 1: - raise ValueError("second_deriv only implemented for one-dimensional correlators.") + raise Exception("second_deriv only implemented for one-dimensional correlators.") if variant == "symmetric": newcontent = [] for t in range(1, self.T - 1): @@ -665,7 +665,7 @@ class Corr: else: newcontent.append((self.content[t + 1] - 2 * self.content[t] + self.content[t - 1])) if (all([x is None for x in newcontent])): - raise ValueError("Derivative is undefined at all timeslices") + raise Exception("Derivative is undefined at all timeslices") return Corr(newcontent, padding=[1, 1]) elif variant == "big_symmetric": newcontent = [] @@ -675,7 +675,7 @@ class Corr: else: newcontent.append((self.content[t + 2] - 2 * self.content[t] + self.content[t - 2]) / 4) if (all([x is None for x in newcontent])): - raise ValueError("Derivative is undefined at all timeslices") + raise Exception("Derivative is undefined at all timeslices") return Corr(newcontent, padding=[2, 2]) elif variant == "improved": newcontent = [] @@ -685,7 +685,7 @@ class Corr: else: newcontent.append((1 / 12) * (-self.content[t + 2] + 16 * self.content[t + 1] - 30 * self.content[t] + 16 * self.content[t - 1] - self.content[t - 2])) if (all([x is None for x in newcontent])): - raise ValueError("Derivative is undefined at all timeslices") + raise Exception("Derivative is undefined at all timeslices") return Corr(newcontent, padding=[2, 2]) elif variant == 'log': newcontent = [] @@ -695,11 +695,11 @@ class Corr: else: newcontent.append(np.log(self.content[t])) if (all([x is None for x in newcontent])): - raise ValueError("Log is undefined at all timeslices") + raise Exception("Log is undefined at all timeslices") logcorr = Corr(newcontent) return self * (logcorr.second_deriv('symmetric') + (logcorr.deriv('symmetric'))**2) else: - raise ValueError("Unknown variant.") + raise Exception("Unknown variant.") def m_eff(self, variant='log', guess=1.0): """Returns the effective mass of the correlator as correlator object @@ -728,7 +728,7 @@ class Corr: else: newcontent.append(self.content[t] / self.content[t + 1]) if (all([x is None for x in newcontent])): - raise ValueError('m_eff is undefined at all timeslices') + raise Exception('m_eff is undefined at all timeslices') return np.log(Corr(newcontent, padding=[0, 1])) @@ -742,7 +742,7 @@ class Corr: else: newcontent.append(self.content[t - 1] / self.content[t + 1]) if (all([x is None for x in newcontent])): - raise ValueError('m_eff is undefined at all timeslices') + raise Exception('m_eff is undefined at all timeslices') return np.log(Corr(newcontent, padding=[1, 1])) / 2 @@ -767,7 +767,7 @@ class Corr: else: newcontent.append(np.abs(find_root(self.content[t][0] / self.content[t + 1][0], root_function, guess=guess))) if (all([x is None for x in newcontent])): - raise ValueError('m_eff is undefined at all timeslices') + raise Exception('m_eff is undefined at all timeslices') return Corr(newcontent, padding=[0, 1]) @@ -779,11 +779,11 @@ class Corr: else: newcontent.append((self.content[t + 1] + self.content[t - 1]) / (2 * self.content[t])) if (all([x is None for x in newcontent])): - raise ValueError("m_eff is undefined at all timeslices") + raise Exception("m_eff is undefined at 
all timeslices") return np.arccosh(Corr(newcontent, padding=[1, 1])) else: - raise ValueError('Unknown variant.') + raise Exception('Unknown variant.') def fit(self, function, fitrange=None, silent=False, **kwargs): r'''Fits function to the data @@ -801,7 +801,7 @@ class Corr: Decides whether output is printed to the standard output. ''' if self.N != 1: - raise ValueError("Correlator must be projected before fitting") + raise Exception("Correlator must be projected before fitting") if fitrange is None: if self.prange: @@ -810,12 +810,12 @@ class Corr: fitrange = [0, self.T - 1] else: if not isinstance(fitrange, list): - raise TypeError("fitrange has to be a list with two elements") + raise Exception("fitrange has to be a list with two elements") if len(fitrange) != 2: - raise ValueError("fitrange has to have exactly two elements [fit_start, fit_stop]") + raise Exception("fitrange has to have exactly two elements [fit_start, fit_stop]") - xs = np.array([x for x in range(fitrange[0], fitrange[1] + 1) if self.content[x] is not None]) - ys = np.array([self.content[x][0] for x in range(fitrange[0], fitrange[1] + 1) if self.content[x] is not None]) + xs = np.array([x for x in range(fitrange[0], fitrange[1] + 1) if not self.content[x] is None]) + ys = np.array([self.content[x][0] for x in range(fitrange[0], fitrange[1] + 1) if not self.content[x] is None]) result = least_squares(xs, ys, function, silent=silent, **kwargs) return result @@ -840,9 +840,9 @@ class Corr: else: raise Exception("no plateau range provided") if self.N != 1: - raise ValueError("Correlator must be projected before getting a plateau.") + raise Exception("Correlator must be projected before getting a plateau.") if (all([self.content[t] is None for t in range(plateau_range[0], plateau_range[1] + 1)])): - raise ValueError("plateau is undefined at all timeslices in plateaurange.") + raise Exception("plateau is undefined at all timeslices in plateaurange.") if auto_gamma: self.gamma_method() if method == "fit": @@ -854,16 +854,16 @@ class Corr: return returnvalue else: - raise ValueError("Unsupported plateau method: " + method) + raise Exception("Unsupported plateau method: " + method) def set_prange(self, prange): """Sets the attribute prange of the Corr object.""" if not len(prange) == 2: - raise ValueError("prange must be a list or array with two values") + raise Exception("prange must be a list or array with two values") if not ((isinstance(prange[0], int)) and (isinstance(prange[1], int))): - raise TypeError("Start and end point must be integers") - if not (0 <= prange[0] <= self.T and 0 <= prange[1] <= self.T and prange[0] <= prange[1]): - raise ValueError("Start and end point must define a range in the interval 0,T") + raise Exception("Start and end point must be integers") + if not (0 <= prange[0] <= self.T and 0 <= prange[1] <= self.T and prange[0] < prange[1]): + raise Exception("Start and end point must define a range in the interval 0,T") self.prange = prange return @@ -900,7 +900,7 @@ class Corr: Optional title of the figure. 
""" if self.N != 1: - raise ValueError("Correlator must be projected before plotting") + raise Exception("Correlator must be projected before plotting") if auto_gamma: self.gamma_method() @@ -941,7 +941,7 @@ class Corr: hide_from = None ax1.errorbar(x[:hide_from], y[:hide_from], y_err[:hide_from], label=corr.tag, mfc=plt.rcParams['axes.facecolor']) else: - raise TypeError("'comp' must be a correlator or a list of correlators.") + raise Exception("'comp' must be a correlator or a list of correlators.") if plateau: if isinstance(plateau, Obs): @@ -950,14 +950,14 @@ class Corr: ax1.axhline(y=plateau.value, linewidth=2, color=plt.rcParams['text.color'], alpha=0.6, marker=',', ls='--', label=str(plateau)) ax1.axhspan(plateau.value - plateau.dvalue, plateau.value + plateau.dvalue, alpha=0.25, color=plt.rcParams['text.color'], ls='-') else: - raise TypeError("'plateau' must be an Obs") + raise Exception("'plateau' must be an Obs") if references: if isinstance(references, list): for ref in references: ax1.axhline(y=ref, linewidth=1, color=plt.rcParams['text.color'], alpha=0.6, marker=',', ls='--') else: - raise TypeError("'references' must be a list of floating pint values.") + raise Exception("'references' must be a list of floating pint values.") if self.prange: ax1.axvline(self.prange[0], 0, 1, ls='-', marker=',', color="black", zorder=0) @@ -991,7 +991,7 @@ class Corr: if isinstance(save, str): fig.savefig(save, bbox_inches='tight') else: - raise TypeError("'save' has to be a string.") + raise Exception("'save' has to be a string.") def spaghetti_plot(self, logscale=True): """Produces a spaghetti plot of the correlator suited to monitor exceptional configurations. @@ -1002,7 +1002,7 @@ class Corr: Determines whether the scale of the y-axis is logarithmic or standard. 
""" if self.N != 1: - raise ValueError("Correlator needs to be projected first.") + raise Exception("Correlator needs to be projected first.") mc_names = list(set([item for sublist in [sum(map(o[0].e_content.get, o[0].mc_names), []) for o in self.content if o is not None] for item in sublist])) x0_vals = [n for (n, o) in zip(np.arange(self.T), self.content) if o is not None] @@ -1044,7 +1044,7 @@ class Corr: elif datatype == "pickle": dump_object(self, filename, **kwargs) else: - raise ValueError("Unknown datatype " + str(datatype)) + raise Exception("Unknown datatype " + str(datatype)) def print(self, print_range=None): print(self.__repr__(print_range)) @@ -1094,7 +1094,7 @@ class Corr: def __add__(self, y): if isinstance(y, Corr): if ((self.N != y.N) or (self.T != y.T)): - raise ValueError("Addition of Corrs with different shape") + raise Exception("Addition of Corrs with different shape") newcontent = [] for t in range(self.T): if _check_for_none(self, self.content[t]) or _check_for_none(y, y.content[t]): @@ -1122,7 +1122,7 @@ class Corr: def __mul__(self, y): if isinstance(y, Corr): if not ((self.N == 1 or y.N == 1 or self.N == y.N) and self.T == y.T): - raise ValueError("Multiplication of Corr object requires N=N or N=1 and T=T") + raise Exception("Multiplication of Corr object requires N=N or N=1 and T=T") newcontent = [] for t in range(self.T): if _check_for_none(self, self.content[t]) or _check_for_none(y, y.content[t]): @@ -1193,7 +1193,7 @@ class Corr: def __truediv__(self, y): if isinstance(y, Corr): if not ((self.N == 1 or y.N == 1 or self.N == y.N) and self.T == y.T): - raise ValueError("Multiplication of Corr object requires N=N or N=1 and T=T") + raise Exception("Multiplication of Corr object requires N=N or N=1 and T=T") newcontent = [] for t in range(self.T): if _check_for_none(self, self.content[t]) or _check_for_none(y, y.content[t]): @@ -1207,16 +1207,16 @@ class Corr: newcontent[t] = None if all([item is None for item in newcontent]): - raise ValueError("Division returns completely undefined correlator") + raise Exception("Division returns completely undefined correlator") return Corr(newcontent) elif isinstance(y, (Obs, CObs)): if isinstance(y, Obs): if y.value == 0: - raise ValueError('Division by zero will return undefined correlator') + raise Exception('Division by zero will return undefined correlator') if isinstance(y, CObs): if y.is_zero(): - raise ValueError('Division by zero will return undefined correlator') + raise Exception('Division by zero will return undefined correlator') newcontent = [] for t in range(self.T): @@ -1228,7 +1228,7 @@ class Corr: elif isinstance(y, (int, float)): if y == 0: - raise ValueError('Division by zero will return undefined correlator') + raise Exception('Division by zero will return undefined correlator') newcontent = [] for t in range(self.T): if _check_for_none(self, self.content[t]): @@ -1284,7 +1284,7 @@ class Corr: if np.isnan(tmp_sum.value): newcontent[t] = None if all([item is None for item in newcontent]): - raise ValueError('Operation returns undefined correlator') + raise Exception('Operation returns undefined correlator') return Corr(newcontent) def sin(self): @@ -1392,13 +1392,13 @@ class Corr: ''' if self.N == 1: - raise ValueError('Method cannot be applied to one-dimensional correlators.') + raise Exception('Method cannot be applied to one-dimensional correlators.') if basematrix is None: basematrix = self if Ntrunc >= basematrix.N: - raise ValueError('Cannot truncate using Ntrunc <= %d' % (basematrix.N)) + raise 
Exception('Cannot truncate using Ntrunc <= %d' % (basematrix.N)) if basematrix.N != self.N: - raise ValueError('basematrix and targetmatrix have to be of the same size.') + raise Exception('basematrix and targetmatrix have to be of the same size.') evecs = basematrix.GEVP(t0proj, tproj, sort=None)[:Ntrunc] diff --git a/pyerrors/dirac.py b/pyerrors/dirac.py index 016e4722..48d1b547 100644 --- a/pyerrors/dirac.py +++ b/pyerrors/dirac.py @@ -34,7 +34,7 @@ def epsilon_tensor(i, j, k): """ test_set = set((i, j, k)) if not (test_set <= set((1, 2, 3)) or test_set <= set((0, 1, 2))): - raise ValueError("Unexpected input", i, j, k) + raise Exception("Unexpected input", i, j, k) return (i - j) * (j - k) * (k - i) / 2 @@ -52,7 +52,7 @@ def epsilon_tensor_rank4(i, j, k, o): """ test_set = set((i, j, k, o)) if not (test_set <= set((1, 2, 3, 4)) or test_set <= set((0, 1, 2, 3))): - raise ValueError("Unexpected input", i, j, k, o) + raise Exception("Unexpected input", i, j, k, o) return (i - j) * (j - k) * (k - i) * (i - o) * (j - o) * (o - k) / 12 @@ -92,5 +92,5 @@ def Grid_gamma(gamma_tag): elif gamma_tag == 'SigmaZT': g = 0.5 * (gamma[2] @ gamma[3] - gamma[3] @ gamma[2]) else: - raise ValueError('Unkown gamma structure', gamma_tag) + raise Exception('Unkown gamma structure', gamma_tag) return g diff --git a/pyerrors/fits.py b/pyerrors/fits.py index 675bdca6..8ed540c5 100644 --- a/pyerrors/fits.py +++ b/pyerrors/fits.py @@ -293,7 +293,7 @@ def least_squares(x, y, func, priors=None, silent=False, **kwargs): if len(key_ls) > 1: for key in key_ls: if np.asarray(yd[key]).shape != funcd[key](np.arange(n_parms), xd[key]).shape: - raise ValueError(f"Fit function {key} returns the wrong shape ({funcd[key](np.arange(n_parms), xd[key]).shape} instead of {np.asarray(yd[key]).shape})\nIf the fit function is just a constant you could try adding x*0 to get the correct shape.") + raise ValueError(f"Fit function {key} returns the wrong shape ({funcd[key](np.arange(n_parms), xd[key]).shape} instead of {xd[key].shape})\nIf the fit function is just a constant you could try adding x*0 to get the correct shape.") if not silent: print('Fit with', n_parms, 'parameter' + 's' * (n_parms > 1)) @@ -365,8 +365,6 @@ def least_squares(x, y, func, priors=None, silent=False, **kwargs): if (chol_inv[1] != key_ls): raise ValueError('The keys of inverse covariance matrix are not the same or do not appear in the same order as the x and y values.') chol_inv = chol_inv[0] - if np.any(np.diag(chol_inv) <= 0) or (not np.all(chol_inv == np.tril(chol_inv))): - raise ValueError('The inverse covariance matrix inv_chol_cov_matrix[0] has to be a lower triangular matrix constructed from a Cholesky decomposition.') else: corr = covariance(y_all, correlation=True, **kwargs) inverrdiag = np.diag(1 / np.asarray(dy_f)) diff --git a/pyerrors/input/__init__.py b/pyerrors/input/__init__.py index 257c5bd8..6910bd2a 100644 --- a/pyerrors/input/__init__.py +++ b/pyerrors/input/__init__.py @@ -5,11 +5,11 @@ r''' For comparison with other analysis workflows `pyerrors` can also generate jackknife samples from an `Obs` object or import jackknife samples into an `Obs` object. See `pyerrors.obs.Obs.export_jackknife` and `pyerrors.obs.import_jackknife` for details. ''' -from . import bdio as bdio -from . import dobs as dobs -from . import hadrons as hadrons -from . import json as json -from . import misc as misc -from . import openQCD as openQCD -from . import pandas as pandas -from . import sfcf as sfcf +from . import bdio +from . import dobs +from . 
import hadrons +from . import json +from . import misc +from . import openQCD +from . import pandas +from . import sfcf diff --git a/pyerrors/input/dobs.py b/pyerrors/input/dobs.py index 6907ec3c..b8b005ff 100644 --- a/pyerrors/input/dobs.py +++ b/pyerrors/input/dobs.py @@ -79,7 +79,7 @@ def _dict_to_xmlstring_spaces(d, space=' '): o += space o += li + '\n' if li.startswith('<') and not cm: - if '<%s' % ('/') not in li: + if not '<%s' % ('/') in li: c += 1 cm = False return o @@ -529,8 +529,7 @@ def import_dobs_string(content, full_output=False, separator_insertion=True): deltas.append(repdeltas) idl.append(repidl) - obsmeans = [np.average(deltas[j]) for j in range(len(deltas))] - res.append(Obs([np.array(deltas[j]) - obsmeans[j] for j in range(len(obsmeans))], obs_names, idl=idl, means=obsmeans)) + res.append(Obs(deltas, obs_names, idl=idl)) res[-1]._value = mean[i] _check(len(e_names) == ne) @@ -672,7 +671,7 @@ def _dobsdict_to_xmlstring_spaces(d, space=' '): o += space o += li + '\n' if li.startswith('<') and not cm: - if '<%s' % ('/') not in li: + if not '<%s' % ('/') in li: c += 1 cm = False return o diff --git a/pyerrors/input/hadrons.py b/pyerrors/input/hadrons.py index 525f564a..4390e3f0 100644 --- a/pyerrors/input/hadrons.py +++ b/pyerrors/input/hadrons.py @@ -113,7 +113,7 @@ def read_hd5(filestem, ens_id, group, attrs=None, idl=None, part="real"): infos = [] for hd5_file in files: h5file = h5py.File(path + '/' + hd5_file, "r") - if group + '/' + entry not in h5file: + if not group + '/' + entry in h5file: raise Exception("Entry '" + entry + "' not contained in the files.") raw_data = h5file[group + '/' + entry + '/corr'] real_data = raw_data[:].view("complex") @@ -186,7 +186,7 @@ def _extract_real_arrays(path, files, tree, keys): for hd5_file in files: h5file = h5py.File(path + '/' + hd5_file, "r") for key in keys: - if tree + '/' + key not in h5file: + if not tree + '/' + key in h5file: raise Exception("Entry '" + key + "' not contained in the files.") raw_data = h5file[tree + '/' + key + '/data'] real_data = raw_data[:].astype(np.double) diff --git a/pyerrors/input/json.py b/pyerrors/input/json.py index a2008f9c..ca3fb0d2 100644 --- a/pyerrors/input/json.py +++ b/pyerrors/input/json.py @@ -133,11 +133,10 @@ def create_json_string(ol, description='', indent=1): names = [] idl = [] for key, value in obs.idl.items(): - samples.append(np.array([np.nan] * len(value))) + samples.append([np.nan] * len(value)) names.append(key) idl.append(value) - my_obs = Obs(samples, names, idl, means=[np.nan for n in names]) - my_obs._value = np.nan + my_obs = Obs(samples, names, idl) my_obs._covobs = obs._covobs for name in obs._covobs: my_obs.names.append(name) @@ -332,8 +331,7 @@ def _parse_json_dict(json_dict, verbose=True, full_output=False): cd = _gen_covobsd_from_cdatad(o.get('cdata', {})) if od: - r_offsets = [np.average([ddi[0] for ddi in di]) for di in od['deltas']] - ret = Obs([np.array([ddi[0] for ddi in od['deltas'][i]]) - r_offsets[i] for i in range(len(od['deltas']))], od['names'], idl=od['idl'], means=[ro + values[0] for ro in r_offsets]) + ret = Obs([[ddi[0] + values[0] for ddi in di] for di in od['deltas']], od['names'], idl=od['idl']) ret._value = values[0] else: ret = Obs([], [], means=[]) @@ -358,8 +356,7 @@ def _parse_json_dict(json_dict, verbose=True, full_output=False): taglist = o.get('tag', layout * [None]) for i in range(layout): if od: - r_offsets = np.array([np.average(di[:, i]) for di in od['deltas']]) - ret.append(Obs([od['deltas'][j][:, i] - r_offsets[j] for j in 
range(len(od['deltas']))], od['names'], idl=od['idl'], means=[ro + values[i] for ro in r_offsets])) + ret.append(Obs([list(di[:, i] + values[i]) for di in od['deltas']], od['names'], idl=od['idl'])) ret[-1]._value = values[i] else: ret.append(Obs([], [], means=[])) @@ -386,8 +383,7 @@ def _parse_json_dict(json_dict, verbose=True, full_output=False): taglist = o.get('tag', N * [None]) for i in range(N): if od: - r_offsets = np.array([np.average(di[:, i]) for di in od['deltas']]) - ret.append(Obs([od['deltas'][j][:, i] - r_offsets[j] for j in range(len(od['deltas']))], od['names'], idl=od['idl'], means=[ro + values[i] for ro in r_offsets])) + ret.append(Obs([di[:, i] + values[i] for di in od['deltas']], od['names'], idl=od['idl'])) ret[-1]._value = values[i] else: ret.append(Obs([], [], means=[])) @@ -571,6 +567,7 @@ def _ol_from_dict(ind, reps='DICTOBS'): counter = 0 def dict_replace_obs(d): + nonlocal ol nonlocal counter x = {} for k, v in d.items(): @@ -591,6 +588,7 @@ def _ol_from_dict(ind, reps='DICTOBS'): return x def list_replace_obs(li): + nonlocal ol nonlocal counter x = [] for e in li: @@ -611,6 +609,7 @@ def _ol_from_dict(ind, reps='DICTOBS'): return x def obslist_replace_obs(li): + nonlocal ol nonlocal counter il = [] for e in li: @@ -691,6 +690,7 @@ def _od_from_list_and_dict(ol, ind, reps='DICTOBS'): def dict_replace_string(d): nonlocal counter + nonlocal ol x = {} for k, v in d.items(): if isinstance(v, dict): @@ -706,6 +706,7 @@ def _od_from_list_and_dict(ol, ind, reps='DICTOBS'): def list_replace_string(li): nonlocal counter + nonlocal ol x = [] for e in li: if isinstance(e, list): diff --git a/pyerrors/input/openQCD.py b/pyerrors/input/openQCD.py index 278977d2..158bcaca 100644 --- a/pyerrors/input/openQCD.py +++ b/pyerrors/input/openQCD.py @@ -47,7 +47,7 @@ def read_rwms(path, prefix, version='2.0', names=None, **kwargs): Reweighting factors read """ known_oqcd_versions = ['1.4', '1.6', '2.0'] - if version not in known_oqcd_versions: + if not (version in known_oqcd_versions): raise Exception('Unknown openQCD version defined!') print("Working with openQCD version " + version) if 'postfix' in kwargs: diff --git a/pyerrors/input/pandas.py b/pyerrors/input/pandas.py index af446cfc..13482983 100644 --- a/pyerrors/input/pandas.py +++ b/pyerrors/input/pandas.py @@ -1,7 +1,6 @@ import warnings import gzip import sqlite3 -from contextlib import closing import pandas as pd from ..obs import Obs from ..correlators import Corr @@ -30,8 +29,9 @@ def to_sql(df, table_name, db, if_exists='fail', gz=True, **kwargs): None """ se_df = _serialize_df(df, gz=gz) - with closing(sqlite3.connect(db)) as con: - se_df.to_sql(table_name, con=con, if_exists=if_exists, index=False, **kwargs) + con = sqlite3.connect(db) + se_df.to_sql(table_name, con, if_exists=if_exists, index=False, **kwargs) + con.close() def read_sql(sql, db, auto_gamma=False, **kwargs): @@ -52,8 +52,9 @@ def read_sql(sql, db, auto_gamma=False, **kwargs): data : pandas.DataFrame Dataframe with the content of the sqlite database. 
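The `to_sql` change above (and the matching `read_sql` change that follows) differs only in how the sqlite3 connection is closed: `contextlib.closing` guarantees `con.close()` even when the enclosed call raises, whereas an explicit `con.close()` is skipped on an exception. The pattern in isolation, with an in-memory database and an illustrative table:

```python
import sqlite3
from contextlib import closing

with closing(sqlite3.connect(':memory:')) as con:
    con.execute('CREATE TABLE demo (x REAL)')
    con.execute('INSERT INTO demo VALUES (1.5)')
    print(con.execute('SELECT x FROM demo').fetchall())
# The connection is closed here, even if an exception was raised inside
# the with block; sqlite3 connections do not close themselves on __exit__.
```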
""" - with closing(sqlite3.connect(db)) as con: - extract_df = pd.read_sql(sql, con=con, **kwargs) + con = sqlite3.connect(db) + extract_df = pd.read_sql(sql, con, **kwargs) + con.close() return _deserialize_df(extract_df, auto_gamma=auto_gamma) diff --git a/pyerrors/input/sfcf.py b/pyerrors/input/sfcf.py index 0431788a..e9f2837e 100644 --- a/pyerrors/input/sfcf.py +++ b/pyerrors/input/sfcf.py @@ -127,8 +127,7 @@ def read_sfcf_multi(path, prefix, name_list, quarks_list=['.*'], corr_type_list= check_configs: list[list[int]] list of list of supposed configs, eg. [range(1,1000)] for one replicum with 1000 configs - rep_string: str - Separator of ensemble name and replicum. Example: In "ensAr0", "r" would be the separator string. + Returns ------- result: dict[list[Obs]] @@ -200,9 +199,9 @@ def read_sfcf_multi(path, prefix, name_list, quarks_list=['.*'], corr_type_list= else: ens_name = kwargs.get("ens_name") if not appended: - new_names = _get_rep_names(ls, ens_name, rep_sep=(kwargs.get('rep_string', 'r'))) + new_names = _get_rep_names(ls, ens_name) else: - new_names = _get_appended_rep_names(ls, prefix, name_list[0], ens_name, rep_sep=(kwargs.get('rep_string', 'r'))) + new_names = _get_appended_rep_names(ls, prefix, name_list[0], ens_name) new_names = sort_names(new_names) idl = [] @@ -647,22 +646,22 @@ def _read_append_rep(filename, pattern, b2b, cfg_separator, im, single): return T, rep_idl, data -def _get_rep_names(ls, ens_name=None, rep_sep='r'): +def _get_rep_names(ls, ens_name=None): new_names = [] for entry in ls: try: - idx = entry.index(rep_sep) + idx = entry.index('r') except Exception: raise Exception("Automatic recognition of replicum failed, please enter the key word 'names'.") if ens_name: - new_names.append(ens_name + '|' + entry[idx:]) + new_names.append('ens_name' + '|' + entry[idx:]) else: new_names.append(entry[:idx] + '|' + entry[idx:]) return new_names -def _get_appended_rep_names(ls, prefix, name, ens_name=None, rep_sep='r'): +def _get_appended_rep_names(ls, prefix, name, ens_name=None): new_names = [] for exc in ls: if not fnmatch.fnmatch(exc, prefix + '*.' + name): @@ -671,12 +670,12 @@ def _get_appended_rep_names(ls, prefix, name, ens_name=None, rep_sep='r'): for entry in ls: myentry = entry[:-len(name) - 1] try: - idx = myentry.index(rep_sep) + idx = myentry.index('r') except Exception: raise Exception("Automatic recognition of replicum failed, please enter the key word 'names'.") if ens_name: - new_names.append(ens_name + '|' + entry[idx:]) + new_names.append('ens_name' + '|' + entry[idx:]) else: new_names.append(myentry[:idx] + '|' + myentry[idx:]) return new_names diff --git a/pyerrors/obs.py b/pyerrors/obs.py index 87591cd9..a1c2fd55 100644 --- a/pyerrors/obs.py +++ b/pyerrors/obs.py @@ -82,8 +82,6 @@ class Obs: raise ValueError('Names are not unique.') if not all(isinstance(x, str) for x in names): raise TypeError('All names have to be strings.') - if len(set([o.split('|')[0] for o in names])) > 1: - raise ValueError('Cannot initialize Obs based on multiple ensembles. 
Please average separate Obs from each ensemble.') else: if not isinstance(names[0], str): raise TypeError('All names have to be strings.') @@ -224,7 +222,7 @@ class Obs: tmp = kwargs.get(kwarg_name) if isinstance(tmp, (int, float)): if tmp < 0: - raise ValueError(kwarg_name + ' has to be larger or equal to 0.') + raise Exception(kwarg_name + ' has to be larger or equal to 0.') for e, e_name in enumerate(self.e_names): getattr(self, kwarg_name)[e_name] = tmp else: @@ -293,7 +291,7 @@ class Obs: texp = self.tau_exp[e_name] # Critical slowing down analysis if w_max // 2 <= 1: - raise ValueError("Need at least 8 samples for tau_exp error analysis") + raise Exception("Need at least 8 samples for tau_exp error analysis") for n in range(1, w_max // 2): _compute_drho(n + 1) if (self.e_rho[e_name][n] - self.N_sigma[e_name] * self.e_drho[e_name][n]) < 0 or n >= w_max // 2 - 2: @@ -622,7 +620,7 @@ class Obs: if not hasattr(self, 'e_dvalue'): raise Exception('Run the gamma method first.') if np.isclose(0.0, self._dvalue, atol=1e-15): - raise ValueError('Error is 0.0') + raise Exception('Error is 0.0') labels = self.e_names sizes = [self.e_dvalue[name] ** 2 for name in labels] / self._dvalue ** 2 fig1, ax1 = plt.subplots() @@ -661,7 +659,7 @@ class Obs: with open(file_name + '.p', 'wb') as fb: pickle.dump(self, fb) else: - raise TypeError("Unknown datatype " + str(datatype)) + raise Exception("Unknown datatype " + str(datatype)) def export_jackknife(self): """Export jackknife samples from the Obs @@ -678,7 +676,7 @@ class Obs: """ if len(self.names) != 1: - raise ValueError("'export_jackknife' is only implemented for Obs defined on one ensemble and replicum.") + raise Exception("'export_jackknife' is only implemented for Obs defined on one ensemble and replicum.") name = self.names[0] full_data = self.deltas[name] + self.r_values[name] @@ -713,7 +711,7 @@ class Obs: should agree with samples from a full bootstrap analysis up to O(1/N). 
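The resampling behind `export_jackknife` and `export_bootstrap` above can be illustrated in plain numpy. This is a generic delete-one jackknife and a naive bootstrap on uncorrelated toy data, not the exact storage layout pyerrors uses:

```python
import numpy as np

rng = np.random.default_rng(7)
data = rng.normal(1.0, 0.5, 500)
n = len(data)

# Delete-one jackknife means and the corresponding error of the mean.
jack = (data.sum() - data) / (n - 1)
jack_err = np.sqrt((n - 1) * np.mean((jack - jack.mean()) ** 2))

# Naive bootstrap: resample configurations with replacement.
boots = rng.choice(data, size=(1000, n), replace=True).mean(axis=1)
boot_err = boots.std(ddof=1)

print(jack_err, boot_err, data.std(ddof=1) / np.sqrt(n))  # all three agree
```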
""" if len(self.names) != 1: - raise ValueError("'export_boostrap' is only implemented for Obs defined on one ensemble and replicum.") + raise Exception("'export_boostrap' is only implemented for Obs defined on one ensemble and replicum.") name = self.names[0] length = self.N @@ -858,12 +856,15 @@ class Obs: def __pow__(self, y): if isinstance(y, Obs): - return derived_observable(lambda x, **kwargs: x[0] ** x[1], [self, y], man_grad=[y.value * self.value ** (y.value - 1), self.value ** y.value * np.log(self.value)]) + return derived_observable(lambda x: x[0] ** x[1], [self, y]) else: - return derived_observable(lambda x, **kwargs: x[0] ** y, [self], man_grad=[y * self.value ** (y - 1)]) + return derived_observable(lambda x: x[0] ** y, [self]) def __rpow__(self, y): - return derived_observable(lambda x, **kwargs: y ** x[0], [self], man_grad=[y ** self.value * np.log(y)]) + if isinstance(y, Obs): + return derived_observable(lambda x: x[0] ** x[1], [y, self]) + else: + return derived_observable(lambda x: y ** x[0], [self]) def __abs__(self): return derived_observable(lambda x: anp.abs(x[0]), [self]) @@ -1269,7 +1270,7 @@ def derived_observable(func, data, array_mode=False, **kwargs): if 'man_grad' in kwargs: deriv = np.asarray(kwargs.get('man_grad')) if new_values.shape + data.shape != deriv.shape: - raise ValueError('Manual derivative does not have correct shape.') + raise Exception('Manual derivative does not have correct shape.') elif kwargs.get('num_grad') is True: if multi > 0: raise Exception('Multi mode currently not supported for numerical derivative') @@ -1335,7 +1336,7 @@ def derived_observable(func, data, array_mode=False, **kwargs): new_covobs = {name: Covobs(0, allcov[name], name, grad=new_grad[name]) for name in new_grad} if not set(new_covobs.keys()).isdisjoint(new_deltas.keys()): - raise ValueError('The same name has been used for deltas and covobs!') + raise Exception('The same name has been used for deltas and covobs!') new_samples = [] new_means = [] new_idl = [] @@ -1376,7 +1377,7 @@ def _reduce_deltas(deltas, idx_old, idx_new): Has to be a subset of idx_old. 
""" if not len(deltas) == len(idx_old): - raise ValueError('Length of deltas and idx_old have to be the same: %d != %d' % (len(deltas), len(idx_old))) + raise Exception('Length of deltas and idx_old have to be the same: %d != %d' % (len(deltas), len(idx_old))) if type(idx_old) is range and type(idx_new) is range: if idx_old == idx_new: return deltas @@ -1384,7 +1385,7 @@ def _reduce_deltas(deltas, idx_old, idx_new): return deltas indices = np.intersect1d(idx_old, idx_new, assume_unique=True, return_indices=True)[1] if len(indices) < len(idx_new): - raise ValueError('Error in _reduce_deltas: Config of idx_new not in idx_old') + raise Exception('Error in _reduce_deltas: Config of idx_new not in idx_old') return np.array(deltas)[indices] @@ -1406,14 +1407,12 @@ def reweight(weight, obs, **kwargs): result = [] for i in range(len(obs)): if len(obs[i].cov_names): - raise ValueError('Error: Not possible to reweight an Obs that contains covobs!') + raise Exception('Error: Not possible to reweight an Obs that contains covobs!') if not set(obs[i].names).issubset(weight.names): - raise ValueError('Error: Ensembles do not fit') - if len(obs[i].mc_names) > 1 or len(weight.mc_names) > 1: - raise ValueError('Error: Cannot reweight an Obs that contains multiple ensembles.') + raise Exception('Error: Ensembles do not fit') for name in obs[i].names: if not set(obs[i].idl[name]).issubset(weight.idl[name]): - raise ValueError('obs[%d] has to be defined on a subset of the configs in weight.idl[%s]!' % (i, name)) + raise Exception('obs[%d] has to be defined on a subset of the configs in weight.idl[%s]!' % (i, name)) new_samples = [] w_deltas = {} for name in sorted(obs[i].names): @@ -1446,21 +1445,18 @@ def correlate(obs_a, obs_b): ----- Keep in mind to only correlate primary observables which have not been reweighted yet. The reweighting has to be applied after correlating the observables. - Only works if a single ensemble is present in the Obs. - Currently only works if ensemble content is identical (this is not strictly necessary). + Currently only works if ensembles are identical (this is not strictly necessary). 
""" - if len(obs_a.mc_names) > 1 or len(obs_b.mc_names) > 1: - raise ValueError('Error: Cannot correlate Obs that contain multiple ensembles.') if sorted(obs_a.names) != sorted(obs_b.names): - raise ValueError(f"Ensembles do not fit {set(sorted(obs_a.names)) ^ set(sorted(obs_b.names))}") + raise Exception(f"Ensembles do not fit {set(sorted(obs_a.names)) ^ set(sorted(obs_b.names))}") if len(obs_a.cov_names) or len(obs_b.cov_names): - raise ValueError('Error: Not possible to correlate Obs that contain covobs!') + raise Exception('Error: Not possible to correlate Obs that contain covobs!') for name in obs_a.names: if obs_a.shape[name] != obs_b.shape[name]: - raise ValueError('Shapes of ensemble', name, 'do not fit') + raise Exception('Shapes of ensemble', name, 'do not fit') if obs_a.idl[name] != obs_b.idl[name]: - raise ValueError('idl of ensemble', name, 'do not fit') + raise Exception('idl of ensemble', name, 'do not fit') if obs_a.reweighted is True: warnings.warn("The first observable is already reweighted.", RuntimeWarning) @@ -1562,7 +1558,7 @@ def invert_corr_cov_cholesky(corr, inverrdiag): condn = np.linalg.cond(corr) if condn > 0.1 / np.finfo(float).eps: - raise ValueError(f"Cannot invert correlation matrix as its condition number exceeds machine precision ({condn:1.2e})") + raise Exception(f"Cannot invert correlation matrix as its condition number exceeds machine precision ({condn:1.2e})") if condn > 1e13: warnings.warn("Correlation matrix may be ill-conditioned, condition number: {%1.2e}" % (condn), RuntimeWarning) chol = np.linalg.cholesky(corr) @@ -1643,7 +1639,7 @@ def _smooth_eigenvalues(corr, E): Number of eigenvalues to be left substantially unchanged """ if not (2 < E < corr.shape[0] - 1): - raise ValueError(f"'E' has to be between 2 and the dimension of the correlation matrix minus 1 ({corr.shape[0] - 1}).") + raise Exception(f"'E' has to be between 2 and the dimension of the correlation matrix minus 1 ({corr.shape[0] - 1}).") vals, vec = np.linalg.eigh(corr) lambda_min = np.mean(vals[:-E]) vals[vals < lambda_min] = lambda_min @@ -1762,11 +1758,7 @@ def import_bootstrap(boots, name, random_numbers): def merge_obs(list_of_obs): - """Combine all observables in list_of_obs into one new observable. - This allows to merge Obs that have been computed on multiple replica - of the same ensemble. - If you like to merge Obs that are based on several ensembles, please - average them yourself. + """Combine all observables in list_of_obs into one new observable Parameters ---------- @@ -1779,9 +1771,9 @@ def merge_obs(list_of_obs): """ replist = [item for obs in list_of_obs for item in obs.names] if (len(replist) == len(set(replist))) is False: - raise ValueError('list_of_obs contains duplicate replica: %s' % (str(replist))) + raise Exception('list_of_obs contains duplicate replica: %s' % (str(replist))) if any([len(o.cov_names) for o in list_of_obs]): - raise ValueError('Not possible to merge data that contains covobs!') + raise Exception('Not possible to merge data that contains covobs!') new_dict = {} idl_dict = {} for o in list_of_obs: @@ -1832,7 +1824,7 @@ def cov_Obs(means, cov, name, grad=None): for i in range(len(means)): ol.append(covobs_to_obs(Covobs(means[i], cov, name, pos=i, grad=grad))) if ol[0].covobs[name].N != len(means): - raise ValueError('You have to provide %d mean values!' % (ol[0].N)) + raise Exception('You have to provide %d mean values!' 
% (ol[0].N))
     if len(ol) == 1:
         return ol[0]
     return ol
@@ -1848,7 +1840,7 @@ def _determine_gap(o, e_content, e_name):
 
     gap = min(gaps)
     if not np.all([gi % gap == 0 for gi in gaps]):
-        raise ValueError(f"Replica for ensemble {e_name} do not have a common spacing.", gaps)
+        raise Exception(f"Replica for ensemble {e_name} do not have a common spacing.", gaps)
 
     return gap
diff --git a/pyerrors/version.py b/pyerrors/version.py
index 806254b1..930e2cd6 100644
--- a/pyerrors/version.py
+++ b/pyerrors/version.py
@@ -1 +1 @@
-__version__ = "2.15.0-dev"
+__version__ = "2.13.0"
diff --git a/pyproject.toml b/pyproject.toml
index 657ec5bb..0c4facc3 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,3 @@
 [build-system]
 requires = ["setuptools >= 63.0.0", "wheel"]
 build-backend = "setuptools.build_meta"
-
-[tool.ruff.lint]
-ignore = ["F403"]
diff --git a/setup.py b/setup.py
index 8c42f4a6..76efe7e2 100644
--- a/setup.py
+++ b/setup.py
@@ -30,6 +30,7 @@ setup(name='pyerrors',
       classifiers=[
           'Development Status :: 5 - Production/Stable',
           'Intended Audience :: Science/Research',
+          'License :: OSI Approved :: MIT License',
           'Programming Language :: Python :: 3',
           'Programming Language :: Python :: 3.9',
           'Programming Language :: Python :: 3.10',
diff --git a/tests/correlators_test.py b/tests/correlators_test.py
index fc3528d2..d6d2012c 100644
--- a/tests/correlators_test.py
+++ b/tests/correlators_test.py
@@ -129,7 +129,7 @@ def test_m_eff():
     with pytest.warns(RuntimeWarning):
         my_corr.m_eff('sinh')
 
-    with pytest.raises(ValueError):
+    with pytest.raises(Exception):
         my_corr.m_eff('unkown_variant')
 
 
@@ -140,7 +140,7 @@ def test_m_eff_negative_values():
     assert m_eff_log[padding + 1] is None
     m_eff_cosh = my_corr.m_eff('cosh')
     assert m_eff_cosh[padding + 1] is None
-    with pytest.raises(ValueError):
+    with pytest.raises(Exception):
         my_corr.m_eff('logsym')
 
 
@@ -155,7 +155,7 @@ def test_correlate():
     my_corr = pe.correlators.Corr([pe.pseudo_Obs(10, 0.1, 't'), pe.pseudo_Obs(0, 0.05, 't')])
     corr1 = my_corr.correlate(my_corr)
     corr2 = my_corr.correlate(my_corr[0])
-    with pytest.raises(TypeError):
+    with pytest.raises(Exception):
         corr3 = my_corr.correlate(7.3)
 
 
@@ -176,9 +176,9 @@ def test_fit_correlator():
     assert fit_res[0] == my_corr[0]
     assert fit_res[1] == my_corr[1] - my_corr[0]
 
-    with pytest.raises(TypeError):
+    with pytest.raises(Exception):
         my_corr.fit(f, "from 0 to 3")
-    with pytest.raises(ValueError):
+    with pytest.raises(Exception):
         my_corr.fit(f, [0, 2, 3])
 
 
@@ -256,11 +256,11 @@ def test_prange():
     corr = pe.correlators.Corr(corr_content)
 
     corr.set_prange([2, 4])
-    with pytest.raises(ValueError):
+    with pytest.raises(Exception):
         corr.set_prange([2])
-    with pytest.raises(TypeError):
+    with pytest.raises(Exception):
         corr.set_prange([2, 2.3])
-    with pytest.raises(ValueError):
+    with pytest.raises(Exception):
         corr.set_prange([4, 1])
 
 
diff --git a/tests/dirac_test.py b/tests/dirac_test.py
index 03605785..44812397 100644
--- a/tests/dirac_test.py
+++ b/tests/dirac_test.py
@@ -30,7 +30,7 @@ def test_grid_dirac():
                   'SigmaYZ',
                   'SigmaZT']:
         pe.dirac.Grid_gamma(gamma)
-    with pytest.raises(ValueError):
+    with pytest.raises(Exception):
         pe.dirac.Grid_gamma('Not a gamma matrix')
 
 
@@ -44,7 +44,7 @@ def test_epsilon_tensor():
              (1, 1, 3) : 0.0}
     for key, value in check.items():
         assert pe.dirac.epsilon_tensor(*key) == value
-    with pytest.raises(ValueError):
+    with pytest.raises(Exception):
         pe.dirac.epsilon_tensor(0, 1, 3)
 
 
@@ -59,5 +59,5 @@ def test_epsilon_tensor_rank4():
              (1, 2, 3, 1) : 0.0}
     for key, value in check.items():
         assert pe.dirac.epsilon_tensor_rank4(*key) == value
-    with pytest.raises(ValueError):
+    with pytest.raises(Exception):
         pe.dirac.epsilon_tensor_rank4(0, 1, 3, 4)
diff --git a/tests/fits_test.py b/tests/fits_test.py
index 283ff6a2..2eeb6a49 100644
--- a/tests/fits_test.py
+++ b/tests/fits_test.py
@@ -223,9 +223,6 @@ def test_inv_cov_matrix_input_least_squares():
         diff_inv_cov_combined_fit.gamma_method()
         assert(diff_inv_cov_combined_fit.is_zero(atol=1e-12))
 
-    with pytest.raises(ValueError):
-        pe.least_squares(x_dict, data_dict, fitf_dict, correlated_fit = True, inv_chol_cov_matrix = [corr,chol_inv_keys_combined_fit])
-
 def test_least_squares_invalid_inv_cov_matrix_input():
     xvals = []
     yvals = []
diff --git a/tests/json_io_test.py b/tests/json_io_test.py
index a9263691..dafaaa41 100644
--- a/tests/json_io_test.py
+++ b/tests/json_io_test.py
@@ -12,7 +12,7 @@ def test_jsonio():
     o = pe.pseudo_Obs(1.0, .2, 'one')
     o2 = pe.pseudo_Obs(0.5, .1, 'two|r1')
     o3 = pe.pseudo_Obs(0.5, .1, 'two|r2')
-    o4 = pe.merge_obs([o2, o3, pe.pseudo_Obs(0.5, .1, 'two|r3', samples=3221)])
+    o4 = pe.merge_obs([o2, o3])
     otag = 'This has been merged!'
     o4.tag = otag
     do = o - .2 * o4
@@ -101,8 +101,8 @@ def test_json_string_reconstruction():
 
 
 def test_json_corr_io():
-    my_list = [pe.Obs([np.random.normal(1.0, 0.1, 100), np.random.normal(1.0, 0.1, 321)], ['ens1|r1', 'ens1|r2'], idl=[range(1, 201, 2), range(321)]) for o in range(8)]
-    rw_list = pe.reweight(pe.Obs([np.random.normal(1.0, 0.1, 100), np.random.normal(1.0, 0.1, 321)], ['ens1|r1', 'ens1|r2'], idl=[range(1, 201, 2), range(321)]), my_list)
+    my_list = [pe.Obs([np.random.normal(1.0, 0.1, 100)], ['ens1']) for o in range(8)]
+    rw_list = pe.reweight(pe.Obs([np.random.normal(1.0, 0.1, 100)], ['ens1']), my_list)
 
     for obs_list in [my_list, rw_list]:
         for tag in [None, "test"]:
@@ -111,51 +111,40 @@ def test_json_corr_io():
                 for corr_tag in [None, 'my_Corr_tag']:
                     for prange in [None, [3, 6]]:
                         for gap in [False, True]:
-                            for mult in [1., pe.cov_Obs([12.22, 1.21], [.212**2, .11**2], 'renorm')[0]]:
-                                my_corr = mult * pe.Corr(obs_list, padding=[pad, pad], prange=prange)
-                                my_corr.tag = corr_tag
-                                if gap:
-                                    my_corr.content[4] = None
-                                pe.input.json.dump_to_json(my_corr, 'corr')
-                                recover = pe.input.json.load_json('corr')
-                                os.remove('corr.json.gz')
-                                assert np.all([o.is_zero() for o in [x for x in (my_corr - recover) if x is not None]])
-                                for index, entry in enumerate(my_corr):
-                                    if entry is None:
-                                        assert recover[index] is None
-                                assert my_corr.tag == recover.tag
-                                assert my_corr.prange == recover.prange
-                                assert my_corr.reweighted == recover.reweighted
+                            my_corr = pe.Corr(obs_list, padding=[pad, pad], prange=prange)
+                            my_corr.tag = corr_tag
+                            if gap:
+                                my_corr.content[4] = None
+                            pe.input.json.dump_to_json(my_corr, 'corr')
+                            recover = pe.input.json.load_json('corr')
+                            os.remove('corr.json.gz')
+                            assert np.all([o.is_zero() for o in [x for x in (my_corr - recover) if x is not None]])
+                            for index, entry in enumerate(my_corr):
+                                if entry is None:
+                                    assert recover[index] is None
+                            assert my_corr.tag == recover.tag
+                            assert my_corr.prange == recover.prange
+                            assert my_corr.reweighted == recover.reweighted
 
 
 def test_json_corr_2d_io():
-    obs_list = [np.array([
-        [
-            pe.merge_obs([pe.pseudo_Obs(1.0 + i, 0.1 * i, 'test|r2'), pe.pseudo_Obs(1.0 + i, 0.1 * i, 'test|r1', samples=321)]),
-            pe.merge_obs([pe.pseudo_Obs(0.0, 0.1 * i, 'test|r2'), pe.pseudo_Obs(0.0, 0.1 * i, 'test|r1', samples=321)]),
-        ],
-        [
-            pe.merge_obs([pe.pseudo_Obs(0.0, 0.1 * i, 'test|r2'), pe.pseudo_Obs(0.0, 0.1 * i, 'test|r1', samples=321),]),
-            pe.merge_obs([pe.pseudo_Obs(1.0 + i, 0.1 * i, 'test|r2'), pe.pseudo_Obs(1.0 + i, 0.1 * i, 'test|r1', samples=321)]),
-        ],
-    ]) for i in range(4)]
+    obs_list = [np.array([[pe.pseudo_Obs(1.0 + i, 0.1 * i, 'test'), pe.pseudo_Obs(0.0, 0.1 * i, 'test')], [pe.pseudo_Obs(0.0, 0.1 * i, 'test'), pe.pseudo_Obs(1.0 + i, 0.1 * i, 'test')]]) for i in range(4)]
 
     for tag in [None, "test"]:
         obs_list[3][0, 1].tag = tag
         for padding in [0, 1]:
             for prange in [None, [3, 6]]:
-                for mult in [1., pe.cov_Obs([12.22, 1.21], [.212**2, .11**2], 'renorm')[0]]:
-                    my_corr = mult * pe.Corr(obs_list, padding=[padding, padding], prange=prange)
-                    my_corr.tag = tag
-                    pe.input.json.dump_to_json(my_corr, 'corr')
-                    recover = pe.input.json.load_json('corr')
-                    os.remove('corr.json.gz')
-                    assert np.all([np.all([o.is_zero() for o in q]) for q in [x.ravel() for x in (my_corr - recover) if x is not None]])
-                    for index, entry in enumerate(my_corr):
-                        if entry is None:
-                            assert recover[index] is None
-                    assert my_corr.tag == recover.tag
-                    assert my_corr.prange == recover.prange
+                my_corr = pe.Corr(obs_list, padding=[padding, padding], prange=prange)
+                my_corr.tag = tag
+                pe.input.json.dump_to_json(my_corr, 'corr')
+                recover = pe.input.json.load_json('corr')
+                os.remove('corr.json.gz')
+                assert np.all([np.all([o.is_zero() for o in q]) for q in [x.ravel() for x in (my_corr - recover) if x is not None]])
+                for index, entry in enumerate(my_corr):
+                    if entry is None:
+                        assert recover[index] is None
+                assert my_corr.tag == recover.tag
+                assert my_corr.prange == recover.prange
 
 
 def test_json_dict_io():
@@ -222,7 +211,6 @@ def test_json_dict_io():
             'd': pe.pseudo_Obs(.01, .001, 'testd', samples=10) * pe.cov_Obs(1, .01, 'cov1'),
             'se': None,
             'sf': 1.2,
-            'k': pe.cov_Obs(.1, .001**2, 'cov') * pe.merge_obs([pe.pseudo_Obs(1.0, 0.1, 'test|r2'), pe.pseudo_Obs(1.0, 0.1, 'test|r1', samples=321)]),
         }
     }
 
@@ -326,7 +314,7 @@ def test_dobsio():
 
     o2 = pe.pseudo_Obs(0.5, .1, 'two|r1')
     o3 = pe.pseudo_Obs(0.5, .1, 'two|r2')
-    o4 = pe.merge_obs([o2, o3, pe.pseudo_Obs(0.5, .1, 'two|r3', samples=3221)])
+    o4 = pe.merge_obs([o2, o3])
     otag = 'This has been merged!'
     o4.tag = otag
     do = o - .2 * o4
@@ -340,7 +328,7 @@ def test_dobsio():
     o5 /= co2[0]
     o5.tag = 2 * otag
 
-    tt1 = pe.Obs([np.random.rand(100), np.random.rand(102)], ['t|r1', 't|r2'], idl=[range(2, 202, 2), range(22, 226, 2)])
+    tt1 = pe.Obs([np.random.rand(100), np.random.rand(100)], ['t|r1', 't|r2'], idl=[range(2, 202, 2), range(22, 222, 2)])
     tt3 = pe.Obs([np.random.rand(102)], ['qe|r1'])
 
     tt = tt1 + tt3
@@ -349,7 +337,7 @@ def test_dobsio():
 
     tt4 = pe.Obs([np.random.rand(100), np.random.rand(100)], ['t|r1', 't|r2'], idl=[range(1, 101, 1), range(2, 202, 2)])
 
-    ol = [o2, o3, o4, do, o5, tt, tt4, np.log(tt4 / o5**2), np.exp(o5 + np.log(co3 / tt3 + o4) / tt), o4.reweight(o4)]
+    ol = [o2, o3, o4, do, o5, tt, tt4, np.log(tt4 / o5**2), np.exp(o5 + np.log(co3 / tt3 + o4) / tt)]
     print(ol)
     fname = 'test_rw'
 
@@ -374,12 +362,9 @@ def test_dobsio():
 
 
 def test_reconstruct_non_linear_r_obs(tmp_path):
-    to = (
-        pe.Obs([np.random.rand(500), np.random.rand(1200)],
-               ["e|r1", "e|r2", ],
-               idl=[range(1, 501), range(0, 1200)])
-        + pe.Obs([np.random.rand(111)], ["my_new_ensemble_54^£$|8'[@124435%6^7&()~#"], idl=[range(1, 999, 9)])
-    )
+    to = pe.Obs([np.random.rand(500), np.random.rand(500), np.random.rand(111)],
+                ["e|r1", "e|r2", "my_new_ensemble_54^£$|8'[@124435%6^7&()~#"],
+                idl=[range(1, 501), range(0, 500), range(1, 999, 9)])
     to = np.log(to ** 2) / to
     to.dump((tmp_path / "test_equality").as_posix())
     ro = pe.input.json.load_json((tmp_path / "test_equality").as_posix())
@@ -387,12 +372,9 @@ def test_reconstruct_non_linear_r_obs(tmp_path):
 
 
 def test_reconstruct_non_linear_r_obs_list(tmp_path):
-    to = (
-        pe.Obs([np.random.rand(500), np.random.rand(1200)],
-               ["e|r1", "e|r2", ],
-               idl=[range(1, 501), range(0, 1200)])
-        + pe.Obs([np.random.rand(111)], ["my_new_ensemble_54^£$|8'[@124435%6^7&()~#"], idl=[range(1, 999, 9)])
-    )
+    to = pe.Obs([np.random.rand(500), np.random.rand(500), np.random.rand(111)],
+                ["e|r1", "e|r2", "my_new_ensemble_54^£$|8'[@124435%6^7&()~#"],
+                idl=[range(1, 501), range(0, 500), range(1, 999, 9)])
     to = np.log(to ** 2) / to
     for to_list in [[to, to, to], np.array([to, to, to])]:
         pe.input.json.dump_to_json(to_list, (tmp_path / "test_equality_list").as_posix())
diff --git a/tests/linalg_test.py b/tests/linalg_test.py
index 9323cfcf..4fb952d3 100644
--- a/tests/linalg_test.py
+++ b/tests/linalg_test.py
@@ -34,7 +34,7 @@ def test_matmul():
         my_list = []
         length = 100 + np.random.randint(200)
         for i in range(dim ** 2):
-            my_list.append(pe.Obs([np.random.rand(length)], ['t1']) + pe.Obs([np.random.rand(length + 1)], ['t2']))
+            my_list.append(pe.Obs([np.random.rand(length), np.random.rand(length + 1)], ['t1', 't2']))
         my_array = const * np.array(my_list).reshape((dim, dim))
         tt = pe.linalg.matmul(my_array, my_array) - my_array @ my_array
         for t, e in np.ndenumerate(tt):
@@ -43,8 +43,8 @@ def test_matmul():
         my_list = []
         length = 100 + np.random.randint(200)
         for i in range(dim ** 2):
-            my_list.append(pe.CObs(pe.Obs([np.random.rand(length)], ['t1']) + pe.Obs([np.random.rand(length + 1)], ['t2']),
-                                   pe.Obs([np.random.rand(length)], ['t1']) + pe.Obs([np.random.rand(length + 1)], ['t2'])))
+            my_list.append(pe.CObs(pe.Obs([np.random.rand(length), np.random.rand(length + 1)], ['t1', 't2']),
+                                   pe.Obs([np.random.rand(length), np.random.rand(length + 1)], ['t1', 't2'])))
         my_array = np.array(my_list).reshape((dim, dim)) * const
         tt = pe.linalg.matmul(my_array, my_array) - my_array @ my_array
         for t, e in np.ndenumerate(tt):
@@ -151,7 +151,7 @@ def test_multi_dot():
         my_list = []
         length = 1000 + np.random.randint(200)
         for i in range(dim ** 2):
-            my_list.append(pe.Obs([np.random.rand(length)], ['t1']) + pe.Obs([np.random.rand(length + 1)], ['t2']))
+            my_list.append(pe.Obs([np.random.rand(length), np.random.rand(length + 1)], ['t1', 't2']))
         my_array = pe.cov_Obs(1.0, 0.002, 'cov') * np.array(my_list).reshape((dim, dim))
         tt = pe.linalg.matmul(my_array, my_array, my_array, my_array) - my_array @ my_array @ my_array @ my_array
         for t, e in np.ndenumerate(tt):
@@ -160,8 +160,8 @@ def test_multi_dot():
         my_list = []
         length = 1000 + np.random.randint(200)
         for i in range(dim ** 2):
-            my_list.append(pe.CObs(pe.Obs([np.random.rand(length)], ['t1']) + pe.Obs([np.random.rand(length + 1)], ['t2']),
-                                   pe.Obs([np.random.rand(length)], ['t1']) + pe.Obs([np.random.rand(length + 1)], ['t2'])))
+            my_list.append(pe.CObs(pe.Obs([np.random.rand(length), np.random.rand(length + 1)], ['t1', 't2']),
+                                   pe.Obs([np.random.rand(length), np.random.rand(length + 1)], ['t1', 't2'])))
         my_array = np.array(my_list).reshape((dim, dim)) * pe.cov_Obs(1.0, 0.002, 'cov')
         tt = pe.linalg.matmul(my_array, my_array, my_array, my_array) - my_array @ my_array @ my_array @ my_array
         for t, e in np.ndenumerate(tt):
@@ -209,7 +209,7 @@ def test_irregular_matrix_inverse():
     for idl in [range(8, 508, 10), range(250, 273), [2, 8, 19, 20, 78, 99, 828, 10548979]]:
         irregular_array = []
         for i in range(dim ** 2):
-            irregular_array.append(pe.Obs([np.random.normal(1.1, 0.2, len(idl))], ['ens1'], idl=[idl]) + pe.Obs([np.random.normal(0.25, 0.1, 10)], ['ens2'], idl=[range(1, 11)]))
+            irregular_array.append(pe.Obs([np.random.normal(1.1, 0.2, len(idl)), np.random.normal(0.25, 0.1, 10)], ['ens1', 'ens2'], idl=[idl, range(1, 11)]))
         irregular_matrix = np.array(irregular_array).reshape((dim, dim)) * pe.cov_Obs(1.0, 0.002, 'cov') * pe.pseudo_Obs(1.0, 0.002, 'ens2|r23')
 
         invertible_irregular_matrix = np.identity(dim) + irregular_matrix @ irregular_matrix.T
diff --git a/tests/obs_test.py b/tests/obs_test.py
index 546a4bfd..726ecffa 100644
--- a/tests/obs_test.py
+++ b/tests/obs_test.py
@@ -61,9 +61,9 @@ def test_Obs_exceptions():
         my_obs.plot_rep_dist()
     with pytest.raises(Exception):
         my_obs.plot_piechart()
-    with pytest.raises(TypeError):
+    with pytest.raises(Exception):
         my_obs.gamma_method(S='2.3')
-    with pytest.raises(ValueError):
+    with pytest.raises(Exception):
         my_obs.gamma_method(tau_exp=2.3)
     my_obs.gamma_method()
     my_obs.details()
@@ -199,7 +199,7 @@ def test_gamma_method_no_windowing():
         assert np.isclose(np.sqrt(np.var(obs.deltas['ens'], ddof=1) / obs.shape['ens']), obs.dvalue)
         obs.gamma_method(S=1.1)
         assert obs.e_tauint['ens'] > 0.5
-    with pytest.raises(ValueError):
+    with pytest.raises(Exception):
         obs.gamma_method(S=-0.2)
 
 
@@ -333,7 +333,7 @@ def test_derived_observables():
 
 def test_multi_ens():
     names = ['A0', 'A1|r001', 'A1|r002']
-    test_obs = pe.Obs([np.random.rand(50)], names[:1]) + pe.Obs([np.random.rand(50), np.random.rand(50)], names[1:])
+    test_obs = pe.Obs([np.random.rand(50), np.random.rand(50), np.random.rand(50)], names)
     assert test_obs.e_names == ['A0', 'A1']
     assert test_obs.e_content['A0'] == ['A0']
     assert test_obs.e_content['A1'] == ['A1|r001', 'A1|r002']
@@ -345,9 +345,6 @@ def test_multi_ens():
             ensembles.append(str(i))
     assert my_sum.e_names == sorted(ensembles)
 
-    with pytest.raises(ValueError):
-        test_obs = pe.Obs([np.random.rand(50), np.random.rand(50), np.random.rand(50)], names)
-
 
 def test_multi_ens2():
     names = ['ens', 'e', 'en', 'e|r010', 'E|er', 'ens|', 'Ens|34', 'ens|r548984654ez4e3t34terh']
@@ -464,18 +461,6 @@ def test_cobs_overloading():
         obs / cobs
 
 
-def test_pow():
-    data = [1, 2.341, pe.pseudo_Obs(4.8, 0.48, "test_obs"), pe.cov_Obs(1.1, 0.3 ** 2, "test_cov_obs")]
-
-    for d in data:
-        assert d * d == d ** 2
-        assert d * d * d == d ** 3
-
-        for d2 in data:
-            assert np.log(d ** d2) == d2 * np.log(d)
-            assert (d ** d2) ** (1 / d2) == d
-
-
 def test_reweighting():
     my_obs = pe.Obs([np.random.rand(1000)], ['t'])
     assert not my_obs.reweighted
@@ -493,33 +478,26 @@ def test_reweighting():
     r_obs2 = r_obs[0] * my_obs
     assert r_obs2.reweighted
     my_covobs = pe.cov_Obs(1.0, 0.003, 'cov')
-    with pytest.raises(ValueError):
+    with pytest.raises(Exception):
         pe.reweight(my_obs, [my_covobs])
     my_obs2 = pe.Obs([np.random.rand(1000)], ['t2'])
-    with pytest.raises(ValueError):
+    with pytest.raises(Exception):
         pe.reweight(my_obs, [my_obs + my_obs2])
-    with pytest.raises(ValueError):
+    with pytest.raises(Exception):
         pe.reweight(my_irregular_obs, [my_obs])
-    my_merged_obs = my_obs + pe.Obs([np.random.rand(1000)], ['q'])
-    with pytest.raises(ValueError):
-        pe.reweight(my_merged_obs, [my_merged_obs])
-
 
 
 def test_merge_obs():
-    my_obs1 = pe.Obs([np.random.normal(1, .1, 100)], ['t|1'])
-    my_obs2 = pe.Obs([np.random.normal(1, .1, 100)], ['t|2'], idl=[range(1, 200, 2)])
+    my_obs1 = pe.Obs([np.random.rand(100)], ['t'])
+    my_obs2 = pe.Obs([np.random.rand(100)], ['q'], idl=[range(1, 200, 2)])
     merged = pe.merge_obs([my_obs1, my_obs2])
-    diff = merged - (my_obs2 + my_obs1) / 2
-    assert np.isclose(0, diff.value, atol=1e-16)
-    with pytest.raises(ValueError):
+    diff = merged - my_obs2 - my_obs1
+    assert diff == -(my_obs1.value + my_obs2.value) / 2
+    with pytest.raises(Exception):
         pe.merge_obs([my_obs1, my_obs1])
     my_covobs = pe.cov_Obs(1.0, 0.003, 'cov')
-    with pytest.raises(ValueError):
+    with pytest.raises(Exception):
         pe.merge_obs([my_obs1, my_covobs])
-    my_obs3 = pe.Obs([np.random.rand(100)], ['q|2'], idl=[range(1, 200, 2)])
-    with pytest.raises(ValueError):
-        pe.merge_obs([my_obs1, my_obs3])
 
 
@@ -541,26 +519,23 @@ def test_correlate():
     assert corr1 == corr2
 
     my_obs3 = pe.Obs([np.random.rand(100)], ['t'], idl=[range(2, 102)])
-    with pytest.raises(ValueError):
+    with pytest.raises(Exception):
         pe.correlate(my_obs1, my_obs3)
 
     my_obs4 = pe.Obs([np.random.rand(99)], ['t'])
-    with pytest.raises(ValueError):
+    with pytest.raises(Exception):
         pe.correlate(my_obs1, my_obs4)
 
     my_obs5 = pe.Obs([np.random.rand(100)], ['t'], idl=[range(5, 505, 5)])
    my_obs6 = pe.Obs([np.random.rand(100)], ['t'], idl=[range(5, 505, 5)])
     corr3 = pe.correlate(my_obs5, my_obs6)
     assert my_obs5.idl == corr3.idl
 
-    my_obs7 = pe.Obs([np.random.rand(99)], ['q'])
-    with pytest.raises(ValueError):
-        pe.correlate(my_obs1, my_obs7)
     my_new_obs = pe.Obs([np.random.rand(100)], ['q3'])
-    with pytest.raises(ValueError):
+    with pytest.raises(Exception):
         pe.correlate(my_obs1, my_new_obs)
     my_covobs = pe.cov_Obs(1.0, 0.003, 'cov')
-    with pytest.raises(ValueError):
+    with pytest.raises(Exception):
         pe.correlate(my_covobs, my_covobs)
     r_obs = pe.reweight(my_obs1, [my_obs1])[0]
     with pytest.warns(RuntimeWarning):
@@ -694,14 +669,14 @@ def test_gamma_method_irregular():
     assert (a.dvalue - 5 * a.ddvalue < expe and expe < a.dvalue + 5 * a.ddvalue)
 
     arr2 = np.random.normal(1, .2, size=N)
-    afull = pe.Obs([arr], ['a1']) + pe.Obs([arr2], ['a2'])
+    afull = pe.Obs([arr, arr2], ['a1', 'a2'])
 
     configs = np.ones_like(arr2)
     for i in np.random.uniform(0, len(arr2), size=int(.8*N)):
         configs[int(i)] = 0
     zero_arr2 = [arr2[i] for i in range(len(arr2)) if not configs[i] == 0]
     idx2 = [i + 1 for i in range(len(configs)) if configs[i] == 1]
-    a = pe.Obs([zero_arr], ['a1'], idl=[idx]) + pe.Obs([zero_arr2], ['a2'], idl=[idx2])
+    a = pe.Obs([zero_arr, zero_arr2], ['a1', 'a2'], idl=[idx, idx2])
 
     afull.gamma_method()
     a.gamma_method()
@@ -787,7 +762,7 @@ def test_gamma_method_irregular():
         my_obs.gm()
     idl += [range(1, 400, 4)]
     my_obs = pe.Obs([dat for i in range(len(idl))], ['%s|%d' % ('A', i) for i in range(len(idl))], idl=idl)
-    with pytest.raises(ValueError):
+    with pytest.raises(Exception):
         my_obs.gm()
 
     # check cases where tau is large compared to the chain length
@@ -1035,7 +1010,7 @@ def test_correlation_intersection_of_idls():
 
 
 def test_covariance_non_identical_objects():
-    obs1 = pe.Obs([np.random.normal(1.0, 0.1, 1000), np.random.normal(1.0, 0.1, 1000)], ["ens|r1", "ens|r2"]) + pe.Obs([np.random.normal(1.0, 0.1, 732)], ['ens2'])
+    obs1 = pe.Obs([np.random.normal(1.0, 0.1, 1000), np.random.normal(1.0, 0.1, 1000), np.random.normal(1.0, 0.1, 732)], ["ens|r1", "ens|r2", "ens2"])
     obs1.gamma_method()
     obs2 = obs1 + 1e-18
     obs2.gamma_method()
@@ -1119,9 +1094,6 @@ def test_reweight_method():
     obs1 = pe.pseudo_Obs(0.2, 0.01, 'test')
     rw = pe.pseudo_Obs(0.999, 0.001, 'test')
     assert obs1.reweight(rw) == pe.reweight(rw, [obs1])[0]
-    rw2 = pe.pseudo_Obs(0.999, 0.001, 'test2')
-    with pytest.raises(ValueError):
-        obs1.reweight(rw2)
 
 
 def test_jackknife():
@@ -1138,7 +1110,7 @@ def test_jackknife():
     assert np.allclose(tmp_jacks, my_obs.export_jackknife())
 
     my_new_obs = my_obs + pe.Obs([full_data], ['test2'])
-    with pytest.raises(ValueError):
+    with pytest.raises(Exception):
         my_new_obs.export_jackknife()
diff --git a/tests/sfcf_in_test.py b/tests/sfcf_in_test.py
index 60a71433..f92126f9 100644
--- a/tests/sfcf_in_test.py
+++ b/tests/sfcf_in_test.py
@@ -387,33 +387,3 @@ def test_find_correlator():
     found_start, found_T = sfin._find_correlator(file, "2.0", "name f_A\nquarks lquark lquark\noffset 0\nwf 0", False, False)
     assert found_start == 21
     assert found_T == 3
-
-
-def test_get_rep_name():
-    names = ['data_r0', 'data_r1', 'data_r2']
-    new_names = sfin._get_rep_names(names)
-    assert len(new_names) == 3
-    assert new_names[0] == 'data_|r0'
-    assert new_names[1] == 'data_|r1'
-    assert new_names[2] == 'data_|r2'
-    names = ['data_q0', 'data_q1', 'data_q2']
-    new_names = sfin._get_rep_names(names, rep_sep='q')
-    assert len(new_names) == 3
-    assert new_names[0] == 'data_|q0'
-    assert new_names[1] == 'data_|q1'
-    assert new_names[2] == 'data_|q2'
-
-
-def test_get_appended_rep_name():
-    names = ['data_r0.f_1', 'data_r1.f_1', 'data_r2.f_1']
-    new_names = sfin._get_appended_rep_names(names, 'data', 'f_1')
-    assert len(new_names) == 3
-    assert new_names[0] == 'data_|r0'
-    assert new_names[1] == 'data_|r1'
-    assert new_names[2] == 'data_|r2'
-    names = ['data_q0.f_1', 'data_q1.f_1', 'data_q2.f_1']
-    new_names = sfin._get_appended_rep_names(names, 'data', 'f_1', rep_sep='q')
-    assert len(new_names) == 3
-    assert new_names[0] == 'data_|q0'
-    assert new_names[1] == 'data_|q1'
-    assert new_names[2] == 'data_|q2'