From 6643b523c5c3399031079755a56e0d1cf0a7a6ab Mon Sep 17 00:00:00 2001 From: fjosw Date: Fri, 21 Jul 2023 13:18:02 +0000 Subject: [PATCH] Documentation updated --- docs/pyerrors/correlators.html | 8 +- docs/pyerrors/obs.html | 3310 ++++++++++++++++---------------- 2 files changed, 1661 insertions(+), 1657 deletions(-) diff --git a/docs/pyerrors/correlators.html b/docs/pyerrors/correlators.html index 939ca477..8853a3aa 100644 --- a/docs/pyerrors/correlators.html +++ b/docs/pyerrors/correlators.html @@ -1318,7 +1318,7 @@ 1075 newcontent.append(self.content[t] + y.content[t]) 1076 return Corr(newcontent) 1077 -1078 elif isinstance(y, (Obs, int, float, CObs)): +1078 elif isinstance(y, (Obs, int, float, CObs, complex)): 1079 newcontent = [] 1080 for t in range(self.T): 1081 if _check_for_none(self, self.content[t]): @@ -1346,7 +1346,7 @@ 1103 newcontent.append(self.content[t] * y.content[t]) 1104 return Corr(newcontent) 1105 -1106 elif isinstance(y, (Obs, int, float, CObs)): +1106 elif isinstance(y, (Obs, int, float, CObs, complex)): 1107 newcontent = [] 1108 for t in range(self.T): 1109 if _check_for_none(self, self.content[t]): @@ -2748,7 +2748,7 @@ 1076 newcontent.append(self.content[t] + y.content[t]) 1077 return Corr(newcontent) 1078 -1079 elif isinstance(y, (Obs, int, float, CObs)): +1079 elif isinstance(y, (Obs, int, float, CObs, complex)): 1080 newcontent = [] 1081 for t in range(self.T): 1082 if _check_for_none(self, self.content[t]): @@ -2776,7 +2776,7 @@ 1104 newcontent.append(self.content[t] * y.content[t]) 1105 return Corr(newcontent) 1106 -1107 elif isinstance(y, (Obs, int, float, CObs)): +1107 elif isinstance(y, (Obs, int, float, CObs, complex)): 1108 newcontent = [] 1109 for t in range(self.T): 1110 if _check_for_none(self, self.content[t]): diff --git a/docs/pyerrors/obs.html b/docs/pyerrors/obs.html index d7796d68..6ca79c61 100644 --- a/docs/pyerrors/obs.html +++ b/docs/pyerrors/obs.html @@ -1114,965 +1114,967 @@ 784 else: 785 if isinstance(y, np.ndarray): 786 return np.array([self + o for o in y]) - 787 elif y.__class__.__name__ in ['Corr', 'CObs']: - 788 return NotImplemented - 789 else: - 790 return derived_observable(lambda x, **kwargs: x[0] + y, [self], man_grad=[1]) - 791 - 792 def __radd__(self, y): - 793 return self + y - 794 - 795 def __mul__(self, y): - 796 if isinstance(y, Obs): - 797 return derived_observable(lambda x, **kwargs: x[0] * x[1], [self, y], man_grad=[y.value, self.value]) - 798 else: - 799 if isinstance(y, np.ndarray): - 800 return np.array([self * o for o in y]) - 801 elif isinstance(y, complex): - 802 return CObs(self * y.real, self * y.imag) - 803 elif y.__class__.__name__ in ['Corr', 'CObs']: - 804 return NotImplemented - 805 else: - 806 return derived_observable(lambda x, **kwargs: x[0] * y, [self], man_grad=[y]) - 807 - 808 def __rmul__(self, y): - 809 return self * y - 810 - 811 def __sub__(self, y): - 812 if isinstance(y, Obs): - 813 return derived_observable(lambda x, **kwargs: x[0] - x[1], [self, y], man_grad=[1, -1]) - 814 else: - 815 if isinstance(y, np.ndarray): - 816 return np.array([self - o for o in y]) - 817 elif y.__class__.__name__ in ['Corr', 'CObs']: - 818 return NotImplemented - 819 else: - 820 return derived_observable(lambda x, **kwargs: x[0] - y, [self], man_grad=[1]) - 821 - 822 def __rsub__(self, y): - 823 return -1 * (self - y) - 824 - 825 def __pos__(self): - 826 return self - 827 - 828 def __neg__(self): - 829 return -1 * self - 830 - 831 def __truediv__(self, y): - 832 if isinstance(y, Obs): - 833 return 
derived_observable(lambda x, **kwargs: x[0] / x[1], [self, y], man_grad=[1 / y.value, - self.value / y.value ** 2]) - 834 else: - 835 if isinstance(y, np.ndarray): - 836 return np.array([self / o for o in y]) - 837 elif y.__class__.__name__ in ['Corr', 'CObs']: - 838 return NotImplemented - 839 else: - 840 return derived_observable(lambda x, **kwargs: x[0] / y, [self], man_grad=[1 / y]) - 841 - 842 def __rtruediv__(self, y): - 843 if isinstance(y, Obs): - 844 return derived_observable(lambda x, **kwargs: x[0] / x[1], [y, self], man_grad=[1 / self.value, - y.value / self.value ** 2]) - 845 else: - 846 if isinstance(y, np.ndarray): - 847 return np.array([o / self for o in y]) - 848 elif y.__class__.__name__ in ['Corr', 'CObs']: - 849 return NotImplemented - 850 else: - 851 return derived_observable(lambda x, **kwargs: y / x[0], [self], man_grad=[-y / self.value ** 2]) - 852 - 853 def __pow__(self, y): - 854 if isinstance(y, Obs): - 855 return derived_observable(lambda x: x[0] ** x[1], [self, y]) - 856 else: - 857 return derived_observable(lambda x: x[0] ** y, [self]) - 858 - 859 def __rpow__(self, y): - 860 if isinstance(y, Obs): - 861 return derived_observable(lambda x: x[0] ** x[1], [y, self]) - 862 else: - 863 return derived_observable(lambda x: y ** x[0], [self]) - 864 - 865 def __abs__(self): - 866 return derived_observable(lambda x: anp.abs(x[0]), [self]) - 867 - 868 # Overload numpy functions - 869 def sqrt(self): - 870 return derived_observable(lambda x, **kwargs: np.sqrt(x[0]), [self], man_grad=[1 / 2 / np.sqrt(self.value)]) - 871 - 872 def log(self): - 873 return derived_observable(lambda x, **kwargs: np.log(x[0]), [self], man_grad=[1 / self.value]) - 874 - 875 def exp(self): - 876 return derived_observable(lambda x, **kwargs: np.exp(x[0]), [self], man_grad=[np.exp(self.value)]) - 877 - 878 def sin(self): - 879 return derived_observable(lambda x, **kwargs: np.sin(x[0]), [self], man_grad=[np.cos(self.value)]) - 880 - 881 def cos(self): - 882 return derived_observable(lambda x, **kwargs: np.cos(x[0]), [self], man_grad=[-np.sin(self.value)]) - 883 - 884 def tan(self): - 885 return derived_observable(lambda x, **kwargs: np.tan(x[0]), [self], man_grad=[1 / np.cos(self.value) ** 2]) - 886 - 887 def arcsin(self): - 888 return derived_observable(lambda x: anp.arcsin(x[0]), [self]) - 889 - 890 def arccos(self): - 891 return derived_observable(lambda x: anp.arccos(x[0]), [self]) - 892 - 893 def arctan(self): - 894 return derived_observable(lambda x: anp.arctan(x[0]), [self]) - 895 - 896 def sinh(self): - 897 return derived_observable(lambda x, **kwargs: np.sinh(x[0]), [self], man_grad=[np.cosh(self.value)]) - 898 - 899 def cosh(self): - 900 return derived_observable(lambda x, **kwargs: np.cosh(x[0]), [self], man_grad=[np.sinh(self.value)]) - 901 - 902 def tanh(self): - 903 return derived_observable(lambda x, **kwargs: np.tanh(x[0]), [self], man_grad=[1 / np.cosh(self.value) ** 2]) - 904 - 905 def arcsinh(self): - 906 return derived_observable(lambda x: anp.arcsinh(x[0]), [self]) - 907 - 908 def arccosh(self): - 909 return derived_observable(lambda x: anp.arccosh(x[0]), [self]) - 910 - 911 def arctanh(self): - 912 return derived_observable(lambda x: anp.arctanh(x[0]), [self]) - 913 - 914 - 915class CObs: - 916 """Class for a complex valued observable.""" - 917 __slots__ = ['_real', '_imag', 'tag'] - 918 - 919 def __init__(self, real, imag=0.0): - 920 self._real = real - 921 self._imag = imag - 922 self.tag = None - 923 - 924 @property - 925 def real(self): - 926 return self._real - 927 - 928 
@property - 929 def imag(self): - 930 return self._imag - 931 - 932 def gamma_method(self, **kwargs): - 933 """Executes the gamma_method for the real and the imaginary part.""" - 934 if isinstance(self.real, Obs): - 935 self.real.gamma_method(**kwargs) - 936 if isinstance(self.imag, Obs): - 937 self.imag.gamma_method(**kwargs) - 938 - 939 def is_zero(self): - 940 """Checks whether both real and imaginary part are zero within machine precision.""" - 941 return self.real == 0.0 and self.imag == 0.0 - 942 - 943 def conjugate(self): - 944 return CObs(self.real, -self.imag) - 945 - 946 def __add__(self, other): - 947 if isinstance(other, np.ndarray): - 948 return other + self - 949 elif hasattr(other, 'real') and hasattr(other, 'imag'): - 950 return CObs(self.real + other.real, - 951 self.imag + other.imag) - 952 else: - 953 return CObs(self.real + other, self.imag) - 954 - 955 def __radd__(self, y): - 956 return self + y - 957 - 958 def __sub__(self, other): - 959 if isinstance(other, np.ndarray): - 960 return -1 * (other - self) - 961 elif hasattr(other, 'real') and hasattr(other, 'imag'): - 962 return CObs(self.real - other.real, self.imag - other.imag) - 963 else: - 964 return CObs(self.real - other, self.imag) - 965 - 966 def __rsub__(self, other): - 967 return -1 * (self - other) - 968 - 969 def __mul__(self, other): - 970 if isinstance(other, np.ndarray): - 971 return other * self - 972 elif hasattr(other, 'real') and hasattr(other, 'imag'): - 973 if all(isinstance(i, Obs) for i in [self.real, self.imag, other.real, other.imag]): - 974 return CObs(derived_observable(lambda x, **kwargs: x[0] * x[1] - x[2] * x[3], - 975 [self.real, other.real, self.imag, other.imag], - 976 man_grad=[other.real.value, self.real.value, -other.imag.value, -self.imag.value]), - 977 derived_observable(lambda x, **kwargs: x[2] * x[1] + x[0] * x[3], - 978 [self.real, other.real, self.imag, other.imag], - 979 man_grad=[other.imag.value, self.imag.value, other.real.value, self.real.value])) - 980 elif getattr(other, 'imag', 0) != 0: - 981 return CObs(self.real * other.real - self.imag * other.imag, - 982 self.imag * other.real + self.real * other.imag) - 983 else: - 984 return CObs(self.real * other.real, self.imag * other.real) - 985 else: - 986 return CObs(self.real * other, self.imag * other) - 987 - 988 def __rmul__(self, other): - 989 return self * other - 990 - 991 def __truediv__(self, other): - 992 if isinstance(other, np.ndarray): - 993 return 1 / (other / self) - 994 elif hasattr(other, 'real') and hasattr(other, 'imag'): - 995 r = other.real ** 2 + other.imag ** 2 - 996 return CObs((self.real * other.real + self.imag * other.imag) / r, (self.imag * other.real - self.real * other.imag) / r) - 997 else: - 998 return CObs(self.real / other, self.imag / other) - 999 -1000 def __rtruediv__(self, other): -1001 r = self.real ** 2 + self.imag ** 2 -1002 if hasattr(other, 'real') and hasattr(other, 'imag'): -1003 return CObs((self.real * other.real + self.imag * other.imag) / r, (self.real * other.imag - self.imag * other.real) / r) -1004 else: -1005 return CObs(self.real * other / r, -self.imag * other / r) -1006 -1007 def __abs__(self): -1008 return np.sqrt(self.real**2 + self.imag**2) -1009 -1010 def __pos__(self): -1011 return self -1012 -1013 def __neg__(self): -1014 return -1 * self -1015 -1016 def __eq__(self, other): -1017 return self.real == other.real and self.imag == other.imag -1018 -1019 def __str__(self): -1020 return '(' + str(self.real) + int(self.imag >= 0.0) * '+' + str(self.imag) + 'j)' -1021 
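The headline change of this patch is that Obs.__add__, Corr.__add__ and Corr.__mul__ now accept plain Python complex numbers and promote the result to a CObs. A minimal usage sketch of the promoted arithmetic, with a made-up ensemble name 'ens_A' and synthetic samples:

    import numpy as np
    import pyerrors as pe

    obs = pe.Obs([np.random.normal(1.0, 0.1, 500)], ['ens_A'])

    # New in this patch: Obs + complex is promoted via CObs(self, 0) + y
    c = obs + 2.0j          # CObs with real part obs and imaginary part 2.0
    d = obs * (1 + 1j)      # CObs(obs * 1.0, obs * 1.0); __mul__ handled complex already

    d.gamma_method()        # runs the gamma method for real and imaginary part
    print(d.real, d.imag)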
-1022 def __repr__(self): -1023 return 'CObs[' + str(self) + ']' -1024 -1025 def __format__(self, format_type): -1026 if format_type == "": -1027 significance = 2 -1028 format_type = "2" -1029 else: -1030 significance = int(float(format_type.replace("+", "").replace("-", ""))) -1031 return f"({self.real:{format_type}}{self.imag:+{significance}}j)" -1032 -1033 -1034def gamma_method(x, **kwargs): -1035 """Vectorized version of the gamma_method applicable to lists or arrays of Obs. -1036 -1037 See docstring of pe.Obs.gamma_method for details. -1038 """ -1039 return np.vectorize(lambda o: o.gm(**kwargs))(x) -1040 -1041 -1042gm = gamma_method + 787 elif isinstance(y, complex): + 788 return CObs(self, 0) + y + 789 elif y.__class__.__name__ in ['Corr', 'CObs']: + 790 return NotImplemented + 791 else: + 792 return derived_observable(lambda x, **kwargs: x[0] + y, [self], man_grad=[1]) + 793 + 794 def __radd__(self, y): + 795 return self + y + 796 + 797 def __mul__(self, y): + 798 if isinstance(y, Obs): + 799 return derived_observable(lambda x, **kwargs: x[0] * x[1], [self, y], man_grad=[y.value, self.value]) + 800 else: + 801 if isinstance(y, np.ndarray): + 802 return np.array([self * o for o in y]) + 803 elif isinstance(y, complex): + 804 return CObs(self * y.real, self * y.imag) + 805 elif y.__class__.__name__ in ['Corr', 'CObs']: + 806 return NotImplemented + 807 else: + 808 return derived_observable(lambda x, **kwargs: x[0] * y, [self], man_grad=[y]) + 809 + 810 def __rmul__(self, y): + 811 return self * y + 812 + 813 def __sub__(self, y): + 814 if isinstance(y, Obs): + 815 return derived_observable(lambda x, **kwargs: x[0] - x[1], [self, y], man_grad=[1, -1]) + 816 else: + 817 if isinstance(y, np.ndarray): + 818 return np.array([self - o for o in y]) + 819 elif y.__class__.__name__ in ['Corr', 'CObs']: + 820 return NotImplemented + 821 else: + 822 return derived_observable(lambda x, **kwargs: x[0] - y, [self], man_grad=[1]) + 823 + 824 def __rsub__(self, y): + 825 return -1 * (self - y) + 826 + 827 def __pos__(self): + 828 return self + 829 + 830 def __neg__(self): + 831 return -1 * self + 832 + 833 def __truediv__(self, y): + 834 if isinstance(y, Obs): + 835 return derived_observable(lambda x, **kwargs: x[0] / x[1], [self, y], man_grad=[1 / y.value, - self.value / y.value ** 2]) + 836 else: + 837 if isinstance(y, np.ndarray): + 838 return np.array([self / o for o in y]) + 839 elif y.__class__.__name__ in ['Corr', 'CObs']: + 840 return NotImplemented + 841 else: + 842 return derived_observable(lambda x, **kwargs: x[0] / y, [self], man_grad=[1 / y]) + 843 + 844 def __rtruediv__(self, y): + 845 if isinstance(y, Obs): + 846 return derived_observable(lambda x, **kwargs: x[0] / x[1], [y, self], man_grad=[1 / self.value, - y.value / self.value ** 2]) + 847 else: + 848 if isinstance(y, np.ndarray): + 849 return np.array([o / self for o in y]) + 850 elif y.__class__.__name__ in ['Corr', 'CObs']: + 851 return NotImplemented + 852 else: + 853 return derived_observable(lambda x, **kwargs: y / x[0], [self], man_grad=[-y / self.value ** 2]) + 854 + 855 def __pow__(self, y): + 856 if isinstance(y, Obs): + 857 return derived_observable(lambda x: x[0] ** x[1], [self, y]) + 858 else: + 859 return derived_observable(lambda x: x[0] ** y, [self]) + 860 + 861 def __rpow__(self, y): + 862 if isinstance(y, Obs): + 863 return derived_observable(lambda x: x[0] ** x[1], [y, self]) + 864 else: + 865 return derived_observable(lambda x: y ** x[0], [self]) + 866 + 867 def __abs__(self): + 868 return 
derived_observable(lambda x: anp.abs(x[0]), [self]) + 869 + 870 # Overload numpy functions + 871 def sqrt(self): + 872 return derived_observable(lambda x, **kwargs: np.sqrt(x[0]), [self], man_grad=[1 / 2 / np.sqrt(self.value)]) + 873 + 874 def log(self): + 875 return derived_observable(lambda x, **kwargs: np.log(x[0]), [self], man_grad=[1 / self.value]) + 876 + 877 def exp(self): + 878 return derived_observable(lambda x, **kwargs: np.exp(x[0]), [self], man_grad=[np.exp(self.value)]) + 879 + 880 def sin(self): + 881 return derived_observable(lambda x, **kwargs: np.sin(x[0]), [self], man_grad=[np.cos(self.value)]) + 882 + 883 def cos(self): + 884 return derived_observable(lambda x, **kwargs: np.cos(x[0]), [self], man_grad=[-np.sin(self.value)]) + 885 + 886 def tan(self): + 887 return derived_observable(lambda x, **kwargs: np.tan(x[0]), [self], man_grad=[1 / np.cos(self.value) ** 2]) + 888 + 889 def arcsin(self): + 890 return derived_observable(lambda x: anp.arcsin(x[0]), [self]) + 891 + 892 def arccos(self): + 893 return derived_observable(lambda x: anp.arccos(x[0]), [self]) + 894 + 895 def arctan(self): + 896 return derived_observable(lambda x: anp.arctan(x[0]), [self]) + 897 + 898 def sinh(self): + 899 return derived_observable(lambda x, **kwargs: np.sinh(x[0]), [self], man_grad=[np.cosh(self.value)]) + 900 + 901 def cosh(self): + 902 return derived_observable(lambda x, **kwargs: np.cosh(x[0]), [self], man_grad=[np.sinh(self.value)]) + 903 + 904 def tanh(self): + 905 return derived_observable(lambda x, **kwargs: np.tanh(x[0]), [self], man_grad=[1 / np.cosh(self.value) ** 2]) + 906 + 907 def arcsinh(self): + 908 return derived_observable(lambda x: anp.arcsinh(x[0]), [self]) + 909 + 910 def arccosh(self): + 911 return derived_observable(lambda x: anp.arccosh(x[0]), [self]) + 912 + 913 def arctanh(self): + 914 return derived_observable(lambda x: anp.arctanh(x[0]), [self]) + 915 + 916 + 917class CObs: + 918 """Class for a complex valued observable.""" + 919 __slots__ = ['_real', '_imag', 'tag'] + 920 + 921 def __init__(self, real, imag=0.0): + 922 self._real = real + 923 self._imag = imag + 924 self.tag = None + 925 + 926 @property + 927 def real(self): + 928 return self._real + 929 + 930 @property + 931 def imag(self): + 932 return self._imag + 933 + 934 def gamma_method(self, **kwargs): + 935 """Executes the gamma_method for the real and the imaginary part.""" + 936 if isinstance(self.real, Obs): + 937 self.real.gamma_method(**kwargs) + 938 if isinstance(self.imag, Obs): + 939 self.imag.gamma_method(**kwargs) + 940 + 941 def is_zero(self): + 942 """Checks whether both real and imaginary part are zero within machine precision.""" + 943 return self.real == 0.0 and self.imag == 0.0 + 944 + 945 def conjugate(self): + 946 return CObs(self.real, -self.imag) + 947 + 948 def __add__(self, other): + 949 if isinstance(other, np.ndarray): + 950 return other + self + 951 elif hasattr(other, 'real') and hasattr(other, 'imag'): + 952 return CObs(self.real + other.real, + 953 self.imag + other.imag) + 954 else: + 955 return CObs(self.real + other, self.imag) + 956 + 957 def __radd__(self, y): + 958 return self + y + 959 + 960 def __sub__(self, other): + 961 if isinstance(other, np.ndarray): + 962 return -1 * (other - self) + 963 elif hasattr(other, 'real') and hasattr(other, 'imag'): + 964 return CObs(self.real - other.real, self.imag - other.imag) + 965 else: + 966 return CObs(self.real - other, self.imag) + 967 + 968 def __rsub__(self, other): + 969 return -1 * (self - other) + 970 + 971 def 
__mul__(self, other):
+ 972 if isinstance(other, np.ndarray):
+ 973 return other * self
+ 974 elif hasattr(other, 'real') and hasattr(other, 'imag'):
+ 975 if all(isinstance(i, Obs) for i in [self.real, self.imag, other.real, other.imag]):
+ 976 return CObs(derived_observable(lambda x, **kwargs: x[0] * x[1] - x[2] * x[3],
+ 977 [self.real, other.real, self.imag, other.imag],
+ 978 man_grad=[other.real.value, self.real.value, -other.imag.value, -self.imag.value]),
+ 979 derived_observable(lambda x, **kwargs: x[2] * x[1] + x[0] * x[3],
+ 980 [self.real, other.real, self.imag, other.imag],
+ 981 man_grad=[other.imag.value, self.imag.value, other.real.value, self.real.value]))
+ 982 elif getattr(other, 'imag', 0) != 0:
+ 983 return CObs(self.real * other.real - self.imag * other.imag,
+ 984 self.imag * other.real + self.real * other.imag)
+ 985 else:
+ 986 return CObs(self.real * other.real, self.imag * other.real)
+ 987 else:
+ 988 return CObs(self.real * other, self.imag * other)
+ 989
+ 990 def __rmul__(self, other):
+ 991 return self * other
+ 992
+ 993 def __truediv__(self, other):
+ 994 if isinstance(other, np.ndarray):
+ 995 return 1 / (other / self)
+ 996 elif hasattr(other, 'real') and hasattr(other, 'imag'):
+ 997 r = other.real ** 2 + other.imag ** 2
+ 998 return CObs((self.real * other.real + self.imag * other.imag) / r, (self.imag * other.real - self.real * other.imag) / r)
+ 999 else:
+1000 return CObs(self.real / other, self.imag / other)
+1001
+1002 def __rtruediv__(self, other):
+1003 r = self.real ** 2 + self.imag ** 2
+1004 if hasattr(other, 'real') and hasattr(other, 'imag'):
+1005 return CObs((self.real * other.real + self.imag * other.imag) / r, (self.real * other.imag - self.imag * other.real) / r)
+1006 else:
+1007 return CObs(self.real * other / r, -self.imag * other / r)
+1008
+1009 def __abs__(self):
+1010 return np.sqrt(self.real**2 + self.imag**2)
+1011
+1012 def __pos__(self):
+1013 return self
+1014
+1015 def __neg__(self):
+1016 return -1 * self
+1017
+1018 def __eq__(self, other):
+1019 return self.real == other.real and self.imag == other.imag
+1020
+1021 def __str__(self):
+1022 return '(' + str(self.real) + int(self.imag >= 0.0) * '+' + str(self.imag) + 'j)'
+1023
+1024 def __repr__(self):
+1025 return 'CObs[' + str(self) + ']'
+1026
+1027 def __format__(self, format_type):
+1028 if format_type == "":
+1029 significance = 2
+1030 format_type = "2"
+1031 else:
+1032 significance = int(float(format_type.replace("+", "").replace("-", "")))
+1033 return f"({self.real:{format_type}}{self.imag:+{significance}}j)"
+1034
+1035
+1036def gamma_method(x, **kwargs):
+1037 """Vectorized version of the gamma_method applicable to lists or arrays of Obs.
+1038
+1039 See docstring of pe.Obs.gamma_method for details.
+1040 """
+1041 return np.vectorize(lambda o: o.gm(**kwargs))(x)
+1042
1043
-1044
-1045def _format_uncertainty(value, dvalue, significance=2):
-1046 """Creates a string of a value and its error in parenthesis notation, e.g., 13.02(45)"""
-1047 if dvalue == 0.0 or (not np.isfinite(dvalue)):
-1048 return str(value)
-1049 if not isinstance(significance, int):
-1050 raise TypeError("significance needs to be an integer.")
-1051 if significance < 1:
-1052 raise ValueError("significance needs to be larger than zero.")
-1053 fexp = np.floor(np.log10(dvalue))
-1054 if fexp < 0.0:
-1055 return '{:{form}}({:1.0f})'.format(value, dvalue * 10 ** (-fexp + significance - 1), form='.'
+ str(-int(fexp) + significance - 1) + 'f')
-1056 elif fexp == 0.0:
-1057 return f"{value:.{significance - 1}f}({dvalue:1.{significance - 1}f})"
-1058 else:
-1059 return f"{value:.{max(0, int(significance - fexp - 1))}f}({dvalue:2.{max(0, int(significance - fexp - 1))}f})"
-1060
-1061
-1062def _expand_deltas(deltas, idx, shape, gapsize):
-1063 """Expand deltas defined on idx to a regular range with spacing gapsize between two
-1064 configurations, where holes are filled by 0.
-1065 If idx is of type range, the deltas are not changed if idx.step == gapsize.
-1066
-1067 Parameters
-1068 ----------
-1069 deltas : list
-1070 List of fluctuations
-1071 idx : list
-1072 List or range of configs on which the deltas are defined, has to be sorted in ascending order.
-1073 shape : int
-1074 Number of configs in idx.
-1075 gapsize : int
-1076 The target distance between two configurations. If longer distances
-1077 are found in idx, the data is expanded.
-1078 """
-1079 if isinstance(idx, range):
-1080 if (idx.step == gapsize):
-1081 return deltas
-1082 ret = np.zeros((idx[-1] - idx[0] + gapsize) // gapsize)
-1083 for i in range(shape):
-1084 ret[(idx[i] - idx[0]) // gapsize] = deltas[i]
-1085 return ret
-1086
-1087
-1088def _merge_idx(idl):
-1089 """Returns the union of all lists in idl as range or sorted list
-1090
-1091 Parameters
-1092 ----------
-1093 idl : list
-1094 List of lists or ranges.
-1095 """
-1096
-1097 if _check_lists_equal(idl):
-1098 return idl[0]
-1099
-1100 idunion = sorted(set().union(*idl))
+1044gm = gamma_method
+1045
+1046
+1047def _format_uncertainty(value, dvalue, significance=2):
+1048 """Creates a string of a value and its error in parenthesis notation, e.g., 13.02(45)"""
+1049 if dvalue == 0.0 or (not np.isfinite(dvalue)):
+1050 return str(value)
+1051 if not isinstance(significance, int):
+1052 raise TypeError("significance needs to be an integer.")
+1053 if significance < 1:
+1054 raise ValueError("significance needs to be larger than zero.")
+1055 fexp = np.floor(np.log10(dvalue))
+1056 if fexp < 0.0:
+1057 return '{:{form}}({:1.0f})'.format(value, dvalue * 10 ** (-fexp + significance - 1), form='.' + str(-int(fexp) + significance - 1) + 'f')
+1058 elif fexp == 0.0:
+1059 return f"{value:.{significance - 1}f}({dvalue:1.{significance - 1}f})"
+1060 else:
+1061 return f"{value:.{max(0, int(significance - fexp - 1))}f}({dvalue:2.{max(0, int(significance - fexp - 1))}f})"
+1062
+1063
+1064def _expand_deltas(deltas, idx, shape, gapsize):
+1065 """Expand deltas defined on idx to a regular range with spacing gapsize between two
+1066 configurations, where holes are filled by 0.
+1067 If idx is of type range, the deltas are not changed if idx.step == gapsize.
+1068
+1069 Parameters
+1070 ----------
+1071 deltas : list
+1072 List of fluctuations
+1073 idx : list
+1074 List or range of configs on which the deltas are defined, has to be sorted in ascending order.
+1075 shape : int
+1076 Number of configs in idx.
+1077 gapsize : int
+1078 The target distance between two configurations. If longer distances
+1079 are found in idx, the data is expanded.
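The three branches of _format_uncertainty above handle errors smaller than one, of order one, and larger than one. A short sketch of the resulting parenthesis notation, calling the private helper directly (it is not part of the public API):

    from pyerrors.obs import _format_uncertainty

    _format_uncertainty(13.02, 0.45)    # '13.02(45)'  (fexp < 0)
    _format_uncertainty(1.7, 2.0)       # '1.7(2.0)'   (fexp == 0)
    _format_uncertainty(1234.0, 67.0)   # '1234(67)'   (fexp > 0)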
+1080 """ +1081 if isinstance(idx, range): +1082 if (idx.step == gapsize): +1083 return deltas +1084 ret = np.zeros((idx[-1] - idx[0] + gapsize) // gapsize) +1085 for i in range(shape): +1086 ret[(idx[i] - idx[0]) // gapsize] = deltas[i] +1087 return ret +1088 +1089 +1090def _merge_idx(idl): +1091 """Returns the union of all lists in idl as range or sorted list +1092 +1093 Parameters +1094 ---------- +1095 idl : list +1096 List of lists or ranges. +1097 """ +1098 +1099 if _check_lists_equal(idl): +1100 return idl[0] 1101 -1102 # Check whether idunion can be expressed as range -1103 idrange = range(idunion[0], idunion[-1] + 1, idunion[1] - idunion[0]) -1104 idtest = [list(idrange), idunion] -1105 if _check_lists_equal(idtest): -1106 return idrange -1107 -1108 return idunion +1102 idunion = sorted(set().union(*idl)) +1103 +1104 # Check whether idunion can be expressed as range +1105 idrange = range(idunion[0], idunion[-1] + 1, idunion[1] - idunion[0]) +1106 idtest = [list(idrange), idunion] +1107 if _check_lists_equal(idtest): +1108 return idrange 1109 -1110 -1111def _intersection_idx(idl): -1112 """Returns the intersection of all lists in idl as range or sorted list -1113 -1114 Parameters -1115 ---------- -1116 idl : list -1117 List of lists or ranges. -1118 """ -1119 -1120 if _check_lists_equal(idl): -1121 return idl[0] -1122 -1123 idinter = sorted(set.intersection(*[set(o) for o in idl])) +1110 return idunion +1111 +1112 +1113def _intersection_idx(idl): +1114 """Returns the intersection of all lists in idl as range or sorted list +1115 +1116 Parameters +1117 ---------- +1118 idl : list +1119 List of lists or ranges. +1120 """ +1121 +1122 if _check_lists_equal(idl): +1123 return idl[0] 1124 -1125 # Check whether idinter can be expressed as range -1126 try: -1127 idrange = range(idinter[0], idinter[-1] + 1, idinter[1] - idinter[0]) -1128 idtest = [list(idrange), idinter] -1129 if _check_lists_equal(idtest): -1130 return idrange -1131 except IndexError: -1132 pass -1133 -1134 return idinter +1125 idinter = sorted(set.intersection(*[set(o) for o in idl])) +1126 +1127 # Check whether idinter can be expressed as range +1128 try: +1129 idrange = range(idinter[0], idinter[-1] + 1, idinter[1] - idinter[0]) +1130 idtest = [list(idrange), idinter] +1131 if _check_lists_equal(idtest): +1132 return idrange +1133 except IndexError: +1134 pass 1135 -1136 -1137def _expand_deltas_for_merge(deltas, idx, shape, new_idx): -1138 """Expand deltas defined on idx to the list of configs that is defined by new_idx. -1139 New, empty entries are filled by 0. If idx and new_idx are of type range, the smallest -1140 common divisor of the step sizes is used as new step size. -1141 -1142 Parameters -1143 ---------- -1144 deltas : list -1145 List of fluctuations -1146 idx : list -1147 List or range of configs on which the deltas are defined. -1148 Has to be a subset of new_idx and has to be sorted in ascending order. -1149 shape : list -1150 Number of configs in idx. -1151 new_idx : list -1152 List of configs that defines the new range, has to be sorted in ascending order. 
-1153 """ -1154 -1155 if type(idx) is range and type(new_idx) is range: -1156 if idx == new_idx: -1157 return deltas -1158 ret = np.zeros(new_idx[-1] - new_idx[0] + 1) -1159 for i in range(shape): -1160 ret[idx[i] - new_idx[0]] = deltas[i] -1161 return np.array([ret[new_idx[i] - new_idx[0]] for i in range(len(new_idx))]) * len(new_idx) / len(idx) -1162 -1163 -1164def derived_observable(func, data, array_mode=False, **kwargs): -1165 """Construct a derived Obs according to func(data, **kwargs) using automatic differentiation. -1166 -1167 Parameters -1168 ---------- -1169 func : object -1170 arbitrary function of the form func(data, **kwargs). For the -1171 automatic differentiation to work, all numpy functions have to have -1172 the autograd wrapper (use 'import autograd.numpy as anp'). -1173 data : list -1174 list of Obs, e.g. [obs1, obs2, obs3]. -1175 num_grad : bool -1176 if True, numerical derivatives are used instead of autograd -1177 (default False). To control the numerical differentiation the -1178 kwargs of numdifftools.step_generators.MaxStepGenerator -1179 can be used. -1180 man_grad : list -1181 manually supply a list or an array which contains the jacobian -1182 of func. Use cautiously, supplying the wrong derivative will -1183 not be intercepted. -1184 -1185 Notes -1186 ----- -1187 For simple mathematical operations it can be practical to use anonymous -1188 functions. For the ratio of two observables one can e.g. use -1189 -1190 new_obs = derived_observable(lambda x: x[0] / x[1], [obs1, obs2]) -1191 """ -1192 -1193 data = np.asarray(data) -1194 raveled_data = data.ravel() -1195 -1196 # Workaround for matrix operations containing non Obs data -1197 if not all(isinstance(x, Obs) for x in raveled_data): -1198 for i in range(len(raveled_data)): -1199 if isinstance(raveled_data[i], (int, float)): -1200 raveled_data[i] = cov_Obs(raveled_data[i], 0.0, "###dummy_covobs###") -1201 -1202 allcov = {} -1203 for o in raveled_data: -1204 for name in o.cov_names: -1205 if name in allcov: -1206 if not np.allclose(allcov[name], o.covobs[name].cov): -1207 raise Exception('Inconsistent covariance matrices for %s!' % (name)) -1208 else: -1209 allcov[name] = o.covobs[name].cov -1210 -1211 n_obs = len(raveled_data) -1212 new_names = sorted(set([y for x in [o.names for o in raveled_data] for y in x])) -1213 new_cov_names = sorted(set([y for x in [o.cov_names for o in raveled_data] for y in x])) -1214 new_sample_names = sorted(set(new_names) - set(new_cov_names)) -1215 -1216 reweighted = len(list(filter(lambda o: o.reweighted is True, raveled_data))) > 0 +1136 return idinter +1137 +1138 +1139def _expand_deltas_for_merge(deltas, idx, shape, new_idx): +1140 """Expand deltas defined on idx to the list of configs that is defined by new_idx. +1141 New, empty entries are filled by 0. If idx and new_idx are of type range, the smallest +1142 common divisor of the step sizes is used as new step size. +1143 +1144 Parameters +1145 ---------- +1146 deltas : list +1147 List of fluctuations +1148 idx : list +1149 List or range of configs on which the deltas are defined. +1150 Has to be a subset of new_idx and has to be sorted in ascending order. +1151 shape : list +1152 Number of configs in idx. +1153 new_idx : list +1154 List of configs that defines the new range, has to be sorted in ascending order. 
+1155 """ +1156 +1157 if type(idx) is range and type(new_idx) is range: +1158 if idx == new_idx: +1159 return deltas +1160 ret = np.zeros(new_idx[-1] - new_idx[0] + 1) +1161 for i in range(shape): +1162 ret[idx[i] - new_idx[0]] = deltas[i] +1163 return np.array([ret[new_idx[i] - new_idx[0]] for i in range(len(new_idx))]) * len(new_idx) / len(idx) +1164 +1165 +1166def derived_observable(func, data, array_mode=False, **kwargs): +1167 """Construct a derived Obs according to func(data, **kwargs) using automatic differentiation. +1168 +1169 Parameters +1170 ---------- +1171 func : object +1172 arbitrary function of the form func(data, **kwargs). For the +1173 automatic differentiation to work, all numpy functions have to have +1174 the autograd wrapper (use 'import autograd.numpy as anp'). +1175 data : list +1176 list of Obs, e.g. [obs1, obs2, obs3]. +1177 num_grad : bool +1178 if True, numerical derivatives are used instead of autograd +1179 (default False). To control the numerical differentiation the +1180 kwargs of numdifftools.step_generators.MaxStepGenerator +1181 can be used. +1182 man_grad : list +1183 manually supply a list or an array which contains the jacobian +1184 of func. Use cautiously, supplying the wrong derivative will +1185 not be intercepted. +1186 +1187 Notes +1188 ----- +1189 For simple mathematical operations it can be practical to use anonymous +1190 functions. For the ratio of two observables one can e.g. use +1191 +1192 new_obs = derived_observable(lambda x: x[0] / x[1], [obs1, obs2]) +1193 """ +1194 +1195 data = np.asarray(data) +1196 raveled_data = data.ravel() +1197 +1198 # Workaround for matrix operations containing non Obs data +1199 if not all(isinstance(x, Obs) for x in raveled_data): +1200 for i in range(len(raveled_data)): +1201 if isinstance(raveled_data[i], (int, float)): +1202 raveled_data[i] = cov_Obs(raveled_data[i], 0.0, "###dummy_covobs###") +1203 +1204 allcov = {} +1205 for o in raveled_data: +1206 for name in o.cov_names: +1207 if name in allcov: +1208 if not np.allclose(allcov[name], o.covobs[name].cov): +1209 raise Exception('Inconsistent covariance matrices for %s!' 
% (name)) +1210 else: +1211 allcov[name] = o.covobs[name].cov +1212 +1213 n_obs = len(raveled_data) +1214 new_names = sorted(set([y for x in [o.names for o in raveled_data] for y in x])) +1215 new_cov_names = sorted(set([y for x in [o.cov_names for o in raveled_data] for y in x])) +1216 new_sample_names = sorted(set(new_names) - set(new_cov_names)) 1217 -1218 if data.ndim == 1: -1219 values = np.array([o.value for o in data]) -1220 else: -1221 values = np.vectorize(lambda x: x.value)(data) -1222 -1223 new_values = func(values, **kwargs) +1218 reweighted = len(list(filter(lambda o: o.reweighted is True, raveled_data))) > 0 +1219 +1220 if data.ndim == 1: +1221 values = np.array([o.value for o in data]) +1222 else: +1223 values = np.vectorize(lambda x: x.value)(data) 1224 -1225 multi = int(isinstance(new_values, np.ndarray)) +1225 new_values = func(values, **kwargs) 1226 -1227 new_r_values = {} -1228 new_idl_d = {} -1229 for name in new_sample_names: -1230 idl = [] -1231 tmp_values = np.zeros(n_obs) -1232 for i, item in enumerate(raveled_data): -1233 tmp_values[i] = item.r_values.get(name, item.value) -1234 tmp_idl = item.idl.get(name) -1235 if tmp_idl is not None: -1236 idl.append(tmp_idl) -1237 if multi > 0: -1238 tmp_values = np.array(tmp_values).reshape(data.shape) -1239 new_r_values[name] = func(tmp_values, **kwargs) -1240 new_idl_d[name] = _merge_idx(idl) -1241 -1242 if 'man_grad' in kwargs: -1243 deriv = np.asarray(kwargs.get('man_grad')) -1244 if new_values.shape + data.shape != deriv.shape: -1245 raise Exception('Manual derivative does not have correct shape.') -1246 elif kwargs.get('num_grad') is True: -1247 if multi > 0: -1248 raise Exception('Multi mode currently not supported for numerical derivative') -1249 options = { -1250 'base_step': 0.1, -1251 'step_ratio': 2.5} -1252 for key in options.keys(): -1253 kwarg = kwargs.get(key) -1254 if kwarg is not None: -1255 options[key] = kwarg -1256 tmp_df = nd.Gradient(func, order=4, **{k: v for k, v in options.items() if v is not None})(values, **kwargs) -1257 if tmp_df.size == 1: -1258 deriv = np.array([tmp_df.real]) -1259 else: -1260 deriv = tmp_df.real -1261 else: -1262 deriv = jacobian(func)(values, **kwargs) -1263 -1264 final_result = np.zeros(new_values.shape, dtype=object) +1227 multi = int(isinstance(new_values, np.ndarray)) +1228 +1229 new_r_values = {} +1230 new_idl_d = {} +1231 for name in new_sample_names: +1232 idl = [] +1233 tmp_values = np.zeros(n_obs) +1234 for i, item in enumerate(raveled_data): +1235 tmp_values[i] = item.r_values.get(name, item.value) +1236 tmp_idl = item.idl.get(name) +1237 if tmp_idl is not None: +1238 idl.append(tmp_idl) +1239 if multi > 0: +1240 tmp_values = np.array(tmp_values).reshape(data.shape) +1241 new_r_values[name] = func(tmp_values, **kwargs) +1242 new_idl_d[name] = _merge_idx(idl) +1243 +1244 if 'man_grad' in kwargs: +1245 deriv = np.asarray(kwargs.get('man_grad')) +1246 if new_values.shape + data.shape != deriv.shape: +1247 raise Exception('Manual derivative does not have correct shape.') +1248 elif kwargs.get('num_grad') is True: +1249 if multi > 0: +1250 raise Exception('Multi mode currently not supported for numerical derivative') +1251 options = { +1252 'base_step': 0.1, +1253 'step_ratio': 2.5} +1254 for key in options.keys(): +1255 kwarg = kwargs.get(key) +1256 if kwarg is not None: +1257 options[key] = kwarg +1258 tmp_df = nd.Gradient(func, order=4, **{k: v for k, v in options.items() if v is not None})(values, **kwargs) +1259 if tmp_df.size == 1: +1260 deriv = 
np.array([tmp_df.real]) +1261 else: +1262 deriv = tmp_df.real +1263 else: +1264 deriv = jacobian(func)(values, **kwargs) 1265 -1266 if array_mode is True: +1266 final_result = np.zeros(new_values.shape, dtype=object) 1267 -1268 class _Zero_grad(): -1269 def __init__(self, N): -1270 self.grad = np.zeros((N, 1)) -1271 -1272 new_covobs_lengths = dict(set([y for x in [[(n, o.covobs[n].N) for n in o.cov_names] for o in raveled_data] for y in x])) -1273 d_extracted = {} -1274 g_extracted = {} -1275 for name in new_sample_names: -1276 d_extracted[name] = [] -1277 ens_length = len(new_idl_d[name]) -1278 for i_dat, dat in enumerate(data): -1279 d_extracted[name].append(np.array([_expand_deltas_for_merge(o.deltas.get(name, np.zeros(ens_length)), o.idl.get(name, new_idl_d[name]), o.shape.get(name, ens_length), new_idl_d[name]) for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (ens_length, ))) -1280 for name in new_cov_names: -1281 g_extracted[name] = [] -1282 zero_grad = _Zero_grad(new_covobs_lengths[name]) -1283 for i_dat, dat in enumerate(data): -1284 g_extracted[name].append(np.array([o.covobs.get(name, zero_grad).grad for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (new_covobs_lengths[name], 1))) -1285 -1286 for i_val, new_val in np.ndenumerate(new_values): -1287 new_deltas = {} -1288 new_grad = {} -1289 if array_mode is True: -1290 for name in new_sample_names: -1291 ens_length = d_extracted[name][0].shape[-1] -1292 new_deltas[name] = np.zeros(ens_length) -1293 for i_dat, dat in enumerate(d_extracted[name]): -1294 new_deltas[name] += np.tensordot(deriv[i_val + (i_dat, )], dat) -1295 for name in new_cov_names: -1296 new_grad[name] = 0 -1297 for i_dat, dat in enumerate(g_extracted[name]): -1298 new_grad[name] += np.tensordot(deriv[i_val + (i_dat, )], dat) -1299 else: -1300 for j_obs, obs in np.ndenumerate(data): -1301 for name in obs.names: -1302 if name in obs.cov_names: -1303 new_grad[name] = new_grad.get(name, 0) + deriv[i_val + j_obs] * obs.covobs[name].grad -1304 else: -1305 new_deltas[name] = new_deltas.get(name, 0) + deriv[i_val + j_obs] * _expand_deltas_for_merge(obs.deltas[name], obs.idl[name], obs.shape[name], new_idl_d[name]) -1306 -1307 new_covobs = {name: Covobs(0, allcov[name], name, grad=new_grad[name]) for name in new_grad} +1268 if array_mode is True: +1269 +1270 class _Zero_grad(): +1271 def __init__(self, N): +1272 self.grad = np.zeros((N, 1)) +1273 +1274 new_covobs_lengths = dict(set([y for x in [[(n, o.covobs[n].N) for n in o.cov_names] for o in raveled_data] for y in x])) +1275 d_extracted = {} +1276 g_extracted = {} +1277 for name in new_sample_names: +1278 d_extracted[name] = [] +1279 ens_length = len(new_idl_d[name]) +1280 for i_dat, dat in enumerate(data): +1281 d_extracted[name].append(np.array([_expand_deltas_for_merge(o.deltas.get(name, np.zeros(ens_length)), o.idl.get(name, new_idl_d[name]), o.shape.get(name, ens_length), new_idl_d[name]) for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (ens_length, ))) +1282 for name in new_cov_names: +1283 g_extracted[name] = [] +1284 zero_grad = _Zero_grad(new_covobs_lengths[name]) +1285 for i_dat, dat in enumerate(data): +1286 g_extracted[name].append(np.array([o.covobs.get(name, zero_grad).grad for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (new_covobs_lengths[name], 1))) +1287 +1288 for i_val, new_val in np.ndenumerate(new_values): +1289 new_deltas = {} +1290 new_grad = {} +1291 if array_mode is True: +1292 for name in new_sample_names: +1293 ens_length = 
d_extracted[name][0].shape[-1] +1294 new_deltas[name] = np.zeros(ens_length) +1295 for i_dat, dat in enumerate(d_extracted[name]): +1296 new_deltas[name] += np.tensordot(deriv[i_val + (i_dat, )], dat) +1297 for name in new_cov_names: +1298 new_grad[name] = 0 +1299 for i_dat, dat in enumerate(g_extracted[name]): +1300 new_grad[name] += np.tensordot(deriv[i_val + (i_dat, )], dat) +1301 else: +1302 for j_obs, obs in np.ndenumerate(data): +1303 for name in obs.names: +1304 if name in obs.cov_names: +1305 new_grad[name] = new_grad.get(name, 0) + deriv[i_val + j_obs] * obs.covobs[name].grad +1306 else: +1307 new_deltas[name] = new_deltas.get(name, 0) + deriv[i_val + j_obs] * _expand_deltas_for_merge(obs.deltas[name], obs.idl[name], obs.shape[name], new_idl_d[name]) 1308 -1309 if not set(new_covobs.keys()).isdisjoint(new_deltas.keys()): -1310 raise Exception('The same name has been used for deltas and covobs!') -1311 new_samples = [] -1312 new_means = [] -1313 new_idl = [] -1314 new_names_obs = [] -1315 for name in new_names: -1316 if name not in new_covobs: -1317 new_samples.append(new_deltas[name]) -1318 new_idl.append(new_idl_d[name]) -1319 new_means.append(new_r_values[name][i_val]) -1320 new_names_obs.append(name) -1321 final_result[i_val] = Obs(new_samples, new_names_obs, means=new_means, idl=new_idl) -1322 for name in new_covobs: -1323 final_result[i_val].names.append(name) -1324 final_result[i_val]._covobs = new_covobs -1325 final_result[i_val]._value = new_val -1326 final_result[i_val].reweighted = reweighted -1327 -1328 if multi == 0: -1329 final_result = final_result.item() -1330 -1331 return final_result +1309 new_covobs = {name: Covobs(0, allcov[name], name, grad=new_grad[name]) for name in new_grad} +1310 +1311 if not set(new_covobs.keys()).isdisjoint(new_deltas.keys()): +1312 raise Exception('The same name has been used for deltas and covobs!') +1313 new_samples = [] +1314 new_means = [] +1315 new_idl = [] +1316 new_names_obs = [] +1317 for name in new_names: +1318 if name not in new_covobs: +1319 new_samples.append(new_deltas[name]) +1320 new_idl.append(new_idl_d[name]) +1321 new_means.append(new_r_values[name][i_val]) +1322 new_names_obs.append(name) +1323 final_result[i_val] = Obs(new_samples, new_names_obs, means=new_means, idl=new_idl) +1324 for name in new_covobs: +1325 final_result[i_val].names.append(name) +1326 final_result[i_val]._covobs = new_covobs +1327 final_result[i_val]._value = new_val +1328 final_result[i_val].reweighted = reweighted +1329 +1330 if multi == 0: +1331 final_result = final_result.item() 1332 -1333 -1334def _reduce_deltas(deltas, idx_old, idx_new): -1335 """Extract deltas defined on idx_old on all configs of idx_new. -1336 -1337 Assumes, that idx_old and idx_new are correctly defined idl, i.e., they -1338 are ordered in an ascending order. -1339 -1340 Parameters -1341 ---------- -1342 deltas : list -1343 List of fluctuations -1344 idx_old : list -1345 List or range of configs on which the deltas are defined -1346 idx_new : list -1347 List of configs for which we want to extract the deltas. -1348 Has to be a subset of idx_old. 
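The anonymous-function example from the derived_observable docstring, spelled out with synthetic input data, together with the man_grad variant that bypasses autograd; ensemble name and sample values are made up:

    import numpy as np
    import pyerrors as pe

    obs1 = pe.Obs([np.random.normal(2.0, 0.2, 1000)], ['ens_A'])
    obs2 = pe.Obs([np.random.normal(1.0, 0.1, 1000)], ['ens_A'])

    # Ratio via automatic differentiation
    ratio = pe.derived_observable(lambda x: x[0] / x[1], [obs1, obs2])

    # Same ratio with a manually supplied Jacobian (use cautiously)
    ratio_man = pe.derived_observable(
        lambda x, **kwargs: x[0] / x[1], [obs1, obs2],
        man_grad=[1 / obs2.value, -obs1.value / obs2.value ** 2])

    ratio.gamma_method()
    ratio_man.gamma_method()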
-1349 """ -1350 if not len(deltas) == len(idx_old): -1351 raise Exception('Length of deltas and idx_old have to be the same: %d != %d' % (len(deltas), len(idx_old))) -1352 if type(idx_old) is range and type(idx_new) is range: -1353 if idx_old == idx_new: -1354 return deltas -1355 if _check_lists_equal([idx_old, idx_new]): -1356 return deltas -1357 indices = np.intersect1d(idx_old, idx_new, assume_unique=True, return_indices=True)[1] -1358 if len(indices) < len(idx_new): -1359 raise Exception('Error in _reduce_deltas: Config of idx_new not in idx_old') -1360 return np.array(deltas)[indices] -1361 -1362 -1363def reweight(weight, obs, **kwargs): -1364 """Reweight a list of observables. -1365 -1366 Parameters -1367 ---------- -1368 weight : Obs -1369 Reweighting factor. An Observable that has to be defined on a superset of the -1370 configurations in obs[i].idl for all i. -1371 obs : list -1372 list of Obs, e.g. [obs1, obs2, obs3]. -1373 all_configs : bool -1374 if True, the reweighted observables are normalized by the average of -1375 the reweighting factor on all configurations in weight.idl and not -1376 on the configurations in obs[i].idl. Default False. -1377 """ -1378 result = [] -1379 for i in range(len(obs)): -1380 if len(obs[i].cov_names): -1381 raise Exception('Error: Not possible to reweight an Obs that contains covobs!') -1382 if not set(obs[i].names).issubset(weight.names): -1383 raise Exception('Error: Ensembles do not fit') -1384 for name in obs[i].names: -1385 if not set(obs[i].idl[name]).issubset(weight.idl[name]): -1386 raise Exception('obs[%d] has to be defined on a subset of the configs in weight.idl[%s]!' % (i, name)) -1387 new_samples = [] -1388 w_deltas = {} -1389 for name in sorted(obs[i].names): -1390 w_deltas[name] = _reduce_deltas(weight.deltas[name], weight.idl[name], obs[i].idl[name]) -1391 new_samples.append((w_deltas[name] + weight.r_values[name]) * (obs[i].deltas[name] + obs[i].r_values[name])) -1392 tmp_obs = Obs(new_samples, sorted(obs[i].names), idl=[obs[i].idl[name] for name in sorted(obs[i].names)]) -1393 -1394 if kwargs.get('all_configs'): -1395 new_weight = weight -1396 else: -1397 new_weight = Obs([w_deltas[name] + weight.r_values[name] for name in sorted(obs[i].names)], sorted(obs[i].names), idl=[obs[i].idl[name] for name in sorted(obs[i].names)]) -1398 -1399 result.append(tmp_obs / new_weight) -1400 result[-1].reweighted = True -1401 -1402 return result +1333 return final_result +1334 +1335 +1336def _reduce_deltas(deltas, idx_old, idx_new): +1337 """Extract deltas defined on idx_old on all configs of idx_new. +1338 +1339 Assumes, that idx_old and idx_new are correctly defined idl, i.e., they +1340 are ordered in an ascending order. +1341 +1342 Parameters +1343 ---------- +1344 deltas : list +1345 List of fluctuations +1346 idx_old : list +1347 List or range of configs on which the deltas are defined +1348 idx_new : list +1349 List of configs for which we want to extract the deltas. +1350 Has to be a subset of idx_old. 
+1351 """ +1352 if not len(deltas) == len(idx_old): +1353 raise Exception('Length of deltas and idx_old have to be the same: %d != %d' % (len(deltas), len(idx_old))) +1354 if type(idx_old) is range and type(idx_new) is range: +1355 if idx_old == idx_new: +1356 return deltas +1357 if _check_lists_equal([idx_old, idx_new]): +1358 return deltas +1359 indices = np.intersect1d(idx_old, idx_new, assume_unique=True, return_indices=True)[1] +1360 if len(indices) < len(idx_new): +1361 raise Exception('Error in _reduce_deltas: Config of idx_new not in idx_old') +1362 return np.array(deltas)[indices] +1363 +1364 +1365def reweight(weight, obs, **kwargs): +1366 """Reweight a list of observables. +1367 +1368 Parameters +1369 ---------- +1370 weight : Obs +1371 Reweighting factor. An Observable that has to be defined on a superset of the +1372 configurations in obs[i].idl for all i. +1373 obs : list +1374 list of Obs, e.g. [obs1, obs2, obs3]. +1375 all_configs : bool +1376 if True, the reweighted observables are normalized by the average of +1377 the reweighting factor on all configurations in weight.idl and not +1378 on the configurations in obs[i].idl. Default False. +1379 """ +1380 result = [] +1381 for i in range(len(obs)): +1382 if len(obs[i].cov_names): +1383 raise Exception('Error: Not possible to reweight an Obs that contains covobs!') +1384 if not set(obs[i].names).issubset(weight.names): +1385 raise Exception('Error: Ensembles do not fit') +1386 for name in obs[i].names: +1387 if not set(obs[i].idl[name]).issubset(weight.idl[name]): +1388 raise Exception('obs[%d] has to be defined on a subset of the configs in weight.idl[%s]!' % (i, name)) +1389 new_samples = [] +1390 w_deltas = {} +1391 for name in sorted(obs[i].names): +1392 w_deltas[name] = _reduce_deltas(weight.deltas[name], weight.idl[name], obs[i].idl[name]) +1393 new_samples.append((w_deltas[name] + weight.r_values[name]) * (obs[i].deltas[name] + obs[i].r_values[name])) +1394 tmp_obs = Obs(new_samples, sorted(obs[i].names), idl=[obs[i].idl[name] for name in sorted(obs[i].names)]) +1395 +1396 if kwargs.get('all_configs'): +1397 new_weight = weight +1398 else: +1399 new_weight = Obs([w_deltas[name] + weight.r_values[name] for name in sorted(obs[i].names)], sorted(obs[i].names), idl=[obs[i].idl[name] for name in sorted(obs[i].names)]) +1400 +1401 result.append(tmp_obs / new_weight) +1402 result[-1].reweighted = True 1403 -1404 -1405def correlate(obs_a, obs_b): -1406 """Correlate two observables. -1407 -1408 Parameters -1409 ---------- -1410 obs_a : Obs -1411 First observable -1412 obs_b : Obs -1413 Second observable -1414 -1415 Notes -1416 ----- -1417 Keep in mind to only correlate primary observables which have not been reweighted -1418 yet. The reweighting has to be applied after correlating the observables. -1419 Currently only works if ensembles are identical (this is not strictly necessary). 
-1420 """ -1421 -1422 if sorted(obs_a.names) != sorted(obs_b.names): -1423 raise Exception(f"Ensembles do not fit {set(sorted(obs_a.names)) ^ set(sorted(obs_b.names))}") -1424 if len(obs_a.cov_names) or len(obs_b.cov_names): -1425 raise Exception('Error: Not possible to correlate Obs that contain covobs!') -1426 for name in obs_a.names: -1427 if obs_a.shape[name] != obs_b.shape[name]: -1428 raise Exception('Shapes of ensemble', name, 'do not fit') -1429 if obs_a.idl[name] != obs_b.idl[name]: -1430 raise Exception('idl of ensemble', name, 'do not fit') -1431 -1432 if obs_a.reweighted is True: -1433 warnings.warn("The first observable is already reweighted.", RuntimeWarning) -1434 if obs_b.reweighted is True: -1435 warnings.warn("The second observable is already reweighted.", RuntimeWarning) -1436 -1437 new_samples = [] -1438 new_idl = [] -1439 for name in sorted(obs_a.names): -1440 new_samples.append((obs_a.deltas[name] + obs_a.r_values[name]) * (obs_b.deltas[name] + obs_b.r_values[name])) -1441 new_idl.append(obs_a.idl[name]) -1442 -1443 o = Obs(new_samples, sorted(obs_a.names), idl=new_idl) -1444 o.reweighted = obs_a.reweighted or obs_b.reweighted -1445 return o -1446 -1447 -1448def covariance(obs, visualize=False, correlation=False, smooth=None, **kwargs): -1449 r'''Calculates the error covariance matrix of a set of observables. -1450 -1451 WARNING: This function should be used with care, especially for observables with support on multiple -1452 ensembles with differing autocorrelations. See the notes below for details. -1453 -1454 The gamma method has to be applied first to all observables. +1404 return result +1405 +1406 +1407def correlate(obs_a, obs_b): +1408 """Correlate two observables. +1409 +1410 Parameters +1411 ---------- +1412 obs_a : Obs +1413 First observable +1414 obs_b : Obs +1415 Second observable +1416 +1417 Notes +1418 ----- +1419 Keep in mind to only correlate primary observables which have not been reweighted +1420 yet. The reweighting has to be applied after correlating the observables. +1421 Currently only works if ensembles are identical (this is not strictly necessary). +1422 """ +1423 +1424 if sorted(obs_a.names) != sorted(obs_b.names): +1425 raise Exception(f"Ensembles do not fit {set(sorted(obs_a.names)) ^ set(sorted(obs_b.names))}") +1426 if len(obs_a.cov_names) or len(obs_b.cov_names): +1427 raise Exception('Error: Not possible to correlate Obs that contain covobs!') +1428 for name in obs_a.names: +1429 if obs_a.shape[name] != obs_b.shape[name]: +1430 raise Exception('Shapes of ensemble', name, 'do not fit') +1431 if obs_a.idl[name] != obs_b.idl[name]: +1432 raise Exception('idl of ensemble', name, 'do not fit') +1433 +1434 if obs_a.reweighted is True: +1435 warnings.warn("The first observable is already reweighted.", RuntimeWarning) +1436 if obs_b.reweighted is True: +1437 warnings.warn("The second observable is already reweighted.", RuntimeWarning) +1438 +1439 new_samples = [] +1440 new_idl = [] +1441 for name in sorted(obs_a.names): +1442 new_samples.append((obs_a.deltas[name] + obs_a.r_values[name]) * (obs_b.deltas[name] + obs_b.r_values[name])) +1443 new_idl.append(obs_a.idl[name]) +1444 +1445 o = Obs(new_samples, sorted(obs_a.names), idl=new_idl) +1446 o.reweighted = obs_a.reweighted or obs_b.reweighted +1447 return o +1448 +1449 +1450def covariance(obs, visualize=False, correlation=False, smooth=None, **kwargs): +1451 r'''Calculates the error covariance matrix of a set of observables. 
+1452 +1453 WARNING: This function should be used with care, especially for observables with support on multiple +1454 ensembles with differing autocorrelations. See the notes below for details. 1455 -1456 Parameters -1457 ---------- -1458 obs : list or numpy.ndarray -1459 List or one dimensional array of Obs -1460 visualize : bool -1461 If True plots the corresponding normalized correlation matrix (default False). -1462 correlation : bool -1463 If True the correlation matrix instead of the error covariance matrix is returned (default False). -1464 smooth : None or int -1465 If smooth is an integer 'E' between 2 and the dimension of the matrix minus 1 the eigenvalue -1466 smoothing procedure of hep-lat/9412087 is applied to the correlation matrix which leaves the -1467 largest E eigenvalues essentially unchanged and smoothes the smaller eigenvalues to avoid extremely -1468 small ones. -1469 -1470 Notes -1471 ----- -1472 The error covariance is defined such that it agrees with the squared standard error for two identical observables -1473 $$\operatorname{cov}(a,a)=\sum_{s=1}^N\delta_a^s\delta_a^s/N^2=\Gamma_{aa}(0)/N=\operatorname{var}(a)/N=\sigma_a^2$$ -1474 in the absence of autocorrelation. -1475 The error covariance is estimated by calculating the correlation matrix assuming no autocorrelation and then rescaling the correlation matrix by the full errors including the previous gamma method estimate for the autocorrelation of the observables. The covariance at windowsize 0 is guaranteed to be positive semi-definite -1476 $$\sum_{i,j}v_i\Gamma_{ij}(0)v_j=\frac{1}{N}\sum_{s=1}^N\sum_{i,j}v_i\delta_i^s\delta_j^s v_j=\frac{1}{N}\sum_{s=1}^N\sum_{i}|v_i\delta_i^s|^2\geq 0\,,$$ for every $v\in\mathbb{R}^M$, while such an identity does not hold for larger windows/lags. -1477 For observables defined on a single ensemble our approximation is equivalent to assuming that the integrated autocorrelation time of an off-diagonal element is equal to the geometric mean of the integrated autocorrelation times of the corresponding diagonal elements. -1478 $$\tau_{\mathrm{int}, ij}=\sqrt{\tau_{\mathrm{int}, i}\times \tau_{\mathrm{int}, j}}$$ -1479 This construction ensures that the estimated covariance matrix is positive semi-definite (up to numerical rounding errors). -1480 ''' -1481 -1482 length = len(obs) +1456 The gamma method has to be applied first to all observables. +1457 +1458 Parameters +1459 ---------- +1460 obs : list or numpy.ndarray +1461 List or one dimensional array of Obs +1462 visualize : bool +1463 If True plots the corresponding normalized correlation matrix (default False). +1464 correlation : bool +1465 If True the correlation matrix instead of the error covariance matrix is returned (default False). +1466 smooth : None or int +1467 If smooth is an integer 'E' between 2 and the dimension of the matrix minus 1 the eigenvalue +1468 smoothing procedure of hep-lat/9412087 is applied to the correlation matrix which leaves the +1469 largest E eigenvalues essentially unchanged and smoothes the smaller eigenvalues to avoid extremely +1470 small ones. +1471 +1472 Notes +1473 ----- +1474 The error covariance is defined such that it agrees with the squared standard error for two identical observables +1475 $$\operatorname{cov}(a,a)=\sum_{s=1}^N\delta_a^s\delta_a^s/N^2=\Gamma_{aa}(0)/N=\operatorname{var}(a)/N=\sigma_a^2$$ +1476 in the absence of autocorrelation. 
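A minimal sketch of the covariance estimate for two observables on a common synthetic ensemble; as stated above, the gamma method has to be applied to all observables first:

    import numpy as np
    import pyerrors as pe

    obs1 = pe.Obs([np.random.normal(1.0, 0.1, 1000)], ['ens_A'])
    obs2 = pe.Obs([np.random.normal(2.0, 0.2, 1000)], ['ens_A'])
    obs1.gamma_method()
    obs2.gamma_method()

    cov = pe.covariance([obs1, obs2])                      # 2x2 error covariance
    corr = pe.covariance([obs1, obs2], correlation=True)   # correlation matrix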
+1477 The error covariance is estimated by calculating the correlation matrix assuming no autocorrelation and then rescaling the correlation matrix by the full errors including the previous gamma method estimate for the autocorrelation of the observables. The covariance at windowsize 0 is guaranteed to be positive semi-definite +1478 $$\sum_{i,j}v_i\Gamma_{ij}(0)v_j=\frac{1}{N}\sum_{s=1}^N\sum_{i,j}v_i\delta_i^s\delta_j^s v_j=\frac{1}{N}\sum_{s=1}^N\sum_{i}|v_i\delta_i^s|^2\geq 0\,,$$ for every $v\in\mathbb{R}^M$, while such an identity does not hold for larger windows/lags. +1479 For observables defined on a single ensemble our approximation is equivalent to assuming that the integrated autocorrelation time of an off-diagonal element is equal to the geometric mean of the integrated autocorrelation times of the corresponding diagonal elements. +1480 $$\tau_{\mathrm{int}, ij}=\sqrt{\tau_{\mathrm{int}, i}\times \tau_{\mathrm{int}, j}}$$ +1481 This construction ensures that the estimated covariance matrix is positive semi-definite (up to numerical rounding errors). +1482 ''' 1483 -1484 max_samples = np.max([o.N for o in obs]) -1485 if max_samples <= length and not [item for sublist in [o.cov_names for o in obs] for item in sublist]: -1486 warnings.warn(f"The dimension of the covariance matrix ({length}) is larger or equal to the number of samples ({max_samples}). This will result in a rank deficient matrix.", RuntimeWarning) -1487 -1488 cov = np.zeros((length, length)) -1489 for i in range(length): -1490 for j in range(i, length): -1491 cov[i, j] = _covariance_element(obs[i], obs[j]) -1492 cov = cov + cov.T - np.diag(np.diag(cov)) -1493 -1494 corr = np.diag(1 / np.sqrt(np.diag(cov))) @ cov @ np.diag(1 / np.sqrt(np.diag(cov))) +1484 length = len(obs) +1485 +1486 max_samples = np.max([o.N for o in obs]) +1487 if max_samples <= length and not [item for sublist in [o.cov_names for o in obs] for item in sublist]: +1488 warnings.warn(f"The dimension of the covariance matrix ({length}) is larger or equal to the number of samples ({max_samples}). 
This will result in a rank deficient matrix.", RuntimeWarning) +1489 +1490 cov = np.zeros((length, length)) +1491 for i in range(length): +1492 for j in range(i, length): +1493 cov[i, j] = _covariance_element(obs[i], obs[j]) +1494 cov = cov + cov.T - np.diag(np.diag(cov)) 1495 -1496 if isinstance(smooth, int): -1497 corr = _smooth_eigenvalues(corr, smooth) -1498 -1499 if visualize: -1500 plt.matshow(corr, vmin=-1, vmax=1) -1501 plt.set_cmap('RdBu') -1502 plt.colorbar() -1503 plt.draw() -1504 -1505 if correlation is True: -1506 return corr -1507 -1508 errors = [o.dvalue for o in obs] -1509 cov = np.diag(errors) @ corr @ np.diag(errors) -1510 -1511 eigenvalues = np.linalg.eigh(cov)[0] -1512 if not np.all(eigenvalues >= 0): -1513 warnings.warn("Covariance matrix is not positive semi-definite (Eigenvalues: " + str(eigenvalues) + ")", RuntimeWarning) -1514 -1515 return cov +1496 corr = np.diag(1 / np.sqrt(np.diag(cov))) @ cov @ np.diag(1 / np.sqrt(np.diag(cov))) +1497 +1498 if isinstance(smooth, int): +1499 corr = _smooth_eigenvalues(corr, smooth) +1500 +1501 if visualize: +1502 plt.matshow(corr, vmin=-1, vmax=1) +1503 plt.set_cmap('RdBu') +1504 plt.colorbar() +1505 plt.draw() +1506 +1507 if correlation is True: +1508 return corr +1509 +1510 errors = [o.dvalue for o in obs] +1511 cov = np.diag(errors) @ corr @ np.diag(errors) +1512 +1513 eigenvalues = np.linalg.eigh(cov)[0] +1514 if not np.all(eigenvalues >= 0): +1515 warnings.warn("Covariance matrix is not positive semi-definite (Eigenvalues: " + str(eigenvalues) + ")", RuntimeWarning) 1516 -1517 -1518def _smooth_eigenvalues(corr, E): -1519 """Eigenvalue smoothing as described in hep-lat/9412087 -1520 -1521 corr : np.ndarray -1522 correlation matrix -1523 E : integer -1524 Number of eigenvalues to be left substantially unchanged -1525 """ -1526 if not (2 < E < corr.shape[0] - 1): -1527 raise Exception(f"'E' has to be between 2 and the dimension of the correlation matrix minus 1 ({corr.shape[0] - 1}).") -1528 vals, vec = np.linalg.eigh(corr) -1529 lambda_min = np.mean(vals[:-E]) -1530 vals[vals < lambda_min] = lambda_min -1531 vals /= np.mean(vals) -1532 return vec @ np.diag(vals) @ vec.T -1533 -1534 -1535def _covariance_element(obs1, obs2): -1536 """Estimates the covariance of two Obs objects, neglecting autocorrelations.""" -1537 -1538 def calc_gamma(deltas1, deltas2, idx1, idx2, new_idx): -1539 deltas1 = _reduce_deltas(deltas1, idx1, new_idx) -1540 deltas2 = _reduce_deltas(deltas2, idx2, new_idx) -1541 return np.sum(deltas1 * deltas2) -1542 -1543 if set(obs1.names).isdisjoint(set(obs2.names)): -1544 return 0.0 -1545 -1546 if not hasattr(obs1, 'e_dvalue') or not hasattr(obs2, 'e_dvalue'): -1547 raise Exception('The gamma method has to be applied to both Obs first.') -1548 -1549 dvalue = 0.0 +1517 return cov +1518 +1519 +1520def _smooth_eigenvalues(corr, E): +1521 """Eigenvalue smoothing as described in hep-lat/9412087 +1522 +1523 corr : np.ndarray +1524 correlation matrix +1525 E : integer +1526 Number of eigenvalues to be left substantially unchanged +1527 """ +1528 if not (2 < E < corr.shape[0] - 1): +1529 raise Exception(f"'E' has to be between 2 and the dimension of the correlation matrix minus 1 ({corr.shape[0] - 1}).") +1530 vals, vec = np.linalg.eigh(corr) +1531 lambda_min = np.mean(vals[:-E]) +1532 vals[vals < lambda_min] = lambda_min +1533 vals /= np.mean(vals) +1534 return vec @ np.diag(vals) @ vec.T +1535 +1536 +1537def _covariance_element(obs1, obs2): +1538 """Estimates the covariance of two Obs objects, neglecting 
autocorrelations.""" +1539 +1540 def calc_gamma(deltas1, deltas2, idx1, idx2, new_idx): +1541 deltas1 = _reduce_deltas(deltas1, idx1, new_idx) +1542 deltas2 = _reduce_deltas(deltas2, idx2, new_idx) +1543 return np.sum(deltas1 * deltas2) +1544 +1545 if set(obs1.names).isdisjoint(set(obs2.names)): +1546 return 0.0 +1547 +1548 if not hasattr(obs1, 'e_dvalue') or not hasattr(obs2, 'e_dvalue'): +1549 raise Exception('The gamma method has to be applied to both Obs first.') 1550 -1551 for e_name in obs1.mc_names: +1551 dvalue = 0.0 1552 -1553 if e_name not in obs2.mc_names: -1554 continue -1555 -1556 idl_d = {} -1557 for r_name in obs1.e_content[e_name]: -1558 if r_name not in obs2.e_content[e_name]: -1559 continue -1560 idl_d[r_name] = _intersection_idx([obs1.idl[r_name], obs2.idl[r_name]]) -1561 -1562 gamma = 0.0 +1553 for e_name in obs1.mc_names: +1554 +1555 if e_name not in obs2.mc_names: +1556 continue +1557 +1558 idl_d = {} +1559 for r_name in obs1.e_content[e_name]: +1560 if r_name not in obs2.e_content[e_name]: +1561 continue +1562 idl_d[r_name] = _intersection_idx([obs1.idl[r_name], obs2.idl[r_name]]) 1563 -1564 for r_name in obs1.e_content[e_name]: -1565 if r_name not in obs2.e_content[e_name]: -1566 continue -1567 if len(idl_d[r_name]) == 0: +1564 gamma = 0.0 +1565 +1566 for r_name in obs1.e_content[e_name]: +1567 if r_name not in obs2.e_content[e_name]: 1568 continue -1569 gamma += calc_gamma(obs1.deltas[r_name], obs2.deltas[r_name], obs1.idl[r_name], obs2.idl[r_name], idl_d[r_name]) -1570 -1571 if gamma == 0.0: -1572 continue -1573 -1574 gamma_div = 0.0 -1575 for r_name in obs1.e_content[e_name]: -1576 if r_name not in obs2.e_content[e_name]: -1577 continue -1578 if len(idl_d[r_name]) == 0: +1569 if len(idl_d[r_name]) == 0: +1570 continue +1571 gamma += calc_gamma(obs1.deltas[r_name], obs2.deltas[r_name], obs1.idl[r_name], obs2.idl[r_name], idl_d[r_name]) +1572 +1573 if gamma == 0.0: +1574 continue +1575 +1576 gamma_div = 0.0 +1577 for r_name in obs1.e_content[e_name]: +1578 if r_name not in obs2.e_content[e_name]: 1579 continue -1580 gamma_div += np.sqrt(calc_gamma(obs1.deltas[r_name], obs1.deltas[r_name], obs1.idl[r_name], obs1.idl[r_name], idl_d[r_name]) * calc_gamma(obs2.deltas[r_name], obs2.deltas[r_name], obs2.idl[r_name], obs2.idl[r_name], idl_d[r_name])) -1581 gamma /= gamma_div -1582 -1583 dvalue += gamma +1580 if len(idl_d[r_name]) == 0: +1581 continue +1582 gamma_div += np.sqrt(calc_gamma(obs1.deltas[r_name], obs1.deltas[r_name], obs1.idl[r_name], obs1.idl[r_name], idl_d[r_name]) * calc_gamma(obs2.deltas[r_name], obs2.deltas[r_name], obs2.idl[r_name], obs2.idl[r_name], idl_d[r_name])) +1583 gamma /= gamma_div 1584 -1585 for e_name in obs1.cov_names: +1585 dvalue += gamma 1586 -1587 if e_name not in obs2.cov_names: -1588 continue -1589 -1590 dvalue += np.dot(np.transpose(obs1.covobs[e_name].grad), np.dot(obs1.covobs[e_name].cov, obs2.covobs[e_name].grad)).item() +1587 for e_name in obs1.cov_names: +1588 +1589 if e_name not in obs2.cov_names: +1590 continue 1591 -1592 return dvalue +1592 dvalue += np.dot(np.transpose(obs1.covobs[e_name].grad), np.dot(obs1.covobs[e_name].cov, obs2.covobs[e_name].grad)).item() 1593 -1594 -1595def import_jackknife(jacks, name, idl=None): -1596 """Imports jackknife samples and returns an Obs -1597 -1598 Parameters -1599 ---------- -1600 jacks : numpy.ndarray -1601 numpy array containing the mean value as zeroth entry and -1602 the N jackknife samples as first to Nth entry. -1603 name : str -1604 name of the ensemble the samples are defined on. 
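
A round-trip sketch for import_jackknife with illustrative data. It relies on Obs.export_jackknife, which (per the pyerrors documentation) produces exactly the layout described in the docstring above, the mean as zeroth entry followed by the N jackknife samples:

    import numpy as np
    import pyerrors as pe

    obs = pe.Obs([np.random.normal(1.0, 0.1, 200)], ['ens'])
    jacks = obs.export_jackknife()  # mean first, then the 200 jackknife samples
    reconstructed = pe.obs.import_jackknife(jacks, 'ens')
    reconstructed.gamma_method()
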
-1605 """ -1606 length = len(jacks) - 1 -1607 prj = (np.ones((length, length)) - (length - 1) * np.identity(length)) -1608 samples = jacks[1:] @ prj -1609 mean = np.mean(samples) -1610 new_obs = Obs([samples - mean], [name], idl=idl, means=[mean]) -1611 new_obs._value = jacks[0] -1612 return new_obs -1613 -1614 -1615def import_bootstrap(boots, name, random_numbers): -1616 """Imports bootstrap samples and returns an Obs -1617 -1618 Parameters -1619 ---------- -1620 boots : numpy.ndarray -1621 numpy array containing the mean value as zeroth entry and -1622 the N bootstrap samples as first to Nth entry. -1623 name : str -1624 name of the ensemble the samples are defined on. -1625 random_numbers : np.ndarray -1626 Array of shape (samples, length) containing the random numbers to generate the bootstrap samples, -1627 where samples is the number of bootstrap samples and length is the length of the original Monte Carlo -1628 chain to be reconstructed. -1629 """ -1630 samples, length = random_numbers.shape -1631 if samples != len(boots) - 1: -1632 raise ValueError("Random numbers do not have the correct shape.") -1633 -1634 if samples < length: -1635 raise ValueError("Obs can't be reconstructed if there are fewer bootstrap samples than Monte Carlo data points.") -1636 -1637 proj = np.vstack([np.bincount(o, minlength=length) for o in random_numbers]) / length +1594 return dvalue +1595 +1596 +1597def import_jackknife(jacks, name, idl=None): +1598 """Imports jackknife samples and returns an Obs +1599 +1600 Parameters +1601 ---------- +1602 jacks : numpy.ndarray +1603 numpy array containing the mean value as zeroth entry and +1604 the N jackknife samples as first to Nth entry. +1605 name : str +1606 name of the ensemble the samples are defined on. +1607 """ +1608 length = len(jacks) - 1 +1609 prj = (np.ones((length, length)) - (length - 1) * np.identity(length)) +1610 samples = jacks[1:] @ prj +1611 mean = np.mean(samples) +1612 new_obs = Obs([samples - mean], [name], idl=idl, means=[mean]) +1613 new_obs._value = jacks[0] +1614 return new_obs +1615 +1616 +1617def import_bootstrap(boots, name, random_numbers): +1618 """Imports bootstrap samples and returns an Obs +1619 +1620 Parameters +1621 ---------- +1622 boots : numpy.ndarray +1623 numpy array containing the mean value as zeroth entry and +1624 the N bootstrap samples as first to Nth entry. +1625 name : str +1626 name of the ensemble the samples are defined on. +1627 random_numbers : np.ndarray +1628 Array of shape (samples, length) containing the random numbers to generate the bootstrap samples, +1629 where samples is the number of bootstrap samples and length is the length of the original Monte Carlo +1630 chain to be reconstructed. 
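
For import_bootstrap, random_numbers has to be the array that generated the bootstrap samples. A self-contained sketch with synthetic data (all names and sizes illustrative):

    import numpy as np
    import pyerrors as pe

    length = 100    # length of the Monte Carlo chain to be reconstructed
    samples = 1000  # number of bootstrap samples; the code requires samples >= length

    rng = np.random.default_rng(42)
    random_numbers = rng.integers(0, length, size=(samples, length))

    data = np.random.normal(1.0, 0.1, length)
    boots = np.concatenate(([data.mean()],
                            [data[idx].mean() for idx in random_numbers]))

    obs = pe.obs.import_bootstrap(boots, 'ens', random_numbers)
    obs.gamma_method()
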
+1631 """ +1632 samples, length = random_numbers.shape +1633 if samples != len(boots) - 1: +1634 raise ValueError("Random numbers do not have the correct shape.") +1635 +1636 if samples < length: +1637 raise ValueError("Obs can't be reconstructed if there are fewer bootstrap samples than Monte Carlo data points.") 1638 -1639 samples = scipy.linalg.lstsq(proj, boots[1:])[0] -1640 ret = Obs([samples], [name]) -1641 ret._value = boots[0] -1642 return ret -1643 -1644 -1645def merge_obs(list_of_obs): -1646 """Combine all observables in list_of_obs into one new observable -1647 -1648 Parameters -1649 ---------- -1650 list_of_obs : list -1651 list of the Obs object to be combined -1652 -1653 Notes -1654 ----- -1655 It is not possible to combine obs which are based on the same replicum -1656 """ -1657 replist = [item for obs in list_of_obs for item in obs.names] -1658 if (len(replist) == len(set(replist))) is False: -1659 raise Exception('list_of_obs contains duplicate replica: %s' % (str(replist))) -1660 if any([len(o.cov_names) for o in list_of_obs]): -1661 raise Exception('Not possible to merge data that contains covobs!') -1662 new_dict = {} -1663 idl_dict = {} -1664 for o in list_of_obs: -1665 new_dict.update({key: o.deltas.get(key, 0) + o.r_values.get(key, 0) -1666 for key in set(o.deltas) | set(o.r_values)}) -1667 idl_dict.update({key: o.idl.get(key, 0) for key in set(o.deltas)}) -1668 -1669 names = sorted(new_dict.keys()) -1670 o = Obs([new_dict[name] for name in names], names, idl=[idl_dict[name] for name in names]) -1671 o.reweighted = np.max([oi.reweighted for oi in list_of_obs]) -1672 return o -1673 -1674 -1675def cov_Obs(means, cov, name, grad=None): -1676 """Create an Obs based on mean(s) and a covariance matrix -1677 -1678 Parameters -1679 ---------- -1680 mean : list of floats or float -1681 N mean value(s) of the new Obs -1682 cov : list or array -1683 2d (NxN) Covariance matrix, 1d diagonal entries or 0d covariance -1684 name : str -1685 identifier for the covariance matrix -1686 grad : list or array -1687 Gradient of the Covobs wrt. the means belonging to cov. -1688 """ -1689 -1690 def covobs_to_obs(co): -1691 """Make an Obs out of a Covobs -1692 -1693 Parameters -1694 ---------- -1695 co : Covobs -1696 Covobs to be embedded into the Obs -1697 """ -1698 o = Obs([], [], means=[]) -1699 o._value = co.value -1700 o.names.append(co.name) -1701 o._covobs[co.name] = co -1702 o._dvalue = np.sqrt(co.errsq()) -1703 return o -1704 -1705 ol = [] -1706 if isinstance(means, (float, int)): -1707 means = [means] -1708 -1709 for i in range(len(means)): -1710 ol.append(covobs_to_obs(Covobs(means[i], cov, name, pos=i, grad=grad))) -1711 if ol[0].covobs[name].N != len(means): -1712 raise Exception('You have to provide %d mean values!' 
% (ol[0].N)) -1713 if len(ol) == 1: -1714 return ol[0] -1715 return ol -1716 -1717 -1718def _determine_gap(o, e_content, e_name): -1719 gaps = [] -1720 for r_name in e_content[e_name]: -1721 if isinstance(o.idl[r_name], range): -1722 gaps.append(o.idl[r_name].step) -1723 else: -1724 gaps.append(np.min(np.diff(o.idl[r_name]))) -1725 -1726 gap = min(gaps) -1727 if not np.all([gi % gap == 0 for gi in gaps]): -1728 raise Exception(f"Replica for ensemble {e_name} do not have a common spacing.", gaps) -1729 -1730 return gap +1639 proj = np.vstack([np.bincount(o, minlength=length) for o in random_numbers]) / length +1640 +1641 samples = scipy.linalg.lstsq(proj, boots[1:])[0] +1642 ret = Obs([samples], [name]) +1643 ret._value = boots[0] +1644 return ret +1645 +1646 +1647def merge_obs(list_of_obs): +1648 """Combine all observables in list_of_obs into one new observable +1649 +1650 Parameters +1651 ---------- +1652 list_of_obs : list +1653 list of the Obs object to be combined +1654 +1655 Notes +1656 ----- +1657 It is not possible to combine obs which are based on the same replicum +1658 """ +1659 replist = [item for obs in list_of_obs for item in obs.names] +1660 if (len(replist) == len(set(replist))) is False: +1661 raise Exception('list_of_obs contains duplicate replica: %s' % (str(replist))) +1662 if any([len(o.cov_names) for o in list_of_obs]): +1663 raise Exception('Not possible to merge data that contains covobs!') +1664 new_dict = {} +1665 idl_dict = {} +1666 for o in list_of_obs: +1667 new_dict.update({key: o.deltas.get(key, 0) + o.r_values.get(key, 0) +1668 for key in set(o.deltas) | set(o.r_values)}) +1669 idl_dict.update({key: o.idl.get(key, 0) for key in set(o.deltas)}) +1670 +1671 names = sorted(new_dict.keys()) +1672 o = Obs([new_dict[name] for name in names], names, idl=[idl_dict[name] for name in names]) +1673 o.reweighted = np.max([oi.reweighted for oi in list_of_obs]) +1674 return o +1675 +1676 +1677def cov_Obs(means, cov, name, grad=None): +1678 """Create an Obs based on mean(s) and a covariance matrix +1679 +1680 Parameters +1681 ---------- +1682 mean : list of floats or float +1683 N mean value(s) of the new Obs +1684 cov : list or array +1685 2d (NxN) Covariance matrix, 1d diagonal entries or 0d covariance +1686 name : str +1687 identifier for the covariance matrix +1688 grad : list or array +1689 Gradient of the Covobs wrt. the means belonging to cov. +1690 """ +1691 +1692 def covobs_to_obs(co): +1693 """Make an Obs out of a Covobs +1694 +1695 Parameters +1696 ---------- +1697 co : Covobs +1698 Covobs to be embedded into the Obs +1699 """ +1700 o = Obs([], [], means=[]) +1701 o._value = co.value +1702 o.names.append(co.name) +1703 o._covobs[co.name] = co +1704 o._dvalue = np.sqrt(co.errsq()) +1705 return o +1706 +1707 ol = [] +1708 if isinstance(means, (float, int)): +1709 means = [means] +1710 +1711 for i in range(len(means)): +1712 ol.append(covobs_to_obs(Covobs(means[i], cov, name, pos=i, grad=grad))) +1713 if ol[0].covobs[name].N != len(means): +1714 raise Exception('You have to provide %d mean values!' 
% (ol[0].N)) +1715 if len(ol) == 1: +1716 return ol[0] +1717 return ol +1718 +1719 +1720def _determine_gap(o, e_content, e_name): +1721 gaps = [] +1722 for r_name in e_content[e_name]: +1723 if isinstance(o.idl[r_name], range): +1724 gaps.append(o.idl[r_name].step) +1725 else: +1726 gaps.append(np.min(np.diff(o.idl[r_name]))) +1727 +1728 gap = min(gaps) +1729 if not np.all([gi % gap == 0 for gi in gaps]): +1730 raise Exception(f"Replica for ensemble {e_name} do not have a common spacing.", gaps) 1731 -1732 -1733def _check_lists_equal(idl): -1734 ''' -1735 Use groupby to efficiently check whether all elements of idl are identical. -1736 Returns True if all elements are equal, otherwise False. -1737 -1738 Parameters -1739 ---------- -1740 idl : list of lists, ranges or np.ndarrays -1741 ''' -1742 g = groupby([np.nditer(el) if isinstance(el, np.ndarray) else el for el in idl]) -1743 if next(g, True) and not next(g, False): -1744 return True -1745 return False +1732 return gap +1733 +1734 +1735def _check_lists_equal(idl): +1736 ''' +1737 Use groupby to efficiently check whether all elements of idl are identical. +1738 Returns True if all elements are equal, otherwise False. +1739 +1740 Parameters +1741 ---------- +1742 idl : list of lists, ranges or np.ndarrays +1743 ''' +1744 g = groupby([np.nditer(el) if isinstance(el, np.ndarray) else el for el in idl]) +1745 if next(g, True) and not next(g, False): +1746 return True +1747 return False @@ -2857,132 +2859,134 @@ 785 else: 786 if isinstance(y, np.ndarray): 787 return np.array([self + o for o in y]) -788 elif y.__class__.__name__ in ['Corr', 'CObs']: -789 return NotImplemented -790 else: -791 return derived_observable(lambda x, **kwargs: x[0] + y, [self], man_grad=[1]) -792 -793 def __radd__(self, y): -794 return self + y -795 -796 def __mul__(self, y): -797 if isinstance(y, Obs): -798 return derived_observable(lambda x, **kwargs: x[0] * x[1], [self, y], man_grad=[y.value, self.value]) -799 else: -800 if isinstance(y, np.ndarray): -801 return np.array([self * o for o in y]) -802 elif isinstance(y, complex): -803 return CObs(self * y.real, self * y.imag) -804 elif y.__class__.__name__ in ['Corr', 'CObs']: -805 return NotImplemented -806 else: -807 return derived_observable(lambda x, **kwargs: x[0] * y, [self], man_grad=[y]) -808 -809 def __rmul__(self, y): -810 return self * y -811 -812 def __sub__(self, y): -813 if isinstance(y, Obs): -814 return derived_observable(lambda x, **kwargs: x[0] - x[1], [self, y], man_grad=[1, -1]) -815 else: -816 if isinstance(y, np.ndarray): -817 return np.array([self - o for o in y]) -818 elif y.__class__.__name__ in ['Corr', 'CObs']: -819 return NotImplemented -820 else: -821 return derived_observable(lambda x, **kwargs: x[0] - y, [self], man_grad=[1]) -822 -823 def __rsub__(self, y): -824 return -1 * (self - y) -825 -826 def __pos__(self): -827 return self -828 -829 def __neg__(self): -830 return -1 * self -831 -832 def __truediv__(self, y): -833 if isinstance(y, Obs): -834 return derived_observable(lambda x, **kwargs: x[0] / x[1], [self, y], man_grad=[1 / y.value, - self.value / y.value ** 2]) -835 else: -836 if isinstance(y, np.ndarray): -837 return np.array([self / o for o in y]) -838 elif y.__class__.__name__ in ['Corr', 'CObs']: -839 return NotImplemented -840 else: -841 return derived_observable(lambda x, **kwargs: x[0] / y, [self], man_grad=[1 / y]) -842 -843 def __rtruediv__(self, y): -844 if isinstance(y, Obs): -845 return derived_observable(lambda x, **kwargs: x[0] / x[1], [y, self], man_grad=[1 / 
self.value, - y.value / self.value ** 2]) -846 else: -847 if isinstance(y, np.ndarray): -848 return np.array([o / self for o in y]) -849 elif y.__class__.__name__ in ['Corr', 'CObs']: -850 return NotImplemented -851 else: -852 return derived_observable(lambda x, **kwargs: y / x[0], [self], man_grad=[-y / self.value ** 2]) -853 -854 def __pow__(self, y): -855 if isinstance(y, Obs): -856 return derived_observable(lambda x: x[0] ** x[1], [self, y]) -857 else: -858 return derived_observable(lambda x: x[0] ** y, [self]) -859 -860 def __rpow__(self, y): -861 if isinstance(y, Obs): -862 return derived_observable(lambda x: x[0] ** x[1], [y, self]) -863 else: -864 return derived_observable(lambda x: y ** x[0], [self]) -865 -866 def __abs__(self): -867 return derived_observable(lambda x: anp.abs(x[0]), [self]) -868 -869 # Overload numpy functions -870 def sqrt(self): -871 return derived_observable(lambda x, **kwargs: np.sqrt(x[0]), [self], man_grad=[1 / 2 / np.sqrt(self.value)]) -872 -873 def log(self): -874 return derived_observable(lambda x, **kwargs: np.log(x[0]), [self], man_grad=[1 / self.value]) -875 -876 def exp(self): -877 return derived_observable(lambda x, **kwargs: np.exp(x[0]), [self], man_grad=[np.exp(self.value)]) -878 -879 def sin(self): -880 return derived_observable(lambda x, **kwargs: np.sin(x[0]), [self], man_grad=[np.cos(self.value)]) -881 -882 def cos(self): -883 return derived_observable(lambda x, **kwargs: np.cos(x[0]), [self], man_grad=[-np.sin(self.value)]) -884 -885 def tan(self): -886 return derived_observable(lambda x, **kwargs: np.tan(x[0]), [self], man_grad=[1 / np.cos(self.value) ** 2]) -887 -888 def arcsin(self): -889 return derived_observable(lambda x: anp.arcsin(x[0]), [self]) -890 -891 def arccos(self): -892 return derived_observable(lambda x: anp.arccos(x[0]), [self]) -893 -894 def arctan(self): -895 return derived_observable(lambda x: anp.arctan(x[0]), [self]) -896 -897 def sinh(self): -898 return derived_observable(lambda x, **kwargs: np.sinh(x[0]), [self], man_grad=[np.cosh(self.value)]) -899 -900 def cosh(self): -901 return derived_observable(lambda x, **kwargs: np.cosh(x[0]), [self], man_grad=[np.sinh(self.value)]) -902 -903 def tanh(self): -904 return derived_observable(lambda x, **kwargs: np.tanh(x[0]), [self], man_grad=[1 / np.cosh(self.value) ** 2]) -905 -906 def arcsinh(self): -907 return derived_observable(lambda x: anp.arcsinh(x[0]), [self]) -908 -909 def arccosh(self): -910 return derived_observable(lambda x: anp.arccosh(x[0]), [self]) -911 -912 def arctanh(self): -913 return derived_observable(lambda x: anp.arctanh(x[0]), [self]) +788 elif isinstance(y, complex): +789 return CObs(self, 0) + y +790 elif y.__class__.__name__ in ['Corr', 'CObs']: +791 return NotImplemented +792 else: +793 return derived_observable(lambda x, **kwargs: x[0] + y, [self], man_grad=[1]) +794 +795 def __radd__(self, y): +796 return self + y +797 +798 def __mul__(self, y): +799 if isinstance(y, Obs): +800 return derived_observable(lambda x, **kwargs: x[0] * x[1], [self, y], man_grad=[y.value, self.value]) +801 else: +802 if isinstance(y, np.ndarray): +803 return np.array([self * o for o in y]) +804 elif isinstance(y, complex): +805 return CObs(self * y.real, self * y.imag) +806 elif y.__class__.__name__ in ['Corr', 'CObs']: +807 return NotImplemented +808 else: +809 return derived_observable(lambda x, **kwargs: x[0] * y, [self], man_grad=[y]) +810 +811 def __rmul__(self, y): +812 return self * y +813 +814 def __sub__(self, y): +815 if isinstance(y, Obs): +816 return 
derived_observable(lambda x, **kwargs: x[0] - x[1], [self, y], man_grad=[1, -1]) +817 else: +818 if isinstance(y, np.ndarray): +819 return np.array([self - o for o in y]) +820 elif y.__class__.__name__ in ['Corr', 'CObs']: +821 return NotImplemented +822 else: +823 return derived_observable(lambda x, **kwargs: x[0] - y, [self], man_grad=[1]) +824 +825 def __rsub__(self, y): +826 return -1 * (self - y) +827 +828 def __pos__(self): +829 return self +830 +831 def __neg__(self): +832 return -1 * self +833 +834 def __truediv__(self, y): +835 if isinstance(y, Obs): +836 return derived_observable(lambda x, **kwargs: x[0] / x[1], [self, y], man_grad=[1 / y.value, - self.value / y.value ** 2]) +837 else: +838 if isinstance(y, np.ndarray): +839 return np.array([self / o for o in y]) +840 elif y.__class__.__name__ in ['Corr', 'CObs']: +841 return NotImplemented +842 else: +843 return derived_observable(lambda x, **kwargs: x[0] / y, [self], man_grad=[1 / y]) +844 +845 def __rtruediv__(self, y): +846 if isinstance(y, Obs): +847 return derived_observable(lambda x, **kwargs: x[0] / x[1], [y, self], man_grad=[1 / self.value, - y.value / self.value ** 2]) +848 else: +849 if isinstance(y, np.ndarray): +850 return np.array([o / self for o in y]) +851 elif y.__class__.__name__ in ['Corr', 'CObs']: +852 return NotImplemented +853 else: +854 return derived_observable(lambda x, **kwargs: y / x[0], [self], man_grad=[-y / self.value ** 2]) +855 +856 def __pow__(self, y): +857 if isinstance(y, Obs): +858 return derived_observable(lambda x: x[0] ** x[1], [self, y]) +859 else: +860 return derived_observable(lambda x: x[0] ** y, [self]) +861 +862 def __rpow__(self, y): +863 if isinstance(y, Obs): +864 return derived_observable(lambda x: x[0] ** x[1], [y, self]) +865 else: +866 return derived_observable(lambda x: y ** x[0], [self]) +867 +868 def __abs__(self): +869 return derived_observable(lambda x: anp.abs(x[0]), [self]) +870 +871 # Overload numpy functions +872 def sqrt(self): +873 return derived_observable(lambda x, **kwargs: np.sqrt(x[0]), [self], man_grad=[1 / 2 / np.sqrt(self.value)]) +874 +875 def log(self): +876 return derived_observable(lambda x, **kwargs: np.log(x[0]), [self], man_grad=[1 / self.value]) +877 +878 def exp(self): +879 return derived_observable(lambda x, **kwargs: np.exp(x[0]), [self], man_grad=[np.exp(self.value)]) +880 +881 def sin(self): +882 return derived_observable(lambda x, **kwargs: np.sin(x[0]), [self], man_grad=[np.cos(self.value)]) +883 +884 def cos(self): +885 return derived_observable(lambda x, **kwargs: np.cos(x[0]), [self], man_grad=[-np.sin(self.value)]) +886 +887 def tan(self): +888 return derived_observable(lambda x, **kwargs: np.tan(x[0]), [self], man_grad=[1 / np.cos(self.value) ** 2]) +889 +890 def arcsin(self): +891 return derived_observable(lambda x: anp.arcsin(x[0]), [self]) +892 +893 def arccos(self): +894 return derived_observable(lambda x: anp.arccos(x[0]), [self]) +895 +896 def arctan(self): +897 return derived_observable(lambda x: anp.arctan(x[0]), [self]) +898 +899 def sinh(self): +900 return derived_observable(lambda x, **kwargs: np.sinh(x[0]), [self], man_grad=[np.cosh(self.value)]) +901 +902 def cosh(self): +903 return derived_observable(lambda x, **kwargs: np.cosh(x[0]), [self], man_grad=[np.sinh(self.value)]) +904 +905 def tanh(self): +906 return derived_observable(lambda x, **kwargs: np.tanh(x[0]), [self], man_grad=[1 / np.cosh(self.value) ** 2]) +907 +908 def arcsinh(self): +909 return derived_observable(lambda x: anp.arcsinh(x[0]), [self]) +910 +911 def 
arccosh(self): +912 return derived_observable(lambda x: anp.arccosh(x[0]), [self]) +913 +914 def arctanh(self): +915 return derived_observable(lambda x: anp.arctanh(x[0]), [self]) @@ -4472,8 +4476,8 @@ should agree with samples from a full bootstrap analysis up to O(1/N). -
870    def sqrt(self):
-871        return derived_observable(lambda x, **kwargs: np.sqrt(x[0]), [self], man_grad=[1 / 2 / np.sqrt(self.value)])
+            
872    def sqrt(self):
+873        return derived_observable(lambda x, **kwargs: np.sqrt(x[0]), [self], man_grad=[1 / 2 / np.sqrt(self.value)])
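
The behavioural change of this patch shows up in the __add__ hunk earlier in this file: Obs.__add__ now handles plain complex numbers via the new branch `elif isinstance(y, complex): return CObs(self, 0) + y`. A sketch of what the new and pre-existing complex branches return (illustrative data):

    import numpy as np
    import pyerrors as pe

    obs = pe.Obs([np.random.normal(1.0, 0.1, 100)], ['ens'])

    z = obs + (2 + 3j)  # new: a CObs with real part obs + 2 and imaginary part 3.0
    w = obs * 1j        # multiplication by complex already returned a CObs before
    z.gamma_method()
    print(z.real, z.imag)
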
 
@@ -4491,8 +4495,8 @@ should agree with samples from a full bootstrap analysis up to O(1/N).
-
873    def log(self):
-874        return derived_observable(lambda x, **kwargs: np.log(x[0]), [self], man_grad=[1 / self.value])
+            
875    def log(self):
+876        return derived_observable(lambda x, **kwargs: np.log(x[0]), [self], man_grad=[1 / self.value])
 
@@ -4510,8 +4514,8 @@ should agree with samples from a full bootstrap analysis up to O(1/N).
-
876    def exp(self):
-877        return derived_observable(lambda x, **kwargs: np.exp(x[0]), [self], man_grad=[np.exp(self.value)])
+            
878    def exp(self):
+879        return derived_observable(lambda x, **kwargs: np.exp(x[0]), [self], man_grad=[np.exp(self.value)])
 
@@ -4529,8 +4533,8 @@ should agree with samples from a full bootstrap analysis up to O(1/N).
-
879    def sin(self):
-880        return derived_observable(lambda x, **kwargs: np.sin(x[0]), [self], man_grad=[np.cos(self.value)])
+            
881    def sin(self):
+882        return derived_observable(lambda x, **kwargs: np.sin(x[0]), [self], man_grad=[np.cos(self.value)])
 
@@ -4548,8 +4552,8 @@ should agree with samples from a full bootstrap analysis up to O(1/N).
-
882    def cos(self):
-883        return derived_observable(lambda x, **kwargs: np.cos(x[0]), [self], man_grad=[-np.sin(self.value)])
+            
884    def cos(self):
+885        return derived_observable(lambda x, **kwargs: np.cos(x[0]), [self], man_grad=[-np.sin(self.value)])
 
@@ -4567,8 +4571,8 @@ should agree with samples from a full bootstrap analysis up to O(1/N).
-
885    def tan(self):
-886        return derived_observable(lambda x, **kwargs: np.tan(x[0]), [self], man_grad=[1 / np.cos(self.value) ** 2])
+            
887    def tan(self):
+888        return derived_observable(lambda x, **kwargs: np.tan(x[0]), [self], man_grad=[1 / np.cos(self.value) ** 2])
 
@@ -4586,8 +4590,8 @@ should agree with samples from a full bootstrap analysis up to O(1/N).
-
888    def arcsin(self):
-889        return derived_observable(lambda x: anp.arcsin(x[0]), [self])
+            
890    def arcsin(self):
+891        return derived_observable(lambda x: anp.arcsin(x[0]), [self])
 
@@ -4605,8 +4609,8 @@ should agree with samples from a full bootstrap analysis up to O(1/N).
-
891    def arccos(self):
-892        return derived_observable(lambda x: anp.arccos(x[0]), [self])
+            
893    def arccos(self):
+894        return derived_observable(lambda x: anp.arccos(x[0]), [self])
 
@@ -4624,8 +4628,8 @@ should agree with samples from a full bootstrap analysis up to O(1/N).
-
894    def arctan(self):
-895        return derived_observable(lambda x: anp.arctan(x[0]), [self])
+            
896    def arctan(self):
+897        return derived_observable(lambda x: anp.arctan(x[0]), [self])
 
@@ -4643,8 +4647,8 @@ should agree with samples from a full bootstrap analysis up to O(1/N).
-
897    def sinh(self):
-898        return derived_observable(lambda x, **kwargs: np.sinh(x[0]), [self], man_grad=[np.cosh(self.value)])
+            
899    def sinh(self):
+900        return derived_observable(lambda x, **kwargs: np.sinh(x[0]), [self], man_grad=[np.cosh(self.value)])
 
@@ -4662,8 +4666,8 @@ should agree with samples from a full bootstrap analysis up to O(1/N).
-
900    def cosh(self):
-901        return derived_observable(lambda x, **kwargs: np.cosh(x[0]), [self], man_grad=[np.sinh(self.value)])
+            
902    def cosh(self):
+903        return derived_observable(lambda x, **kwargs: np.cosh(x[0]), [self], man_grad=[np.sinh(self.value)])
 
@@ -4681,8 +4685,8 @@ should agree with samples from a full bootstrap analysis up to O(1/N).
-
903    def tanh(self):
-904        return derived_observable(lambda x, **kwargs: np.tanh(x[0]), [self], man_grad=[1 / np.cosh(self.value) ** 2])
+            
905    def tanh(self):
+906        return derived_observable(lambda x, **kwargs: np.tanh(x[0]), [self], man_grad=[1 / np.cosh(self.value) ** 2])
 
@@ -4700,8 +4704,8 @@ should agree with samples from a full bootstrap analysis up to O(1/N).
-
906    def arcsinh(self):
-907        return derived_observable(lambda x: anp.arcsinh(x[0]), [self])
+            
908    def arcsinh(self):
+909        return derived_observable(lambda x: anp.arcsinh(x[0]), [self])
 
@@ -4719,8 +4723,8 @@ should agree with samples from a full bootstrap analysis up to O(1/N).
-
909    def arccosh(self):
-910        return derived_observable(lambda x: anp.arccosh(x[0]), [self])
+            
911    def arccosh(self):
+912        return derived_observable(lambda x: anp.arccosh(x[0]), [self])
 
@@ -4738,8 +4742,8 @@ should agree with samples from a full bootstrap analysis up to O(1/N).
-
912    def arctanh(self):
-913        return derived_observable(lambda x: anp.arctanh(x[0]), [self])
+            
914    def arctanh(self):
+915        return derived_observable(lambda x: anp.arctanh(x[0]), [self])
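
The methods in the hunks above mirror the numpy API ("Overload numpy functions" in the source), so error propagation composes naturally. A short sketch with illustrative data:

    import numpy as np
    import pyerrors as pe

    obs = pe.Obs([np.random.normal(0.5, 0.05, 200)], ['ens'])

    res = obs.exp() * obs.cos() + obs.sqrt()
    res.gamma_method()
    print(res)
    # Because the method names match the numpy API, numpy ufuncs such as
    # np.sin(obs) are expected to dispatch to these methods as well.
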
 
@@ -4890,123 +4894,123 @@ should agree with samples from a full bootstrap analysis up to O(1/N).
-
 916class CObs:
- 917    """Class for a complex valued observable."""
- 918    __slots__ = ['_real', '_imag', 'tag']
- 919
- 920    def __init__(self, real, imag=0.0):
- 921        self._real = real
- 922        self._imag = imag
- 923        self.tag = None
- 924
- 925    @property
- 926    def real(self):
- 927        return self._real
- 928
- 929    @property
- 930    def imag(self):
- 931        return self._imag
- 932
- 933    def gamma_method(self, **kwargs):
- 934        """Executes the gamma_method for the real and the imaginary part."""
- 935        if isinstance(self.real, Obs):
- 936            self.real.gamma_method(**kwargs)
- 937        if isinstance(self.imag, Obs):
- 938            self.imag.gamma_method(**kwargs)
- 939
- 940    def is_zero(self):
- 941        """Checks whether both real and imaginary part are zero within machine precision."""
- 942        return self.real == 0.0 and self.imag == 0.0
- 943
- 944    def conjugate(self):
- 945        return CObs(self.real, -self.imag)
- 946
- 947    def __add__(self, other):
- 948        if isinstance(other, np.ndarray):
- 949            return other + self
- 950        elif hasattr(other, 'real') and hasattr(other, 'imag'):
- 951            return CObs(self.real + other.real,
- 952                        self.imag + other.imag)
- 953        else:
- 954            return CObs(self.real + other, self.imag)
- 955
- 956    def __radd__(self, y):
- 957        return self + y
- 958
- 959    def __sub__(self, other):
- 960        if isinstance(other, np.ndarray):
- 961            return -1 * (other - self)
- 962        elif hasattr(other, 'real') and hasattr(other, 'imag'):
- 963            return CObs(self.real - other.real, self.imag - other.imag)
- 964        else:
- 965            return CObs(self.real - other, self.imag)
- 966
- 967    def __rsub__(self, other):
- 968        return -1 * (self - other)
- 969
- 970    def __mul__(self, other):
- 971        if isinstance(other, np.ndarray):
- 972            return other * self
- 973        elif hasattr(other, 'real') and hasattr(other, 'imag'):
- 974            if all(isinstance(i, Obs) for i in [self.real, self.imag, other.real, other.imag]):
- 975                return CObs(derived_observable(lambda x, **kwargs: x[0] * x[1] - x[2] * x[3],
- 976                                               [self.real, other.real, self.imag, other.imag],
- 977                                               man_grad=[other.real.value, self.real.value, -other.imag.value, -self.imag.value]),
- 978                            derived_observable(lambda x, **kwargs: x[2] * x[1] + x[0] * x[3],
- 979                                               [self.real, other.real, self.imag, other.imag],
- 980                                               man_grad=[other.imag.value, self.imag.value, other.real.value, self.real.value]))
- 981            elif getattr(other, 'imag', 0) != 0:
- 982                return CObs(self.real * other.real - self.imag * other.imag,
- 983                            self.imag * other.real + self.real * other.imag)
- 984            else:
- 985                return CObs(self.real * other.real, self.imag * other.real)
- 986        else:
- 987            return CObs(self.real * other, self.imag * other)
- 988
- 989    def __rmul__(self, other):
- 990        return self * other
- 991
- 992    def __truediv__(self, other):
- 993        if isinstance(other, np.ndarray):
- 994            return 1 / (other / self)
- 995        elif hasattr(other, 'real') and hasattr(other, 'imag'):
- 996            r = other.real ** 2 + other.imag ** 2
- 997            return CObs((self.real * other.real + self.imag * other.imag) / r, (self.imag * other.real - self.real * other.imag) / r)
- 998        else:
- 999            return CObs(self.real / other, self.imag / other)
-1000
-1001    def __rtruediv__(self, other):
-1002        r = self.real ** 2 + self.imag ** 2
-1003        if hasattr(other, 'real') and hasattr(other, 'imag'):
-1004            return CObs((self.real * other.real + self.imag * other.imag) / r, (self.real * other.imag - self.imag * other.real) / r)
-1005        else:
-1006            return CObs(self.real * other / r, -self.imag * other / r)
-1007
-1008    def __abs__(self):
-1009        return np.sqrt(self.real**2 + self.imag**2)
-1010
-1011    def __pos__(self):
-1012        return self
-1013
-1014    def __neg__(self):
-1015        return -1 * self
-1016
-1017    def __eq__(self, other):
-1018        return self.real == other.real and self.imag == other.imag
-1019
-1020    def __str__(self):
-1021        return '(' + str(self.real) + int(self.imag >= 0.0) * '+' + str(self.imag) + 'j)'
-1022
-1023    def __repr__(self):
-1024        return 'CObs[' + str(self) + ']'
-1025
-1026    def __format__(self, format_type):
-1027        if format_type == "":
-1028            significance = 2
-1029            format_type = "2"
-1030        else:
-1031            significance = int(float(format_type.replace("+", "").replace("-", "")))
-1032        return f"({self.real:{format_type}}{self.imag:+{significance}}j)"
+            
 918class CObs:
+ 919    """Class for a complex valued observable."""
+ 920    __slots__ = ['_real', '_imag', 'tag']
+ 921
+ 922    def __init__(self, real, imag=0.0):
+ 923        self._real = real
+ 924        self._imag = imag
+ 925        self.tag = None
+ 926
+ 927    @property
+ 928    def real(self):
+ 929        return self._real
+ 930
+ 931    @property
+ 932    def imag(self):
+ 933        return self._imag
+ 934
+ 935    def gamma_method(self, **kwargs):
+ 936        """Executes the gamma_method for the real and the imaginary part."""
+ 937        if isinstance(self.real, Obs):
+ 938            self.real.gamma_method(**kwargs)
+ 939        if isinstance(self.imag, Obs):
+ 940            self.imag.gamma_method(**kwargs)
+ 941
+ 942    def is_zero(self):
+ 943        """Checks whether both real and imaginary part are zero within machine precision."""
+ 944        return self.real == 0.0 and self.imag == 0.0
+ 945
+ 946    def conjugate(self):
+ 947        return CObs(self.real, -self.imag)
+ 948
+ 949    def __add__(self, other):
+ 950        if isinstance(other, np.ndarray):
+ 951            return other + self
+ 952        elif hasattr(other, 'real') and hasattr(other, 'imag'):
+ 953            return CObs(self.real + other.real,
+ 954                        self.imag + other.imag)
+ 955        else:
+ 956            return CObs(self.real + other, self.imag)
+ 957
+ 958    def __radd__(self, y):
+ 959        return self + y
+ 960
+ 961    def __sub__(self, other):
+ 962        if isinstance(other, np.ndarray):
+ 963            return -1 * (other - self)
+ 964        elif hasattr(other, 'real') and hasattr(other, 'imag'):
+ 965            return CObs(self.real - other.real, self.imag - other.imag)
+ 966        else:
+ 967            return CObs(self.real - other, self.imag)
+ 968
+ 969    def __rsub__(self, other):
+ 970        return -1 * (self - other)
+ 971
+ 972    def __mul__(self, other):
+ 973        if isinstance(other, np.ndarray):
+ 974            return other * self
+ 975        elif hasattr(other, 'real') and hasattr(other, 'imag'):
+ 976            if all(isinstance(i, Obs) for i in [self.real, self.imag, other.real, other.imag]):
+ 977                return CObs(derived_observable(lambda x, **kwargs: x[0] * x[1] - x[2] * x[3],
+ 978                                               [self.real, other.real, self.imag, other.imag],
+ 979                                               man_grad=[other.real.value, self.real.value, -other.imag.value, -self.imag.value]),
+ 980                            derived_observable(lambda x, **kwargs: x[2] * x[1] + x[0] * x[3],
+ 981                                               [self.real, other.real, self.imag, other.imag],
+ 982                                               man_grad=[other.imag.value, self.imag.value, other.real.value, self.real.value]))
+ 983            elif getattr(other, 'imag', 0) != 0:
+ 984                return CObs(self.real * other.real - self.imag * other.imag,
+ 985                            self.imag * other.real + self.real * other.imag)
+ 986            else:
+ 987                return CObs(self.real * other.real, self.imag * other.real)
+ 988        else:
+ 989            return CObs(self.real * other, self.imag * other)
+ 990
+ 991    def __rmul__(self, other):
+ 992        return self * other
+ 993
+ 994    def __truediv__(self, other):
+ 995        if isinstance(other, np.ndarray):
+ 996            return 1 / (other / self)
+ 997        elif hasattr(other, 'real') and hasattr(other, 'imag'):
+ 998            r = other.real ** 2 + other.imag ** 2
+ 999            return CObs((self.real * other.real + self.imag * other.imag) / r, (self.imag * other.real - self.real * other.imag) / r)
+1000        else:
+1001            return CObs(self.real / other, self.imag / other)
+1002
+1003    def __rtruediv__(self, other):
+1004        r = self.real ** 2 + self.imag ** 2
+1005        if hasattr(other, 'real') and hasattr(other, 'imag'):
+1006            return CObs((self.real * other.real + self.imag * other.imag) / r, (self.real * other.imag - self.imag * other.real) / r)
+1007        else:
+1008            return CObs(self.real * other / r, -self.imag * other / r)
+1009
+1010    def __abs__(self):
+1011        return np.sqrt(self.real**2 + self.imag**2)
+1012
+1013    def __pos__(self):
+1014        return self
+1015
+1016    def __neg__(self):
+1017        return -1 * self
+1018
+1019    def __eq__(self, other):
+1020        return self.real == other.real and self.imag == other.imag
+1021
+1022    def __str__(self):
+1023        return '(' + str(self.real) + int(self.imag >= 0.0) * '+' + str(self.imag) + 'j)'
+1024
+1025    def __repr__(self):
+1026        return 'CObs[' + str(self) + ']'
+1027
+1028    def __format__(self, format_type):
+1029        if format_type == "":
+1030            significance = 2
+1031            format_type = "2"
+1032        else:
+1033            significance = int(float(format_type.replace("+", "").replace("-", "")))
+1034        return f"({self.real:{format_type}}{self.imag:+{significance}}j)"
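
A sketch of CObs in practice (illustrative data): the arithmetic defined above keeps real and imaginary parts as Obs where possible, and gamma_method acts on both parts.

    import numpy as np
    import pyerrors as pe
    from pyerrors.obs import CObs

    re_part = pe.Obs([np.random.normal(1.0, 0.1, 100)], ['ens'])
    im_part = pe.Obs([np.random.normal(0.5, 0.1, 100)], ['ens'])

    c = CObs(re_part, im_part)
    mod2 = c * c.conjugate()  # |c|^2; the imaginary part vanishes up to rounding
    mod2.gamma_method()       # runs the gamma method on real and imaginary part
    print(mod2.real)
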
 
@@ -5024,10 +5028,10 @@ should agree with samples from a full bootstrap analysis up to O(1/N).
-
920    def __init__(self, real, imag=0.0):
-921        self._real = real
-922        self._imag = imag
-923        self.tag = None
+            
922    def __init__(self, real, imag=0.0):
+923        self._real = real
+924        self._imag = imag
+925        self.tag = None
 
@@ -5078,12 +5082,12 @@ should agree with samples from a full bootstrap analysis up to O(1/N).
-
933    def gamma_method(self, **kwargs):
-934        """Executes the gamma_method for the real and the imaginary part."""
-935        if isinstance(self.real, Obs):
-936            self.real.gamma_method(**kwargs)
-937        if isinstance(self.imag, Obs):
-938            self.imag.gamma_method(**kwargs)
+            
935    def gamma_method(self, **kwargs):
+936        """Executes the gamma_method for the real and the imaginary part."""
+937        if isinstance(self.real, Obs):
+938            self.real.gamma_method(**kwargs)
+939        if isinstance(self.imag, Obs):
+940            self.imag.gamma_method(**kwargs)
 
@@ -5103,9 +5107,9 @@ should agree with samples from a full bootstrap analysis up to O(1/N).
-
940    def is_zero(self):
-941        """Checks whether both real and imaginary part are zero within machine precision."""
-942        return self.real == 0.0 and self.imag == 0.0
+            
942    def is_zero(self):
+943        """Checks whether both real and imaginary part are zero within machine precision."""
+944        return self.real == 0.0 and self.imag == 0.0
 
@@ -5125,8 +5129,8 @@ should agree with samples from a full bootstrap analysis up to O(1/N).
-
944    def conjugate(self):
-945        return CObs(self.real, -self.imag)
+            
946    def conjugate(self):
+947        return CObs(self.real, -self.imag)
 
@@ -5145,12 +5149,12 @@ should agree with samples from a full bootstrap analysis up to O(1/N).
-
1035def gamma_method(x, **kwargs):
-1036    """Vectorized version of the gamma_method applicable to lists or arrays of Obs.
-1037
-1038    See docstring of pe.Obs.gamma_method for details.
-1039    """
-1040    return np.vectorize(lambda o: o.gm(**kwargs))(x)
+            
1037def gamma_method(x, **kwargs):
+1038    """Vectorized version of the gamma_method applicable to lists or arrays of Obs.
+1039
+1040    See docstring of pe.Obs.gamma_method for details.
+1041    """
+1042    return np.vectorize(lambda o: o.gm(**kwargs))(x)
 
@@ -5172,12 +5176,12 @@ should agree with samples from a full bootstrap analysis up to O(1/N).
-
1035def gamma_method(x, **kwargs):
-1036    """Vectorized version of the gamma_method applicable to lists or arrays of Obs.
-1037
-1038    See docstring of pe.Obs.gamma_method for details.
-1039    """
-1040    return np.vectorize(lambda o: o.gm(**kwargs))(x)
+            
1037def gamma_method(x, **kwargs):
+1038    """Vectorized version of the gamma_method applicable to lists or arrays of Obs.
+1039
+1040    See docstring of pe.Obs.gamma_method for details.
+1041    """
+1042    return np.vectorize(lambda o: o.gm(**kwargs))(x)
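
The vectorized wrapper above saves an explicit loop when many observables need their errors estimated at once. A sketch (illustrative data; the function is also reachable as pe.gamma_method via the usual top-level re-export):

    import numpy as np
    import pyerrors as pe

    obs_array = np.array([pe.Obs([np.random.normal(i, 0.1, 100)], ['ens'])
                          for i in range(4)])
    pe.obs.gamma_method(obs_array)  # equivalent to calling o.gm() on every element
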
 
@@ -5199,174 +5203,174 @@ should agree with samples from a full bootstrap analysis up to O(1/N).
-
1165def derived_observable(func, data, array_mode=False, **kwargs):
-1166    """Construct a derived Obs according to func(data, **kwargs) using automatic differentiation.
-1167
-1168    Parameters
-1169    ----------
-1170    func : object
-1171        arbitrary function of the form func(data, **kwargs). For the
-1172        automatic differentiation to work, all numpy functions have to have
-1173        the autograd wrapper (use 'import autograd.numpy as anp').
-1174    data : list
-1175        list of Obs, e.g. [obs1, obs2, obs3].
-1176    num_grad : bool
-1177        if True, numerical derivatives are used instead of autograd
-1178        (default False). To control the numerical differentiation the
-1179        kwargs of numdifftools.step_generators.MaxStepGenerator
-1180        can be used.
-1181    man_grad : list
-1182        manually supply a list or an array which contains the jacobian
-1183        of func. Use cautiously, supplying the wrong derivative will
-1184        not be intercepted.
-1185
-1186    Notes
-1187    -----
-1188    For simple mathematical operations it can be practical to use anonymous
-1189    functions. For the ratio of two observables one can e.g. use
-1190
-1191    new_obs = derived_observable(lambda x: x[0] / x[1], [obs1, obs2])
-1192    """
-1193
-1194    data = np.asarray(data)
-1195    raveled_data = data.ravel()
-1196
-1197    # Workaround for matrix operations containing non Obs data
-1198    if not all(isinstance(x, Obs) for x in raveled_data):
-1199        for i in range(len(raveled_data)):
-1200            if isinstance(raveled_data[i], (int, float)):
-1201                raveled_data[i] = cov_Obs(raveled_data[i], 0.0, "###dummy_covobs###")
-1202
-1203    allcov = {}
-1204    for o in raveled_data:
-1205        for name in o.cov_names:
-1206            if name in allcov:
-1207                if not np.allclose(allcov[name], o.covobs[name].cov):
-1208                    raise Exception('Inconsistent covariance matrices for %s!' % (name))
-1209            else:
-1210                allcov[name] = o.covobs[name].cov
-1211
-1212    n_obs = len(raveled_data)
-1213    new_names = sorted(set([y for x in [o.names for o in raveled_data] for y in x]))
-1214    new_cov_names = sorted(set([y for x in [o.cov_names for o in raveled_data] for y in x]))
-1215    new_sample_names = sorted(set(new_names) - set(new_cov_names))
-1216
-1217    reweighted = len(list(filter(lambda o: o.reweighted is True, raveled_data))) > 0
+            
1167def derived_observable(func, data, array_mode=False, **kwargs):
+1168    """Construct a derived Obs according to func(data, **kwargs) using automatic differentiation.
+1169
+1170    Parameters
+1171    ----------
+1172    func : object
+1173        arbitrary function of the form func(data, **kwargs). For the
+1174        automatic differentiation to work, all numpy functions have to have
+1175        the autograd wrapper (use 'import autograd.numpy as anp').
+1176    data : list
+1177        list of Obs, e.g. [obs1, obs2, obs3].
+1178    num_grad : bool
+1179        if True, numerical derivatives are used instead of autograd
+1180        (default False). To control the numerical differentiation the
+1181        kwargs of numdifftools.step_generators.MaxStepGenerator
+1182        can be used.
+1183    man_grad : list
+1184        manually supply a list or an array which contains the jacobian
+1185        of func. Use cautiously, supplying the wrong derivative will
+1186        not be intercepted.
+1187
+1188    Notes
+1189    -----
+1190    For simple mathematical operations it can be practical to use anonymous
+1191    functions. For the ratio of two observables one can e.g. use
+1192
+1193    new_obs = derived_observable(lambda x: x[0] / x[1], [obs1, obs2])
+1194    """
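
Complementing the ratio example in the docstring above: the same derived observable with a manually supplied jacobian, which is the mechanism the arithmetic overloads use. Note that the lambda must accept **kwargs when man_grad is passed, because derived_observable forwards its kwargs to func (data illustrative):

    import numpy as np
    import pyerrors as pe
    from pyerrors.obs import derived_observable

    obs1 = pe.Obs([np.random.normal(2.0, 0.1, 100)], ['ens'])
    obs2 = pe.Obs([np.random.normal(1.0, 0.1, 100)], ['ens'])

    # Default: the jacobian is obtained via autograd.
    ratio = derived_observable(lambda x, **kw: x[0] / x[1], [obs1, obs2])

    # Same observable with a manual gradient evaluated at the central values.
    ratio_mg = derived_observable(lambda x, **kw: x[0] / x[1], [obs1, obs2],
                                  man_grad=[1 / obs2.value, -obs1.value / obs2.value ** 2])
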
+1195
+1196    data = np.asarray(data)
+1197    raveled_data = data.ravel()
+1198
+1199    # Workaround for matrix operations containing non Obs data
+1200    if not all(isinstance(x, Obs) for x in raveled_data):
+1201        for i in range(len(raveled_data)):
+1202            if isinstance(raveled_data[i], (int, float)):
+1203                raveled_data[i] = cov_Obs(raveled_data[i], 0.0, "###dummy_covobs###")
+1204
+1205    allcov = {}
+1206    for o in raveled_data:
+1207        for name in o.cov_names:
+1208            if name in allcov:
+1209                if not np.allclose(allcov[name], o.covobs[name].cov):
+1210                    raise Exception('Inconsistent covariance matrices for %s!' % (name))
+1211            else:
+1212                allcov[name] = o.covobs[name].cov
+1213
+1214    n_obs = len(raveled_data)
+1215    new_names = sorted(set([y for x in [o.names for o in raveled_data] for y in x]))
+1216    new_cov_names = sorted(set([y for x in [o.cov_names for o in raveled_data] for y in x]))
+1217    new_sample_names = sorted(set(new_names) - set(new_cov_names))
 1218
-1219    if data.ndim == 1:
-1220        values = np.array([o.value for o in data])
-1221    else:
-1222        values = np.vectorize(lambda x: x.value)(data)
-1223
-1224    new_values = func(values, **kwargs)
+1219    reweighted = len(list(filter(lambda o: o.reweighted is True, raveled_data))) > 0
+1220
+1221    if data.ndim == 1:
+1222        values = np.array([o.value for o in data])
+1223    else:
+1224        values = np.vectorize(lambda x: x.value)(data)
 1225
-1226    multi = int(isinstance(new_values, np.ndarray))
+1226    new_values = func(values, **kwargs)
 1227
-1228    new_r_values = {}
-1229    new_idl_d = {}
-1230    for name in new_sample_names:
-1231        idl = []
-1232        tmp_values = np.zeros(n_obs)
-1233        for i, item in enumerate(raveled_data):
-1234            tmp_values[i] = item.r_values.get(name, item.value)
-1235            tmp_idl = item.idl.get(name)
-1236            if tmp_idl is not None:
-1237                idl.append(tmp_idl)
-1238        if multi > 0:
-1239            tmp_values = np.array(tmp_values).reshape(data.shape)
-1240        new_r_values[name] = func(tmp_values, **kwargs)
-1241        new_idl_d[name] = _merge_idx(idl)
-1242
-1243    if 'man_grad' in kwargs:
-1244        deriv = np.asarray(kwargs.get('man_grad'))
-1245        if new_values.shape + data.shape != deriv.shape:
-1246            raise Exception('Manual derivative does not have correct shape.')
-1247    elif kwargs.get('num_grad') is True:
-1248        if multi > 0:
-1249            raise Exception('Multi mode currently not supported for numerical derivative')
-1250        options = {
-1251            'base_step': 0.1,
-1252            'step_ratio': 2.5}
-1253        for key in options.keys():
-1254            kwarg = kwargs.get(key)
-1255            if kwarg is not None:
-1256                options[key] = kwarg
-1257        tmp_df = nd.Gradient(func, order=4, **{k: v for k, v in options.items() if v is not None})(values, **kwargs)
-1258        if tmp_df.size == 1:
-1259            deriv = np.array([tmp_df.real])
-1260        else:
-1261            deriv = tmp_df.real
-1262    else:
-1263        deriv = jacobian(func)(values, **kwargs)
-1264
-1265    final_result = np.zeros(new_values.shape, dtype=object)
+1228    multi = int(isinstance(new_values, np.ndarray))
+1229
+1230    new_r_values = {}
+1231    new_idl_d = {}
+1232    for name in new_sample_names:
+1233        idl = []
+1234        tmp_values = np.zeros(n_obs)
+1235        for i, item in enumerate(raveled_data):
+1236            tmp_values[i] = item.r_values.get(name, item.value)
+1237            tmp_idl = item.idl.get(name)
+1238            if tmp_idl is not None:
+1239                idl.append(tmp_idl)
+1240        if multi > 0:
+1241            tmp_values = np.array(tmp_values).reshape(data.shape)
+1242        new_r_values[name] = func(tmp_values, **kwargs)
+1243        new_idl_d[name] = _merge_idx(idl)
+1244
+1245    if 'man_grad' in kwargs:
+1246        deriv = np.asarray(kwargs.get('man_grad'))
+1247        if new_values.shape + data.shape != deriv.shape:
+1248            raise Exception('Manual derivative does not have correct shape.')
+1249    elif kwargs.get('num_grad') is True:
+1250        if multi > 0:
+1251            raise Exception('Multi mode currently not supported for numerical derivative')
+1252        options = {
+1253            'base_step': 0.1,
+1254            'step_ratio': 2.5}
+1255        for key in options.keys():
+1256            kwarg = kwargs.get(key)
+1257            if kwarg is not None:
+1258                options[key] = kwarg
+1259        tmp_df = nd.Gradient(func, order=4, **{k: v for k, v in options.items() if v is not None})(values, **kwargs)
+1260        if tmp_df.size == 1:
+1261            deriv = np.array([tmp_df.real])
+1262        else:
+1263            deriv = tmp_df.real
+1264    else:
+1265        deriv = jacobian(func)(values, **kwargs)
 1266
-1267    if array_mode is True:
+1267    final_result = np.zeros(new_values.shape, dtype=object)
 1268
-1269        class _Zero_grad():
-1270            def __init__(self, N):
-1271                self.grad = np.zeros((N, 1))
-1272
-1273        new_covobs_lengths = dict(set([y for x in [[(n, o.covobs[n].N) for n in o.cov_names] for o in raveled_data] for y in x]))
-1274        d_extracted = {}
-1275        g_extracted = {}
-1276        for name in new_sample_names:
-1277            d_extracted[name] = []
-1278            ens_length = len(new_idl_d[name])
-1279            for i_dat, dat in enumerate(data):
-1280                d_extracted[name].append(np.array([_expand_deltas_for_merge(o.deltas.get(name, np.zeros(ens_length)), o.idl.get(name, new_idl_d[name]), o.shape.get(name, ens_length), new_idl_d[name]) for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (ens_length, )))
-1281        for name in new_cov_names:
-1282            g_extracted[name] = []
-1283            zero_grad = _Zero_grad(new_covobs_lengths[name])
-1284            for i_dat, dat in enumerate(data):
-1285                g_extracted[name].append(np.array([o.covobs.get(name, zero_grad).grad for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (new_covobs_lengths[name], 1)))
-1286
-1287    for i_val, new_val in np.ndenumerate(new_values):
-1288        new_deltas = {}
-1289        new_grad = {}
-1290        if array_mode is True:
-1291            for name in new_sample_names:
-1292                ens_length = d_extracted[name][0].shape[-1]
-1293                new_deltas[name] = np.zeros(ens_length)
-1294                for i_dat, dat in enumerate(d_extracted[name]):
-1295                    new_deltas[name] += np.tensordot(deriv[i_val + (i_dat, )], dat)
-1296            for name in new_cov_names:
-1297                new_grad[name] = 0
-1298                for i_dat, dat in enumerate(g_extracted[name]):
-1299                    new_grad[name] += np.tensordot(deriv[i_val + (i_dat, )], dat)
-1300        else:
-1301            for j_obs, obs in np.ndenumerate(data):
-1302                for name in obs.names:
-1303                    if name in obs.cov_names:
-1304                        new_grad[name] = new_grad.get(name, 0) + deriv[i_val + j_obs] * obs.covobs[name].grad
-1305                    else:
-1306                        new_deltas[name] = new_deltas.get(name, 0) + deriv[i_val + j_obs] * _expand_deltas_for_merge(obs.deltas[name], obs.idl[name], obs.shape[name], new_idl_d[name])
-1307
-1308        new_covobs = {name: Covobs(0, allcov[name], name, grad=new_grad[name]) for name in new_grad}
+1269    if array_mode is True:
+1270
+1271        class _Zero_grad():
+1272            def __init__(self, N):
+1273                self.grad = np.zeros((N, 1))
+1274
+1275        new_covobs_lengths = dict(set([y for x in [[(n, o.covobs[n].N) for n in o.cov_names] for o in raveled_data] for y in x]))
+1276        d_extracted = {}
+1277        g_extracted = {}
+1278        for name in new_sample_names:
+1279            d_extracted[name] = []
+1280            ens_length = len(new_idl_d[name])
+1281            for i_dat, dat in enumerate(data):
+1282                d_extracted[name].append(np.array([_expand_deltas_for_merge(o.deltas.get(name, np.zeros(ens_length)), o.idl.get(name, new_idl_d[name]), o.shape.get(name, ens_length), new_idl_d[name]) for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (ens_length, )))
+1283        for name in new_cov_names:
+1284            g_extracted[name] = []
+1285            zero_grad = _Zero_grad(new_covobs_lengths[name])
+1286            for i_dat, dat in enumerate(data):
+1287                g_extracted[name].append(np.array([o.covobs.get(name, zero_grad).grad for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (new_covobs_lengths[name], 1)))
+1288
+1289    for i_val, new_val in np.ndenumerate(new_values):
+1290        new_deltas = {}
+1291        new_grad = {}
+1292        if array_mode is True:
+1293            for name in new_sample_names:
+1294                ens_length = d_extracted[name][0].shape[-1]
+1295                new_deltas[name] = np.zeros(ens_length)
+1296                for i_dat, dat in enumerate(d_extracted[name]):
+1297                    new_deltas[name] += np.tensordot(deriv[i_val + (i_dat, )], dat)
+1298            for name in new_cov_names:
+1299                new_grad[name] = 0
+1300                for i_dat, dat in enumerate(g_extracted[name]):
+1301                    new_grad[name] += np.tensordot(deriv[i_val + (i_dat, )], dat)
+1302        else:
+1303            for j_obs, obs in np.ndenumerate(data):
+1304                for name in obs.names:
+1305                    if name in obs.cov_names:
+1306                        new_grad[name] = new_grad.get(name, 0) + deriv[i_val + j_obs] * obs.covobs[name].grad
+1307                    else:
+1308                        new_deltas[name] = new_deltas.get(name, 0) + deriv[i_val + j_obs] * _expand_deltas_for_merge(obs.deltas[name], obs.idl[name], obs.shape[name], new_idl_d[name])
 1309
-1310        if not set(new_covobs.keys()).isdisjoint(new_deltas.keys()):
-1311            raise Exception('The same name has been used for deltas and covobs!')
-1312        new_samples = []
-1313        new_means = []
-1314        new_idl = []
-1315        new_names_obs = []
-1316        for name in new_names:
-1317            if name not in new_covobs:
-1318                new_samples.append(new_deltas[name])
-1319                new_idl.append(new_idl_d[name])
-1320                new_means.append(new_r_values[name][i_val])
-1321                new_names_obs.append(name)
-1322        final_result[i_val] = Obs(new_samples, new_names_obs, means=new_means, idl=new_idl)
-1323        for name in new_covobs:
-1324            final_result[i_val].names.append(name)
-1325        final_result[i_val]._covobs = new_covobs
-1326        final_result[i_val]._value = new_val
-1327        final_result[i_val].reweighted = reweighted
-1328
-1329    if multi == 0:
-1330        final_result = final_result.item()
-1331
-1332    return final_result
+1310        new_covobs = {name: Covobs(0, allcov[name], name, grad=new_grad[name]) for name in new_grad}
+1311
+1312        if not set(new_covobs.keys()).isdisjoint(new_deltas.keys()):
+1313            raise Exception('The same name has been used for deltas and covobs!')
+1314        new_samples = []
+1315        new_means = []
+1316        new_idl = []
+1317        new_names_obs = []
+1318        for name in new_names:
+1319            if name not in new_covobs:
+1320                new_samples.append(new_deltas[name])
+1321                new_idl.append(new_idl_d[name])
+1322                new_means.append(new_r_values[name][i_val])
+1323                new_names_obs.append(name)
+1324        final_result[i_val] = Obs(new_samples, new_names_obs, means=new_means, idl=new_idl)
+1325        for name in new_covobs:
+1326            final_result[i_val].names.append(name)
+1327        final_result[i_val]._covobs = new_covobs
+1328        final_result[i_val]._value = new_val
+1329        final_result[i_val].reweighted = reweighted
+1330
+1331    if multi == 0:
+1332        final_result = final_result.item()
+1333
+1334    return final_result
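
The man_grad and num_grad branches above can be driven directly through derived_observable. A minimal sketch, assuming a hypothetical ensemble 'ensA' with synthetic samples; base_step and step_ratio are the numdifftools options handled in the code above:

    import numpy as np
    import pyerrors as pe

    # hypothetical ensemble with synthetic Monte Carlo samples
    obs = pe.Obs([np.random.normal(1.0, 0.1, 1000)], ['ensA'])

    # supply the analytic gradient by hand: d/dx sin(x) = cos(x)
    sin_man = pe.derived_observable(lambda x, **kwargs: np.sin(x[0]), [obs],
                                    man_grad=[np.cos(obs.value)])

    # let the numerical-derivative branch (numdifftools) estimate the gradient
    sin_num = pe.derived_observable(lambda x, **kwargs: np.sin(x[0]), [obs],
                                    num_grad=True, base_step=0.1, step_ratio=2.5)

    sin_man.gamma_method()
    sin_num.gamma_method()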
 
@@ -5413,46 +5417,46 @@ functions. For the ratio of two observables one can e.g. use

-
1364def reweight(weight, obs, **kwargs):
-1365    """Reweight a list of observables.
-1366
-1367    Parameters
-1368    ----------
-1369    weight : Obs
-1370        Reweighting factor. An Observable that has to be defined on a superset of the
-1371        configurations in obs[i].idl for all i.
-1372    obs : list
-1373        list of Obs, e.g. [obs1, obs2, obs3].
-1374    all_configs : bool
-1375        if True, the reweighted observables are normalized by the average of
-1376        the reweighting factor on all configurations in weight.idl and not
-1377        on the configurations in obs[i].idl. Default False.
-1378    """
-1379    result = []
-1380    for i in range(len(obs)):
-1381        if len(obs[i].cov_names):
-1382            raise Exception('Error: Not possible to reweight an Obs that contains covobs!')
-1383        if not set(obs[i].names).issubset(weight.names):
-1384            raise Exception('Error: Ensembles do not fit')
-1385        for name in obs[i].names:
-1386            if not set(obs[i].idl[name]).issubset(weight.idl[name]):
-1387                raise Exception('obs[%d] has to be defined on a subset of the configs in weight.idl[%s]!' % (i, name))
-1388        new_samples = []
-1389        w_deltas = {}
-1390        for name in sorted(obs[i].names):
-1391            w_deltas[name] = _reduce_deltas(weight.deltas[name], weight.idl[name], obs[i].idl[name])
-1392            new_samples.append((w_deltas[name] + weight.r_values[name]) * (obs[i].deltas[name] + obs[i].r_values[name]))
-1393        tmp_obs = Obs(new_samples, sorted(obs[i].names), idl=[obs[i].idl[name] for name in sorted(obs[i].names)])
-1394
-1395        if kwargs.get('all_configs'):
-1396            new_weight = weight
-1397        else:
-1398            new_weight = Obs([w_deltas[name] + weight.r_values[name] for name in sorted(obs[i].names)], sorted(obs[i].names), idl=[obs[i].idl[name] for name in sorted(obs[i].names)])
-1399
-1400        result.append(tmp_obs / new_weight)
-1401        result[-1].reweighted = True
-1402
-1403    return result
+            
1366def reweight(weight, obs, **kwargs):
+1367    """Reweight a list of observables.
+1368
+1369    Parameters
+1370    ----------
+1371    weight : Obs
+1372        Reweighting factor. An Observable that has to be defined on a superset of the
+1373        configurations in obs[i].idl for all i.
+1374    obs : list
+1375        list of Obs, e.g. [obs1, obs2, obs3].
+1376    all_configs : bool
+1377        if True, the reweighted observables are normalized by the average of
+1378        the reweighting factor on all configurations in weight.idl and not
+1379        on the configurations in obs[i].idl. Default False.
+1380    """
+1381    result = []
+1382    for i in range(len(obs)):
+1383        if len(obs[i].cov_names):
+1384            raise Exception('Error: Not possible to reweight an Obs that contains covobs!')
+1385        if not set(obs[i].names).issubset(weight.names):
+1386            raise Exception('Error: Ensembles do not fit')
+1387        for name in obs[i].names:
+1388            if not set(obs[i].idl[name]).issubset(weight.idl[name]):
+1389                raise Exception('obs[%d] has to be defined on a subset of the configs in weight.idl[%s]!' % (i, name))
+1390        new_samples = []
+1391        w_deltas = {}
+1392        for name in sorted(obs[i].names):
+1393            w_deltas[name] = _reduce_deltas(weight.deltas[name], weight.idl[name], obs[i].idl[name])
+1394            new_samples.append((w_deltas[name] + weight.r_values[name]) * (obs[i].deltas[name] + obs[i].r_values[name]))
+1395        tmp_obs = Obs(new_samples, sorted(obs[i].names), idl=[obs[i].idl[name] for name in sorted(obs[i].names)])
+1396
+1397        if kwargs.get('all_configs'):
+1398            new_weight = weight
+1399        else:
+1400            new_weight = Obs([w_deltas[name] + weight.r_values[name] for name in sorted(obs[i].names)], sorted(obs[i].names), idl=[obs[i].idl[name] for name in sorted(obs[i].names)])
+1401
+1402        result.append(tmp_obs / new_weight)
+1403        result[-1].reweighted = True
+1404
+1405    return result
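
As a usage illustration for reweight, a minimal sketch with a hypothetical ensemble 'ensA'; the synthetic weight factor stands in for a real reweighting factor defined on the same configurations:

    import numpy as np
    import pyerrors as pe

    obs = pe.Obs([np.random.normal(1.0, 0.1, 1000)], ['ensA'])
    weight = pe.Obs([np.random.normal(1.0, 0.01, 1000)], ['ensA'])

    reweighted = pe.reweight(weight, [obs])[0]   # <w * obs> / <w>
    reweighted.gamma_method()
    assert reweighted.reweighted is True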
 
@@ -5486,47 +5490,47 @@ on the configurations in obs[i].idl. Default False.
-
1406def correlate(obs_a, obs_b):
-1407    """Correlate two observables.
-1408
-1409    Parameters
-1410    ----------
-1411    obs_a : Obs
-1412        First observable
-1413    obs_b : Obs
-1414        Second observable
-1415
-1416    Notes
-1417    -----
-1418    Keep in mind to only correlate primary observables which have not been reweighted
-1419    yet. The reweighting has to be applied after correlating the observables.
-1420    Currently only works if ensembles are identical (this is not strictly necessary).
-1421    """
-1422
-1423    if sorted(obs_a.names) != sorted(obs_b.names):
-1424        raise Exception(f"Ensembles do not fit {set(sorted(obs_a.names)) ^ set(sorted(obs_b.names))}")
-1425    if len(obs_a.cov_names) or len(obs_b.cov_names):
-1426        raise Exception('Error: Not possible to correlate Obs that contain covobs!')
-1427    for name in obs_a.names:
-1428        if obs_a.shape[name] != obs_b.shape[name]:
-1429            raise Exception('Shapes of ensemble', name, 'do not fit')
-1430        if obs_a.idl[name] != obs_b.idl[name]:
-1431            raise Exception('idl of ensemble', name, 'do not fit')
-1432
-1433    if obs_a.reweighted is True:
-1434        warnings.warn("The first observable is already reweighted.", RuntimeWarning)
-1435    if obs_b.reweighted is True:
-1436        warnings.warn("The second observable is already reweighted.", RuntimeWarning)
-1437
-1438    new_samples = []
-1439    new_idl = []
-1440    for name in sorted(obs_a.names):
-1441        new_samples.append((obs_a.deltas[name] + obs_a.r_values[name]) * (obs_b.deltas[name] + obs_b.r_values[name]))
-1442        new_idl.append(obs_a.idl[name])
-1443
-1444    o = Obs(new_samples, sorted(obs_a.names), idl=new_idl)
-1445    o.reweighted = obs_a.reweighted or obs_b.reweighted
-1446    return o
+            
1408def correlate(obs_a, obs_b):
+1409    """Correlate two observables.
+1410
+1411    Parameters
+1412    ----------
+1413    obs_a : Obs
+1414        First observable
+1415    obs_b : Obs
+1416        Second observable
+1417
+1418    Notes
+1419    -----
+1420    Keep in mind to only correlate primary observables which have not been reweighted
+1421    yet. The reweighting has to be applied after correlating the observables.
+1422    Currently only works if ensembles are identical (this is not strictly necessary).
+1423    """
+1424
+1425    if sorted(obs_a.names) != sorted(obs_b.names):
+1426        raise Exception(f"Ensembles do not fit {set(sorted(obs_a.names)) ^ set(sorted(obs_b.names))}")
+1427    if len(obs_a.cov_names) or len(obs_b.cov_names):
+1428        raise Exception('Error: Not possible to correlate Obs that contain covobs!')
+1429    for name in obs_a.names:
+1430        if obs_a.shape[name] != obs_b.shape[name]:
+1431            raise Exception('Shapes of ensemble', name, 'do not fit')
+1432        if obs_a.idl[name] != obs_b.idl[name]:
+1433            raise Exception('idl of ensemble', name, 'do not fit')
+1434
+1435    if obs_a.reweighted is True:
+1436        warnings.warn("The first observable is already reweighted.", RuntimeWarning)
+1437    if obs_b.reweighted is True:
+1438        warnings.warn("The second observable is already reweighted.", RuntimeWarning)
+1439
+1440    new_samples = []
+1441    new_idl = []
+1442    for name in sorted(obs_a.names):
+1443        new_samples.append((obs_a.deltas[name] + obs_a.r_values[name]) * (obs_b.deltas[name] + obs_b.r_values[name]))
+1444        new_idl.append(obs_a.idl[name])
+1445
+1446    o = Obs(new_samples, sorted(obs_a.names), idl=new_idl)
+1447    o.reweighted = obs_a.reweighted or obs_b.reweighted
+1448    return o
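
A minimal sketch of correlate, following the note above that both observables must be primary, not yet reweighted, and defined on identical configurations (ensemble name and data are hypothetical):

    import numpy as np
    import pyerrors as pe

    obs_a = pe.Obs([np.random.normal(1.0, 0.1, 1000)], ['ensA'])
    obs_b = pe.Obs([np.random.normal(2.0, 0.2, 1000)], ['ensA'])

    # per-configuration product; statistical correlations are preserved
    ab = pe.correlate(obs_a, obs_b)
    ab.gamma_method()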
 
@@ -5561,74 +5565,74 @@ Currently only works if ensembles are identical (this is not strictly necessary)
-
1449def covariance(obs, visualize=False, correlation=False, smooth=None, **kwargs):
-1450    r'''Calculates the error covariance matrix of a set of observables.
-1451
-1452    WARNING: This function should be used with care, especially for observables with support on multiple
-1453             ensembles with differing autocorrelations. See the notes below for details.
-1454
-1455    The gamma method has to be applied first to all observables.
+            
1451def covariance(obs, visualize=False, correlation=False, smooth=None, **kwargs):
+1452    r'''Calculates the error covariance matrix of a set of observables.
+1453
+1454    WARNING: This function should be used with care, especially for observables with support on multiple
+1455             ensembles with differing autocorrelations. See the notes below for details.
 1456
-1457    Parameters
-1458    ----------
-1459    obs : list or numpy.ndarray
-1460        List or one dimensional array of Obs
-1461    visualize : bool
-1462        If True plots the corresponding normalized correlation matrix (default False).
-1463    correlation : bool
-1464        If True the correlation matrix instead of the error covariance matrix is returned (default False).
-1465    smooth : None or int
-1466        If smooth is an integer 'E' between 2 and the dimension of the matrix minus 1 the eigenvalue
-1467        smoothing procedure of hep-lat/9412087 is applied to the correlation matrix which leaves the
-1468        largest E eigenvalues essentially unchanged and smoothes the smaller eigenvalues to avoid extremely
-1469        small ones.
-1470
-1471    Notes
-1472    -----
-1473    The error covariance is defined such that it agrees with the squared standard error for two identical observables
-1474    $$\operatorname{cov}(a,a)=\sum_{s=1}^N\delta_a^s\delta_a^s/N^2=\Gamma_{aa}(0)/N=\operatorname{var}(a)/N=\sigma_a^2$$
-1475    in the absence of autocorrelation.
-1476    The error covariance is estimated by calculating the correlation matrix assuming no autocorrelation and then rescaling the correlation matrix by the full errors including the previous gamma method estimate for the autocorrelation of the observables. The covariance at windowsize 0 is guaranteed to be positive semi-definite
-1477    $$\sum_{i,j}v_i\Gamma_{ij}(0)v_j=\frac{1}{N}\sum_{s=1}^N\sum_{i,j}v_i\delta_i^s\delta_j^s v_j=\frac{1}{N}\sum_{s=1}^N\sum_{i}|v_i\delta_i^s|^2\geq 0\,,$$ for every $v\in\mathbb{R}^M$, while such an identity does not hold for larger windows/lags.
-1478    For observables defined on a single ensemble our approximation is equivalent to assuming that the integrated autocorrelation time of an off-diagonal element is equal to the geometric mean of the integrated autocorrelation times of the corresponding diagonal elements.
-1479    $$\tau_{\mathrm{int}, ij}=\sqrt{\tau_{\mathrm{int}, i}\times \tau_{\mathrm{int}, j}}$$
-1480    This construction ensures that the estimated covariance matrix is positive semi-definite (up to numerical rounding errors).
-1481    '''
-1482
-1483    length = len(obs)
+1457    The gamma method has to be applied first to all observables.
+1458
+1459    Parameters
+1460    ----------
+1461    obs : list or numpy.ndarray
+1462        List or one dimensional array of Obs
+1463    visualize : bool
+1464        If True plots the corresponding normalized correlation matrix (default False).
+1465    correlation : bool
+1466        If True the correlation matrix instead of the error covariance matrix is returned (default False).
+1467    smooth : None or int
+1468        If smooth is an integer 'E' between 2 and the dimension of the matrix minus 1 the eigenvalue
+1469        smoothing procedure of hep-lat/9412087 is applied to the correlation matrix which leaves the
+1470        largest E eigenvalues essentially unchanged and smoothes the smaller eigenvalues to avoid extremely
+1471        small ones.
+1472
+1473    Notes
+1474    -----
+1475    The error covariance is defined such that it agrees with the squared standard error for two identical observables
+1476    $$\operatorname{cov}(a,a)=\sum_{s=1}^N\delta_a^s\delta_a^s/N^2=\Gamma_{aa}(0)/N=\operatorname{var}(a)/N=\sigma_a^2$$
+1477    in the absence of autocorrelation.
+1478    The error covariance is estimated by calculating the correlation matrix assuming no autocorrelation and then rescaling the correlation matrix by the full errors including the previous gamma method estimate for the autocorrelation of the observables. The covariance at windowsize 0 is guaranteed to be positive semi-definite
+1479    $$\sum_{i,j}v_i\Gamma_{ij}(0)v_j=\frac{1}{N}\sum_{s=1}^N\sum_{i,j}v_i\delta_i^s\delta_j^s v_j=\frac{1}{N}\sum_{s=1}^N\sum_{i}|v_i\delta_i^s|^2\geq 0\,,$$ for every $v\in\mathbb{R}^M$, while such an identity does not hold for larger windows/lags.
+1480    For observables defined on a single ensemble our approximation is equivalent to assuming that the integrated autocorrelation time of an off-diagonal element is equal to the geometric mean of the integrated autocorrelation times of the corresponding diagonal elements.
+1481    $$\tau_{\mathrm{int}, ij}=\sqrt{\tau_{\mathrm{int}, i}\times \tau_{\mathrm{int}, j}}$$
+1482    This construction ensures that the estimated covariance matrix is positive semi-definite (up to numerical rounding errors).
+1483    '''
 1484
-1485    max_samples = np.max([o.N for o in obs])
-1486    if max_samples <= length and not [item for sublist in [o.cov_names for o in obs] for item in sublist]:
-1487        warnings.warn(f"The dimension of the covariance matrix ({length}) is larger or equal to the number of samples ({max_samples}). This will result in a rank deficient matrix.", RuntimeWarning)
-1488
-1489    cov = np.zeros((length, length))
-1490    for i in range(length):
-1491        for j in range(i, length):
-1492            cov[i, j] = _covariance_element(obs[i], obs[j])
-1493    cov = cov + cov.T - np.diag(np.diag(cov))
-1494
-1495    corr = np.diag(1 / np.sqrt(np.diag(cov))) @ cov @ np.diag(1 / np.sqrt(np.diag(cov)))
+1485    length = len(obs)
+1486
+1487    max_samples = np.max([o.N for o in obs])
+1488    if max_samples <= length and not [item for sublist in [o.cov_names for o in obs] for item in sublist]:
+1489        warnings.warn(f"The dimension of the covariance matrix ({length}) is larger or equal to the number of samples ({max_samples}). This will result in a rank deficient matrix.", RuntimeWarning)
+1490
+1491    cov = np.zeros((length, length))
+1492    for i in range(length):
+1493        for j in range(i, length):
+1494            cov[i, j] = _covariance_element(obs[i], obs[j])
+1495    cov = cov + cov.T - np.diag(np.diag(cov))
 1496
-1497    if isinstance(smooth, int):
-1498        corr = _smooth_eigenvalues(corr, smooth)
-1499
-1500    if visualize:
-1501        plt.matshow(corr, vmin=-1, vmax=1)
-1502        plt.set_cmap('RdBu')
-1503        plt.colorbar()
-1504        plt.draw()
-1505
-1506    if correlation is True:
-1507        return corr
-1508
-1509    errors = [o.dvalue for o in obs]
-1510    cov = np.diag(errors) @ corr @ np.diag(errors)
-1511
-1512    eigenvalues = np.linalg.eigh(cov)[0]
-1513    if not np.all(eigenvalues >= 0):
-1514        warnings.warn("Covariance matrix is not positive semi-definite (Eigenvalues: " + str(eigenvalues) + ")", RuntimeWarning)
-1515
-1516    return cov
+1497    corr = np.diag(1 / np.sqrt(np.diag(cov))) @ cov @ np.diag(1 / np.sqrt(np.diag(cov)))
+1498
+1499    if isinstance(smooth, int):
+1500        corr = _smooth_eigenvalues(corr, smooth)
+1501
+1502    if visualize:
+1503        plt.matshow(corr, vmin=-1, vmax=1)
+1504        plt.set_cmap('RdBu')
+1505        plt.colorbar()
+1506        plt.draw()
+1507
+1508    if correlation is True:
+1509        return corr
+1510
+1511    errors = [o.dvalue for o in obs]
+1512    cov = np.diag(errors) @ corr @ np.diag(errors)
+1513
+1514    eigenvalues = np.linalg.eigh(cov)[0]
+1515    if not np.all(eigenvalues >= 0):
+1516        warnings.warn("Covariance matrix is not positive semi-definite (Eigenvalues: " + str(eigenvalues) + ")", RuntimeWarning)
+1517
+1518    return cov
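
A minimal sketch of covariance; per the docstring, the gamma method has to be applied to every observable first (ensemble name and data are hypothetical):

    import numpy as np
    import pyerrors as pe

    obs1 = pe.Obs([np.random.normal(1.0, 0.1, 1000)], ['ensA'])
    obs2 = pe.Obs([np.random.normal(2.0, 0.2, 1000)], ['ensA'])
    obs1.gamma_method()
    obs2.gamma_method()

    cov = pe.covariance([obs1, obs2])                     # 2x2 error covariance matrix
    corr = pe.covariance([obs1, obs2], correlation=True)  # normalized correlation matrix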
 
@@ -5680,24 +5684,24 @@ This construction ensures that the estimated covariance matrix is positive semi-
-
1596def import_jackknife(jacks, name, idl=None):
-1597    """Imports jackknife samples and returns an Obs
-1598
-1599    Parameters
-1600    ----------
-1601    jacks : numpy.ndarray
-1602        numpy array containing the mean value as zeroth entry and
-1603        the N jackknife samples as first to Nth entry.
-1604    name : str
-1605        name of the ensemble the samples are defined on.
-1606    """
-1607    length = len(jacks) - 1
-1608    prj = (np.ones((length, length)) - (length - 1) * np.identity(length))
-1609    samples = jacks[1:] @ prj
-1610    mean = np.mean(samples)
-1611    new_obs = Obs([samples - mean], [name], idl=idl, means=[mean])
-1612    new_obs._value = jacks[0]
-1613    return new_obs
+            
1598def import_jackknife(jacks, name, idl=None):
+1599    """Imports jackknife samples and returns an Obs
+1600
+1601    Parameters
+1602    ----------
+1603    jacks : numpy.ndarray
+1604        numpy array containing the mean value as zeroth entry and
+1605        the N jackknife samples as first to Nth entry.
+1606    name : str
+1607        name of the ensemble the samples are defined on.
+1608    """
+1609    length = len(jacks) - 1
+1610    prj = (np.ones((length, length)) - (length - 1) * np.identity(length))
+1611    samples = jacks[1:] @ prj
+1612    mean = np.mean(samples)
+1613    new_obs = Obs([samples - mean], [name], idl=idl, means=[mean])
+1614    new_obs._value = jacks[0]
+1615    return new_obs
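
A round-trip sketch for import_jackknife. It assumes the companion Obs.export_jackknife method, which produces the layout expected here (mean value as zeroth entry, the N jackknife samples after it); ensemble name and data are hypothetical:

    import numpy as np
    import pyerrors as pe

    obs = pe.Obs([np.random.normal(1.0, 0.1, 500)], ['ensA'])

    jacks = obs.export_jackknife()            # [mean, jack_1, ..., jack_N]
    rec = pe.import_jackknife(jacks, 'ensA')  # reconstructs an equivalent Obs
    rec.gamma_method()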
 
@@ -5727,34 +5731,34 @@ name of the ensemble the samples are defined on.
-
1616def import_bootstrap(boots, name, random_numbers):
-1617    """Imports bootstrap samples and returns an Obs
-1618
-1619    Parameters
-1620    ----------
-1621    boots : numpy.ndarray
-1622        numpy array containing the mean value as zeroth entry and
-1623        the N bootstrap samples as first to Nth entry.
-1624    name : str
-1625        name of the ensemble the samples are defined on.
-1626    random_numbers : np.ndarray
-1627        Array of shape (samples, length) containing the random numbers to generate the bootstrap samples,
-1628        where samples is the number of bootstrap samples and length is the length of the original Monte Carlo
-1629        chain to be reconstructed.
-1630    """
-1631    samples, length = random_numbers.shape
-1632    if samples != len(boots) - 1:
-1633        raise ValueError("Random numbers do not have the correct shape.")
-1634
-1635    if samples < length:
-1636        raise ValueError("Obs can't be reconstructed if there are fewer bootstrap samples than Monte Carlo data points.")
-1637
-1638    proj = np.vstack([np.bincount(o, minlength=length) for o in random_numbers]) / length
+            
1618def import_bootstrap(boots, name, random_numbers):
+1619    """Imports bootstrap samples and returns an Obs
+1620
+1621    Parameters
+1622    ----------
+1623    boots : numpy.ndarray
+1624        numpy array containing the mean value as zeroth entry and
+1625        the N bootstrap samples as first to Nth entry.
+1626    name : str
+1627        name of the ensemble the samples are defined on.
+1628    random_numbers : np.ndarray
+1629        Array of shape (samples, length) containing the random numbers to generate the bootstrap samples,
+1630        where samples is the number of bootstrap samples and length is the length of the original Monte Carlo
+1631        chain to be reconstructed.
+1632    """
+1633    samples, length = random_numbers.shape
+1634    if samples != len(boots) - 1:
+1635        raise ValueError("Random numbers do not have the correct shape.")
+1636
+1637    if samples < length:
+1638        raise ValueError("Obs can't be reconstructed if there are fewer bootstrap samples than Monte Carlo data points.")
 1639
-1640    samples = scipy.linalg.lstsq(proj, boots[1:])[0]
-1641    ret = Obs([samples], [name])
-1642    ret._value = boots[0]
-1643    return ret
+1640    proj = np.vstack([np.bincount(o, minlength=length) for o in random_numbers]) / length
+1641
+1642    samples = scipy.linalg.lstsq(proj, boots[1:])[0]
+1643    ret = Obs([samples], [name])
+1644    ret._value = boots[0]
+1645    return ret
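
A self-contained sketch of import_bootstrap that builds the inputs by hand instead of assuming an export helper; note the requirement above that there are at least as many bootstrap samples as Monte Carlo data points (all names and data are hypothetical):

    import numpy as np
    import pyerrors as pe

    length = 100                                  # length of the original Monte Carlo chain
    data = np.random.normal(1.0, 0.1, length)
    rng = np.random.default_rng(0)
    random_numbers = rng.integers(0, length, size=(500, length))  # shape (samples, length)

    # zeroth entry: mean value, then one bootstrap mean per row of random_numbers
    boots = np.concatenate([[data.mean()], [data[rn].mean() for rn in random_numbers]])

    rec = pe.import_bootstrap(boots, 'ensA', random_numbers)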
 
@@ -5788,34 +5792,34 @@ chain to be reconstructed.
-
1646def merge_obs(list_of_obs):
-1647    """Combine all observables in list_of_obs into one new observable
-1648
-1649    Parameters
-1650    ----------
-1651    list_of_obs : list
-1652        list of the Obs object to be combined
-1653
-1654    Notes
-1655    -----
-1656    It is not possible to combine obs which are based on the same replicum
-1657    """
-1658    replist = [item for obs in list_of_obs for item in obs.names]
-1659    if (len(replist) == len(set(replist))) is False:
-1660        raise Exception('list_of_obs contains duplicate replica: %s' % (str(replist)))
-1661    if any([len(o.cov_names) for o in list_of_obs]):
-1662        raise Exception('Not possible to merge data that contains covobs!')
-1663    new_dict = {}
-1664    idl_dict = {}
-1665    for o in list_of_obs:
-1666        new_dict.update({key: o.deltas.get(key, 0) + o.r_values.get(key, 0)
-1667                        for key in set(o.deltas) | set(o.r_values)})
-1668        idl_dict.update({key: o.idl.get(key, 0) for key in set(o.deltas)})
-1669
-1670    names = sorted(new_dict.keys())
-1671    o = Obs([new_dict[name] for name in names], names, idl=[idl_dict[name] for name in names])
-1672    o.reweighted = np.max([oi.reweighted for oi in list_of_obs])
-1673    return o
+            
1648def merge_obs(list_of_obs):
+1649    """Combine all observables in list_of_obs into one new observable
+1650
+1651    Parameters
+1652    ----------
+1653    list_of_obs : list
+1654        list of the Obs object to be combined
+1655
+1656    Notes
+1657    -----
+1658    It is not possible to combine obs which are based on the same replicum
+1659    """
+1660    replist = [item for obs in list_of_obs for item in obs.names]
+1661    if (len(replist) == len(set(replist))) is False:
+1662        raise Exception('list_of_obs contains duplicate replica: %s' % (str(replist)))
+1663    if any([len(o.cov_names) for o in list_of_obs]):
+1664        raise Exception('Not possible to merge data that contains covobs!')
+1665    new_dict = {}
+1666    idl_dict = {}
+1667    for o in list_of_obs:
+1668        new_dict.update({key: o.deltas.get(key, 0) + o.r_values.get(key, 0)
+1669                        for key in set(o.deltas) | set(o.r_values)})
+1670        idl_dict.update({key: o.idl.get(key, 0) for key in set(o.deltas)})
+1671
+1672    names = sorted(new_dict.keys())
+1673    o = Obs([new_dict[name] for name in names], names, idl=[idl_dict[name] for name in names])
+1674    o.reweighted = np.max([oi.reweighted for oi in list_of_obs])
+1675    return o
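
A minimal sketch of merge_obs, combining two replica of the same hypothetical ensemble (pyerrors' 'ensemble|replicum' naming convention is assumed, so the two names are distinct as required above):

    import numpy as np
    import pyerrors as pe

    r1 = pe.Obs([np.random.normal(1.0, 0.1, 500)], ['ensA|r1'])
    r2 = pe.Obs([np.random.normal(1.0, 0.1, 400)], ['ensA|r2'])

    combined = pe.merge_obs([r1, r2])   # one Obs carrying both replica
    combined.gamma_method()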
 
@@ -5846,47 +5850,47 @@ list of the Obs object to be combined
-
1676def cov_Obs(means, cov, name, grad=None):
-1677    """Create an Obs based on mean(s) and a covariance matrix
-1678
-1679    Parameters
-1680    ----------
-1681    mean : list of floats or float
-1682        N mean value(s) of the new Obs
-1683    cov : list or array
-1684        2d (NxN) Covariance matrix, 1d diagonal entries or 0d covariance
-1685    name : str
-1686        identifier for the covariance matrix
-1687    grad : list or array
-1688        Gradient of the Covobs wrt. the means belonging to cov.
-1689    """
-1690
-1691    def covobs_to_obs(co):
-1692        """Make an Obs out of a Covobs
-1693
-1694        Parameters
-1695        ----------
-1696        co : Covobs
-1697            Covobs to be embedded into the Obs
-1698        """
-1699        o = Obs([], [], means=[])
-1700        o._value = co.value
-1701        o.names.append(co.name)
-1702        o._covobs[co.name] = co
-1703        o._dvalue = np.sqrt(co.errsq())
-1704        return o
-1705
-1706    ol = []
-1707    if isinstance(means, (float, int)):
-1708        means = [means]
-1709
-1710    for i in range(len(means)):
-1711        ol.append(covobs_to_obs(Covobs(means[i], cov, name, pos=i, grad=grad)))
-1712    if ol[0].covobs[name].N != len(means):
-1713        raise Exception('You have to provide %d mean values!' % (ol[0].N))
-1714    if len(ol) == 1:
-1715        return ol[0]
-1716    return ol
+            
1678def cov_Obs(means, cov, name, grad=None):
+1679    """Create an Obs based on mean(s) and a covariance matrix
+1680
+1681    Parameters
+1682    ----------
+1683    mean : list of floats or float
+1684        N mean value(s) of the new Obs
+1685    cov : list or array
+1686        2d (NxN) Covariance matrix, 1d diagonal entries or 0d covariance
+1687    name : str
+1688        identifier for the covariance matrix
+1689    grad : list or array
+1690        Gradient of the Covobs wrt. the means belonging to cov.
+1691    """
+1692
+1693    def covobs_to_obs(co):
+1694        """Make an Obs out of a Covobs
+1695
+1696        Parameters
+1697        ----------
+1698        co : Covobs
+1699            Covobs to be embedded into the Obs
+1700        """
+1701        o = Obs([], [], means=[])
+1702        o._value = co.value
+1703        o.names.append(co.name)
+1704        o._covobs[co.name] = co
+1705        o._dvalue = np.sqrt(co.errsq())
+1706        return o
+1707
+1708    ol = []
+1709    if isinstance(means, (float, int)):
+1710        means = [means]
+1711
+1712    for i in range(len(means)):
+1713        ol.append(covobs_to_obs(Covobs(means[i], cov, name, pos=i, grad=grad)))
+1714    if ol[0].covobs[name].N != len(means):
+1715        raise Exception('You have to provide %d mean values!' % (ol[0].N))
+1716    if len(ol) == 1:
+1717        return ol[0]
+1718    return ol
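
A minimal sketch of cov_Obs for both supported shapes of the covariance argument; the names 'Zfac' and 'syst' are hypothetical identifiers:

    import pyerrors as pe

    # single mean with a 0d covariance (a plain variance)
    z = pe.cov_Obs(1.7377, 0.0057 ** 2, 'Zfac')

    # two correlated means with an explicit 2x2 covariance matrix; returns a list of Obs
    c1, c2 = pe.cov_Obs([1.0, 2.0], [[0.01, 0.002], [0.002, 0.02]], 'syst')

    print(z.value, z.dvalue)   # 1.7377 0.0057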