diff --git a/docs/pyerrors/obs.html b/docs/pyerrors/obs.html index 3df3848b..1969cc3d 100644 --- a/docs/pyerrors/obs.html +++ b/docs/pyerrors/obs.html @@ -901,959 +901,963 @@ 694 return _format_uncertainty(self.value, self._dvalue) 695 696 def __format__(self, format_type): - 697 my_str = _format_uncertainty(self.value, self._dvalue, - 698 significance=int(float(format_type.replace("+", "").replace("-", "")))) - 699 for char in ["+", " "]: - 700 if format_type.startswith(char): - 701 if my_str[0] != "-": - 702 my_str = char + my_str - 703 return my_str - 704 - 705 def __hash__(self): - 706 hash_tuple = (np.array([self.value]).astype(np.float32).data.tobytes(),) - 707 hash_tuple += tuple([o.astype(np.float32).data.tobytes() for o in self.deltas.values()]) - 708 hash_tuple += tuple([np.array([o.errsq()]).astype(np.float32).data.tobytes() for o in self.covobs.values()]) - 709 hash_tuple += tuple([o.encode() for o in self.names]) - 710 m = hashlib.md5() - 711 [m.update(o) for o in hash_tuple] - 712 return int(m.hexdigest(), 16) & 0xFFFFFFFF - 713 - 714 # Overload comparisons - 715 def __lt__(self, other): - 716 return self.value < other + 697 if format_type == "": + 698 significance = 2 + 699 else: + 700 significance = int(float(format_type.replace("+", "").replace("-", ""))) + 701 my_str = _format_uncertainty(self.value, self._dvalue, + 702 significance=significance) + 703 for char in ["+", " "]: + 704 if format_type.startswith(char): + 705 if my_str[0] != "-": + 706 my_str = char + my_str + 707 return my_str + 708 + 709 def __hash__(self): + 710 hash_tuple = (np.array([self.value]).astype(np.float32).data.tobytes(),) + 711 hash_tuple += tuple([o.astype(np.float32).data.tobytes() for o in self.deltas.values()]) + 712 hash_tuple += tuple([np.array([o.errsq()]).astype(np.float32).data.tobytes() for o in self.covobs.values()]) + 713 hash_tuple += tuple([o.encode() for o in self.names]) + 714 m = hashlib.md5() + 715 [m.update(o) for o in hash_tuple] + 716 return int(m.hexdigest(), 16) & 0xFFFFFFFF 717 - 718 def __le__(self, other): - 719 return self.value <= other - 720 - 721 def __gt__(self, other): - 722 return self.value > other - 723 - 724 def __ge__(self, other): - 725 return self.value >= other - 726 - 727 def __eq__(self, other): - 728 return (self - other).is_zero() - 729 - 730 def __ne__(self, other): - 731 return not (self - other).is_zero() - 732 - 733 # Overload math operations - 734 def __add__(self, y): - 735 if isinstance(y, Obs): - 736 return derived_observable(lambda x, **kwargs: x[0] + x[1], [self, y], man_grad=[1, 1]) - 737 else: - 738 if isinstance(y, np.ndarray): - 739 return np.array([self + o for o in y]) - 740 elif y.__class__.__name__ in ['Corr', 'CObs']: - 741 return NotImplemented - 742 else: - 743 return derived_observable(lambda x, **kwargs: x[0] + y, [self], man_grad=[1]) - 744 - 745 def __radd__(self, y): - 746 return self + y - 747 - 748 def __mul__(self, y): - 749 if isinstance(y, Obs): - 750 return derived_observable(lambda x, **kwargs: x[0] * x[1], [self, y], man_grad=[y.value, self.value]) - 751 else: - 752 if isinstance(y, np.ndarray): - 753 return np.array([self * o for o in y]) - 754 elif isinstance(y, complex): - 755 return CObs(self * y.real, self * y.imag) - 756 elif y.__class__.__name__ in ['Corr', 'CObs']: - 757 return NotImplemented - 758 else: - 759 return derived_observable(lambda x, **kwargs: x[0] * y, [self], man_grad=[y]) - 760 - 761 def __rmul__(self, y): - 762 return self * y - 763 - 764 def __sub__(self, y): - 765 if isinstance(y, Obs): - 766 
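# Usage sketch for the __format__ change above (assuming a pyerrors Obs
# my_obs with value 0.7 and dvalue 1.3 after gamma_method): an empty format
# spec now falls back to significance=2 instead of raising.
print(f"{my_obs}")     # '0.7(1.3)'   -- default of two significant digits
print(f"{my_obs:3}")   # '0.70(1.30)' -- three significant digits on the error
print(f"{my_obs:+2}")  # '+0.7(1.3)'  -- '+' is prepended to non-negative values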
return derived_observable(lambda x, **kwargs: x[0] - x[1], [self, y], man_grad=[1, -1]) - 767 else: - 768 if isinstance(y, np.ndarray): - 769 return np.array([self - o for o in y]) - 770 elif y.__class__.__name__ in ['Corr', 'CObs']: - 771 return NotImplemented - 772 else: - 773 return derived_observable(lambda x, **kwargs: x[0] - y, [self], man_grad=[1]) - 774 - 775 def __rsub__(self, y): - 776 return -1 * (self - y) - 777 - 778 def __pos__(self): - 779 return self - 780 - 781 def __neg__(self): - 782 return -1 * self - 783 - 784 def __truediv__(self, y): - 785 if isinstance(y, Obs): - 786 return derived_observable(lambda x, **kwargs: x[0] / x[1], [self, y], man_grad=[1 / y.value, - self.value / y.value ** 2]) - 787 else: - 788 if isinstance(y, np.ndarray): - 789 return np.array([self / o for o in y]) - 790 elif y.__class__.__name__ in ['Corr', 'CObs']: - 791 return NotImplemented - 792 else: - 793 return derived_observable(lambda x, **kwargs: x[0] / y, [self], man_grad=[1 / y]) - 794 - 795 def __rtruediv__(self, y): - 796 if isinstance(y, Obs): - 797 return derived_observable(lambda x, **kwargs: x[0] / x[1], [y, self], man_grad=[1 / self.value, - y.value / self.value ** 2]) - 798 else: - 799 if isinstance(y, np.ndarray): - 800 return np.array([o / self for o in y]) - 801 elif y.__class__.__name__ in ['Corr', 'CObs']: - 802 return NotImplemented - 803 else: - 804 return derived_observable(lambda x, **kwargs: y / x[0], [self], man_grad=[-y / self.value ** 2]) - 805 - 806 def __pow__(self, y): - 807 if isinstance(y, Obs): - 808 return derived_observable(lambda x: x[0] ** x[1], [self, y]) - 809 else: - 810 return derived_observable(lambda x: x[0] ** y, [self]) - 811 - 812 def __rpow__(self, y): - 813 if isinstance(y, Obs): - 814 return derived_observable(lambda x: x[0] ** x[1], [y, self]) - 815 else: - 816 return derived_observable(lambda x: y ** x[0], [self]) - 817 - 818 def __abs__(self): - 819 return derived_observable(lambda x: anp.abs(x[0]), [self]) - 820 - 821 # Overload numpy functions - 822 def sqrt(self): - 823 return derived_observable(lambda x, **kwargs: np.sqrt(x[0]), [self], man_grad=[1 / 2 / np.sqrt(self.value)]) + 718 # Overload comparisons + 719 def __lt__(self, other): + 720 return self.value < other + 721 + 722 def __le__(self, other): + 723 return self.value <= other + 724 + 725 def __gt__(self, other): + 726 return self.value > other + 727 + 728 def __ge__(self, other): + 729 return self.value >= other + 730 + 731 def __eq__(self, other): + 732 return (self - other).is_zero() + 733 + 734 def __ne__(self, other): + 735 return not (self - other).is_zero() + 736 + 737 # Overload math operations + 738 def __add__(self, y): + 739 if isinstance(y, Obs): + 740 return derived_observable(lambda x, **kwargs: x[0] + x[1], [self, y], man_grad=[1, 1]) + 741 else: + 742 if isinstance(y, np.ndarray): + 743 return np.array([self + o for o in y]) + 744 elif y.__class__.__name__ in ['Corr', 'CObs']: + 745 return NotImplemented + 746 else: + 747 return derived_observable(lambda x, **kwargs: x[0] + y, [self], man_grad=[1]) + 748 + 749 def __radd__(self, y): + 750 return self + y + 751 + 752 def __mul__(self, y): + 753 if isinstance(y, Obs): + 754 return derived_observable(lambda x, **kwargs: x[0] * x[1], [self, y], man_grad=[y.value, self.value]) + 755 else: + 756 if isinstance(y, np.ndarray): + 757 return np.array([self * o for o in y]) + 758 elif isinstance(y, complex): + 759 return CObs(self * y.real, self * y.imag) + 760 elif y.__class__.__name__ in ['Corr', 'CObs']: + 761 return 
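# The arithmetic overloads above are thin wrappers around derived_observable
# with hand-written gradients; a sketch assuming two Obs a and b, where both
# lines should yield equivalent observables:
ratio_op = a / b
ratio_explicit = derived_observable(lambda x, **kwargs: x[0] / x[1], [a, b],
                                    man_grad=[1 / b.value, -a.value / b.value ** 2])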
NotImplemented + 762 else: + 763 return derived_observable(lambda x, **kwargs: x[0] * y, [self], man_grad=[y]) + 764 + 765 def __rmul__(self, y): + 766 return self * y + 767 + 768 def __sub__(self, y): + 769 if isinstance(y, Obs): + 770 return derived_observable(lambda x, **kwargs: x[0] - x[1], [self, y], man_grad=[1, -1]) + 771 else: + 772 if isinstance(y, np.ndarray): + 773 return np.array([self - o for o in y]) + 774 elif y.__class__.__name__ in ['Corr', 'CObs']: + 775 return NotImplemented + 776 else: + 777 return derived_observable(lambda x, **kwargs: x[0] - y, [self], man_grad=[1]) + 778 + 779 def __rsub__(self, y): + 780 return -1 * (self - y) + 781 + 782 def __pos__(self): + 783 return self + 784 + 785 def __neg__(self): + 786 return -1 * self + 787 + 788 def __truediv__(self, y): + 789 if isinstance(y, Obs): + 790 return derived_observable(lambda x, **kwargs: x[0] / x[1], [self, y], man_grad=[1 / y.value, - self.value / y.value ** 2]) + 791 else: + 792 if isinstance(y, np.ndarray): + 793 return np.array([self / o for o in y]) + 794 elif y.__class__.__name__ in ['Corr', 'CObs']: + 795 return NotImplemented + 796 else: + 797 return derived_observable(lambda x, **kwargs: x[0] / y, [self], man_grad=[1 / y]) + 798 + 799 def __rtruediv__(self, y): + 800 if isinstance(y, Obs): + 801 return derived_observable(lambda x, **kwargs: x[0] / x[1], [y, self], man_grad=[1 / self.value, - y.value / self.value ** 2]) + 802 else: + 803 if isinstance(y, np.ndarray): + 804 return np.array([o / self for o in y]) + 805 elif y.__class__.__name__ in ['Corr', 'CObs']: + 806 return NotImplemented + 807 else: + 808 return derived_observable(lambda x, **kwargs: y / x[0], [self], man_grad=[-y / self.value ** 2]) + 809 + 810 def __pow__(self, y): + 811 if isinstance(y, Obs): + 812 return derived_observable(lambda x: x[0] ** x[1], [self, y]) + 813 else: + 814 return derived_observable(lambda x: x[0] ** y, [self]) + 815 + 816 def __rpow__(self, y): + 817 if isinstance(y, Obs): + 818 return derived_observable(lambda x: x[0] ** x[1], [y, self]) + 819 else: + 820 return derived_observable(lambda x: y ** x[0], [self]) + 821 + 822 def __abs__(self): + 823 return derived_observable(lambda x: anp.abs(x[0]), [self]) 824 - 825 def log(self): - 826 return derived_observable(lambda x, **kwargs: np.log(x[0]), [self], man_grad=[1 / self.value]) - 827 - 828 def exp(self): - 829 return derived_observable(lambda x, **kwargs: np.exp(x[0]), [self], man_grad=[np.exp(self.value)]) - 830 - 831 def sin(self): - 832 return derived_observable(lambda x, **kwargs: np.sin(x[0]), [self], man_grad=[np.cos(self.value)]) - 833 - 834 def cos(self): - 835 return derived_observable(lambda x, **kwargs: np.cos(x[0]), [self], man_grad=[-np.sin(self.value)]) - 836 - 837 def tan(self): - 838 return derived_observable(lambda x, **kwargs: np.tan(x[0]), [self], man_grad=[1 / np.cos(self.value) ** 2]) - 839 - 840 def arcsin(self): - 841 return derived_observable(lambda x: anp.arcsin(x[0]), [self]) - 842 - 843 def arccos(self): - 844 return derived_observable(lambda x: anp.arccos(x[0]), [self]) - 845 - 846 def arctan(self): - 847 return derived_observable(lambda x: anp.arctan(x[0]), [self]) - 848 - 849 def sinh(self): - 850 return derived_observable(lambda x, **kwargs: np.sinh(x[0]), [self], man_grad=[np.cosh(self.value)]) - 851 - 852 def cosh(self): - 853 return derived_observable(lambda x, **kwargs: np.cosh(x[0]), [self], man_grad=[np.sinh(self.value)]) - 854 - 855 def tanh(self): - 856 return derived_observable(lambda x, **kwargs: np.tanh(x[0]), [self], 
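# The methods in the "Overload numpy functions" block above let numpy ufuncs
# dispatch to Obs objects; a sketch assuming an Obs a:
s1 = a.sin()    # error propagated with the manual gradient cos(a.value)
s2 = np.sin(a)  # should resolve to the same sin() method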
man_grad=[1 / np.cosh(self.value) ** 2]) - 857 - 858 def arcsinh(self): - 859 return derived_observable(lambda x: anp.arcsinh(x[0]), [self]) - 860 - 861 def arccosh(self): - 862 return derived_observable(lambda x: anp.arccosh(x[0]), [self]) - 863 - 864 def arctanh(self): - 865 return derived_observable(lambda x: anp.arctanh(x[0]), [self]) - 866 + 825 # Overload numpy functions + 826 def sqrt(self): + 827 return derived_observable(lambda x, **kwargs: np.sqrt(x[0]), [self], man_grad=[1 / 2 / np.sqrt(self.value)]) + 828 + 829 def log(self): + 830 return derived_observable(lambda x, **kwargs: np.log(x[0]), [self], man_grad=[1 / self.value]) + 831 + 832 def exp(self): + 833 return derived_observable(lambda x, **kwargs: np.exp(x[0]), [self], man_grad=[np.exp(self.value)]) + 834 + 835 def sin(self): + 836 return derived_observable(lambda x, **kwargs: np.sin(x[0]), [self], man_grad=[np.cos(self.value)]) + 837 + 838 def cos(self): + 839 return derived_observable(lambda x, **kwargs: np.cos(x[0]), [self], man_grad=[-np.sin(self.value)]) + 840 + 841 def tan(self): + 842 return derived_observable(lambda x, **kwargs: np.tan(x[0]), [self], man_grad=[1 / np.cos(self.value) ** 2]) + 843 + 844 def arcsin(self): + 845 return derived_observable(lambda x: anp.arcsin(x[0]), [self]) + 846 + 847 def arccos(self): + 848 return derived_observable(lambda x: anp.arccos(x[0]), [self]) + 849 + 850 def arctan(self): + 851 return derived_observable(lambda x: anp.arctan(x[0]), [self]) + 852 + 853 def sinh(self): + 854 return derived_observable(lambda x, **kwargs: np.sinh(x[0]), [self], man_grad=[np.cosh(self.value)]) + 855 + 856 def cosh(self): + 857 return derived_observable(lambda x, **kwargs: np.cosh(x[0]), [self], man_grad=[np.sinh(self.value)]) + 858 + 859 def tanh(self): + 860 return derived_observable(lambda x, **kwargs: np.tanh(x[0]), [self], man_grad=[1 / np.cosh(self.value) ** 2]) + 861 + 862 def arcsinh(self): + 863 return derived_observable(lambda x: anp.arcsinh(x[0]), [self]) + 864 + 865 def arccosh(self): + 866 return derived_observable(lambda x: anp.arccosh(x[0]), [self]) 867 - 868class CObs: - 869 """Class for a complex valued observable.""" - 870 __slots__ = ['_real', '_imag', 'tag'] + 868 def arctanh(self): + 869 return derived_observable(lambda x: anp.arctanh(x[0]), [self]) + 870 871 - 872 def __init__(self, real, imag=0.0): - 873 self._real = real - 874 self._imag = imag - 875 self.tag = None - 876 - 877 @property - 878 def real(self): - 879 return self._real + 872class CObs: + 873 """Class for a complex valued observable.""" + 874 __slots__ = ['_real', '_imag', 'tag'] + 875 + 876 def __init__(self, real, imag=0.0): + 877 self._real = real + 878 self._imag = imag + 879 self.tag = None 880 881 @property - 882 def imag(self): - 883 return self._imag + 882 def real(self): + 883 return self._real 884 - 885 def gamma_method(self, **kwargs): - 886 """Executes the gamma_method for the real and the imaginary part.""" - 887 if isinstance(self.real, Obs): - 888 self.real.gamma_method(**kwargs) - 889 if isinstance(self.imag, Obs): - 890 self.imag.gamma_method(**kwargs) - 891 - 892 def is_zero(self): - 893 """Checks whether both real and imaginary part are zero within machine precision.""" - 894 return self.real == 0.0 and self.imag == 0.0 + 885 @property + 886 def imag(self): + 887 return self._imag + 888 + 889 def gamma_method(self, **kwargs): + 890 """Executes the gamma_method for the real and the imaginary part.""" + 891 if isinstance(self.real, Obs): + 892 self.real.gamma_method(**kwargs) + 893 if 
isinstance(self.imag, Obs): + 894 self.imag.gamma_method(**kwargs) 895 - 896 def conjugate(self): - 897 return CObs(self.real, -self.imag) - 898 - 899 def __add__(self, other): - 900 if isinstance(other, np.ndarray): - 901 return other + self - 902 elif hasattr(other, 'real') and hasattr(other, 'imag'): - 903 return CObs(self.real + other.real, - 904 self.imag + other.imag) - 905 else: - 906 return CObs(self.real + other, self.imag) - 907 - 908 def __radd__(self, y): - 909 return self + y - 910 - 911 def __sub__(self, other): - 912 if isinstance(other, np.ndarray): - 913 return -1 * (other - self) - 914 elif hasattr(other, 'real') and hasattr(other, 'imag'): - 915 return CObs(self.real - other.real, self.imag - other.imag) - 916 else: - 917 return CObs(self.real - other, self.imag) - 918 - 919 def __rsub__(self, other): - 920 return -1 * (self - other) - 921 - 922 def __mul__(self, other): - 923 if isinstance(other, np.ndarray): - 924 return other * self - 925 elif hasattr(other, 'real') and hasattr(other, 'imag'): - 926 if all(isinstance(i, Obs) for i in [self.real, self.imag, other.real, other.imag]): - 927 return CObs(derived_observable(lambda x, **kwargs: x[0] * x[1] - x[2] * x[3], - 928 [self.real, other.real, self.imag, other.imag], - 929 man_grad=[other.real.value, self.real.value, -other.imag.value, -self.imag.value]), - 930 derived_observable(lambda x, **kwargs: x[2] * x[1] + x[0] * x[3], - 931 [self.real, other.real, self.imag, other.imag], - 932 man_grad=[other.imag.value, self.imag.value, other.real.value, self.real.value])) - 933 elif getattr(other, 'imag', 0) != 0: - 934 return CObs(self.real * other.real - self.imag * other.imag, - 935 self.imag * other.real + self.real * other.imag) - 936 else: - 937 return CObs(self.real * other.real, self.imag * other.real) - 938 else: - 939 return CObs(self.real * other, self.imag * other) - 940 - 941 def __rmul__(self, other): - 942 return self * other - 943 - 944 def __truediv__(self, other): - 945 if isinstance(other, np.ndarray): - 946 return 1 / (other / self) - 947 elif hasattr(other, 'real') and hasattr(other, 'imag'): - 948 r = other.real ** 2 + other.imag ** 2 - 949 return CObs((self.real * other.real + self.imag * other.imag) / r, (self.imag * other.real - self.real * other.imag) / r) - 950 else: - 951 return CObs(self.real / other, self.imag / other) - 952 - 953 def __rtruediv__(self, other): - 954 r = self.real ** 2 + self.imag ** 2 - 955 if hasattr(other, 'real') and hasattr(other, 'imag'): - 956 return CObs((self.real * other.real + self.imag * other.imag) / r, (self.real * other.imag - self.imag * other.real) / r) - 957 else: - 958 return CObs(self.real * other / r, -self.imag * other / r) - 959 - 960 def __abs__(self): - 961 return np.sqrt(self.real**2 + self.imag**2) - 962 - 963 def __pos__(self): - 964 return self - 965 - 966 def __neg__(self): - 967 return -1 * self - 968 - 969 def __eq__(self, other): - 970 return self.real == other.real and self.imag == other.imag - 971 - 972 def __str__(self): - 973 return '(' + str(self.real) + int(self.imag >= 0.0) * '+' + str(self.imag) + 'j)' - 974 - 975 def __repr__(self): - 976 return 'CObs[' + str(self) + ']' - 977 + 896 def is_zero(self): + 897 """Checks whether both real and imaginary part are zero within machine precision.""" + 898 return self.real == 0.0 and self.imag == 0.0 + 899 + 900 def conjugate(self): + 901 return CObs(self.real, -self.imag) + 902 + 903 def __add__(self, other): + 904 if isinstance(other, np.ndarray): + 905 return other + self + 906 elif 
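# Usage sketch for CObs (assuming two real Obs re_part and im_part): the
# wrapper stores the parts in __slots__ and forwards gamma_method to both.
c = CObs(re_part, im_part)
c.gamma_method()
print(c.real.dvalue, c.imag.dvalue)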
hasattr(other, 'real') and hasattr(other, 'imag'): + 907 return CObs(self.real + other.real, + 908 self.imag + other.imag) + 909 else: + 910 return CObs(self.real + other, self.imag) + 911 + 912 def __radd__(self, y): + 913 return self + y + 914 + 915 def __sub__(self, other): + 916 if isinstance(other, np.ndarray): + 917 return -1 * (other - self) + 918 elif hasattr(other, 'real') and hasattr(other, 'imag'): + 919 return CObs(self.real - other.real, self.imag - other.imag) + 920 else: + 921 return CObs(self.real - other, self.imag) + 922 + 923 def __rsub__(self, other): + 924 return -1 * (self - other) + 925 + 926 def __mul__(self, other): + 927 if isinstance(other, np.ndarray): + 928 return other * self + 929 elif hasattr(other, 'real') and hasattr(other, 'imag'): + 930 if all(isinstance(i, Obs) for i in [self.real, self.imag, other.real, other.imag]): + 931 return CObs(derived_observable(lambda x, **kwargs: x[0] * x[1] - x[2] * x[3], + 932 [self.real, other.real, self.imag, other.imag], + 933 man_grad=[other.real.value, self.real.value, -other.imag.value, -self.imag.value]), + 934 derived_observable(lambda x, **kwargs: x[2] * x[1] + x[0] * x[3], + 935 [self.real, other.real, self.imag, other.imag], + 936 man_grad=[other.imag.value, self.imag.value, other.real.value, self.real.value])) + 937 elif getattr(other, 'imag', 0) != 0: + 938 return CObs(self.real * other.real - self.imag * other.imag, + 939 self.imag * other.real + self.real * other.imag) + 940 else: + 941 return CObs(self.real * other.real, self.imag * other.real) + 942 else: + 943 return CObs(self.real * other, self.imag * other) + 944 + 945 def __rmul__(self, other): + 946 return self * other + 947 + 948 def __truediv__(self, other): + 949 if isinstance(other, np.ndarray): + 950 return 1 / (other / self) + 951 elif hasattr(other, 'real') and hasattr(other, 'imag'): + 952 r = other.real ** 2 + other.imag ** 2 + 953 return CObs((self.real * other.real + self.imag * other.imag) / r, (self.imag * other.real - self.real * other.imag) / r) + 954 else: + 955 return CObs(self.real / other, self.imag / other) + 956 + 957 def __rtruediv__(self, other): + 958 r = self.real ** 2 + self.imag ** 2 + 959 if hasattr(other, 'real') and hasattr(other, 'imag'): + 960 return CObs((self.real * other.real + self.imag * other.imag) / r, (self.real * other.imag - self.imag * other.real) / r) + 961 else: + 962 return CObs(self.real * other / r, -self.imag * other / r) + 963 + 964 def __abs__(self): + 965 return np.sqrt(self.real**2 + self.imag**2) + 966 + 967 def __pos__(self): + 968 return self + 969 + 970 def __neg__(self): + 971 return -1 * self + 972 + 973 def __eq__(self, other): + 974 return self.real == other.real and self.imag == other.imag + 975 + 976 def __str__(self): + 977 return '(' + str(self.real) + int(self.imag >= 0.0) * '+' + str(self.imag) + 'j)' 978 - 979def _format_uncertainty(value, dvalue, significance=2): - 980 """Creates a string of a value and its error in paranthesis notation, e.g., 13.02(45)""" - 981 if dvalue == 0.0 or (not np.isfinite(dvalue)): - 982 return str(value) - 983 if not isinstance(significance, int): - 984 raise TypeError("significance needs to be an integer.") - 985 if significance < 1: - 986 raise ValueError("significance needs to be larger than zero.") - 987 fexp = np.floor(np.log10(dvalue)) - 988 if fexp < 0.0: - 989 return '{:{form}}({:1.0f})'.format(value, dvalue * 10 ** (-fexp + significance - 1), form='.' 
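# Consistency sketch for the CObs division above: dividing by a plain complex
# number z multiplies by its conjugate over |z|**2, so (assuming a CObs c)
# the two results below should agree up to rounding.
w = c / (3 + 4j)
w_alt = c * (3 - 4j) * (1 / 25)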
+ str(-int(fexp) + significance - 1) + 'f') - 990 elif fexp == 0.0: - 991 return f"{value:.{significance - 1}f}({dvalue:1.{significance - 1}f})" - 992 else: - 993 return f"{value:.{max(0, int(significance - fexp - 1))}f}({dvalue:2.{max(0, int(significance - fexp - 1))}f})" - 994 - 995 - 996def _expand_deltas(deltas, idx, shape, gapsize): - 997 """Expand deltas defined on idx to a regular range with spacing gapsize between two - 998 configurations and where holes are filled by 0. - 999 If idx is of type range, the deltas are not changed if the idx.step == gapsize. -1000 -1001 Parameters -1002 ---------- -1003 deltas : list -1004 List of fluctuations -1005 idx : list -1006 List or range of configs on which the deltas are defined, has to be sorted in ascending order. -1007 shape : int -1008 Number of configs in idx. -1009 gapsize : int -1010 The target distance between two configurations. If longer distances -1011 are found in idx, the data is expanded. -1012 """ -1013 if isinstance(idx, range): -1014 if (idx.step == gapsize): -1015 return deltas -1016 ret = np.zeros((idx[-1] - idx[0] + gapsize) // gapsize) -1017 for i in range(shape): -1018 ret[(idx[i] - idx[0]) // gapsize] = deltas[i] -1019 return ret -1020 -1021 -1022def _merge_idx(idl): -1023 """Returns the union of all lists in idl as range or sorted list + 979 def __repr__(self): + 980 return 'CObs[' + str(self) + ']' + 981 + 982 + 983def _format_uncertainty(value, dvalue, significance=2): + 984 """Creates a string of a value and its error in paranthesis notation, e.g., 13.02(45)""" + 985 if dvalue == 0.0 or (not np.isfinite(dvalue)): + 986 return str(value) + 987 if not isinstance(significance, int): + 988 raise TypeError("significance needs to be an integer.") + 989 if significance < 1: + 990 raise ValueError("significance needs to be larger than zero.") + 991 fexp = np.floor(np.log10(dvalue)) + 992 if fexp < 0.0: + 993 return '{:{form}}({:1.0f})'.format(value, dvalue * 10 ** (-fexp + significance - 1), form='.' + str(-int(fexp) + significance - 1) + 'f') + 994 elif fexp == 0.0: + 995 return f"{value:.{significance - 1}f}({dvalue:1.{significance - 1}f})" + 996 else: + 997 return f"{value:.{max(0, int(significance - fexp - 1))}f}({dvalue:2.{max(0, int(significance - fexp - 1))}f})" + 998 + 999 +1000def _expand_deltas(deltas, idx, shape, gapsize): +1001 """Expand deltas defined on idx to a regular range with spacing gapsize between two +1002 configurations and where holes are filled by 0. +1003 If idx is of type range, the deltas are not changed if the idx.step == gapsize. +1004 +1005 Parameters +1006 ---------- +1007 deltas : list +1008 List of fluctuations +1009 idx : list +1010 List or range of configs on which the deltas are defined, has to be sorted in ascending order. +1011 shape : int +1012 Number of configs in idx. +1013 gapsize : int +1014 The target distance between two configurations. If longer distances +1015 are found in idx, the data is expanded. +1016 """ +1017 if isinstance(idx, range): +1018 if (idx.step == gapsize): +1019 return deltas +1020 ret = np.zeros((idx[-1] - idx[0] + gapsize) // gapsize) +1021 for i in range(shape): +1022 ret[(idx[i] - idx[0]) // gapsize] = deltas[i] +1023 return ret 1024 -1025 Parameters -1026 ---------- -1027 idl : list -1028 List of lists or ranges. 
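# Worked example for _format_uncertainty above: with dvalue=0.00789 the
# exponent is fexp=-3, so the value gets -fexp + significance - 1 decimals
# and the error is scaled to `significance` digits in parentheses.
_format_uncertainty(1.23456, 0.00789)                  # -> '1.2346(79)'
_format_uncertainty(1.23456, 0.00789, significance=3)  # -> '1.23456(789)'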
-1029 """ -1030 -1031 if _check_lists_equal(idl): -1032 return idl[0] -1033 -1034 idunion = sorted(set().union(*idl)) -1035 -1036 # Check whether idunion can be expressed as range -1037 idrange = range(idunion[0], idunion[-1] + 1, idunion[1] - idunion[0]) -1038 idtest = [list(idrange), idunion] -1039 if _check_lists_equal(idtest): -1040 return idrange -1041 -1042 return idunion -1043 -1044 -1045def _intersection_idx(idl): -1046 """Returns the intersection of all lists in idl as range or sorted list +1025 +1026def _merge_idx(idl): +1027 """Returns the union of all lists in idl as range or sorted list +1028 +1029 Parameters +1030 ---------- +1031 idl : list +1032 List of lists or ranges. +1033 """ +1034 +1035 if _check_lists_equal(idl): +1036 return idl[0] +1037 +1038 idunion = sorted(set().union(*idl)) +1039 +1040 # Check whether idunion can be expressed as range +1041 idrange = range(idunion[0], idunion[-1] + 1, idunion[1] - idunion[0]) +1042 idtest = [list(idrange), idunion] +1043 if _check_lists_equal(idtest): +1044 return idrange +1045 +1046 return idunion 1047 -1048 Parameters -1049 ---------- -1050 idl : list -1051 List of lists or ranges. -1052 """ -1053 -1054 if _check_lists_equal(idl): -1055 return idl[0] -1056 -1057 idinter = sorted(set.intersection(*[set(o) for o in idl])) -1058 -1059 # Check whether idinter can be expressed as range -1060 try: -1061 idrange = range(idinter[0], idinter[-1] + 1, idinter[1] - idinter[0]) -1062 idtest = [list(idrange), idinter] -1063 if _check_lists_equal(idtest): -1064 return idrange -1065 except IndexError: -1066 pass -1067 -1068 return idinter -1069 -1070 -1071def _expand_deltas_for_merge(deltas, idx, shape, new_idx): -1072 """Expand deltas defined on idx to the list of configs that is defined by new_idx. -1073 New, empty entries are filled by 0. If idx and new_idx are of type range, the smallest -1074 common divisor of the step sizes is used as new step size. -1075 -1076 Parameters -1077 ---------- -1078 deltas : list -1079 List of fluctuations -1080 idx : list -1081 List or range of configs on which the deltas are defined. -1082 Has to be a subset of new_idx and has to be sorted in ascending order. -1083 shape : list -1084 Number of configs in idx. -1085 new_idx : list -1086 List of configs that defines the new range, has to be sorted in ascending order. -1087 """ -1088 -1089 if type(idx) is range and type(new_idx) is range: -1090 if idx == new_idx: -1091 return deltas -1092 ret = np.zeros(new_idx[-1] - new_idx[0] + 1) -1093 for i in range(shape): -1094 ret[idx[i] - new_idx[0]] = deltas[i] -1095 return np.array([ret[new_idx[i] - new_idx[0]] for i in range(len(new_idx))]) * len(new_idx) / len(idx) -1096 -1097 -1098def derived_observable(func, data, array_mode=False, **kwargs): -1099 """Construct a derived Obs according to func(data, **kwargs) using automatic differentiation. +1048 +1049def _intersection_idx(idl): +1050 """Returns the intersection of all lists in idl as range or sorted list +1051 +1052 Parameters +1053 ---------- +1054 idl : list +1055 List of lists or ranges. 
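# Sketch of the range compression in _merge_idx and _intersection_idx above:
# regular unions and intersections collapse back into plain ranges.
_merge_idx([range(1, 100, 2), range(2, 100, 2)])      # -> range(1, 100)
_intersection_idx([range(1, 100), range(10, 80, 2)])  # -> range(10, 79, 2)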
+1056 """ +1057 +1058 if _check_lists_equal(idl): +1059 return idl[0] +1060 +1061 idinter = sorted(set.intersection(*[set(o) for o in idl])) +1062 +1063 # Check whether idinter can be expressed as range +1064 try: +1065 idrange = range(idinter[0], idinter[-1] + 1, idinter[1] - idinter[0]) +1066 idtest = [list(idrange), idinter] +1067 if _check_lists_equal(idtest): +1068 return idrange +1069 except IndexError: +1070 pass +1071 +1072 return idinter +1073 +1074 +1075def _expand_deltas_for_merge(deltas, idx, shape, new_idx): +1076 """Expand deltas defined on idx to the list of configs that is defined by new_idx. +1077 New, empty entries are filled by 0. If idx and new_idx are of type range, the smallest +1078 common divisor of the step sizes is used as new step size. +1079 +1080 Parameters +1081 ---------- +1082 deltas : list +1083 List of fluctuations +1084 idx : list +1085 List or range of configs on which the deltas are defined. +1086 Has to be a subset of new_idx and has to be sorted in ascending order. +1087 shape : list +1088 Number of configs in idx. +1089 new_idx : list +1090 List of configs that defines the new range, has to be sorted in ascending order. +1091 """ +1092 +1093 if type(idx) is range and type(new_idx) is range: +1094 if idx == new_idx: +1095 return deltas +1096 ret = np.zeros(new_idx[-1] - new_idx[0] + 1) +1097 for i in range(shape): +1098 ret[idx[i] - new_idx[0]] = deltas[i] +1099 return np.array([ret[new_idx[i] - new_idx[0]] for i in range(len(new_idx))]) * len(new_idx) / len(idx) 1100 -1101 Parameters -1102 ---------- -1103 func : object -1104 arbitrary function of the form func(data, **kwargs). For the -1105 automatic differentiation to work, all numpy functions have to have -1106 the autograd wrapper (use 'import autograd.numpy as anp'). -1107 data : list -1108 list of Obs, e.g. [obs1, obs2, obs3]. -1109 num_grad : bool -1110 if True, numerical derivatives are used instead of autograd -1111 (default False). To control the numerical differentiation the -1112 kwargs of numdifftools.step_generators.MaxStepGenerator -1113 can be used. -1114 man_grad : list -1115 manually supply a list or an array which contains the jacobian -1116 of func. Use cautiously, supplying the wrong derivative will -1117 not be intercepted. -1118 -1119 Notes -1120 ----- -1121 For simple mathematical operations it can be practical to use anonymous -1122 functions. For the ratio of two observables one can e.g. use -1123 -1124 new_obs = derived_observable(lambda x: x[0] / x[1], [obs1, obs2]) -1125 """ -1126 -1127 data = np.asarray(data) -1128 raveled_data = data.ravel() -1129 -1130 # Workaround for matrix operations containing non Obs data -1131 if not all(isinstance(x, Obs) for x in raveled_data): -1132 for i in range(len(raveled_data)): -1133 if isinstance(raveled_data[i], (int, float)): -1134 raveled_data[i] = cov_Obs(raveled_data[i], 0.0, "###dummy_covobs###") -1135 -1136 allcov = {} -1137 for o in raveled_data: -1138 for name in o.cov_names: -1139 if name in allcov: -1140 if not np.allclose(allcov[name], o.covobs[name].cov): -1141 raise Exception('Inconsistent covariance matrices for %s!' 
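# Numeric sketch of _expand_deltas_for_merge above: deltas on configs [1, 3]
# expanded onto [1, 2, 3, 4] are zero-padded and rescaled by
# len(new_idx) / len(idx) = 2 so that per-config means stay normalized.
_expand_deltas_for_merge(np.array([1., 2.]), range(1, 5, 2), 2, range(1, 5))
# -> array([2., 0., 4., 0.])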
% (name)) -1142 else: -1143 allcov[name] = o.covobs[name].cov -1144 -1145 n_obs = len(raveled_data) -1146 new_names = sorted(set([y for x in [o.names for o in raveled_data] for y in x])) -1147 new_cov_names = sorted(set([y for x in [o.cov_names for o in raveled_data] for y in x])) -1148 new_sample_names = sorted(set(new_names) - set(new_cov_names)) -1149 -1150 reweighted = len(list(filter(lambda o: o.reweighted is True, raveled_data))) > 0 -1151 -1152 if data.ndim == 1: -1153 values = np.array([o.value for o in data]) -1154 else: -1155 values = np.vectorize(lambda x: x.value)(data) -1156 -1157 new_values = func(values, **kwargs) -1158 -1159 multi = int(isinstance(new_values, np.ndarray)) +1101 +1102def derived_observable(func, data, array_mode=False, **kwargs): +1103 """Construct a derived Obs according to func(data, **kwargs) using automatic differentiation. +1104 +1105 Parameters +1106 ---------- +1107 func : object +1108 arbitrary function of the form func(data, **kwargs). For the +1109 automatic differentiation to work, all numpy functions have to have +1110 the autograd wrapper (use 'import autograd.numpy as anp'). +1111 data : list +1112 list of Obs, e.g. [obs1, obs2, obs3]. +1113 num_grad : bool +1114 if True, numerical derivatives are used instead of autograd +1115 (default False). To control the numerical differentiation the +1116 kwargs of numdifftools.step_generators.MaxStepGenerator +1117 can be used. +1118 man_grad : list +1119 manually supply a list or an array which contains the jacobian +1120 of func. Use cautiously, supplying the wrong derivative will +1121 not be intercepted. +1122 +1123 Notes +1124 ----- +1125 For simple mathematical operations it can be practical to use anonymous +1126 functions. For the ratio of two observables one can e.g. use +1127 +1128 new_obs = derived_observable(lambda x: x[0] / x[1], [obs1, obs2]) +1129 """ +1130 +1131 data = np.asarray(data) +1132 raveled_data = data.ravel() +1133 +1134 # Workaround for matrix operations containing non Obs data +1135 if not all(isinstance(x, Obs) for x in raveled_data): +1136 for i in range(len(raveled_data)): +1137 if isinstance(raveled_data[i], (int, float)): +1138 raveled_data[i] = cov_Obs(raveled_data[i], 0.0, "###dummy_covobs###") +1139 +1140 allcov = {} +1141 for o in raveled_data: +1142 for name in o.cov_names: +1143 if name in allcov: +1144 if not np.allclose(allcov[name], o.covobs[name].cov): +1145 raise Exception('Inconsistent covariance matrices for %s!' 
% (name)) +1146 else: +1147 allcov[name] = o.covobs[name].cov +1148 +1149 n_obs = len(raveled_data) +1150 new_names = sorted(set([y for x in [o.names for o in raveled_data] for y in x])) +1151 new_cov_names = sorted(set([y for x in [o.cov_names for o in raveled_data] for y in x])) +1152 new_sample_names = sorted(set(new_names) - set(new_cov_names)) +1153 +1154 reweighted = len(list(filter(lambda o: o.reweighted is True, raveled_data))) > 0 +1155 +1156 if data.ndim == 1: +1157 values = np.array([o.value for o in data]) +1158 else: +1159 values = np.vectorize(lambda x: x.value)(data) 1160 -1161 new_r_values = {} -1162 new_idl_d = {} -1163 for name in new_sample_names: -1164 idl = [] -1165 tmp_values = np.zeros(n_obs) -1166 for i, item in enumerate(raveled_data): -1167 tmp_values[i] = item.r_values.get(name, item.value) -1168 tmp_idl = item.idl.get(name) -1169 if tmp_idl is not None: -1170 idl.append(tmp_idl) -1171 if multi > 0: -1172 tmp_values = np.array(tmp_values).reshape(data.shape) -1173 new_r_values[name] = func(tmp_values, **kwargs) -1174 new_idl_d[name] = _merge_idx(idl) -1175 -1176 if 'man_grad' in kwargs: -1177 deriv = np.asarray(kwargs.get('man_grad')) -1178 if new_values.shape + data.shape != deriv.shape: -1179 raise Exception('Manual derivative does not have correct shape.') -1180 elif kwargs.get('num_grad') is True: -1181 if multi > 0: -1182 raise Exception('Multi mode currently not supported for numerical derivative') -1183 options = { -1184 'base_step': 0.1, -1185 'step_ratio': 2.5} -1186 for key in options.keys(): -1187 kwarg = kwargs.get(key) -1188 if kwarg is not None: -1189 options[key] = kwarg -1190 tmp_df = nd.Gradient(func, order=4, **{k: v for k, v in options.items() if v is not None})(values, **kwargs) -1191 if tmp_df.size == 1: -1192 deriv = np.array([tmp_df.real]) -1193 else: -1194 deriv = tmp_df.real -1195 else: -1196 deriv = jacobian(func)(values, **kwargs) -1197 -1198 final_result = np.zeros(new_values.shape, dtype=object) -1199 -1200 if array_mode is True: +1161 new_values = func(values, **kwargs) +1162 +1163 multi = int(isinstance(new_values, np.ndarray)) +1164 +1165 new_r_values = {} +1166 new_idl_d = {} +1167 for name in new_sample_names: +1168 idl = [] +1169 tmp_values = np.zeros(n_obs) +1170 for i, item in enumerate(raveled_data): +1171 tmp_values[i] = item.r_values.get(name, item.value) +1172 tmp_idl = item.idl.get(name) +1173 if tmp_idl is not None: +1174 idl.append(tmp_idl) +1175 if multi > 0: +1176 tmp_values = np.array(tmp_values).reshape(data.shape) +1177 new_r_values[name] = func(tmp_values, **kwargs) +1178 new_idl_d[name] = _merge_idx(idl) +1179 +1180 if 'man_grad' in kwargs: +1181 deriv = np.asarray(kwargs.get('man_grad')) +1182 if new_values.shape + data.shape != deriv.shape: +1183 raise Exception('Manual derivative does not have correct shape.') +1184 elif kwargs.get('num_grad') is True: +1185 if multi > 0: +1186 raise Exception('Multi mode currently not supported for numerical derivative') +1187 options = { +1188 'base_step': 0.1, +1189 'step_ratio': 2.5} +1190 for key in options.keys(): +1191 kwarg = kwargs.get(key) +1192 if kwarg is not None: +1193 options[key] = kwarg +1194 tmp_df = nd.Gradient(func, order=4, **{k: v for k, v in options.items() if v is not None})(values, **kwargs) +1195 if tmp_df.size == 1: +1196 deriv = np.array([tmp_df.real]) +1197 else: +1198 deriv = tmp_df.real +1199 else: +1200 deriv = jacobian(func)(values, **kwargs) 1201 -1202 class _Zero_grad(): -1203 def __init__(self, N): -1204 self.grad = np.zeros((N, 1)) +1202 
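# Sketch of the three gradient paths handled above (assuming an Obs a):
d1 = derived_observable(lambda x, **kwargs: x[0] ** 2, [a],
                        man_grad=[2 * a.value])        # manual jacobian
d2 = derived_observable(lambda x, **kwargs: x[0] ** 2, [a],
                        num_grad=True, base_step=0.1)  # numdifftools
d3 = derived_observable(lambda x: x[0] ** 2, [a])      # autograd (default)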
final_result = np.zeros(new_values.shape, dtype=object) +1203 +1204 if array_mode is True: 1205 -1206 new_covobs_lengths = dict(set([y for x in [[(n, o.covobs[n].N) for n in o.cov_names] for o in raveled_data] for y in x])) -1207 d_extracted = {} -1208 g_extracted = {} -1209 for name in new_sample_names: -1210 d_extracted[name] = [] -1211 ens_length = len(new_idl_d[name]) -1212 for i_dat, dat in enumerate(data): -1213 d_extracted[name].append(np.array([_expand_deltas_for_merge(o.deltas.get(name, np.zeros(ens_length)), o.idl.get(name, new_idl_d[name]), o.shape.get(name, ens_length), new_idl_d[name]) for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (ens_length, ))) -1214 for name in new_cov_names: -1215 g_extracted[name] = [] -1216 zero_grad = _Zero_grad(new_covobs_lengths[name]) -1217 for i_dat, dat in enumerate(data): -1218 g_extracted[name].append(np.array([o.covobs.get(name, zero_grad).grad for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (new_covobs_lengths[name], 1))) -1219 -1220 for i_val, new_val in np.ndenumerate(new_values): -1221 new_deltas = {} -1222 new_grad = {} -1223 if array_mode is True: -1224 for name in new_sample_names: -1225 ens_length = d_extracted[name][0].shape[-1] -1226 new_deltas[name] = np.zeros(ens_length) -1227 for i_dat, dat in enumerate(d_extracted[name]): -1228 new_deltas[name] += np.tensordot(deriv[i_val + (i_dat, )], dat) -1229 for name in new_cov_names: -1230 new_grad[name] = 0 -1231 for i_dat, dat in enumerate(g_extracted[name]): -1232 new_grad[name] += np.tensordot(deriv[i_val + (i_dat, )], dat) -1233 else: -1234 for j_obs, obs in np.ndenumerate(data): -1235 for name in obs.names: -1236 if name in obs.cov_names: -1237 new_grad[name] = new_grad.get(name, 0) + deriv[i_val + j_obs] * obs.covobs[name].grad -1238 else: -1239 new_deltas[name] = new_deltas.get(name, 0) + deriv[i_val + j_obs] * _expand_deltas_for_merge(obs.deltas[name], obs.idl[name], obs.shape[name], new_idl_d[name]) -1240 -1241 new_covobs = {name: Covobs(0, allcov[name], name, grad=new_grad[name]) for name in new_grad} -1242 -1243 if not set(new_covobs.keys()).isdisjoint(new_deltas.keys()): -1244 raise Exception('The same name has been used for deltas and covobs!') -1245 new_samples = [] -1246 new_means = [] -1247 new_idl = [] -1248 new_names_obs = [] -1249 for name in new_names: -1250 if name not in new_covobs: -1251 new_samples.append(new_deltas[name]) -1252 new_idl.append(new_idl_d[name]) -1253 new_means.append(new_r_values[name][i_val]) -1254 new_names_obs.append(name) -1255 final_result[i_val] = Obs(new_samples, new_names_obs, means=new_means, idl=new_idl) -1256 for name in new_covobs: -1257 final_result[i_val].names.append(name) -1258 final_result[i_val]._covobs = new_covobs -1259 final_result[i_val]._value = new_val -1260 final_result[i_val].reweighted = reweighted -1261 -1262 if multi == 0: -1263 final_result = final_result.item() -1264 -1265 return final_result -1266 -1267 -1268def _reduce_deltas(deltas, idx_old, idx_new): -1269 """Extract deltas defined on idx_old on all configs of idx_new. 
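# array_mode sketch for the fast path above (assuming a square object array
# mat of Obs on a common ensemble; pyerrors.linalg wraps such calls, so the
# exact pattern here is illustrative only):
mat_inv = derived_observable(lambda x, **kwargs: anp.linalg.inv(x),
                             [mat], array_mode=True)[0]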
+1206 class _Zero_grad(): +1207 def __init__(self, N): +1208 self.grad = np.zeros((N, 1)) +1209 +1210 new_covobs_lengths = dict(set([y for x in [[(n, o.covobs[n].N) for n in o.cov_names] for o in raveled_data] for y in x])) +1211 d_extracted = {} +1212 g_extracted = {} +1213 for name in new_sample_names: +1214 d_extracted[name] = [] +1215 ens_length = len(new_idl_d[name]) +1216 for i_dat, dat in enumerate(data): +1217 d_extracted[name].append(np.array([_expand_deltas_for_merge(o.deltas.get(name, np.zeros(ens_length)), o.idl.get(name, new_idl_d[name]), o.shape.get(name, ens_length), new_idl_d[name]) for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (ens_length, ))) +1218 for name in new_cov_names: +1219 g_extracted[name] = [] +1220 zero_grad = _Zero_grad(new_covobs_lengths[name]) +1221 for i_dat, dat in enumerate(data): +1222 g_extracted[name].append(np.array([o.covobs.get(name, zero_grad).grad for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (new_covobs_lengths[name], 1))) +1223 +1224 for i_val, new_val in np.ndenumerate(new_values): +1225 new_deltas = {} +1226 new_grad = {} +1227 if array_mode is True: +1228 for name in new_sample_names: +1229 ens_length = d_extracted[name][0].shape[-1] +1230 new_deltas[name] = np.zeros(ens_length) +1231 for i_dat, dat in enumerate(d_extracted[name]): +1232 new_deltas[name] += np.tensordot(deriv[i_val + (i_dat, )], dat) +1233 for name in new_cov_names: +1234 new_grad[name] = 0 +1235 for i_dat, dat in enumerate(g_extracted[name]): +1236 new_grad[name] += np.tensordot(deriv[i_val + (i_dat, )], dat) +1237 else: +1238 for j_obs, obs in np.ndenumerate(data): +1239 for name in obs.names: +1240 if name in obs.cov_names: +1241 new_grad[name] = new_grad.get(name, 0) + deriv[i_val + j_obs] * obs.covobs[name].grad +1242 else: +1243 new_deltas[name] = new_deltas.get(name, 0) + deriv[i_val + j_obs] * _expand_deltas_for_merge(obs.deltas[name], obs.idl[name], obs.shape[name], new_idl_d[name]) +1244 +1245 new_covobs = {name: Covobs(0, allcov[name], name, grad=new_grad[name]) for name in new_grad} +1246 +1247 if not set(new_covobs.keys()).isdisjoint(new_deltas.keys()): +1248 raise Exception('The same name has been used for deltas and covobs!') +1249 new_samples = [] +1250 new_means = [] +1251 new_idl = [] +1252 new_names_obs = [] +1253 for name in new_names: +1254 if name not in new_covobs: +1255 new_samples.append(new_deltas[name]) +1256 new_idl.append(new_idl_d[name]) +1257 new_means.append(new_r_values[name][i_val]) +1258 new_names_obs.append(name) +1259 final_result[i_val] = Obs(new_samples, new_names_obs, means=new_means, idl=new_idl) +1260 for name in new_covobs: +1261 final_result[i_val].names.append(name) +1262 final_result[i_val]._covobs = new_covobs +1263 final_result[i_val]._value = new_val +1264 final_result[i_val].reweighted = reweighted +1265 +1266 if multi == 0: +1267 final_result = final_result.item() +1268 +1269 return final_result 1270 -1271 Assumes, that idx_old and idx_new are correctly defined idl, i.e., they -1272 are ordered in an ascending order. -1273 -1274 Parameters -1275 ---------- -1276 deltas : list -1277 List of fluctuations -1278 idx_old : list -1279 List or range of configs on which the deltas are defined -1280 idx_new : list -1281 List of configs for which we want to extract the deltas. -1282 Has to be a subset of idx_old. 
-1283 """ -1284 if not len(deltas) == len(idx_old): -1285 raise Exception('Length of deltas and idx_old have to be the same: %d != %d' % (len(deltas), len(idx_old))) -1286 if type(idx_old) is range and type(idx_new) is range: -1287 if idx_old == idx_new: -1288 return deltas -1289 if _check_lists_equal([idx_old, idx_new]): -1290 return deltas -1291 indices = np.intersect1d(idx_old, idx_new, assume_unique=True, return_indices=True)[1] -1292 if len(indices) < len(idx_new): -1293 raise Exception('Error in _reduce_deltas: Config of idx_new not in idx_old') -1294 return np.array(deltas)[indices] -1295 -1296 -1297def reweight(weight, obs, **kwargs): -1298 """Reweight a list of observables. +1271 +1272def _reduce_deltas(deltas, idx_old, idx_new): +1273 """Extract deltas defined on idx_old on all configs of idx_new. +1274 +1275 Assumes, that idx_old and idx_new are correctly defined idl, i.e., they +1276 are ordered in an ascending order. +1277 +1278 Parameters +1279 ---------- +1280 deltas : list +1281 List of fluctuations +1282 idx_old : list +1283 List or range of configs on which the deltas are defined +1284 idx_new : list +1285 List of configs for which we want to extract the deltas. +1286 Has to be a subset of idx_old. +1287 """ +1288 if not len(deltas) == len(idx_old): +1289 raise Exception('Length of deltas and idx_old have to be the same: %d != %d' % (len(deltas), len(idx_old))) +1290 if type(idx_old) is range and type(idx_new) is range: +1291 if idx_old == idx_new: +1292 return deltas +1293 if _check_lists_equal([idx_old, idx_new]): +1294 return deltas +1295 indices = np.intersect1d(idx_old, idx_new, assume_unique=True, return_indices=True)[1] +1296 if len(indices) < len(idx_new): +1297 raise Exception('Error in _reduce_deltas: Config of idx_new not in idx_old') +1298 return np.array(deltas)[indices] 1299 -1300 Parameters -1301 ---------- -1302 weight : Obs -1303 Reweighting factor. An Observable that has to be defined on a superset of the -1304 configurations in obs[i].idl for all i. -1305 obs : list -1306 list of Obs, e.g. [obs1, obs2, obs3]. -1307 all_configs : bool -1308 if True, the reweighted observables are normalized by the average of -1309 the reweighting factor on all configurations in weight.idl and not -1310 on the configurations in obs[i].idl. Default False. -1311 """ -1312 result = [] -1313 for i in range(len(obs)): -1314 if len(obs[i].cov_names): -1315 raise Exception('Error: Not possible to reweight an Obs that contains covobs!') -1316 if not set(obs[i].names).issubset(weight.names): -1317 raise Exception('Error: Ensembles do not fit') -1318 for name in obs[i].names: -1319 if not set(obs[i].idl[name]).issubset(weight.idl[name]): -1320 raise Exception('obs[%d] has to be defined on a subset of the configs in weight.idl[%s]!' 
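# Numeric sketch of _reduce_deltas above: keeping configs [1, 3] out of
# [1, 2, 3] simply selects the matching entries.
_reduce_deltas([10., 20., 30.], [1, 2, 3], [1, 3])  # -> array([10., 30.])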
% (i, name)) -1321 new_samples = [] -1322 w_deltas = {} -1323 for name in sorted(obs[i].names): -1324 w_deltas[name] = _reduce_deltas(weight.deltas[name], weight.idl[name], obs[i].idl[name]) -1325 new_samples.append((w_deltas[name] + weight.r_values[name]) * (obs[i].deltas[name] + obs[i].r_values[name])) -1326 tmp_obs = Obs(new_samples, sorted(obs[i].names), idl=[obs[i].idl[name] for name in sorted(obs[i].names)]) -1327 -1328 if kwargs.get('all_configs'): -1329 new_weight = weight -1330 else: -1331 new_weight = Obs([w_deltas[name] + weight.r_values[name] for name in sorted(obs[i].names)], sorted(obs[i].names), idl=[obs[i].idl[name] for name in sorted(obs[i].names)]) -1332 -1333 result.append(tmp_obs / new_weight) -1334 result[-1].reweighted = True -1335 -1336 return result -1337 -1338 -1339def correlate(obs_a, obs_b): -1340 """Correlate two observables. +1300 +1301def reweight(weight, obs, **kwargs): +1302 """Reweight a list of observables. +1303 +1304 Parameters +1305 ---------- +1306 weight : Obs +1307 Reweighting factor. An Observable that has to be defined on a superset of the +1308 configurations in obs[i].idl for all i. +1309 obs : list +1310 list of Obs, e.g. [obs1, obs2, obs3]. +1311 all_configs : bool +1312 if True, the reweighted observables are normalized by the average of +1313 the reweighting factor on all configurations in weight.idl and not +1314 on the configurations in obs[i].idl. Default False. +1315 """ +1316 result = [] +1317 for i in range(len(obs)): +1318 if len(obs[i].cov_names): +1319 raise Exception('Error: Not possible to reweight an Obs that contains covobs!') +1320 if not set(obs[i].names).issubset(weight.names): +1321 raise Exception('Error: Ensembles do not fit') +1322 for name in obs[i].names: +1323 if not set(obs[i].idl[name]).issubset(weight.idl[name]): +1324 raise Exception('obs[%d] has to be defined on a subset of the configs in weight.idl[%s]!' % (i, name)) +1325 new_samples = [] +1326 w_deltas = {} +1327 for name in sorted(obs[i].names): +1328 w_deltas[name] = _reduce_deltas(weight.deltas[name], weight.idl[name], obs[i].idl[name]) +1329 new_samples.append((w_deltas[name] + weight.r_values[name]) * (obs[i].deltas[name] + obs[i].r_values[name])) +1330 tmp_obs = Obs(new_samples, sorted(obs[i].names), idl=[obs[i].idl[name] for name in sorted(obs[i].names)]) +1331 +1332 if kwargs.get('all_configs'): +1333 new_weight = weight +1334 else: +1335 new_weight = Obs([w_deltas[name] + weight.r_values[name] for name in sorted(obs[i].names)], sorted(obs[i].names), idl=[obs[i].idl[name] for name in sorted(obs[i].names)]) +1336 +1337 result.append(tmp_obs / new_weight) +1338 result[-1].reweighted = True +1339 +1340 return result 1341 -1342 Parameters -1343 ---------- -1344 obs_a : Obs -1345 First observable -1346 obs_b : Obs -1347 Second observable -1348 -1349 Notes -1350 ----- -1351 Keep in mind to only correlate primary observables which have not been reweighted -1352 yet. The reweighting has to be applied after correlating the observables. -1353 Currently only works if ensembles are identical (this is not strictly necessary). 
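# Usage sketch for reweight above (assuming an Obs w holding the reweighting
# factor and a primary Obs o defined on the same configurations):
o_rw = reweight(w, [o])[0]
o_rw.gamma_method()
print(o_rw.reweighted)  # True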
-1354 """ -1355 -1356 if sorted(obs_a.names) != sorted(obs_b.names): -1357 raise Exception(f"Ensembles do not fit {set(sorted(obs_a.names)) ^ set(sorted(obs_b.names))}") -1358 if len(obs_a.cov_names) or len(obs_b.cov_names): -1359 raise Exception('Error: Not possible to correlate Obs that contain covobs!') -1360 for name in obs_a.names: -1361 if obs_a.shape[name] != obs_b.shape[name]: -1362 raise Exception('Shapes of ensemble', name, 'do not fit') -1363 if obs_a.idl[name] != obs_b.idl[name]: -1364 raise Exception('idl of ensemble', name, 'do not fit') -1365 -1366 if obs_a.reweighted is True: -1367 warnings.warn("The first observable is already reweighted.", RuntimeWarning) -1368 if obs_b.reweighted is True: -1369 warnings.warn("The second observable is already reweighted.", RuntimeWarning) -1370 -1371 new_samples = [] -1372 new_idl = [] -1373 for name in sorted(obs_a.names): -1374 new_samples.append((obs_a.deltas[name] + obs_a.r_values[name]) * (obs_b.deltas[name] + obs_b.r_values[name])) -1375 new_idl.append(obs_a.idl[name]) -1376 -1377 o = Obs(new_samples, sorted(obs_a.names), idl=new_idl) -1378 o.reweighted = obs_a.reweighted or obs_b.reweighted -1379 return o +1342 +1343def correlate(obs_a, obs_b): +1344 """Correlate two observables. +1345 +1346 Parameters +1347 ---------- +1348 obs_a : Obs +1349 First observable +1350 obs_b : Obs +1351 Second observable +1352 +1353 Notes +1354 ----- +1355 Keep in mind to only correlate primary observables which have not been reweighted +1356 yet. The reweighting has to be applied after correlating the observables. +1357 Currently only works if ensembles are identical (this is not strictly necessary). +1358 """ +1359 +1360 if sorted(obs_a.names) != sorted(obs_b.names): +1361 raise Exception(f"Ensembles do not fit {set(sorted(obs_a.names)) ^ set(sorted(obs_b.names))}") +1362 if len(obs_a.cov_names) or len(obs_b.cov_names): +1363 raise Exception('Error: Not possible to correlate Obs that contain covobs!') +1364 for name in obs_a.names: +1365 if obs_a.shape[name] != obs_b.shape[name]: +1366 raise Exception('Shapes of ensemble', name, 'do not fit') +1367 if obs_a.idl[name] != obs_b.idl[name]: +1368 raise Exception('idl of ensemble', name, 'do not fit') +1369 +1370 if obs_a.reweighted is True: +1371 warnings.warn("The first observable is already reweighted.", RuntimeWarning) +1372 if obs_b.reweighted is True: +1373 warnings.warn("The second observable is already reweighted.", RuntimeWarning) +1374 +1375 new_samples = [] +1376 new_idl = [] +1377 for name in sorted(obs_a.names): +1378 new_samples.append((obs_a.deltas[name] + obs_a.r_values[name]) * (obs_b.deltas[name] + obs_b.r_values[name])) +1379 new_idl.append(obs_a.idl[name]) 1380 -1381 -1382def covariance(obs, visualize=False, correlation=False, smooth=None, **kwargs): -1383 r'''Calculates the error covariance matrix of a set of observables. +1381 o = Obs(new_samples, sorted(obs_a.names), idl=new_idl) +1382 o.reweighted = obs_a.reweighted or obs_b.reweighted +1383 return o 1384 -1385 WARNING: This function should be used with care, especially for observables with support on multiple -1386 ensembles with differing autocorrelations. See the notes below for details. -1387 -1388 The gamma method has to be applied first to all observables. -1389 -1390 Parameters -1391 ---------- -1392 obs : list or numpy.ndarray -1393 List or one dimensional array of Obs -1394 visualize : bool -1395 If True plots the corresponding normalized correlation matrix (default False). 
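# Usage sketch for correlate above (assuming primary, unreweighted Obs a and
# b with identical idl): the new samples are the per-configuration products
# a_i * b_i, so statistical correlations between a and b are retained.
ab = correlate(a, b)
ab.gamma_method()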
-1396 correlation : bool -1397 If True the correlation matrix instead of the error covariance matrix is returned (default False). -1398 smooth : None or int -1399 If smooth is an integer 'E' between 2 and the dimension of the matrix minus 1 the eigenvalue -1400 smoothing procedure of hep-lat/9412087 is applied to the correlation matrix which leaves the -1401 largest E eigenvalues essentially unchanged and smoothes the smaller eigenvalues to avoid extremely -1402 small ones. -1403 -1404 Notes -1405 ----- -1406 The error covariance is defined such that it agrees with the squared standard error for two identical observables -1407 $$\operatorname{cov}(a,a)=\sum_{s=1}^N\delta_a^s\delta_a^s/N^2=\Gamma_{aa}(0)/N=\operatorname{var}(a)/N=\sigma_a^2$$ -1408 in the absence of autocorrelation. -1409 The error covariance is estimated by calculating the correlation matrix assuming no autocorrelation and then rescaling the correlation matrix by the full errors including the previous gamma method estimate for the autocorrelation of the observables. The covariance at windowsize 0 is guaranteed to be positive semi-definite -1410 $$\sum_{i,j}v_i\Gamma_{ij}(0)v_j=\frac{1}{N}\sum_{s=1}^N\sum_{i,j}v_i\delta_i^s\delta_j^s v_j=\frac{1}{N}\sum_{s=1}^N\sum_{i}|v_i\delta_i^s|^2\geq 0\,,$$ for every $v\in\mathbb{R}^M$, while such an identity does not hold for larger windows/lags. -1411 For observables defined on a single ensemble our approximation is equivalent to assuming that the integrated autocorrelation time of an off-diagonal element is equal to the geometric mean of the integrated autocorrelation times of the corresponding diagonal elements. -1412 $$\tau_{\mathrm{int}, ij}=\sqrt{\tau_{\mathrm{int}, i}\times \tau_{\mathrm{int}, j}}$$ -1413 This construction ensures that the estimated covariance matrix is positive semi-definite (up to numerical rounding errors). -1414 ''' -1415 -1416 length = len(obs) -1417 -1418 max_samples = np.max([o.N for o in obs]) -1419 if max_samples <= length and not [item for sublist in [o.cov_names for o in obs] for item in sublist]: -1420 warnings.warn(f"The dimension of the covariance matrix ({length}) is larger or equal to the number of samples ({max_samples}). This will result in a rank deficient matrix.", RuntimeWarning) +1385 +1386def covariance(obs, visualize=False, correlation=False, smooth=None, **kwargs): +1387 r'''Calculates the error covariance matrix of a set of observables. +1388 +1389 WARNING: This function should be used with care, especially for observables with support on multiple +1390 ensembles with differing autocorrelations. See the notes below for details. +1391 +1392 The gamma method has to be applied first to all observables. +1393 +1394 Parameters +1395 ---------- +1396 obs : list or numpy.ndarray +1397 List or one dimensional array of Obs +1398 visualize : bool +1399 If True plots the corresponding normalized correlation matrix (default False). +1400 correlation : bool +1401 If True the correlation matrix instead of the error covariance matrix is returned (default False). +1402 smooth : None or int +1403 If smooth is an integer 'E' between 2 and the dimension of the matrix minus 1 the eigenvalue +1404 smoothing procedure of hep-lat/9412087 is applied to the correlation matrix which leaves the +1405 largest E eigenvalues essentially unchanged and smoothes the smaller eigenvalues to avoid extremely +1406 small ones. 
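# Usage sketch for covariance above (the gamma method has to be run first):
for o in obs_list:  # assuming a list of Obs
    o.gamma_method()
cov = covariance(obs_list)                     # error covariance matrix
corr = covariance(obs_list, correlation=True)  # normalized variant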
+1407 +1408 Notes +1409 ----- +1410 The error covariance is defined such that it agrees with the squared standard error for two identical observables +1411 $$\operatorname{cov}(a,a)=\sum_{s=1}^N\delta_a^s\delta_a^s/N^2=\Gamma_{aa}(0)/N=\operatorname{var}(a)/N=\sigma_a^2$$ +1412 in the absence of autocorrelation. +1413 The error covariance is estimated by calculating the correlation matrix assuming no autocorrelation and then rescaling the correlation matrix by the full errors including the previous gamma method estimate for the autocorrelation of the observables. The covariance at windowsize 0 is guaranteed to be positive semi-definite +1414 $$\sum_{i,j}v_i\Gamma_{ij}(0)v_j=\frac{1}{N}\sum_{s=1}^N\sum_{i,j}v_i\delta_i^s\delta_j^s v_j=\frac{1}{N}\sum_{s=1}^N\sum_{i}|v_i\delta_i^s|^2\geq 0\,,$$ for every $v\in\mathbb{R}^M$, while such an identity does not hold for larger windows/lags. +1415 For observables defined on a single ensemble our approximation is equivalent to assuming that the integrated autocorrelation time of an off-diagonal element is equal to the geometric mean of the integrated autocorrelation times of the corresponding diagonal elements. +1416 $$\tau_{\mathrm{int}, ij}=\sqrt{\tau_{\mathrm{int}, i}\times \tau_{\mathrm{int}, j}}$$ +1417 This construction ensures that the estimated covariance matrix is positive semi-definite (up to numerical rounding errors). +1418 ''' +1419 +1420 length = len(obs) 1421 -1422 cov = np.zeros((length, length)) -1423 for i in range(length): -1424 for j in range(i, length): -1425 cov[i, j] = _covariance_element(obs[i], obs[j]) -1426 cov = cov + cov.T - np.diag(np.diag(cov)) -1427 -1428 corr = np.diag(1 / np.sqrt(np.diag(cov))) @ cov @ np.diag(1 / np.sqrt(np.diag(cov))) -1429 -1430 if isinstance(smooth, int): -1431 corr = _smooth_eigenvalues(corr, smooth) -1432 -1433 if visualize: -1434 plt.matshow(corr, vmin=-1, vmax=1) -1435 plt.set_cmap('RdBu') -1436 plt.colorbar() -1437 plt.draw() -1438 -1439 if correlation is True: -1440 return corr -1441 -1442 errors = [o.dvalue for o in obs] -1443 cov = np.diag(errors) @ corr @ np.diag(errors) -1444 -1445 eigenvalues = np.linalg.eigh(cov)[0] -1446 if not np.all(eigenvalues >= 0): -1447 warnings.warn("Covariance matrix is not positive semi-definite (Eigenvalues: " + str(eigenvalues) + ")", RuntimeWarning) +1422 max_samples = np.max([o.N for o in obs]) +1423 if max_samples <= length and not [item for sublist in [o.cov_names for o in obs] for item in sublist]: +1424 warnings.warn(f"The dimension of the covariance matrix ({length}) is larger or equal to the number of samples ({max_samples}). 
This will result in a rank deficient matrix.", RuntimeWarning) +1425 +1426 cov = np.zeros((length, length)) +1427 for i in range(length): +1428 for j in range(i, length): +1429 cov[i, j] = _covariance_element(obs[i], obs[j]) +1430 cov = cov + cov.T - np.diag(np.diag(cov)) +1431 +1432 corr = np.diag(1 / np.sqrt(np.diag(cov))) @ cov @ np.diag(1 / np.sqrt(np.diag(cov))) +1433 +1434 if isinstance(smooth, int): +1435 corr = _smooth_eigenvalues(corr, smooth) +1436 +1437 if visualize: +1438 plt.matshow(corr, vmin=-1, vmax=1) +1439 plt.set_cmap('RdBu') +1440 plt.colorbar() +1441 plt.draw() +1442 +1443 if correlation is True: +1444 return corr +1445 +1446 errors = [o.dvalue for o in obs] +1447 cov = np.diag(errors) @ corr @ np.diag(errors) 1448 -1449 return cov -1450 -1451 -1452def _smooth_eigenvalues(corr, E): -1453 """Eigenvalue smoothing as described in hep-lat/9412087 +1449 eigenvalues = np.linalg.eigh(cov)[0] +1450 if not np.all(eigenvalues >= 0): +1451 warnings.warn("Covariance matrix is not positive semi-definite (Eigenvalues: " + str(eigenvalues) + ")", RuntimeWarning) +1452 +1453 return cov 1454 -1455 corr : np.ndarray -1456 correlation matrix -1457 E : integer -1458 Number of eigenvalues to be left substantially unchanged -1459 """ -1460 if not (2 < E < corr.shape[0] - 1): -1461 raise Exception(f"'E' has to be between 2 and the dimension of the correlation matrix minus 1 ({corr.shape[0] - 1}).") -1462 vals, vec = np.linalg.eigh(corr) -1463 lambda_min = np.mean(vals[:-E]) -1464 vals[vals < lambda_min] = lambda_min -1465 vals /= np.mean(vals) -1466 return vec @ np.diag(vals) @ vec.T -1467 -1468 -1469def _covariance_element(obs1, obs2): -1470 """Estimates the covariance of two Obs objects, neglecting autocorrelations.""" +1455 +1456def _smooth_eigenvalues(corr, E): +1457 """Eigenvalue smoothing as described in hep-lat/9412087 +1458 +1459 corr : np.ndarray +1460 correlation matrix +1461 E : integer +1462 Number of eigenvalues to be left substantially unchanged +1463 """ +1464 if not (2 < E < corr.shape[0] - 1): +1465 raise Exception(f"'E' has to be between 2 and the dimension of the correlation matrix minus 1 ({corr.shape[0] - 1}).") +1466 vals, vec = np.linalg.eigh(corr) +1467 lambda_min = np.mean(vals[:-E]) +1468 vals[vals < lambda_min] = lambda_min +1469 vals /= np.mean(vals) +1470 return vec @ np.diag(vals) @ vec.T 1471 -1472 def calc_gamma(deltas1, deltas2, idx1, idx2, new_idx): -1473 deltas1 = _reduce_deltas(deltas1, idx1, new_idx) -1474 deltas2 = _reduce_deltas(deltas2, idx2, new_idx) -1475 return np.sum(deltas1 * deltas2) -1476 -1477 if set(obs1.names).isdisjoint(set(obs2.names)): -1478 return 0.0 -1479 -1480 if not hasattr(obs1, 'e_dvalue') or not hasattr(obs2, 'e_dvalue'): -1481 raise Exception('The gamma method has to be applied to both Obs first.') -1482 -1483 dvalue = 0.0 -1484 -1485 for e_name in obs1.mc_names: +1472 +1473def _covariance_element(obs1, obs2): +1474 """Estimates the covariance of two Obs objects, neglecting autocorrelations.""" +1475 +1476 def calc_gamma(deltas1, deltas2, idx1, idx2, new_idx): +1477 deltas1 = _reduce_deltas(deltas1, idx1, new_idx) +1478 deltas2 = _reduce_deltas(deltas2, idx2, new_idx) +1479 return np.sum(deltas1 * deltas2) +1480 +1481 if set(obs1.names).isdisjoint(set(obs2.names)): +1482 return 0.0 +1483 +1484 if not hasattr(obs1, 'e_dvalue') or not hasattr(obs2, 'e_dvalue'): +1485 raise Exception('The gamma method has to be applied to both Obs first.') 1486 -1487 if e_name not in obs2.mc_names: -1488 continue -1489 -1490 idl_d = {} -1491 for 
r_name in obs1.e_content[e_name]: -1492 if r_name not in obs2.e_content[e_name]: -1493 continue -1494 idl_d[r_name] = _intersection_idx([obs1.idl[r_name], obs2.idl[r_name]]) -1495 -1496 gamma = 0.0 -1497 -1498 for r_name in obs1.e_content[e_name]: -1499 if r_name not in obs2.e_content[e_name]: -1500 continue -1501 if len(idl_d[r_name]) == 0: -1502 continue -1503 gamma += calc_gamma(obs1.deltas[r_name], obs2.deltas[r_name], obs1.idl[r_name], obs2.idl[r_name], idl_d[r_name]) -1504 -1505 if gamma == 0.0: -1506 continue -1507 -1508 gamma_div = 0.0 -1509 for r_name in obs1.e_content[e_name]: -1510 if r_name not in obs2.e_content[e_name]: -1511 continue -1512 if len(idl_d[r_name]) == 0: -1513 continue -1514 gamma_div += np.sqrt(calc_gamma(obs1.deltas[r_name], obs1.deltas[r_name], obs1.idl[r_name], obs1.idl[r_name], idl_d[r_name]) * calc_gamma(obs2.deltas[r_name], obs2.deltas[r_name], obs2.idl[r_name], obs2.idl[r_name], idl_d[r_name])) -1515 gamma /= gamma_div -1516 -1517 dvalue += gamma -1518 -1519 for e_name in obs1.cov_names: +1487 dvalue = 0.0 +1488 +1489 for e_name in obs1.mc_names: +1490 +1491 if e_name not in obs2.mc_names: +1492 continue +1493 +1494 idl_d = {} +1495 for r_name in obs1.e_content[e_name]: +1496 if r_name not in obs2.e_content[e_name]: +1497 continue +1498 idl_d[r_name] = _intersection_idx([obs1.idl[r_name], obs2.idl[r_name]]) +1499 +1500 gamma = 0.0 +1501 +1502 for r_name in obs1.e_content[e_name]: +1503 if r_name not in obs2.e_content[e_name]: +1504 continue +1505 if len(idl_d[r_name]) == 0: +1506 continue +1507 gamma += calc_gamma(obs1.deltas[r_name], obs2.deltas[r_name], obs1.idl[r_name], obs2.idl[r_name], idl_d[r_name]) +1508 +1509 if gamma == 0.0: +1510 continue +1511 +1512 gamma_div = 0.0 +1513 for r_name in obs1.e_content[e_name]: +1514 if r_name not in obs2.e_content[e_name]: +1515 continue +1516 if len(idl_d[r_name]) == 0: +1517 continue +1518 gamma_div += np.sqrt(calc_gamma(obs1.deltas[r_name], obs1.deltas[r_name], obs1.idl[r_name], obs1.idl[r_name], idl_d[r_name]) * calc_gamma(obs2.deltas[r_name], obs2.deltas[r_name], obs2.idl[r_name], obs2.idl[r_name], idl_d[r_name])) +1519 gamma /= gamma_div 1520 -1521 if e_name not in obs2.cov_names: -1522 continue -1523 -1524 dvalue += np.dot(np.transpose(obs1.covobs[e_name].grad), np.dot(obs1.covobs[e_name].cov, obs2.covobs[e_name].grad)).item() -1525 -1526 return dvalue +1521 dvalue += gamma +1522 +1523 for e_name in obs1.cov_names: +1524 +1525 if e_name not in obs2.cov_names: +1526 continue 1527 -1528 -1529def import_jackknife(jacks, name, idl=None): -1530 """Imports jackknife samples and returns an Obs +1528 dvalue += np.dot(np.transpose(obs1.covobs[e_name].grad), np.dot(obs1.covobs[e_name].cov, obs2.covobs[e_name].grad)).item() +1529 +1530 return dvalue 1531 -1532 Parameters -1533 ---------- -1534 jacks : numpy.ndarray -1535 numpy array containing the mean value as zeroth entry and -1536 the N jackknife samples as first to Nth entry. -1537 name : str -1538 name of the ensemble the samples are defined on. 
-1539 """ -1540 length = len(jacks) - 1 -1541 prj = (np.ones((length, length)) - (length - 1) * np.identity(length)) -1542 samples = jacks[1:] @ prj -1543 mean = np.mean(samples) -1544 new_obs = Obs([samples - mean], [name], idl=idl, means=[mean]) -1545 new_obs._value = jacks[0] -1546 return new_obs -1547 -1548 -1549def merge_obs(list_of_obs): -1550 """Combine all observables in list_of_obs into one new observable +1532 +1533def import_jackknife(jacks, name, idl=None): +1534 """Imports jackknife samples and returns an Obs +1535 +1536 Parameters +1537 ---------- +1538 jacks : numpy.ndarray +1539 numpy array containing the mean value as zeroth entry and +1540 the N jackknife samples as first to Nth entry. +1541 name : str +1542 name of the ensemble the samples are defined on. +1543 """ +1544 length = len(jacks) - 1 +1545 prj = (np.ones((length, length)) - (length - 1) * np.identity(length)) +1546 samples = jacks[1:] @ prj +1547 mean = np.mean(samples) +1548 new_obs = Obs([samples - mean], [name], idl=idl, means=[mean]) +1549 new_obs._value = jacks[0] +1550 return new_obs 1551 -1552 Parameters -1553 ---------- -1554 list_of_obs : list -1555 list of the Obs object to be combined -1556 -1557 Notes -1558 ----- -1559 It is not possible to combine obs which are based on the same replicum -1560 """ -1561 replist = [item for obs in list_of_obs for item in obs.names] -1562 if (len(replist) == len(set(replist))) is False: -1563 raise Exception('list_of_obs contains duplicate replica: %s' % (str(replist))) -1564 if any([len(o.cov_names) for o in list_of_obs]): -1565 raise Exception('Not possible to merge data that contains covobs!') -1566 new_dict = {} -1567 idl_dict = {} -1568 for o in list_of_obs: -1569 new_dict.update({key: o.deltas.get(key, 0) + o.r_values.get(key, 0) -1570 for key in set(o.deltas) | set(o.r_values)}) -1571 idl_dict.update({key: o.idl.get(key, 0) for key in set(o.deltas)}) -1572 -1573 names = sorted(new_dict.keys()) -1574 o = Obs([new_dict[name] for name in names], names, idl=[idl_dict[name] for name in names]) -1575 o.reweighted = np.max([oi.reweighted for oi in list_of_obs]) -1576 return o -1577 -1578 -1579def cov_Obs(means, cov, name, grad=None): -1580 """Create an Obs based on mean(s) and a covariance matrix +1552 +1553def merge_obs(list_of_obs): +1554 """Combine all observables in list_of_obs into one new observable +1555 +1556 Parameters +1557 ---------- +1558 list_of_obs : list +1559 list of the Obs object to be combined +1560 +1561 Notes +1562 ----- +1563 It is not possible to combine obs which are based on the same replicum +1564 """ +1565 replist = [item for obs in list_of_obs for item in obs.names] +1566 if (len(replist) == len(set(replist))) is False: +1567 raise Exception('list_of_obs contains duplicate replica: %s' % (str(replist))) +1568 if any([len(o.cov_names) for o in list_of_obs]): +1569 raise Exception('Not possible to merge data that contains covobs!') +1570 new_dict = {} +1571 idl_dict = {} +1572 for o in list_of_obs: +1573 new_dict.update({key: o.deltas.get(key, 0) + o.r_values.get(key, 0) +1574 for key in set(o.deltas) | set(o.r_values)}) +1575 idl_dict.update({key: o.idl.get(key, 0) for key in set(o.deltas)}) +1576 +1577 names = sorted(new_dict.keys()) +1578 o = Obs([new_dict[name] for name in names], names, idl=[idl_dict[name] for name in names]) +1579 o.reweighted = np.max([oi.reweighted for oi in list_of_obs]) +1580 return o 1581 -1582 Parameters -1583 ---------- -1584 mean : list of floats or float -1585 N mean value(s) of the new Obs -1586 cov : list 
or array -1587 2d (NxN) Covariance matrix, 1d diagonal entries or 0d covariance -1588 name : str -1589 identifier for the covariance matrix -1590 grad : list or array -1591 Gradient of the Covobs wrt. the means belonging to cov. -1592 """ -1593 -1594 def covobs_to_obs(co): -1595 """Make an Obs out of a Covobs -1596 -1597 Parameters -1598 ---------- -1599 co : Covobs -1600 Covobs to be embedded into the Obs -1601 """ -1602 o = Obs([], [], means=[]) -1603 o._value = co.value -1604 o.names.append(co.name) -1605 o._covobs[co.name] = co -1606 o._dvalue = np.sqrt(co.errsq()) -1607 return o -1608 -1609 ol = [] -1610 if isinstance(means, (float, int)): -1611 means = [means] +1582 +1583def cov_Obs(means, cov, name, grad=None): +1584 """Create an Obs based on mean(s) and a covariance matrix +1585 +1586 Parameters +1587 ---------- +1588 mean : list of floats or float +1589 N mean value(s) of the new Obs +1590 cov : list or array +1591 2d (NxN) Covariance matrix, 1d diagonal entries or 0d covariance +1592 name : str +1593 identifier for the covariance matrix +1594 grad : list or array +1595 Gradient of the Covobs wrt. the means belonging to cov. +1596 """ +1597 +1598 def covobs_to_obs(co): +1599 """Make an Obs out of a Covobs +1600 +1601 Parameters +1602 ---------- +1603 co : Covobs +1604 Covobs to be embedded into the Obs +1605 """ +1606 o = Obs([], [], means=[]) +1607 o._value = co.value +1608 o.names.append(co.name) +1609 o._covobs[co.name] = co +1610 o._dvalue = np.sqrt(co.errsq()) +1611 return o 1612 -1613 for i in range(len(means)): -1614 ol.append(covobs_to_obs(Covobs(means[i], cov, name, pos=i, grad=grad))) -1615 if ol[0].covobs[name].N != len(means): -1616 raise Exception('You have to provide %d mean values!' % (ol[0].N)) -1617 if len(ol) == 1: -1618 return ol[0] -1619 return ol -1620 -1621 -1622def _determine_gap(o, e_content, e_name): -1623 gaps = [] -1624 for r_name in e_content[e_name]: -1625 if isinstance(o.idl[r_name], range): -1626 gaps.append(o.idl[r_name].step) -1627 else: -1628 gaps.append(np.min(np.diff(o.idl[r_name]))) -1629 -1630 gap = min(gaps) -1631 if not np.all([gi % gap == 0 for gi in gaps]): -1632 raise Exception(f"Replica for ensemble {e_name} do not have a common spacing.", gaps) +1613 ol = [] +1614 if isinstance(means, (float, int)): +1615 means = [means] +1616 +1617 for i in range(len(means)): +1618 ol.append(covobs_to_obs(Covobs(means[i], cov, name, pos=i, grad=grad))) +1619 if ol[0].covobs[name].N != len(means): +1620 raise Exception('You have to provide %d mean values!' % (ol[0].N)) +1621 if len(ol) == 1: +1622 return ol[0] +1623 return ol +1624 +1625 +1626def _determine_gap(o, e_content, e_name): +1627 gaps = [] +1628 for r_name in e_content[e_name]: +1629 if isinstance(o.idl[r_name], range): +1630 gaps.append(o.idl[r_name].step) +1631 else: +1632 gaps.append(np.min(np.diff(o.idl[r_name]))) 1633 -1634 return gap -1635 -1636 -1637def _check_lists_equal(idl): -1638 ''' -1639 Use groupby to efficiently check whether all elements of idl are identical. -1640 Returns True if all elements are equal, otherwise False. 
-1641 -1642 Parameters -1643 ---------- -1644 idl : list of lists, ranges or np.ndarrays -1645 ''' -1646 g = groupby([np.nditer(el) if isinstance(el, np.ndarray) else el for el in idl]) -1647 if next(g, True) and not next(g, False): -1648 return True -1649 return False +1634 gap = min(gaps) +1635 if not np.all([gi % gap == 0 for gi in gaps]): +1636 raise Exception(f"Replica for ensemble {e_name} do not have a common spacing.", gaps) +1637 +1638 return gap +1639 +1640 +1641def _check_lists_equal(idl): +1642 ''' +1643 Use groupby to efficiently check whether all elements of idl are identical. +1644 Returns True if all elements are equal, otherwise False. +1645 +1646 Parameters +1647 ---------- +1648 idl : list of lists, ranges or np.ndarrays +1649 ''' +1650 g = groupby([np.nditer(el) if isinstance(el, np.ndarray) else el for el in idl]) +1651 if next(g, True) and not next(g, False): +1652 return True +1653 return False @@ -2549,175 +2553,179 @@ 695 return _format_uncertainty(self.value, self._dvalue) 696 697 def __format__(self, format_type): -698 my_str = _format_uncertainty(self.value, self._dvalue, -699 significance=int(float(format_type.replace("+", "").replace("-", "")))) -700 for char in ["+", " "]: -701 if format_type.startswith(char): -702 if my_str[0] != "-": -703 my_str = char + my_str -704 return my_str -705 -706 def __hash__(self): -707 hash_tuple = (np.array([self.value]).astype(np.float32).data.tobytes(),) -708 hash_tuple += tuple([o.astype(np.float32).data.tobytes() for o in self.deltas.values()]) -709 hash_tuple += tuple([np.array([o.errsq()]).astype(np.float32).data.tobytes() for o in self.covobs.values()]) -710 hash_tuple += tuple([o.encode() for o in self.names]) -711 m = hashlib.md5() -712 [m.update(o) for o in hash_tuple] -713 return int(m.hexdigest(), 16) & 0xFFFFFFFF -714 -715 # Overload comparisons -716 def __lt__(self, other): -717 return self.value < other +698 if format_type == "": +699 significance = 2 +700 else: +701 significance = int(float(format_type.replace("+", "").replace("-", ""))) +702 my_str = _format_uncertainty(self.value, self._dvalue, +703 significance=significance) +704 for char in ["+", " "]: +705 if format_type.startswith(char): +706 if my_str[0] != "-": +707 my_str = char + my_str +708 return my_str +709 +710 def __hash__(self): +711 hash_tuple = (np.array([self.value]).astype(np.float32).data.tobytes(),) +712 hash_tuple += tuple([o.astype(np.float32).data.tobytes() for o in self.deltas.values()]) +713 hash_tuple += tuple([np.array([o.errsq()]).astype(np.float32).data.tobytes() for o in self.covobs.values()]) +714 hash_tuple += tuple([o.encode() for o in self.names]) +715 m = hashlib.md5() +716 [m.update(o) for o in hash_tuple] +717 return int(m.hexdigest(), 16) & 0xFFFFFFFF 718 -719 def __le__(self, other): -720 return self.value <= other -721 -722 def __gt__(self, other): -723 return self.value > other -724 -725 def __ge__(self, other): -726 return self.value >= other -727 -728 def __eq__(self, other): -729 return (self - other).is_zero() -730 -731 def __ne__(self, other): -732 return not (self - other).is_zero() -733 -734 # Overload math operations -735 def __add__(self, y): -736 if isinstance(y, Obs): -737 return derived_observable(lambda x, **kwargs: x[0] + x[1], [self, y], man_grad=[1, 1]) -738 else: -739 if isinstance(y, np.ndarray): -740 return np.array([self + o for o in y]) -741 elif y.__class__.__name__ in ['Corr', 'CObs']: -742 return NotImplemented -743 else: -744 return derived_observable(lambda x, **kwargs: x[0] + y, [self], 
man_grad=[1]) -745 -746 def __radd__(self, y): -747 return self + y -748 -749 def __mul__(self, y): -750 if isinstance(y, Obs): -751 return derived_observable(lambda x, **kwargs: x[0] * x[1], [self, y], man_grad=[y.value, self.value]) -752 else: -753 if isinstance(y, np.ndarray): -754 return np.array([self * o for o in y]) -755 elif isinstance(y, complex): -756 return CObs(self * y.real, self * y.imag) -757 elif y.__class__.__name__ in ['Corr', 'CObs']: -758 return NotImplemented -759 else: -760 return derived_observable(lambda x, **kwargs: x[0] * y, [self], man_grad=[y]) -761 -762 def __rmul__(self, y): -763 return self * y -764 -765 def __sub__(self, y): -766 if isinstance(y, Obs): -767 return derived_observable(lambda x, **kwargs: x[0] - x[1], [self, y], man_grad=[1, -1]) -768 else: -769 if isinstance(y, np.ndarray): -770 return np.array([self - o for o in y]) -771 elif y.__class__.__name__ in ['Corr', 'CObs']: -772 return NotImplemented -773 else: -774 return derived_observable(lambda x, **kwargs: x[0] - y, [self], man_grad=[1]) -775 -776 def __rsub__(self, y): -777 return -1 * (self - y) -778 -779 def __pos__(self): -780 return self -781 -782 def __neg__(self): -783 return -1 * self -784 -785 def __truediv__(self, y): -786 if isinstance(y, Obs): -787 return derived_observable(lambda x, **kwargs: x[0] / x[1], [self, y], man_grad=[1 / y.value, - self.value / y.value ** 2]) -788 else: -789 if isinstance(y, np.ndarray): -790 return np.array([self / o for o in y]) -791 elif y.__class__.__name__ in ['Corr', 'CObs']: -792 return NotImplemented -793 else: -794 return derived_observable(lambda x, **kwargs: x[0] / y, [self], man_grad=[1 / y]) -795 -796 def __rtruediv__(self, y): -797 if isinstance(y, Obs): -798 return derived_observable(lambda x, **kwargs: x[0] / x[1], [y, self], man_grad=[1 / self.value, - y.value / self.value ** 2]) -799 else: -800 if isinstance(y, np.ndarray): -801 return np.array([o / self for o in y]) -802 elif y.__class__.__name__ in ['Corr', 'CObs']: -803 return NotImplemented -804 else: -805 return derived_observable(lambda x, **kwargs: y / x[0], [self], man_grad=[-y / self.value ** 2]) -806 -807 def __pow__(self, y): -808 if isinstance(y, Obs): -809 return derived_observable(lambda x: x[0] ** x[1], [self, y]) -810 else: -811 return derived_observable(lambda x: x[0] ** y, [self]) -812 -813 def __rpow__(self, y): -814 if isinstance(y, Obs): -815 return derived_observable(lambda x: x[0] ** x[1], [y, self]) -816 else: -817 return derived_observable(lambda x: y ** x[0], [self]) -818 -819 def __abs__(self): -820 return derived_observable(lambda x: anp.abs(x[0]), [self]) -821 -822 # Overload numpy functions -823 def sqrt(self): -824 return derived_observable(lambda x, **kwargs: np.sqrt(x[0]), [self], man_grad=[1 / 2 / np.sqrt(self.value)]) +719 # Overload comparisons +720 def __lt__(self, other): +721 return self.value < other +722 +723 def __le__(self, other): +724 return self.value <= other +725 +726 def __gt__(self, other): +727 return self.value > other +728 +729 def __ge__(self, other): +730 return self.value >= other +731 +732 def __eq__(self, other): +733 return (self - other).is_zero() +734 +735 def __ne__(self, other): +736 return not (self - other).is_zero() +737 +738 # Overload math operations +739 def __add__(self, y): +740 if isinstance(y, Obs): +741 return derived_observable(lambda x, **kwargs: x[0] + x[1], [self, y], man_grad=[1, 1]) +742 else: +743 if isinstance(y, np.ndarray): +744 return np.array([self + o for o in y]) +745 elif y.__class__.__name__ in 
['Corr', 'CObs']: +746 return NotImplemented +747 else: +748 return derived_observable(lambda x, **kwargs: x[0] + y, [self], man_grad=[1]) +749 +750 def __radd__(self, y): +751 return self + y +752 +753 def __mul__(self, y): +754 if isinstance(y, Obs): +755 return derived_observable(lambda x, **kwargs: x[0] * x[1], [self, y], man_grad=[y.value, self.value]) +756 else: +757 if isinstance(y, np.ndarray): +758 return np.array([self * o for o in y]) +759 elif isinstance(y, complex): +760 return CObs(self * y.real, self * y.imag) +761 elif y.__class__.__name__ in ['Corr', 'CObs']: +762 return NotImplemented +763 else: +764 return derived_observable(lambda x, **kwargs: x[0] * y, [self], man_grad=[y]) +765 +766 def __rmul__(self, y): +767 return self * y +768 +769 def __sub__(self, y): +770 if isinstance(y, Obs): +771 return derived_observable(lambda x, **kwargs: x[0] - x[1], [self, y], man_grad=[1, -1]) +772 else: +773 if isinstance(y, np.ndarray): +774 return np.array([self - o for o in y]) +775 elif y.__class__.__name__ in ['Corr', 'CObs']: +776 return NotImplemented +777 else: +778 return derived_observable(lambda x, **kwargs: x[0] - y, [self], man_grad=[1]) +779 +780 def __rsub__(self, y): +781 return -1 * (self - y) +782 +783 def __pos__(self): +784 return self +785 +786 def __neg__(self): +787 return -1 * self +788 +789 def __truediv__(self, y): +790 if isinstance(y, Obs): +791 return derived_observable(lambda x, **kwargs: x[0] / x[1], [self, y], man_grad=[1 / y.value, - self.value / y.value ** 2]) +792 else: +793 if isinstance(y, np.ndarray): +794 return np.array([self / o for o in y]) +795 elif y.__class__.__name__ in ['Corr', 'CObs']: +796 return NotImplemented +797 else: +798 return derived_observable(lambda x, **kwargs: x[0] / y, [self], man_grad=[1 / y]) +799 +800 def __rtruediv__(self, y): +801 if isinstance(y, Obs): +802 return derived_observable(lambda x, **kwargs: x[0] / x[1], [y, self], man_grad=[1 / self.value, - y.value / self.value ** 2]) +803 else: +804 if isinstance(y, np.ndarray): +805 return np.array([o / self for o in y]) +806 elif y.__class__.__name__ in ['Corr', 'CObs']: +807 return NotImplemented +808 else: +809 return derived_observable(lambda x, **kwargs: y / x[0], [self], man_grad=[-y / self.value ** 2]) +810 +811 def __pow__(self, y): +812 if isinstance(y, Obs): +813 return derived_observable(lambda x: x[0] ** x[1], [self, y]) +814 else: +815 return derived_observable(lambda x: x[0] ** y, [self]) +816 +817 def __rpow__(self, y): +818 if isinstance(y, Obs): +819 return derived_observable(lambda x: x[0] ** x[1], [y, self]) +820 else: +821 return derived_observable(lambda x: y ** x[0], [self]) +822 +823 def __abs__(self): +824 return derived_observable(lambda x: anp.abs(x[0]), [self]) 825 -826 def log(self): -827 return derived_observable(lambda x, **kwargs: np.log(x[0]), [self], man_grad=[1 / self.value]) -828 -829 def exp(self): -830 return derived_observable(lambda x, **kwargs: np.exp(x[0]), [self], man_grad=[np.exp(self.value)]) -831 -832 def sin(self): -833 return derived_observable(lambda x, **kwargs: np.sin(x[0]), [self], man_grad=[np.cos(self.value)]) -834 -835 def cos(self): -836 return derived_observable(lambda x, **kwargs: np.cos(x[0]), [self], man_grad=[-np.sin(self.value)]) -837 -838 def tan(self): -839 return derived_observable(lambda x, **kwargs: np.tan(x[0]), [self], man_grad=[1 / np.cos(self.value) ** 2]) -840 -841 def arcsin(self): -842 return derived_observable(lambda x: anp.arcsin(x[0]), [self]) -843 -844 def arccos(self): -845 return 
derived_observable(lambda x: anp.arccos(x[0]), [self]) -846 -847 def arctan(self): -848 return derived_observable(lambda x: anp.arctan(x[0]), [self]) -849 -850 def sinh(self): -851 return derived_observable(lambda x, **kwargs: np.sinh(x[0]), [self], man_grad=[np.cosh(self.value)]) -852 -853 def cosh(self): -854 return derived_observable(lambda x, **kwargs: np.cosh(x[0]), [self], man_grad=[np.sinh(self.value)]) -855 -856 def tanh(self): -857 return derived_observable(lambda x, **kwargs: np.tanh(x[0]), [self], man_grad=[1 / np.cosh(self.value) ** 2]) -858 -859 def arcsinh(self): -860 return derived_observable(lambda x: anp.arcsinh(x[0]), [self]) -861 -862 def arccosh(self): -863 return derived_observable(lambda x: anp.arccosh(x[0]), [self]) -864 -865 def arctanh(self): -866 return derived_observable(lambda x: anp.arctanh(x[0]), [self]) +826 # Overload numpy functions +827 def sqrt(self): +828 return derived_observable(lambda x, **kwargs: np.sqrt(x[0]), [self], man_grad=[1 / 2 / np.sqrt(self.value)]) +829 +830 def log(self): +831 return derived_observable(lambda x, **kwargs: np.log(x[0]), [self], man_grad=[1 / self.value]) +832 +833 def exp(self): +834 return derived_observable(lambda x, **kwargs: np.exp(x[0]), [self], man_grad=[np.exp(self.value)]) +835 +836 def sin(self): +837 return derived_observable(lambda x, **kwargs: np.sin(x[0]), [self], man_grad=[np.cos(self.value)]) +838 +839 def cos(self): +840 return derived_observable(lambda x, **kwargs: np.cos(x[0]), [self], man_grad=[-np.sin(self.value)]) +841 +842 def tan(self): +843 return derived_observable(lambda x, **kwargs: np.tan(x[0]), [self], man_grad=[1 / np.cos(self.value) ** 2]) +844 +845 def arcsin(self): +846 return derived_observable(lambda x: anp.arcsin(x[0]), [self]) +847 +848 def arccos(self): +849 return derived_observable(lambda x: anp.arccos(x[0]), [self]) +850 +851 def arctan(self): +852 return derived_observable(lambda x: anp.arctan(x[0]), [self]) +853 +854 def sinh(self): +855 return derived_observable(lambda x, **kwargs: np.sinh(x[0]), [self], man_grad=[np.cosh(self.value)]) +856 +857 def cosh(self): +858 return derived_observable(lambda x, **kwargs: np.cosh(x[0]), [self], man_grad=[np.sinh(self.value)]) +859 +860 def tanh(self): +861 return derived_observable(lambda x, **kwargs: np.tanh(x[0]), [self], man_grad=[1 / np.cosh(self.value) ** 2]) +862 +863 def arcsinh(self): +864 return derived_observable(lambda x: anp.arcsinh(x[0]), [self]) +865 +866 def arccosh(self): +867 return derived_observable(lambda x: anp.arccosh(x[0]), [self]) +868 +869 def arctanh(self): +870 return derived_observable(lambda x: anp.arctanh(x[0]), [self]) @@ -3875,8 +3883,8 @@ should agree with samples from a full jackknife analysis up to O(1/N). -
823    def sqrt(self):
-824        return derived_observable(lambda x, **kwargs: np.sqrt(x[0]), [self], man_grad=[1 / 2 / np.sqrt(self.value)])
+827    def sqrt(self):
+828        return derived_observable(lambda x, **kwargs: np.sqrt(x[0]), [self], man_grad=[1 / 2 / np.sqrt(self.value)])
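Reviewer's note: the covariance hunks above are easiest to check against a minimal usage sketch. Everything below is illustrative only; it assumes pyerrors is importable as pe, and 'ens1' and 'renorm' are hypothetical names:

    import numpy as np
    import pyerrors as pe

    rng = np.random.default_rng(1)
    common = rng.normal(0.0, 1.0, 1000)
    obs1 = pe.Obs([common + rng.normal(0.0, 0.1, 1000)], ['ens1'])
    obs2 = pe.Obs([0.5 * common + rng.normal(0.0, 0.1, 1000)], ['ens1'])

    obs1.gamma_method()  # the gamma method has to be applied first
    obs2.gamma_method()

    cov = pe.covariance([obs1, obs2])                     # 2x2 error covariance matrix
    corr = pe.covariance([obs1, obs2], correlation=True)  # normalized to unit diagonal

    sys = pe.cov_Obs(1.0, 0.01 ** 2, 'renorm')            # 0d covariance, handled by the cov_Obs branch above

Note that smooth cannot be used on a 2x2 matrix: _smooth_eigenvalues above requires 2 < E < dim - 1.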
 
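The one behavioral change threaded through both __format__ hunks above is the fallback to significance=2 for an empty format spec; previously int(float("")) raised a ValueError, so a plain f"{obs}" failed. A sketch of the new behavior (numbers are illustrative; pseudo_Obs merely fabricates an Obs with a prescribed error):

    import pyerrors as pe

    obs = pe.pseudo_Obs(0.495, 0.014, 'ens1')
    print(f"{obs}")     # now defaults to two significant digits, e.g. 0.495(14)
    print(f"{obs:3}")   # explicit significance, e.g. 0.4950(140)
    print(f"{obs:+2}")  # '+' (or ' ') is prepended when the value is not negative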
@@ -3894,8 +3902,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-826    def log(self):
-827        return derived_observable(lambda x, **kwargs: np.log(x[0]), [self], man_grad=[1 / self.value])
+830    def log(self):
+831        return derived_observable(lambda x, **kwargs: np.log(x[0]), [self], man_grad=[1 / self.value])
 
@@ -3913,8 +3921,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-829    def exp(self):
-830        return derived_observable(lambda x, **kwargs: np.exp(x[0]), [self], man_grad=[np.exp(self.value)])
+833    def exp(self):
+834        return derived_observable(lambda x, **kwargs: np.exp(x[0]), [self], man_grad=[np.exp(self.value)])
 
@@ -3932,8 +3940,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-832    def sin(self):
-833        return derived_observable(lambda x, **kwargs: np.sin(x[0]), [self], man_grad=[np.cos(self.value)])
+836    def sin(self):
+837        return derived_observable(lambda x, **kwargs: np.sin(x[0]), [self], man_grad=[np.cos(self.value)])
 
@@ -3951,8 +3959,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-835    def cos(self):
-836        return derived_observable(lambda x, **kwargs: np.cos(x[0]), [self], man_grad=[-np.sin(self.value)])
+839    def cos(self):
+840        return derived_observable(lambda x, **kwargs: np.cos(x[0]), [self], man_grad=[-np.sin(self.value)])
 
@@ -3970,8 +3978,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-838    def tan(self):
-839        return derived_observable(lambda x, **kwargs: np.tan(x[0]), [self], man_grad=[1 / np.cos(self.value) ** 2])
+842    def tan(self):
+843        return derived_observable(lambda x, **kwargs: np.tan(x[0]), [self], man_grad=[1 / np.cos(self.value) ** 2])
 
@@ -3989,8 +3997,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-841    def arcsin(self):
-842        return derived_observable(lambda x: anp.arcsin(x[0]), [self])
+845    def arcsin(self):
+846        return derived_observable(lambda x: anp.arcsin(x[0]), [self])
 
@@ -4008,8 +4016,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-844    def arccos(self):
-845        return derived_observable(lambda x: anp.arccos(x[0]), [self])
+848    def arccos(self):
+849        return derived_observable(lambda x: anp.arccos(x[0]), [self])
 
@@ -4027,8 +4035,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-847    def arctan(self):
-848        return derived_observable(lambda x: anp.arctan(x[0]), [self])
+851    def arctan(self):
+852        return derived_observable(lambda x: anp.arctan(x[0]), [self])
 
@@ -4046,8 +4054,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-850    def sinh(self):
-851        return derived_observable(lambda x, **kwargs: np.sinh(x[0]), [self], man_grad=[np.cosh(self.value)])
+854    def sinh(self):
+855        return derived_observable(lambda x, **kwargs: np.sinh(x[0]), [self], man_grad=[np.cosh(self.value)])
 
@@ -4065,8 +4073,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-853    def cosh(self):
-854        return derived_observable(lambda x, **kwargs: np.cosh(x[0]), [self], man_grad=[np.sinh(self.value)])
+857    def cosh(self):
+858        return derived_observable(lambda x, **kwargs: np.cosh(x[0]), [self], man_grad=[np.sinh(self.value)])
 
@@ -4084,8 +4092,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-856    def tanh(self):
-857        return derived_observable(lambda x, **kwargs: np.tanh(x[0]), [self], man_grad=[1 / np.cosh(self.value) ** 2])
+860    def tanh(self):
+861        return derived_observable(lambda x, **kwargs: np.tanh(x[0]), [self], man_grad=[1 / np.cosh(self.value) ** 2])
 
@@ -4103,8 +4111,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-859    def arcsinh(self):
-860        return derived_observable(lambda x: anp.arcsinh(x[0]), [self])
+863    def arcsinh(self):
+864        return derived_observable(lambda x: anp.arcsinh(x[0]), [self])
 
@@ -4122,8 +4130,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-862    def arccosh(self):
-863        return derived_observable(lambda x: anp.arccosh(x[0]), [self])
+866    def arccosh(self):
+867        return derived_observable(lambda x: anp.arccosh(x[0]), [self])
 
@@ -4141,8 +4149,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-865    def arctanh(self):
-866        return derived_observable(lambda x: anp.arctanh(x[0]), [self])
+869    def arctanh(self):
+870        return derived_observable(lambda x: anp.arctanh(x[0]), [self])
 
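All of the wrapped elementary functions in the hunks above route through derived_observable with either a hand-coded first derivative (man_grad) or autograd, so linear error propagation applies. A quick self-check sketch (hypothetical numbers):

    import pyerrors as pe

    x = pe.pseudo_Obs(2.0, 0.05, 'ens1')
    y = x.log()                # np.log(x) dispatches to this method as well
    y.gamma_method()
    print(y.dvalue)            # approximately 0.025
    print(x.dvalue / x.value)  # first-order expectation: d(log x) = dx / x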
@@ -4161,115 +4169,115 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-869class CObs:
-870    """Class for a complex valued observable."""
-871    __slots__ = ['_real', '_imag', 'tag']
-872
-873    def __init__(self, real, imag=0.0):
-874        self._real = real
-875        self._imag = imag
-876        self.tag = None
-877
-878    @property
-879    def real(self):
-880        return self._real
+873class CObs:
+874    """Class for a complex valued observable."""
+875    __slots__ = ['_real', '_imag', 'tag']
+876
+877    def __init__(self, real, imag=0.0):
+878        self._real = real
+879        self._imag = imag
+880        self.tag = None
 881
 882    @property
-883    def imag(self):
-884        return self._imag
+883    def real(self):
+884        return self._real
 885
-886    def gamma_method(self, **kwargs):
-887        """Executes the gamma_method for the real and the imaginary part."""
-888        if isinstance(self.real, Obs):
-889            self.real.gamma_method(**kwargs)
-890        if isinstance(self.imag, Obs):
-891            self.imag.gamma_method(**kwargs)
-892
-893    def is_zero(self):
-894        """Checks whether both real and imaginary part are zero within machine precision."""
-895        return self.real == 0.0 and self.imag == 0.0
+886    @property
+887    def imag(self):
+888        return self._imag
+889
+890    def gamma_method(self, **kwargs):
+891        """Executes the gamma_method for the real and the imaginary part."""
+892        if isinstance(self.real, Obs):
+893            self.real.gamma_method(**kwargs)
+894        if isinstance(self.imag, Obs):
+895            self.imag.gamma_method(**kwargs)
 896
-897    def conjugate(self):
-898        return CObs(self.real, -self.imag)
-899
-900    def __add__(self, other):
-901        if isinstance(other, np.ndarray):
-902            return other + self
-903        elif hasattr(other, 'real') and hasattr(other, 'imag'):
-904            return CObs(self.real + other.real,
-905                        self.imag + other.imag)
-906        else:
-907            return CObs(self.real + other, self.imag)
-908
-909    def __radd__(self, y):
-910        return self + y
-911
-912    def __sub__(self, other):
-913        if isinstance(other, np.ndarray):
-914            return -1 * (other - self)
-915        elif hasattr(other, 'real') and hasattr(other, 'imag'):
-916            return CObs(self.real - other.real, self.imag - other.imag)
-917        else:
-918            return CObs(self.real - other, self.imag)
-919
-920    def __rsub__(self, other):
-921        return -1 * (self - other)
-922
-923    def __mul__(self, other):
-924        if isinstance(other, np.ndarray):
-925            return other * self
-926        elif hasattr(other, 'real') and hasattr(other, 'imag'):
-927            if all(isinstance(i, Obs) for i in [self.real, self.imag, other.real, other.imag]):
-928                return CObs(derived_observable(lambda x, **kwargs: x[0] * x[1] - x[2] * x[3],
-929                                               [self.real, other.real, self.imag, other.imag],
-930                                               man_grad=[other.real.value, self.real.value, -other.imag.value, -self.imag.value]),
-931                            derived_observable(lambda x, **kwargs: x[2] * x[1] + x[0] * x[3],
-932                                               [self.real, other.real, self.imag, other.imag],
-933                                               man_grad=[other.imag.value, self.imag.value, other.real.value, self.real.value]))
-934            elif getattr(other, 'imag', 0) != 0:
-935                return CObs(self.real * other.real - self.imag * other.imag,
-936                            self.imag * other.real + self.real * other.imag)
-937            else:
-938                return CObs(self.real * other.real, self.imag * other.real)
-939        else:
-940            return CObs(self.real * other, self.imag * other)
-941
-942    def __rmul__(self, other):
-943        return self * other
-944
-945    def __truediv__(self, other):
-946        if isinstance(other, np.ndarray):
-947            return 1 / (other / self)
-948        elif hasattr(other, 'real') and hasattr(other, 'imag'):
-949            r = other.real ** 2 + other.imag ** 2
-950            return CObs((self.real * other.real + self.imag * other.imag) / r, (self.imag * other.real - self.real * other.imag) / r)
-951        else:
-952            return CObs(self.real / other, self.imag / other)
-953
-954    def __rtruediv__(self, other):
-955        r = self.real ** 2 + self.imag ** 2
-956        if hasattr(other, 'real') and hasattr(other, 'imag'):
-957            return CObs((self.real * other.real + self.imag * other.imag) / r, (self.real * other.imag - self.imag * other.real) / r)
-958        else:
-959            return CObs(self.real * other / r, -self.imag * other / r)
-960
-961    def __abs__(self):
-962        return np.sqrt(self.real**2 + self.imag**2)
-963
-964    def __pos__(self):
-965        return self
-966
-967    def __neg__(self):
-968        return -1 * self
-969
-970    def __eq__(self, other):
-971        return self.real == other.real and self.imag == other.imag
-972
-973    def __str__(self):
-974        return '(' + str(self.real) + int(self.imag >= 0.0) * '+' + str(self.imag) + 'j)'
-975
-976    def __repr__(self):
-977        return 'CObs[' + str(self) + ']'
+897    def is_zero(self):
+898        """Checks whether both real and imaginary part are zero within machine precision."""
+899        return self.real == 0.0 and self.imag == 0.0
+900
+901    def conjugate(self):
+902        return CObs(self.real, -self.imag)
+903
+904    def __add__(self, other):
+905        if isinstance(other, np.ndarray):
+906            return other + self
+907        elif hasattr(other, 'real') and hasattr(other, 'imag'):
+908            return CObs(self.real + other.real,
+909                        self.imag + other.imag)
+910        else:
+911            return CObs(self.real + other, self.imag)
+912
+913    def __radd__(self, y):
+914        return self + y
+915
+916    def __sub__(self, other):
+917        if isinstance(other, np.ndarray):
+918            return -1 * (other - self)
+919        elif hasattr(other, 'real') and hasattr(other, 'imag'):
+920            return CObs(self.real - other.real, self.imag - other.imag)
+921        else:
+922            return CObs(self.real - other, self.imag)
+923
+924    def __rsub__(self, other):
+925        return -1 * (self - other)
+926
+927    def __mul__(self, other):
+928        if isinstance(other, np.ndarray):
+929            return other * self
+930        elif hasattr(other, 'real') and hasattr(other, 'imag'):
+931            if all(isinstance(i, Obs) for i in [self.real, self.imag, other.real, other.imag]):
+932                return CObs(derived_observable(lambda x, **kwargs: x[0] * x[1] - x[2] * x[3],
+933                                               [self.real, other.real, self.imag, other.imag],
+934                                               man_grad=[other.real.value, self.real.value, -other.imag.value, -self.imag.value]),
+935                            derived_observable(lambda x, **kwargs: x[2] * x[1] + x[0] * x[3],
+936                                               [self.real, other.real, self.imag, other.imag],
+937                                               man_grad=[other.imag.value, self.imag.value, other.real.value, self.real.value]))
+938            elif getattr(other, 'imag', 0) != 0:
+939                return CObs(self.real * other.real - self.imag * other.imag,
+940                            self.imag * other.real + self.real * other.imag)
+941            else:
+942                return CObs(self.real * other.real, self.imag * other.real)
+943        else:
+944            return CObs(self.real * other, self.imag * other)
+945
+946    def __rmul__(self, other):
+947        return self * other
+948
+949    def __truediv__(self, other):
+950        if isinstance(other, np.ndarray):
+951            return 1 / (other / self)
+952        elif hasattr(other, 'real') and hasattr(other, 'imag'):
+953            r = other.real ** 2 + other.imag ** 2
+954            return CObs((self.real * other.real + self.imag * other.imag) / r, (self.imag * other.real - self.real * other.imag) / r)
+955        else:
+956            return CObs(self.real / other, self.imag / other)
+957
+958    def __rtruediv__(self, other):
+959        r = self.real ** 2 + self.imag ** 2
+960        if hasattr(other, 'real') and hasattr(other, 'imag'):
+961            return CObs((self.real * other.real + self.imag * other.imag) / r, (self.real * other.imag - self.imag * other.real) / r)
+962        else:
+963            return CObs(self.real * other / r, -self.imag * other / r)
+964
+965    def __abs__(self):
+966        return np.sqrt(self.real**2 + self.imag**2)
+967
+968    def __pos__(self):
+969        return self
+970
+971    def __neg__(self):
+972        return -1 * self
+973
+974    def __eq__(self, other):
+975        return self.real == other.real and self.imag == other.imag
+976
+977    def __str__(self):
+978        return '(' + str(self.real) + int(self.imag >= 0.0) * '+' + str(self.imag) + 'j)'
+979
+980    def __repr__(self):
+981        return 'CObs[' + str(self) + ']'
 
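For review, a short sketch of the CObs arithmetic shown above (inputs are hypothetical):

    import pyerrors as pe

    re = pe.pseudo_Obs(1.2, 0.03, 'ens1')
    im = pe.pseudo_Obs(0.4, 0.02, 'ens1')
    c = pe.CObs(re, im)

    prod = c * c.conjugate()    # |c|^2; takes the all-Obs branch of __mul__
    prod.gamma_method()         # runs the gamma method on real and imaginary part
    print(prod.real)            # approximately 1.60 with propagated error
    print(prod.imag.is_zero())  # True, the imaginary contributions cancel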
@@ -4287,10 +4295,10 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-873    def __init__(self, real, imag=0.0):
-874        self._real = real
-875        self._imag = imag
-876        self.tag = None
+877    def __init__(self, real, imag=0.0):
+878        self._real = real
+879        self._imag = imag
+880        self.tag = None
 
@@ -4308,12 +4316,12 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-886    def gamma_method(self, **kwargs):
-887        """Executes the gamma_method for the real and the imaginary part."""
-888        if isinstance(self.real, Obs):
-889            self.real.gamma_method(**kwargs)
-890        if isinstance(self.imag, Obs):
-891            self.imag.gamma_method(**kwargs)
+890    def gamma_method(self, **kwargs):
+891        """Executes the gamma_method for the real and the imaginary part."""
+892        if isinstance(self.real, Obs):
+893            self.real.gamma_method(**kwargs)
+894        if isinstance(self.imag, Obs):
+895            self.imag.gamma_method(**kwargs)
 
@@ -4333,9 +4341,9 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-893    def is_zero(self):
-894        """Checks whether both real and imaginary part are zero within machine precision."""
-895        return self.real == 0.0 and self.imag == 0.0
+897    def is_zero(self):
+898        """Checks whether both real and imaginary part are zero within machine precision."""
+899        return self.real == 0.0 and self.imag == 0.0
 
@@ -4355,8 +4363,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-897    def conjugate(self):
-898        return CObs(self.real, -self.imag)
+901    def conjugate(self):
+902        return CObs(self.real, -self.imag)
 
@@ -4375,174 +4383,174 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-1099def derived_observable(func, data, array_mode=False, **kwargs):
-1100    """Construct a derived Obs according to func(data, **kwargs) using automatic differentiation.
-1101
-1102    Parameters
-1103    ----------
-1104    func : object
-1105        arbitrary function of the form func(data, **kwargs). For the
-1106        automatic differentiation to work, all numpy functions have to have
-1107        the autograd wrapper (use 'import autograd.numpy as anp').
-1108    data : list
-1109        list of Obs, e.g. [obs1, obs2, obs3].
-1110    num_grad : bool
-1111        if True, numerical derivatives are used instead of autograd
-1112        (default False). To control the numerical differentiation the
-1113        kwargs of numdifftools.step_generators.MaxStepGenerator
-1114        can be used.
-1115    man_grad : list
-1116        manually supply a list or an array which contains the jacobian
-1117        of func. Use cautiously, supplying the wrong derivative will
-1118        not be intercepted.
-1119
-1120    Notes
-1121    -----
-1122    For simple mathematical operations it can be practical to use anonymous
-1123    functions. For the ratio of two observables one can e.g. use
-1124
-1125    new_obs = derived_observable(lambda x: x[0] / x[1], [obs1, obs2])
-1126    """
-1127
-1128    data = np.asarray(data)
-1129    raveled_data = data.ravel()
-1130
-1131    # Workaround for matrix operations containing non Obs data
-1132    if not all(isinstance(x, Obs) for x in raveled_data):
-1133        for i in range(len(raveled_data)):
-1134            if isinstance(raveled_data[i], (int, float)):
-1135                raveled_data[i] = cov_Obs(raveled_data[i], 0.0, "###dummy_covobs###")
-1136
-1137    allcov = {}
-1138    for o in raveled_data:
-1139        for name in o.cov_names:
-1140            if name in allcov:
-1141                if not np.allclose(allcov[name], o.covobs[name].cov):
-1142                    raise Exception('Inconsistent covariance matrices for %s!' % (name))
-1143            else:
-1144                allcov[name] = o.covobs[name].cov
-1145
-1146    n_obs = len(raveled_data)
-1147    new_names = sorted(set([y for x in [o.names for o in raveled_data] for y in x]))
-1148    new_cov_names = sorted(set([y for x in [o.cov_names for o in raveled_data] for y in x]))
-1149    new_sample_names = sorted(set(new_names) - set(new_cov_names))
-1150
-1151    reweighted = len(list(filter(lambda o: o.reweighted is True, raveled_data))) > 0
-1152
-1153    if data.ndim == 1:
-1154        values = np.array([o.value for o in data])
-1155    else:
-1156        values = np.vectorize(lambda x: x.value)(data)
-1157
-1158    new_values = func(values, **kwargs)
-1159
-1160    multi = int(isinstance(new_values, np.ndarray))
+1103def derived_observable(func, data, array_mode=False, **kwargs):
+1104    """Construct a derived Obs according to func(data, **kwargs) using automatic differentiation.
+1105
+1106    Parameters
+1107    ----------
+1108    func : object
+1109        arbitrary function of the form func(data, **kwargs). For the
+1110        automatic differentiation to work, all numpy functions have to have
+1111        the autograd wrapper (use 'import autograd.numpy as anp').
+1112    data : list
+1113        list of Obs, e.g. [obs1, obs2, obs3].
+1114    num_grad : bool
+1115        if True, numerical derivatives are used instead of autograd
+1116        (default False). To control the numerical differentiation the
+1117        kwargs of numdifftools.step_generators.MaxStepGenerator
+1118        can be used.
+1119    man_grad : list
+1120        manually supply a list or an array which contains the jacobian
+1121        of func. Use cautiously, supplying the wrong derivative will
+1122        not be intercepted.
+1123
+1124    Notes
+1125    -----
+1126    For simple mathematical operations it can be practical to use anonymous
+1127    functions. For the ratio of two observables one can e.g. use
+1128
+1129    new_obs = derived_observable(lambda x: x[0] / x[1], [obs1, obs2])
+1130    """
+1131
+1132    data = np.asarray(data)
+1133    raveled_data = data.ravel()
+1134
+1135    # Workaround for matrix operations containing non Obs data
+1136    if not all(isinstance(x, Obs) for x in raveled_data):
+1137        for i in range(len(raveled_data)):
+1138            if isinstance(raveled_data[i], (int, float)):
+1139                raveled_data[i] = cov_Obs(raveled_data[i], 0.0, "###dummy_covobs###")
+1140
+1141    allcov = {}
+1142    for o in raveled_data:
+1143        for name in o.cov_names:
+1144            if name in allcov:
+1145                if not np.allclose(allcov[name], o.covobs[name].cov):
+1146                    raise Exception('Inconsistent covariance matrices for %s!' % (name))
+1147            else:
+1148                allcov[name] = o.covobs[name].cov
+1149
+1150    n_obs = len(raveled_data)
+1151    new_names = sorted(set([y for x in [o.names for o in raveled_data] for y in x]))
+1152    new_cov_names = sorted(set([y for x in [o.cov_names for o in raveled_data] for y in x]))
+1153    new_sample_names = sorted(set(new_names) - set(new_cov_names))
+1154
+1155    reweighted = len(list(filter(lambda o: o.reweighted is True, raveled_data))) > 0
+1156
+1157    if data.ndim == 1:
+1158        values = np.array([o.value for o in data])
+1159    else:
+1160        values = np.vectorize(lambda x: x.value)(data)
 1161
-1162    new_r_values = {}
-1163    new_idl_d = {}
-1164    for name in new_sample_names:
-1165        idl = []
-1166        tmp_values = np.zeros(n_obs)
-1167        for i, item in enumerate(raveled_data):
-1168            tmp_values[i] = item.r_values.get(name, item.value)
-1169            tmp_idl = item.idl.get(name)
-1170            if tmp_idl is not None:
-1171                idl.append(tmp_idl)
-1172        if multi > 0:
-1173            tmp_values = np.array(tmp_values).reshape(data.shape)
-1174        new_r_values[name] = func(tmp_values, **kwargs)
-1175        new_idl_d[name] = _merge_idx(idl)
-1176
-1177    if 'man_grad' in kwargs:
-1178        deriv = np.asarray(kwargs.get('man_grad'))
-1179        if new_values.shape + data.shape != deriv.shape:
-1180            raise Exception('Manual derivative does not have correct shape.')
-1181    elif kwargs.get('num_grad') is True:
-1182        if multi > 0:
-1183            raise Exception('Multi mode currently not supported for numerical derivative')
-1184        options = {
-1185            'base_step': 0.1,
-1186            'step_ratio': 2.5}
-1187        for key in options.keys():
-1188            kwarg = kwargs.get(key)
-1189            if kwarg is not None:
-1190                options[key] = kwarg
-1191        tmp_df = nd.Gradient(func, order=4, **{k: v for k, v in options.items() if v is not None})(values, **kwargs)
-1192        if tmp_df.size == 1:
-1193            deriv = np.array([tmp_df.real])
-1194        else:
-1195            deriv = tmp_df.real
-1196    else:
-1197        deriv = jacobian(func)(values, **kwargs)
-1198
-1199    final_result = np.zeros(new_values.shape, dtype=object)
-1200
-1201    if array_mode is True:
+1162    new_values = func(values, **kwargs)
+1163
+1164    multi = int(isinstance(new_values, np.ndarray))
+1165
+1166    new_r_values = {}
+1167    new_idl_d = {}
+1168    for name in new_sample_names:
+1169        idl = []
+1170        tmp_values = np.zeros(n_obs)
+1171        for i, item in enumerate(raveled_data):
+1172            tmp_values[i] = item.r_values.get(name, item.value)
+1173            tmp_idl = item.idl.get(name)
+1174            if tmp_idl is not None:
+1175                idl.append(tmp_idl)
+1176        if multi > 0:
+1177            tmp_values = np.array(tmp_values).reshape(data.shape)
+1178        new_r_values[name] = func(tmp_values, **kwargs)
+1179        new_idl_d[name] = _merge_idx(idl)
+1180
+1181    if 'man_grad' in kwargs:
+1182        deriv = np.asarray(kwargs.get('man_grad'))
+1183        if new_values.shape + data.shape != deriv.shape:
+1184            raise Exception('Manual derivative does not have correct shape.')
+1185    elif kwargs.get('num_grad') is True:
+1186        if multi > 0:
+1187            raise Exception('Multi mode currently not supported for numerical derivative')
+1188        options = {
+1189            'base_step': 0.1,
+1190            'step_ratio': 2.5}
+1191        for key in options.keys():
+1192            kwarg = kwargs.get(key)
+1193            if kwarg is not None:
+1194                options[key] = kwarg
+1195        tmp_df = nd.Gradient(func, order=4, **{k: v for k, v in options.items() if v is not None})(values, **kwargs)
+1196        if tmp_df.size == 1:
+1197            deriv = np.array([tmp_df.real])
+1198        else:
+1199            deriv = tmp_df.real
+1200    else:
+1201        deriv = jacobian(func)(values, **kwargs)
 1202
-1203        class _Zero_grad():
-1204            def __init__(self, N):
-1205                self.grad = np.zeros((N, 1))
+1203    final_result = np.zeros(new_values.shape, dtype=object)
+1204
+1205    if array_mode is True:
 1206
-1207        new_covobs_lengths = dict(set([y for x in [[(n, o.covobs[n].N) for n in o.cov_names] for o in raveled_data] for y in x]))
-1208        d_extracted = {}
-1209        g_extracted = {}
-1210        for name in new_sample_names:
-1211            d_extracted[name] = []
-1212            ens_length = len(new_idl_d[name])
-1213            for i_dat, dat in enumerate(data):
-1214                d_extracted[name].append(np.array([_expand_deltas_for_merge(o.deltas.get(name, np.zeros(ens_length)), o.idl.get(name, new_idl_d[name]), o.shape.get(name, ens_length), new_idl_d[name]) for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (ens_length, )))
-1215        for name in new_cov_names:
-1216            g_extracted[name] = []
-1217            zero_grad = _Zero_grad(new_covobs_lengths[name])
-1218            for i_dat, dat in enumerate(data):
-1219                g_extracted[name].append(np.array([o.covobs.get(name, zero_grad).grad for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (new_covobs_lengths[name], 1)))
-1220
-1221    for i_val, new_val in np.ndenumerate(new_values):
-1222        new_deltas = {}
-1223        new_grad = {}
-1224        if array_mode is True:
-1225            for name in new_sample_names:
-1226                ens_length = d_extracted[name][0].shape[-1]
-1227                new_deltas[name] = np.zeros(ens_length)
-1228                for i_dat, dat in enumerate(d_extracted[name]):
-1229                    new_deltas[name] += np.tensordot(deriv[i_val + (i_dat, )], dat)
-1230            for name in new_cov_names:
-1231                new_grad[name] = 0
-1232                for i_dat, dat in enumerate(g_extracted[name]):
-1233                    new_grad[name] += np.tensordot(deriv[i_val + (i_dat, )], dat)
-1234        else:
-1235            for j_obs, obs in np.ndenumerate(data):
-1236                for name in obs.names:
-1237                    if name in obs.cov_names:
-1238                        new_grad[name] = new_grad.get(name, 0) + deriv[i_val + j_obs] * obs.covobs[name].grad
-1239                    else:
-1240                        new_deltas[name] = new_deltas.get(name, 0) + deriv[i_val + j_obs] * _expand_deltas_for_merge(obs.deltas[name], obs.idl[name], obs.shape[name], new_idl_d[name])
-1241
-1242        new_covobs = {name: Covobs(0, allcov[name], name, grad=new_grad[name]) for name in new_grad}
-1243
-1244        if not set(new_covobs.keys()).isdisjoint(new_deltas.keys()):
-1245            raise Exception('The same name has been used for deltas and covobs!')
-1246        new_samples = []
-1247        new_means = []
-1248        new_idl = []
-1249        new_names_obs = []
-1250        for name in new_names:
-1251            if name not in new_covobs:
-1252                new_samples.append(new_deltas[name])
-1253                new_idl.append(new_idl_d[name])
-1254                new_means.append(new_r_values[name][i_val])
-1255                new_names_obs.append(name)
-1256        final_result[i_val] = Obs(new_samples, new_names_obs, means=new_means, idl=new_idl)
-1257        for name in new_covobs:
-1258            final_result[i_val].names.append(name)
-1259        final_result[i_val]._covobs = new_covobs
-1260        final_result[i_val]._value = new_val
-1261        final_result[i_val].reweighted = reweighted
-1262
-1263    if multi == 0:
-1264        final_result = final_result.item()
-1265
-1266    return final_result
+1207        class _Zero_grad():
+1208            def __init__(self, N):
+1209                self.grad = np.zeros((N, 1))
+1210
+1211        new_covobs_lengths = dict(set([y for x in [[(n, o.covobs[n].N) for n in o.cov_names] for o in raveled_data] for y in x]))
+1212        d_extracted = {}
+1213        g_extracted = {}
+1214        for name in new_sample_names:
+1215            d_extracted[name] = []
+1216            ens_length = len(new_idl_d[name])
+1217            for i_dat, dat in enumerate(data):
+1218                d_extracted[name].append(np.array([_expand_deltas_for_merge(o.deltas.get(name, np.zeros(ens_length)), o.idl.get(name, new_idl_d[name]), o.shape.get(name, ens_length), new_idl_d[name]) for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (ens_length, )))
+1219        for name in new_cov_names:
+1220            g_extracted[name] = []
+1221            zero_grad = _Zero_grad(new_covobs_lengths[name])
+1222            for i_dat, dat in enumerate(data):
+1223                g_extracted[name].append(np.array([o.covobs.get(name, zero_grad).grad for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (new_covobs_lengths[name], 1)))
+1224
+1225    for i_val, new_val in np.ndenumerate(new_values):
+1226        new_deltas = {}
+1227        new_grad = {}
+1228        if array_mode is True:
+1229            for name in new_sample_names:
+1230                ens_length = d_extracted[name][0].shape[-1]
+1231                new_deltas[name] = np.zeros(ens_length)
+1232                for i_dat, dat in enumerate(d_extracted[name]):
+1233                    new_deltas[name] += np.tensordot(deriv[i_val + (i_dat, )], dat)
+1234            for name in new_cov_names:
+1235                new_grad[name] = 0
+1236                for i_dat, dat in enumerate(g_extracted[name]):
+1237                    new_grad[name] += np.tensordot(deriv[i_val + (i_dat, )], dat)
+1238        else:
+1239            for j_obs, obs in np.ndenumerate(data):
+1240                for name in obs.names:
+1241                    if name in obs.cov_names:
+1242                        new_grad[name] = new_grad.get(name, 0) + deriv[i_val + j_obs] * obs.covobs[name].grad
+1243                    else:
+1244                        new_deltas[name] = new_deltas.get(name, 0) + deriv[i_val + j_obs] * _expand_deltas_for_merge(obs.deltas[name], obs.idl[name], obs.shape[name], new_idl_d[name])
+1245
+1246        new_covobs = {name: Covobs(0, allcov[name], name, grad=new_grad[name]) for name in new_grad}
+1247
+1248        if not set(new_covobs.keys()).isdisjoint(new_deltas.keys()):
+1249            raise Exception('The same name has been used for deltas and covobs!')
+1250        new_samples = []
+1251        new_means = []
+1252        new_idl = []
+1253        new_names_obs = []
+1254        for name in new_names:
+1255            if name not in new_covobs:
+1256                new_samples.append(new_deltas[name])
+1257                new_idl.append(new_idl_d[name])
+1258                new_means.append(new_r_values[name][i_val])
+1259                new_names_obs.append(name)
+1260        final_result[i_val] = Obs(new_samples, new_names_obs, means=new_means, idl=new_idl)
+1261        for name in new_covobs:
+1262            final_result[i_val].names.append(name)
+1263        final_result[i_val]._covobs = new_covobs
+1264        final_result[i_val]._value = new_val
+1265        final_result[i_val].reweighted = reweighted
+1266
+1267    if multi == 0:
+1268        final_result = final_result.item()
+1269
+1270    return final_result
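
From the caller's side none of the array_mode machinery above is visible; derived_observable simply propagates errors through an arbitrary function of Obs inputs. A minimal usage sketch, assuming pyerrors is importable as pe and using synthetic data (the ensemble name 'A' and all sample values are made up):

    import numpy as np
    import pyerrors as pe

    rng = np.random.default_rng(42)
    # Two synthetic observables on the same hypothetical ensemble 'A'.
    obs_a = pe.Obs([rng.normal(1.0, 0.1, 500)], ['A'])
    obs_b = pe.Obs([rng.normal(2.0, 0.2, 500)], ['A'])

    # Propagate errors through an arbitrary function of the inputs.
    ratio = pe.derived_observable(lambda x, **kwargs: x[0] / x[1], [obs_a, obs_b])
    ratio.gamma_method()
    print(ratio)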
 
@@ -4589,46 +4597,46 @@ functions. For the ratio of two observables one can e.g. use

-1298def reweight(weight, obs, **kwargs):
-1299    """Reweight a list of observables.
-1300
-1301    Parameters
-1302    ----------
-1303    weight : Obs
-1304        Reweighting factor. An Observable that has to be defined on a superset of the
-1305        configurations in obs[i].idl for all i.
-1306    obs : list
-1307        list of Obs, e.g. [obs1, obs2, obs3].
-1308    all_configs : bool
-1309        If True, the reweighted observables are normalized by the average of
-1310        the reweighting factor on all configurations in weight.idl and not
-1311        on the configurations in obs[i].idl. Default False.
-1312    """
-1313    result = []
-1314    for i in range(len(obs)):
-1315        if len(obs[i].cov_names):
-1316            raise Exception('Error: Not possible to reweight an Obs that contains covobs!')
-1317        if not set(obs[i].names).issubset(weight.names):
-1318            raise Exception('Error: Ensembles do not fit')
-1319        for name in obs[i].names:
-1320            if not set(obs[i].idl[name]).issubset(weight.idl[name]):
-1321                raise Exception('obs[%d] has to be defined on a subset of the configs in weight.idl[%s]!' % (i, name))
-1322        new_samples = []
-1323        w_deltas = {}
-1324        for name in sorted(obs[i].names):
-1325            w_deltas[name] = _reduce_deltas(weight.deltas[name], weight.idl[name], obs[i].idl[name])
-1326            new_samples.append((w_deltas[name] + weight.r_values[name]) * (obs[i].deltas[name] + obs[i].r_values[name]))
-1327        tmp_obs = Obs(new_samples, sorted(obs[i].names), idl=[obs[i].idl[name] for name in sorted(obs[i].names)])
-1328
-1329        if kwargs.get('all_configs'):
-1330            new_weight = weight
-1331        else:
-1332            new_weight = Obs([w_deltas[name] + weight.r_values[name] for name in sorted(obs[i].names)], sorted(obs[i].names), idl=[obs[i].idl[name] for name in sorted(obs[i].names)])
-1333
-1334        result.append(tmp_obs / new_weight)
-1335        result[-1].reweighted = True
-1336
-1337    return result
+1302def reweight(weight, obs, **kwargs):
+1303    """Reweight a list of observables.
+1304
+1305    Parameters
+1306    ----------
+1307    weight : Obs
+1308        Reweighting factor. An Observable that has to be defined on a superset of the
+1309        configurations in obs[i].idl for all i.
+1310    obs : list
+1311        list of Obs, e.g. [obs1, obs2, obs3].
+1312    all_configs : bool
+1313        If True, the reweighted observables are normalized by the average of
+1314        the reweighting factor on all configurations in weight.idl and not
+1315        on the configurations in obs[i].idl. Default False.
+1316    """
+1317    result = []
+1318    for i in range(len(obs)):
+1319        if len(obs[i].cov_names):
+1320            raise Exception('Error: Not possible to reweight an Obs that contains covobs!')
+1321        if not set(obs[i].names).issubset(weight.names):
+1322            raise Exception('Error: Ensembles do not fit')
+1323        for name in obs[i].names:
+1324            if not set(obs[i].idl[name]).issubset(weight.idl[name]):
+1325                raise Exception('obs[%d] has to be defined on a subset of the configs in weight.idl[%s]!' % (i, name))
+1326        new_samples = []
+1327        w_deltas = {}
+1328        for name in sorted(obs[i].names):
+1329            w_deltas[name] = _reduce_deltas(weight.deltas[name], weight.idl[name], obs[i].idl[name])
+1330            new_samples.append((w_deltas[name] + weight.r_values[name]) * (obs[i].deltas[name] + obs[i].r_values[name]))
+1331        tmp_obs = Obs(new_samples, sorted(obs[i].names), idl=[obs[i].idl[name] for name in sorted(obs[i].names)])
+1332
+1333        if kwargs.get('all_configs'):
+1334            new_weight = weight
+1335        else:
+1336            new_weight = Obs([w_deltas[name] + weight.r_values[name] for name in sorted(obs[i].names)], sorted(obs[i].names), idl=[obs[i].idl[name] for name in sorted(obs[i].names)])
+1337
+1338        result.append(tmp_obs / new_weight)
+1339        result[-1].reweighted = True
+1340
+1341    return result
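
A usage sketch for reweight under the same assumptions (pyerrors as pe, synthetic data); the weight is made strictly positive so the normalization is well defined, and the returned list carries one reweighted Obs per input:

    import numpy as np
    import pyerrors as pe

    rng = np.random.default_rng(7)
    obs = pe.Obs([rng.normal(1.0, 0.1, 1000)], ['A'])
    weight = pe.Obs([np.abs(rng.normal(1.0, 0.05, 1000))], ['A'])

    reweighted = pe.reweight(weight, [obs])  # list with one Obs per input
    reweighted[0].gamma_method()
    print(reweighted[0].reweighted)  # True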
 
@@ -4662,47 +4670,47 @@ on the configurations in obs[i].idl. Default False.
-1340def correlate(obs_a, obs_b):
-1341    """Correlate two observables.
-1342
-1343    Parameters
-1344    ----------
-1345    obs_a : Obs
-1346        First observable
-1347    obs_b : Obs
-1348        Second observable
-1349
-1350    Notes
-1351    -----
-1352    Only correlate primary observables which have not been reweighted
-1353    yet. The reweighting has to be applied after correlating the observables.
-1354    Currently only works if ensembles are identical (this is not strictly necessary).
-1355    """
-1356
-1357    if sorted(obs_a.names) != sorted(obs_b.names):
-1358        raise Exception(f"Ensembles do not fit {set(obs_a.names) ^ set(obs_b.names)}")
-1359    if len(obs_a.cov_names) or len(obs_b.cov_names):
-1360        raise Exception('Error: Not possible to correlate Obs that contain covobs!')
-1361    for name in obs_a.names:
-1362        if obs_a.shape[name] != obs_b.shape[name]:
-1363            raise Exception(f'Shapes of ensemble {name} do not fit')
-1364        if obs_a.idl[name] != obs_b.idl[name]:
-1365            raise Exception(f'idl of ensemble {name} do not fit')
-1366
-1367    if obs_a.reweighted is True:
-1368        warnings.warn("The first observable is already reweighted.", RuntimeWarning)
-1369    if obs_b.reweighted is True:
-1370        warnings.warn("The second observable is already reweighted.", RuntimeWarning)
-1371
-1372    new_samples = []
-1373    new_idl = []
-1374    for name in sorted(obs_a.names):
-1375        new_samples.append((obs_a.deltas[name] + obs_a.r_values[name]) * (obs_b.deltas[name] + obs_b.r_values[name]))
-1376        new_idl.append(obs_a.idl[name])
-1377
-1378    o = Obs(new_samples, sorted(obs_a.names), idl=new_idl)
-1379    o.reweighted = obs_a.reweighted or obs_b.reweighted
-1380    return o
+1344def correlate(obs_a, obs_b):
+1345    """Correlate two observables.
+1346
+1347    Parameters
+1348    ----------
+1349    obs_a : Obs
+1350        First observable
+1351    obs_b : Obs
+1352        Second observable
+1353
+1354    Notes
+1355    -----
+1356    Only correlate primary observables which have not been reweighted
+1357    yet. The reweighting has to be applied after correlating the observables.
+1358    Currently only works if ensembles are identical (this is not strictly necessary).
+1359    """
+1360
+1361    if sorted(obs_a.names) != sorted(obs_b.names):
+1362        raise Exception(f"Ensembles do not fit {set(obs_a.names) ^ set(obs_b.names)}")
+1363    if len(obs_a.cov_names) or len(obs_b.cov_names):
+1364        raise Exception('Error: Not possible to correlate Obs that contain covobs!')
+1365    for name in obs_a.names:
+1366        if obs_a.shape[name] != obs_b.shape[name]:
+1367            raise Exception(f'Shapes of ensemble {name} do not fit')
+1368        if obs_a.idl[name] != obs_b.idl[name]:
+1369            raise Exception(f'idl of ensemble {name} do not fit')
+1370
+1371    if obs_a.reweighted is True:
+1372        warnings.warn("The first observable is already reweighted.", RuntimeWarning)
+1373    if obs_b.reweighted is True:
+1374        warnings.warn("The second observable is already reweighted.", RuntimeWarning)
+1375
+1376    new_samples = []
+1377    new_idl = []
+1378    for name in sorted(obs_a.names):
+1379        new_samples.append((obs_a.deltas[name] + obs_a.r_values[name]) * (obs_b.deltas[name] + obs_b.r_values[name]))
+1380        new_idl.append(obs_a.idl[name])
+1381
+1382    o = Obs(new_samples, sorted(obs_a.names), idl=new_idl)
+1383    o.reweighted = obs_a.reweighted or obs_b.reweighted
+1384    return o
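
Both inputs have to live on identical ensembles and configuration lists. A sketch with two synthetic observables that are correlated by construction (names and numbers are made up):

    import numpy as np
    import pyerrors as pe

    rng = np.random.default_rng(0)
    common = rng.normal(0.0, 1.0, 1000)
    obs_a = pe.Obs([1.0 + common], ['A'])
    obs_b = pe.Obs([2.0 + 0.5 * common], ['A'])  # correlated with obs_a

    product = pe.correlate(obs_a, obs_b)  # configuration-wise product of samples
    product.gamma_method()
    print(product)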
 
@@ -4737,74 +4745,74 @@ Currently only works if ensembles are identical (this is not strictly necessary)
-1383def covariance(obs, visualize=False, correlation=False, smooth=None, **kwargs):
-1384    r'''Calculates the error covariance matrix of a set of observables.
-1385
-1386    WARNING: This function should be used with care, especially for observables with support on multiple
-1387             ensembles with differing autocorrelations. See the notes below for details.
-1388
-1389    The gamma method has to be applied first to all observables.
-1390
-1391    Parameters
-1392    ----------
-1393    obs : list or numpy.ndarray
-1394        List or one dimensional array of Obs
-1395    visualize : bool
-1396        If True plots the corresponding normalized correlation matrix (default False).
-1397    correlation : bool
-1398        If True the correlation matrix instead of the error covariance matrix is returned (default False).
-1399    smooth : None or int
-1400        If smooth is an integer E between 2 and the dimension of the matrix minus 1, the eigenvalue
-1401        smoothing procedure of hep-lat/9412087 is applied to the correlation matrix, which leaves the
-1402        largest E eigenvalues essentially unchanged and smooths the smaller eigenvalues to avoid extremely
-1403        small ones.
-1404
-1405    Notes
-1406    -----
-1407    The error covariance is defined such that it agrees with the squared standard error for two identical observables
-1408    $$\operatorname{cov}(a,a)=\sum_{s=1}^N\delta_a^s\delta_a^s/N^2=\Gamma_{aa}(0)/N=\operatorname{var}(a)/N=\sigma_a^2$$
-1409    in the absence of autocorrelation.
-1410    The error covariance is estimated by calculating the correlation matrix assuming no autocorrelation and then rescaling the correlation matrix by the full errors, including the previous gamma method estimate for the autocorrelation of the observables. The covariance at window size 0 is guaranteed to be positive semi-definite
-1411    $$\sum_{i,j}v_i\Gamma_{ij}(0)v_j=\frac{1}{N}\sum_{s=1}^N\sum_{i,j}v_i\delta_i^s\delta_j^s v_j=\frac{1}{N}\sum_{s=1}^N\sum_{i}|v_i\delta_i^s|^2\geq 0\,,$$ for every $v\in\mathbb{R}^M$, while such an identity does not hold for larger windows/lags.
-1412    For observables defined on a single ensemble our approximation is equivalent to assuming that the integrated autocorrelation time of an off-diagonal element is equal to the geometric mean of the integrated autocorrelation times of the corresponding diagonal elements.
-1413    $$\tau_{\mathrm{int}, ij}=\sqrt{\tau_{\mathrm{int}, i}\times \tau_{\mathrm{int}, j}}$$
-1414    This construction ensures that the estimated covariance matrix is positive semi-definite (up to numerical rounding errors).
-1415    '''
-1416
-1417    length = len(obs)
-1418
-1419    max_samples = np.max([o.N for o in obs])
-1420    if max_samples <= length and not [item for sublist in [o.cov_names for o in obs] for item in sublist]:
-1421        warnings.warn(f"The dimension of the covariance matrix ({length}) is larger than or equal to the number of samples ({max_samples}). This will result in a rank-deficient matrix.", RuntimeWarning)
+1387def covariance(obs, visualize=False, correlation=False, smooth=None, **kwargs):
+1388    r'''Calculates the error covariance matrix of a set of observables.
+1389
+1390    WARNING: This function should be used with care, especially for observables with support on multiple
+1391             ensembles with differing autocorrelations. See the notes below for details.
+1392
+1393    The gamma method has to be applied first to all observables.
+1394
+1395    Parameters
+1396    ----------
+1397    obs : list or numpy.ndarray
+1398        List or one dimensional array of Obs
+1399    visualize : bool
+1400        If True plots the corresponding normalized correlation matrix (default False).
+1401    correlation : bool
+1402        If True the correlation matrix instead of the error covariance matrix is returned (default False).
+1403    smooth : None or int
+1404        If smooth is an integer E between 2 and the dimension of the matrix minus 1, the eigenvalue
+1405        smoothing procedure of hep-lat/9412087 is applied to the correlation matrix, which leaves the
+1406        largest E eigenvalues essentially unchanged and smooths the smaller eigenvalues to avoid extremely
+1407        small ones.
+1408
+1409    Notes
+1410    -----
+1411    The error covariance is defined such that it agrees with the squared standard error for two identical observables
+1412    $$\operatorname{cov}(a,a)=\sum_{s=1}^N\delta_a^s\delta_a^s/N^2=\Gamma_{aa}(0)/N=\operatorname{var}(a)/N=\sigma_a^2$$
+1413    in the absence of autocorrelation.
+1414    The error covariance is estimated by calculating the correlation matrix assuming no autocorrelation and then rescaling the correlation matrix by the full errors, including the previous gamma method estimate for the autocorrelation of the observables. The covariance at window size 0 is guaranteed to be positive semi-definite
+1415    $$\sum_{i,j}v_i\Gamma_{ij}(0)v_j=\frac{1}{N}\sum_{s=1}^N\sum_{i,j}v_i\delta_i^s\delta_j^s v_j=\frac{1}{N}\sum_{s=1}^N\sum_{i}|v_i\delta_i^s|^2\geq 0\,,$$ for every $v\in\mathbb{R}^M$, while such an identity does not hold for larger windows/lags.
+1416    For observables defined on a single ensemble our approximation is equivalent to assuming that the integrated autocorrelation time of an off-diagonal element is equal to the geometric mean of the integrated autocorrelation times of the corresponding diagonal elements.
+1417    $$\tau_{\mathrm{int}, ij}=\sqrt{\tau_{\mathrm{int}, i}\times \tau_{\mathrm{int}, j}}$$
+1418    This construction ensures that the estimated covariance matrix is positive semi-definite (up to numerical rounding errors).
+1419    '''
+1420
+1421    length = len(obs)
 1422
-1423    cov = np.zeros((length, length))
-1424    for i in range(length):
-1425        for j in range(i, length):
-1426            cov[i, j] = _covariance_element(obs[i], obs[j])
-1427    cov = cov + cov.T - np.diag(np.diag(cov))
-1428
-1429    corr = np.diag(1 / np.sqrt(np.diag(cov))) @ cov @ np.diag(1 / np.sqrt(np.diag(cov)))
-1430
-1431    if isinstance(smooth, int):
-1432        corr = _smooth_eigenvalues(corr, smooth)
-1433
-1434    if visualize:
-1435        plt.matshow(corr, vmin=-1, vmax=1)
-1436        plt.set_cmap('RdBu')
-1437        plt.colorbar()
-1438        plt.draw()
-1439
-1440    if correlation is True:
-1441        return corr
-1442
-1443    errors = [o.dvalue for o in obs]
-1444    cov = np.diag(errors) @ corr @ np.diag(errors)
-1445
-1446    eigenvalues = np.linalg.eigh(cov)[0]
-1447    if not np.all(eigenvalues >= 0):
-1448        warnings.warn("Covariance matrix is not positive semi-definite (Eigenvalues: " + str(eigenvalues) + ")", RuntimeWarning)
+1423    max_samples = np.max([o.N for o in obs])
+1424    if max_samples <= length and not [item for sublist in [o.cov_names for o in obs] for item in sublist]:
+1425        warnings.warn(f"The dimension of the covariance matrix ({length}) is larger than or equal to the number of samples ({max_samples}). This will result in a rank-deficient matrix.", RuntimeWarning)
+1426
+1427    cov = np.zeros((length, length))
+1428    for i in range(length):
+1429        for j in range(i, length):
+1430            cov[i, j] = _covariance_element(obs[i], obs[j])
+1431    cov = cov + cov.T - np.diag(np.diag(cov))
+1432
+1433    corr = np.diag(1 / np.sqrt(np.diag(cov))) @ cov @ np.diag(1 / np.sqrt(np.diag(cov)))
+1434
+1435    if isinstance(smooth, int):
+1436        corr = _smooth_eigenvalues(corr, smooth)
+1437
+1438    if visualize:
+1439        plt.matshow(corr, vmin=-1, vmax=1)
+1440        plt.set_cmap('RdBu')
+1441        plt.colorbar()
+1442        plt.draw()
+1443
+1444    if correlation is True:
+1445        return corr
+1446
+1447    errors = [o.dvalue for o in obs]
+1448    cov = np.diag(errors) @ corr @ np.diag(errors)
 1449
-1450    return cov
+1450    eigenvalues = np.linalg.eigh(cov)[0]
+1451    if not np.all(eigenvalues >= 0):
+1452        warnings.warn("Covariance matrix is not positive semi-definite (Eigenvalues: " + str(eigenvalues) + ")", RuntimeWarning)
+1453
+1454    return cov
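
A sketch of the covariance workflow under the same assumptions; note that gamma_method has to be called on every input first, as the docstring requires:

    import numpy as np
    import pyerrors as pe

    rng = np.random.default_rng(1)
    common = rng.normal(0.0, 1.0, 1000)
    obs1 = pe.Obs([common + rng.normal(0.0, 0.5, 1000)], ['A'])
    obs2 = pe.Obs([common + rng.normal(0.0, 0.5, 1000)], ['A'])
    for o in (obs1, obs2):
        o.gamma_method()  # gamma method first, as required

    cov = pe.covariance([obs1, obs2])                     # 2x2 error covariance
    corr = pe.covariance([obs1, obs2], correlation=True)  # normalized correlation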
 
@@ -4856,24 +4864,24 @@ This construction ensures that the estimated covariance matrix is positive semi-
-1530def import_jackknife(jacks, name, idl=None):
-1531    """Imports jackknife samples and returns an Obs
-1532
-1533    Parameters
-1534    ----------
-1535    jacks : numpy.ndarray
-1536        numpy array containing the mean value as zeroth entry and
-1537        the N jackknife samples as first to Nth entry.
-1538    name : str
-1539        name of the ensemble the samples are defined on.
-1540    """
-1541    length = len(jacks) - 1
-1542    prj = (np.ones((length, length)) - (length - 1) * np.identity(length))
-1543    samples = jacks[1:] @ prj
-1544    mean = np.mean(samples)
-1545    new_obs = Obs([samples - mean], [name], idl=idl, means=[mean])
-1546    new_obs._value = jacks[0]
-1547    return new_obs
+1534def import_jackknife(jacks, name, idl=None):
+1535    """Imports jackknife samples and returns an Obs
+1536
+1537    Parameters
+1538    ----------
+1539    jacks : numpy.ndarray
+1540        numpy array containing the mean value as zeroth entry and
+1541        the N jackknife samples as first to Nth entry.
+1542    name : str
+1543        name of the ensemble the samples are defined on.
+1544    """
+1545    length = len(jacks) - 1
+1546    prj = (np.ones((length, length)) - (length - 1) * np.identity(length))
+1547    samples = jacks[1:] @ prj
+1548    mean = np.mean(samples)
+1549    new_obs = Obs([samples - mean], [name], idl=idl, means=[mean])
+1550    new_obs._value = jacks[0]
+1551    return new_obs
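
Although not listed in the docstring, the idl keyword passes the configuration list the samples are defined on. A round-trip sketch, assuming Obs.export_jackknife produces the layout described above (mean as zeroth entry, the N jackknife samples after):

    import numpy as np
    import pyerrors as pe

    rng = np.random.default_rng(3)
    obs = pe.Obs([rng.normal(1.0, 0.2, 100)], ['A'])

    jacks = obs.export_jackknife()        # mean first, then N jackknife samples
    round_trip = pe.import_jackknife(jacks, 'A')
    round_trip.gamma_method()
    print(round_trip)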
 
@@ -4903,34 +4911,34 @@ name of the ensemble the samples are defined on.
-1550def merge_obs(list_of_obs):
-1551    """Combine all observables in list_of_obs into one new observable
-1552
-1553    Parameters
-1554    ----------
-1555    list_of_obs : list
-1556        list of the Obs objects to be combined
-1557
-1558    Notes
-1559    -----
-1560    It is not possible to combine obs which are based on the same replicum
-1561    """
-1562    replist = [item for obs in list_of_obs for item in obs.names]
-1563    if len(replist) != len(set(replist)):
-1564        raise Exception('list_of_obs contains duplicate replica: %s' % (str(replist)))
-1565    if any([len(o.cov_names) for o in list_of_obs]):
-1566        raise Exception('Not possible to merge data that contains covobs!')
-1567    new_dict = {}
-1568    idl_dict = {}
-1569    for o in list_of_obs:
-1570        new_dict.update({key: o.deltas.get(key, 0) + o.r_values.get(key, 0)
-1571                        for key in set(o.deltas) | set(o.r_values)})
-1572        idl_dict.update({key: o.idl.get(key, 0) for key in set(o.deltas)})
-1573
-1574    names = sorted(new_dict.keys())
-1575    o = Obs([new_dict[name] for name in names], names, idl=[idl_dict[name] for name in names])
-1576    o.reweighted = any(oi.reweighted for oi in list_of_obs)
-1577    return o
+1554def merge_obs(list_of_obs):
+1555    """Combine all observables in list_of_obs into one new observable
+1556
+1557    Parameters
+1558    ----------
+1559    list_of_obs : list
+1560        list of the Obs objects to be combined
+1561
+1562    Notes
+1563    -----
+1564    It is not possible to combine obs which are based on the same replicum
+1565    """
+1566    replist = [item for obs in list_of_obs for item in obs.names]
+1567    if len(replist) != len(set(replist)):
+1568        raise Exception('list_of_obs contains duplicate replica: %s' % (str(replist)))
+1569    if any([len(o.cov_names) for o in list_of_obs]):
+1570        raise Exception('Not possible to merge data that contains covobs!')
+1571    new_dict = {}
+1572    idl_dict = {}
+1573    for o in list_of_obs:
+1574        new_dict.update({key: o.deltas.get(key, 0) + o.r_values.get(key, 0)
+1575                        for key in set(o.deltas) | set(o.r_values)})
+1576        idl_dict.update({key: o.idl.get(key, 0) for key in set(o.deltas)})
+1577
+1578    names = sorted(new_dict.keys())
+1579    o = Obs([new_dict[name] for name in names], names, idl=[idl_dict[name] for name in names])
+1580    o.reweighted = any(oi.reweighted for oi in list_of_obs)
+1581    return o
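
A sketch combining two replica of one hypothetical ensemble; the names follow the 'ensemble|replicum' convention so they do not clash:

    import numpy as np
    import pyerrors as pe

    rng = np.random.default_rng(5)
    rep1 = pe.Obs([rng.normal(1.0, 0.1, 500)], ['A|r1'])
    rep2 = pe.Obs([rng.normal(1.0, 0.1, 500)], ['A|r2'])

    combined = pe.merge_obs([rep1, rep2])  # one Obs carrying both replica
    combined.gamma_method()
    print(combined)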
 
@@ -4961,47 +4969,47 @@ list of the Obs object to be combined
-1580def cov_Obs(means, cov, name, grad=None):
-1581    """Create an Obs based on mean(s) and a covariance matrix
-1582
-1583    Parameters
-1584    ----------
-1585    means : list of floats or float
-1586        N mean value(s) of the new Obs
-1587    cov : list or array
-1588        2d (NxN) covariance matrix, 1d diagonal entries, or 0d covariance
-1589    name : str
-1590        identifier for the covariance matrix
-1591    grad : list or array
-1592        Gradient of the Covobs w.r.t. the means belonging to cov.
-1593    """
-1594
-1595    def covobs_to_obs(co):
-1596        """Make an Obs out of a Covobs
-1597
-1598        Parameters
-1599        ----------
-1600        co : Covobs
-1601            Covobs to be embedded into the Obs
-1602        """
-1603        o = Obs([], [], means=[])
-1604        o._value = co.value
-1605        o.names.append(co.name)
-1606        o._covobs[co.name] = co
-1607        o._dvalue = np.sqrt(co.errsq())
-1608        return o
-1609
-1610    ol = []
-1611    if isinstance(means, (float, int)):
-1612        means = [means]
+1584def cov_Obs(means, cov, name, grad=None):
+1585    """Create an Obs based on mean(s) and a covariance matrix
+1586
+1587    Parameters
+1588    ----------
+1589    means : list of floats or float
+1590        N mean value(s) of the new Obs
+1591    cov : list or array
+1592        2d (NxN) covariance matrix, 1d diagonal entries, or 0d covariance
+1593    name : str
+1594        identifier for the covariance matrix
+1595    grad : list or array
+1596        Gradient of the Covobs w.r.t. the means belonging to cov.
+1597    """
+1598
+1599    def covobs_to_obs(co):
+1600        """Make an Obs out of a Covobs
+1601
+1602        Parameters
+1603        ----------
+1604        co : Covobs
+1605            Covobs to be embedded into the Obs
+1606        """
+1607        o = Obs([], [], means=[])
+1608        o._value = co.value
+1609        o.names.append(co.name)
+1610        o._covobs[co.name] = co
+1611        o._dvalue = np.sqrt(co.errsq())
+1612        return o
 1613
-1614    for i in range(len(means)):
-1615        ol.append(covobs_to_obs(Covobs(means[i], cov, name, pos=i, grad=grad)))
-1616    if ol[0].covobs[name].N != len(means):
-1617        raise Exception('You have to provide %d mean values!' % (ol[0].covobs[name].N))
-1618    if len(ol) == 1:
-1619        return ol[0]
-1620    return ol
+1614    ol = []
+1615    if isinstance(means, (float, int)):
+1616        means = [means]
+1617
+1618    for i in range(len(means)):
+1619        ol.append(covobs_to_obs(Covobs(means[i], cov, name, pos=i, grad=grad)))
+1620    if ol[0].covobs[name].N != len(means):
+1621        raise Exception('You have to provide %d mean values!' % (ol[0].covobs[name].N))
+1622    if len(ol) == 1:
+1623        return ol[0]
+1624    return ol
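
A sketch of both call signatures, with made-up numbers: a single mean with a 0d covariance (i.e. just the variance) and two correlated means sharing a 2x2 covariance matrix:

    import pyerrors as pe

    # Single input: 0d covariance, i.e. just the variance.
    single = pe.cov_Obs(0.92, 0.01 ** 2, 'input_1')

    # Two correlated inputs sharing one covariance matrix.
    o1, o2 = pe.cov_Obs([1.0, 2.0],
                        [[0.0100, 0.0020],
                         [0.0020, 0.0400]], 'input_2')
    print(single, o1, o2)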