From 5165a436919ba166cdd3ea34ec8a34caf2183ae6 Mon Sep 17 00:00:00 2001 From: fjosw Date: Wed, 19 Jul 2023 14:07:36 +0000 Subject: [PATCH] Documentation updated --- docs/pyerrors/correlators.html | 1530 +++++++------- docs/pyerrors/obs.html | 3396 ++++++++++++++++---------------- 2 files changed, 2469 insertions(+), 2457 deletions(-) diff --git a/docs/pyerrors/correlators.html b/docs/pyerrors/correlators.html index 206809ff..939ca477 100644 --- a/docs/pyerrors/correlators.html +++ b/docs/pyerrors/correlators.html @@ -1299,368 +1299,375 @@ 1056 1057 __array_priority__ = 10000 1058 -1059 def __add__(self, y): +1059 def __eq__(self, y): 1060 if isinstance(y, Corr): -1061 if ((self.N != y.N) or (self.T != y.T)): -1062 raise Exception("Addition of Corrs with different shape") -1063 newcontent = [] -1064 for t in range(self.T): -1065 if _check_for_none(self, self.content[t]) or _check_for_none(y, y.content[t]): -1066 newcontent.append(None) -1067 else: -1068 newcontent.append(self.content[t] + y.content[t]) -1069 return Corr(newcontent) -1070 -1071 elif isinstance(y, (Obs, int, float, CObs)): -1072 newcontent = [] -1073 for t in range(self.T): -1074 if _check_for_none(self, self.content[t]): -1075 newcontent.append(None) -1076 else: -1077 newcontent.append(self.content[t] + y) -1078 return Corr(newcontent, prange=self.prange) -1079 elif isinstance(y, np.ndarray): -1080 if y.shape == (self.T,): -1081 return Corr(list((np.array(self.content).T + y).T)) -1082 else: -1083 raise ValueError("operands could not be broadcast together") -1084 else: -1085 raise TypeError("Corr + wrong type") -1086 -1087 def __mul__(self, y): -1088 if isinstance(y, Corr): -1089 if not ((self.N == 1 or y.N == 1 or self.N == y.N) and self.T == y.T): -1090 raise Exception("Multiplication of Corr object requires N=N or N=1 and T=T") -1091 newcontent = [] -1092 for t in range(self.T): -1093 if _check_for_none(self, self.content[t]) or _check_for_none(y, y.content[t]): -1094 newcontent.append(None) -1095 else: -1096 newcontent.append(self.content[t] * y.content[t]) -1097 return Corr(newcontent) -1098 -1099 elif isinstance(y, (Obs, int, float, CObs)): -1100 newcontent = [] -1101 for t in range(self.T): -1102 if _check_for_none(self, self.content[t]): -1103 newcontent.append(None) -1104 else: -1105 newcontent.append(self.content[t] * y) -1106 return Corr(newcontent, prange=self.prange) -1107 elif isinstance(y, np.ndarray): -1108 if y.shape == (self.T,): -1109 return Corr(list((np.array(self.content).T * y).T)) -1110 else: -1111 raise ValueError("operands could not be broadcast together") -1112 else: -1113 raise TypeError("Corr * wrong type") -1114 -1115 def __matmul__(self, y): -1116 if isinstance(y, np.ndarray): -1117 if y.ndim != 2 or y.shape[0] != y.shape[1]: -1118 raise ValueError("Can only multiply correlators by square matrices.") -1119 if not self.N == y.shape[0]: -1120 raise ValueError("matmul: mismatch of matrix dimensions") -1121 newcontent = [] -1122 for t in range(self.T): -1123 if _check_for_none(self, self.content[t]): -1124 newcontent.append(None) -1125 else: -1126 newcontent.append(self.content[t] @ y) -1127 return Corr(newcontent) -1128 elif isinstance(y, Corr): -1129 if not self.N == y.N: -1130 raise ValueError("matmul: mismatch of matrix dimensions") -1131 newcontent = [] -1132 for t in range(self.T): -1133 if _check_for_none(self, self.content[t]) or _check_for_none(y, y.content[t]): -1134 newcontent.append(None) -1135 else: -1136 newcontent.append(self.content[t] @ y.content[t]) -1137 return Corr(newcontent) 
-1138 -1139 else: -1140 return NotImplemented -1141 -1142 def __rmatmul__(self, y): -1143 if isinstance(y, np.ndarray): -1144 if y.ndim != 2 or y.shape[0] != y.shape[1]: -1145 raise ValueError("Can only multiply correlators by square matrices.") -1146 if not self.N == y.shape[0]: -1147 raise ValueError("matmul: mismatch of matrix dimensions") -1148 newcontent = [] -1149 for t in range(self.T): -1150 if _check_for_none(self, self.content[t]): -1151 newcontent.append(None) -1152 else: -1153 newcontent.append(y @ self.content[t]) -1154 return Corr(newcontent) -1155 else: -1156 return NotImplemented -1157 -1158 def __truediv__(self, y): -1159 if isinstance(y, Corr): -1160 if not ((self.N == 1 or y.N == 1 or self.N == y.N) and self.T == y.T): -1161 raise Exception("Multiplication of Corr object requires N=N or N=1 and T=T") -1162 newcontent = [] -1163 for t in range(self.T): -1164 if _check_for_none(self, self.content[t]) or _check_for_none(y, y.content[t]): -1165 newcontent.append(None) -1166 else: -1167 newcontent.append(self.content[t] / y.content[t]) -1168 for t in range(self.T): -1169 if _check_for_none(self, newcontent[t]): -1170 continue -1171 if np.isnan(np.sum(newcontent[t]).value): -1172 newcontent[t] = None -1173 -1174 if all([item is None for item in newcontent]): -1175 raise Exception("Division returns completely undefined correlator") -1176 return Corr(newcontent) -1177 -1178 elif isinstance(y, (Obs, CObs)): -1179 if isinstance(y, Obs): -1180 if y.value == 0: -1181 raise Exception('Division by zero will return undefined correlator') -1182 if isinstance(y, CObs): -1183 if y.is_zero(): -1184 raise Exception('Division by zero will return undefined correlator') -1185 -1186 newcontent = [] -1187 for t in range(self.T): -1188 if _check_for_none(self, self.content[t]): -1189 newcontent.append(None) -1190 else: -1191 newcontent.append(self.content[t] / y) -1192 return Corr(newcontent, prange=self.prange) -1193 -1194 elif isinstance(y, (int, float)): -1195 if y == 0: -1196 raise Exception('Division by zero will return undefined correlator') -1197 newcontent = [] -1198 for t in range(self.T): -1199 if _check_for_none(self, self.content[t]): -1200 newcontent.append(None) -1201 else: -1202 newcontent.append(self.content[t] / y) -1203 return Corr(newcontent, prange=self.prange) -1204 elif isinstance(y, np.ndarray): -1205 if y.shape == (self.T,): -1206 return Corr(list((np.array(self.content).T / y).T)) -1207 else: -1208 raise ValueError("operands could not be broadcast together") -1209 else: -1210 raise TypeError('Corr / wrong type') -1211 -1212 def __neg__(self): -1213 newcontent = [None if _check_for_none(self, item) else -1. 
* item for item in self.content] -1214 return Corr(newcontent, prange=self.prange) -1215 -1216 def __sub__(self, y): -1217 return self + (-y) +1061 comp = np.asarray(y.content, dtype=object) +1062 else: +1063 comp = np.asarray(y) +1064 return np.asarray(self.content, dtype=object) == comp +1065 +1066 def __add__(self, y): +1067 if isinstance(y, Corr): +1068 if ((self.N != y.N) or (self.T != y.T)): +1069 raise Exception("Addition of Corrs with different shape") +1070 newcontent = [] +1071 for t in range(self.T): +1072 if _check_for_none(self, self.content[t]) or _check_for_none(y, y.content[t]): +1073 newcontent.append(None) +1074 else: +1075 newcontent.append(self.content[t] + y.content[t]) +1076 return Corr(newcontent) +1077 +1078 elif isinstance(y, (Obs, int, float, CObs)): +1079 newcontent = [] +1080 for t in range(self.T): +1081 if _check_for_none(self, self.content[t]): +1082 newcontent.append(None) +1083 else: +1084 newcontent.append(self.content[t] + y) +1085 return Corr(newcontent, prange=self.prange) +1086 elif isinstance(y, np.ndarray): +1087 if y.shape == (self.T,): +1088 return Corr(list((np.array(self.content).T + y).T)) +1089 else: +1090 raise ValueError("operands could not be broadcast together") +1091 else: +1092 raise TypeError("Corr + wrong type") +1093 +1094 def __mul__(self, y): +1095 if isinstance(y, Corr): +1096 if not ((self.N == 1 or y.N == 1 or self.N == y.N) and self.T == y.T): +1097 raise Exception("Multiplication of Corr object requires N=N or N=1 and T=T") +1098 newcontent = [] +1099 for t in range(self.T): +1100 if _check_for_none(self, self.content[t]) or _check_for_none(y, y.content[t]): +1101 newcontent.append(None) +1102 else: +1103 newcontent.append(self.content[t] * y.content[t]) +1104 return Corr(newcontent) +1105 +1106 elif isinstance(y, (Obs, int, float, CObs)): +1107 newcontent = [] +1108 for t in range(self.T): +1109 if _check_for_none(self, self.content[t]): +1110 newcontent.append(None) +1111 else: +1112 newcontent.append(self.content[t] * y) +1113 return Corr(newcontent, prange=self.prange) +1114 elif isinstance(y, np.ndarray): +1115 if y.shape == (self.T,): +1116 return Corr(list((np.array(self.content).T * y).T)) +1117 else: +1118 raise ValueError("operands could not be broadcast together") +1119 else: +1120 raise TypeError("Corr * wrong type") +1121 +1122 def __matmul__(self, y): +1123 if isinstance(y, np.ndarray): +1124 if y.ndim != 2 or y.shape[0] != y.shape[1]: +1125 raise ValueError("Can only multiply correlators by square matrices.") +1126 if not self.N == y.shape[0]: +1127 raise ValueError("matmul: mismatch of matrix dimensions") +1128 newcontent = [] +1129 for t in range(self.T): +1130 if _check_for_none(self, self.content[t]): +1131 newcontent.append(None) +1132 else: +1133 newcontent.append(self.content[t] @ y) +1134 return Corr(newcontent) +1135 elif isinstance(y, Corr): +1136 if not self.N == y.N: +1137 raise ValueError("matmul: mismatch of matrix dimensions") +1138 newcontent = [] +1139 for t in range(self.T): +1140 if _check_for_none(self, self.content[t]) or _check_for_none(y, y.content[t]): +1141 newcontent.append(None) +1142 else: +1143 newcontent.append(self.content[t] @ y.content[t]) +1144 return Corr(newcontent) +1145 +1146 else: +1147 return NotImplemented +1148 +1149 def __rmatmul__(self, y): +1150 if isinstance(y, np.ndarray): +1151 if y.ndim != 2 or y.shape[0] != y.shape[1]: +1152 raise ValueError("Can only multiply correlators by square matrices.") +1153 if not self.N == y.shape[0]: +1154 raise ValueError("matmul: 
mismatch of matrix dimensions") +1155 newcontent = [] +1156 for t in range(self.T): +1157 if _check_for_none(self, self.content[t]): +1158 newcontent.append(None) +1159 else: +1160 newcontent.append(y @ self.content[t]) +1161 return Corr(newcontent) +1162 else: +1163 return NotImplemented +1164 +1165 def __truediv__(self, y): +1166 if isinstance(y, Corr): +1167 if not ((self.N == 1 or y.N == 1 or self.N == y.N) and self.T == y.T): +1168 raise Exception("Multiplication of Corr object requires N=N or N=1 and T=T") +1169 newcontent = [] +1170 for t in range(self.T): +1171 if _check_for_none(self, self.content[t]) or _check_for_none(y, y.content[t]): +1172 newcontent.append(None) +1173 else: +1174 newcontent.append(self.content[t] / y.content[t]) +1175 for t in range(self.T): +1176 if _check_for_none(self, newcontent[t]): +1177 continue +1178 if np.isnan(np.sum(newcontent[t]).value): +1179 newcontent[t] = None +1180 +1181 if all([item is None for item in newcontent]): +1182 raise Exception("Division returns completely undefined correlator") +1183 return Corr(newcontent) +1184 +1185 elif isinstance(y, (Obs, CObs)): +1186 if isinstance(y, Obs): +1187 if y.value == 0: +1188 raise Exception('Division by zero will return undefined correlator') +1189 if isinstance(y, CObs): +1190 if y.is_zero(): +1191 raise Exception('Division by zero will return undefined correlator') +1192 +1193 newcontent = [] +1194 for t in range(self.T): +1195 if _check_for_none(self, self.content[t]): +1196 newcontent.append(None) +1197 else: +1198 newcontent.append(self.content[t] / y) +1199 return Corr(newcontent, prange=self.prange) +1200 +1201 elif isinstance(y, (int, float)): +1202 if y == 0: +1203 raise Exception('Division by zero will return undefined correlator') +1204 newcontent = [] +1205 for t in range(self.T): +1206 if _check_for_none(self, self.content[t]): +1207 newcontent.append(None) +1208 else: +1209 newcontent.append(self.content[t] / y) +1210 return Corr(newcontent, prange=self.prange) +1211 elif isinstance(y, np.ndarray): +1212 if y.shape == (self.T,): +1213 return Corr(list((np.array(self.content).T / y).T)) +1214 else: +1215 raise ValueError("operands could not be broadcast together") +1216 else: +1217 raise TypeError('Corr / wrong type') 1218 -1219 def __pow__(self, y): -1220 if isinstance(y, (Obs, int, float, CObs)): -1221 newcontent = [None if _check_for_none(self, item) else item**y for item in self.content] -1222 return Corr(newcontent, prange=self.prange) -1223 else: -1224 raise TypeError('Type of exponent not supported') +1219 def __neg__(self): +1220 newcontent = [None if _check_for_none(self, item) else -1. 
* item for item in self.content] +1221 return Corr(newcontent, prange=self.prange) +1222 +1223 def __sub__(self, y): +1224 return self + (-y) 1225 -1226 def __abs__(self): -1227 newcontent = [None if _check_for_none(self, item) else np.abs(item) for item in self.content] -1228 return Corr(newcontent, prange=self.prange) -1229 -1230 # The numpy functions: -1231 def sqrt(self): -1232 return self ** 0.5 -1233 -1234 def log(self): -1235 newcontent = [None if _check_for_none(self, item) else np.log(item) for item in self.content] -1236 return Corr(newcontent, prange=self.prange) -1237 -1238 def exp(self): -1239 newcontent = [None if _check_for_none(self, item) else np.exp(item) for item in self.content] -1240 return Corr(newcontent, prange=self.prange) -1241 -1242 def _apply_func_to_corr(self, func): -1243 newcontent = [None if _check_for_none(self, item) else func(item) for item in self.content] -1244 for t in range(self.T): -1245 if _check_for_none(self, newcontent[t]): -1246 continue -1247 tmp_sum = np.sum(newcontent[t]) -1248 if hasattr(tmp_sum, "value"): -1249 if np.isnan(tmp_sum.value): -1250 newcontent[t] = None -1251 if all([item is None for item in newcontent]): -1252 raise Exception('Operation returns undefined correlator') -1253 return Corr(newcontent) -1254 -1255 def sin(self): -1256 return self._apply_func_to_corr(np.sin) -1257 -1258 def cos(self): -1259 return self._apply_func_to_corr(np.cos) -1260 -1261 def tan(self): -1262 return self._apply_func_to_corr(np.tan) -1263 -1264 def sinh(self): -1265 return self._apply_func_to_corr(np.sinh) -1266 -1267 def cosh(self): -1268 return self._apply_func_to_corr(np.cosh) -1269 -1270 def tanh(self): -1271 return self._apply_func_to_corr(np.tanh) -1272 -1273 def arcsin(self): -1274 return self._apply_func_to_corr(np.arcsin) -1275 -1276 def arccos(self): -1277 return self._apply_func_to_corr(np.arccos) -1278 -1279 def arctan(self): -1280 return self._apply_func_to_corr(np.arctan) -1281 -1282 def arcsinh(self): -1283 return self._apply_func_to_corr(np.arcsinh) -1284 -1285 def arccosh(self): -1286 return self._apply_func_to_corr(np.arccosh) -1287 -1288 def arctanh(self): -1289 return self._apply_func_to_corr(np.arctanh) -1290 -1291 # Right hand side operations (require tweak in main module to work) -1292 def __radd__(self, y): -1293 return self + y +1226 def __pow__(self, y): +1227 if isinstance(y, (Obs, int, float, CObs)): +1228 newcontent = [None if _check_for_none(self, item) else item**y for item in self.content] +1229 return Corr(newcontent, prange=self.prange) +1230 else: +1231 raise TypeError('Type of exponent not supported') +1232 +1233 def __abs__(self): +1234 newcontent = [None if _check_for_none(self, item) else np.abs(item) for item in self.content] +1235 return Corr(newcontent, prange=self.prange) +1236 +1237 # The numpy functions: +1238 def sqrt(self): +1239 return self ** 0.5 +1240 +1241 def log(self): +1242 newcontent = [None if _check_for_none(self, item) else np.log(item) for item in self.content] +1243 return Corr(newcontent, prange=self.prange) +1244 +1245 def exp(self): +1246 newcontent = [None if _check_for_none(self, item) else np.exp(item) for item in self.content] +1247 return Corr(newcontent, prange=self.prange) +1248 +1249 def _apply_func_to_corr(self, func): +1250 newcontent = [None if _check_for_none(self, item) else func(item) for item in self.content] +1251 for t in range(self.T): +1252 if _check_for_none(self, newcontent[t]): +1253 continue +1254 tmp_sum = np.sum(newcontent[t]) +1255 if hasattr(tmp_sum, "value"): 
+1256 if np.isnan(tmp_sum.value): +1257 newcontent[t] = None +1258 if all([item is None for item in newcontent]): +1259 raise Exception('Operation returns undefined correlator') +1260 return Corr(newcontent) +1261 +1262 def sin(self): +1263 return self._apply_func_to_corr(np.sin) +1264 +1265 def cos(self): +1266 return self._apply_func_to_corr(np.cos) +1267 +1268 def tan(self): +1269 return self._apply_func_to_corr(np.tan) +1270 +1271 def sinh(self): +1272 return self._apply_func_to_corr(np.sinh) +1273 +1274 def cosh(self): +1275 return self._apply_func_to_corr(np.cosh) +1276 +1277 def tanh(self): +1278 return self._apply_func_to_corr(np.tanh) +1279 +1280 def arcsin(self): +1281 return self._apply_func_to_corr(np.arcsin) +1282 +1283 def arccos(self): +1284 return self._apply_func_to_corr(np.arccos) +1285 +1286 def arctan(self): +1287 return self._apply_func_to_corr(np.arctan) +1288 +1289 def arcsinh(self): +1290 return self._apply_func_to_corr(np.arcsinh) +1291 +1292 def arccosh(self): +1293 return self._apply_func_to_corr(np.arccosh) 1294 -1295 def __rsub__(self, y): -1296 return -self + y +1295 def arctanh(self): +1296 return self._apply_func_to_corr(np.arctanh) 1297 -1298 def __rmul__(self, y): -1299 return self * y -1300 -1301 def __rtruediv__(self, y): -1302 return (self / y) ** (-1) -1303 -1304 @property -1305 def real(self): -1306 def return_real(obs_OR_cobs): -1307 if isinstance(obs_OR_cobs.flatten()[0], CObs): -1308 return np.vectorize(lambda x: x.real)(obs_OR_cobs) -1309 else: -1310 return obs_OR_cobs -1311 -1312 return self._apply_func_to_corr(return_real) -1313 -1314 @property -1315 def imag(self): -1316 def return_imag(obs_OR_cobs): -1317 if isinstance(obs_OR_cobs.flatten()[0], CObs): -1318 return np.vectorize(lambda x: x.imag)(obs_OR_cobs) -1319 else: -1320 return obs_OR_cobs * 0 # So it stays the right type -1321 -1322 return self._apply_func_to_corr(return_imag) -1323 -1324 def prune(self, Ntrunc, tproj=3, t0proj=2, basematrix=None): -1325 r''' Project large correlation matrix to lowest states -1326 -1327 This method can be used to reduce the size of an (N x N) correlation matrix -1328 to (Ntrunc x Ntrunc) by solving a GEVP at very early times where the noise -1329 is still small. +1298 # Right hand side operations (require tweak in main module to work) +1299 def __radd__(self, y): +1300 return self + y +1301 +1302 def __rsub__(self, y): +1303 return -self + y +1304 +1305 def __rmul__(self, y): +1306 return self * y +1307 +1308 def __rtruediv__(self, y): +1309 return (self / y) ** (-1) +1310 +1311 @property +1312 def real(self): +1313 def return_real(obs_OR_cobs): +1314 if isinstance(obs_OR_cobs.flatten()[0], CObs): +1315 return np.vectorize(lambda x: x.real)(obs_OR_cobs) +1316 else: +1317 return obs_OR_cobs +1318 +1319 return self._apply_func_to_corr(return_real) +1320 +1321 @property +1322 def imag(self): +1323 def return_imag(obs_OR_cobs): +1324 if isinstance(obs_OR_cobs.flatten()[0], CObs): +1325 return np.vectorize(lambda x: x.imag)(obs_OR_cobs) +1326 else: +1327 return obs_OR_cobs * 0 # So it stays the right type +1328 +1329 return self._apply_func_to_corr(return_imag) 1330 -1331 Parameters -1332 ---------- -1333 Ntrunc: int -1334 Rank of the target matrix. -1335 tproj: int -1336 Time where the eigenvectors are evaluated, corresponds to ts in the GEVP method. -1337 The default value is 3. -1338 t0proj: int -1339 Time where the correlation matrix is inverted. 
Choosing t0proj=1 is strongly -1340 discouraged for O(a) improved theories, since the correctness of the procedure -1341 cannot be granted in this case. The default value is 2. -1342 basematrix : Corr -1343 Correlation matrix that is used to determine the eigenvectors of the -1344 lowest states based on a GEVP. basematrix is taken to be the Corr itself if -1345 is is not specified. -1346 -1347 Notes -1348 ----- -1349 We have the basematrix $C(t)$ and the target matrix $G(t)$. We start by solving -1350 the GEVP $$C(t) v_n(t, t_0) = \lambda_n(t, t_0) C(t_0) v_n(t, t_0)$$ where $t \equiv t_\mathrm{proj}$ -1351 and $t_0 \equiv t_{0, \mathrm{proj}}$. The target matrix is projected onto the subspace of the -1352 resulting eigenvectors $v_n, n=1,\dots,N_\mathrm{trunc}$ via -1353 $$G^\prime_{i, j}(t) = (v_i, G(t) v_j)$$. This allows to reduce the size of a large -1354 correlation matrix and to remove some noise that is added by irrelevant operators. -1355 This may allow to use the GEVP on $G(t)$ at late times such that the theoretically motivated -1356 bound $t_0 \leq t/2$ holds, since the condition number of $G(t)$ is decreased, compared to $C(t)$. -1357 ''' -1358 -1359 if self.N == 1: -1360 raise Exception('Method cannot be applied to one-dimensional correlators.') -1361 if basematrix is None: -1362 basematrix = self -1363 if Ntrunc >= basematrix.N: -1364 raise Exception('Cannot truncate using Ntrunc <= %d' % (basematrix.N)) -1365 if basematrix.N != self.N: -1366 raise Exception('basematrix and targetmatrix have to be of the same size.') -1367 -1368 evecs = basematrix.GEVP(t0proj, tproj, sort=None)[:Ntrunc] -1369 -1370 tmpmat = np.empty((Ntrunc, Ntrunc), dtype=object) -1371 rmat = [] -1372 for t in range(basematrix.T): -1373 for i in range(Ntrunc): -1374 for j in range(Ntrunc): -1375 tmpmat[i][j] = evecs[i].T @ self[t] @ evecs[j] -1376 rmat.append(np.copy(tmpmat)) -1377 -1378 newcontent = [None if (self.content[t] is None) else rmat[t] for t in range(self.T)] -1379 return Corr(newcontent) -1380 -1381 -1382def _sort_vectors(vec_set, ts): -1383 """Helper function used to find a set of Eigenvectors consistent over all timeslices""" -1384 reference_sorting = np.array(vec_set[ts]) -1385 N = reference_sorting.shape[0] -1386 sorted_vec_set = [] -1387 for t in range(len(vec_set)): -1388 if vec_set[t] is None: -1389 sorted_vec_set.append(None) -1390 elif not t == ts: -1391 perms = [list(o) for o in permutations([i for i in range(N)], N)] -1392 best_score = 0 -1393 for perm in perms: -1394 current_score = 1 -1395 for k in range(N): -1396 new_sorting = reference_sorting.copy() -1397 new_sorting[perm[k], :] = vec_set[t][k] -1398 current_score *= abs(np.linalg.det(new_sorting)) -1399 if current_score > best_score: -1400 best_score = current_score -1401 best_perm = perm -1402 sorted_vec_set.append([vec_set[t][k] for k in best_perm]) -1403 else: -1404 sorted_vec_set.append(vec_set[t]) -1405 -1406 return sorted_vec_set -1407 -1408 -1409def _check_for_none(corr, entry): -1410 """Checks if entry for correlator corr is None""" -1411 return len(list(filter(None, np.asarray(entry).flatten()))) < corr.N ** 2 +1331 def prune(self, Ntrunc, tproj=3, t0proj=2, basematrix=None): +1332 r''' Project large correlation matrix to lowest states +1333 +1334 This method can be used to reduce the size of an (N x N) correlation matrix +1335 to (Ntrunc x Ntrunc) by solving a GEVP at very early times where the noise +1336 is still small. +1337 +1338 Parameters +1339 ---------- +1340 Ntrunc: int +1341 Rank of the target matrix. 
+1342 tproj: int +1343 Time where the eigenvectors are evaluated, corresponds to ts in the GEVP method. +1344 The default value is 3. +1345 t0proj: int +1346 Time where the correlation matrix is inverted. Choosing t0proj=1 is strongly +1347 discouraged for O(a) improved theories, since the correctness of the procedure +1348 cannot be granted in this case. The default value is 2. +1349 basematrix : Corr +1350 Correlation matrix that is used to determine the eigenvectors of the +1351 lowest states based on a GEVP. basematrix is taken to be the Corr itself if +1352 is is not specified. +1353 +1354 Notes +1355 ----- +1356 We have the basematrix $C(t)$ and the target matrix $G(t)$. We start by solving +1357 the GEVP $$C(t) v_n(t, t_0) = \lambda_n(t, t_0) C(t_0) v_n(t, t_0)$$ where $t \equiv t_\mathrm{proj}$ +1358 and $t_0 \equiv t_{0, \mathrm{proj}}$. The target matrix is projected onto the subspace of the +1359 resulting eigenvectors $v_n, n=1,\dots,N_\mathrm{trunc}$ via +1360 $$G^\prime_{i, j}(t) = (v_i, G(t) v_j)$$. This allows to reduce the size of a large +1361 correlation matrix and to remove some noise that is added by irrelevant operators. +1362 This may allow to use the GEVP on $G(t)$ at late times such that the theoretically motivated +1363 bound $t_0 \leq t/2$ holds, since the condition number of $G(t)$ is decreased, compared to $C(t)$. +1364 ''' +1365 +1366 if self.N == 1: +1367 raise Exception('Method cannot be applied to one-dimensional correlators.') +1368 if basematrix is None: +1369 basematrix = self +1370 if Ntrunc >= basematrix.N: +1371 raise Exception('Cannot truncate using Ntrunc <= %d' % (basematrix.N)) +1372 if basematrix.N != self.N: +1373 raise Exception('basematrix and targetmatrix have to be of the same size.') +1374 +1375 evecs = basematrix.GEVP(t0proj, tproj, sort=None)[:Ntrunc] +1376 +1377 tmpmat = np.empty((Ntrunc, Ntrunc), dtype=object) +1378 rmat = [] +1379 for t in range(basematrix.T): +1380 for i in range(Ntrunc): +1381 for j in range(Ntrunc): +1382 tmpmat[i][j] = evecs[i].T @ self[t] @ evecs[j] +1383 rmat.append(np.copy(tmpmat)) +1384 +1385 newcontent = [None if (self.content[t] is None) else rmat[t] for t in range(self.T)] +1386 return Corr(newcontent) +1387 +1388 +1389def _sort_vectors(vec_set, ts): +1390 """Helper function used to find a set of Eigenvectors consistent over all timeslices""" +1391 reference_sorting = np.array(vec_set[ts]) +1392 N = reference_sorting.shape[0] +1393 sorted_vec_set = [] +1394 for t in range(len(vec_set)): +1395 if vec_set[t] is None: +1396 sorted_vec_set.append(None) +1397 elif not t == ts: +1398 perms = [list(o) for o in permutations([i for i in range(N)], N)] +1399 best_score = 0 +1400 for perm in perms: +1401 current_score = 1 +1402 for k in range(N): +1403 new_sorting = reference_sorting.copy() +1404 new_sorting[perm[k], :] = vec_set[t][k] +1405 current_score *= abs(np.linalg.det(new_sorting)) +1406 if current_score > best_score: +1407 best_score = current_score +1408 best_perm = perm +1409 sorted_vec_set.append([vec_set[t][k] for k in best_perm]) +1410 else: +1411 sorted_vec_set.append(vec_set[t]) 1412 -1413 -1414def _GEVP_solver(Gt, G0): -1415 """Helper function for solving the GEVP and sorting the eigenvectors. -1416 -1417 The helper function assumes that both provided matrices are symmetric and -1418 only processes the lower triangular part of both matrices. 
In case the matrices -1419 are not symmetric the upper triangular parts are effectively discarded.""" -1420 return scipy.linalg.eigh(Gt, G0, lower=True)[1].T[::-1] +1413 return sorted_vec_set +1414 +1415 +1416def _check_for_none(corr, entry): +1417 """Checks if entry for correlator corr is None""" +1418 return len(list(filter(None, np.asarray(entry).flatten()))) < corr.N ** 2 +1419 +1420 +1421def _GEVP_solver(Gt, G0): +1422 """Helper function for solving the GEVP and sorting the eigenvectors. +1423 +1424 The helper function assumes that both provided matrices are symmetric and +1425 only processes the lower triangular part of both matrices. In case the matrices +1426 are not symmetric the upper triangular parts are effectively discarded.""" +1427 return scipy.linalg.eigh(Gt, G0, lower=True)[1].T[::-1] @@ -2722,327 +2729,334 @@ 1057 1058 __array_priority__ = 10000 1059 -1060 def __add__(self, y): +1060 def __eq__(self, y): 1061 if isinstance(y, Corr): -1062 if ((self.N != y.N) or (self.T != y.T)): -1063 raise Exception("Addition of Corrs with different shape") -1064 newcontent = [] -1065 for t in range(self.T): -1066 if _check_for_none(self, self.content[t]) or _check_for_none(y, y.content[t]): -1067 newcontent.append(None) -1068 else: -1069 newcontent.append(self.content[t] + y.content[t]) -1070 return Corr(newcontent) -1071 -1072 elif isinstance(y, (Obs, int, float, CObs)): -1073 newcontent = [] -1074 for t in range(self.T): -1075 if _check_for_none(self, self.content[t]): -1076 newcontent.append(None) -1077 else: -1078 newcontent.append(self.content[t] + y) -1079 return Corr(newcontent, prange=self.prange) -1080 elif isinstance(y, np.ndarray): -1081 if y.shape == (self.T,): -1082 return Corr(list((np.array(self.content).T + y).T)) -1083 else: -1084 raise ValueError("operands could not be broadcast together") -1085 else: -1086 raise TypeError("Corr + wrong type") -1087 -1088 def __mul__(self, y): -1089 if isinstance(y, Corr): -1090 if not ((self.N == 1 or y.N == 1 or self.N == y.N) and self.T == y.T): -1091 raise Exception("Multiplication of Corr object requires N=N or N=1 and T=T") -1092 newcontent = [] -1093 for t in range(self.T): -1094 if _check_for_none(self, self.content[t]) or _check_for_none(y, y.content[t]): -1095 newcontent.append(None) -1096 else: -1097 newcontent.append(self.content[t] * y.content[t]) -1098 return Corr(newcontent) -1099 -1100 elif isinstance(y, (Obs, int, float, CObs)): -1101 newcontent = [] -1102 for t in range(self.T): -1103 if _check_for_none(self, self.content[t]): -1104 newcontent.append(None) -1105 else: -1106 newcontent.append(self.content[t] * y) -1107 return Corr(newcontent, prange=self.prange) -1108 elif isinstance(y, np.ndarray): -1109 if y.shape == (self.T,): -1110 return Corr(list((np.array(self.content).T * y).T)) -1111 else: -1112 raise ValueError("operands could not be broadcast together") -1113 else: -1114 raise TypeError("Corr * wrong type") -1115 -1116 def __matmul__(self, y): -1117 if isinstance(y, np.ndarray): -1118 if y.ndim != 2 or y.shape[0] != y.shape[1]: -1119 raise ValueError("Can only multiply correlators by square matrices.") -1120 if not self.N == y.shape[0]: -1121 raise ValueError("matmul: mismatch of matrix dimensions") -1122 newcontent = [] -1123 for t in range(self.T): -1124 if _check_for_none(self, self.content[t]): -1125 newcontent.append(None) -1126 else: -1127 newcontent.append(self.content[t] @ y) -1128 return Corr(newcontent) -1129 elif isinstance(y, Corr): -1130 if not self.N == y.N: -1131 raise ValueError("matmul: 
mismatch of matrix dimensions") -1132 newcontent = [] -1133 for t in range(self.T): -1134 if _check_for_none(self, self.content[t]) or _check_for_none(y, y.content[t]): -1135 newcontent.append(None) -1136 else: -1137 newcontent.append(self.content[t] @ y.content[t]) -1138 return Corr(newcontent) -1139 -1140 else: -1141 return NotImplemented -1142 -1143 def __rmatmul__(self, y): -1144 if isinstance(y, np.ndarray): -1145 if y.ndim != 2 or y.shape[0] != y.shape[1]: -1146 raise ValueError("Can only multiply correlators by square matrices.") -1147 if not self.N == y.shape[0]: -1148 raise ValueError("matmul: mismatch of matrix dimensions") -1149 newcontent = [] -1150 for t in range(self.T): -1151 if _check_for_none(self, self.content[t]): -1152 newcontent.append(None) -1153 else: -1154 newcontent.append(y @ self.content[t]) -1155 return Corr(newcontent) -1156 else: -1157 return NotImplemented -1158 -1159 def __truediv__(self, y): -1160 if isinstance(y, Corr): -1161 if not ((self.N == 1 or y.N == 1 or self.N == y.N) and self.T == y.T): -1162 raise Exception("Multiplication of Corr object requires N=N or N=1 and T=T") -1163 newcontent = [] -1164 for t in range(self.T): -1165 if _check_for_none(self, self.content[t]) or _check_for_none(y, y.content[t]): -1166 newcontent.append(None) -1167 else: -1168 newcontent.append(self.content[t] / y.content[t]) -1169 for t in range(self.T): -1170 if _check_for_none(self, newcontent[t]): -1171 continue -1172 if np.isnan(np.sum(newcontent[t]).value): -1173 newcontent[t] = None -1174 -1175 if all([item is None for item in newcontent]): -1176 raise Exception("Division returns completely undefined correlator") -1177 return Corr(newcontent) -1178 -1179 elif isinstance(y, (Obs, CObs)): -1180 if isinstance(y, Obs): -1181 if y.value == 0: -1182 raise Exception('Division by zero will return undefined correlator') -1183 if isinstance(y, CObs): -1184 if y.is_zero(): -1185 raise Exception('Division by zero will return undefined correlator') -1186 -1187 newcontent = [] -1188 for t in range(self.T): -1189 if _check_for_none(self, self.content[t]): -1190 newcontent.append(None) -1191 else: -1192 newcontent.append(self.content[t] / y) -1193 return Corr(newcontent, prange=self.prange) -1194 -1195 elif isinstance(y, (int, float)): -1196 if y == 0: -1197 raise Exception('Division by zero will return undefined correlator') -1198 newcontent = [] -1199 for t in range(self.T): -1200 if _check_for_none(self, self.content[t]): -1201 newcontent.append(None) -1202 else: -1203 newcontent.append(self.content[t] / y) -1204 return Corr(newcontent, prange=self.prange) -1205 elif isinstance(y, np.ndarray): -1206 if y.shape == (self.T,): -1207 return Corr(list((np.array(self.content).T / y).T)) -1208 else: -1209 raise ValueError("operands could not be broadcast together") -1210 else: -1211 raise TypeError('Corr / wrong type') -1212 -1213 def __neg__(self): -1214 newcontent = [None if _check_for_none(self, item) else -1. 
* item for item in self.content] -1215 return Corr(newcontent, prange=self.prange) -1216 -1217 def __sub__(self, y): -1218 return self + (-y) +1062 comp = np.asarray(y.content, dtype=object) +1063 else: +1064 comp = np.asarray(y) +1065 return np.asarray(self.content, dtype=object) == comp +1066 +1067 def __add__(self, y): +1068 if isinstance(y, Corr): +1069 if ((self.N != y.N) or (self.T != y.T)): +1070 raise Exception("Addition of Corrs with different shape") +1071 newcontent = [] +1072 for t in range(self.T): +1073 if _check_for_none(self, self.content[t]) or _check_for_none(y, y.content[t]): +1074 newcontent.append(None) +1075 else: +1076 newcontent.append(self.content[t] + y.content[t]) +1077 return Corr(newcontent) +1078 +1079 elif isinstance(y, (Obs, int, float, CObs)): +1080 newcontent = [] +1081 for t in range(self.T): +1082 if _check_for_none(self, self.content[t]): +1083 newcontent.append(None) +1084 else: +1085 newcontent.append(self.content[t] + y) +1086 return Corr(newcontent, prange=self.prange) +1087 elif isinstance(y, np.ndarray): +1088 if y.shape == (self.T,): +1089 return Corr(list((np.array(self.content).T + y).T)) +1090 else: +1091 raise ValueError("operands could not be broadcast together") +1092 else: +1093 raise TypeError("Corr + wrong type") +1094 +1095 def __mul__(self, y): +1096 if isinstance(y, Corr): +1097 if not ((self.N == 1 or y.N == 1 or self.N == y.N) and self.T == y.T): +1098 raise Exception("Multiplication of Corr object requires N=N or N=1 and T=T") +1099 newcontent = [] +1100 for t in range(self.T): +1101 if _check_for_none(self, self.content[t]) or _check_for_none(y, y.content[t]): +1102 newcontent.append(None) +1103 else: +1104 newcontent.append(self.content[t] * y.content[t]) +1105 return Corr(newcontent) +1106 +1107 elif isinstance(y, (Obs, int, float, CObs)): +1108 newcontent = [] +1109 for t in range(self.T): +1110 if _check_for_none(self, self.content[t]): +1111 newcontent.append(None) +1112 else: +1113 newcontent.append(self.content[t] * y) +1114 return Corr(newcontent, prange=self.prange) +1115 elif isinstance(y, np.ndarray): +1116 if y.shape == (self.T,): +1117 return Corr(list((np.array(self.content).T * y).T)) +1118 else: +1119 raise ValueError("operands could not be broadcast together") +1120 else: +1121 raise TypeError("Corr * wrong type") +1122 +1123 def __matmul__(self, y): +1124 if isinstance(y, np.ndarray): +1125 if y.ndim != 2 or y.shape[0] != y.shape[1]: +1126 raise ValueError("Can only multiply correlators by square matrices.") +1127 if not self.N == y.shape[0]: +1128 raise ValueError("matmul: mismatch of matrix dimensions") +1129 newcontent = [] +1130 for t in range(self.T): +1131 if _check_for_none(self, self.content[t]): +1132 newcontent.append(None) +1133 else: +1134 newcontent.append(self.content[t] @ y) +1135 return Corr(newcontent) +1136 elif isinstance(y, Corr): +1137 if not self.N == y.N: +1138 raise ValueError("matmul: mismatch of matrix dimensions") +1139 newcontent = [] +1140 for t in range(self.T): +1141 if _check_for_none(self, self.content[t]) or _check_for_none(y, y.content[t]): +1142 newcontent.append(None) +1143 else: +1144 newcontent.append(self.content[t] @ y.content[t]) +1145 return Corr(newcontent) +1146 +1147 else: +1148 return NotImplemented +1149 +1150 def __rmatmul__(self, y): +1151 if isinstance(y, np.ndarray): +1152 if y.ndim != 2 or y.shape[0] != y.shape[1]: +1153 raise ValueError("Can only multiply correlators by square matrices.") +1154 if not self.N == y.shape[0]: +1155 raise ValueError("matmul: 
mismatch of matrix dimensions") +1156 newcontent = [] +1157 for t in range(self.T): +1158 if _check_for_none(self, self.content[t]): +1159 newcontent.append(None) +1160 else: +1161 newcontent.append(y @ self.content[t]) +1162 return Corr(newcontent) +1163 else: +1164 return NotImplemented +1165 +1166 def __truediv__(self, y): +1167 if isinstance(y, Corr): +1168 if not ((self.N == 1 or y.N == 1 or self.N == y.N) and self.T == y.T): +1169 raise Exception("Multiplication of Corr object requires N=N or N=1 and T=T") +1170 newcontent = [] +1171 for t in range(self.T): +1172 if _check_for_none(self, self.content[t]) or _check_for_none(y, y.content[t]): +1173 newcontent.append(None) +1174 else: +1175 newcontent.append(self.content[t] / y.content[t]) +1176 for t in range(self.T): +1177 if _check_for_none(self, newcontent[t]): +1178 continue +1179 if np.isnan(np.sum(newcontent[t]).value): +1180 newcontent[t] = None +1181 +1182 if all([item is None for item in newcontent]): +1183 raise Exception("Division returns completely undefined correlator") +1184 return Corr(newcontent) +1185 +1186 elif isinstance(y, (Obs, CObs)): +1187 if isinstance(y, Obs): +1188 if y.value == 0: +1189 raise Exception('Division by zero will return undefined correlator') +1190 if isinstance(y, CObs): +1191 if y.is_zero(): +1192 raise Exception('Division by zero will return undefined correlator') +1193 +1194 newcontent = [] +1195 for t in range(self.T): +1196 if _check_for_none(self, self.content[t]): +1197 newcontent.append(None) +1198 else: +1199 newcontent.append(self.content[t] / y) +1200 return Corr(newcontent, prange=self.prange) +1201 +1202 elif isinstance(y, (int, float)): +1203 if y == 0: +1204 raise Exception('Division by zero will return undefined correlator') +1205 newcontent = [] +1206 for t in range(self.T): +1207 if _check_for_none(self, self.content[t]): +1208 newcontent.append(None) +1209 else: +1210 newcontent.append(self.content[t] / y) +1211 return Corr(newcontent, prange=self.prange) +1212 elif isinstance(y, np.ndarray): +1213 if y.shape == (self.T,): +1214 return Corr(list((np.array(self.content).T / y).T)) +1215 else: +1216 raise ValueError("operands could not be broadcast together") +1217 else: +1218 raise TypeError('Corr / wrong type') 1219 -1220 def __pow__(self, y): -1221 if isinstance(y, (Obs, int, float, CObs)): -1222 newcontent = [None if _check_for_none(self, item) else item**y for item in self.content] -1223 return Corr(newcontent, prange=self.prange) -1224 else: -1225 raise TypeError('Type of exponent not supported') +1220 def __neg__(self): +1221 newcontent = [None if _check_for_none(self, item) else -1. 
* item for item in self.content] +1222 return Corr(newcontent, prange=self.prange) +1223 +1224 def __sub__(self, y): +1225 return self + (-y) 1226 -1227 def __abs__(self): -1228 newcontent = [None if _check_for_none(self, item) else np.abs(item) for item in self.content] -1229 return Corr(newcontent, prange=self.prange) -1230 -1231 # The numpy functions: -1232 def sqrt(self): -1233 return self ** 0.5 -1234 -1235 def log(self): -1236 newcontent = [None if _check_for_none(self, item) else np.log(item) for item in self.content] -1237 return Corr(newcontent, prange=self.prange) -1238 -1239 def exp(self): -1240 newcontent = [None if _check_for_none(self, item) else np.exp(item) for item in self.content] -1241 return Corr(newcontent, prange=self.prange) -1242 -1243 def _apply_func_to_corr(self, func): -1244 newcontent = [None if _check_for_none(self, item) else func(item) for item in self.content] -1245 for t in range(self.T): -1246 if _check_for_none(self, newcontent[t]): -1247 continue -1248 tmp_sum = np.sum(newcontent[t]) -1249 if hasattr(tmp_sum, "value"): -1250 if np.isnan(tmp_sum.value): -1251 newcontent[t] = None -1252 if all([item is None for item in newcontent]): -1253 raise Exception('Operation returns undefined correlator') -1254 return Corr(newcontent) -1255 -1256 def sin(self): -1257 return self._apply_func_to_corr(np.sin) -1258 -1259 def cos(self): -1260 return self._apply_func_to_corr(np.cos) -1261 -1262 def tan(self): -1263 return self._apply_func_to_corr(np.tan) -1264 -1265 def sinh(self): -1266 return self._apply_func_to_corr(np.sinh) -1267 -1268 def cosh(self): -1269 return self._apply_func_to_corr(np.cosh) -1270 -1271 def tanh(self): -1272 return self._apply_func_to_corr(np.tanh) -1273 -1274 def arcsin(self): -1275 return self._apply_func_to_corr(np.arcsin) -1276 -1277 def arccos(self): -1278 return self._apply_func_to_corr(np.arccos) -1279 -1280 def arctan(self): -1281 return self._apply_func_to_corr(np.arctan) -1282 -1283 def arcsinh(self): -1284 return self._apply_func_to_corr(np.arcsinh) -1285 -1286 def arccosh(self): -1287 return self._apply_func_to_corr(np.arccosh) -1288 -1289 def arctanh(self): -1290 return self._apply_func_to_corr(np.arctanh) -1291 -1292 # Right hand side operations (require tweak in main module to work) -1293 def __radd__(self, y): -1294 return self + y +1227 def __pow__(self, y): +1228 if isinstance(y, (Obs, int, float, CObs)): +1229 newcontent = [None if _check_for_none(self, item) else item**y for item in self.content] +1230 return Corr(newcontent, prange=self.prange) +1231 else: +1232 raise TypeError('Type of exponent not supported') +1233 +1234 def __abs__(self): +1235 newcontent = [None if _check_for_none(self, item) else np.abs(item) for item in self.content] +1236 return Corr(newcontent, prange=self.prange) +1237 +1238 # The numpy functions: +1239 def sqrt(self): +1240 return self ** 0.5 +1241 +1242 def log(self): +1243 newcontent = [None if _check_for_none(self, item) else np.log(item) for item in self.content] +1244 return Corr(newcontent, prange=self.prange) +1245 +1246 def exp(self): +1247 newcontent = [None if _check_for_none(self, item) else np.exp(item) for item in self.content] +1248 return Corr(newcontent, prange=self.prange) +1249 +1250 def _apply_func_to_corr(self, func): +1251 newcontent = [None if _check_for_none(self, item) else func(item) for item in self.content] +1252 for t in range(self.T): +1253 if _check_for_none(self, newcontent[t]): +1254 continue +1255 tmp_sum = np.sum(newcontent[t]) +1256 if hasattr(tmp_sum, "value"): 
+1257 if np.isnan(tmp_sum.value): +1258 newcontent[t] = None +1259 if all([item is None for item in newcontent]): +1260 raise Exception('Operation returns undefined correlator') +1261 return Corr(newcontent) +1262 +1263 def sin(self): +1264 return self._apply_func_to_corr(np.sin) +1265 +1266 def cos(self): +1267 return self._apply_func_to_corr(np.cos) +1268 +1269 def tan(self): +1270 return self._apply_func_to_corr(np.tan) +1271 +1272 def sinh(self): +1273 return self._apply_func_to_corr(np.sinh) +1274 +1275 def cosh(self): +1276 return self._apply_func_to_corr(np.cosh) +1277 +1278 def tanh(self): +1279 return self._apply_func_to_corr(np.tanh) +1280 +1281 def arcsin(self): +1282 return self._apply_func_to_corr(np.arcsin) +1283 +1284 def arccos(self): +1285 return self._apply_func_to_corr(np.arccos) +1286 +1287 def arctan(self): +1288 return self._apply_func_to_corr(np.arctan) +1289 +1290 def arcsinh(self): +1291 return self._apply_func_to_corr(np.arcsinh) +1292 +1293 def arccosh(self): +1294 return self._apply_func_to_corr(np.arccosh) 1295 -1296 def __rsub__(self, y): -1297 return -self + y +1296 def arctanh(self): +1297 return self._apply_func_to_corr(np.arctanh) 1298 -1299 def __rmul__(self, y): -1300 return self * y -1301 -1302 def __rtruediv__(self, y): -1303 return (self / y) ** (-1) -1304 -1305 @property -1306 def real(self): -1307 def return_real(obs_OR_cobs): -1308 if isinstance(obs_OR_cobs.flatten()[0], CObs): -1309 return np.vectorize(lambda x: x.real)(obs_OR_cobs) -1310 else: -1311 return obs_OR_cobs -1312 -1313 return self._apply_func_to_corr(return_real) -1314 -1315 @property -1316 def imag(self): -1317 def return_imag(obs_OR_cobs): -1318 if isinstance(obs_OR_cobs.flatten()[0], CObs): -1319 return np.vectorize(lambda x: x.imag)(obs_OR_cobs) -1320 else: -1321 return obs_OR_cobs * 0 # So it stays the right type -1322 -1323 return self._apply_func_to_corr(return_imag) -1324 -1325 def prune(self, Ntrunc, tproj=3, t0proj=2, basematrix=None): -1326 r''' Project large correlation matrix to lowest states -1327 -1328 This method can be used to reduce the size of an (N x N) correlation matrix -1329 to (Ntrunc x Ntrunc) by solving a GEVP at very early times where the noise -1330 is still small. +1299 # Right hand side operations (require tweak in main module to work) +1300 def __radd__(self, y): +1301 return self + y +1302 +1303 def __rsub__(self, y): +1304 return -self + y +1305 +1306 def __rmul__(self, y): +1307 return self * y +1308 +1309 def __rtruediv__(self, y): +1310 return (self / y) ** (-1) +1311 +1312 @property +1313 def real(self): +1314 def return_real(obs_OR_cobs): +1315 if isinstance(obs_OR_cobs.flatten()[0], CObs): +1316 return np.vectorize(lambda x: x.real)(obs_OR_cobs) +1317 else: +1318 return obs_OR_cobs +1319 +1320 return self._apply_func_to_corr(return_real) +1321 +1322 @property +1323 def imag(self): +1324 def return_imag(obs_OR_cobs): +1325 if isinstance(obs_OR_cobs.flatten()[0], CObs): +1326 return np.vectorize(lambda x: x.imag)(obs_OR_cobs) +1327 else: +1328 return obs_OR_cobs * 0 # So it stays the right type +1329 +1330 return self._apply_func_to_corr(return_imag) 1331 -1332 Parameters -1333 ---------- -1334 Ntrunc: int -1335 Rank of the target matrix. -1336 tproj: int -1337 Time where the eigenvectors are evaluated, corresponds to ts in the GEVP method. -1338 The default value is 3. -1339 t0proj: int -1340 Time where the correlation matrix is inverted. 
Choosing t0proj=1 is strongly -1341 discouraged for O(a) improved theories, since the correctness of the procedure -1342 cannot be granted in this case. The default value is 2. -1343 basematrix : Corr -1344 Correlation matrix that is used to determine the eigenvectors of the -1345 lowest states based on a GEVP. basematrix is taken to be the Corr itself if -1346 is is not specified. -1347 -1348 Notes -1349 ----- -1350 We have the basematrix $C(t)$ and the target matrix $G(t)$. We start by solving -1351 the GEVP $$C(t) v_n(t, t_0) = \lambda_n(t, t_0) C(t_0) v_n(t, t_0)$$ where $t \equiv t_\mathrm{proj}$ -1352 and $t_0 \equiv t_{0, \mathrm{proj}}$. The target matrix is projected onto the subspace of the -1353 resulting eigenvectors $v_n, n=1,\dots,N_\mathrm{trunc}$ via -1354 $$G^\prime_{i, j}(t) = (v_i, G(t) v_j)$$. This allows to reduce the size of a large -1355 correlation matrix and to remove some noise that is added by irrelevant operators. -1356 This may allow to use the GEVP on $G(t)$ at late times such that the theoretically motivated -1357 bound $t_0 \leq t/2$ holds, since the condition number of $G(t)$ is decreased, compared to $C(t)$. -1358 ''' -1359 -1360 if self.N == 1: -1361 raise Exception('Method cannot be applied to one-dimensional correlators.') -1362 if basematrix is None: -1363 basematrix = self -1364 if Ntrunc >= basematrix.N: -1365 raise Exception('Cannot truncate using Ntrunc <= %d' % (basematrix.N)) -1366 if basematrix.N != self.N: -1367 raise Exception('basematrix and targetmatrix have to be of the same size.') -1368 -1369 evecs = basematrix.GEVP(t0proj, tproj, sort=None)[:Ntrunc] -1370 -1371 tmpmat = np.empty((Ntrunc, Ntrunc), dtype=object) -1372 rmat = [] -1373 for t in range(basematrix.T): -1374 for i in range(Ntrunc): -1375 for j in range(Ntrunc): -1376 tmpmat[i][j] = evecs[i].T @ self[t] @ evecs[j] -1377 rmat.append(np.copy(tmpmat)) -1378 -1379 newcontent = [None if (self.content[t] is None) else rmat[t] for t in range(self.T)] -1380 return Corr(newcontent) +1332 def prune(self, Ntrunc, tproj=3, t0proj=2, basematrix=None): +1333 r''' Project large correlation matrix to lowest states +1334 +1335 This method can be used to reduce the size of an (N x N) correlation matrix +1336 to (Ntrunc x Ntrunc) by solving a GEVP at very early times where the noise +1337 is still small. +1338 +1339 Parameters +1340 ---------- +1341 Ntrunc: int +1342 Rank of the target matrix. +1343 tproj: int +1344 Time where the eigenvectors are evaluated, corresponds to ts in the GEVP method. +1345 The default value is 3. +1346 t0proj: int +1347 Time where the correlation matrix is inverted. Choosing t0proj=1 is strongly +1348 discouraged for O(a) improved theories, since the correctness of the procedure +1349 cannot be granted in this case. The default value is 2. +1350 basematrix : Corr +1351 Correlation matrix that is used to determine the eigenvectors of the +1352 lowest states based on a GEVP. basematrix is taken to be the Corr itself if +1353 is is not specified. +1354 +1355 Notes +1356 ----- +1357 We have the basematrix $C(t)$ and the target matrix $G(t)$. We start by solving +1358 the GEVP $$C(t) v_n(t, t_0) = \lambda_n(t, t_0) C(t_0) v_n(t, t_0)$$ where $t \equiv t_\mathrm{proj}$ +1359 and $t_0 \equiv t_{0, \mathrm{proj}}$. The target matrix is projected onto the subspace of the +1360 resulting eigenvectors $v_n, n=1,\dots,N_\mathrm{trunc}$ via +1361 $$G^\prime_{i, j}(t) = (v_i, G(t) v_j)$$. 
This allows to reduce the size of a large +1362 correlation matrix and to remove some noise that is added by irrelevant operators. +1363 This may allow to use the GEVP on $G(t)$ at late times such that the theoretically motivated +1364 bound $t_0 \leq t/2$ holds, since the condition number of $G(t)$ is decreased, compared to $C(t)$. +1365 ''' +1366 +1367 if self.N == 1: +1368 raise Exception('Method cannot be applied to one-dimensional correlators.') +1369 if basematrix is None: +1370 basematrix = self +1371 if Ntrunc >= basematrix.N: +1372 raise Exception('Cannot truncate using Ntrunc <= %d' % (basematrix.N)) +1373 if basematrix.N != self.N: +1374 raise Exception('basematrix and targetmatrix have to be of the same size.') +1375 +1376 evecs = basematrix.GEVP(t0proj, tproj, sort=None)[:Ntrunc] +1377 +1378 tmpmat = np.empty((Ntrunc, Ntrunc), dtype=object) +1379 rmat = [] +1380 for t in range(basematrix.T): +1381 for i in range(Ntrunc): +1382 for j in range(Ntrunc): +1383 tmpmat[i][j] = evecs[i].T @ self[t] @ evecs[j] +1384 rmat.append(np.copy(tmpmat)) +1385 +1386 newcontent = [None if (self.content[t] is None) else rmat[t] for t in range(self.T)] +1387 return Corr(newcontent) @@ -4900,8 +4914,8 @@ specifies a custom path for the file (default '.') -
1232    def sqrt(self):
-1233        return self ** 0.5
+            
1239    def sqrt(self):
+1240        return self ** 0.5
 
@@ -4919,9 +4933,9 @@ specifies a custom path for the file (default '.')
-
1235    def log(self):
-1236        newcontent = [None if _check_for_none(self, item) else np.log(item) for item in self.content]
-1237        return Corr(newcontent, prange=self.prange)
+            
1242    def log(self):
+1243        newcontent = [None if _check_for_none(self, item) else np.log(item) for item in self.content]
+1244        return Corr(newcontent, prange=self.prange)
 
@@ -4939,9 +4953,9 @@ specifies a custom path for the file (default '.')
-
1239    def exp(self):
-1240        newcontent = [None if _check_for_none(self, item) else np.exp(item) for item in self.content]
-1241        return Corr(newcontent, prange=self.prange)
+            
1246    def exp(self):
+1247        newcontent = [None if _check_for_none(self, item) else np.exp(item) for item in self.content]
+1248        return Corr(newcontent, prange=self.prange)
 
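For orientation: sqrt, log and exp (and the trigonometric wrappers further down) apply the corresponding numpy function elementwise to every timeslice of the correlator, leaving None entries untouched. A minimal usage sketch, assuming an installed pyerrors; the pseudo-data values below are invented for illustration and pe.pseudo_Obs(value, dvalue, name) is used to fabricate observables with a given error:

    import numpy as np
    import pyerrors as pe

    # Toy one-dimensional correlator built from pseudo-observables.
    corr = pe.Corr([pe.pseudo_Obs(np.exp(-0.5 * t), 0.01, 'ensemble1') for t in range(8)])

    log_corr = corr.log()    # elementwise np.log on each timeslice
    sqrt_corr = corr.sqrt()  # implemented as corr ** 0.5

If an operation produces NaN on every timeslice, _apply_func_to_corr raises an exception instead of returning a completely undefined correlator.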
@@ -4959,8 +4973,8 @@ specifies a custom path for the file (default '.')
-
1256    def sin(self):
-1257        return self._apply_func_to_corr(np.sin)
+            
1263    def sin(self):
+1264        return self._apply_func_to_corr(np.sin)
 
@@ -4978,8 +4992,8 @@ specifies a custom path for the file (default '.')
-
1259    def cos(self):
-1260        return self._apply_func_to_corr(np.cos)
+            
1266    def cos(self):
+1267        return self._apply_func_to_corr(np.cos)
 
@@ -4997,8 +5011,8 @@ specifies a custom path for the file (default '.')
-
1262    def tan(self):
-1263        return self._apply_func_to_corr(np.tan)
+            
1269    def tan(self):
+1270        return self._apply_func_to_corr(np.tan)
 
@@ -5016,8 +5030,8 @@ specifies a custom path for the file (default '.')
-
1265    def sinh(self):
-1266        return self._apply_func_to_corr(np.sinh)
+            
1272    def sinh(self):
+1273        return self._apply_func_to_corr(np.sinh)
 
@@ -5035,8 +5049,8 @@ specifies a custom path for the file (default '.')
-
1268    def cosh(self):
-1269        return self._apply_func_to_corr(np.cosh)
+            
1275    def cosh(self):
+1276        return self._apply_func_to_corr(np.cosh)
 
@@ -5054,8 +5068,8 @@ specifies a custom path for the file (default '.')
-
1271    def tanh(self):
-1272        return self._apply_func_to_corr(np.tanh)
+            
1278    def tanh(self):
+1279        return self._apply_func_to_corr(np.tanh)
 
@@ -5073,8 +5087,8 @@ specifies a custom path for the file (default '.')
-
1274    def arcsin(self):
-1275        return self._apply_func_to_corr(np.arcsin)
+            
1281    def arcsin(self):
+1282        return self._apply_func_to_corr(np.arcsin)
 
@@ -5092,8 +5106,8 @@ specifies a custom path for the file (default '.')
-
1277    def arccos(self):
-1278        return self._apply_func_to_corr(np.arccos)
+            
1284    def arccos(self):
+1285        return self._apply_func_to_corr(np.arccos)
 
@@ -5111,8 +5125,8 @@ specifies a custom path for the file (default '.')
-
1280    def arctan(self):
-1281        return self._apply_func_to_corr(np.arctan)
+            
1287    def arctan(self):
+1288        return self._apply_func_to_corr(np.arctan)
 
@@ -5130,8 +5144,8 @@ specifies a custom path for the file (default '.')
-
1283    def arcsinh(self):
-1284        return self._apply_func_to_corr(np.arcsinh)
+            
1290    def arcsinh(self):
+1291        return self._apply_func_to_corr(np.arcsinh)
 
@@ -5149,8 +5163,8 @@ specifies a custom path for the file (default '.')
-
1286    def arccosh(self):
-1287        return self._apply_func_to_corr(np.arccosh)
+            
1293    def arccosh(self):
+1294        return self._apply_func_to_corr(np.arccosh)
 
@@ -5168,8 +5182,8 @@ specifies a custom path for the file (default '.')
-
1289    def arctanh(self):
-1290        return self._apply_func_to_corr(np.arctanh)
+            
1296    def arctanh(self):
+1297        return self._apply_func_to_corr(np.arctanh)
 
@@ -5209,62 +5223,62 @@ specifies a custom path for the file (default '.')
-
1325    def prune(self, Ntrunc, tproj=3, t0proj=2, basematrix=None):
-1326        r''' Project large correlation matrix to lowest states
-1327
-1328        This method can be used to reduce the size of an (N x N) correlation matrix
-1329        to (Ntrunc x Ntrunc) by solving a GEVP at very early times where the noise
-1330        is still small.
-1331
-1332        Parameters
-1333        ----------
-1334        Ntrunc: int
-1335            Rank of the target matrix.
-1336        tproj: int
-1337            Time at which the eigenvectors are evaluated; corresponds to ts in the GEVP method.
-1338            The default value is 3.
-1339        t0proj: int
-1340            Time at which the correlation matrix is inverted. Choosing t0proj=1 is strongly
-1341            discouraged for O(a) improved theories, since the correctness of the procedure
-1342            cannot be guaranteed in this case. The default value is 2.
-1343        basematrix : Corr
-1344            Correlation matrix that is used to determine the eigenvectors of the
-1345            lowest states based on a GEVP. basematrix is taken to be the Corr itself if
-1346            it is not specified.
-1347
-1348        Notes
-1349        -----
-1350        We have the basematrix $C(t)$ and the target matrix $G(t)$. We start by solving
-1351        the GEVP $$C(t) v_n(t, t_0) = \lambda_n(t, t_0) C(t_0) v_n(t, t_0)$$ where $t \equiv t_\mathrm{proj}$
-1352        and $t_0 \equiv t_{0, \mathrm{proj}}$. The target matrix is projected onto the subspace of the
-1353        resulting eigenvectors $v_n, n=1,\dots,N_\mathrm{trunc}$ via
-1354        $$G^\prime_{i, j}(t) = (v_i, G(t) v_j)$$. This allows one to reduce the size of a large
-1355        correlation matrix and to remove some noise that is added by irrelevant operators.
-1356        This may allow one to use the GEVP on $G(t)$ at late times such that the theoretically motivated
-1357        bound $t_0 \leq t/2$ holds, since the condition number of $G(t)$ is decreased compared to $C(t)$.
-1358        '''
-1359
-1360        if self.N == 1:
-1361            raise Exception('Method cannot be applied to one-dimensional correlators.')
-1362        if basematrix is None:
-1363            basematrix = self
-1364        if Ntrunc >= basematrix.N:
-1365            raise Exception('Cannot truncate using Ntrunc >= %d' % (basematrix.N))
-1366        if basematrix.N != self.N:
-1367            raise Exception('basematrix and targetmatrix have to be of the same size.')
-1368
-1369        evecs = basematrix.GEVP(t0proj, tproj, sort=None)[:Ntrunc]
-1370
-1371        tmpmat = np.empty((Ntrunc, Ntrunc), dtype=object)
-1372        rmat = []
-1373        for t in range(basematrix.T):
-1374            for i in range(Ntrunc):
-1375                for j in range(Ntrunc):
-1376                    tmpmat[i][j] = evecs[i].T @ self[t] @ evecs[j]
-1377            rmat.append(np.copy(tmpmat))
-1378
-1379        newcontent = [None if (self.content[t] is None) else rmat[t] for t in range(self.T)]
-1380        return Corr(newcontent)
+            
1332    def prune(self, Ntrunc, tproj=3, t0proj=2, basematrix=None):
+1333        r''' Project large correlation matrix to lowest states
+1334
+1335        This method can be used to reduce the size of an (N x N) correlation matrix
+1336        to (Ntrunc x Ntrunc) by solving a GEVP at very early times where the noise
+1337        is still small.
+1338
+1339        Parameters
+1340        ----------
+1341        Ntrunc: int
+1342            Rank of the target matrix.
+1343        tproj: int
+1344            Time where the eigenvectors are evaluated; corresponds to ts in the GEVP method.
+1345            The default value is 3.
+1346        t0proj: int
+1347            Time where the correlation matrix is inverted. Choosing t0proj=1 is strongly
+1348            discouraged for O(a) improved theories, since the correctness of the procedure
+1349            cannot be guaranteed in this case. The default value is 2.
+1350        basematrix : Corr
+1351            Correlation matrix that is used to determine the eigenvectors of the
+1352            lowest states based on a GEVP. basematrix is taken to be the Corr itself if
+1353            it is not specified.
+1354
+1355        Notes
+1356        -----
+1357        We have the basematrix $C(t)$ and the target matrix $G(t)$. We start by solving
+1358        the GEVP $$C(t) v_n(t, t_0) = \lambda_n(t, t_0) C(t_0) v_n(t, t_0)$$ where $t \equiv t_\mathrm{proj}$
+1359        and $t_0 \equiv t_{0, \mathrm{proj}}$. The target matrix is projected onto the subspace of the
+1360        resulting eigenvectors $v_n, n=1,\dots,N_\mathrm{trunc}$ via
+1361        $$G^\prime_{i, j}(t) = (v_i, G(t) v_j)$$. This allows one to reduce the size of a large
+1362        correlation matrix and to remove some of the noise that is added by irrelevant operators.
+1363        This may allow one to use the GEVP on $G(t)$ at late times such that the theoretically motivated
+1364        bound $t_0 \leq t/2$ holds, since the condition number of $G(t)$ is decreased compared to $C(t)$.
+1365        '''
+1366
+1367        if self.N == 1:
+1368            raise Exception('Method cannot be applied to one-dimensional correlators.')
+1369        if basematrix is None:
+1370            basematrix = self
+1371        if Ntrunc >= basematrix.N:
+1372            raise Exception('Cannot truncate using Ntrunc >= %d' % (basematrix.N))
+1373        if basematrix.N != self.N:
+1374            raise Exception('basematrix and targetmatrix have to be of the same size.')
+1375
+1376        evecs = basematrix.GEVP(t0proj, tproj, sort=None)[:Ntrunc]
+1377
+1378        tmpmat = np.empty((Ntrunc, Ntrunc), dtype=object)
+1379        rmat = []
+1380        for t in range(basematrix.T):
+1381            for i in range(Ntrunc):
+1382                for j in range(Ntrunc):
+1383                    tmpmat[i][j] = evecs[i].T @ self[t] @ evecs[j]
+1384            rmat.append(np.copy(tmpmat))
+1385
+1386        newcontent = [None if (self.content[t] is None) else rmat[t] for t in range(self.T)]
+1387        return Corr(newcontent)
 
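A minimal usage sketch for `prune`; here `matrix_corr` is a placeholder for an existing N x N matrix Corr with N larger than the requested rank:

    # project an N x N correlator matrix onto its two lowest states
    pruned = matrix_corr.prune(Ntrunc=2, tproj=3, t0proj=2)

    # the truncated 2 x 2 system can then be analysed as usual, e.g. with a
    # GEVP at later times where the bound t0 <= t/2 can be respected
    vecs = pruned.GEVP(t0=4)
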
diff --git a/docs/pyerrors/obs.html b/docs/pyerrors/obs.html index 9f056695..d933ff5d 100644 --- a/docs/pyerrors/obs.html +++ b/docs/pyerrors/obs.html @@ -1103,982 +1103,981 @@
773 return self.value >= other 774 775 def __eq__(self, other): - 776 return (self - other).is_zero() - 777 - 778 def __ne__(self, other): - 779 return not (self - other).is_zero() - 780 - 781 # Overload math operations - 782 def __add__(self, y): - 783 if isinstance(y, Obs): - 784 return derived_observable(lambda x, **kwargs: x[0] + x[1], [self, y], man_grad=[1, 1]) - 785 else: - 786 if isinstance(y, np.ndarray): - 787 return np.array([self + o for o in y]) - 788 elif y.__class__.__name__ in ['Corr', 'CObs']: - 789 return NotImplemented - 790 else: - 791 return derived_observable(lambda x, **kwargs: x[0] + y, [self], man_grad=[1]) - 792 - 793 def __radd__(self, y): - 794 return self + y - 795 - 796 def __mul__(self, y): - 797 if isinstance(y, Obs): - 798 return derived_observable(lambda x, **kwargs: x[0] * x[1], [self, y], man_grad=[y.value, self.value]) - 799 else: - 800 if isinstance(y, np.ndarray): - 801 return np.array([self * o for o in y]) - 802 elif isinstance(y, complex): - 803 return CObs(self * y.real, self * y.imag) - 804 elif y.__class__.__name__ in ['Corr', 'CObs']: - 805 return NotImplemented - 806 else: - 807 return derived_observable(lambda x, **kwargs: x[0] * y, [self], man_grad=[y]) - 808 - 809 def __rmul__(self, y): - 810 return self * y - 811 - 812 def __sub__(self, y): - 813 if isinstance(y, Obs): - 814 return derived_observable(lambda x, **kwargs: x[0] - x[1], [self, y], man_grad=[1, -1]) - 815 else: - 816 if isinstance(y, np.ndarray): - 817 return np.array([self - o for o in y]) - 818 elif y.__class__.__name__ in ['Corr', 'CObs']: - 819 return NotImplemented - 820 else: - 821 return derived_observable(lambda x, **kwargs: x[0] - y, [self], man_grad=[1]) - 822 - 823 def __rsub__(self, y): - 824 return -1 * (self - y) - 825 - 826 def __pos__(self): - 827 return self - 828 - 829 def __neg__(self): - 830 return -1 * self - 831 - 832 def __truediv__(self, y): - 833 if isinstance(y, Obs): - 834 return derived_observable(lambda x, **kwargs: x[0] / x[1], [self, y], man_grad=[1 / y.value, - self.value / y.value ** 2]) - 835 else: - 836 if isinstance(y, np.ndarray): - 837 return np.array([self / o for o in y]) - 838 elif y.__class__.__name__ in ['Corr', 'CObs']: - 839 return NotImplemented - 840 else: - 841 return derived_observable(lambda x, **kwargs: x[0] / y, [self], man_grad=[1 / y]) - 842 - 843 def __rtruediv__(self, y): - 844 if isinstance(y, Obs): - 845 return derived_observable(lambda x, **kwargs: x[0] / x[1], [y, self], man_grad=[1 / self.value, - y.value / self.value ** 2]) - 846 else: - 847 if isinstance(y, np.ndarray): - 848 return np.array([o / self for o in y]) - 849 elif y.__class__.__name__ in ['Corr', 'CObs']: - 850 return NotImplemented - 851 else: - 852 return derived_observable(lambda x, **kwargs: y / x[0], [self], man_grad=[-y / self.value ** 2]) - 853 - 854 def __pow__(self, y): - 855 if isinstance(y, Obs): - 856 return derived_observable(lambda x: x[0] ** x[1], [self, y]) - 857 else: - 858 return derived_observable(lambda x: x[0] ** y, [self]) - 859 - 860 def __rpow__(self, y): - 861 if isinstance(y, Obs): - 862 return derived_observable(lambda x: x[0] ** x[1], [y, self]) - 863 else: - 864 return derived_observable(lambda x: y ** x[0], [self]) - 865 - 866 def __abs__(self): - 867 return derived_observable(lambda x: anp.abs(x[0]), [self]) - 868 - 869 # Overload numpy functions - 870 def sqrt(self): - 871 return derived_observable(lambda x, **kwargs: np.sqrt(x[0]), [self], man_grad=[1 / 2 / np.sqrt(self.value)]) - 872 - 873 def log(self): - 874 return 
derived_observable(lambda x, **kwargs: np.log(x[0]), [self], man_grad=[1 / self.value]) - 875 - 876 def exp(self): - 877 return derived_observable(lambda x, **kwargs: np.exp(x[0]), [self], man_grad=[np.exp(self.value)]) - 878 - 879 def sin(self): - 880 return derived_observable(lambda x, **kwargs: np.sin(x[0]), [self], man_grad=[np.cos(self.value)]) - 881 - 882 def cos(self): - 883 return derived_observable(lambda x, **kwargs: np.cos(x[0]), [self], man_grad=[-np.sin(self.value)]) - 884 - 885 def tan(self): - 886 return derived_observable(lambda x, **kwargs: np.tan(x[0]), [self], man_grad=[1 / np.cos(self.value) ** 2]) - 887 - 888 def arcsin(self): - 889 return derived_observable(lambda x: anp.arcsin(x[0]), [self]) - 890 - 891 def arccos(self): - 892 return derived_observable(lambda x: anp.arccos(x[0]), [self]) - 893 - 894 def arctan(self): - 895 return derived_observable(lambda x: anp.arctan(x[0]), [self]) - 896 - 897 def sinh(self): - 898 return derived_observable(lambda x, **kwargs: np.sinh(x[0]), [self], man_grad=[np.cosh(self.value)]) - 899 - 900 def cosh(self): - 901 return derived_observable(lambda x, **kwargs: np.cosh(x[0]), [self], man_grad=[np.sinh(self.value)]) - 902 - 903 def tanh(self): - 904 return derived_observable(lambda x, **kwargs: np.tanh(x[0]), [self], man_grad=[1 / np.cosh(self.value) ** 2]) - 905 - 906 def arcsinh(self): - 907 return derived_observable(lambda x: anp.arcsinh(x[0]), [self]) - 908 - 909 def arccosh(self): - 910 return derived_observable(lambda x: anp.arccosh(x[0]), [self]) - 911 - 912 def arctanh(self): - 913 return derived_observable(lambda x: anp.arctanh(x[0]), [self]) + 776 if other is None: + 777 return False + 778 return (self - other).is_zero() + 779 + 780 # Overload math operations + 781 def __add__(self, y): + 782 if isinstance(y, Obs): + 783 return derived_observable(lambda x, **kwargs: x[0] + x[1], [self, y], man_grad=[1, 1]) + 784 else: + 785 if isinstance(y, np.ndarray): + 786 return np.array([self + o for o in y]) + 787 elif y.__class__.__name__ in ['Corr', 'CObs']: + 788 return NotImplemented + 789 else: + 790 return derived_observable(lambda x, **kwargs: x[0] + y, [self], man_grad=[1]) + 791 + 792 def __radd__(self, y): + 793 return self + y + 794 + 795 def __mul__(self, y): + 796 if isinstance(y, Obs): + 797 return derived_observable(lambda x, **kwargs: x[0] * x[1], [self, y], man_grad=[y.value, self.value]) + 798 else: + 799 if isinstance(y, np.ndarray): + 800 return np.array([self * o for o in y]) + 801 elif isinstance(y, complex): + 802 return CObs(self * y.real, self * y.imag) + 803 elif y.__class__.__name__ in ['Corr', 'CObs']: + 804 return NotImplemented + 805 else: + 806 return derived_observable(lambda x, **kwargs: x[0] * y, [self], man_grad=[y]) + 807 + 808 def __rmul__(self, y): + 809 return self * y + 810 + 811 def __sub__(self, y): + 812 if isinstance(y, Obs): + 813 return derived_observable(lambda x, **kwargs: x[0] - x[1], [self, y], man_grad=[1, -1]) + 814 else: + 815 if isinstance(y, np.ndarray): + 816 return np.array([self - o for o in y]) + 817 elif y.__class__.__name__ in ['Corr', 'CObs']: + 818 return NotImplemented + 819 else: + 820 return derived_observable(lambda x, **kwargs: x[0] - y, [self], man_grad=[1]) + 821 + 822 def __rsub__(self, y): + 823 return -1 * (self - y) + 824 + 825 def __pos__(self): + 826 return self + 827 + 828 def __neg__(self): + 829 return -1 * self + 830 + 831 def __truediv__(self, y): + 832 if isinstance(y, Obs): + 833 return derived_observable(lambda x, **kwargs: x[0] / x[1], [self, y], 
man_grad=[1 / y.value, - self.value / y.value ** 2]) + 834 else: + 835 if isinstance(y, np.ndarray): + 836 return np.array([self / o for o in y]) + 837 elif y.__class__.__name__ in ['Corr', 'CObs']: + 838 return NotImplemented + 839 else: + 840 return derived_observable(lambda x, **kwargs: x[0] / y, [self], man_grad=[1 / y]) + 841 + 842 def __rtruediv__(self, y): + 843 if isinstance(y, Obs): + 844 return derived_observable(lambda x, **kwargs: x[0] / x[1], [y, self], man_grad=[1 / self.value, - y.value / self.value ** 2]) + 845 else: + 846 if isinstance(y, np.ndarray): + 847 return np.array([o / self for o in y]) + 848 elif y.__class__.__name__ in ['Corr', 'CObs']: + 849 return NotImplemented + 850 else: + 851 return derived_observable(lambda x, **kwargs: y / x[0], [self], man_grad=[-y / self.value ** 2]) + 852 + 853 def __pow__(self, y): + 854 if isinstance(y, Obs): + 855 return derived_observable(lambda x: x[0] ** x[1], [self, y]) + 856 else: + 857 return derived_observable(lambda x: x[0] ** y, [self]) + 858 + 859 def __rpow__(self, y): + 860 if isinstance(y, Obs): + 861 return derived_observable(lambda x: x[0] ** x[1], [y, self]) + 862 else: + 863 return derived_observable(lambda x: y ** x[0], [self]) + 864 + 865 def __abs__(self): + 866 return derived_observable(lambda x: anp.abs(x[0]), [self]) + 867 + 868 # Overload numpy functions + 869 def sqrt(self): + 870 return derived_observable(lambda x, **kwargs: np.sqrt(x[0]), [self], man_grad=[1 / 2 / np.sqrt(self.value)]) + 871 + 872 def log(self): + 873 return derived_observable(lambda x, **kwargs: np.log(x[0]), [self], man_grad=[1 / self.value]) + 874 + 875 def exp(self): + 876 return derived_observable(lambda x, **kwargs: np.exp(x[0]), [self], man_grad=[np.exp(self.value)]) + 877 + 878 def sin(self): + 879 return derived_observable(lambda x, **kwargs: np.sin(x[0]), [self], man_grad=[np.cos(self.value)]) + 880 + 881 def cos(self): + 882 return derived_observable(lambda x, **kwargs: np.cos(x[0]), [self], man_grad=[-np.sin(self.value)]) + 883 + 884 def tan(self): + 885 return derived_observable(lambda x, **kwargs: np.tan(x[0]), [self], man_grad=[1 / np.cos(self.value) ** 2]) + 886 + 887 def arcsin(self): + 888 return derived_observable(lambda x: anp.arcsin(x[0]), [self]) + 889 + 890 def arccos(self): + 891 return derived_observable(lambda x: anp.arccos(x[0]), [self]) + 892 + 893 def arctan(self): + 894 return derived_observable(lambda x: anp.arctan(x[0]), [self]) + 895 + 896 def sinh(self): + 897 return derived_observable(lambda x, **kwargs: np.sinh(x[0]), [self], man_grad=[np.cosh(self.value)]) + 898 + 899 def cosh(self): + 900 return derived_observable(lambda x, **kwargs: np.cosh(x[0]), [self], man_grad=[np.sinh(self.value)]) + 901 + 902 def tanh(self): + 903 return derived_observable(lambda x, **kwargs: np.tanh(x[0]), [self], man_grad=[1 / np.cosh(self.value) ** 2]) + 904 + 905 def arcsinh(self): + 906 return derived_observable(lambda x: anp.arcsinh(x[0]), [self]) + 907 + 908 def arccosh(self): + 909 return derived_observable(lambda x: anp.arccosh(x[0]), [self]) + 910 + 911 def arctanh(self): + 912 return derived_observable(lambda x: anp.arctanh(x[0]), [self]) + 913 914 - 915 - 916class CObs: - 917 """Class for a complex valued observable.""" - 918 __slots__ = ['_real', '_imag', 'tag'] - 919 - 920 def __init__(self, real, imag=0.0): - 921 self._real = real - 922 self._imag = imag - 923 self.tag = None - 924 - 925 @property - 926 def real(self): - 927 return self._real - 928 - 929 @property - 930 def imag(self): - 931 return self._imag - 
932 - 933 def gamma_method(self, **kwargs): - 934 """Executes the gamma_method for the real and the imaginary part.""" - 935 if isinstance(self.real, Obs): - 936 self.real.gamma_method(**kwargs) - 937 if isinstance(self.imag, Obs): - 938 self.imag.gamma_method(**kwargs) - 939 - 940 def is_zero(self): - 941 """Checks whether both real and imaginary part are zero within machine precision.""" - 942 return self.real == 0.0 and self.imag == 0.0 - 943 - 944 def conjugate(self): - 945 return CObs(self.real, -self.imag) - 946 - 947 def __add__(self, other): - 948 if isinstance(other, np.ndarray): - 949 return other + self - 950 elif hasattr(other, 'real') and hasattr(other, 'imag'): - 951 return CObs(self.real + other.real, - 952 self.imag + other.imag) - 953 else: - 954 return CObs(self.real + other, self.imag) - 955 - 956 def __radd__(self, y): - 957 return self + y - 958 - 959 def __sub__(self, other): - 960 if isinstance(other, np.ndarray): - 961 return -1 * (other - self) - 962 elif hasattr(other, 'real') and hasattr(other, 'imag'): - 963 return CObs(self.real - other.real, self.imag - other.imag) - 964 else: - 965 return CObs(self.real - other, self.imag) - 966 - 967 def __rsub__(self, other): - 968 return -1 * (self - other) - 969 - 970 def __mul__(self, other): - 971 if isinstance(other, np.ndarray): - 972 return other * self - 973 elif hasattr(other, 'real') and hasattr(other, 'imag'): - 974 if all(isinstance(i, Obs) for i in [self.real, self.imag, other.real, other.imag]): - 975 return CObs(derived_observable(lambda x, **kwargs: x[0] * x[1] - x[2] * x[3], - 976 [self.real, other.real, self.imag, other.imag], - 977 man_grad=[other.real.value, self.real.value, -other.imag.value, -self.imag.value]), - 978 derived_observable(lambda x, **kwargs: x[2] * x[1] + x[0] * x[3], - 979 [self.real, other.real, self.imag, other.imag], - 980 man_grad=[other.imag.value, self.imag.value, other.real.value, self.real.value])) - 981 elif getattr(other, 'imag', 0) != 0: - 982 return CObs(self.real * other.real - self.imag * other.imag, - 983 self.imag * other.real + self.real * other.imag) - 984 else: - 985 return CObs(self.real * other.real, self.imag * other.real) - 986 else: - 987 return CObs(self.real * other, self.imag * other) - 988 - 989 def __rmul__(self, other): - 990 return self * other - 991 - 992 def __truediv__(self, other): - 993 if isinstance(other, np.ndarray): - 994 return 1 / (other / self) - 995 elif hasattr(other, 'real') and hasattr(other, 'imag'): - 996 r = other.real ** 2 + other.imag ** 2 - 997 return CObs((self.real * other.real + self.imag * other.imag) / r, (self.imag * other.real - self.real * other.imag) / r) - 998 else: - 999 return CObs(self.real / other, self.imag / other) -1000 -1001 def __rtruediv__(self, other): -1002 r = self.real ** 2 + self.imag ** 2 -1003 if hasattr(other, 'real') and hasattr(other, 'imag'): -1004 return CObs((self.real * other.real + self.imag * other.imag) / r, (self.real * other.imag - self.imag * other.real) / r) -1005 else: -1006 return CObs(self.real * other / r, -self.imag * other / r) -1007 -1008 def __abs__(self): -1009 return np.sqrt(self.real**2 + self.imag**2) -1010 -1011 def __pos__(self): -1012 return self -1013 -1014 def __neg__(self): -1015 return -1 * self -1016 -1017 def __eq__(self, other): -1018 return self.real == other.real and self.imag == other.imag -1019 -1020 def __str__(self): -1021 return '(' + str(self.real) + int(self.imag >= 0.0) * '+' + str(self.imag) + 'j)' -1022 -1023 def __repr__(self): -1024 return 'CObs[' + str(self) 
+ ']' -1025 -1026 def __format__(self, format_type): -1027 if format_type == "": -1028 significance = 2 -1029 format_type = "2" -1030 else: -1031 significance = int(float(format_type.replace("+", "").replace("-", ""))) -1032 return f"({self.real:{format_type}}{self.imag:+{significance}}j)" + 915class CObs: + 916 """Class for a complex valued observable.""" + 917 __slots__ = ['_real', '_imag', 'tag'] + 918 + 919 def __init__(self, real, imag=0.0): + 920 self._real = real + 921 self._imag = imag + 922 self.tag = None + 923 + 924 @property + 925 def real(self): + 926 return self._real + 927 + 928 @property + 929 def imag(self): + 930 return self._imag + 931 + 932 def gamma_method(self, **kwargs): + 933 """Executes the gamma_method for the real and the imaginary part.""" + 934 if isinstance(self.real, Obs): + 935 self.real.gamma_method(**kwargs) + 936 if isinstance(self.imag, Obs): + 937 self.imag.gamma_method(**kwargs) + 938 + 939 def is_zero(self): + 940 """Checks whether both real and imaginary part are zero within machine precision.""" + 941 return self.real == 0.0 and self.imag == 0.0 + 942 + 943 def conjugate(self): + 944 return CObs(self.real, -self.imag) + 945 + 946 def __add__(self, other): + 947 if isinstance(other, np.ndarray): + 948 return other + self + 949 elif hasattr(other, 'real') and hasattr(other, 'imag'): + 950 return CObs(self.real + other.real, + 951 self.imag + other.imag) + 952 else: + 953 return CObs(self.real + other, self.imag) + 954 + 955 def __radd__(self, y): + 956 return self + y + 957 + 958 def __sub__(self, other): + 959 if isinstance(other, np.ndarray): + 960 return -1 * (other - self) + 961 elif hasattr(other, 'real') and hasattr(other, 'imag'): + 962 return CObs(self.real - other.real, self.imag - other.imag) + 963 else: + 964 return CObs(self.real - other, self.imag) + 965 + 966 def __rsub__(self, other): + 967 return -1 * (self - other) + 968 + 969 def __mul__(self, other): + 970 if isinstance(other, np.ndarray): + 971 return other * self + 972 elif hasattr(other, 'real') and hasattr(other, 'imag'): + 973 if all(isinstance(i, Obs) for i in [self.real, self.imag, other.real, other.imag]): + 974 return CObs(derived_observable(lambda x, **kwargs: x[0] * x[1] - x[2] * x[3], + 975 [self.real, other.real, self.imag, other.imag], + 976 man_grad=[other.real.value, self.real.value, -other.imag.value, -self.imag.value]), + 977 derived_observable(lambda x, **kwargs: x[2] * x[1] + x[0] * x[3], + 978 [self.real, other.real, self.imag, other.imag], + 979 man_grad=[other.imag.value, self.imag.value, other.real.value, self.real.value])) + 980 elif getattr(other, 'imag', 0) != 0: + 981 return CObs(self.real * other.real - self.imag * other.imag, + 982 self.imag * other.real + self.real * other.imag) + 983 else: + 984 return CObs(self.real * other.real, self.imag * other.real) + 985 else: + 986 return CObs(self.real * other, self.imag * other) + 987 + 988 def __rmul__(self, other): + 989 return self * other + 990 + 991 def __truediv__(self, other): + 992 if isinstance(other, np.ndarray): + 993 return 1 / (other / self) + 994 elif hasattr(other, 'real') and hasattr(other, 'imag'): + 995 r = other.real ** 2 + other.imag ** 2 + 996 return CObs((self.real * other.real + self.imag * other.imag) / r, (self.imag * other.real - self.real * other.imag) / r) + 997 else: + 998 return CObs(self.real / other, self.imag / other) + 999 +1000 def __rtruediv__(self, other): +1001 r = self.real ** 2 + self.imag ** 2 +1002 if hasattr(other, 'real') and hasattr(other, 'imag'): +1003 return 
CObs((self.real * other.real + self.imag * other.imag) / r, (self.real * other.imag - self.imag * other.real) / r) +1004 else: +1005 return CObs(self.real * other / r, -self.imag * other / r) +1006 +1007 def __abs__(self): +1008 return np.sqrt(self.real**2 + self.imag**2) +1009 +1010 def __pos__(self): +1011 return self +1012 +1013 def __neg__(self): +1014 return -1 * self +1015 +1016 def __eq__(self, other): +1017 return self.real == other.real and self.imag == other.imag +1018 +1019 def __str__(self): +1020 return '(' + str(self.real) + int(self.imag >= 0.0) * '+' + str(self.imag) + 'j)' +1021 +1022 def __repr__(self): +1023 return 'CObs[' + str(self) + ']' +1024 +1025 def __format__(self, format_type): +1026 if format_type == "": +1027 significance = 2 +1028 format_type = "2" +1029 else: +1030 significance = int(float(format_type.replace("+", "").replace("-", ""))) +1031 return f"({self.real:{format_type}}{self.imag:+{significance}}j)" +1032 1033 -1034 -1035def gamma_method(x, **kwargs): -1036 """Vectorized version of the gamma_method applicable to lists or arrays of Obs. -1037 -1038 See docstring of pe.Obs.gamma_method for details. -1039 """ -1040 return np.vectorize(lambda o: o.gm(**kwargs))(x) +1034def gamma_method(x, **kwargs): +1035 """Vectorized version of the gamma_method applicable to lists or arrays of Obs. +1036 +1037 See docstring of pe.Obs.gamma_method for details. +1038 """ +1039 return np.vectorize(lambda o: o.gm(**kwargs))(x) +1040 1041 -1042 -1043def gm(x, **kwargs): -1044 """Short version of the vectorized gamma_method. -1045 -1046 See docstring of pe.Obs.gamma_method for details -1047 """ -1048 return gamma_method(x, **kwargs) +1042def gm(x, **kwargs): +1043 """Short version of the vectorized gamma_method. +1044 +1045 See docstring of pe.Obs.gamma_method for details +1046 """ +1047 return gamma_method(x, **kwargs) +1048 1049 -1050 -1051def _format_uncertainty(value, dvalue, significance=2): -1052 """Creates a string of a value and its error in paranthesis notation, e.g., 13.02(45)""" -1053 if dvalue == 0.0 or (not np.isfinite(dvalue)): -1054 return str(value) -1055 if not isinstance(significance, int): -1056 raise TypeError("significance needs to be an integer.") -1057 if significance < 1: -1058 raise ValueError("significance needs to be larger than zero.") -1059 fexp = np.floor(np.log10(dvalue)) -1060 if fexp < 0.0: -1061 return '{:{form}}({:1.0f})'.format(value, dvalue * 10 ** (-fexp + significance - 1), form='.' + str(-int(fexp) + significance - 1) + 'f') -1062 elif fexp == 0.0: -1063 return f"{value:.{significance - 1}f}({dvalue:1.{significance - 1}f})" -1064 else: -1065 return f"{value:.{max(0, int(significance - fexp - 1))}f}({dvalue:2.{max(0, int(significance - fexp - 1))}f})" +1050def _format_uncertainty(value, dvalue, significance=2): +1051 """Creates a string of a value and its error in paranthesis notation, e.g., 13.02(45)""" +1052 if dvalue == 0.0 or (not np.isfinite(dvalue)): +1053 return str(value) +1054 if not isinstance(significance, int): +1055 raise TypeError("significance needs to be an integer.") +1056 if significance < 1: +1057 raise ValueError("significance needs to be larger than zero.") +1058 fexp = np.floor(np.log10(dvalue)) +1059 if fexp < 0.0: +1060 return '{:{form}}({:1.0f})'.format(value, dvalue * 10 ** (-fexp + significance - 1), form='.' 
+ str(-int(fexp) + significance - 1) + 'f') +1061 elif fexp == 0.0: +1062 return f"{value:.{significance - 1}f}({dvalue:1.{significance - 1}f})" +1063 else: +1064 return f"{value:.{max(0, int(significance - fexp - 1))}f}({dvalue:2.{max(0, int(significance - fexp - 1))}f})" +1065 1066 -1067 -1068def _expand_deltas(deltas, idx, shape, gapsize): -1069 """Expand deltas defined on idx to a regular range with spacing gapsize between two -1070 configurations and where holes are filled by 0. -1071 If idx is of type range, the deltas are not changed if the idx.step == gapsize. -1072 -1073 Parameters -1074 ---------- -1075 deltas : list -1076 List of fluctuations -1077 idx : list -1078 List or range of configs on which the deltas are defined, has to be sorted in ascending order. -1079 shape : int -1080 Number of configs in idx. -1081 gapsize : int -1082 The target distance between two configurations. If longer distances -1083 are found in idx, the data is expanded. -1084 """ -1085 if isinstance(idx, range): -1086 if (idx.step == gapsize): -1087 return deltas -1088 ret = np.zeros((idx[-1] - idx[0] + gapsize) // gapsize) -1089 for i in range(shape): -1090 ret[(idx[i] - idx[0]) // gapsize] = deltas[i] -1091 return ret +1067def _expand_deltas(deltas, idx, shape, gapsize): +1068 """Expand deltas defined on idx to a regular range with spacing gapsize between two +1069 configurations and where holes are filled by 0. +1070 If idx is of type range, the deltas are not changed if the idx.step == gapsize. +1071 +1072 Parameters +1073 ---------- +1074 deltas : list +1075 List of fluctuations +1076 idx : list +1077 List or range of configs on which the deltas are defined, has to be sorted in ascending order. +1078 shape : int +1079 Number of configs in idx. +1080 gapsize : int +1081 The target distance between two configurations. If longer distances +1082 are found in idx, the data is expanded. +1083 """ +1084 if isinstance(idx, range): +1085 if (idx.step == gapsize): +1086 return deltas +1087 ret = np.zeros((idx[-1] - idx[0] + gapsize) // gapsize) +1088 for i in range(shape): +1089 ret[(idx[i] - idx[0]) // gapsize] = deltas[i] +1090 return ret +1091 1092 -1093 -1094def _merge_idx(idl): -1095 """Returns the union of all lists in idl as range or sorted list -1096 -1097 Parameters -1098 ---------- -1099 idl : list -1100 List of lists or ranges. -1101 """ -1102 -1103 if _check_lists_equal(idl): -1104 return idl[0] -1105 -1106 idunion = sorted(set().union(*idl)) -1107 -1108 # Check whether idunion can be expressed as range -1109 idrange = range(idunion[0], idunion[-1] + 1, idunion[1] - idunion[0]) -1110 idtest = [list(idrange), idunion] -1111 if _check_lists_equal(idtest): -1112 return idrange -1113 -1114 return idunion +1093def _merge_idx(idl): +1094 """Returns the union of all lists in idl as range or sorted list +1095 +1096 Parameters +1097 ---------- +1098 idl : list +1099 List of lists or ranges. +1100 """ +1101 +1102 if _check_lists_equal(idl): +1103 return idl[0] +1104 +1105 idunion = sorted(set().union(*idl)) +1106 +1107 # Check whether idunion can be expressed as range +1108 idrange = range(idunion[0], idunion[-1] + 1, idunion[1] - idunion[0]) +1109 idtest = [list(idrange), idunion] +1110 if _check_lists_equal(idtest): +1111 return idrange +1112 +1113 return idunion +1114 1115 -1116 -1117def _intersection_idx(idl): -1118 """Returns the intersection of all lists in idl as range or sorted list -1119 -1120 Parameters -1121 ---------- -1122 idl : list -1123 List of lists or ranges. 
-1124 """ -1125 -1126 if _check_lists_equal(idl): -1127 return idl[0] -1128 -1129 idinter = sorted(set.intersection(*[set(o) for o in idl])) -1130 -1131 # Check whether idinter can be expressed as range -1132 try: -1133 idrange = range(idinter[0], idinter[-1] + 1, idinter[1] - idinter[0]) -1134 idtest = [list(idrange), idinter] -1135 if _check_lists_equal(idtest): -1136 return idrange -1137 except IndexError: -1138 pass -1139 -1140 return idinter +1116def _intersection_idx(idl): +1117 """Returns the intersection of all lists in idl as range or sorted list +1118 +1119 Parameters +1120 ---------- +1121 idl : list +1122 List of lists or ranges. +1123 """ +1124 +1125 if _check_lists_equal(idl): +1126 return idl[0] +1127 +1128 idinter = sorted(set.intersection(*[set(o) for o in idl])) +1129 +1130 # Check whether idinter can be expressed as range +1131 try: +1132 idrange = range(idinter[0], idinter[-1] + 1, idinter[1] - idinter[0]) +1133 idtest = [list(idrange), idinter] +1134 if _check_lists_equal(idtest): +1135 return idrange +1136 except IndexError: +1137 pass +1138 +1139 return idinter +1140 1141 -1142 -1143def _expand_deltas_for_merge(deltas, idx, shape, new_idx): -1144 """Expand deltas defined on idx to the list of configs that is defined by new_idx. -1145 New, empty entries are filled by 0. If idx and new_idx are of type range, the smallest -1146 common divisor of the step sizes is used as new step size. -1147 -1148 Parameters -1149 ---------- -1150 deltas : list -1151 List of fluctuations -1152 idx : list -1153 List or range of configs on which the deltas are defined. -1154 Has to be a subset of new_idx and has to be sorted in ascending order. -1155 shape : list -1156 Number of configs in idx. -1157 new_idx : list -1158 List of configs that defines the new range, has to be sorted in ascending order. -1159 """ -1160 -1161 if type(idx) is range and type(new_idx) is range: -1162 if idx == new_idx: -1163 return deltas -1164 ret = np.zeros(new_idx[-1] - new_idx[0] + 1) -1165 for i in range(shape): -1166 ret[idx[i] - new_idx[0]] = deltas[i] -1167 return np.array([ret[new_idx[i] - new_idx[0]] for i in range(len(new_idx))]) * len(new_idx) / len(idx) +1142def _expand_deltas_for_merge(deltas, idx, shape, new_idx): +1143 """Expand deltas defined on idx to the list of configs that is defined by new_idx. +1144 New, empty entries are filled by 0. If idx and new_idx are of type range, the smallest +1145 common divisor of the step sizes is used as new step size. +1146 +1147 Parameters +1148 ---------- +1149 deltas : list +1150 List of fluctuations +1151 idx : list +1152 List or range of configs on which the deltas are defined. +1153 Has to be a subset of new_idx and has to be sorted in ascending order. +1154 shape : list +1155 Number of configs in idx. +1156 new_idx : list +1157 List of configs that defines the new range, has to be sorted in ascending order. +1158 """ +1159 +1160 if type(idx) is range and type(new_idx) is range: +1161 if idx == new_idx: +1162 return deltas +1163 ret = np.zeros(new_idx[-1] - new_idx[0] + 1) +1164 for i in range(shape): +1165 ret[idx[i] - new_idx[0]] = deltas[i] +1166 return np.array([ret[new_idx[i] - new_idx[0]] for i in range(len(new_idx))]) * len(new_idx) / len(idx) +1167 1168 -1169 -1170def derived_observable(func, data, array_mode=False, **kwargs): -1171 """Construct a derived Obs according to func(data, **kwargs) using automatic differentiation. -1172 -1173 Parameters -1174 ---------- -1175 func : object -1176 arbitrary function of the form func(data, **kwargs). 
For the -1177 automatic differentiation to work, all numpy functions have to have -1178 the autograd wrapper (use 'import autograd.numpy as anp'). -1179 data : list -1180 list of Obs, e.g. [obs1, obs2, obs3]. -1181 num_grad : bool -1182 if True, numerical derivatives are used instead of autograd -1183 (default False). To control the numerical differentiation the -1184 kwargs of numdifftools.step_generators.MaxStepGenerator -1185 can be used. -1186 man_grad : list -1187 manually supply a list or an array which contains the jacobian -1188 of func. Use cautiously, supplying the wrong derivative will -1189 not be intercepted. -1190 -1191 Notes -1192 ----- -1193 For simple mathematical operations it can be practical to use anonymous -1194 functions. For the ratio of two observables one can e.g. use -1195 -1196 new_obs = derived_observable(lambda x: x[0] / x[1], [obs1, obs2]) -1197 """ -1198 -1199 data = np.asarray(data) -1200 raveled_data = data.ravel() -1201 -1202 # Workaround for matrix operations containing non Obs data -1203 if not all(isinstance(x, Obs) for x in raveled_data): -1204 for i in range(len(raveled_data)): -1205 if isinstance(raveled_data[i], (int, float)): -1206 raveled_data[i] = cov_Obs(raveled_data[i], 0.0, "###dummy_covobs###") -1207 -1208 allcov = {} -1209 for o in raveled_data: -1210 for name in o.cov_names: -1211 if name in allcov: -1212 if not np.allclose(allcov[name], o.covobs[name].cov): -1213 raise Exception('Inconsistent covariance matrices for %s!' % (name)) -1214 else: -1215 allcov[name] = o.covobs[name].cov -1216 -1217 n_obs = len(raveled_data) -1218 new_names = sorted(set([y for x in [o.names for o in raveled_data] for y in x])) -1219 new_cov_names = sorted(set([y for x in [o.cov_names for o in raveled_data] for y in x])) -1220 new_sample_names = sorted(set(new_names) - set(new_cov_names)) -1221 -1222 reweighted = len(list(filter(lambda o: o.reweighted is True, raveled_data))) > 0 -1223 -1224 if data.ndim == 1: -1225 values = np.array([o.value for o in data]) -1226 else: -1227 values = np.vectorize(lambda x: x.value)(data) -1228 -1229 new_values = func(values, **kwargs) -1230 -1231 multi = int(isinstance(new_values, np.ndarray)) -1232 -1233 new_r_values = {} -1234 new_idl_d = {} -1235 for name in new_sample_names: -1236 idl = [] -1237 tmp_values = np.zeros(n_obs) -1238 for i, item in enumerate(raveled_data): -1239 tmp_values[i] = item.r_values.get(name, item.value) -1240 tmp_idl = item.idl.get(name) -1241 if tmp_idl is not None: -1242 idl.append(tmp_idl) -1243 if multi > 0: -1244 tmp_values = np.array(tmp_values).reshape(data.shape) -1245 new_r_values[name] = func(tmp_values, **kwargs) -1246 new_idl_d[name] = _merge_idx(idl) -1247 -1248 if 'man_grad' in kwargs: -1249 deriv = np.asarray(kwargs.get('man_grad')) -1250 if new_values.shape + data.shape != deriv.shape: -1251 raise Exception('Manual derivative does not have correct shape.') -1252 elif kwargs.get('num_grad') is True: -1253 if multi > 0: -1254 raise Exception('Multi mode currently not supported for numerical derivative') -1255 options = { -1256 'base_step': 0.1, -1257 'step_ratio': 2.5} -1258 for key in options.keys(): -1259 kwarg = kwargs.get(key) -1260 if kwarg is not None: -1261 options[key] = kwarg -1262 tmp_df = nd.Gradient(func, order=4, **{k: v for k, v in options.items() if v is not None})(values, **kwargs) -1263 if tmp_df.size == 1: -1264 deriv = np.array([tmp_df.real]) -1265 else: -1266 deriv = tmp_df.real -1267 else: -1268 deriv = jacobian(func)(values, **kwargs) -1269 -1270 final_result = 
np.zeros(new_values.shape, dtype=object) -1271 -1272 if array_mode is True: -1273 -1274 class _Zero_grad(): -1275 def __init__(self, N): -1276 self.grad = np.zeros((N, 1)) -1277 -1278 new_covobs_lengths = dict(set([y for x in [[(n, o.covobs[n].N) for n in o.cov_names] for o in raveled_data] for y in x])) -1279 d_extracted = {} -1280 g_extracted = {} -1281 for name in new_sample_names: -1282 d_extracted[name] = [] -1283 ens_length = len(new_idl_d[name]) -1284 for i_dat, dat in enumerate(data): -1285 d_extracted[name].append(np.array([_expand_deltas_for_merge(o.deltas.get(name, np.zeros(ens_length)), o.idl.get(name, new_idl_d[name]), o.shape.get(name, ens_length), new_idl_d[name]) for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (ens_length, ))) -1286 for name in new_cov_names: -1287 g_extracted[name] = [] -1288 zero_grad = _Zero_grad(new_covobs_lengths[name]) -1289 for i_dat, dat in enumerate(data): -1290 g_extracted[name].append(np.array([o.covobs.get(name, zero_grad).grad for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (new_covobs_lengths[name], 1))) -1291 -1292 for i_val, new_val in np.ndenumerate(new_values): -1293 new_deltas = {} -1294 new_grad = {} -1295 if array_mode is True: -1296 for name in new_sample_names: -1297 ens_length = d_extracted[name][0].shape[-1] -1298 new_deltas[name] = np.zeros(ens_length) -1299 for i_dat, dat in enumerate(d_extracted[name]): -1300 new_deltas[name] += np.tensordot(deriv[i_val + (i_dat, )], dat) -1301 for name in new_cov_names: -1302 new_grad[name] = 0 -1303 for i_dat, dat in enumerate(g_extracted[name]): -1304 new_grad[name] += np.tensordot(deriv[i_val + (i_dat, )], dat) -1305 else: -1306 for j_obs, obs in np.ndenumerate(data): -1307 for name in obs.names: -1308 if name in obs.cov_names: -1309 new_grad[name] = new_grad.get(name, 0) + deriv[i_val + j_obs] * obs.covobs[name].grad -1310 else: -1311 new_deltas[name] = new_deltas.get(name, 0) + deriv[i_val + j_obs] * _expand_deltas_for_merge(obs.deltas[name], obs.idl[name], obs.shape[name], new_idl_d[name]) -1312 -1313 new_covobs = {name: Covobs(0, allcov[name], name, grad=new_grad[name]) for name in new_grad} -1314 -1315 if not set(new_covobs.keys()).isdisjoint(new_deltas.keys()): -1316 raise Exception('The same name has been used for deltas and covobs!') -1317 new_samples = [] -1318 new_means = [] -1319 new_idl = [] -1320 new_names_obs = [] -1321 for name in new_names: -1322 if name not in new_covobs: -1323 new_samples.append(new_deltas[name]) -1324 new_idl.append(new_idl_d[name]) -1325 new_means.append(new_r_values[name][i_val]) -1326 new_names_obs.append(name) -1327 final_result[i_val] = Obs(new_samples, new_names_obs, means=new_means, idl=new_idl) -1328 for name in new_covobs: -1329 final_result[i_val].names.append(name) -1330 final_result[i_val]._covobs = new_covobs -1331 final_result[i_val]._value = new_val -1332 final_result[i_val].reweighted = reweighted -1333 -1334 if multi == 0: -1335 final_result = final_result.item() -1336 -1337 return final_result +1169def derived_observable(func, data, array_mode=False, **kwargs): +1170 """Construct a derived Obs according to func(data, **kwargs) using automatic differentiation. +1171 +1172 Parameters +1173 ---------- +1174 func : object +1175 arbitrary function of the form func(data, **kwargs). For the +1176 automatic differentiation to work, all numpy functions have to have +1177 the autograd wrapper (use 'import autograd.numpy as anp'). +1178 data : list +1179 list of Obs, e.g. [obs1, obs2, obs3]. 
+1180 num_grad : bool +1181 if True, numerical derivatives are used instead of autograd +1182 (default False). To control the numerical differentiation the +1183 kwargs of numdifftools.step_generators.MaxStepGenerator +1184 can be used. +1185 man_grad : list +1186 manually supply a list or an array which contains the jacobian +1187 of func. Use cautiously, supplying the wrong derivative will +1188 not be intercepted. +1189 +1190 Notes +1191 ----- +1192 For simple mathematical operations it can be practical to use anonymous +1193 functions. For the ratio of two observables one can e.g. use +1194 +1195 new_obs = derived_observable(lambda x: x[0] / x[1], [obs1, obs2]) +1196 """ +1197 +1198 data = np.asarray(data) +1199 raveled_data = data.ravel() +1200 +1201 # Workaround for matrix operations containing non Obs data +1202 if not all(isinstance(x, Obs) for x in raveled_data): +1203 for i in range(len(raveled_data)): +1204 if isinstance(raveled_data[i], (int, float)): +1205 raveled_data[i] = cov_Obs(raveled_data[i], 0.0, "###dummy_covobs###") +1206 +1207 allcov = {} +1208 for o in raveled_data: +1209 for name in o.cov_names: +1210 if name in allcov: +1211 if not np.allclose(allcov[name], o.covobs[name].cov): +1212 raise Exception('Inconsistent covariance matrices for %s!' % (name)) +1213 else: +1214 allcov[name] = o.covobs[name].cov +1215 +1216 n_obs = len(raveled_data) +1217 new_names = sorted(set([y for x in [o.names for o in raveled_data] for y in x])) +1218 new_cov_names = sorted(set([y for x in [o.cov_names for o in raveled_data] for y in x])) +1219 new_sample_names = sorted(set(new_names) - set(new_cov_names)) +1220 +1221 reweighted = len(list(filter(lambda o: o.reweighted is True, raveled_data))) > 0 +1222 +1223 if data.ndim == 1: +1224 values = np.array([o.value for o in data]) +1225 else: +1226 values = np.vectorize(lambda x: x.value)(data) +1227 +1228 new_values = func(values, **kwargs) +1229 +1230 multi = int(isinstance(new_values, np.ndarray)) +1231 +1232 new_r_values = {} +1233 new_idl_d = {} +1234 for name in new_sample_names: +1235 idl = [] +1236 tmp_values = np.zeros(n_obs) +1237 for i, item in enumerate(raveled_data): +1238 tmp_values[i] = item.r_values.get(name, item.value) +1239 tmp_idl = item.idl.get(name) +1240 if tmp_idl is not None: +1241 idl.append(tmp_idl) +1242 if multi > 0: +1243 tmp_values = np.array(tmp_values).reshape(data.shape) +1244 new_r_values[name] = func(tmp_values, **kwargs) +1245 new_idl_d[name] = _merge_idx(idl) +1246 +1247 if 'man_grad' in kwargs: +1248 deriv = np.asarray(kwargs.get('man_grad')) +1249 if new_values.shape + data.shape != deriv.shape: +1250 raise Exception('Manual derivative does not have correct shape.') +1251 elif kwargs.get('num_grad') is True: +1252 if multi > 0: +1253 raise Exception('Multi mode currently not supported for numerical derivative') +1254 options = { +1255 'base_step': 0.1, +1256 'step_ratio': 2.5} +1257 for key in options.keys(): +1258 kwarg = kwargs.get(key) +1259 if kwarg is not None: +1260 options[key] = kwarg +1261 tmp_df = nd.Gradient(func, order=4, **{k: v for k, v in options.items() if v is not None})(values, **kwargs) +1262 if tmp_df.size == 1: +1263 deriv = np.array([tmp_df.real]) +1264 else: +1265 deriv = tmp_df.real +1266 else: +1267 deriv = jacobian(func)(values, **kwargs) +1268 +1269 final_result = np.zeros(new_values.shape, dtype=object) +1270 +1271 if array_mode is True: +1272 +1273 class _Zero_grad(): +1274 def __init__(self, N): +1275 self.grad = np.zeros((N, 1)) +1276 +1277 new_covobs_lengths = 
dict(set([y for x in [[(n, o.covobs[n].N) for n in o.cov_names] for o in raveled_data] for y in x])) +1278 d_extracted = {} +1279 g_extracted = {} +1280 for name in new_sample_names: +1281 d_extracted[name] = [] +1282 ens_length = len(new_idl_d[name]) +1283 for i_dat, dat in enumerate(data): +1284 d_extracted[name].append(np.array([_expand_deltas_for_merge(o.deltas.get(name, np.zeros(ens_length)), o.idl.get(name, new_idl_d[name]), o.shape.get(name, ens_length), new_idl_d[name]) for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (ens_length, ))) +1285 for name in new_cov_names: +1286 g_extracted[name] = [] +1287 zero_grad = _Zero_grad(new_covobs_lengths[name]) +1288 for i_dat, dat in enumerate(data): +1289 g_extracted[name].append(np.array([o.covobs.get(name, zero_grad).grad for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (new_covobs_lengths[name], 1))) +1290 +1291 for i_val, new_val in np.ndenumerate(new_values): +1292 new_deltas = {} +1293 new_grad = {} +1294 if array_mode is True: +1295 for name in new_sample_names: +1296 ens_length = d_extracted[name][0].shape[-1] +1297 new_deltas[name] = np.zeros(ens_length) +1298 for i_dat, dat in enumerate(d_extracted[name]): +1299 new_deltas[name] += np.tensordot(deriv[i_val + (i_dat, )], dat) +1300 for name in new_cov_names: +1301 new_grad[name] = 0 +1302 for i_dat, dat in enumerate(g_extracted[name]): +1303 new_grad[name] += np.tensordot(deriv[i_val + (i_dat, )], dat) +1304 else: +1305 for j_obs, obs in np.ndenumerate(data): +1306 for name in obs.names: +1307 if name in obs.cov_names: +1308 new_grad[name] = new_grad.get(name, 0) + deriv[i_val + j_obs] * obs.covobs[name].grad +1309 else: +1310 new_deltas[name] = new_deltas.get(name, 0) + deriv[i_val + j_obs] * _expand_deltas_for_merge(obs.deltas[name], obs.idl[name], obs.shape[name], new_idl_d[name]) +1311 +1312 new_covobs = {name: Covobs(0, allcov[name], name, grad=new_grad[name]) for name in new_grad} +1313 +1314 if not set(new_covobs.keys()).isdisjoint(new_deltas.keys()): +1315 raise Exception('The same name has been used for deltas and covobs!') +1316 new_samples = [] +1317 new_means = [] +1318 new_idl = [] +1319 new_names_obs = [] +1320 for name in new_names: +1321 if name not in new_covobs: +1322 new_samples.append(new_deltas[name]) +1323 new_idl.append(new_idl_d[name]) +1324 new_means.append(new_r_values[name][i_val]) +1325 new_names_obs.append(name) +1326 final_result[i_val] = Obs(new_samples, new_names_obs, means=new_means, idl=new_idl) +1327 for name in new_covobs: +1328 final_result[i_val].names.append(name) +1329 final_result[i_val]._covobs = new_covobs +1330 final_result[i_val]._value = new_val +1331 final_result[i_val].reweighted = reweighted +1332 +1333 if multi == 0: +1334 final_result = final_result.item() +1335 +1336 return final_result +1337 1338 -1339 -1340def _reduce_deltas(deltas, idx_old, idx_new): -1341 """Extract deltas defined on idx_old on all configs of idx_new. -1342 -1343 Assumes, that idx_old and idx_new are correctly defined idl, i.e., they -1344 are ordered in an ascending order. -1345 -1346 Parameters -1347 ---------- -1348 deltas : list -1349 List of fluctuations -1350 idx_old : list -1351 List or range of configs on which the deltas are defined -1352 idx_new : list -1353 List of configs for which we want to extract the deltas. -1354 Has to be a subset of idx_old. 
-1355 """ -1356 if not len(deltas) == len(idx_old): -1357 raise Exception('Length of deltas and idx_old have to be the same: %d != %d' % (len(deltas), len(idx_old))) -1358 if type(idx_old) is range and type(idx_new) is range: -1359 if idx_old == idx_new: -1360 return deltas -1361 if _check_lists_equal([idx_old, idx_new]): -1362 return deltas -1363 indices = np.intersect1d(idx_old, idx_new, assume_unique=True, return_indices=True)[1] -1364 if len(indices) < len(idx_new): -1365 raise Exception('Error in _reduce_deltas: Config of idx_new not in idx_old') -1366 return np.array(deltas)[indices] +1339def _reduce_deltas(deltas, idx_old, idx_new): +1340 """Extract deltas defined on idx_old on all configs of idx_new. +1341 +1342 Assumes, that idx_old and idx_new are correctly defined idl, i.e., they +1343 are ordered in an ascending order. +1344 +1345 Parameters +1346 ---------- +1347 deltas : list +1348 List of fluctuations +1349 idx_old : list +1350 List or range of configs on which the deltas are defined +1351 idx_new : list +1352 List of configs for which we want to extract the deltas. +1353 Has to be a subset of idx_old. +1354 """ +1355 if not len(deltas) == len(idx_old): +1356 raise Exception('Length of deltas and idx_old have to be the same: %d != %d' % (len(deltas), len(idx_old))) +1357 if type(idx_old) is range and type(idx_new) is range: +1358 if idx_old == idx_new: +1359 return deltas +1360 if _check_lists_equal([idx_old, idx_new]): +1361 return deltas +1362 indices = np.intersect1d(idx_old, idx_new, assume_unique=True, return_indices=True)[1] +1363 if len(indices) < len(idx_new): +1364 raise Exception('Error in _reduce_deltas: Config of idx_new not in idx_old') +1365 return np.array(deltas)[indices] +1366 1367 -1368 -1369def reweight(weight, obs, **kwargs): -1370 """Reweight a list of observables. -1371 -1372 Parameters -1373 ---------- -1374 weight : Obs -1375 Reweighting factor. An Observable that has to be defined on a superset of the -1376 configurations in obs[i].idl for all i. -1377 obs : list -1378 list of Obs, e.g. [obs1, obs2, obs3]. -1379 all_configs : bool -1380 if True, the reweighted observables are normalized by the average of -1381 the reweighting factor on all configurations in weight.idl and not -1382 on the configurations in obs[i].idl. Default False. -1383 """ -1384 result = [] -1385 for i in range(len(obs)): -1386 if len(obs[i].cov_names): -1387 raise Exception('Error: Not possible to reweight an Obs that contains covobs!') -1388 if not set(obs[i].names).issubset(weight.names): -1389 raise Exception('Error: Ensembles do not fit') -1390 for name in obs[i].names: -1391 if not set(obs[i].idl[name]).issubset(weight.idl[name]): -1392 raise Exception('obs[%d] has to be defined on a subset of the configs in weight.idl[%s]!' 
% (i, name)) -1393 new_samples = [] -1394 w_deltas = {} -1395 for name in sorted(obs[i].names): -1396 w_deltas[name] = _reduce_deltas(weight.deltas[name], weight.idl[name], obs[i].idl[name]) -1397 new_samples.append((w_deltas[name] + weight.r_values[name]) * (obs[i].deltas[name] + obs[i].r_values[name])) -1398 tmp_obs = Obs(new_samples, sorted(obs[i].names), idl=[obs[i].idl[name] for name in sorted(obs[i].names)]) -1399 -1400 if kwargs.get('all_configs'): -1401 new_weight = weight -1402 else: -1403 new_weight = Obs([w_deltas[name] + weight.r_values[name] for name in sorted(obs[i].names)], sorted(obs[i].names), idl=[obs[i].idl[name] for name in sorted(obs[i].names)]) -1404 -1405 result.append(tmp_obs / new_weight) -1406 result[-1].reweighted = True -1407 -1408 return result +1368def reweight(weight, obs, **kwargs): +1369 """Reweight a list of observables. +1370 +1371 Parameters +1372 ---------- +1373 weight : Obs +1374 Reweighting factor. An Observable that has to be defined on a superset of the +1375 configurations in obs[i].idl for all i. +1376 obs : list +1377 list of Obs, e.g. [obs1, obs2, obs3]. +1378 all_configs : bool +1379 if True, the reweighted observables are normalized by the average of +1380 the reweighting factor on all configurations in weight.idl and not +1381 on the configurations in obs[i].idl. Default False. +1382 """ +1383 result = [] +1384 for i in range(len(obs)): +1385 if len(obs[i].cov_names): +1386 raise Exception('Error: Not possible to reweight an Obs that contains covobs!') +1387 if not set(obs[i].names).issubset(weight.names): +1388 raise Exception('Error: Ensembles do not fit') +1389 for name in obs[i].names: +1390 if not set(obs[i].idl[name]).issubset(weight.idl[name]): +1391 raise Exception('obs[%d] has to be defined on a subset of the configs in weight.idl[%s]!' % (i, name)) +1392 new_samples = [] +1393 w_deltas = {} +1394 for name in sorted(obs[i].names): +1395 w_deltas[name] = _reduce_deltas(weight.deltas[name], weight.idl[name], obs[i].idl[name]) +1396 new_samples.append((w_deltas[name] + weight.r_values[name]) * (obs[i].deltas[name] + obs[i].r_values[name])) +1397 tmp_obs = Obs(new_samples, sorted(obs[i].names), idl=[obs[i].idl[name] for name in sorted(obs[i].names)]) +1398 +1399 if kwargs.get('all_configs'): +1400 new_weight = weight +1401 else: +1402 new_weight = Obs([w_deltas[name] + weight.r_values[name] for name in sorted(obs[i].names)], sorted(obs[i].names), idl=[obs[i].idl[name] for name in sorted(obs[i].names)]) +1403 +1404 result.append(tmp_obs / new_weight) +1405 result[-1].reweighted = True +1406 +1407 return result +1408 1409 -1410 -1411def correlate(obs_a, obs_b): -1412 """Correlate two observables. -1413 -1414 Parameters -1415 ---------- -1416 obs_a : Obs -1417 First observable -1418 obs_b : Obs -1419 Second observable -1420 -1421 Notes -1422 ----- -1423 Keep in mind to only correlate primary observables which have not been reweighted -1424 yet. The reweighting has to be applied after correlating the observables. -1425 Currently only works if ensembles are identical (this is not strictly necessary). 
-1426 """ -1427 -1428 if sorted(obs_a.names) != sorted(obs_b.names): -1429 raise Exception(f"Ensembles do not fit {set(sorted(obs_a.names)) ^ set(sorted(obs_b.names))}") -1430 if len(obs_a.cov_names) or len(obs_b.cov_names): -1431 raise Exception('Error: Not possible to correlate Obs that contain covobs!') -1432 for name in obs_a.names: -1433 if obs_a.shape[name] != obs_b.shape[name]: -1434 raise Exception('Shapes of ensemble', name, 'do not fit') -1435 if obs_a.idl[name] != obs_b.idl[name]: -1436 raise Exception('idl of ensemble', name, 'do not fit') -1437 -1438 if obs_a.reweighted is True: -1439 warnings.warn("The first observable is already reweighted.", RuntimeWarning) -1440 if obs_b.reweighted is True: -1441 warnings.warn("The second observable is already reweighted.", RuntimeWarning) -1442 -1443 new_samples = [] -1444 new_idl = [] -1445 for name in sorted(obs_a.names): -1446 new_samples.append((obs_a.deltas[name] + obs_a.r_values[name]) * (obs_b.deltas[name] + obs_b.r_values[name])) -1447 new_idl.append(obs_a.idl[name]) -1448 -1449 o = Obs(new_samples, sorted(obs_a.names), idl=new_idl) -1450 o.reweighted = obs_a.reweighted or obs_b.reweighted -1451 return o +1410def correlate(obs_a, obs_b): +1411 """Correlate two observables. +1412 +1413 Parameters +1414 ---------- +1415 obs_a : Obs +1416 First observable +1417 obs_b : Obs +1418 Second observable +1419 +1420 Notes +1421 ----- +1422 Keep in mind to only correlate primary observables which have not been reweighted +1423 yet. The reweighting has to be applied after correlating the observables. +1424 Currently only works if ensembles are identical (this is not strictly necessary). +1425 """ +1426 +1427 if sorted(obs_a.names) != sorted(obs_b.names): +1428 raise Exception(f"Ensembles do not fit {set(sorted(obs_a.names)) ^ set(sorted(obs_b.names))}") +1429 if len(obs_a.cov_names) or len(obs_b.cov_names): +1430 raise Exception('Error: Not possible to correlate Obs that contain covobs!') +1431 for name in obs_a.names: +1432 if obs_a.shape[name] != obs_b.shape[name]: +1433 raise Exception('Shapes of ensemble', name, 'do not fit') +1434 if obs_a.idl[name] != obs_b.idl[name]: +1435 raise Exception('idl of ensemble', name, 'do not fit') +1436 +1437 if obs_a.reweighted is True: +1438 warnings.warn("The first observable is already reweighted.", RuntimeWarning) +1439 if obs_b.reweighted is True: +1440 warnings.warn("The second observable is already reweighted.", RuntimeWarning) +1441 +1442 new_samples = [] +1443 new_idl = [] +1444 for name in sorted(obs_a.names): +1445 new_samples.append((obs_a.deltas[name] + obs_a.r_values[name]) * (obs_b.deltas[name] + obs_b.r_values[name])) +1446 new_idl.append(obs_a.idl[name]) +1447 +1448 o = Obs(new_samples, sorted(obs_a.names), idl=new_idl) +1449 o.reweighted = obs_a.reweighted or obs_b.reweighted +1450 return o +1451 1452 -1453 -1454def covariance(obs, visualize=False, correlation=False, smooth=None, **kwargs): -1455 r'''Calculates the error covariance matrix of a set of observables. -1456 -1457 WARNING: This function should be used with care, especially for observables with support on multiple -1458 ensembles with differing autocorrelations. See the notes below for details. -1459 -1460 The gamma method has to be applied first to all observables. -1461 -1462 Parameters -1463 ---------- -1464 obs : list or numpy.ndarray -1465 List or one dimensional array of Obs -1466 visualize : bool -1467 If True plots the corresponding normalized correlation matrix (default False). 
-1468 correlation : bool -1469 If True the correlation matrix instead of the error covariance matrix is returned (default False). -1470 smooth : None or int -1471 If smooth is an integer 'E' between 2 and the dimension of the matrix minus 1 the eigenvalue -1472 smoothing procedure of hep-lat/9412087 is applied to the correlation matrix which leaves the -1473 largest E eigenvalues essentially unchanged and smoothes the smaller eigenvalues to avoid extremely -1474 small ones. -1475 -1476 Notes -1477 ----- -1478 The error covariance is defined such that it agrees with the squared standard error for two identical observables -1479 $$\operatorname{cov}(a,a)=\sum_{s=1}^N\delta_a^s\delta_a^s/N^2=\Gamma_{aa}(0)/N=\operatorname{var}(a)/N=\sigma_a^2$$ -1480 in the absence of autocorrelation. -1481 The error covariance is estimated by calculating the correlation matrix assuming no autocorrelation and then rescaling the correlation matrix by the full errors including the previous gamma method estimate for the autocorrelation of the observables. The covariance at windowsize 0 is guaranteed to be positive semi-definite -1482 $$\sum_{i,j}v_i\Gamma_{ij}(0)v_j=\frac{1}{N}\sum_{s=1}^N\sum_{i,j}v_i\delta_i^s\delta_j^s v_j=\frac{1}{N}\sum_{s=1}^N\sum_{i}|v_i\delta_i^s|^2\geq 0\,,$$ for every $v\in\mathbb{R}^M$, while such an identity does not hold for larger windows/lags. -1483 For observables defined on a single ensemble our approximation is equivalent to assuming that the integrated autocorrelation time of an off-diagonal element is equal to the geometric mean of the integrated autocorrelation times of the corresponding diagonal elements. -1484 $$\tau_{\mathrm{int}, ij}=\sqrt{\tau_{\mathrm{int}, i}\times \tau_{\mathrm{int}, j}}$$ -1485 This construction ensures that the estimated covariance matrix is positive semi-definite (up to numerical rounding errors). -1486 ''' -1487 -1488 length = len(obs) -1489 -1490 max_samples = np.max([o.N for o in obs]) -1491 if max_samples <= length and not [item for sublist in [o.cov_names for o in obs] for item in sublist]: -1492 warnings.warn(f"The dimension of the covariance matrix ({length}) is larger or equal to the number of samples ({max_samples}). This will result in a rank deficient matrix.", RuntimeWarning) -1493 -1494 cov = np.zeros((length, length)) -1495 for i in range(length): -1496 for j in range(i, length): -1497 cov[i, j] = _covariance_element(obs[i], obs[j]) -1498 cov = cov + cov.T - np.diag(np.diag(cov)) -1499 -1500 corr = np.diag(1 / np.sqrt(np.diag(cov))) @ cov @ np.diag(1 / np.sqrt(np.diag(cov))) -1501 -1502 if isinstance(smooth, int): -1503 corr = _smooth_eigenvalues(corr, smooth) -1504 -1505 if visualize: -1506 plt.matshow(corr, vmin=-1, vmax=1) -1507 plt.set_cmap('RdBu') -1508 plt.colorbar() -1509 plt.draw() -1510 -1511 if correlation is True: -1512 return corr -1513 -1514 errors = [o.dvalue for o in obs] -1515 cov = np.diag(errors) @ corr @ np.diag(errors) -1516 -1517 eigenvalues = np.linalg.eigh(cov)[0] -1518 if not np.all(eigenvalues >= 0): -1519 warnings.warn("Covariance matrix is not positive semi-definite (Eigenvalues: " + str(eigenvalues) + ")", RuntimeWarning) -1520 -1521 return cov +1453def covariance(obs, visualize=False, correlation=False, smooth=None, **kwargs): +1454 r'''Calculates the error covariance matrix of a set of observables. +1455 +1456 WARNING: This function should be used with care, especially for observables with support on multiple +1457 ensembles with differing autocorrelations. See the notes below for details. 
-1523
-1524def _smooth_eigenvalues(corr, E):
-1525    """Eigenvalue smoothing as described in hep-lat/9412087
-1526
-1527    corr : np.ndarray
-1528        correlation matrix
-1529    E : integer
-1530        Number of eigenvalues to be left substantially unchanged
-1531    """
-1532    if not (2 < E < corr.shape[0] - 1):
-1533        raise Exception(f"'E' has to be between 2 and the dimension of the correlation matrix minus 1 ({corr.shape[0] - 1}).")
-1534    vals, vec = np.linalg.eigh(corr)
-1535    lambda_min = np.mean(vals[:-E])
-1536    vals[vals < lambda_min] = lambda_min
-1537    vals /= np.mean(vals)
-1538    return vec @ np.diag(vals) @ vec.T
+1523def _smooth_eigenvalues(corr, E):
+1524    """Eigenvalue smoothing as described in hep-lat/9412087
+1525
+1526    corr : np.ndarray
+1527        correlation matrix
+1528    E : integer
+1529        Number of eigenvalues to be left substantially unchanged
+1530    """
+1531    if not (2 < E < corr.shape[0] - 1):
+1532        raise Exception(f"'E' has to be between 2 and the dimension of the correlation matrix minus 1 ({corr.shape[0] - 1}).")
+1533    vals, vec = np.linalg.eigh(corr)
+1534    lambda_min = np.mean(vals[:-E])
+1535    vals[vals < lambda_min] = lambda_min
+1536    vals /= np.mean(vals)
+1537    return vec @ np.diag(vals) @ vec.T
+1538
1539
-1540
-1541def _covariance_element(obs1, obs2):
-1542    """Estimates the covariance of two Obs objects, neglecting autocorrelations."""
-1543
-1544    def calc_gamma(deltas1, deltas2, idx1, idx2, new_idx):
-1545        deltas1 = _reduce_deltas(deltas1, idx1, new_idx)
-1546        deltas2 = _reduce_deltas(deltas2, idx2, new_idx)
-1547        return np.sum(deltas1 * deltas2)
-1548
-1549    if set(obs1.names).isdisjoint(set(obs2.names)):
-1550        return 0.0
-1551
-1552    if not hasattr(obs1, 'e_dvalue') or not hasattr(obs2, 'e_dvalue'):
-1553        raise Exception('The gamma method has to be applied to both Obs first.')
-1554
-1555    dvalue = 0.0
-1556
-1557    for e_name in obs1.mc_names:
-1558
-1559        if e_name not in obs2.mc_names:
-1560            continue
-1561
-1562        idl_d = {}
-1563        for r_name in obs1.e_content[e_name]:
-1564            if r_name not in obs2.e_content[e_name]:
-1565                continue
-1566            idl_d[r_name] = _intersection_idx([obs1.idl[r_name], obs2.idl[r_name]])
-1567
-1568        gamma = 0.0
-1569
-1570        for r_name in obs1.e_content[e_name]:
-1571            if r_name not in obs2.e_content[e_name]:
-1572                continue
-1573            if len(idl_d[r_name]) == 0:
-1574                continue
-1575            gamma += calc_gamma(obs1.deltas[r_name], obs2.deltas[r_name], obs1.idl[r_name], obs2.idl[r_name], idl_d[r_name])
-1576
-1577        if gamma == 0.0:
-1578            continue
-1579
-1580        gamma_div = 0.0
-1581        for r_name in obs1.e_content[e_name]:
-1582            if r_name not in obs2.e_content[e_name]:
-1583                continue
-1584            if len(idl_d[r_name]) == 0:
-1585                continue
-1586            gamma_div += np.sqrt(calc_gamma(obs1.deltas[r_name], obs1.deltas[r_name], obs1.idl[r_name], obs1.idl[r_name], idl_d[r_name]) * calc_gamma(obs2.deltas[r_name], obs2.deltas[r_name], obs2.idl[r_name], obs2.idl[r_name], idl_d[r_name]))
-1587        gamma /= gamma_div
-1588
-1589        dvalue += gamma
-1590
-1591    for e_name in obs1.cov_names:
-1592
-1593        if e_name not in obs2.cov_names:
-1594            continue
-1595
-1596        dvalue += np.dot(np.transpose(obs1.covobs[e_name].grad), np.dot(obs1.covobs[e_name].cov, obs2.covobs[e_name].grad)).item()
-1597
-1598    return dvalue
+1540def _covariance_element(obs1, obs2):
+1541    """Estimates the covariance of two Obs objects, neglecting autocorrelations."""
+1542
+1543    def calc_gamma(deltas1, deltas2, idx1, idx2, new_idx):
+1544        deltas1 = _reduce_deltas(deltas1, idx1, new_idx)
+1545        deltas2 = _reduce_deltas(deltas2, idx2, new_idx)
+1546        return np.sum(deltas1 * deltas2)
+1547
+1548    if set(obs1.names).isdisjoint(set(obs2.names)):
+1549        return 0.0
+1550
+1551    if not hasattr(obs1, 'e_dvalue') or not hasattr(obs2, 'e_dvalue'):
+1552        raise Exception('The gamma method has to be applied to both Obs first.')
+1553
+1554    dvalue = 0.0
+1555
+1556    for e_name in obs1.mc_names:
+1557
+1558        if e_name not in obs2.mc_names:
+1559            continue
+1560
+1561        idl_d = {}
+1562        for r_name in obs1.e_content[e_name]:
+1563            if r_name not in obs2.e_content[e_name]:
+1564                continue
+1565            idl_d[r_name] = _intersection_idx([obs1.idl[r_name], obs2.idl[r_name]])
+1566
+1567        gamma = 0.0
+1568
+1569        for r_name in obs1.e_content[e_name]:
+1570            if r_name not in obs2.e_content[e_name]:
+1571                continue
+1572            if len(idl_d[r_name]) == 0:
+1573                continue
+1574            gamma += calc_gamma(obs1.deltas[r_name], obs2.deltas[r_name], obs1.idl[r_name], obs2.idl[r_name], idl_d[r_name])
+1575
+1576        if gamma == 0.0:
+1577            continue
+1578
+1579        gamma_div = 0.0
+1580        for r_name in obs1.e_content[e_name]:
+1581            if r_name not in obs2.e_content[e_name]:
+1582                continue
+1583            if len(idl_d[r_name]) == 0:
+1584                continue
+1585            gamma_div += np.sqrt(calc_gamma(obs1.deltas[r_name], obs1.deltas[r_name], obs1.idl[r_name], obs1.idl[r_name], idl_d[r_name]) * calc_gamma(obs2.deltas[r_name], obs2.deltas[r_name], obs2.idl[r_name], obs2.idl[r_name], idl_d[r_name]))
+1586        gamma /= gamma_div
+1587
+1588        dvalue += gamma
+1589
+1590    for e_name in obs1.cov_names:
+1591
+1592        if e_name not in obs2.cov_names:
+1593            continue
+1594
+1595        dvalue += np.dot(np.transpose(obs1.covobs[e_name].grad), np.dot(obs1.covobs[e_name].cov, obs2.covobs[e_name].grad)).item()
+1596
+1597    return dvalue
+1598
1599
-1600
-1601def import_jackknife(jacks, name, idl=None):
-1602    """Imports jackknife samples and returns an Obs
-1603
-1604    Parameters
-1605    ----------
-1606    jacks : numpy.ndarray
-1607        numpy array containing the mean value as zeroth entry and
-1608        the N jackknife samples as first to Nth entry.
-1609    name : str
-1610        name of the ensemble the samples are defined on.
-1611    """
-1612    length = len(jacks) - 1
-1613    prj = (np.ones((length, length)) - (length - 1) * np.identity(length))
-1614    samples = jacks[1:] @ prj
-1615    mean = np.mean(samples)
-1616    new_obs = Obs([samples - mean], [name], idl=idl, means=[mean])
-1617    new_obs._value = jacks[0]
-1618    return new_obs
+1600def import_jackknife(jacks, name, idl=None):
+1601    """Imports jackknife samples and returns an Obs
+1602
+1603    Parameters
+1604    ----------
+1605    jacks : numpy.ndarray
+1606        numpy array containing the mean value as zeroth entry and
+1607        the N jackknife samples as first to Nth entry.
+1608    name : str
+1609        name of the ensemble the samples are defined on.
+1610    """
+1611    length = len(jacks) - 1
+1612    prj = (np.ones((length, length)) - (length - 1) * np.identity(length))
+1613    samples = jacks[1:] @ prj
+1614    mean = np.mean(samples)
+1615    new_obs = Obs([samples - mean], [name], idl=idl, means=[mean])
+1616    new_obs._value = jacks[0]
+1617    return new_obs
+1618
1619
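A round-trip sketch for import_jackknife, assuming Obs.export_jackknife (defined elsewhere in this module) as the inverse operation; the data are invented:

    import numpy as np
    import pyerrors as pe

    obs = pe.Obs([np.random.normal(1.0, 0.1, 200)], ["ens"])
    jacks = obs.export_jackknife()            # mean first, then the N jackknife samples
    reconstructed = pe.import_jackknife(jacks, "ens")
    reconstructed.gamma_method()
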
-1620
-1621def import_bootstrap(boots, name, random_numbers):
-1622    """Imports bootstrap samples and returns an Obs
-1623
-1624    Parameters
-1625    ----------
-1626    boots : numpy.ndarray
-1627        numpy array containing the mean value as zeroth entry and
-1628        the N bootstrap samples as first to Nth entry.
-1629    name : str
-1630        name of the ensemble the samples are defined on.
-1631    random_numbers : np.ndarray
-1632        Array of shape (samples, length) containing the random numbers to generate the bootstrap samples,
-1633        where samples is the number of bootstrap samples and length is the length of the original Monte Carlo
-1634        chain to be reconstructed.
-1635    """
-1636    samples, length = random_numbers.shape
-1637    if samples != len(boots) - 1:
-1638        raise ValueError("Random numbers do not have the correct shape.")
-1639
-1640    if samples < length:
-1641        raise ValueError("Obs can't be reconstructed if there are fewer bootstrap samples than Monte Carlo data points.")
-1642
-1643    proj = np.vstack([np.bincount(o, minlength=length) for o in random_numbers]) / length
-1644
-1645    samples = scipy.linalg.lstsq(proj, boots[1:])[0]
-1646    ret = Obs([samples], [name])
-1647    ret._value = boots[0]
-1648    return ret
+1620def import_bootstrap(boots, name, random_numbers):
+1621    """Imports bootstrap samples and returns an Obs
+1622
+1623    Parameters
+1624    ----------
+1625    boots : numpy.ndarray
+1626        numpy array containing the mean value as zeroth entry and
+1627        the N bootstrap samples as first to Nth entry.
+1628    name : str
+1629        name of the ensemble the samples are defined on.
+1630    random_numbers : np.ndarray
+1631        Array of shape (samples, length) containing the random numbers to generate the bootstrap samples,
+1632        where samples is the number of bootstrap samples and length is the length of the original Monte Carlo
+1633        chain to be reconstructed.
+1634    """
+1635    samples, length = random_numbers.shape
+1636    if samples != len(boots) - 1:
+1637        raise ValueError("Random numbers do not have the correct shape.")
+1638
+1639    if samples < length:
+1640        raise ValueError("Obs can't be reconstructed if there are fewer bootstrap samples than Monte Carlo data points.")
+1641
+1642    proj = np.vstack([np.bincount(o, minlength=length) for o in random_numbers]) / length
+1643
+1644    samples = scipy.linalg.lstsq(proj, boots[1:])[0]
+1645    ret = Obs([samples], [name])
+1646    ret._value = boots[0]
+1647    return ret
+1648
1649
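A sketch of the shape contract of import_bootstrap, with the bootstrap means built by hand from the same index matrix random_numbers (all numbers invented); note samples >= length is required:

    import numpy as np
    import pyerrors as pe

    length, n_boot = 100, 500
    rng = np.random.default_rng(42)
    random_numbers = rng.integers(0, length, size=(n_boot, length))

    chain = np.random.normal(1.0, 0.1, length)
    boots = np.concatenate([[chain.mean()],
                            [chain[idx].mean() for idx in random_numbers]])
    reconstructed = pe.import_bootstrap(boots, "ens", random_numbers)
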
-1650
-1651def merge_obs(list_of_obs):
-1652    """Combine all observables in list_of_obs into one new observable
-1653
-1654    Parameters
-1655    ----------
-1656    list_of_obs : list
-1657        list of the Obs object to be combined
-1658
-1659    Notes
-1660    -----
-1661    It is not possible to combine obs which are based on the same replicum
-1662    """
-1663    replist = [item for obs in list_of_obs for item in obs.names]
-1664    if (len(replist) == len(set(replist))) is False:
-1665        raise Exception('list_of_obs contains duplicate replica: %s' % (str(replist)))
-1666    if any([len(o.cov_names) for o in list_of_obs]):
-1667        raise Exception('Not possible to merge data that contains covobs!')
-1668    new_dict = {}
-1669    idl_dict = {}
-1670    for o in list_of_obs:
-1671        new_dict.update({key: o.deltas.get(key, 0) + o.r_values.get(key, 0)
-1672                         for key in set(o.deltas) | set(o.r_values)})
-1673        idl_dict.update({key: o.idl.get(key, 0) for key in set(o.deltas)})
-1674
-1675    names = sorted(new_dict.keys())
-1676    o = Obs([new_dict[name] for name in names], names, idl=[idl_dict[name] for name in names])
-1677    o.reweighted = np.max([oi.reweighted for oi in list_of_obs])
-1678    return o
+1650def merge_obs(list_of_obs):
+1651    """Combine all observables in list_of_obs into one new observable
+1652
+1653    Parameters
+1654    ----------
+1655    list_of_obs : list
+1656        list of the Obs object to be combined
+1657
+1658    Notes
+1659    -----
+1660    It is not possible to combine obs which are based on the same replicum
+1661    """
+1662    replist = [item for obs in list_of_obs for item in obs.names]
+1663    if (len(replist) == len(set(replist))) is False:
+1664        raise Exception('list_of_obs contains duplicate replica: %s' % (str(replist)))
+1665    if any([len(o.cov_names) for o in list_of_obs]):
+1666        raise Exception('Not possible to merge data that contains covobs!')
+1667    new_dict = {}
+1668    idl_dict = {}
+1669    for o in list_of_obs:
+1670        new_dict.update({key: o.deltas.get(key, 0) + o.r_values.get(key, 0)
+1671                         for key in set(o.deltas) | set(o.r_values)})
+1672        idl_dict.update({key: o.idl.get(key, 0) for key in set(o.deltas)})
+1673
+1674    names = sorted(new_dict.keys())
+1675    o = Obs([new_dict[name] for name in names], names, idl=[idl_dict[name] for name in names])
+1676    o.reweighted = np.max([oi.reweighted for oi in list_of_obs])
+1677    return o
+1678
1679
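A sketch for merge_obs with two observables defined on disjoint replica of the same ensemble (the pipe-separated replica names follow the pyerrors convention but are invented here):

    import numpy as np
    import pyerrors as pe

    obs_r1 = pe.Obs([np.random.normal(1.0, 0.1, 100)], ["ens|r01"])
    obs_r2 = pe.Obs([np.random.normal(1.0, 0.1, 100)], ["ens|r02"])
    merged = pe.merge_obs([obs_r1, obs_r2])   # one Obs spanning both replica
    merged.gamma_method()
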
-1680
-1681def cov_Obs(means, cov, name, grad=None):
-1682    """Create an Obs based on mean(s) and a covariance matrix
-1683
-1684    Parameters
-1685    ----------
-1686    mean : list of floats or float
-1687        N mean value(s) of the new Obs
-1688    cov : list or array
-1689        2d (NxN) Covariance matrix, 1d diagonal entries or 0d covariance
-1690    name : str
-1691        identifier for the covariance matrix
-1692    grad : list or array
-1693        Gradient of the Covobs wrt. the means belonging to cov.
-1694    """
-1695
-1696    def covobs_to_obs(co):
-1697        """Make an Obs out of a Covobs
-1698
-1699        Parameters
-1700        ----------
-1701        co : Covobs
-1702            Covobs to be embedded into the Obs
-1703        """
-1704        o = Obs([], [], means=[])
-1705        o._value = co.value
-1706        o.names.append(co.name)
-1707        o._covobs[co.name] = co
-1708        o._dvalue = np.sqrt(co.errsq())
-1709        return o
-1710
-1711    ol = []
-1712    if isinstance(means, (float, int)):
-1713        means = [means]
-1714
-1715    for i in range(len(means)):
-1716        ol.append(covobs_to_obs(Covobs(means[i], cov, name, pos=i, grad=grad)))
-1717    if ol[0].covobs[name].N != len(means):
-1718        raise Exception('You have to provide %d mean values!' % (ol[0].N))
-1719    if len(ol) == 1:
-1720        return ol[0]
-1721    return ol
+1680def cov_Obs(means, cov, name, grad=None):
+1681    """Create an Obs based on mean(s) and a covariance matrix
+1682
+1683    Parameters
+1684    ----------
+1685    mean : list of floats or float
+1686        N mean value(s) of the new Obs
+1687    cov : list or array
+1688        2d (NxN) Covariance matrix, 1d diagonal entries or 0d covariance
+1689    name : str
+1690        identifier for the covariance matrix
+1691    grad : list or array
+1692        Gradient of the Covobs wrt. the means belonging to cov.
+1693    """
+1694
+1695    def covobs_to_obs(co):
+1696        """Make an Obs out of a Covobs
+1697
+1698        Parameters
+1699        ----------
+1700        co : Covobs
+1701            Covobs to be embedded into the Obs
+1702        """
+1703        o = Obs([], [], means=[])
+1704        o._value = co.value
+1705        o.names.append(co.name)
+1706        o._covobs[co.name] = co
+1707        o._dvalue = np.sqrt(co.errsq())
+1708        return o
+1709
+1710    ol = []
+1711    if isinstance(means, (float, int)):
+1712        means = [means]
+1713
+1714    for i in range(len(means)):
+1715        ol.append(covobs_to_obs(Covobs(means[i], cov, name, pos=i, grad=grad)))
+1716    if ol[0].covobs[name].N != len(means):
+1717        raise Exception('You have to provide %d mean values!' % (ol[0].N))
+1718    if len(ol) == 1:
+1719        return ol[0]
+1720    return ol
+1721
1722
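Two short sketches for cov_Obs (all numbers invented): a single external quantity with a plain squared error, and two correlated quantities sharing one covariance matrix:

    import pyerrors as pe

    z = pe.cov_Obs(1.85, 0.02 ** 2, "Z")            # 0d covariance, i.e. the squared error
    a, b = pe.cov_Obs([0.7, 1.3],
                      [[0.0004, 0.0001],
                       [0.0001, 0.0009]], "LEC")    # 2x2 covariance matrix
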
-1723
-1724def _determine_gap(o, e_content, e_name):
-1725    gaps = []
-1726    for r_name in e_content[e_name]:
-1727        if isinstance(o.idl[r_name], range):
-1728            gaps.append(o.idl[r_name].step)
-1729        else:
-1730            gaps.append(np.min(np.diff(o.idl[r_name])))
-1731
-1732    gap = min(gaps)
-1733    if not np.all([gi % gap == 0 for gi in gaps]):
-1734        raise Exception(f"Replica for ensemble {e_name} do not have a common spacing.", gaps)
-1735
-1736    return gap
+1723def _determine_gap(o, e_content, e_name):
+1724    gaps = []
+1725    for r_name in e_content[e_name]:
+1726        if isinstance(o.idl[r_name], range):
+1727            gaps.append(o.idl[r_name].step)
+1728        else:
+1729            gaps.append(np.min(np.diff(o.idl[r_name])))
+1730
+1731    gap = min(gaps)
+1732    if not np.all([gi % gap == 0 for gi in gaps]):
+1733        raise Exception(f"Replica for ensemble {e_name} do not have a common spacing.", gaps)
+1734
+1735    return gap
+1736
1737
-1738
-1739def _check_lists_equal(idl):
-1740    '''
-1741    Use groupby to efficiently check whether all elements of idl are identical.
-1742    Returns True if all elements are equal, otherwise False.
-1743
-1744    Parameters
-1745    ----------
-1746    idl : list of lists, ranges or np.ndarrays
-1747    '''
-1748    g = groupby([np.nditer(el) if isinstance(el, np.ndarray) else el for el in idl])
-1749    if next(g, True) and not next(g, False):
-1750        return True
-1751    return False
+1738def _check_lists_equal(idl):
+1739    '''
+1740    Use groupby to efficiently check whether all elements of idl are identical.
+1741    Returns True if all elements are equal, otherwise False.
+1742
+1743    Parameters
+1744    ----------
+1745    idl : list of lists, ranges or np.ndarrays
+1746    '''
+1747    g = groupby([np.nditer(el) if isinstance(el, np.ndarray) else el for el in idl])
+1748    if next(g, True) and not next(g, False):
+1749        return True
+1750    return False
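The groupby idiom used in _check_lists_equal, extracted as a standalone sketch (the np.nditer wrapper above only serves to make numpy arrays comparable element-wise):

    from itertools import groupby

    def all_equal(iterable):
        # groupby collapses runs of equal elements; a single group means all equal
        g = groupby(iterable)
        if next(g, True) and not next(g, False):
            return True
        return False

    assert all_equal([range(5), range(5)])
    assert not all_equal([[1, 2], [1, 3]])
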
@@ -2852,144 +2851,143 @@
774            return self.value >= other
775
776    def __eq__(self, other):
-777        return (self - other).is_zero()
-778
-779    def __ne__(self, other):
-780        return not (self - other).is_zero()
-781
-782    # Overload math operations
-783    def __add__(self, y):
-784        if isinstance(y, Obs):
-785            return derived_observable(lambda x, **kwargs: x[0] + x[1], [self, y], man_grad=[1, 1])
-786        else:
-787            if isinstance(y, np.ndarray):
-788                return np.array([self + o for o in y])
-789            elif y.__class__.__name__ in ['Corr', 'CObs']:
-790                return NotImplemented
-791            else:
-792                return derived_observable(lambda x, **kwargs: x[0] + y, [self], man_grad=[1])
-793
-794    def __radd__(self, y):
-795        return self + y
-796
-797    def __mul__(self, y):
-798        if isinstance(y, Obs):
-799            return derived_observable(lambda x, **kwargs: x[0] * x[1], [self, y], man_grad=[y.value, self.value])
-800        else:
-801            if isinstance(y, np.ndarray):
-802                return np.array([self * o for o in y])
-803            elif isinstance(y, complex):
-804                return CObs(self * y.real, self * y.imag)
-805            elif y.__class__.__name__ in ['Corr', 'CObs']:
-806                return NotImplemented
-807            else:
-808                return derived_observable(lambda x, **kwargs: x[0] * y, [self], man_grad=[y])
-809
-810    def __rmul__(self, y):
-811        return self * y
-812
-813    def __sub__(self, y):
-814        if isinstance(y, Obs):
-815            return derived_observable(lambda x, **kwargs: x[0] - x[1], [self, y], man_grad=[1, -1])
-816        else:
-817            if isinstance(y, np.ndarray):
-818                return np.array([self - o for o in y])
-819            elif y.__class__.__name__ in ['Corr', 'CObs']:
-820                return NotImplemented
-821            else:
-822                return derived_observable(lambda x, **kwargs: x[0] - y, [self], man_grad=[1])
-823
-824    def __rsub__(self, y):
-825        return -1 * (self - y)
-826
-827    def __pos__(self):
-828        return self
-829
-830    def __neg__(self):
-831        return -1 * self
-832
-833    def __truediv__(self, y):
-834        if isinstance(y, Obs):
-835            return derived_observable(lambda x, **kwargs: x[0] / x[1], [self, y], man_grad=[1 / y.value, - self.value / y.value ** 2])
-836        else:
-837            if isinstance(y, np.ndarray):
-838                return np.array([self / o for o in y])
-839            elif y.__class__.__name__ in ['Corr', 'CObs']:
-840                return NotImplemented
-841            else:
-842                return derived_observable(lambda x, **kwargs: x[0] / y, [self], man_grad=[1 / y])
-843
-844    def __rtruediv__(self, y):
-845        if isinstance(y, Obs):
-846            return derived_observable(lambda x, **kwargs: x[0] / x[1], [y, self], man_grad=[1 / self.value, - y.value / self.value ** 2])
-847        else:
-848            if isinstance(y, np.ndarray):
-849                return np.array([o / self for o in y])
-850            elif y.__class__.__name__ in ['Corr', 'CObs']:
-851                return NotImplemented
-852            else:
-853                return derived_observable(lambda x, **kwargs: y / x[0], [self], man_grad=[-y / self.value ** 2])
-854
-855    def __pow__(self, y):
-856        if isinstance(y, Obs):
-857            return derived_observable(lambda x: x[0] ** x[1], [self, y])
-858        else:
-859            return derived_observable(lambda x: x[0] ** y, [self])
-860
-861    def __rpow__(self, y):
-862        if isinstance(y, Obs):
-863            return derived_observable(lambda x: x[0] ** x[1], [y, self])
-864        else:
-865            return derived_observable(lambda x: y ** x[0], [self])
-866
-867    def __abs__(self):
-868        return derived_observable(lambda x: anp.abs(x[0]), [self])
-869
-870    # Overload numpy functions
-871    def sqrt(self):
-872        return derived_observable(lambda x, **kwargs: np.sqrt(x[0]), [self], man_grad=[1 / 2 / np.sqrt(self.value)])
-873
-874    def log(self):
-875        return derived_observable(lambda x, **kwargs: np.log(x[0]), [self], man_grad=[1 / self.value])
-876
-877    def exp(self):
-878        return derived_observable(lambda x, **kwargs: np.exp(x[0]), [self], man_grad=[np.exp(self.value)])
-879
-880    def sin(self):
-881        return derived_observable(lambda x, **kwargs: np.sin(x[0]), [self], man_grad=[np.cos(self.value)])
-882
-883    def cos(self):
-884        return derived_observable(lambda x, **kwargs: np.cos(x[0]), [self], man_grad=[-np.sin(self.value)])
-885
-886    def tan(self):
-887        return derived_observable(lambda x, **kwargs: np.tan(x[0]), [self], man_grad=[1 / np.cos(self.value) ** 2])
-888
-889    def arcsin(self):
-890        return derived_observable(lambda x: anp.arcsin(x[0]), [self])
-891
-892    def arccos(self):
-893        return derived_observable(lambda x: anp.arccos(x[0]), [self])
-894
-895    def arctan(self):
-896        return derived_observable(lambda x: anp.arctan(x[0]), [self])
-897
-898    def sinh(self):
-899        return derived_observable(lambda x, **kwargs: np.sinh(x[0]), [self], man_grad=[np.cosh(self.value)])
-900
-901    def cosh(self):
-902        return derived_observable(lambda x, **kwargs: np.cosh(x[0]), [self], man_grad=[np.sinh(self.value)])
-903
-904    def tanh(self):
-905        return derived_observable(lambda x, **kwargs: np.tanh(x[0]), [self], man_grad=[1 / np.cosh(self.value) ** 2])
-906
-907    def arcsinh(self):
-908        return derived_observable(lambda x: anp.arcsinh(x[0]), [self])
-909
-910    def arccosh(self):
-911        return derived_observable(lambda x: anp.arccosh(x[0]), [self])
-912
-913    def arctanh(self):
-914        return derived_observable(lambda x: anp.arctanh(x[0]), [self])
+777        if other is None:
+778            return False
+779        return (self - other).is_zero()
+780
+781    # Overload math operations
+782    def __add__(self, y):
+783        if isinstance(y, Obs):
+784            return derived_observable(lambda x, **kwargs: x[0] + x[1], [self, y], man_grad=[1, 1])
+785        else:
+786            if isinstance(y, np.ndarray):
+787                return np.array([self + o for o in y])
+788            elif y.__class__.__name__ in ['Corr', 'CObs']:
+789                return NotImplemented
+790            else:
+791                return derived_observable(lambda x, **kwargs: x[0] + y, [self], man_grad=[1])
+792
+793    def __radd__(self, y):
+794        return self + y
+795
+796    def __mul__(self, y):
+797        if isinstance(y, Obs):
+798            return derived_observable(lambda x, **kwargs: x[0] * x[1], [self, y], man_grad=[y.value, self.value])
+799        else:
+800            if isinstance(y, np.ndarray):
+801                return np.array([self * o for o in y])
+802            elif isinstance(y, complex):
+803                return CObs(self * y.real, self * y.imag)
+804            elif y.__class__.__name__ in ['Corr', 'CObs']:
+805                return NotImplemented
+806            else:
+807                return derived_observable(lambda x, **kwargs: x[0] * y, [self], man_grad=[y])
+808
+809    def __rmul__(self, y):
+810        return self * y
+811
+812    def __sub__(self, y):
+813        if isinstance(y, Obs):
+814            return derived_observable(lambda x, **kwargs: x[0] - x[1], [self, y], man_grad=[1, -1])
+815        else:
+816            if isinstance(y, np.ndarray):
+817                return np.array([self - o for o in y])
+818            elif y.__class__.__name__ in ['Corr', 'CObs']:
+819                return NotImplemented
+820            else:
+821                return derived_observable(lambda x, **kwargs: x[0] - y, [self], man_grad=[1])
+822
+823    def __rsub__(self, y):
+824        return -1 * (self - y)
+825
+826    def __pos__(self):
+827        return self
+828
+829    def __neg__(self):
+830        return -1 * self
+831
+832    def __truediv__(self, y):
+833        if isinstance(y, Obs):
+834            return derived_observable(lambda x, **kwargs: x[0] / x[1], [self, y], man_grad=[1 / y.value, - self.value / y.value ** 2])
+835        else:
+836            if isinstance(y, np.ndarray):
+837                return np.array([self / o for o in y])
+838            elif y.__class__.__name__ in ['Corr', 'CObs']:
+839                return NotImplemented
+840            else:
+841                return derived_observable(lambda x, **kwargs: x[0] / y, [self], man_grad=[1 / y])
+842
+843    def __rtruediv__(self, y):
+844        if isinstance(y, Obs):
+845            return derived_observable(lambda x, **kwargs: x[0] / x[1], [y, self], man_grad=[1 / self.value, - y.value / self.value ** 2])
+846        else:
+847            if isinstance(y, np.ndarray):
+848                return np.array([o / self for o in y])
+849            elif y.__class__.__name__ in ['Corr', 'CObs']:
+850                return NotImplemented
+851            else:
+852                return derived_observable(lambda x, **kwargs: y / x[0], [self], man_grad=[-y / self.value ** 2])
+853
+854    def __pow__(self, y):
+855        if isinstance(y, Obs):
+856            return derived_observable(lambda x: x[0] ** x[1], [self, y])
+857        else:
+858            return derived_observable(lambda x: x[0] ** y, [self])
+859
+860    def __rpow__(self, y):
+861        if isinstance(y, Obs):
+862            return derived_observable(lambda x: x[0] ** x[1], [y, self])
+863        else:
+864            return derived_observable(lambda x: y ** x[0], [self])
+865
+866    def __abs__(self):
+867        return derived_observable(lambda x: anp.abs(x[0]), [self])
+868
+869    # Overload numpy functions
+870    def sqrt(self):
+871        return derived_observable(lambda x, **kwargs: np.sqrt(x[0]), [self], man_grad=[1 / 2 / np.sqrt(self.value)])
+872
+873    def log(self):
+874        return derived_observable(lambda x, **kwargs: np.log(x[0]), [self], man_grad=[1 / self.value])
+875
+876    def exp(self):
+877        return derived_observable(lambda x, **kwargs: np.exp(x[0]), [self], man_grad=[np.exp(self.value)])
+878
+879    def sin(self):
+880        return derived_observable(lambda x, **kwargs: np.sin(x[0]), [self], man_grad=[np.cos(self.value)])
+881
+882    def cos(self):
+883        return derived_observable(lambda x, **kwargs: np.cos(x[0]), [self], man_grad=[-np.sin(self.value)])
+884
+885    def tan(self):
+886        return derived_observable(lambda x, **kwargs: np.tan(x[0]), [self], man_grad=[1 / np.cos(self.value) ** 2])
+887
+888    def arcsin(self):
+889        return derived_observable(lambda x: anp.arcsin(x[0]), [self])
+890
+891    def arccos(self):
+892        return derived_observable(lambda x: anp.arccos(x[0]), [self])
+893
+894    def arctan(self):
+895        return derived_observable(lambda x: anp.arctan(x[0]), [self])
+896
+897    def sinh(self):
+898        return derived_observable(lambda x, **kwargs: np.sinh(x[0]), [self], man_grad=[np.cosh(self.value)])
+899
+900    def cosh(self):
+901        return derived_observable(lambda x, **kwargs: np.cosh(x[0]), [self], man_grad=[np.sinh(self.value)])
+902
+903    def tanh(self):
+904        return derived_observable(lambda x, **kwargs: np.tanh(x[0]), [self], man_grad=[1 / np.cosh(self.value) ** 2])
+905
+906    def arcsinh(self):
+907        return derived_observable(lambda x: anp.arcsinh(x[0]), [self])
+908
+909    def arccosh(self):
+910        return derived_observable(lambda x: anp.arccosh(x[0]), [self])
+911
+912    def arctanh(self):
+913        return derived_observable(lambda x: anp.arctanh(x[0]), [self])
@@ -4479,8 +4477,8 @@ should agree with samples from a full bootstrap analysis up to O(1/N).
-
871    def sqrt(self):
-872        return derived_observable(lambda x, **kwargs: np.sqrt(x[0]), [self], man_grad=[1 / 2 / np.sqrt(self.value)])
+            
870    def sqrt(self):
+871        return derived_observable(lambda x, **kwargs: np.sqrt(x[0]), [self], man_grad=[1 / 2 / np.sqrt(self.value)])
 
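The behavioural change in the large hunk above: Obs.__eq__ now short-circuits on None and the explicit __ne__ is gone, so inequality falls back to Python's default negation of __eq__. A sketch (pseudo_Obs fabricates a test observable on an invented ensemble):

    import pyerrors as pe

    x = pe.pseudo_Obs(1.0, 0.1, "ens")
    print(x == None)   # False, instead of attempting (x - None)
    print(x != None)   # True, via the implicit negation of __eq__
    print(x == x)      # True, x - x is zero within machine precision
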
@@ -4498,8 +4496,8 @@ should agree with samples from a full bootstrap analysis up to O(1/N).
-
874    def log(self):
-875        return derived_observable(lambda x, **kwargs: np.log(x[0]), [self], man_grad=[1 / self.value])
+            
873    def log(self):
+874        return derived_observable(lambda x, **kwargs: np.log(x[0]), [self], man_grad=[1 / self.value])
 
@@ -4517,8 +4515,8 @@ should agree with samples from a full bootstrap analysis up to O(1/N).
-
877    def exp(self):
-878        return derived_observable(lambda x, **kwargs: np.exp(x[0]), [self], man_grad=[np.exp(self.value)])
+            
876    def exp(self):
+877        return derived_observable(lambda x, **kwargs: np.exp(x[0]), [self], man_grad=[np.exp(self.value)])
 
@@ -4536,8 +4534,8 @@ should agree with samples from a full bootstrap analysis up to O(1/N).
-
880    def sin(self):
-881        return derived_observable(lambda x, **kwargs: np.sin(x[0]), [self], man_grad=[np.cos(self.value)])
+            
879    def sin(self):
+880        return derived_observable(lambda x, **kwargs: np.sin(x[0]), [self], man_grad=[np.cos(self.value)])
 
@@ -4555,8 +4553,8 @@ should agree with samples from a full bootstrap analysis up to O(1/N).
-
883    def cos(self):
-884        return derived_observable(lambda x, **kwargs: np.cos(x[0]), [self], man_grad=[-np.sin(self.value)])
+            
882    def cos(self):
+883        return derived_observable(lambda x, **kwargs: np.cos(x[0]), [self], man_grad=[-np.sin(self.value)])
 
@@ -4574,8 +4572,8 @@ should agree with samples from a full bootstrap analysis up to O(1/N).
-
886    def tan(self):
-887        return derived_observable(lambda x, **kwargs: np.tan(x[0]), [self], man_grad=[1 / np.cos(self.value) ** 2])
+            
885    def tan(self):
+886        return derived_observable(lambda x, **kwargs: np.tan(x[0]), [self], man_grad=[1 / np.cos(self.value) ** 2])
 
@@ -4593,8 +4591,8 @@ should agree with samples from a full bootstrap analysis up to O(1/N).
-
889    def arcsin(self):
-890        return derived_observable(lambda x: anp.arcsin(x[0]), [self])
+            
888    def arcsin(self):
+889        return derived_observable(lambda x: anp.arcsin(x[0]), [self])
 
@@ -4612,8 +4610,8 @@ should agree with samples from a full bootstrap analysis up to O(1/N).
-
892    def arccos(self):
-893        return derived_observable(lambda x: anp.arccos(x[0]), [self])
+            
891    def arccos(self):
+892        return derived_observable(lambda x: anp.arccos(x[0]), [self])
 
@@ -4631,8 +4629,8 @@ should agree with samples from a full bootstrap analysis up to O(1/N).
-
895    def arctan(self):
-896        return derived_observable(lambda x: anp.arctan(x[0]), [self])
+            
894    def arctan(self):
+895        return derived_observable(lambda x: anp.arctan(x[0]), [self])
 
@@ -4650,8 +4648,8 @@ should agree with samples from a full bootstrap analysis up to O(1/N).
-
898    def sinh(self):
-899        return derived_observable(lambda x, **kwargs: np.sinh(x[0]), [self], man_grad=[np.cosh(self.value)])
+            
897    def sinh(self):
+898        return derived_observable(lambda x, **kwargs: np.sinh(x[0]), [self], man_grad=[np.cosh(self.value)])
 
@@ -4669,8 +4667,8 @@ should agree with samples from a full bootstrap analysis up to O(1/N).
-
901    def cosh(self):
-902        return derived_observable(lambda x, **kwargs: np.cosh(x[0]), [self], man_grad=[np.sinh(self.value)])
+            
900    def cosh(self):
+901        return derived_observable(lambda x, **kwargs: np.cosh(x[0]), [self], man_grad=[np.sinh(self.value)])
 
@@ -4688,8 +4686,8 @@ should agree with samples from a full bootstrap analysis up to O(1/N).
-
904    def tanh(self):
-905        return derived_observable(lambda x, **kwargs: np.tanh(x[0]), [self], man_grad=[1 / np.cosh(self.value) ** 2])
+            
903    def tanh(self):
+904        return derived_observable(lambda x, **kwargs: np.tanh(x[0]), [self], man_grad=[1 / np.cosh(self.value) ** 2])
 
@@ -4707,8 +4705,8 @@ should agree with samples from a full bootstrap analysis up to O(1/N).
-
907    def arcsinh(self):
-908        return derived_observable(lambda x: anp.arcsinh(x[0]), [self])
+            
906    def arcsinh(self):
+907        return derived_observable(lambda x: anp.arcsinh(x[0]), [self])
 
@@ -4726,8 +4724,8 @@ should agree with samples from a full bootstrap analysis up to O(1/N).
-
910    def arccosh(self):
-911        return derived_observable(lambda x: anp.arccosh(x[0]), [self])
+            
909    def arccosh(self):
+910        return derived_observable(lambda x: anp.arccosh(x[0]), [self])
 
@@ -4745,8 +4743,8 @@ should agree with samples from a full bootstrap analysis up to O(1/N).
-
913    def arctanh(self):
-914        return derived_observable(lambda x: anp.arctanh(x[0]), [self])
+            
912    def arctanh(self):
+913        return derived_observable(lambda x: anp.arctanh(x[0]), [self])
 
@@ -4897,123 +4895,123 @@ should agree with samples from a full bootstrap analysis up to O(1/N).
-
 917class CObs:
- 918    """Class for a complex valued observable."""
- 919    __slots__ = ['_real', '_imag', 'tag']
- 920
- 921    def __init__(self, real, imag=0.0):
- 922        self._real = real
- 923        self._imag = imag
- 924        self.tag = None
- 925
- 926    @property
- 927    def real(self):
- 928        return self._real
- 929
- 930    @property
- 931    def imag(self):
- 932        return self._imag
- 933
- 934    def gamma_method(self, **kwargs):
- 935        """Executes the gamma_method for the real and the imaginary part."""
- 936        if isinstance(self.real, Obs):
- 937            self.real.gamma_method(**kwargs)
- 938        if isinstance(self.imag, Obs):
- 939            self.imag.gamma_method(**kwargs)
- 940
- 941    def is_zero(self):
- 942        """Checks whether both real and imaginary part are zero within machine precision."""
- 943        return self.real == 0.0 and self.imag == 0.0
- 944
- 945    def conjugate(self):
- 946        return CObs(self.real, -self.imag)
- 947
- 948    def __add__(self, other):
- 949        if isinstance(other, np.ndarray):
- 950            return other + self
- 951        elif hasattr(other, 'real') and hasattr(other, 'imag'):
- 952            return CObs(self.real + other.real,
- 953                        self.imag + other.imag)
- 954        else:
- 955            return CObs(self.real + other, self.imag)
- 956
- 957    def __radd__(self, y):
- 958        return self + y
- 959
- 960    def __sub__(self, other):
- 961        if isinstance(other, np.ndarray):
- 962            return -1 * (other - self)
- 963        elif hasattr(other, 'real') and hasattr(other, 'imag'):
- 964            return CObs(self.real - other.real, self.imag - other.imag)
- 965        else:
- 966            return CObs(self.real - other, self.imag)
- 967
- 968    def __rsub__(self, other):
- 969        return -1 * (self - other)
- 970
- 971    def __mul__(self, other):
- 972        if isinstance(other, np.ndarray):
- 973            return other * self
- 974        elif hasattr(other, 'real') and hasattr(other, 'imag'):
- 975            if all(isinstance(i, Obs) for i in [self.real, self.imag, other.real, other.imag]):
- 976                return CObs(derived_observable(lambda x, **kwargs: x[0] * x[1] - x[2] * x[3],
- 977                                               [self.real, other.real, self.imag, other.imag],
- 978                                               man_grad=[other.real.value, self.real.value, -other.imag.value, -self.imag.value]),
- 979                            derived_observable(lambda x, **kwargs: x[2] * x[1] + x[0] * x[3],
- 980                                               [self.real, other.real, self.imag, other.imag],
- 981                                               man_grad=[other.imag.value, self.imag.value, other.real.value, self.real.value]))
- 982            elif getattr(other, 'imag', 0) != 0:
- 983                return CObs(self.real * other.real - self.imag * other.imag,
- 984                            self.imag * other.real + self.real * other.imag)
- 985            else:
- 986                return CObs(self.real * other.real, self.imag * other.real)
- 987        else:
- 988            return CObs(self.real * other, self.imag * other)
- 989
- 990    def __rmul__(self, other):
- 991        return self * other
- 992
- 993    def __truediv__(self, other):
- 994        if isinstance(other, np.ndarray):
- 995            return 1 / (other / self)
- 996        elif hasattr(other, 'real') and hasattr(other, 'imag'):
- 997            r = other.real ** 2 + other.imag ** 2
- 998            return CObs((self.real * other.real + self.imag * other.imag) / r, (self.imag * other.real - self.real * other.imag) / r)
- 999        else:
-1000            return CObs(self.real / other, self.imag / other)
-1001
-1002    def __rtruediv__(self, other):
-1003        r = self.real ** 2 + self.imag ** 2
-1004        if hasattr(other, 'real') and hasattr(other, 'imag'):
-1005            return CObs((self.real * other.real + self.imag * other.imag) / r, (self.real * other.imag - self.imag * other.real) / r)
-1006        else:
-1007            return CObs(self.real * other / r, -self.imag * other / r)
-1008
-1009    def __abs__(self):
-1010        return np.sqrt(self.real**2 + self.imag**2)
-1011
-1012    def __pos__(self):
-1013        return self
-1014
-1015    def __neg__(self):
-1016        return -1 * self
-1017
-1018    def __eq__(self, other):
-1019        return self.real == other.real and self.imag == other.imag
-1020
-1021    def __str__(self):
-1022        return '(' + str(self.real) + int(self.imag >= 0.0) * '+' + str(self.imag) + 'j)'
-1023
-1024    def __repr__(self):
-1025        return 'CObs[' + str(self) + ']'
-1026
-1027    def __format__(self, format_type):
-1028        if format_type == "":
-1029            significance = 2
-1030            format_type = "2"
-1031        else:
-1032            significance = int(float(format_type.replace("+", "").replace("-", "")))
-1033        return f"({self.real:{format_type}}{self.imag:+{significance}}j)"
+            
 916class CObs:
+ 917    """Class for a complex valued observable."""
+ 918    __slots__ = ['_real', '_imag', 'tag']
+ 919
+ 920    def __init__(self, real, imag=0.0):
+ 921        self._real = real
+ 922        self._imag = imag
+ 923        self.tag = None
+ 924
+ 925    @property
+ 926    def real(self):
+ 927        return self._real
+ 928
+ 929    @property
+ 930    def imag(self):
+ 931        return self._imag
+ 932
+ 933    def gamma_method(self, **kwargs):
+ 934        """Executes the gamma_method for the real and the imaginary part."""
+ 935        if isinstance(self.real, Obs):
+ 936            self.real.gamma_method(**kwargs)
+ 937        if isinstance(self.imag, Obs):
+ 938            self.imag.gamma_method(**kwargs)
+ 939
+ 940    def is_zero(self):
+ 941        """Checks whether both real and imaginary part are zero within machine precision."""
+ 942        return self.real == 0.0 and self.imag == 0.0
+ 943
+ 944    def conjugate(self):
+ 945        return CObs(self.real, -self.imag)
+ 946
+ 947    def __add__(self, other):
+ 948        if isinstance(other, np.ndarray):
+ 949            return other + self
+ 950        elif hasattr(other, 'real') and hasattr(other, 'imag'):
+ 951            return CObs(self.real + other.real,
+ 952                        self.imag + other.imag)
+ 953        else:
+ 954            return CObs(self.real + other, self.imag)
+ 955
+ 956    def __radd__(self, y):
+ 957        return self + y
+ 958
+ 959    def __sub__(self, other):
+ 960        if isinstance(other, np.ndarray):
+ 961            return -1 * (other - self)
+ 962        elif hasattr(other, 'real') and hasattr(other, 'imag'):
+ 963            return CObs(self.real - other.real, self.imag - other.imag)
+ 964        else:
+ 965            return CObs(self.real - other, self.imag)
+ 966
+ 967    def __rsub__(self, other):
+ 968        return -1 * (self - other)
+ 969
+ 970    def __mul__(self, other):
+ 971        if isinstance(other, np.ndarray):
+ 972            return other * self
+ 973        elif hasattr(other, 'real') and hasattr(other, 'imag'):
+ 974            if all(isinstance(i, Obs) for i in [self.real, self.imag, other.real, other.imag]):
+ 975                return CObs(derived_observable(lambda x, **kwargs: x[0] * x[1] - x[2] * x[3],
+ 976                                               [self.real, other.real, self.imag, other.imag],
+ 977                                               man_grad=[other.real.value, self.real.value, -other.imag.value, -self.imag.value]),
+ 978                            derived_observable(lambda x, **kwargs: x[2] * x[1] + x[0] * x[3],
+ 979                                               [self.real, other.real, self.imag, other.imag],
+ 980                                               man_grad=[other.imag.value, self.imag.value, other.real.value, self.real.value]))
+ 981            elif getattr(other, 'imag', 0) != 0:
+ 982                return CObs(self.real * other.real - self.imag * other.imag,
+ 983                            self.imag * other.real + self.real * other.imag)
+ 984            else:
+ 985                return CObs(self.real * other.real, self.imag * other.real)
+ 986        else:
+ 987            return CObs(self.real * other, self.imag * other)
+ 988
+ 989    def __rmul__(self, other):
+ 990        return self * other
+ 991
+ 992    def __truediv__(self, other):
+ 993        if isinstance(other, np.ndarray):
+ 994            return 1 / (other / self)
+ 995        elif hasattr(other, 'real') and hasattr(other, 'imag'):
+ 996            r = other.real ** 2 + other.imag ** 2
+ 997            return CObs((self.real * other.real + self.imag * other.imag) / r, (self.imag * other.real - self.real * other.imag) / r)
+ 998        else:
+ 999            return CObs(self.real / other, self.imag / other)
+1000
+1001    def __rtruediv__(self, other):
+1002        r = self.real ** 2 + self.imag ** 2
+1003        if hasattr(other, 'real') and hasattr(other, 'imag'):
+1004            return CObs((self.real * other.real + self.imag * other.imag) / r, (self.real * other.imag - self.imag * other.real) / r)
+1005        else:
+1006            return CObs(self.real * other / r, -self.imag * other / r)
+1007
+1008    def __abs__(self):
+1009        return np.sqrt(self.real**2 + self.imag**2)
+1010
+1011    def __pos__(self):
+1012        return self
+1013
+1014    def __neg__(self):
+1015        return -1 * self
+1016
+1017    def __eq__(self, other):
+1018        return self.real == other.real and self.imag == other.imag
+1019
+1020    def __str__(self):
+1021        return '(' + str(self.real) + int(self.imag >= 0.0) * '+' + str(self.imag) + 'j)'
+1022
+1023    def __repr__(self):
+1024        return 'CObs[' + str(self) + ']'
+1025
+1026    def __format__(self, format_type):
+1027        if format_type == "":
+1028            significance = 2
+1029            format_type = "2"
+1030        else:
+1031            significance = int(float(format_type.replace("+", "").replace("-", "")))
+1032        return f"({self.real:{format_type}}{self.imag:+{significance}}j)"
 
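A short usage sketch for CObs, combining two real Obs into one complex observable (values and ensemble name invented):

    import pyerrors as pe

    re_part = pe.pseudo_Obs(0.8, 0.03, "ens")
    im_part = pe.pseudo_Obs(0.2, 0.05, "ens")
    z = pe.CObs(re_part, im_part)

    w = z * z.conjugate()      # |z|^2; imaginary part compatible with zero
    w.gamma_method()
    print(w)
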
@@ -5031,10 +5029,10 @@ should agree with samples from a full bootstrap analysis up to O(1/N).
-
921    def __init__(self, real, imag=0.0):
-922        self._real = real
-923        self._imag = imag
-924        self.tag = None
+            
920    def __init__(self, real, imag=0.0):
+921        self._real = real
+922        self._imag = imag
+923        self.tag = None
 
@@ -5085,12 +5083,12 @@ should agree with samples from a full bootstrap analysis up to O(1/N).
-
934    def gamma_method(self, **kwargs):
-935        """Executes the gamma_method for the real and the imaginary part."""
-936        if isinstance(self.real, Obs):
-937            self.real.gamma_method(**kwargs)
-938        if isinstance(self.imag, Obs):
-939            self.imag.gamma_method(**kwargs)
+            
933    def gamma_method(self, **kwargs):
+934        """Executes the gamma_method for the real and the imaginary part."""
+935        if isinstance(self.real, Obs):
+936            self.real.gamma_method(**kwargs)
+937        if isinstance(self.imag, Obs):
+938            self.imag.gamma_method(**kwargs)
 
@@ -5110,9 +5108,9 @@ should agree with samples from a full bootstrap analysis up to O(1/N).
-
941    def is_zero(self):
-942        """Checks whether both real and imaginary part are zero within machine precision."""
-943        return self.real == 0.0 and self.imag == 0.0
+            
940    def is_zero(self):
+941        """Checks whether both real and imaginary part are zero within machine precision."""
+942        return self.real == 0.0 and self.imag == 0.0
 
@@ -5132,8 +5130,8 @@ should agree with samples from a full bootstrap analysis up to O(1/N).
-
945    def conjugate(self):
-946        return CObs(self.real, -self.imag)
+            
944    def conjugate(self):
+945        return CObs(self.real, -self.imag)
 
@@ -5152,12 +5150,12 @@ should agree with samples from a full bootstrap analysis up to O(1/N).
-
1036def gamma_method(x, **kwargs):
-1037    """Vectorized version of the gamma_method applicable to lists or arrays of Obs.
-1038
-1039    See docstring of pe.Obs.gamma_method for details.
-1040    """
-1041    return np.vectorize(lambda o: o.gm(**kwargs))(x)
+            
1035def gamma_method(x, **kwargs):
+1036    """Vectorized version of the gamma_method applicable to lists or arrays of Obs.
+1037
+1038    See docstring of pe.Obs.gamma_method for details.
+1039    """
+1040    return np.vectorize(lambda o: o.gm(**kwargs))(x)
 
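A sketch of the vectorized wrapper (gm below is its short alias): it runs the error analysis in place on every Obs inside a container:

    import numpy as np
    import pyerrors as pe

    arr = np.array([pe.pseudo_Obs(v, 0.1, "ens") for v in (0.5, 1.0, 1.5)])
    pe.gamma_method(arr)     # equivalently: pe.gm(arr)
    print(arr[0].dvalue)
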
@@ -5179,12 +5177,12 @@ should agree with samples from a full bootstrap analysis up to O(1/N).
-
1044def gm(x, **kwargs):
-1045    """Short version of the vectorized gamma_method.
-1046
-1047    See docstring of pe.Obs.gamma_method for details
-1048    """
-1049    return gamma_method(x, **kwargs)
+            
1043def gm(x, **kwargs):
+1044    """Short version of the vectorized gamma_method.
+1045
+1046    See docstring of pe.Obs.gamma_method for details
+1047    """
+1048    return gamma_method(x, **kwargs)
 
@@ -5206,174 +5204,174 @@ should agree with samples from a full bootstrap analysis up to O(1/N).
-
1171def derived_observable(func, data, array_mode=False, **kwargs):
-1172    """Construct a derived Obs according to func(data, **kwargs) using automatic differentiation.
-1173
-1174    Parameters
-1175    ----------
-1176    func : object
-1177        arbitrary function of the form func(data, **kwargs). For the
-1178        automatic differentiation to work, all numpy functions have to have
-1179        the autograd wrapper (use 'import autograd.numpy as anp').
-1180    data : list
-1181        list of Obs, e.g. [obs1, obs2, obs3].
-1182    num_grad : bool
-1183        if True, numerical derivatives are used instead of autograd
-1184        (default False). To control the numerical differentiation the
-1185        kwargs of numdifftools.step_generators.MaxStepGenerator
-1186        can be used.
-1187    man_grad : list
-1188        manually supply a list or an array which contains the jacobian
-1189        of func. Use cautiously, supplying the wrong derivative will
-1190        not be intercepted.
-1191
-1192    Notes
-1193    -----
-1194    For simple mathematical operations it can be practical to use anonymous
-1195    functions. For the ratio of two observables one can e.g. use
-1196
-1197    new_obs = derived_observable(lambda x: x[0] / x[1], [obs1, obs2])
-1198    """
-1199
-1200    data = np.asarray(data)
-1201    raveled_data = data.ravel()
-1202
-1203    # Workaround for matrix operations containing non Obs data
-1204    if not all(isinstance(x, Obs) for x in raveled_data):
-1205        for i in range(len(raveled_data)):
-1206            if isinstance(raveled_data[i], (int, float)):
-1207                raveled_data[i] = cov_Obs(raveled_data[i], 0.0, "###dummy_covobs###")
-1208
-1209    allcov = {}
-1210    for o in raveled_data:
-1211        for name in o.cov_names:
-1212            if name in allcov:
-1213                if not np.allclose(allcov[name], o.covobs[name].cov):
-1214                    raise Exception('Inconsistent covariance matrices for %s!' % (name))
-1215            else:
-1216                allcov[name] = o.covobs[name].cov
-1217
-1218    n_obs = len(raveled_data)
-1219    new_names = sorted(set([y for x in [o.names for o in raveled_data] for y in x]))
-1220    new_cov_names = sorted(set([y for x in [o.cov_names for o in raveled_data] for y in x]))
-1221    new_sample_names = sorted(set(new_names) - set(new_cov_names))
-1222
-1223    reweighted = len(list(filter(lambda o: o.reweighted is True, raveled_data))) > 0
-1224
-1225    if data.ndim == 1:
-1226        values = np.array([o.value for o in data])
-1227    else:
-1228        values = np.vectorize(lambda x: x.value)(data)
-1229
-1230    new_values = func(values, **kwargs)
-1231
-1232    multi = int(isinstance(new_values, np.ndarray))
-1233
-1234    new_r_values = {}
-1235    new_idl_d = {}
-1236    for name in new_sample_names:
-1237        idl = []
-1238        tmp_values = np.zeros(n_obs)
-1239        for i, item in enumerate(raveled_data):
-1240            tmp_values[i] = item.r_values.get(name, item.value)
-1241            tmp_idl = item.idl.get(name)
-1242            if tmp_idl is not None:
-1243                idl.append(tmp_idl)
-1244        if multi > 0:
-1245            tmp_values = np.array(tmp_values).reshape(data.shape)
-1246        new_r_values[name] = func(tmp_values, **kwargs)
-1247        new_idl_d[name] = _merge_idx(idl)
-1248
-1249    if 'man_grad' in kwargs:
-1250        deriv = np.asarray(kwargs.get('man_grad'))
-1251        if new_values.shape + data.shape != deriv.shape:
-1252            raise Exception('Manual derivative does not have correct shape.')
-1253    elif kwargs.get('num_grad') is True:
-1254        if multi > 0:
-1255            raise Exception('Multi mode currently not supported for numerical derivative')
-1256        options = {
-1257            'base_step': 0.1,
-1258            'step_ratio': 2.5}
-1259        for key in options.keys():
-1260            kwarg = kwargs.get(key)
-1261            if kwarg is not None:
-1262                options[key] = kwarg
-1263        tmp_df = nd.Gradient(func, order=4, **{k: v for k, v in options.items() if v is not None})(values, **kwargs)
-1264        if tmp_df.size == 1:
-1265            deriv = np.array([tmp_df.real])
-1266        else:
-1267            deriv = tmp_df.real
-1268    else:
-1269        deriv = jacobian(func)(values, **kwargs)
-1270
-1271    final_result = np.zeros(new_values.shape, dtype=object)
-1272
-1273    if array_mode is True:
-1274
-1275        class _Zero_grad():
-1276            def __init__(self, N):
-1277                self.grad = np.zeros((N, 1))
-1278
-1279        new_covobs_lengths = dict(set([y for x in [[(n, o.covobs[n].N) for n in o.cov_names] for o in raveled_data] for y in x]))
-1280        d_extracted = {}
-1281        g_extracted = {}
-1282        for name in new_sample_names:
-1283            d_extracted[name] = []
-1284            ens_length = len(new_idl_d[name])
-1285            for i_dat, dat in enumerate(data):
-1286                d_extracted[name].append(np.array([_expand_deltas_for_merge(o.deltas.get(name, np.zeros(ens_length)), o.idl.get(name, new_idl_d[name]), o.shape.get(name, ens_length), new_idl_d[name]) for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (ens_length, )))
-1287        for name in new_cov_names:
-1288            g_extracted[name] = []
-1289            zero_grad = _Zero_grad(new_covobs_lengths[name])
-1290            for i_dat, dat in enumerate(data):
-1291                g_extracted[name].append(np.array([o.covobs.get(name, zero_grad).grad for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (new_covobs_lengths[name], 1)))
-1292
-1293    for i_val, new_val in np.ndenumerate(new_values):
-1294        new_deltas = {}
-1295        new_grad = {}
-1296        if array_mode is True:
-1297            for name in new_sample_names:
-1298                ens_length = d_extracted[name][0].shape[-1]
-1299                new_deltas[name] = np.zeros(ens_length)
-1300                for i_dat, dat in enumerate(d_extracted[name]):
-1301                    new_deltas[name] += np.tensordot(deriv[i_val + (i_dat, )], dat)
-1302            for name in new_cov_names:
-1303                new_grad[name] = 0
-1304                for i_dat, dat in enumerate(g_extracted[name]):
-1305                    new_grad[name] += np.tensordot(deriv[i_val + (i_dat, )], dat)
-1306        else:
-1307            for j_obs, obs in np.ndenumerate(data):
-1308                for name in obs.names:
-1309                    if name in obs.cov_names:
-1310                        new_grad[name] = new_grad.get(name, 0) + deriv[i_val + j_obs] * obs.covobs[name].grad
-1311                    else:
-1312                        new_deltas[name] = new_deltas.get(name, 0) + deriv[i_val + j_obs] * _expand_deltas_for_merge(obs.deltas[name], obs.idl[name], obs.shape[name], new_idl_d[name])
-1313
-1314        new_covobs = {name: Covobs(0, allcov[name], name, grad=new_grad[name]) for name in new_grad}
-1315
-1316        if not set(new_covobs.keys()).isdisjoint(new_deltas.keys()):
-1317            raise Exception('The same name has been used for deltas and covobs!')
-1318        new_samples = []
-1319        new_means = []
-1320        new_idl = []
-1321        new_names_obs = []
-1322        for name in new_names:
-1323            if name not in new_covobs:
-1324                new_samples.append(new_deltas[name])
-1325                new_idl.append(new_idl_d[name])
-1326                new_means.append(new_r_values[name][i_val])
-1327                new_names_obs.append(name)
-1328        final_result[i_val] = Obs(new_samples, new_names_obs, means=new_means, idl=new_idl)
-1329        for name in new_covobs:
-1330            final_result[i_val].names.append(name)
-1331        final_result[i_val]._covobs = new_covobs
-1332        final_result[i_val]._value = new_val
-1333        final_result[i_val].reweighted = reweighted
-1334
-1335    if multi == 0:
-1336        final_result = final_result.item()
-1337
-1338    return final_result
+            
1170def derived_observable(func, data, array_mode=False, **kwargs):
+1171    """Construct a derived Obs according to func(data, **kwargs) using automatic differentiation.
+1172
+1173    Parameters
+1174    ----------
+1175    func : object
+1176        arbitrary function of the form func(data, **kwargs). For the
+1177        automatic differentiation to work, all numpy functions have to have
+1178        the autograd wrapper (use 'import autograd.numpy as anp').
+1179    data : list
+1180        list of Obs, e.g. [obs1, obs2, obs3].
+1181    num_grad : bool
+1182        if True, numerical derivatives are used instead of autograd
+1183        (default False). To control the numerical differentiation, the
+1184        kwargs of numdifftools.step_generators.MaxStepGenerator
+1185        can be used.
+1186    man_grad : list
+1187        manually supply a list or an array which contains the Jacobian
+1188        of func. Use with caution; supplying the wrong derivative will
+1189        not be intercepted.
+1190
+1191    Notes
+1192    -----
+1193    For simple mathematical operations it can be practical to use anonymous
+1194    functions. For the ratio of two observables one can e.g. use
+1195
+1196    new_obs = derived_observable(lambda x: x[0] / x[1], [obs1, obs2])
+1197    """
+1198
+1199    data = np.asarray(data)
+1200    raveled_data = data.ravel()
+1201
+1202    # Workaround for matrix operations containing non-Obs data
+1203    if not all(isinstance(x, Obs) for x in raveled_data):
+1204        for i in range(len(raveled_data)):
+1205            if isinstance(raveled_data[i], (int, float)):
+1206                raveled_data[i] = cov_Obs(raveled_data[i], 0.0, "###dummy_covobs###")
+1207
+1208    allcov = {}
+1209    for o in raveled_data:
+1210        for name in o.cov_names:
+1211            if name in allcov:
+1212                if not np.allclose(allcov[name], o.covobs[name].cov):
+1213                    raise Exception('Inconsistent covariance matrices for %s!' % (name))
+1214            else:
+1215                allcov[name] = o.covobs[name].cov
+1216
+1217    n_obs = len(raveled_data)
+1218    new_names = sorted(set([y for x in [o.names for o in raveled_data] for y in x]))
+1219    new_cov_names = sorted(set([y for x in [o.cov_names for o in raveled_data] for y in x]))
+1220    new_sample_names = sorted(set(new_names) - set(new_cov_names))
+1221
+1222    reweighted = len(list(filter(lambda o: o.reweighted is True, raveled_data))) > 0
+1223
+1224    if data.ndim == 1:
+1225        values = np.array([o.value for o in data])
+1226    else:
+1227        values = np.vectorize(lambda x: x.value)(data)
+1228
+1229    new_values = func(values, **kwargs)
+1230
+1231    multi = int(isinstance(new_values, np.ndarray))
+1232
+1233    new_r_values = {}
+1234    new_idl_d = {}
+1235    for name in new_sample_names:
+1236        idl = []
+1237        tmp_values = np.zeros(n_obs)
+1238        for i, item in enumerate(raveled_data):
+1239            tmp_values[i] = item.r_values.get(name, item.value)
+1240            tmp_idl = item.idl.get(name)
+1241            if tmp_idl is not None:
+1242                idl.append(tmp_idl)
+1243        if multi > 0:
+1244            tmp_values = np.array(tmp_values).reshape(data.shape)
+1245        new_r_values[name] = func(tmp_values, **kwargs)
+1246        new_idl_d[name] = _merge_idx(idl)
+1247
+1248    if 'man_grad' in kwargs:
+1249        deriv = np.asarray(kwargs.get('man_grad'))
+1250        if new_values.shape + data.shape != deriv.shape:
+1251            raise Exception('Manual derivative does not have the correct shape.')
+1252    elif kwargs.get('num_grad') is True:
+1253        if multi > 0:
+1254            raise Exception('Multi mode currently not supported for numerical derivative')
+1255        options = {
+1256            'base_step': 0.1,
+1257            'step_ratio': 2.5}
+1258        for key in options.keys():
+1259            kwarg = kwargs.get(key)
+1260            if kwarg is not None:
+1261                options[key] = kwarg
+1262        tmp_df = nd.Gradient(func, order=4, **{k: v for k, v in options.items() if v is not None})(values, **kwargs)
+1263        if tmp_df.size == 1:
+1264            deriv = np.array([tmp_df.real])
+1265        else:
+1266            deriv = tmp_df.real
+1267    else:
+1268        deriv = jacobian(func)(values, **kwargs)
+1269
+1270    final_result = np.zeros(new_values.shape, dtype=object)
+1271
+1272    if array_mode is True:
+1273
+1274        class _Zero_grad():
+1275            def __init__(self, N):
+1276                self.grad = np.zeros((N, 1))
+1277
+1278        new_covobs_lengths = dict(set([y for x in [[(n, o.covobs[n].N) for n in o.cov_names] for o in raveled_data] for y in x]))
+1279        d_extracted = {}
+1280        g_extracted = {}
+1281        for name in new_sample_names:
+1282            d_extracted[name] = []
+1283            ens_length = len(new_idl_d[name])
+1284            for i_dat, dat in enumerate(data):
+1285                d_extracted[name].append(np.array([_expand_deltas_for_merge(o.deltas.get(name, np.zeros(ens_length)), o.idl.get(name, new_idl_d[name]), o.shape.get(name, ens_length), new_idl_d[name]) for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (ens_length, )))
+1286        for name in new_cov_names:
+1287            g_extracted[name] = []
+1288            zero_grad = _Zero_grad(new_covobs_lengths[name])
+1289            for i_dat, dat in enumerate(data):
+1290                g_extracted[name].append(np.array([o.covobs.get(name, zero_grad).grad for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (new_covobs_lengths[name], 1)))
+1291
+1292    for i_val, new_val in np.ndenumerate(new_values):
+1293        new_deltas = {}
+1294        new_grad = {}
+1295        if array_mode is True:
+1296            for name in new_sample_names:
+1297                ens_length = d_extracted[name][0].shape[-1]
+1298                new_deltas[name] = np.zeros(ens_length)
+1299                for i_dat, dat in enumerate(d_extracted[name]):
+1300                    new_deltas[name] += np.tensordot(deriv[i_val + (i_dat, )], dat)
+1301            for name in new_cov_names:
+1302                new_grad[name] = 0
+1303                for i_dat, dat in enumerate(g_extracted[name]):
+1304                    new_grad[name] += np.tensordot(deriv[i_val + (i_dat, )], dat)
+1305        else:
+1306            for j_obs, obs in np.ndenumerate(data):
+1307                for name in obs.names:
+1308                    if name in obs.cov_names:
+1309                        new_grad[name] = new_grad.get(name, 0) + deriv[i_val + j_obs] * obs.covobs[name].grad
+1310                    else:
+1311                        new_deltas[name] = new_deltas.get(name, 0) + deriv[i_val + j_obs] * _expand_deltas_for_merge(obs.deltas[name], obs.idl[name], obs.shape[name], new_idl_d[name])
+1312
+1313        new_covobs = {name: Covobs(0, allcov[name], name, grad=new_grad[name]) for name in new_grad}
+1314
+1315        if not set(new_covobs.keys()).isdisjoint(new_deltas.keys()):
+1316            raise Exception('The same name has been used for deltas and covobs!')
+1317        new_samples = []
+1318        new_means = []
+1319        new_idl = []
+1320        new_names_obs = []
+1321        for name in new_names:
+1322            if name not in new_covobs:
+1323                new_samples.append(new_deltas[name])
+1324                new_idl.append(new_idl_d[name])
+1325                new_means.append(new_r_values[name][i_val])
+1326                new_names_obs.append(name)
+1327        final_result[i_val] = Obs(new_samples, new_names_obs, means=new_means, idl=new_idl)
+1328        for name in new_covobs:
+1329            final_result[i_val].names.append(name)
+1330        final_result[i_val]._covobs = new_covobs
+1331        final_result[i_val]._value = new_val
+1332        final_result[i_val].reweighted = reweighted
+1333
+1334    if multi == 0:
+1335        final_result = final_result.item()
+1336
+1337    return final_result
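
A minimal usage sketch of derived_observable (hedged: it assumes the package is importable as 'pyerrors' with derived_observable re-exported at the top level, and that all math inside func uses autograd's numpy wrapper, as required by the docstring):

    import autograd.numpy as anp
    import numpy as np
    import pyerrors as pe

    rng = np.random.default_rng(0)
    # Two synthetic observables on the same (illustrative) ensemble
    obs1 = pe.Obs([rng.normal(1.0, 0.1, 500)], ['ensemble1'])
    obs2 = pe.Obs([rng.normal(2.0, 0.1, 500)], ['ensemble1'])

    # numpy calls inside func must come from autograd.numpy
    log_ratio = pe.derived_observable(lambda x: anp.log(x[0] / x[1]), [obs1, obs2])
    log_ratio.gamma_method()  # error estimation for the derived quantity
    print(log_ratio)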
 
@@ -5420,46 +5418,46 @@ functions. For the ratio of two observables one can e.g. use

-
1370def reweight(weight, obs, **kwargs):
-1371    """Reweight a list of observables.
-1372
-1373    Parameters
-1374    ----------
-1375    weight : Obs
-1376        Reweighting factor. An Observable that has to be defined on a superset of the
-1377        configurations in obs[i].idl for all i.
-1378    obs : list
-1379        list of Obs, e.g. [obs1, obs2, obs3].
-1380    all_configs : bool
-1381        if True, the reweighted observables are normalized by the average of
-1382        the reweighting factor on all configurations in weight.idl and not
-1383        on the configurations in obs[i].idl. Default False.
-1384    """
-1385    result = []
-1386    for i in range(len(obs)):
-1387        if len(obs[i].cov_names):
-1388            raise Exception('Error: Not possible to reweight an Obs that contains covobs!')
-1389        if not set(obs[i].names).issubset(weight.names):
-1390            raise Exception('Error: Ensembles do not fit')
-1391        for name in obs[i].names:
-1392            if not set(obs[i].idl[name]).issubset(weight.idl[name]):
-1393                raise Exception('obs[%d] has to be defined on a subset of the configs in weight.idl[%s]!' % (i, name))
-1394        new_samples = []
-1395        w_deltas = {}
-1396        for name in sorted(obs[i].names):
-1397            w_deltas[name] = _reduce_deltas(weight.deltas[name], weight.idl[name], obs[i].idl[name])
-1398            new_samples.append((w_deltas[name] + weight.r_values[name]) * (obs[i].deltas[name] + obs[i].r_values[name]))
-1399        tmp_obs = Obs(new_samples, sorted(obs[i].names), idl=[obs[i].idl[name] for name in sorted(obs[i].names)])
-1400
-1401        if kwargs.get('all_configs'):
-1402            new_weight = weight
-1403        else:
-1404            new_weight = Obs([w_deltas[name] + weight.r_values[name] for name in sorted(obs[i].names)], sorted(obs[i].names), idl=[obs[i].idl[name] for name in sorted(obs[i].names)])
-1405
-1406        result.append(tmp_obs / new_weight)
-1407        result[-1].reweighted = True
-1408
-1409    return result
+            
1369def reweight(weight, obs, **kwargs):
+1370    """Reweight a list of observables.
+1371
+1372    Parameters
+1373    ----------
+1374    weight : Obs
+1375        Reweighting factor. An Observable that has to be defined on a superset of the
+1376        configurations in obs[i].idl for all i.
+1377    obs : list
+1378        list of Obs, e.g. [obs1, obs2, obs3].
+1379    all_configs : bool
+1380        if True, the reweighted observables are normalized by the average of
+1381        the reweighting factor on all configurations in weight.idl and not
+1382        on the configurations in obs[i].idl. Default False.
+1383    """
+1384    result = []
+1385    for i in range(len(obs)):
+1386        if len(obs[i].cov_names):
+1387            raise Exception('Error: Not possible to reweight an Obs that contains covobs!')
+1388        if not set(obs[i].names).issubset(weight.names):
+1389            raise Exception('Error: Ensembles do not fit')
+1390        for name in obs[i].names:
+1391            if not set(obs[i].idl[name]).issubset(weight.idl[name]):
+1392                raise Exception('obs[%d] has to be defined on a subset of the configs in weight.idl[%s]!' % (i, name))
+1393        new_samples = []
+1394        w_deltas = {}
+1395        for name in sorted(obs[i].names):
+1396            w_deltas[name] = _reduce_deltas(weight.deltas[name], weight.idl[name], obs[i].idl[name])
+1397            new_samples.append((w_deltas[name] + weight.r_values[name]) * (obs[i].deltas[name] + obs[i].r_values[name]))
+1398        tmp_obs = Obs(new_samples, sorted(obs[i].names), idl=[obs[i].idl[name] for name in sorted(obs[i].names)])
+1399
+1400        if kwargs.get('all_configs'):
+1401            new_weight = weight
+1402        else:
+1403            new_weight = Obs([w_deltas[name] + weight.r_values[name] for name in sorted(obs[i].names)], sorted(obs[i].names), idl=[obs[i].idl[name] for name in sorted(obs[i].names)])
+1404
+1405        result.append(tmp_obs / new_weight)
+1406        result[-1].reweighted = True
+1407
+1408    return result
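
A short, hedged sketch of reweight (ensemble name and chain length are invented; both Obs live on the same configurations, so the subset checks above pass):

    import numpy as np
    import pyerrors as pe

    rng = np.random.default_rng(1)
    obs1 = pe.Obs([rng.normal(1.0, 0.1, 500)], ['ensemble1'])
    weight = pe.Obs([rng.normal(1.0, 0.01, 500)], ['ensemble1'])

    reweighted = pe.reweight(weight, [obs1])  # returns a list of Obs
    reweighted[0].gamma_method()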
 
@@ -5493,47 +5491,47 @@ on the configurations in obs[i].idl. Default False.
-
1412def correlate(obs_a, obs_b):
-1413    """Correlate two observables.
-1414
-1415    Parameters
-1416    ----------
-1417    obs_a : Obs
-1418        First observable
-1419    obs_b : Obs
-1420        Second observable
-1421
-1422    Notes
-1423    -----
-1424    Keep in mind to only correlate primary observables which have not been reweighted
-1425    yet. The reweighting has to be applied after correlating the observables.
-1426    Currently only works if ensembles are identical (this is not strictly necessary).
-1427    """
-1428
-1429    if sorted(obs_a.names) != sorted(obs_b.names):
-1430        raise Exception(f"Ensembles do not fit {set(sorted(obs_a.names)) ^ set(sorted(obs_b.names))}")
-1431    if len(obs_a.cov_names) or len(obs_b.cov_names):
-1432        raise Exception('Error: Not possible to correlate Obs that contain covobs!')
-1433    for name in obs_a.names:
-1434        if obs_a.shape[name] != obs_b.shape[name]:
-1435            raise Exception('Shapes of ensemble', name, 'do not fit')
-1436        if obs_a.idl[name] != obs_b.idl[name]:
-1437            raise Exception('idl of ensemble', name, 'do not fit')
-1438
-1439    if obs_a.reweighted is True:
-1440        warnings.warn("The first observable is already reweighted.", RuntimeWarning)
-1441    if obs_b.reweighted is True:
-1442        warnings.warn("The second observable is already reweighted.", RuntimeWarning)
-1443
-1444    new_samples = []
-1445    new_idl = []
-1446    for name in sorted(obs_a.names):
-1447        new_samples.append((obs_a.deltas[name] + obs_a.r_values[name]) * (obs_b.deltas[name] + obs_b.r_values[name]))
-1448        new_idl.append(obs_a.idl[name])
-1449
-1450    o = Obs(new_samples, sorted(obs_a.names), idl=new_idl)
-1451    o.reweighted = obs_a.reweighted or obs_b.reweighted
-1452    return o
+            
1411def correlate(obs_a, obs_b):
+1412    """Correlate two observables.
+1413
+1414    Parameters
+1415    ----------
+1416    obs_a : Obs
+1417        First observable
+1418    obs_b : Obs
+1419        Second observable
+1420
+1421    Notes
+1422    -----
+1423    Keep in mind to correlate only primary observables which have not been reweighted
+1424    yet. The reweighting has to be applied after correlating the observables.
+1425    Currently this only works if the ensembles are identical (this is not strictly necessary).
+1426    """
+1427
+1428    if sorted(obs_a.names) != sorted(obs_b.names):
+1429        raise Exception(f"Ensembles do not fit {set(sorted(obs_a.names)) ^ set(sorted(obs_b.names))}")
+1430    if len(obs_a.cov_names) or len(obs_b.cov_names):
+1431        raise Exception('Error: Not possible to correlate Obs that contain covobs!')
+1432    for name in obs_a.names:
+1433        if obs_a.shape[name] != obs_b.shape[name]:
+1434            raise Exception('Shapes of ensemble', name, 'do not fit')
+1435        if obs_a.idl[name] != obs_b.idl[name]:
+1436            raise Exception('idl of ensemble', name, 'do not fit')
+1437
+1438    if obs_a.reweighted is True:
+1439        warnings.warn("The first observable is already reweighted.", RuntimeWarning)
+1440    if obs_b.reweighted is True:
+1441        warnings.warn("The second observable is already reweighted.", RuntimeWarning)
+1442
+1443    new_samples = []
+1444    new_idl = []
+1445    for name in sorted(obs_a.names):
+1446        new_samples.append((obs_a.deltas[name] + obs_a.r_values[name]) * (obs_b.deltas[name] + obs_b.r_values[name]))
+1447        new_idl.append(obs_a.idl[name])
+1448
+1449    o = Obs(new_samples, sorted(obs_a.names), idl=new_idl)
+1450    o.reweighted = obs_a.reweighted or obs_b.reweighted
+1451    return o
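
A sketch of correlate under the same illustrative assumptions (both inputs are primary observables defined on identical ensembles and idl, as the checks above require):

    import numpy as np
    import pyerrors as pe

    rng = np.random.default_rng(2)
    obs_a = pe.Obs([rng.normal(1.0, 0.1, 500)], ['ensemble1'])
    obs_b = pe.Obs([rng.normal(2.0, 0.1, 500)], ['ensemble1'])

    ab = pe.correlate(obs_a, obs_b)  # sample-wise product, an estimator of <ab>
    ab.gamma_method()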
 
@@ -5568,74 +5566,74 @@ Currently only works if ensembles are identical (this is not strictly necessary)
-
1455def covariance(obs, visualize=False, correlation=False, smooth=None, **kwargs):
-1456    r'''Calculates the error covariance matrix of a set of observables.
-1457
-1458    WARNING: This function should be used with care, especially for observables with support on multiple
-1459             ensembles with differing autocorrelations. See the notes below for details.
-1460
-1461    The gamma method has to be applied first to all observables.
-1462
-1463    Parameters
-1464    ----------
-1465    obs : list or numpy.ndarray
-1466        List or one dimensional array of Obs
-1467    visualize : bool
-1468        If True plots the corresponding normalized correlation matrix (default False).
-1469    correlation : bool
-1470        If True the correlation matrix instead of the error covariance matrix is returned (default False).
-1471    smooth : None or int
-1472        If smooth is an integer 'E' between 2 and the dimension of the matrix minus 1 the eigenvalue
-1473        smoothing procedure of hep-lat/9412087 is applied to the correlation matrix which leaves the
-1474        largest E eigenvalues essentially unchanged and smoothes the smaller eigenvalues to avoid extremely
-1475        small ones.
-1476
-1477    Notes
-1478    -----
-1479    The error covariance is defined such that it agrees with the squared standard error for two identical observables
-1480    $$\operatorname{cov}(a,a)=\sum_{s=1}^N\delta_a^s\delta_a^s/N^2=\Gamma_{aa}(0)/N=\operatorname{var}(a)/N=\sigma_a^2$$
-1481    in the absence of autocorrelation.
-1482    The error covariance is estimated by calculating the correlation matrix assuming no autocorrelation and then rescaling the correlation matrix by the full errors including the previous gamma method estimate for the autocorrelation of the observables. The covariance at windowsize 0 is guaranteed to be positive semi-definite
-1483    $$\sum_{i,j}v_i\Gamma_{ij}(0)v_j=\frac{1}{N}\sum_{s=1}^N\sum_{i,j}v_i\delta_i^s\delta_j^s v_j=\frac{1}{N}\sum_{s=1}^N\sum_{i}|v_i\delta_i^s|^2\geq 0\,,$$ for every $v\in\mathbb{R}^M$, while such an identity does not hold for larger windows/lags.
-1484    For observables defined on a single ensemble our approximation is equivalent to assuming that the integrated autocorrelation time of an off-diagonal element is equal to the geometric mean of the integrated autocorrelation times of the corresponding diagonal elements.
-1485    $$\tau_{\mathrm{int}, ij}=\sqrt{\tau_{\mathrm{int}, i}\times \tau_{\mathrm{int}, j}}$$
-1486    This construction ensures that the estimated covariance matrix is positive semi-definite (up to numerical rounding errors).
-1487    '''
-1488
-1489    length = len(obs)
-1490
-1491    max_samples = np.max([o.N for o in obs])
-1492    if max_samples <= length and not [item for sublist in [o.cov_names for o in obs] for item in sublist]:
-1493        warnings.warn(f"The dimension of the covariance matrix ({length}) is larger or equal to the number of samples ({max_samples}). This will result in a rank deficient matrix.", RuntimeWarning)
-1494
-1495    cov = np.zeros((length, length))
-1496    for i in range(length):
-1497        for j in range(i, length):
-1498            cov[i, j] = _covariance_element(obs[i], obs[j])
-1499    cov = cov + cov.T - np.diag(np.diag(cov))
-1500
-1501    corr = np.diag(1 / np.sqrt(np.diag(cov))) @ cov @ np.diag(1 / np.sqrt(np.diag(cov)))
-1502
-1503    if isinstance(smooth, int):
-1504        corr = _smooth_eigenvalues(corr, smooth)
-1505
-1506    if visualize:
-1507        plt.matshow(corr, vmin=-1, vmax=1)
-1508        plt.set_cmap('RdBu')
-1509        plt.colorbar()
-1510        plt.draw()
-1511
-1512    if correlation is True:
-1513        return corr
-1514
-1515    errors = [o.dvalue for o in obs]
-1516    cov = np.diag(errors) @ corr @ np.diag(errors)
-1517
-1518    eigenvalues = np.linalg.eigh(cov)[0]
-1519    if not np.all(eigenvalues >= 0):
-1520        warnings.warn("Covariance matrix is not positive semi-definite (Eigenvalues: " + str(eigenvalues) + ")", RuntimeWarning)
-1521
-1522    return cov
+            
1454def covariance(obs, visualize=False, correlation=False, smooth=None, **kwargs):
+1455    r'''Calculates the error covariance matrix of a set of observables.
+1456
+1457    WARNING: This function should be used with care, especially for observables with support on multiple
+1458             ensembles with differing autocorrelations. See the notes below for details.
+1459
+1460    The gamma method has to be applied first to all observables.
+1461
+1462    Parameters
+1463    ----------
+1464    obs : list or numpy.ndarray
+1465        List or one dimensional array of Obs
+1466    visualize : bool
+1467        If True plots the corresponding normalized correlation matrix (default False).
+1468    correlation : bool
+1469        If True the correlation matrix instead of the error covariance matrix is returned (default False).
+1470    smooth : None or int
+1471        If smooth is an integer 'E' between 2 and the dimension of the matrix minus 1, the eigenvalue
+1472        smoothing procedure of hep-lat/9412087 is applied to the correlation matrix, which leaves the
+1473        largest E eigenvalues essentially unchanged and smooths the smaller eigenvalues to avoid extremely
+1474        small ones.
+1475
+1476    Notes
+1477    -----
+1478    The error covariance is defined such that it agrees with the squared standard error for two identical observables
+1479    $$\operatorname{cov}(a,a)=\sum_{s=1}^N\delta_a^s\delta_a^s/N^2=\Gamma_{aa}(0)/N=\operatorname{var}(a)/N=\sigma_a^2$$
+1480    in the absence of autocorrelation.
+1481    The error covariance is estimated by calculating the correlation matrix assuming no autocorrelation and then rescaling the correlation matrix by the full errors including the previous gamma method estimate for the autocorrelation of the observables. The covariance at windowsize 0 is guaranteed to be positive semi-definite
+1482    $$\sum_{i,j}v_i\Gamma_{ij}(0)v_j=\frac{1}{N}\sum_{s=1}^N\sum_{i,j}v_i\delta_i^s\delta_j^s v_j=\frac{1}{N}\sum_{s=1}^N\sum_{i}|v_i\delta_i^s|^2\geq 0\,,$$ for every $v\in\mathbb{R}^M$, while such an identity does not hold for larger windows/lags.
+1483    For observables defined on a single ensemble our approximation is equivalent to assuming that the integrated autocorrelation time of an off-diagonal element is equal to the geometric mean of the integrated autocorrelation times of the corresponding diagonal elements.
+1484    $$\tau_{\mathrm{int}, ij}=\sqrt{\tau_{\mathrm{int}, i}\times \tau_{\mathrm{int}, j}}$$
+1485    This construction ensures that the estimated covariance matrix is positive semi-definite (up to numerical rounding errors).
+1486    '''
+1487
+1488    length = len(obs)
+1489
+1490    max_samples = np.max([o.N for o in obs])
+1491    if max_samples <= length and not [item for sublist in [o.cov_names for o in obs] for item in sublist]:
+1492        warnings.warn(f"The dimension of the covariance matrix ({length}) is larger than or equal to the number of samples ({max_samples}). This will result in a rank-deficient matrix.", RuntimeWarning)
+1493
+1494    cov = np.zeros((length, length))
+1495    for i in range(length):
+1496        for j in range(i, length):
+1497            cov[i, j] = _covariance_element(obs[i], obs[j])
+1498    cov = cov + cov.T - np.diag(np.diag(cov))
+1499
+1500    corr = np.diag(1 / np.sqrt(np.diag(cov))) @ cov @ np.diag(1 / np.sqrt(np.diag(cov)))
+1501
+1502    if isinstance(smooth, int):
+1503        corr = _smooth_eigenvalues(corr, smooth)
+1504
+1505    if visualize:
+1506        plt.matshow(corr, vmin=-1, vmax=1)
+1507        plt.set_cmap('RdBu')
+1508        plt.colorbar()
+1509        plt.draw()
+1510
+1511    if correlation is True:
+1512        return corr
+1513
+1514    errors = [o.dvalue for o in obs]
+1515    cov = np.diag(errors) @ corr @ np.diag(errors)
+1516
+1517    eigenvalues = np.linalg.eigh(cov)[0]
+1518    if not np.all(eigenvalues >= 0):
+1519        warnings.warn("Covariance matrix is not positive semi-definite (Eigenvalues: " + str(eigenvalues) + ")", RuntimeWarning)
+1520
+1521    return cov
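
A minimal covariance sketch; as stated above, the gamma method has to be applied to every input first (data and names are invented):

    import numpy as np
    import pyerrors as pe

    rng = np.random.default_rng(3)
    obs_list = [pe.Obs([rng.normal(i + 1.0, 0.1, 500)], ['ensemble1']) for i in range(3)]
    for o in obs_list:
        o.gamma_method()  # required before calling covariance

    cov = pe.covariance(obs_list)                     # 3x3 error covariance matrix
    corr = pe.covariance(obs_list, correlation=True)  # normalized correlation matrix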
 
@@ -5687,24 +5685,24 @@ This construction ensures that the estimated covariance matrix is positive semi-
-
1602def import_jackknife(jacks, name, idl=None):
-1603    """Imports jackknife samples and returns an Obs
-1604
-1605    Parameters
-1606    ----------
-1607    jacks : numpy.ndarray
-1608        numpy array containing the mean value as zeroth entry and
-1609        the N jackknife samples as first to Nth entry.
-1610    name : str
-1611        name of the ensemble the samples are defined on.
-1612    """
-1613    length = len(jacks) - 1
-1614    prj = (np.ones((length, length)) - (length - 1) * np.identity(length))
-1615    samples = jacks[1:] @ prj
-1616    mean = np.mean(samples)
-1617    new_obs = Obs([samples - mean], [name], idl=idl, means=[mean])
-1618    new_obs._value = jacks[0]
-1619    return new_obs
+            
1601def import_jackknife(jacks, name, idl=None):
+1602    """Imports jackknife samples and returns an Obs
+1603
+1604    Parameters
+1605    ----------
+1606    jacks : numpy.ndarray
+1607        numpy array containing the mean value as zeroth entry and
+1608        the N jackknife samples as first to Nth entry.
+1609    name : str
+1610        name of the ensemble the samples are defined on.
+1611    """
+1612    length = len(jacks) - 1
+1613    prj = (np.ones((length, length)) - (length - 1) * np.identity(length))
+1614    samples = jacks[1:] @ prj
+1615    mean = np.mean(samples)
+1616    new_obs = Obs([samples - mean], [name], idl=idl, means=[mean])
+1617    new_obs._value = jacks[0]
+1618    return new_obs
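
A round-trip sketch for import_jackknife. It assumes Obs.export_jackknife as the inverse operation, which recent pyerrors versions provide:

    import numpy as np
    import pyerrors as pe

    rng = np.random.default_rng(4)
    obs = pe.Obs([rng.normal(1.0, 0.1, 100)], ['ensemble1'])

    jacks = obs.export_jackknife()  # mean as zeroth entry, then the N jackknife samples
    reconstructed = pe.import_jackknife(jacks, 'ensemble1')
    reconstructed.gamma_method()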
 
@@ -5734,34 +5732,34 @@ name of the ensemble the samples are defined on.
-
1622def import_bootstrap(boots, name, random_numbers):
-1623    """Imports bootstrap samples and returns an Obs
-1624
-1625    Parameters
-1626    ----------
-1627    boots : numpy.ndarray
-1628        numpy array containing the mean value as zeroth entry and
-1629        the N bootstrap samples as first to Nth entry.
-1630    name : str
-1631        name of the ensemble the samples are defined on.
-1632    random_numbers : np.ndarray
-1633        Array of shape (samples, length) containing the random numbers to generate the bootstrap samples,
-1634        where samples is the number of bootstrap samples and length is the length of the original Monte Carlo
-1635        chain to be reconstructed.
-1636    """
-1637    samples, length = random_numbers.shape
-1638    if samples != len(boots) - 1:
-1639        raise ValueError("Random numbers do not have the correct shape.")
-1640
-1641    if samples < length:
-1642        raise ValueError("Obs can't be reconstructed if there are fewer bootstrap samples than Monte Carlo data points.")
-1643
-1644    proj = np.vstack([np.bincount(o, minlength=length) for o in random_numbers]) / length
-1645
-1646    samples = scipy.linalg.lstsq(proj, boots[1:])[0]
-1647    ret = Obs([samples], [name])
-1648    ret._value = boots[0]
-1649    return ret
+            
1621def import_bootstrap(boots, name, random_numbers):
+1622    """Imports bootstrap samples and returns an Obs
+1623
+1624    Parameters
+1625    ----------
+1626    boots : numpy.ndarray
+1627        numpy array containing the mean value as zeroth entry and
+1628        the N bootstrap samples as first to Nth entry.
+1629    name : str
+1630        name of the ensemble the samples are defined on.
+1631    random_numbers : np.ndarray
+1632        Array of shape (samples, length) containing the random numbers to generate the bootstrap samples,
+1633        where samples is the number of bootstrap samples and length is the length of the original Monte Carlo
+1634        chain to be reconstructed.
+1635    """
+1636    samples, length = random_numbers.shape
+1637    if samples != len(boots) - 1:
+1638        raise ValueError("Random numbers do not have the correct shape.")
+1639
+1640    if samples < length:
+1641        raise ValueError("Obs can't be reconstructed if there are fewer bootstrap samples than Monte Carlo data points.")
+1642
+1643    proj = np.vstack([np.bincount(o, minlength=length) for o in random_numbers]) / length
+1644
+1645    samples = scipy.linalg.lstsq(proj, boots[1:])[0]
+1646    ret = Obs([samples], [name])
+1647    ret._value = boots[0]
+1648    return ret
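
A self-contained sketch of import_bootstrap; the bootstrap samples are built by hand here so that no additional API beyond the signature above has to be assumed:

    import numpy as np
    import pyerrors as pe

    rng = np.random.default_rng(5)
    length = 100    # length of the original Monte Carlo chain
    n_boot = 500    # number of bootstrap samples, must not be smaller than length
    random_numbers = rng.integers(0, length, size=(n_boot, length))

    data = rng.normal(1.0, 0.1, length)
    boots = np.empty(n_boot + 1)
    boots[0] = data.mean()  # zeroth entry: the mean value
    for i in range(n_boot):
        boots[i + 1] = data[random_numbers[i]].mean()

    obs = pe.import_bootstrap(boots, 'ensemble1', random_numbers)
    obs.gamma_method()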
 
@@ -5795,34 +5793,34 @@ chain to be reconstructed.
-
1652def merge_obs(list_of_obs):
-1653    """Combine all observables in list_of_obs into one new observable
-1654
-1655    Parameters
-1656    ----------
-1657    list_of_obs : list
-1658        list of the Obs object to be combined
-1659
-1660    Notes
-1661    -----
-1662    It is not possible to combine obs which are based on the same replicum
-1663    """
-1664    replist = [item for obs in list_of_obs for item in obs.names]
-1665    if (len(replist) == len(set(replist))) is False:
-1666        raise Exception('list_of_obs contains duplicate replica: %s' % (str(replist)))
-1667    if any([len(o.cov_names) for o in list_of_obs]):
-1668        raise Exception('Not possible to merge data that contains covobs!')
-1669    new_dict = {}
-1670    idl_dict = {}
-1671    for o in list_of_obs:
-1672        new_dict.update({key: o.deltas.get(key, 0) + o.r_values.get(key, 0)
-1673                        for key in set(o.deltas) | set(o.r_values)})
-1674        idl_dict.update({key: o.idl.get(key, 0) for key in set(o.deltas)})
-1675
-1676    names = sorted(new_dict.keys())
-1677    o = Obs([new_dict[name] for name in names], names, idl=[idl_dict[name] for name in names])
-1678    o.reweighted = np.max([oi.reweighted for oi in list_of_obs])
-1679    return o
+            
1651def merge_obs(list_of_obs):
+1652    """Combine all observables in list_of_obs into one new observable
+1653
+1654    Parameters
+1655    ----------
+1656    list_of_obs : list
+1657        list of the Obs objects to be combined
+1658
+1659    Notes
+1660    -----
+1661    It is not possible to combine obs which are based on the same replicum.
+1662    """
+1663    replist = [item for obs in list_of_obs for item in obs.names]
+1664    if (len(replist) == len(set(replist))) is False:
+1665        raise Exception('list_of_obs contains duplicate replica: %s' % (str(replist)))
+1666    if any([len(o.cov_names) for o in list_of_obs]):
+1667        raise Exception('Not possible to merge data that contains covobs!')
+1668    new_dict = {}
+1669    idl_dict = {}
+1670    for o in list_of_obs:
+1671        new_dict.update({key: o.deltas.get(key, 0) + o.r_values.get(key, 0)
+1672                        for key in set(o.deltas) | set(o.r_values)})
+1673        idl_dict.update({key: o.idl.get(key, 0) for key in set(o.deltas)})
+1674
+1675    names = sorted(new_dict.keys())
+1676    o = Obs([new_dict[name] for name in names], names, idl=[idl_dict[name] for name in names])
+1677    o.reweighted = np.max([oi.reweighted for oi in list_of_obs])
+1678    return o
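
A sketch of merge_obs, combining two measurements of the same quantity from different replica (the 'ensemble|replicum' naming convention is assumed here):

    import numpy as np
    import pyerrors as pe

    rng = np.random.default_rng(6)
    part_a = pe.Obs([rng.normal(1.0, 0.1, 500)], ['ensemble1|r01'])
    part_b = pe.Obs([rng.normal(1.0, 0.1, 500)], ['ensemble1|r02'])

    combined = pe.merge_obs([part_a, part_b])  # one Obs defined on both replica
    combined.gamma_method()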
 
@@ -5853,47 +5851,47 @@ list of the Obs object to be combined
-
1682def cov_Obs(means, cov, name, grad=None):
-1683    """Create an Obs based on mean(s) and a covariance matrix
-1684
-1685    Parameters
-1686    ----------
-1687    mean : list of floats or float
-1688        N mean value(s) of the new Obs
-1689    cov : list or array
-1690        2d (NxN) Covariance matrix, 1d diagonal entries or 0d covariance
-1691    name : str
-1692        identifier for the covariance matrix
-1693    grad : list or array
-1694        Gradient of the Covobs wrt. the means belonging to cov.
-1695    """
-1696
-1697    def covobs_to_obs(co):
-1698        """Make an Obs out of a Covobs
-1699
-1700        Parameters
-1701        ----------
-1702        co : Covobs
-1703            Covobs to be embedded into the Obs
-1704        """
-1705        o = Obs([], [], means=[])
-1706        o._value = co.value
-1707        o.names.append(co.name)
-1708        o._covobs[co.name] = co
-1709        o._dvalue = np.sqrt(co.errsq())
-1710        return o
-1711
-1712    ol = []
-1713    if isinstance(means, (float, int)):
-1714        means = [means]
-1715
-1716    for i in range(len(means)):
-1717        ol.append(covobs_to_obs(Covobs(means[i], cov, name, pos=i, grad=grad)))
-1718    if ol[0].covobs[name].N != len(means):
-1719        raise Exception('You have to provide %d mean values!' % (ol[0].N))
-1720    if len(ol) == 1:
-1721        return ol[0]
-1722    return ol
+            
1681def cov_Obs(means, cov, name, grad=None):
+1682    """Create an Obs based on mean(s) and a covariance matrix
+1683
+1684    Parameters
+1685    ----------
+1686    means : list of floats or float
+1687        N mean value(s) of the new Obs
+1688    cov : list or array
+1689        2d (NxN) Covariance matrix, 1d diagonal entries or 0d covariance
+1690    name : str
+1691        identifier for the covariance matrix
+1692    grad : list or array
+1693        Gradient of the Covobs wrt. the means belonging to cov.
+1694    """
+1695
+1696    def covobs_to_obs(co):
+1697        """Make an Obs out of a Covobs
+1698
+1699        Parameters
+1700        ----------
+1701        co : Covobs
+1702            Covobs to be embedded into the Obs
+1703        """
+1704        o = Obs([], [], means=[])
+1705        o._value = co.value
+1706        o.names.append(co.name)
+1707        o._covobs[co.name] = co
+1708        o._dvalue = np.sqrt(co.errsq())
+1709        return o
+1710
+1711    ol = []
+1712    if isinstance(means, (float, int)):
+1713        means = [means]
+1714
+1715    for i in range(len(means)):
+1716        ol.append(covobs_to_obs(Covobs(means[i], cov, name, pos=i, grad=grad)))
+1717    if ol[0].covobs[name].N != len(means):
+1718        raise Exception('You have to provide %d mean values!' % (ol[0].N))
+1719    if len(ol) == 1:
+1720        return ol[0]
+1721    return ol
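
Finally, a sketch of cov_Obs for two correlated external inputs (the values and the covariance matrix are invented for illustration):

    import pyerrors as pe

    means = [0.7, 1.3]
    cov = [[0.0004, 0.0001],
           [0.0001, 0.0009]]  # 2x2 covariance matrix of the two inputs

    za, zb = pe.cov_Obs(means, cov, 'external_inputs')
    za.gamma_method()  # the error is fixed by cov: sqrt(0.0004) = 0.02
    print(za, zb)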