From dc60784c314374c406e5c00e90fc85a3e796daab Mon Sep 17 00:00:00 2001
From: fjosw
Date: Tue, 18 Jan 2022 16:36:51 +0000
Subject: [PATCH] Documentation updated
---
docs/pyerrors/correlators.html | 700 ++++++++++++++++++++++++++++-----
docs/search.js | 2 +-
2 files changed, 597 insertions(+), 105 deletions(-)
diff --git a/docs/pyerrors/correlators.html b/docs/pyerrors/correlators.html
index 502bbbc0..a3f979d6 100644
--- a/docs/pyerrors/correlators.html
+++ b/docs/pyerrors/correlators.html
@@ -86,6 +86,9 @@
Eigenvalue
+
+ Hankel
+
roll
@@ -173,9 +176,24 @@
arctanh
+
+ real
+
+
+ imag
+
+
+ sort_vectors
+
+
+ permutation
+
+
+ GEVP_solver
+
@@ -200,7 +218,7 @@
import autograd.numpy as anp
import matplotlib.pyplot as plt
import scipy.linalg
-from .obs import Obs, reweight, correlate
+from .obs import Obs, reweight, correlate, CObs
from .misc import dump_object
from .fits import least_squares
from .linalg import eigh, inv, cholesky
@@ -238,7 +256,11 @@
if not isinstance(data_input, list):
raise TypeError('Corr__init__ expects a list of timeslices.')
- if all([isinstance(item, Obs) for item in data_input]):
+
+ # data_input can have multiple shapes. The simplest one is a list of Obs.
+ # We check if this is the case
+ if all([(isinstance(item, Obs) or isinstance(item, CObs)) for item in data_input]):
+
self.content = [np.asarray([item]) for item in data_input]
self.N = 1
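
For illustration, a minimal sketch of the simplest accepted input shape, one Obs per timeslice (assuming the usual pyerrors interface; the ensemble name 'ensemble_A', the time extent T and the synthetic samples are placeholders):

    import numpy as np
    import pyerrors as pe

    T = 8
    rng = np.random.default_rng(0)
    # One Obs per timeslice, each built from 100 synthetic Monte Carlo samples of a single ensemble.
    timeslices = [pe.Obs([np.exp(-0.3 * t) + 0.01 * rng.normal(size=100)], ['ensemble_A']) for t in range(T)]
    corr = pe.Corr(timeslices)  # results in N == 1 and T == 8
    # With this change, a list of CObs should be accepted in the same way, e.g.
    c_corr = pe.Corr([pe.CObs(o, o * 0) for o in timeslices])
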
@@ -298,7 +320,7 @@
# The method can use one or two vectors.
# If two are specified it returns v1@G@v2 (the order might be very important.)
# By default it will return the lowest source, which usually means unsmeared-unsmeared (0,0), but it does not have to
- def projected(self, vector_l=None, vector_r=None):
+ def projected(self, vector_l=None, vector_r=None, normalize=False):
if self.N == 1:
raise Exception("Trying to project a Corr, that already has N=1.")
# This Exception is in no way necessary. One could just return self
@@ -310,17 +332,32 @@
vector_l, vector_r = np.asarray([1.] + (self.N - 1) * [0.]), np.asarray([1.] + (self.N - 1) * [0.])
elif(vector_r is None):
vector_r = vector_l
+ if isinstance(vector_l, list) and not isinstance(vector_r, list):
+ if len(vector_l) != self.T:
+ raise Exception("Length of vector list must be equal to T")
+ vector_r = [vector_r] * self.T
+ if isinstance(vector_r, list) and not isinstance(vector_l, list):
+ if len(vector_r) != self.T:
+ raise Exception("Length of vector list must be equal to T")
+ vector_l = [vector_l] * self.T
- if not vector_l.shape == vector_r.shape == (self.N,):
- raise Exception("Vectors are of wrong shape!")
+ if not isinstance(vector_l, list):
+ if not vector_l.shape == vector_r.shape == (self.N,):
+ raise Exception("Vectors are of wrong shape!")
+ if normalize:
+ vector_l, vector_r = vector_l / np.sqrt((vector_l @ vector_l)), vector_r / np.sqrt(vector_r @ vector_r)
+ # if (not (0.95 < vector_r @ vector_r < 1.05)) or (not (0.95 < vector_l @ vector_l < 1.05)):
+ # print("Vectors are normalized before projection!")
- # We always normalize before projecting! But we only raise a warning, when it is clear, they where not meant to be normalized.
- if (not (0.95 < vector_r @ vector_r < 1.05)) or (not (0.95 < vector_l @ vector_l < 1.05)):
- print("Vectors are normalized before projection!")
+ newcontent = [None if (item is None) else np.asarray([vector_l.T @ item @ vector_r]) for item in self.content]
- vector_l, vector_r = vector_l / np.sqrt((vector_l @ vector_l)), vector_r / np.sqrt(vector_r @ vector_r)
+ else:
+ # There are no checks here yet, as there are many possible scenarios where this could go wrong.
+ if normalize:
+ for t in range(self.T):
+ vector_l[t], vector_r[t] = vector_l[t] / np.sqrt((vector_l[t] @ vector_l[t])), vector_r[t] / np.sqrt(vector_r[t] @ vector_r[t])
- newcontent = [None if (item is None) else np.asarray([vector_l.T @ item @ vector_r]) for item in self.content]
+ newcontent = [None if (self.content[t] is None or vector_l[t] is None or vector_r[t] is None) else np.asarray([vector_l[t].T @ self.content[t] @ vector_r[t]]) for t in range(self.T)]
return Corr(newcontent)
def sum(self):
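
For illustration, a hedged sketch of the extended projection; matrix_corr stands for an existing N x N matrix correlator and the timeslices t0=3, ts=6 are placeholders.

    import numpy as np

    # A single vector is applied to every timeslice (optionally normalized first).
    vec = np.asarray([1.0] + (matrix_corr.N - 1) * [0.0])
    fixed = matrix_corr.projected(vec, normalize=True)

    # A list of length T, e.g. from GEVP(..., return_list=True), supplies one projection vector per timeslice;
    # timeslices where the correlator or the vector is None simply stay None in the result.
    vecs_per_t = matrix_corr.GEVP(t0=3, ts=6, return_list=True)
    varying = matrix_corr.projected(vecs_per_t)
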
@@ -396,20 +433,45 @@
if self.N == 1:
raise Exception("Trying to symmetrize a smearing matrix, that already has N=1.")
- # We also include a simple GEVP method based on Scipy.linalg
- def GEVP(self, t0, ts, state=1):
- if (self.content[t0] is None) or (self.content[ts] is None):
- raise Exception("Corr not defined at t0/ts")
- G0, Gt = np.empty([self.N, self.N], dtype="double"), np.empty([self.N, self.N], dtype="double")
- for i in range(self.N):
- for j in range(self.N):
- G0[i, j] = self.content[t0][i, j].value
- Gt[i, j] = self.content[ts][i, j].value
+ # There are two ways the GEVP method can be called.
+ # 1. return_list=False will return a single eigenvector, normalized according to V*C(t_0)*V=1.
+ # 2. return_list=True will return a new eigenvector for every timeslice. The time t_s is used to order the vectors according to arXiv:2004.10472 [hep-lat].
+ def GEVP(self, t0, ts, state=0, sorting="Eigenvalue", return_list=False):
+ if not return_list:
+ if (self.content[t0] is None) or (self.content[ts] is None):
+ raise Exception("Corr not defined at t0/ts")
+ G0, Gt = np.empty([self.N, self.N], dtype="double"), np.empty([self.N, self.N], dtype="double")
+ for i in range(self.N):
+ for j in range(self.N):
+ G0[i, j] = self.content[t0][i, j].value
+ Gt[i, j] = self.content[ts][i, j].value
- sp_val, sp_vec = scipy.linalg.eig(Gt, G0)
- sp_vec = sp_vec[:, np.argsort(sp_val)[-state]] # We only want the eigenvector belonging to the selected state
- sp_vec = sp_vec / np.sqrt(sp_vec @ sp_vec)
- return sp_vec
+ sp_vecs = GEVP_solver(Gt, G0)
+ sp_vec = sp_vecs[state]
+ return sp_vec
+ if return_list:
+ all_vecs = []
+ for t in range(self.T):
+ try:
+ G0, Gt = np.empty([self.N, self.N], dtype="double"), np.empty([self.N, self.N], dtype="double")
+ for i in range(self.N):
+ for j in range(self.N):
+ G0[i, j] = self.content[t0][i, j].value
+ Gt[i, j] = self.content[t][i, j].value
+
+ sp_vecs = GEVP_solver(Gt, G0)
+ if sorting == "Eigenvalue":
+ sp_vec = sp_vecs[state]
+ all_vecs.append(sp_vec)
+ else:
+ all_vecs.append(sp_vecs)
+ except "Failure to solve for one timeslice": # This could contain a check for real eigenvectors
+ all_vecs.append(None)
+ if sorting == "Eigenvector":
+ all_vecs = sort_vectors(all_vecs, ts)
+ all_vecs = [a[state] for a in all_vecs]
+
+ return all_vecs
def Eigenvalue(self, t0, state=1):
G = self.smearing_symmetric()
@@ -420,13 +482,48 @@
LTi = inv(LT)
newcontent = []
for t in range(self.T):
- Gt = G.content[t]
- M = Li @ Gt @ LTi
- eigenvalues = eigh(M)[0]
- eigenvalue = eigenvalues[-state]
- newcontent.append(eigenvalue)
+ if self.content[t] is None:
+ newcontent.append(None)
+ else:
+ Gt = G.content[t]
+ M = Li @ Gt @ LTi
+ eigenvalues = eigh(M)[0]
+ eigenvalue = eigenvalues[-state]
+ newcontent.append(eigenvalue)
return Corr(newcontent)
+ def Hankel(self, N, periodic=False):
+ # Constructs an NxN Hankel matrix
+ # C(t) c(t+1) ... c(t+n-1)
+ # C(t+1) c(t+2) ... c(t+n)
+ # .................
+ # C(t+(n-1)) c(t+n) ... c(t+2(n-1))
+
+ if self.N != 1:
+ raise Exception("Multi-operator Prony not implemented!")
+
+ array = np.empty([N, N], dtype="object")
+ new_content = []
+ for t in range(self.T):
+ new_content.append(array.copy())
+
+ def wrap(i):
+ if i >= self.T:
+ return i - self.T
+ return i
+
+ for t in range(self.T):
+ for i in range(N):
+ for j in range(N):
+ if periodic:
+ new_content[t][i, j] = self.content[wrap(t + i + j)][0]
+ elif (t + i + j) >= self.T:
+ new_content[t] = None
+ else:
+ new_content[t][i, j] = self.content[t + i + j][0]
+
+ return Corr(new_content)
+
def roll(self, dt):
"""Periodically shift the correlator by dt timeslices
@@ -439,7 +536,7 @@
def reverse(self):
"""Reverse the time ordering of the Corr"""
- return Corr(self.content[::-1])
+ return Corr(self.content[:: -1])
def correlate(self, partner):
"""Correlate the correlator with another correlator or Obs
@@ -461,7 +558,7 @@
new_content.append(None)
else:
new_content.append(np.array([correlate(o, partner.content[x0][0]) for o in t_slice]))
- elif isinstance(partner, Obs):
+ elif isinstance(partner, Obs): # Should this include CObs?
new_content.append(np.array([correlate(o, partner) for o in t_slice]))
else:
raise Exception("Can only correlate with an Obs or a Corr.")
@@ -787,7 +884,6 @@
def dump(self, filename, **kwargs):
"""Dumps the Corr into a pickle file
-
Parameters
----------
filename : str
@@ -796,14 +892,23 @@
specifies a custom path for the file (default '.')
"""
dump_object(self, filename, **kwargs)
+ return
def print(self, range=[0, None]):
print(self.__repr__(range))
def __repr__(self, range=[0, None]):
content_string = ""
+
+ content_string += "Corr T=" + str(self.T) + " N=" + str(self.N) + "\n" # +" filled with"+ str(type(self.content[0][0])) there should be a good solution here
+
if self.tag is not None:
content_string += "Description: " + self.tag + "\n"
+ if self.N != 1:
+ return content_string
+ # This avoids a crash for N > 1. It is not clear what else to do here; the list representation is nice for N == 1,
+ # but for N > 1 printing everything would just be a wall of numbers. One could print only a single smearing or a single matrix.
+
if range[1]:
range[1] += 1
content_string += 'x0/a\tCorr(x0/a)\n------------------\n'
@@ -837,7 +942,7 @@
newcontent.append(self.content[t] + y.content[t])
return Corr(newcontent)
- elif isinstance(y, Obs) or isinstance(y, int) or isinstance(y, float):
+ elif isinstance(y, Obs) or isinstance(y, int) or isinstance(y, float) or isinstance(y, CObs):
newcontent = []
for t in range(self.T):
if (self.content[t] is None):
@@ -860,7 +965,7 @@
newcontent.append(self.content[t] * y.content[t])
return Corr(newcontent)
- elif isinstance(y, Obs) or isinstance(y, int) or isinstance(y, float):
+ elif isinstance(y, Obs) or isinstance(y, int) or isinstance(y, float) or isinstance(y, CObs):
newcontent = []
for t in range(self.T):
if (self.content[t] is None):
@@ -893,9 +998,14 @@
raise Exception("Division returns completely undefined correlator")
return Corr(newcontent)
- elif isinstance(y, Obs):
- if y.value == 0:
- raise Exception('Division by zero will return undefined correlator')
+ elif isinstance(y, Obs) or isinstance(y, CObs):
+ if isinstance(y, Obs):
+ if y.value == 0:
+ raise Exception('Division by zero will return undefined correlator')
+ if isinstance(y, CObs):
+ if y.is_zero():
+ raise Exception('Division by zero will return undefined correlator')
+
newcontent = []
for t in range(self.T):
if (self.content[t] is None):
@@ -925,7 +1035,7 @@
return self + (-y)
def __pow__(self, y):
- if isinstance(y, Obs) or isinstance(y, int) or isinstance(y, float):
+ if isinstance(y, Obs) or isinstance(y, int) or isinstance(y, float) or isinstance(y, CObs):
newcontent = [None if (item is None) else item**y for item in self.content]
return Corr(newcontent, prange=self.prange)
else:
@@ -1006,6 +1116,73 @@
def __rtruediv__(self, y):
return (self / y) ** (-1)
+
+ @property
+ def real(self):
+ def return_real(obs_OR_cobs):
+ if isinstance(obs_OR_cobs, CObs):
+ return obs_OR_cobs.real
+ else:
+ return obs_OR_cobs
+
+ return self._apply_func_to_corr(return_real)
+
+ @property
+ def imag(self):
+ def return_imag(obs_OR_cobs):
+ if isinstance(obs_OR_cobs, CObs):
+ return obs_OR_cobs.imag
+ else:
+ return obs_OR_cobs * 0 # So it stays the right type
+
+ return self._apply_func_to_corr(return_imag)
+
+
+def sort_vectors(vec_set, ts): # Helper function used to find a set of Eigenvectors consistent over all timeslices
+ reference_sorting = np.array(vec_set[ts])
+ N = reference_sorting.shape[0]
+ sorted_vec_set = []
+ for t in range(len(vec_set)):
+ if vec_set[t] is None:
+ sorted_vec_set.append(None)
+ elif not t == ts:
+ perms = permutation([i for i in range(N)])
+ best_score = 0
+ for perm in perms:
+ current_score = 1
+ for k in range(N):
+ new_sorting = reference_sorting.copy()
+ new_sorting[perm[k], :] = vec_set[t][k]
+ current_score *= abs(np.linalg.det(new_sorting))
+ if current_score > best_score:
+ best_score = current_score
+ best_perm = perm
+ # print("best perm", best_perm)
+ sorted_vec_set.append([vec_set[t][k] for k in best_perm])
+ else:
+ sorted_vec_set.append(vec_set[t])
+
+ return sorted_vec_set
+
+
+def permutation(lst): # Shamelessly copied
+ if len(lst) == 1:
+ return [lst]
+ ll = []
+ for i in range(len(lst)):
+ m = lst[i]
+ remLst = lst[:i] + lst[i + 1:]
+ # Generating all permutations where m is first
+ for p in permutation(remLst):
+ ll.append([m] + p)
+ return ll
+
+
+def GEVP_solver(Gt, G0):  # Just so normalization and sorting do not need to be repeated. Here we could later put in some checks
+ sp_val, sp_vecs = scipy.linalg.eig(Gt, G0)
+ sp_vecs = [sp_vecs[:, np.argsort(sp_val)[-i]] for i in range(1, sp_vecs.shape[0] + 1)]
+ sp_vecs = [v / np.sqrt((v.T @ G0 @ v)) for v in sp_vecs]
+ return sp_vecs
@@ -1053,7 +1230,11 @@
if not isinstance(data_input, list):
raise TypeError('Corr__init__ expects a list of timeslices.')
- if all([isinstance(item, Obs) for item in data_input]):
+
+ # data_input can have multiple shapes. The simplest one is a list of Obs.
+ # We check if this is the case
+ if all([(isinstance(item, Obs) or isinstance(item, CObs)) for item in data_input]):
+
self.content = [np.asarray([item]) for item in data_input]
self.N = 1
@@ -1113,7 +1294,7 @@
# The method can use one or two vectors.
# If two are specified it returns v1@G@v2 (the order might be very important.)
# By default it will return the lowest source, which usually means unsmeared-unsmeared (0,0), but it does not have to
- def projected(self, vector_l=None, vector_r=None):
+ def projected(self, vector_l=None, vector_r=None, normalize=False):
if self.N == 1:
raise Exception("Trying to project a Corr, that already has N=1.")
# This Exception is in no way necessary. One could just return self
@@ -1125,17 +1306,32 @@
vector_l, vector_r = np.asarray([1.] + (self.N - 1) * [0.]), np.asarray([1.] + (self.N - 1) * [0.])
elif(vector_r is None):
vector_r = vector_l
+ if isinstance(vector_l, list) and not isinstance(vector_r, list):
+ if len(vector_l) != self.T:
+ raise Exception("Length of vector list must be equal to T")
+ vector_r = [vector_r] * self.T
+ if isinstance(vector_r, list) and not isinstance(vector_l, list):
+ if len(vector_r) != self.T:
+ raise Exception("Length of vector list must be equal to T")
+ vector_l = [vector_l] * self.T
- if not vector_l.shape == vector_r.shape == (self.N,):
- raise Exception("Vectors are of wrong shape!")
+ if not isinstance(vector_l, list):
+ if not vector_l.shape == vector_r.shape == (self.N,):
+ raise Exception("Vectors are of wrong shape!")
+ if normalize:
+ vector_l, vector_r = vector_l / np.sqrt((vector_l @ vector_l)), vector_r / np.sqrt(vector_r @ vector_r)
+ # if (not (0.95 < vector_r @ vector_r < 1.05)) or (not (0.95 < vector_l @ vector_l < 1.05)):
+ # print("Vectors are normalized before projection!")
- # We always normalize before projecting! But we only raise a warning, when it is clear, they where not meant to be normalized.
- if (not (0.95 < vector_r @ vector_r < 1.05)) or (not (0.95 < vector_l @ vector_l < 1.05)):
- print("Vectors are normalized before projection!")
+ newcontent = [None if (item is None) else np.asarray([vector_l.T @ item @ vector_r]) for item in self.content]
- vector_l, vector_r = vector_l / np.sqrt((vector_l @ vector_l)), vector_r / np.sqrt(vector_r @ vector_r)
+ else:
+ # There are no checks here yet, as there are many possible scenarios where this could go wrong.
+ if normalize:
+ for t in range(self.T):
+ vector_l[t], vector_r[t] = vector_l[t] / np.sqrt((vector_l[t] @ vector_l[t])), vector_r[t] / np.sqrt(vector_r[t] @ vector_r[t])
- newcontent = [None if (item is None) else np.asarray([vector_l.T @ item @ vector_r]) for item in self.content]
+ newcontent = [None if (self.content[t] is None or vector_l[t] is None or vector_r[t] is None) else np.asarray([vector_l[t].T @ self.content[t] @ vector_r[t]]) for t in range(self.T)]
return Corr(newcontent)
def sum(self):
@@ -1211,20 +1407,45 @@
if self.N == 1:
raise Exception("Trying to symmetrize a smearing matrix, that already has N=1.")
- # We also include a simple GEVP method based on Scipy.linalg
- def GEVP(self, t0, ts, state=1):
- if (self.content[t0] is None) or (self.content[ts] is None):
- raise Exception("Corr not defined at t0/ts")
- G0, Gt = np.empty([self.N, self.N], dtype="double"), np.empty([self.N, self.N], dtype="double")
- for i in range(self.N):
- for j in range(self.N):
- G0[i, j] = self.content[t0][i, j].value
- Gt[i, j] = self.content[ts][i, j].value
+ # There are two ways the GEVP method can be called.
+ # 1. return_list=False will return a single eigenvector, normalized according to V*C(t_0)*V=1.
+ # 2. return_list=True will return a new eigenvector for every timeslice. The time t_s is used to order the vectors according to arXiv:2004.10472 [hep-lat].
+ def GEVP(self, t0, ts, state=0, sorting="Eigenvalue", return_list=False):
+ if not return_list:
+ if (self.content[t0] is None) or (self.content[ts] is None):
+ raise Exception("Corr not defined at t0/ts")
+ G0, Gt = np.empty([self.N, self.N], dtype="double"), np.empty([self.N, self.N], dtype="double")
+ for i in range(self.N):
+ for j in range(self.N):
+ G0[i, j] = self.content[t0][i, j].value
+ Gt[i, j] = self.content[ts][i, j].value
- sp_val, sp_vec = scipy.linalg.eig(Gt, G0)
- sp_vec = sp_vec[:, np.argsort(sp_val)[-state]] # We only want the eigenvector belonging to the selected state
- sp_vec = sp_vec / np.sqrt(sp_vec @ sp_vec)
- return sp_vec
+ sp_vecs = GEVP_solver(Gt, G0)
+ sp_vec = sp_vecs[state]
+ return sp_vec
+ if return_list:
+ all_vecs = []
+ for t in range(self.T):
+ try:
+ G0, Gt = np.empty([self.N, self.N], dtype="double"), np.empty([self.N, self.N], dtype="double")
+ for i in range(self.N):
+ for j in range(self.N):
+ G0[i, j] = self.content[t0][i, j].value
+ Gt[i, j] = self.content[t][i, j].value
+
+ sp_vecs = GEVP_solver(Gt, G0)
+ if sorting == "Eigenvalue":
+ sp_vec = sp_vecs[state]
+ all_vecs.append(sp_vec)
+ else:
+ all_vecs.append(sp_vecs)
+ except "Failure to solve for one timeslice": # This could contain a check for real eigenvectors
+ all_vecs.append(None)
+ if sorting == "Eigenvector":
+ all_vecs = sort_vectors(all_vecs, ts)
+ all_vecs = [a[state] for a in all_vecs]
+
+ return all_vecs
def Eigenvalue(self, t0, state=1):
G = self.smearing_symmetric()
@@ -1235,13 +1456,48 @@
LTi = inv(LT)
newcontent = []
for t in range(self.T):
- Gt = G.content[t]
- M = Li @ Gt @ LTi
- eigenvalues = eigh(M)[0]
- eigenvalue = eigenvalues[-state]
- newcontent.append(eigenvalue)
+ if self.content[t] is None:
+ newcontent.append(None)
+ else:
+ Gt = G.content[t]
+ M = Li @ Gt @ LTi
+ eigenvalues = eigh(M)[0]
+ eigenvalue = eigenvalues[-state]
+ newcontent.append(eigenvalue)
return Corr(newcontent)
+ def Hankel(self, N, periodic=False):
+ # Constructs an NxN Hankel matrix
+ # C(t) c(t+1) ... c(t+n-1)
+ # C(t+1) c(t+2) ... c(t+n)
+ # .................
+ # C(t+(n-1)) c(t+n) ... c(t+2(n-1))
+
+ if self.N != 1:
+ raise Exception("Multi-operator Prony not implemented!")
+
+ array = np.empty([N, N], dtype="object")
+ new_content = []
+ for t in range(self.T):
+ new_content.append(array.copy())
+
+ def wrap(i):
+ if i >= self.T:
+ return i - self.T
+ return i
+
+ for t in range(self.T):
+ for i in range(N):
+ for j in range(N):
+ if periodic:
+ new_content[t][i, j] = self.content[wrap(t + i + j)][0]
+ elif (t + i + j) >= self.T:
+ new_content[t] = None
+ else:
+ new_content[t][i, j] = self.content[t + i + j][0]
+
+ return Corr(new_content)
+
def roll(self, dt):
"""Periodically shift the correlator by dt timeslices
@@ -1254,7 +1510,7 @@
def reverse(self):
"""Reverse the time ordering of the Corr"""
- return Corr(self.content[::-1])
+ return Corr(self.content[:: -1])
def correlate(self, partner):
"""Correlate the correlator with another correlator or Obs
@@ -1276,7 +1532,7 @@
new_content.append(None)
else:
new_content.append(np.array([correlate(o, partner.content[x0][0]) for o in t_slice]))
- elif isinstance(partner, Obs):
+ elif isinstance(partner, Obs): # Should this include CObs?
new_content.append(np.array([correlate(o, partner) for o in t_slice]))
else:
raise Exception("Can only correlate with an Obs or a Corr.")
@@ -1602,7 +1858,6 @@
def dump(self, filename, **kwargs):
"""Dumps the Corr into a pickle file
-
Parameters
----------
filename : str
@@ -1611,14 +1866,23 @@
specifies a custom path for the file (default '.')
"""
dump_object(self, filename, **kwargs)
+ return
def print(self, range=[0, None]):
print(self.__repr__(range))
def __repr__(self, range=[0, None]):
content_string = ""
+
+ content_string += "Corr T=" + str(self.T) + " N=" + str(self.N) + "\n" # +" filled with"+ str(type(self.content[0][0])) there should be a good solution here
+
if self.tag is not None:
content_string += "Description: " + self.tag + "\n"
+ if self.N != 1:
+ return content_string
+ # This avoids a crash for N > 1. It is not clear what else to do here; the list representation is nice for N == 1,
+ # but for N > 1 printing everything would just be a wall of numbers. One could print only a single smearing or a single matrix.
+
if range[1]:
range[1] += 1
content_string += 'x0/a\tCorr(x0/a)\n------------------\n'
@@ -1652,7 +1916,7 @@
newcontent.append(self.content[t] + y.content[t])
return Corr(newcontent)
- elif isinstance(y, Obs) or isinstance(y, int) or isinstance(y, float):
+ elif isinstance(y, Obs) or isinstance(y, int) or isinstance(y, float) or isinstance(y, CObs):
newcontent = []
for t in range(self.T):
if (self.content[t] is None):
@@ -1675,7 +1939,7 @@
newcontent.append(self.content[t] * y.content[t])
return Corr(newcontent)
- elif isinstance(y, Obs) or isinstance(y, int) or isinstance(y, float):
+ elif isinstance(y, Obs) or isinstance(y, int) or isinstance(y, float) or isinstance(y, CObs):
newcontent = []
for t in range(self.T):
if (self.content[t] is None):
@@ -1708,9 +1972,14 @@
raise Exception("Division returns completely undefined correlator")
return Corr(newcontent)
- elif isinstance(y, Obs):
- if y.value == 0:
- raise Exception('Division by zero will return undefined correlator')
+ elif isinstance(y, Obs) or isinstance(y, CObs):
+ if isinstance(y, Obs):
+ if y.value == 0:
+ raise Exception('Division by zero will return undefined correlator')
+ if isinstance(y, CObs):
+ if y.is_zero():
+ raise Exception('Division by zero will return undefined correlator')
+
newcontent = []
for t in range(self.T):
if (self.content[t] is None):
@@ -1740,7 +2009,7 @@
return self + (-y)
def __pow__(self, y):
- if isinstance(y, Obs) or isinstance(y, int) or isinstance(y, float):
+ if isinstance(y, Obs) or isinstance(y, int) or isinstance(y, float) or isinstance(y, CObs):
newcontent = [None if (item is None) else item**y for item in self.content]
return Corr(newcontent, prange=self.prange)
else:
@@ -1821,6 +2090,26 @@
def __rtruediv__(self, y):
return (self / y) ** (-1)
+
+ @property
+ def real(self):
+ def return_real(obs_OR_cobs):
+ if isinstance(obs_OR_cobs, CObs):
+ return obs_OR_cobs.real
+ else:
+ return obs_OR_cobs
+
+ return self._apply_func_to_corr(return_real)
+
+ @property
+ def imag(self):
+ def return_imag(obs_OR_cobs):
+ if isinstance(obs_OR_cobs, CObs):
+ return obs_OR_cobs.imag
+ else:
+ return obs_OR_cobs * 0 # So it stays the right type
+
+ return self._apply_func_to_corr(return_imag)
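
As a short sketch of the two new properties, assuming complex_corr is a Corr whose timeslices hold CObs:

    re_part = complex_corr.real  # Corr of the real parts (plain Obs)
    im_part = complex_corr.imag  # Corr of the imaginary parts; real-valued entries give a zero Obs of matching type
    print(re_part)
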
@@ -1864,7 +2153,11 @@ smearing matrix at every timeslice. Other dependency (eg. spatial) are not suppo
if not isinstance(data_input, list):
raise TypeError('Corr__init__ expects a list of timeslices.')
- if all([isinstance(item, Obs) for item in data_input]):
+
+ # data_input can have multiple shapes. The simplest one is a list of Obs.
+ # We check if this is the case
+ if all([(isinstance(item, Obs) or isinstance(item, CObs)) for item in data_input]):
+
self.content = [np.asarray([item]) for item in data_input]
self.N = 1
@@ -1955,12 +2248,12 @@ region indentified for this correlator.
def
- projected(self, vector_l=None, vector_r=None):
+ projected(self, vector_l=None, vector_r=None, normalize=False):
View Source
- def projected(self, vector_l=None, vector_r=None):
+ def projected(self, vector_l=None, vector_r=None, normalize=False):
if self.N == 1:
raise Exception("Trying to project a Corr, that already has N=1.")
# This Exception is in no way necessary. One could just return self
@@ -1972,17 +2265,32 @@ region indentified for this correlator.
vector_l, vector_r = np.asarray([1.] + (self.N - 1) * [0.]), np.asarray([1.] + (self.N - 1) * [0.])
elif(vector_r is None):
vector_r = vector_l
+ if isinstance(vector_l, list) and not isinstance(vector_r, list):
+ if len(vector_l) != self.T:
+ raise Exception("Length of vector list must be equal to T")
+ vector_r = [vector_r] * self.T
+ if isinstance(vector_r, list) and not isinstance(vector_l, list):
+ if len(vector_r) != self.T:
+ raise Exception("Length of vector list must be equal to T")
+ vector_l = [vector_l] * self.T
- if not vector_l.shape == vector_r.shape == (self.N,):
- raise Exception("Vectors are of wrong shape!")
+ if not isinstance(vector_l, list):
+ if not vector_l.shape == vector_r.shape == (self.N,):
+ raise Exception("Vectors are of wrong shape!")
+ if normalize:
+ vector_l, vector_r = vector_l / np.sqrt((vector_l @ vector_l)), vector_r / np.sqrt(vector_r @ vector_r)
+ # if (not (0.95 < vector_r @ vector_r < 1.05)) or (not (0.95 < vector_l @ vector_l < 1.05)):
+ # print("Vectors are normalized before projection!")
- # We always normalize before projecting! But we only raise a warning, when it is clear, they where not meant to be normalized.
- if (not (0.95 < vector_r @ vector_r < 1.05)) or (not (0.95 < vector_l @ vector_l < 1.05)):
- print("Vectors are normalized before projection!")
+ newcontent = [None if (item is None) else np.asarray([vector_l.T @ item @ vector_r]) for item in self.content]
- vector_l, vector_r = vector_l / np.sqrt((vector_l @ vector_l)), vector_r / np.sqrt(vector_r @ vector_r)
+ else:
+ # There are no checks here yet, as there are many possible scenarios where this could go wrong.
+ if normalize:
+ for t in range(self.T):
+ vector_l[t], vector_r[t] = vector_l[t] / np.sqrt((vector_l[t] @ vector_l[t])), vector_r[t] / np.sqrt(vector_r[t] @ vector_r[t])
- newcontent = [None if (item is None) else np.asarray([vector_l.T @ item @ vector_r]) for item in self.content]
+ newcontent = [None if (self.content[t] is None or vector_l[t] is None or vector_r[t] is None) else np.asarray([vector_l[t].T @ self.content[t] @ vector_r[t]]) for t in range(self.T)]
return Corr(newcontent)
@@ -2167,24 +2475,47 @@ timeslice and the error on each timeslice.
def
- GEVP(self, t0, ts, state=1):
+ GEVP(self, t0, ts, state=0, sorting='Eigenvalue', return_list=False):
View Source
- def GEVP(self, t0, ts, state=1):
- if (self.content[t0] is None) or (self.content[ts] is None):
- raise Exception("Corr not defined at t0/ts")
- G0, Gt = np.empty([self.N, self.N], dtype="double"), np.empty([self.N, self.N], dtype="double")
- for i in range(self.N):
- for j in range(self.N):
- G0[i, j] = self.content[t0][i, j].value
- Gt[i, j] = self.content[ts][i, j].value
+ def GEVP(self, t0, ts, state=0, sorting="Eigenvalue", return_list=False):
+ if not return_list:
+ if (self.content[t0] is None) or (self.content[ts] is None):
+ raise Exception("Corr not defined at t0/ts")
+ G0, Gt = np.empty([self.N, self.N], dtype="double"), np.empty([self.N, self.N], dtype="double")
+ for i in range(self.N):
+ for j in range(self.N):
+ G0[i, j] = self.content[t0][i, j].value
+ Gt[i, j] = self.content[ts][i, j].value
- sp_val, sp_vec = scipy.linalg.eig(Gt, G0)
- sp_vec = sp_vec[:, np.argsort(sp_val)[-state]] # We only want the eigenvector belonging to the selected state
- sp_vec = sp_vec / np.sqrt(sp_vec @ sp_vec)
- return sp_vec
+ sp_vecs = GEVP_solver(Gt, G0)
+ sp_vec = sp_vecs[state]
+ return sp_vec
+ if return_list:
+ all_vecs = []
+ for t in range(self.T):
+ try:
+ G0, Gt = np.empty([self.N, self.N], dtype="double"), np.empty([self.N, self.N], dtype="double")
+ for i in range(self.N):
+ for j in range(self.N):
+ G0[i, j] = self.content[t0][i, j].value
+ Gt[i, j] = self.content[t][i, j].value
+
+ sp_vecs = GEVP_solver(Gt, G0)
+ if sorting == "Eigenvalue":
+ sp_vec = sp_vecs[state]
+ all_vecs.append(sp_vec)
+ else:
+ all_vecs.append(sp_vecs)
+ except "Failure to solve for one timeslice": # This could contain a check for real eigenvectors
+ all_vecs.append(None)
+ if sorting == "Eigenvector":
+ all_vecs = sort_vectors(all_vecs, ts)
+ all_vecs = [a[state] for a in all_vecs]
+
+ return all_vecs
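
A hedged usage sketch of the two calling modes; matrix_corr and the timeslices t0=3, ts=8 are placeholders for an actual N x N correlator and sensible choices of t_0 and t_s.

    # Single eigenvector for the chosen state, normalized to v.T @ C(t0) @ v = 1.
    v_ground = matrix_corr.GEVP(t0=3, ts=8, state=0)

    # One eigenvector per timeslice, either sorted by eigenvalue on each slice ...
    vecs = matrix_corr.GEVP(t0=3, ts=8, state=0, return_list=True)
    # ... or matched across timeslices following arXiv:2004.10472.
    vecs_matched = matrix_corr.GEVP(t0=3, ts=8, state=0, sorting="Eigenvector", return_list=True)

    # Either list can be passed on to projected() to obtain a single-state correlator.
    ground_state = matrix_corr.projected(vecs_matched)
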
@@ -2211,11 +2542,14 @@ timeslice and the error on each timeslice.
LTi = inv(LT)
newcontent = []
for t in range(self.T):
- Gt = G.content[t]
- M = Li @ Gt @ LTi
- eigenvalues = eigh(M)[0]
- eigenvalue = eigenvalues[-state]
- newcontent.append(eigenvalue)
+ if self.content[t] is None:
+ newcontent.append(None)
+ else:
+ Gt = G.content[t]
+ M = Li @ Gt @ LTi
+ eigenvalues = eigh(M)[0]
+ eigenvalue = eigenvalues[-state]
+ newcontent.append(eigenvalue)
return Corr(newcontent)
@@ -2223,6 +2557,54 @@ timeslice and the error on each timeslice.
+
+
+
+
+
+ def
+ Hankel(self, N, periodic=False):
+
+
+
+ View Source
+ def Hankel(self, N, periodic=False):
+ # Constructs an NxN Hankel matrix
+ # C(t) c(t+1) ... c(t+n-1)
+ # C(t+1) c(t+2) ... c(t+n)
+ # .................
+ # C(t+(n-1)) c(t+n) ... c(t+2(n-1))
+
+ if self.N != 1:
+ raise Exception("Multi-operator Prony not implemented!")
+
+ array = np.empty([N, N], dtype="object")
+ new_content = []
+ for t in range(self.T):
+ new_content.append(array.copy())
+
+ def wrap(i):
+ if i >= self.T:
+ return i - self.T
+ return i
+
+ for t in range(self.T):
+ for i in range(N):
+ for j in range(N):
+ if periodic:
+ new_content[t][i, j] = self.content[wrap(t + i + j)][0]
+ elif (t + i + j) >= self.T:
+ new_content[t] = None
+ else:
+ new_content[t][i, j] = self.content[t + i + j][0]
+
+ return Corr(new_content)
+
+
+
+
+
+
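
For illustration, a small sketch assuming corr is an existing correlator with N == 1 and sufficiently large T:

    h = corr.Hankel(3)                          # 3x3 Hankel matrix at every timeslice; slices with t + 4 >= T become None
    h_periodic = corr.Hankel(3, periodic=True)  # wraps around the time extent instead of inserting None
    v = h_periodic.GEVP(t0=1, ts=4)             # the result is a matrix Corr and can be fed to the GEVP machinery
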
@@ -2271,7 +2653,7 @@ number of timeslices
View Source
def reverse(self):
"""Reverse the time ordering of the Corr"""
- return Corr(self.content[::-1])
+ return Corr(self.content[:: -1])
@@ -2311,7 +2693,7 @@ number of timeslices
new_content.append(None)
else:
new_content.append(np.array([correlate(o, partner.content[x0][0]) for o in t_slice]))
- elif isinstance(partner, Obs):
+ elif isinstance(partner, Obs): # Should this include CObs?
new_content.append(np.array([correlate(o, partner) for o in t_slice]))
else:
raise Exception("Can only correlate with an Obs or a Corr.")
@@ -2920,7 +3302,6 @@ path to file in which the figure should be saved
View Source
def dump(self, filename, **kwargs):
"""Dumps the Corr into a pickle file
-
Parameters
----------
filename : str
@@ -2929,6 +3310,7 @@ path to file in which the figure should be saved
specifies a custom path for the file (default '.')
"""
dump_object(self, filename, **kwargs)
+ return
@@ -3253,6 +3635,116 @@ specifies a custom path for the file (default '.')
+
+
+
+
+
+
+
+ def
+ sort_vectors(vec_set, ts):
+
+
+
+ View Source
+ def sort_vectors(vec_set, ts): # Helper function used to find a set of Eigenvectors consistent over all timeslices
+ reference_sorting = np.array(vec_set[ts])
+ N = reference_sorting.shape[0]
+ sorted_vec_set = []
+ for t in range(len(vec_set)):
+ if vec_set[t] is None:
+ sorted_vec_set.append(None)
+ elif not t == ts:
+ perms = permutation([i for i in range(N)])
+ best_score = 0
+ for perm in perms:
+ current_score = 1
+ for k in range(N):
+ new_sorting = reference_sorting.copy()
+ new_sorting[perm[k], :] = vec_set[t][k]
+ current_score *= abs(np.linalg.det(new_sorting))
+ if current_score > best_score:
+ best_score = current_score
+ best_perm = perm
+ # print("best perm", best_perm)
+ sorted_vec_set.append([vec_set[t][k] for k in best_perm])
+ else:
+ sorted_vec_set.append(vec_set[t])
+
+ return sorted_vec_set
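
A toy illustration of the determinant-based matching, assuming the helper is imported from pyerrors.correlators; the two-component vectors are made up.

    import numpy as np
    from pyerrors.correlators import sort_vectors

    ref = [np.array([1.0, 0.0]), np.array([0.0, 1.0])]         # ordering at the reference timeslice ts
    other = [np.array([0.1, 0.995]), np.array([0.995, -0.1])]  # roughly the same basis, but in swapped order
    matched = sort_vectors([ref, other], ts=0)
    # matched[1] is [other[1], other[0]], i.e. each vector now lines up with its counterpart at ts.
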
+
+
+
+
+
+
+
+
+
+
+
+ def
+ permutation(lst):
+
+
+
+ View Source
+ def permutation(lst): # Shamelessly copied
+ if len(lst) == 1:
+ return [lst]
+ ll = []
+ for i in range(len(lst)):
+ m = lst[i]
+ remLst = lst[:i] + lst[i + 1:]
+ # Generating all permutations where m is first
+ for p in permutation(remLst):
+ ll.append([m] + p)
+ return ll
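
For reference, a quick sketch of what the recursive helper returns, assuming it is imported from pyerrors.correlators:

    from pyerrors.correlators import permutation

    permutation([0, 1, 2])
    # [[0, 1, 2], [0, 2, 1], [1, 0, 2], [1, 2, 0], [2, 0, 1], [2, 1, 0]]
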
+
+
+
+
+
+
+
+
+
+
+
+ def
+ GEVP_solver(Gt, G0):
+
+
+
+ View Source
+ def GEVP_solver(Gt, G0):  # Just so normalization and sorting do not need to be repeated. Here we could later put in some checks
+ sp_val, sp_vecs = scipy.linalg.eig(Gt, G0)
+ sp_vecs = [sp_vecs[:, np.argsort(sp_val)[-i]] for i in range(1, sp_vecs.shape[0] + 1)]
+ sp_vecs = [v / np.sqrt((v.T @ G0 @ v)) for v in sp_vecs]
+ return sp_vecs
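
A minimal sketch with toy symmetric matrices standing in for C(t) and C(t_0); in practice the input is extracted from a Corr as in GEVP above.

    import numpy as np
    from pyerrors.correlators import GEVP_solver

    G0 = np.array([[2.0, 0.5], [0.5, 1.0]])
    Gt = np.array([[1.0, 0.3], [0.3, 0.4]])
    vecs = GEVP_solver(Gt, G0)
    # vecs[0] belongs to the largest generalized eigenvalue and is normalized to v.T @ G0 @ v = 1.
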
+
+
+
+
+
+