pyerrors.input.sfcf
import os
import fnmatch
import re
import numpy as np  # Thinly-wrapped numpy
from ..obs import Obs
from .utils import sort_names, check_idl


def read_sfcf(path, prefix, name, quarks='.*', corr_type='bi', noffset=0, wf=0, wf2=0, version="1.0c", cfg_separator="n", silent=False, **kwargs):
    """Read sfcf files from given folder structure.

    Parameters
    ----------
    path : str
        Path to the sfcf files.
    prefix : str
        Prefix of the sfcf files.
    name : str
        Name of the correlation function to read.
    quarks : str
        Label of the quarks used in the sfcf input file. e.g. "quark quark"
        for version 0.0 this does NOT need to be given with the typical " - "
        that is present in the output file,
        this is done automatically for this version
    corr_type : str
        Type of correlation function to read. Can be
        - 'bi' for boundary-inner
        - 'bb' for boundary-boundary
        - 'bib' for boundary-inner-boundary
    noffset : int
        Offset of the source (only relevant when wavefunctions are used)
    wf : int
        ID of wave function
    wf2 : int
        ID of the second wavefunction
        (only relevant for boundary-to-boundary correlation functions)
    im : bool
        if True, read imaginary instead of real part
        of the correlation function.
    names : list
        Alternative labeling for replicas/ensembles.
        Has to have the appropriate length
    ens_name : str
        replaces the name of the ensemble
    version: str
        version of SFCF, with which the measurement was done.
        if the compact output option (-c) was specified,
        append a "c" to the version (e.g. "1.0c")
        if the append output option (-a) was specified,
        append an "a" to the version
    cfg_separator : str
        String that separates the ensemble identifier from the configuration number (default 'n').
    replica: list
        list of replica to be read, default is all
    files: list
        list of files to be read per replica, default is all.
        for non-compact output format, hand the folders to be read here.
    check_configs: list[list[int]]
        list of list of supposed configs, e.g. [range(1, 1000)]
        for one replicum with 1000 configs

    Returns
    -------
    result: list[Obs]
        list of Observables with length T, observable per timeslice.
        bb-type correlators have length 1.
    """
    if kwargs.get('im'):
        im = 1
        part = 'imaginary'
    else:
        im = 0
        part = 'real'

    if corr_type == 'bb':
        b2b = True
        single = True
    elif corr_type == 'bib':
        b2b = True
        single = False
    else:
        b2b = False
        single = False

    known_versions = ["0.0", "1.0", "2.0", "1.0c", "2.0c", "1.0a", "2.0a"]

    if version not in known_versions:
        raise Exception("This version is not known!")
    if (version[-1] == "c"):
        appended = False
        compact = True
        version = version[:-1]
    elif (version[-1] == "a"):
        appended = True
        compact = False
        version = version[:-1]
    else:
        compact = False
        appended = False
    ls = []
    if "replica" in kwargs:
        ls = kwargs.get("replica")
    else:
        for (dirpath, dirnames, filenames) in os.walk(path):
            if not appended:
                ls.extend(dirnames)
            else:
                ls.extend(filenames)
            break
        if not ls:
            raise Exception('Error, directory not found')
        # Exclude folders with different names
        for exc in ls:
            if not fnmatch.fnmatch(exc, prefix + '*'):
                ls = list(set(ls) - set([exc]))

    if not appended:
        ls = sort_names(ls)
        replica = len(ls)

    else:
        # each replicum contributes one file per correlator-name extension
        replica = len([file.split(".")[-1] for file in ls]) // len(set([file.split(".")[-1] for file in ls]))
    if not silent:
        print('Read', part, 'part of', name, 'from', prefix[:-1], ',', replica, 'replica')

    if 'names' in kwargs:
        new_names = kwargs.get('names')
        if len(new_names) != len(set(new_names)):
            raise Exception("names are not unique!")
        if len(new_names) != replica:
            raise Exception('names should have the length', replica)

    else:
        ens_name = kwargs.get("ens_name")
        if not appended:
            new_names = _get_rep_names(ls, ens_name)
        else:
            new_names = _get_appended_rep_names(ls, prefix, name, ens_name)
        new_names = sort_names(new_names)

    idl = []
    if not appended:
        for i, item in enumerate(ls):
            rep_path = path + '/' + item
            if "files" in kwargs:
                files = kwargs.get("files")
            else:
                files = []
            sub_ls = _find_files(rep_path, prefix, compact, files)
            rep_idl = []
            no_cfg = len(sub_ls)
            for cfg in sub_ls:
                try:
                    if compact:
                        rep_idl.append(int(cfg.split(cfg_separator)[-1]))
                    else:
                        rep_idl.append(int(cfg[3:]))
                except Exception:
                    raise Exception("Couldn't parse idl from directory, problem with file " + cfg)
            rep_idl.sort()
            # maybe there is a better way to print the idls
            if not silent:
                print(item, ':', no_cfg, ' configurations')
            idl.append(rep_idl)
            # here we have found all the files we need to look into.
            if i == 0:
                # here, we want to find the place within the file,
                # where the correlator we need is stored.
                # to do so, the pattern needed is put together
                # from the input values
                if version == "0.0":
                    file = path + '/' + item + '/' + sub_ls[0] + '/' + name
                else:
                    if compact:
                        file = path + '/' + item + '/' + sub_ls[0]
                    else:
                        file = path + '/' + item + '/' + sub_ls[0] + '/' + name

                pattern = _make_pattern(version, name, noffset, wf, wf2, b2b, quarks)
                start_read, T = _find_correlator(file, version, pattern, b2b, silent=silent)

                # preparing the datastructure
                # the correlators get parsed into...
                deltas = []
                for j in range(T):
                    deltas.append([])

            if compact:
                rep_deltas = _read_compact_rep(path, item, sub_ls, start_read, T, b2b, name, im)

                for t in range(T):
                    deltas[t].append(rep_deltas[t])
            else:
                for t in range(T):
                    deltas[t].append(np.zeros(no_cfg))
                for cnfg, subitem in enumerate(sub_ls):
                    with open(path + '/' + item + '/' + subitem + '/' + name) as fp:
                        for k, line in enumerate(fp):
                            if (k >= start_read and k < start_read + T):
                                floats = list(map(float, line.split()))
                                if version == "0.0":
                                    deltas[k - start_read][i][cnfg] = floats[im - single]
                                else:
                                    deltas[k - start_read][i][cnfg] = floats[1 + im - single]

    else:
        if "files" in kwargs:
            ls = kwargs.get("files")
        else:
            for exc in ls:
                if not fnmatch.fnmatch(exc, prefix + '*.' + name):
                    ls = list(set(ls) - set([exc]))
        ls = sort_names(ls)
        pattern = _make_pattern(version, name, noffset, wf, wf2, b2b, quarks)
        deltas = []
        for rep, file in enumerate(ls):
            rep_idl = []
            filename = path + '/' + file
            T, rep_idl, rep_data = _read_append_rep(filename, pattern, b2b, cfg_separator, im, single)
            if rep == 0:
                for t in range(T):
                    deltas.append([])
            for t in range(T):
                deltas[t].append(rep_data[t])
            idl.append(rep_idl)

    if "check_configs" in kwargs:
        if not silent:
            print("Checking for missing configs...")
        che = kwargs.get("check_configs")
        if not (len(che) == len(idl)):
            raise Exception("check_configs has to be the same length as replica!")
        for r in range(len(idl)):
            if not silent:
                print("checking " + new_names[r])
            check_idl(idl[r], che[r])
        if not silent:
            print("Done")
    result = []
    for t in range(T):
        result.append(Obs(deltas[t], new_names, idl=idl))
    return result


def _find_files(rep_path, prefix, compact, files=None):
    """Return the sorted list of configuration files (compact format) or
    folders (non-compact format) of one replicum.

    If an explicit list ``files`` is handed over, it is sorted in place by the
    trailing number in each name and returned; otherwise the replicum
    directory is scanned and filtered by ``prefix`` (compact) or ``'cfg'``
    (non-compact).
    """
    # bugfix: the original used a mutable default argument (files=[]), which
    # is shared between calls; a None sentinel avoids that pitfall.
    if files is None:
        files = []
    sub_ls = []
    if not files == []:
        files.sort(key=lambda x: int(re.findall(r'\d+', x)[-1]))
    else:
        for (dirpath, dirnames, filenames) in os.walk(rep_path):
            if compact:
                sub_ls.extend(filenames)
            else:
                sub_ls.extend(dirnames)
            break
        if compact:
            for exc in sub_ls:
                if not fnmatch.fnmatch(exc, prefix + '*'):
                    sub_ls = list(set(sub_ls) - set([exc]))
            sub_ls.sort(key=lambda x: int(re.findall(r'\d+', x)[-1]))
        else:
            for exc in sub_ls:
                if not fnmatch.fnmatch(exc, 'cfg*'):
                    sub_ls = list(set(sub_ls) - set([exc]))
            # non-compact config folders are named 'cfg<number>'
            sub_ls.sort(key=lambda x: int(x[3:]))
        files = sub_ls
    if len(files) == 0:
        # bugfix: OSError/FileNotFoundError treats multiple positional args as
        # (errno, strerror, ...); build a single message string instead.
        raise FileNotFoundError("Did not find files in " + rep_path + " with prefix " + prefix + " and the given structure.")
    return files


def _make_pattern(version, name, noffset, wf, wf2, b2b, quarks):
    """Build the search pattern that identifies the requested correlator
    block in an sfcf output file of the given format ``version``."""
    if version == "0.0":
        pattern = "# " + name + " : offset " + str(noffset) + ", wf " + str(wf)
        if b2b:
            pattern += ", wf_2 " + str(wf2)
        # version 0.0 expects the quarks joined by " - " in the file header
        qs = quarks.split(" ")
        pattern += " : " + qs[0] + " - " + qs[1]
    else:
        pattern = 'name ' + name + '\nquarks ' + quarks + '\noffset ' + str(noffset) + '\nwf ' + str(wf)
        if b2b:
            pattern += '\nwf_2 ' + str(wf2)
    return pattern


def _find_correlator(file_name, version, pattern, b2b, silent=False):
    """Locate the requested correlator inside ``file_name``.

    Returns
    -------
    start_read : int
        Line index at which the correlator data starts.
    T : int
        Number of timeslices of the correlator.

    Raises
    ------
    ValueError
        If the pattern is not found or the correlator block is empty.
    """
    T = 0

    with open(file_name, "r") as file:
        content = file.read()

    match = re.search(pattern, content)
    if match:
        if version == "0.0":
            start_read = content.count('\n', 0, match.start()) + 1
            # NOTE(review): start_read is a line number but is passed to
            # count() as a character offset here — looks suspicious, confirm
            # against the version 0.0 file format before changing.
            T = content.count('\n', start_read)
        else:
            # data begins 5 (+1 for b2b) header lines after the match
            start_read = content.count('\n', 0, match.start()) + 5 + b2b
            end_match = re.search(r'\n\s*\n', content[match.start():])
            T = content[match.start():].count('\n', 0, end_match.start()) - 4 - b2b
            if not T > 0:
                raise ValueError("Correlator with pattern\n" + pattern + "\nis empty!")
        if not silent:
            print(T, 'entries, starting to read in line', start_read)

    else:
        raise ValueError('Correlator with pattern\n' + pattern + '\nnot found.')

    return start_read, T


def _read_compact_file(rep_path, config_file, start_read, T, b2b, name, im):
    """Extract the correlator values (one float per timeslice) from a single
    compact-format configuration file.

    ``im`` selects the real (0) or imaginary (1) column.
    """
    with open(rep_path + config_file) as fp:
        lines = fp.readlines()
        # check, if the correlator is in fact
        # printed completely
        if (start_read + T + 1 > len(lines)):
            raise Exception("EOF before end of correlator data! Maybe " + rep_path + config_file + " is corrupted?")
        corr_lines = lines[start_read - 6: start_read + T]
        del lines
        t_vals = []

        # sanity check: the header 6 lines above the data must name this
        # correlator
        if corr_lines[1 - b2b].strip() != 'name ' + name:
            raise Exception('Wrong format in file', config_file)

        for k in range(6, T + 6):
            floats = list(map(float, corr_lines[k].split()))
            # the last two columns hold the real and imaginary part
            t_vals.append(floats[-2:][im])
        return t_vals


def _read_compact_rep(path, rep, sub_ls, start_read, T, b2b, name, im):
    """Read one replicum in compact format.

    Returns a list of T numpy arrays, one entry per configuration.
    """
    rep_path = path + '/' + rep + '/'
    no_cfg = len(sub_ls)
    deltas = []
    for t in range(T):
        deltas.append(np.zeros(no_cfg))
    for cfg in range(no_cfg):
        cfg_file = sub_ls[cfg]
        cfg_data = _read_compact_file(rep_path, cfg_file, start_read, T, b2b, name, im)
        for t in range(T):
            deltas[t][cfg] = cfg_data[t]
    return deltas


def _read_chunk(chunk, gauge_line, cfg_sep, start_read, T, corr_line, b2b, pattern, im, single):
    """Parse one ``[run]`` chunk of an appended-format file.

    Returns the configuration number (idl) and the list of T correlator
    values; ``data`` stays empty if the pattern does not match this chunk.
    """
    try:
        idl = int(chunk[gauge_line].split(cfg_sep)[-1])
    except Exception:
        raise Exception("Couldn't parse idl from directory, problem with chunk around line ", gauge_line)

    found_pat = ""
    data = []
    for li in chunk[corr_line + 1:corr_line + 6 + b2b]:
        found_pat += li
    if re.search(pattern, found_pat):
        for t, line in enumerate(chunk[start_read:start_read + T]):
            floats = list(map(float, line.split()))
            data.append(floats[im + 1 - single])
    return idl, data


def _read_append_rep(filename, pattern, b2b, cfg_separator, im, single):
    """Read a whole appended-format replica file.

    Returns
    -------
    T : int
        Number of timeslices.
    rep_idl : list[int]
        Configuration numbers, one per chunk.
    data : list[list[float]]
        ``data[t][cfg]`` correlator values.
    """
    with open(filename, 'r') as fp:
        content = fp.readlines()
        data_starts = []
        for linenumber, line in enumerate(content):
            if "[run]" in line:
                data_starts.append(linenumber)
        if len(set([data_starts[i] - data_starts[i - 1] for i in range(1, len(data_starts))])) > 1:
            raise Exception("Irregularities in file structure found, not all runs have the same output length")
        # use the first chunk to locate the correlator and determine T
        chunk = content[:data_starts[1]]
        for linenumber, line in enumerate(chunk):
            if line.startswith("gauge_name"):
                gauge_line = linenumber
            elif line.startswith("[correlator]"):
                corr_line = linenumber
                found_pat = ""
                for li in chunk[corr_line + 1: corr_line + 6 + b2b]:
                    found_pat += li
                if re.search(pattern, found_pat):
                    start_read = corr_line + 7 + b2b
                    break
        else:
            raise ValueError("Did not find pattern\n", pattern, "\nin\n", filename)
        endline = corr_line + 6 + b2b
        while not chunk[endline] == "\n":
            endline += 1
        T = endline - start_read

        # all other chunks should follow the same structure
        rep_idl = []
        rep_data = []

        for cnfg in range(len(data_starts)):
            start = data_starts[cnfg]
            # NOTE(review): assumes the first [run] header sits at line 0, so
            # data_starts[1] equals the chunk length — confirm.
            stop = start + data_starts[1]
            chunk = content[start:stop]
            idl, data = _read_chunk(chunk, gauge_line, cfg_separator, start_read, T, corr_line, b2b, pattern, im, single)
            rep_idl.append(idl)
            rep_data.append(data)

        # transpose from per-configuration rows to per-timeslice rows
        data = []

        for t in range(T):
            data.append([])
            for c in range(len(rep_data)):
                data[t].append(rep_data[c][t])
        return T, rep_idl, data


def _get_rep_names(ls, ens_name=None):
    """Derive 'ensemble|replicum' labels from replicum folder names.

    The replicum tag is everything from the first 'r' in the name; if
    ``ens_name`` is given it replaces the ensemble part.
    """
    new_names = []
    for entry in ls:
        try:
            idx = entry.index('r')
        except Exception:
            raise Exception("Automatic recognition of replicum failed, please enter the key word 'names'.")

        if ens_name:
            # bugfix: use the ens_name argument, not the literal string
            # 'ens_name'
            new_names.append(ens_name + '|' + entry[idx:])
        else:
            new_names.append(entry[:idx] + '|' + entry[idx:])
    return new_names


def _get_appended_rep_names(ls, prefix, name, ens_name=None):
    """Derive 'ensemble|replicum' labels from appended-format file names
    (``prefix*.name``), stripping the correlator-name extension first."""
    new_names = []
    for exc in ls:
        if not fnmatch.fnmatch(exc, prefix + '*.' + name):
            ls = list(set(ls) - set([exc]))
    ls.sort(key=lambda x: int(re.findall(r'\d+', x)[-1]))
    for entry in ls:
        myentry = entry[:-len(name) - 1]
        try:
            idx = myentry.index('r')
        except Exception:
            raise Exception("Automatic recognition of replicum failed, please enter the key word 'names'.")

        if ens_name:
            # bugfix: use the ens_name argument (not the literal 'ens_name')
            # and the suffix-stripped name, matching the branch below
            new_names.append(ens_name + '|' + myentry[idx:])
        else:
            new_names.append(myentry[:idx] + '|' + myentry[idx:])
    return new_names
def read_sfcf(path, prefix, name, quarks='.*', corr_type='bi', noffset=0, wf=0, wf2=0, version="1.0c", cfg_separator="n", silent=False, **kwargs):
    """Read sfcf files from the given folder structure.

    Parameters
    ----------
    path : str
        Path to the sfcf files.
    prefix : str
        Prefix of the sfcf files.
    name : str
        Name of the correlation function to read.
    quarks : str
        Label of the quarks used in the sfcf input file, e.g. "quark quark".
        For version 0.0 the typical " - " of the output file must NOT be
        included; it is inserted automatically for that version.
    corr_type : str
        Correlation function type: 'bi' (boundary-inner), 'bb'
        (boundary-boundary) or 'bib' (boundary-inner-boundary).
    noffset : int
        Offset of the source (only relevant when wavefunctions are used).
    wf : int
        ID of the wave function.
    wf2 : int
        ID of the second wavefunction (boundary-to-boundary only).
    im : bool
        If True, read the imaginary instead of the real part.
    names : list
        Alternative labeling for replicas/ensembles; must have the
        appropriate length.
    ens_name : str
        Replaces the name of the ensemble.
    version : str
        SFCF version used for the measurement. Append "c" for compact
        output (-c) or "a" for appended output (-a), e.g. "1.0c".
    cfg_separator : str
        Separator between the ensemble identifier and the configuration
        number (default 'n').
    replica : list
        List of replica to be read; default is all.
    files : list
        List of files to be read per replica; default is all. For the
        non-compact output format, hand the folders to be read here.
    check_configs : list[list[int]]
        List of lists of expected configs, e.g. [range(1, 1000)] for one
        replicum with 1000 configs.

    Returns
    -------
    result : list[Obs]
        One observable per timeslice (length T); bb-type correlators have
        length 1.
    """
    if kwargs.get('im'):
        im, part = 1, 'imaginary'
    else:
        im, part = 0, 'real'

    # bb and bib are both boundary-to-boundary; only bb is a single value
    b2b = corr_type in ('bb', 'bib')
    single = corr_type == 'bb'

    known_versions = ["0.0", "1.0", "2.0", "1.0c", "2.0c", "1.0a", "2.0a"]
    if version not in known_versions:
        raise Exception("This version is not known!")
    compact = version.endswith("c")
    appended = version.endswith("a")
    if compact or appended:
        version = version[:-1]

    if "replica" in kwargs:
        ls = kwargs.get("replica")
    else:
        ls = []
        for (dirpath, dirnames, filenames) in os.walk(path):
            ls.extend(filenames if appended else dirnames)
            break
        if not ls:
            raise Exception('Error, directory not found')
        # keep only entries that carry the requested prefix
        ls = [candidate for candidate in ls if fnmatch.fnmatch(candidate, prefix + '*')]

    if not appended:
        ls = sort_names(ls)
        replica = len(ls)
    else:
        extensions = [entry.split(".")[-1] for entry in ls]
        replica = len(extensions) // len(set(extensions))
    if not silent:
        print('Read', part, 'part of', name, 'from', prefix[:-1], ',', replica, 'replica')

    if 'names' in kwargs:
        new_names = kwargs.get('names')
        if len(new_names) != len(set(new_names)):
            raise Exception("names are not unique!")
        if len(new_names) != replica:
            raise Exception('names should have the length', replica)
    else:
        ens_name = kwargs.get("ens_name")
        if appended:
            new_names = _get_appended_rep_names(ls, prefix, name, ens_name)
        else:
            new_names = _get_rep_names(ls, ens_name)
        new_names = sort_names(new_names)

    idl = []
    if not appended:
        for rep_no, rep_name in enumerate(ls):
            rep_path = path + '/' + rep_name
            files = kwargs.get("files") if "files" in kwargs else []
            sub_ls = _find_files(rep_path, prefix, compact, files)
            no_cfg = len(sub_ls)
            rep_idl = []
            for cfg in sub_ls:
                try:
                    if compact:
                        rep_idl.append(int(cfg.split(cfg_separator)[-1]))
                    else:
                        rep_idl.append(int(cfg[3:]))
                except Exception:
                    raise Exception("Couldn't parse idl from directroy, problem with file " + cfg)
            rep_idl.sort()
            if not silent:
                print(rep_name, ':', no_cfg, ' configurations')
            idl.append(rep_idl)
            if rep_no == 0:
                # locate the requested correlator inside the first file and
                # derive T from it; all other files share this layout
                # (version 0.0 is never compact, so both non-compact cases
                # point at the per-config folder + correlator name)
                if compact:
                    first_file = path + '/' + rep_name + '/' + sub_ls[0]
                else:
                    first_file = path + '/' + rep_name + '/' + sub_ls[0] + '/' + name
                pattern = _make_pattern(version, name, noffset, wf, wf2, b2b, quarks)
                start_read, T = _find_correlator(first_file, version, pattern, b2b, silent=silent)
                deltas = [[] for _ in range(T)]

            if compact:
                rep_deltas = _read_compact_rep(path, rep_name, sub_ls, start_read, T, b2b, name, im)
                for t in range(T):
                    deltas[t].append(rep_deltas[t])
            else:
                for t in range(T):
                    deltas[t].append(np.zeros(no_cfg))
                for cnfg, sub_name in enumerate(sub_ls):
                    with open(path + '/' + rep_name + '/' + sub_name + '/' + name) as fp:
                        for lineno, line in enumerate(fp):
                            if start_read <= lineno < start_read + T:
                                floats = [float(token) for token in line.split()]
                                column = (im - single) if version == "0.0" else (1 + im - single)
                                deltas[lineno - start_read][rep_no][cnfg] = floats[column]

    else:
        if "files" in kwargs:
            ls = kwargs.get("files")
        else:
            ls = [candidate for candidate in ls if fnmatch.fnmatch(candidate, prefix + '*.' + name)]
        ls = sort_names(ls)
        pattern = _make_pattern(version, name, noffset, wf, wf2, b2b, quarks)
        deltas = []
        for rep_no, rep_file in enumerate(ls):
            T, rep_idl, rep_data = _read_append_rep(path + '/' + rep_file, pattern, b2b, cfg_separator, im, single)
            if rep_no == 0:
                deltas = [[] for _ in range(T)]
            for t in range(T):
                deltas[t].append(rep_data[t])
            idl.append(rep_idl)

    if "check_configs" in kwargs:
        if not silent:
            print("Checking for missing configs...")
        che = kwargs.get("check_configs")
        if not (len(che) == len(idl)):
            raise Exception("check_configs has to be the same length as replica!")
        for r in range(len(idl)):
            if not silent:
                print("checking " + new_names[r])
            check_idl(idl[r], che[r])
        if not silent:
            print("Done")

    return [Obs(deltas[t], new_names, idl=idl) for t in range(T)]
Read sfcf files from given folder structure.
Parameters
- path (str): Path to the sfcf files.
- prefix (str): Prefix of the sfcf files.
- name (str): Name of the correlation function to read.
- quarks (str): Label of the quarks used in the sfcf input file, e.g. "quark quark". For version 0.0 this does NOT need to be given with the typical " - " that is present in the output file; this is done automatically for this version.
- corr_type (str):
Type of correlation function to read. Can be
- 'bi' for boundary-inner
- 'bb' for boundary-boundary
- 'bib' for boundary-inner-boundary
- noffset (int): Offset of the source (only relevant when wavefunctions are used)
- wf (int): ID of wave function
- wf2 (int): ID of the second wavefunction (only relevant for boundary-to-boundary correlation functions)
- im (bool): if True, read imaginary instead of real part of the correlation function.
- names (list): Alternative labeling for replicas/ensembles. Has to have the appropriate length
- ens_name (str): replaces the name of the ensemble
- version (str): version of SFCF, with which the measurement was done. if the compact output option (-c) was specified, append a "c" to the version (e.g. "1.0c") if the append output option (-a) was specified, append an "a" to the version
- cfg_separator (str): String that separates the ensemble identifier from the configuration number (default 'n').
- replica (list): list of replica to be read, default is all
- files (list): list of files to be read per replica, default is all. for non-compact output format, hand the folders to be read here.
- check_configs (list[list[int]]): list of lists of expected configs, e.g. [range(1, 1000)] for one replicum with 1000 configs
Returns
- result (list[Obs]): list of Observables with length T, observable per timeslice. bb-type correlators have length 1.