Compare revisions: manon.arfib/birdcall-detection
Commits on Source (2)
Showing with 2449 additions and 0 deletions
input/*.csv
input/audio_files/**
output/**
**/__pycache__
train/temp/train/**
train/temp/val/**
train/data_training/audio_files/**
train/data_training/*.csv
prepare:
	python creation_datasets/create_csv.py France
	python creation_datasets/download_files.py France.csv
	rm -rf train/data_training/birds.csv
	mv creation_datasets/fichiers_csv/France.csv train/data_training/birds.csv
	rm -rf train/data_training/audio_files/
	mv creation_datasets/audio_files_France train/data_training/audio_files/
	python preload_training_data.py
train:
	python train.py -m all
To use the AI:
Put in the input folder:
an audio_files folder with the audios, and a csv file listing the audios you want to test. The format is:
audio_id
audio1
audio2
...
There is no extension in the audio_id column.
Note: the csv can have multiple columns, but only the audio_id column is used.
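For example, here is a minimal sketch (a hypothetical helper, assuming pandas is installed and that input/audio_files/ contains only .mp3 and .wav files) that builds input/test.csv automatically:

import os
import pandas as pd

# Strip the extensions, since the audio_id column must not contain them
names = [os.path.splitext(f)[0]
         for f in sorted(os.listdir("input/audio_files/"))
         if f.endswith((".mp3", ".wav"))]
pd.DataFrame({"audio_id": names}).to_csv("input/test.csv", index=False)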
To run the training:
Download and preprocess the dataset: in a terminal, go to the "birdcall-detection" folder, then type "make prepare".
Wait for the download to end. You can then launch the training with "make train".
If you do not have enough RAM, it is better to train the models one by one. To do so, run (still from the "birdcall-detection" folder):
python train.py -m 1
python train.py -m 2
python train.py -m 3
python train.py -m 4
Note: if you want to make your own dataset, put in the train/data_training folder an audio_files folder and a csv in the following format:
cnt,en,id,length
**,class1,audio1.mp3,**
**,class1,audio2.mp3,**
...
**,class2,audio4.wav,**
...
where audio*.* is just the name of the corresponding audio file and ** stands for values that must be present but are not used.
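A minimal sketch, assuming the format above (the id column holds the file name, extension included), to check that every audio listed in the csv actually exists in the folder before launching the preprocessing, since a missing file makes it fail:

import os
import pandas as pd

df = pd.read_csv("train/data_training/birds.csv")
audio_dir = "train/data_training/audio_files/"
# Report csv entries whose audio file is missing from the folder
missing = [name for name in df["id"] if not os.path.exists(audio_dir + name)]
if missing:
    print("Missing audio files:", missing)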
import torch
import pandas as pd
from pathlib import Path
import numpy as np
from fastprogress import progress_bar
import warnings
from contextlib import contextmanager
import time
from src.models import models, AttBlock
from src.preproc import clip_to_image

PERIOD = 30

# Arbitrary ensemble weights
ratio = {
    "ref2_th03": 0.25/0.77,
    "ref2_th04": 0.14/0.77,
    "eff_th04": 0.13/0.77,
    "ext": 0.25/0.77
}
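# Note: the raw weights sum to 0.25 + 0.14 + 0.13 + 0.25 = 0.77, so dividing
# by 0.77 normalizes them to sum to 1; each model's framewise output is then
# blended into a weighted average in prediction_for_clip below.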
all_time_duration = 0

# Per-class thresholds could be determined here, but this is not done yet
thresholds = {}

inv_bird_call = np.load('inv_bird_code.npy', allow_pickle=True)
@contextmanager
def timer(name: str):
    t0 = time.time()
    print(f"[{name}] start")
    yield
    global all_time_duration
    all_time_duration += time.time() - t0
    print(f"[{name}] done in {time.time() - t0:.2f} s")
def prediction_for_clip(test_df: pd.DataFrame,
                        clip: str, models):
    """Given a clip, predict which birds are singing on each 30 s chunk"""
    images = clip_to_image(clip)
    array = np.asarray(images)
    tensors = torch.from_numpy(array)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    estimated_event_list = []
    global_time = 0.0
    audio_id = test_df["audio_id"].values[0]
    for image in progress_bar(tensors):
        image = image[None, :]/255.0
        image = image.to(device)
        outputs = {}
        with torch.no_grad():
            for key in models:
                prediction = models[key](image)
                framewise_outputs = prediction["framewise_output"].detach(
                ).cpu().numpy()[0]
                outputs[key] = framewise_outputs
        # Weighted average of the framewise outputs of all models
        key = list(outputs.keys())[0]
        framewise_outputs = np.zeros_like(outputs[key], dtype=np.float32)
        for key in outputs:
            framewise_outputs += ratio[key] * outputs[key]
        thresholded = np.zeros_like(framewise_outputs)
        for i in range(len(inv_bird_call)):
            thresholded[:, i] = framewise_outputs[:, i] >= 0.01
            # thresholded[:, i] = framewise_outputs[:, i] >= thresholds[INV_BIRD_CODE[i]]  # uncomment if there are per-class thresholds
        sec_per_frame = PERIOD / thresholded.shape[0]
        # Group consecutive above-threshold frames into events: head_idx and
        # tail_idx delimit a run of consecutive detected frames for the class
        for target_idx in range(thresholded.shape[1]):
            if thresholded[:, target_idx].mean() == 0:
                pass
            else:
                detected = np.argwhere(thresholded[:, target_idx]).reshape(-1)
                head_idx = 0
                tail_idx = 0
                while True:
                    if (tail_idx + 1 == len(detected)) or (
                            detected[tail_idx + 1] -
                            detected[tail_idx] != 1):
                        onset = sec_per_frame * detected[
                            head_idx] + global_time
                        offset = sec_per_frame * detected[
                            tail_idx] + global_time
                        onset_idx = detected[head_idx]
                        offset_idx = detected[tail_idx]
                        # Inclusive end index, so single-frame events don't
                        # produce an empty slice
                        max_confidence = framewise_outputs[
                            onset_idx:offset_idx + 1, target_idx].max()
                        mean_confidence = framewise_outputs[
                            onset_idx:offset_idx + 1, target_idx].mean()
                        estimated_event = {
                            "audio_id": audio_id,
                            "ebird_code": inv_bird_call[target_idx],
                            "onset": onset,
                            "offset": offset,
                            "max_confidence": max_confidence,
                            "mean_confidence": mean_confidence
                        }
                        estimated_event_list.append(estimated_event)
                        head_idx = tail_idx + 1
                        tail_idx = tail_idx + 1
                        if head_idx >= len(detected):
                            break
                    else:
                        tail_idx += 1
        global_time += PERIOD
    prediction_df = pd.DataFrame(estimated_event_list)
    return prediction_df
def prediction(test_df: pd.DataFrame,
               test_audio: str,
               models):
    """Given the path of a folder containing audios and the corresponding csv, return a prediction for each audio (to be postprocessed)"""
    unique_audio_id = test_df.audio_id.unique()
    warnings.filterwarnings("ignore")
    prediction_dfs = []
    for audio_id in unique_audio_id:
        clip_path = test_audio + audio_id
        test_df_for_audio_id = test_df.query(
            f"audio_id == '{audio_id}'").reset_index(drop=True)
        with timer(f"Prediction & load on {audio_id}"):
            prediction_df = prediction_for_clip(test_df_for_audio_id,
                                                clip=clip_path,
                                                models=models)
        prediction_dfs.append(prediction_df)
    prediction_df = pd.concat(prediction_dfs, axis=0,
                              sort=False).reset_index(drop=True)
    return prediction_df
def postproc(prediction_df, test):
    """Postprocessing: for each audio, keep the bird(s) reaching the highest mean confidence"""
    labels = {}
    for audio_id, sub_df in prediction_df.groupby("audio_id"):
        events = sub_df[["ebird_code", "mean_confidence"]].values
        n_events = len(events)
        bird_max_conf = np.max(events[:, 1])
        for i in range(n_events):
            if events[i][1] == bird_max_conf:
                bird = events[i][0]
                labels.setdefault(f"{audio_id}", set()).add(bird)
    for key in labels:
        labels[key] = " ".join(sorted(list(labels[key])))
    row_ids = list(labels.keys())
    birds = list(labels.values())
    post_processed = pd.DataFrame({
        "audio_id": row_ids,
        "birds": birds})
    all_row_id = test[["audio_id"]]
    submission = all_row_id.merge(post_processed, on="audio_id", how="left")
    submission = submission.fillna("nocall")
    return submission
if __name__ == '__main__':
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    for model in models:  # load the trained weights
        num_ftrs = models[model].fc1.in_features
        models[model].att_block = AttBlock(
            num_ftrs, len(inv_bird_call), activation="sigmoid")
        models[model].load_state_dict(torch.load(
            'weights_trained/'+model+'.pth'))
        models[model].to(device)
    test = pd.read_csv("input/test.csv")
    test_audio = "input/audio_files/"
    test["audio_id"] = test["audio_id"].map(str)
    prediction_df = prediction(
        test_df=test, test_audio=test_audio, models=models)
    post_processed = prediction_df
    if not prediction_df.empty:
        post_processed = postproc(prediction_df, test)
    all_row_id = test[["audio_id"]]
    submission = all_row_id.merge(post_processed, on="audio_id", how="left")
    submission.to_csv("output/submission.csv", index=False)
    print(f"all done in {all_time_duration:.2f} s")
from src.preproc import transform_all_images

if __name__ == '__main__':
    # Run the preprocessing and save the .png images in the train/temp/ folder
    transform_all_images('train/data_training/', 'audio_files/', 'birds.csv')
anaconda-client==1.11.1
anaconda-project==0.11.1
appdirs==1.4.4
astroid==2.15.3
asttokens==2.2.1
attrs==22.1.0
audioread==3.0.0
backcall==0.2.0
backports.functools-lru-cache==1.6.4
backports.tempfile==1.0
backports.weakref==1.0.post1
beautifulsoup4==4.11.1
boltons==23.0.0
Bottleneck==1.3.7
brotlipy==0.7.0
certifi==2022.9.24
cffi==1.15.1
chardet==4.0.0
charset-normalizer==2.0.4
click==8.0.4
clyent==1.2.2
colorama==0.4.6
comm==0.1.3
conda-content-trust==0.1.3
conda-pack==0.6.0
conda-package-handling==2.0.2
conda_package_streaming==0.7.0
conda-repo-cli==1.0.41
conda-verify==3.4.2
contourpy==1.0.7
cryptography==39.0.1
cycler==0.11.0
debugpy==1.6.6
decorator==5.1.1
defusedxml==0.7.1
dill==0.3.6
executing==1.2.0
fastjsonschema==2.16.2
fastprogress==1.0.0
filelock==3.9.0
flit_core==3.8.0
fonttools==4.39.3
future==0.18.3
glob2==0.7
idna==3.4
importlib-metadata==6.1.0
ipykernel==6.15.0
ipython==8.11.0
isort==5.12.0
jedi==0.18.2
Jinja2==3.1.2
joblib==1.2.0
jsonschema==4.17.3
jupyter_client==8.1.0
jupyter_core==5.3.0
keyboard==0.13.5
kiwisolver==1.4.4
lazy_loader==0.2
lazy-object-proxy==1.9.0
libarchive-c==2.9
librosa==0.10.0.post2
llvmlite==0.39.1
MarkupSafe==2.1.1
matplotlib==3.7.1
matplotlib-inline==0.1.6
mccabe==0.7.0
menuinst==1.4.19
mkl-fft==1.3.1
mkl-random==1.2.2
mkl-service==2.4.0
mpmath==1.2.1
msgpack==1.0.5
nbformat==5.7.0
nest-asyncio==1.5.6
networkx==2.8.4
numba==0.56.4
numexpr==2.8.4
numpy==1.23.5
packaging==23.0
panda==0.3.1
pandas==1.5.3
parso==0.8.3
pathlib==1.0.1
pickleshare==0.7.5
Pillow==9.4.0
pip==22.3.1
pkginfo==1.9.6
platformdirs==3.2.0
pluggy==1.0.0
ply==3.11
pooch==1.6.0
prompt-toolkit==3.0.38
psutil==5.9.0
pure-eval==0.2.2
pycosat==0.6.4
pycparser==2.21
Pygments==2.14.0
PyJWT==2.4.0
pylint==2.17.2
pyOpenSSL==23.0.0
pyparsing==3.0.9
PyQt5==5.15.7
PyQt5-sip==12.11.0
pyrsistent==0.18.0
PySocks==1.7.1
PySoundFile==0.9.0.post1
python-dateutil==2.8.2
pytz==2022.7
pywin32==306
PyYAML==6.0
pyzmq==25.0.2
QtPy==2.2.0
requests==2.28.1
resampy==0.4.2
ruamel.yaml==0.17.21
ruamel.yaml.clib==0.2.6
ruamel-yaml-conda==0.17.21
scikit-learn==1.2.2
scipy==1.10.1
setuptools==65.6.3
sip==6.6.2
six==1.16.0
soundfile==0.12.1
soupsieve==2.3.2.post1
soxr==0.3.4
stack-data==0.6.2
sympy==1.11.1
threadpoolctl==3.1.0
toml==0.10.2
tomli==2.0.1
tomlkit==0.11.7
toolz==0.12.0
torch==2.0.0
torchaudio==2.0.0
torchsummary==1.5.1
torchvision==0.15.0
tornado==6.2
tqdm==4.65.0
traitlets==5.7.1
typing_extensions==4.4.0
ujson==5.4.0
urllib3==1.26.14
wcwidth==0.2.6
wheel==0.38.4
win-inet-pton==1.1.0
wincertstore==0.2
wrapt==1.15.0
zipp==3.15.0
zstandard==0.19.0
import librosa
import numpy as np
from pathlib import Path
import shutil
import cv2
import os
from src.utils import normalize_melspec
from fastprogress import progress_bar
# Parameters
TARGET_SR = 32000
melspectrogram_parameters = {
    "n_mels": 128,
    "fmin": 20,
    "fmax": 16000
}
pcen_parameters = {
    "gain": 0.98,
    "bias": 2,
    "power": 0.5,
    "time_constant": 0.4,
    "eps": 0.000001
}
PERIOD = 30
CHUNK = PERIOD * TARGET_SR
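# One chunk is PERIOD seconds of audio at the target sample rate:
# 30 s * 32000 Hz = 960,000 samples per chunk.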
###
def transform_all_images(dirpath: str, sound_file: str, csv_file: str):
    """Create a folder of .png images for the training"""
    csv_file = open(dirpath + csv_file, "r", encoding='utf-8')
    i = 0
    csv_file.readline()  # skip the header line
    # Reset the temp folder
    shutil.rmtree('train/temp/train', ignore_errors=True)
    os.makedirs('train/temp/train')
    shutil.rmtree('train/temp/val', ignore_errors=True)
    os.makedirs('train/temp/val')
    for audio_line in progress_bar(csv_file.readlines()):
        L = audio_line.split(",")
        id_audio = L[-2]
        id_species = L[1]
        # Create a folder for each species
        os.makedirs('train/temp/train/'+id_species, exist_ok=True)
        os.makedirs('train/temp/val/'+id_species, exist_ok=True)
        image = np.swapaxes(clip_to_image(
            dirpath+sound_file+id_audio, all_chunks=False), 0, 2)
        # About 70% of the audios go to the training set, 30% to the validation set
        if i % 50 > 15:
            cv2.imwrite('train/temp/train/'+id_species +
                        '/'+id_audio+'.png', image)
        else:
            cv2.imwrite('train/temp/val/'+id_species +
                        '/'+id_audio+'.png', image)
        i += 1
    csv_file.close()
def preproc(y):
    """Return the preprocessed image of a clip 'y'"""
    y_batch = y.astype(np.float32)
    if len(y_batch) > 0:  # Normalization
        max_vol = np.abs(y_batch).max()
        if max_vol > 0:
            y_batch = np.asfortranarray(y_batch * 1 / max_vol)
    # Zero padding to get an input of constant size
    y_pad = np.zeros(PERIOD * TARGET_SR, dtype=np.float32)
    y_pad[:len(y_batch)] = y_batch
    # Spectrograms
    melspec = librosa.feature.melspectrogram(y=y_pad,
                                             sr=TARGET_SR,
                                             **melspectrogram_parameters)
    pcen = librosa.pcen(melspec, sr=TARGET_SR, **pcen_parameters)
    clean_mel = librosa.power_to_db(melspec ** 1.5)
    melspec = librosa.power_to_db(melspec).astype(np.float32)
    # Normalization
    norm_melspec = normalize_melspec(melspec)
    norm_pcen = normalize_melspec(pcen)
    norm_clean_mel = normalize_melspec(clean_mel)
    # Stack the three spectrograms as the channels of a color picture
    image = np.stack([norm_melspec, norm_pcen, norm_clean_mel], axis=-1)
    height, width, _ = image.shape
    image = cv2.resize(image, (int(width * 224 / height), 224))
    image = np.moveaxis(image, 2, 0)
    image = (image).astype(np.float32)
    return image
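# The returned array is channels-first, with shape (3, 224, W): the resize
# fixes the height at 224 pixels and scales the width to keep the aspect ratio.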
def clip_to_image(clip_path: str, all_chunks=True):
    """Return the clip almost ready for the model. If all_chunks=False, only the first chunk is returned"""
    # Load the audio file
    clip = None
    if Path(clip_path+".mp3").exists():
        clip, _ = librosa.load(clip_path+".mp3",
                               sr=TARGET_SR,
                               mono=True,
                               res_type="kaiser_fast")
    elif Path(clip_path+".wav").exists():
        clip, _ = librosa.load(clip_path + ".wav",
                               sr=TARGET_SR,
                               mono=True,
                               res_type="kaiser_fast")
    if clip is None:
        raise FileNotFoundError(
            f"{clip_path}.mp3 or .wav doesn't exist; only .wav and .mp3 are allowed. "
            "It might also be an audio listed in the csv that is missing from the "
            "audio folder. Easy fix: delete the corresponding line in the csv.")
    y = clip.astype(np.float32)
    if not all_chunks:
        image = preproc(y[:CHUNK])
        array = np.asarray(image)
        return array
    nb_chunk = (len(y)-1)//CHUNK+1
    images = []
    for k in range(nb_chunk):
        image = preproc(y[k*CHUNK:(k+1)*CHUNK])
        images.append(image)
    array = np.asarray(images)
    return array
import torch.nn as nn
import os
import numpy as np


class ImprovedPANNsLoss(nn.Module):
    """Criterion used for the training"""

    def __init__(self, output_key="logit", weights=[1, 0.5]):
        super().__init__()
        self.output_key = output_key
        if output_key == "logit":
            self.normal_loss = nn.BCEWithLogitsLoss()
        else:
            self.normal_loss = nn.BCELoss()
        self.bce = nn.BCELoss()
        self.weights = weights

    def forward(self, input, target):
        input_ = input[self.output_key]
        target = target.float()
        framewise_output = input["framewise_output"]
        clipwise_output_with_max, _ = framewise_output.max(dim=1)
        normal_loss = self.normal_loss(input_, target)
        auxiliary_loss = self.bce(clipwise_output_with_max, target)
        return self.weights[0] * normal_loss + self.weights[1] * auxiliary_loss
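# In words: the main term compares the output selected by output_key with the
# target (BCEWithLogitsLoss for raw logits, BCELoss otherwise), and the
# auxiliary term applies BCELoss to the per-class maximum of the framewise
# output over time; by default the two terms are weighted 1 and 0.5.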
def find_classes(dir: str):
    """Return the sorted class list (inv_bird_code) and the class-to-index mapping (bird_code)"""
    classes = os.listdir(dir)
    classes.sort()
    class_to_idx = {bird: i for i, bird in enumerate(classes)}
    return classes, class_to_idx


def normalize_melspec(X: np.ndarray):
    """Standardize a spectrogram and rescale it to uint8 values in [0, 255]"""
    eps = 1e-6
    mean = X.mean()
    X = X - mean
    std = X.std()
    Xstd = X / (std + eps)
    norm_min, norm_max = Xstd.min(), Xstd.max()
    if (norm_max - norm_min) > eps:
        V = Xstd
        V[V < norm_min] = norm_min
        V[V > norm_max] = norm_max
        V = 255 * (V - norm_min) / (norm_max - norm_min)
        V = V.astype(np.uint8)
    else:
        # Just zero
        V = np.zeros_like(Xstd, dtype=np.uint8)
    return V
from __future__ import print_function, division
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import torch.backends.cudnn as cudnn
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import time
import os
import copy
import keyboard
from src.utils import ImprovedPANNsLoss, find_classes
from src.models import get_optimizer, get_scheduler, AttBlock, models


# Utils
def transform_PIL_Array(image_PIL):
    """Convert a PIL image back to a channels-first float tensor in [0, 1]"""
    image = np.array(image_PIL)
    image = np.swapaxes(image, 0, 2)
    image = torch.from_numpy(image/255.0)
    image = image.float()
    return image


def transform_labels(labels, num_classes):
    """One-hot encode a batch of class indices"""
    siz = labels.size()
    new_labels = torch.zeros((siz[0], num_classes))
    for i, label in enumerate(labels):
        new_labels[i, label] = 1
    return new_labels


__keep_running__ = True


def stop_running():
    """Pressing '$' finishes the current epoch and then stops the training"""
    global __keep_running__
    __keep_running__ = False


keyboard.add_hotkey('$', stop_running)
def train_model(model, device, criterion, optimizer, scheduler, model_name, dataloaders, dataset_sizes, class_names, num_epochs=25):
    since = time.time()
    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = -np.inf
    for epoch in range(num_epochs):
        if not __keep_running__:
            break  # '$' was pressed: stop the training
        print(f'Epoch {epoch}/{num_epochs - 1}')
        print('-' * 10)
        # Each epoch has a training and validation phase
        for phase in ['train', 'val']:
            if phase == 'train':
                model.train()  # Set model to training mode
            else:
                model.eval()  # Set model to evaluate mode
            running_loss = 0.0
            running_corrects = 0
            # Iterate over data.
            for inputs, labels in dataloaders[phase]:
                labels = transform_labels(labels, len(class_names))
                inputs = inputs.to(device)
                labels = labels.to(device)
                # zero the parameter gradients
                optimizer.zero_grad()
                # forward
                # track history only in train
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)
                    if model_name == "eff_th04":
                        outputs["segmentwise_output"], _ = outputs["segmentwise_output"].max(
                            dim=1)
                    loss = criterion(outputs, labels)
                    # backward + optimize only if in training phase
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()
                # statistics
                running_loss += loss.item() * inputs.size(0)
                result = outputs['framewise_output']
                result, _ = torch.max(result, dim=1)
                # "accuracy" is the negative sum of squared errors (least squares)
                running_corrects -= torch.sum((result-labels)**2)
                del inputs, outputs, loss, result
            if phase == 'train':
                scheduler.step()
            epoch_loss = running_loss / dataset_sizes[phase]
            epoch_acc = running_corrects.double() / dataset_sizes[phase]
            print(f'{phase} Loss: {epoch_loss:.4f} Acc: {epoch_acc:.4f}')
            # deep copy the model
            if phase == 'val' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())
            del epoch_acc, epoch_loss, running_corrects, running_loss
        print()
    time_elapsed = time.time() - since
    print(
        f'Training complete in {time_elapsed // 60:.0f}m {time_elapsed % 60:.0f}s')
    print(f'Best val Acc: {best_acc:4f}')
    # load best model weights
    model.load_state_dict(best_model_wts)
    return model
def train(models, device, key_model=None, num_epochs=25, lr=0.001, batch_size=2):
    image_datasets = {x: datasets.ImageFolder('train/temp/' + x, transform=transform_PIL_Array)
                      for x in ['train', 'val']}
    dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=batch_size,
                                                  shuffle=True, num_workers=0)
                   for x in ['train', 'val']}
    dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}
    class_names = image_datasets['train'].classes
    if not key_model:
        key_model = models.keys()
    for model in key_model:
        print(model)
        model_conv = models[model]
        for param in model_conv.parameters():
            param.requires_grad = False
        # Parameters of newly constructed modules have requires_grad=True by default
        num_ftrs = model_conv.fc1.in_features
        model_conv.att_block = AttBlock(
            num_ftrs, len(class_names), activation="sigmoid")
        model_conv = model_conv.to(device)
        print()
        # Note that only the parameters of the final layer are being optimized,
        # as opposed to before.
        if model != "eff_th04":
            criterion = ImprovedPANNsLoss()
        else:
            criterion = ImprovedPANNsLoss('segmentwise_output')
        optimizer_conv = get_optimizer(
            model_conv, {"optimizer": {'name': 'Adam', 'params': {'lr': lr}}})
        exp_lr_scheduler = get_scheduler(optimizer_conv, {'scheduler': {
            'name': 'CosineAnnealingLR', 'params': {'T_max': 10}}})
        model_conv = train_model(model_conv, device, criterion, optimizer_conv,
                                 exp_lr_scheduler, model, dataloaders, dataset_sizes, class_names, num_epochs=num_epochs)
        torch.save(model_conv.state_dict(), 'weights_trained/'+model+'.pth')
        torch.cuda.empty_cache()
        del model_conv
        torch.cuda.empty_cache()
    return None
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-m', '--model', help="""Choose whether to train model "1", "2", "3", "4" or "all" """)
    args = parser.parse_args()
    keys = args.model
    if keys == 'all':
        keys = ["ref2_th03", "ref2_th04",
                "eff_th04", "ext"]
    elif keys in ['1', '2', '3', '4']:
        keys = [["ref2_th03", "ref2_th04",
                 "eff_th04", "ext"][int(keys)-1]]
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print(device)
    cudnn.benchmark = False
    # Batch size 20, or 15 for eff_th04 (8 GB VRAM); if you don't have enough RAM, it is better to train the models one by one
    train(models, device, keys, num_epochs=20, batch_size=15)
    classes, _ = find_classes('train/temp/train/')
    np.save('inv_bird_code.npy', classes)
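# inv_bird_code.npy stores the sorted list of class (species) names; the
# inference script loads it to map the model's output indices back to species.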
{'ref2_th03': [2.0378282148759443, 1.350958687918527, 1.1509922572544644, 1.0251973749517085, 0.9530742771022923, 0.9385142064356543, 0.8788359924987122, 0.8812886332417583, 0.8496827345628005, 0.866959456559066, 0.8618548885806577, 0.8511872343964629, 0.8530584482046275, 0.8455350268018116, 0.8048266735705701, 0.8087591653341776, 0.7847911289760046, 0.7558997856391656, 0.7466989035134788, 0.7164284003959908, 0.7004573109385732, 0.6832559606531164, 0.6746111859332075, 0.6557628505832547, 0.6601907080346412],
'ext': [2.203362265785972, 1.4098027826665522, 1.159971886938745, 1.0570843036358173, 1.0038691929408483, 0.9624923035338685, 0.9488933688991673, 0.9413243388081646, 0.927853762448489, 0.9318580208243905, 0.9206070952363067, 0.923733637883113, 0.9244260683164492, 0.9089159284319197, 0.8776338179032882, 0.8544231875912175, 0.8257928366189475, 0.8005734957181491, 0.7814002403846154, 0.7663307609138909, 0.745571765270862, 0.7303421523544815, 0.7229315789191278, 0.7038964910821601, 0.7025682218782195],
'eff_th04': [1.1523478581355169, 1.009532215831044, 0.9885831560407367, 0.9802307296585251, 0.9729183322780736, 0.9660697350135217, 0.9601244245256697, 0.956034398340917, 0.953995589371566, 0.9535502339457418, 0.953701648083362, 0.953343737256396, 0.9514200399210165, 0.9463589008037862, 0.9367290957943425, 0.921370118528932, 0.8963166959993133, 0.8660067044771635, 0.8309774713201837, 0.7993172446450035, 0.7720077850006439, 0.7488513988452954, 0.7336551540500516, 0.7187049781883157, 0.7086745880462312],
'ref2_th04': [1.4462518377618476, 1.0416387201665522, 0.8972107604309754, 0.8218668843363668, 0.7769343617198232, 0.7448355706183466, 0.7304498022729224, 0.7148164183228881, 0.71854735992767, 0.713101565182864, 0.7222622043483861, 0.712948977292239, 0.702530242584564, 0.7084923209724846, 0.6837784064995064, 0.6682654999114656, 0.6659571888682607, 0.6426979316459908, 0.6317920475215703, 0.6097299345247038, 0.6050359704992274, 0.5991017268254207, 0.5904199369661101, 0.5811248611617875, 0.5721830011724116]}