
Commit

Run black, isort
APJansen committed Nov 20, 2023
1 parent d49196b commit ed168a1
Showing 2 changed files with 44 additions and 32 deletions.
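
The two files below were reformatted mechanically; there are no behavior changes. For reference, a minimal sketch of how a commit like this is typically produced — the tool versions and any line-length or profile options are assumptions here, since a repository usually pins them in pyproject.toml or a pre-commit config, which this page does not show:

    # Hypothetical invocation; the real settings would come from the repo's config.
    pip install black isort
    isort n3fit/src/n3fit/model_gen.py n3fit/src/n3fit/model_trainer.py   # sort and group imports
    black n3fit/src/n3fit/model_gen.py n3fit/src/n3fit/model_trainer.py   # reformat code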
36 changes: 18 additions & 18 deletions n3fit/src/n3fit/model_gen.py
@@ -23,15 +23,16 @@
     AddPhoton,
     FkRotation,
     FlavourToEvolution,
+    Mask,
     ObsRotation,
     Preprocessing,
-    Mask,
     losses,
 )
 from n3fit.layers.observable import is_unique
 from n3fit.msr import generate_msr_model_and_grid
 from validphys.photon.compute import Photon  # only used for type hint here
 
+
 @dataclass
 class ObservableWrapper:
     """Wraps many observables into an experimental layer once the PDF model is prepared
@@ -62,7 +63,8 @@ def _generate_loss(self, mask=None):
         was initialized with"""
         if self.invcovmat is not None:
             loss = losses.LossInvcovmat(
-                self.invcovmat, self.data, mask, covmat=self.covmat, name=self.name)
+                self.invcovmat, self.data, mask, covmat=self.covmat, name=self.name
+            )
         elif self.positivity:
             loss = losses.LossPositivity(name=self.name, c=self.multiplier)
         elif self.integrability:
@@ -105,14 +107,15 @@ def __call__(self, pdf_layer, mask=None):
         return loss_f(experiment_prediction)
 
 
-def observable_generator(spec_dict,
-                         mask_array=None,
-                         training_data=None,
-                         validation_data=None,
-                         invcovmat_tr=None,
-                         invcovmat_vl=None,
-                         positivity_initial=1.0,
-                         integrability=False
+def observable_generator(
+    spec_dict,
+    mask_array=None,
+    training_data=None,
+    validation_data=None,
+    invcovmat_tr=None,
+    invcovmat_vl=None,
+    positivity_initial=1.0,
+    integrability=False,
 ):  # pylint: disable=too-many-locals
     """
     This function generates the observable models for each experiment.
@@ -183,7 +186,7 @@
 
         # Extract the masks that will end up in the observable wrappers...
         if apply_masks:
-            trmask = mask_array[:, offset:offset + dataset.ndata]
+            trmask = mask_array[:, offset : offset + dataset.ndata]
             masks.append(trmask)
             tr_mask_layers.append(Mask(trmask, axis=1, name=f"trmask_{dataset_name}"))
             vl_mask_layers.append(Mask(~trmask, axis=1, name=f"vlmask_{dataset_name}"))
@@ -195,10 +198,8 @@
         # these will then be used to check how many different pdf inputs are needed
         # (and convolutions if given the case)
         obs_layer = Obs_Layer(
-            dataset.fktables_data,
-            dataset.fktables(),
-            operation_name,
-            name=f"dat_{dataset_name}")
+            dataset.fktables_data, dataset.fktables(), operation_name, name=f"dat_{dataset_name}"
+        )
 
         # If the observable layer found that all input grids are equal, the splitting will be None
         # otherwise the different xgrids need to be stored separately
@@ -674,9 +675,8 @@ def compute_unnormalized_pdf(x, neural_network, compute_preprocessing_factor):
             "pdf_xgrid_integration": pdf_integration_grid,
             "xgrid_integration": integrator_input,
             # The photon is treated separately, need to get its integrals to normalize the pdf
-            "photon_integral": op.numpy_to_tensor([[
-                0.0 if not photons else photons.integral[i_replica]
-            ]]
+            "photon_integral": op.numpy_to_tensor(
+                [[0.0 if not photons else photons.integral[i_replica]]]
             ),
         }
     )
40 changes: 26 additions & 14 deletions n3fit/src/n3fit/model_trainer.py
@@ -557,12 +557,14 @@ def _generate_observables(
             invcovmat = np.stack([e[index]["invcovmat"] for e in self.exp_info])
             invcovmat_vl = np.stack([e[index]["invcovmat_vl"] for e in self.exp_info])
 
-            exp_layer = model_gen.observable_generator(exp_dict,
-                                                       mask_array=replica_masks,
-                                                       training_data=training_data,
-                                                       validation_data=validation_data,
-                                                       invcovmat_tr=invcovmat,
-                                                       invcovmat_vl=invcovmat_vl)
+            exp_layer = model_gen.observable_generator(
+                exp_dict,
+                mask_array=replica_masks,
+                training_data=training_data,
+                validation_data=validation_data,
+                invcovmat_tr=invcovmat,
+                invcovmat_vl=invcovmat_vl,
+            )
 
             # Save the input(s) corresponding to this experiment
             self.input_list.append(exp_layer["inputs"])
@@ -584,13 +586,17 @@ def _generate_observables(
                 all_pos_initial, all_pos_multiplier, max_lambda, positivity_steps
             )
             replica_masks = np.stack([pos_dict["trmask"] for i in range(len(self.exp_info))])
-            training_data = np.stack([pos_dict["expdata"].flatten() for i in range(len(self.exp_info))])
+            training_data = np.stack(
+                [pos_dict["expdata"].flatten() for i in range(len(self.exp_info))]
+            )
 
-            pos_layer = model_gen.observable_generator(pos_dict,
-                                                       positivity_initial=pos_initial,
-                                                       mask_array=replica_masks,
-                                                       training_data=training_data,
-                                                       validation_data=training_data)
+            pos_layer = model_gen.observable_generator(
+                pos_dict,
+                positivity_initial=pos_initial,
+                mask_array=replica_masks,
+                training_data=training_data,
+                validation_data=training_data,
+            )
             # The input list is still common
             self.input_list.append(pos_layer["inputs"])
 
@@ -867,7 +873,9 @@ def hyperparametrizable(self, params):
         # Initialize all photon classes for the different replicas:
         if self.lux_params:
             photons = Photon(
-                theoryid=self.theoryid, lux_params=self.lux_params, replicas=self.replicas,
+                theoryid=self.theoryid,
+                lux_params=self.lux_params,
+                replicas=self.replicas,
             )
         else:
             photons = None
@@ -939,7 +947,11 @@ def hyperparametrizable(self, params):
         for model in models.values():
             model.compile(**params["optimizer"])
 
-        passed = self._train_and_fit(models["training"], stopping_object, epochs=epochs,)
+        passed = self._train_and_fit(
+            models["training"],
+            stopping_object,
+            epochs=epochs,
+        )
 
         if self.mode_hyperopt:
             # If doing a hyperparameter scan we need to keep track of the loss function
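Several hunks above follow from black's "magic trailing comma" rule: a call whose argument list ends in a trailing comma (the Photon(...) and _train_and_fit(...) calls) is kept exploded one argument per line, while calls that fit within the line limit are collapsed onto a single line. To verify that files already conform without rewriting them, both tools offer check modes; a sketch under the same assumed paths as above:

    # --check/--check-only fail on violations; --diff prints what would change.
    black --check --diff n3fit/src/n3fit/
    isort --check-only --diff n3fit/src/n3fit/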
