Evaluate doesn't return the avg loss

Hi everyone, I am new to the SpeechBrain community and I wrote a model with SpeechBrain in Python.
and now I am trying to evaluate my avg loss on my test data.
but the evaluate function doesn't return the average loss — does anyone know why this is happening?
thank you, avishai

Hi, we will need more context to help you. What recipe are you using, what did you modify etc.

Hi, thank you for the quick response.
I am trying to use the function evaluate(), but it doesn't return anything (it should return the average loss).
I attach my Code here, so you can take a look :slight_smile:
thank you very much, Avishai

import torch

import speechbrain
from speechbrain.dataio.dataset import DynamicItemDataset
from speechbrain.dataio.encoder import CategoricalEncoder

# Build the data pipeline: read the CSV manifest, encode the target words
# as integer labels, and load the raw audio signal for each entry.
dataset = DynamicItemDataset.from_csv("data.csv")

# The encoder must be instantiated before it can learn the label set from
# the dataset (the original snippet used `words_encoder` without defining
# it, which raises a NameError).
words_encoder = CategoricalEncoder()
words_encoder.update_from_didataset(dataset, "words")

# Dynamic items are computed lazily, per example, when the keys are requested.
dataset.add_dynamic_item(words_encoder.encode_label_torch, takes="words", provides="words_encoded")
dataset.add_dynamic_item(speechbrain.dataio.dataio.read_audio, takes="file_path", provides="signal")

# Only these keys are exposed to the training loop.
dataset.set_output_keys(["id", "signal", "words_encoded"])

# Sort by duration so each batch holds similarly-sized signals (less padding).
# NOTE(review): assumes the CSV has a "length" column — confirm.
sorted_data = dataset.filtered_sorted(sort_key="length")

from speechbrain.lobes.features import MFCC, Fbank

from speechbrain.nnet.losses import nll_loss

class SimpleBrain(speechbrain.Brain):
    """Minimal word-classification Brain.

    Pipeline: Fbank features -> encoder -> statistics pooling ->
    linear projection -> log-softmax, trained with NLL loss.
    """

    def compute_forward(self, batch, stage):
        """Compute log-posteriors for one batch.

        Arguments
        ---------
        batch : PaddedBatch
            Batch exposing ``signal`` as (data, relative lengths).
        stage : speechbrain.Stage
            Current stage (TRAIN/VALID/TEST); not used here.

        Returns
        -------
        torch.Tensor
            Log-probabilities over the word classes.
        """
        # (The original kept an unused `example_batch = batch` local; removed.)
        x = self.modules.features(batch.signal.data)
        x = self.modules.encoder(x)
        # Pooling needs the relative lengths so padded frames are ignored.
        x = self.modules.pooling(x, batch.signal.lengths)
        x = self.modules.to_output(x)
        return self.modules.softmax(x)

    def compute_objectives(self, logits, batch, stage):
        """Return the negative log-likelihood loss for the batch."""
        return nll_loss(logits, batch.words_encoded.data)

# Modules handed to the Brain. The encoder output is 256-dim;
# StatisticsPooling concatenates mean and std, giving the 512-dim input
# that `to_output` expects.
modules = {
    "features": Fbank(left_frames=1, right_frames=1),
    # The original snippet left this Sequential unclosed (a syntax error);
    # presumably an activation layer was lost in the paste — TODO confirm
    # against the original recipe.
    "encoder": torch.nn.Sequential(torch.nn.Linear(40, 256),
                                   torch.nn.LeakyReLU()),
    "pooling": speechbrain.nnet.pooling.StatisticsPooling(),
    "to_output": torch.nn.Linear(512, len(words_encoder)),
    "softmax": speechbrain.nnet.activations.Softmax(apply_log=True),
}

brain = SimpleBrain(modules, opt_class=lambda x: torch.optim.SGD(x, 1))

brain.fit(
    range(3),
    train_set=sorted_data,
    train_loader_kwargs={"batch_size": 8, "drop_last": True},
)

# NOTE(review): Brain.evaluate() returns the average test loss only in
# recent SpeechBrain releases; older versions return None and only report
# the loss through on_stage_end. If `average` comes back as None, upgrade
# SpeechBrain or capture the loss in on_stage_end instead.
average = brain.evaluate(
    test_set=dataset,
    progressbar=True,
    test_loader_kwargs={"batch_size": 1, "drop_last": True},
)