REVISTA INNOVACIÓN Y SOFTWARE VOL 4 Nº 2 Septiembre - Febrero 2023 ISSN Nº 2708-0935
Fig. 10: Código Python
Fig. 11: Código Python
avg_train_loss = train_loss / len(train_dataloader)
avg_train_accuracy = train_accuracy / len(train_dataloader)
avg_train_f1 = train_f1 / len(train_dataloader)
avg_train_recall = train_recall / len(train_dataloader)
avg_train_precision = train_precision / len(train_dataloader)

# Imprimir y dar formato a las métricas con dos decimales
print(f'Train loss: {avg_train_loss:.2f}')
print(f'Train accuracy: {avg_train_accuracy:.2f}')
print(f'Train f1: {avg_train_f1:.2f}')
print(f'Train recall: {avg_train_recall:.2f}')
print(f'Train precision: {avg_train_precision:.2f}')
print('-' * 30)

val_loss = 0.0
val_accuracy = 0.0
val_f1 = 0.0
val_recall = 0.0
val_precision = 0.0

model.eval()
for batch in test_dataloader:
    batch_input_ids = batch['input_ids'].to(device)
    batch_attention_mask = batch['attention_mask'].to(device)
    batch_labels = batch['labels'].to(device)

    # Convertir las etiquetas a tipo entero
    batch_labels = batch_labels.long()
    # Convertir las etiquetas en vectores binarios
    batch_labels = F.one_hot(batch_labels, num_classes=N_CAT)
    # Convertir las etiquetas a tipo flotante
    batch_labels = batch_labels.float()

    with torch.no_grad():
        outputs = model(
            batch_input_ids,
            token_type_ids=None,
            attention_mask=batch_attention_mask,
            labels=batch_labels
        )

    loss = outputs[0]
    logits = outputs[1]
    val_loss += loss.item()

    preds = logits.detach().cpu().numpy()
    labels = batch_labels.to('cpu').numpy()
avg_train_loss = train_loss / len(train_dataloader)
avg_train_accuracy = train_accuracy / len(train_dataloader)
avg_train_f1 = train_f1 / len(train_dataloader)
avg_train_recall = train_recall / len(train_dataloader)
avg_train_precision = train_precision / len(train_dataloader)

# Imprimir y dar formato a las métricas con dos decimales
print(f'Train loss: {avg_train_loss:.2f}')
print(f'Train accuracy: {avg_train_accuracy:.2f}')
print(f'Train f1: {avg_train_f1:.2f}')
print(f'Train recall: {avg_train_recall:.2f}')
print(f'Train precision: {avg_train_precision:.2f}')
print('-' * 30)

val_loss = 0.0
val_accuracy = 0.0
val_f1 = 0.0
val_recall = 0.0
val_precision = 0.0

model.eval()
for batch in test_dataloader:
    batch_input_ids = batch['input_ids'].to(device)
    batch_attention_mask = batch['attention_mask'].to(device)
    batch_labels = batch['labels'].to(device)

    # Convertir las etiquetas a tipo entero
    batch_labels = batch_labels.long()
    # Convertir las etiquetas en vectores binarios
    batch_labels = F.one_hot(batch_labels, num_classes=N_CAT)
    # Convertir las etiquetas a tipo flotante
    batch_labels = batch_labels.float()

    with torch.no_grad():
        outputs = model(
            batch_input_ids,
            token_type_ids=None,
            attention_mask=batch_attention_mask,
            labels=batch_labels
        )

    loss = outputs[0]
    logits = outputs[1]
    val_loss += loss.item()

    preds = logits.detach().cpu().numpy()
    labels = batch_labels.to('cpu').numpy()
    # Convertir las etiquetas y las predicciones a tipo entero
    labels = labels.astype(int)
    preds = preds.astype(int)

    # Obtener la clase con mayor probabilidad para cada ejemplo
    labels = np.argmax(labels, axis=1)
    preds = np.argmax(preds, axis=1)

    # Usar las métricas de sklearn para calcular la precisión, el f1,
    # el recall y la precisión para cada lote
    val_accuracy += accuracy_score(labels, preds)
    val_f1 += f1_score(labels, preds, average='macro', zero_division=0)
    val_recall += recall_score(labels, preds, average='macro', zero_division=0)
    val_precision += precision_score(labels, preds, average='macro', zero_division=0)

avg_val_loss = val_loss / len(test_dataloader)
avg_val_accuracy = val_accuracy / len(test_dataloader)
avg_val_f1 = val_f1 / len(test_dataloader)
avg_val_recall = val_recall / len(test_dataloader)
avg_val_precision = val_precision / len(test_dataloader)

# Imprimir y dar formato a las métricas con dos decimales
print(f'Validation loss: {avg_val_loss:.2f}')
print(f'Validation accuracy: {avg_val_accuracy:.2f}')
print(f'Validation f1: {avg_val_f1:.2f}')
print(f'Validation recall: {avg_val_recall:.2f}')
print(f'Validation precision: {avg_val_precision:.2f}')
print('\n')
    # Convertir las etiquetas y las predicciones a tipo entero
    labels = labels.astype(int)
    preds = preds.astype(int)

    # Obtener la clase con mayor probabilidad para cada ejemplo
    labels = np.argmax(labels, axis=1)
    preds = np.argmax(preds, axis=1)

    # Usar las métricas de sklearn para calcular la precisión, el f1,
    # el recall y la precisión para cada lote
    val_accuracy += accuracy_score(labels, preds)
    val_f1 += f1_score(labels, preds, average='macro', zero_division=0)
    val_recall += recall_score(labels, preds, average='macro', zero_division=0)
    val_precision += precision_score(labels, preds, average='macro', zero_division=0)

avg_val_loss = val_loss / len(test_dataloader)
avg_val_accuracy = val_accuracy / len(test_dataloader)
avg_val_f1 = val_f1 / len(test_dataloader)
avg_val_recall = val_recall / len(test_dataloader)
avg_val_precision = val_precision / len(test_dataloader)

# Imprimir y dar formato a las métricas con dos decimales
print(f'Validation loss: {avg_val_loss:.2f}')
print(f'Validation accuracy: {avg_val_accuracy:.2f}')
print(f'Validation f1: {avg_val_f1:.2f}')
print(f'Validation recall: {avg_val_recall:.2f}')
print(f'Validation precision: {avg_val_precision:.2f}')
print('\n')