fix(tensor/BatchAccuracyForLogit): fixed not switching grad enabled on again after test calculation

This commit is contained in:
sugarme 2020-07-09 12:50:58 +10:00
parent b2958d438f
commit 69380e1456
2 changed files with 3 additions and 1 deletion

View File

@ -163,7 +163,7 @@ func main() {
testAcc := ts.BatchAccuracyForLogits(net, ds.TestImages, ds.TestLabels, vs.Device(), 512)
si = gotch.GetSysInfo()
memUsed := (float64(si.TotalRam-si.FreeRam) - float64(startRAM)) / 1024
fmt.Printf("Epoch:\t %v\t Memory Used:\t [%8.2f MiB]\tLoss: \t %.3f \tAcc: %10.3f\n", epoch, memUsed, lossVal, testAcc*100.0)
fmt.Printf("Epoch:\t %v\t Memory Used:\t [%8.2f MiB]\tLoss: \t %.3f \tAcc: %10.2f%%\n", epoch, memUsed, lossVal, testAcc*100.0)
iter.Drop()
// Print out GPU used

View File

@ -86,6 +86,8 @@ func BatchAccuracyForLogits(m ModuleT, xs, ys Tensor, d gotch.Device, batchSize
acc.MustDrop()
}
MustGradSetEnabled(true)
return sumAccuracy / sampleCount
}