Commit f76a0bb7 authored by Franck Dary

Loss

parent 66deb59c
@@ -56,6 +56,10 @@ def selectAction(network, state, ts, config, missingLinks, probaRandom, probaOra
 ################################################################################
 def optimizeModel(batchSize, policy_net, target_net, memory, optimizer, gamma) :
+  #lossFct = torch.nn.MSELoss()
+  #lossFct = torch.nn.L1Loss()
+  lossFct = torch.nn.SmoothL1Loss()
   totalLoss = 0.0
   for fromState in range(len(memory)) :
     for toState in range(len(memory[fromState])) :
@@ -74,7 +78,7 @@ def optimizeModel(batchSize, policy_net, target_net, memory, optimizer, gamma) :
       expectedReward = gamma*nextQ + rewards
-      loss = F.smooth_l1_loss(predictedQ, expectedReward)
+      loss = lossFct(predictedQ, expectedReward)
       optimizer.zero_grad()
       loss.backward()
       for param in policy_net.parameters() :
...
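For context, this commit makes the TD loss in optimizeModel swappable: the loss is instantiated once as a torch.nn module (SmoothL1Loss, with MSELoss and L1Loss left as commented-out alternatives) and called where the hard-coded F.smooth_l1_loss used to be. A minimal, self-contained sketch of that pattern, using a stand-in network and fabricated tensors rather than the repository's real memory/replay structures (names such as predictedQ and expectedReward follow the diff; everything else here is assumed for illustration):

import torch

torch.manual_seed(0)

policy_net = torch.nn.Linear(4, 3)            # stand-in for the real policy network
optimizer = torch.optim.Adam(policy_net.parameters(), lr=1e-3)
gamma = 0.99

# Choose the loss once, as the commit does; the alternatives are drop-in swaps.
lossFct = torch.nn.SmoothL1Loss()
#lossFct = torch.nn.MSELoss()
#lossFct = torch.nn.L1Loss()

states = torch.randn(8, 4)                    # dummy batch of states
rewards = torch.randn(8)                      # dummy per-transition rewards
nextQ = torch.randn(8)                        # dummy max Q-values from a target net

predictedQ = policy_net(states).max(dim=1).values
expectedReward = gamma * nextQ + rewards      # TD target, as in the diff

loss = lossFct(predictedQ, expectedReward)    # replaces hard-coded F.smooth_l1_loss
optimizer.zero_grad()
loss.backward()
optimizer.step()

Isolating the loss choice in one line makes it easy to compare SmoothL1 (Huber-like, less sensitive to outlier TD errors) against plain MSE or L1 without touching the update step.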