1 file changed, +3 −3
@@ -239,11 +239,11 @@ def forward(self, inputs):
 optimizer = optim.SGD(model.parameters(), lr=0.001)

 for epoch in range(10):
-    total_loss = torch.Tensor([0])
+    total_loss = 0
     for context, target in trigrams:

         # Step 1. Prepare the inputs to be passed to the model (i.e, turn the words
-        # into integer indices and wrap them in variables)
+        # into integer indices and wrap them in tensors)
         context_idxs = torch.tensor([word_to_ix[w] for w in context], dtype=torch.long)

         # Step 2. Recall that torch *accumulates* gradients. Before passing in a
@@ -256,7 +256,7 @@ def forward(self, inputs):
         log_probs = model(context_idxs)

         # Step 4. Compute your loss function. (Again, Torch wants the target
-        # word wrapped in a variable)
+        # word wrapped in a tensor)
         loss = loss_function(log_probs, torch.tensor([word_to_ix[target]], dtype=torch.long))

         # Step 5. Do the backward pass and update the gradient
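For context: these edits track PyTorch 0.4, which merged Variable into Tensor, so inputs no longer need wrapping in Variable ("variables" becomes "tensors" in the comments), and the running loss can be kept as a plain Python number instead of a one-element Tensor. Below is a minimal runnable sketch of how the N-gram training loop reads after this change. The model definition and data preparation are reconstructed from the surrounding tutorial file and are not part of this diff; the short corpus is a stand-in, and the `total_loss += loss.item()` accumulation is an assumption, since the hunks above do not show that line.

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

CONTEXT_SIZE = 2
EMBEDDING_DIM = 10

# Stand-in corpus; the tutorial uses a longer text.
test_sentence = "When forty winters shall besiege thy brow".split()
trigrams = [([test_sentence[i], test_sentence[i + 1]], test_sentence[i + 2])
            for i in range(len(test_sentence) - 2)]
vocab = set(test_sentence)
word_to_ix = {word: i for i, word in enumerate(vocab)}

class NGramLanguageModeler(nn.Module):
    # Reconstructed from the tutorial; matches the forward(self, inputs)
    # signature shown in the hunk headers above.
    def __init__(self, vocab_size, embedding_dim, context_size):
        super(NGramLanguageModeler, self).__init__()
        self.embeddings = nn.Embedding(vocab_size, embedding_dim)
        self.linear1 = nn.Linear(context_size * embedding_dim, 128)
        self.linear2 = nn.Linear(128, vocab_size)

    def forward(self, inputs):
        embeds = self.embeddings(inputs).view((1, -1))
        out = F.relu(self.linear1(embeds))
        out = self.linear2(out)
        return F.log_softmax(out, dim=1)

losses = []
loss_function = nn.NLLLoss()
model = NGramLanguageModeler(len(vocab), EMBEDDING_DIM, CONTEXT_SIZE)
optimizer = optim.SGD(model.parameters(), lr=0.001)

for epoch in range(10):
    total_loss = 0  # plain Python number, per this commit
    for context, target in trigrams:
        # Turn the context words into integer indices as plain tensors;
        # no Variable wrapper is needed in PyTorch >= 0.4.
        context_idxs = torch.tensor([word_to_ix[w] for w in context], dtype=torch.long)
        model.zero_grad()
        log_probs = model(context_idxs)
        loss = loss_function(log_probs, torch.tensor([word_to_ix[target]], dtype=torch.long))
        loss.backward()
        optimizer.step()
        # Assumed accumulation (not shown in the hunks): .item() extracts a
        # Python float, so no autograd graph is kept alive across iterations.
        total_loss += loss.item()
    losses.append(total_loss)

Keeping `total_loss` as a number rather than `torch.Tensor([0])` avoids accumulating tensors that carry gradient history, which is why the initialization was simplified to `0`.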