Commit e3f2dbb0 authored by Carlos GO's avatar Carlos GO
Browse files

memory leak fix

parent bcc7f3c0
......@@ -78,7 +78,8 @@ def test(model, test_loader, device, reconstruction_lam, motif_lam):
graph = send_graph_to_device(graph, device)
# Do the computations for the forward pass
out, attributions = model(graph)
with torch.no_grad():
out, attributions = model(graph)
loss, reconstruction_loss, motif_loss = compute_loss(model=model, attributions=attributions, fp=fp,
out=out, K=K, device=device,
reconstruction_lam=reconstruction_lam,
......@@ -95,8 +96,6 @@ def test(model, test_loader, device, reconstruction_lam, motif_lam):
del reconstruction_loss
del motif_loss
torch.cuda.empty_cache()
torch.cuda.synchronize()
return test_loss / test_size, motif_loss_tot / test_size, recons_loss_tot / test_size
def compute_loss(model, attributions, out, K, fp,
......@@ -235,8 +234,6 @@ def train_model(model, criterion, optimizer, device, train_loader, test_loader,
del reconstruction_loss
del motif_loss
torch.cuda.empty_cache()
torch.cuda.synchronize()
# Log training metrics
train_loss = running_loss / num_batches
writer.add_scalar("Training epoch loss", train_loss, epoch)
......@@ -247,9 +244,8 @@ def train_model(model, criterion, optimizer, device, train_loader, test_loader,
# Test phase
test_loss, motif_loss, reconstruction_loss = test(model, test_loader, device, reconstruction_lam, motif_lam)
torch.cuda.empty_cache()
torch.cuda.synchronize()
print(f"test_loss {test_loss}, reconstruction_loss {reconstruction_loss}, fp loss {motif_loss}")
writer.add_scalar("Test loss during training", test_loss, epoch)
writer.add_scalar("Test reconstruction loss", reconstruction_loss, epoch)
writer.add_scalar("Test motif loss", motif_loss, epoch)
......
......@@ -57,6 +57,9 @@ class V1(Dataset):
g_dgl.ndata['h'] = torch.ones((n_nodes, self.emb_size))
g_dgl.title = self.all_graphs[idx]
#sort ring to match dgl node order
ring = dict(sorted(ring.items()))
return g_dgl, ring, fp
def _get_edge_data(self):
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment