diff --git a/python/cugraph-dgl/tests/nn/test_gatconv.py b/python/cugraph-dgl/tests/nn/test_gatconv.py
index ef3047dc2cd..ce145b2bc87 100644
--- a/python/cugraph-dgl/tests/nn/test_gatconv.py
+++ b/python/cugraph-dgl/tests/nn/test_gatconv.py
@@ -35,6 +35,7 @@ def test_gatconv_equality(
 ):
     from dgl.nn.pytorch import GATConv
 
+    torch.manual_seed(12345)
     g = create_graph1().to("cuda")
 
     if idtype_int:
@@ -121,6 +122,7 @@ def test_gatconv_equality(
 def test_gatconv_edge_feats(
     bias, bipartite, concat, max_in_degree, num_heads, to_block, use_edge_feats
 ):
+    torch.manual_seed(12345)
     g = create_graph1().to("cuda")
 
     if to_block:
diff --git a/python/cugraph-dgl/tests/nn/test_gatv2conv.py b/python/cugraph-dgl/tests/nn/test_gatv2conv.py
index cc46a6e4b39..52003edacca 100644
--- a/python/cugraph-dgl/tests/nn/test_gatv2conv.py
+++ b/python/cugraph-dgl/tests/nn/test_gatv2conv.py
@@ -35,6 +35,7 @@ def test_gatv2conv_equality(
 ):
     from dgl.nn.pytorch import GATv2Conv
 
+    torch.manual_seed(12345)
     g = create_graph1().to("cuda")
 
     if idtype_int:
@@ -109,6 +110,7 @@ def test_gatv2conv_equality(
 def test_gatv2conv_edge_feats(
     bias, bipartite, concat, max_in_degree, num_heads, to_block, use_edge_feats
 ):
+    torch.manual_seed(12345)
     g = create_graph1().to("cuda")
 
     if to_block:
diff --git a/python/cugraph-dgl/tests/nn/test_relgraphconv.py b/python/cugraph-dgl/tests/nn/test_relgraphconv.py
index 901f9ba1433..bdaa89e57f2 100644
--- a/python/cugraph-dgl/tests/nn/test_relgraphconv.py
+++ b/python/cugraph-dgl/tests/nn/test_relgraphconv.py
@@ -41,6 +41,7 @@ def test_relgraphconv_equality(
 ):
     from dgl.nn.pytorch import RelGraphConv
 
+    torch.manual_seed(12345)
     in_feat, out_feat, num_rels = 10, 2, 3
     args = (in_feat, out_feat, num_rels)
     kwargs = {
@@ -75,12 +76,18 @@ def test_relgraphconv_equality(
         size=size, src_ids=indices, cdst_ids=offsets, values=etypes, formats="csc"
     )
 
-    torch.manual_seed(0)
     conv1 = RelGraphConv(*args, **kwargs).cuda()
+    conv2 = CuGraphRelGraphConv(*args, **kwargs, apply_norm=False).cuda()
 
-    torch.manual_seed(0)
-    kwargs["apply_norm"] = False
-    conv2 = CuGraphRelGraphConv(*args, **kwargs).cuda()
+    with torch.no_grad():
+        if self_loop:
+            conv2.W.data[:-1] = conv1.linear_r.W.data
+            conv2.W.data[-1] = conv1.loop_weight.data
+        else:
+            conv2.W.data = conv1.linear_r.W.data.detach().clone()
+
+        if regularizer is not None:
+            conv2.coeff.data = conv1.linear_r.coeff.data.detach().clone()
 
     out1 = conv1(g, feat, g.edata[dgl.ETYPE])
 
diff --git a/python/cugraph-dgl/tests/nn/test_sageconv.py b/python/cugraph-dgl/tests/nn/test_sageconv.py
index e2acf9e6596..b5d0a44b868 100644
--- a/python/cugraph-dgl/tests/nn/test_sageconv.py
+++ b/python/cugraph-dgl/tests/nn/test_sageconv.py
@@ -35,6 +35,7 @@ def test_sageconv_equality(
 ):
     from dgl.nn.pytorch import SAGEConv
 
+    torch.manual_seed(12345)
    kwargs = {"aggregator_type": aggr, "bias": bias}
     g = create_graph1().to("cuda")
 
diff --git a/python/cugraph-dgl/tests/nn/test_transformerconv.py b/python/cugraph-dgl/tests/nn/test_transformerconv.py
index b2b69cb35ab..5ac4fd7bea7 100644
--- a/python/cugraph-dgl/tests/nn/test_transformerconv.py
+++ b/python/cugraph-dgl/tests/nn/test_transformerconv.py
@@ -41,6 +41,7 @@ def test_transformerconv(
     use_edge_feats,
     sparse_format,
 ):
+    torch.manual_seed(12345)
     device = "cuda"
     g = create_graph1().to(device)
 