From f283fc43213e7db75bd4ed8b35231d5fdfdf9683 Mon Sep 17 00:00:00 2001 From: Matthias Jouanneaux Date: Fri, 30 Jun 2023 09:54:26 -0700 Subject: [PATCH 1/4] fix renaming of cugraph-ops symbols (refactoring) --- cpp/src/sampling/neighborhood.cu | 22 +++++++++++++++++----- cpp/src/utilities/cugraph_ops_utils.hpp | 24 ++++++++---------------- 2 files changed, 25 insertions(+), 21 deletions(-) diff --git a/cpp/src/sampling/neighborhood.cu b/cpp/src/sampling/neighborhood.cu index 0c0beb8d8b0..2f7b203a319 100644 --- a/cpp/src/sampling/neighborhood.cu +++ b/cpp/src/sampling/neighborhood.cu @@ -22,6 +22,8 @@ #include +#include + namespace cugraph { template @@ -34,14 +36,19 @@ sample_neighbors_adjacency_list(raft::handle_t const& handle, size_t sampling_size, ops::graph::SamplingAlgoT sampling_algo) { - const auto [ops_graph, max_degree] = detail::get_graph_and_max_degree(graph_view); - return ops::graph::uniform_sample_csr(rng_state, + using base_vertex_t = std::decay_t; + using base_edge_t = std::decay_t; + static_assert(std::is_same_v, + "cugraph-ops sampling not yet implemented for different node and edge types"); + + const auto ops_graph = detail::get_graph(graph_view); + return ops::graph::uniform_sample_csc(rng_state, ops_graph, ptr_d_start, num_start_vertices, sampling_size, sampling_algo, - max_degree, + ops_graph.dst_max_in_degree, handle.get_stream()); } @@ -55,14 +62,19 @@ std::tuple, rmm::device_uvector> sample_ size_t sampling_size, ops::graph::SamplingAlgoT sampling_algo) { - const auto [ops_graph, max_degree] = detail::get_graph_and_max_degree(graph_view); + using base_vertex_t = std::decay_t; + using base_edge_t = std::decay_t; + static_assert(std::is_same_v, + "cugraph-ops sampling not yet implemented for different node and edge types"); + + const auto ops_graph = detail::get_graph(graph_view); return ops::graph::uniform_sample_coo(rng_state, ops_graph, ptr_d_start, num_start_vertices, sampling_size, sampling_algo, - max_degree, + ops_graph.dst_max_in_degree, handle.get_stream()); } diff --git a/cpp/src/utilities/cugraph_ops_utils.hpp b/cpp/src/utilities/cugraph_ops_utils.hpp index 1dbe930e4c9..9aea4183866 100644 --- a/cpp/src/utilities/cugraph_ops_utils.hpp +++ b/cpp/src/utilities/cugraph_ops_utils.hpp @@ -20,18 +20,20 @@ #include -#include - namespace cugraph { namespace detail { template -ops::graph::fg_csr get_graph( +ops::graph::csc get_graph( graph_view_t const& gview) { - ops::graph::fg_csr graph; - graph.n_nodes = gview.number_of_vertices(); - graph.n_indices = gview.number_of_edges(); + ops::graph::csc graph; + graph.n_src_nodes = gview.number_of_vertices(); + graph.n_dst_nodes = gview.number_of_vertices(); + graph.n_indices = gview.number_of_edges(); + // FIXME this is sufficient for now, but if there is a fast (cached) way + // of getting max degree, use that instead + graph.dst_max_in_degree = std::numeric_limits::max(); // FIXME: this is evil and is just temporary until we have a matching type in cugraph-ops // or we change the type accepted by the functions calling into cugraph-ops graph.offsets = const_cast(gview.local_edge_partition_view().offsets().data()); @@ -39,15 +41,5 @@ ops::graph::fg_csr get_graph( return graph; } -template -std::tuple, NodeTypeT> get_graph_and_max_degree( - graph_view_t const& gview) -{ - // FIXME this is sufficient for now, but if there is a fast (cached) way - // of getting max degree, use that instead - auto max_degree = std::numeric_limits::max(); - return std::make_tuple(get_graph(gview), max_degree); -} - } // namespace detail } // 
namespace cugraph From 142b68d42129747d7c5ad163177bcc40fd5afc04 Mon Sep 17 00:00:00 2001 From: Tingyu Wang Date: Mon, 3 Jul 2023 16:23:27 -0400 Subject: [PATCH 2/4] update conv layers based on cugraph-ops refactoring --- .../cugraph_dgl/nn/conv/gatconv.py | 71 +++++++------------ .../cugraph_dgl/nn/conv/transformerconv.py | 43 +++++------ .../tests/nn/test_transformerconv.py | 6 +- .../cugraph-pyg/cugraph_pyg/nn/conv/base.py | 56 +++++++-------- .../cugraph_pyg/nn/conv/gat_conv.py | 35 +++------ .../cugraph_pyg/nn/conv/gatv2_conv.py | 52 +++++--------- .../cugraph_pyg/nn/conv/transformer_conv.py | 8 +-- 7 files changed, 109 insertions(+), 162 deletions(-) diff --git a/python/cugraph-dgl/cugraph_dgl/nn/conv/gatconv.py b/python/cugraph-dgl/cugraph_dgl/nn/conv/gatconv.py index e70f2d0c6d1..7825febc24b 100644 --- a/python/cugraph-dgl/cugraph_dgl/nn/conv/gatconv.py +++ b/python/cugraph-dgl/cugraph_dgl/nn/conv/gatconv.py @@ -19,8 +19,8 @@ from cugraph_dgl.nn.conv.base import BaseConv from cugraph.utilities.utils import import_optional -from pylibcugraphops.pytorch import BipartiteCSC, SampledCSC, StaticCSC -from pylibcugraphops.pytorch.operators import mha_gat_n2n, mha_gat_n2n_bipartite +from pylibcugraphops.pytorch import CSC +from pylibcugraphops.pytorch.operators import mha_gat_n2n dgl = import_optional("dgl") torch = import_optional("torch") @@ -173,9 +173,20 @@ def forward( :math:`H` is the number of heads, and :math:`D_{out}` is size of output feature. """ + if max_in_degree is None: + max_in_degree = -1 + bipartite = not isinstance(nfeat, torch.Tensor) offsets, indices, _ = g.adj_tensors("csc") + graph = CSC( + offsets=offsets, + indices=indices, + num_src_nodes=g.num_src_nodes(), + dst_max_in_degree=max_in_degree, + is_bipartite=bipartite, + ) + if efeat is not None: if self.fc_edge is None: raise RuntimeError( @@ -191,23 +202,8 @@ def forward( f"integers to allow bipartite node features, but got " f"{self.in_feats}." ) - _graph = BipartiteCSC( - offsets=offsets, indices=indices, num_src_nodes=g.num_src_nodes() - ) nfeat_src = self.fc_src(nfeat[0]) nfeat_dst = self.fc_dst(nfeat[1]) - - out = mha_gat_n2n_bipartite( - src_feat=nfeat_src, - dst_feat=nfeat_dst, - attn_weights=self.attn_weights, - graph=_graph, - num_heads=self.num_heads, - activation="LeakyReLU", - negative_slope=self.negative_slope, - concat_heads=self.concat, - edge_feat=efeat, - ) else: if not hasattr(self, "fc"): raise RuntimeError( @@ -215,36 +211,17 @@ def forward( f"integer, but got {self.in_feats}." 
) nfeat = self.fc(nfeat) - # Sampled primitive does not support edge features - if g.is_block and efeat is None: - if max_in_degree is None: - max_in_degree = g.in_degrees().max().item() - - if max_in_degree < self.MAX_IN_DEGREE_MFG: - _graph = SampledCSC( - offsets=offsets, - indices=indices, - max_num_neighbors=max_in_degree, - num_src_nodes=g.num_src_nodes(), - ) - else: - offsets = self.pad_offsets(offsets, g.num_src_nodes() + 1) - _graph = StaticCSC(offsets=offsets, indices=indices) - else: - if g.is_block: - offsets = self.pad_offsets(offsets, g.num_src_nodes() + 1) - _graph = StaticCSC(offsets=offsets, indices=indices) - - out = mha_gat_n2n( - feat=nfeat, - attn_weights=self.attn_weights, - graph=_graph, - num_heads=self.num_heads, - activation="LeakyReLU", - negative_slope=self.negative_slope, - concat_heads=self.concat, - edge_feat=efeat, - )[: g.num_dst_nodes()] + + out = mha_gat_n2n( + (nfeat_src, nfeat_dst) if bipartite else nfeat, + self.attn_weights, + graph, + num_heads=self.num_heads, + activation="LeakyReLU", + negative_slope=self.negative_slope, + concat_heads=self.concat, + edge_feat=efeat, + )[: g.num_dst_nodes()] if self.concat: out = out.view(-1, self.num_heads, self.out_feats) diff --git a/python/cugraph-dgl/cugraph_dgl/nn/conv/transformerconv.py b/python/cugraph-dgl/cugraph_dgl/nn/conv/transformerconv.py index 1898f5159b1..141adc86069 100644 --- a/python/cugraph-dgl/cugraph_dgl/nn/conv/transformerconv.py +++ b/python/cugraph-dgl/cugraph_dgl/nn/conv/transformerconv.py @@ -15,7 +15,7 @@ from cugraph_dgl.nn.conv.base import BaseConv from cugraph.utilities.utils import import_optional -from pylibcugraphops.pytorch import BipartiteCSC, StaticCSC +from pylibcugraphops.pytorch import CSC from pylibcugraphops.pytorch.operators import mha_simple_n2n dgl = import_optional("dgl") @@ -132,31 +132,34 @@ def forward( efeat: torch.Tensor, optional Edge feature tensor. Default: ``None``. """ - bipartite = not isinstance(nfeat, torch.Tensor) offsets, indices, _ = g.adj_tensors("csc") - - if bipartite: - src_feats, dst_feats = nfeat - _graph = BipartiteCSC( - offsets=offsets, indices=indices, num_src_nodes=g.num_src_nodes() - ) - else: - src_feats = dst_feats = nfeat - if g.is_block: - offsets = self.pad_offsets(offsets, g.num_src_nodes() + 1) - _graph = StaticCSC(offsets=offsets, indices=indices) - - query = self.lin_query(dst_feats) - key = self.lin_key(src_feats) - value = self.lin_value(src_feats) - if self.lin_edge is not None: + graph = CSC( + offsets=offsets, + indices=indices, + num_src_nodes=g.num_src_nodes(), + is_bipartite=True, + ) + + if isinstance(nfeat, torch.Tensor): + nfeat = (nfeat, nfeat) + + query = self.lin_query(nfeat[1][: g.num_dst_nodes()]) + key = self.lin_key(nfeat[0]) + value = self.lin_value(nfeat[0]) + + if efeat is not None: + if self.lin_edge is None: + raise RuntimeError( + f"{self.__class__.__name__}.edge_feats must be set to allow " + f"edge features." 
+ ) efeat = self.lin_edge(efeat) out = mha_simple_n2n( key_emb=key, query_emb=query, value_emb=value, - graph=_graph, + graph=graph, num_heads=self.num_heads, concat_heads=self.concat, edge_emb=efeat, @@ -165,7 +168,7 @@ def forward( )[: g.num_dst_nodes()] if self.root_weight: - res = self.lin_skip(dst_feats[: g.num_dst_nodes()]) + res = self.lin_skip(nfeat[1][: g.num_dst_nodes()]) if self.lin_beta is not None: beta = self.lin_beta(torch.cat([out, res, out - res], dim=-1)) beta = beta.sigmoid() diff --git a/python/cugraph-dgl/tests/nn/test_transformerconv.py b/python/cugraph-dgl/tests/nn/test_transformerconv.py index 64af795231c..00476b9f0bb 100644 --- a/python/cugraph-dgl/tests/nn/test_transformerconv.py +++ b/python/cugraph-dgl/tests/nn/test_transformerconv.py @@ -26,14 +26,14 @@ @pytest.mark.parametrize("beta", [False, True]) -@pytest.mark.parametrize("bipartite", [False, True]) +@pytest.mark.parametrize("bipartite_node_feats", [False, True]) @pytest.mark.parametrize("concat", [False, True]) @pytest.mark.parametrize("idtype_int", [False, True]) @pytest.mark.parametrize("num_heads", [1, 2, 3, 4]) @pytest.mark.parametrize("to_block", [False, True]) @pytest.mark.parametrize("use_edge_feats", [False, True]) def test_TransformerConv( - beta, bipartite, concat, idtype_int, num_heads, to_block, use_edge_feats + beta, bipartite_node_feats, concat, idtype_int, num_heads, to_block, use_edge_feats ): device = "cuda" g = create_graph1().to(device) @@ -44,7 +44,7 @@ def test_TransformerConv( if to_block: g = dgl.to_block(g) - if bipartite: + if bipartite_node_feats: in_node_feats = (5, 3) nfeat = ( torch.rand(g.num_src_nodes(), in_node_feats[0], device=device), diff --git a/python/cugraph-pyg/cugraph_pyg/nn/conv/base.py b/python/cugraph-pyg/cugraph_pyg/nn/conv/base.py index bec50792131..207efcdace4 100644 --- a/python/cugraph-pyg/cugraph_pyg/nn/conv/base.py +++ b/python/cugraph-pyg/cugraph_pyg/nn/conv/base.py @@ -12,7 +12,7 @@ # limitations under the License. import warnings -from typing import Any, Optional, Tuple, Union +from typing import Optional, Tuple, Union from cugraph.utilities.utils import import_optional @@ -20,13 +20,7 @@ torch_geometric = import_optional("torch_geometric") try: # pragma: no cover - from pylibcugraphops.pytorch import ( - BipartiteCSC, - SampledCSC, - SampledHeteroCSC, - StaticCSC, - StaticHeteroCSC, - ) + from pylibcugraphops.pytorch import CSC, HeteroCSC HAS_PYLIBCUGRAPHOPS = True except ImportError: @@ -94,7 +88,7 @@ def get_cugraph( csc: Tuple[torch.Tensor, torch.Tensor, int], bipartite: bool = False, max_num_neighbors: Optional[int] = None, - ) -> Any: + ) -> CSC: r"""Constructs a :obj:`cugraph-ops` graph object from CSC representation. Supports both bipartite and non-bipartite graphs. 
@@ -119,16 +113,16 @@ def get_cugraph( f"based processing (got CPU tensor)" ) - if bipartite: - return BipartiteCSC(colptr, row, num_src_nodes) + if max_num_neighbors is None: + max_num_neighbors = -1 - if num_src_nodes != colptr.numel() - 1: - if max_num_neighbors is None: - max_num_neighbors = int((colptr[1:] - colptr[:-1]).max()) - - return SampledCSC(colptr, row, max_num_neighbors, num_src_nodes) - - return StaticCSC(colptr, row) + return CSC( + offsets=colptr, + indices=row, + num_src_nodes=num_src_nodes, + dst_max_in_degree=max_num_neighbors, + is_bipartite=bipartite, + ) def get_typed_cugraph( self, @@ -137,7 +131,7 @@ def get_typed_cugraph( num_edge_types: Optional[int] = None, bipartite: bool = False, max_num_neighbors: Optional[int] = None, - ) -> Any: + ) -> HeteroCSC: r"""Constructs a typed :obj:`cugraph` graph object from a CSC representation where each edge corresponds to a given edge type. Supports both bipartite and non-bipartite graphs. @@ -162,21 +156,21 @@ def get_typed_cugraph( if num_edge_types is None: num_edge_types = int(edge_type.max()) + 1 + if max_num_neighbors is None: + max_num_neighbors = -1 + row, colptr, num_src_nodes = csc edge_type = edge_type.int() - if bipartite: - raise NotImplementedError - - if num_src_nodes != colptr.numel() - 1: - if max_num_neighbors is None: - max_num_neighbors = int((colptr[1:] - colptr[:-1]).max()) - - return SampledHeteroCSC( - colptr, row, edge_type, max_num_neighbors, num_src_nodes, num_edge_types - ) - - return StaticHeteroCSC(colptr, row, edge_type, num_edge_types) + return HeteroCSC( + offsets=colptr, + indices=row, + edge_types=edge_type, + num_src_nodes=num_src_nodes, + num_edge_types=num_edge_types, + dst_max_in_degree=max_num_neighbors, + is_bipartite=bipartite, + ) def forward( self, diff --git a/python/cugraph-pyg/cugraph_pyg/nn/conv/gat_conv.py b/python/cugraph-pyg/cugraph_pyg/nn/conv/gat_conv.py index 4bf37cf3e72..23b7d50ba96 100644 --- a/python/cugraph-pyg/cugraph_pyg/nn/conv/gat_conv.py +++ b/python/cugraph-pyg/cugraph_pyg/nn/conv/gat_conv.py @@ -12,7 +12,7 @@ # limitations under the License. from typing import Optional, Tuple, Union -from pylibcugraphops.pytorch.operators import mha_gat_n2n, mha_gat_n2n_bipartite +from pylibcugraphops.pytorch.operators import mha_gat_n2n from cugraph.utilities.utils import import_optional @@ -203,19 +203,6 @@ def forward( ) x_src = self.lin_src(x[0]) x_dst = self.lin_dst(x[1]) - - out = mha_gat_n2n_bipartite( - x_src, - x_dst, - self.att, - graph, - num_heads=self.heads, - activation="LeakyReLU", - negative_slope=self.negative_slope, - concat_heads=self.concat, - edge_feat=edge_attr, - ) - else: if not hasattr(self, "lin"): raise RuntimeError( @@ -224,16 +211,16 @@ def forward( ) x = self.lin(x) - out = mha_gat_n2n( - x, - self.att, - graph, - num_heads=self.heads, - activation="LeakyReLU", - negative_slope=self.negative_slope, - concat_heads=self.concat, - edge_feat=edge_attr, - ) + out = mha_gat_n2n( + (x_src, x_dst) if bipartite else x, + self.att, + graph, + num_heads=self.heads, + activation="LeakyReLU", + negative_slope=self.negative_slope, + concat_heads=self.concat, + edge_feat=edge_attr, + ) if self.bias is not None: out = out + self.bias diff --git a/python/cugraph-pyg/cugraph_pyg/nn/conv/gatv2_conv.py b/python/cugraph-pyg/cugraph_pyg/nn/conv/gatv2_conv.py index 66d962b3f86..d4c947b952a 100644 --- a/python/cugraph-pyg/cugraph_pyg/nn/conv/gatv2_conv.py +++ b/python/cugraph-pyg/cugraph_pyg/nn/conv/gatv2_conv.py @@ -12,7 +12,7 @@ # limitations under the License. 
from typing import Optional, Tuple, Union -from pylibcugraphops.pytorch.operators import mha_gat_v2_n2n, mha_gat_v2_n2n_bipartite +from pylibcugraphops.pytorch.operators import mha_gat_v2_n2n from cugraph.utilities.utils import import_optional @@ -187,8 +187,8 @@ def forward( representation to the desired format. edge_attr: (torch.Tensor, optional) The edge features. """ - bipartite = not isinstance(x, torch.Tensor) - graph = self.get_cugraph(csc, bipartite=bipartite or not self.share_weights) + bipartite = not isinstance(x, torch.Tensor) or not self.share_weights + graph = self.get_cugraph(csc, bipartite=bipartite) if edge_attr is not None: if self.lin_edge is None: @@ -200,38 +200,24 @@ def forward( edge_attr = edge_attr.view(-1, 1) edge_attr = self.lin_edge(edge_attr) - if not bipartite and self.share_weights: + if bipartite: + if isinstance(x, torch.Tensor): + x = (x, x) + x_src = self.lin_src(x[0]) + x_dst = self.lin_dst(x[1]) + else: x = self.lin_src(x) - out = mha_gat_v2_n2n( - x, - self.att, - graph, - num_heads=self.heads, - activation="LeakyReLU", - negative_slope=self.negative_slope, - concat_heads=self.concat, - edge_feat=edge_attr, - ) - else: - if bipartite: - x_src = self.lin_src(x[0]) - x_dst = self.lin_dst(x[1]) - else: - x_src = self.lin_src(x) - x_dst = self.lin_dst(x) - - out = mha_gat_v2_n2n_bipartite( - x_src, - x_dst, - self.att, - graph, - num_heads=self.heads, - activation="LeakyReLU", - negative_slope=self.negative_slope, - concat_heads=self.concat, - edge_feat=edge_attr, - ) + out = mha_gat_v2_n2n( + (x_src, x_dst) if bipartite else x, + self.att, + graph, + num_heads=self.heads, + activation="LeakyReLU", + negative_slope=self.negative_slope, + concat_heads=self.concat, + edge_feat=edge_attr, + ) if self.bias is not None: out = out + self.bias diff --git a/python/cugraph-pyg/cugraph_pyg/nn/conv/transformer_conv.py b/python/cugraph-pyg/cugraph_pyg/nn/conv/transformer_conv.py index aeb51c028ae..f67756eb3fe 100644 --- a/python/cugraph-pyg/cugraph_pyg/nn/conv/transformer_conv.py +++ b/python/cugraph-pyg/cugraph_pyg/nn/conv/transformer_conv.py @@ -12,7 +12,7 @@ # limitations under the License. from typing import Optional, Tuple, Union -from pylibcugraphops.pytorch.operators import mha_simple_n2n as TransformerConvAgg +from pylibcugraphops.pytorch.operators import mha_simple_n2n from cugraph.utilities.utils import import_optional @@ -168,10 +168,10 @@ def forward( representation to the desired format. edge_attr: (torch.Tensor, optional) The edge features. """ - bipartite = not isinstance(x, torch.Tensor) + bipartite = True graph = self.get_cugraph(csc, bipartite=bipartite) - if not bipartite: + if isinstance(x, torch.Tensor): x = (x, x) query = self.lin_query(x[1]) @@ -186,7 +186,7 @@ def forward( ) edge_attr = self.lin_edge(edge_attr) - out = TransformerConvAgg( + out = mha_simple_n2n( key, query, value, From 4d39bad3f25e07e97640b0710666e57ccf24f53f Mon Sep 17 00:00:00 2001 From: Naim <110031745+naimnv@users.noreply.github.com> Date: Wed, 5 Jul 2023 02:13:07 +0200 Subject: [PATCH 3/4] Update pr.yaml to use pytest module as script. 
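Running the suites through "python -m pytest" rather than the bare "pytest" script guarantees the tests are collected by the same interpreter the wheels were installed into and, per pytest's documented behaviour, also prepends the current directory to sys.path. That is presumably why the test-unittest commands below (and in test.yaml in the following patch) switch to the module form; the dataset-root variable and test paths are unchanged.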
--- .github/workflows/pr.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/pr.yaml b/.github/workflows/pr.yaml index 1518d7ba432..4d52cd26de4 100644 --- a/.github/workflows/pr.yaml +++ b/.github/workflows/pr.yaml @@ -94,7 +94,7 @@ jobs: with: build_type: pull-request package-name: pylibcugraph - test-unittest: "RAPIDS_DATASET_ROOT_DIR=./datasets pytest ./python/pylibcugraph/pylibcugraph/tests" + test-unittest: "RAPIDS_DATASET_ROOT_DIR=./datasets python -m pytest ./python/pylibcugraph/pylibcugraph/tests" test-smoketest: "python ci/wheel_smoke_test_pylibcugraph.py" wheel-build-cugraph: needs: wheel-tests-pylibcugraph @@ -120,5 +120,5 @@ jobs: test-before-amd64: "cd ./datasets && bash ./get_test_data.sh && cd - && RAPIDS_PY_WHEEL_NAME=pylibcugraph_${{ '${PIP_CU_VERSION}' }} rapids-download-wheels-from-s3 ./local-pylibcugraph-dep && pip install --no-deps ./local-pylibcugraph-dep/*.whl && pip install git+https://github.com/dask/dask.git@main git+https://github.com/dask/distributed.git@main git+https://github.com/rapidsai/dask-cuda.git@branch-23.08" # Skip dataset downloads on arm to save CI time -- arm only runs smoke tests. test-before-arm64: "RAPIDS_PY_WHEEL_NAME=pylibcugraph_${{ '${PIP_CU_VERSION}' }} rapids-download-wheels-from-s3 ./local-pylibcugraph-dep && pip install --no-deps ./local-pylibcugraph-dep/*.whl && pip install git+https://github.com/dask/dask.git@main git+https://github.com/dask/distributed.git@main git+https://github.com/rapidsai/dask-cuda.git@branch-23.08" - test-unittest: "RAPIDS_DATASET_ROOT_DIR=/__w/cugraph/cugraph/datasets pytest -m sg ./python/cugraph/cugraph/tests" + test-unittest: "RAPIDS_DATASET_ROOT_DIR=/__w/cugraph/cugraph/datasets python -m pytest -m sg ./python/cugraph/cugraph/tests" test-smoketest: "python ci/wheel_smoke_test_cugraph.py" From 5cb3b13ce92a60e247cf94c2e1107efd7b9f6846 Mon Sep 17 00:00:00 2001 From: Tingyu Wang Date: Tue, 4 Jul 2023 23:12:03 -0400 Subject: [PATCH 4/4] fix pytest usage in test.yaml --- .github/workflows/test.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index 33cc3f27825..d697b8f1649 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -39,7 +39,7 @@ jobs: date: ${{ inputs.date }} sha: ${{ inputs.sha }} package-name: pylibcugraph - test-unittest: "RAPIDS_DATASET_ROOT_DIR=./datasets pytest ./python/pylibcugraph/pylibcugraph/tests" + test-unittest: "RAPIDS_DATASET_ROOT_DIR=./datasets python -m pytest ./python/pylibcugraph/pylibcugraph/tests" wheel-tests-cugraph: secrets: inherit uses: rapidsai/shared-action-workflows/.github/workflows/wheels-manylinux-test.yml@branch-23.08 @@ -52,4 +52,4 @@ jobs: # Always want to test against latest dask/distributed. test-before-amd64: "cd ./datasets && bash ./get_test_data.sh && cd - && pip install git+https://github.com/dask/dask.git@main git+https://github.com/dask/distributed.git@main git+https://github.com/rapidsai/dask-cuda.git@branch-23.08" test-before-arm64: "cd ./datasets && bash ./get_test_data.sh && cd - && pip install git+https://github.com/dask/dask.git@main git+https://github.com/dask/distributed.git@main git+https://github.com/rapidsai/dask-cuda.git@branch-23.08" - test-unittest: "RAPIDS_DATASET_ROOT_DIR=/__w/cugraph/cugraph/datasets pytest -m sg ./python/cugraph/cugraph/tests" + test-unittest: "RAPIDS_DATASET_ROOT_DIR=/__w/cugraph/cugraph/datasets python -m pytest -m sg ./python/cugraph/cugraph/tests"
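For reference, the unified CSC graph object that patches 1 and 2 migrate to can be exercised on its own roughly as follows. This is a minimal, hypothetical sketch, not part of the change set: it assumes a CUDA device, a pylibcugraphops build exposing the refactored CSC and mha_gat_n2n API exactly as used in the diffs above, and the flat 2 * num_heads * out_feats attention-weight layout used by the GATConv modules; only calls that already appear in the patches are used.

# Hypothetical stand-alone example (not part of the patches above).
# Mirrors the calls made in the cugraph-dgl / cugraph-pyg GATConv modules after this change.
import torch
from pylibcugraphops.pytorch import CSC
from pylibcugraphops.pytorch.operators import mha_gat_n2n

num_src_nodes, num_dst_nodes = 10, 4
num_heads, out_feats = 2, 8

# Small bipartite block in CSC form: offsets has num_dst_nodes + 1 entries and
# indices lists the source node of every incoming edge, grouped by destination.
offsets = torch.tensor([0, 3, 5, 8, 10], device="cuda")
indices = torch.tensor([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], device="cuda")

graph = CSC(
    offsets=offsets,
    indices=indices,
    num_src_nodes=num_src_nodes,
    dst_max_in_degree=-1,  # -1 = no cap, as used above when max_in_degree is None
    is_bipartite=True,
)

# Features are assumed to be already projected to num_heads * out_feats,
# which is what self.fc_src / self.fc_dst produce in the GATConv modules.
feat_src = torch.randn(num_src_nodes, num_heads * out_feats, device="cuda")
feat_dst = torch.randn(num_dst_nodes, num_heads * out_feats, device="cuda")
attn_weights = torch.randn(2 * num_heads * out_feats, device="cuda")  # assumed layout

out = mha_gat_n2n(
    (feat_src, feat_dst),
    attn_weights,
    graph,
    num_heads=num_heads,
    activation="LeakyReLU",
    negative_slope=0.2,
    concat_heads=True,
)
# out is expected to have shape (num_dst_nodes, num_heads * out_feats)

The same CSC object feeds mha_gat_v2_n2n and mha_simple_n2n in the other convolutions, which is what allows the separate Static/Sampled/Bipartite graph classes to be dropped from the imports in patch 2.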