From c11eff23926dd483d23444a4757629e8ed069683 Mon Sep 17 00:00:00 2001
From: Don Acosta <97529984+acostadon@users.noreply.github.com>
Date: Mon, 25 Sep 2023 21:59:29 -0400
Subject: [PATCH 1/5] similarity notebook to compare link prediction algos (#3868)

New notebook to compare the link prediction algorithms.
Dependent on dining_prefs being added to the datasets API in PR #3866

Authors:
  - Don Acosta (https://github.com/acostadon)

Approvers:
  - Brad Rees (https://github.com/BradReesWork)

URL: https://github.com/rapidsai/cugraph/pull/3868
---
 .../link_prediction/similarity_combined.ipynb | 217 ++++++++++++++++++
 1 file changed, 217 insertions(+)
 create mode 100644 notebooks/algorithms/link_prediction/similarity_combined.ipynb

diff --git a/notebooks/algorithms/link_prediction/similarity_combined.ipynb b/notebooks/algorithms/link_prediction/similarity_combined.ipynb
new file mode 100644
index 00000000000..cd80ee34002
--- /dev/null
+++ b/notebooks/algorithms/link_prediction/similarity_combined.ipynb
@@ -0,0 +1,217 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Similarity Compared\n",
+    "----\n",
+    "\n",
+    "In this notebook, we will compute vertex similarity scores using the various cuGraph algorithms and then compare the similarity scores in tables.\n",
+    "\n",
+    "| Author Credit | Date       | Update  | cuGraph Version | Test Hardware          |\n",
+    "| --------------|------------|---------|-----------------|------------------------|\n",
+    "| Don Acosta    | 09/25/2023 | created | 23.10 nightly   | AMPERE A6000 CUDA 11.7 |\n",
+    "\n",
+    "\n",
+    "**Note: on large graphs these algorithms can take prohibitive time or memory. The notebook will show how to run on defined pairs instead.**\n",
+    "\n",
+    "The similarity algorithms in cuGraph use different methods to compare pairs of vertices. All of them use the intersection of the two sets of adjacent nodes as the measure of set overlap; however, the three algorithms differ in the denominator used to turn that overlap into a similarity coefficient. All three coefficients are normalized between zero and one, where zero means no overlap at all and one means identical adjacencies.\n",
+    "\n",
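+    "Formally, for a candidate pair of vertices $(u, v)$, let $A$ and $B$ denote their sets of adjacent vertices; each coefficient below is then a function of $|A \cap B|$, $|A|$, and $|B|$ alone.\n",
+    "\n",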
\n", + "The [Jaccard similarity](https://en.wikipedia.org/wiki/Jaccard_index) measure was developed by botonist, Paul Jaccard who used the measure to compare plant species. His work popularized the measure's use in in other fields as well.\n", + "\n", + "It can be expressed as:
\n", + "$\\text{Jaccard similarity} = \\frac{|A \\cap B|}{|A \\cup B|}$\n", + "\n", + "__Overlap Similarity__
\n", + "The [Overlap Similarity](https://en.wikipedia.org/wiki/Overlap_coefficient) is also known as the Szymkiewicz–Simpson coefficient. It is often used to compare binary and categorical data in the fields of Genome analysis, recommender systems and anomaly detection. It differs from the Jaccard measure above in that it uses the size of the smaller of the two set sizes as the denominator.\n", + "\n", + "It can be expressed as\n", + "\n", + "$oc(A,B)=\\frac{|A|\\cap|B|}{min(|A|,|B|)}$\n", + "\n", + "__Sørensen-Dice Coefficient__
\n", + "The [Sørensen coefficient](https://en.wikipedia.org/wiki/S%C3%B8rensen%E2%80%93Dice_coefficient#) is known as the Sørensen-Dice coefficient. It was independently developed for use by botonists Lee Raymond Dice and Thorvald Sørensen. Although originating in the field of Botony, the coefficient is now used in computer vision, Natural Language Processing(NLP) and Data Mining among other fields.\n", + "It differs from Jaccard and Overlap in that the calculation doubles the intersection size and divides it by the sum of the two set sizes.\n", + "\n", + "It can be expressed as\n", + "\n", + "Sørensen coefficient = $\\left(2 * |A \\cap B| \\right) \\over \\left(|A| + |B| \\right)$\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "# Now for the code !" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Load the required dependencies." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import cugraph\n", + "from cugraph.datasets import dining_prefs\n", + "# only needed to display results in a table \n", + "from IPython.display import display_html " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Function that calls all the cuGraph similarity/link prediction algorithms " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def compute_similarity(G,pairs=None):\n", + " _jdf = cugraph.jaccard(G,pairs)\n", + " _jdf2 = _jdf[ (_jdf['first'] != _jdf['second'] ) ]\n", + " _odf = cugraph.overlap(G,pairs)\n", + " _odf2 = _odf[ (_odf['first'] != _odf['second'] ) ]\n", + " _sdf = cugraph.sorensen_coefficient(G,pairs)\n", + " _sdf2 = _sdf[ (_sdf['first'] != _sdf['second'] ) ]\n", + " return _jdf2, _odf2, _sdf2" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Function to put all the results in a convenient table" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Print function\n", + "def print_similarity(jdf,odf,sdf,num_records=5):\n", + "\n", + " js_top = jdf.sort_values(by='jaccard_coeff', ascending=False).head(num_records).to_pandas()\n", + " os_top = odf.sort_values(by='overlap_coeff', ascending=False).head(num_records).to_pandas()\n", + " ss_top = sdf.sort_values(by='sorensen_coeff', ascending=False).head(num_records).to_pandas()\n", + " \n", + " df1_styler = js_top.style.set_table_attributes(\"style='display:inline'\").set_caption('Jaccard').hide(axis='index')\n", + " df2_styler = os_top.style.set_table_attributes(\"style='display:inline'\").set_caption('Overlap').hide(axis='index')\n", + " df3_styler = ss_top.style.set_table_attributes(\"style='display:inline'\").set_caption('Sørensen').hide(axis='index')\n", + "\n", + " display_html(df1_styler._repr_html_()+df2_styler._repr_html_()+df3_styler._repr_html_(), raw=True)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Create the graph from the Dining preferences data set." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "G = dining_prefs.get_graph(download=True)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Run the three similarity Algorithms and print out the five links with the highest scores." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "jdf, odf, sdf = compute_similarity(G)\n", + "print_similarity(jdf,odf,sdf)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now find the the complete set of two-hop neigbors and compare them instead of just using the existing one-hop edges. In a larger graph, this will run considerably faster since the default " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# this cugraph algorithm pulls a set containing every pair of vertices\n", + "# that are within 2-hops of each other\n", + "two_hops_pairs = G.get_two_hop_neighbors()\n", + "\n", + "jdf_hops, odf_hops, sdf_hops = compute_similarity(G,pairs=two_hops_pairs)\n", + "print_similarity(jdf_hops,odf_hops,sdf_hops)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### It's that easy with cuGraph" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "----\n", + "Copyright (c) 2023, NVIDIA CORPORATION.\n", + "\n", + "Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0\n", + "\n", + "Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License." + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "cugraph_0802", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.12" + }, + "orig_nbformat": 4 + }, + "nbformat": 4, + "nbformat_minor": 2 +} From b199bf01a2dbb3a8bc89198c6d35fd5a0444e213 Mon Sep 17 00:00:00 2001 From: Vibhu Jawa Date: Tue, 26 Sep 2023 05:33:13 -0700 Subject: [PATCH 2/5] [REVIEW] Add Pure DGL Dataloading benchmark (#3660) This PR adds the DGL data loading benchmark: Arguments supported: - dataset_path: path to the dataset - replication_factors: replication factors for number of edges - fanouts: fanouts - batch_sizes: batch sizes ```bash python3 dgl_dataloading.py --dataset_path "/datasets/abarghi/ogbn_papers100M" \ --replication_factors "1,2,4" \ --fanouts "25_25,10_10_10,5_10_20" \ --batch_sizes "512,1024" ``` This produces the following results on a V100: | Fanout | Batch Size | Data Loading Time per Epoch | Data Loading Time per Batch | Number of Edges | Number of Batches | Replication Factor | |--------|------------|-----------------------------|-----------------------------|-----------------|-------------------|--------------------| | [25, 25] | 512 | 9.48 | 0.0031 | 1615685872 | 3022 | 1 | | [25, 25] | 1024 | 6.39 | 0.0042 | 1615685872 | 1511 | 1 | | [10, 10, 10] | 512 | 15.91 | 0.0053 | 1615685872 | 3022 | 1 | | [10, 10, 10] | 1024 | 11.64 | 0.0077 | 1615685872 | 1511 | 1 | | [5, 10, 20] | 512 | 17.73 | 0.0059 | 1615685872 | 3022 | 1 | | [5, 10, 20] | 1024 | 13.52 | 0.0089 | 1615685872 | 1511 | 1 | | [25, 25] | 512 | 19.44 | 0.0032 | 3231371744 | 6043 | 2 | | [25, 25] | 1024 | 12.98 | 0.0043 | 3231371744 | 3022 | 2 | | [10, 
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### It's that easy with cuGraph"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "----\n",
+    "Copyright (c) 2023, NVIDIA CORPORATION.\n",
+    "\n",
+    "Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0\n",
+    "\n",
+    "Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License."
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "cugraph_0802",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.10.12"
+  },
+  "orig_nbformat": 4
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}

From b199bf01a2dbb3a8bc89198c6d35fd5a0444e213 Mon Sep 17 00:00:00 2001
From: Vibhu Jawa
Date: Tue, 26 Sep 2023 05:33:13 -0700
Subject: [PATCH 2/5] [REVIEW] Add Pure DGL Dataloading benchmark (#3660)

This PR adds the DGL data loading benchmark.

Arguments supported:
- dataset_path: path to the dataset
- replication_factors: replication factors for the number of edges
- fanouts: neighbor-sampling fanouts (underscore-separated, one value per layer)
- batch_sizes: batch sizes

```bash
python3 dgl_dataloading.py --dataset_path "/datasets/abarghi/ogbn_papers100M" \
                           --replication_factors "1,2,4" \
                           --fanouts "25_25,10_10_10,5_10_20" \
                           --batch_sizes "512,1024"
```

This produces the following results on a V100:

| Fanout | Batch Size | Data Loading Time per Epoch (s) | Data Loading Time per Batch (s) | Number of Edges | Number of Batches | Replication Factor |
|--------|------------|---------------------------------|---------------------------------|-----------------|-------------------|--------------------|
| [25, 25] | 512 | 9.48 | 0.0031 | 1615685872 | 3022 | 1 |
| [25, 25] | 1024 | 6.39 | 0.0042 | 1615685872 | 1511 | 1 |
| [10, 10, 10] | 512 | 15.91 | 0.0053 | 1615685872 | 3022 | 1 |
| [10, 10, 10] | 1024 | 11.64 | 0.0077 | 1615685872 | 1511 | 1 |
| [5, 10, 20] | 512 | 17.73 | 0.0059 | 1615685872 | 3022 | 1 |
| [5, 10, 20] | 1024 | 13.52 | 0.0089 | 1615685872 | 1511 | 1 |
| [25, 25] | 512 | 19.44 | 0.0032 | 3231371744 | 6043 | 2 |
| [25, 25] | 1024 | 12.98 | 0.0043 | 3231371744 | 3022 | 2 |
| [10, 10, 10] | 512 | 32.88 | 0.0054 | 3231371744 | 6043 | 2 |
| [10, 10, 10] | 1024 | 24.35 | 0.0081 | 3231371744 | 3022 | 2 |
| [5, 10, 20] | 512 | 38.35 | 0.0063 | 3231371744 | 6043 | 2 |
| [5, 10, 20] | 1024 | 28.93 | 0.0096 | 3231371744 | 3022 | 2 |
| [25, 25] | 512 | 37.31 | 0.0031 | 6462743488 | 12085 | 4 |
| [25, 25] | 1024 | 25.15 | 0.0042 | 6462743488 | 6043 | 4 |
| [10, 10, 10] | 512 | 64.29 | 0.0053 | 6462743488 | 12085 | 4 |
| [10, 10, 10] | 1024 | 47.13 | 0.0078 | 6462743488 | 6043 | 4 |
| [5, 10, 20] | 512 | 72.90 | 0.0060 | 6462743488 | 12085 | 4 |
| [5, 10, 20] | 1024 | 56.70 | 0.0094 | 6462743488 | 6043 | 4 |
| [25, 25] | 512 | 80.99 | 0.0034 | 12925486976 | 24169 | 8 |
| [25, 25] | 1024 | 50.89 | 0.0042 | 12925486976 | 12085 | 8 |
| [10, 10, 10] | 512 | 129.49 | 0.0054 | 12925486976 | 24169 | 8 |
| [10, 10, 10] | 1024 | 93.66 | 0.0078 | 12925486976 | 12085 | 8 |
| [5, 10, 20] | 512 | 143.45 | 0.0059 | 12925486976 | 24169 | 8 |
| [5, 10, 20] | 1024 | 110.22 | 0.0091 | 12925486976 | 12085 | 8 |

Authors:
  - Vibhu Jawa (https://github.com/VibhuJawa)
  - Alex Barghi (https://github.com/alexbarghi-nv)
  - Brad Rees (https://github.com/BradReesWork)

Approvers:
  - Alex Barghi (https://github.com/alexbarghi-nv)
  - Tingyu Wang (https://github.com/tingyu66)

URL: https://github.com/rapidsai/cugraph/pull/3660
---
 .../dgl_benchmark.py | 291 ++++++++++++++++++
 1 file changed, 291 insertions(+)
 create mode 100644 benchmarks/cugraph-dgl/python-script/dgl_dataloading_benchmark/dgl_benchmark.py

diff --git a/benchmarks/cugraph-dgl/python-script/dgl_dataloading_benchmark/dgl_benchmark.py b/benchmarks/cugraph-dgl/python-script/dgl_dataloading_benchmark/dgl_benchmark.py
new file mode 100644
index 00000000000..0a52703c546
--- /dev/null
+++ b/benchmarks/cugraph-dgl/python-script/dgl_dataloading_benchmark/dgl_benchmark.py
@@ -0,0 +1,291 @@
+# Copyright (c) 2023, NVIDIA CORPORATION.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import dgl
+import torch
+import pandas as pd
+import os
+import time
+import json
+import random
+import numpy as np
+from argparse import ArgumentParser
+
+
+def load_edges_from_disk(parquet_path, replication_factor, input_meta):
+    """
+    Load the edges from disk into a graph data dictionary.
+    Args:
+        parquet_path: Path to the parquet directory.
+        replication_factor: Number of times to replicate the edges.
+        input_meta: Input metadata for the dataset.
+    Returns:
+        dict: Dictionary mapping each edge type to a tuple of (src, dst) tensors.
+    """
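+    # A replication factor of r materializes r offset copies of every edge,
+    # letting one on-disk dataset stand in for a graph r times as large.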
+    graph_data = {}
+    for edge_type in input_meta["num_edges"].keys():
+        print(
+            f"Loading edge index for edge type {edge_type} "
+            f"for replication factor = {replication_factor}"
+        )
+        can_edge_type = tuple(edge_type.split("__"))
+        # TODO: Rename `edge_index` to a better name
+        ei = pd.read_parquet(
+            os.path.join(parquet_path, edge_type, "edge_index.parquet")
+        )
+        ei = {
+            "src": torch.from_numpy(ei.src.values),
+            "dst": torch.from_numpy(ei.dst.values),
+        }
+        if replication_factor > 1:
+            src_ls = [ei["src"]]
+            dst_ls = [ei["dst"]]
+            # each copy is shifted by a multiple of the node count so the
+            # replicas connect disjoint ranges of node IDs
+            for r in range(1, replication_factor):
+                new_src = ei["src"] + (
+                    r * input_meta["num_nodes"][can_edge_type[0]]
+                )
+                src_ls.append(new_src)
+                new_dst = ei["dst"] + (
+                    r * input_meta["num_nodes"][can_edge_type[2]]
+                )
+                dst_ls.append(new_dst)
+
+            ei["src"] = torch.cat(src_ls).contiguous()
+            ei["dst"] = torch.cat(dst_ls).contiguous()
+        graph_data[can_edge_type] = ei["src"], ei["dst"]
+    print("Graph Data compiled")
+    return graph_data
+
+
+def load_node_labels(dataset_path, replication_factor, input_meta):
+    """
+    Load the node labels, replicating them to match the replicated edges,
+    and derive the training index from the labeled nodes.
+    """
+    num_nodes_dict = {
+        node_type: t * replication_factor
+        for node_type, t in input_meta["num_nodes"].items()
+    }
+    node_data = {}
+    for node_type in input_meta["num_nodes"].keys():
+        node_data[node_type] = {}
+        label_path = os.path.join(
+            dataset_path, "parquet", node_type, "node_label.parquet"
+        )
+        if os.path.exists(label_path):
+            node_label = pd.read_parquet(label_path)
+            if replication_factor > 1:
+                base_num_nodes = input_meta["num_nodes"][node_type]
+                # replicate the labels with the same node-ID offsets that
+                # load_edges_from_disk applies to the edges
+                dfr = pd.DataFrame(
+                    {
+                        "node": pd.concat(
+                            [
+                                node_label.node + (r * base_num_nodes)
+                                for r in range(1, replication_factor)
+                            ]
+                        ),
+                        "label": pd.concat(
+                            [
+                                node_label.label
+                                for r in range(1, replication_factor)
+                            ]
+                        ),
+                    }
+                )
+                node_label = pd.concat([node_label, dfr]).reset_index(
+                    drop=True
+                )
+
+            # unlabeled nodes are marked -1; the labeled ones become train_idx
+            node_label_tensor = torch.full(
+                (num_nodes_dict[node_type],), -1, dtype=torch.float32
+            )
+            node_label_tensor[
+                torch.as_tensor(node_label.node.values)
+            ] = torch.as_tensor(node_label.label.values)
+
+            del node_label
+            node_data[node_type]["train_idx"] = (
+                (node_label_tensor > -1).contiguous().nonzero().view(-1)
+            )
+            node_data[node_type]["y"] = node_label_tensor.contiguous()
+        else:
+            node_data[node_type]["num_nodes"] = num_nodes_dict[node_type]
+    return node_data
+
+
+def create_dgl_graph_from_disk(dataset_path, replication_factor=1):
+    """
+    Create a DGL graph from a dataset on disk.
+    Args:
+        dataset_path: Path to the dataset on disk.
+        replication_factor: Number of times to replicate the edges.
+    Returns:
+        tuple: (DGLGraph, node_data dict) for the loaded dataset.
+    """
+    with open(os.path.join(dataset_path, "meta.json"), "r") as f:
+        input_meta = json.load(f)
+
+    parquet_path = os.path.join(dataset_path, "parquet")
+    graph_data = load_edges_from_disk(
+        parquet_path, replication_factor, input_meta
+    )
+    node_data = load_node_labels(dataset_path, replication_factor, input_meta)
+    g = dgl.heterograph(graph_data)
+
+    return g, node_data
+
+
+def create_dataloader(g, train_idx, batch_size, fanouts, use_uva):
+    """
+    Create a DGL dataloader from a DGL graph.
+    Args:
+        g: DGLGraph to create the dataloader from.
+        train_idx: Tensor containing the training indices.
+        batch_size: Batch size to use for the dataloader.
+        fanouts: List of fanouts to use for the dataloader.
+        use_uva: Whether to use unified virtual address space.
+    Returns:
+        DataLoader: DGL dataloader over the training indices.
+ """ + + print("Creating dataloader", flush=True) + st = time.time() + if use_uva: + train_idx = {k: v.to("cuda") for k, v in train_idx.items()} + sampler = dgl.dataloading.MultiLayerNeighborSampler(fanouts=fanouts) + dataloader = dgl.dataloading.DataLoader( + g, + train_idx, + sampler, + num_workers=0, + batch_size=batch_size, + use_uva=use_uva, + shuffle=False, + drop_last=False, + ) + et = time.time() + print(f"Time to create dataloader = {et - st:.2f} seconds") + return dataloader + + +def dataloading_benchmark(g, train_idx, fanouts, batch_sizes, use_uva): + """ + Run the dataloading benchmark. + Args: + g: DGLGraph + train_idx: Tensor containing the training indices. + fanouts: List of fanouts to use for the dataloader. + batch_sizes: List of batch sizes to use for the dataloader. + use_uva: Whether to use unified virtual address space. + """ + time_ls = [] + for fanout in fanouts: + for batch_size in batch_sizes: + dataloader = create_dataloader( + g, + train_idx, + batch_size=batch_size, + fanouts=fanout, + use_uva=use_uva, + ) + dataloading_st = time.time() + for input_nodes, output_nodes, blocks in dataloader: + pass + dataloading_et = time.time() + dataloading_time = dataloading_et - dataloading_st + time_d = { + "fanout": fanout, + "batch_size": batch_size, + "dataloading_time_per_epoch": dataloading_time, + "dataloading_time_per_batch": dataloading_time / len(dataloader), + "num_edges": g.num_edges(), + "num_batches": len(dataloader), + } + time_ls.append(time_d) + + print("Dataloading completed") + print(f"Fanout = {fanout}, batch_size = {batch_size}") + print( + f"Time taken {dataloading_time:.2f} ", + f"seconds for num batches {len(dataloader)}", + flush=True, + ) + print("==============================================") + return time_ls + +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + torch.cuda.manual_seed_all(seed) + +if __name__ == "__main__": + parser = ArgumentParser() + parser.add_argument( + "--dataset_path", type=str, default="/datasets/abarghi/ogbn_papers100M" + ) + parser.add_argument("--replication_factors", type=str, default="1,2,4,8") + parser.add_argument( + "--fanouts", type=str, default="25_25,10_10_10,5_10_20" + ) + parser.add_argument("--batch_sizes", type=str, default="512,1024") + parser.add_argument("--do_not_use_uva", action="store_true") + parser.add_argument("--seed", type=int, default=42) + args = parser.parse_args() + + if args.do_not_use_uva: + use_uva = False + else: + use_uva = True + set_seed(args.seed) + replication_factors = [int(x) for x in args.replication_factors.split(",")] + fanouts = [[int(y) for y in x.split("_")] for x in args.fanouts.split(",")] + batch_sizes = [int(x) for x in args.batch_sizes.split(",")] + + print("Running dgl dataloading benchmark with the following parameters:") + print(f"Dataset path = {args.dataset_path}") + print(f"Replication factors = {replication_factors}") + print(f"Fanouts = {fanouts}") + print(f"Batch sizes = {batch_sizes}") + print(f"Use UVA = {use_uva}") + print("==============================================") + + time_ls = [] + for replication_factor in replication_factors: + st = time.time() + g, node_data = create_dgl_graph_from_disk( + dataset_path=args.dataset_path, + replication_factor=replication_factor, + ) + et = time.time() + print(f"Replication factor = {replication_factor}") + print( + f"G has {g.num_edges()} edges and took", + f" {et - st:.2f} seconds to load" + ) + train_idx = {"paper": node_data["paper"]["train_idx"]} + r_time_ls = 
+        r_time_ls = dataloading_benchmark(
+            g, train_idx, fanouts, batch_sizes, use_uva=use_uva
+        )
+        print(
+            "Benchmark completed for replication factor = ", replication_factor
+        )
+        print("==============================================")
+        # Add the replication factor to each timing record
+        for x in r_time_ls:
+            x.update({"replication_factor": replication_factor})
+        time_ls.extend(r_time_ls)
+
+    df = pd.DataFrame(time_ls)
+    df.to_csv("dgl_dataloading_benchmark.csv", index=False)
+    print("Benchmark completed for all replication factors")
+    print("==============================================")

From 8b02e241617df8bac33d0fd69b03046d2ddaf2d9 Mon Sep 17 00:00:00 2001
From: Naim <110031745+naimnv@users.noreply.github.com>
Date: Tue, 26 Sep 2023 15:43:02 +0200
Subject: [PATCH 3/5] Enable temporarily disabled MG tests (#3837)

Enables the temporarily disabled single-GPU "MG" tests and skips deleting
the copied dataframe while creating a distributed graph from cudf
edge-lists. Ideally we would like to merge this PR once
[issue 3790](https://github.com/rapidsai/cugraph/issues/3790) is closed,
but we might need to merge it if the issue is not resolved before the
next release.
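
For context, the workaround gives each `map_partitions` call a unique token
so Dask does not collapse the otherwise-identical copy tasks. A sketch of
the pattern (illustrative only; the actual changes are in the diff below):

```python
import random

# a unique, random token keeps dask from reusing a cached task
# for the otherwise-identical partition copies
input_ddf = input_ddf.map_partitions(
    lambda df: df.copy(), token="custom-" + str(random.random())
)
```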

Authors:
  - Naim (https://github.com/naimnv)

Approvers:
  - Rick Ratzel (https://github.com/rlratzel)
  - AJ Schmidt (https://github.com/ajschmidt8)

URL: https://github.com/rapidsai/cugraph/pull/3837
---
 ci/test_python.sh                                      | 6 +-----
 ci/test_wheel.sh                                       | 4 +---
 .../graph_implementation/simpleDistributedGraph.py     | 5 ++++-
 python/cugraph/cugraph/tests/traversal/test_bfs_mg.py  | 5 ++++-
 4 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/ci/test_python.sh b/ci/test_python.sh
index 14886909fc9..7b0077991ae 100755
--- a/ci/test_python.sh
+++ b/ci/test_python.sh
@@ -63,10 +63,6 @@ pytest \
   tests
 popd
 
-# FIXME: TEMPORARILY disable single-GPU "MG" testing until
-# https://github.com/rapidsai/cugraph/issues/3790 is closed
-# When closed, replace -k "not _mg" with
-# -k "not test_property_graph_mg" \
 rapids-logger "pytest cugraph"
 pushd python/cugraph/cugraph
 export DASK_WORKER_DEVICES="0"
@@ -79,7 +75,7 @@ pytest \
   --cov=cugraph \
   --cov-report=xml:"${RAPIDS_COVERAGE_DIR}/cugraph-coverage.xml" \
   --cov-report=term \
-  -k "not _mg" \
+  -k "not test_property_graph_mg" \
   tests
 popd
 
diff --git a/ci/test_wheel.sh b/ci/test_wheel.sh
index b62635d08b4..146186ae2e7 100755
--- a/ci/test_wheel.sh
+++ b/ci/test_wheel.sh
@@ -18,7 +18,5 @@ arch=$(uname -m)
 if [[ "${arch}" == "aarch64" && ${RAPIDS_BUILD_TYPE} == "pull-request" ]]; then
     python ./ci/wheel_smoke_test_${package_name}.py
 else
-    # FIXME: TEMPORARILY disable single-GPU "MG" testing until
-    # https://github.com/rapidsai/cugraph/issues/3790 is closed
-    RAPIDS_DATASET_ROOT_DIR=`pwd`/datasets python -m pytest -k "not _mg" ./python/${package_name}/${package_name}/tests
+    RAPIDS_DATASET_ROOT_DIR=`pwd`/datasets python -m pytest ./python/${package_name}/${package_name}/tests
 fi
diff --git a/python/cugraph/cugraph/structure/graph_implementation/simpleDistributedGraph.py b/python/cugraph/cugraph/structure/graph_implementation/simpleDistributedGraph.py
index 0586d0d853c..01885c2d1c3 100644
--- a/python/cugraph/cugraph/structure/graph_implementation/simpleDistributedGraph.py
+++ b/python/cugraph/cugraph/structure/graph_implementation/simpleDistributedGraph.py
@@ -14,6 +14,7 @@
 import gc
 from typing import Union
 import warnings
+import random
 
 import cudf
 import cupy as cp
@@ -182,7 +183,9 @@ def __from_edgelist(
         # Repartition to 2 partitions per GPU for memory efficient process
         input_ddf = input_ddf.repartition(npartitions=len(workers) * 2)
         # FIXME: Make a copy of the input ddf before implicitly altering it.
-        input_ddf = input_ddf.map_partitions(lambda df: df.copy())
+        input_ddf = input_ddf.map_partitions(
+            lambda df: df.copy(), token="custom-" + str(random.random())
+        )
         # The dataframe will be symmetrized iff the graph is undirected
         # otherwise, the inital dataframe will be returned
         if edge_attr is not None:
diff --git a/python/cugraph/cugraph/tests/traversal/test_bfs_mg.py b/python/cugraph/cugraph/tests/traversal/test_bfs_mg.py
index 8ffbecea4fc..5eafc231141 100644
--- a/python/cugraph/cugraph/tests/traversal/test_bfs_mg.py
+++ b/python/cugraph/cugraph/tests/traversal/test_bfs_mg.py
@@ -12,6 +12,7 @@
 # limitations under the License.
 
 import gc
+import random
 
 import pytest
 
@@ -61,7 +62,9 @@ def modify_dataset(df):
         return cudf.concat([df, temp_df])
 
     meta = ddf._meta
-    ddf = ddf.map_partitions(modify_dataset, meta=meta)
+    ddf = ddf.map_partitions(
+        modify_dataset, meta=meta, token="custom-" + str(random.random())
+    )
 
     df = cudf.read_csv(
         input_data_path,

From a9f4297223593f8df211599277519e206c597630 Mon Sep 17 00:00:00 2001
From: Joseph Nke <76006812+jnke2016@users.noreply.github.com>
Date: Tue, 26 Sep 2023 08:57:51 -0500
Subject: [PATCH 4/5] Enable weights for MG similarity algorithms (#3879)

This is a follow-up PR to #3828, which enabled weights for the Python SG
similarity algorithms. This PR also updates the tests and docstrings and
removes the experimental calls.

Authors:
  - Joseph Nke (https://github.com/jnke2016)

Approvers:
  - Alex Barghi (https://github.com/alexbarghi-nv)

URL: https://github.com/rapidsai/cugraph/pull/3879
---
 .../cugraph/dask/link_prediction/jaccard.py   | 10 +---
 .../cugraph/dask/link_prediction/overlap.py   | 10 +---
 .../cugraph/dask/link_prediction/sorensen.py  | 10 +---
 .../tests/link_prediction/test_jaccard_mg.py  | 59 ++++++-------------
 .../tests/link_prediction/test_overlap_mg.py  | 59 ++++++-------------
 .../tests/link_prediction/test_sorensen_mg.py | 59 ++++++------------
 6 files changed, 60 insertions(+), 147 deletions(-)

diff --git a/python/cugraph/cugraph/dask/link_prediction/jaccard.py b/python/cugraph/cugraph/dask/link_prediction/jaccard.py
index 218e6206fc3..5362c7a9e1e 100644
--- a/python/cugraph/cugraph/dask/link_prediction/jaccard.py
+++ b/python/cugraph/cugraph/dask/link_prediction/jaccard.py
@@ -118,7 +118,9 @@ def jaccard(input_graph, vertex_pair=None, use_weight=False):
         adjacent vertices in the graph.
 
     use_weight : bool, optional (default=False)
-        Currently not supported
+        Flag to indicate whether to compute weighted jaccard (if use_weight==True)
+        or un-weighted jaccard (if use_weight==False).
+        'input_graph' must be weighted if 'use_weight=True'.
Returns ------- @@ -144,12 +146,6 @@ def jaccard(input_graph, vertex_pair=None, use_weight=False): vertex_pair_col_name = vertex_pair.columns - if use_weight: - raise ValueError("'use_weight' is currently not supported.") - - if input_graph.is_weighted(): - raise ValueError("Weighted graphs are currently not supported.") - if isinstance(vertex_pair, (dask_cudf.DataFrame, cudf.DataFrame)): vertex_pair = renumber_vertex_pair(input_graph, vertex_pair) diff --git a/python/cugraph/cugraph/dask/link_prediction/overlap.py b/python/cugraph/cugraph/dask/link_prediction/overlap.py index 5540be28fd1..4bda05e3c95 100644 --- a/python/cugraph/cugraph/dask/link_prediction/overlap.py +++ b/python/cugraph/cugraph/dask/link_prediction/overlap.py @@ -96,7 +96,9 @@ def overlap(input_graph, vertex_pair=None, use_weight=False): adjacent vertices in the graph. use_weight : bool, optional (default=False) - Currently not supported + Flag to indicate whether to compute weighted overlap (if use_weight==True) + or un-weighted overlap (if use_weight==False). + 'input_graph' must be weighted if 'use_weight=True'. Returns ------- @@ -122,12 +124,6 @@ def overlap(input_graph, vertex_pair=None, use_weight=False): vertex_pair_col_name = vertex_pair.columns - if use_weight: - raise ValueError("'use_weight' is currently not supported.") - - if input_graph.is_weighted(): - raise ValueError("Weighted graphs are currently not supported.") - if isinstance(vertex_pair, (dask_cudf.DataFrame, cudf.DataFrame)): vertex_pair = renumber_vertex_pair(input_graph, vertex_pair) diff --git a/python/cugraph/cugraph/dask/link_prediction/sorensen.py b/python/cugraph/cugraph/dask/link_prediction/sorensen.py index 24295ac330c..163b0d0dc16 100644 --- a/python/cugraph/cugraph/dask/link_prediction/sorensen.py +++ b/python/cugraph/cugraph/dask/link_prediction/sorensen.py @@ -92,7 +92,9 @@ def sorensen(input_graph, vertex_pair=None, use_weight=False): adjacent vertices in the graph. use_weight : bool, optional (default=False) - Currently not supported + Flag to indicate whether to compute weighted sorensen (if use_weight==True) + or un-weighted sorensen (if use_weight==False). + 'input_graph' must be weighted if 'use_weight=True'. Returns ------- @@ -118,12 +120,6 @@ def sorensen(input_graph, vertex_pair=None, use_weight=False): vertex_pair_col_name = vertex_pair.columns - if use_weight: - raise ValueError("'use_weight' is currently not supported.") - - if input_graph.is_weighted(): - raise ValueError("Weighted graphs are currently not supported.") - if isinstance(vertex_pair, (dask_cudf.DataFrame, cudf.DataFrame)): vertex_pair = renumber_vertex_pair(input_graph, vertex_pair) diff --git a/python/cugraph/cugraph/tests/link_prediction/test_jaccard_mg.py b/python/cugraph/cugraph/tests/link_prediction/test_jaccard_mg.py index b56a6baae2b..ee739c9f236 100644 --- a/python/cugraph/cugraph/tests/link_prediction/test_jaccard_mg.py +++ b/python/cugraph/cugraph/tests/link_prediction/test_jaccard_mg.py @@ -34,6 +34,7 @@ def setup_function(): IS_DIRECTED = [False] HAS_VERTEX_PAIR = [True, False] +IS_WEIGHTED = [True, False] # ============================================================================= @@ -48,6 +49,7 @@ def setup_function(): (datasets, "graph_file"), (IS_DIRECTED, "directed"), (HAS_VERTEX_PAIR, "has_vertex_pair"), + (IS_WEIGHTED, "is_weighted"), ) @@ -57,7 +59,9 @@ def input_combo(request): Simply return the current combination of params as a dictionary for use in tests or other parameterized fixtures. 
""" - parameters = dict(zip(("graph_file", "directed", "has_vertex_pair"), request.param)) + parameters = dict( + zip(("graph_file", "directed", "has_vertex_pair", "is_weighted"), request.param) + ) return parameters @@ -72,7 +76,10 @@ def input_expected_output(input_combo): input_data_path = input_combo["graph_file"] directed = input_combo["directed"] has_vertex_pair = input_combo["has_vertex_pair"] - G = utils.generate_cugraph_graph_from_file(input_data_path, directed=directed) + is_weighted = input_combo["is_weighted"] + G = utils.generate_cugraph_graph_from_file( + input_data_path, directed=directed, edgevals=is_weighted + ) if has_vertex_pair: # Sample random vertices from the graph and compute the two_hop_neighbors # with those seeds @@ -84,7 +91,9 @@ def input_expected_output(input_combo): vertex_pair = None input_combo["vertex_pair"] = vertex_pair - sg_cugraph_jaccard = cugraph.experimental.jaccard(G, input_combo["vertex_pair"]) + sg_cugraph_jaccard = cugraph.jaccard( + G, input_combo["vertex_pair"], use_weight=is_weighted + ) # Save the results back to the input_combo dictionary to prevent redundant # cuGraph runs. Other tests using the input_combo fixture will look for # them, and if not present they will have to re-run the same cuGraph call. @@ -104,6 +113,7 @@ def input_expected_output(input_combo): ddf, source="src", destination="dst", + edge_attr="value" if is_weighted else None, renumber=True, store_transposed=True, ) @@ -122,8 +132,11 @@ def input_expected_output(input_combo): def test_dask_mg_jaccard(dask_client, benchmark, input_expected_output): dg = input_expected_output["MGGraph"] + use_weight = input_expected_output["is_weighted"] - result_jaccard = benchmark(dcg.jaccard, dg, input_expected_output["vertex_pair"]) + result_jaccard = benchmark( + dcg.jaccard, dg, input_expected_output["vertex_pair"], use_weight=use_weight + ) result_jaccard = ( result_jaccard.compute() @@ -151,41 +164,3 @@ def test_dask_mg_jaccard(dask_client, benchmark, input_expected_output): assert len(jaccard_coeff_diffs1) == 0 assert len(jaccard_coeff_diffs2) == 0 - - -@pytest.mark.mg -def test_dask_mg_weighted_jaccard(dask_client): - input_data_path = datasets[0] - chunksize = dcg.get_chunksize(input_data_path) - ddf = dask_cudf.read_csv( - input_data_path, - chunksize=chunksize, - delimiter=" ", - names=["src", "dst", "value"], - dtype=["int32", "int32", "float32"], - ) - - dg = cugraph.Graph(directed=False) - dg.from_dask_cudf_edgelist( - ddf, - source="src", - destination="dst", - edge_attr="value", - renumber=True, - store_transposed=True, - ) - with pytest.raises(ValueError): - dcg.jaccard(dg) - - dg = cugraph.Graph(directed=False) - dg.from_dask_cudf_edgelist( - ddf, - source="src", - destination="dst", - edge_attr="value", - store_transposed=True, - ) - - use_weight = True - with pytest.raises(ValueError): - dcg.jaccard(dg, use_weight=use_weight) diff --git a/python/cugraph/cugraph/tests/link_prediction/test_overlap_mg.py b/python/cugraph/cugraph/tests/link_prediction/test_overlap_mg.py index ce4bf619f47..87407d7b59c 100644 --- a/python/cugraph/cugraph/tests/link_prediction/test_overlap_mg.py +++ b/python/cugraph/cugraph/tests/link_prediction/test_overlap_mg.py @@ -34,6 +34,7 @@ def setup_function(): IS_DIRECTED = [False] HAS_VERTEX_PAIR = [True, False] +IS_WEIGHTED = [True, False] # ============================================================================= @@ -48,6 +49,7 @@ def setup_function(): (datasets, "graph_file"), (IS_DIRECTED, "directed"), (HAS_VERTEX_PAIR, "has_vertex_pair"), + 
(IS_WEIGHTED, "is_weighted"), ) @@ -57,7 +59,9 @@ def input_combo(request): Simply return the current combination of params as a dictionary for use in tests or other parameterized fixtures. """ - parameters = dict(zip(("graph_file", "directed", "has_vertex_pair"), request.param)) + parameters = dict( + zip(("graph_file", "directed", "has_vertex_pair", "is_weighted"), request.param) + ) return parameters @@ -72,7 +76,10 @@ def input_expected_output(input_combo): input_data_path = input_combo["graph_file"] directed = input_combo["directed"] has_vertex_pair = input_combo["has_vertex_pair"] - G = utils.generate_cugraph_graph_from_file(input_data_path, directed=directed) + is_weighted = input_combo["is_weighted"] + G = utils.generate_cugraph_graph_from_file( + input_data_path, directed=directed, edgevals=is_weighted + ) if has_vertex_pair: # Sample random vertices from the graph and compute the two_hop_neighbors # with those seeds @@ -84,7 +91,9 @@ def input_expected_output(input_combo): vertex_pair = None input_combo["vertex_pair"] = vertex_pair - sg_cugraph_overlap = cugraph.experimental.overlap(G, input_combo["vertex_pair"]) + sg_cugraph_overlap = cugraph.overlap( + G, input_combo["vertex_pair"], use_weight=is_weighted + ) # Save the results back to the input_combo dictionary to prevent redundant # cuGraph runs. Other tests using the input_combo fixture will look for # them, and if not present they will have to re-run the same cuGraph call. @@ -104,6 +113,7 @@ def input_expected_output(input_combo): ddf, source="src", destination="dst", + edge_attr="value" if is_weighted else None, renumber=True, store_transposed=True, ) @@ -125,8 +135,11 @@ def input_expected_output(input_combo): def test_dask_mg_overlap(dask_client, benchmark, input_expected_output): dg = input_expected_output["MGGraph"] + use_weight = input_expected_output["is_weighted"] - result_overlap = benchmark(dcg.overlap, dg, input_expected_output["vertex_pair"]) + result_overlap = benchmark( + dcg.overlap, dg, input_expected_output["vertex_pair"], use_weight=use_weight + ) result_overlap = ( result_overlap.compute() @@ -154,41 +167,3 @@ def test_dask_mg_overlap(dask_client, benchmark, input_expected_output): assert len(overlap_coeff_diffs1) == 0 assert len(overlap_coeff_diffs2) == 0 - - -@pytest.mark.mg -def test_dask_mg_weighted_overlap(): - input_data_path = datasets[0] - chunksize = dcg.get_chunksize(input_data_path) - ddf = dask_cudf.read_csv( - input_data_path, - chunksize=chunksize, - delimiter=" ", - names=["src", "dst", "value"], - dtype=["int32", "int32", "float32"], - ) - - dg = cugraph.Graph(directed=False) - dg.from_dask_cudf_edgelist( - ddf, - source="src", - destination="dst", - edge_attr="value", - renumber=True, - store_transposed=True, - ) - with pytest.raises(ValueError): - dcg.overlap(dg) - - dg = cugraph.Graph(directed=False) - dg.from_dask_cudf_edgelist( - ddf, - source="src", - destination="dst", - edge_attr="value", - store_transposed=True, - ) - - use_weight = True - with pytest.raises(ValueError): - dcg.overlap(dg, use_weight=use_weight) diff --git a/python/cugraph/cugraph/tests/link_prediction/test_sorensen_mg.py b/python/cugraph/cugraph/tests/link_prediction/test_sorensen_mg.py index af6b60771a0..66832d08427 100644 --- a/python/cugraph/cugraph/tests/link_prediction/test_sorensen_mg.py +++ b/python/cugraph/cugraph/tests/link_prediction/test_sorensen_mg.py @@ -35,6 +35,7 @@ def setup_function(): IS_DIRECTED = [False] HAS_VERTEX_PAIR = [True, False] +IS_WEIGHTED = [True, False] # 
============================================================================= @@ -49,6 +50,7 @@ def setup_function(): (datasets, "graph_file"), (IS_DIRECTED, "directed"), (HAS_VERTEX_PAIR, "has_vertex_pair"), + (IS_WEIGHTED, "is_weighted"), ) @@ -58,7 +60,9 @@ def input_combo(request): Simply return the current combination of params as a dictionary for use in tests or other parameterized fixtures. """ - parameters = dict(zip(("graph_file", "directed", "has_vertex_pair"), request.param)) + parameters = dict( + zip(("graph_file", "directed", "has_vertex_pair", "is_weighted"), request.param) + ) return parameters @@ -73,7 +77,10 @@ def input_expected_output(input_combo): input_data_path = input_combo["graph_file"] directed = input_combo["directed"] has_vertex_pair = input_combo["has_vertex_pair"] - G = utils.generate_cugraph_graph_from_file(input_data_path, directed=directed) + is_weighted = input_combo["is_weighted"] + G = utils.generate_cugraph_graph_from_file( + input_data_path, directed=directed, edgevals=is_weighted + ) if has_vertex_pair: # Sample random vertices from the graph and compute the two_hop_neighbors # with those seeds @@ -85,7 +92,9 @@ def input_expected_output(input_combo): vertex_pair = None input_combo["vertex_pair"] = vertex_pair - sg_cugraph_sorensen = cugraph.experimental.sorensen(G, input_combo["vertex_pair"]) + sg_cugraph_sorensen = cugraph.sorensen( + G, input_combo["vertex_pair"], use_weight=is_weighted + ) # Save the results back to the input_combo dictionary to prevent redundant # cuGraph runs. Other tests using the input_combo fixture will look for # them, and if not present they will have to re-run the same cuGraph call. @@ -105,6 +114,7 @@ def input_expected_output(input_combo): ddf, source="src", destination="dst", + edge_attr="value" if is_weighted else None, renumber=True, store_transposed=True, ) @@ -124,8 +134,11 @@ def input_expected_output(input_combo): def test_dask_mg_sorensen(dask_client, benchmark, input_expected_output): dg = input_expected_output["MGGraph"] + use_weight = input_expected_output["is_weighted"] - result_sorensen = benchmark(dcg.sorensen, dg, input_expected_output["vertex_pair"]) + result_sorensen = benchmark( + dcg.sorensen, dg, input_expected_output["vertex_pair"], use_weight=use_weight + ) result_sorensen = ( result_sorensen.compute() @@ -153,41 +166,3 @@ def test_dask_mg_sorensen(dask_client, benchmark, input_expected_output): assert len(sorensen_coeff_diffs1) == 0 assert len(sorensen_coeff_diffs2) == 0 - - -@pytest.mark.mg -def test_dask_mg_weighted_sorensen(dask_client): - input_data_path = datasets[0] - chunksize = dcg.get_chunksize(input_data_path) - ddf = dask_cudf.read_csv( - input_data_path, - chunksize=chunksize, - delimiter=" ", - names=["src", "dst", "value"], - dtype=["int32", "int32", "float32"], - ) - - dg = cugraph.Graph(directed=False) - dg.from_dask_cudf_edgelist( - ddf, - source="src", - destination="dst", - edge_attr="value", - renumber=True, - store_transposed=True, - ) - with pytest.raises(ValueError): - dcg.sorensen(dg) - - dg = cugraph.Graph(directed=False) - dg.from_dask_cudf_edgelist( - ddf, - source="src", - destination="dst", - edge_attr="value", - store_transposed=True, - ) - - use_weight = True - with pytest.raises(ValueError): - dcg.sorensen(dg, use_weight=use_weight) From 5c34d3dd340c76678d8f2667057c6b0ce2f1f480 Mon Sep 17 00:00:00 2001 From: Alex Barghi <105237337+alexbarghi-nv@users.noreply.github.com> Date: Tue, 26 Sep 2023 11:57:30 -0400 Subject: [PATCH 5/5] Update Allocator Selection in 
cuGraph-DGL Example (#3877) Closes #3847 Authors: - Alex Barghi (https://github.com/alexbarghi-nv) Approvers: - Vibhu Jawa (https://github.com/VibhuJawa) - Brad Rees (https://github.com/BradReesWork) URL: https://github.com/rapidsai/cugraph/pull/3877 --- .../cugraph-dgl/examples/graphsage/node-classification.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/python/cugraph-dgl/examples/graphsage/node-classification.py b/python/cugraph-dgl/examples/graphsage/node-classification.py index 24df73ada75..320890b0312 100644 --- a/python/cugraph-dgl/examples/graphsage/node-classification.py +++ b/python/cugraph-dgl/examples/graphsage/node-classification.py @@ -39,14 +39,16 @@ def set_allocators(): + import rmm import cudf import cupy - import rmm + from rmm.allocators.torch import rmm_torch_allocator + from rmm.allocators.cupy import rmm_cupy_allocator mr = rmm.mr.CudaAsyncMemoryResource() rmm.mr.set_current_device_resource(mr) - torch.cuda.memory.change_current_allocator(rmm.rmm_torch_allocator) - cupy.cuda.set_allocator(rmm.allocators.cupy.rmm_cupy_allocator) + torch.cuda.memory.change_current_allocator(rmm_torch_allocator) + cupy.cuda.set_allocator(rmm_cupy_allocator) cudf.set_option("spill", True)