Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add nx-cugraph Benchmarking Scripts #4616

Merged
merged 22 commits into from
Aug 31, 2024
Merged
Show file tree
Hide file tree
Changes from 18 commits
Commits
Show all changes
22 commits
Select commit Hold shift + click to select a range
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
45 changes: 45 additions & 0 deletions benchmarks/nx-cugraph/pytest-based/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,45 @@
## `nx-cugraph` Benchmarks

### Overview

This directory contains a set of scripts designed to benchmark NetworkX with the `nx-cugraph` backend and deliver a report that summarizes the speed-up and runtime deltas over default NetworkX.

Our current benchmarks use the following datasets:

| Dataset | Nodes | Edges | Directed |
| -------- | ------- | ------- | ------- |
| netscience | 1,461 | 5,484 | Yes |
| email-Eu-core | 1,005 | 25,571 | Yes |
| cit-Patents | 3,774,768 | 16,518,948 | Yes |
| hollywood | 1,139,905 | 57,515,616 | No |
| soc-LiveJournal1 | 4,847,571 | 68,993,773 | Yes |



### Scripts

#### 1. `run-gap-benchmarks.sh`
nv-rliu marked this conversation as resolved.
Show resolved Hide resolved
This script allows users to run selected algorithms across multiple datasets and backends. All results are stored inside a sub-directory (`logs/`) and output files are named based on the combination of parameters for that benchmark.

NOTE: If running with all algorithms, datasets, and backends, this script may take a few hours to finish running.

**Usage:**
```bash
bash run-gap-benchmarks.sh # edit this script directly
```

#### 2. `get_graph_bench_dataset.py`
This script downloads the specified dataset using `cugraph.datasets`.

**Usage:**
```bash
python get_graph_bench_dataset.py [dataset]
```

#### 3. `create_results_summary_page.py`
This script is designed to be run after `run-gap-benchmarks.sh` in order to generate an HTML page displaying a results table comparing default NetworkX to nx-cugraph. The script also provides information about the current system.

**Usage:**
```bash
python create_results_summary_page.py > report.html
```
31 changes: 29 additions & 2 deletions benchmarks/nx-cugraph/pytest-based/bench_algos.py
Original file line number Diff line number Diff line change
Expand Up @@ -271,9 +271,8 @@ def bench_from_networkx(benchmark, graph_obj):


# normalized_param_values = [True, False]
# k_param_values = [10, 100]
normalized_param_values = [True]
k_param_values = [10]
k_param_values = [10, 100, 1000]


@pytest.mark.parametrize(
Expand All @@ -282,6 +281,10 @@ def bench_from_networkx(benchmark, graph_obj):
@pytest.mark.parametrize("k", k_param_values, ids=lambda k: f"{k=}")
def bench_betweenness_centrality(benchmark, graph_obj, backend_wrapper, normalized, k):
G = get_graph_obj_for_benchmark(graph_obj, backend_wrapper)

if k > G.number_of_nodes():
pytest.skip(reason=f"{k=} > {G.number_of_nodes()=}")

result = benchmark.pedantic(
target=backend_wrapper(nx.betweenness_centrality),
args=(G,),
Expand All @@ -305,6 +308,10 @@ def bench_edge_betweenness_centrality(
benchmark, graph_obj, backend_wrapper, normalized, k
):
G = get_graph_obj_for_benchmark(graph_obj, backend_wrapper)

if k > G.number_of_nodes():
pytest.skip(reason=f"{k=} > {G.number_of_nodes()=}")

result = benchmark.pedantic(
target=backend_wrapper(nx.edge_betweenness_centrality),
args=(G,),
Expand Down Expand Up @@ -473,6 +480,26 @@ def bench_pagerank_personalized(benchmark, graph_obj, backend_wrapper):
assert type(result) is dict


def bench_shortest_path(benchmark, graph_obj, backend_wrapper):
    """
    Benchmark nx.shortest_path from a single source (the node with the
    highest degree) to all reachable nodes — no target is given.
    """
    G = get_graph_obj_for_benchmark(graph_obj, backend_wrapper)
    source_node = get_highest_degree_node(graph_obj)

    result = benchmark.pedantic(
        target=backend_wrapper(nx.shortest_path),
        args=(G,),
        kwargs={"source": source_node},
        rounds=rounds,
        iterations=iterations,
        warmup_rounds=warmup_rounds,
    )
    assert type(result) is dict


def bench_single_source_shortest_path_length(benchmark, graph_obj, backend_wrapper):
G = get_graph_obj_for_benchmark(graph_obj, backend_wrapper)
node = get_highest_degree_node(graph_obj)
Expand Down
245 changes: 245 additions & 0 deletions benchmarks/nx-cugraph/pytest-based/create_results_summary_page.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,245 @@
# Copyright (c) 2024, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import re
import pathlib
import json
import platform
import psutil
import socket
import subprocess


def compute_perf_vals(cugraph_runtime, networkx_runtime):
    """Return display strings comparing two runtimes (both in seconds).

    Returns a ``(speedup, delta)`` tuple, e.g. ``("12.345X", "987.654ms")``:
    speedup is networkx_runtime / cugraph_runtime, and delta is the runtime
    difference scaled to the most readable unit (s, ms, or us).
    """
    delta_seconds = networkx_runtime - cugraph_runtime
    magnitude = abs(delta_seconds)

    # Pick a unit so the printed delta is not a long run of leading zeros.
    if magnitude >= 1:
        scale, units = 1.0, "s"
    elif magnitude >= 0.001:
        scale, units = 1e3, "ms"
    else:
        scale, units = 1e6, "us"

    speedup_string = f"{networkx_runtime / cugraph_runtime:.3f}X"
    delta_string = f"{delta_seconds * scale:.3f}{units}"
    return (speedup_string, delta_string)


def get_mem_info():
    """Return total system memory in GiB, rounded to two decimal places."""
    total_bytes = psutil.virtual_memory().total
    return round(total_bytes / 1024**3, 2)


def get_cuda_version():
    """Return the CUDA version string reported by nvidia-smi.

    Parses the "CUDA Version: X.Y" field from nvidia-smi's header output.
    Returns an error message string if nvidia-smi fails or if no CUDA
    version line is present.

    Fixes: the check_output call was outside the try block, so a failing
    nvidia-smi invocation escaped uncaught; and a missing "CUDA Version"
    line raises StopIteration (from next()), not CalledProcessError, so
    the original except clause could never fire.
    """
    try:
        output = subprocess.check_output("nvidia-smi", shell=True).decode()
        return next(
            line.split("CUDA Version: ")[1].split()[0]
            for line in output.splitlines()
            if "CUDA Version" in line
        )
    except (subprocess.CalledProcessError, StopIteration):
        return "Failed to get CUDA version."


def get_first_gpu_info():
    """Return a one-line summary of the system's GPUs.

    Queries nvidia-smi and returns a string of the form
    "N x GPU_NAME (M GB)" describing the GPU count and the first GPU's
    name and total memory. On failure, returns an explanatory message.

    Fixes: error paths previously printed their message and implicitly
    returned None, which made the HTML report render "GPU: None"; the
    message is now returned so it lands in the report itself.
    """
    try:
        gpu_info = (
            subprocess.check_output(
                "nvidia-smi --query-gpu=name,memory.total,memory.free,memory.used --format=csv,noheader",
                shell=True,
            )
            .decode()
            .strip()
        )
    except subprocess.CalledProcessError:
        return "Failed to execute nvidia-smi. No GPU information available."

    if not gpu_info:
        return "No GPU found or unable to query GPU details."

    gpus = gpu_info.splitlines()
    # Only the first GPU's details are reported; others are assumed identical.
    gpu_name, mem_total, _, _ = gpus[0].split(",")
    # memory.total is reported in MiB (e.g. "81920 MiB"); convert to GB.
    total_gb = round(int(mem_total.strip().split()[0]) / 1024, 2)
    return f"{len(gpus)} x {gpu_name.strip()} ({total_gb} GB)"


def get_system_info():
    """Print HTML paragraphs describing the host: hostname, OS, kernel,
    CPU, memory, GPU, and CUDA version.

    Intended to be called while stdout is being captured into the report
    (see __main__). Linux-only: reads /proc/cpuinfo for the CPU model and
    assumes at least one "model name" line is present.
    """
    print(f"<p>Hostname: {socket.gethostname()}</p>")
    # Fixed a stray quote in the class attribute (was class="text-indent"").
    print(
        f'<p class="text-indent">Operating System: {platform.system()} {platform.release()}</p>'
    )
    print(f'<p class="text-indent">Kernel Version : {platform.version()}</p>')
    with open("/proc/cpuinfo") as f:
        cpu_model = next(
            line.strip().split(": ")[1] for line in f if "model name" in line
        )
    print(f"<p>CPU: {cpu_model} ({psutil.cpu_count(logical=False)} cores)</p>")
    print(f'<p class="text-indent">Memory: {get_mem_info()} GB</p>')
    print(f"<p>GPU: {get_first_gpu_info()}</p>")
    print(f"<p>CUDA Version: {get_cuda_version()}</p>")


if __name__ == "__main__":
    logs_dir = pathlib.Path("logs")

    # Benchmark names look like:
    #   "bench_triangles[ds=netscience-backend=cugraph-preconverted]"
    # Raw strings prevent invalid-escape-sequence warnings for \w.
    dataset_patt = re.compile(r".*ds=([\w-]+).*")
    backend_patt = re.compile(r".*backend=(\w+).*")
    # Matches the power-of-ten k values (10, 100, 1000, ...) used by the
    # betweenness centrality benchmarks.
    k_patt = re.compile(r".*k=(10*).*")

    # Organize all benchmark runs by the following hierarchy: algo -> backend -> dataset
    benchmarks = {}

    # Populate benchmarks dict from .json files written by pytest-benchmark
    for json_file in logs_dir.glob("*.json"):
        try:
            # read_text() (vs. a bare open()) avoids leaking the file handle
            data = json.loads(json_file.read_text())
        except json.decoder.JSONDecodeError:
            # Skip partially-written or corrupt result files.
            continue

        for benchmark_run in data["benchmarks"]:
            # example name: "bench_triangles[ds=netscience-backend=cugraph-preconverted]"
            name = benchmark_run["name"]

            algo_name = name.split("[")[0]
            if algo_name.startswith("bench_"):
                algo_name = algo_name[6:]
            # special case for betweenness_centrality: fold the k parameter
            # into the algo name so each k gets its own table row
            match = k_patt.match(name)
            if match is not None:
                algo_name += f", k={match.group(1)}"

            match = dataset_patt.match(name)
            if match is None:
                raise RuntimeError(
                    f"benchmark name {name} in file {json_file} has an unexpected format"
                )
            dataset = match.group(1)
            # [\w-]+ greedily captures the literal "-backend" separator that
            # follows the dataset name; strip it back off.
            if dataset.endswith("-backend"):
                dataset = dataset[:-8]

            match = backend_patt.match(name)
            if match is None:
                raise RuntimeError(
                    f"benchmark name {name} in file {json_file} has an unexpected format"
                )
            backend = match.group(1)
            # backend=None means the run used default NetworkX
            if backend == "None":
                backend = "networkx"

            runtime = benchmark_run["stats"]["mean"]
            benchmarks.setdefault(algo_name, {}).setdefault(backend, {})[
                dataset
            ] = runtime

    # dump HTML table
    ordered_datasets = [
        "netscience",
        "email_Eu_core",
        "cit-patents",
        "hollywood",
        "soc-livejournal1",
    ]

    print(
        """
<html>
<head>
<style>
table {
  table-layout: fixed;
  width: 100%;
  border-collapse: collapse;
}
tbody tr:nth-child(odd) {
  background-color: #ffffff;
}
tbody tr:nth-child(even) {
  background-color: #d3d3d3;
}
tbody td {
  text-align: center;
  color: black;
}
th,
td {
  padding: 10px;
}
.footer {
  background-color: #f1f1f1;
  padding: 10px;
  font-size: 12px;
  color: black;
  width: 100%;
}
.text-indent {
  text-indent: 20px; /* Indents the first line of the text by 20px */
}
</style>
</head>
<table>
  <thead>
    <tr>
      <th></th>"""
    )
    for ds in ordered_datasets:
        print(f"      <th>{ds}</th>")
    print(
        """    </tr>
  </thead>
  <tbody>
"""
    )
    for algo_name in sorted(benchmarks):
        algo_runs = benchmarks[algo_name]
        print("    <tr>")
        print(f"      <td>{algo_name}</td>")
        # Proceed only if any results are present for both cugraph and NX
        if "cugraph" in algo_runs and "networkx" in algo_runs:
            cugraph_algo_runs = algo_runs["cugraph"]
            networkx_algo_runs = algo_runs["networkx"]
            datasets_in_both = set(cugraph_algo_runs).intersection(networkx_algo_runs)

            # populate the table with speedup results for each dataset in the order
            # specified in ordered_datasets. If results for a run using a dataset
            # are not present for both cugraph and NX, output an empty cell.
            for dataset in ordered_datasets:
                if dataset in datasets_in_both:
                    cugraph_runtime = cugraph_algo_runs[dataset]
                    networkx_runtime = networkx_algo_runs[dataset]
                    (speedup, runtime_delta) = compute_perf_vals(
                        cugraph_runtime=cugraph_runtime,
                        networkx_runtime=networkx_runtime,
                    )
                    print(f"      <td>{speedup}<br>{runtime_delta}</td>")
                else:
                    print("      <td></td>")

        # If a comparison between cugraph and NX cannot be made, output empty cells
        # for each dataset
        else:
            for _ in range(len(ordered_datasets)):
                print("      <td></td>")

        print("    </tr>")

    print(
        """
  </tbody>\n</table>
  <div class="footer">"""
    )
    get_system_info()
    print("""</div>\n</html>""")
35 changes: 35 additions & 0 deletions benchmarks/nx-cugraph/pytest-based/get_graph_bench_dataset.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,35 @@
# Copyright (c) 2024, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Checks if a particular dataset has been downloaded inside the datasets dir
(RAPIDS_DATASET_ROOT_DIR). If not, the file will be downloaded using the
datasets API.

Positional Arguments:
1) dataset name (e.g. 'email_Eu_core', 'cit-patents')
available datasets can be found here: `python/cugraph/cugraph/datasets/__init__.py`
"""

import sys

import cugraph.datasets as cgds


if __name__ == "__main__":
    # download and store dataset (csv) by using the Datasets API
    if len(sys.argv) != 2:
        # Friendly usage error instead of a bare IndexError.
        raise SystemExit(f"usage: {sys.argv[0]} <dataset-name>")

    # Dataset attributes in cugraph.datasets use underscores
    # (e.g. "email-Eu-core" -> "email_Eu_core"), so normalize hyphens.
    dataset = sys.argv[1].replace("-", "_")
    dataset_obj = getattr(cgds, dataset, None)
    if dataset_obj is None:
        # Clear error instead of an opaque AttributeError.
        raise SystemExit(f"unknown dataset: {sys.argv[1]!r}")

    # Only download if the csv is not already present locally.
    if not dataset_obj.get_path().exists():
        dataset_obj.get_edgelist(download=True)
Loading
Loading