Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Buffered in-memory sampling in cuGraph-PyG, link prediction in cuGraph-PyG, and negative sampling in cuGraph-PyG (rapidsai/cugraph#4660) #48

Merged
merged 2 commits into from
Sep 30, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
15 changes: 9 additions & 6 deletions python/cugraph-dgl/cugraph_dgl/dataloading/sampler.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,6 @@
create_homogeneous_sampled_graphs_from_tensors_csc,
)

from cugraph.gnn import DistSampleReader

from cugraph.utilities.utils import import_optional

Expand All @@ -33,14 +32,18 @@ class SampleReader:
Iterator that processes results from the cuGraph distributed sampler.
"""

def __init__(self, base_reader: DistSampleReader, output_format: str = "dgl.Block"):
def __init__(
self,
base_reader: Iterator[Tuple[Dict[str, "torch.Tensor"], int, int]],
output_format: str = "dgl.Block",
):
"""
Constructs a new SampleReader.

Parameters
----------
base_reader: DistSampleReader
The reader responsible for loading saved samples produced by
base_reader: Iterator[Tuple[Dict[str, "torch.Tensor"], int, int]]
The iterator responsible for loading saved samples produced by
the cuGraph distributed sampler.
"""
self.__output_format = output_format
Expand Down Expand Up @@ -83,7 +86,7 @@ class HomogeneousSampleReader(SampleReader):

def __init__(
self,
base_reader: DistSampleReader,
base_reader: Iterator[Tuple[Dict[str, "torch.Tensor"], int, int]],
output_format: str = "dgl.Block",
edge_dir="in",
):
Expand All @@ -92,7 +95,7 @@ def __init__(

Parameters
----------
base_reader: DistSampleReader
base_reader: Iterator[Tuple[Dict[str, "torch.Tensor"], int, int]]
The reader responsible for loading saved samples produced by
the cuGraph distributed sampler.
output_format: str
Expand Down
5 changes: 5 additions & 0 deletions python/cugraph-pyg/cugraph_pyg/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,3 +12,8 @@
# limitations under the License.

from cugraph_pyg._version import __git_commit__, __version__

import cugraph_pyg.data
import cugraph_pyg.loader
import cugraph_pyg.sampler
import cugraph_pyg.nn
17 changes: 11 additions & 6 deletions python/cugraph-pyg/cugraph_pyg/data/graph_store.py
Original file line number Diff line number Diff line change
Expand Up @@ -205,13 +205,18 @@ def _num_vertices(self) -> Dict[str, int]:
else edge_attr.size[1]
)
else:
if edge_attr.edge_type[0] not in num_vertices:
if edge_attr.edge_type[0] != edge_attr.edge_type[2]:
if edge_attr.edge_type[0] not in num_vertices:
num_vertices[edge_attr.edge_type[0]] = int(
self.__edge_indices[edge_attr.edge_type][0].max() + 1
)
if edge_attr.edge_type[2] not in num_vertices:
num_vertices[edge_attr.edge_type[1]] = int(
self.__edge_indices[edge_attr.edge_type][1].max() + 1
)
elif edge_attr.edge_type[0] not in num_vertices:
num_vertices[edge_attr.edge_type[0]] = int(
self.__edge_indices[edge_attr.edge_type][0].max() + 1
)
if edge_attr.edge_type[2] not in num_vertices:
num_vertices[edge_attr.edge_type[1]] = int(
self.__edge_indices[edge_attr.edge_type][1].max() + 1
self.__edge_indices[edge_attr.edge_type].max() + 1
)

if self.is_multi_gpu:
Expand Down
24 changes: 18 additions & 6 deletions python/cugraph-pyg/cugraph_pyg/examples/gcn_dist_mnmg.py
Original file line number Diff line number Diff line change
Expand Up @@ -185,6 +185,8 @@ def run_train(
wall_clock_start,
tempdir=None,
num_layers=3,
in_memory=False,
seeds_per_call=-1,
):
optimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=0.0005)

Expand All @@ -196,20 +198,23 @@ def run_train(
from cugraph_pyg.loader import NeighborLoader

ix_train = split_idx["train"].cuda()
train_path = os.path.join(tempdir, f"train_{global_rank}")
os.mkdir(train_path)
train_path = None if in_memory else os.path.join(tempdir, f"train_{global_rank}")
if train_path:
os.mkdir(train_path)
train_loader = NeighborLoader(
data,
input_nodes=ix_train,
directory=train_path,
shuffle=True,
drop_last=True,
local_seeds_per_call=seeds_per_call if seeds_per_call > 0 else None,
**kwargs,
)

ix_test = split_idx["test"].cuda()
test_path = os.path.join(tempdir, f"test_{global_rank}")
os.mkdir(test_path)
test_path = None if in_memory else os.path.join(tempdir, f"test_{global_rank}")
if test_path:
os.mkdir(test_path)
test_loader = NeighborLoader(
data,
input_nodes=ix_test,
Expand All @@ -221,14 +226,16 @@ def run_train(
)

ix_valid = split_idx["valid"].cuda()
valid_path = os.path.join(tempdir, f"valid_{global_rank}")
os.mkdir(valid_path)
valid_path = None if in_memory else os.path.join(tempdir, f"valid_{global_rank}")
if valid_path:
os.mkdir(valid_path)
valid_loader = NeighborLoader(
data,
input_nodes=ix_valid,
directory=valid_path,
shuffle=True,
drop_last=True,
local_seeds_per_call=seeds_per_call if seeds_per_call > 0 else None,
**kwargs,
)

Expand Down Expand Up @@ -347,6 +354,9 @@ def parse_args():
parser.add_argument("--skip_partition", action="store_true")
parser.add_argument("--wg_mem_type", type=str, default="distributed")

parser.add_argument("--in_memory", action="store_true", default=False)
parser.add_argument("--seeds_per_call", type=int, default=-1)

return parser.parse_args()


Expand Down Expand Up @@ -429,6 +439,8 @@ def parse_args():
wall_clock_start,
tempdir,
args.num_layers,
args.in_memory,
args.seeds_per_call,
)
else:
warnings.warn("This script should be run with 'torchrun'. Exiting.")
24 changes: 20 additions & 4 deletions python/cugraph-pyg/cugraph_pyg/examples/gcn_dist_sg.py
Original file line number Diff line number Diff line change
Expand Up @@ -91,17 +91,28 @@ def test(loader: NeighborLoader, val_steps: Optional[int] = None):


def create_loader(
data, num_neighbors, input_nodes, replace, batch_size, samples_dir, stage_name
data,
num_neighbors,
input_nodes,
replace,
batch_size,
samples_dir,
stage_name,
local_seeds_per_call,
):
directory = os.path.join(samples_dir, stage_name)
os.mkdir(directory)
if samples_dir is not None:
directory = os.path.join(samples_dir, stage_name)
os.mkdir(directory)
else:
directory = None
return NeighborLoader(
data,
num_neighbors=num_neighbors,
input_nodes=input_nodes,
replace=replace,
batch_size=batch_size,
directory=directory,
local_seeds_per_call=local_seeds_per_call,
)


Expand Down Expand Up @@ -147,6 +158,8 @@ def parse_args():
parser.add_argument("--tempdir_root", type=str, default=None)
parser.add_argument("--dataset_root", type=str, default="dataset")
parser.add_argument("--dataset", type=str, default="ogbn-products")
parser.add_argument("--in_memory", action="store_true", default=False)
parser.add_argument("--seeds_per_call", type=int, default=-1)

return parser.parse_args()

Expand All @@ -170,7 +183,10 @@ def parse_args():
"num_neighbors": [args.fan_out] * args.num_layers,
"replace": False,
"batch_size": args.batch_size,
"samples_dir": samples_dir,
"samples_dir": None if args.in_memory else samples_dir,
"local_seeds_per_call": None
if args.seeds_per_call <= 0
else args.seeds_per_call,
}

train_loader = create_loader(
Expand Down
23 changes: 17 additions & 6 deletions python/cugraph-pyg/cugraph_pyg/examples/gcn_dist_snmg.py
Original file line number Diff line number Diff line change
Expand Up @@ -86,6 +86,8 @@ def run_train(
wall_clock_start,
tempdir=None,
num_layers=3,
in_memory=False,
seeds_per_call=-1,
):

init_pytorch_worker(
Expand Down Expand Up @@ -119,20 +121,23 @@ def run_train(
dist.barrier()

ix_train = torch.tensor_split(split_idx["train"], world_size)[rank].cuda()
train_path = os.path.join(tempdir, f"train_{rank}")
os.mkdir(train_path)
train_path = None if in_memory else os.path.join(tempdir, f"train_{rank}")
if train_path:
os.mkdir(train_path)
train_loader = NeighborLoader(
(feature_store, graph_store),
input_nodes=ix_train,
directory=train_path,
shuffle=True,
drop_last=True,
local_seeds_per_call=seeds_per_call if seeds_per_call > 0 else None,
**kwargs,
)

ix_test = torch.tensor_split(split_idx["test"], world_size)[rank].cuda()
test_path = os.path.join(tempdir, f"test_{rank}")
os.mkdir(test_path)
test_path = None if in_memory else os.path.join(tempdir, f"test_{rank}")
if test_path:
os.mkdir(test_path)
test_loader = NeighborLoader(
(feature_store, graph_store),
input_nodes=ix_test,
Expand All @@ -144,14 +149,16 @@ def run_train(
)

ix_valid = torch.tensor_split(split_idx["valid"], world_size)[rank].cuda()
valid_path = os.path.join(tempdir, f"valid_{rank}")
os.mkdir(valid_path)
valid_path = None if in_memory else os.path.join(tempdir, f"valid_{rank}")
if valid_path:
os.mkdir(valid_path)
valid_loader = NeighborLoader(
(feature_store, graph_store),
input_nodes=ix_valid,
directory=valid_path,
shuffle=True,
drop_last=True,
local_seeds_per_call=seeds_per_call if seeds_per_call > 0 else None,
**kwargs,
)

Expand Down Expand Up @@ -269,6 +276,8 @@ def run_train(
parser.add_argument("--tempdir_root", type=str, default=None)
parser.add_argument("--dataset_root", type=str, default="dataset")
parser.add_argument("--dataset", type=str, default="ogbn-products")
parser.add_argument("--in_memory", action="store_true", default=False)
parser.add_argument("--seeds_per_call", type=int, default=-1)

parser.add_argument(
"--n_devices",
Expand Down Expand Up @@ -322,6 +331,8 @@ def run_train(
wall_clock_start,
tempdir,
args.num_layers,
args.in_memory,
args.seeds_per_call,
),
nprocs=world_size,
join=True,
Expand Down
Loading