Update flake8 to 7.1.1. #4652

Merged · 2 commits · Sep 18, 2024
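Most of the Python changes below are mechanical whitespace fixes inside f-strings: the newer flake8 (via its bundled pycodestyle) applies operator-spacing checks to expressions embedded in f-strings, so patterns like `{et-st}` that previously passed are now flagged. A minimal before/after sketch of the pattern, assuming that reading of the diff (the rule attribution is not stated in the PR):

```python
import time

st = time.time()
# ... the work being timed would go here ...
et = time.time()

# Previously accepted, now flagged for missing spaces around the
# operator inside the f-string expression:
#     print(f"Sampling time taken = {et-st} s")

# Spelling accepted by the updated linter:
print(f"Sampling time taken = {et - st} s")
```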
4 changes: 2 additions & 2 deletions .pre-commit-config.yaml
@@ -21,7 +21,7 @@ repos:
files: ^(python/.*|benchmarks/.*)$
exclude: ^python/nx-cugraph/
- repo: https://github.com/PyCQA/flake8
rev: 6.0.0
rev: 7.1.1
hooks:
- id: flake8
args: ["--config=.flake8"]
@@ -34,7 +34,7 @@ repos:
hooks:
- id: yesqa
additional_dependencies:
- flake8==6.0.0
- flake8==7.1.1
- repo: https://github.com/pre-commit/mirrors-clang-format
rev: v16.0.6
hooks:
4 changes: 2 additions & 2 deletions benchmarks/cugraph-dgl/python-script/ogbn_mag_benchmark.py
@@ -1,4 +1,4 @@
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
# Copyright (c) 2022-2024, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
@@ -126,4 +126,4 @@ def sampling_func(g, seed_nodes, labels, train_loader):
st = time.time()
sampling_func(g, subset_split_idx["train"], labels, train_loader)
et = time.time()
print(f"Sampling time taken = {et-st} s")
print(f"Sampling time taken = {et - st} s")
4 changes: 2 additions & 2 deletions python/cugraph-dgl/cugraph_dgl/nn/conv/base.py
@@ -129,15 +129,15 @@ def __init__(
if csrc_ids is not None:
if csrc_ids.numel() != self._num_src_nodes + 1:
raise RuntimeError(
f"Size mismatch for 'csrc_ids': expected ({size[0]+1},), "
f"Size mismatch for 'csrc_ids': expected ({size[0] + 1},), "
f"but got {tuple(csrc_ids.size())}"
)
csrc_ids = csrc_ids.contiguous()

if cdst_ids is not None:
if cdst_ids.numel() != self._num_dst_nodes + 1:
raise RuntimeError(
f"Size mismatch for 'cdst_ids': expected ({size[1]+1},), "
f"Size mismatch for 'cdst_ids': expected ({size[1] + 1},), "
f"but got {tuple(cdst_ids.size())}"
)
cdst_ids = cdst_ids.contiguous()
(file path not shown)
@@ -205,7 +205,9 @@ def train(args, device, g, dataset, model):

et = time.time()

print(f"Time taken for epoch {epoch} with batch_size {batch_size} = {et-st} s")
print(
f"Time taken for epoch {epoch} with batch_size {batch_size} = {et - st} s"
)
acc = evaluate(model, g, val_dataloader)
print(
"Epoch {:05d} | Loss {:.4f} | Accuracy {:.4f} ".format(
(file path not shown)
@@ -215,7 +215,9 @@ def train(args, device, g, dataset, model, directory):

et = time.time()

print(f"Time taken for epoch {epoch} with batch_size {batch_size} = {et-st} s")
print(
f"Time taken for epoch {epoch} with batch_size {batch_size} = {et - st} s"
)
acc = evaluate(model, g, val_dataloader)
print(
"Epoch {:05d} | Loss {:.4f} | Accuracy {:.4f} ".format(
(file path not shown)
@@ -134,7 +134,7 @@ def train_model(model, g, opt, train_dataloader, num_epochs, rank, val_nid):
et = time.time()
print(
f"Total time taken for num_epochs {num_epochs} "
f"with batch_size {train_dataloader._batch_size} = {et-st} s on rank ={rank}"
f"with batch_size {train_dataloader._batch_size} = {et - st} s on rank ={rank}"
)
if rank == 0:
val_acc = layerwise_infer(g, val_nid, model, 1024 * 5, "cuda")
(file path not shown)
@@ -234,7 +234,7 @@ def run_workflow(
torch.distributed.barrier()
total_et = time.time()
print(
f"Total time taken on n_epochs {n_epochs} = {total_et-total_st} s",
f"Total time taken on n_epochs {n_epochs} = {total_et - total_st} s",
f"measured by worker = {global_rank}",
)

(file path not shown)
@@ -207,7 +207,7 @@ def run_workflow(rank, world_size, cugraph_id, dataset, temp_dir):
torch.distributed.barrier()
total_et = time.time()
print(
f"Total time taken on n_epochs {n_epochs} = {total_et-total_st} s",
f"Total time taken on n_epochs {n_epochs} = {total_et - total_st} s",
f"measured by worker = {rank}",
)

2 changes: 1 addition & 1 deletion python/cugraph-pyg/cugraph_pyg/data/dask_graph_store.py
@@ -150,7 +150,7 @@ def is_set(self, key):
if key not in self.__dataclass_fields__:
raise KeyError(key)
attr = getattr(self, key)
return type(attr) != _field_status or attr != _field_status.UNSET
return type(attr) is not _field_status or attr != _field_status.UNSET

def is_fully_specified(self):
"""
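The other recurring change, here and in several test files below, replaces equality comparisons of types with identity comparisons, which newer flake8 releases flag (rule E721, to the best of our knowledge). A minimal sketch of the pattern with a hypothetical `value`; `isinstance` remains the usual choice when subclasses should also be accepted:

```python
def is_exact_int(value) -> bool:
    # Flagged by the updated linter:
    #     return type(value) == int
    # Preferred exact-type check:
    return type(value) is int


def is_int_like(value) -> bool:
    # Accepts int subclasses (including bool) as well.
    return isinstance(value, int)
```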
2 changes: 1 addition & 1 deletion python/cugraph-pyg/cugraph_pyg/examples/gcn_dist_sg.py
@@ -66,7 +66,7 @@ def train(epoch: int):
torch.cuda.synchronize()
print(
f"Average Training Iteration Time (s/iter): \
{(time.perf_counter() - start_avg_time)/(i-warmup_steps):.6f}"
{(time.perf_counter() - start_avg_time) / (i - warmup_steps):.6f}"
)


6 changes: 3 additions & 3 deletions python/cugraph-service/tests/test_e2e.py
@@ -1,4 +1,4 @@
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
# Copyright (c) 2022-2024, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -307,8 +307,8 @@ def test_load_call_unload_extension(client, extension1):
assert len(results) == 2
assert len(results[0]) == 33
assert len(results[1]) == 21
assert type(results[0][0]) == int
assert type(results[1][0]) == float
assert type(results[0][0]) is int
assert type(results[1][0]) is float
assert results[0][0] == 9
assert results[1][0] == 9.0

6 changes: 3 additions & 3 deletions python/cugraph-service/tests/test_mg_e2e.py
@@ -1,4 +1,4 @@
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
# Copyright (c) 2022-2024, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -413,8 +413,8 @@ def test_call_extension_result_on_device(
assert len(results) == 2
assert len(results[0]) == array1_len
assert len(results[1]) == array2_len
assert type(results[0][0]) == int
assert type(results[1][0]) == float
assert type(results[0][0]) is int
assert type(results[1][0]) is float
assert results[0][0] == 9
assert results[1][0] == 9.0
else:
4 changes: 3 additions & 1 deletion python/cugraph/cugraph/tests/generators/test_rmat.py
@@ -27,7 +27,9 @@
_scale_values = [2, 4, 16]
_scale_test_ids = [f"scale={x}" for x in _scale_values]
_graph_types = [cugraph.Graph, None, int]
_graph_test_ids = [f"create_using={getattr(x,'__name__',str(x))}" for x in _graph_types]
_graph_test_ids = [
f"create_using={getattr(x, '__name__', str(x))}" for x in _graph_types
]
_clip_and_flip = [False, True]
_clip_and_flip_test_ids = [f"clip_and_flip={x}" for x in _clip_and_flip]
_scramble_vertex_ids = [False, True]
6 changes: 4 additions & 2 deletions python/cugraph/cugraph/tests/generators/test_rmat_mg.py
@@ -1,4 +1,4 @@
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
# Copyright (c) 2021-2024, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
@@ -34,7 +34,9 @@
_scale_values = [2, 4, 16]
_scale_test_ids = [f"scale={x}" for x in _scale_values]
_graph_types = [cugraph.Graph, None, int]
_graph_test_ids = [f"create_using={getattr(x,'__name__',str(x))}" for x in _graph_types]
_graph_test_ids = [
f"create_using={getattr(x, '__name__', str(x))}" for x in _graph_types
]


def _call_rmat(scale, num_edges, create_using, mg=True):
(file path not shown)
@@ -119,7 +119,7 @@ def test_bulk_sampler_remainder(scratch_dir):
assert b in recovered_samples["batch_id"].values_host.tolist()

for x in range(0, 6, 2):
subdir = f"{x}-{x+1}"
subdir = f"{x}-{x + 1}"
df = cudf.read_parquet(os.path.join(samples_path, f"batch={subdir}.parquet"))

assert ((df.batch_id == x) | (df.batch_id == (x + 1))).all()
4 changes: 2 additions & 2 deletions python/cugraph/cugraph/tests/structure/test_graph_mg.py
@@ -303,7 +303,7 @@ def test_mg_graph_serializable(dask_client, input_combo):
G = input_combo["MGGraph"]
dask_client.publish_dataset(shared_g=G)
shared_g = dask_client.get_dataset("shared_g")
assert type(shared_g) == type(G)
assert type(shared_g) is type(G)
assert G.number_of_vertices() == shared_g.number_of_vertices()
assert G.number_of_edges() == shared_g.number_of_edges()
# cleanup
@@ -314,7 +314,7 @@ def test_mg_graph_serializable(dask_client, input_combo):
def test_mg_graph_copy():
G = cugraph.MultiGraph(directed=True)
G_c = copy.deepcopy(G)
assert type(G) == type(G_c)
assert type(G) is type(G_c)


@pytest.mark.mg
4 changes: 2 additions & 2 deletions python/cugraph/cugraph/tests/traversal/test_sssp.py
@@ -1,4 +1,4 @@
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
# Copyright (c) 2019-2024, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
@@ -486,7 +486,7 @@ def test_scipy_api_compat():
distances = cugraph.shortest_path(
input_coo_matrix, source=0, return_predecessors=False
)
assert type(distances) != tuple
assert type(distances) is not tuple

with pytest.raises(ValueError):
cugraph.shortest_path(input_coo_matrix, source=0, unweighted=False)
10 changes: 6 additions & 4 deletions python/cugraph/cugraph/traversal/sssp.py
@@ -1,4 +1,4 @@
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
# Copyright (c) 2019-2024, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
@@ -36,7 +36,7 @@ def _ensure_args(
# checks common to all input types
if (method is not None) and (method != "auto"):
raise ValueError("only 'auto' is currently accepted for method")
if (indices is not None) and (type(indices) == list):
if (indices is not None) and (type(indices) is list):
raise ValueError("indices currently cannot be a list-like type")
if (indices is not None) and (source is not None):
raise TypeError("cannot specify both 'source' and 'indices'")
@@ -70,9 +70,11 @@ def _ensure_args(

# Check for non-Graph-type inputs
else:
if (directed is not None) and (type(directed) != bool):
if (directed is not None) and (type(directed) is not bool):
raise ValueError("'directed' must be a bool")
if (return_predecessors is not None) and (type(return_predecessors) != bool):
if (return_predecessors is not None) and (
type(return_predecessors) is not bool
):
raise ValueError("'return_predecessors' must be a bool")
if (unweighted is not None) and (unweighted is not True):
raise ValueError("'unweighted' currently must be True if " "specified")