Remove checks for Pascal, no longer supported #4044

Merged

7 changes: 0 additions & 7 deletions ci/notebook_list.py
@@ -42,7 +42,6 @@ def skip_book_dir(runtype):
# Not strictly true... however what we mean is
# Pascal or earlier
#
-pascal = False
ampere = False
device = cuda.get_current_device()

@@ -62,8 +61,6 @@ def skip_book_dir(runtype):
cc = getattr(device, "COMPUTE_CAPABILITY", None) or getattr(
    device, "compute_capability"
)
-if cc[0] < 7:
-    pascal = True
if cc[0] >= 8:
    ampere = True

@@ -91,10 +88,6 @@ def skip_book_dir(runtype):
            )
            skip = True
            break
-        elif pascal and re.search("# Does not run on Pascal", line):
-            print(f"SKIPPING {filename} (does not run on Pascal)", file=sys.stderr)
-            skip = True
-            break
        elif ampere and re.search("# Does not run on Ampere", line):
            print(f"SKIPPING {filename} (does not run on Ampere)", file=sys.stderr)
            skip = True
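
With the Pascal branch removed, the notebook gating above keys only off Ampere-or-newer devices. As a rough, illustrative sketch (not code from this PR; the helper name ampere_or_newer is hypothetical), the remaining detection logic amounts to the following, assuming numba is installed and a CUDA device is visible:

# Illustrative sketch only; not part of this diff.
from numba import cuda

def ampere_or_newer() -> bool:
    device = cuda.get_current_device()
    # Try both attribute spellings, presumably to cover numba versions that
    # expose the uppercase COMPUTE_CAPABILITY attribute as well as those that
    # expose the lowercase compute_capability property.
    cc = getattr(device, "COMPUTE_CAPABILITY", None) or getattr(
        device, "compute_capability"
    )
    return cc[0] >= 8  # Ampere and newer GPUs report compute capability 8.x or higher

if __name__ == "__main__":
    print("Ampere or newer:", ampere_or_newer())
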
4 changes: 1 addition & 3 deletions ci/test.sh
@@ -63,9 +63,7 @@ fi
# EXITCODE for the script.
set +e

-if (python ${CUGRAPH_ROOT}/ci/utils/is_pascal.py); then
-  echo "WARNING: skipping C++ tests on Pascal GPU arch."
-elif hasArg "--run-cpp-tests"; then
+if hasArg "--run-cpp-tests"; then
  echo "C++ gtests for cuGraph (single-GPU only)..."
  for gt in "${CONDA_PREFIX}/bin/gtests/libcugraph/"*_TEST; do
    test_name=$(basename $gt)
37 changes: 0 additions & 37 deletions ci/utils/is_pascal.py

This file was deleted.
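
The contents of the deleted ci/utils/is_pascal.py are not reproduced on this page. Purely as a hypothetical sketch of the kind of gate test.sh used to invoke (exit status 0 meaning "Pascal or older, skip the C++ tests"), such a script could look like the following; the structure is illustrative and is not the deleted file's actual code:

# Hypothetical sketch, not the deleted ci/utils/is_pascal.py.
import sys

from numba import cuda

def main() -> None:
    major, _minor = cuda.get_current_device().compute_capability
    # Exit 0 ("is Pascal or older") so the caller can skip the GPU tests;
    # exit 1 on Volta (7.x) and newer so the tests run.
    sys.exit(0 if major < 7 else 1)

if __name__ == "__main__":
    main()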

2 changes: 1 addition & 1 deletion cpp/CMakeLists.txt
@@ -41,7 +41,7 @@ endif()
# cuhornet currently doesn't support
#
# >= 90
-set(supported_archs "60" "62" "70" "72" "75" "80" "86" "89" "90")
+set(supported_archs "70" "72" "75" "80" "86" "89" "90")
foreach( arch IN LISTS CMAKE_CUDA_ARCHITECTURES)
  string(REPLACE "-real" "" arch ${arch})
  if( arch IN_LIST supported_archs )
42 changes: 15 additions & 27 deletions cpp/tests/community/ecg_test.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
 *
 * NVIDIA CORPORATION and its licensors retain all intellectual property
 * and proprietary rights in and to this software, related documentation
@@ -121,41 +121,29 @@ TEST(ecg, dolphin)
  cugraph::legacy::GraphCSRView<int, int, float> graph_csr(
    offsets_v.data(), indices_v.data(), weights_v.data(), num_verts, num_edges);

-  // "FIXME": remove this check once we drop support for Pascal
-  //
-  // Calling louvain on Pascal will throw an exception, we'll check that
-  // this is the behavior while we still support Pascal (device_prop.major < 7)
-  //
-  if (handle.get_device_properties().major < 7) {
-    EXPECT_THROW(
-      (cugraph::ecg<int32_t, int32_t, float>(handle, graph_csr, .05, 16, result_v.data())),
-      cugraph::logic_error);
-  } else {
-    cugraph::ecg<int32_t, int32_t, float>(handle, graph_csr, .05, 16, result_v.data());
+  cugraph::ecg<int32_t, int32_t, float>(handle, graph_csr, .05, 16, result_v.data());

-    auto cluster_id = cugraph::test::to_host(handle, result_v);
+  auto cluster_id = cugraph::test::to_host(handle, result_v);

-    int max = *max_element(cluster_id.begin(), cluster_id.end());
-    int min = *min_element(cluster_id.begin(), cluster_id.end());
+  int max = *max_element(cluster_id.begin(), cluster_id.end());
+  int min = *min_element(cluster_id.begin(), cluster_id.end());

-    ASSERT_EQ((min >= 0), 1);
+  ASSERT_EQ((min >= 0), 1);

-    std::set<int> cluster_ids;
-    for (auto c : cluster_id) {
-      cluster_ids.insert(c);
-    }
+  std::set<int> cluster_ids;
+  for (auto c : cluster_id) {
+    cluster_ids.insert(c);
+  }

-    ASSERT_EQ(cluster_ids.size(), size_t(max + 1));
+  ASSERT_EQ(cluster_ids.size(), size_t(max + 1));

-    float modularity{0.0};
+  float modularity{0.0};

-    cugraph::ext_raft::analyzeClustering_modularity(
-      graph_csr, max + 1, result_v.data(), &modularity);
+  cugraph::ext_raft::analyzeClustering_modularity(graph_csr, max + 1, result_v.data(), &modularity);

-    float random_modularity{0.95 * 0.4962422251701355};
+  float random_modularity{0.95 * 0.4962422251701355};

-    ASSERT_GT(modularity, random_modularity);
-  }
+  ASSERT_GT(modularity, random_modularity);
}

CUGRAPH_TEST_PROGRAM_MAIN()
36 changes: 8 additions & 28 deletions cpp/tests/community/leiden_test.cpp
@@ -79,39 +79,19 @@ class Tests_Leiden : public ::testing::TestWithParam<std::tuple<Leiden_Usecase,
    auto edge_weight_view =
      edge_weights ? std::make_optional((*edge_weights).view()) : std::nullopt;

-    // "FIXME": remove this check once we drop support for Pascal
-    //
-    // Calling leiden on Pascal will throw an exception, we'll check that
-    // this is the behavior while we still support Pascal (device_prop.major < 7)
-    //
-    cudaDeviceProp device_prop;
-    RAFT_CUDA_TRY(cudaGetDeviceProperties(&device_prop, 0));

    if (cugraph::test::g_perf) {
      RAFT_CUDA_TRY(cudaDeviceSynchronize());  // for consistent performance measurement
      hr_timer.start("Leiden");
    }

-    if (device_prop.major < 7) {
-      EXPECT_THROW(leiden(graph_view,
-                          edge_weight_view,
-                          graph_view.local_vertex_partition_range_size(),
-                          leiden_usecase.max_level_,
-                          leiden_usecase.resolution_,
-                          leiden_usecase.check_correctness_,
-                          leiden_usecase.expected_level_,
-                          leiden_usecase.expected_modularity_),
-                   cugraph::logic_error);
-    } else {
-      leiden(graph_view,
-             edge_weight_view,
-             graph_view.local_vertex_partition_range_size(),
-             leiden_usecase.max_level_,
-             leiden_usecase.resolution_,
-             leiden_usecase.check_correctness_,
-             leiden_usecase.expected_level_,
-             leiden_usecase.expected_modularity_);
-    }
+    leiden(graph_view,
+           edge_weight_view,
+           graph_view.local_vertex_partition_range_size(),
+           leiden_usecase.max_level_,
+           leiden_usecase.resolution_,
+           leiden_usecase.check_correctness_,
+           leiden_usecase.expected_level_,
+           leiden_usecase.expected_modularity_);

    if (cugraph::test::g_perf) {
      RAFT_CUDA_TRY(cudaDeviceSynchronize());  // for consistent performance measurement
66 changes: 14 additions & 52 deletions cpp/tests/community/louvain_test.cpp
@@ -69,28 +69,11 @@ class Tests_Louvain
      handle, input_usecase, true);
    auto graph_view = graph->view();

-    // "FIXME": remove this check once we drop support for Pascal
-    //
-    // Calling louvain on Pascal will throw an exception, we'll check that
-    // this is the behavior while we still support Pascal (device_prop.major < 7)
-    //
-    cudaDeviceProp device_prop;
-    RAFT_CUDA_TRY(cudaGetDeviceProperties(&device_prop, 0));

-    if (device_prop.major < 7) {
-      EXPECT_THROW(louvain_legacy(graph_view,
-                                  graph_view.get_number_of_vertices(),
-                                  louvain_usecase.check_correctness_,
-                                  louvain_usecase.expected_level_,
-                                  louvain_usecase.expected_modularity_),
-                   cugraph::logic_error);
-    } else {
-      louvain_legacy(graph_view,
-                     graph_view.get_number_of_vertices(),
-                     louvain_usecase.check_correctness_,
-                     louvain_usecase.expected_level_,
-                     louvain_usecase.expected_modularity_);
-    }
+    louvain_legacy(graph_view,
+                   graph_view.get_number_of_vertices(),
+                   louvain_usecase.check_correctness_,
+                   louvain_usecase.expected_level_,
+                   louvain_usecase.expected_modularity_);
  }

  template <typename vertex_t, typename edge_t, typename weight_t, typename result_t>
@@ -124,41 +107,20 @@ class Tests_Louvain
    auto edge_weight_view =
      edge_weights ? std::make_optional((*edge_weights).view()) : std::nullopt;

-    // "FIXME": remove this check once we drop support for Pascal
-    //
-    // Calling louvain on Pascal will throw an exception, we'll check that
-    // this is the behavior while we still support Pascal (device_prop.major < 7)
-    //
-    cudaDeviceProp device_prop;
-    RAFT_CUDA_TRY(cudaGetDeviceProperties(&device_prop, 0));

    if (cugraph::test::g_perf) {
      RAFT_CUDA_TRY(cudaDeviceSynchronize());  // for consistent performance measurement
      hr_timer.start("Louvain");
    }

-    if (device_prop.major < 7) {
-      EXPECT_THROW(louvain(graph_view,
-                           edge_weight_view,
-                           graph_view.local_vertex_partition_range_size(),
-                           louvain_usecase.max_level_,
-                           louvain_usecase.threshold_,
-                           louvain_usecase.resolution_,
-                           louvain_usecase.check_correctness_,
-                           louvain_usecase.expected_level_,
-                           louvain_usecase.expected_modularity_),
-                   cugraph::logic_error);
-    } else {
-      louvain(graph_view,
-              edge_weight_view,
-              graph_view.local_vertex_partition_range_size(),
-              louvain_usecase.max_level_,
-              louvain_usecase.threshold_,
-              louvain_usecase.resolution_,
-              louvain_usecase.check_correctness_,
-              louvain_usecase.expected_level_,
-              louvain_usecase.expected_modularity_);
-    }
+    louvain(graph_view,
+            edge_weight_view,
+            graph_view.local_vertex_partition_range_size(),
+            louvain_usecase.max_level_,
+            louvain_usecase.threshold_,
+            louvain_usecase.resolution_,
+            louvain_usecase.check_correctness_,
+            louvain_usecase.expected_level_,
+            louvain_usecase.expected_modularity_);

    if (cugraph::test::g_perf) {
      RAFT_CUDA_TRY(cudaDeviceSynchronize());  // for consistent performance measurement