Merge pull request #78 from rapidsai/branch-24.12
Forward-merge branch-24.12 into branch-25.02
Showing 5 changed files with 163 additions and 46 deletions.
@@ -0,0 +1,77 @@
// Copyright (c) 2020-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "nvml_wrap.h"

#if CUDA_VERSION >= 12030
#include <dlfcn.h>
#include <mutex>
#include <stdio.h>

namespace {

void* nvml_handle = nullptr;
std::mutex nvml_mutex;
bool nvml_loaded = false;

bool LoadNvmlLibrary()
{
  nvml_handle = dlopen("libnvidia-ml.so.1", RTLD_NOW);
  if (!nvml_handle) {
    nvml_handle = dlopen("libnvidia-ml.so", RTLD_NOW);
    if (!nvml_handle) {
      fprintf(stderr, "Failed to load NVML library: %s\n", dlerror());
      return false;
    }
  }
  return true;
}

template <typename T>
T LoadNvmlSymbol(const char* name)
{
  void* symbol = dlsym(nvml_handle, name);
  if (!symbol) { return nullptr; }
  return reinterpret_cast<T>(symbol);
}

}  // namespace

// Global function pointers
nvmlDeviceGetHandleByIndexFunc nvmlDeviceGetHandleByIndexPtr = nullptr;
nvmlDeviceGetGpuFabricInfoFunc nvmlDeviceGetGpuFabricInfoPtr = nullptr;

// Ensure NVML is loaded and symbols are initialized
bool NvmlFabricSymbolLoaded()
{
  std::lock_guard<std::mutex> lock(nvml_mutex);
  if (nvml_loaded) {
    return true;  // Already loaded
  }

  if (LoadNvmlLibrary()) {
    nvmlDeviceGetHandleByIndexPtr =
      LoadNvmlSymbol<nvmlDeviceGetHandleByIndexFunc>("nvmlDeviceGetHandleByIndex");
    nvmlDeviceGetGpuFabricInfoPtr =
      LoadNvmlSymbol<nvmlDeviceGetGpuFabricInfoFunc>("nvmlDeviceGetGpuFabricInfo");

    if (!nvmlDeviceGetHandleByIndexPtr || !nvmlDeviceGetGpuFabricInfoPtr) {
      dlclose(nvml_handle);
      nvml_handle = nullptr;
    } else {
      nvml_loaded = true;
    }
  }
  return nvml_loaded;
}
#endif  // CUDA_VERSION >= 12030
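For context, a caller-side sketch (not part of this diff) of how the lazily loaded pointers might be used. The helper name print_fabric_status is hypothetical, and the sketch assumes NVML has already been initialized elsewhere in the process (for example via nvmlInit_v2, which this wrapper does not load):

// Hypothetical usage sketch, not part of this commit.
// Assumes NVML has already been initialized elsewhere in the process.
#include "nvml_wrap.h"
#include <stdio.h>

#if CUDA_VERSION >= 12030
void print_fabric_status(unsigned int device_index)
{
  // Make sure the library and both symbols were resolved.
  if (!NvmlFabricSymbolLoaded()) {
    printf("NVML fabric symbols unavailable\n");
    return;
  }

  nvmlDevice_t device;
  if (nvmlDeviceGetHandleByIndexPtr(device_index, &device) != NVML_SUCCESS) {
    printf("Failed to get device handle for index %u\n", device_index);
    return;
  }

  nvmlGpuFabricInfo_t info{};
  if (nvmlDeviceGetGpuFabricInfoPtr(device, &info) == NVML_SUCCESS) {
    printf("GPU %u: fabric info query succeeded\n", device_index);
  } else {
    printf("GPU %u: fabric info query failed\n", device_index);
  }
}
#endif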
@@ -0,0 +1,27 @@
// Copyright (c) 2020-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <cuda.h>

#if CUDA_VERSION >= 12030
#include <nvml.h>

bool NvmlFabricSymbolLoaded();

typedef nvmlReturn_t (*nvmlDeviceGetHandleByIndexFunc)(unsigned int, nvmlDevice_t*);
typedef nvmlReturn_t (*nvmlDeviceGetGpuFabricInfoFunc)(nvmlDevice_t, nvmlGpuFabricInfo_t*);

extern nvmlDeviceGetHandleByIndexFunc nvmlDeviceGetHandleByIndexPtr;
extern nvmlDeviceGetGpuFabricInfoFunc nvmlDeviceGetGpuFabricInfoPtr;
#endif  // CUDA_VERSION >= 12030
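Because everything in this header is guarded by CUDA_VERSION >= 12030, callers built against older CUDA headers see neither the typedefs nor the extern pointers and need their own fallback at the call site. A minimal sketch of that pattern (the helper name fabric_available is hypothetical and not part of this change):

// Hypothetical caller-side guard, not part of this commit: degrade
// gracefully when the wrapper is compiled out on CUDA < 12.3.
#include "nvml_wrap.h"

bool fabric_available()
{
#if CUDA_VERSION >= 12030
  return NvmlFabricSymbolLoaded();
#else
  return false;  // fabric info queries are unavailable with older CUDA headers
#endif
}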