From ae34b8f2545316c9cb03e608b20600906f42198f Mon Sep 17 00:00:00 2001 From: Vyas Ramasubramani Date: Mon, 18 Nov 2024 21:22:16 +0000 Subject: [PATCH 01/32] Remove old loggers --- cpp/include/cuml/common/logger.hpp | 234 ----------------------------- cpp/src/common/logger.cpp | 120 --------------- 2 files changed, 354 deletions(-) delete mode 100644 cpp/include/cuml/common/logger.hpp delete mode 100644 cpp/src/common/logger.cpp diff --git a/cpp/include/cuml/common/logger.hpp b/cpp/include/cuml/common/logger.hpp deleted file mode 100644 index 161d881087..0000000000 --- a/cpp/include/cuml/common/logger.hpp +++ /dev/null @@ -1,234 +0,0 @@ -/* - * Copyright (c) 2020-2024, NVIDIA CORPORATION. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#pragma once - -#include - -#include - -#include -#include -#include -#include - -namespace spdlog { -class logger; -namespace sinks { -template -class CallbackSink; -using callback_sink_mt = CallbackSink; -}; // namespace sinks -}; // namespace spdlog - -namespace ML { - -/** - * @defgroup CStringFormat Expand a C-style format string - * - * @brief Expands C-style formatted string into std::string - * - * @param[in] fmt format string - * @param[in] vl respective values for each of format modifiers in the string - * - * @return the expanded `std::string` - * - * @{ - */ -std::string format(const char* fmt, va_list& vl); -std::string format(const char* fmt, ...); -/** @} */ - -/** - * @brief The main Logging class for cuML library. - * - * This class acts as a thin wrapper over the underlying `spdlog` interface. The - * design is done in this way in order to avoid us having to also ship `spdlog` - * header files in our installation. - * - * @todo This currently only supports logging to stdout. Need to add support in - * future to add custom loggers as well [Issue #2046] - */ -class Logger { - public: - /** - * @brief Singleton method to get the underlying logger object - * - * @return the singleton logger object - */ - static Logger& get(); - - /** - * @brief Set the logging level. - * - * Only messages with level equal or above this will be printed - * - * @param[in] level logging level - * - * @note The log level will actually be set only if the input is within the - * range [CUML_LEVEL_TRACE, CUML_LEVEL_OFF]. If it is not, then it'll - * be ignored. See documentation of decisiontree for how this gets used - */ - void setLevel(int level); - - /** - * @brief Set the logging pattern - * - * @param[in] pattern the pattern to be set. 
Refer this link - * https://github.com/gabime/spdlog/wiki/3.-Custom-formatting - * to know the right syntax of this pattern - */ - void setPattern(const std::string& pattern); - - /** - * @brief Register a callback function to be run in place of usual log call - * - * @param[in] callback the function to be run on all logged messages - */ - void setCallback(void (*callback)(int lvl, const char* msg)); - - /** - * @brief Register a flush function compatible with the registered callback - * - * @param[in] flush the function to use when flushing logs - */ - void setFlush(void (*flush)()); - - /** - * @brief Tells whether messages will be logged for the given log level - * - * @param[in] level log level to be checked for - * @return true if messages will be logged for this level, else false - */ - bool shouldLogFor(int level) const; - - /** - * @brief Query for the current log level - * - * @return the current log level - */ - int getLevel() const; - - /** - * @brief Get the current logging pattern - * @return the pattern - */ - std::string getPattern() const { return currPattern; } - - /** - * @brief Main logging method - * - * @param[in] level logging level of this message - * @param[in] fmt C-like format string, followed by respective params - */ - void log(int level, const char* fmt, ...); - - /** - * @brief Flush logs by calling flush on underlying logger - */ - void flush(); - - private: - Logger(); - ~Logger() {} - - std::shared_ptr sink; - std::shared_ptr logger; - std::string currPattern; - static const std::string DefaultPattern; -}; // class Logger - -/** - * @brief RAII based pattern setter for Logger class - * - * @code{.cpp} - * { - * PatternSetter _("%l -- %v"); - * CUML_LOG_INFO("Test message\n"); - * } - * @endcode - */ -class PatternSetter { - public: - /** - * @brief Set the pattern for the rest of the log messages - * @param[in] pattern pattern to be set - */ - PatternSetter(const std::string& pattern = "%v"); - - /** - * @brief This will restore the 
previous pattern that was active during the - * moment this object was created - */ - ~PatternSetter(); - - private: - std::string prevPattern; -}; // class PatternSetter - -/** - * @defgroup LoggerMacros Helper macros for dealing with logging - * @{ - */ -#if (CUML_ACTIVE_LEVEL >= CUML_LEVEL_TRACE) -#define CUML_LOG_TRACE(fmt, ...) \ - do { \ - std::stringstream ss; \ - ss << ML::format("%s:%d ", __FILE__, __LINE__); \ - ss << ML::format(fmt, ##__VA_ARGS__); \ - ML::Logger::get().log(CUML_LEVEL_TRACE, ss.str().c_str()); \ - } while (0) -#else -#define CUML_LOG_TRACE(fmt, ...) void(0) -#endif - -#if (CUML_ACTIVE_LEVEL >= CUML_LEVEL_DEBUG) -#define CUML_LOG_DEBUG(fmt, ...) \ - do { \ - std::stringstream ss; \ - ss << ML::format("%s:%d ", __FILE__, __LINE__); \ - ss << ML::format(fmt, ##__VA_ARGS__); \ - ML::Logger::get().log(CUML_LEVEL_DEBUG, ss.str().c_str()); \ - } while (0) -#else -#define CUML_LOG_DEBUG(fmt, ...) void(0) -#endif - -#if (CUML_ACTIVE_LEVEL >= CUML_LEVEL_INFO) -#define CUML_LOG_INFO(fmt, ...) ML::Logger::get().log(CUML_LEVEL_INFO, fmt, ##__VA_ARGS__) -#else -#define CUML_LOG_INFO(fmt, ...) void(0) -#endif - -#if (CUML_ACTIVE_LEVEL >= CUML_LEVEL_WARN) -#define CUML_LOG_WARN(fmt, ...) ML::Logger::get().log(CUML_LEVEL_WARN, fmt, ##__VA_ARGS__) -#else -#define CUML_LOG_WARN(fmt, ...) void(0) -#endif - -#if (CUML_ACTIVE_LEVEL >= CUML_LEVEL_ERROR) -#define CUML_LOG_ERROR(fmt, ...) ML::Logger::get().log(CUML_LEVEL_ERROR, fmt, ##__VA_ARGS__) -#else -#define CUML_LOG_ERROR(fmt, ...) void(0) -#endif - -#if (CUML_ACTIVE_LEVEL >= CUML_LEVEL_CRITICAL) -#define CUML_LOG_CRITICAL(fmt, ...) ML::Logger::get().log(CUML_LEVEL_CRITICAL, fmt, ##__VA_ARGS__) -#else -#define CUML_LOG_CRITICAL(fmt, ...) 
void(0) -#endif -/** @} */ - -}; // namespace ML diff --git a/cpp/src/common/logger.cpp b/cpp/src/common/logger.cpp deleted file mode 100644 index b39a59e94f..0000000000 --- a/cpp/src/common/logger.cpp +++ /dev/null @@ -1,120 +0,0 @@ -/* - * Copyright (c) 2020-2024, NVIDIA CORPORATION. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#define SPDLOG_HEADER_ONLY -#include -#include - -#include // NOLINT -#include // NOLINT - -#include -#include - -namespace ML { - -std::string format(const char* fmt, va_list& vl) -{ - char buf[4096]; - vsnprintf(buf, sizeof(buf), fmt, vl); - return std::string(buf); -} - -std::string format(const char* fmt, ...) 
-{ - va_list vl; - va_start(vl, fmt); - std::string str = format(fmt, vl); - va_end(vl); - return str; -} - -int convert_level_to_spdlog(int level) -{ - level = std::max(CUML_LEVEL_OFF, std::min(CUML_LEVEL_TRACE, level)); - return CUML_LEVEL_TRACE - level; -} - -const std::string Logger::DefaultPattern("[%L] [%H:%M:%S.%f] %v"); - -Logger& Logger::get() -{ - static Logger logger; - return logger; -} - -Logger::Logger() - : sink{std::make_shared()}, - logger{std::make_shared("cuml", sink)}, - currPattern() -{ - setPattern(DefaultPattern); - setLevel(CUML_LEVEL_INFO); -} - -void Logger::setLevel(int level) -{ - level = convert_level_to_spdlog(level); - logger->set_level(static_cast(level)); -} - -void Logger::setPattern(const std::string& pattern) -{ - currPattern = pattern; - logger->set_pattern(pattern); -} - -void Logger::setCallback(spdlog::sinks::LogCallback callback) { sink->set_callback(callback); } - -void Logger::setFlush(void (*flush)()) { sink->set_flush(flush); } - -bool Logger::shouldLogFor(int level) const -{ - level = convert_level_to_spdlog(level); - auto level_e = static_cast(level); - return logger->should_log(level_e); -} - -int Logger::getLevel() const -{ - auto level_e = logger->level(); - return CUML_LEVEL_TRACE - static_cast(level_e); -} - -void Logger::log(int level, const char* fmt, ...) 
-{ - level = convert_level_to_spdlog(level); - auto level_e = static_cast(level); - // explicit check to make sure that we only expand messages when required - if (logger->should_log(level_e)) { - va_list vl; - va_start(vl, fmt); - auto msg = format(fmt, vl); - va_end(vl); - logger->log(level_e, msg); - } -} - -void Logger::flush() { logger->flush(); } - -PatternSetter::PatternSetter(const std::string& pattern) : prevPattern() -{ - prevPattern = Logger::get().getPattern(); - Logger::get().setPattern(pattern); -} - -PatternSetter::~PatternSetter() { Logger::get().setPattern(prevPattern); } - -} // namespace ML From 0db46a16c0a06f10f5728c9f178057ded01530ed Mon Sep 17 00:00:00 2001 From: Vyas Ramasubramani Date: Mon, 18 Nov 2024 22:32:50 +0000 Subject: [PATCH 02/32] Switch over to new logger and update all code to use the new enums (still not fully compiling due to missing logger functions) --- cpp/CMakeLists.txt | 17 ++++-- cpp/bench/sg/svc.cu | 5 +- cpp/bench/sg/svr.cu | 2 +- cpp/examples/dbscan/dbscan_example.cpp | 2 +- cpp/include/cuml/cluster/dbscan.hpp | 10 ++-- cpp/include/cuml/cluster/kmeans.hpp | 2 - cpp/include/cuml/common/log_levels.hpp | 37 ------------- cpp/include/cuml/common/utils.hpp | 2 +- cpp/include/cuml/ensemble/randomforest.hpp | 30 +++++------ cpp/include/cuml/linear_model/qn.h | 8 +-- cpp/include/cuml/manifold/tsne.h | 2 +- cpp/include/cuml/manifold/umapparams.h | 2 +- cpp/include/cuml/svm/linear.hpp | 4 +- cpp/include/cuml/svm/svc.hpp | 8 +-- cpp/include/cuml/svm/svm_api.h | 4 +- cpp/include/cuml/svm/svm_parameter.h | 12 +++-- cpp/src/dbscan/dbscan.cu | 8 +-- cpp/src/dbscan/dbscan.cuh | 4 +- cpp/src/dbscan/dbscan_api.cpp | 6 +-- cpp/src/decisiontree/decisiontree.cuh | 4 +- cpp/src/glm/qn/qn_solvers.cuh | 10 ++-- cpp/src/randomforest/randomforest.cu | 40 +++++++------- cpp/src/randomforest/randomforest.cuh | 12 ++--- cpp/src/solver/lars.cu | 4 +- cpp/src/solver/lars_impl.cuh | 16 +++--- cpp/src/svm/smosolver.h | 2 +- cpp/src/svm/svc.cu | 2 +- 
cpp/src/svm/svm_api.cpp | 4 +- cpp/src/svm/workingset.cuh | 8 +-- cpp/src/tsne/tsne_runner.cuh | 2 +- cpp/src/tsne/utils.cuh | 6 +-- cpp/src/umap/fuzzy_simpl_set/naive.cuh | 4 +- cpp/src/umap/runner.cuh | 14 ++--- cpp/src/umap/simpl_set_embed/algo.cuh | 2 +- cpp/src/umap/supervised.cuh | 4 +- cpp/test/sg/genetic/evolution_test.cu | 2 +- cpp/test/sg/genetic/program_test.cu | 2 +- cpp/test/sg/hdbscan_test.cu | 6 +-- cpp/test/sg/lars_test.cu | 22 ++++---- cpp/test/sg/logger.cpp | 32 +++++------ cpp/test/sg/quasi_newton.cu | 62 +++++++++++----------- cpp/test/sg/svc_test.cu | 18 +++---- cpp/test/sg/tsne_test.cu | 2 +- 43 files changed, 212 insertions(+), 233 deletions(-) delete mode 100644 cpp/include/cuml/common/log_levels.hpp diff --git a/cpp/CMakeLists.txt b/cpp/CMakeLists.txt index e176f802bf..b479962b1f 100644 --- a/cpp/CMakeLists.txt +++ b/cpp/CMakeLists.txt @@ -220,6 +220,16 @@ endif() rapids_cpm_init() rapids_cmake_install_lib_dir(lib_dir) +# Not using rapids-cmake since we never want to find, always download. 
+CPMAddPackage( + NAME rapids_logger GITHUB_REPOSITORY vyasr/rmm GIT_SHALLOW TRUE SOURCE_SUBDIR rapids_logger + GIT_TAG feat/logger +) +rapids_make_logger( + ML VISIBILITY_MACRO "__attribute__((visibility(\"default\")))" EXPORT_SET cuml-exports LOGGER_HEADER_DIR include/cuml/common/ LOGGER_MACRO_PREFIX CUML LOGGER_TARGET cuml_logger + SUPPORTS_LOGGING +) + if(BUILD_CUML_TESTS OR BUILD_PRIMS_TESTS) find_package(Threads) endif() @@ -291,8 +301,7 @@ if(BUILD_CUML_CPP_LIBRARY) # single GPU components # common components - add_library(${CUML_CPP_TARGET} - src/common/logger.cpp) + add_library(${CUML_CPP_TARGET}) if (CUML_ENABLE_GPU) target_compile_definitions(${CUML_CPP_TARGET} PUBLIC CUML_ENABLE_GPU) endif() @@ -628,8 +637,10 @@ if(BUILD_CUML_CPP_LIBRARY) target_link_libraries(${CUML_CPP_TARGET} PUBLIC rmm::rmm rmm::rmm_logger ${CUVS_LIB} ${_cuml_cpp_public_libs} + cuml_logger PRIVATE ${_cuml_cpp_private_libs} - rmm::rmm_logger_impl + rmm::rmm_logger_impl + cuml_logger_impl ) # If we export the libdmlc symbols, they can lead to weird crashes with other diff --git a/cpp/bench/sg/svc.cu b/cpp/bench/sg/svc.cu index 8ddd8be441..966cf9f0da 100644 --- a/cpp/bench/sg/svc.cu +++ b/cpp/bench/sg/svc.cu @@ -100,8 +100,9 @@ std::vector> getInputs() p.blobs.seed = 12345ULL; // SvmParameter{C, cache_size, max_iter, nochange_steps, tol, verbosity}) - p.svm_param = ML::SVM::SvmParameter{1, 200, 100, 100, 1e-3, CUML_LEVEL_INFO, 0, ML::SVM::C_SVC}; - p.model = ML::SVM::SvmModel{0, 0, 0, nullptr, {}, nullptr, 0, nullptr}; + p.svm_param = + ML::SVM::SvmParameter{1, 200, 100, 100, 1e-3, ML::level_enum::info, 0, ML::SVM::C_SVC}; + p.model = ML::SVM::SvmModel{0, 0, 0, nullptr, {}, nullptr, 0, nullptr}; std::vector rowcols = {{50000, 2, 2}, {2048, 100000, 2}, {50000, 1000, 2}}; diff --git a/cpp/bench/sg/svr.cu b/cpp/bench/sg/svr.cu index c061e53b1f..79500a8625 100644 --- a/cpp/bench/sg/svr.cu +++ b/cpp/bench/sg/svr.cu @@ -102,7 +102,7 @@ std::vector> getInputs() // SvmParameter{C, cache_size, 
max_iter, nochange_steps, tol, verbosity, // epsilon, svmType}) p.svm_param = - ML::SVM::SvmParameter{1, 200, 200, 100, 1e-3, CUML_LEVEL_INFO, 0.1, ML::SVM::EPSILON_SVR}; + ML::SVM::SvmParameter{1, 200, 200, 100, 1e-3, ML::level_enum::info, 0.1, ML::SVM::EPSILON_SVR}; p.model = new ML::SVM::SvmModel{0, 0, 0, 0}; std::vector rowcols = {{50000, 2, 2}, {1024, 10000, 10}, {3000, 200, 200}}; diff --git a/cpp/examples/dbscan/dbscan_example.cpp b/cpp/examples/dbscan/dbscan_example.cpp index 3ba367cbdc..4fe5653cab 100644 --- a/cpp/examples/dbscan/dbscan_example.cpp +++ b/cpp/examples/dbscan/dbscan_example.cpp @@ -209,7 +209,7 @@ int main(int argc, char* argv[]) nullptr, max_bytes_per_batch, ML::Dbscan::EpsNnMethod::BRUTE_FORCE, - false); + ML::level_enum::off); CUDA_RT_CALL(cudaMemcpyAsync( h_labels.data(), d_labels, nRows * sizeof(int), cudaMemcpyDeviceToHost, stream)); CUDA_RT_CALL(cudaStreamSynchronize(stream)); diff --git a/cpp/include/cuml/cluster/dbscan.hpp b/cpp/include/cuml/cluster/dbscan.hpp index d691452db2..8a2937fcee 100644 --- a/cpp/include/cuml/cluster/dbscan.hpp +++ b/cpp/include/cuml/cluster/dbscan.hpp @@ -16,7 +16,7 @@ #pragma once -#include +#include #include @@ -73,7 +73,7 @@ void fit(const raft::handle_t& handle, float* sample_weight = nullptr, size_t max_bytes_per_batch = 0, EpsNnMethod eps_nn_method = BRUTE_FORCE, - int verbosity = CUML_LEVEL_INFO, + level_enum verbosity = ML::level_enum::info, bool opg = false); void fit(const raft::handle_t& handle, double* input, @@ -87,7 +87,7 @@ void fit(const raft::handle_t& handle, double* sample_weight = nullptr, size_t max_bytes_per_batch = 0, EpsNnMethod eps_nn_method = BRUTE_FORCE, - int verbosity = CUML_LEVEL_INFO, + level_enum verbosity = ML::level_enum::info, bool opg = false); void fit(const raft::handle_t& handle, @@ -102,7 +102,7 @@ void fit(const raft::handle_t& handle, float* sample_weight = nullptr, size_t max_bytes_per_batch = 0, EpsNnMethod eps_nn_method = BRUTE_FORCE, - int verbosity = 
CUML_LEVEL_INFO, + level_enum verbosity = ML::level_enum::info, bool opg = false); void fit(const raft::handle_t& handle, double* input, @@ -116,7 +116,7 @@ void fit(const raft::handle_t& handle, double* sample_weight = nullptr, size_t max_bytes_per_batch = 0, EpsNnMethod eps_nn_method = BRUTE_FORCE, - int verbosity = CUML_LEVEL_INFO, + level_enum verbosity = ML::level_enum::info, bool opg = false); /** @} */ diff --git a/cpp/include/cuml/cluster/kmeans.hpp b/cpp/include/cuml/cluster/kmeans.hpp index f075e49843..95761998c2 100644 --- a/cpp/include/cuml/cluster/kmeans.hpp +++ b/cpp/include/cuml/cluster/kmeans.hpp @@ -16,8 +16,6 @@ #pragma once -#include - #include namespace raft { diff --git a/cpp/include/cuml/common/log_levels.hpp b/cpp/include/cuml/common/log_levels.hpp deleted file mode 100644 index 2029f2aeac..0000000000 --- a/cpp/include/cuml/common/log_levels.hpp +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright (c) 2021, NVIDIA CORPORATION. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#pragma once - -/** - * @defgroup CumlLogLevels Logging levels used in cuML - * - * @note exactly match the corresponding ones (but reverse in terms of value) - * in spdlog for wrapping purposes - * - * @{ - */ -#define CUML_LEVEL_TRACE 6 -#define CUML_LEVEL_DEBUG 5 -#define CUML_LEVEL_INFO 4 -#define CUML_LEVEL_WARN 3 -#define CUML_LEVEL_ERROR 2 -#define CUML_LEVEL_CRITICAL 1 -#define CUML_LEVEL_OFF 0 -/** @} */ - -#if !defined(CUML_ACTIVE_LEVEL) -#define CUML_ACTIVE_LEVEL CUML_LEVEL_DEBUG -#endif diff --git a/cpp/include/cuml/common/utils.hpp b/cpp/include/cuml/common/utils.hpp index 8ac9d93a1f..9a5f08cc62 100644 --- a/cpp/include/cuml/common/utils.hpp +++ b/cpp/include/cuml/common/utils.hpp @@ -16,7 +16,7 @@ #pragma once -#include "logger.hpp" +#include #include #include diff --git a/cpp/include/cuml/ensemble/randomforest.hpp b/cpp/include/cuml/ensemble/randomforest.hpp index 2df7929cd5..a0351c0dab 100644 --- a/cpp/include/cuml/ensemble/randomforest.hpp +++ b/cpp/include/cuml/ensemble/randomforest.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2023, NVIDIA CORPORATION. + * Copyright (c) 2019-2024, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -101,13 +101,13 @@ struct RF_params { void preprocess_labels(int n_rows, std::vector& labels, std::map& labels_map, - int verbosity = CUML_LEVEL_INFO); + level_enum verbosity = ML::level_enum::info); /* Revert preprocessing effect, if needed. 
*/ void postprocess_labels(int n_rows, std::vector& labels, std::map& labels_map, - int verbosity = CUML_LEVEL_INFO); + level_enum verbosity = ML::level_enum::info); template struct RandomForestMetaData { @@ -147,7 +147,7 @@ void fit(const raft::handle_t& user_handle, int* labels, int n_unique_labels, RF_params rf_params, - int verbosity = CUML_LEVEL_INFO); + level_enum verbosity = ML::level_enum::info); void fit(const raft::handle_t& user_handle, RandomForestClassifierD*& forest, double* input, @@ -156,7 +156,7 @@ void fit(const raft::handle_t& user_handle, int* labels, int n_unique_labels, RF_params rf_params, - int verbosity = CUML_LEVEL_INFO); + level_enum verbosity = ML::level_enum::info); void predict(const raft::handle_t& user_handle, const RandomForestClassifierF* forest, @@ -164,27 +164,27 @@ void predict(const raft::handle_t& user_handle, int n_rows, int n_cols, int* predictions, - int verbosity = CUML_LEVEL_INFO); + level_enum verbosity = ML::level_enum::info); void predict(const raft::handle_t& user_handle, const RandomForestClassifierD* forest, const double* input, int n_rows, int n_cols, int* predictions, - int verbosity = CUML_LEVEL_INFO); + level_enum verbosity = ML::level_enum::info); RF_metrics score(const raft::handle_t& user_handle, const RandomForestClassifierF* forest, const int* ref_labels, int n_rows, const int* predictions, - int verbosity = CUML_LEVEL_INFO); + level_enum verbosity = ML::level_enum::info); RF_metrics score(const raft::handle_t& user_handle, const RandomForestClassifierD* forest, const int* ref_labels, int n_rows, const int* predictions, - int verbosity = CUML_LEVEL_INFO); + level_enum verbosity = ML::level_enum::info); RF_params set_rf_params(int max_depth, int max_leaves, @@ -213,7 +213,7 @@ void fit(const raft::handle_t& user_handle, int n_cols, float* labels, RF_params rf_params, - int verbosity = CUML_LEVEL_INFO); + level_enum verbosity = ML::level_enum::info); void fit(const raft::handle_t& user_handle, 
RandomForestRegressorD*& forest, double* input, @@ -221,7 +221,7 @@ void fit(const raft::handle_t& user_handle, int n_cols, double* labels, RF_params rf_params, - int verbosity = CUML_LEVEL_INFO); + level_enum verbosity = ML::level_enum::info); void predict(const raft::handle_t& user_handle, const RandomForestRegressorF* forest, @@ -229,25 +229,25 @@ void predict(const raft::handle_t& user_handle, int n_rows, int n_cols, float* predictions, - int verbosity = CUML_LEVEL_INFO); + level_enum verbosity = ML::level_enum::info); void predict(const raft::handle_t& user_handle, const RandomForestRegressorD* forest, const double* input, int n_rows, int n_cols, double* predictions, - int verbosity = CUML_LEVEL_INFO); + level_enum verbosity = ML::level_enum::info); RF_metrics score(const raft::handle_t& user_handle, const RandomForestRegressorF* forest, const float* ref_labels, int n_rows, const float* predictions, - int verbosity = CUML_LEVEL_INFO); + level_enum verbosity = ML::level_enum::info); RF_metrics score(const raft::handle_t& user_handle, const RandomForestRegressorD* forest, const double* ref_labels, int n_rows, const double* predictions, - int verbosity = CUML_LEVEL_INFO); + level_enum verbosity = ML::level_enum::info); }; // namespace ML diff --git a/cpp/include/cuml/linear_model/qn.h b/cpp/include/cuml/linear_model/qn.h index f3b1c63604..d19df0773a 100644 --- a/cpp/include/cuml/linear_model/qn.h +++ b/cpp/include/cuml/linear_model/qn.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021-2022, NVIDIA CORPORATION. + * Copyright (c) 2021-2024, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -15,6 +15,8 @@ */ #pragma once +#include + #include #ifdef __cplusplus @@ -82,7 +84,7 @@ struct qn_params { /** Number of vectors approximating the hessian (l-bfgs). */ int lbfgs_memory; /** Triggers extra output when greater than zero. 
*/ - int verbose; + level_enum verbose; /** Whether to fit the bias term. */ bool fit_intercept; /** @@ -105,7 +107,7 @@ struct qn_params { max_iter(1000), linesearch_max_iter(50), lbfgs_memory(5), - verbose(0), + verbose(level_enum::off), fit_intercept(true), penalty_normalized(true) { diff --git a/cpp/include/cuml/manifold/tsne.h b/cpp/include/cuml/manifold/tsne.h index 8c658b3c69..a87c9a85c0 100644 --- a/cpp/include/cuml/manifold/tsne.h +++ b/cpp/include/cuml/manifold/tsne.h @@ -94,7 +94,7 @@ struct TSNEParams { long long random_state = -1; // verbosity level for logging messages during execution - int verbosity = CUML_LEVEL_INFO; + level_enum verbosity = ML::level_enum::info; // Embedding initializer algorithm TSNE_INIT init = TSNE_INIT::RANDOM; diff --git a/cpp/include/cuml/manifold/umapparams.h b/cpp/include/cuml/manifold/umapparams.h index a337c6cf64..bb21abae2b 100644 --- a/cpp/include/cuml/manifold/umapparams.h +++ b/cpp/include/cuml/manifold/umapparams.h @@ -117,7 +117,7 @@ class UMAPParams { /** * Control logging level during algorithm execution */ - int verbosity = CUML_LEVEL_INFO; + level_enum verbosity = level_enum::info; /** * More specific parameters controlling the embedding. If None these diff --git a/cpp/include/cuml/svm/linear.hpp b/cpp/include/cuml/svm/linear.hpp index 9cefdcc883..27e97a4535 100644 --- a/cpp/include/cuml/svm/linear.hpp +++ b/cpp/include/cuml/svm/linear.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021-2023, NVIDIA CORPORATION. + * Copyright (c) 2021-2024, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -64,7 +64,7 @@ struct LinearSVMParams { */ int lbfgs_memory = 5; /** Triggers extra output when greater than zero. */ - int verbose = 0; + level_enum verbose = level_enum::off; /** * The constant scaling factor of the main term in the loss function. * (You can also think of that as the inverse factor of the penalty term). 
diff --git a/cpp/include/cuml/svm/svc.hpp b/cpp/include/cuml/svm/svc.hpp index 426a049483..3c2a0366e7 100644 --- a/cpp/include/cuml/svm/svc.hpp +++ b/cpp/include/cuml/svm/svc.hpp @@ -236,10 +236,10 @@ class SVC { math_t tol = 1.0e-3, raft::distance::kernels::KernelParams kernel_params = raft::distance::kernels::KernelParams{raft::distance::kernels::LINEAR, 3, 1, 0}, - math_t cache_size = 200, - int max_iter = -1, - int nochange_steps = 1000, - int verbosity = CUML_LEVEL_INFO); + math_t cache_size = 200, + int max_iter = -1, + int nochange_steps = 1000, + level_enum verbosity = ML::level_enum::info); ~SVC(); diff --git a/cpp/include/cuml/svm/svm_api.h b/cpp/include/cuml/svm/svm_api.h index 5da03f903a..8851e2ad27 100644 --- a/cpp/include/cuml/svm/svm_api.h +++ b/cpp/include/cuml/svm/svm_api.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2021, NVIDIA CORPORATION. + * Copyright (c) 2019-2024, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -43,7 +43,7 @@ typedef enum cumlSvmKernelType { LINEAR, POLYNOMIAL, RBF, TANH } cumlSvmKernelTy * @param [in] tol tolerance to stop fitting * @param [in] verbosity Fine grained control over logging of useful information * as algorithm executes. Currently passing anything greater than or equal to - * CUML_LEVEL_INFO will make it execute quietly + * ML::level_enum::info will make it execute quietly * @param [in] kernel type of kernel (LINEAR, POLYNOMIAL, RBF or TANH) * @param [in] degree of polynomial kernel (ignored by others) * @param [in] gamma multiplier in the RBF, POLYNOMIAL and TANH kernels diff --git a/cpp/include/cuml/svm/svm_parameter.h b/cpp/include/cuml/svm/svm_parameter.h index c5fc4ef2d0..66df9c13cc 100644 --- a/cpp/include/cuml/svm/svm_parameter.h +++ b/cpp/include/cuml/svm/svm_parameter.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2021, NVIDIA CORPORATION. + * Copyright (c) 2019-2024, NVIDIA CORPORATION. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -15,6 +15,8 @@ */ #pragma once +#include + namespace ML { namespace SVM { @@ -37,10 +39,10 @@ struct SvmParameter { //! maximum number of outer SMO iterations. Use -1 to let the SMO solver set //! a default value (100*n_rows). int max_iter; - int nochange_steps; //(verbosity)); } // TODO: Implement this // catch (const MLCommon::Exception& e) @@ -93,7 +93,7 @@ cumlError_t cumlDpDbscanFit(cumlHandle_t handle, NULL, max_bytes_per_batch, ML::Dbscan::EpsNnMethod::BRUTE_FORCE, - verbosity); + static_cast(verbosity)); } // TODO: Implement this // catch (const MLCommon::Exception& e) @@ -107,4 +107,4 @@ cumlError_t cumlDpDbscanFit(cumlHandle_t handle, } return status; } -} \ No newline at end of file +} diff --git a/cpp/src/decisiontree/decisiontree.cuh b/cpp/src/decisiontree/decisiontree.cuh index e5514ffa71..2bf9ac595c 100644 --- a/cpp/src/decisiontree/decisiontree.cuh +++ b/cpp/src/decisiontree/decisiontree.cuh @@ -364,9 +364,9 @@ class DecisionTree { std::size_t n_cols, DataT* predictions, int num_outputs, - int verbosity) + level_enum verbosity) { - if (verbosity >= 0) { ML::Logger::get().setLevel(verbosity); } + if (verbosity >= level_enum::off) { default_logger().set_level(verbosity); } ASSERT(is_host_ptr(rows) && is_host_ptr(predictions), "DT Error: Current impl. 
expects both input and predictions to be CPU " "pointers.\n"); diff --git a/cpp/src/glm/qn/qn_solvers.cuh b/cpp/src/glm/qn/qn_solvers.cuh index 6f43c7eef0..9fb67c34d5 100644 --- a/cpp/src/glm/qn/qn_solvers.cuh +++ b/cpp/src/glm/qn/qn_solvers.cuh @@ -148,7 +148,7 @@ inline OPT_RETCODE min_lbfgs(const LBFGSParam& param, int* k, // output iterations SimpleVec& workspace, // scratch space cudaStream_t stream, - int verbosity = 0) + level_enum verbosity = 0) { int n = x.len; const int workspace_size = lbfgs_workspace_size(param, n); @@ -179,7 +179,7 @@ inline OPT_RETCODE min_lbfgs(const LBFGSParam& param, std::vector fx_hist(param.past > 0 ? param.past : 0); *k = 0; - ML::Logger::get().setLevel(verbosity); + ML::default_logger().set_level(verbosity); CUML_LOG_DEBUG("Running L-BFGS"); // Evaluate function and compute gradient @@ -278,7 +278,7 @@ inline OPT_RETCODE min_owlqn(const LBFGSParam& param, int* k, SimpleVec& workspace, // scratch space cudaStream_t stream, - const int verbosity = 0) + const level_enum verbosity = 0) { int n = x.len; const int workspace_size = owlqn_workspace_size(param, n); @@ -305,7 +305,7 @@ inline OPT_RETCODE min_owlqn(const LBFGSParam& param, p_ws += vec_size; T* dev_scalar = p_ws; - ML::Logger::get().setLevel(verbosity); + ML::default_logger().set_level(verbosity); SimpleVec svec, yvec; // mask vectors @@ -419,7 +419,7 @@ inline int qn_minimize(const raft::handle_t& handle, LossFunction& loss, const T l1, const LBFGSParam& opt_param, - const int verbosity = 0) + const level_enum verbosity = 0) { // TODO should the worksapce allocation happen outside? 
cudaStream_t stream = handle.get_stream(); diff --git a/cpp/src/randomforest/randomforest.cu b/cpp/src/randomforest/randomforest.cu index 14e76c8f38..28c044224f 100644 --- a/cpp/src/randomforest/randomforest.cu +++ b/cpp/src/randomforest/randomforest.cu @@ -122,11 +122,11 @@ void print(const RF_metrics rf_metrics) void preprocess_labels(int n_rows, std::vector& labels, std::map& labels_map, - int verbosity) + level_enum verbosity) { std::pair::iterator, bool> ret; int n_unique_labels = 0; - ML::Logger::get().setLevel(verbosity); + ML::default_logger().set_level(verbosity); CUML_LOG_DEBUG("Preprocessing labels"); for (int i = 0; i < n_rows; i++) { @@ -149,9 +149,9 @@ void preprocess_labels(int n_rows, void postprocess_labels(int n_rows, std::vector& labels, std::map& labels_map, - int verbosity) + level_enum verbosity) { - ML::Logger::get().setLevel(verbosity); + ML::default_logger().set_level(verbosity); CUML_LOG_DEBUG("Postrocessing labels"); std::map::iterator it; int n_unique_cnt = labels_map.size(); @@ -385,10 +385,10 @@ void fit(const raft::handle_t& user_handle, int* labels, int n_unique_labels, RF_params rf_params, - int verbosity) + level_enum verbosity) { raft::common::nvtx::range fun_scope("RF::fit @randomforest.cu"); - ML::Logger::get().setLevel(verbosity); + ML::default_logger().set_level(verbosity); ASSERT(forest->trees.empty(), "Cannot fit an existing forest."); forest->trees.resize(rf_params.n_trees); forest->rf_params = rf_params; @@ -406,10 +406,10 @@ void fit(const raft::handle_t& user_handle, int* labels, int n_unique_labels, RF_params rf_params, - int verbosity) + level_enum verbosity) { raft::common::nvtx::range fun_scope("RF::fit @randomforest.cu"); - ML::Logger::get().setLevel(verbosity); + ML::default_logger().set_level(verbosity); ASSERT(forest->trees.empty(), "Cannot fit an existing forest."); forest->trees.resize(rf_params.n_trees); forest->rf_params = rf_params; @@ -440,7 +440,7 @@ void predict(const raft::handle_t& user_handle, int 
n_rows, int n_cols, int* predictions, - int verbosity) + level_enum verbosity) { ASSERT(!forest->trees.empty(), "Cannot predict! No trees in the forest."); std::shared_ptr> rf_classifier = @@ -454,7 +454,7 @@ void predict(const raft::handle_t& user_handle, int n_rows, int n_cols, int* predictions, - int verbosity) + level_enum verbosity) { ASSERT(!forest->trees.empty(), "Cannot predict! No trees in the forest."); std::shared_ptr> rf_classifier = @@ -482,7 +482,7 @@ RF_metrics score(const raft::handle_t& user_handle, const int* ref_labels, int n_rows, const int* predictions, - int verbosity) + level_enum verbosity) { RF_metrics classification_score = RandomForest::score( user_handle, ref_labels, n_rows, predictions, verbosity, RF_type::CLASSIFICATION); @@ -494,7 +494,7 @@ RF_metrics score(const raft::handle_t& user_handle, const int* ref_labels, int n_rows, const int* predictions, - int verbosity) + level_enum verbosity) { RF_metrics classification_score = RandomForest::score( user_handle, ref_labels, n_rows, predictions, verbosity, RF_type::CLASSIFICATION); @@ -575,10 +575,10 @@ void fit(const raft::handle_t& user_handle, int n_cols, float* labels, RF_params rf_params, - int verbosity) + level_enum verbosity) { raft::common::nvtx::range fun_scope("RF::fit @randomforest.cu"); - ML::Logger::get().setLevel(verbosity); + ML::default_logger().set_level(verbosity); ASSERT(forest->trees.empty(), "Cannot fit an existing forest."); forest->trees.resize(rf_params.n_trees); forest->rf_params = rf_params; @@ -595,10 +595,10 @@ void fit(const raft::handle_t& user_handle, int n_cols, double* labels, RF_params rf_params, - int verbosity) + level_enum verbosity) { raft::common::nvtx::range fun_scope("RF::fit @randomforest.cu"); - ML::Logger::get().setLevel(verbosity); + ML::default_logger().set_level(verbosity); ASSERT(forest->trees.empty(), "Cannot fit an existing forest."); forest->trees.resize(rf_params.n_trees); forest->rf_params = rf_params; @@ -628,7 +628,7 @@ void 
predict(const raft::handle_t& user_handle, int n_rows, int n_cols, float* predictions, - int verbosity) + level_enum verbosity) { std::shared_ptr> rf_regressor = std::make_shared>(forest->rf_params, RF_type::REGRESSION); @@ -641,7 +641,7 @@ void predict(const raft::handle_t& user_handle, int n_rows, int n_cols, double* predictions, - int verbosity) + level_enum verbosity) { std::shared_ptr> rf_regressor = std::make_shared>(forest->rf_params, RF_type::REGRESSION); @@ -670,7 +670,7 @@ RF_metrics score(const raft::handle_t& user_handle, const float* ref_labels, int n_rows, const float* predictions, - int verbosity) + level_enum verbosity) { RF_metrics regression_score = RandomForest::score( user_handle, ref_labels, n_rows, predictions, verbosity, RF_type::REGRESSION); @@ -683,7 +683,7 @@ RF_metrics score(const raft::handle_t& user_handle, const double* ref_labels, int n_rows, const double* predictions, - int verbosity) + level_enum verbosity) { RF_metrics regression_score = RandomForest::score( user_handle, ref_labels, n_rows, predictions, verbosity, RF_type::REGRESSION); diff --git a/cpp/src/randomforest/randomforest.cuh b/cpp/src/randomforest/randomforest.cuh index 0233db736c..a47f55b255 100644 --- a/cpp/src/randomforest/randomforest.cuh +++ b/cpp/src/randomforest/randomforest.cuh @@ -211,9 +211,9 @@ class RandomForest { int n_cols, L* predictions, const RandomForestMetaData* forest, - int verbosity) const + level_enum verbosity) const { - ML::Logger::get().setLevel(verbosity); + ML::default_logger().set_level(verbosity); this->error_checking(input, predictions, n_rows, n_cols, true); std::vector h_predictions(n_rows); cudaStream_t stream = user_handle.get_stream(); @@ -276,16 +276,16 @@ class RandomForest { const L* ref_labels, int n_rows, const L* predictions, - int verbosity, + level_enum verbosity, int rf_type = RF_type::CLASSIFICATION) { - ML::Logger::get().setLevel(verbosity); + ML::default_logger().set_level(verbosity); cudaStream_t stream = 
user_handle.get_stream(); RF_metrics stats; if (rf_type == RF_type::CLASSIFICATION) { // task classifiation: get classification metrics float accuracy = raft::stats::accuracy(predictions, ref_labels, n_rows, stream); stats = set_rf_metrics_classification(accuracy); - if (ML::Logger::get().shouldLogFor(CUML_LEVEL_DEBUG)) print(stats); + if (ML::default_logger().should_log(ML::level_enum::debug)) print(stats); /* TODO: Potentially augment RF_metrics w/ more metrics (e.g., precision, F1, etc.). For non binary classification problems (i.e., one target and > 2 labels), need avg. @@ -300,7 +300,7 @@ class RandomForest { mean_squared_error, median_abs_error); stats = set_rf_metrics_regression(mean_abs_error, mean_squared_error, median_abs_error); - if (ML::Logger::get().shouldLogFor(CUML_LEVEL_DEBUG)) print(stats); + if (ML::default_logger().should_log(ML::level_enum::debug)) print(stats); } return stats; diff --git a/cpp/src/solver/lars.cu b/cpp/src/solver/lars.cu index b7a478e098..0f6499c471 100644 --- a/cpp/src/solver/lars.cu +++ b/cpp/src/solver/lars.cu @@ -37,7 +37,7 @@ template void larsFit(const raft::handle_t& handle, float* Gram, int max_iter, float* coef_path, - int verbosity, + level_enum verbosity, int ld_X, int ld_G, float eps); @@ -54,7 +54,7 @@ template void larsFit(const raft::handle_t& handle, double* Gram, int max_iter, double* coef_path, - int verbosity, + level_enum verbosity, int ld_X, int ld_G, double eps); diff --git a/cpp/src/solver/lars_impl.cuh b/cpp/src/solver/lars_impl.cuh index 37e7c76df5..402b5bf51e 100644 --- a/cpp/src/solver/lars_impl.cuh +++ b/cpp/src/solver/lars_impl.cuh @@ -884,17 +884,17 @@ void larsFit(const raft::handle_t& handle, idx_t* active_idx, math_t* alphas, idx_t* n_active, - math_t* Gram = nullptr, - int max_iter = 500, - math_t* coef_path = nullptr, - int verbosity = 0, - idx_t ld_X = 0, - idx_t ld_G = 0, - math_t eps = -1) + math_t* Gram = nullptr, + int max_iter = 500, + math_t* coef_path = nullptr, + level_enum verbosity 
= ML::level_enum::off, + idx_t ld_X = 0, + idx_t ld_G = 0, + math_t eps = -1) { ASSERT(n_cols > 0, "Parameter n_cols: number of columns cannot be less than one"); ASSERT(n_rows > 0, "Parameter n_rows: number of rows cannot be less than one"); - ML::Logger::get().setLevel(verbosity); + ML::default_logger().set_level(verbosity); // Set default ld parameters if needed. if (ld_X == 0) ld_X = n_rows; diff --git a/cpp/src/svm/smosolver.h b/cpp/src/svm/smosolver.h index d2355d68a5..f394d7fdd7 100644 --- a/cpp/src/svm/smosolver.h +++ b/cpp/src/svm/smosolver.h @@ -87,7 +87,7 @@ class SmoSolver { f(0, stream), y_label(0, stream) { - ML::Logger::get().setLevel(param.verbosity); + ML::default_logger().set_level(param.verbosity); } void GetNonzeroDeltaAlpha(const math_t* vec, diff --git a/cpp/src/svm/svc.cu b/cpp/src/svm/svc.cu index 7c90f0214b..941de3d0cf 100644 --- a/cpp/src/svm/svc.cu +++ b/cpp/src/svm/svc.cu @@ -139,7 +139,7 @@ SVC::SVC(raft::handle_t& handle, math_t cache_size, int max_iter, int nochange_steps, - int verbosity) + level_enum verbosity) : handle(handle), param(SvmParameter{C, cache_size, max_iter, nochange_steps, tol, verbosity}), kernel_params(kernel_params) diff --git a/cpp/src/svm/svm_api.cpp b/cpp/src/svm/svm_api.cpp index 2f6f2b6efc..c9917c3356 100644 --- a/cpp/src/svm/svm_api.cpp +++ b/cpp/src/svm/svm_api.cpp @@ -54,7 +54,7 @@ cumlError_t cumlSpSvcFit(cumlHandle_t handle, param.max_iter = max_iter; param.nochange_steps = nochange_steps; param.tol = tol; - param.verbosity = verbosity; + param.verbosity = static_cast(verbosity); raft::distance::kernels::KernelParams kernel_param; kernel_param.kernel = (raft::distance::kernels::KernelType)kernel; @@ -128,7 +128,7 @@ cumlError_t cumlDpSvcFit(cumlHandle_t handle, param.max_iter = max_iter; param.nochange_steps = nochange_steps; param.tol = tol; - param.verbosity = verbosity; + param.verbosity = static_cast(verbosity); raft::distance::kernels::KernelParams kernel_param; kernel_param.kernel = 
(raft::distance::kernels::KernelType)kernel; diff --git a/cpp/src/svm/workingset.cuh b/cpp/src/svm/workingset.cuh index 318ee5e14d..5aff3f0183 100644 --- a/cpp/src/svm/workingset.cuh +++ b/cpp/src/svm/workingset.cuh @@ -83,7 +83,7 @@ inline void WorkingSet::SimpleSelect( (int)8 * sizeof(math_t), stream); - if (ML::Logger::get().shouldLogFor(CUML_LEVEL_DEBUG) && n_train < 20) { + if (ML::default_logger().should_log(ML::level_enum::debug) && n_train < 20) { std::stringstream ss; raft::print_device_vector("idx_sorted", f_idx_sorted.data(), n_train, ss); CUML_LOG_DEBUG(ss.str().c_str()); @@ -236,7 +236,7 @@ inline int WorkingSet::GatherAvailable(int n_already_selected, available, n_train, idx.data(), n_already_selected); RAFT_CUDA_TRY(cudaPeekAtLastError()); } - if (ML::Logger::get().shouldLogFor(CUML_LEVEL_DEBUG) && n_train < 20) { + if (ML::default_logger().should_log(ML::level_enum::debug) && n_train < 20) { std::stringstream ss; raft::print_device_vector("avail", available, n_train, ss); CUML_LOG_DEBUG(ss.str().c_str()); @@ -250,7 +250,7 @@ inline int WorkingSet::GatherAvailable(int n_already_selected, thrust::make_permutation_iterator(av_ptr, idx_ptr), thrust::make_permutation_iterator(av_ptr, idx_ptr + n_train), av_sorted_ptr); - if (ML::Logger::get().shouldLogFor(CUML_LEVEL_DEBUG) && n_train < 20) { + if (ML::default_logger().should_log(ML::level_enum::debug) && n_train < 20) { std::stringstream ss; raft::print_device_vector("avail_sorted", available_sorted.data(), n_train, ss); CUML_LOG_DEBUG(ss.str().c_str()); @@ -276,7 +276,7 @@ inline int WorkingSet::GatherAvailable(int n_already_selected, raft::copy( idx.data() + n_already_selected, idx_tmp.data() + n_selected - n_copy, n_copy, stream); } - if (ML::Logger::get().shouldLogFor(CUML_LEVEL_DEBUG) && n_train < 20) { + if (ML::default_logger().should_log(ML::level_enum::debug) && n_train < 20) { std::stringstream ss; raft::print_device_vector("selected", idx.data(), n_already_selected + n_copy, ss); 
CUML_LOG_DEBUG(ss.str().c_str()); diff --git a/cpp/src/tsne/tsne_runner.cuh b/cpp/src/tsne/tsne_runner.cuh index b735be0e63..2961074e87 100644 --- a/cpp/src/tsne/tsne_runner.cuh +++ b/cpp/src/tsne/tsne_runner.cuh @@ -63,7 +63,7 @@ class TSNE_runner { this->p = input.d; this->Y = input.y; - ML::Logger::get().setLevel(params.verbosity); + ML::default_logger().set_level(params.verbosity); if (params.dim > 2 and params.algorithm != TSNE_ALGORITHM::EXACT) { params.algorithm = TSNE_ALGORITHM::EXACT; CUML_LOG_WARN( diff --git a/cpp/src/tsne/utils.cuh b/cpp/src/tsne/utils.cuh index 895fe412d2..446d4c9d5b 100644 --- a/cpp/src/tsne/utils.cuh +++ b/cpp/src/tsne/utils.cuh @@ -82,20 +82,20 @@ double SymmetrizeTime = 0, DistancesTime = 0, NormalizeTime = 0, PerplexityTime // To silence warnings #define START_TIMER \ - if (ML::Logger::get().shouldLogFor(CUML_LEVEL_DEBUG)) { \ + if (ML::default_logger().should_log(ML::level_enum::debug)) { \ gettimeofday(&timecheck, NULL); \ start = (long)timecheck.tv_sec * 1000 + (long)timecheck.tv_usec / 1000; \ } #define END_TIMER(add_onto) \ - if (ML::Logger::get().shouldLogFor(CUML_LEVEL_DEBUG)) { \ + if (ML::default_logger().should_log(ML::level_enum::debug)) { \ gettimeofday(&timecheck, NULL); \ end = (long)timecheck.tv_sec * 1000 + (long)timecheck.tv_usec / 1000; \ add_onto += (end - start); \ } #define PRINT_TIMES \ - if (ML::Logger::get().shouldLogFor(CUML_LEVEL_DEBUG)) { \ + if (ML::default_logger().should_log(ML::level_enum::debug)) { \ double total = (SymmetrizeTime + DistancesTime + NormalizeTime + PerplexityTime + \ BoundingBoxKernel_time + ClearKernel1_time + TreeBuildingKernel_time + \ ClearKernel2_time + SummarizationKernel_time + SortKernel_time + \ diff --git a/cpp/src/umap/fuzzy_simpl_set/naive.cuh b/cpp/src/umap/fuzzy_simpl_set/naive.cuh index f872b80c4b..58a55a486b 100644 --- a/cpp/src/umap/fuzzy_simpl_set/naive.cuh +++ b/cpp/src/umap/fuzzy_simpl_set/naive.cuh @@ -314,7 +314,7 @@ void launcher(int n, raft::sparse::COO 
in(stream, n * n_neighbors, n, n); // check for logging in order to avoid the potentially costly `arr2Str` call! - if (ML::Logger::get().shouldLogFor(CUML_LEVEL_DEBUG)) { + if (ML::default_logger().should_log(ML::level_enum::debug)) { CUML_LOG_DEBUG("Smooth kNN Distances"); auto str = raft::arr2Str(sigmas.data(), 25, "sigmas", stream); CUML_LOG_DEBUG("%s", str.c_str()); @@ -342,7 +342,7 @@ void launcher(int n, n_neighbors); RAFT_CUDA_TRY(cudaPeekAtLastError()); - if (ML::Logger::get().shouldLogFor(CUML_LEVEL_DEBUG)) { + if (ML::default_logger().should_log(ML::level_enum::debug)) { CUML_LOG_DEBUG("Compute Membership Strength"); std::stringstream ss; ss << in; diff --git a/cpp/src/umap/runner.cuh b/cpp/src/umap/runner.cuh index 0ceeb3acaa..9f779e320b 100644 --- a/cpp/src/umap/runner.cuh +++ b/cpp/src/umap/runner.cuh @@ -102,7 +102,7 @@ void _get_graph(const raft::handle_t& handle, int k = params->n_neighbors; - ML::Logger::get().setLevel(params->verbosity); + ML::default_logger().set_level(params->verbosity); CUML_LOG_DEBUG("n_neighbors=%d", params->n_neighbors); @@ -159,7 +159,7 @@ void _get_graph_supervised(const raft::handle_t& handle, int k = params->n_neighbors; - ML::Logger::get().setLevel(params->verbosity); + ML::default_logger().set_level(params->verbosity); if (params->target_n_neighbors == -1) params->target_n_neighbors = params->n_neighbors; @@ -247,7 +247,7 @@ void _refine(const raft::handle_t& handle, value_t* embeddings) { cudaStream_t stream = handle.get_stream(); - ML::Logger::get().setLevel(params->verbosity); + ML::default_logger().set_level(params->verbosity); /** * Run simplicial set embedding to approximate low-dimensional representation @@ -263,7 +263,7 @@ void _init_and_refine(const raft::handle_t& handle, value_t* embeddings) { cudaStream_t stream = handle.get_stream(); - ML::Logger::get().setLevel(params->verbosity); + ML::default_logger().set_level(params->verbosity); // Initialize embeddings InitEmbed::run(handle, inputs.n, inputs.d, 
graph, params, embeddings, stream, params->init); @@ -282,7 +282,7 @@ void _fit(const raft::handle_t& handle, raft::common::nvtx::range fun_scope("umap::unsupervised::fit"); cudaStream_t stream = handle.get_stream(); - ML::Logger::get().setLevel(params->verbosity); + ML::default_logger().set_level(params->verbosity); UMAPAlgo::_get_graph(handle, inputs, params, graph); @@ -318,7 +318,7 @@ void _fit_supervised(const raft::handle_t& handle, raft::common::nvtx::range fun_scope("umap::supervised::fit"); cudaStream_t stream = handle.get_stream(); - ML::Logger::get().setLevel(params->verbosity); + ML::default_logger().set_level(params->verbosity); UMAPAlgo::_get_graph_supervised( handle, inputs, params, graph); @@ -360,7 +360,7 @@ void _transform(const raft::handle_t& handle, raft::common::nvtx::range fun_scope("umap::transform"); cudaStream_t stream = handle.get_stream(); - ML::Logger::get().setLevel(params->verbosity); + ML::default_logger().set_level(params->verbosity); CUML_LOG_DEBUG("Running transform"); diff --git a/cpp/src/umap/simpl_set_embed/algo.cuh b/cpp/src/umap/simpl_set_embed/algo.cuh index b6f2b5286d..0487be4b31 100644 --- a/cpp/src/umap/simpl_set_embed/algo.cuh +++ b/cpp/src/umap/simpl_set_embed/algo.cuh @@ -341,7 +341,7 @@ void launcher( make_epochs_per_sample(out.vals(), out.nnz, n_epochs, epochs_per_sample.data(), stream); - if (ML::Logger::get().shouldLogFor(CUML_LEVEL_DEBUG)) { + if (ML::default_logger().should_log(ML::level_enum::debug)) { std::stringstream ss; ss << raft::arr2Str(epochs_per_sample.data(), out.nnz, "epochs_per_sample", stream); CUML_LOG_DEBUG(ss.str().c_str()); diff --git a/cpp/src/umap/supervised.cuh b/cpp/src/umap/supervised.cuh index 21ed42f157..5918e37fc0 100644 --- a/cpp/src/umap/supervised.cuh +++ b/cpp/src/umap/supervised.cuh @@ -301,7 +301,7 @@ void perform_general_intersection(const raft::handle_t& handle, handle, y_inputs, y_inputs, knn_graph, params->target_n_neighbors, params, stream); 
RAFT_CUDA_TRY(cudaPeekAtLastError()); - if (ML::Logger::get().shouldLogFor(CUML_LEVEL_DEBUG)) { + if (ML::default_logger().should_log(ML::level_enum::debug)) { CUML_LOG_DEBUG("Target kNN Graph"); std::stringstream ss1, ss2; ss1 << raft::arr2Str( @@ -326,7 +326,7 @@ void perform_general_intersection(const raft::handle_t& handle, stream); RAFT_CUDA_TRY(cudaPeekAtLastError()); - if (ML::Logger::get().shouldLogFor(CUML_LEVEL_DEBUG)) { + if (ML::default_logger().should_log(ML::level_enum::debug)) { CUML_LOG_DEBUG("Target Fuzzy Simplicial Set"); std::stringstream ss; ss << ygraph_coo; diff --git a/cpp/test/sg/genetic/evolution_test.cu b/cpp/test/sg/genetic/evolution_test.cu index 526acb5280..fa8ebf77bd 100644 --- a/cpp/test/sg/genetic/evolution_test.cu +++ b/cpp/test/sg/genetic/evolution_test.cu @@ -58,7 +58,7 @@ class GeneticEvolutionTest : public ::testing::Test { protected: void SetUp() override { - ML::Logger::get().setLevel(CUML_LEVEL_INFO); + ML::default_logger().set_level(ML::level_enum::info); // Set training param vals hyper_params.population_size = 5000; diff --git a/cpp/test/sg/genetic/program_test.cu b/cpp/test/sg/genetic/program_test.cu index 1205baf9d9..d549906ebf 100644 --- a/cpp/test/sg/genetic/program_test.cu +++ b/cpp/test/sg/genetic/program_test.cu @@ -642,7 +642,7 @@ TEST_F(GeneticProgramTest, ProgramExecution) MLCommon::CompareApprox compApprox(tolerance); // Enable debug logging - ML::Logger::get().setLevel(CUML_LEVEL_INFO); + ML::default_logger().set_level(ML::level_enum::info); // Allocate memory std::vector h_ypred(n_progs * n_samples, 0.0f); diff --git a/cpp/test/sg/hdbscan_test.cu b/cpp/test/sg/hdbscan_test.cu index d90e9f4314..888496e98c 100644 --- a/cpp/test/sg/hdbscan_test.cu +++ b/cpp/test/sg/hdbscan_test.cu @@ -85,7 +85,7 @@ class HDBSCANTest : public ::testing::TestWithParam> { rmm::device_uvector out_probabilities(params.n_row, handle.get_stream()); - Logger::get().setLevel(CUML_LEVEL_DEBUG); + 
default_logger().set_level(ML::level_enum::debug); HDBSCAN::Common::hdbscan_output out(handle, params.n_row, @@ -167,7 +167,7 @@ class ClusterCondensingTest : public ::testing::TestWithParam out_delta(params.n_row, handle.get_stream()); - Logger::get().setLevel(CUML_LEVEL_DEBUG); + default_logger().set_level(ML::level_enum::debug); raft::sparse::op::coo_sort_by_weight( mst_src.data(), mst_dst.data(), mst_data.data(), (IdxT)mst_src.size(), handle.get_stream()); @@ -257,7 +257,7 @@ class ClusterSelectionTest : public ::testing::TestWithParam>::GetParam(); - Logger::get().setLevel(CUML_LEVEL_DEBUG); + default_logger().set_level(ML::level_enum::debug); rmm::device_uvector condensed_parents(params.condensed_parents.size(), handle.get_stream()); diff --git a/cpp/test/sg/lars_test.cu b/cpp/test/sg/lars_test.cu index d5d276eab8..3f58f337ee 100644 --- a/cpp/test/sg/lars_test.cu +++ b/cpp/test/sg/lars_test.cu @@ -481,9 +481,9 @@ class LarsTestFitPredict : public ::testing::Test { void testFitGram() { - auto stream = handle.get_stream(); - int max_iter = 10; - int verbosity = 0; + auto stream = handle.get_stream(); + int max_iter = 10; + ML::level_enum verbosity = ML::level_enum::off; int n_active; ML::Solver::Lars::larsFit(handle, X.data(), @@ -512,9 +512,9 @@ class LarsTestFitPredict : public ::testing::Test { void testFitX() { - auto stream = handle.get_stream(); - int max_iter = 10; - int verbosity = 0; + auto stream = handle.get_stream(); + int max_iter = 10; + ML::level_enum verbosity = ML::level_enum::off; int n_active; ML::Solver::Lars::larsFit(handle, X.data(), @@ -593,11 +593,11 @@ class LarsTestFitPredict : public ::testing::Test { void testFitLarge() { - auto stream = handle.get_stream(); - int n_rows = 65536; - int n_cols = 10; - int max_iter = n_cols; - int verbosity = 0; + auto stream = handle.get_stream(); + int n_rows = 65536; + int n_cols = 10; + int max_iter = n_cols; + ML::level_enum verbosity = ML::level_enum::off; int n_active; rmm::device_uvector 
X(n_rows * n_cols, stream); rmm::device_uvector y(n_rows, stream); diff --git a/cpp/test/sg/logger.cpp b/cpp/test/sg/logger.cpp index 1a286c4597..701e51eded 100644 --- a/cpp/test/sg/logger.cpp +++ b/cpp/test/sg/logger.cpp @@ -29,15 +29,15 @@ TEST(Logger, Test) CUML_LOG_WARN("This is a warning message"); CUML_LOG_INFO("This is an info message"); - Logger::get().setLevel(CUML_LEVEL_WARN); - ASSERT_EQ(CUML_LEVEL_WARN, Logger::get().getLevel()); - Logger::get().setLevel(CUML_LEVEL_INFO); - ASSERT_EQ(CUML_LEVEL_INFO, Logger::get().getLevel()); - - ASSERT_FALSE(Logger::get().shouldLogFor(CUML_LEVEL_TRACE)); - ASSERT_FALSE(Logger::get().shouldLogFor(CUML_LEVEL_DEBUG)); - ASSERT_TRUE(Logger::get().shouldLogFor(CUML_LEVEL_INFO)); - ASSERT_TRUE(Logger::get().shouldLogFor(CUML_LEVEL_WARN)); + default_logger().set_level(ML::level_enum::warn); + ASSERT_EQ(ML::level_enum::warn, default_logger().level()); + default_logger().set_level(ML::level_enum::info); + ASSERT_EQ(ML::level_enum::info, default_logger().level()); + + ASSERT_FALSE(default_logger().should_log(ML::level_enum::trace)); + ASSERT_FALSE(default_logger().should_log(ML::level_enum::debug)); + ASSERT_TRUE(default_logger().should_log(ML::level_enum::info)); + ASSERT_TRUE(default_logger().should_log(ML::level_enum::warn)); } std::string logged = ""; @@ -52,21 +52,21 @@ class LoggerTest : public ::testing::Test { { flushCount = 0; logged = ""; - Logger::get().setLevel(CUML_LEVEL_TRACE); + default_logger().set_level(ML::level_enum::trace); } void TearDown() override { - Logger::get().setCallback(nullptr); - Logger::get().setFlush(nullptr); - Logger::get().setLevel(CUML_LEVEL_INFO); + default_logger().setCallback(nullptr); + default_logger().setFlush(nullptr); + default_logger().set_level(ML::level_enum::info); } }; TEST_F(LoggerTest, callback) { std::string testMsg; - Logger::get().setCallback(exampleCallback); + default_logger().setCallback(exampleCallback); testMsg = "This is a critical message"; 
CUML_LOG_CRITICAL(testMsg.c_str()); @@ -91,8 +91,8 @@ TEST_F(LoggerTest, callback) TEST_F(LoggerTest, flush) { - Logger::get().setFlush(exampleFlush); - Logger::get().flush(); + default_logger().setFlush(exampleFlush); + default_logger().flush(); ASSERT_EQ(1, flushCount); } diff --git a/cpp/test/sg/quasi_newton.cu b/cpp/test/sg/quasi_newton.cu index bc2120af4e..809084dd8f 100644 --- a/cpp/test/sg/quasi_newton.cu +++ b/cpp/test/sg/quasi_newton.cu @@ -111,7 +111,7 @@ T run(const raft::handle_t& handle, T l2, T* w, SimpleDenseMat& z, - int verbosity, + level_enum verbosity, cudaStream_t stream) { qn_params pams; @@ -144,7 +144,7 @@ T run_api(const raft::handle_t& cuml_handle, T l2, T* w, SimpleDenseMat& z, - int verbosity, + level_enum verbosity, cudaStream_t stream) { qn_params pams; @@ -226,7 +226,7 @@ TEST_F(QuasiNewtonTest, binary_logistic_vs_sklearn) l1 = alpha; l2 = 0.0; - fx = run(handle, loss_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream); + fx = run(handle, loss_b, *Xdev, *ydev, l1, l2, w0.data, z, level_enum::off, stream); ASSERT_TRUE(compApprox(obj_l1_b, fx)); ASSERT_TRUE(checkParamsEqual(handle, &w_l1_b[0], &b_l1_b, w0.data, loss_b, compApprox, stream)); @@ -240,7 +240,7 @@ TEST_F(QuasiNewtonTest, binary_logistic_vs_sklearn) l2, w0.data, z, - 0, + level_enum::off, stream); ASSERT_TRUE(compApprox(obj_l1_b, fx)); @@ -250,7 +250,7 @@ TEST_F(QuasiNewtonTest, binary_logistic_vs_sklearn) l1 = 0; l2 = alpha; - fx = run(handle, loss_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream); + fx = run(handle, loss_b, *Xdev, *ydev, l1, l2, w0.data, z, level_enum::off, stream); ASSERT_TRUE(compApprox(obj_l2_b, fx)); ASSERT_TRUE(checkParamsEqual(handle, &w_l2_b[0], &b_l2_b, w0.data, loss_b, compApprox, stream)); @@ -265,7 +265,7 @@ TEST_F(QuasiNewtonTest, binary_logistic_vs_sklearn) l2, w0.data, z, - 0, + level_enum::off, stream); ASSERT_TRUE(compApprox(obj_l2_b, fx)); @@ -274,7 +274,7 @@ TEST_F(QuasiNewtonTest, binary_logistic_vs_sklearn) l1 = alpha; l2 = 0.0; - fx = 
run(handle, loss_no_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream); + fx = run(handle, loss_no_b, *Xdev, *ydev, l1, l2, w0.data, z, level_enum::off, stream); ASSERT_TRUE(compApprox(obj_l1_no_b, fx)); ASSERT_TRUE( checkParamsEqual(handle, &w_l1_no_b[0], nobptr, w0.data, loss_no_b, compApprox, stream)); @@ -289,7 +289,7 @@ TEST_F(QuasiNewtonTest, binary_logistic_vs_sklearn) l2, w0.data, z, - 0, + level_enum::off, stream); ASSERT_TRUE(compApprox(obj_l1_no_b, fx)); @@ -298,7 +298,7 @@ TEST_F(QuasiNewtonTest, binary_logistic_vs_sklearn) l1 = 0; l2 = alpha; - fx = run(handle, loss_no_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream); + fx = run(handle, loss_no_b, *Xdev, *ydev, l1, l2, w0.data, z, level_enum::off, stream); ASSERT_TRUE(compApprox(obj_l2_no_b, fx)); ASSERT_TRUE( checkParamsEqual(handle, &w_l2_no_b[0], nobptr, w0.data, loss_no_b, compApprox, stream)); @@ -313,7 +313,7 @@ TEST_F(QuasiNewtonTest, binary_logistic_vs_sklearn) l2, w0.data, z, - 0, + level_enum::off, stream); ASSERT_TRUE(compApprox(obj_l2_no_b, fx)); } @@ -346,7 +346,7 @@ TEST_F(QuasiNewtonTest, multiclass_logistic_vs_sklearn) l2 = 0.0; double obj_l1_b = 0.5407911382311313; - fx = run(handle, loss_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream); + fx = run(handle, loss_b, *Xdev, *ydev, l1, l2, w0.data, z, level_enum::off, stream); ASSERT_TRUE(compApprox(obj_l1_b, fx)); fx = run_api(cuml_handle, @@ -359,7 +359,7 @@ TEST_F(QuasiNewtonTest, multiclass_logistic_vs_sklearn) l2, w0.data, z, - 0, + level_enum::off, stream); ASSERT_TRUE(compApprox(obj_l1_b, fx)); @@ -367,7 +367,7 @@ TEST_F(QuasiNewtonTest, multiclass_logistic_vs_sklearn) l2 = alpha; double obj_l2_b = 0.5721784062720949; - fx = run(handle, loss_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream); + fx = run(handle, loss_b, *Xdev, *ydev, l1, l2, w0.data, z, level_enum::off, stream); ASSERT_TRUE(compApprox(obj_l2_b, fx)); fx = run_api(cuml_handle, @@ -380,7 +380,7 @@ TEST_F(QuasiNewtonTest, multiclass_logistic_vs_sklearn) l2, w0.data, z, - 0, + 
level_enum::off, stream); ASSERT_TRUE(compApprox(obj_l2_b, fx)); @@ -388,7 +388,7 @@ TEST_F(QuasiNewtonTest, multiclass_logistic_vs_sklearn) l2 = 0.0; double obj_l1_no_b = 0.6606929813245878; - fx = run(handle, loss_no_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream); + fx = run(handle, loss_no_b, *Xdev, *ydev, l1, l2, w0.data, z, level_enum::off, stream); ASSERT_TRUE(compApprox(obj_l1_no_b, fx)); fx = run_api(cuml_handle, @@ -401,7 +401,7 @@ TEST_F(QuasiNewtonTest, multiclass_logistic_vs_sklearn) l2, w0.data, z, - 0, + level_enum::off, stream); ASSERT_TRUE(compApprox(obj_l1_no_b, fx)); @@ -410,7 +410,7 @@ TEST_F(QuasiNewtonTest, multiclass_logistic_vs_sklearn) double obj_l2_no_b = 0.6597171282106854; - fx = run(handle, loss_no_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream); + fx = run(handle, loss_no_b, *Xdev, *ydev, l1, l2, w0.data, z, level_enum::off, stream); ASSERT_TRUE(compApprox(obj_l2_no_b, fx)); fx = run_api(cuml_handle, @@ -423,7 +423,7 @@ TEST_F(QuasiNewtonTest, multiclass_logistic_vs_sklearn) l2, w0.data, z, - 0, + level_enum::off, stream); ASSERT_TRUE(compApprox(obj_l2_no_b, fx)); } @@ -457,7 +457,7 @@ TEST_F(QuasiNewtonTest, linear_regression_vs_sklearn) double w_l1_b[2] = {-0.4952397281519840, 0.3813315300180231}; double b_l1_b = -0.08140861819001188; double obj_l1_b = 0.011136986298775138; - fx = run(handle, loss_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream); + fx = run(handle, loss_b, *Xdev, *ydev, l1, l2, w0.data, z, level_enum::off, stream); ASSERT_TRUE(compApprox(obj_l1_b, fx)); ASSERT_TRUE(checkParamsEqual(handle, &w_l1_b[0], &b_l1_b, w0.data, loss_b, compApprox, stream)); @@ -471,7 +471,7 @@ TEST_F(QuasiNewtonTest, linear_regression_vs_sklearn) l2, w0.data, z, - 0, + level_enum::off, stream); ASSERT_TRUE(compApprox(obj_l1_b, fx)); @@ -481,7 +481,7 @@ TEST_F(QuasiNewtonTest, linear_regression_vs_sklearn) double b_l2_b = -0.08062397391797513; double obj_l2_b = 0.004268621967866347; - fx = run(handle, loss_b, *Xdev, *ydev, l1, l2, w0.data, z, 
0, stream); + fx = run(handle, loss_b, *Xdev, *ydev, l1, l2, w0.data, z, level_enum::off, stream); ASSERT_TRUE(compApprox(obj_l2_b, fx)); ASSERT_TRUE(checkParamsEqual(handle, &w_l2_b[0], &b_l2_b, w0.data, loss_b, compApprox, stream)); @@ -495,7 +495,7 @@ TEST_F(QuasiNewtonTest, linear_regression_vs_sklearn) l2, w0.data, z, - 0, + level_enum::off, stream); ASSERT_TRUE(compApprox(obj_l2_b, fx)); @@ -504,7 +504,7 @@ TEST_F(QuasiNewtonTest, linear_regression_vs_sklearn) double w_l1_no_b[2] = {-0.5175178128147135, 0.3720844589831813}; double obj_l1_no_b = 0.013981355746112447; - fx = run(handle, loss_no_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream); + fx = run(handle, loss_no_b, *Xdev, *ydev, l1, l2, w0.data, z, level_enum::off, stream); ASSERT_TRUE(compApprox(obj_l1_no_b, fx)); ASSERT_TRUE( checkParamsEqual(handle, &w_l1_no_b[0], nobptr, w0.data, loss_no_b, compApprox, stream)); @@ -519,7 +519,7 @@ TEST_F(QuasiNewtonTest, linear_regression_vs_sklearn) l2, w0.data, z, - 0, + level_enum::off, stream); ASSERT_TRUE(compApprox(obj_l1_no_b, fx)); @@ -528,7 +528,7 @@ TEST_F(QuasiNewtonTest, linear_regression_vs_sklearn) double w_l2_no_b[2] = {-0.5241651041233270, 0.3846317886627560}; double obj_l2_no_b = 0.007061261366969662; - fx = run(handle, loss_no_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream); + fx = run(handle, loss_no_b, *Xdev, *ydev, l1, l2, w0.data, z, level_enum::off, stream); ASSERT_TRUE(compApprox(obj_l2_no_b, fx)); ASSERT_TRUE( checkParamsEqual(handle, &w_l2_no_b[0], nobptr, w0.data, loss_no_b, compApprox, stream)); @@ -543,7 +543,7 @@ TEST_F(QuasiNewtonTest, linear_regression_vs_sklearn) l2, w0.data, z, - 0, + level_enum::off, stream); ASSERT_TRUE(compApprox(obj_l2_no_b, fx)); } @@ -667,8 +667,10 @@ TEST_F(QuasiNewtonTest, dense_vs_sparse_logistic) pams.fit_intercept = loss.fit_intercept; double f_dense, f_sparse; - f_dense = run(handle, loss, *Xdev, *ydev, l1, l2, w0_dense.data, z_dense, 0, stream); - f_sparse = run(handle, loss, X_sparse, *ydev, l1, l2, 
w0_sparse.data, z_sparse, 0, stream); + f_dense = + run(handle, loss, *Xdev, *ydev, l1, l2, w0_dense.data, z_dense, level_enum::off, stream); + f_sparse = + run(handle, loss, X_sparse, *ydev, l1, l2, w0_sparse.data, z_sparse, level_enum::off, stream); ASSERT_TRUE(compApprox(f_dense, f_sparse)); qnPredict( @@ -702,7 +704,7 @@ TEST_F(QuasiNewtonTest, dense_vs_sparse_logistic) l2, w0_dense.data, z_dense, - 0, + level_enum::off, stream); f_sparse = run_api(cuml_handle, QN_LOSS_SOFTMAX, @@ -714,7 +716,7 @@ TEST_F(QuasiNewtonTest, dense_vs_sparse_logistic) l2, w0_sparse.data, z_sparse, - 0, + level_enum::off, stream); ASSERT_TRUE(compApprox(f_dense, f_sparse)); }; diff --git a/cpp/test/sg/svc_test.cu b/cpp/test/sg/svc_test.cu index 0caad107d5..661fe35e36 100644 --- a/cpp/test/sg/svc_test.cu +++ b/cpp/test/sg/svc_test.cu @@ -590,7 +590,7 @@ SvmParameter getDefaultSvmParameter() param.cache_size = 200; param.max_iter = -1; param.nochange_steps = 1000; - param.verbosity = CUML_LEVEL_INFO; + param.verbosity = ML::level_enum::info; param.epsilon = 0.1; param.svmType = C_SVC; return param; @@ -1381,7 +1381,7 @@ TYPED_TEST(SmoSolverTest, BlobPredict) rmm::device_uvector y_pred(n_pred, stream); make_blobs(this->handle, x.data(), y.data(), p.n_rows, p.n_cols, 2, centers.data()); - SVC svc(this->handle, p.C, p.tol, p.kernel_params, 0, -1, 50, CUML_LEVEL_INFO); + SVC svc(this->handle, p.C, p.tol, p.kernel_params, 0, -1, 50, ML::level_enum::info); svc.fit(x.data(), p.n_rows, p.n_cols, y.data()); // Create a different dataset for prediction @@ -1500,7 +1500,7 @@ TYPED_TEST(SmoSolverTest, DISABLED_MillionRows) make_blobs(this->handle, x.data(), y.data(), p.n_rows, p.n_cols, 2, centers.data()); const int max_iter = 2; SVC svc( - this->handle, p.C, p.tol, p.kernel_params, 0, max_iter, 50, CUML_LEVEL_DEBUG); + this->handle, p.C, p.tol, p.kernel_params, 0, max_iter, 50, ML::level_enum::debug); svc.fit(x.data(), p.n_rows, p.n_cols, y.data()); // predict on the same dataset 
svc.predict(x.data(), p.n_rows, p.n_cols, y_pred.data()); @@ -1955,7 +1955,7 @@ class SvrTest : public ::testing::Test { auto stream = this->handle.get_stream(); std::vector, smoOutput2>> data{ {SvrInput{ - SvmParameter{1, 0, 1, 10, 1e-3, CUML_LEVEL_INFO, 0.1, EPSILON_SVR}, + SvmParameter{1, 0, 1, 10, 1e-3, ML::level_enum::info, 0.1, EPSILON_SVR}, KernelParams{LINEAR, 3, 1, 0}, 2, // n_rows 1, // n_cols @@ -1965,7 +1965,7 @@ class SvrTest : public ::testing::Test { smoOutput2{2, {-0.8, 0.8}, 2.1, {0.8}, {0, 1}, {0, 1}, {2.1, 2.9}}}, {SvrInput{ - SvmParameter{1, 10, 1, 1, 1e-3, CUML_LEVEL_INFO, 0.1, EPSILON_SVR}, + SvmParameter{1, 10, 1, 1, 1e-3, ML::level_enum::info, 0.1, EPSILON_SVR}, KernelParams{LINEAR, 3, 1, 0}, 2, // n_rows 1, // n_cols @@ -1975,7 +1975,7 @@ class SvrTest : public ::testing::Test { smoOutput2{2, {-0.8, 0.8}, 1.3, {0.8}, {1, 2}, {0, 1}, {2.1, 2.9}}}, {SvrInput{ - SvmParameter{1, 0, 1, 1, 1e-3, CUML_LEVEL_INFO, 0.1, EPSILON_SVR}, + SvmParameter{1, 0, 1, 1, 1e-3, ML::level_enum::info, 0.1, EPSILON_SVR}, KernelParams{LINEAR, 3, 1, 0}, 2, // n_rows 2, // n_cols @@ -1985,7 +1985,7 @@ class SvrTest : public ::testing::Test { smoOutput2{2, {-0.8, 0.8}, 1.3, {0.8, 0.0}, {1, 2, 5, 5}, {0, 1}, {2.1, 2.9}}}, {SvrInput{ - SvmParameter{1, 0, 100, 10, 1e-6, CUML_LEVEL_INFO, 0.1, EPSILON_SVR}, + SvmParameter{1, 0, 100, 10, 1e-6, ML::level_enum::info, 0.1, EPSILON_SVR}, KernelParams{LINEAR, 3, 1, 0}, 7, // n_rows 1, // n_cols @@ -2001,7 +2001,7 @@ class SvrTest : public ::testing::Test { {0.7, 1.8, 2.9, 4, 5.1, 6.2, 7.3}}}, // Almost same as above, but with sample weights {SvrInput{ - SvmParameter{1, 0, 100, 10, 1e-3, CUML_LEVEL_INFO, 0.1, EPSILON_SVR}, + SvmParameter{1, 0, 100, 10, 1e-3, ML::level_enum::info, 0.1, EPSILON_SVR}, KernelParams{LINEAR, 3, 1, 0}, 7, // n_rows 1, // n_cols @@ -2012,7 +2012,7 @@ class SvrTest : public ::testing::Test { smoOutput2{ 6, {}, -15.5, {3.9}, {1.0, 2.0, 3.0, 4.0, 6.0, 7.0}, {0, 1, 2, 3, 5, 6}, {}}}, {SvrInput{ - 
SvmParameter{1, 0, 100, 10, 1e-6, CUML_LEVEL_INFO, 0.1, EPSILON_SVR}, + SvmParameter{1, 0, 100, 10, 1e-6, ML::level_enum::info, 0.1, EPSILON_SVR}, KernelParams{LINEAR, 3, 1, 0}, 7, // n_rows 1, // n_cols diff --git a/cpp/test/sg/tsne_test.cu b/cpp/test/sg/tsne_test.cu index f1e3d47703..ec9f69e805 100644 --- a/cpp/test/sg/tsne_test.cu +++ b/cpp/test/sg/tsne_test.cu @@ -124,7 +124,7 @@ class TSNETest : public ::testing::TestWithParam { model_params.dim = 2; model_params.n_neighbors = 90; model_params.min_grad_norm = 1e-12; - model_params.verbosity = CUML_LEVEL_DEBUG; + model_params.verbosity = ML::level_enum::debug; model_params.metric = DEFAULT_DISTANCE_METRIC; // Allocate memory From 53262f76a6270881119c05399f892f48258fbb19 Mon Sep 17 00:00:00 2001 From: Vyas Ramasubramani Date: Mon, 18 Nov 2024 22:38:40 +0000 Subject: [PATCH 03/32] Stop changing pattern unnecessarily --- cpp/src/randomforest/randomforest.cuh | 1 - 1 file changed, 1 deletion(-) diff --git a/cpp/src/randomforest/randomforest.cuh b/cpp/src/randomforest/randomforest.cuh index a47f55b255..9a60891077 100644 --- a/cpp/src/randomforest/randomforest.cuh +++ b/cpp/src/randomforest/randomforest.cuh @@ -224,7 +224,6 @@ class RandomForest { int row_size = n_cols; - ML::PatternSetter _("%v"); for (int row_id = 0; row_id < n_rows; row_id++) { std::vector row_prediction(forest->trees[0]->num_outputs); for (int i = 0; i < this->rf_params.n_trees; i++) { From a007134ad3ea2443e7510bbcedd5c37dc6eac685 Mon Sep 17 00:00:00 2001 From: Vyas Ramasubramani Date: Tue, 19 Nov 2024 05:25:36 +0000 Subject: [PATCH 04/32] Get C++ compiling --- cpp/include/cuml/linear_model/qn.h | 8 +++----- cpp/src/glm/qn/mg/qn_mg.cuh | 4 ++-- cpp/src/glm/qn/qn.cuh | 6 ++++-- cpp/src/randomforest/randomforest.cu | 3 ++- cpp/src/randomforest/randomforest.cuh | 2 ++ cpp/src/svm/linear.cu | 2 +- cpp/test/sg/logger.cpp | 8 ++++---- cpp/test/sg/quasi_newton.cu | 4 ++-- 8 files changed, 20 insertions(+), 17 deletions(-) diff --git 
a/cpp/include/cuml/linear_model/qn.h b/cpp/include/cuml/linear_model/qn.h index d19df0773a..f3b1c63604 100644 --- a/cpp/include/cuml/linear_model/qn.h +++ b/cpp/include/cuml/linear_model/qn.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021-2024, NVIDIA CORPORATION. + * Copyright (c) 2021-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -15,8 +15,6 @@ */ #pragma once -#include - #include #ifdef __cplusplus @@ -84,7 +82,7 @@ struct qn_params { /** Number of vectors approximating the hessian (l-bfgs). */ int lbfgs_memory; /** Triggers extra output when greater than zero. */ - level_enum verbose; + int verbose; /** Whether to fit the bias term. */ bool fit_intercept; /** @@ -107,7 +105,7 @@ struct qn_params { max_iter(1000), linesearch_max_iter(50), lbfgs_memory(5), - verbose(level_enum::off), + verbose(0), fit_intercept(true), penalty_normalized(true) { diff --git a/cpp/src/glm/qn/mg/qn_mg.cuh b/cpp/src/glm/qn/mg/qn_mg.cuh index d594890f1e..78e34af47b 100644 --- a/cpp/src/glm/qn/mg/qn_mg.cuh +++ b/cpp/src/glm/qn/mg/qn_mg.cuh @@ -66,7 +66,7 @@ int qn_fit_mg(const raft::handle_t& handle, auto obj_function = GLMWithDataMG(handle, rank, n_ranks, n_samples, ®ularizer_obj, X, y, Z, stder_p); return ML::GLM::detail::qn_minimize( - handle, w0, fx, num_iters, obj_function, l1, opt_param, pams.verbose); + handle, w0, fx, num_iters, obj_function, l1, opt_param, static_cast(pams.verbose)); } template @@ -126,4 +126,4 @@ inline void qn_fit_x_mg(const raft::handle_t& handle, }; // namespace opg }; // namespace GLM -}; // namespace ML \ No newline at end of file +}; // namespace ML diff --git a/cpp/src/glm/qn/qn.cuh b/cpp/src/glm/qn/qn.cuh index 5be5abd9a7..b76085b25f 100644 --- a/cpp/src/glm/qn/qn.cuh +++ b/cpp/src/glm/qn/qn.cuh @@ -61,14 +61,16 @@ int qn_fit(const raft::handle_t& handle, if (l2 == 0) { GLMWithData lossWith(&loss, X, y, Z); - return qn_minimize(handle, w0, 
fx, num_iters, lossWith, l1, opt_param, pams.verbose); + return qn_minimize( + handle, w0, fx, num_iters, lossWith, l1, opt_param, static_cast(pams.verbose)); } else { Tikhonov reg(l2); RegularizedGLM obj(&loss, ®); GLMWithData lossWith(&obj, X, y, Z); - return qn_minimize(handle, w0, fx, num_iters, lossWith, l1, opt_param, pams.verbose); + return qn_minimize( + handle, w0, fx, num_iters, lossWith, l1, opt_param, static_cast(pams.verbose)); } } diff --git a/cpp/src/randomforest/randomforest.cu b/cpp/src/randomforest/randomforest.cu index 28c044224f..dd13465bb9 100644 --- a/cpp/src/randomforest/randomforest.cu +++ b/cpp/src/randomforest/randomforest.cu @@ -182,10 +182,10 @@ void delete_rf_metadata(RandomForestMetaData* forest) template std::string _get_rf_text(const RandomForestMetaData* forest, bool summary) { - ML::PatternSetter _("%v"); if (!forest) { return "Empty forest"; } else { + default_logger().set_pattern("%v"); std::ostringstream oss; oss << "Forest has " << forest->rf_params.n_trees << " trees, " << "max_depth " << forest->rf_params.tree_params.max_depth << ", and max_leaves " @@ -198,6 +198,7 @@ std::string _get_rf_text(const RandomForestMetaData* forest, bool summary) oss << DT::get_tree_text(forest->trees[i].get()) << "\n"; } } + default_logger().set_pattern(default_pattern()); return oss.str(); } } diff --git a/cpp/src/randomforest/randomforest.cuh b/cpp/src/randomforest/randomforest.cuh index 9a60891077..0ef58f7ee5 100644 --- a/cpp/src/randomforest/randomforest.cuh +++ b/cpp/src/randomforest/randomforest.cuh @@ -224,6 +224,7 @@ class RandomForest { int row_size = n_cols; + default_logger().set_pattern("%v"); for (int row_id = 0; row_id < n_rows; row_id++) { std::vector row_prediction(forest->trees[0]->num_outputs); for (int i = 0; i < this->rf_params.n_trees; i++) { @@ -257,6 +258,7 @@ class RandomForest { raft::update_device(predictions, h_predictions.data(), n_rows, stream); user_handle.sync_stream(stream); + 
default_logger().set_pattern(default_pattern()); } /** diff --git a/cpp/src/svm/linear.cu b/cpp/src/svm/linear.cu index ac1d561ed0..2dadc2c937 100644 --- a/cpp/src/svm/linear.cu +++ b/cpp/src/svm/linear.cu @@ -416,7 +416,7 @@ LinearSVMModel LinearSVMModel::fit(const raft::handle_t& handle, qn_pams.change_tol = params.change_tol; qn_pams.linesearch_max_iter = params.linesearch_max_iter; qn_pams.lbfgs_memory = params.lbfgs_memory; - qn_pams.verbose = params.verbose; + qn_pams.verbose = static_cast(params.verbose); ML::GLM::qn_params qn_pams_logistic = qn_pams; qn_pams_logistic.loss = ML::GLM::QN_LOSS_LOGISTIC; diff --git a/cpp/test/sg/logger.cpp b/cpp/test/sg/logger.cpp index 701e51eded..d57838fa1e 100644 --- a/cpp/test/sg/logger.cpp +++ b/cpp/test/sg/logger.cpp @@ -57,8 +57,8 @@ class LoggerTest : public ::testing::Test { void TearDown() override { - default_logger().setCallback(nullptr); - default_logger().setFlush(nullptr); + default_logger().remove_callback(); + // default_logger().setFlush(nullptr); default_logger().set_level(ML::level_enum::info); } }; @@ -66,7 +66,7 @@ class LoggerTest : public ::testing::Test { TEST_F(LoggerTest, callback) { std::string testMsg; - default_logger().setCallback(exampleCallback); + default_logger().set_callback(exampleCallback); testMsg = "This is a critical message"; CUML_LOG_CRITICAL(testMsg.c_str()); @@ -91,7 +91,7 @@ TEST_F(LoggerTest, callback) TEST_F(LoggerTest, flush) { - default_logger().setFlush(exampleFlush); + // default_logger().setFlush(exampleFlush); default_logger().flush(); ASSERT_EQ(1, flushCount); } diff --git a/cpp/test/sg/quasi_newton.cu b/cpp/test/sg/quasi_newton.cu index 809084dd8f..4dca80f911 100644 --- a/cpp/test/sg/quasi_newton.cu +++ b/cpp/test/sg/quasi_newton.cu @@ -122,7 +122,7 @@ T run(const raft::handle_t& handle, pams.lbfgs_memory = 5; pams.penalty_l1 = l1; pams.penalty_l2 = l2; - pams.verbose = verbosity; + pams.verbose = static_cast(verbosity); int num_iters = 0; @@ -156,7 +156,7 @@ T 
run_api(const raft::handle_t& cuml_handle, pams.lbfgs_memory = 5; pams.penalty_l1 = l1; pams.penalty_l2 = l2; - pams.verbose = verbosity; + pams.verbose = static_cast(verbosity); pams.fit_intercept = fit_intercept; pams.loss = loss_type; From 91c8e83e7e5ee0a558e3bcb6b0971529dfa65292 Mon Sep 17 00:00:00 2001 From: Vyas Ramasubramani Date: Tue, 3 Dec 2024 20:51:35 +0000 Subject: [PATCH 05/32] Switch to using new repo --- cpp/CMakeLists.txt | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/cpp/CMakeLists.txt b/cpp/CMakeLists.txt index b479962b1f..aeb49cfcc9 100644 --- a/cpp/CMakeLists.txt +++ b/cpp/CMakeLists.txt @@ -222,12 +222,10 @@ rapids_cmake_install_lib_dir(lib_dir) # Not using rapids-cmake since we never want to find, always download. CPMAddPackage( - NAME rapids_logger GITHUB_REPOSITORY vyasr/rmm GIT_SHALLOW TRUE SOURCE_SUBDIR rapids_logger - GIT_TAG feat/logger + NAME rapids_logger GITHUB_REPOSITORY rapidsai/rapids-logger GIT_SHALLOW TRUE GIT_TAG 14bb233d2420f7187a690f0bb528ec0420c70d48 ) rapids_make_logger( - ML VISIBILITY_MACRO "__attribute__((visibility(\"default\")))" EXPORT_SET cuml-exports LOGGER_HEADER_DIR include/cuml/common/ LOGGER_MACRO_PREFIX CUML LOGGER_TARGET cuml_logger - SUPPORTS_LOGGING + ML EXPORT_SET cuml-exports LOGGER_HEADER_DIR include/cuml/common/ LOGGER_MACRO_PREFIX CUML LOGGER_TARGET cuml_logger ) if(BUILD_CUML_TESTS OR BUILD_PRIMS_TESTS) From f4fad025f3a1a6c1fde83bb9099176a12bc3db7a Mon Sep 17 00:00:00 2001 From: Vyas Ramasubramani Date: Fri, 6 Dec 2024 01:23:24 +0000 Subject: [PATCH 06/32] Update to use the new functionality --- cpp/CMakeLists.txt | 2 +- cpp/src/randomforest/randomforest.cu | 2 +- cpp/test/sg/logger.cpp | 10 +++++----- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/cpp/CMakeLists.txt b/cpp/CMakeLists.txt index aeb49cfcc9..8d51ad91ce 100644 --- a/cpp/CMakeLists.txt +++ b/cpp/CMakeLists.txt @@ -222,7 +222,7 @@ rapids_cmake_install_lib_dir(lib_dir) # Not using rapids-cmake since we 
never want to find, always download. CPMAddPackage( - NAME rapids_logger GITHUB_REPOSITORY rapidsai/rapids-logger GIT_SHALLOW TRUE GIT_TAG 14bb233d2420f7187a690f0bb528ec0420c70d48 + NAME rapids_logger GITHUB_REPOSITORY vyasr/rapids-logger GIT_SHALLOW TRUE GIT_TAG feat/cuml ) rapids_make_logger( ML EXPORT_SET cuml-exports LOGGER_HEADER_DIR include/cuml/common/ LOGGER_MACRO_PREFIX CUML LOGGER_TARGET cuml_logger diff --git a/cpp/src/randomforest/randomforest.cu b/cpp/src/randomforest/randomforest.cu index dd13465bb9..4d70f3ea27 100644 --- a/cpp/src/randomforest/randomforest.cu +++ b/cpp/src/randomforest/randomforest.cu @@ -166,7 +166,7 @@ void postprocess_labels(int n_rows, labels[i] = reverse_map[prev]; CUML_LOG_DEBUG("Mapping %d back to %d", prev, labels[i]); } - CUML_LOG_DEBUG("Finished postrocessing labels"); + CUML_LOG_DEBUG("Finished postprocessing labels"); } /** diff --git a/cpp/test/sg/logger.cpp b/cpp/test/sg/logger.cpp index d57838fa1e..929f023e0e 100644 --- a/cpp/test/sg/logger.cpp +++ b/cpp/test/sg/logger.cpp @@ -57,8 +57,7 @@ class LoggerTest : public ::testing::Test { void TearDown() override { - default_logger().remove_callback(); - // default_logger().setFlush(nullptr); + default_logger().sinks().pop_back(); default_logger().set_level(ML::level_enum::info); } }; @@ -66,7 +65,7 @@ class LoggerTest : public ::testing::Test { TEST_F(LoggerTest, callback) { std::string testMsg; - default_logger().set_callback(exampleCallback); + default_logger().sinks().push_back(std::make_shared(exampleCallback)); testMsg = "This is a critical message"; CUML_LOG_CRITICAL(testMsg.c_str()); @@ -91,8 +90,9 @@ TEST_F(LoggerTest, callback) TEST_F(LoggerTest, flush) { - // default_logger().setFlush(exampleFlush); - default_logger().flush(); + default_logger().sinks().push_back(std::make_shared(exampleCallback)); + auto const testMsg = "This is a critical message"; + CUML_LOG_CRITICAL(testMsg); ASSERT_EQ(1, flushCount); } From c898c2981df56f5c7796b705a3c87978b45f8f91 Mon Sep 17 
00:00:00 2001 From: Vyas Ramasubramani Date: Fri, 6 Dec 2024 22:53:50 +0000 Subject: [PATCH 07/32] Get Python code compiling --- cpp/include/cuml/solvers/lars.hpp | 6 +- cpp/include/cuml/svm/linear.hpp | 2 + python/cuml/cuml/cluster/dbscan.pyx | 17 +- python/cuml/cuml/cluster/kmeans.pyx | 7 +- .../cuml/ensemble/randomforestclassifier.pyx | 25 +-- .../cuml/ensemble/randomforestregressor.pyx | 25 +-- .../cuml/experimental/linear_model/lars.pyx | 9 +- python/cuml/cuml/internals/base.pyx | 6 +- python/cuml/cuml/internals/logger.pxd | 86 ++++++++++ python/cuml/cuml/internals/logger.pyx | 156 ++++++------------ python/cuml/cuml/manifold/simpl_set.pyx | 4 +- python/cuml/cuml/manifold/t_sne.pyx | 9 +- python/cuml/cuml/manifold/umap.pyx | 2 +- python/cuml/cuml/manifold/umap_utils.pxd | 3 +- python/cuml/cuml/svm/linear.pyx | 3 +- python/cuml/cuml/svm/svc.pyx | 11 +- python/cuml/cuml/svm/svm_base.pyx | 11 +- python/cuml/cuml/svm/svr.pyx | 5 +- 18 files changed, 219 insertions(+), 168 deletions(-) create mode 100644 python/cuml/cuml/internals/logger.pxd diff --git a/cpp/include/cuml/solvers/lars.hpp b/cpp/include/cuml/solvers/lars.hpp index c740b64672..7a70d2e435 100644 --- a/cpp/include/cuml/solvers/lars.hpp +++ b/cpp/include/cuml/solvers/lars.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020-2022, NVIDIA CORPORATION. + * Copyright (c) 2020-2024, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -16,6 +16,8 @@ #pragma once +#include + #include namespace ML { @@ -66,7 +68,7 @@ void larsFit(const raft::handle_t& handle, math_t* Gram, int max_iter, math_t* coef_path, - int verbosity, + level_enum verbosity, idx_t ld_X, idx_t ld_G, math_t eps); diff --git a/cpp/include/cuml/svm/linear.hpp b/cpp/include/cuml/svm/linear.hpp index 27e97a4535..dc391a3e5b 100644 --- a/cpp/include/cuml/svm/linear.hpp +++ b/cpp/include/cuml/svm/linear.hpp @@ -16,6 +16,8 @@ #pragma once +#include + #include namespace ML { diff --git a/python/cuml/cuml/cluster/dbscan.pyx b/python/cuml/cuml/cluster/dbscan.pyx index 07af1d142c..a6ef9d622c 100644 --- a/python/cuml/cuml/cluster/dbscan.pyx +++ b/python/cuml/cuml/cluster/dbscan.pyx @@ -29,6 +29,7 @@ from cuml.internals.mixins import ClusterMixin from cuml.internals.mixins import CMajorInputTagMixin from cuml.internals.api_decorators import device_interop_preparation from cuml.internals.api_decorators import enable_device_interop +from cuml.internals.logger cimport level_enum IF GPUBUILD == 1: @@ -57,7 +58,7 @@ IF GPUBUILD == 1: float* sample_weight, size_t max_mbytes_per_batch, EpsNnMethod eps_nn_method, - int verbosity, + level_enum verbosity, bool opg) except + cdef void fit(handle_t& handle, @@ -72,7 +73,7 @@ IF GPUBUILD == 1: double* sample_weight, size_t max_mbytes_per_batch, EpsNnMethod eps_nn_method, - int verbosity, + level_enum verbosity, bool opg) except + cdef void fit(handle_t& handle, @@ -87,7 +88,7 @@ IF GPUBUILD == 1: float* sample_weight, size_t max_mbytes_per_batch, EpsNnMethod eps_nn_method, - int verbosity, + level_enum verbosity, bool opg) except + cdef void fit(handle_t& handle, @@ -102,7 +103,7 @@ IF GPUBUILD == 1: double* sample_weight, size_t max_mbytes_per_batch, EpsNnMethod eps_nn_method, - int verbosity, + level_enum verbosity, bool opg) except + @@ -360,7 +361,7 @@ class DBSCAN(UniversalBase, sample_weight_ptr, self.max_mbytes_per_batch, algorithm, - self.verbose, + self.verbose, opg) else: fit(handle_[0], @@ 
-375,7 +376,7 @@ class DBSCAN(UniversalBase, sample_weight_ptr, self.max_mbytes_per_batch, algorithm, - self.verbose, + self.verbose, opg) else: @@ -392,7 +393,7 @@ class DBSCAN(UniversalBase, sample_weight_ptr, self.max_mbytes_per_batch, algorithm, - self.verbose, + self.verbose, opg) else: fit(handle_[0], @@ -407,7 +408,7 @@ class DBSCAN(UniversalBase, sample_weight_ptr, self.max_mbytes_per_batch, algorithm, - self.verbose, + self.verbose, opg) # make sure that the `fit` is complete before the following diff --git a/python/cuml/cuml/cluster/kmeans.pyx b/python/cuml/cuml/cluster/kmeans.pyx index 48ad769cd8..81e63ab5b9 100644 --- a/python/cuml/cuml/cluster/kmeans.pyx +++ b/python/cuml/cuml/cluster/kmeans.pyx @@ -36,6 +36,7 @@ IF GPUBUILD == 1: from cuml.metrics.distance_type cimport DistanceType from cuml.cluster.kmeans_utils cimport params as KMeansParams from cuml.cluster.kmeans_utils cimport KMeansPlusPlus, Random, Array + from cuml.internals.logger cimport level_enum from cuml.internals.array import CumlArray from cuml.common.array_descriptor import CumlArrayDescriptor @@ -205,7 +206,11 @@ class KMeans(UniversalBase, params.init = self._params_init params.max_iter = self.max_iter params.tol = self.tol - params.verbosity = self.verbose + breakpoint() + # TODO: This params object inherits from a cuvs type that still uses raft's + # integer-based legacy logging. Once raft's logger is also converted to + # using rapids-logger we will instead need to translate between enums here. 
+ params.verbosity = self.verbose params.rng_state.seed = self.random_state params.metric = DistanceType.L2Expanded # distance metric as squared L2: @todo - support other metrics # noqa: E501 params.batch_samples = self.max_samples_per_batch diff --git a/python/cuml/cuml/ensemble/randomforestclassifier.pyx b/python/cuml/cuml/ensemble/randomforestclassifier.pyx index 45bc4ce2e8..9664b4d008 100644 --- a/python/cuml/cuml/ensemble/randomforestclassifier.pyx +++ b/python/cuml/cuml/ensemble/randomforestclassifier.pyx @@ -34,6 +34,7 @@ from cuml.common.doc_utils import generate_docstring from cuml.common.doc_utils import insert_into_docstring from cuml.common import input_to_cuml_array +from cuml.internals.logger cimport level_enum from cuml.ensemble.randomforest_common import BaseRandomForestModel from cuml.ensemble.randomforest_common import _obtain_fil_model from cuml.ensemble.randomforest_shared cimport * @@ -61,7 +62,7 @@ cdef extern from "cuml/ensemble/randomforest.hpp" namespace "ML": int*, int, RF_params, - int) except + + level_enum) except + cdef void fit(handle_t& handle, RandomForestMetaData[double, int]*, @@ -71,7 +72,7 @@ cdef extern from "cuml/ensemble/randomforest.hpp" namespace "ML": int*, int, RF_params, - int) except + + level_enum) except + cdef void predict(handle_t& handle, RandomForestMetaData[float, int] *, @@ -79,7 +80,7 @@ cdef extern from "cuml/ensemble/randomforest.hpp" namespace "ML": int, int, int*, - bool) except + + level_enum) except + cdef void predict(handle_t& handle, RandomForestMetaData[double, int]*, @@ -87,21 +88,21 @@ cdef extern from "cuml/ensemble/randomforest.hpp" namespace "ML": int, int, int*, - bool) except + + level_enum) except + cdef RF_metrics score(handle_t& handle, RandomForestMetaData[float, int]*, int*, int, int*, - bool) except + + level_enum) except + cdef RF_metrics score(handle_t& handle, RandomForestMetaData[double, int]*, int*, int, int*, - bool) except + + level_enum) except + class 
RandomForestClassifier(BaseRandomForestModel, @@ -476,7 +477,7 @@ class RandomForestClassifier(BaseRandomForestModel, y_ptr, self.num_classes, rf_params, - self.verbose) + self.verbose) elif self.dtype == np.float64: rf_params64 = rf_params @@ -488,7 +489,7 @@ class RandomForestClassifier(BaseRandomForestModel, y_ptr, self.num_classes, rf_params64, - self.verbose) + self.verbose) else: raise TypeError("supports only np.float32 and np.float64 input," @@ -528,7 +529,7 @@ class RandomForestClassifier(BaseRandomForestModel, n_rows, n_cols, preds_ptr, - self.verbose) + self.verbose) elif self.dtype == np.float64: predict(handle_[0], @@ -537,7 +538,7 @@ class RandomForestClassifier(BaseRandomForestModel, n_rows, n_cols, preds_ptr, - self.verbose) + self.verbose) else: raise TypeError("supports only np.float32 and np.float64 input," " but input of type '%s' passed." @@ -765,14 +766,14 @@ class RandomForestClassifier(BaseRandomForestModel, y_ptr, n_rows, preds_ptr, - self.verbose) + self.verbose) elif self.dtype == np.float64: self.stats = score(handle_[0], rf_forest64, y_ptr, n_rows, preds_ptr, - self.verbose) + self.verbose) else: raise TypeError("supports only np.float32 and np.float64 input," " but input of type '%s' passed." 
diff --git a/python/cuml/cuml/ensemble/randomforestregressor.pyx b/python/cuml/cuml/ensemble/randomforestregressor.pyx index 96a197e5c5..5625d278ca 100644 --- a/python/cuml/cuml/ensemble/randomforestregressor.pyx +++ b/python/cuml/cuml/ensemble/randomforestregressor.pyx @@ -30,6 +30,7 @@ from cuml.internals.array import CumlArray import cuml.internals from cuml.internals.mixins import RegressorMixin +from cuml.internals.logger cimport level_enum from cuml.common.doc_utils import generate_docstring from cuml.common.doc_utils import insert_into_docstring from cuml.common import input_to_cuml_array @@ -59,7 +60,7 @@ cdef extern from "cuml/ensemble/randomforest.hpp" namespace "ML": int, float*, RF_params, - int) except + + level_enum) except + cdef void fit(handle_t& handle, RandomForestMetaData[double, double]*, @@ -68,7 +69,7 @@ cdef extern from "cuml/ensemble/randomforest.hpp" namespace "ML": int, double*, RF_params, - int) except + + level_enum) except + cdef void predict(handle_t& handle, RandomForestMetaData[float, float] *, @@ -76,7 +77,7 @@ cdef extern from "cuml/ensemble/randomforest.hpp" namespace "ML": int, int, float*, - int) except + + level_enum) except + cdef void predict(handle_t& handle, RandomForestMetaData[double, double]*, @@ -84,21 +85,21 @@ cdef extern from "cuml/ensemble/randomforest.hpp" namespace "ML": int, int, double*, - int) except + + level_enum) except + cdef RF_metrics score(handle_t& handle, RandomForestMetaData[float, float]*, float*, int, float*, - int) except + + level_enum) except + cdef RF_metrics score(handle_t& handle, RandomForestMetaData[double, double]*, double*, int, double*, - int) except + + level_enum) except + class RandomForestRegressor(BaseRandomForestModel, @@ -462,7 +463,7 @@ class RandomForestRegressor(BaseRandomForestModel, self.n_cols, y_ptr, rf_params, - self.verbose) + self.verbose) else: rf_params64 = rf_params @@ -473,7 +474,7 @@ class RandomForestRegressor(BaseRandomForestModel, self.n_cols, y_ptr, rf_params64, 
- self.verbose) + self.verbose) # make sure that the `fit` is complete before the following delete # call happens self.handle.sync() @@ -508,7 +509,7 @@ class RandomForestRegressor(BaseRandomForestModel, n_rows, n_cols, preds_ptr, - self.verbose) + self.verbose) elif self.dtype == np.float64: predict(handle_[0], @@ -517,7 +518,7 @@ class RandomForestRegressor(BaseRandomForestModel, n_rows, n_cols, preds_ptr, - self.verbose) + self.verbose) else: raise TypeError("supports only float32 and float64 input," " but input of type '%s' passed." @@ -685,7 +686,7 @@ class RandomForestRegressor(BaseRandomForestModel, y_ptr, n_rows, preds_ptr, - self.verbose) + self.verbose) elif self.dtype == np.float64: self.temp_stats = score(handle_[0], @@ -693,7 +694,7 @@ class RandomForestRegressor(BaseRandomForestModel, y_ptr, n_rows, preds_ptr, - self.verbose) + self.verbose) if self.accuracy_metric == 'median_ae': stats = self.temp_stats['median_abs_error'] diff --git a/python/cuml/cuml/experimental/linear_model/lars.pyx b/python/cuml/cuml/experimental/linear_model/lars.pyx index 4a836740c7..ddb3f69fa3 100644 --- a/python/cuml/cuml/experimental/linear_model/lars.pyx +++ b/python/cuml/cuml/experimental/linear_model/lars.pyx @@ -23,7 +23,8 @@ from cuml.internals.safe_imports import cpu_only_import np = cpu_only_import('numpy') from cuml.internals.safe_imports import gpu_only_import cp = gpu_only_import('cupy') -import cuml.internals.logger as logger +from cuml.internals import logger +from cuml.internals cimport logger import cuml.internals from libcpp cimport nullptr @@ -43,7 +44,7 @@ cdef extern from "cuml/solvers/lars.hpp" namespace "ML::Solver::Lars": const handle_t& handle, math_t* X, int n_rows, int n_cols, const math_t* y, math_t* beta, int* active_idx, math_t* alphas, int* n_active, math_t* Gram, int max_iter, math_t* coef_path, - int verbosity, int ld_X, int ld_G, math_t epsilon) except + + logger.level_enum verbosity, int ld_X, int ld_G, math_t epsilon) except + cdef void 
larsPredict[math_t]( const handle_t& handle, const math_t* X, int n_rows, int n_cols, @@ -270,13 +271,13 @@ class Lars(Base, RegressorMixin): larsFit(handle_[0], X_ptr, n_rows, self.n_cols, y_ptr, beta_ptr, active_idx_ptr, alphas_ptr, &n_active, Gram_ptr, - max_iter, coef_path_ptr, self.verbose, ld_X, + max_iter, coef_path_ptr, self.verbose, ld_X, ld_G, self.eps) else: larsFit(handle_[0], X_ptr, n_rows, self.n_cols, y_ptr, beta_ptr, active_idx_ptr, alphas_ptr, &n_active, Gram_ptr, - max_iter, coef_path_ptr, self.verbose, + max_iter, coef_path_ptr, self.verbose, ld_X, ld_G, self.eps) self.n_active = n_active self.n_iter_ = n_active diff --git a/python/cuml/cuml/internals/base.pyx b/python/cuml/cuml/internals/base.pyx index a97ff44baf..71c382cda6 100644 --- a/python/cuml/cuml/internals/base.pyx +++ b/python/cuml/cuml/internals/base.pyx @@ -226,11 +226,11 @@ class Base(TagsMixin, # 0 is most logging, and logging decreases from there. # So if the user passes an int value for logging, we convert it. if verbose is True: - self.verbose = logger.level_debug + self.verbose = logger.level_enum.debug elif verbose is False: - self.verbose = logger.level_info + self.verbose = logger.level_enum.info else: - self.verbose = verbose + self.verbose = logger.level_enum(verbose) ELSE: self.verbose = verbose diff --git a/python/cuml/cuml/internals/logger.pxd b/python/cuml/cuml/internals/logger.pxd new file mode 100644 index 0000000000..bda0374448 --- /dev/null +++ b/python/cuml/cuml/internals/logger.pxd @@ -0,0 +1,86 @@ +# +# Copyright (c) 2020-2024, NVIDIA CORPORATION. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# distutils: language = c++ + + +IF GPUBUILD == 1: + import sys + from libcpp.string cimport string + from libcpp.memory cimport make_shared, shared_ptr + from libcpp cimport bool + + cdef extern from "cuml/common/logger.hpp" namespace "ML" nogil: + + cpdef enum class level_enum: + trace + debug + info + warn + error + critical + off + n_levels + + cdef cppclass sink: + pass + + ctypedef shared_ptr[sink] sink_ptr + + # Spoof the logger as a namespace to get the sink_vector generated correctly. + cdef extern from "cuml/common/logger.hpp" namespace "ML::logger" nogil: + + cdef cppclass sink_vector: + void push_back(const sink_ptr& sink) except + + void pop_back() except + + + cdef extern from "cuml/common/logger.hpp" namespace "ML" nogil: + cdef cppclass logger: + logger(string name, string filename) except + + void set_level(level_enum log_level) except + + void set_pattern(const string& pattern) + level_enum level() except + + void flush() except + + void flush_on(level_enum level) except + + level_enum flush_level() except + + bool should_log(level_enum msg_level) except + + void log(level_enum lvl, const string& fmt, ...) 
+ const sink_vector& sinks() const + # string getPattern() const + # void flush() + + cdef logger& default_logger() except + + cdef string default_pattern() except + + + ctypedef void(*log_callback_t)(int, const char*) except * with gil + + cdef cppclass callback_sink_mt: + # TODO: Expose flushing in C++ + callback_sink_mt(log_callback_t callback) except + + # void setFlush(void(*flush)()) except + + + cdef void _log_callback(int lvl, const char * msg) with gil + cdef void _log_flush() with gil + + +cdef class LogLevelSetter: + """Internal "context manager" object for restoring previous log level""" + cdef level_enum prev_log_level + + +cdef class PatternSetter: + """Internal "context manager" object for restoring previous log pattern""" + cdef string prev_pattern diff --git a/python/cuml/cuml/internals/logger.pyx b/python/cuml/cuml/internals/logger.pyx index 6f76f924d6..a283edf460 100644 --- a/python/cuml/cuml/internals/logger.pyx +++ b/python/cuml/cuml/internals/logger.pyx @@ -1,5 +1,5 @@ # -# Copyright (c) 2020-2023, NVIDIA CORPORATION. +# Copyright (c) 2020-2024, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -17,66 +17,11 @@ # distutils: language = c++ -IF GPUBUILD == 0: - import logging +import logging IF GPUBUILD == 1: import sys - from libcpp.string cimport string - from libcpp cimport bool - - cdef extern from "cuml/common/logger.hpp" namespace "ML" nogil: - cdef cppclass Logger: - @staticmethod - Logger& get() - void setLevel(int level) - void setPattern(const string& pattern) - void setCallback(void(*callback)(int, char*)) - void setFlush(void(*flush)()) - void setCallback(void(*callback)(int, const char*) except *) - void setFlush(void(*flush)() except *) - bool shouldLogFor(int level) const - int getLevel() const - string getPattern() const - void flush() - - cdef extern from "cuml/common/logger.hpp" nogil: - void CUML_LOG_TRACE(const char* fmt, ...) - void CUML_LOG_DEBUG(const char* fmt, ...) - void CUML_LOG_INFO(const char* fmt, ...) - void CUML_LOG_WARN(const char* fmt, ...) - void CUML_LOG_ERROR(const char* fmt, ...) - void CUML_LOG_CRITICAL(const char* fmt, ...) - - cdef int CUML_LEVEL_TRACE - cdef int CUML_LEVEL_DEBUG - cdef int CUML_LEVEL_INFO - cdef int CUML_LEVEL_WARN - cdef int CUML_LEVEL_ERROR - cdef int CUML_LEVEL_CRITICAL - cdef int CUML_LEVEL_OFF - - """Enables all log messages upto and including `trace()`""" - level_trace = CUML_LEVEL_TRACE - - """Enables all log messages upto and including `debug()`""" - level_debug = CUML_LEVEL_DEBUG - - """Enables all log messages upto and including `info()`""" - level_info = CUML_LEVEL_INFO - - """Enables all log messages upto and including `warn()`""" - level_warn = CUML_LEVEL_WARN - - """Enables all log messages upto and include `error()`""" - level_error = CUML_LEVEL_ERROR - - """Enables only `critical()` messages""" - level_critical = CUML_LEVEL_CRITICAL - - """Disables all log messages""" - level_off = CUML_LEVEL_OFF cdef void _log_callback(int lvl, const char * msg) with gil: """ @@ -99,10 +44,10 @@ IF GPUBUILD == 1: sys.stdout.flush() -class LogLevelSetter: +cdef class LogLevelSetter: """Internal 
"context manager" object for restoring previous log level""" - def __init__(self, prev_log_level): + def __cinit__(self, level_enum prev_log_level): self.prev_log_level = prev_log_level def __enter__(self): @@ -110,7 +55,7 @@ class LogLevelSetter: def __exit__(self, a, b, c): IF GPUBUILD == 1: - Logger.get().setLevel(self.prev_log_level) + default_logger().set_level(self.prev_log_level) def set_level(level): @@ -133,9 +78,8 @@ def set_level(level): Parameters ---------- - level : int - Logging level to be set. \ - It must be one of cuml.internals.logger.LEVEL_* + level : level_enum + Logging level to be set. Returns ------- @@ -144,13 +88,13 @@ def set_level(level): level for a code section, as described in the example section above. """ IF GPUBUILD == 1: - cdef int prev = Logger.get().getLevel() + cdef level_enum prev = default_logger().level() context_object = LogLevelSetter(prev) - Logger.get().setLevel(level) + default_logger().set_level(level) return context_object -class PatternSetter: +cdef class PatternSetter: """Internal "context manager" object for restoring previous log pattern""" def __init__(self, prev_pattern): @@ -161,8 +105,7 @@ class PatternSetter: def __exit__(self, a, b, c): IF GPUBUILD == 1: - cdef string s = self.prev_pattern.encode("utf-8") - Logger.get().setPattern(s) + default_logger().set_pattern(self.prev_pattern) def set_pattern(pattern): @@ -195,10 +138,16 @@ def set_pattern(pattern): pattern for a code section, as described in the example section above. """ IF GPUBUILD == 1: - cdef string prev = Logger.get().getPattern() + # TODO: We probably can't implement this exact API because you can't + # get the pattern from a spdlog logger since it could be different for + # every sink (conversely, you could set because it forces every sink to + # be the same). The best we can probably do is revert to the default + # pattern. + cdef string prev = default_pattern() + # TODO: Need to cast to a Python string? 
context_object = PatternSetter(prev.decode("UTF-8")) cdef string s = pattern.encode("UTF-8") - Logger.get().setPattern(s) + default_logger().set_pattern(s) return context_object @@ -219,12 +168,31 @@ def should_log_for(level): Parameters ---------- - level : int + level : level_enum Logging level to be set. \ It must be one of cuml.common.logger.level_* """ IF GPUBUILD == 1: - return Logger.get().shouldLogFor(level) + return default_logger().should_log(level) + + +def _log(level_enum lvl, msg, default_func): + """ + Internal function to log a message at a given level. + + Parameters + ---------- + lvl : int + Logging level to be set. \ + It must be one of cuml.common.logger.level_* + msg : str + Message to be logged. + """ + IF GPUBUILD == 1: + cdef string s = msg.encode("UTF-8") + default_logger().log(lvl, s) + ELSE: + default_func(msg) def trace(msg): @@ -243,11 +211,8 @@ def trace(msg): msg : str Message to be logged. """ - IF GPUBUILD == 1: - cdef string s = msg.encode("UTF-8") - CUML_LOG_TRACE(s.c_str()) - ELSE: - logging.debug(msg) + # No trace level in Python so we use the closest thing, debug. + _log(level_enum.trace, msg, logging.debug) def debug(msg): @@ -266,11 +231,7 @@ def debug(msg): msg : str Message to be logged. """ - IF GPUBUILD == 1: - cdef string s = msg.encode("UTF-8") - CUML_LOG_DEBUG(s.c_str()) - ELSE: - logging.debug(msg) + _log(level_enum.debug, msg, logging.debug) def info(msg): @@ -289,11 +250,7 @@ def info(msg): msg : str Message to be logged. """ - IF GPUBUILD == 1: - cdef string s = msg.encode("UTF-8") - CUML_LOG_INFO(s.c_str()) - ELSE: - logging.info(msg) + _log(level_enum.info, msg, logging.info) def warn(msg): @@ -312,11 +269,7 @@ def warn(msg): msg : str Message to be logged. """ - IF GPUBUILD == 1: - cdef string s = msg.encode("UTF-8") - CUML_LOG_WARN(s.c_str()) - ELSE: - logging.warning(msg) + _log(level_enum.warn, msg, logging.warn) def error(msg): @@ -335,11 +288,7 @@ def error(msg): msg : str Message to be logged. 
""" - IF GPUBUILD == 1: - cdef string s = msg.encode("UTF-8") - CUML_LOG_ERROR(s.c_str()) - ELSE: - logging.error(msg) + _log(level_enum.error, msg, logging.error) def critical(msg): @@ -358,11 +307,7 @@ def critical(msg): msg : str Message to be logged. """ - IF GPUBUILD == 1: - cdef string s = msg.encode("UTF-8") - CUML_LOG_CRITICAL(s.c_str()) - ELSE: - logging.critical(msg) + _log(level_enum.critical, msg, logging.critical) def flush(): @@ -370,10 +315,11 @@ def flush(): Flush the logs. """ IF GPUBUILD == 1: - Logger.get().flush() + default_logger().flush() IF GPUBUILD == 1: # Set callback functions to handle redirected sys.stdout in Python - Logger.get().setCallback(_log_callback) - Logger.get().setFlush(_log_flush) + default_logger().sinks().push_back( make_shared[callback_sink_mt](_log_callback)) + # TODO: Expose flushing in the callback sink. + # default_logger().setFlush(_log_flush) diff --git a/python/cuml/cuml/manifold/simpl_set.pyx b/python/cuml/cuml/manifold/simpl_set.pyx index b0be2d5de7..20e1207bf6 100644 --- a/python/cuml/cuml/manifold/simpl_set.pyx +++ b/python/cuml/cuml/manifold/simpl_set.pyx @@ -165,7 +165,7 @@ def fuzzy_simplicial_set(X, umap_params.p = 2.0 else: umap_params.p = metric_kwds.get("p", 2.0) - umap_params.verbosity = verbose + umap_params.verbosity = verbose X_m, _, _, _ = \ input_to_cuml_array(X, @@ -366,7 +366,7 @@ def simplicial_set_embedding( umap_params.target_metric = MetricType.CATEGORICAL umap_params.target_weight = output_metric_kwds['p'] \ if 'p' in output_metric_kwds else 0.5 - umap_params.verbosity = verbose + umap_params.verbosity = verbose X_m, _, _, _ = \ input_to_cuml_array(data, diff --git a/python/cuml/cuml/manifold/t_sne.pyx b/python/cuml/cuml/manifold/t_sne.pyx index 248a25b933..31a0fa65d7 100644 --- a/python/cuml/cuml/manifold/t_sne.pyx +++ b/python/cuml/cuml/manifold/t_sne.pyx @@ -31,7 +31,8 @@ from cuml.internals.base import UniversalBase from pylibraft.common.handle cimport handle_t from 
cuml.internals.api_decorators import device_interop_preparation from cuml.internals.api_decorators import enable_device_interop -import cuml.internals.logger as logger +from cuml.internals import logger +from cuml.internals cimport logger from cuml.internals.array import CumlArray @@ -82,7 +83,7 @@ cdef extern from "cuml/manifold/tsne.h" namespace "ML": float pre_momentum, float post_momentum, long long random_state, - int verbosity, + logger.level_enum verbosity, TSNE_INIT init, bool square_distances, DistanceType metric, @@ -511,7 +512,7 @@ class TSNE(UniversalBase, self.pre_learning_rate = max(n / 3.0, 1) self.post_learning_rate = self.pre_learning_rate self.early_exaggeration = 24.0 if n > 10000 else 12.0 - if logger.should_log_for(logger.level_debug): + if logger.should_log_for(logger.level_enum.debug): logger.debug("New n_neighbors = {}, learning_rate = {}, " "exaggeration = {}" .format(self.n_neighbors, self.pre_learning_rate, @@ -615,7 +616,7 @@ class TSNE(UniversalBase, params.pre_momentum = self.pre_momentum params.post_momentum = self.post_momentum params.random_state = seed - params.verbosity = self.verbose + params.verbosity = self.verbose params.square_distances = self.square_distances params.algorithm = algo diff --git a/python/cuml/cuml/manifold/umap.pyx b/python/cuml/cuml/manifold/umap.pyx index ea62fa4430..a4708648b1 100644 --- a/python/cuml/cuml/manifold/umap.pyx +++ b/python/cuml/cuml/manifold/umap.pyx @@ -469,7 +469,7 @@ class UMAP(UniversalBase, umap_params.repulsion_strength = cls.repulsion_strength umap_params.negative_sample_rate = cls.negative_sample_rate umap_params.transform_queue_size = cls.transform_queue_size - umap_params.verbosity = cls.verbose + umap_params.verbosity = cls.verbose umap_params.a = cls.a umap_params.b = cls.b if cls.init == "spectral": diff --git a/python/cuml/cuml/manifold/umap_utils.pxd b/python/cuml/cuml/manifold/umap_utils.pxd index edf8039053..e2df77a22f 100644 --- a/python/cuml/cuml/manifold/umap_utils.pxd +++ 
b/python/cuml/cuml/manifold/umap_utils.pxd @@ -24,6 +24,7 @@ from libc.stdint cimport uint64_t, uintptr_t, int64_t from libcpp cimport bool from libcpp.memory cimport shared_ptr from cuml.metrics.distance_type cimport DistanceType +from cuml.internals.logger cimport level_enum cdef extern from "cuml/manifold/umapparams.h" namespace "ML::UMAPParams": @@ -61,7 +62,7 @@ cdef extern from "cuml/manifold/umapparams.h" namespace "ML": float repulsion_strength, int negative_sample_rate, float transform_queue_size, - int verbosity, + level_enum verbosity, float a, float b, float initial_alpha, diff --git a/python/cuml/cuml/svm/linear.pyx b/python/cuml/cuml/svm/linear.pyx index c5ff47cde9..9cad6898c8 100644 --- a/python/cuml/cuml/svm/linear.pyx +++ b/python/cuml/cuml/svm/linear.pyx @@ -30,6 +30,7 @@ from cuml.internals.base_helpers import BaseMetaClass from cuml.common.array_descriptor import CumlArrayDescriptor from cuml.internals.array import CumlArray from cuml.internals.base import Base +from cuml.internals.logger cimport level_enum from pylibraft.common.handle cimport handle_t from pylibraft.common.interruptible import cuda_interruptible from cuml.common import input_to_cuml_array @@ -69,7 +70,7 @@ cdef extern from "cuml/svm/linear.hpp" namespace "ML::SVM" nogil: int max_iter int linesearch_max_iter int lbfgs_memory - int verbose + level_enum verbose double C double grad_tol double change_tol diff --git a/python/cuml/cuml/svm/svc.pyx b/python/cuml/cuml/svm/svc.pyx index 290f5bc2a2..283ce34bbe 100644 --- a/python/cuml/cuml/svm/svc.pyx +++ b/python/cuml/cuml/svm/svc.pyx @@ -32,7 +32,8 @@ import cuml.internals from cuml.internals.array import CumlArray from cuml.internals.mixins import ClassifierMixin from cuml.common.doc_utils import generate_docstring -from cuml.internals.logger import warn +from cuml.internals import logger +from cuml.internals cimport logger from pylibraft.common.handle cimport handle_t from pylibraft.common.interruptible import cuda_interruptible from 
cuml.common import input_to_cuml_array, input_to_host_array, input_to_host_array_with_sparse_support @@ -76,7 +77,7 @@ cdef extern from "cuml/svm/svm_parameter.h" namespace "ML::SVM": int max_iter int nochange_steps double tol - int verbosity + logger.level_enum verbosity double epsilon SvmType svmType @@ -352,7 +353,7 @@ class SVC(SVMBase, self.probability = probability self.random_state = random_state if probability and random_state is not None: - warn("Random state is currently ignored by probabilistic SVC") + logger.warn("Random state is currently ignored by probabilistic SVC") self.class_weight = class_weight self.svmType = C_SVC self.multiclass_strategy = multiclass_strategy @@ -404,8 +405,8 @@ class SVC(SVMBase, def _fit_multiclass(self, X, y, sample_weight) -> "SVC": if sample_weight is not None: - warn("Sample weights are currently ignored for multi class " - "classification") + logger.warn("Sample weights are currently ignored for multi class " + "classification") if not has_sklearn(): raise RuntimeError("Scikit-learn is needed to fit multiclass SVM") diff --git a/python/cuml/cuml/svm/svm_base.pyx b/python/cuml/cuml/svm/svm_base.pyx index 9b68147f2b..11c523a7fd 100644 --- a/python/cuml/cuml/svm/svm_base.pyx +++ b/python/cuml/cuml/svm/svm_base.pyx @@ -34,7 +34,8 @@ from pylibraft.common.handle cimport handle_t from cuml.common import input_to_cuml_array from cuml.internals.input_utils import determine_array_type_full from cuml.common import using_output_type -from cuml.internals.logger import warn +from cuml.internals import logger +from cuml.internals cimport logger from cuml.internals.mixins import FMajorInputTagMixin from cuml.internals.array_sparse import SparseCumlArray, SparseCumlArrayInput from libcpp cimport bool @@ -68,7 +69,7 @@ cdef extern from "cuml/svm/svm_parameter.h" namespace "ML::SVM": int max_iter int nochange_steps double tol - int verbosity + logger.level_enum verbosity double epsilon SvmType svmType @@ -263,9 +264,9 @@ class 
SVMBase(Base, and not getattr(type(self), "_linear_kernel_warned", False): setattr(type(self), "_linear_kernel_warned", True) cname = type(self).__name__ - warn(f'{cname} with the linear kernel can be much faster using ' - f'the specialized solver provided by Linear{cname}. Consider ' - f'switching to Linear{cname} if tranining takes too long.') + logger.warn(f'{cname} with the linear kernel can be much faster using ' + f'the specialized solver provided by Linear{cname}. Consider ' + f'switching to Linear{cname} if tranining takes too long.') def __del__(self): self._dealloc() diff --git a/python/cuml/cuml/svm/svr.pyx b/python/cuml/cuml/svm/svr.pyx index a2527f4358..9dc61b7635 100644 --- a/python/cuml/cuml/svm/svr.pyx +++ b/python/cuml/cuml/svm/svr.pyx @@ -1,4 +1,4 @@ -# Copyright (c) 2019-2023, NVIDIA CORPORATION. +# Copyright (c) 2019-2024, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -34,6 +34,7 @@ from pylibraft.common.handle cimport handle_t from cuml.common import input_to_cuml_array from libcpp cimport nullptr from cuml.svm.svm_base import SVMBase +from cuml.internals.logger cimport level_enum cdef extern from "cuml/matrix/kernelparams.h" namespace "MLCommon::Matrix": enum KernelType: @@ -56,7 +57,7 @@ cdef extern from "cuml/svm/svm_parameter.h" namespace "ML::SVM": int max_iter int nochange_steps double tol - int verbosity + level_enum verbosity double epsilon SvmType svmType From 4a3a18c5982d1112602926f1c1da96d9d0affa1e Mon Sep 17 00:00:00 2001 From: Vyas Ramasubramani Date: Fri, 6 Dec 2024 23:25:27 +0000 Subject: [PATCH 08/32] Ensure new level_enum is used everywhere it should be to specify log levels --- docs/source/api.rst | 14 ++++---- python/cuml/cuml/common/kernel_utils.py | 2 +- python/cuml/cuml/dask/common/dask_df_utils.py | 4 +-- python/cuml/cuml/dask/common/input_utils.py | 2 +- .../cuml/cuml/experimental/accel/__init__.py | 2 +- python/cuml/cuml/explainer/base.pyx | 8 ++--- python/cuml/cuml/internals/base.pyx | 17 +++------- python/cuml/cuml/internals/logger.pyx | 16 +++++----- .../cuml/linear_model/logistic_regression.pyx | 15 ++++----- .../neighbors/kneighbors_classifier_mg.pyx | 6 ++-- .../neighbors/kneighbors_regressor_mg.pyx | 4 +-- .../cuml/neighbors/nearest_neighbors_mg.pyx | 4 +-- python/cuml/cuml/tests/test_hdbscan.py | 32 +++++++++---------- python/cuml/cuml/tests/test_kmeans.py | 2 +- python/cuml/cuml/tests/test_logger.py | 12 +++---- python/cuml/cuml/tests/test_metrics.py | 2 +- .../cuml/cuml/tests/test_nearest_neighbors.py | 4 +-- python/cuml/cuml/tests/test_umap.py | 4 +-- 18 files changed, 70 insertions(+), 80 deletions(-) diff --git a/docs/source/api.rst b/docs/source/api.rst index a3a2ab73cc..44a29563f3 100644 --- a/docs/source/api.rst +++ b/docs/source/api.rst @@ -91,25 +91,25 @@ they are: - cuml.common.logger value - Verbosity level * - 0 - - cuml.common.logger.level_off + - 
cuml.common.logger.level_enum.off - Disables all log messages * - 1 - - cuml.common.logger.level_critical + - cuml.common.logger.level_enum.critical - Enables only critical messages * - 2 - - cuml.common.logger.level_error + - cuml.common.logger.level_enum.error - Enables all messages up to and including errors. * - 3 - - cuml.common.logger.level_warn + - cuml.common.logger.level_enum.warn - Enables all messages up to and including warnings. * - 4 or False - - cuml.common.logger.level_info + - cuml.common.logger.level_enum.info - Enables all messages up to and including information messages. * - 5 or True - - cuml.common.logger.level_debug + - cuml.common.logger.level_enum.debug - Enables all messages up to and including debug messages. * - 6 - - cuml.common.logger.level_trace + - cuml.common.logger.level_enum.trace - Enables all messages up to and including trace messages. diff --git a/python/cuml/cuml/common/kernel_utils.py b/python/cuml/cuml/common/kernel_utils.py index 89a861060e..5b8f3a03a7 100644 --- a/python/cuml/cuml/common/kernel_utils.py +++ b/python/cuml/cuml/common/kernel_utils.py @@ -111,7 +111,7 @@ def cuda_kernel_factory(nvrtc_kernel_str, dtypes, kernel_name=None): nvrtc_kernel_str, ) - if logger.should_log_for(logger.level_debug): + if logger.should_log_for(logger.level_enum.debug): logger.debug(str(nvrtc_kernel_str)) return cp.RawKernel(nvrtc_kernel_str, kernel_name) diff --git a/python/cuml/cuml/dask/common/dask_df_utils.py b/python/cuml/cuml/dask/common/dask_df_utils.py index d608232ffa..199a08cac6 100644 --- a/python/cuml/cuml/dask/common/dask_df_utils.py +++ b/python/cuml/cuml/dask/common/dask_df_utils.py @@ -1,4 +1,4 @@ -# Copyright (c) 2019-2023, NVIDIA CORPORATION. +# Copyright (c) 2019-2024, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -39,7 +39,7 @@ def to_dask_cudf(futures, client=None): c = default_client() if client is None else client # Convert a list of futures containing dfs back into a dask_cudf dfs = [d for d in futures if d.type != type(None)] # NOQA - if logger.should_log_for(logger.level_debug): + if logger.should_log_for(logger.level_enum.debug): logger.debug("to_dask_cudf dfs=%s" % str(dfs)) meta = c.submit(get_meta, dfs[0]) meta_local = meta.result() diff --git a/python/cuml/cuml/dask/common/input_utils.py b/python/cuml/cuml/dask/common/input_utils.py index 01c1a4e55f..d6ab867e6d 100644 --- a/python/cuml/cuml/dask/common/input_utils.py +++ b/python/cuml/cuml/dask/common/input_utils.py @@ -239,7 +239,7 @@ def _to_dask_cudf(futures, client=None): c = default_client() if client is None else client # Convert a list of futures containing dfs back into a dask_cudf dfs = [d for d in futures if d.type != type(None)] # NOQA - if logger.should_log_for(logger.level_debug): + if logger.should_log_for(logger.level_enum.debug): logger.debug("to_dask_cudf dfs=%s" % str(dfs)) meta_future = c.submit(_get_meta, dfs[0], pure=False) meta = meta_future.result() diff --git a/python/cuml/cuml/experimental/accel/__init__.py b/python/cuml/cuml/experimental/accel/__init__.py index cd3c6abf51..69f53b7bb1 100644 --- a/python/cuml/cuml/experimental/accel/__init__.py +++ b/python/cuml/cuml/experimental/accel/__init__.py @@ -33,7 +33,7 @@ def _install_for_library(library_name): def install(): """Enable cuML Accelerator Mode.""" - logger.set_level(logger.level_info) + logger.set_level(logger.level_enum.info) logger.set_pattern("%v") logger.info("cuML: Installing experimental accelerator...") diff --git a/python/cuml/cuml/explainer/base.pyx b/python/cuml/cuml/explainer/base.pyx index 33dd8da68f..d29aee0f1d 100644 --- a/python/cuml/cuml/explainer/base.pyx +++ b/python/cuml/cuml/explainer/base.pyx @@ -1,5 +1,5 @@ # -# Copyright (c) 2020-2023, NVIDIA CORPORATION. +# Copyright (c) 2020-2024, NVIDIA CORPORATION. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -125,13 +125,13 @@ class SHAPBase(): output_type=None): if verbose is True: - self.verbose = logger.level_debug + self.verbose = logger.level_enum.debug elif verbose is False: - self.verbose = logger.level_error + self.verbose = logger.level_enum.error else: self.verbose = verbose - if self.verbose >= logger.level_debug: + if self.verbose >= logger.level_enum.debug: self.time_performance = True else: self.time_performance = False diff --git a/python/cuml/cuml/internals/base.pyx b/python/cuml/cuml/internals/base.pyx index 71c382cda6..991e898ede 100644 --- a/python/cuml/cuml/internals/base.pyx +++ b/python/cuml/cuml/internals/base.pyx @@ -146,9 +146,8 @@ class Base(TagsMixin, run different models concurrently in different streams by creating handles in several streams. If it is None, a new one is created. - verbose : int or boolean, default=False - Sets logging level. It must be one of `cuml.common.logger.level_*`. - See :ref:`verbosity-levels` for more info. + verbose : level_enum + Sets logging level. See :ref:`verbosity-levels` for more info. output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \ 'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None Return results and set estimator attributes to the indicated output @@ -208,7 +207,7 @@ class Base(TagsMixin, def __init__(self, *, handle=None, - verbose=False, + verbose=logger.level_enum.info, output_type=None, output_mem_type=None): """ @@ -222,15 +221,7 @@ class Base(TagsMixin, self.handle = None IF GPUBUILD == 1: - # Internally, self.verbose follows the spdlog/c++ standard of - # 0 is most logging, and logging decreases from there. - # So if the user passes an int value for logging, we convert it. 
- if verbose is True: - self.verbose = logger.level_enum.debug - elif verbose is False: - self.verbose = logger.level_enum.info - else: - self.verbose = logger.level_enum(verbose) + self.verbose = logger.level_enum(verbose) ELSE: self.verbose = verbose diff --git a/python/cuml/cuml/internals/logger.pyx b/python/cuml/cuml/internals/logger.pyx index a283edf460..37b51dc182 100644 --- a/python/cuml/cuml/internals/logger.pyx +++ b/python/cuml/cuml/internals/logger.pyx @@ -70,10 +70,10 @@ def set_level(level): # regular usage of setting a logging level for all subsequent logs # in this case, it will enable all logs upto and including `info()` - logger.set_level(logger.level_info) + logger.set_level(logger.level_enum.info) # in case one wants to temporarily set the log level for a code block - with logger.set_level(logger.level_debug) as _: + with logger.set_level(logger.level_enum.debug) as _: logger.debug("Hello world!") Parameters @@ -161,7 +161,7 @@ def should_log_for(level): .. code-block:: python - if logger.should_log_for(level_info): + if logger.should_log_for(level_enum.info): # which could waste precious CPU cycles my_message = construct_message() logger.info(my_message) @@ -169,8 +169,7 @@ def should_log_for(level): Parameters ---------- level : level_enum - Logging level to be set. \ - It must be one of cuml.common.logger.level_* + Logging level to be set. """ IF GPUBUILD == 1: return default_logger().should_log(level) @@ -182,11 +181,12 @@ def _log(level_enum lvl, msg, default_func): Parameters ---------- - lvl : int - Logging level to be set. \ - It must be one of cuml.common.logger.level_* + lvl : level_enum + Logging level to be set. msg : str Message to be logged. + default_func : function + Default logging function to be used if GPU build is disabled. 
""" IF GPUBUILD == 1: cdef string s = msg.encode("UTF-8") diff --git a/python/cuml/cuml/linear_model/logistic_regression.pyx b/python/cuml/cuml/linear_model/logistic_regression.pyx index c9ad443750..e6e9b1ba16 100644 --- a/python/cuml/cuml/linear_model/logistic_regression.pyx +++ b/python/cuml/cuml/linear_model/logistic_regression.pyx @@ -141,9 +141,8 @@ class LogisticRegression(UniversalBase, linesearch_max_iter : int (default = 50) Max number of linesearch iterations per outer iteration used in the lbfgs and owl QN solvers. - verbose : int or boolean, default=False - Sets logging level. It must be one of `cuml.common.logger.level_*`. - See :ref:`verbosity-levels` for more info. + verbose : level_enum + Sets logging level. See :ref:`verbosity-levels` for more info. l1_ratio : float or None, optional (default=None) The Elastic-Net mixing parameter, with `0 <= l1_ratio <= 1` solver : 'qn' (default='qn') @@ -277,7 +276,7 @@ class LogisticRegression(UniversalBase, handle=self.handle, ) - if logger.should_log_for(logger.level_debug): + if logger.should_log_for(logger.level_enum.debug): self.verb_prefix = "CY::" logger.debug(self.verb_prefix + "Estimator parameters:") logger.debug(pprint.pformat(self.__dict__)) @@ -353,24 +352,24 @@ class LogisticRegression(UniversalBase, else: loss = "sigmoid" - if logger.should_log_for(logger.level_debug): + if logger.should_log_for(logger.level_enum.debug): logger.debug(self.verb_prefix + "Setting loss to " + str(loss)) self.solver_model.loss = loss - if logger.should_log_for(logger.level_debug): + if logger.should_log_for(logger.level_enum.debug): logger.debug(self.verb_prefix + "Calling QN fit " + str(loss)) self.solver_model.fit(X, y_m, sample_weight=sample_weight, convert_dtype=convert_dtype) # coefficients and intercept are contained in the same array - if logger.should_log_for(logger.level_debug): + if logger.should_log_for(logger.level_enum.debug): logger.debug( self.verb_prefix + "Setting coefficients " + str(loss) ) - if 
logger.should_log_for(logger.level_trace): + if logger.should_log_for(logger.level_enum.trace): with using_output_type("cupy"): logger.trace(self.verb_prefix + "Coefficients: " + str(self.solver_model.coef_)) diff --git a/python/cuml/cuml/neighbors/kneighbors_classifier_mg.pyx b/python/cuml/cuml/neighbors/kneighbors_classifier_mg.pyx index f319a6adac..f27e080381 100644 --- a/python/cuml/cuml/neighbors/kneighbors_classifier_mg.pyx +++ b/python/cuml/cuml/neighbors/kneighbors_classifier_mg.pyx @@ -1,5 +1,5 @@ # -# Copyright (c) 2020-2023, NVIDIA CORPORATION. +# Copyright (c) 2020-2024, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -153,7 +153,7 @@ class KNeighborsClassifierMG(NearestNeighborsMG): cdef handle_t* handle_ = self.handle.getHandle() - is_verbose = logger.should_log_for(logger.level_debug) + is_verbose = logger.should_log_for(logger.level_enum.debug) knn_classify( handle_[0], out_result_local_parts, @@ -265,7 +265,7 @@ class KNeighborsClassifierMG(NearestNeighborsMG): p_cai.ptr) cdef handle_t* handle_ = self.handle.getHandle() - is_verbose = logger.should_log_for(logger.level_debug) + is_verbose = logger.should_log_for(logger.level_enum.debug) # Launch distributed operations knn_classify( diff --git a/python/cuml/cuml/neighbors/kneighbors_regressor_mg.pyx b/python/cuml/cuml/neighbors/kneighbors_regressor_mg.pyx index 48accf5730..7c49b5576d 100644 --- a/python/cuml/cuml/neighbors/kneighbors_regressor_mg.pyx +++ b/python/cuml/cuml/neighbors/kneighbors_regressor_mg.pyx @@ -1,5 +1,5 @@ # -# Copyright (c) 2020-2023, NVIDIA CORPORATION. +# Copyright (c) 2020-2024, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -127,7 +127,7 @@ class KNeighborsRegressorMG(NearestNeighborsMG): o_cai.ptr, n_rows * n_outputs)) cdef handle_t* handle_ = self.handle.getHandle() - is_verbose = logger.should_log_for(logger.level_debug) + is_verbose = logger.should_log_for(logger.level_enum.debug) # Launch distributed operations knn_regress( diff --git a/python/cuml/cuml/neighbors/nearest_neighbors_mg.pyx b/python/cuml/cuml/neighbors/nearest_neighbors_mg.pyx index 5596c1e2f6..85dafd89fd 100644 --- a/python/cuml/cuml/neighbors/nearest_neighbors_mg.pyx +++ b/python/cuml/cuml/neighbors/nearest_neighbors_mg.pyx @@ -1,5 +1,5 @@ # -# Copyright (c) 2020-2023, NVIDIA CORPORATION. +# Copyright (c) 2020-2024, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -125,7 +125,7 @@ class NearestNeighborsMG(NearestNeighbors): result = type(self).alloc_local_output(local_query_rows, self.n_neighbors) cdef handle_t* handle_ = self.handle.getHandle() - is_verbose = logger.should_log_for(logger.level_debug) + is_verbose = logger.should_log_for(logger.level_enum.debug) # Launch distributed operations knn( diff --git a/python/cuml/cuml/tests/test_hdbscan.py b/python/cuml/cuml/tests/test_hdbscan.py index 0a9a3a6382..7fd1eb0635 100644 --- a/python/cuml/cuml/tests/test_hdbscan.py +++ b/python/cuml/cuml/tests/test_hdbscan.py @@ -1,4 +1,4 @@ -# Copyright (c) 2021-2023, NVIDIA CORPORATION. +# Copyright (c) 2021-2024, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -185,7 +185,7 @@ def test_hdbscan_blobs( ) cuml_agg = HDBSCAN( - verbose=logger.level_info, + verbose=logger.level_enum.info, allow_single_cluster=allow_single_cluster, min_samples=min_samples, max_cluster_size=max_cluster_size, @@ -251,7 +251,7 @@ def test_hdbscan_sklearn_datasets( X = test_datasets.data cuml_agg = HDBSCAN( - verbose=logger.level_info, + verbose=logger.level_enum.info, allow_single_cluster=allow_single_cluster, gen_min_span_tree=True, min_samples=min_samples, @@ -309,7 +309,7 @@ def test_hdbscan_sklearn_extract_clusters( ): X = test_datasets.data cuml_agg = HDBSCAN( - verbose=logger.level_info, + verbose=logger.level_enum.info, allow_single_cluster=allow_single_cluster, gen_min_span_tree=True, min_samples=min_samples, @@ -365,7 +365,7 @@ def test_hdbscan_cluster_patterns( X, y = get_pattern(dataset, nrows)[0] cuml_agg = HDBSCAN( - verbose=logger.level_info, + verbose=logger.level_enum.info, allow_single_cluster=allow_single_cluster, min_samples=min_samples, max_cluster_size=max_cluster_size, @@ -428,7 +428,7 @@ def test_hdbscan_cluster_patterns_extract_clusters( X, y = get_pattern(dataset, nrows)[0] cuml_agg = HDBSCAN( - verbose=logger.level_info, + verbose=logger.level_enum.info, allow_single_cluster=allow_single_cluster, min_samples=min_samples, max_cluster_size=max_cluster_size, @@ -572,7 +572,7 @@ def test_all_points_membership_vectors_blobs( ) cuml_agg = HDBSCAN( - verbose=logger.level_info, + verbose=logger.level_enum.info, allow_single_cluster=allow_single_cluster, max_cluster_size=max_cluster_size, min_cluster_size=min_cluster_size, @@ -628,7 +628,7 @@ def test_all_points_membership_vectors_moons( X, y = datasets.make_moons(n_samples=nrows, noise=0.05, random_state=42) cuml_agg = HDBSCAN( - verbose=logger.level_info, + verbose=logger.level_enum.info, min_samples=min_samples, allow_single_cluster=allow_single_cluster, max_cluster_size=max_cluster_size, @@ -686,7 +686,7 @@ def test_all_points_membership_vectors_circles( ) cuml_agg = 
HDBSCAN( - verbose=logger.level_info, + verbose=logger.level_enum.info, min_samples=min_samples, allow_single_cluster=allow_single_cluster, max_cluster_size=max_cluster_size, @@ -762,7 +762,7 @@ def test_approximate_predict_blobs( ) cuml_agg = HDBSCAN( - verbose=logger.level_info, + verbose=logger.level_enum.info, allow_single_cluster=allow_single_cluster, max_cluster_size=max_cluster_size, min_cluster_size=min_cluster_size, @@ -823,7 +823,7 @@ def test_approximate_predict_moons( X_test = X[nrows:] cuml_agg = HDBSCAN( - verbose=logger.level_info, + verbose=logger.level_enum.info, allow_single_cluster=allow_single_cluster, min_samples=min_samples, max_cluster_size=max_cluster_size, @@ -890,7 +890,7 @@ def test_approximate_predict_circles( X_test = X[nrows:] cuml_agg = HDBSCAN( - verbose=logger.level_info, + verbose=logger.level_enum.info, allow_single_cluster=allow_single_cluster, min_samples=min_samples, max_cluster_size=max_cluster_size, @@ -958,7 +958,7 @@ def test_approximate_predict_digits( ) cuml_agg = HDBSCAN( - verbose=logger.level_info, + verbose=logger.level_enum.info, allow_single_cluster=allow_single_cluster, min_samples=min_samples, max_cluster_size=max_cluster_size, @@ -1032,7 +1032,7 @@ def test_membership_vector_blobs( ) cuml_agg = HDBSCAN( - verbose=logger.level_info, + verbose=logger.level_enum.info, allow_single_cluster=allow_single_cluster, max_cluster_size=max_cluster_size, min_cluster_size=min_cluster_size, @@ -1098,7 +1098,7 @@ def test_membership_vector_moons( X_test = X[nrows:] cuml_agg = HDBSCAN( - verbose=logger.level_info, + verbose=logger.level_enum.info, min_samples=min_samples, allow_single_cluster=allow_single_cluster, max_cluster_size=max_cluster_size, @@ -1164,7 +1164,7 @@ def test_membership_vector_circles( X_test = X[nrows:] cuml_agg = HDBSCAN( - verbose=logger.level_info, + verbose=logger.level_enum.info, min_samples=min_samples, allow_single_cluster=allow_single_cluster, max_cluster_size=max_cluster_size, diff --git 
a/python/cuml/cuml/tests/test_kmeans.py b/python/cuml/cuml/tests/test_kmeans.py index b05a762177..e622657a2d 100644 --- a/python/cuml/cuml/tests/test_kmeans.py +++ b/python/cuml/cuml/tests/test_kmeans.py @@ -62,7 +62,7 @@ def get_data_consistency_test(): @pytest.fixture def random_state(): random_state = random.randint(0, 10**6) - with logger.set_level(logger.level_debug): + with logger.set_level(logger.level_enum.debug): logger.debug("Random seed: {}".format(random_state)) return random_state diff --git a/python/cuml/cuml/tests/test_logger.py b/python/cuml/cuml/tests/test_logger.py index c478a9fdc1..12e116fc82 100644 --- a/python/cuml/cuml/tests/test_logger.py +++ b/python/cuml/cuml/tests/test_logger.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2020-2023, NVIDIA CORPORATION. +# Copyright (c) 2020-2024, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -27,9 +27,9 @@ def test_logger(): logger.error("This is a error message") logger.critical("This is a critical message") - with logger.set_level(logger.level_warn): - assert logger.should_log_for(logger.level_warn) - assert not logger.should_log_for(logger.level_info) + with logger.set_level(logger.level_enum.warn): + assert logger.should_log_for(logger.level_enum.warn) + assert not logger.should_log_for(logger.level_enum.info) with logger.set_pattern("%v"): logger.info("This is an info message") @@ -38,7 +38,7 @@ def test_logger(): def test_redirected_logger(): new_stdout = StringIO() - with logger.set_level(logger.level_trace): + with logger.set_level(logger.level_enum.trace): # We do not test trace because CUML_LOG_TRACE is not compiled by # default test_msg = "This is a debug message" @@ -76,7 +76,7 @@ def test_log_flush(): stdout_buffer = BytesIO() new_stdout = TextIOWrapper(stdout_buffer) - with logger.set_level(logger.level_trace): + with logger.set_level(logger.level_enum.trace): test_msg = "This is a debug 
message" with redirect_stdout(new_stdout): logger.debug(test_msg) diff --git a/python/cuml/cuml/tests/test_metrics.py b/python/cuml/cuml/tests/test_metrics.py index 2189dcdc41..c99d57e666 100644 --- a/python/cuml/cuml/tests/test_metrics.py +++ b/python/cuml/cuml/tests/test_metrics.py @@ -109,7 +109,7 @@ @pytest.fixture(scope="module") def random_state(): random_state = random.randint(0, 10**6) - with logger.set_level(logger.level_debug): + with logger.set_level(logger.level_enum.debug): logger.debug("Random seed: {}".format(random_state)) return random_state diff --git a/python/cuml/cuml/tests/test_nearest_neighbors.py b/python/cuml/cuml/tests/test_nearest_neighbors.py index aa612b7763..bf91951295 100644 --- a/python/cuml/cuml/tests/test_nearest_neighbors.py +++ b/python/cuml/cuml/tests/test_nearest_neighbors.py @@ -617,14 +617,14 @@ def test_nearest_neighbors_sparse( a = a.astype("bool").astype("float32") b = b.astype("bool").astype("float32") - logger.set_level(logger.level_debug) + logger.set_level(logger.level_enum.debug) nn = cuKNN( metric=metric, p=2.0, n_neighbors=n_neighbors, algorithm="brute", output_type="numpy", - verbose=logger.level_debug, + verbose=logger.level_enum.debug, algo_params={ "batch_size_index": batch_size_index, "batch_size_query": batch_size_query, diff --git a/python/cuml/cuml/tests/test_umap.py b/python/cuml/cuml/tests/test_umap.py index 41f47bdaee..de32c1d908 100644 --- a/python/cuml/cuml/tests/test_umap.py +++ b/python/cuml/cuml/tests/test_umap.py @@ -197,7 +197,7 @@ def test_umap_transform_on_digits_sparse( fitter = cuUMAP( n_neighbors=15, - verbose=logger.level_info, + verbose=logger.level_enum.info, init="random", n_epochs=0, min_dist=0.01, @@ -236,7 +236,7 @@ def test_umap_transform_on_digits(target_metric): fitter = cuUMAP( n_neighbors=15, - verbose=logger.level_debug, + verbose=logger.level_enum.debug, init="random", n_epochs=0, min_dist=0.01, From 68ad0bd4fdfbc8b4a196875fb3467ee56ddee1ae Mon Sep 17 00:00:00 2001 From: Vyas 
Ramasubramani Date: Sat, 7 Dec 2024 00:16:56 +0000 Subject: [PATCH 09/32] Ensure that verbosity is consistently set using the level_enum --- python/cuml/cuml/cluster/agglomerative.pyx | 8 ++--- python/cuml/cuml/cluster/dbscan.pyx | 8 ++--- python/cuml/cuml/cluster/hdbscan/hdbscan.pyx | 8 ++--- python/cuml/cuml/cluster/kmeans.pyx | 10 +++--- python/cuml/cuml/dask/cluster/dbscan.py | 10 +++--- python/cuml/cuml/dask/cluster/kmeans.py | 10 +++--- python/cuml/cuml/dask/decomposition/base.py | 12 +++++-- python/cuml/cuml/dask/decomposition/pca.py | 11 ++++--- python/cuml/cuml/dask/decomposition/tsvd.py | 1 + .../dask/ensemble/randomforestclassifier.py | 5 +-- .../dask/ensemble/randomforestregressor.py | 5 +-- .../text/tfidf_transformer.py | 7 ++-- .../cuml/dask/linear_model/elastic_net.py | 3 +- python/cuml/cuml/dask/linear_model/lasso.py | 3 +- .../dask/linear_model/linear_regression.py | 5 ++- .../dask/linear_model/logistic_regression.py | 1 + python/cuml/cuml/dask/linear_model/ridge.py | 5 ++- python/cuml/cuml/dask/manifold/umap.py | 1 + .../cuml/cuml/dask/naive_bayes/naive_bayes.py | 7 ++-- .../dask/neighbors/kneighbors_classifier.py | 8 ++++- .../dask/neighbors/kneighbors_regressor.py | 8 ++--- .../cuml/dask/neighbors/nearest_neighbors.py | 9 +++--- .../cuml/dask/preprocessing/LabelEncoder.py | 5 ++- python/cuml/cuml/dask/preprocessing/label.py | 3 +- python/cuml/cuml/dask/solvers/cd.py | 3 +- .../cuml/decomposition/incremental_pca.py | 8 ++--- python/cuml/cuml/decomposition/pca.pyx | 7 ++-- python/cuml/cuml/decomposition/tsvd.pyx | 8 ++--- .../cuml/ensemble/randomforestclassifier.pyx | 8 ++--- .../cuml/ensemble/randomforestregressor.pyx | 8 ++--- python/cuml/cuml/experimental/fil/fil.pyx | 8 ++--- .../cuml/experimental/linear_model/lars.pyx | 7 ++-- python/cuml/cuml/explainer/kernel_shap.pyx | 3 +- .../cuml/cuml/explainer/permutation_shap.pyx | 5 +-- python/cuml/cuml/feature_extraction/_tfidf.py | 8 ++--- python/cuml/cuml/fil/fil.pyx | 7 ++-- 
.../cuml/cuml/kernel_ridge/kernel_ridge.pyx | 8 ++--- python/cuml/cuml/linear_model/elastic_net.pyx | 8 ++--- python/cuml/cuml/linear_model/lasso.py | 8 ++--- .../cuml/linear_model/linear_regression.pyx | 8 ++--- .../cuml/linear_model/logistic_regression.pyx | 4 +-- .../cuml/linear_model/mbsgd_classifier.pyx | 8 ++--- .../cuml/linear_model/mbsgd_regressor.pyx | 8 ++--- python/cuml/cuml/linear_model/ridge.pyx | 8 ++--- python/cuml/cuml/manifold/t_sne.pyx | 7 ++-- python/cuml/cuml/manifold/umap.pyx | 7 ++-- python/cuml/cuml/multiclass/multiclass.py | 32 ++++++++++++------- .../cuml/neighbors/kneighbors_classifier.pyx | 8 ++--- .../cuml/neighbors/kneighbors_regressor.pyx | 8 ++--- .../cuml/cuml/neighbors/nearest_neighbors.pyx | 15 ++++----- .../cuml/cuml/preprocessing/LabelEncoder.py | 8 ++--- python/cuml/cuml/preprocessing/encoders.py | 14 ++++---- python/cuml/cuml/preprocessing/label.py | 8 ++--- python/cuml/cuml/solvers/cd.pyx | 8 ++--- python/cuml/cuml/solvers/qn.pyx | 10 +++--- python/cuml/cuml/solvers/sgd.pyx | 8 ++--- python/cuml/cuml/svm/linear_svc.py | 5 ++- python/cuml/cuml/svm/svc.pyx | 7 ++-- python/cuml/cuml/svm/svm_base.pyx | 7 ++-- python/cuml/cuml/svm/svr.pyx | 8 ++--- python/cuml/cuml/tests/test_api.py | 13 ++++++-- python/cuml/cuml/tsa/arima.pyx | 7 ++-- python/cuml/cuml/tsa/auto_arima.pyx | 9 +++--- python/cuml/cuml/tsa/holtwinters.pyx | 8 ++--- wiki/python/ESTIMATOR_GUIDE.md | 2 +- 65 files changed, 270 insertions(+), 222 deletions(-) diff --git a/python/cuml/cuml/cluster/agglomerative.pyx b/python/cuml/cuml/cluster/agglomerative.pyx index 790db73362..491bf44ce8 100644 --- a/python/cuml/cuml/cluster/agglomerative.pyx +++ b/python/cuml/cuml/cluster/agglomerative.pyx @@ -25,6 +25,7 @@ np = cpu_only_import('numpy') from cuml.internals.array import CumlArray from cuml.internals.base import Base +from cuml.internals import logger from cuml.common.doc_utils import generate_docstring from pylibraft.common.handle cimport handle_t from cuml.common import 
input_to_cuml_array @@ -96,9 +97,8 @@ class AgglomerativeClustering(Base, ClusterMixin, CMajorInputTagMixin): run different models concurrently in different streams by creating handles in several streams. If it is None, a new one is created. - verbose : int or boolean, default=False - Sets logging level. It must be one of `cuml.common.logger.level_*`. - See :ref:`verbosity-levels` for more info. + verbose : level_enum + Sets logging level. See :ref:`verbosity-levels` for more info. n_clusters : int (default = 2) The number of clusters to find. affinity : str, default='euclidean' @@ -150,7 +150,7 @@ class AgglomerativeClustering(Base, ClusterMixin, CMajorInputTagMixin): children_ = CumlArrayDescriptor() def __init__(self, *, n_clusters=2, affinity="deprecated", metric=None, - linkage="single", handle=None, verbose=False, + linkage="single", handle=None, verbose=logger.level_enum.info, connectivity='knn', n_neighbors=10, output_type=None): super().__init__(handle=handle, diff --git a/python/cuml/cuml/cluster/dbscan.pyx b/python/cuml/cuml/cluster/dbscan.pyx index a6ef9d622c..da9521f8a8 100644 --- a/python/cuml/cuml/cluster/dbscan.pyx +++ b/python/cuml/cuml/cluster/dbscan.pyx @@ -23,6 +23,7 @@ cp = gpu_only_import('cupy') from cuml.internals.array import CumlArray from cuml.internals.base import UniversalBase +from cuml.internals import logger from cuml.common.doc_utils import generate_docstring from cuml.common.array_descriptor import CumlArrayDescriptor from cuml.internals.mixins import ClusterMixin @@ -169,9 +170,8 @@ class DBSCAN(UniversalBase, due to numerical rounding. algorithm: {'brute', 'rbc'}, default = 'brute' The algorithm to be used by for nearest neighbor computations. - verbose : int or boolean, default=False - Sets logging level. It must be one of `cuml.common.logger.level_*`. - See :ref:`verbosity-levels` for more info. + verbose : level_enum + Sets logging level. See :ref:`verbosity-levels` for more info. 
max_mbytes_per_batch : (optional) int64 Calculate batch size using no more than this number of megabytes for the pairwise distance computation. This enables the trade-off between @@ -246,7 +246,7 @@ class DBSCAN(UniversalBase, min_samples=5, metric='euclidean', algorithm='brute', - verbose=False, + verbose=logger.level_enum.info, max_mbytes_per_batch=None, output_type=None, calc_core_sample_indices=True): diff --git a/python/cuml/cuml/cluster/hdbscan/hdbscan.pyx b/python/cuml/cuml/cluster/hdbscan/hdbscan.pyx index 39317403d0..73ba87723a 100644 --- a/python/cuml/cuml/cluster/hdbscan/hdbscan.pyx +++ b/python/cuml/cuml/cluster/hdbscan/hdbscan.pyx @@ -25,6 +25,7 @@ from warnings import warn from cuml.internals.array import CumlArray from cuml.internals.base import UniversalBase +from cuml.internals import logger from cuml.common.doc_utils import generate_docstring from cuml.common import input_to_cuml_array @@ -350,9 +351,8 @@ class HDBSCAN(UniversalBase, ClusterMixin, CMajorInputTagMixin): alpha : float, optional (default=1.0) A distance scaling parameter as used in robust single linkage. - verbose : int or boolean, default=False - Sets logging level. It must be one of `cuml.common.logger.level_*`. - See :ref:`verbosity-levels` for more info. + verbose : level_enum + Sets logging level. See :ref:`verbosity-levels` for more info. 
min_cluster_size : int, optional (default = 5) The minimum number of samples in a group for that group to be @@ -511,7 +511,7 @@ class HDBSCAN(UniversalBase, ClusterMixin, CMajorInputTagMixin): allow_single_cluster=False, gen_min_span_tree=False, handle=None, - verbose=False, + verbose=logger.level_enum.info, connectivity='knn', output_type=None, prediction_data=False): diff --git a/python/cuml/cuml/cluster/kmeans.pyx b/python/cuml/cuml/cluster/kmeans.pyx index 81e63ab5b9..3c0435651d 100644 --- a/python/cuml/cuml/cluster/kmeans.pyx +++ b/python/cuml/cuml/cluster/kmeans.pyx @@ -36,7 +36,7 @@ IF GPUBUILD == 1: from cuml.metrics.distance_type cimport DistanceType from cuml.cluster.kmeans_utils cimport params as KMeansParams from cuml.cluster.kmeans_utils cimport KMeansPlusPlus, Random, Array - from cuml.internals.logger cimport level_enum + from cuml.internals import logger from cuml.internals.array import CumlArray from cuml.common.array_descriptor import CumlArrayDescriptor @@ -125,9 +125,8 @@ class KMeans(UniversalBase, The more iterations of EM, the more accurate, but slower. tol : float64 (default = 1e-4) Stopping criterion when centroid means do not change much. - verbose : int or boolean, default=False - Sets logging level. It must be one of `cuml.common.logger.level_*`. - See :ref:`verbosity-levels` for more info. + verbose : level_enum + Sets logging level. See :ref:`verbosity-levels` for more info. random_state : int (default = 1) If you want results to be the same when you restart Python, select a state. @@ -206,7 +205,6 @@ class KMeans(UniversalBase, params.init = self._params_init params.max_iter = self.max_iter params.tol = self.tol - breakpoint() # TODO: This params object inherits from a cuvs type that still uses raft's # integer-based legacy logging. Once raft's logger is also converted to # using rapids-logger we will instead need to translate between enums here. 
@@ -222,7 +220,7 @@ class KMeans(UniversalBase, @device_interop_preparation def __init__(self, *, handle=None, n_clusters=8, max_iter=300, tol=1e-4, - verbose=False, random_state=1, + verbose=logger.level_enum.info, random_state=1, init='scalable-k-means++', n_init=1, oversampling_factor=2.0, max_samples_per_batch=1<<15, convert_dtype=True, output_type=None): diff --git a/python/cuml/cuml/dask/cluster/dbscan.py b/python/cuml/cuml/dask/cluster/dbscan.py index b71e34682a..1b225571a3 100644 --- a/python/cuml/cuml/dask/cluster/dbscan.py +++ b/python/cuml/cuml/dask/cluster/dbscan.py @@ -23,6 +23,7 @@ from cuml.dask.common.base import DelayedPredictionMixin from cuml.dask.common.base import BaseEstimator from cuml.internals.safe_imports import cpu_only_import +from cuml.internals import logger np = cpu_only_import("numpy") @@ -41,9 +42,8 @@ class DBSCAN(BaseEstimator, DelayedPredictionMixin, DelayedTransformMixin): ---------- client : dask.distributed.Client Dask client to use - verbose : int or boolean, default=False - Sets logging level. It must be one of `cuml.common.logger.level_*`. - See :ref:`verbosity-levels` for more info. + verbose : level_enum + Sets logging level. See :ref:`verbosity-levels` for more info. min_samples : int (default = 5) The number of samples in a neighborhood such that this group can be considered as an important core point (including the point itself). 
@@ -74,7 +74,9 @@ class DBSCAN(BaseEstimator, DelayedPredictionMixin, DelayedTransformMixin): """ - def __init__(self, *, client=None, verbose=False, **kwargs): - super().__init__(client=client, verbose=verbose, **kwargs) + def __init__( + self, *, client=None, verbose=logger.level_enum.info, **kwargs + ): + super().__init__(client=client, verbose=verbose, **kwargs) @staticmethod @mnmg_import diff --git a/python/cuml/cuml/dask/cluster/kmeans.py b/python/cuml/cuml/dask/cluster/kmeans.py index 3dfeced41f..fce4af0727 100644 --- a/python/cuml/cuml/dask/cluster/kmeans.py +++ b/python/cuml/cuml/dask/cluster/kmeans.py @@ -25,6 +25,7 @@ from cuml.dask.common.base import DelayedPredictionMixin from cuml.dask.common.base import BaseEstimator from cuml.internals.safe_imports import gpu_only_import +from cuml.internals import logger cp = gpu_only_import("cupy") @@ -58,9 +59,8 @@ class KMeans(BaseEstimator, DelayedPredictionMixin, DelayedTransformMixin): The more iterations of EM, the more accurate, but slower. tol : float (default = 1e-4) Stopping criterion when centroid means do not change much. - verbose : int or boolean, default=False - Sets logging level. It must be one of `cuml.common.logger.level_*`. - See :ref:`verbosity-levels` for more info. + verbose : level_enum + Sets logging level. See :ref:`verbosity-levels` for more info. random_state : int (default = 1) If you want results to be the same when you restart Python, select a state. 
@@ -93,7 +93,9 @@ class KMeans(BaseEstimator, DelayedPredictionMixin, DelayedTransformMixin): """ - def __init__(self, *, client=None, verbose=False, **kwargs): + def __init__( + self, *, client=None, verbose=logger.level_enum.info, **kwargs + ): super().__init__(client=client, verbose=verbose, **kwargs) @staticmethod diff --git a/python/cuml/cuml/dask/decomposition/base.py b/python/cuml/cuml/dask/decomposition/base.py index 22dc206f49..173a045a69 100644 --- a/python/cuml/cuml/dask/decomposition/base.py +++ b/python/cuml/cuml/dask/decomposition/base.py @@ -1,4 +1,4 @@ -# Copyright (c) 2019-2023, NVIDIA CORPORATION. +# Copyright (c) 2019-2024, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -26,10 +26,18 @@ from cuml.dask.common.base import BaseEstimator from cuml.dask.common.input_utils import DistributedDataHandler +from cuml.internals import logger class BaseDecomposition(BaseEstimator): - def __init__(self, *, model_func, client=None, verbose=False, **kwargs): + def __init__( + self, + *, + model_func, + client=None, + verbose=logger.level_enum.info, + **kwargs, + ): """ Constructor for distributed decomposition model """ diff --git a/python/cuml/cuml/dask/decomposition/pca.py b/python/cuml/cuml/dask/decomposition/pca.py index 8cebb2764f..42a5b3ce35 100644 --- a/python/cuml/cuml/dask/decomposition/pca.py +++ b/python/cuml/cuml/dask/decomposition/pca.py @@ -20,6 +20,8 @@ from cuml.dask.common.base import DelayedTransformMixin from cuml.dask.common.base import DelayedInverseTransformMixin +from cuml.internals import logger + class PCA( BaseDecomposition, @@ -99,9 +101,8 @@ class PCA( 'full': Run exact full SVD and select the components by postprocessing 'jacobi': Iteratively compute SVD of the covariance matrix 'auto': For compatibility with Scikit-learn. Alias for 'jacobi'. - verbose : int or boolean, default=False - Sets logging level. 
It must be one of `cuml.common.logger.level_*`. - See :ref:`verbosity-levels` for more info. + verbose : level_enum + Sets logging level. See :ref:`verbosity-levels` for more info. whiten : boolean (default = False) If True, de-correlates the components. This is done by dividing them by the corresponding singular values then multiplying by sqrt(n_samples). @@ -145,7 +146,9 @@ class PCA( `_. """ - def __init__(self, *, client=None, verbose=False, **kwargs): + def __init__( + self, *, client=None, verbose=logger.level_enum.info, **kwargs + ): super().__init__( model_func=PCA._create_pca, diff --git a/python/cuml/cuml/dask/decomposition/tsvd.py b/python/cuml/cuml/dask/decomposition/tsvd.py index 67392b7555..849a26cde2 100644 --- a/python/cuml/cuml/dask/decomposition/tsvd.py +++ b/python/cuml/cuml/dask/decomposition/tsvd.py @@ -19,6 +19,7 @@ from cuml.dask.common.base import mnmg_import from cuml.dask.common.base import DelayedTransformMixin from cuml.dask.common.base import DelayedInverseTransformMixin +from cuml.internals import logger class TruncatedSVD( diff --git a/python/cuml/cuml/dask/ensemble/randomforestclassifier.py b/python/cuml/cuml/dask/ensemble/randomforestclassifier.py index 4dfd7c3ddb..1d4adebcba 100755 --- a/python/cuml/cuml/dask/ensemble/randomforestclassifier.py +++ b/python/cuml/cuml/dask/ensemble/randomforestclassifier.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2019-2023, NVIDIA CORPORATION. +# Copyright (c) 2019-2024, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -26,6 +26,7 @@ from cuml.dask.common.base import BaseEstimator from cuml.internals.safe_imports import gpu_only_import from cuml.internals.safe_imports import cpu_only_import +from cuml.internals import logger np = cpu_only_import("numpy") cp = gpu_only_import("cupy") @@ -163,7 +164,7 @@ def __init__( *, workers=None, client=None, - verbose=False, + verbose=logger.level_enum.info, n_estimators=100, random_state=None, ignore_empty_partitions=False, diff --git a/python/cuml/cuml/dask/ensemble/randomforestregressor.py b/python/cuml/cuml/dask/ensemble/randomforestregressor.py index f2c7d283eb..7c50197154 100755 --- a/python/cuml/cuml/dask/ensemble/randomforestregressor.py +++ b/python/cuml/cuml/dask/ensemble/randomforestregressor.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2019-2023, NVIDIA CORPORATION. +# Copyright (c) 2019-2024, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -17,6 +17,7 @@ from cuml.ensemble import RandomForestRegressor as cuRFR from cuml.dask.ensemble.base import BaseRandomForestModel from cuml.dask.common.base import BaseEstimator +from cuml.internals import logger import dask @@ -149,7 +150,7 @@ def __init__( *, workers=None, client=None, - verbose=False, + verbose=logger.level_enum.info, n_estimators=100, random_state=None, ignore_empty_partitions=False, diff --git a/python/cuml/cuml/dask/feature_extraction/text/tfidf_transformer.py b/python/cuml/cuml/dask/feature_extraction/text/tfidf_transformer.py index 7dbcd26d46..5a5de0b651 100644 --- a/python/cuml/cuml/dask/feature_extraction/text/tfidf_transformer.py +++ b/python/cuml/cuml/dask/feature_extraction/text/tfidf_transformer.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2020-2023, NVIDIA CORPORATION. +# Copyright (c) 2020-2024, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -26,6 +26,7 @@ from cuml.dask.common.input_utils import DistributedDataHandler from cuml.feature_extraction.text import TfidfTransformer as s_TfidfTransformer +from cuml.internals import logger class TfidfTransformer(BaseEstimator, DelayedTransformMixin): @@ -77,7 +78,9 @@ class TfidfTransformer(BaseEstimator, DelayedTransformMixin): """ - def __init__(self, *, client=None, verbose=False, **kwargs): + def __init__( + self, *, client=None, verbose=logger.level_enum.info, **kwargs + ): """ Create new distributed TF-IDF transformer instance diff --git a/python/cuml/cuml/dask/linear_model/elastic_net.py b/python/cuml/cuml/dask/linear_model/elastic_net.py index 9ddaa8f6ff..f2fbc816ea 100644 --- a/python/cuml/cuml/dask/linear_model/elastic_net.py +++ b/python/cuml/cuml/dask/linear_model/elastic_net.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2019-2023, NVIDIA CORPORATION. +# Copyright (c) 2019-2024, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -16,6 +16,7 @@ from cuml.dask.solvers import CD from cuml.dask.common.base import BaseEstimator +from cuml.internals import logger class ElasticNet(BaseEstimator): diff --git a/python/cuml/cuml/dask/linear_model/lasso.py b/python/cuml/cuml/dask/linear_model/lasso.py index 65bc02bea0..7854975c78 100644 --- a/python/cuml/cuml/dask/linear_model/lasso.py +++ b/python/cuml/cuml/dask/linear_model/lasso.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2020-2023, NVIDIA CORPORATION. +# Copyright (c) 2020-2024, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -16,6 +16,7 @@ from cuml.dask.solvers import CD from cuml.dask.common.base import BaseEstimator +from cuml.internals import logger class Lasso(BaseEstimator): diff --git a/python/cuml/cuml/dask/linear_model/linear_regression.py b/python/cuml/cuml/dask/linear_model/linear_regression.py index 98ffba672d..22f49e723d 100644 --- a/python/cuml/cuml/dask/linear_model/linear_regression.py +++ b/python/cuml/cuml/dask/linear_model/linear_regression.py @@ -19,6 +19,7 @@ from cuml.dask.common.base import SyncFitMixinLinearModel from raft_dask.common.comms import get_raft_comm_state from dask.distributed import get_worker +from cuml.internals import logger class LinearRegression( @@ -63,7 +64,9 @@ class LinearRegression( The independent term. If `fit_intercept` is False, will be 0. """ - def __init__(self, *, client=None, verbose=False, **kwargs): + def __init__( + self, *, client=None, verbose=logger.level_enum.info, **kwargs + ): super().__init__(client=client, verbose=verbose, **kwargs) def fit(self, X, y): diff --git a/python/cuml/cuml/dask/linear_model/logistic_regression.py b/python/cuml/cuml/dask/linear_model/logistic_regression.py index 82266807ba..89fd31c57d 100644 --- a/python/cuml/cuml/dask/linear_model/logistic_regression.py +++ b/python/cuml/cuml/dask/linear_model/logistic_regression.py @@ -28,6 +28,7 @@ from cuml.dask.common.utils import wait_and_raise_from_futures from cuml.internals.safe_imports import cpu_only_import from cuml.internals.safe_imports import gpu_only_import +from cuml.internals import logger cp = gpu_only_import("cupy") cupyx = gpu_only_import("cupyx") diff --git a/python/cuml/cuml/dask/linear_model/ridge.py b/python/cuml/cuml/dask/linear_model/ridge.py index 2830f3ce38..f334c86efb 100644 --- a/python/cuml/cuml/dask/linear_model/ridge.py +++ b/python/cuml/cuml/dask/linear_model/ridge.py @@ -19,6 +19,7 @@ from cuml.dask.common.base import SyncFitMixinLinearModel from raft_dask.common.comms import get_raft_comm_state from dask.distributed import 
get_worker +from cuml.internals import logger class Ridge(BaseEstimator, SyncFitMixinLinearModel, DelayedPredictionMixin): @@ -68,7 +69,9 @@ class Ridge(BaseEstimator, SyncFitMixinLinearModel, DelayedPredictionMixin): """ - def __init__(self, *, client=None, verbose=False, **kwargs): + def __init__( + self, *, client=None, verbose=logger.level_enum.info, **kwargs + ): super().__init__(client=client, verbose=verbose, **kwargs) self.coef_ = None diff --git a/python/cuml/cuml/dask/manifold/umap.py b/python/cuml/cuml/dask/manifold/umap.py index 181bfb0728..ca0620e4f5 100644 --- a/python/cuml/cuml/dask/manifold/umap.py +++ b/python/cuml/cuml/dask/manifold/umap.py @@ -15,6 +15,7 @@ from cuml.dask.common.base import BaseEstimator, DelayedTransformMixin from cuml.dask.common.input_utils import DistributedDataHandler +from cuml.internals import logger class UMAP(BaseEstimator, DelayedTransformMixin): diff --git a/python/cuml/cuml/dask/naive_bayes/naive_bayes.py b/python/cuml/cuml/dask/naive_bayes/naive_bayes.py index f3e2b5a443..8a7563bd36 100644 --- a/python/cuml/cuml/dask/naive_bayes/naive_bayes.py +++ b/python/cuml/cuml/dask/naive_bayes/naive_bayes.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2020-2023, NVIDIA CORPORATION. +# Copyright (c) 2020-2024, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -27,6 +27,7 @@ from toolz import first import dask from cuml.internals.safe_imports import gpu_only_import +from cuml.internals import logger cp = gpu_only_import("cupy") @@ -82,7 +83,9 @@ class MultinomialNB(BaseEstimator, DelayedPredictionMixin): """ - def __init__(self, *, client=None, verbose=False, **kwargs): + def __init__( + self, *, client=None, verbose=logger.level_enum.info, **kwargs + ): """ Create new multinomial distributed Naive Bayes classifier instance diff --git a/python/cuml/cuml/dask/neighbors/kneighbors_classifier.py b/python/cuml/cuml/dask/neighbors/kneighbors_classifier.py index fa0e1e59d9..9e6c2cbd55 100644 --- a/python/cuml/cuml/dask/neighbors/kneighbors_classifier.py +++ b/python/cuml/cuml/dask/neighbors/kneighbors_classifier.py @@ -28,6 +28,7 @@ import dask.array as da from uuid import uuid1 from cuml.internals.safe_imports import cpu_only_import +from cuml.internals import logger np = cpu_only_import("numpy") pd = cpu_only_import("pandas") @@ -65,7 +66,12 @@ class KNeighborsClassifier(NearestNeighbors): """ def __init__( - self, *, client=None, streams_per_handle=0, verbose=False, **kwargs + self, + *, + client=None, + streams_per_handle=0, + verbose=logger.level_enum.info, + **kwargs, ): super().__init__(client=client, verbose=verbose, **kwargs) self.streams_per_handle = streams_per_handle diff --git a/python/cuml/cuml/dask/neighbors/kneighbors_regressor.py b/python/cuml/cuml/dask/neighbors/kneighbors_regressor.py index 2027169c67..489ca0be8b 100644 --- a/python/cuml/cuml/dask/neighbors/kneighbors_regressor.py +++ b/python/cuml/cuml/dask/neighbors/kneighbors_regressor.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2020-2023, NVIDIA CORPORATION. +# Copyright (c) 2020-2024, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -24,6 +24,7 @@ from cuml.dask.neighbors import NearestNeighbors from dask.distributed import get_worker import dask.array as da +from cuml.internals import logger from uuid import uuid1 @@ -52,9 +53,8 @@ class KNeighborsRegressor(NearestNeighbors): run different models concurrently in different streams by creating handles in several streams. If it is None, a new one is created. - verbose : int or boolean, default=False - Sets logging level. It must be one of `cuml.common.logger.level_*`. - See :ref:`verbosity-levels` for more info. + verbose : level_enum + Sets logging level. See :ref:`verbosity-levels` for more info. """ def __init__( diff --git a/python/cuml/cuml/dask/neighbors/nearest_neighbors.py b/python/cuml/cuml/dask/neighbors/nearest_neighbors.py index 6ef7e70dd1..4f3c1bbd96 100644 --- a/python/cuml/cuml/dask/neighbors/nearest_neighbors.py +++ b/python/cuml/cuml/dask/neighbors/nearest_neighbors.py @@ -1,4 +1,4 @@ -# Copyright (c) 2019-2023, NVIDIA CORPORATION. +# Copyright (c) 2019-2024, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -24,6 +24,8 @@ from cuml.dask.common.input_utils import to_output from cuml.dask.common.input_utils import DistributedDataHandler +from cuml.internals import logger + from uuid import uuid1 @@ -48,9 +50,8 @@ class NearestNeighbors(BaseEstimator): run different models concurrently in different streams by creating handles in several streams. If it is None, a new one is created. - verbose : int or boolean, default=False - Sets logging level. It must be one of `cuml.common.logger.level_*`. - See :ref:`verbosity-levels` for more info. + verbose : level_enum + Sets logging level. See :ref:`verbosity-levels` for more info. 
""" diff --git a/python/cuml/cuml/dask/preprocessing/LabelEncoder.py b/python/cuml/cuml/dask/preprocessing/LabelEncoder.py index f232d505b2..a9d11200c8 100644 --- a/python/cuml/cuml/dask/preprocessing/LabelEncoder.py +++ b/python/cuml/cuml/dask/preprocessing/LabelEncoder.py @@ -22,6 +22,7 @@ from collections.abc import Sequence from cuml.internals.safe_imports import gpu_only_import_from +from cuml.internals import logger dcDataFrame = gpu_only_import_from("dask_cudf", "DataFrame") dcSeries = gpu_only_import_from("dask_cudf", "Series") @@ -122,7 +123,9 @@ class LabelEncoder( """ - def __init__(self, *, client=None, verbose=False, **kwargs): + def __init__( + self, *, client=None, verbose=logger.level_enum.info, **kwargs + ): super().__init__(client=client, verbose=verbose, **kwargs) def fit(self, y): diff --git a/python/cuml/cuml/dask/preprocessing/label.py b/python/cuml/cuml/dask/preprocessing/label.py index a2f49f46f4..ff896cc691 100644 --- a/python/cuml/cuml/dask/preprocessing/label.py +++ b/python/cuml/cuml/dask/preprocessing/label.py @@ -1,4 +1,4 @@ -# Copyright (c) 2020-2023, NVIDIA CORPORATION. +# Copyright (c) 2020-2024, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -21,6 +21,7 @@ import dask from cuml.internals.safe_imports import gpu_only_import +from cuml.internals import logger cp = gpu_only_import("cupy") cupyx = gpu_only_import("cupyx") diff --git a/python/cuml/cuml/dask/solvers/cd.py b/python/cuml/cuml/dask/solvers/cd.py index b5fb25e518..cb3bbdf097 100644 --- a/python/cuml/cuml/dask/solvers/cd.py +++ b/python/cuml/cuml/dask/solvers/cd.py @@ -1,4 +1,4 @@ -# Copyright (c) 2020-2023, NVIDIA CORPORATION. +# Copyright (c) 2020-2024, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -19,6 +19,7 @@ from cuml.dask.common.base import SyncFitMixinLinearModel from raft_dask.common.comms import get_raft_comm_state from dask.distributed import get_worker +from cuml.internals import logger class CD(BaseEstimator, SyncFitMixinLinearModel, DelayedPredictionMixin): diff --git a/python/cuml/cuml/decomposition/incremental_pca.py b/python/cuml/cuml/decomposition/incremental_pca.py index 925219d0bc..ab09ffeb1e 100644 --- a/python/cuml/cuml/decomposition/incremental_pca.py +++ b/python/cuml/cuml/decomposition/incremental_pca.py @@ -21,6 +21,7 @@ from cuml.common import input_to_cuml_array from cuml import Base from cuml.internals.safe_imports import cpu_only_import +from cuml.internals import logger import numbers from cuml.internals.safe_imports import gpu_only_import @@ -79,9 +80,8 @@ class IncrementalPCA(PCA): `fit`. If `batch_size` is ``None``, then `batch_size` is inferred from the data and set to :py:`5 * n_features`, to provide a balance between approximation accuracy and memory consumption. - verbose : int or boolean, default=False - Sets logging level. It must be one of `cuml.common.logger.level_*`. - See :ref:`verbosity-levels` for more info. + verbose : level_enum + Sets logging level. See :ref:`verbosity-levels` for more info. output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \ 'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None Return results and set estimator attributes to the indicated output @@ -203,7 +203,7 @@ def __init__( whiten=False, copy=True, batch_size=None, - verbose=False, + verbose=logger.level_enum.info, output_type=None, ): diff --git a/python/cuml/cuml/decomposition/pca.pyx b/python/cuml/cuml/decomposition/pca.pyx index db2f0f62c8..402de1c429 100644 --- a/python/cuml/cuml/decomposition/pca.pyx +++ b/python/cuml/cuml/decomposition/pca.pyx @@ -219,9 +219,8 @@ class PCA(UniversalBase, tol : float (default = 1e-7) Used if algorithm = "jacobi". 
Smaller tolerance can increase accuracy, but but will slow down the algorithm's convergence. - verbose : int or boolean, default=False - Sets logging level. It must be one of `cuml.common.logger.level_*`. - See :ref:`verbosity-levels` for more info. + verbose : level_enum + Sets logging level. See :ref:`verbosity-levels` for more info. whiten : boolean (default = False) If True, de-correlates the components. This is done by dividing them by the corresponding singular values then multiplying by sqrt(n_samples). @@ -293,7 +292,7 @@ class PCA(UniversalBase, @device_interop_preparation def __init__(self, *, copy=True, handle=None, iterated_power=15, n_components=None, random_state=None, svd_solver='auto', - tol=1e-7, verbose=False, whiten=False, + tol=1e-7, verbose=logger.level_enum.info, whiten=False, output_type=None): # parameters super().__init__(handle=handle, diff --git a/python/cuml/cuml/decomposition/tsvd.pyx b/python/cuml/cuml/decomposition/tsvd.pyx index b495d3d239..8078684abe 100644 --- a/python/cuml/cuml/decomposition/tsvd.pyx +++ b/python/cuml/cuml/decomposition/tsvd.pyx @@ -31,6 +31,7 @@ from cuml.common.doc_utils import generate_docstring from cuml.internals.mixins import FMajorInputTagMixin from cuml.internals.api_decorators import device_interop_preparation from cuml.internals.api_decorators import enable_device_interop +from cuml.internals import logger IF GPUBUILD == 1: @@ -192,9 +193,8 @@ class TruncatedSVD(UniversalBase, tol : float (default = 1e-7) Used if algorithm = "jacobi". Smaller tolerance can increase accuracy, but but will slow down the algorithm's convergence. - verbose : int or boolean, default=False - Sets logging level. It must be one of `cuml.common.logger.level_*`. - See :ref:`verbosity-levels` for more info. + verbose : level_enum + Sets logging level. See :ref:`verbosity-levels` for more info. 
output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \ 'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None Return results and set estimator attributes to the indicated output @@ -250,7 +250,7 @@ class TruncatedSVD(UniversalBase, @device_interop_preparation def __init__(self, *, algorithm='full', handle=None, n_components=1, n_iter=15, random_state=None, tol=1e-7, - verbose=False, output_type=None): + verbose=logger.level_enum.info, output_type=None): # params super().__init__(handle=handle, verbose=verbose, diff --git a/python/cuml/cuml/ensemble/randomforestclassifier.pyx b/python/cuml/cuml/ensemble/randomforestclassifier.pyx index 9664b4d008..e247f8a190 100644 --- a/python/cuml/cuml/ensemble/randomforestclassifier.pyx +++ b/python/cuml/cuml/ensemble/randomforestclassifier.pyx @@ -35,6 +35,7 @@ from cuml.common.doc_utils import insert_into_docstring from cuml.common import input_to_cuml_array from cuml.internals.logger cimport level_enum +from cuml.internals import logger from cuml.ensemble.randomforest_common import BaseRandomForestModel from cuml.ensemble.randomforest_common import _obtain_fil_model from cuml.ensemble.randomforest_shared cimport * @@ -220,9 +221,8 @@ class RandomForestClassifier(BaseRandomForestModel, run different models concurrently in different streams by creating handles in several streams. If it is None, a new one is created. - verbose : int or boolean, default=False - Sets logging level. It must be one of `cuml.common.logger.level_*`. - See :ref:`verbosity-levels` for more info. + verbose : level_enum + Sets logging level. See :ref:`verbosity-levels` for more info. output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \ 'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None Return results and set estimator attributes to the indicated output @@ -247,7 +247,7 @@ class RandomForestClassifier(BaseRandomForestModel, `_. 
""" - def __init__(self, *, split_criterion=0, handle=None, verbose=False, + def __init__(self, *, split_criterion=0, handle=None, verbose=logger.level_enum.info, output_type=None, **kwargs): diff --git a/python/cuml/cuml/ensemble/randomforestregressor.pyx b/python/cuml/cuml/ensemble/randomforestregressor.pyx index 5625d278ca..1a6765327d 100644 --- a/python/cuml/cuml/ensemble/randomforestregressor.pyx +++ b/python/cuml/cuml/ensemble/randomforestregressor.pyx @@ -31,6 +31,7 @@ import cuml.internals from cuml.internals.mixins import RegressorMixin from cuml.internals.logger cimport level_enum +from cuml.internals import logger from cuml.common.doc_utils import generate_docstring from cuml.common.doc_utils import insert_into_docstring from cuml.common import input_to_cuml_array @@ -225,9 +226,8 @@ class RandomForestRegressor(BaseRandomForestModel, run different models concurrently in different streams by creating handles in several streams. If it is None, a new one is created. - verbose : int or boolean, default=False - Sets logging level. It must be one of `cuml.common.logger.level_*`. - See :ref:`verbosity-levels` for more info. + verbose : level_enum + Sets logging level. See :ref:`verbosity-levels` for more info. 
output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \ 'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None Return results and set estimator attributes to the indicated output @@ -254,7 +254,7 @@ class RandomForestRegressor(BaseRandomForestModel, split_criterion=2, accuracy_metric='r2', handle=None, - verbose=False, + verbose=logger.level_enum.info, output_type=None, **kwargs): self.RF_type = REGRESSION diff --git a/python/cuml/cuml/experimental/fil/fil.pyx b/python/cuml/cuml/experimental/fil/fil.pyx index 247e968e1b..6fc87ab22f 100644 --- a/python/cuml/cuml/experimental/fil/fil.pyx +++ b/python/cuml/cuml/experimental/fil/fil.pyx @@ -37,6 +37,7 @@ from cuml.experimental.fil.detail.raft_proto.cuda_stream cimport cuda_stream as from cuml.experimental.fil.detail.raft_proto.device_type cimport device_type as raft_proto_device_t from cuml.experimental.fil.detail.raft_proto.handle cimport handle_t as raft_proto_handle_t from cuml.experimental.fil.detail.raft_proto.optional cimport optional, nullopt +from cuml.internals import logger from cuml.internals import set_api_output_dtype from cuml.internals.base import UniversalBase from cuml.internals.device_type import DeviceType, DeviceTypeError @@ -466,9 +467,8 @@ class ForestInference(UniversalBase, CMajorInputTagMixin): type. If None, the output type set at the module level (`cuml.global_settings.output_type`) will be used. See :ref:`output-data-type-configuration` for more info. - verbose : int or boolean, default=False - Sets logging level. It must be one of `cuml.common.logger.level_*`. - See :ref:`verbosity-levels` for more info. + verbose : level_enum + Sets logging level. See :ref:`verbosity-levels` for more info. output_class : boolean True for classifier models, false for regressors. 
layout : {'breadth_first', 'depth_first'}, default='depth_first' @@ -655,7 +655,7 @@ class ForestInference(UniversalBase, CMajorInputTagMixin): treelite_model=None, handle=None, output_type=None, - verbose=False, + verbose=logger.level_enum.info, is_classifier=False, output_class=None, layout='depth_first', diff --git a/python/cuml/cuml/experimental/linear_model/lars.pyx b/python/cuml/cuml/experimental/linear_model/lars.pyx index ddb3f69fa3..6cc06b4a15 100644 --- a/python/cuml/cuml/experimental/linear_model/lars.pyx +++ b/python/cuml/cuml/experimental/linear_model/lars.pyx @@ -116,9 +116,8 @@ class Lars(Base, RegressorMixin): run different models concurrently in different streams by creating handles in several streams. If it is None, a new one is created. - verbose : int or boolean, default=False - Sets logging level. It must be one of `cuml.common.logger.level_*`. - See :ref:`verbosity-levels` for more info. + verbose : level_enum + Sets logging level. See :ref:`verbosity-levels` for more info. 
output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \ 'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None Return results and set estimator attributes to the indicated output @@ -166,7 +165,7 @@ class Lars(Base, RegressorMixin): intercept_ = CumlArrayDescriptor() def __init__(self, *, fit_intercept=True, normalize=True, - handle=None, verbose=False, output_type=None, copy_X=True, + handle=None, verbose=logger.level_enum.info, output_type=None, copy_X=True, fit_path=True, n_nonzero_coefs=500, eps=None, precompute='auto'): super().__init__(handle=handle, diff --git a/python/cuml/cuml/explainer/kernel_shap.pyx b/python/cuml/cuml/explainer/kernel_shap.pyx index fbd99d5eb9..1345d046e1 100644 --- a/python/cuml/cuml/explainer/kernel_shap.pyx +++ b/python/cuml/cuml/explainer/kernel_shap.pyx @@ -27,6 +27,7 @@ from cuml.explainer.common import get_cai_ptr from cuml.explainer.common import model_func_call from cuml.linear_model import Lasso from cuml.linear_model import LinearRegression +from cuml.internals import logger from functools import lru_cache from itertools import combinations from numbers import Number @@ -197,7 +198,7 @@ class KernelExplainer(SHAPBase): data, nsamples='auto', link='identity', - verbose=False, + verbose=logger.level_enum.info, random_state=None, is_gpu_model=None, handle=None, diff --git a/python/cuml/cuml/explainer/permutation_shap.pyx b/python/cuml/cuml/explainer/permutation_shap.pyx index b0270b5873..3ac7a0f20d 100644 --- a/python/cuml/cuml/explainer/permutation_shap.pyx +++ b/python/cuml/cuml/explainer/permutation_shap.pyx @@ -1,5 +1,5 @@ # -# Copyright (c) 2020-2023, NVIDIA CORPORATION. +# Copyright (c) 2020-2024, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -21,6 +21,7 @@ np = cpu_only_import('numpy') import time from cuml.internals.safe_imports import gpu_only_import_from +from cuml.internals import logger cu_df = gpu_only_import_from('cudf', 'DataFrame') from cuml.explainer.base import SHAPBase from cuml.explainer.common import get_cai_ptr @@ -204,7 +205,7 @@ class PermutationExplainer(SHAPBase): random_state=None, dtype=None, output_type=None, - verbose=False,): + verbose=logger.level_enum.info,): super().__init__( order='C', model=model, diff --git a/python/cuml/cuml/feature_extraction/_tfidf.py b/python/cuml/cuml/feature_extraction/_tfidf.py index 2cf5974119..b6277a00e5 100644 --- a/python/cuml/cuml/feature_extraction/_tfidf.py +++ b/python/cuml/cuml/feature_extraction/_tfidf.py @@ -18,6 +18,7 @@ from cuml.common.sparsefuncs import csr_diag_mul from cuml.common.sparsefuncs import csr_row_normalize_l1, csr_row_normalize_l2 import cuml.internals +from cuml.internals import logger from cuml.common.exceptions import NotFittedError from cuml.internals.safe_imports import gpu_only_import @@ -105,9 +106,8 @@ class TfidfTransformer(Base): run different models concurrently in different streams by creating handles in several streams. If it is None, a new one is created. - verbose : int or boolean, default=False - Sets logging level. It must be one of `cuml.common.logger.level_*`. - See :ref:`verbosity-levels` for more info. + verbose : level_enum + Sets logging level. See :ref:`verbosity-levels` for more info. 
output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \ 'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None Return results and set estimator attributes to the indicated output @@ -131,7 +131,7 @@ def __init__( smooth_idf=True, sublinear_tf=False, handle=None, - verbose=False, + verbose=logger.level_enum.info, output_type=None, ): diff --git a/python/cuml/cuml/fil/fil.pyx b/python/cuml/cuml/fil/fil.pyx index d3764fc758..ee603a913e 100644 --- a/python/cuml/cuml/fil/fil.pyx +++ b/python/cuml/cuml/fil/fil.pyx @@ -633,9 +633,8 @@ class ForestInference(Base, run different models concurrently in different streams by creating handles in several streams. If it is None, a new one is created. - verbose : int or boolean, default=False - Sets logging level. It must be one of `cuml.common.logger.level_*`. - See :ref:`verbosity-levels` for more info. + verbose : level_enum + Sets logging level. See :ref:`verbosity-levels` for more info. output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \ 'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None Return results and set estimator attributes to the indicated output @@ -744,7 +743,7 @@ class ForestInference(Base, def __init__(self, *, handle=None, output_type=None, - verbose=False): + verbose=logger.level_enum.info): super().__init__(handle=handle, verbose=verbose, output_type=output_type) diff --git a/python/cuml/cuml/kernel_ridge/kernel_ridge.pyx b/python/cuml/cuml/kernel_ridge/kernel_ridge.pyx index 6063f27c99..e36851ba3d 100644 --- a/python/cuml/cuml/kernel_ridge/kernel_ridge.pyx +++ b/python/cuml/cuml/kernel_ridge/kernel_ridge.pyx @@ -24,6 +24,7 @@ from cupyx import lapack, geterr, seterr from cuml.common.array_descriptor import CumlArrayDescriptor from cuml.internals.base import Base from cuml.internals.mixins import RegressorMixin +from cuml.internals import logger from cuml.common.doc_utils import generate_docstring from cuml.common import input_to_cuml_array @@ -153,9 +154,8 @@ class 
KernelRidge(Base, RegressorMixin): users can run different models concurrently in different streams by creating handles in several streams. If it is None, a new one is created. - verbose : int or boolean, default=False - Sets logging level. It must be one of `cuml.common.logger.level_*`. - See :ref:`verbosity-levels` for more info. + verbose : level_enum + Sets logging level. See :ref:`verbosity-levels` for more info. Attributes ---------- @@ -215,7 +215,7 @@ class KernelRidge(Base, RegressorMixin): kernel_params=None, output_type=None, handle=None, - verbose=False + verbose=logger.level_enum.info ): super().__init__(handle=handle, verbose=verbose, output_type=output_type) diff --git a/python/cuml/cuml/linear_model/elastic_net.pyx b/python/cuml/cuml/linear_model/elastic_net.pyx index 7b212b21c9..4e39f9175c 100644 --- a/python/cuml/cuml/linear_model/elastic_net.pyx +++ b/python/cuml/cuml/linear_model/elastic_net.pyx @@ -25,6 +25,7 @@ from cuml.common.doc_utils import generate_docstring from cuml.internals.array import CumlArray from cuml.common.array_descriptor import CumlArrayDescriptor from cuml.internals.logger import warn +from cuml.internals import logger from cuml.linear_model.base import LinearPredictMixin from cuml.internals.api_decorators import device_interop_preparation from cuml.internals.api_decorators import enable_device_interop @@ -130,9 +131,8 @@ class ElasticNet(UniversalBase, type. If None, the output type set at the module level (`cuml.global_settings.output_type`) will be used. See :ref:`output-data-type-configuration` for more info. - verbose : int or boolean, default=False - Sets logging level. It must be one of `cuml.common.logger.level_*`. - See :ref:`verbosity-levels` for more info. + verbose : level_enum + Sets logging level. See :ref:`verbosity-levels` for more info. 
Attributes ---------- @@ -163,7 +163,7 @@ class ElasticNet(UniversalBase, def __init__(self, *, alpha=1.0, l1_ratio=0.5, fit_intercept=True, normalize=False, max_iter=1000, tol=1e-3, solver='cd', selection='cyclic', - handle=None, output_type=None, verbose=False): + handle=None, output_type=None, verbose=logger.level_enum.info): """ Initializes the elastic-net regression class. diff --git a/python/cuml/cuml/linear_model/lasso.py b/python/cuml/cuml/linear_model/lasso.py index 65a30be791..937adc1841 100644 --- a/python/cuml/cuml/linear_model/lasso.py +++ b/python/cuml/cuml/linear_model/lasso.py @@ -16,6 +16,7 @@ from cuml.linear_model.elastic_net import ElasticNet from cuml.internals.api_decorators import device_interop_preparation +from cuml.internals import logger class Lasso(ElasticNet): @@ -116,9 +117,8 @@ class Lasso(ElasticNet): type. If None, the output type set at the module level (`cuml.global_settings.output_type`) will be used. See :ref:`output-data-type-configuration` for more info. - verbose : int or boolean, default=False - Sets logging level. It must be one of `cuml.common.logger.level_*`. - See :ref:`verbosity-levels` for more info. + verbose : level_enum + Sets logging level. See :ref:`verbosity-levels` for more info. 
Attributes ---------- @@ -148,7 +148,7 @@ def __init__( selection="cyclic", handle=None, output_type=None, - verbose=False, + verbose=logger.level_enum.info, ): # Lasso is just a special case of ElasticNet super().__init__( diff --git a/python/cuml/cuml/linear_model/linear_regression.pyx b/python/cuml/cuml/linear_model/linear_regression.pyx index f1b64602b3..b18c37dd08 100644 --- a/python/cuml/cuml/linear_model/linear_regression.pyx +++ b/python/cuml/cuml/linear_model/linear_regression.pyx @@ -36,6 +36,7 @@ from cuml.linear_model.base import LinearPredictMixin from cuml.common import input_to_cuml_array from cuml.internals.api_decorators import device_interop_preparation from cuml.internals.api_decorators import enable_device_interop +from cuml.internals import logger IF GPUBUILD == 1: @@ -217,9 +218,8 @@ class LinearRegression(LinearPredictMixin, run different models concurrently in different streams by creating handles in several streams. If it is None, a new one is created. - verbose : int or boolean, default=False - Sets logging level. It must be one of `cuml.common.logger.level_*`. - See :ref:`verbosity-levels` for more info. + verbose : level_enum + Sets logging level. See :ref:`verbosity-levels` for more info. 
output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \ 'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None Return results and set estimator attributes to the indicated output @@ -277,7 +277,7 @@ class LinearRegression(LinearPredictMixin, @device_interop_preparation def __init__(self, *, algorithm='eig', fit_intercept=True, copy_X=None, normalize=False, - handle=None, verbose=False, output_type=None): + handle=None, verbose=logger.level_enum.info, output_type=None): IF GPUBUILD == 1: if handle is None and algorithm == 'eig': # if possible, create two streams, so that eigenvalue decomposition diff --git a/python/cuml/cuml/linear_model/logistic_regression.pyx b/python/cuml/cuml/linear_model/logistic_regression.pyx index e6e9b1ba16..3f93237d0a 100644 --- a/python/cuml/cuml/linear_model/logistic_regression.pyx +++ b/python/cuml/cuml/linear_model/logistic_regression.pyx @@ -29,7 +29,7 @@ from cuml.internals.mixins import ClassifierMixin, FMajorInputTagMixin from cuml.common.array_descriptor import CumlArrayDescriptor from cuml.internals.array import CumlArray from cuml.common.doc_utils import generate_docstring -import cuml.internals.logger as logger +from cuml.internals import logger from cuml.common import input_to_cuml_array from cuml.common import using_output_type from cuml.internals.api_decorators import device_interop_preparation @@ -210,7 +210,7 @@ class LogisticRegression(UniversalBase, class_weight=None, max_iter=1000, linesearch_max_iter=50, - verbose=False, + verbose=logger.level_enum.info, l1_ratio=None, solver="qn", handle=None, diff --git a/python/cuml/cuml/linear_model/mbsgd_classifier.pyx b/python/cuml/cuml/linear_model/mbsgd_classifier.pyx index 3a7fcc772e..1ca5467ad5 100644 --- a/python/cuml/cuml/linear_model/mbsgd_classifier.pyx +++ b/python/cuml/cuml/linear_model/mbsgd_classifier.pyx @@ -22,6 +22,7 @@ from cuml.internals.base import Base from cuml.internals.mixins import ClassifierMixin from cuml.common.doc_utils import 
generate_docstring from cuml.internals.mixins import FMajorInputTagMixin +from cuml.internals import logger from cuml.solvers import SGD @@ -143,9 +144,8 @@ class MBSGDClassifier(Base, run different models concurrently in different streams by creating handles in several streams. If it is None, a new one is created. - verbose : int or boolean, default=False - Sets logging level. It must be one of `cuml.common.logger.level_*`. - See :ref:`verbosity-levels` for more info. + verbose : level_enum + Sets logging level. See :ref:`verbosity-levels` for more info. output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \ 'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None Return results and set estimator attributes to the indicated output @@ -163,7 +163,7 @@ class MBSGDClassifier(Base, l1_ratio=0.15, fit_intercept=True, epochs=1000, tol=1e-3, shuffle=True, learning_rate='constant', eta0=0.001, power_t=0.5, batch_size=32, n_iter_no_change=5, handle=None, - verbose=False, output_type=None): + verbose=logger.level_enum.info, output_type=None): super().__init__(handle=handle, verbose=verbose, output_type=output_type) diff --git a/python/cuml/cuml/linear_model/mbsgd_regressor.pyx b/python/cuml/cuml/linear_model/mbsgd_regressor.pyx index a738eb6d74..33ba719c33 100644 --- a/python/cuml/cuml/linear_model/mbsgd_regressor.pyx +++ b/python/cuml/cuml/linear_model/mbsgd_regressor.pyx @@ -20,6 +20,7 @@ import cuml.internals from cuml.internals.array import CumlArray from cuml.internals.base import Base from cuml.internals.mixins import RegressorMixin +from cuml.internals import logger from cuml.common.doc_utils import generate_docstring from cuml.internals.mixins import FMajorInputTagMixin from cuml.solvers import SGD @@ -134,9 +135,8 @@ class MBSGDRegressor(Base, run different models concurrently in different streams by creating handles in several streams. If it is None, a new one is created. - verbose : int or boolean, default=False - Sets logging level. 
It must be one of `cuml.common.logger.level_*`. - See :ref:`verbosity-levels` for more info. + verbose : level_enum + Sets logging level. See :ref:`verbosity-levels` for more info. output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \ 'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None Return results and set estimator attributes to the indicated output @@ -154,7 +154,7 @@ class MBSGDRegressor(Base, l1_ratio=0.15, fit_intercept=True, epochs=1000, tol=1e-3, shuffle=True, learning_rate='constant', eta0=0.001, power_t=0.5, batch_size=32, n_iter_no_change=5, handle=None, - verbose=False, output_type=None): + verbose=logger.level_enum.info, output_type=None): super().__init__(handle=handle, verbose=verbose, output_type=output_type) diff --git a/python/cuml/cuml/linear_model/ridge.pyx b/python/cuml/cuml/linear_model/ridge.pyx index bd039867f3..62ce647e47 100644 --- a/python/cuml/cuml/linear_model/ridge.pyx +++ b/python/cuml/cuml/linear_model/ridge.pyx @@ -33,6 +33,7 @@ from cuml.linear_model.base import LinearPredictMixin from cuml.common import input_to_cuml_array from cuml.internals.api_decorators import device_interop_preparation from cuml.internals.api_decorators import enable_device_interop +from cuml.internals import logger IF GPUBUILD == 1: @@ -159,9 +160,8 @@ class Ridge(UniversalBase, type. If None, the output type set at the module level (`cuml.global_settings.output_type`) will be used. See :ref:`output-data-type-configuration` for more info. - verbose : int or boolean, default=False - Sets logging level. It must be one of `cuml.common.logger.level_*`. - See :ref:`verbosity-levels` for more info. + verbose : level_enum + Sets logging level. See :ref:`verbosity-levels` for more info. 
Attributes ---------- @@ -210,7 +210,7 @@ class Ridge(UniversalBase, @device_interop_preparation def __init__(self, *, alpha=1.0, solver='eig', fit_intercept=True, normalize=False, handle=None, output_type=None, - verbose=False): + verbose=logger.level_enum.info): """ Initializes the linear ridge regression class. diff --git a/python/cuml/cuml/manifold/t_sne.pyx b/python/cuml/cuml/manifold/t_sne.pyx index 31a0fa65d7..67f6c792e1 100644 --- a/python/cuml/cuml/manifold/t_sne.pyx +++ b/python/cuml/cuml/manifold/t_sne.pyx @@ -166,9 +166,8 @@ class TSNE(UniversalBase, 'chebyshev', 'cosine', 'correlation'] init : str 'random' or 'pca' (default 'random') Currently supports random or pca initialization. - verbose : int or boolean, default=False - Sets logging level. It must be one of `cuml.common.logger.level_*`. - See :ref:`verbosity-levels` for more info. + verbose : level_enum + Sets logging level. See :ref:`verbosity-levels` for more info. random_state : int (default None) Setting this can make repeated runs look more similar. Note, however, that this highly parallelized t-SNE implementation is not completely @@ -290,7 +289,7 @@ class TSNE(UniversalBase, metric='euclidean', metric_params=None, init='random', - verbose=False, + verbose=logger.level_enum.info, random_state=None, method='fft', angle=0.5, diff --git a/python/cuml/cuml/manifold/umap.pyx b/python/cuml/cuml/manifold/umap.pyx index a4708648b1..b128a16008 100644 --- a/python/cuml/cuml/manifold/umap.pyx +++ b/python/cuml/cuml/manifold/umap.pyx @@ -284,9 +284,8 @@ class UMAP(UniversalBase, run different models concurrently in different streams by creating handles in several streams. If it is None, a new one is created. - verbose : int or boolean, default=False - Sets logging level. It must be one of `cuml.common.logger.level_*`. - See :ref:`verbosity-levels` for more info. + verbose : level_enum + Sets logging level. See :ref:`verbosity-levels` for more info. 
output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \ 'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None Return results and set estimator attributes to the indicated output @@ -358,7 +357,7 @@ class UMAP(UniversalBase, precomputed_knn=None, callback=None, handle=None, - verbose=False, + verbose=logger.level_enum.info, build_algo="auto", build_kwds=None, output_type=None): diff --git a/python/cuml/cuml/multiclass/multiclass.py b/python/cuml/cuml/multiclass/multiclass.py index 61a79e1d31..2729a75253 100644 --- a/python/cuml/cuml/multiclass/multiclass.py +++ b/python/cuml/cuml/multiclass/multiclass.py @@ -18,6 +18,7 @@ from cuml.internals.array import CumlArray from cuml.internals.base import Base from cuml.internals.import_utils import has_sklearn +from cuml.internals import logger from cuml.internals.mixins import ClassifierMixin from cuml.common.doc_utils import generate_docstring from cuml.common import ( @@ -75,9 +76,8 @@ class MulticlassClassifier(Base, ClassifierMixin): run different models concurrently in different streams by creating handles in several streams. If it is None, a new one is created. - verbose : int or boolean, default=False - Sets logging level. It must be one of `cuml.common.logger.level_*`. - See :ref:`verbosity-levels` for more info. + verbose : level_enum + Sets logging level. See :ref:`verbosity-levels` for more info. output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \ 'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None Return results and set estimator attributes to the indicated output @@ -103,7 +103,7 @@ def __init__( estimator, *, handle=None, - verbose=False, + verbose=logger.level_enum.info, output_type=None, strategy="ovr", ): @@ -242,9 +242,8 @@ class OneVsRestClassifier(MulticlassClassifier): run different models concurrently in different streams by creating handles in several streams. If it is None, a new one is created. - verbose : int or boolean, default=False - Sets logging level. 
It must be one of `cuml.common.logger.level_*`. - See :ref:`verbosity-levels` for more info. + verbose : level_enum + Sets logging level. See :ref:`verbosity-levels` for more info. output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \ 'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None Return results and set estimator attributes to the indicated output @@ -255,7 +254,12 @@ class OneVsRestClassifier(MulticlassClassifier): @_deprecate_pos_args(version="21.06") def __init__( - self, estimator, *args, handle=None, verbose=False, output_type=None + self, + estimator, + *args, + handle=None, + verbose=logger.level_enum.info, + output_type=None, ): super().__init__( estimator, @@ -317,9 +321,8 @@ class OneVsOneClassifier(MulticlassClassifier): run different models concurrently in different streams by creating handles in several streams. If it is None, a new one is created. - verbose : int or boolean, default=False - Sets logging level. It must be one of `cuml.common.logger.level_*`. - See :ref:`verbosity-levels` for more info. + verbose : level_enum + Sets logging level. See :ref:`verbosity-levels` for more info. 
output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \ 'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None Return results and set estimator attributes to the indicated output @@ -330,7 +333,12 @@ class OneVsOneClassifier(MulticlassClassifier): @_deprecate_pos_args(version="21.06") def __init__( - self, estimator, *args, handle=None, verbose=False, output_type=None + self, + estimator, + *args, + handle=None, + verbose=logger.level_enum.info, + output_type=None, ): super().__init__( estimator, diff --git a/python/cuml/cuml/neighbors/kneighbors_classifier.pyx b/python/cuml/cuml/neighbors/kneighbors_classifier.pyx index 17f8628c95..3121c2072a 100644 --- a/python/cuml/cuml/neighbors/kneighbors_classifier.pyx +++ b/python/cuml/cuml/neighbors/kneighbors_classifier.pyx @@ -27,6 +27,7 @@ from cuml.common.array_descriptor import CumlArrayDescriptor from cuml.internals.mixins import ClassifierMixin from cuml.common.doc_utils import generate_docstring from cuml.internals.mixins import FMajorInputTagMixin +from cuml.internals import logger from cuml.internals.safe_imports import cpu_only_import np = cpu_only_import('numpy') @@ -98,9 +99,8 @@ class KNeighborsClassifier(ClassifierMixin, run different models concurrently in different streams by creating handles in several streams. If it is None, a new one is created. - verbose : int or boolean, default=False - Sets logging level. It must be one of `cuml.common.logger.level_*`. - See :ref:`verbosity-levels` for more info. + verbose : level_enum + Sets logging level. See :ref:`verbosity-levels` for more info. 
output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \ 'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None Return results and set estimator attributes to the indicated output @@ -151,7 +151,7 @@ class KNeighborsClassifier(ClassifierMixin, }, } - def __init__(self, *, weights="uniform", handle=None, verbose=False, + def __init__(self, *, weights="uniform", handle=None, verbose=logger.level_enum.info, output_type=None, **kwargs): super().__init__( handle=handle, diff --git a/python/cuml/cuml/neighbors/kneighbors_regressor.pyx b/python/cuml/cuml/neighbors/kneighbors_regressor.pyx index 78c1525f27..8e49d6ea42 100644 --- a/python/cuml/cuml/neighbors/kneighbors_regressor.pyx +++ b/python/cuml/cuml/neighbors/kneighbors_regressor.pyx @@ -25,6 +25,7 @@ from cuml.common.array_descriptor import CumlArrayDescriptor from cuml.internals.mixins import RegressorMixin from cuml.common.doc_utils import generate_docstring from cuml.internals.mixins import FMajorInputTagMixin +from cuml.internals import logger from cuml.internals.safe_imports import cpu_only_import np = cpu_only_import('numpy') @@ -106,9 +107,8 @@ class KNeighborsRegressor(RegressorMixin, run different models concurrently in different streams by creating handles in several streams. If it is None, a new one is created. - verbose : int or boolean, default=False - Sets logging level. It must be one of `cuml.common.logger.level_*`. - See :ref:`verbosity-levels` for more info. + verbose : level_enum + Sets logging level. See :ref:`verbosity-levels` for more info. 
output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \ 'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None Return results and set estimator attributes to the indicated output @@ -161,7 +161,7 @@ class KNeighborsRegressor(RegressorMixin, }, } - def __init__(self, *, weights="uniform", handle=None, verbose=False, + def __init__(self, *, weights="uniform", handle=None, verbose=logger.level_enum.info, output_type=None, **kwargs): super().__init__( handle=handle, diff --git a/python/cuml/cuml/neighbors/nearest_neighbors.pyx b/python/cuml/cuml/neighbors/nearest_neighbors.pyx index 4f551d282c..d68143def2 100644 --- a/python/cuml/cuml/neighbors/nearest_neighbors.pyx +++ b/python/cuml/cuml/neighbors/nearest_neighbors.pyx @@ -41,6 +41,7 @@ from cuml.metrics.distance_type cimport DistanceType from cuml.metrics.raft_distance_type cimport DistanceType as RaftDistanceType from cuml.internals.api_decorators import device_interop_preparation from cuml.internals.api_decorators import enable_device_interop +from cuml.internals import logger from cuml.neighbors.ann cimport * @@ -154,9 +155,8 @@ class NearestNeighbors(UniversalBase, ---------- n_neighbors : int (default=5) Default number of neighbors to query - verbose : int or boolean, default=False - Sets logging level. It must be one of `cuml.common.logger.level_*`. - See :ref:`verbosity-levels` for more info. + verbose : level_enum + Sets logging level. See :ref:`verbosity-levels` for more info. handle : cuml.Handle Specifies the cuml.handle that holds internal CUDA state for computations in this model. 
Most importantly, this specifies the CUDA @@ -307,7 +307,7 @@ class NearestNeighbors(UniversalBase, @device_interop_preparation def __init__(self, *, n_neighbors=5, - verbose=False, + verbose=logger.level_enum.info, handle=None, algorithm="auto", metric="euclidean", @@ -951,7 +951,7 @@ class NearestNeighbors(UniversalBase, @cuml.internals.api_return_sparse_array() -def kneighbors_graph(X=None, n_neighbors=5, mode='connectivity', verbose=False, +def kneighbors_graph(X=None, n_neighbors=5, mode='connectivity', verbose=logger.level_enum.info, handle=None, algorithm="brute", metric="euclidean", p=2, include_self=False, metric_params=None): """ @@ -973,9 +973,8 @@ def kneighbors_graph(X=None, n_neighbors=5, mode='connectivity', verbose=False, connectivity matrix with ones and zeros, 'distance' returns the edges as the distances between points with the requested metric. - verbose : int or boolean, default=False - Sets logging level. It must be one of `cuml.common.logger.level_*`. - See :ref:`verbosity-levels` for more info. + verbose : level_enum + Sets logging level. See :ref:`verbosity-levels` for more info. handle : cuml.Handle Specifies the cuml.handle that holds internal CUDA state for diff --git a/python/cuml/cuml/preprocessing/LabelEncoder.py b/python/cuml/cuml/preprocessing/LabelEncoder.py index 960935e61f..fa13f8a008 100644 --- a/python/cuml/cuml/preprocessing/LabelEncoder.py +++ b/python/cuml/cuml/preprocessing/LabelEncoder.py @@ -24,6 +24,7 @@ cpu_only_import_from, gpu_only_import, ) +from cuml.internals import logger if TYPE_CHECKING: import cudf @@ -55,9 +56,8 @@ class LabelEncoder(Base): run different models concurrently in different streams by creating handles in several streams. If it is None, a new one is created. - verbose : int or boolean, default=False - Sets logging level. It must be one of `cuml.common.logger.level_*`. - See :ref:`verbosity-levels` for more info. + verbose : level_enum + Sets logging level. See :ref:`verbosity-levels` for more info. 
     output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \
         'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None
         Return results and set estimator attributes to the indicated output
@@ -134,7 +134,7 @@ def __init__(
         *,
         handle_unknown="error",
         handle=None,
-        verbose=False,
+        verbose=logger.level_enum.info,
         output_type=None,
     ) -> None:
diff --git a/python/cuml/cuml/preprocessing/encoders.py b/python/cuml/cuml/preprocessing/encoders.py
index 943f3c294c..7a7618f1fb 100644
--- a/python/cuml/cuml/preprocessing/encoders.py
+++ b/python/cuml/cuml/preprocessing/encoders.py
@@ -234,9 +234,8 @@ class OneHotEncoder(BaseEncoder):
         run different models concurrently in different streams by creating
         handles in several streams.
         If it is None, a new one is created.
-    verbose : int or boolean, default=False
-        Sets logging level. It must be one of `cuml.common.logger.level_*`.
-        See :ref:`verbosity-levels` for more info.
+    verbose : level_enum
+        Sets logging level. See :ref:`verbosity-levels` for more info.
     output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \
         'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None
         Return results and set estimator attributes to the indicated output
@@ -262,7 +261,7 @@ def __init__(
         dtype=np.float32,
         handle_unknown="error",
         handle=None,
-        verbose=False,
+        verbose=logger.level_enum.info,
         output_type=None,
     ):
         super().__init__(
@@ -655,7 +654,7 @@ def __init__(
         dtype=np.float64,
         handle_unknown="error",
         handle=None,
-        verbose=False,
+        verbose=logger.level_enum.info,
         output_type=None,
     ) -> None:
         """Encode categorical features as an integer array.
@@ -685,9 +684,8 @@ def __init__(
         concurrently in different streams by creating handles in several
         streams.
         If it is None, a new one is created.
-    verbose : int or boolean, default=False
-        Sets logging level. It must be one of `cuml.common.logger.level_*`. See
-        :ref:`verbosity-levels` for more info.
+    verbose : level_enum
+        Sets logging level. See :ref:`verbosity-levels` for more info.
output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \ 'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None Return results and set estimator attributes to the indicated output diff --git a/python/cuml/cuml/preprocessing/label.py b/python/cuml/cuml/preprocessing/label.py index 20aac36ac8..21569b3d09 100644 --- a/python/cuml/cuml/preprocessing/label.py +++ b/python/cuml/cuml/preprocessing/label.py @@ -20,6 +20,7 @@ import cuml.internals from cuml import Base from cuml.internals.safe_imports import gpu_only_import +from cuml.internals import logger cp = gpu_only_import("cupy") cupyx = gpu_only_import("cupyx") @@ -101,9 +102,8 @@ class LabelBinarizer(Base): run different models concurrently in different streams by creating handles in several streams. If it is None, a new one is created. - verbose : int or boolean, default=False - Sets logging level. It must be one of `cuml.common.logger.level_*`. - See :ref:`verbosity-levels` for more info. + verbose : level_enum + Sets logging level. See :ref:`verbosity-levels` for more info. 
output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \ 'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None Return results and set estimator attributes to the indicated output @@ -156,7 +156,7 @@ def __init__( pos_label=1, sparse_output=False, handle=None, - verbose=False, + verbose=logger.level_enum.info, output_type=None, ): super().__init__( diff --git a/python/cuml/cuml/solvers/cd.pyx b/python/cuml/cuml/solvers/cd.pyx index ba6c5ac12b..6b1bf59f34 100644 --- a/python/cuml/cuml/solvers/cd.pyx +++ b/python/cuml/cuml/solvers/cd.pyx @@ -28,6 +28,7 @@ from cuml.common.array_descriptor import CumlArrayDescriptor from cuml.internals.base import Base from cuml.common.doc_utils import generate_docstring from cuml.internals.input_utils import input_to_cuml_array +from cuml.internals import logger from cuml.internals.mixins import FMajorInputTagMixin @@ -170,9 +171,8 @@ class CD(Base, run different models concurrently in different streams by creating handles in several streams. If it is None, a new one is created. - verbose : int or boolean, default=False - Sets logging level. It must be one of `cuml.common.logger.level_*`. - See :ref:`verbosity-levels` for more info. + verbose : level_enum + Sets logging level. See :ref:`verbosity-levels` for more info. 
output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \ 'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None Return results and set estimator attributes to the indicated output @@ -186,7 +186,7 @@ class CD(Base, def __init__(self, *, loss='squared_loss', alpha=0.0001, l1_ratio=0.15, fit_intercept=True, normalize=False, max_iter=1000, tol=1e-3, - shuffle=True, handle=None, output_type=None, verbose=False): + shuffle=True, handle=None, output_type=None, verbose=logger.level_enum.info): if loss not in ['squared_loss']: msg = "loss {!r} is not supported" diff --git a/python/cuml/cuml/solvers/qn.pyx b/python/cuml/cuml/solvers/qn.pyx index 72f51c25b2..524099eca2 100644 --- a/python/cuml/cuml/solvers/qn.pyx +++ b/python/cuml/cuml/solvers/qn.pyx @@ -26,6 +26,7 @@ import cuml.internals from cuml.internals.array import CumlArray from cuml.internals.base import Base from cuml.common.array_descriptor import CumlArrayDescriptor +from cuml.internals import logger from cuml.internals.array_sparse import SparseCumlArray from cuml.internals.global_settings import GlobalSettings from cuml.common.doc_utils import generate_docstring @@ -369,9 +370,8 @@ class QN(Base, run different models concurrently in different streams by creating handles in several streams. If it is None, a new one is created. - verbose : int or boolean, default=False - Sets logging level. It must be one of `cuml.common.logger.level_*`. - See :ref:`verbosity-levels` for more info. + verbose : level_enum + Sets logging level. See :ref:`verbosity-levels` for more info. 
     output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \
         'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None
         Return results and set estimator attributes to the indicated output
@@ -412,8 +412,8 @@ class QN(Base,
     def __init__(self, *, loss='sigmoid', fit_intercept=True,
                  l1_strength=0.0, l2_strength=0.0, max_iter=1000, tol=1e-4,
                  delta=None, linesearch_max_iter=50, lbfgs_memory=5,
-                 verbose=False, handle=None, output_type=None,
-                 warm_start=False, penalty_normalized=True):
+                 verbose=logger.level_enum.info, handle=None, output_type=None,
+                 warm_start=False, penalty_normalized=True):

         super().__init__(handle=handle,
                          verbose=verbose,
diff --git a/python/cuml/cuml/solvers/sgd.pyx b/python/cuml/cuml/solvers/sgd.pyx
index b6c452cc30..544c842309 100644
--- a/python/cuml/cuml/solvers/sgd.pyx
+++ b/python/cuml/cuml/solvers/sgd.pyx
@@ -33,6 +33,7 @@ from cuml.common.array_descriptor import CumlArrayDescriptor
 from cuml.common.doc_utils import generate_docstring
 from cuml.common import input_to_cuml_array
 from cuml.internals.mixins import FMajorInputTagMixin
+from cuml.internals import logger

 IF GPUBUILD == 1:
     from libcpp cimport bool
@@ -214,9 +215,8 @@ class SGD(Base,
         type. If None, the output type set at the module level
         (`cuml.global_settings.output_type`) will be used.
         See :ref:`output-data-type-configuration` for more info.
-    verbose : int or boolean, default=False
-        Sets logging level. It must be one of `cuml.common.logger.level_*`.
-        See :ref:`verbosity-levels` for more info.
+    verbose : level_enum
+        Sets logging level. See :ref:`verbosity-levels` for more info.
""" @@ -227,7 +227,7 @@ class SGD(Base, l1_ratio=0.15, fit_intercept=True, epochs=1000, tol=1e-3, shuffle=True, learning_rate='constant', eta0=0.001, power_t=0.5, batch_size=32, n_iter_no_change=5, handle=None, - output_type=None, verbose=False): + output_type=None, verbose=logger.level_enum.info): if loss in ['hinge', 'log', 'squared_loss']: self.loss = loss diff --git a/python/cuml/cuml/svm/linear_svc.py b/python/cuml/cuml/svm/linear_svc.py index 40df6f0808..94c48fb459 100644 --- a/python/cuml/cuml/svm/linear_svc.py +++ b/python/cuml/cuml/svm/linear_svc.py @@ -82,9 +82,8 @@ class LinearSVC(LinearSVM, ClassifierMixin): Weights to modify the parameter C for class i to class_weight[i]*C. The string 'balanced' is also accepted, in which case ``class_weight[i] = n_samples / (n_classes * n_samples_of_class[i])`` - verbose : int or boolean, default=False - Sets logging level. It must be one of `cuml.common.logger.level_*`. - See :ref:`verbosity-levels` for more info. + verbose : level_enum + Sets logging level. See :ref:`verbosity-levels` for more info. C : {LinearSVM_defaults.C.__class__.__name__ \ } (default = {LinearSVM_defaults.C}) The constant scaling factor of the loss term in the target formula diff --git a/python/cuml/cuml/svm/svc.pyx b/python/cuml/cuml/svm/svc.pyx index 283ce34bbe..fa8514b42b 100644 --- a/python/cuml/cuml/svm/svc.pyx +++ b/python/cuml/cuml/svm/svc.pyx @@ -281,9 +281,8 @@ class SVC(SVMBase, Seed for random number generator (used only when probability = True). Currently this argument is not used and a warning will be printed if the user provides it. - verbose : int or boolean, default=False - Sets logging level. It must be one of `cuml.common.logger.level_*`. - See :ref:`verbosity-levels` for more info. + verbose : level_enum + Sets logging level. See :ref:`verbosity-levels` for more info. 
Attributes ---------- @@ -333,7 +332,7 @@ class SVC(SVMBase, def __init__(self, *, handle=None, C=1, kernel='rbf', degree=3, gamma='scale', coef0=0.0, tol=1e-3, cache_size=1024.0, - max_iter=-1, nochange_steps=1000, verbose=False, + max_iter=-1, nochange_steps=1000, verbose=logger.level_enum.info, output_type=None, probability=False, random_state=None, class_weight=None, multiclass_strategy='ovo'): super().__init__( diff --git a/python/cuml/cuml/svm/svm_base.pyx b/python/cuml/cuml/svm/svm_base.pyx index 11c523a7fd..57f2899808 100644 --- a/python/cuml/cuml/svm/svm_base.pyx +++ b/python/cuml/cuml/svm/svm_base.pyx @@ -163,9 +163,8 @@ class SVMBase(Base, We monitor how much our stopping criteria changes during outer iterations. If it does not change (changes less then 1e-3*tol) for nochange_steps consecutive steps, then we stop training. - verbose : int or boolean, default=False - Sets logging level. It must be one of `cuml.common.logger.level_*`. - See :ref:`verbosity-levels` for more info. + verbose : level_enum + Sets logging level. See :ref:`verbosity-levels` for more info. epsilon: float (default = 0.1) epsilon parameter of the epsiron-SVR model. 
There is no penalty associated to points that are predicted within the epsilon-tube @@ -224,7 +223,7 @@ class SVMBase(Base, def __init__(self, *, handle=None, C=1, kernel='rbf', degree=3, gamma='auto', coef0=0.0, tol=1e-3, cache_size=1024.0, - max_iter=-1, nochange_steps=1000, verbose=False, + max_iter=-1, nochange_steps=1000, verbose=logger.level_enum.info, epsilon=0.1, output_type=None): super().__init__(handle=handle, verbose=verbose, diff --git a/python/cuml/cuml/svm/svr.pyx b/python/cuml/cuml/svm/svr.pyx index 9dc61b7635..3acdcc8186 100644 --- a/python/cuml/cuml/svm/svr.pyx +++ b/python/cuml/cuml/svm/svr.pyx @@ -29,6 +29,7 @@ from cuml.internals.array import CumlArray from cuml.internals.array_sparse import SparseCumlArray from cuml.internals.input_utils import determine_array_type_full from cuml.internals.mixins import RegressorMixin +from cuml.internals import logger from cuml.common.doc_utils import generate_docstring from pylibraft.common.handle cimport handle_t from cuml.common import input_to_cuml_array @@ -159,9 +160,8 @@ class SVR(SVMBase, RegressorMixin): We monitor how much our stopping criteria changes during outer iterations. If it does not change (changes less then 1e-3*tol) for nochange_steps consecutive steps, then we stop training. - verbose : int or boolean, default=False - Sets logging level. It must be one of `cuml.common.logger.level_*`. - See :ref:`verbosity-levels` for more info. + verbose : level_enum + Sets logging level. See :ref:`verbosity-levels` for more info. 
output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \ 'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None Return results and set estimator attributes to the indicated output @@ -231,7 +231,7 @@ class SVR(SVMBase, RegressorMixin): def __init__(self, *, handle=None, C=1, kernel='rbf', degree=3, gamma='scale', coef0=0.0, tol=1e-3, epsilon=0.1, cache_size=1024.0, max_iter=-1, nochange_steps=1000, - verbose=False, output_type=None): + verbose=logger.level_enum.info, output_type=None): super().__init__( handle=handle, C=C, diff --git a/python/cuml/cuml/tests/test_api.py b/python/cuml/cuml/tests/test_api.py index 74adbd177d..df73fe581f 100644 --- a/python/cuml/cuml/tests/test_api.py +++ b/python/cuml/cuml/tests/test_api.py @@ -22,6 +22,7 @@ import pytest import cuml import cuml.internals.mixins as cumix +from cuml.internals import logger from cuml.internals.safe_imports import gpu_only_import cp = gpu_only_import("cupy") @@ -105,17 +106,23 @@ def dataset(): class dummy_regressor_estimator(Base, cumix.RegressorMixin): - def __init__(self, *, handle=None, verbose=False, output_type=None): + def __init__( + self, *, handle=None, verbose=logger.level_enum.info, output_type=None + ): super().__init__(handle=handle) class dummy_classifier_estimator(Base, cumix.ClassifierMixin): - def __init__(self, *, handle=None, verbose=False, output_type=None): + def __init__( + self, *, handle=None, verbose=logger.level_enum.info, output_type=None + ): super().__init__(handle=handle) class dummy_cluster_estimator(Base, cumix.ClusterMixin): - def __init__(self, *, handle=None, verbose=False, output_type=None): + def __init__( + self, *, handle=None, verbose=logger.level_enum.info, output_type=None + ): super().__init__(handle=handle) diff --git a/python/cuml/cuml/tsa/arima.pyx b/python/cuml/cuml/tsa/arima.pyx index 3513362013..e6cfc2a1a4 100644 --- a/python/cuml/cuml/tsa/arima.pyx +++ b/python/cuml/cuml/tsa/arima.pyx @@ -203,9 +203,8 @@ class ARIMA(Base): run different 
models concurrently in different streams by creating handles in several streams. If it is None, a new one is created. - verbose : int or boolean, default=False - Sets logging level. It must be one of `cuml.common.logger.level_*`. - See :ref:`verbosity-levels` for more info. + verbose : level_enum + Sets logging level. See :ref:`verbosity-levels` for more info. output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \ 'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None Return results and set estimator attributes to the indicated output @@ -317,7 +316,7 @@ class ARIMA(Base): fit_intercept=True, simple_differencing=True, handle=None, - verbose=False, + verbose=logger.level_enum.info, output_type=None, convert_dtype=True): diff --git a/python/cuml/cuml/tsa/auto_arima.pyx b/python/cuml/cuml/tsa/auto_arima.pyx index 6f94e086bd..3f7d35ecff 100644 --- a/python/cuml/cuml/tsa/auto_arima.pyx +++ b/python/cuml/cuml/tsa/auto_arima.pyx @@ -1,5 +1,5 @@ # -# Copyright (c) 2020-2023, NVIDIA CORPORATION. +# Copyright (c) 2020-2024, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -138,9 +138,8 @@ class AutoARIMA(Base): If True, the data is differenced before being passed to the Kalman filter. If False, differencing is part of the state-space model. See additional notes in the ARIMA docs - verbose : int or boolean, default=False - Sets logging level. It must be one of `cuml.common.logger.level_*`. - See :ref:`verbosity-levels` for more info. + verbose : level_enum + Sets logging level. See :ref:`verbosity-levels` for more info. 
output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \ 'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None Return results and set estimator attributes to the indicated output @@ -190,7 +189,7 @@ class AutoARIMA(Base): *, handle=None, simple_differencing=True, - verbose=False, + verbose=logger.level_enum.info, output_type=None, convert_dtype=True): # Initialize base class diff --git a/python/cuml/cuml/tsa/holtwinters.pyx b/python/cuml/cuml/tsa/holtwinters.pyx index 685e92fdea..f6c3beeb72 100644 --- a/python/cuml/cuml/tsa/holtwinters.pyx +++ b/python/cuml/cuml/tsa/holtwinters.pyx @@ -29,6 +29,7 @@ from cuml.internals import _deprecate_pos_args from cuml.common import using_output_type from cuml.internals.base import Base from cuml.internals.array import CumlArray +from cuml.internals import logger from cuml.common.array_descriptor import CumlArrayDescriptor from pylibraft.common.handle cimport handle_t @@ -165,9 +166,8 @@ class ExponentialSmoothing(Base): run different models concurrently in different streams by creating handles in several streams. If it is None, a new one is created. - verbose : int or boolean, default=False - Sets logging level. It must be one of `cuml.common.logger.level_*`. - See :ref:`verbosity-levels` for more info. + verbose : level_enum + Sets logging level. See :ref:`verbosity-levels` for more info. 
output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \ 'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None Return results and set estimator attributes to the indicated output @@ -187,7 +187,7 @@ class ExponentialSmoothing(Base): def __init__(self, endog, *, seasonal="additive", seasonal_periods=2, start_periods=2, ts_num=1, eps=2.24e-3, handle=None, - verbose=False, output_type=None): + verbose=logger.level_enum.info, output_type=None): super().__init__(handle=handle, verbose=verbose, diff --git a/wiki/python/ESTIMATOR_GUIDE.md b/wiki/python/ESTIMATOR_GUIDE.md index 5413bfd6be..45ca84373f 100644 --- a/wiki/python/ESTIMATOR_GUIDE.md +++ b/wiki/python/ESTIMATOR_GUIDE.md @@ -52,7 +52,7 @@ At a high level, all cuML Estimators must: ```python class MyEstimator(Base): - def __init__(self, *, extra_arg=True, handle=None, verbose=False, output_type=None): + def __init__(self, *, extra_arg=True, handle=None, verbose=logger.level_enum.info, output_type=None): super().__init__(handle=handle, verbose=verbose, output_type=output_type) ... 
``` From ee7e37668fb1f3e6b1c5f56ab0cc41b34d9cfcc2 Mon Sep 17 00:00:00 2001 From: Vyas Ramasubramani Date: Sat, 7 Dec 2024 01:02:34 +0000 Subject: [PATCH 10/32] Fix some docstrings --- .../cuml/ensemble/randomforest_common.pyx | 3 +- python/cuml/cuml/naive_bayes/naive_bayes.py | 42 +++++++++---------- python/cuml/cuml/neighbors/kernel_density.py | 8 ++-- python/cuml/cuml/preprocessing/encoders.py | 5 +-- .../random_projection/random_projection.pyx | 15 ++++--- python/cuml/cuml/svm/linear_svr.py | 5 +-- 6 files changed, 37 insertions(+), 41 deletions(-) diff --git a/python/cuml/cuml/ensemble/randomforest_common.pyx b/python/cuml/cuml/ensemble/randomforest_common.pyx index 38c15eaca2..4d53eeb216 100644 --- a/python/cuml/cuml/ensemble/randomforest_common.pyx +++ b/python/cuml/cuml/ensemble/randomforest_common.pyx @@ -28,6 +28,7 @@ from cuml.internals.base import Base from cuml.internals.array import CumlArray from cuml.common.exceptions import NotFittedError import cuml.internals +from cuml.internals import logger from cython.operator cimport dereference as deref @@ -70,7 +71,7 @@ class BaseRandomForestModel(Base): def __init__(self, *, split_criterion, n_streams=4, n_estimators=100, max_depth=16, handle=None, max_features='sqrt', n_bins=128, bootstrap=True, - verbose=False, min_samples_leaf=1, min_samples_split=2, + verbose=logger.level_enum.info, min_samples_leaf=1, min_samples_split=2, max_samples=1.0, max_leaves=-1, accuracy_metric=None, dtype=None, output_type=None, min_weight_fraction_leaf=None, n_jobs=None, max_leaf_nodes=None, min_impurity_decrease=0.0, diff --git a/python/cuml/cuml/naive_bayes/naive_bayes.py b/python/cuml/cuml/naive_bayes/naive_bayes.py index 701f88862e..ff3e79e85c 100644 --- a/python/cuml/cuml/naive_bayes/naive_bayes.py +++ b/python/cuml/cuml/naive_bayes/naive_bayes.py @@ -23,6 +23,7 @@ from cuml.common.doc_utils import generate_docstring from cuml.internals.mixins import ClassifierMixin from cuml.internals.base import Base +from 
cuml.internals import logger from cuml.common.array_descriptor import CumlArrayDescriptor from cuml.common import CumlArray import math @@ -159,7 +160,9 @@ class _BaseNB(Base, ClassifierMixin): class_log_prior_ = CumlArrayDescriptor() feature_log_prob_ = CumlArrayDescriptor() - def __init__(self, *, verbose=False, handle=None, output_type=None): + def __init__( + self, *, verbose=logger.level_enum.info, handle=None, output_type=None + ): super(_BaseNB, self).__init__( verbose=verbose, handle=handle, output_type=output_type ) @@ -328,9 +331,8 @@ class GaussianNB(_BaseNB): users can run different models concurrently in different streams by creating handles in several streams. If it is None, a new one is created. - verbose : int or boolean, default=False - Sets logging level. It must be one of `cuml.common.logger.level_*`. - See :ref:`verbosity-levels` for more info. + verbose : level_enum + Sets logging level. See :ref:`verbosity-levels` for more info. Examples -------- @@ -361,7 +363,7 @@ def __init__( var_smoothing=1e-9, output_type=None, handle=None, - verbose=False, + verbose=logger.level_enum.info, ): super(GaussianNB, self).__init__( @@ -736,7 +738,7 @@ def __init__( alpha=1.0, fit_prior=True, class_prior=None, - verbose=False, + verbose=logger.level_enum.info, handle=None, output_type=None, ): @@ -1107,9 +1109,8 @@ class MultinomialNB(_BaseDiscreteNB): users can run different models concurrently in different streams by creating handles in several streams. If it is None, a new one is created. - verbose : int or boolean, default=False - Sets logging level. It must be one of `cuml.common.logger.level_*`. - See :ref:`verbosity-levels` for more info. + verbose : level_enum + Sets logging level. See :ref:`verbosity-levels` for more info. 
Attributes ---------- @@ -1177,7 +1178,7 @@ def __init__( class_prior=None, output_type=None, handle=None, - verbose=False, + verbose=logger.level_enum.info, ): super(MultinomialNB, self).__init__( alpha=alpha, @@ -1253,9 +1254,8 @@ class BernoulliNB(_BaseDiscreteNB): users can run different models concurrently in different streams by creating handles in several streams. If it is None, a new one is created. - verbose : int or boolean, default=False - Sets logging level. It must be one of `cuml.common.logger.level_*`. - See :ref:`verbosity-levels` for more info. + verbose : level_enum + Sets logging level. See :ref:`verbosity-levels` for more info. Attributes ---------- @@ -1310,7 +1310,7 @@ def __init__( class_prior=None, output_type=None, handle=None, - verbose=False, + verbose=logger.level_enum.info, ): super(BernoulliNB, self).__init__( alpha=alpha, @@ -1417,9 +1417,8 @@ class ComplementNB(_BaseDiscreteNB): users can run different models concurrently in different streams by creating handles in several streams. If it is None, a new one is created. - verbose : int or boolean, default=False - Sets logging level. It must be one of `cuml.common.logger.level_*`. - See :ref:`verbosity-levels` for more info. + verbose : level_enum + Sets logging level. See :ref:`verbosity-levels` for more info. Attributes ---------- @@ -1470,7 +1469,7 @@ def __init__( norm=False, output_type=None, handle=None, - verbose=False, + verbose=logger.level_enum.info, ): super(ComplementNB, self).__init__( alpha=alpha, @@ -1575,9 +1574,8 @@ class CategoricalNB(_BaseDiscreteNB): users can run different models concurrently in different streams by creating handles in several streams. If it is None, a new one is created. - verbose : int or boolean, default=False - Sets logging level. It must be one of `cuml.common.logger.level_*`. - See :ref:`verbosity-levels` for more info. + verbose : level_enum + Sets logging level. See :ref:`verbosity-levels` for more info. 
Attributes ---------- @@ -1626,7 +1624,7 @@ def __init__( class_prior=None, output_type=None, handle=None, - verbose=False, + verbose=logger.level_enum.info, ): super(CategoricalNB, self).__init__( alpha=alpha, diff --git a/python/cuml/cuml/neighbors/kernel_density.py b/python/cuml/cuml/neighbors/kernel_density.py index 3af2107995..b1b7bc1976 100644 --- a/python/cuml/cuml/neighbors/kernel_density.py +++ b/python/cuml/cuml/neighbors/kernel_density.py @@ -18,6 +18,7 @@ from cuml.internals.import_utils import has_scipy from cuml.metrics import pairwise_distances from cuml.internals.base import Base +from cuml.internals import logger from cuml.internals.input_utils import input_to_cuml_array from cuml.internals.input_utils import input_to_cupy_array from cuml.internals.safe_imports import gpu_only_import_from @@ -183,9 +184,8 @@ class KernelDensity(Base): users can run different models concurrently in different streams by creating handles in several streams. If it is None, a new one is created. - verbose : int or boolean, default=False - Sets logging level. It must be one of `cuml.common.logger.level_*`. - See :ref:`verbosity-levels` for more info. + verbose : level_enum + Sets logging level. See :ref:`verbosity-levels` for more info. Examples -------- @@ -210,7 +210,7 @@ def __init__( metric_params=None, output_type=None, handle=None, - verbose=False, + verbose=logger.level_enum.info, ): super(KernelDensity, self).__init__( verbose=verbose, handle=handle, output_type=output_type diff --git a/python/cuml/cuml/preprocessing/encoders.py b/python/cuml/cuml/preprocessing/encoders.py index 7a7618f1fb..8999a427e6 100644 --- a/python/cuml/cuml/preprocessing/encoders.py +++ b/python/cuml/cuml/preprocessing/encoders.py @@ -73,9 +73,8 @@ class BaseEncoder(Base, CheckFeaturesMixIn): run different models concurrently in different streams by creating handles in several streams. If it is None, a new one is created. - verbose : int or boolean, default=False - Sets logging level. 
It must be one of `cuml.common.logger.level_*`. - See :ref:`verbosity-levels` for more info. + verbose : level_enum + Sets logging level. See :ref:`verbosity-levels` for more info. output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \ 'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None Return results and set estimator attributes to the indicated output diff --git a/python/cuml/cuml/random_projection/random_projection.pyx b/python/cuml/cuml/random_projection/random_projection.pyx index 81811a4849..0ceaaedf94 100644 --- a/python/cuml/cuml/random_projection/random_projection.pyx +++ b/python/cuml/cuml/random_projection/random_projection.pyx @@ -28,6 +28,7 @@ from cuml.internals.base import Base from pylibraft.common.handle cimport * from cuml.common import input_to_cuml_array from cuml.internals.mixins import FMajorInputTagMixin +from cuml.internals import logger from rmm.librmm.cuda_stream_view cimport cuda_stream_view @@ -402,9 +403,8 @@ class GaussianRandomProjection(Base, random_state : int (default = None) Seed used to initialize random generator - verbose : int or boolean, default=False - Sets logging level. It must be one of `cuml.common.logger.level_*`. - See :ref:`verbosity-levels` for more info. + verbose : level_enum + Sets logging level. See :ref:`verbosity-levels` for more info. 
output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \ 'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None Return results and set estimator attributes to the indicated output @@ -429,7 +429,7 @@ class GaussianRandomProjection(Base, """ def __init__(self, *, handle=None, n_components='auto', eps=0.1, - random_state=None, verbose=False, output_type=None): + random_state=None, verbose=logger.level_enum.info, output_type=None): Base.__init__(self, handle=handle, @@ -545,9 +545,8 @@ class SparseRandomProjection(Base, random_state : int (default = None) Seed used to initialize random generator - verbose : int or boolean, default=False - Sets logging level. It must be one of `cuml.common.logger.level_*`. - See :ref:`verbosity-levels` for more info. + verbose : level_enum + Sets logging level. See :ref:`verbosity-levels` for more info. output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \ 'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None @@ -574,7 +573,7 @@ class SparseRandomProjection(Base, def __init__(self, *, handle=None, n_components='auto', density='auto', eps=0.1, dense_output=True, random_state=None, - verbose=False, output_type=None): + verbose=logger.level_enum.info, output_type=None): Base.__init__(self, handle=handle, diff --git a/python/cuml/cuml/svm/linear_svr.py b/python/cuml/cuml/svm/linear_svr.py index 3f9b8040d9..48f37d83cb 100644 --- a/python/cuml/cuml/svm/linear_svr.py +++ b/python/cuml/cuml/svm/linear_svr.py @@ -76,9 +76,8 @@ class LinearSVR(LinearSVM, RegressorMixin): } (default = {LinearSVM_defaults.lbfgs_memory}) Number of vectors approximating the hessian for the underlying QN solver (l-bfgs). - verbose : int or boolean, default=False - Sets logging level. It must be one of `cuml.common.logger.level_*`. - See :ref:`verbosity-levels` for more info. + verbose : level_enum + Sets logging level. See :ref:`verbosity-levels` for more info. 
C : {LinearSVM_defaults.C.__class__.__name__ \ } (default = {LinearSVM_defaults.C}) The constant scaling factor of the loss term in the target formula From 81ba84365429c92ce4293f148c5c9b04e5e05994 Mon Sep 17 00:00:00 2001 From: Vyas Ramasubramani Date: Sat, 7 Dec 2024 01:17:59 +0000 Subject: [PATCH 11/32] Fix one logger test --- python/cuml/cuml/internals/logger.pyx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/cuml/cuml/internals/logger.pyx b/python/cuml/cuml/internals/logger.pyx index 37b51dc182..c7eae315ea 100644 --- a/python/cuml/cuml/internals/logger.pyx +++ b/python/cuml/cuml/internals/logger.pyx @@ -145,7 +145,7 @@ def set_pattern(pattern): # pattern. cdef string prev = default_pattern() # TODO: Need to cast to a Python string? - context_object = PatternSetter(prev.decode("UTF-8")) + context_object = PatternSetter(prev) cdef string s = pattern.encode("UTF-8") default_logger().set_pattern(s) return context_object From 4e63f7db850fbadbd28dbab43dfb4ef46a743147 Mon Sep 17 00:00:00 2001 From: Vyas Ramasubramani Date: Mon, 9 Dec 2024 21:23:44 +0000 Subject: [PATCH 12/32] Fix compilation of cuml-cpu --- python/cuml/cuml/cluster/kmeans.pyx | 2 +- python/cuml/cuml/internals/logger.pxd | 14 +++++++++++++- 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/python/cuml/cuml/cluster/kmeans.pyx b/python/cuml/cuml/cluster/kmeans.pyx index 3c0435651d..e77998deef 100644 --- a/python/cuml/cuml/cluster/kmeans.pyx +++ b/python/cuml/cuml/cluster/kmeans.pyx @@ -36,8 +36,8 @@ IF GPUBUILD == 1: from cuml.metrics.distance_type cimport DistanceType from cuml.cluster.kmeans_utils cimport params as KMeansParams from cuml.cluster.kmeans_utils cimport KMeansPlusPlus, Random, Array - from cuml.internals import logger +from cuml.internals import logger from cuml.internals.array import CumlArray from cuml.common.array_descriptor import CumlArrayDescriptor from cuml.internals.base import UniversalBase diff --git a/python/cuml/cuml/internals/logger.pxd 
b/python/cuml/cuml/internals/logger.pxd index bda0374448..c9fc9b6cc4 100644 --- a/python/cuml/cuml/internals/logger.pxd +++ b/python/cuml/cuml/internals/logger.pxd @@ -17,9 +17,10 @@ # distutils: language = c++ +from libcpp.string cimport string + IF GPUBUILD == 1: import sys - from libcpp.string cimport string from libcpp.memory cimport make_shared, shared_ptr from libcpp cimport bool @@ -75,6 +76,17 @@ IF GPUBUILD == 1: cdef void _log_callback(int lvl, const char * msg) with gil cdef void _log_flush() with gil +ELSE: + cpdef enum class level_enum: + trace = 0 + debug = 1 + info = 2 + warn = 3 + error = 4 + critical = 5 + off = 6 + n_levels = 7 + cdef class LogLevelSetter: """Internal "context manager" object for restoring previous log level""" From e6a9898e8e3d320c36d898da5afe33e8f187b3dd Mon Sep 17 00:00:00 2001 From: Vyas Ramasubramani Date: Tue, 10 Dec 2024 02:57:16 +0000 Subject: [PATCH 13/32] Enable flushing --- python/cuml/cuml/internals/logger.pxd | 5 ++--- python/cuml/cuml/internals/logger.pyx | 4 +--- 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/python/cuml/cuml/internals/logger.pxd b/python/cuml/cuml/internals/logger.pxd index c9fc9b6cc4..7ab6446a9d 100644 --- a/python/cuml/cuml/internals/logger.pxd +++ b/python/cuml/cuml/internals/logger.pxd @@ -67,11 +67,10 @@ IF GPUBUILD == 1: cdef string default_pattern() except + ctypedef void(*log_callback_t)(int, const char*) except * with gil + ctypedef void(*flush_callback_t)() except * with gil cdef cppclass callback_sink_mt: - # TODO: Expose flushing in C++ - callback_sink_mt(log_callback_t callback) except + - # void setFlush(void(*flush)()) except + + callback_sink_mt(log_callback_t callback, flush_callback_t flush) except + cdef void _log_callback(int lvl, const char * msg) with gil cdef void _log_flush() with gil diff --git a/python/cuml/cuml/internals/logger.pyx b/python/cuml/cuml/internals/logger.pyx index c7eae315ea..5186d62714 100644 --- a/python/cuml/cuml/internals/logger.pyx +++ 
b/python/cuml/cuml/internals/logger.pyx @@ -320,6 +320,4 @@ def flush(): IF GPUBUILD == 1: # Set callback functions to handle redirected sys.stdout in Python - default_logger().sinks().push_back( make_shared[callback_sink_mt](_log_callback)) - # TODO: Expose flushing in the callback sink. - # default_logger().setFlush(_log_flush) + default_logger().sinks().push_back( make_shared[callback_sink_mt](_log_callback, _log_flush)) From 69e2bec72085b43ba5ca84af95d4d6650830f179 Mon Sep 17 00:00:00 2001 From: Vyas Ramasubramani Date: Wed, 11 Dec 2024 23:04:04 +0000 Subject: [PATCH 14/32] Revert all pure Python changes --- python/cuml/cuml/dask/cluster/dbscan.py | 10 +++--- python/cuml/cuml/dask/cluster/kmeans.py | 10 +++--- python/cuml/cuml/dask/decomposition/base.py | 12 ++----- python/cuml/cuml/dask/decomposition/pca.py | 11 +++---- python/cuml/cuml/dask/decomposition/tsvd.py | 1 - .../dask/ensemble/randomforestclassifier.py | 5 ++- .../dask/ensemble/randomforestregressor.py | 5 ++- .../text/tfidf_transformer.py | 7 ++-- .../cuml/dask/linear_model/elastic_net.py | 3 +- python/cuml/cuml/dask/linear_model/lasso.py | 3 +- .../dask/linear_model/linear_regression.py | 5 +-- .../dask/linear_model/logistic_regression.py | 1 - python/cuml/cuml/dask/linear_model/ridge.py | 5 +-- python/cuml/cuml/dask/manifold/umap.py | 1 - .../cuml/cuml/dask/naive_bayes/naive_bayes.py | 7 ++-- .../dask/neighbors/kneighbors_classifier.py | 8 +---- .../dask/neighbors/kneighbors_regressor.py | 8 ++--- .../cuml/dask/neighbors/nearest_neighbors.py | 9 +++--- .../cuml/dask/preprocessing/LabelEncoder.py | 5 +-- python/cuml/cuml/dask/preprocessing/label.py | 3 +- python/cuml/cuml/dask/solvers/cd.py | 3 +- .../cuml/decomposition/incremental_pca.py | 8 ++--- .../cuml/cuml/explainer/permutation_shap.pyx | 5 ++- python/cuml/cuml/feature_extraction/_tfidf.py | 8 ++--- python/cuml/cuml/linear_model/lasso.py | 8 ++--- python/cuml/cuml/multiclass/multiclass.py | 32 +++++++------------ 
python/cuml/cuml/neighbors/kernel_density.py | 8 ++--- .../cuml/cuml/preprocessing/LabelEncoder.py | 8 ++--- python/cuml/cuml/preprocessing/encoders.py | 19 ++++++----- python/cuml/cuml/preprocessing/label.py | 8 ++--- python/cuml/cuml/svm/linear_svc.py | 5 +-- python/cuml/cuml/svm/linear_svr.py | 5 +-- python/cuml/cuml/tests/test_api.py | 13 ++------ 33 files changed, 96 insertions(+), 153 deletions(-) diff --git a/python/cuml/cuml/dask/cluster/dbscan.py b/python/cuml/cuml/dask/cluster/dbscan.py index 1b225571a3..b71e34682a 100644 --- a/python/cuml/cuml/dask/cluster/dbscan.py +++ b/python/cuml/cuml/dask/cluster/dbscan.py @@ -23,7 +23,6 @@ from cuml.dask.common.base import DelayedPredictionMixin from cuml.dask.common.base import BaseEstimator from cuml.internals.safe_imports import cpu_only_import -from cuml.internals import logger np = cpu_only_import("numpy") @@ -42,8 +41,9 @@ class DBSCAN(BaseEstimator, DelayedPredictionMixin, DelayedTransformMixin): ---------- client : dask.distributed.Client Dask client to use - verbose : level_enum - Sets logging level. See :ref:`verbosity-levels` for more info. + verbose : int or boolean, default=False + Sets logging level. It must be one of `cuml.common.logger.level_*`. + See :ref:`verbosity-levels` for more info. min_samples : int (default = 5) The number of samples in a neighborhood such that this group can be considered as an important core point (including the point itself). 
@@ -74,9 +74,7 @@ class DBSCAN(BaseEstimator, DelayedPredictionMixin, DelayedTransformMixin): """ def __init__(self, *, client=None, verbose=False, **kwargs): - super().__init__( - client=client, verbose=logger.level_enum.info, **kwargs - ) + super().__init__(client=client, verbose=verbose, **kwargs) @staticmethod @mnmg_import diff --git a/python/cuml/cuml/dask/cluster/kmeans.py b/python/cuml/cuml/dask/cluster/kmeans.py index fce4af0727..3dfeced41f 100644 --- a/python/cuml/cuml/dask/cluster/kmeans.py +++ b/python/cuml/cuml/dask/cluster/kmeans.py @@ -25,7 +25,6 @@ from cuml.dask.common.base import DelayedPredictionMixin from cuml.dask.common.base import BaseEstimator from cuml.internals.safe_imports import gpu_only_import -from cuml.internals import logger cp = gpu_only_import("cupy") @@ -59,8 +58,9 @@ class KMeans(BaseEstimator, DelayedPredictionMixin, DelayedTransformMixin): The more iterations of EM, the more accurate, but slower. tol : float (default = 1e-4) Stopping criterion when centroid means do not change much. - verbose : level_enum - Sets logging level. See :ref:`verbosity-levels` for more info. + verbose : int or boolean, default=False + Sets logging level. It must be one of `cuml.common.logger.level_*`. + See :ref:`verbosity-levels` for more info. random_state : int (default = 1) If you want results to be the same when you restart Python, select a state. 
@@ -93,9 +93,7 @@ class KMeans(BaseEstimator, DelayedPredictionMixin, DelayedTransformMixin): """ - def __init__( - self, *, client=None, verbose=logger.level_enum.info, **kwargs - ): + def __init__(self, *, client=None, verbose=False, **kwargs): super().__init__(client=client, verbose=verbose, **kwargs) @staticmethod diff --git a/python/cuml/cuml/dask/decomposition/base.py b/python/cuml/cuml/dask/decomposition/base.py index 173a045a69..22dc206f49 100644 --- a/python/cuml/cuml/dask/decomposition/base.py +++ b/python/cuml/cuml/dask/decomposition/base.py @@ -1,4 +1,4 @@ -# Copyright (c) 2019-2024, NVIDIA CORPORATION. +# Copyright (c) 2019-2023, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -26,18 +26,10 @@ from cuml.dask.common.base import BaseEstimator from cuml.dask.common.input_utils import DistributedDataHandler -from cuml.internals import logger class BaseDecomposition(BaseEstimator): - def __init__( - self, - *, - model_func, - client=None, - verbose=logger.level_enum.info, - **kwargs, - ): + def __init__(self, *, model_func, client=None, verbose=False, **kwargs): """ Constructor for distributed decomposition model """ diff --git a/python/cuml/cuml/dask/decomposition/pca.py b/python/cuml/cuml/dask/decomposition/pca.py index 42a5b3ce35..8cebb2764f 100644 --- a/python/cuml/cuml/dask/decomposition/pca.py +++ b/python/cuml/cuml/dask/decomposition/pca.py @@ -20,8 +20,6 @@ from cuml.dask.common.base import DelayedTransformMixin from cuml.dask.common.base import DelayedInverseTransformMixin -from cuml.internals import logger - class PCA( BaseDecomposition, @@ -101,8 +99,9 @@ class PCA( 'full': Run exact full SVD and select the components by postprocessing 'jacobi': Iteratively compute SVD of the covariance matrix 'auto': For compatibility with Scikit-learn. Alias for 'jacobi'. - verbose : level_enum - Sets logging level. 
See :ref:`verbosity-levels` for more info. + verbose : int or boolean, default=False + Sets logging level. It must be one of `cuml.common.logger.level_*`. + See :ref:`verbosity-levels` for more info. whiten : boolean (default = False) If True, de-correlates the components. This is done by dividing them by the corresponding singular values then multiplying by sqrt(n_samples). @@ -146,9 +145,7 @@ class PCA( `_. """ - def __init__( - self, *, client=None, verbose=logger.level_enum.info, **kwargs - ): + def __init__(self, *, client=None, verbose=False, **kwargs): super().__init__( model_func=PCA._create_pca, diff --git a/python/cuml/cuml/dask/decomposition/tsvd.py b/python/cuml/cuml/dask/decomposition/tsvd.py index 849a26cde2..67392b7555 100644 --- a/python/cuml/cuml/dask/decomposition/tsvd.py +++ b/python/cuml/cuml/dask/decomposition/tsvd.py @@ -19,7 +19,6 @@ from cuml.dask.common.base import mnmg_import from cuml.dask.common.base import DelayedTransformMixin from cuml.dask.common.base import DelayedInverseTransformMixin -from cuml.internals import logger class TruncatedSVD( diff --git a/python/cuml/cuml/dask/ensemble/randomforestclassifier.py b/python/cuml/cuml/dask/ensemble/randomforestclassifier.py index 1d4adebcba..4dfd7c3ddb 100755 --- a/python/cuml/cuml/dask/ensemble/randomforestclassifier.py +++ b/python/cuml/cuml/dask/ensemble/randomforestclassifier.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2019-2024, NVIDIA CORPORATION. +# Copyright (c) 2019-2023, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -26,7 +26,6 @@ from cuml.dask.common.base import BaseEstimator from cuml.internals.safe_imports import gpu_only_import from cuml.internals.safe_imports import cpu_only_import -from cuml.internals import logger np = cpu_only_import("numpy") cp = gpu_only_import("cupy") @@ -164,7 +163,7 @@ def __init__( *, workers=None, client=None, - verbose=logger.level_enum.info, + verbose=False, n_estimators=100, random_state=None, ignore_empty_partitions=False, diff --git a/python/cuml/cuml/dask/ensemble/randomforestregressor.py b/python/cuml/cuml/dask/ensemble/randomforestregressor.py index 7c50197154..f2c7d283eb 100755 --- a/python/cuml/cuml/dask/ensemble/randomforestregressor.py +++ b/python/cuml/cuml/dask/ensemble/randomforestregressor.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2019-2024, NVIDIA CORPORATION. +# Copyright (c) 2019-2023, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -17,7 +17,6 @@ from cuml.ensemble import RandomForestRegressor as cuRFR from cuml.dask.ensemble.base import BaseRandomForestModel from cuml.dask.common.base import BaseEstimator -from cuml.internals import logger import dask @@ -150,7 +149,7 @@ def __init__( *, workers=None, client=None, - verbose=logger.level_enum.info, + verbose=False, n_estimators=100, random_state=None, ignore_empty_partitions=False, diff --git a/python/cuml/cuml/dask/feature_extraction/text/tfidf_transformer.py b/python/cuml/cuml/dask/feature_extraction/text/tfidf_transformer.py index 5a5de0b651..7dbcd26d46 100644 --- a/python/cuml/cuml/dask/feature_extraction/text/tfidf_transformer.py +++ b/python/cuml/cuml/dask/feature_extraction/text/tfidf_transformer.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2020-2024, NVIDIA CORPORATION. +# Copyright (c) 2020-2023, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -26,7 +26,6 @@ from cuml.dask.common.input_utils import DistributedDataHandler from cuml.feature_extraction.text import TfidfTransformer as s_TfidfTransformer -from cuml.internals import logger class TfidfTransformer(BaseEstimator, DelayedTransformMixin): @@ -78,9 +77,7 @@ class TfidfTransformer(BaseEstimator, DelayedTransformMixin): """ - def __init__( - self, *, client=None, verbose=logger.level_enum.info, **kwargs - ): + def __init__(self, *, client=None, verbose=False, **kwargs): """ Create new distributed TF-IDF transformer instance diff --git a/python/cuml/cuml/dask/linear_model/elastic_net.py b/python/cuml/cuml/dask/linear_model/elastic_net.py index f2fbc816ea..9ddaa8f6ff 100644 --- a/python/cuml/cuml/dask/linear_model/elastic_net.py +++ b/python/cuml/cuml/dask/linear_model/elastic_net.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2019-2024, NVIDIA CORPORATION. +# Copyright (c) 2019-2023, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -16,7 +16,6 @@ from cuml.dask.solvers import CD from cuml.dask.common.base import BaseEstimator -from cuml.internals import logger class ElasticNet(BaseEstimator): diff --git a/python/cuml/cuml/dask/linear_model/lasso.py b/python/cuml/cuml/dask/linear_model/lasso.py index 7854975c78..65bc02bea0 100644 --- a/python/cuml/cuml/dask/linear_model/lasso.py +++ b/python/cuml/cuml/dask/linear_model/lasso.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2020-2024, NVIDIA CORPORATION. +# Copyright (c) 2020-2023, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -16,7 +16,6 @@ from cuml.dask.solvers import CD from cuml.dask.common.base import BaseEstimator -from cuml.internals import logger class Lasso(BaseEstimator): diff --git a/python/cuml/cuml/dask/linear_model/linear_regression.py b/python/cuml/cuml/dask/linear_model/linear_regression.py index 22f49e723d..98ffba672d 100644 --- a/python/cuml/cuml/dask/linear_model/linear_regression.py +++ b/python/cuml/cuml/dask/linear_model/linear_regression.py @@ -19,7 +19,6 @@ from cuml.dask.common.base import SyncFitMixinLinearModel from raft_dask.common.comms import get_raft_comm_state from dask.distributed import get_worker -from cuml.internals import logger class LinearRegression( @@ -64,9 +63,7 @@ class LinearRegression( The independent term. If `fit_intercept` is False, will be 0. """ - def __init__( - self, *, client=None, verbose=logger.level_enum.info, **kwargs - ): + def __init__(self, *, client=None, verbose=False, **kwargs): super().__init__(client=client, verbose=verbose, **kwargs) def fit(self, X, y): diff --git a/python/cuml/cuml/dask/linear_model/logistic_regression.py b/python/cuml/cuml/dask/linear_model/logistic_regression.py index 89fd31c57d..82266807ba 100644 --- a/python/cuml/cuml/dask/linear_model/logistic_regression.py +++ b/python/cuml/cuml/dask/linear_model/logistic_regression.py @@ -28,7 +28,6 @@ from cuml.dask.common.utils import wait_and_raise_from_futures from cuml.internals.safe_imports import cpu_only_import from cuml.internals.safe_imports import gpu_only_import -from cuml.internals import logger cp = gpu_only_import("cupy") cupyx = gpu_only_import("cupyx") diff --git a/python/cuml/cuml/dask/linear_model/ridge.py b/python/cuml/cuml/dask/linear_model/ridge.py index f334c86efb..2830f3ce38 100644 --- a/python/cuml/cuml/dask/linear_model/ridge.py +++ b/python/cuml/cuml/dask/linear_model/ridge.py @@ -19,7 +19,6 @@ from cuml.dask.common.base import SyncFitMixinLinearModel from raft_dask.common.comms import get_raft_comm_state from dask.distributed import 
get_worker -from cuml.internals import logger class Ridge(BaseEstimator, SyncFitMixinLinearModel, DelayedPredictionMixin): @@ -69,9 +68,7 @@ class Ridge(BaseEstimator, SyncFitMixinLinearModel, DelayedPredictionMixin): """ - def __init__( - self, *, client=None, verbose=logger.level_enum.info, **kwargs - ): + def __init__(self, *, client=None, verbose=False, **kwargs): super().__init__(client=client, verbose=verbose, **kwargs) self.coef_ = None diff --git a/python/cuml/cuml/dask/manifold/umap.py b/python/cuml/cuml/dask/manifold/umap.py index ca0620e4f5..181bfb0728 100644 --- a/python/cuml/cuml/dask/manifold/umap.py +++ b/python/cuml/cuml/dask/manifold/umap.py @@ -15,7 +15,6 @@ from cuml.dask.common.base import BaseEstimator, DelayedTransformMixin from cuml.dask.common.input_utils import DistributedDataHandler -from cuml.internals import logger class UMAP(BaseEstimator, DelayedTransformMixin): diff --git a/python/cuml/cuml/dask/naive_bayes/naive_bayes.py b/python/cuml/cuml/dask/naive_bayes/naive_bayes.py index 8a7563bd36..f3e2b5a443 100644 --- a/python/cuml/cuml/dask/naive_bayes/naive_bayes.py +++ b/python/cuml/cuml/dask/naive_bayes/naive_bayes.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2020-2024, NVIDIA CORPORATION. +# Copyright (c) 2020-2023, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -27,7 +27,6 @@ from toolz import first import dask from cuml.internals.safe_imports import gpu_only_import -from cuml.internals import logger cp = gpu_only_import("cupy") @@ -83,9 +82,7 @@ class MultinomialNB(BaseEstimator, DelayedPredictionMixin): """ - def __init__( - self, *, client=None, verbose=logger.level_enum.info, **kwargs - ): + def __init__(self, *, client=None, verbose=False, **kwargs): """ Create new multinomial distributed Naive Bayes classifier instance diff --git a/python/cuml/cuml/dask/neighbors/kneighbors_classifier.py b/python/cuml/cuml/dask/neighbors/kneighbors_classifier.py index 9e6c2cbd55..fa0e1e59d9 100644 --- a/python/cuml/cuml/dask/neighbors/kneighbors_classifier.py +++ b/python/cuml/cuml/dask/neighbors/kneighbors_classifier.py @@ -28,7 +28,6 @@ import dask.array as da from uuid import uuid1 from cuml.internals.safe_imports import cpu_only_import -from cuml.internals import logger np = cpu_only_import("numpy") pd = cpu_only_import("pandas") @@ -66,12 +65,7 @@ class KNeighborsClassifier(NearestNeighbors): """ def __init__( - self, - *, - client=None, - streams_per_handle=0, - verbose=logger.level_enum.info, - **kwargs, + self, *, client=None, streams_per_handle=0, verbose=False, **kwargs ): super().__init__(client=client, verbose=verbose, **kwargs) self.streams_per_handle = streams_per_handle diff --git a/python/cuml/cuml/dask/neighbors/kneighbors_regressor.py b/python/cuml/cuml/dask/neighbors/kneighbors_regressor.py index 489ca0be8b..2027169c67 100644 --- a/python/cuml/cuml/dask/neighbors/kneighbors_regressor.py +++ b/python/cuml/cuml/dask/neighbors/kneighbors_regressor.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2020-2024, NVIDIA CORPORATION. +# Copyright (c) 2020-2023, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -24,7 +24,6 @@ from cuml.dask.neighbors import NearestNeighbors from dask.distributed import get_worker import dask.array as da -from cuml.internals import logger from uuid import uuid1 @@ -53,8 +52,9 @@ class KNeighborsRegressor(NearestNeighbors): run different models concurrently in different streams by creating handles in several streams. If it is None, a new one is created. - verbose : level_enum - Sets logging level. See :ref:`verbosity-levels` for more info. + verbose : int or boolean, default=False + Sets logging level. It must be one of `cuml.common.logger.level_*`. + See :ref:`verbosity-levels` for more info. """ def __init__( diff --git a/python/cuml/cuml/dask/neighbors/nearest_neighbors.py b/python/cuml/cuml/dask/neighbors/nearest_neighbors.py index 4f3c1bbd96..6ef7e70dd1 100644 --- a/python/cuml/cuml/dask/neighbors/nearest_neighbors.py +++ b/python/cuml/cuml/dask/neighbors/nearest_neighbors.py @@ -1,4 +1,4 @@ -# Copyright (c) 2019-2024, NVIDIA CORPORATION. +# Copyright (c) 2019-2023, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -24,8 +24,6 @@ from cuml.dask.common.input_utils import to_output from cuml.dask.common.input_utils import DistributedDataHandler -from cuml.internals import logger - from uuid import uuid1 @@ -50,8 +48,9 @@ class NearestNeighbors(BaseEstimator): run different models concurrently in different streams by creating handles in several streams. If it is None, a new one is created. - verbose : level_enum - Sets logging level. See :ref:`verbosity-levels` for more info. + verbose : int or boolean, default=False + Sets logging level. It must be one of `cuml.common.logger.level_*`. + See :ref:`verbosity-levels` for more info. 
""" diff --git a/python/cuml/cuml/dask/preprocessing/LabelEncoder.py b/python/cuml/cuml/dask/preprocessing/LabelEncoder.py index a9d11200c8..f232d505b2 100644 --- a/python/cuml/cuml/dask/preprocessing/LabelEncoder.py +++ b/python/cuml/cuml/dask/preprocessing/LabelEncoder.py @@ -22,7 +22,6 @@ from collections.abc import Sequence from cuml.internals.safe_imports import gpu_only_import_from -from cuml.internals import logger dcDataFrame = gpu_only_import_from("dask_cudf", "DataFrame") dcSeries = gpu_only_import_from("dask_cudf", "Series") @@ -123,9 +122,7 @@ class LabelEncoder( """ - def __init__( - self, *, client=None, verbose=logger.level_enum.info, **kwargs - ): + def __init__(self, *, client=None, verbose=False, **kwargs): super().__init__(client=client, verbose=verbose, **kwargs) def fit(self, y): diff --git a/python/cuml/cuml/dask/preprocessing/label.py b/python/cuml/cuml/dask/preprocessing/label.py index ff896cc691..a2f49f46f4 100644 --- a/python/cuml/cuml/dask/preprocessing/label.py +++ b/python/cuml/cuml/dask/preprocessing/label.py @@ -1,4 +1,4 @@ -# Copyright (c) 2020-2024, NVIDIA CORPORATION. +# Copyright (c) 2020-2023, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -21,7 +21,6 @@ import dask from cuml.internals.safe_imports import gpu_only_import -from cuml.internals import logger cp = gpu_only_import("cupy") cupyx = gpu_only_import("cupyx") diff --git a/python/cuml/cuml/dask/solvers/cd.py b/python/cuml/cuml/dask/solvers/cd.py index cb3bbdf097..b5fb25e518 100644 --- a/python/cuml/cuml/dask/solvers/cd.py +++ b/python/cuml/cuml/dask/solvers/cd.py @@ -1,4 +1,4 @@ -# Copyright (c) 2020-2024, NVIDIA CORPORATION. +# Copyright (c) 2020-2023, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -19,7 +19,6 @@ from cuml.dask.common.base import SyncFitMixinLinearModel from raft_dask.common.comms import get_raft_comm_state from dask.distributed import get_worker -from cuml.internals import logger class CD(BaseEstimator, SyncFitMixinLinearModel, DelayedPredictionMixin): diff --git a/python/cuml/cuml/decomposition/incremental_pca.py b/python/cuml/cuml/decomposition/incremental_pca.py index ab09ffeb1e..925219d0bc 100644 --- a/python/cuml/cuml/decomposition/incremental_pca.py +++ b/python/cuml/cuml/decomposition/incremental_pca.py @@ -21,7 +21,6 @@ from cuml.common import input_to_cuml_array from cuml import Base from cuml.internals.safe_imports import cpu_only_import -from cuml.internals import logger import numbers from cuml.internals.safe_imports import gpu_only_import @@ -80,8 +79,9 @@ class IncrementalPCA(PCA): `fit`. If `batch_size` is ``None``, then `batch_size` is inferred from the data and set to :py:`5 * n_features`, to provide a balance between approximation accuracy and memory consumption. - verbose : level_enum - Sets logging level. See :ref:`verbosity-levels` for more info. + verbose : int or boolean, default=False + Sets logging level. It must be one of `cuml.common.logger.level_*`. + See :ref:`verbosity-levels` for more info. output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \ 'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None Return results and set estimator attributes to the indicated output @@ -203,7 +203,7 @@ def __init__( whiten=False, copy=True, batch_size=None, - verbose=logger.level_enum.info, + verbose=False, output_type=None, ): diff --git a/python/cuml/cuml/explainer/permutation_shap.pyx b/python/cuml/cuml/explainer/permutation_shap.pyx index 3ac7a0f20d..b0270b5873 100644 --- a/python/cuml/cuml/explainer/permutation_shap.pyx +++ b/python/cuml/cuml/explainer/permutation_shap.pyx @@ -1,5 +1,5 @@ # -# Copyright (c) 2020-2024, NVIDIA CORPORATION. +# Copyright (c) 2020-2023, NVIDIA CORPORATION. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -21,7 +21,6 @@ np = cpu_only_import('numpy') import time from cuml.internals.safe_imports import gpu_only_import_from -from cuml.internals import logger cu_df = gpu_only_import_from('cudf', 'DataFrame') from cuml.explainer.base import SHAPBase from cuml.explainer.common import get_cai_ptr @@ -205,7 +204,7 @@ class PermutationExplainer(SHAPBase): random_state=None, dtype=None, output_type=None, - verbose=logger.level_enum.info,): + verbose=False,): super().__init__( order='C', model=model, diff --git a/python/cuml/cuml/feature_extraction/_tfidf.py b/python/cuml/cuml/feature_extraction/_tfidf.py index b6277a00e5..2cf5974119 100644 --- a/python/cuml/cuml/feature_extraction/_tfidf.py +++ b/python/cuml/cuml/feature_extraction/_tfidf.py @@ -18,7 +18,6 @@ from cuml.common.sparsefuncs import csr_diag_mul from cuml.common.sparsefuncs import csr_row_normalize_l1, csr_row_normalize_l2 import cuml.internals -from cuml.internals import logger from cuml.common.exceptions import NotFittedError from cuml.internals.safe_imports import gpu_only_import @@ -106,8 +105,9 @@ class TfidfTransformer(Base): run different models concurrently in different streams by creating handles in several streams. If it is None, a new one is created. - verbose : level_enum - Sets logging level. See :ref:`verbosity-levels` for more info. + verbose : int or boolean, default=False + Sets logging level. It must be one of `cuml.common.logger.level_*`. + See :ref:`verbosity-levels` for more info. 
output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \ 'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None Return results and set estimator attributes to the indicated output @@ -131,7 +131,7 @@ def __init__( smooth_idf=True, sublinear_tf=False, handle=None, - verbose=logger.level_enum.info, + verbose=False, output_type=None, ): diff --git a/python/cuml/cuml/linear_model/lasso.py b/python/cuml/cuml/linear_model/lasso.py index 937adc1841..65a30be791 100644 --- a/python/cuml/cuml/linear_model/lasso.py +++ b/python/cuml/cuml/linear_model/lasso.py @@ -16,7 +16,6 @@ from cuml.linear_model.elastic_net import ElasticNet from cuml.internals.api_decorators import device_interop_preparation -from cuml.internals import logger class Lasso(ElasticNet): @@ -117,8 +116,9 @@ class Lasso(ElasticNet): type. If None, the output type set at the module level (`cuml.global_settings.output_type`) will be used. See :ref:`output-data-type-configuration` for more info. - verbose : level_enum - Sets logging level. See :ref:`verbosity-levels` for more info. + verbose : int or boolean, default=False + Sets logging level. It must be one of `cuml.common.logger.level_*`. + See :ref:`verbosity-levels` for more info. 
Attributes ---------- @@ -148,7 +148,7 @@ def __init__( selection="cyclic", handle=None, output_type=None, - verbose=logger.level_enum.info, + verbose=False, ): # Lasso is just a special case of ElasticNet super().__init__( diff --git a/python/cuml/cuml/multiclass/multiclass.py b/python/cuml/cuml/multiclass/multiclass.py index 2729a75253..61a79e1d31 100644 --- a/python/cuml/cuml/multiclass/multiclass.py +++ b/python/cuml/cuml/multiclass/multiclass.py @@ -18,7 +18,6 @@ from cuml.internals.array import CumlArray from cuml.internals.base import Base from cuml.internals.import_utils import has_sklearn -from cuml.internals import logger from cuml.internals.mixins import ClassifierMixin from cuml.common.doc_utils import generate_docstring from cuml.common import ( @@ -76,8 +75,9 @@ class MulticlassClassifier(Base, ClassifierMixin): run different models concurrently in different streams by creating handles in several streams. If it is None, a new one is created. - verbose : level_enum - Sets logging level. See :ref:`verbosity-levels` for more info. + verbose : int or boolean, default=False + Sets logging level. It must be one of `cuml.common.logger.level_*`. + See :ref:`verbosity-levels` for more info. output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \ 'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None Return results and set estimator attributes to the indicated output @@ -103,7 +103,7 @@ def __init__( estimator, *, handle=None, - verbose=logger.level_enum.info, + verbose=False, output_type=None, strategy="ovr", ): @@ -242,8 +242,9 @@ class OneVsRestClassifier(MulticlassClassifier): run different models concurrently in different streams by creating handles in several streams. If it is None, a new one is created. - verbose : level_enum - Sets logging level. See :ref:`verbosity-levels` for more info. + verbose : int or boolean, default=False + Sets logging level. It must be one of `cuml.common.logger.level_*`. 
+ See :ref:`verbosity-levels` for more info. output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \ 'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None Return results and set estimator attributes to the indicated output @@ -254,12 +255,7 @@ class OneVsRestClassifier(MulticlassClassifier): @_deprecate_pos_args(version="21.06") def __init__( - self, - estimator, - *args, - handle=None, - verbose=logger.level_enum.info, - output_type=None, + self, estimator, *args, handle=None, verbose=False, output_type=None ): super().__init__( estimator, @@ -321,8 +317,9 @@ class OneVsOneClassifier(MulticlassClassifier): run different models concurrently in different streams by creating handles in several streams. If it is None, a new one is created. - verbose : level_enum - Sets logging level. See :ref:`verbosity-levels` for more info. + verbose : int or boolean, default=False + Sets logging level. It must be one of `cuml.common.logger.level_*`. + See :ref:`verbosity-levels` for more info. 
output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \ 'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None Return results and set estimator attributes to the indicated output @@ -333,12 +330,7 @@ class OneVsOneClassifier(MulticlassClassifier): @_deprecate_pos_args(version="21.06") def __init__( - self, - estimator, - *args, - handle=None, - verbose=logger.level_enum.info, - output_type=None, + self, estimator, *args, handle=None, verbose=False, output_type=None ): super().__init__( estimator, diff --git a/python/cuml/cuml/neighbors/kernel_density.py b/python/cuml/cuml/neighbors/kernel_density.py index b1b7bc1976..3af2107995 100644 --- a/python/cuml/cuml/neighbors/kernel_density.py +++ b/python/cuml/cuml/neighbors/kernel_density.py @@ -18,7 +18,6 @@ from cuml.internals.import_utils import has_scipy from cuml.metrics import pairwise_distances from cuml.internals.base import Base -from cuml.internals import logger from cuml.internals.input_utils import input_to_cuml_array from cuml.internals.input_utils import input_to_cupy_array from cuml.internals.safe_imports import gpu_only_import_from @@ -184,8 +183,9 @@ class KernelDensity(Base): users can run different models concurrently in different streams by creating handles in several streams. If it is None, a new one is created. - verbose : level_enum - Sets logging level. See :ref:`verbosity-levels` for more info. + verbose : int or boolean, default=False + Sets logging level. It must be one of `cuml.common.logger.level_*`. + See :ref:`verbosity-levels` for more info. 
Examples -------- @@ -210,7 +210,7 @@ def __init__( metric_params=None, output_type=None, handle=None, - verbose=logger.level_enum.info, + verbose=False, ): super(KernelDensity, self).__init__( verbose=verbose, handle=handle, output_type=output_type diff --git a/python/cuml/cuml/preprocessing/LabelEncoder.py b/python/cuml/cuml/preprocessing/LabelEncoder.py index fa13f8a008..960935e61f 100644 --- a/python/cuml/cuml/preprocessing/LabelEncoder.py +++ b/python/cuml/cuml/preprocessing/LabelEncoder.py @@ -24,7 +24,6 @@ cpu_only_import_from, gpu_only_import, ) -from cuml.internals import logger if TYPE_CHECKING: import cudf @@ -56,8 +55,9 @@ class LabelEncoder(Base): run different models concurrently in different streams by creating handles in several streams. If it is None, a new one is created. - verbose : level_enum - Sets logging level. See :ref:`verbosity-levels` for more info. + verbose : int or boolean, default=False + Sets logging level. It must be one of `cuml.common.logger.level_*`. + See :ref:`verbosity-levels` for more info. output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \ 'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None Return results and set estimator attributes to the indicated output @@ -134,7 +134,7 @@ def __init__( *, handle_unknown="error", handle=None, - verbose=logger.level_enum.info, + verbose=False, output_type=None, ) -> None: diff --git a/python/cuml/cuml/preprocessing/encoders.py b/python/cuml/cuml/preprocessing/encoders.py index 8999a427e6..943f3c294c 100644 --- a/python/cuml/cuml/preprocessing/encoders.py +++ b/python/cuml/cuml/preprocessing/encoders.py @@ -73,8 +73,9 @@ class BaseEncoder(Base, CheckFeaturesMixIn): run different models concurrently in different streams by creating handles in several streams. If it is None, a new one is created. - verbose : level_enum - Sets logging level. See :ref:`verbosity-levels` for more info. + verbose : int or boolean, default=False + Sets logging level. 
It must be one of `cuml.common.logger.level_*`. + See :ref:`verbosity-levels` for more info. output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \ 'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None Return results and set estimator attributes to the indicated output @@ -233,8 +234,9 @@ class OneHotEncoder(BaseEncoder): run different models concurrently in different streams by creating handles in several streams. If it is None, a new one is created. - verbose : level_enum - Sets logging level. See :ref:`verbosity-levels` for more info. + verbose : int or boolean, default=False + Sets logging level. It must be one of `cuml.common.logger.level_*`. + See :ref:`verbosity-levels` for more info. output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \ 'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None Return results and set estimator attributes to the indicated output @@ -260,7 +262,7 @@ def __init__( dtype=np.float32, handle_unknown="error", handle=None, - verbose=logger.level_enum.info, + verbose=False, output_type=None, ): super().__init__( @@ -653,7 +655,7 @@ def __init__( dtype=np.float64, handle_unknown="error", handle=None, - verbose=logger.level_enum.info, + verbose=False, output_type=None, ) -> None: """Encode categorical features as an integer array. @@ -683,8 +685,9 @@ def __init__( concurrently in different streams by creating handles in several streams. If it is None, a new one is created. - verbose : level_enum - Sets logging level. :ref:`verbosity-levels` for more info. + verbose : int or boolean, default=False + Sets logging level. It must be one of `cuml.common.logger.level_*`. See + :ref:`verbosity-levels` for more info. 
output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \ 'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None Return results and set estimator attributes to the indicated output diff --git a/python/cuml/cuml/preprocessing/label.py b/python/cuml/cuml/preprocessing/label.py index 21569b3d09..20aac36ac8 100644 --- a/python/cuml/cuml/preprocessing/label.py +++ b/python/cuml/cuml/preprocessing/label.py @@ -20,7 +20,6 @@ import cuml.internals from cuml import Base from cuml.internals.safe_imports import gpu_only_import -from cuml.internals import logger cp = gpu_only_import("cupy") cupyx = gpu_only_import("cupyx") @@ -102,8 +101,9 @@ class LabelBinarizer(Base): run different models concurrently in different streams by creating handles in several streams. If it is None, a new one is created. - verbose : level_enum - Sets logging level. See :ref:`verbosity-levels` for more info. + verbose : int or boolean, default=False + Sets logging level. It must be one of `cuml.common.logger.level_*`. + See :ref:`verbosity-levels` for more info. output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \ 'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None Return results and set estimator attributes to the indicated output @@ -156,7 +156,7 @@ def __init__( pos_label=1, sparse_output=False, handle=None, - verbose=logger.level_enum.info, + verbose=False, output_type=None, ): super().__init__( diff --git a/python/cuml/cuml/svm/linear_svc.py b/python/cuml/cuml/svm/linear_svc.py index 94c48fb459..40df6f0808 100644 --- a/python/cuml/cuml/svm/linear_svc.py +++ b/python/cuml/cuml/svm/linear_svc.py @@ -82,8 +82,9 @@ class LinearSVC(LinearSVM, ClassifierMixin): Weights to modify the parameter C for class i to class_weight[i]*C. The string 'balanced' is also accepted, in which case ``class_weight[i] = n_samples / (n_classes * n_samples_of_class[i])`` - verbose : level_enum - Sets logging level. See :ref:`verbosity-levels` for more info. 
+ verbose : int or boolean, default=False + Sets logging level. It must be one of `cuml.common.logger.level_*`. + See :ref:`verbosity-levels` for more info. C : {LinearSVM_defaults.C.__class__.__name__ \ } (default = {LinearSVM_defaults.C}) The constant scaling factor of the loss term in the target formula diff --git a/python/cuml/cuml/svm/linear_svr.py b/python/cuml/cuml/svm/linear_svr.py index 48f37d83cb..3f9b8040d9 100644 --- a/python/cuml/cuml/svm/linear_svr.py +++ b/python/cuml/cuml/svm/linear_svr.py @@ -76,8 +76,9 @@ class LinearSVR(LinearSVM, RegressorMixin): } (default = {LinearSVM_defaults.lbfgs_memory}) Number of vectors approximating the hessian for the underlying QN solver (l-bfgs). - verbose : level_enum - Sets logging level. See :ref:`verbosity-levels` for more info. + verbose : int or boolean, default=False + Sets logging level. It must be one of `cuml.common.logger.level_*`. + See :ref:`verbosity-levels` for more info. C : {LinearSVM_defaults.C.__class__.__name__ \ } (default = {LinearSVM_defaults.C}) The constant scaling factor of the loss term in the target formula diff --git a/python/cuml/cuml/tests/test_api.py b/python/cuml/cuml/tests/test_api.py index df73fe581f..74adbd177d 100644 --- a/python/cuml/cuml/tests/test_api.py +++ b/python/cuml/cuml/tests/test_api.py @@ -22,7 +22,6 @@ import pytest import cuml import cuml.internals.mixins as cumix -from cuml.internals import logger from cuml.internals.safe_imports import gpu_only_import cp = gpu_only_import("cupy") @@ -106,23 +105,17 @@ def dataset(): class dummy_regressor_estimator(Base, cumix.RegressorMixin): - def __init__( - self, *, handle=None, verbose=logger.level_enum.info, output_type=None - ): + def __init__(self, *, handle=None, verbose=False, output_type=None): super().__init__(handle=handle) class dummy_classifier_estimator(Base, cumix.ClassifierMixin): - def __init__( - self, *, handle=None, verbose=logger.level_enum.info, output_type=None - ): + def __init__(self, *, handle=None, 
verbose=False, output_type=None): super().__init__(handle=handle) class dummy_cluster_estimator(Base, cumix.ClusterMixin): - def __init__( - self, *, handle=None, verbose=logger.level_enum.info, output_type=None - ): + def __init__(self, *, handle=None, verbose=False, output_type=None): super().__init__(handle=handle) From 34d79da0dd78a328003ff47865c41fc66417b376 Mon Sep 17 00:00:00 2001 From: Vyas Ramasubramani Date: Wed, 11 Dec 2024 23:11:55 +0000 Subject: [PATCH 15/32] Revert changes to public APIs and docstrings in Cython --- python/cuml/cuml/cluster/agglomerative.pyx | 8 ++-- python/cuml/cuml/cluster/dbscan.pyx | 7 ++-- python/cuml/cuml/cluster/hdbscan/hdbscan.pyx | 8 ++-- python/cuml/cuml/cluster/kmeans.pyx | 13 +++--- python/cuml/cuml/decomposition/pca.pyx | 7 ++-- python/cuml/cuml/decomposition/tsvd.pyx | 8 ++-- .../cuml/ensemble/randomforest_common.pyx | 3 +- .../cuml/ensemble/randomforestclassifier.pyx | 7 ++-- .../cuml/ensemble/randomforestregressor.pyx | 7 ++-- python/cuml/cuml/experimental/fil/fil.pyx | 8 ++-- .../cuml/experimental/linear_model/lars.pyx | 7 ++-- python/cuml/cuml/explainer/kernel_shap.pyx | 3 +- python/cuml/cuml/fil/fil.pyx | 7 ++-- python/cuml/cuml/internals/base.pyx | 7 ++-- .../cuml/cuml/kernel_ridge/kernel_ridge.pyx | 8 ++-- python/cuml/cuml/linear_model/elastic_net.pyx | 8 ++-- .../cuml/linear_model/linear_regression.pyx | 8 ++-- .../cuml/linear_model/logistic_regression.pyx | 7 ++-- .../cuml/linear_model/mbsgd_classifier.pyx | 8 ++-- .../cuml/linear_model/mbsgd_regressor.pyx | 8 ++-- python/cuml/cuml/linear_model/ridge.pyx | 8 ++-- python/cuml/cuml/manifold/t_sne.pyx | 7 ++-- python/cuml/cuml/manifold/umap.pyx | 7 ++-- python/cuml/cuml/naive_bayes/naive_bayes.py | 42 ++++++++++--------- .../cuml/neighbors/kneighbors_classifier.pyx | 8 ++-- .../cuml/neighbors/kneighbors_regressor.pyx | 8 ++-- .../cuml/cuml/neighbors/nearest_neighbors.pyx | 15 +++---- .../random_projection/random_projection.pyx | 15 +++---- 
python/cuml/cuml/solvers/cd.pyx | 8 ++-- python/cuml/cuml/solvers/qn.pyx | 10 ++--- python/cuml/cuml/solvers/sgd.pyx | 8 ++-- python/cuml/cuml/svm/svc.pyx | 7 ++-- python/cuml/cuml/svm/svm_base.pyx | 7 ++-- python/cuml/cuml/svm/svr.pyx | 7 ++-- python/cuml/cuml/tsa/arima.pyx | 7 ++-- python/cuml/cuml/tsa/auto_arima.pyx | 9 ++-- python/cuml/cuml/tsa/holtwinters.pyx | 8 ++-- 37 files changed, 171 insertions(+), 157 deletions(-) diff --git a/python/cuml/cuml/cluster/agglomerative.pyx b/python/cuml/cuml/cluster/agglomerative.pyx index 491bf44ce8..790db73362 100644 --- a/python/cuml/cuml/cluster/agglomerative.pyx +++ b/python/cuml/cuml/cluster/agglomerative.pyx @@ -25,7 +25,6 @@ np = cpu_only_import('numpy') from cuml.internals.array import CumlArray from cuml.internals.base import Base -from cuml.internals import logger from cuml.common.doc_utils import generate_docstring from pylibraft.common.handle cimport handle_t from cuml.common import input_to_cuml_array @@ -97,8 +96,9 @@ class AgglomerativeClustering(Base, ClusterMixin, CMajorInputTagMixin): run different models concurrently in different streams by creating handles in several streams. If it is None, a new one is created. - verbose : level_enum - Sets logging level. See :ref:`verbosity-levels` for more info. + verbose : int or boolean, default=False + Sets logging level. It must be one of `cuml.common.logger.level_*`. + See :ref:`verbosity-levels` for more info. n_clusters : int (default = 2) The number of clusters to find. 
affinity : str, default='euclidean' @@ -150,7 +150,7 @@ class AgglomerativeClustering(Base, ClusterMixin, CMajorInputTagMixin): children_ = CumlArrayDescriptor() def __init__(self, *, n_clusters=2, affinity="deprecated", metric=None, - linkage="single", handle=None, verbose=logger.level_enum.info, + linkage="single", handle=None, verbose=False, connectivity='knn', n_neighbors=10, output_type=None): super().__init__(handle=handle, diff --git a/python/cuml/cuml/cluster/dbscan.pyx b/python/cuml/cuml/cluster/dbscan.pyx index da9521f8a8..67b68a547e 100644 --- a/python/cuml/cuml/cluster/dbscan.pyx +++ b/python/cuml/cuml/cluster/dbscan.pyx @@ -170,8 +170,9 @@ class DBSCAN(UniversalBase, due to numerical rounding. algorithm: {'brute', 'rbc'}, default = 'brute' The algorithm to be used by for nearest neighbor computations. - verbose : level_enum - Sets logging level. See :ref:`verbosity-levels` for more info. + verbose : int or boolean, default=False + Sets logging level. It must be one of `cuml.common.logger.level_*`. + See :ref:`verbosity-levels` for more info. max_mbytes_per_batch : (optional) int64 Calculate batch size using no more than this number of megabytes for the pairwise distance computation. 
This enables the trade-off between @@ -246,7 +247,7 @@ class DBSCAN(UniversalBase, min_samples=5, metric='euclidean', algorithm='brute', - verbose=logger.level_enum.info, + verbose=False, max_mbytes_per_batch=None, output_type=None, calc_core_sample_indices=True): diff --git a/python/cuml/cuml/cluster/hdbscan/hdbscan.pyx b/python/cuml/cuml/cluster/hdbscan/hdbscan.pyx index 73ba87723a..39317403d0 100644 --- a/python/cuml/cuml/cluster/hdbscan/hdbscan.pyx +++ b/python/cuml/cuml/cluster/hdbscan/hdbscan.pyx @@ -25,7 +25,6 @@ from warnings import warn from cuml.internals.array import CumlArray from cuml.internals.base import UniversalBase -from cuml.internals import logger from cuml.common.doc_utils import generate_docstring from cuml.common import input_to_cuml_array @@ -351,8 +350,9 @@ class HDBSCAN(UniversalBase, ClusterMixin, CMajorInputTagMixin): alpha : float, optional (default=1.0) A distance scaling parameter as used in robust single linkage. - verbose : level_enum - Sets logging level. See :ref:`verbosity-levels` for more info. + verbose : int or boolean, default=False + Sets logging level. It must be one of `cuml.common.logger.level_*`. + See :ref:`verbosity-levels` for more info. 
min_cluster_size : int, optional (default = 5) The minimum number of samples in a group for that group to be @@ -511,7 +511,7 @@ class HDBSCAN(UniversalBase, ClusterMixin, CMajorInputTagMixin): allow_single_cluster=False, gen_min_span_tree=False, handle=None, - verbose=logger.level_enum.info, + verbose=False, connectivity='knn', output_type=None, prediction_data=False): diff --git a/python/cuml/cuml/cluster/kmeans.pyx b/python/cuml/cuml/cluster/kmeans.pyx index e77998deef..48ad769cd8 100644 --- a/python/cuml/cuml/cluster/kmeans.pyx +++ b/python/cuml/cuml/cluster/kmeans.pyx @@ -37,7 +37,6 @@ IF GPUBUILD == 1: from cuml.cluster.kmeans_utils cimport params as KMeansParams from cuml.cluster.kmeans_utils cimport KMeansPlusPlus, Random, Array -from cuml.internals import logger from cuml.internals.array import CumlArray from cuml.common.array_descriptor import CumlArrayDescriptor from cuml.internals.base import UniversalBase @@ -125,8 +124,9 @@ class KMeans(UniversalBase, The more iterations of EM, the more accurate, but slower. tol : float64 (default = 1e-4) Stopping criterion when centroid means do not change much. - verbose : level_enum - Sets logging level. See :ref:`verbosity-levels` for more info. + verbose : int or boolean, default=False + Sets logging level. It must be one of `cuml.common.logger.level_*`. + See :ref:`verbosity-levels` for more info. random_state : int (default = 1) If you want results to be the same when you restart Python, select a state. @@ -205,10 +205,7 @@ class KMeans(UniversalBase, params.init = self._params_init params.max_iter = self.max_iter params.tol = self.tol - # TODO: This params object inherits from a cuvs type that still uses raft's - # integer-based legacy logging. Once raft's logger is also converted to - # using rapids-logger we will instead need to translate between enums here. 
- params.verbosity = self.verbose + params.verbosity = self.verbose params.rng_state.seed = self.random_state params.metric = DistanceType.L2Expanded # distance metric as squared L2: @todo - support other metrics # noqa: E501 params.batch_samples = self.max_samples_per_batch @@ -220,7 +217,7 @@ class KMeans(UniversalBase, @device_interop_preparation def __init__(self, *, handle=None, n_clusters=8, max_iter=300, tol=1e-4, - verbose=logger.level_enum.info, random_state=1, + verbose=False, random_state=1, init='scalable-k-means++', n_init=1, oversampling_factor=2.0, max_samples_per_batch=1<<15, convert_dtype=True, output_type=None): diff --git a/python/cuml/cuml/decomposition/pca.pyx b/python/cuml/cuml/decomposition/pca.pyx index 402de1c429..db2f0f62c8 100644 --- a/python/cuml/cuml/decomposition/pca.pyx +++ b/python/cuml/cuml/decomposition/pca.pyx @@ -219,8 +219,9 @@ class PCA(UniversalBase, tol : float (default = 1e-7) Used if algorithm = "jacobi". Smaller tolerance can increase accuracy, but but will slow down the algorithm's convergence. - verbose : level_enum - Sets logging level. See :ref:`verbosity-levels` for more info. + verbose : int or boolean, default=False + Sets logging level. It must be one of `cuml.common.logger.level_*`. + See :ref:`verbosity-levels` for more info. whiten : boolean (default = False) If True, de-correlates the components. This is done by dividing them by the corresponding singular values then multiplying by sqrt(n_samples). 
@@ -292,7 +293,7 @@ class PCA(UniversalBase, @device_interop_preparation def __init__(self, *, copy=True, handle=None, iterated_power=15, n_components=None, random_state=None, svd_solver='auto', - tol=1e-7, verbose=logger.level_enum.info, whiten=False, + tol=1e-7, verbose=False, whiten=False, output_type=None): # parameters super().__init__(handle=handle, diff --git a/python/cuml/cuml/decomposition/tsvd.pyx b/python/cuml/cuml/decomposition/tsvd.pyx index 8078684abe..b495d3d239 100644 --- a/python/cuml/cuml/decomposition/tsvd.pyx +++ b/python/cuml/cuml/decomposition/tsvd.pyx @@ -31,7 +31,6 @@ from cuml.common.doc_utils import generate_docstring from cuml.internals.mixins import FMajorInputTagMixin from cuml.internals.api_decorators import device_interop_preparation from cuml.internals.api_decorators import enable_device_interop -from cuml.internals import logger IF GPUBUILD == 1: @@ -193,8 +192,9 @@ class TruncatedSVD(UniversalBase, tol : float (default = 1e-7) Used if algorithm = "jacobi". Smaller tolerance can increase accuracy, but but will slow down the algorithm's convergence. - verbose : level_enum - Sets logging level. See :ref:`verbosity-levels` for more info. + verbose : int or boolean, default=False + Sets logging level. It must be one of `cuml.common.logger.level_*`. + See :ref:`verbosity-levels` for more info. 
output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \ 'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None Return results and set estimator attributes to the indicated output @@ -250,7 +250,7 @@ class TruncatedSVD(UniversalBase, @device_interop_preparation def __init__(self, *, algorithm='full', handle=None, n_components=1, n_iter=15, random_state=None, tol=1e-7, - verbose=logger.level_enum.info, output_type=None): + verbose=False, output_type=None): # params super().__init__(handle=handle, verbose=verbose, diff --git a/python/cuml/cuml/ensemble/randomforest_common.pyx b/python/cuml/cuml/ensemble/randomforest_common.pyx index 4d53eeb216..38c15eaca2 100644 --- a/python/cuml/cuml/ensemble/randomforest_common.pyx +++ b/python/cuml/cuml/ensemble/randomforest_common.pyx @@ -28,7 +28,6 @@ from cuml.internals.base import Base from cuml.internals.array import CumlArray from cuml.common.exceptions import NotFittedError import cuml.internals -from cuml.internals import logger from cython.operator cimport dereference as deref @@ -71,7 +70,7 @@ class BaseRandomForestModel(Base): def __init__(self, *, split_criterion, n_streams=4, n_estimators=100, max_depth=16, handle=None, max_features='sqrt', n_bins=128, bootstrap=True, - verbose=logger.level_enum.info, min_samples_leaf=1, min_samples_split=2, + verbose=False, min_samples_leaf=1, min_samples_split=2, max_samples=1.0, max_leaves=-1, accuracy_metric=None, dtype=None, output_type=None, min_weight_fraction_leaf=None, n_jobs=None, max_leaf_nodes=None, min_impurity_decrease=0.0, diff --git a/python/cuml/cuml/ensemble/randomforestclassifier.pyx b/python/cuml/cuml/ensemble/randomforestclassifier.pyx index e247f8a190..0e55ea8a0c 100644 --- a/python/cuml/cuml/ensemble/randomforestclassifier.pyx +++ b/python/cuml/cuml/ensemble/randomforestclassifier.pyx @@ -221,8 +221,9 @@ class RandomForestClassifier(BaseRandomForestModel, run different models concurrently in different streams by creating handles in several streams. 
If it is None, a new one is created. - verbose : level_enum - Sets logging level. See :ref:`verbosity-levels` for more info. + verbose : int or boolean, default=False + Sets logging level. It must be one of `cuml.common.logger.level_*`. + See :ref:`verbosity-levels` for more info. output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \ 'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None Return results and set estimator attributes to the indicated output @@ -247,7 +248,7 @@ class RandomForestClassifier(BaseRandomForestModel, `_. """ - def __init__(self, *, split_criterion=0, handle=None, verbose=logger.level_enum.info, + def __init__(self, *, split_criterion=0, handle=None, verbose=False, output_type=None, **kwargs): diff --git a/python/cuml/cuml/ensemble/randomforestregressor.pyx b/python/cuml/cuml/ensemble/randomforestregressor.pyx index 1a6765327d..5a2b2e38fe 100644 --- a/python/cuml/cuml/ensemble/randomforestregressor.pyx +++ b/python/cuml/cuml/ensemble/randomforestregressor.pyx @@ -226,8 +226,9 @@ class RandomForestRegressor(BaseRandomForestModel, run different models concurrently in different streams by creating handles in several streams. If it is None, a new one is created. - verbose : level_enum - Sets logging level. See :ref:`verbosity-levels` for more info. + verbose : int or boolean, default=False + Sets logging level. It must be one of `cuml.common.logger.level_*`. + See :ref:`verbosity-levels` for more info. 
output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \ 'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None Return results and set estimator attributes to the indicated output @@ -254,7 +255,7 @@ class RandomForestRegressor(BaseRandomForestModel, split_criterion=2, accuracy_metric='r2', handle=None, - verbose=logger.level_enum.info, + verbose=False, output_type=None, **kwargs): self.RF_type = REGRESSION diff --git a/python/cuml/cuml/experimental/fil/fil.pyx b/python/cuml/cuml/experimental/fil/fil.pyx index 6fc87ab22f..247e968e1b 100644 --- a/python/cuml/cuml/experimental/fil/fil.pyx +++ b/python/cuml/cuml/experimental/fil/fil.pyx @@ -37,7 +37,6 @@ from cuml.experimental.fil.detail.raft_proto.cuda_stream cimport cuda_stream as from cuml.experimental.fil.detail.raft_proto.device_type cimport device_type as raft_proto_device_t from cuml.experimental.fil.detail.raft_proto.handle cimport handle_t as raft_proto_handle_t from cuml.experimental.fil.detail.raft_proto.optional cimport optional, nullopt -from cuml.internals import logger from cuml.internals import set_api_output_dtype from cuml.internals.base import UniversalBase from cuml.internals.device_type import DeviceType, DeviceTypeError @@ -467,8 +466,9 @@ class ForestInference(UniversalBase, CMajorInputTagMixin): type. If None, the output type set at the module level (`cuml.global_settings.output_type`) will be used. See :ref:`output-data-type-configuration` for more info. - verbose : level_enum - Sets logging level. See :ref:`verbosity-levels` for more info. + verbose : int or boolean, default=False + Sets logging level. It must be one of `cuml.common.logger.level_*`. + See :ref:`verbosity-levels` for more info. output_class : boolean True for classifier models, false for regressors. 
layout : {'breadth_first', 'depth_first'}, default='depth_first' @@ -655,7 +655,7 @@ class ForestInference(UniversalBase, CMajorInputTagMixin): treelite_model=None, handle=None, output_type=None, - verbose=logger.level_enum.info, + verbose=False, is_classifier=False, output_class=None, layout='depth_first', diff --git a/python/cuml/cuml/experimental/linear_model/lars.pyx b/python/cuml/cuml/experimental/linear_model/lars.pyx index 6cc06b4a15..ddb3f69fa3 100644 --- a/python/cuml/cuml/experimental/linear_model/lars.pyx +++ b/python/cuml/cuml/experimental/linear_model/lars.pyx @@ -116,8 +116,9 @@ class Lars(Base, RegressorMixin): run different models concurrently in different streams by creating handles in several streams. If it is None, a new one is created. - verbose : level_enum - Sets logging level. See :ref:`verbosity-levels` for more info. + verbose : int or boolean, default=False + Sets logging level. It must be one of `cuml.common.logger.level_*`. + See :ref:`verbosity-levels` for more info. 
output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \ 'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None Return results and set estimator attributes to the indicated output @@ -165,7 +166,7 @@ class Lars(Base, RegressorMixin): intercept_ = CumlArrayDescriptor() def __init__(self, *, fit_intercept=True, normalize=True, - handle=None, verbose=logger.level_enum.info, output_type=None, copy_X=True, + handle=None, verbose=False, output_type=None, copy_X=True, fit_path=True, n_nonzero_coefs=500, eps=None, precompute='auto'): super().__init__(handle=handle, diff --git a/python/cuml/cuml/explainer/kernel_shap.pyx b/python/cuml/cuml/explainer/kernel_shap.pyx index 1345d046e1..fbd99d5eb9 100644 --- a/python/cuml/cuml/explainer/kernel_shap.pyx +++ b/python/cuml/cuml/explainer/kernel_shap.pyx @@ -27,7 +27,6 @@ from cuml.explainer.common import get_cai_ptr from cuml.explainer.common import model_func_call from cuml.linear_model import Lasso from cuml.linear_model import LinearRegression -from cuml.internals import logger from functools import lru_cache from itertools import combinations from numbers import Number @@ -198,7 +197,7 @@ class KernelExplainer(SHAPBase): data, nsamples='auto', link='identity', - verbose=logger.level_enum.info, + verbose=False, random_state=None, is_gpu_model=None, handle=None, diff --git a/python/cuml/cuml/fil/fil.pyx b/python/cuml/cuml/fil/fil.pyx index ee603a913e..d3764fc758 100644 --- a/python/cuml/cuml/fil/fil.pyx +++ b/python/cuml/cuml/fil/fil.pyx @@ -633,8 +633,9 @@ class ForestInference(Base, run different models concurrently in different streams by creating handles in several streams. If it is None, a new one is created. - verbose : level_enum - Sets logging level. See :ref:`verbosity-levels` for more info. + verbose : int or boolean, default=False + Sets logging level. It must be one of `cuml.common.logger.level_*`. + See :ref:`verbosity-levels` for more info. 
output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \ 'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None Return results and set estimator attributes to the indicated output @@ -743,7 +744,7 @@ class ForestInference(Base, def __init__(self, *, handle=None, output_type=None, - verbose=logger.level_enum.info): + verbose=False): super().__init__(handle=handle, verbose=verbose, output_type=output_type) diff --git a/python/cuml/cuml/internals/base.pyx b/python/cuml/cuml/internals/base.pyx index 991e898ede..6aa0452ba9 100644 --- a/python/cuml/cuml/internals/base.pyx +++ b/python/cuml/cuml/internals/base.pyx @@ -146,8 +146,9 @@ class Base(TagsMixin, run different models concurrently in different streams by creating handles in several streams. If it is None, a new one is created. - verbose : level_enum - Sets logging level. See :ref:`verbosity-levels` for more info. + verbose : int or boolean, default=False + Sets logging level. It must be one of `cuml.common.logger.level_*`. + See :ref:`verbosity-levels` for more info. 
output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \ 'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None Return results and set estimator attributes to the indicated output @@ -207,7 +208,7 @@ class Base(TagsMixin, def __init__(self, *, handle=None, - verbose=logger.level_enum.info, + verbose=False, output_type=None, output_mem_type=None): """ diff --git a/python/cuml/cuml/kernel_ridge/kernel_ridge.pyx b/python/cuml/cuml/kernel_ridge/kernel_ridge.pyx index e36851ba3d..6063f27c99 100644 --- a/python/cuml/cuml/kernel_ridge/kernel_ridge.pyx +++ b/python/cuml/cuml/kernel_ridge/kernel_ridge.pyx @@ -24,7 +24,6 @@ from cupyx import lapack, geterr, seterr from cuml.common.array_descriptor import CumlArrayDescriptor from cuml.internals.base import Base from cuml.internals.mixins import RegressorMixin -from cuml.internals import logger from cuml.common.doc_utils import generate_docstring from cuml.common import input_to_cuml_array @@ -154,8 +153,9 @@ class KernelRidge(Base, RegressorMixin): users can run different models concurrently in different streams by creating handles in several streams. If it is None, a new one is created. - verbose : level_enum - Sets logging level. See :ref:`verbosity-levels` for more info. + verbose : int or boolean, default=False + Sets logging level. It must be one of `cuml.common.logger.level_*`. + See :ref:`verbosity-levels` for more info. 
Attributes ---------- @@ -215,7 +215,7 @@ class KernelRidge(Base, RegressorMixin): kernel_params=None, output_type=None, handle=None, - verbose=logger.level_enum.info + verbose=False ): super().__init__(handle=handle, verbose=verbose, output_type=output_type) diff --git a/python/cuml/cuml/linear_model/elastic_net.pyx b/python/cuml/cuml/linear_model/elastic_net.pyx index 4e39f9175c..7b212b21c9 100644 --- a/python/cuml/cuml/linear_model/elastic_net.pyx +++ b/python/cuml/cuml/linear_model/elastic_net.pyx @@ -25,7 +25,6 @@ from cuml.common.doc_utils import generate_docstring from cuml.internals.array import CumlArray from cuml.common.array_descriptor import CumlArrayDescriptor from cuml.internals.logger import warn -from cuml.internals import logger from cuml.linear_model.base import LinearPredictMixin from cuml.internals.api_decorators import device_interop_preparation from cuml.internals.api_decorators import enable_device_interop @@ -131,8 +130,9 @@ class ElasticNet(UniversalBase, type. If None, the output type set at the module level (`cuml.global_settings.output_type`) will be used. See :ref:`output-data-type-configuration` for more info. - verbose : level_enum - Sets logging level. See :ref:`verbosity-levels` for more info. + verbose : int or boolean, default=False + Sets logging level. It must be one of `cuml.common.logger.level_*`. + See :ref:`verbosity-levels` for more info. Attributes ---------- @@ -163,7 +163,7 @@ class ElasticNet(UniversalBase, def __init__(self, *, alpha=1.0, l1_ratio=0.5, fit_intercept=True, normalize=False, max_iter=1000, tol=1e-3, solver='cd', selection='cyclic', - handle=None, output_type=None, verbose=logger.level_enum.info): + handle=None, output_type=None, verbose=False): """ Initializes the elastic-net regression class. 
diff --git a/python/cuml/cuml/linear_model/linear_regression.pyx b/python/cuml/cuml/linear_model/linear_regression.pyx index b18c37dd08..f1b64602b3 100644 --- a/python/cuml/cuml/linear_model/linear_regression.pyx +++ b/python/cuml/cuml/linear_model/linear_regression.pyx @@ -36,7 +36,6 @@ from cuml.linear_model.base import LinearPredictMixin from cuml.common import input_to_cuml_array from cuml.internals.api_decorators import device_interop_preparation from cuml.internals.api_decorators import enable_device_interop -from cuml.internals import logger IF GPUBUILD == 1: @@ -218,8 +217,9 @@ class LinearRegression(LinearPredictMixin, run different models concurrently in different streams by creating handles in several streams. If it is None, a new one is created. - verbose : level_enum - Sets logging level. See :ref:`verbosity-levels` for more info. + verbose : int or boolean, default=False + Sets logging level. It must be one of `cuml.common.logger.level_*`. + See :ref:`verbosity-levels` for more info. 
output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \ 'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None Return results and set estimator attributes to the indicated output @@ -277,7 +277,7 @@ class LinearRegression(LinearPredictMixin, @device_interop_preparation def __init__(self, *, algorithm='eig', fit_intercept=True, copy_X=None, normalize=False, - handle=None, verbose=logger.level_enum.info, output_type=None): + handle=None, verbose=False, output_type=None): IF GPUBUILD == 1: if handle is None and algorithm == 'eig': # if possible, create two streams, so that eigenvalue decomposition diff --git a/python/cuml/cuml/linear_model/logistic_regression.pyx b/python/cuml/cuml/linear_model/logistic_regression.pyx index 3f93237d0a..0ccb38fcc9 100644 --- a/python/cuml/cuml/linear_model/logistic_regression.pyx +++ b/python/cuml/cuml/linear_model/logistic_regression.pyx @@ -141,8 +141,9 @@ class LogisticRegression(UniversalBase, linesearch_max_iter : int (default = 50) Max number of linesearch iterations per outer iteration used in the lbfgs and owl QN solvers. - verbose : level_enum - Sets logging level. See :ref:`verbosity-levels` for more info. + verbose : int or boolean, default=False + Sets logging level. It must be one of `cuml.common.logger.level_*`. + See :ref:`verbosity-levels` for more info. 
l1_ratio : float or None, optional (default=None) The Elastic-Net mixing parameter, with `0 <= l1_ratio <= 1` solver : 'qn' (default='qn') @@ -210,7 +211,7 @@ class LogisticRegression(UniversalBase, class_weight=None, max_iter=1000, linesearch_max_iter=50, - verbose=logger.level_enum.info, + verbose=False, l1_ratio=None, solver="qn", handle=None, diff --git a/python/cuml/cuml/linear_model/mbsgd_classifier.pyx b/python/cuml/cuml/linear_model/mbsgd_classifier.pyx index 1ca5467ad5..3a7fcc772e 100644 --- a/python/cuml/cuml/linear_model/mbsgd_classifier.pyx +++ b/python/cuml/cuml/linear_model/mbsgd_classifier.pyx @@ -22,7 +22,6 @@ from cuml.internals.base import Base from cuml.internals.mixins import ClassifierMixin from cuml.common.doc_utils import generate_docstring from cuml.internals.mixins import FMajorInputTagMixin -from cuml.internals import logger from cuml.solvers import SGD @@ -144,8 +143,9 @@ class MBSGDClassifier(Base, run different models concurrently in different streams by creating handles in several streams. If it is None, a new one is created. - verbose : level_enum - Sets logging level. See :ref:`verbosity-levels` for more info. + verbose : int or boolean, default=False + Sets logging level. It must be one of `cuml.common.logger.level_*`. + See :ref:`verbosity-levels` for more info. 
output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \ 'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None Return results and set estimator attributes to the indicated output @@ -163,7 +163,7 @@ class MBSGDClassifier(Base, l1_ratio=0.15, fit_intercept=True, epochs=1000, tol=1e-3, shuffle=True, learning_rate='constant', eta0=0.001, power_t=0.5, batch_size=32, n_iter_no_change=5, handle=None, - verbose=logger.level_enum.info, output_type=None): + verbose=False, output_type=None): super().__init__(handle=handle, verbose=verbose, output_type=output_type) diff --git a/python/cuml/cuml/linear_model/mbsgd_regressor.pyx b/python/cuml/cuml/linear_model/mbsgd_regressor.pyx index 33ba719c33..a738eb6d74 100644 --- a/python/cuml/cuml/linear_model/mbsgd_regressor.pyx +++ b/python/cuml/cuml/linear_model/mbsgd_regressor.pyx @@ -20,7 +20,6 @@ import cuml.internals from cuml.internals.array import CumlArray from cuml.internals.base import Base from cuml.internals.mixins import RegressorMixin -from cuml.internals import logger from cuml.common.doc_utils import generate_docstring from cuml.internals.mixins import FMajorInputTagMixin from cuml.solvers import SGD @@ -135,8 +134,9 @@ class MBSGDRegressor(Base, run different models concurrently in different streams by creating handles in several streams. If it is None, a new one is created. - verbose : level_enum - Sets logging level. See :ref:`verbosity-levels` for more info. + verbose : int or boolean, default=False + Sets logging level. It must be one of `cuml.common.logger.level_*`. + See :ref:`verbosity-levels` for more info. 
output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \ 'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None Return results and set estimator attributes to the indicated output @@ -154,7 +154,7 @@ class MBSGDRegressor(Base, l1_ratio=0.15, fit_intercept=True, epochs=1000, tol=1e-3, shuffle=True, learning_rate='constant', eta0=0.001, power_t=0.5, batch_size=32, n_iter_no_change=5, handle=None, - verbose=logger.level_enum.info, output_type=None): + verbose=False, output_type=None): super().__init__(handle=handle, verbose=verbose, output_type=output_type) diff --git a/python/cuml/cuml/linear_model/ridge.pyx b/python/cuml/cuml/linear_model/ridge.pyx index 62ce647e47..bd039867f3 100644 --- a/python/cuml/cuml/linear_model/ridge.pyx +++ b/python/cuml/cuml/linear_model/ridge.pyx @@ -33,7 +33,6 @@ from cuml.linear_model.base import LinearPredictMixin from cuml.common import input_to_cuml_array from cuml.internals.api_decorators import device_interop_preparation from cuml.internals.api_decorators import enable_device_interop -from cuml.internals import logger IF GPUBUILD == 1: @@ -160,8 +159,9 @@ class Ridge(UniversalBase, type. If None, the output type set at the module level (`cuml.global_settings.output_type`) will be used. See :ref:`output-data-type-configuration` for more info. - verbose : level_enum - Sets logging level. See :ref:`verbosity-levels` for more info. + verbose : int or boolean, default=False + Sets logging level. It must be one of `cuml.common.logger.level_*`. + See :ref:`verbosity-levels` for more info. Attributes ---------- @@ -210,7 +210,7 @@ class Ridge(UniversalBase, @device_interop_preparation def __init__(self, *, alpha=1.0, solver='eig', fit_intercept=True, normalize=False, handle=None, output_type=None, - verbose=logger.level_enum.info): + verbose=False): """ Initializes the linear ridge regression class. 
diff --git a/python/cuml/cuml/manifold/t_sne.pyx b/python/cuml/cuml/manifold/t_sne.pyx index 67f6c792e1..31a0fa65d7 100644 --- a/python/cuml/cuml/manifold/t_sne.pyx +++ b/python/cuml/cuml/manifold/t_sne.pyx @@ -166,8 +166,9 @@ class TSNE(UniversalBase, 'chebyshev', 'cosine', 'correlation'] init : str 'random' or 'pca' (default 'random') Currently supports random or pca initialization. - verbose : level_enum - Sets logging level. See :ref:`verbosity-levels` for more info. + verbose : int or boolean, default=False + Sets logging level. It must be one of `cuml.common.logger.level_*`. + See :ref:`verbosity-levels` for more info. random_state : int (default None) Setting this can make repeated runs look more similar. Note, however, that this highly parallelized t-SNE implementation is not completely @@ -289,7 +290,7 @@ class TSNE(UniversalBase, metric='euclidean', metric_params=None, init='random', - verbose=logger.level_enum.info, + verbose=False, random_state=None, method='fft', angle=0.5, diff --git a/python/cuml/cuml/manifold/umap.pyx b/python/cuml/cuml/manifold/umap.pyx index b128a16008..a4708648b1 100644 --- a/python/cuml/cuml/manifold/umap.pyx +++ b/python/cuml/cuml/manifold/umap.pyx @@ -284,8 +284,9 @@ class UMAP(UniversalBase, run different models concurrently in different streams by creating handles in several streams. If it is None, a new one is created. - verbose : level_enum - Sets logging level. See :ref:`verbosity-levels` for more info. + verbose : int or boolean, default=False + Sets logging level. It must be one of `cuml.common.logger.level_*`. + See :ref:`verbosity-levels` for more info. 
output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \ 'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None Return results and set estimator attributes to the indicated output @@ -357,7 +358,7 @@ class UMAP(UniversalBase, precomputed_knn=None, callback=None, handle=None, - verbose=logger.level_enum.info, + verbose=False, build_algo="auto", build_kwds=None, output_type=None): diff --git a/python/cuml/cuml/naive_bayes/naive_bayes.py b/python/cuml/cuml/naive_bayes/naive_bayes.py index ff3e79e85c..701f88862e 100644 --- a/python/cuml/cuml/naive_bayes/naive_bayes.py +++ b/python/cuml/cuml/naive_bayes/naive_bayes.py @@ -23,7 +23,6 @@ from cuml.common.doc_utils import generate_docstring from cuml.internals.mixins import ClassifierMixin from cuml.internals.base import Base -from cuml.internals import logger from cuml.common.array_descriptor import CumlArrayDescriptor from cuml.common import CumlArray import math @@ -160,9 +159,7 @@ class _BaseNB(Base, ClassifierMixin): class_log_prior_ = CumlArrayDescriptor() feature_log_prob_ = CumlArrayDescriptor() - def __init__( - self, *, verbose=logger.level_enum.info, handle=None, output_type=None - ): + def __init__(self, *, verbose=False, handle=None, output_type=None): super(_BaseNB, self).__init__( verbose=verbose, handle=handle, output_type=output_type ) @@ -331,8 +328,9 @@ class GaussianNB(_BaseNB): users can run different models concurrently in different streams by creating handles in several streams. If it is None, a new one is created. - verbose : level_enum - Sets logging level. See :ref:`verbosity-levels` for more info. + verbose : int or boolean, default=False + Sets logging level. It must be one of `cuml.common.logger.level_*`. + See :ref:`verbosity-levels` for more info. 
Examples -------- @@ -363,7 +361,7 @@ def __init__( var_smoothing=1e-9, output_type=None, handle=None, - verbose=logger.level_enum.info, + verbose=False, ): super(GaussianNB, self).__init__( @@ -738,7 +736,7 @@ def __init__( alpha=1.0, fit_prior=True, class_prior=None, - verbose=logger.level_enum.info, + verbose=False, handle=None, output_type=None, ): @@ -1109,8 +1107,9 @@ class MultinomialNB(_BaseDiscreteNB): users can run different models concurrently in different streams by creating handles in several streams. If it is None, a new one is created. - verbose : level_enum - Sets logging level. See :ref:`verbosity-levels` for more info. + verbose : int or boolean, default=False + Sets logging level. It must be one of `cuml.common.logger.level_*`. + See :ref:`verbosity-levels` for more info. Attributes ---------- @@ -1178,7 +1177,7 @@ def __init__( class_prior=None, output_type=None, handle=None, - verbose=logger.level_enum.info, + verbose=False, ): super(MultinomialNB, self).__init__( alpha=alpha, @@ -1254,8 +1253,9 @@ class BernoulliNB(_BaseDiscreteNB): users can run different models concurrently in different streams by creating handles in several streams. If it is None, a new one is created. - verbose : level_enum - Sets logging level. See :ref:`verbosity-levels` for more info. + verbose : int or boolean, default=False + Sets logging level. It must be one of `cuml.common.logger.level_*`. + See :ref:`verbosity-levels` for more info. Attributes ---------- @@ -1310,7 +1310,7 @@ def __init__( class_prior=None, output_type=None, handle=None, - verbose=logger.level_enum.info, + verbose=False, ): super(BernoulliNB, self).__init__( alpha=alpha, @@ -1417,8 +1417,9 @@ class ComplementNB(_BaseDiscreteNB): users can run different models concurrently in different streams by creating handles in several streams. If it is None, a new one is created. - verbose : level_enum - Sets logging level. See :ref:`verbosity-levels` for more info. 
+ verbose : int or boolean, default=False + Sets logging level. It must be one of `cuml.common.logger.level_*`. + See :ref:`verbosity-levels` for more info. Attributes ---------- @@ -1469,7 +1470,7 @@ def __init__( norm=False, output_type=None, handle=None, - verbose=logger.level_enum.info, + verbose=False, ): super(ComplementNB, self).__init__( alpha=alpha, @@ -1574,8 +1575,9 @@ class CategoricalNB(_BaseDiscreteNB): users can run different models concurrently in different streams by creating handles in several streams. If it is None, a new one is created. - verbose : level_enum - Sets logging level. See :ref:`verbosity-levels` for more info. + verbose : int or boolean, default=False + Sets logging level. It must be one of `cuml.common.logger.level_*`. + See :ref:`verbosity-levels` for more info. Attributes ---------- @@ -1624,7 +1626,7 @@ def __init__( class_prior=None, output_type=None, handle=None, - verbose=logger.level_enum.info, + verbose=False, ): super(CategoricalNB, self).__init__( alpha=alpha, diff --git a/python/cuml/cuml/neighbors/kneighbors_classifier.pyx b/python/cuml/cuml/neighbors/kneighbors_classifier.pyx index 3121c2072a..17f8628c95 100644 --- a/python/cuml/cuml/neighbors/kneighbors_classifier.pyx +++ b/python/cuml/cuml/neighbors/kneighbors_classifier.pyx @@ -27,7 +27,6 @@ from cuml.common.array_descriptor import CumlArrayDescriptor from cuml.internals.mixins import ClassifierMixin from cuml.common.doc_utils import generate_docstring from cuml.internals.mixins import FMajorInputTagMixin -from cuml.internals import logger from cuml.internals.safe_imports import cpu_only_import np = cpu_only_import('numpy') @@ -99,8 +98,9 @@ class KNeighborsClassifier(ClassifierMixin, run different models concurrently in different streams by creating handles in several streams. If it is None, a new one is created. - verbose : level_enum - Sets logging level. See :ref:`verbosity-levels` for more info. + verbose : int or boolean, default=False + Sets logging level. 
It must be one of `cuml.common.logger.level_*`. + See :ref:`verbosity-levels` for more info. output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \ 'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None Return results and set estimator attributes to the indicated output @@ -151,7 +151,7 @@ class KNeighborsClassifier(ClassifierMixin, }, } - def __init__(self, *, weights="uniform", handle=None, verbose=logger.level_enum.info, + def __init__(self, *, weights="uniform", handle=None, verbose=False, output_type=None, **kwargs): super().__init__( handle=handle, diff --git a/python/cuml/cuml/neighbors/kneighbors_regressor.pyx b/python/cuml/cuml/neighbors/kneighbors_regressor.pyx index 8e49d6ea42..78c1525f27 100644 --- a/python/cuml/cuml/neighbors/kneighbors_regressor.pyx +++ b/python/cuml/cuml/neighbors/kneighbors_regressor.pyx @@ -25,7 +25,6 @@ from cuml.common.array_descriptor import CumlArrayDescriptor from cuml.internals.mixins import RegressorMixin from cuml.common.doc_utils import generate_docstring from cuml.internals.mixins import FMajorInputTagMixin -from cuml.internals import logger from cuml.internals.safe_imports import cpu_only_import np = cpu_only_import('numpy') @@ -107,8 +106,9 @@ class KNeighborsRegressor(RegressorMixin, run different models concurrently in different streams by creating handles in several streams. If it is None, a new one is created. - verbose : level_enum - Sets logging level. See :ref:`verbosity-levels` for more info. + verbose : int or boolean, default=False + Sets logging level. It must be one of `cuml.common.logger.level_*`. + See :ref:`verbosity-levels` for more info. 
output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \ 'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None Return results and set estimator attributes to the indicated output @@ -161,7 +161,7 @@ class KNeighborsRegressor(RegressorMixin, }, } - def __init__(self, *, weights="uniform", handle=None, verbose=logger.level_enum.info, + def __init__(self, *, weights="uniform", handle=None, verbose=False, output_type=None, **kwargs): super().__init__( handle=handle, diff --git a/python/cuml/cuml/neighbors/nearest_neighbors.pyx b/python/cuml/cuml/neighbors/nearest_neighbors.pyx index d68143def2..4f551d282c 100644 --- a/python/cuml/cuml/neighbors/nearest_neighbors.pyx +++ b/python/cuml/cuml/neighbors/nearest_neighbors.pyx @@ -41,7 +41,6 @@ from cuml.metrics.distance_type cimport DistanceType from cuml.metrics.raft_distance_type cimport DistanceType as RaftDistanceType from cuml.internals.api_decorators import device_interop_preparation from cuml.internals.api_decorators import enable_device_interop -from cuml.internals import logger from cuml.neighbors.ann cimport * @@ -155,8 +154,9 @@ class NearestNeighbors(UniversalBase, ---------- n_neighbors : int (default=5) Default number of neighbors to query - verbose : level_enum - Sets logging level. See :ref:`verbosity-levels` for more info. + verbose : int or boolean, default=False + Sets logging level. It must be one of `cuml.common.logger.level_*`. + See :ref:`verbosity-levels` for more info. handle : cuml.Handle Specifies the cuml.handle that holds internal CUDA state for computations in this model. 
Most importantly, this specifies the CUDA @@ -307,7 +307,7 @@ class NearestNeighbors(UniversalBase, @device_interop_preparation def __init__(self, *, n_neighbors=5, - verbose=logger.level_enum.info, + verbose=False, handle=None, algorithm="auto", metric="euclidean", @@ -951,7 +951,7 @@ class NearestNeighbors(UniversalBase, @cuml.internals.api_return_sparse_array() -def kneighbors_graph(X=None, n_neighbors=5, mode='connectivity', verbose=logger.level_enum.info, +def kneighbors_graph(X=None, n_neighbors=5, mode='connectivity', verbose=False, handle=None, algorithm="brute", metric="euclidean", p=2, include_self=False, metric_params=None): """ @@ -973,8 +973,9 @@ def kneighbors_graph(X=None, n_neighbors=5, mode='connectivity', verbose=logger. connectivity matrix with ones and zeros, 'distance' returns the edges as the distances between points with the requested metric. - verbose : level_enum - Sets logging level. See :ref:`verbosity-levels` for more info. + verbose : int or boolean, default=False + Sets logging level. It must be one of `cuml.common.logger.level_*`. + See :ref:`verbosity-levels` for more info. handle : cuml.Handle Specifies the cuml.handle that holds internal CUDA state for diff --git a/python/cuml/cuml/random_projection/random_projection.pyx b/python/cuml/cuml/random_projection/random_projection.pyx index 0ceaaedf94..81811a4849 100644 --- a/python/cuml/cuml/random_projection/random_projection.pyx +++ b/python/cuml/cuml/random_projection/random_projection.pyx @@ -28,7 +28,6 @@ from cuml.internals.base import Base from pylibraft.common.handle cimport * from cuml.common import input_to_cuml_array from cuml.internals.mixins import FMajorInputTagMixin -from cuml.internals import logger from rmm.librmm.cuda_stream_view cimport cuda_stream_view @@ -403,8 +402,9 @@ class GaussianRandomProjection(Base, random_state : int (default = None) Seed used to initialize random generator - verbose : level_enum - Sets logging level. 
See :ref:`verbosity-levels` for more info. + verbose : int or boolean, default=False + Sets logging level. It must be one of `cuml.common.logger.level_*`. + See :ref:`verbosity-levels` for more info. output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \ 'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None Return results and set estimator attributes to the indicated output @@ -429,7 +429,7 @@ class GaussianRandomProjection(Base, """ def __init__(self, *, handle=None, n_components='auto', eps=0.1, - random_state=None, verbose=logger.level_enum.info, output_type=None): + random_state=None, verbose=False, output_type=None): Base.__init__(self, handle=handle, @@ -545,8 +545,9 @@ class SparseRandomProjection(Base, random_state : int (default = None) Seed used to initialize random generator - verbose : level_enum - Sets logging level. See :ref:`verbosity-levels` for more info. + verbose : int or boolean, default=False + Sets logging level. It must be one of `cuml.common.logger.level_*`. + See :ref:`verbosity-levels` for more info. 
output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \ 'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None @@ -573,7 +574,7 @@ class SparseRandomProjection(Base, def __init__(self, *, handle=None, n_components='auto', density='auto', eps=0.1, dense_output=True, random_state=None, - verbose=logger.level_enum.info, output_type=None): + verbose=False, output_type=None): Base.__init__(self, handle=handle, diff --git a/python/cuml/cuml/solvers/cd.pyx b/python/cuml/cuml/solvers/cd.pyx index 6b1bf59f34..ba6c5ac12b 100644 --- a/python/cuml/cuml/solvers/cd.pyx +++ b/python/cuml/cuml/solvers/cd.pyx @@ -28,7 +28,6 @@ from cuml.common.array_descriptor import CumlArrayDescriptor from cuml.internals.base import Base from cuml.common.doc_utils import generate_docstring from cuml.internals.input_utils import input_to_cuml_array -from cuml.internals import logger from cuml.internals.mixins import FMajorInputTagMixin @@ -171,8 +170,9 @@ class CD(Base, run different models concurrently in different streams by creating handles in several streams. If it is None, a new one is created. - verbose : level_enum - Sets logging level. See :ref:`verbosity-levels` for more info. + verbose : int or boolean, default=False + Sets logging level. It must be one of `cuml.common.logger.level_*`. + See :ref:`verbosity-levels` for more info. 
output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \ 'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None Return results and set estimator attributes to the indicated output @@ -186,7 +186,7 @@ class CD(Base, def __init__(self, *, loss='squared_loss', alpha=0.0001, l1_ratio=0.15, fit_intercept=True, normalize=False, max_iter=1000, tol=1e-3, - shuffle=True, handle=None, output_type=None, verbose=logger.level_enum.info): + shuffle=True, handle=None, output_type=None, verbose=False): if loss not in ['squared_loss']: msg = "loss {!r} is not supported" diff --git a/python/cuml/cuml/solvers/qn.pyx b/python/cuml/cuml/solvers/qn.pyx index 524099eca2..72f51c25b2 100644 --- a/python/cuml/cuml/solvers/qn.pyx +++ b/python/cuml/cuml/solvers/qn.pyx @@ -26,7 +26,6 @@ import cuml.internals from cuml.internals.array import CumlArray from cuml.internals.base import Base from cuml.common.array_descriptor import CumlArrayDescriptor -from cuml.internals import logger from cuml.internals.array_sparse import SparseCumlArray from cuml.internals.global_settings import GlobalSettings from cuml.common.doc_utils import generate_docstring @@ -370,8 +369,9 @@ class QN(Base, run different models concurrently in different streams by creating handles in several streams. If it is None, a new one is created. - verbose : level_enum - Sets logging level. See :ref:`verbosity-levels` for more info. + verbose : int or boolean, default=False + Sets logging level. It must be one of `cuml.common.logger.level_*`. + See :ref:`verbosity-levels` for more info. 
output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \ 'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None Return results and set estimator attributes to the indicated output @@ -412,8 +412,8 @@ class QN(Base, def __init__(self, *, loss='sigmoid', fit_intercept=True, l1_strength=0.0, l2_strength=0.0, max_iter=1000, tol=1e-4, delta=None, linesearch_max_iter=50, lbfgs_memory=5, - verbose=logger.level_enum.info, handle=None, output_type=None, - warm_start=logger.level_enum.info, penalty_normalized=True): + verbose=False, handle=None, output_type=None, + warm_start=False, penalty_normalized=True): super().__init__(handle=handle, verbose=verbose, diff --git a/python/cuml/cuml/solvers/sgd.pyx b/python/cuml/cuml/solvers/sgd.pyx index 544c842309..b6c452cc30 100644 --- a/python/cuml/cuml/solvers/sgd.pyx +++ b/python/cuml/cuml/solvers/sgd.pyx @@ -33,7 +33,6 @@ from cuml.common.array_descriptor import CumlArrayDescriptor from cuml.common.doc_utils import generate_docstring from cuml.common import input_to_cuml_array from cuml.internals.mixins import FMajorInputTagMixin -from cuml.internals import logger IF GPUBUILD == 1: from libcpp cimport bool @@ -215,8 +214,9 @@ class SGD(Base, type. If None, the output type set at the module level (`cuml.global_settings.output_type`) will be used. See :ref:`output-data-type-configuration` for more info. - verbose : level_enum - Sets logging level. See :ref:`verbosity-levels` for more info. + verbose : int or boolean, default=False + Sets logging level. It must be one of `cuml.common.logger.level_*`. + See :ref:`verbosity-levels` for more info. 
""" @@ -227,7 +227,7 @@ class SGD(Base, l1_ratio=0.15, fit_intercept=True, epochs=1000, tol=1e-3, shuffle=True, learning_rate='constant', eta0=0.001, power_t=0.5, batch_size=32, n_iter_no_change=5, handle=None, - output_type=None, verbose=logger.level_enum.info): + output_type=None, verbose=False): if loss in ['hinge', 'log', 'squared_loss']: self.loss = loss diff --git a/python/cuml/cuml/svm/svc.pyx b/python/cuml/cuml/svm/svc.pyx index fa8514b42b..283ce34bbe 100644 --- a/python/cuml/cuml/svm/svc.pyx +++ b/python/cuml/cuml/svm/svc.pyx @@ -281,8 +281,9 @@ class SVC(SVMBase, Seed for random number generator (used only when probability = True). Currently this argument is not used and a warning will be printed if the user provides it. - verbose : level_enum - Sets logging level. See :ref:`verbosity-levels` for more info. + verbose : int or boolean, default=False + Sets logging level. It must be one of `cuml.common.logger.level_*`. + See :ref:`verbosity-levels` for more info. Attributes ---------- @@ -332,7 +333,7 @@ class SVC(SVMBase, def __init__(self, *, handle=None, C=1, kernel='rbf', degree=3, gamma='scale', coef0=0.0, tol=1e-3, cache_size=1024.0, - max_iter=-1, nochange_steps=1000, verbose=logger.level_enum.info, + max_iter=-1, nochange_steps=1000, verbose=False, output_type=None, probability=False, random_state=None, class_weight=None, multiclass_strategy='ovo'): super().__init__( diff --git a/python/cuml/cuml/svm/svm_base.pyx b/python/cuml/cuml/svm/svm_base.pyx index 57f2899808..11c523a7fd 100644 --- a/python/cuml/cuml/svm/svm_base.pyx +++ b/python/cuml/cuml/svm/svm_base.pyx @@ -163,8 +163,9 @@ class SVMBase(Base, We monitor how much our stopping criteria changes during outer iterations. If it does not change (changes less then 1e-3*tol) for nochange_steps consecutive steps, then we stop training. - verbose : level_enum - Sets logging level. See :ref:`verbosity-levels` for more info. + verbose : int or boolean, default=False + Sets logging level. 
It must be one of `cuml.common.logger.level_*`. + See :ref:`verbosity-levels` for more info. epsilon: float (default = 0.1) epsilon parameter of the epsiron-SVR model. There is no penalty associated to points that are predicted within the epsilon-tube @@ -223,7 +224,7 @@ class SVMBase(Base, def __init__(self, *, handle=None, C=1, kernel='rbf', degree=3, gamma='auto', coef0=0.0, tol=1e-3, cache_size=1024.0, - max_iter=-1, nochange_steps=1000, verbose=logger.level_enum.info, + max_iter=-1, nochange_steps=1000, verbose=False, epsilon=0.1, output_type=None): super().__init__(handle=handle, verbose=verbose, diff --git a/python/cuml/cuml/svm/svr.pyx b/python/cuml/cuml/svm/svr.pyx index 3acdcc8186..94bfb1aae9 100644 --- a/python/cuml/cuml/svm/svr.pyx +++ b/python/cuml/cuml/svm/svr.pyx @@ -160,8 +160,9 @@ class SVR(SVMBase, RegressorMixin): We monitor how much our stopping criteria changes during outer iterations. If it does not change (changes less then 1e-3*tol) for nochange_steps consecutive steps, then we stop training. - verbose : level_enum - Sets logging level. See :ref:`verbosity-levels` for more info. + verbose : int or boolean, default=False + Sets logging level. It must be one of `cuml.common.logger.level_*`. + See :ref:`verbosity-levels` for more info. 
output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \ 'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None Return results and set estimator attributes to the indicated output @@ -231,7 +232,7 @@ class SVR(SVMBase, RegressorMixin): def __init__(self, *, handle=None, C=1, kernel='rbf', degree=3, gamma='scale', coef0=0.0, tol=1e-3, epsilon=0.1, cache_size=1024.0, max_iter=-1, nochange_steps=1000, - verbose=logger.level_enum.info, output_type=None): + verbose=False, output_type=None): super().__init__( handle=handle, C=C, diff --git a/python/cuml/cuml/tsa/arima.pyx b/python/cuml/cuml/tsa/arima.pyx index e6cfc2a1a4..3513362013 100644 --- a/python/cuml/cuml/tsa/arima.pyx +++ b/python/cuml/cuml/tsa/arima.pyx @@ -203,8 +203,9 @@ class ARIMA(Base): run different models concurrently in different streams by creating handles in several streams. If it is None, a new one is created. - verbose : level_enum - Sets logging level. See :ref:`verbosity-levels` for more info. + verbose : int or boolean, default=False + Sets logging level. It must be one of `cuml.common.logger.level_*`. + See :ref:`verbosity-levels` for more info. output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \ 'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None Return results and set estimator attributes to the indicated output @@ -316,7 +317,7 @@ class ARIMA(Base): fit_intercept=True, simple_differencing=True, handle=None, - verbose=logger.level_enum.info, + verbose=False, output_type=None, convert_dtype=True): diff --git a/python/cuml/cuml/tsa/auto_arima.pyx b/python/cuml/cuml/tsa/auto_arima.pyx index 3f7d35ecff..6f94e086bd 100644 --- a/python/cuml/cuml/tsa/auto_arima.pyx +++ b/python/cuml/cuml/tsa/auto_arima.pyx @@ -1,5 +1,5 @@ # -# Copyright (c) 2020-2024, NVIDIA CORPORATION. +# Copyright (c) 2020-2023, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -138,8 +138,9 @@ class AutoARIMA(Base): If True, the data is differenced before being passed to the Kalman filter. If False, differencing is part of the state-space model. See additional notes in the ARIMA docs - verbose : level_enum - Sets logging level. See :ref:`verbosity-levels` for more info. + verbose : int or boolean, default=False + Sets logging level. It must be one of `cuml.common.logger.level_*`. + See :ref:`verbosity-levels` for more info. output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \ 'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None Return results and set estimator attributes to the indicated output @@ -189,7 +190,7 @@ class AutoARIMA(Base): *, handle=None, simple_differencing=True, - verbose=logger.level_enum.info, + verbose=False, output_type=None, convert_dtype=True): # Initialize base class diff --git a/python/cuml/cuml/tsa/holtwinters.pyx b/python/cuml/cuml/tsa/holtwinters.pyx index f6c3beeb72..685e92fdea 100644 --- a/python/cuml/cuml/tsa/holtwinters.pyx +++ b/python/cuml/cuml/tsa/holtwinters.pyx @@ -29,7 +29,6 @@ from cuml.internals import _deprecate_pos_args from cuml.common import using_output_type from cuml.internals.base import Base from cuml.internals.array import CumlArray -from cuml.internals import logger from cuml.common.array_descriptor import CumlArrayDescriptor from pylibraft.common.handle cimport handle_t @@ -166,8 +165,9 @@ class ExponentialSmoothing(Base): run different models concurrently in different streams by creating handles in several streams. If it is None, a new one is created. - verbose : level_enum - Sets logging level. See :ref:`verbosity-levels` for more info. + verbose : int or boolean, default=False + Sets logging level. It must be one of `cuml.common.logger.level_*`. + See :ref:`verbosity-levels` for more info. 
output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \ 'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None Return results and set estimator attributes to the indicated output @@ -187,7 +187,7 @@ class ExponentialSmoothing(Base): def __init__(self, endog, *, seasonal="additive", seasonal_periods=2, start_periods=2, ts_num=1, eps=2.24e-3, handle=None, - verbose=logger.level_enum.info, output_type=None): + verbose=False, output_type=None): super().__init__(handle=handle, verbose=verbose, From fa77a86ff7f61c57a02e21dbad2df2e1caba8486 Mon Sep 17 00:00:00 2001 From: Vyas Ramasubramani Date: Thu, 12 Dec 2024 19:00:15 +0000 Subject: [PATCH 16/32] Revert remaining changes and update base class for verbosity compatibility --- python/cuml/cuml/internals/base.pyx | 10 +++++++++- python/cuml/cuml/manifold/umap.pyx | 3 ++- python/cuml/cuml/svm/svc.pyx | 12 ++++++------ python/cuml/cuml/svm/svm_base.pyx | 12 ++++++------ python/cuml/cuml/svm/svr.pyx | 1 - 5 files changed, 23 insertions(+), 15 deletions(-) diff --git a/python/cuml/cuml/internals/base.pyx b/python/cuml/cuml/internals/base.pyx index 6aa0452ba9..71c382cda6 100644 --- a/python/cuml/cuml/internals/base.pyx +++ b/python/cuml/cuml/internals/base.pyx @@ -222,7 +222,15 @@ class Base(TagsMixin, self.handle = None IF GPUBUILD == 1: - self.verbose = logger.level_enum(verbose) + # Internally, self.verbose follows the spdlog/c++ standard of + # 0 is most logging, and logging decreases from there. + # So if the user passes an int value for logging, we convert it. 
+ if verbose is True: + self.verbose = logger.level_enum.debug + elif verbose is False: + self.verbose = logger.level_enum.info + else: + self.verbose = logger.level_enum(verbose) ELSE: self.verbose = verbose diff --git a/python/cuml/cuml/manifold/umap.pyx b/python/cuml/cuml/manifold/umap.pyx index a4708648b1..9b92f23173 100644 --- a/python/cuml/cuml/manifold/umap.pyx +++ b/python/cuml/cuml/manifold/umap.pyx @@ -39,6 +39,7 @@ import cuml.internals from cuml.internals.base import UniversalBase from cuml.common.doc_utils import generate_docstring from cuml.internals import logger +from cuml.internals.logger cimport level_enum from cuml.internals.available_devices import is_cuda_available from cuml.internals.input_utils import input_to_cuml_array from cuml.internals.array import CumlArray @@ -469,7 +470,7 @@ class UMAP(UniversalBase, umap_params.repulsion_strength = cls.repulsion_strength umap_params.negative_sample_rate = cls.negative_sample_rate umap_params.transform_queue_size = cls.transform_queue_size - umap_params.verbosity = cls.verbose + umap_params.verbosity = cls.verbose umap_params.a = cls.a umap_params.b = cls.b if cls.init == "spectral": diff --git a/python/cuml/cuml/svm/svc.pyx b/python/cuml/cuml/svm/svc.pyx index 283ce34bbe..ded9ad358c 100644 --- a/python/cuml/cuml/svm/svc.pyx +++ b/python/cuml/cuml/svm/svc.pyx @@ -32,8 +32,8 @@ import cuml.internals from cuml.internals.array import CumlArray from cuml.internals.mixins import ClassifierMixin from cuml.common.doc_utils import generate_docstring -from cuml.internals import logger -from cuml.internals cimport logger +from cuml.internals.logger import warn +from cuml.internals.logger cimport level_enum from pylibraft.common.handle cimport handle_t from pylibraft.common.interruptible import cuda_interruptible from cuml.common import input_to_cuml_array, input_to_host_array, input_to_host_array_with_sparse_support @@ -77,7 +77,7 @@ cdef extern from "cuml/svm/svm_parameter.h" namespace "ML::SVM": int max_iter 
int nochange_steps double tol - logger.level_enum verbosity + level_enum verbosity double epsilon SvmType svmType @@ -353,7 +353,7 @@ class SVC(SVMBase, self.probability = probability self.random_state = random_state if probability and random_state is not None: - logger.warn("Random state is currently ignored by probabilistic SVC") + warn("Random state is currently ignored by probabilistic SVC") self.class_weight = class_weight self.svmType = C_SVC self.multiclass_strategy = multiclass_strategy @@ -405,8 +405,8 @@ class SVC(SVMBase, def _fit_multiclass(self, X, y, sample_weight) -> "SVC": if sample_weight is not None: - logger.warn("Sample weights are currently ignored for multi class " - "classification") + warn("Sample weights are currently ignored for multi class " + "classification") if not has_sklearn(): raise RuntimeError("Scikit-learn is needed to fit multiclass SVM") diff --git a/python/cuml/cuml/svm/svm_base.pyx b/python/cuml/cuml/svm/svm_base.pyx index 11c523a7fd..5ed18c76f6 100644 --- a/python/cuml/cuml/svm/svm_base.pyx +++ b/python/cuml/cuml/svm/svm_base.pyx @@ -34,8 +34,8 @@ from pylibraft.common.handle cimport handle_t from cuml.common import input_to_cuml_array from cuml.internals.input_utils import determine_array_type_full from cuml.common import using_output_type -from cuml.internals import logger -from cuml.internals cimport logger +from cuml.internals.logger import warn +from cuml.internals.logger cimport level_enum from cuml.internals.mixins import FMajorInputTagMixin from cuml.internals.array_sparse import SparseCumlArray, SparseCumlArrayInput from libcpp cimport bool @@ -69,7 +69,7 @@ cdef extern from "cuml/svm/svm_parameter.h" namespace "ML::SVM": int max_iter int nochange_steps double tol - logger.level_enum verbosity + level_enum verbosity double epsilon SvmType svmType @@ -264,9 +264,9 @@ class SVMBase(Base, and not getattr(type(self), "_linear_kernel_warned", False): setattr(type(self), "_linear_kernel_warned", True) cname = 
type(self).__name__ - logger.warn(f'{cname} with the linear kernel can be much faster using ' - f'the specialized solver provided by Linear{cname}. Consider ' - f'switching to Linear{cname} if tranining takes too long.') + warn(f'{cname} with the linear kernel can be much faster using ' + f'the specialized solver provided by Linear{cname}. Consider ' + f'switching to Linear{cname} if tranining takes too long.') def __del__(self): self._dealloc() diff --git a/python/cuml/cuml/svm/svr.pyx b/python/cuml/cuml/svm/svr.pyx index 94bfb1aae9..9dc61b7635 100644 --- a/python/cuml/cuml/svm/svr.pyx +++ b/python/cuml/cuml/svm/svr.pyx @@ -29,7 +29,6 @@ from cuml.internals.array import CumlArray from cuml.internals.array_sparse import SparseCumlArray from cuml.internals.input_utils import determine_array_type_full from cuml.internals.mixins import RegressorMixin -from cuml.internals import logger from cuml.common.doc_utils import generate_docstring from pylibraft.common.handle cimport handle_t from cuml.common import input_to_cuml_array From 0826775fcb4fe94e41da7322af396e7a8dff3f53 Mon Sep 17 00:00:00 2001 From: Vyas Ramasubramani Date: Thu, 12 Dec 2024 20:35:41 +0000 Subject: [PATCH 17/32] Fix inversion of log levels --- python/cuml/cuml/internals/base.pyx | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/python/cuml/cuml/internals/base.pyx b/python/cuml/cuml/internals/base.pyx index 71c382cda6..364b825307 100644 --- a/python/cuml/cuml/internals/base.pyx +++ b/python/cuml/cuml/internals/base.pyx @@ -230,7 +230,8 @@ class Base(TagsMixin, elif verbose is False: self.verbose = logger.level_enum.info else: - self.verbose = logger.level_enum(verbose) + # cuml's verbosity levels are the inverse of rapids-logger's (spdlog's) + self.verbose = logger.level_enum(6 - verbose) ELSE: self.verbose = verbose From bcc766c2dbf12c8d7c5373257744c82a281b1e81 Mon Sep 17 00:00:00 2001 From: Vyas Ramasubramani Date: Thu, 12 Dec 2024 14:16:04 -0800 Subject: [PATCH 18/32] Rewrite 
verbosity on access instead of on save to appease sklearn checks --- python/cuml/cuml/internals/base.pyx | 33 ++++++++++++++++++++++++----- 1 file changed, 28 insertions(+), 5 deletions(-) diff --git a/python/cuml/cuml/internals/base.pyx b/python/cuml/cuml/internals/base.pyx index 364b825307..b3cc925f26 100644 --- a/python/cuml/cuml/internals/base.pyx +++ b/python/cuml/cuml/internals/base.pyx @@ -39,6 +39,7 @@ import cuml import cuml.common import cuml.internals.logger as logger import cuml.internals +from cuml.internals import api_context_managers import cuml.internals.input_utils from cuml.internals.available_devices import is_cuda_available from cuml.internals.device_type import DeviceType @@ -226,14 +227,13 @@ class Base(TagsMixin, # 0 is most logging, and logging decreases from there. # So if the user passes an int value for logging, we convert it. if verbose is True: - self.verbose = logger.level_enum.debug + self._verbose = logger.level_enum.debug elif verbose is False: - self.verbose = logger.level_enum.info + self._verbose = logger.level_enum.info else: - # cuml's verbosity levels are the inverse of rapids-logger's (spdlog's) - self.verbose = logger.level_enum(6 - verbose) + self._verbose = verbose ELSE: - self.verbose = verbose + self._verbose = verbose self.output_type = _check_output_type_str( cuml.global_settings.output_type @@ -251,6 +251,29 @@ class Base(TagsMixin, if nvtx_benchmark and nvtx_benchmark.lower() == 'true': self.set_nvtx_annotations() + @property + def verbose(self): + if api_context_managers.in_internal_api(): + return logger.level_enum(6 - self._verbose) + else: + return self._verbose + + @verbose.setter + def verbose(self, int | logger.level_enum): + if api_context_managers.in_internal_api(): + assert isinstance(value, level_enum), ( + "The log level should always be provided as a level_enum, " + "not an integer" + ) + self._verbose = 6 - int(value) + else: + if isinstance(value, logger.level_enum): + raise ValueError( + "The log 
level should always be provided as an integer, " + "not using the enum" + ) + self._verbose = value + def __repr__(self): """ Pretty prints the arguments of a class using Scikit-learn standard :) From b9ca57d1f152c12447274be2b127ec352f834361 Mon Sep 17 00:00:00 2001 From: Vyas Ramasubramani Date: Thu, 12 Dec 2024 23:53:37 +0000 Subject: [PATCH 19/32] Appease linter --- python/cuml/cuml/ensemble/randomforestclassifier.pyx | 1 - python/cuml/cuml/ensemble/randomforestregressor.pyx | 1 - 2 files changed, 2 deletions(-) diff --git a/python/cuml/cuml/ensemble/randomforestclassifier.pyx b/python/cuml/cuml/ensemble/randomforestclassifier.pyx index 0e55ea8a0c..9664b4d008 100644 --- a/python/cuml/cuml/ensemble/randomforestclassifier.pyx +++ b/python/cuml/cuml/ensemble/randomforestclassifier.pyx @@ -35,7 +35,6 @@ from cuml.common.doc_utils import insert_into_docstring from cuml.common import input_to_cuml_array from cuml.internals.logger cimport level_enum -from cuml.internals import logger from cuml.ensemble.randomforest_common import BaseRandomForestModel from cuml.ensemble.randomforest_common import _obtain_fil_model from cuml.ensemble.randomforest_shared cimport * diff --git a/python/cuml/cuml/ensemble/randomforestregressor.pyx b/python/cuml/cuml/ensemble/randomforestregressor.pyx index 5a2b2e38fe..5625d278ca 100644 --- a/python/cuml/cuml/ensemble/randomforestregressor.pyx +++ b/python/cuml/cuml/ensemble/randomforestregressor.pyx @@ -31,7 +31,6 @@ import cuml.internals from cuml.internals.mixins import RegressorMixin from cuml.internals.logger cimport level_enum -from cuml.internals import logger from cuml.common.doc_utils import generate_docstring from cuml.common.doc_utils import insert_into_docstring from cuml.common import input_to_cuml_array From 0e7bbd2356c6a9e84011679296f3ce367fa30861 Mon Sep 17 00:00:00 2001 From: Vyas Ramasubramani Date: Fri, 13 Dec 2024 01:34:46 +0000 Subject: [PATCH 20/32] Fix typing --- python/cuml/cuml/internals/base.pyx | 4 ++-- 1 file 
changed, 2 insertions(+), 2 deletions(-) diff --git a/python/cuml/cuml/internals/base.pyx b/python/cuml/cuml/internals/base.pyx index b3cc925f26..ddb7a68fe4 100644 --- a/python/cuml/cuml/internals/base.pyx +++ b/python/cuml/cuml/internals/base.pyx @@ -259,9 +259,9 @@ class Base(TagsMixin, return self._verbose @verbose.setter - def verbose(self, int | logger.level_enum): + def verbose(self, value: int | logger.level_enum): if api_context_managers.in_internal_api(): - assert isinstance(value, level_enum), ( + assert isinstance(value, logger.level_enum), ( "The log level should always be provided as a level_enum, " "not an integer" ) From e99cbcff3c0868e6584b751fc762a77141fd5ba0 Mon Sep 17 00:00:00 2001 From: Vyas Ramasubramani Date: Fri, 13 Dec 2024 01:56:39 +0000 Subject: [PATCH 21/32] Turn of shallow clones --- cpp/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cpp/CMakeLists.txt b/cpp/CMakeLists.txt index 8d51ad91ce..738a234d2c 100644 --- a/cpp/CMakeLists.txt +++ b/cpp/CMakeLists.txt @@ -222,7 +222,7 @@ rapids_cmake_install_lib_dir(lib_dir) # Not using rapids-cmake since we never want to find, always download. 
CPMAddPackage( - NAME rapids_logger GITHUB_REPOSITORY vyasr/rapids-logger GIT_SHALLOW TRUE GIT_TAG feat/cuml + NAME rapids_logger GITHUB_REPOSITORY vyasr/rapids-logger GIT_SHALLOW FALSE GIT_TAG feat/cuml ) rapids_make_logger( ML EXPORT_SET cuml-exports LOGGER_HEADER_DIR include/cuml/common/ LOGGER_MACRO_PREFIX CUML LOGGER_TARGET cuml_logger From 74c5f367d128dcc363196097a94efaf6de126c83 Mon Sep 17 00:00:00 2001 From: Vyas Ramasubramani Date: Sat, 14 Dec 2024 01:34:52 +0000 Subject: [PATCH 22/32] Also set the flush for the C++ test --- cpp/test/sg/logger.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cpp/test/sg/logger.cpp b/cpp/test/sg/logger.cpp index 929f023e0e..fa9ff14ab3 100644 --- a/cpp/test/sg/logger.cpp +++ b/cpp/test/sg/logger.cpp @@ -90,7 +90,8 @@ TEST_F(LoggerTest, callback) TEST_F(LoggerTest, flush) { - default_logger().sinks().push_back(std::make_shared(exampleCallback)); + default_logger().sinks().push_back( + std::make_shared(exampleCallback, exampleFlush)); auto const testMsg = "This is a critical message"; CUML_LOG_CRITICAL(testMsg); ASSERT_EQ(1, flushCount); From ed428be86fe1cc70e9177d4b537067e602d70616 Mon Sep 17 00:00:00 2001 From: Vyas Ramasubramani Date: Tue, 17 Dec 2024 00:14:40 +0000 Subject: [PATCH 23/32] Fix setting of default logging level --- cpp/CMakeLists.txt | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/cpp/CMakeLists.txt b/cpp/CMakeLists.txt index 738a234d2c..5b16dd71bf 100644 --- a/cpp/CMakeLists.txt +++ b/cpp/CMakeLists.txt @@ -109,6 +109,17 @@ set(RMM_LOGGING_LEVEL "INFO" CACHE STRING "Choose the logging level.") set_property(CACHE RMM_LOGGING_LEVEL PROPERTY STRINGS "TRACE" "DEBUG" "INFO" "WARN" "ERROR" "CRITICAL" "OFF") message(VERBOSE "CUML_CPP: RMM_LOGGING_LEVEL = '${RMM_LOGGING_LEVEL}'.") +# Set logging level +set(LIBCUML_LOGGING_LEVEL + "DEBUG" + CACHE STRING "Choose the logging level." 
+) +set_property( + CACHE LIBCUML_LOGGING_LEVEL PROPERTY STRINGS "TRACE" "DEBUG" "INFO" "WARN" "ERROR" "CRITICAL" + "OFF" +) +message(VERBOSE "CUML: LIBCUML_LOGGING_LEVEL = '${LIBCUML_LOGGING_LEVEL}'.") + if(BUILD_CUML_TESTS OR BUILD_PRIMS_TESTS) # Needed because GoogleBenchmark changes the state of FindThreads.cmake, causing subsequent runs to # have different values for the `Threads::Threads` target. Setting this flag ensures @@ -571,6 +582,7 @@ if(BUILD_CUML_CPP_LIBRARY) PRIVATE "$<$:${CUML_CXX_FLAGS}>" "$<$:${CUML_CUDA_FLAGS}>" ) + target_compile_definitions(${CUML_CPP_TARGET} PUBLIC "CUML_LOG_ACTIVE_LEVEL=CUML_LOG_LEVEL_${LIBCUML_LOGGING_LEVEL}") target_include_directories(${CUML_CPP_TARGET} PUBLIC From 12558854aeeb2f1eaf4f95ea8a38a6291a970c84 Mon Sep 17 00:00:00 2001 From: Vyas Ramasubramani Date: Tue, 17 Dec 2024 00:43:28 +0000 Subject: [PATCH 24/32] Fix C++ flushing test --- cpp/test/sg/logger.cpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/cpp/test/sg/logger.cpp b/cpp/test/sg/logger.cpp index fa9ff14ab3..030ece46dc 100644 --- a/cpp/test/sg/logger.cpp +++ b/cpp/test/sg/logger.cpp @@ -92,8 +92,7 @@ TEST_F(LoggerTest, flush) { default_logger().sinks().push_back( std::make_shared(exampleCallback, exampleFlush)); - auto const testMsg = "This is a critical message"; - CUML_LOG_CRITICAL(testMsg); + default_logger().flush(); ASSERT_EQ(1, flushCount); } From 9ff2c0a3f0023cac4c199e21918a3a1a656b51b5 Mon Sep 17 00:00:00 2001 From: Vyas Ramasubramani Date: Wed, 18 Dec 2024 21:24:21 +0000 Subject: [PATCH 25/32] Try using a custom descriptor --- python/cuml/cuml/internals/base.pyx | 54 +++++++++++++++-------------- 1 file changed, 28 insertions(+), 26 deletions(-) diff --git a/python/cuml/cuml/internals/base.pyx b/python/cuml/cuml/internals/base.pyx index bc3e9f705c..76e73e332f 100644 --- a/python/cuml/cuml/internals/base.pyx +++ b/python/cuml/cuml/internals/base.pyx @@ -75,6 +75,29 @@ IF GPUBUILD == 1: import cuml.common.cuda +class 
VerbosityDescriptor: + def __get__(self, obj, cls=None): + if api_context_managers.in_internal_api(): + return logger.level_enum(6 - self._verbose) + else: + return self._verbose + + def __set__(self, obj, value): + if api_context_managers.in_internal_api(): + assert isinstance(value, logger.level_enum), ( + "The log level should always be provided as a level_enum, " + "not an integer" + ) + self._verbose = 6 - int(value) + else: + if isinstance(value, logger.level_enum): + raise ValueError( + "The log level should always be provided as an integer, " + "not using the enum" + ) + self._verbose = value + + class Base(TagsMixin, metaclass=cuml.internals.BaseMetaClass): """ @@ -229,13 +252,13 @@ class Base(TagsMixin, # 0 is most logging, and logging decreases from there. # So if the user passes an int value for logging, we convert it. if verbose is True: - self._verbose = logger.level_enum.debug + self.verbose = logger.level_enum.debug elif verbose is False: - self._verbose = logger.level_enum.info + self.verbose = logger.level_enum.info else: - self._verbose = verbose + self.verbose = logger.level_enum(6 - verbose) ELSE: - self._verbose = verbose + self.verbose = logger.level_enum(6 - verbose) self.output_type = _check_output_type_str( cuml.global_settings.output_type @@ -253,28 +276,7 @@ class Base(TagsMixin, if nvtx_benchmark and nvtx_benchmark.lower() == 'true': self.set_nvtx_annotations() - @property - def verbose(self): - if api_context_managers.in_internal_api(): - return logger.level_enum(6 - self._verbose) - else: - return self._verbose - - @verbose.setter - def verbose(self, value: int | logger.level_enum): - if api_context_managers.in_internal_api(): - assert isinstance(value, logger.level_enum), ( - "The log level should always be provided as a level_enum, " - "not an integer" - ) - self._verbose = 6 - int(value) - else: - if isinstance(value, logger.level_enum): - raise ValueError( - "The log level should always be provided as an integer, " - "not using 
the enum" - ) - self._verbose = value + verbose = VerbosityDescriptor() def __repr__(self): """ From 8d8561ddcd1397f5e4fef3f92411b9f682ede8cb Mon Sep 17 00:00:00 2001 From: William Hicks Date: Wed, 18 Dec 2024 15:05:15 -0800 Subject: [PATCH 26/32] Fix behavior of VerboseDescriptor - Access underlying _verbose during pickling of estimators - Fix bugs in implementation of descriptor - Correct behavior of get_params to ensure Scikit-Learn style verbosity is returned external to cuML API --- python/cuml/cuml/cluster/hdbscan/hdbscan.pyx | 2 +- .../cuml/ensemble/randomforestclassifier.pyx | 4 +- .../cuml/ensemble/randomforestregressor.pyx | 4 +- python/cuml/cuml/internals/base.pyx | 38 +++++++++++++++++-- .../cuml/linear_model/logistic_regression.pyx | 2 +- python/cuml/cuml/manifold/t_sne.pyx | 2 +- python/cuml/cuml/svm/linear.pyx | 13 +++++++ python/cuml/cuml/svm/svm_base.pyx | 2 +- 8 files changed, 55 insertions(+), 12 deletions(-) diff --git a/python/cuml/cuml/cluster/hdbscan/hdbscan.pyx b/python/cuml/cuml/cluster/hdbscan/hdbscan.pyx index 39317403d0..2e61b3b12b 100644 --- a/python/cuml/cuml/cluster/hdbscan/hdbscan.pyx +++ b/python/cuml/cuml/cluster/hdbscan/hdbscan.pyx @@ -964,7 +964,7 @@ class HDBSCAN(UniversalBase, ClusterMixin, CMajorInputTagMixin): def __setstate__(self, state): super(HDBSCAN, self).__init__( handle=state["handle"], - verbose=state["verbose"] + verbose=state["_verbose"] ) if not state["fit_called_"]: diff --git a/python/cuml/cuml/ensemble/randomforestclassifier.pyx b/python/cuml/cuml/ensemble/randomforestclassifier.pyx index 9664b4d008..34ff8cf9c1 100644 --- a/python/cuml/cuml/ensemble/randomforestclassifier.pyx +++ b/python/cuml/cuml/ensemble/randomforestclassifier.pyx @@ -286,7 +286,7 @@ class RandomForestClassifier(BaseRandomForestModel, state["rf_params64"] = rf_forest64.rf_params state["n_cols"] = self.n_cols - state["verbose"] = self.verbose + state["_verbose"] = self._verbose state["treelite_serialized_model"] = 
self.treelite_serialized_model state["treelite_handle"] = None state["split_criterion"] = self.split_criterion @@ -297,7 +297,7 @@ class RandomForestClassifier(BaseRandomForestModel, super(RandomForestClassifier, self).__init__( split_criterion=state["split_criterion"], handle=state["handle"], - verbose=state["verbose"]) + verbose=state["_verbose"]) cdef RandomForestMetaData[float, int] *rf_forest = \ new RandomForestMetaData[float, int]() cdef RandomForestMetaData[double, int] *rf_forest64 = \ diff --git a/python/cuml/cuml/ensemble/randomforestregressor.pyx b/python/cuml/cuml/ensemble/randomforestregressor.pyx index 5625d278ca..498e987b90 100644 --- a/python/cuml/cuml/ensemble/randomforestregressor.pyx +++ b/python/cuml/cuml/ensemble/randomforestregressor.pyx @@ -290,7 +290,7 @@ class RandomForestRegressor(BaseRandomForestModel, state["rf_params64"] = rf_forest64.rf_params state['n_cols'] = self.n_cols - state["verbose"] = self.verbose + state["_verbose"] = self._verbose state["treelite_serialized_model"] = self.treelite_serialized_model state['handle'] = self.handle state["treelite_handle"] = None @@ -301,7 +301,7 @@ class RandomForestRegressor(BaseRandomForestModel, def __setstate__(self, state): super(RandomForestRegressor, self).__init__( split_criterion=state["split_criterion"], - handle=state["handle"], verbose=state['verbose']) + handle=state["handle"], verbose=state['_verbose']) cdef RandomForestMetaData[float, float] *rf_forest = \ new RandomForestMetaData[float, float]() cdef RandomForestMetaData[double, double] *rf_forest64 = \ diff --git a/python/cuml/cuml/internals/base.pyx b/python/cuml/cuml/internals/base.pyx index 76e73e332f..310d82c199 100644 --- a/python/cuml/cuml/internals/base.pyx +++ b/python/cuml/cuml/internals/base.pyx @@ -76,11 +76,21 @@ IF GPUBUILD == 1: class VerbosityDescriptor: + """Descriptor for ensuring correct type is used for verbosity + + This descriptor ensures that when the 'verbose' attribute of a cuML + estimator is accessed 
external to the cuML API, an integer is returned + (consistent with Scikit-Learn's API for verbosity). Internal to the API, an + enum is used. Scikit-Learn's numerical values for verbosity are the inverse + of those used by spdlog, so the numerical value is also inverted internal + to the cuML API. This ensures that cuML code treats verbosity values as + expected for an spdlog-based codebase. + """ def __get__(self, obj, cls=None): if api_context_managers.in_internal_api(): - return logger.level_enum(6 - self._verbose) + return logger.level_enum(6 - obj._verbose) else: - return self._verbose + return obj._verbose def __set__(self, obj, value): if api_context_managers.in_internal_api(): @@ -88,14 +98,14 @@ class VerbosityDescriptor: "The log level should always be provided as a level_enum, " "not an integer" ) - self._verbose = 6 - int(value) + obj._verbose = 6 - int(value) else: if isinstance(value, logger.level_enum): raise ValueError( "The log level should always be provided as an integer, " "not using the enum" ) - self._verbose = value + obj._verbose = value class Base(TagsMixin, @@ -247,6 +257,15 @@ class Base(TagsMixin, ELSE: self.handle = None + # The following manipulation of the root_cm ensures that the verbose + # descriptor sees any set or get of the verbose attribute as happening + # internal to the cuML API. Currently, __init__ calls do not take place + # within an api context manager, so setting "verbose" here would + # otherwise appear to be external to the cuML API. This behavior will + # be corrected with the update of cuML's API context manager + # infrastructure in https://github.com/rapidsai/cuml/pull/6189. + GlobalSettings().prev_root_cm = GlobalSettings().root_cm + GlobalSettings().root_cm = True IF GPUBUILD == 1: # Internally, self.verbose follows the spdlog/c++ standard of # 0 is most logging, and logging decreases from there. 
@@ -259,6 +278,9 @@ class Base(TagsMixin, self.verbose = logger.level_enum(6 - verbose) ELSE: self.verbose = logger.level_enum(6 - verbose) + # Please see above note on manipulation of the root_cm. This should be + # rendered unnecessary with https://github.com/rapidsai/cuml/pull/6189. + GlobalSettings().root_cm = GlobalSettings().prev_root_cm self.output_type = _check_output_type_str( cuml.global_settings.output_type @@ -324,6 +346,14 @@ class Base(TagsMixin, variables = self._get_param_names() for key in variables: var_value = getattr(self, key, None) + # We are currently internal to the cuML API, but the value we + # return will immediately be returned external to the API, so we + # must perform the translation from enum to integer before + # returning the value. Ordinarily, this is handled by + # VerbosityDescriptor for direct access to the verbose + # attribute. + if key == "verbose": + var_value = 6 - int(var_value) params[key] = var_value return params diff --git a/python/cuml/cuml/linear_model/logistic_regression.pyx b/python/cuml/cuml/linear_model/logistic_regression.pyx index 53a90524d3..c66ef7ee35 100644 --- a/python/cuml/cuml/linear_model/logistic_regression.pyx +++ b/python/cuml/cuml/linear_model/logistic_regression.pyx @@ -566,7 +566,7 @@ class LogisticRegression(UniversalBase, def __setstate__(self, state): super().__init__(handle=None, - verbose=state["verbose"]) + verbose=state["_verbose"]) self.__dict__.update(state) def get_attr_names(self): diff --git a/python/cuml/cuml/manifold/t_sne.pyx b/python/cuml/cuml/manifold/t_sne.pyx index d9687f1b1c..85fce24076 100644 --- a/python/cuml/cuml/manifold/t_sne.pyx +++ b/python/cuml/cuml/manifold/t_sne.pyx @@ -694,7 +694,7 @@ class TSNE(UniversalBase, def __setstate__(self, state): super(TSNE, self).__init__(handle=None, - verbose=state['verbose']) + verbose=state['_verbose']) self.__dict__.update(state) return state diff --git a/python/cuml/cuml/svm/linear.pyx b/python/cuml/cuml/svm/linear.pyx index 
9cad6898c8..70cca27cf2 100644 --- a/python/cuml/cuml/svm/linear.pyx +++ b/python/cuml/cuml/svm/linear.pyx @@ -31,6 +31,7 @@ from cuml.common.array_descriptor import CumlArrayDescriptor from cuml.internals.array import CumlArray from cuml.internals.base import Base from cuml.internals.logger cimport level_enum +from cuml.internals.logger import level_enum as py_level_enum from pylibraft.common.handle cimport handle_t from pylibraft.common.interruptible import cuda_interruptible from cuml.common import input_to_cuml_array @@ -205,6 +206,18 @@ class LSVMPWrapper(LSVMPWrapper_): else: raise ValueError(f"Unknown loss string value: {loss}") + @property + def verbose(self): + # Reverse ordering of log levels to convert spdlog level values to + # Scikit-Learn log level values + return 6 - int(self._getparam('verbose')) + + @verbose.setter + def verbose(self, level: int): + # Reverse ordering of log levels to convert spdlog level values to + # Scikit-Learn log level values + self._setparam('verbose', py_level_enum(6 - level)) + # Add properties for parameters with a trivial conversion def __add_prop(prop_name): diff --git a/python/cuml/cuml/svm/svm_base.pyx b/python/cuml/cuml/svm/svm_base.pyx index 5ed18c76f6..af76f2d0e3 100644 --- a/python/cuml/cuml/svm/svm_base.pyx +++ b/python/cuml/cuml/svm/svm_base.pyx @@ -685,7 +685,7 @@ class SVMBase(Base, def __setstate__(self, state): super(SVMBase, self).__init__(handle=None, - verbose=state['verbose']) + verbose=state['_verbose']) self.__dict__.update(state) self._model = self._get_svm_model() self._freeSvmBuffers = False From 62895f0e0ecd0ae087b351603213880657315ea7 Mon Sep 17 00:00:00 2001 From: William Hicks Date: Thu, 19 Dec 2024 10:03:25 -0800 Subject: [PATCH 27/32] Correct verbose handling in set_params --- python/cuml/cuml/internals/base.pyx | 3 +++ 1 file changed, 3 insertions(+) diff --git a/python/cuml/cuml/internals/base.pyx b/python/cuml/cuml/internals/base.pyx index 310d82c199..1d53cb6e76 100644 --- 
a/python/cuml/cuml/internals/base.pyx +++ b/python/cuml/cuml/internals/base.pyx @@ -371,6 +371,9 @@ class Base(TagsMixin, if key not in variables: raise ValueError("Bad param '%s' passed to set_params" % key) else: + # Switch verbose to enum since we are now internal to cuML API + if key == "verbose": + value = logger.level_enum(6 - int(value)) setattr(self, key, value) return self From ae320975345ecd09d8e852c3f3d8c68778cf20cf Mon Sep 17 00:00:00 2001 From: Vyas Ramasubramani Date: Mon, 30 Dec 2024 20:09:03 +0000 Subject: [PATCH 28/32] Fix a couple of bugs in umap behavior --- python/cuml/cuml/dask/manifold/umap.py | 2 +- python/cuml/cuml/manifold/umap.pyx | 13 ++++++++++++- 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/python/cuml/cuml/dask/manifold/umap.py b/python/cuml/cuml/dask/manifold/umap.py index 181bfb0728..9a27b52d30 100644 --- a/python/cuml/cuml/dask/manifold/umap.py +++ b/python/cuml/cuml/dask/manifold/umap.py @@ -44,7 +44,7 @@ class UMAP(BaseEstimator, DelayedTransformMixin): >>> X, y = make_blobs(1000, 10, centers=42, cluster_std=0.1, ... dtype=np.float32, random_state=10) - >>> local_model = UMAP(random_state=10) + >>> local_model = UMAP(random_state=10, verbose=0) >>> selection = np.random.RandomState(10).choice(1000, 100) >>> X_train = X[selection] diff --git a/python/cuml/cuml/manifold/umap.pyx b/python/cuml/cuml/manifold/umap.pyx index c8bf0aed07..b226211b56 100644 --- a/python/cuml/cuml/manifold/umap.pyx +++ b/python/cuml/cuml/manifold/umap.pyx @@ -435,7 +435,18 @@ class UMAP(UniversalBase, self.precomputed_knn = extract_knn_infos(precomputed_knn, n_neighbors) - logger.set_level(verbose) + # We need to set this log level here so that it is propagated in time + # for the logger.info call below. We cannot use the verbose parameter + # directly because Base.__init__ contains the logic for converting + # boolean values to suitable integers. 
We access self._verbose instead + # of self.verbose because due to the same issues described in + # Base.__init__'s logic for setting verbose, this code is not + # considered to be within a root context and therefore considered + # external. Rather than mucking with the decorator, for this specific + # case since we're trying to set the properties of the underlying + # logger we may as well access our underlying value directly and + # perform the necessary arithmetic. + logger.set_level(logger.level_enum(6 - self._verbose)) if build_algo == "auto" or build_algo == "brute_force_knn" or build_algo == "nn_descent": if self.deterministic and build_algo == "auto": From d238f1eaf47fff8fa3c258459843eb950b73a859 Mon Sep 17 00:00:00 2001 From: Vyas Ramasubramani Date: Tue, 31 Dec 2024 08:45:00 -0800 Subject: [PATCH 29/32] Fix logger call to use commit hash --- cpp/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cpp/CMakeLists.txt b/cpp/CMakeLists.txt index ca43aaad75..79fa9767f5 100644 --- a/cpp/CMakeLists.txt +++ b/cpp/CMakeLists.txt @@ -233,7 +233,7 @@ rapids_cmake_install_lib_dir(lib_dir) # Not using rapids-cmake since we never want to find, always download. 
CPMAddPackage( - NAME rapids_logger GITHUB_REPOSITORY vyasr/rapids-logger GIT_SHALLOW FALSE GIT_TAG feat/cuml + NAME rapids_logger GITHUB_REPOSITORY vyasr/rapids-logger GIT_SHALLOW FALSE GIT_TAG 4df3ee70c6746fd1b6c0dc14209dae2e2d4378c6 VERSION 4df3ee70c6746fd1b6c0dc14209dae2e2d4378c6 ) rapids_make_logger( ML EXPORT_SET cuml-exports LOGGER_HEADER_DIR include/cuml/common/ LOGGER_MACRO_PREFIX CUML LOGGER_TARGET cuml_logger From 2cbbaca9de182dca2e674d9d3dc130ccf4d014cd Mon Sep 17 00:00:00 2001 From: Vyas Ramasubramani Date: Tue, 31 Dec 2024 08:55:36 -0800 Subject: [PATCH 30/32] Fix repo --- cpp/CMakeLists.txt | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cpp/CMakeLists.txt b/cpp/CMakeLists.txt index 79fa9767f5..c8a1593f01 100644 --- a/cpp/CMakeLists.txt +++ b/cpp/CMakeLists.txt @@ -233,7 +233,8 @@ rapids_cmake_install_lib_dir(lib_dir) # Not using rapids-cmake since we never want to find, always download. CPMAddPackage( - NAME rapids_logger GITHUB_REPOSITORY vyasr/rapids-logger GIT_SHALLOW FALSE GIT_TAG 4df3ee70c6746fd1b6c0dc14209dae2e2d4378c6 VERSION 4df3ee70c6746fd1b6c0dc14209dae2e2d4378c6 + NAME rapids_logger GITHUB_REPOSITORY rapidsai/rapids-logger GIT_SHALLOW FALSE GIT_TAG + 4df3ee70c6746fd1b6c0dc14209dae2e2d4378c6 VERSION 4df3ee70c6746fd1b6c0dc14209dae2e2d4378c6 ) rapids_make_logger( ML EXPORT_SET cuml-exports LOGGER_HEADER_DIR include/cuml/common/ LOGGER_MACRO_PREFIX CUML LOGGER_TARGET cuml_logger From 5b66f0826a303b28680e97b493a4b0fe0b0b847d Mon Sep 17 00:00:00 2001 From: Vyas Ramasubramani Date: Thu, 2 Jan 2025 16:45:00 +0000 Subject: [PATCH 31/32] style --- cpp/CMakeLists.txt | 2 +- cpp/bench/sg/svc.cu | 2 +- cpp/bench/sg/svr.cu | 2 +- cpp/examples/dbscan/dbscan_example.cpp | 2 +- cpp/include/cuml/cluster/dbscan.hpp | 2 +- cpp/include/cuml/cluster/kmeans.hpp | 2 +- cpp/include/cuml/common/utils.hpp | 2 +- cpp/include/cuml/ensemble/randomforest.hpp | 2 +- cpp/include/cuml/manifold/tsne.h | 2 +- cpp/include/cuml/manifold/umapparams.h | 2 +- 
cpp/include/cuml/solvers/lars.hpp | 2 +- cpp/include/cuml/svm/linear.hpp | 2 +- cpp/include/cuml/svm/svc.hpp | 2 +- cpp/include/cuml/svm/svm_api.h | 2 +- cpp/include/cuml/svm/svm_parameter.h | 2 +- cpp/src/dbscan/dbscan.cu | 2 +- cpp/src/dbscan/dbscan.cuh | 2 +- cpp/src/dbscan/dbscan_api.cpp | 2 +- cpp/src/decisiontree/decisiontree.cuh | 2 +- cpp/src/glm/qn/mg/qn_mg.cuh | 2 +- cpp/src/glm/qn/qn.cuh | 2 +- cpp/src/glm/qn/qn_solvers.cuh | 2 +- cpp/src/randomforest/randomforest.cu | 2 +- cpp/src/randomforest/randomforest.cuh | 2 +- cpp/src/solver/lars.cu | 2 +- cpp/src/solver/lars_impl.cuh | 2 +- cpp/src/svm/linear.cu | 2 +- cpp/src/svm/smosolver.h | 2 +- cpp/src/svm/svc.cu | 2 +- cpp/src/svm/svm_api.cpp | 2 +- cpp/src/svm/workingset.cuh | 2 +- cpp/src/tsne/tsne_runner.cuh | 2 +- cpp/src/tsne/utils.cuh | 2 +- cpp/src/umap/fuzzy_simpl_set/naive.cuh | 2 +- cpp/src/umap/runner.cuh | 2 +- cpp/src/umap/simpl_set_embed/algo.cuh | 2 +- cpp/src/umap/supervised.cuh | 2 +- cpp/test/sg/genetic/evolution_test.cu | 2 +- cpp/test/sg/genetic/program_test.cu | 2 +- cpp/test/sg/hdbscan_test.cu | 2 +- cpp/test/sg/lars_test.cu | 2 +- cpp/test/sg/logger.cpp | 2 +- cpp/test/sg/quasi_newton.cu | 2 +- cpp/test/sg/svc_test.cu | 2 +- cpp/test/sg/tsne_test.cu | 2 +- python/cuml/cuml/cluster/dbscan.pyx | 2 +- python/cuml/cuml/cluster/hdbscan/hdbscan.pyx | 2 +- python/cuml/cuml/common/kernel_utils.py | 2 +- python/cuml/cuml/dask/common/dask_df_utils.py | 2 +- python/cuml/cuml/dask/common/input_utils.py | 2 +- python/cuml/cuml/dask/manifold/umap.py | 2 +- python/cuml/cuml/ensemble/randomforestclassifier.pyx | 2 +- python/cuml/cuml/ensemble/randomforestregressor.pyx | 2 +- python/cuml/cuml/experimental/accel/__init__.py | 2 +- python/cuml/cuml/experimental/linear_model/lars.pyx | 2 +- python/cuml/cuml/explainer/base.pyx | 2 +- python/cuml/cuml/internals/base.pyx | 2 +- python/cuml/cuml/internals/logger.pxd | 2 +- python/cuml/cuml/internals/logger.pyx | 2 +- 
python/cuml/cuml/linear_model/logistic_regression.pyx | 2 +- python/cuml/cuml/manifold/simpl_set.pyx | 2 +- python/cuml/cuml/manifold/t_sne.pyx | 2 +- python/cuml/cuml/manifold/umap.pyx | 2 +- python/cuml/cuml/manifold/umap_utils.pxd | 2 +- python/cuml/cuml/neighbors/kneighbors_classifier_mg.pyx | 2 +- python/cuml/cuml/neighbors/kneighbors_regressor_mg.pyx | 2 +- python/cuml/cuml/neighbors/nearest_neighbors_mg.pyx | 2 +- python/cuml/cuml/svm/linear.pyx | 2 +- python/cuml/cuml/svm/svc.pyx | 2 +- python/cuml/cuml/svm/svm_base.pyx | 2 +- python/cuml/cuml/svm/svr.pyx | 2 +- python/cuml/cuml/tests/test_hdbscan.py | 2 +- python/cuml/cuml/tests/test_kmeans.py | 2 +- python/cuml/cuml/tests/test_logger.py | 2 +- python/cuml/cuml/tests/test_metrics.py | 2 +- python/cuml/cuml/tests/test_nearest_neighbors.py | 2 +- python/cuml/cuml/tests/test_umap.py | 2 +- 77 files changed, 77 insertions(+), 77 deletions(-) diff --git a/cpp/CMakeLists.txt b/cpp/CMakeLists.txt index c8a1593f01..ed69ee3df7 100644 --- a/cpp/CMakeLists.txt +++ b/cpp/CMakeLists.txt @@ -1,5 +1,5 @@ #============================================================================= -# Copyright (c) 2018-2024, NVIDIA CORPORATION. +# Copyright (c) 2018-2025, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cpp/bench/sg/svc.cu b/cpp/bench/sg/svc.cu index 966cf9f0da..5399fec776 100644 --- a/cpp/bench/sg/svc.cu +++ b/cpp/bench/sg/svc.cu @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020-2024, NVIDIA CORPORATION. + * Copyright (c) 2020-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/cpp/bench/sg/svr.cu b/cpp/bench/sg/svr.cu index 79500a8625..40be89b372 100644 --- a/cpp/bench/sg/svr.cu +++ b/cpp/bench/sg/svr.cu @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020-2024, NVIDIA CORPORATION. 
+ * Copyright (c) 2020-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/cpp/examples/dbscan/dbscan_example.cpp b/cpp/examples/dbscan/dbscan_example.cpp index 4fe5653cab..f6b07fc946 100644 --- a/cpp/examples/dbscan/dbscan_example.cpp +++ b/cpp/examples/dbscan/dbscan_example.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2024, NVIDIA CORPORATION. + * Copyright (c) 2019-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/cpp/include/cuml/cluster/dbscan.hpp b/cpp/include/cuml/cluster/dbscan.hpp index 8a2937fcee..d910c03414 100644 --- a/cpp/include/cuml/cluster/dbscan.hpp +++ b/cpp/include/cuml/cluster/dbscan.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2024, NVIDIA CORPORATION. + * Copyright (c) 2018-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/cpp/include/cuml/cluster/kmeans.hpp b/cpp/include/cuml/cluster/kmeans.hpp index 95761998c2..9a3ffc768f 100644 --- a/cpp/include/cuml/cluster/kmeans.hpp +++ b/cpp/include/cuml/cluster/kmeans.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2024, NVIDIA CORPORATION. + * Copyright (c) 2019-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/cpp/include/cuml/common/utils.hpp b/cpp/include/cuml/common/utils.hpp index 9a5f08cc62..6c8f690fbf 100644 --- a/cpp/include/cuml/common/utils.hpp +++ b/cpp/include/cuml/common/utils.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2024, NVIDIA CORPORATION. + * Copyright (c) 2019-2025, NVIDIA CORPORATION. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/cpp/include/cuml/ensemble/randomforest.hpp b/cpp/include/cuml/ensemble/randomforest.hpp index a0351c0dab..596e80f535 100644 --- a/cpp/include/cuml/ensemble/randomforest.hpp +++ b/cpp/include/cuml/ensemble/randomforest.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2024, NVIDIA CORPORATION. + * Copyright (c) 2019-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/cpp/include/cuml/manifold/tsne.h b/cpp/include/cuml/manifold/tsne.h index a87c9a85c0..1e1222e2e3 100644 --- a/cpp/include/cuml/manifold/tsne.h +++ b/cpp/include/cuml/manifold/tsne.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2024, NVIDIA CORPORATION. + * Copyright (c) 2019-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/cpp/include/cuml/manifold/umapparams.h b/cpp/include/cuml/manifold/umapparams.h index bb21abae2b..a3086e6e13 100644 --- a/cpp/include/cuml/manifold/umapparams.h +++ b/cpp/include/cuml/manifold/umapparams.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2024, NVIDIA CORPORATION. + * Copyright (c) 2019-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/cpp/include/cuml/solvers/lars.hpp b/cpp/include/cuml/solvers/lars.hpp index 7a70d2e435..5f795bc735 100644 --- a/cpp/include/cuml/solvers/lars.hpp +++ b/cpp/include/cuml/solvers/lars.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020-2024, NVIDIA CORPORATION. + * Copyright (c) 2020-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/cpp/include/cuml/svm/linear.hpp b/cpp/include/cuml/svm/linear.hpp index dc391a3e5b..a3564d43ed 100644 --- a/cpp/include/cuml/svm/linear.hpp +++ b/cpp/include/cuml/svm/linear.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021-2024, NVIDIA CORPORATION. + * Copyright (c) 2021-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/cpp/include/cuml/svm/svc.hpp b/cpp/include/cuml/svm/svc.hpp index 3c2a0366e7..652f8c1b88 100644 --- a/cpp/include/cuml/svm/svc.hpp +++ b/cpp/include/cuml/svm/svc.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2024, NVIDIA CORPORATION. + * Copyright (c) 2019-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/cpp/include/cuml/svm/svm_api.h b/cpp/include/cuml/svm/svm_api.h index 8851e2ad27..381b4ddf56 100644 --- a/cpp/include/cuml/svm/svm_api.h +++ b/cpp/include/cuml/svm/svm_api.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2024, NVIDIA CORPORATION. + * Copyright (c) 2019-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/cpp/include/cuml/svm/svm_parameter.h b/cpp/include/cuml/svm/svm_parameter.h index 66df9c13cc..b73ff90e23 100644 --- a/cpp/include/cuml/svm/svm_parameter.h +++ b/cpp/include/cuml/svm/svm_parameter.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2024, NVIDIA CORPORATION. + * Copyright (c) 2019-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/cpp/src/dbscan/dbscan.cu b/cpp/src/dbscan/dbscan.cu index 1910959c09..c36416a4f2 100644 --- a/cpp/src/dbscan/dbscan.cu +++ b/cpp/src/dbscan/dbscan.cu @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2024, NVIDIA CORPORATION. + * Copyright (c) 2018-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/cpp/src/dbscan/dbscan.cuh b/cpp/src/dbscan/dbscan.cuh index b80466bc6e..a56f62ed7e 100644 --- a/cpp/src/dbscan/dbscan.cuh +++ b/cpp/src/dbscan/dbscan.cuh @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2024, NVIDIA CORPORATION. + * Copyright (c) 2018-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/cpp/src/dbscan/dbscan_api.cpp b/cpp/src/dbscan/dbscan_api.cpp index bd582a496e..3ed494751e 100644 --- a/cpp/src/dbscan/dbscan_api.cpp +++ b/cpp/src/dbscan/dbscan_api.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2024, NVIDIA CORPORATION. + * Copyright (c) 2018-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/cpp/src/decisiontree/decisiontree.cuh b/cpp/src/decisiontree/decisiontree.cuh index 2bf9ac595c..7cc6ea353b 100644 --- a/cpp/src/decisiontree/decisiontree.cuh +++ b/cpp/src/decisiontree/decisiontree.cuh @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2024, NVIDIA CORPORATION. + * Copyright (c) 2019-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/cpp/src/glm/qn/mg/qn_mg.cuh b/cpp/src/glm/qn/mg/qn_mg.cuh index 78e34af47b..76cb06c9fe 100644 --- a/cpp/src/glm/qn/mg/qn_mg.cuh +++ b/cpp/src/glm/qn/mg/qn_mg.cuh @@ -1,5 +1,5 @@ /* - * Copyright (c) 2023-2024, NVIDIA CORPORATION. 
+ * Copyright (c) 2023-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/cpp/src/glm/qn/qn.cuh b/cpp/src/glm/qn/qn.cuh index b76085b25f..ff85761995 100644 --- a/cpp/src/glm/qn/qn.cuh +++ b/cpp/src/glm/qn/qn.cuh @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2024, NVIDIA CORPORATION. + * Copyright (c) 2018-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/cpp/src/glm/qn/qn_solvers.cuh b/cpp/src/glm/qn/qn_solvers.cuh index 9fb67c34d5..a438f5da4f 100644 --- a/cpp/src/glm/qn/qn_solvers.cuh +++ b/cpp/src/glm/qn/qn_solvers.cuh @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2024, NVIDIA CORPORATION. + * Copyright (c) 2018-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/cpp/src/randomforest/randomforest.cu b/cpp/src/randomforest/randomforest.cu index 4d70f3ea27..6ba9470018 100644 --- a/cpp/src/randomforest/randomforest.cu +++ b/cpp/src/randomforest/randomforest.cu @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2024, NVIDIA CORPORATION. + * Copyright (c) 2019-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/cpp/src/randomforest/randomforest.cuh b/cpp/src/randomforest/randomforest.cuh index 0ef58f7ee5..7f82e2877b 100644 --- a/cpp/src/randomforest/randomforest.cuh +++ b/cpp/src/randomforest/randomforest.cuh @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2024, NVIDIA CORPORATION. + * Copyright (c) 2019-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/cpp/src/solver/lars.cu b/cpp/src/solver/lars.cu index 0f6499c471..7302ec2957 100644 --- a/cpp/src/solver/lars.cu +++ b/cpp/src/solver/lars.cu @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020-2024, NVIDIA CORPORATION. + * Copyright (c) 2020-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/cpp/src/solver/lars_impl.cuh b/cpp/src/solver/lars_impl.cuh index 402b5bf51e..383a0c9110 100644 --- a/cpp/src/solver/lars_impl.cuh +++ b/cpp/src/solver/lars_impl.cuh @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020-2024, NVIDIA CORPORATION. + * Copyright (c) 2020-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/cpp/src/svm/linear.cu b/cpp/src/svm/linear.cu index 2dadc2c937..ebe4521c31 100644 --- a/cpp/src/svm/linear.cu +++ b/cpp/src/svm/linear.cu @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021-2024, NVIDIA CORPORATION. + * Copyright (c) 2021-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/cpp/src/svm/smosolver.h b/cpp/src/svm/smosolver.h index f394d7fdd7..c21d45cfbc 100644 --- a/cpp/src/svm/smosolver.h +++ b/cpp/src/svm/smosolver.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2024, NVIDIA CORPORATION. + * Copyright (c) 2019-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/cpp/src/svm/svc.cu b/cpp/src/svm/svc.cu index 941de3d0cf..56f7656626 100644 --- a/cpp/src/svm/svc.cu +++ b/cpp/src/svm/svc.cu @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2024, NVIDIA CORPORATION. + * Copyright (c) 2019-2025, NVIDIA CORPORATION. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/cpp/src/svm/svm_api.cpp b/cpp/src/svm/svm_api.cpp index c9917c3356..392c15bedf 100644 --- a/cpp/src/svm/svm_api.cpp +++ b/cpp/src/svm/svm_api.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2024, NVIDIA CORPORATION. + * Copyright (c) 2019-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/cpp/src/svm/workingset.cuh b/cpp/src/svm/workingset.cuh index 5aff3f0183..ad707dc27a 100644 --- a/cpp/src/svm/workingset.cuh +++ b/cpp/src/svm/workingset.cuh @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2024, NVIDIA CORPORATION. + * Copyright (c) 2019-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/cpp/src/tsne/tsne_runner.cuh b/cpp/src/tsne/tsne_runner.cuh index 2961074e87..cdbfdd2674 100644 --- a/cpp/src/tsne/tsne_runner.cuh +++ b/cpp/src/tsne/tsne_runner.cuh @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2024, NVIDIA CORPORATION. + * Copyright (c) 2019-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/cpp/src/tsne/utils.cuh b/cpp/src/tsne/utils.cuh index 446d4c9d5b..0ad23bae11 100644 --- a/cpp/src/tsne/utils.cuh +++ b/cpp/src/tsne/utils.cuh @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2024, NVIDIA CORPORATION. + * Copyright (c) 2019-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/cpp/src/umap/fuzzy_simpl_set/naive.cuh b/cpp/src/umap/fuzzy_simpl_set/naive.cuh index 58a55a486b..41e54f1f63 100644 --- a/cpp/src/umap/fuzzy_simpl_set/naive.cuh +++ b/cpp/src/umap/fuzzy_simpl_set/naive.cuh @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2024, NVIDIA CORPORATION. + * Copyright (c) 2019-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/cpp/src/umap/runner.cuh b/cpp/src/umap/runner.cuh index 9f779e320b..01aa6f62c7 100644 --- a/cpp/src/umap/runner.cuh +++ b/cpp/src/umap/runner.cuh @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2024, NVIDIA CORPORATION. + * Copyright (c) 2019-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/cpp/src/umap/simpl_set_embed/algo.cuh b/cpp/src/umap/simpl_set_embed/algo.cuh index 0487be4b31..6be8b0235b 100644 --- a/cpp/src/umap/simpl_set_embed/algo.cuh +++ b/cpp/src/umap/simpl_set_embed/algo.cuh @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2024, NVIDIA CORPORATION. + * Copyright (c) 2019-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/cpp/src/umap/supervised.cuh b/cpp/src/umap/supervised.cuh index 5918e37fc0..1a9739f280 100644 --- a/cpp/src/umap/supervised.cuh +++ b/cpp/src/umap/supervised.cuh @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2024, NVIDIA CORPORATION. + * Copyright (c) 2019-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/cpp/test/sg/genetic/evolution_test.cu b/cpp/test/sg/genetic/evolution_test.cu index fa8ebf77bd..27d45645fc 100644 --- a/cpp/test/sg/genetic/evolution_test.cu +++ b/cpp/test/sg/genetic/evolution_test.cu @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021-2024, NVIDIA CORPORATION. + * Copyright (c) 2021-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/cpp/test/sg/genetic/program_test.cu b/cpp/test/sg/genetic/program_test.cu index d549906ebf..b254b3ed01 100644 --- a/cpp/test/sg/genetic/program_test.cu +++ b/cpp/test/sg/genetic/program_test.cu @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021-2024, NVIDIA CORPORATION. + * Copyright (c) 2021-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/cpp/test/sg/hdbscan_test.cu b/cpp/test/sg/hdbscan_test.cu index 888496e98c..3a86571f00 100644 --- a/cpp/test/sg/hdbscan_test.cu +++ b/cpp/test/sg/hdbscan_test.cu @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021-2024, NVIDIA CORPORATION. + * Copyright (c) 2021-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/cpp/test/sg/lars_test.cu b/cpp/test/sg/lars_test.cu index 3f58f337ee..85657eafea 100644 --- a/cpp/test/sg/lars_test.cu +++ b/cpp/test/sg/lars_test.cu @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020-2024, NVIDIA CORPORATION. + * Copyright (c) 2020-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/cpp/test/sg/logger.cpp b/cpp/test/sg/logger.cpp index 030ece46dc..73278447c2 100644 --- a/cpp/test/sg/logger.cpp +++ b/cpp/test/sg/logger.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020-2024, NVIDIA CORPORATION. 
+ * Copyright (c) 2020-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/cpp/test/sg/quasi_newton.cu b/cpp/test/sg/quasi_newton.cu index 4dca80f911..a0930740fa 100644 --- a/cpp/test/sg/quasi_newton.cu +++ b/cpp/test/sg/quasi_newton.cu @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020-2024, NVIDIA CORPORATION. + * Copyright (c) 2020-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/cpp/test/sg/svc_test.cu b/cpp/test/sg/svc_test.cu index 661fe35e36..e8ecb297c1 100644 --- a/cpp/test/sg/svc_test.cu +++ b/cpp/test/sg/svc_test.cu @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2024, NVIDIA CORPORATION. + * Copyright (c) 2019-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/cpp/test/sg/tsne_test.cu b/cpp/test/sg/tsne_test.cu index ec9f69e805..628d2da144 100644 --- a/cpp/test/sg/tsne_test.cu +++ b/cpp/test/sg/tsne_test.cu @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2024, NVIDIA CORPORATION. + * Copyright (c) 2019-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/python/cuml/cuml/cluster/dbscan.pyx b/python/cuml/cuml/cluster/dbscan.pyx index 67b68a547e..7d2c9dad8b 100644 --- a/python/cuml/cuml/cluster/dbscan.pyx +++ b/python/cuml/cuml/cluster/dbscan.pyx @@ -1,5 +1,5 @@ # -# Copyright (c) 2019-2024, NVIDIA CORPORATION. +# Copyright (c) 2019-2025, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/python/cuml/cuml/cluster/hdbscan/hdbscan.pyx b/python/cuml/cuml/cluster/hdbscan/hdbscan.pyx index b19545bc2a..dcc5fefaeb 100644 --- a/python/cuml/cuml/cluster/hdbscan/hdbscan.pyx +++ b/python/cuml/cuml/cluster/hdbscan/hdbscan.pyx @@ -1,4 +1,4 @@ -# Copyright (c) 2021-2024, NVIDIA CORPORATION. +# Copyright (c) 2021-2025, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/python/cuml/cuml/common/kernel_utils.py b/python/cuml/cuml/common/kernel_utils.py index 5b8f3a03a7..652a860129 100644 --- a/python/cuml/cuml/common/kernel_utils.py +++ b/python/cuml/cuml/common/kernel_utils.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2019-2024, NVIDIA CORPORATION. +# Copyright (c) 2019-2025, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/python/cuml/cuml/dask/common/dask_df_utils.py b/python/cuml/cuml/dask/common/dask_df_utils.py index 199a08cac6..f1daf83e03 100644 --- a/python/cuml/cuml/dask/common/dask_df_utils.py +++ b/python/cuml/cuml/dask/common/dask_df_utils.py @@ -1,4 +1,4 @@ -# Copyright (c) 2019-2024, NVIDIA CORPORATION. +# Copyright (c) 2019-2025, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/python/cuml/cuml/dask/common/input_utils.py b/python/cuml/cuml/dask/common/input_utils.py index d6ab867e6d..40650f96a0 100644 --- a/python/cuml/cuml/dask/common/input_utils.py +++ b/python/cuml/cuml/dask/common/input_utils.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2020-2024, NVIDIA CORPORATION. +# Copyright (c) 2020-2025, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/python/cuml/cuml/dask/manifold/umap.py b/python/cuml/cuml/dask/manifold/umap.py index 9a27b52d30..bd3c13fd99 100644 --- a/python/cuml/cuml/dask/manifold/umap.py +++ b/python/cuml/cuml/dask/manifold/umap.py @@ -1,4 +1,4 @@ -# Copyright (c) 2020-2024, NVIDIA CORPORATION. +# Copyright (c) 2020-2025, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/python/cuml/cuml/ensemble/randomforestclassifier.pyx b/python/cuml/cuml/ensemble/randomforestclassifier.pyx index 34ff8cf9c1..5198d60b28 100644 --- a/python/cuml/cuml/ensemble/randomforestclassifier.pyx +++ b/python/cuml/cuml/ensemble/randomforestclassifier.pyx @@ -1,6 +1,6 @@ # -# Copyright (c) 2019-2024, NVIDIA CORPORATION. +# Copyright (c) 2019-2025, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/python/cuml/cuml/ensemble/randomforestregressor.pyx b/python/cuml/cuml/ensemble/randomforestregressor.pyx index 498e987b90..6e3a13d0fb 100644 --- a/python/cuml/cuml/ensemble/randomforestregressor.pyx +++ b/python/cuml/cuml/ensemble/randomforestregressor.pyx @@ -1,5 +1,5 @@ # -# Copyright (c) 2019-2024, NVIDIA CORPORATION. +# Copyright (c) 2019-2025, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/python/cuml/cuml/experimental/accel/__init__.py b/python/cuml/cuml/experimental/accel/__init__.py index 69f53b7bb1..9781f88fcc 100644 --- a/python/cuml/cuml/experimental/accel/__init__.py +++ b/python/cuml/cuml/experimental/accel/__init__.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2024, NVIDIA CORPORATION. +# Copyright (c) 2024-2025, NVIDIA CORPORATION. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/python/cuml/cuml/experimental/linear_model/lars.pyx b/python/cuml/cuml/experimental/linear_model/lars.pyx index ddb3f69fa3..6efbe2d50f 100644 --- a/python/cuml/cuml/experimental/linear_model/lars.pyx +++ b/python/cuml/cuml/experimental/linear_model/lars.pyx @@ -1,5 +1,5 @@ # -# Copyright (c) 2020-2024, NVIDIA CORPORATION. +# Copyright (c) 2020-2025, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/python/cuml/cuml/explainer/base.pyx b/python/cuml/cuml/explainer/base.pyx index d29aee0f1d..3aae1d02a7 100644 --- a/python/cuml/cuml/explainer/base.pyx +++ b/python/cuml/cuml/explainer/base.pyx @@ -1,5 +1,5 @@ # -# Copyright (c) 2020-2024, NVIDIA CORPORATION. +# Copyright (c) 2020-2025, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/python/cuml/cuml/internals/base.pyx b/python/cuml/cuml/internals/base.pyx index 1d53cb6e76..a2a7374a1f 100644 --- a/python/cuml/cuml/internals/base.pyx +++ b/python/cuml/cuml/internals/base.pyx @@ -1,5 +1,5 @@ # -# Copyright (c) 2019-2024, NVIDIA CORPORATION. +# Copyright (c) 2019-2025, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/python/cuml/cuml/internals/logger.pxd b/python/cuml/cuml/internals/logger.pxd index 7ab6446a9d..6556cb0505 100644 --- a/python/cuml/cuml/internals/logger.pxd +++ b/python/cuml/cuml/internals/logger.pxd @@ -1,5 +1,5 @@ # -# Copyright (c) 2020-2024, NVIDIA CORPORATION. +# Copyright (c) 2020-2025, NVIDIA CORPORATION. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/python/cuml/cuml/internals/logger.pyx b/python/cuml/cuml/internals/logger.pyx index 5186d62714..0bc09126bd 100644 --- a/python/cuml/cuml/internals/logger.pyx +++ b/python/cuml/cuml/internals/logger.pyx @@ -1,5 +1,5 @@ # -# Copyright (c) 2020-2024, NVIDIA CORPORATION. +# Copyright (c) 2020-2025, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/python/cuml/cuml/linear_model/logistic_regression.pyx b/python/cuml/cuml/linear_model/logistic_regression.pyx index c66ef7ee35..e968093c8e 100644 --- a/python/cuml/cuml/linear_model/logistic_regression.pyx +++ b/python/cuml/cuml/linear_model/logistic_regression.pyx @@ -1,5 +1,5 @@ # -# Copyright (c) 2019-2024, NVIDIA CORPORATION. +# Copyright (c) 2019-2025, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/python/cuml/cuml/manifold/simpl_set.pyx b/python/cuml/cuml/manifold/simpl_set.pyx index 20e1207bf6..ffd1d13463 100644 --- a/python/cuml/cuml/manifold/simpl_set.pyx +++ b/python/cuml/cuml/manifold/simpl_set.pyx @@ -1,5 +1,5 @@ # -# Copyright (c) 2022-2024, NVIDIA CORPORATION. +# Copyright (c) 2022-2025, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/python/cuml/cuml/manifold/t_sne.pyx b/python/cuml/cuml/manifold/t_sne.pyx index 85fce24076..7ff8702a2c 100644 --- a/python/cuml/cuml/manifold/t_sne.pyx +++ b/python/cuml/cuml/manifold/t_sne.pyx @@ -1,4 +1,4 @@ -# Copyright (c) 2019-2024, NVIDIA CORPORATION. +# Copyright (c) 2019-2025, NVIDIA CORPORATION. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/python/cuml/cuml/manifold/umap.pyx b/python/cuml/cuml/manifold/umap.pyx index 4d2a69e7f8..079b270d0a 100644 --- a/python/cuml/cuml/manifold/umap.pyx +++ b/python/cuml/cuml/manifold/umap.pyx @@ -1,5 +1,5 @@ # -# Copyright (c) 2019-2024, NVIDIA CORPORATION. +# Copyright (c) 2019-2025, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/python/cuml/cuml/manifold/umap_utils.pxd b/python/cuml/cuml/manifold/umap_utils.pxd index e2df77a22f..498e495733 100644 --- a/python/cuml/cuml/manifold/umap_utils.pxd +++ b/python/cuml/cuml/manifold/umap_utils.pxd @@ -1,5 +1,5 @@ # -# Copyright (c) 2022-2024, NVIDIA CORPORATION. +# Copyright (c) 2022-2025, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/python/cuml/cuml/neighbors/kneighbors_classifier_mg.pyx b/python/cuml/cuml/neighbors/kneighbors_classifier_mg.pyx index f27e080381..129ffa9ca7 100644 --- a/python/cuml/cuml/neighbors/kneighbors_classifier_mg.pyx +++ b/python/cuml/cuml/neighbors/kneighbors_classifier_mg.pyx @@ -1,5 +1,5 @@ # -# Copyright (c) 2020-2024, NVIDIA CORPORATION. +# Copyright (c) 2020-2025, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/python/cuml/cuml/neighbors/kneighbors_regressor_mg.pyx b/python/cuml/cuml/neighbors/kneighbors_regressor_mg.pyx index 7c49b5576d..ff7a1bc193 100644 --- a/python/cuml/cuml/neighbors/kneighbors_regressor_mg.pyx +++ b/python/cuml/cuml/neighbors/kneighbors_regressor_mg.pyx @@ -1,5 +1,5 @@ # -# Copyright (c) 2020-2024, NVIDIA CORPORATION. +# Copyright (c) 2020-2025, NVIDIA CORPORATION. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/python/cuml/cuml/neighbors/nearest_neighbors_mg.pyx b/python/cuml/cuml/neighbors/nearest_neighbors_mg.pyx index 85dafd89fd..fff40af0e2 100644 --- a/python/cuml/cuml/neighbors/nearest_neighbors_mg.pyx +++ b/python/cuml/cuml/neighbors/nearest_neighbors_mg.pyx @@ -1,5 +1,5 @@ # -# Copyright (c) 2020-2024, NVIDIA CORPORATION. +# Copyright (c) 2020-2025, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/python/cuml/cuml/svm/linear.pyx b/python/cuml/cuml/svm/linear.pyx index 70cca27cf2..9a0efedbc3 100644 --- a/python/cuml/cuml/svm/linear.pyx +++ b/python/cuml/cuml/svm/linear.pyx @@ -1,4 +1,4 @@ -# Copyright (c) 2021-2024, NVIDIA CORPORATION. +# Copyright (c) 2021-2025, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/python/cuml/cuml/svm/svc.pyx b/python/cuml/cuml/svm/svc.pyx index ded9ad358c..bafe84e505 100644 --- a/python/cuml/cuml/svm/svc.pyx +++ b/python/cuml/cuml/svm/svc.pyx @@ -1,4 +1,4 @@ -# Copyright (c) 2019-2024, NVIDIA CORPORATION. +# Copyright (c) 2019-2025, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/python/cuml/cuml/svm/svm_base.pyx b/python/cuml/cuml/svm/svm_base.pyx index af76f2d0e3..7970e99338 100644 --- a/python/cuml/cuml/svm/svm_base.pyx +++ b/python/cuml/cuml/svm/svm_base.pyx @@ -1,4 +1,4 @@ -# Copyright (c) 2019-2024, NVIDIA CORPORATION. +# Copyright (c) 2019-2025, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/python/cuml/cuml/svm/svr.pyx b/python/cuml/cuml/svm/svr.pyx index 9dc61b7635..51f3110f77 100644 --- a/python/cuml/cuml/svm/svr.pyx +++ b/python/cuml/cuml/svm/svr.pyx @@ -1,4 +1,4 @@ -# Copyright (c) 2019-2024, NVIDIA CORPORATION. +# Copyright (c) 2019-2025, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/python/cuml/cuml/tests/test_hdbscan.py b/python/cuml/cuml/tests/test_hdbscan.py index 7fd1eb0635..a00ed73761 100644 --- a/python/cuml/cuml/tests/test_hdbscan.py +++ b/python/cuml/cuml/tests/test_hdbscan.py @@ -1,4 +1,4 @@ -# Copyright (c) 2021-2024, NVIDIA CORPORATION. +# Copyright (c) 2021-2025, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/python/cuml/cuml/tests/test_kmeans.py b/python/cuml/cuml/tests/test_kmeans.py index 665f237fd2..9c01486ffe 100644 --- a/python/cuml/cuml/tests/test_kmeans.py +++ b/python/cuml/cuml/tests/test_kmeans.py @@ -1,4 +1,4 @@ -# Copyright (c) 2019-2024, NVIDIA CORPORATION. +# Copyright (c) 2019-2025, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/python/cuml/cuml/tests/test_logger.py b/python/cuml/cuml/tests/test_logger.py index 12e116fc82..dc6c1285e3 100644 --- a/python/cuml/cuml/tests/test_logger.py +++ b/python/cuml/cuml/tests/test_logger.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2020-2024, NVIDIA CORPORATION. +# Copyright (c) 2020-2025, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/python/cuml/cuml/tests/test_metrics.py b/python/cuml/cuml/tests/test_metrics.py index c99d57e666..5886ff68d4 100644 --- a/python/cuml/cuml/tests/test_metrics.py +++ b/python/cuml/cuml/tests/test_metrics.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2021-2024, NVIDIA CORPORATION. +# Copyright (c) 2021-2025, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/python/cuml/cuml/tests/test_nearest_neighbors.py b/python/cuml/cuml/tests/test_nearest_neighbors.py index bf91951295..d997ee578f 100644 --- a/python/cuml/cuml/tests/test_nearest_neighbors.py +++ b/python/cuml/cuml/tests/test_nearest_neighbors.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2019-2024, NVIDIA CORPORATION. +# Copyright (c) 2019-2025, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/python/cuml/cuml/tests/test_umap.py b/python/cuml/cuml/tests/test_umap.py index de32c1d908..6d91012177 100644 --- a/python/cuml/cuml/tests/test_umap.py +++ b/python/cuml/cuml/tests/test_umap.py @@ -1,4 +1,4 @@ -# Copyright (c) 2019-2024, NVIDIA CORPORATION. +# Copyright (c) 2019-2025, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
From 64f80f9ba8ee303699480621bc548f1221254d2f Mon Sep 17 00:00:00 2001 From: Vyas Ramasubramani Date: Thu, 2 Jan 2025 19:49:40 +0000 Subject: [PATCH 32/32] Fix lbfgs test verifying log level --- .../cuml/tests/dask/test_dask_logistic_regression.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/python/cuml/cuml/tests/dask/test_dask_logistic_regression.py b/python/cuml/cuml/tests/dask/test_dask_logistic_regression.py index 94ebecf6b0..0a3d47ea3f 100644 --- a/python/cuml/cuml/tests/dask/test_dask_logistic_regression.py +++ b/python/cuml/cuml/tests/dask/test_dask_logistic_regression.py @@ -1,4 +1,4 @@ -# Copyright (c) 2019-2024, NVIDIA CORPORATION. +# Copyright (c) 2019-2025, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -21,6 +21,7 @@ from sklearn.datasets import make_classification from sklearn.linear_model import LogisticRegression as skLR from cuml.internals.safe_imports import cpu_only_import +from cuml.internals import logger from cuml.testing.utils import array_equal from scipy.sparse import csr_matrix, load_npz, save_npz import random @@ -197,8 +198,10 @@ def assert_params( assert qnpams["fit_intercept"] == fit_intercept assert qnpams["max_iter"] == max_iter assert qnpams["linesearch_max_iter"] == linesearch_max_iter - assert ( - qnpams["verbose"] == 5 if verbose is True else 4 + assert qnpams["verbose"] == ( + logger.level_enum.debug + if verbose is True + else logger.level_enum.info ) # cuml Verbosity Levels assert ( lr.output_type == "input" if output_type is None else output_type