Skip to content

Commit

Permalink
Merge branch 'mlj-table' of github.com:Evovest/EvoTrees.jl into mlj-table
Browse files Browse the repository at this point in the history
  • Loading branch information
jeremiedb committed Oct 26, 2023
2 parents 57ac5dc + e47e804 commit cd10204
Show file tree
Hide file tree
Showing 3 changed files with 76 additions and 81 deletions.
24 changes: 12 additions & 12 deletions benchmarks/regressor-df.jl
Original file line number Diff line number Diff line change
Expand Up @@ -6,9 +6,9 @@ using EvoTrees
using DataFrames
using BenchmarkTools
using Random: seed!
import CUDA
# import CUDA

nobs = Int(1e6)
nobs = Int(1e7)
num_feat = Int(100)
nrounds = 200
T = Float64
Expand Down Expand Up @@ -94,13 +94,13 @@ device = "cpu"
@time pred_evo = m_evo(dtrain);
@btime m_evo($dtrain);

@info "EvoTrees GPU"
device = "gpu"
@info "train"
@time m_evo = fit_evotree(params_evo, dtrain; target_name, deval=dtrain, metric=metric_evo, device, verbosity, print_every_n=100);
@time m_evo = fit_evotree(params_evo, dtrain; target_name, deval=dtrain, metric=metric_evo, device, verbosity, print_every_n=100);
# @btime m_evo = fit_evotree($params_evo, $dtrain; target_name, device);
# @btime fit_evotree($params_evo, $dtrain; target_name, deval=dtrain, metric=metric_evo, device, verbosity, print_every_n=100);
@info "predict"
@time pred_evo = m_evo(dtrain; device);
@btime m_evo($dtrain; device);
# @info "EvoTrees GPU"
# device = "gpu"
# @info "train"
# @time m_evo = fit_evotree(params_evo, dtrain; target_name, deval=dtrain, metric=metric_evo, device, verbosity, print_every_n=100);
# @time m_evo = fit_evotree(params_evo, dtrain; target_name, deval=dtrain, metric=metric_evo, device, verbosity, print_every_n=100);
# # @btime m_evo = fit_evotree($params_evo, $dtrain; target_name, device);
# # @btime fit_evotree($params_evo, $dtrain; target_name, deval=dtrain, metric=metric_evo, device, verbosity, print_every_n=100);
# @info "predict"
# @time pred_evo = m_evo(dtrain; device);
# @btime m_evo($dtrain; device);
70 changes: 35 additions & 35 deletions benchmarks/regressor.jl
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@ using XGBoost
using EvoTrees
using BenchmarkTools
using Random: seed!
import CUDA
# import CUDA

### v.0.15.1
# desktop | 1e6 | depth 11 | cpu: 37.2s
Expand Down Expand Up @@ -45,27 +45,27 @@ elseif loss == "logloss"
metric_evo = :logloss
end

@info "XGBoost"
@info "train"
params_xgb = Dict(
:num_round => nrounds,
:max_depth => max_depth - 1,
:eta => 0.05,
:objective => loss_xgb,
:print_every_n => 5,
:subsample => 0.5,
:colsample_bytree => 0.5,
:tree_method => "hist", # hist/gpu_hist
:max_bin => 64,
)
# @info "XGBoost"
# @info "train"
# params_xgb = Dict(
# :num_round => nrounds,
# :max_depth => max_depth - 1,
# :eta => 0.05,
# :objective => loss_xgb,
# :print_every_n => 5,
# :subsample => 0.5,
# :colsample_bytree => 0.5,
# :tree_method => "hist", # hist/gpu_hist
# :max_bin => 64,
# )

dtrain = DMatrix(x_train, y_train)
watchlist = Dict("train" => DMatrix(x_train, y_train));
@time m_xgb = xgboost(dtrain; watchlist, nthread=nthread, verbosity=0, eval_metric=metric_xgb, params_xgb...);
# @btime m_xgb = xgboost($dtrain; watchlist, nthread=nthread, verbosity=0, eval_metric = metric_xgb, params_xgb...);
@info "predict"
@time pred_xgb = XGBoost.predict(m_xgb, x_train);
# @btime XGBoost.predict($m_xgb, $x_train);
# dtrain = DMatrix(x_train, y_train)
# watchlist = Dict("train" => DMatrix(x_train, y_train));
# @time m_xgb = xgboost(dtrain; watchlist, nthread=nthread, verbosity=0, eval_metric=metric_xgb, params_xgb...);
# # @btime m_xgb = xgboost($dtrain; watchlist, nthread=nthread, verbosity=0, eval_metric = metric_xgb, params_xgb...);
# @info "predict"
# @time pred_xgb = XGBoost.predict(m_xgb, x_train);
# # @btime XGBoost.predict($m_xgb, $x_train);

# @info "lightgbm train:"
# m_gbm = LGBMRegression(
Expand Down Expand Up @@ -135,17 +135,17 @@ device = "cpu"
@time pred_evo = m_evo(x_train);
# @btime m_evo($x_train);

@info "EvoTrees GPU"
device = "gpu"
# @info "train - no eval"
# CUDA.@time m_evo = fit_evotree(params_evo; x_train, y_train, device, verbosity, print_every_n=100);
# CUDA.@time m_evo = fit_evotree(params_evo; x_train, y_train, device, verbosity, print_every_n=100);
@info "train - eval"
CUDA.@time m_evo = fit_evotree(params_evo; x_train, y_train, x_eval=x_train, y_eval=y_train, metric=metric_evo, device, verbosity, print_every_n=100);
CUDA.@time m_evo = fit_evotree(params_evo; x_train, y_train, x_eval=x_train, y_eval=y_train, metric=metric_evo, device, verbosity, print_every_n=100);
# @time m_evo = fit_evotree(params_evo; x_train, y_train);
# @btime fit_evotree($params_evo; x_train=$x_train, y_train=$y_train, x_eval=$x_train, y_eval=$y_train, metric=metric_evo, device, verbosity);
@info "predict"
CUDA.@time pred_evo = m_evo(x_train; device);
CUDA.@time pred_evo = m_evo(x_train; device);
# @btime m_evo($x_train; device);
# @info "EvoTrees GPU"
# device = "gpu"
# # @info "train - no eval"
# # CUDA.@time m_evo = fit_evotree(params_evo; x_train, y_train, device, verbosity, print_every_n=100);
# # CUDA.@time m_evo = fit_evotree(params_evo; x_train, y_train, device, verbosity, print_every_n=100);
# @info "train - eval"
# CUDA.@time m_evo = fit_evotree(params_evo; x_train, y_train, x_eval=x_train, y_eval=y_train, metric=metric_evo, device, verbosity, print_every_n=100);
# CUDA.@time m_evo = fit_evotree(params_evo; x_train, y_train, x_eval=x_train, y_eval=y_train, metric=metric_evo, device, verbosity, print_every_n=100);
# # @time m_evo = fit_evotree(params_evo; x_train, y_train);
# # @btime fit_evotree($params_evo; x_train=$x_train, y_train=$y_train, x_eval=$x_train, y_eval=$y_train, metric=metric_evo, device, verbosity);
# @info "predict"
# CUDA.@time pred_evo = m_evo(x_train; device);
# CUDA.@time pred_evo = m_evo(x_train; device);
# # @btime m_evo($x_train; device);
63 changes: 29 additions & 34 deletions src/fit-utils.jl
Original file line number Diff line number Diff line change
Expand Up @@ -175,45 +175,40 @@ function split_set_threads!(
lefts = zeros(Int, nblocks)
rights = zeros(Int, nblocks)

@sync begin
for bid = 1:nblocks
@spawn begin
lefts[bid], rights[bid] = split_set_chunk!(
left,
right,
is,
bid,
nblocks,
x_bin,
feat,
cond_bin,
feattype,
offset,
chunk_size,
)
end
end
@threads for bid = 1:nblocks
lefts[bid], rights[bid] = split_set_chunk!(
left,
right,
is,
bid,
nblocks,
x_bin,
feat,
cond_bin,
feattype,
offset,
chunk_size,
)
end

sum_lefts = sum(lefts)
cumsum_lefts = cumsum(lefts)
cumsum_rights = cumsum(rights)
@sync begin
for bid = 1:nblocks
@spawn split_views_kernel!(
out,
left,
right,
bid,
offset,
chunk_size,
lefts,
rights,
sum_lefts,
cumsum_lefts,
cumsum_rights,
)
end

@threads for bid = 1:nblocks
split_views_kernel!(
out,
left,
right,
bid,
offset,
chunk_size,
lefts,
rights,
sum_lefts,
cumsum_lefts,
cumsum_rights,
)
end

return (
Expand Down

0 comments on commit cd10204

Please sign in to comment.