From e2378885f109e515048bb86982f5bd53beaa8929 Mon Sep 17 00:00:00 2001
From: Yukio Siraichi
Date: Sat, 23 Mar 2024 20:20:45 -0300
Subject: [PATCH] Kill the old `nms` function.

---
 docs/source/index.rst                   |  1 -
 torch_xla/core/functions.py             | 23 -----------------------
 torch_xla/csrc/BUILD                    |  2 --
 torch_xla/csrc/init_python_bindings.cpp | 27 ---------------------------
 4 files changed, 53 deletions(-)

diff --git a/docs/source/index.rst b/docs/source/index.rst
index e29053f85382..4246fbbe820e 100644
--- a/docs/source/index.rst
+++ b/docs/source/index.rst
@@ -33,7 +33,6 @@ xla_model
 .. automodule:: torch_xla.core.functions
 .. autofunction:: all_reduce
 .. autofunction:: all_gather
-.. autofunction:: nms
 
 distributed
 ----------------------------------
diff --git a/torch_xla/core/functions.py b/torch_xla/core/functions.py
index 59f82c06665d..e868ddd3695e 100644
--- a/torch_xla/core/functions.py
+++ b/torch_xla/core/functions.py
@@ -84,29 +84,6 @@ def all_gather(value, dim=0):
   return AllGather.apply(value, dim)
 
 
-def nms(boxes, scores, score_threshold, iou_threshold, output_size):
-  """Performs a Non Maximal Suppression operation.
-
-  Args:
-    boxes (torch.Tensor): A `torch.Tensor` of shape `[N, 4]` listing the boxes
-      coordinates in `(y0, x0, y1, x1)` form.
-    scores (torch.Tensor): A `torch.Tensor` of shape `[N]` listing the scores
-      of each box.
-    score_threshold (torch.Tensor): The minimum score for a box to qualify as
-      valid.
-    iou_threshold (torch.Tensor): The minimum IOU (Intersection Over Union)
-      score to trigger overlap logic.
-    output_size (int): The maximum number of returned indices (must be lower or
-      equal to N).
-
-  Returns:
-    A tuple of `torch.Tensor` with the first element being the selected box
-    indices, and the second element being the number of valid boxes.
-  """
-  return torch_xla._XLAC._xla_nms(boxes, scores, score_threshold, iou_threshold,
-                                  output_size)
-
-
 def distributed_mm(w, x, split=1):
   """Performs a matrix multiplication with sharded weight.
 
diff --git a/torch_xla/csrc/BUILD b/torch_xla/csrc/BUILD
index 7f78c534af4f..ce718a1ebfe3 100644
--- a/torch_xla/csrc/BUILD
+++ b/torch_xla/csrc/BUILD
@@ -47,7 +47,6 @@ ptxla_cc_library(
         "ir_dump_util.cpp",
         "matrix.cpp",
         "nll_loss.cpp",
-        "nms_op.cpp",
         "pooling.cpp",
         "quant_util.cpp",
         "random.cpp",
@@ -87,7 +86,6 @@ ptxla_cc_library(
        "ir_dump_util.h",
        "matrix.h",
        "nll_loss.h",
-       "nms_op.h",
        "pooling.h",
        "quant_util.h",
        "random.h",
diff --git a/torch_xla/csrc/init_python_bindings.cpp b/torch_xla/csrc/init_python_bindings.cpp
index 97076eb37568..50228d88aedd 100644
--- a/torch_xla/csrc/init_python_bindings.cpp
+++ b/torch_xla/csrc/init_python_bindings.cpp
@@ -666,28 +666,6 @@ py::object GetRevisions() {
   return py_dict;
 }
 
-py::object XlaNms(const at::Tensor& boxes, const at::Tensor& scores,
-                  const at::Tensor& score_threshold,
-                  const at::Tensor& iou_threshold, int64_t output_size) {
-  at::Tensor selected_indices;
-  at::Tensor num_valid;
-  {
-    NoGilSection nogil;
-    auto nms_result = tensor_methods::nms(
-        bridge::GetXlaTensor(boxes), bridge::GetXlaTensor(scores),
-        bridge::GetXlaTensor(score_threshold),
-        bridge::GetXlaTensor(iou_threshold), output_size);
-    selected_indices = bridge::AtenFromXlaTensor(std::move(nms_result.first));
-    num_valid = bridge::AtenFromXlaTensor(std::move(nms_result.second));
-  }
-  auto result_tuple = py::tuple(2);
-  result_tuple[0] =
-      torch::autograd::make_variable(selected_indices, /*requires_grad=*/false);
-  result_tuple[1] =
-      torch::autograd::make_variable(num_valid, /*requires_grad=*/false);
-  return result_tuple;
-}
-
 std::vector<at::Tensor> XlaUserComputation(
     const std::string& opname, const std::vector<at::Tensor>& inputs,
     runtime::ComputationClient::ComputationPtr computation) {
@@ -1086,11 +1064,6 @@ void InitXlaModuleBindings(py::module m) {
         [](const at::Tensor& tensor, int dim) {
           return GetXlaTensorDimensionSize(tensor, dim);
         });
-  m.def("_xla_nms", [](const at::Tensor& boxes, const at::Tensor& scores,
-                       const at::Tensor& score_threshold,
-                       const at::Tensor& iou_threshold, int64_t output_size) {
-    return XlaNms(boxes, scores, score_threshold, iou_threshold, output_size);
-  });
  m.def("_xla_user_computation",
        [](const std::string& opname, const std::vector<at::Tensor>& inputs,
           const runtime::ComputationClient::ComputationPtr& computation) {
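
Migration note (not part of the patch above): this change removes `torch_xla.core.functions.nms` without naming a replacement. The sketch below is a hypothetical compatibility shim built on `torchvision.ops.nms`; that choice is an assumption, not something the patch prescribes. Note that `torchvision.ops.nms` expects boxes in `(x1, y1, x2, y2)` order and has no `score_threshold` or `output_size` parameters, so both are emulated here.

# Hypothetical shim, NOT part of this patch. Assumes torchvision.ops.nms is
# an acceptable substitute for the removed torch_xla nms helper.
import torch
import torchvision


def nms_compat(boxes, scores, score_threshold, iou_threshold, output_size):
  """Approximates the removed nms() signature on top of torchvision.

  Unlike the removed helper, which documented (y0, x0, y1, x1) boxes,
  torchvision.ops.nms expects (x1, y1, x2, y2).
  """
  # Emulate score_threshold: keep only boxes whose score passes the cutoff.
  idx = (scores >= score_threshold).nonzero().squeeze(1)
  # torchvision.ops.nms returns indices into the filtered tensors,
  # sorted by decreasing score.
  kept = torchvision.ops.nms(boxes[idx], scores[idx], float(iou_threshold))
  # Map back to indices into the original tensors and emulate output_size.
  selected = idx[kept][:output_size]
  return selected, torch.tensor(selected.numel())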