
Commit

Kill the old nms function.
ysiraichi committed Mar 23, 2024
1 parent 42ecc29 commit e237888
Showing 4 changed files with 0 additions and 53 deletions.
1 change: 0 additions & 1 deletion docs/source/index.rst
@@ -33,7 +33,6 @@ xla_model
.. automodule:: torch_xla.core.functions
.. autofunction:: all_reduce
.. autofunction:: all_gather
.. autofunction:: nms

distributed
----------------------------------
23 changes: 0 additions & 23 deletions torch_xla/core/functions.py
@@ -84,29 +84,6 @@ def all_gather(value, dim=0):
  return AllGather.apply(value, dim)


def nms(boxes, scores, score_threshold, iou_threshold, output_size):
  """Performs a Non Maximal Suppression operation.

  Args:
    boxes (torch.Tensor): A `torch.Tensor` of shape `[N, 4]` listing the boxes
      coordinates in `(y0, x0, y1, x1)` form.
    scores (torch.Tensor): A `torch.Tensor` of shape `[N]` listing the scores
      of each box.
    score_threshold (torch.Tensor): The minimum score for a box to qualify as
      valid.
    iou_threshold (torch.Tensor): The minimum IOU (Intersection Over Union)
      score to trigger overlap logic.
    output_size (int): The maximum number of returned indices (must be lower or
      equal to N).

  Returns:
    A tuple of `torch.Tensor` with the first element being the selected box
    indices, and the second element being the number of valid boxes.
  """
  return torch_xla._XLAC._xla_nms(boxes, scores, score_threshold, iou_threshold,
                                  output_size)


def distributed_mm(w, x, split=1):
"""Performs a matrix multiplication with sharded weight.
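For reference, a minimal usage sketch of the wrapper removed above. The values are illustrative and it assumes the pre-removal torch_xla API, with both thresholds passed as scalar tensors as the deleted docstring describes:

import torch
import torch_xla.core.xla_model as xm
import torch_xla.core.functions as xf

device = xm.xla_device()
# Boxes are listed in (y0, x0, y1, x1) form, with one score per box.
boxes = torch.tensor([[0.0, 0.0, 10.0, 10.0],
                      [0.5, 0.5, 10.5, 10.5]], device=device)
scores = torch.tensor([0.9, 0.2], device=device)

indices, num_valid = xf.nms(
    boxes, scores,
    score_threshold=torch.tensor(0.1, device=device),
    iou_threshold=torch.tensor(0.5, device=device),
    output_size=2)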
2 changes: 0 additions & 2 deletions torch_xla/csrc/BUILD
@@ -47,7 +47,6 @@ ptxla_cc_library(
        "ir_dump_util.cpp",
        "matrix.cpp",
        "nll_loss.cpp",
        "nms_op.cpp",
        "pooling.cpp",
        "quant_util.cpp",
        "random.cpp",
@@ -87,7 +86,6 @@ ptxla_cc_library(
        "ir_dump_util.h",
        "matrix.h",
        "nll_loss.h",
        "nms_op.h",
        "pooling.h",
        "quant_util.h",
        "random.h",
27 changes: 0 additions & 27 deletions torch_xla/csrc/init_python_bindings.cpp
@@ -666,28 +666,6 @@ py::object GetRevisions() {
  return py_dict;
}

py::object XlaNms(const at::Tensor& boxes, const at::Tensor& scores,
                  const at::Tensor& score_threshold,
                  const at::Tensor& iou_threshold, int64_t output_size) {
  at::Tensor selected_indices;
  at::Tensor num_valid;
  {
    NoGilSection nogil;
    auto nms_result = tensor_methods::nms(
        bridge::GetXlaTensor(boxes), bridge::GetXlaTensor(scores),
        bridge::GetXlaTensor(score_threshold),
        bridge::GetXlaTensor(iou_threshold), output_size);
    selected_indices = bridge::AtenFromXlaTensor(std::move(nms_result.first));
    num_valid = bridge::AtenFromXlaTensor(std::move(nms_result.second));
  }
  auto result_tuple = py::tuple(2);
  result_tuple[0] =
      torch::autograd::make_variable(selected_indices, /*requires_grad=*/false);
  result_tuple[1] =
      torch::autograd::make_variable(num_valid, /*requires_grad=*/false);
  return result_tuple;
}

std::vector<at::Tensor> XlaUserComputation(
const std::string& opname, const std::vector<at::Tensor>& inputs,
runtime::ComputationClient::ComputationPtr computation) {
@@ -1086,11 +1064,6 @@ void InitXlaModuleBindings(py::module m) {
        [](const at::Tensor& tensor, int dim) {
          return GetXlaTensorDimensionSize(tensor, dim);
        });
  m.def("_xla_nms", [](const at::Tensor& boxes, const at::Tensor& scores,
                       const at::Tensor& score_threshold,
                       const at::Tensor& iou_threshold, int64_t output_size) {
    return XlaNms(boxes, scores, score_threshold, iou_threshold, output_size);
  });
  m.def("_xla_user_computation",
        [](const std::string& opname, const std::vector<at::Tensor>& inputs,
           const runtime::ComputationClient::ComputationPtr& computation) {
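The deleted `_xla_nms` binding returned a (selected_indices, num_valid) pair; per the removed docstring, only the first num_valid entries of the index tensor are meaningful. A hedged sketch of how a caller would have consumed the raw binding directly, assuming the pre-removal `_XLAC` module and illustrative inputs:

import torch
import torch_xla
import torch_xla.core.xla_model as xm

device = xm.xla_device()
boxes = torch.tensor([[0.0, 0.0, 10.0, 10.0],
                      [0.5, 0.5, 10.5, 10.5],
                      [20.0, 20.0, 30.0, 30.0]], device=device)
scores = torch.tensor([0.9, 0.2, 0.8], device=device)

# Positional arguments match the removed binding: boxes, scores,
# score_threshold (tensor), iou_threshold (tensor), output_size (int).
selected, num_valid = torch_xla._XLAC._xla_nms(
    boxes, scores,
    torch.tensor(0.1, device=device),
    torch.tensor(0.5, device=device),
    3)
kept = selected[:int(num_valid)]  # only the first num_valid indices are valid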
