Commit

Merge pull request #56 from Xilinx/release_rai_1_2
Merge main to release
mgehre-amd authored Jun 21, 2024
2 parents ee76171 + bb0b929 commit 98243be
Showing 1 changed file with 8 additions and 4 deletions.
12 changes: 8 additions & 4 deletions lib/Conversion/XTenNNToTorch.cpp
@@ -202,10 +202,14 @@ std::optional<ValueRange> resizeToTorch(ResizeOp op, ResizeOp::Adaptor adaptor,
 
   auto scalesAttr = adaptor.getScales();
   // Create a constant for the scales
-  auto denseScales = DenseElementsAttr::get(RankedTensorType::get({(long)scalesAttr.size()}, rewriter.getF32Type()), scalesAttr);
-  auto scalesConst = rewriter.create<Torch::ValueTensorLiteralOp>(loc,
-      Torch::ValueTensorType::get(op->getContext(), {scalesAttr.size()}, rewriter.getF32Type()),
-      denseScales);
+  auto shape =
+      llvm::SmallVector<int64_t>{static_cast<int64_t>(scalesAttr.size())};
+  auto denseScales = DenseElementsAttr::get(
+      RankedTensorType::get(shape, rewriter.getF32Type()), scalesAttr);
+  auto valueTensorType = Torch::ValueTensorType::get(op->getContext(), shape,
+                                                     rewriter.getF32Type());
+  auto scalesConst = rewriter.create<Torch::ValueTensorLiteralOp>(
+      loc, valueTensorType, denseScales);
 
   // Operands in order : X - roi - scales - sizes
   // roi and sizes are None because they are not supported by the xten representation of resize
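For context, the change amounts to building the constant's shape once as an llvm::SmallVector<int64_t> and reusing it for both the RankedTensorType of the dense attribute and the Torch::ValueTensorType of the literal, instead of casting scalesAttr.size() inline. The sketch below restates that pattern as a standalone helper; the function name, its parameters, and the assumption that the scale factors arrive as an llvm::ArrayRef<float> are illustrative only and not part of this commit.

// Minimal standalone sketch of the pattern introduced above, not code from
// the repository: the helper name and parameters are hypothetical.
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/Transforms/DialectConversion.h"
#include "torch-mlir/Dialect/Torch/IR/TorchOps.h"
#include "torch-mlir/Dialect/Torch/IR/TorchTypes.h"

using namespace mlir;
using namespace mlir::torch;

// Materialize a 1-D f32 torch.vtensor literal holding the resize scale
// factors, e.g. {1.0, 1.0, 2.0, 2.0} for a 2x spatial upscale of an NCHW
// tensor.
static Value createScalesConstant(ConversionPatternRewriter &rewriter,
                                  Location loc, llvm::ArrayRef<float> scales) {
  // Build the shape once as int64_t so the same vector can be passed to both
  // RankedTensorType::get and Torch::ValueTensorType::get without casts.
  auto shape = llvm::SmallVector<int64_t>{static_cast<int64_t>(scales.size())};
  auto denseScales = DenseElementsAttr::get(
      RankedTensorType::get(shape, rewriter.getF32Type()), scales);
  auto valueTensorType = Torch::ValueTensorType::get(
      rewriter.getContext(), shape, rewriter.getF32Type());
  return rewriter.create<Torch::ValueTensorLiteralOp>(loc, valueTensorType,
                                                      denseScales);
}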
