From 510e68903c46362eedb8f169cb079cf68da0f6c5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jos=C3=A9=20Valim?= Date: Mon, 13 May 2024 13:22:59 +0200 Subject: [PATCH] Deprecate ~V and ~M in favor of ~VEC and ~MAT --- exla/test/exla/backend_test.exs | 4 +- exla/test/exla/defn/expr_test.exs | 36 +++--- exla/test/exla/defn/vectorize_test.exs | 86 +++++++------- nx/guides/advanced/aggregation.livemd | 10 +- nx/lib/nx.ex | 62 ++++++---- nx/test/nx/defn/grad_test.exs | 82 ++++++------- nx/test/nx/lin_alg_test.exs | 28 ++--- nx/test/nx/non_finite_test.exs | 48 ++++---- nx/test/nx/tensor_test.exs | 16 +-- nx/test/nx/vectorize_test.exs | 154 ++++++++++++------------- nx/test/nx_test.exs | 36 +++--- torchx/test/torchx/complex_test.exs | 18 +-- torchx/test/torchx/nx_test.exs | 36 +++--- 13 files changed, 319 insertions(+), 297 deletions(-) diff --git a/exla/test/exla/backend_test.exs b/exla/test/exla/backend_test.exs index a81c6d7857..d6b2efccd6 100644 --- a/exla/test/exla/backend_test.exs +++ b/exla/test/exla/backend_test.exs @@ -1,7 +1,7 @@ defmodule EXLA.BackendTest do use EXLA.Case, async: true - import Nx, only: [sigil_V: 2] + import Nx, only: [sigil_VEC: 2] setup do Nx.default_backend(EXLA.Backend) @@ -192,7 +192,7 @@ defmodule EXLA.BackendTest do end test "conjugate" do - assert inspect(Nx.conjugate(~V[1 2-0i 3+0i 0-i 0-2i])) =~ + assert inspect(Nx.conjugate(~VEC[1 2-0i 3+0i 0-i 0-2i])) =~ "1.0-0.0i, 2.0+0.0i, 3.0-0.0i, 0.0+1.0i, 0.0+2.0i" end end diff --git a/exla/test/exla/defn/expr_test.exs b/exla/test/exla/defn/expr_test.exs index c540784c67..850b0aa497 100644 --- a/exla/test/exla/defn/expr_test.exs +++ b/exla/test/exla/defn/expr_test.exs @@ -815,17 +815,17 @@ defmodule EXLA.Defn.ExprTest do test "fft" do assert_all_close( fft(Nx.tensor([1, 1, 0, 0]), length: 5), - ~V[2.0+0.0i 1.3090-0.9511i 0.1909-0.5877i 0.1909+0.5877i 1.3090+0.9510i] + ~VEC[2.0+0.0i 1.3090-0.9511i 0.1909-0.5877i 0.1909+0.5877i 1.3090+0.9510i] ) assert_all_close( fft(Nx.tensor([1, 1, 0, 0, 2, 3]), length: 4), - ~V[2.0+0.0i 1.0-1.0i 0.0+0.0i 1.0+1.0i] + ~VEC[2.0+0.0i 1.0-1.0i 0.0+0.0i 1.0+1.0i] ) assert_all_close( fft(Nx.tensor([1, 1, 0]), length: :power_of_two), - ~V[2.0+0.0i 1.0-1.0i 0.0+0.0i 1.0+1.0i] + ~VEC[2.0+0.0i 1.0-1.0i 0.0+0.0i 1.0+1.0i] ) end @@ -847,12 +847,12 @@ defmodule EXLA.Defn.ExprTest do length: :power_of_two ), Nx.stack([ - ~M[ + ~MAT[ 2 1.0-1.0i 0 1.0+1.0i 1 1 1 1 1 -1i -1 1i ], - ~M[ + ~MAT[ 1 -1i -1 1i 1 1 1 1 2 1.0-1.0i 0 1.0+1.0i @@ -877,12 +877,12 @@ defmodule EXLA.Defn.ExprTest do length: 4 ), Nx.stack([ - ~M[ + ~MAT[ 2 1.0-1.0i 0 1.0+1.0i 1 1 1 1 1 -1i -1 1i ], - ~M[ + ~MAT[ 1 -1i -1 1i 1 1 1 1 2 1.0-1.0i 0 1.0+1.0i @@ -907,12 +907,12 @@ defmodule EXLA.Defn.ExprTest do length: 4 ), Nx.stack([ - ~M[ + ~MAT[ 2 1.0-1.0i 0 1.0+1.0i 1 1 1 1 1 -1i -1 1i ], - ~M[ + ~MAT[ 1 -1i -1 1i 1 1 1 1 2 1.0-1.0i 0 1.0+1.0i @@ -923,19 +923,19 @@ defmodule EXLA.Defn.ExprTest do test "ifft" do assert_all_close( - ifft(~V[5 5 5 5 5], + ifft(~VEC[5 5 5 5 5], length: 5 ), Nx.tensor([5, 0, 0, 0, 0]) ) assert_all_close( - ifft(~V[2.0+0.0i 1.0-1.0i 0.0+0.0i 1.0+1.0i 5 6], length: 4), + ifft(~VEC[2.0+0.0i 1.0-1.0i 0.0+0.0i 1.0+1.0i 5 6], length: 4), Nx.tensor([1, 1, 0, 0]) ) assert_all_close( - ifft(~V[2 0 0], length: :power_of_two), + ifft(~VEC[2 0 0], length: :power_of_two), Nx.tensor([0.5, 0.5, 0.5, 0.5]) ) end @@ -944,12 +944,12 @@ defmodule EXLA.Defn.ExprTest do assert_all_close( ifft( Nx.stack([ - ~M[ + ~MAT[ 2 1.0-1.0i 0 1.0+1.0i 1 1 1 1 1 -1i -1 1i ], - ~M[ + ~MAT[ 1 -1i -1 1i 1 1 1 1 2 1.0-1.0i 0 1.0+1.0i @@ -988,12 
+988,12 @@ defmodule EXLA.Defn.ExprTest do length: 4 ), Nx.stack([ - ~M[ + ~MAT[ 2 1.0+1.0i 0 1.0-1.0i 1 1 1 1 1 1i -1 -1i ], - ~M[ + ~MAT[ 1 1i -1 -1i 1 1 1 1 2 1.0+1.0i 0 1.0-1.0i @@ -1018,12 +1018,12 @@ defmodule EXLA.Defn.ExprTest do length: 4 ), Nx.stack([ - ~M[ + ~MAT[ 2 1.0+1.0i 0 1.0-1.0i 1 1 1 1 1 1i -1 -1i ], - ~M[ + ~MAT[ 1 1i -1 -1i 1 1 1 1 2 1.0+1.0i 0 1.0-1.0i diff --git a/exla/test/exla/defn/vectorize_test.exs b/exla/test/exla/defn/vectorize_test.exs index c81661c5f6..baf3bf3220 100644 --- a/exla/test/exla/defn/vectorize_test.exs +++ b/exla/test/exla/defn/vectorize_test.exs @@ -182,9 +182,9 @@ defmodule EXLA.Defn.VectorizeTest do test "simple if" do # this tests the case where we have a single vectorized predicate - pred = Nx.vectorize(~V[0 1 0], :pred) + pred = Nx.vectorize(~VEC[0 1 0], :pred) - assert_equal(vectorized_if(pred, 1, 2, pid: self()), Nx.vectorize(~V[2 1 2], :pred)) + assert_equal(vectorized_if(pred, 1, 2, pid: self()), Nx.vectorize(~VEC[2 1 2], :pred)) assert_received {:vectorization_test, t, clause: "if"} assert_equal(t, Nx.tensor(1)) @@ -195,12 +195,12 @@ defmodule EXLA.Defn.VectorizeTest do test "simple cond" do # this tests the case where we have a two vectorized predicates - pred1 = Nx.vectorize(~V[1 0 0], :pred) - pred2 = Nx.vectorize(~V[0 0 0], :pred) + pred1 = Nx.vectorize(~VEC[1 0 0], :pred) + pred2 = Nx.vectorize(~VEC[0 0 0], :pred) assert_equal( vectorized_cond(pred1, 1, pred2, 2, 3, pid: self()), - Nx.vectorize(~V[1 3 3], :pred) + Nx.vectorize(~VEC[1 3 3], :pred) ) assert_received {:vectorization_test, t, clause: "clause_1"} @@ -211,20 +211,20 @@ defmodule EXLA.Defn.VectorizeTest do end test "if with container result" do - pred1 = Nx.vectorize(~V[2 0 0], :pred) + pred1 = Nx.vectorize(~VEC[2 0 0], :pred) result = vectorized_if( pred1, {1, 2, 3}, - {7, 8, Nx.vectorize(~V[9 10 11], :x)}, + {7, 8, Nx.vectorize(~VEC[9 10 11], :x)}, pid: self() ) assert_equal(result, { - Nx.vectorize(~V[1 7 7], :pred), - Nx.vectorize(~V[2 8 8], :pred), - Nx.vectorize(~M[ + Nx.vectorize(~VEC[1 7 7], :pred), + Nx.vectorize(~VEC[2 8 8], :pred), + Nx.vectorize(~MAT[ 3 3 3 9 10 11 9 10 11 @@ -248,8 +248,8 @@ defmodule EXLA.Defn.VectorizeTest do end test "only executes selected branches" do - t = Nx.vectorize(~V[1], :pred) - f = Nx.vectorize(~V[0], :pred) + t = Nx.vectorize(~VEC[1], :pred) + f = Nx.vectorize(~VEC[0], :pred) assert = fn res, val, clause -> t = Nx.tensor(val) @@ -267,74 +267,74 @@ defmodule EXLA.Defn.VectorizeTest do test "1 vectorized pred in the beginning" do assert_equal( - cond4(Nx.vectorize(~V[0 1], :pred), 10, 0, 20, 0, 30, 40), - Nx.vectorize(~V[40 10], :pred) + cond4(Nx.vectorize(~VEC[0 1], :pred), 10, 0, 20, 0, 30, 40), + Nx.vectorize(~VEC[40 10], :pred) ) assert_equal( - cond4(Nx.vectorize(~V[0 0], :pred), 10, 1, 20, 0, 30, 40), - Nx.vectorize(~V[20 20], :pred) + cond4(Nx.vectorize(~VEC[0 0], :pred), 10, 1, 20, 0, 30, 40), + Nx.vectorize(~VEC[20 20], :pred) ) assert_equal( - cond4(Nx.vectorize(~V[0 0], :pred), 10, 0, 20, 1, 30, 40), - Nx.vectorize(~V[30 30], :pred) + cond4(Nx.vectorize(~VEC[0 0], :pred), 10, 0, 20, 1, 30, 40), + Nx.vectorize(~VEC[30 30], :pred) ) assert_equal( - cond4(Nx.vectorize(~V[0 0], :pred), 10, 0, 20, 0, 30, 40), - Nx.vectorize(~V[40 40], :pred) + cond4(Nx.vectorize(~VEC[0 0], :pred), 10, 0, 20, 0, 30, 40), + Nx.vectorize(~VEC[40 40], :pred) ) end test "1 vectorized pred in the second but not last position" do assert_equal( - cond4(0, 10, Nx.vectorize(~V[0 1], :pred), 20, 0, 30, 40), - Nx.vectorize(~V[40 20], :pred) + cond4(0, 
10, Nx.vectorize(~VEC[0 1], :pred), 20, 0, 30, 40), + Nx.vectorize(~VEC[40 20], :pred) ) assert_equal( - cond4(1, 10, Nx.vectorize(~V[0 1], :pred), 20, 0, 30, 40), - Nx.vectorize(~V[10 10], :pred) + cond4(1, 10, Nx.vectorize(~VEC[0 1], :pred), 20, 0, 30, 40), + Nx.vectorize(~VEC[10 10], :pred) ) assert_equal( - cond4(0, 10, Nx.vectorize(~V[0 0], :pred), 20, 1, 30, 40), - Nx.vectorize(~V[30 30], :pred) + cond4(0, 10, Nx.vectorize(~VEC[0 0], :pred), 20, 1, 30, 40), + Nx.vectorize(~VEC[30 30], :pred) ) assert_equal( - cond4(0, 10, Nx.vectorize(~V[0 0], :pred), 20, 0, 30, 40), - Nx.vectorize(~V[40 40], :pred) + cond4(0, 10, Nx.vectorize(~VEC[0 0], :pred), 20, 0, 30, 40), + Nx.vectorize(~VEC[40 40], :pred) ) end test "1 vectorized pred in the last position" do assert_equal( - cond4(0, 10, 0, 20, Nx.vectorize(~V[0 1], :pred), 30, 40), - Nx.vectorize(~V[40 30], :pred) + cond4(0, 10, 0, 20, Nx.vectorize(~VEC[0 1], :pred), 30, 40), + Nx.vectorize(~VEC[40 30], :pred) ) assert_equal( - cond4(1, 10, 0, 20, Nx.vectorize(~V[0 1], :pred), 30, 40), - Nx.vectorize(~V[10 10], :pred) + cond4(1, 10, 0, 20, Nx.vectorize(~VEC[0 1], :pred), 30, 40), + Nx.vectorize(~VEC[10 10], :pred) ) assert_equal( - cond4(0, 10, 1, 20, Nx.vectorize(~V[0 1], :pred), 30, 40), - Nx.vectorize(~V[20 20], :pred) + cond4(0, 10, 1, 20, Nx.vectorize(~VEC[0 1], :pred), 30, 40), + Nx.vectorize(~VEC[20 20], :pred) ) assert_equal( - cond4(0, 10, 0, 20, Nx.vectorize(~V[0 0], :pred), 30, 40), - Nx.vectorize(~V[40 40], :pred) + cond4(0, 10, 0, 20, Nx.vectorize(~VEC[0 0], :pred), 30, 40), + Nx.vectorize(~VEC[40 40], :pred) ) end test "2 vectorized preds with different axes" do assert_equal( - cond4(Nx.vectorize(~V[0 1 0], :pred1), 10, Nx.vectorize(~V[1 0], :pred2), 20, 0, 30, 40), - Nx.vectorize(~M[ + cond4(Nx.vectorize(~VEC[0 1 0], :pred1), 10, Nx.vectorize(~VEC[1 0], :pred2), 20, 0, 30, 40), + Nx.vectorize(~MAT[ 20 40 10 10 20 40 @@ -345,15 +345,15 @@ defmodule EXLA.Defn.VectorizeTest do test "2 vectorized preds with different axes + clauses that match either" do assert_equal( cond4( - Nx.vectorize(~V[0 1 0], :pred1), - Nx.vectorize(~V[10 100], :pred2), - Nx.vectorize(~V[1 0], :pred2), - Nx.vectorize(~V[20 200 2000], :pred1), + Nx.vectorize(~VEC[0 1 0], :pred1), + Nx.vectorize(~VEC[10 100], :pred2), + Nx.vectorize(~VEC[1 0], :pred2), + Nx.vectorize(~VEC[20 200 2000], :pred1), 0, 30, 40 ), - Nx.vectorize(~M[ + Nx.vectorize(~MAT[ 20 40 10 100 2000 40 diff --git a/nx/guides/advanced/aggregation.livemd b/nx/guides/advanced/aggregation.livemd index 9eab9df56b..587db22bac 100644 --- a/nx/guides/advanced/aggregation.livemd +++ b/nx/guides/advanced/aggregation.livemd @@ -75,7 +75,7 @@ max_y = Nx.reduce_max(m, axes: [:y]) Let's consider another example with [Nx.weighted_mean](https://hexdocs.pm/nx/Nx.html#weighted_mean/3). It supports full-tensor and per axis operations. We display how to compute the _weighted mean aggregate_ of a matrix with the example below of a 2D tensor of shape `{2,2}` labeled `m`: ```elixir -m = ~M[ +m = ~MAT[ 1 2 3 4 ] @@ -96,7 +96,7 @@ m = ~M[ First, we'll compute the full-tensor aggregation. The calculations are developed below. We calculate an "array product" (aka [Hadamard product](), an element-wise product) of our tensor with the tensor of weights, then sum all the elements and divide by the sum of the weights. ```elixir -w = ~M[ +w = ~MAT[ 10 20 30 40 ] @@ -121,7 +121,7 @@ man_w_avg = (1 * 10 + 2 * 20 + 3 * 30 + 4 * 40) / (10 + 20 + 30 + 40) The weighted mean can be computed _per axis_. 
Let's compute it along the _first_ axis (`axes: [0]`): you calculate "by column", so you aggregate/reduce along the first axis: ```elixir -w = ~M[ +w = ~MAT[ 10 20 30 40 ] @@ -148,7 +148,7 @@ man_w_avg_x = [(1 * 10 + 3 * 30) / (10 + 30), (2 * 20 + 4 * 40) / (20 + 40)] We calculate weighted mean of a square matrix along the _second_ axis (`axes: [1]`): you calculate per row, so you aggregate/reduce along the second axis. ```elixir -w = ~M[ +w = ~MAT[ 10 20 30 40 ] @@ -816,7 +816,7 @@ Nx.argmin(t, axis: 3) You have the `:tie_break` option to decide how to operate with you have several occurences of the result. It defaults to `tie_break: :low`. ```elixir -t4 = ~V[2 0 0 0 1] +t4 = ~VEC[2 0 0 0 1] %{ argmin_with_default: Nx.argmin(t4) |> Nx.to_number(), diff --git a/nx/lib/nx.ex b/nx/lib/nx.ex index d134a69093..f8158634e9 100644 --- a/nx/lib/nx.ex +++ b/nx/lib/nx.ex @@ -87,14 +87,14 @@ defmodule Nx do is available: iex> import Nx, only: :sigils - iex> ~V[1 2 3]f32 + iex> ~VEC[1 2 3]f32 #Nx.Tensor< f32[3] [1.0, 2.0, 3.0] > iex> import Nx, only: :sigils - iex> ~M''' + iex> ~MAT''' ...> 1 2 3 ...> 4 5 6 ...> '''s32 @@ -8158,8 +8158,8 @@ defmodule Nx do 0.0 > - iex> import Nx, only: [sigil_V: 2] - iex> Nx.phase(~V[1+2i -2+1i]) + iex> import Nx, only: [sigil_VEC: 2] + iex> Nx.phase(~VEC[1+2i -2+1i]) #Nx.Tensor< f32[2] [1.1071487665176392, 2.677945137023926] @@ -17070,8 +17070,30 @@ defmodule Nx do ## Sigils + @doc false + @deprecated "Use ~MAT instead" + defmacro sigil_M({:<<>>, _meta, [string]}, modifiers) do + {numbers, type} = string |> String.trim() |> binary_to_numbers() + numbers_to_tensor(numbers, type, modifiers) + end + + @doc false + @deprecated "Use ~VEC instead" + defmacro sigil_V({:<<>>, _meta, [string]}, modifiers) do + string + |> String.trim() + |> binary_to_numbers() + |> case do + {[numbers], type} -> + numbers_to_tensor(numbers, type, modifiers) + + _ -> + raise ArgumentError, "must be one-dimensional" + end + end + @doc """ - A convenient `~M` sigil for building matrices (two-dimensional tensors). + A convenient `~MAT` sigil for building matrices (two-dimensional tensors). ## Examples @@ -17081,7 +17103,7 @@ defmodule Nx do Then you use the sigil to create matrices. The sigil: - ~M< + ~MAT< -1 0 0 1 0 2 0 0 0 0 3 0 @@ -17103,35 +17125,35 @@ defmodule Nx do as a sigil modifier: iex> import Nx, only: :sigils - iex> ~M[0.1 0.2 0.3 0.4]f16 + iex> ~MAT[0.1 0.2 0.3 0.4]f16 #Nx.Tensor< f16[1][4] [ [0.0999755859375, 0.199951171875, 0.300048828125, 0.39990234375] ] > - iex> ~M[1+1i 2-2.0i -3] + iex> ~MAT[1+1i 2-2.0i -3] #Nx.Tensor< c64[1][3] [ [1.0+1.0i, 2.0-2.0i, -3.0+0.0i] ] > - iex> ~M[1 Inf NaN] + iex> ~MAT[1 Inf NaN] #Nx.Tensor< f32[1][3] [ [1.0, Inf, NaN] ] > - iex> ~M[1i Inf NaN] + iex> ~MAT[1i Inf NaN] #Nx.Tensor< c64[1][3] [ [0.0+1.0i, Inf+0.0i, NaN+0.0i] ] > - iex> ~M[1i Inf+2i NaN-Infi] + iex> ~MAT[1i Inf+2i NaN-Infi] #Nx.Tensor< c64[1][3] [ @@ -17141,13 +17163,13 @@ defmodule Nx do """ @doc type: :creation - defmacro sigil_M({:<<>>, _meta, [string]}, modifiers) do + defmacro sigil_MAT({:<<>>, _meta, [string]}, modifiers) do {numbers, type} = string |> String.trim() |> binary_to_numbers() numbers_to_tensor(numbers, type, modifiers) end @doc """ - A convenient `~V` sigil for building vectors (one-dimensional tensors). + A convenient `~VEC` sigil for building vectors (one-dimensional tensors). ## Examples @@ -17157,7 +17179,7 @@ defmodule Nx do Then you use the sigil to create vectors. 
The sigil: - ~V[-1 0 0 1] + ~VEC[-1 0 0 1] Is equivalent to: @@ -17169,34 +17191,34 @@ defmodule Nx do as a sigil modifier: iex> import Nx, only: :sigils - iex> ~V[0.1 0.2 0.3 0.4]f16 + iex> ~VEC[0.1 0.2 0.3 0.4]f16 #Nx.Tensor< f16[4] [0.0999755859375, 0.199951171875, 0.300048828125, 0.39990234375] > - iex> ~V[1+1i 2-2.0i -3] + iex> ~VEC[1+1i 2-2.0i -3] #Nx.Tensor< c64[3] [1.0+1.0i, 2.0-2.0i, -3.0+0.0i] > - iex> ~V[1 Inf NaN] + iex> ~VEC[1 Inf NaN] #Nx.Tensor< f32[3] [1.0, Inf, NaN] > - iex> ~V[1i Inf NaN] + iex> ~VEC[1i Inf NaN] #Nx.Tensor< c64[3] [0.0+1.0i, Inf+0.0i, NaN+0.0i] > - iex> ~V[1i Inf+2i NaN-Infi] + iex> ~VEC[1i Inf+2i NaN-Infi] #Nx.Tensor< c64[3] [0.0+1.0i, Inf+2.0i, NaN-Infi] > """ @doc type: :creation - defmacro sigil_V({:<<>>, _meta, [string]}, modifiers) do + defmacro sigil_VEC({:<<>>, _meta, [string]}, modifiers) do string |> String.trim() |> binary_to_numbers() diff --git a/nx/test/nx/defn/grad_test.exs b/nx/test/nx/defn/grad_test.exs index 20202fb8aa..4242c548f3 100644 --- a/nx/test/nx/defn/grad_test.exs +++ b/nx/test/nx/defn/grad_test.exs @@ -1063,26 +1063,26 @@ defmodule Nx.Defn.GradTest do end test "works with complex numbers" do - t = Nx.reshape(~V[1+1i 2 3-3i], {1, 1, 3}) - k = Nx.reshape(~V[1 2i 3i], {1, 1, 3}) + t = Nx.reshape(~VEC[1+1i 2 3-3i], {1, 1, 3}) + k = Nx.reshape(~VEC[1 2i 3i], {1, 1, 3}) assert_all_close( - Nx.reshape(~V[1 2i 3i], {1, 1, 3}), + Nx.reshape(~VEC[1 2i 3i], {1, 1, 3}), grad_sum_conv_x(t, k) ) assert_all_close( - Nx.reshape(~V[1+1i 2 3-3i], {1, 1, 3}), + Nx.reshape(~VEC[1+1i 2 3-3i], {1, 1, 3}), grad_sum_conv_y(t, k) ) assert_all_close( - Nx.reshape(~V[-1.0926-0.5343i -3.2978i 99.3534-14.2328i], {1, 1, 3}), + Nx.reshape(~VEC[-1.0926-0.5343i -3.2978i 99.3534-14.2328i], {1, 1, 3}), grad_sum_conv_x_cos_x_sin_y(t, k) ) assert_all_close( - Nx.reshape(~V[0.45046-0.5343i -1.56562 -100.3435+14.2328i], {1, 1, 3}), + Nx.reshape(~VEC[0.45046-0.5343i -1.56562 -100.3435+14.2328i], {1, 1, 3}), grad_sum_conv_y_cos_x_sin_y(t, k) ) end @@ -1753,10 +1753,10 @@ defmodule Nx.Defn.GradTest do assert concatenate_grad_power(Nx.tensor([[1.0, 2.0], [3.0, 4.0]])) == Nx.tensor([[5.0, 16.0], [33.0, 56.0]]) - assert concatenate_grad_power(~M[ + assert concatenate_grad_power(~MAT[ 1i 2 3 4i - ]) == ~M[ + ]) == ~MAT[ -3+2i 16 33 -48+8i ] @@ -1787,17 +1787,17 @@ defmodule Nx.Defn.GradTest do end test "computes for 2x2 complex matrix" do - t = ~M[ + t = ~MAT[ 1 -2i 2i 5 ] - assert cholesky_grad(t) == ~M[ + assert cholesky_grad(t) == ~MAT[ 2.5-1i -1i 1+1i 0.5 ] - assert_all_close(cholesky_cos_grad(t), ~M[ + assert_all_close(cholesky_cos_grad(t), ~MAT[ -5.7305 0.8414i -4.4683i -0.4207 ]) @@ -1863,11 +1863,11 @@ defmodule Nx.Defn.GradTest do test "computes qr_megapower_grad for complex tensor" do assert_all_close( - qr_megapower_grad(~M[ + qr_megapower_grad(~MAT[ 1 2i 3 4i ]), - ~M[ + ~MAT[ 1.94476 2.72264i 4.98145 5.87086i ] @@ -1921,11 +1921,11 @@ defmodule Nx.Defn.GradTest do test "computes lu_megapower_grad for complex tensor" do assert_all_close( - lu_megapower_grad(~M[ + lu_megapower_grad(~MAT[ 1i 2 1 4i ]), - ~M[ + ~MAT[ 6.1484942i 0.76084137 5.0678897 6.7508316i ] @@ -2034,11 +2034,11 @@ defmodule Nx.Defn.GradTest do test "computes grad for complex tensor" do assert_all_close( - invert_grad(~M[ + invert_grad(~MAT[ 1i 1i 0 1 ]), - ~M[ + ~MAT[ 1+i -1i 0 0 ] @@ -2337,7 +2337,7 @@ defmodule Nx.Defn.GradTest do [54.598150033144236, 148.4131591025766, 403.4287934927351] ]) - assert_all_close(grad_sum_exp_reverse(~V[1 -2i 3]), ~V[2.7182 -0.4161-0.9092i 20.0855]) + 
assert_all_close(grad_sum_exp_reverse(~VEC[1 -2i 3]), ~VEC[2.7182 -0.4161-0.9092i 20.0855]) assert grad_sum_reverse_exp(Nx.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])) == Nx.tensor([ @@ -2372,23 +2372,23 @@ defmodule Nx.Defn.GradTest do end test "works with complex" do - assert_all_close(grad_abs(~V[0 1i 2]), ~V[0 -1i 1]) + assert_all_close(grad_abs(~VEC[0 1i 2]), ~VEC[0 -1i 1]) # Ensure our definition is in accordance with the definition # for abs_squared at the JuliaDiff reference: grad(sum(abs(t))) = 2conj(t) - assert_all_close(grad_abs_squared(~V[0 1 2]), ~V[0 2 4]) - assert_all_close(grad_abs_squared(~V[0 1i 2 -3i]), ~V[0 -2i 4 6i]) + assert_all_close(grad_abs_squared(~VEC[0 1 2]), ~VEC[0 2 4]) + assert_all_close(grad_abs_squared(~VEC[0 1i 2 -3i]), ~VEC[0 -2i 4 6i]) - t = ~V[0 1+i 2+2i 3+3i] + t = ~VEC[0 1+i 2+2i 3+3i] assert_all_close( grad_abs(t), - ~V[0 0.7071-0.7071i 0.7071-0.7071i 0.7071-0.7071i] + ~VEC[0 0.7071-0.7071i 0.7071-0.7071i 0.7071-0.7071i] ) assert_all_close( grad_cos_abs_sin(t), - ~V[0 -0.3120795+1.2447729i -0.05693477-2.053038i -0.00780554-5.6348686i] + ~VEC[0 -0.3120795+1.2447729i -0.05693477-2.053038i -0.00780554-5.6348686i] ) end end @@ -2461,12 +2461,12 @@ defmodule Nx.Defn.GradTest do end test "computes gradient with sum+select for complex tensor" do - lhs = grad_sum_select_2by2(~M[ + lhs = grad_sum_select_2by2(~MAT[ 1+i 2+i 3+i -1-4i ]) - rhs = ~M[ + rhs = ~MAT[ 1.4686+2.2873i 3.9923+6.2176i 10.8522+16.9013i 22.9790+14.7448i ] @@ -2512,17 +2512,17 @@ defmodule Nx.Defn.GradTest do test "as_type takes the real part when downcasting complex" do # Note that, due to the way the grad_as_type_downcast defn is defined, # the expected grad is the same as the grad for: - # Nx.sum(Nx.cos(~V[1 2 3])), which is -sin(~V[1 2 3]) - t = ~V[1+i 2+i 3+i] + # Nx.sum(Nx.cos(~VEC[1 2 3])), which is -sin(~VEC[1 2 3]) + t = ~VEC[1+i 2+i 3+i] grad = grad_as_type_downcast(t) - assert grad == grad_as_type_downcast(~V[1 2 3]) + assert grad == grad_as_type_downcast(~VEC[1 2 3]) assert grad == Nx.negate(Nx.sin(Nx.real(t))) end test "as_type passes through for non-downcasting calls" do assert grad_as_type(Nx.tensor([1, 2, 3])) == Nx.tensor([1.0, 1.0, 1.0]) - assert grad_as_type_complex(~V[1+i 2+i 3+i]) == Nx.tensor([1.0, 1.0, 1.0]) + assert grad_as_type_complex(~VEC[1+i 2+i 3+i]) == Nx.tensor([1.0, 1.0, 1.0]) end test "bitcast passes through" do @@ -2754,8 +2754,8 @@ defmodule Nx.Defn.GradTest do ]) assert_all_close( - grad_reshape_mean_0_sum(~V[1 2i 3 1 -2i -1]), - ~V[0.3333333 -0.166666i 0.111111 0.3333333 0.1666666i -0.333333] + grad_reshape_mean_0_sum(~VEC[1 2i 3 1 -2i -1]), + ~VEC[0.3333333 -0.166666i 0.111111 0.3333333 0.1666666i -0.333333] ) assert grad_reshape_mean_0_sum(Nx.tensor([1, 2, 3, 4, 5, 6])) == @@ -2786,8 +2786,8 @@ defmodule Nx.Defn.GradTest do ]) assert_all_close( - grad_transpose_mean_0_sum(~M[1 2i 3 1 -2i -1]), - ~M[0.1666 -0.0833i 0.0555 0.1666 0.0833i -0.1666] + grad_transpose_mean_0_sum(~MAT[1 2i 3 1 -2i -1]), + ~MAT[0.1666 -0.0833i 0.0555 0.1666 0.0833i -0.1666] ) assert grad_transpose_mean_1_sum(Nx.tensor([[1, 2, 3], [4, 5, 6]])) == @@ -3141,7 +3141,7 @@ defmodule Nx.Defn.GradTest do type: {:c, 64} ) == grad_sum_take_along_axis( - ~M[ + ~MAT[ 0 1i 2i 3 4 5i @@ -3264,9 +3264,9 @@ defmodule Nx.Defn.GradTest do ]) ) - assert ~M[0 4i 24 -12i] == + assert ~MAT[0 4i 24 -12i] == grad_sum_take_axis_1_power( - ~M[0 1i 2 -3i], + ~MAT[0 1i 2 -3i], Nx.tensor([[0, 1, 2, 2, 2, 3], [0, 1, 2, 2, 2, 3]]) ) @@ -3408,13 +3408,13 @@ defmodule Nx.Defn.GradTest do ]) ) - assert ~M[ + 
assert ~MAT[ 0 2i 0 0 8i 0 0 0 16 0 0 0 ] == grad_sum_gather_power( - ~M[ + ~MAT[ 0 1i 2 -3i 4i 5 6i 7 8 9i 10 11i @@ -3514,7 +3514,7 @@ defmodule Nx.Defn.GradTest do ) assert_all_close( - ~M[ + ~MAT[ 14.16124 12.1516 0 0 0 0 -2.89999 0.737196 0.737196 0.737196 0 0 0 -108.54785i 0 0 0 0 @@ -3550,7 +3550,7 @@ defmodule Nx.Defn.GradTest do ) assert_all_close( - ~M[ + ~MAT[ 1.0896463 0.28715 0 0 0 0 -0.8319124 0.077385 0.077385 0.077385 0 0 0 -0.57873374i 0 0 0 0 diff --git a/nx/test/nx/lin_alg_test.exs b/nx/test/nx/lin_alg_test.exs index 6afe675192..ce4b7b81b6 100644 --- a/nx/test/nx/lin_alg_test.exs +++ b/nx/test/nx/lin_alg_test.exs @@ -86,15 +86,15 @@ defmodule Nx.LinAlgTest do end test "works with complex tensors" do - a = ~M[ + a = ~MAT[ 1 0 i -1i 0 1i 1 1 1 ] - b = ~V[3+i 4 2-2i] + b = ~VEC[3+i 4 2-2i] - result = ~V[i 2 -3i] + result = ~VEC[i 2 -3i] assert_all_close(Nx.LinAlg.solve(a, b), result) end @@ -121,13 +121,13 @@ defmodule Nx.LinAlgTest do end test "works with complex tensors" do - a = ~M[ + a = ~MAT[ 1 0 i 0 -1i 0 0 0 2 ] - expected_result = ~M[ + expected_result = ~MAT[ 1 0 -0.5i 0 1i 0 0 0 0.5 @@ -280,7 +280,7 @@ defmodule Nx.LinAlgTest do describe "matrix_power" do test "supports complex with positive exponent" do - a = ~M[ + a = ~MAT[ 1 1i -1i 1 ] @@ -291,7 +291,7 @@ defmodule Nx.LinAlgTest do end test "supports complex with 0 exponent" do - a = ~M[ + a = ~MAT[ 1 1i -1i 1 ] @@ -300,12 +300,12 @@ defmodule Nx.LinAlgTest do end test "supports complex with negative exponent" do - a = ~M[ + a = ~MAT[ 1 -0.5i 0 0.5 ] - result = ~M[ + result = ~MAT[ 1 15i 0 16 ] @@ -448,7 +448,7 @@ defmodule Nx.LinAlgTest do end test "works with complex matrix" do - t = ~M[ + t = ~MAT[ 1 0 1i 0 2 -1i 1 1 1 @@ -456,13 +456,13 @@ defmodule Nx.LinAlgTest do {q, r} = Nx.LinAlg.qr(t) - assert_all_close(q, ~M[ + assert_all_close(q, ~MAT[ -0.7071 0.2357 -0.6666 0 -0.9428 -0.3333 -0.7071 -0.2357 0.6666 ]) - assert_all_close(r, ~M[ + assert_all_close(r, ~MAT[ -1.4142 -0.7071 -0.7071-0.7071i 0 -2.1213 -0.2357+1.1785i 0 0 0.6666-0.3333i @@ -849,7 +849,7 @@ defmodule Nx.LinAlgTest do test "works with f16" do x = Nx.tensor([[0, 0], [0, 0]], type: :f16) - assert Nx.LinAlg.svd(x) == {Nx.eye(2, type: :f16), ~V"0.0 0.0"f16, Nx.eye(2, type: :f16)} + assert Nx.LinAlg.svd(x) == {Nx.eye(2, type: :f16), ~VEC"0.0 0.0"f16, Nx.eye(2, type: :f16)} x = Nx.tensor([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]], type: :f16) assert {u, s, vt} = Nx.LinAlg.svd(x) @@ -860,7 +860,7 @@ defmodule Nx.LinAlgTest do test "works with f64" do x = Nx.tensor([[0, 0], [0, 0]], type: :f64) - assert Nx.LinAlg.svd(x) == {Nx.eye(2, type: :f64), ~V"0.0 0.0"f64, Nx.eye(2, type: :f64)} + assert Nx.LinAlg.svd(x) == {Nx.eye(2, type: :f64), ~VEC"0.0 0.0"f64, Nx.eye(2, type: :f64)} x = Nx.tensor([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]], type: :f64) assert {u, s, vt} = Nx.LinAlg.svd(x) diff --git a/nx/test/nx/non_finite_test.exs b/nx/test/nx/non_finite_test.exs index 1ec426c350..563657f845 100644 --- a/nx/test/nx/non_finite_test.exs +++ b/nx/test/nx/non_finite_test.exs @@ -225,42 +225,42 @@ defmodule Nx.NonFiniteTest do test "max" do # infinity as right arg - assert ~V[Inf] == Nx.max(~V[-Inf], ~V[Inf]) - assert ~V[Inf] == Nx.max(~V[1], ~V[Inf]) - assert ~V[NaN] == Nx.max(~V[NaN], ~V[Inf]) - assert ~V[Inf] == Nx.max(~V[Inf], ~V[Inf]) + assert ~VEC[Inf] == Nx.max(~VEC[-Inf], ~VEC[Inf]) + assert ~VEC[Inf] == Nx.max(~VEC[1], ~VEC[Inf]) + assert ~VEC[NaN] == Nx.max(~VEC[NaN], ~VEC[Inf]) + assert ~VEC[Inf] == Nx.max(~VEC[Inf], 
~VEC[Inf]) # neg_inf as right arg - assert ~V[-Inf] == Nx.max(~V[-Inf], ~V[-Inf]) - assert ~V[1.0] == Nx.max(~V[1], ~V[-Inf]) - assert ~V[NaN] == Nx.max(~V[NaN], ~V[-Inf]) - assert ~V[Inf] == Nx.max(~V[Inf], ~V[-Inf]) + assert ~VEC[-Inf] == Nx.max(~VEC[-Inf], ~VEC[-Inf]) + assert ~VEC[1.0] == Nx.max(~VEC[1], ~VEC[-Inf]) + assert ~VEC[NaN] == Nx.max(~VEC[NaN], ~VEC[-Inf]) + assert ~VEC[Inf] == Nx.max(~VEC[Inf], ~VEC[-Inf]) # nan as right arg - assert ~V[NaN] == Nx.max(~V[-Inf], ~V[NaN]) - assert ~V[NaN] == Nx.max(~V[1], ~V[NaN]) - assert ~V[NaN] == Nx.max(~V[NaN], ~V[NaN]) - assert ~V[NaN] == Nx.max(~V[Inf], ~V[NaN]) + assert ~VEC[NaN] == Nx.max(~VEC[-Inf], ~VEC[NaN]) + assert ~VEC[NaN] == Nx.max(~VEC[1], ~VEC[NaN]) + assert ~VEC[NaN] == Nx.max(~VEC[NaN], ~VEC[NaN]) + assert ~VEC[NaN] == Nx.max(~VEC[Inf], ~VEC[NaN]) end test "min" do # infinity as right arg - assert ~V[-Inf] == Nx.min(~V[-Inf], ~V[Inf]) - assert ~V[1.0] == Nx.min(~V[1], ~V[Inf]) - assert ~V[NaN] == Nx.min(~V[NaN], ~V[Inf]) - assert ~V[Inf] == Nx.min(~V[Inf], ~V[Inf]) + assert ~VEC[-Inf] == Nx.min(~VEC[-Inf], ~VEC[Inf]) + assert ~VEC[1.0] == Nx.min(~VEC[1], ~VEC[Inf]) + assert ~VEC[NaN] == Nx.min(~VEC[NaN], ~VEC[Inf]) + assert ~VEC[Inf] == Nx.min(~VEC[Inf], ~VEC[Inf]) # neg_inf as right arg - assert ~V[-Inf] == Nx.min(~V[-Inf], ~V[-Inf]) - assert ~V[-Inf] == Nx.min(~V[1], ~V[-Inf]) - assert ~V[NaN] == Nx.min(~V[NaN], ~V[-Inf]) - assert ~V[-Inf] == Nx.min(~V[Inf], ~V[-Inf]) + assert ~VEC[-Inf] == Nx.min(~VEC[-Inf], ~VEC[-Inf]) + assert ~VEC[-Inf] == Nx.min(~VEC[1], ~VEC[-Inf]) + assert ~VEC[NaN] == Nx.min(~VEC[NaN], ~VEC[-Inf]) + assert ~VEC[-Inf] == Nx.min(~VEC[Inf], ~VEC[-Inf]) # nan as right arg - assert ~V[NaN] == Nx.min(~V[-Inf], ~V[NaN]) - assert ~V[NaN] == Nx.min(~V[1], ~V[NaN]) - assert ~V[NaN] == Nx.min(~V[NaN], ~V[NaN]) - assert ~V[NaN] == Nx.min(~V[Inf], ~V[NaN]) + assert ~VEC[NaN] == Nx.min(~VEC[-Inf], ~VEC[NaN]) + assert ~VEC[NaN] == Nx.min(~VEC[1], ~VEC[NaN]) + assert ~VEC[NaN] == Nx.min(~VEC[NaN], ~VEC[NaN]) + assert ~VEC[NaN] == Nx.min(~VEC[Inf], ~VEC[NaN]) end test "remainder" do diff --git a/nx/test/nx/tensor_test.exs b/nx/test/nx/tensor_test.exs index 3798722804..2045577da1 100644 --- a/nx/test/nx/tensor_test.exs +++ b/nx/test/nx/tensor_test.exs @@ -130,7 +130,7 @@ defmodule Nx.TensorTest do describe "inspect" do test "prints with configured precision" do - assert inspect(~V[1], custom_options: [nx_precision: 5]) == + assert inspect(~VEC[1], custom_options: [nx_precision: 5]) == """ #Nx.Tensor< s64[1] @@ -138,7 +138,7 @@ defmodule Nx.TensorTest do >\ """ - assert inspect(~V[1.0], custom_options: [nx_precision: 5]) == + assert inspect(~VEC[1.0], custom_options: [nx_precision: 5]) == """ #Nx.Tensor< f32[1] @@ -146,7 +146,7 @@ defmodule Nx.TensorTest do >\ """ - assert inspect(~V[1.000042e-3], custom_options: [nx_precision: 5]) == + assert inspect(~VEC[1.000042e-3], custom_options: [nx_precision: 5]) == """ #Nx.Tensor< f32[1] @@ -154,7 +154,7 @@ defmodule Nx.TensorTest do >\ """ - assert inspect(~V[42.1337e10], custom_options: [nx_precision: 5]) == + assert inspect(~VEC[42.1337e10], custom_options: [nx_precision: 5]) == """ #Nx.Tensor< f32[1] @@ -162,7 +162,7 @@ defmodule Nx.TensorTest do >\ """ - assert inspect(~V[Inf -Inf NaN], custom_options: [nx_precision: 7]) == + assert inspect(~VEC[Inf -Inf NaN], custom_options: [nx_precision: 7]) == """ #Nx.Tensor< f32[3] @@ -170,7 +170,7 @@ defmodule Nx.TensorTest do >\ """ - assert inspect(~V[Inf-Infi 1.0i 0 1000], custom_options: [nx_precision: 3]) == + assert 
inspect(~VEC[Inf-Infi 1.0i 0 1000], custom_options: [nx_precision: 3]) == """ #Nx.Tensor< c64[4] @@ -178,7 +178,7 @@ defmodule Nx.TensorTest do >\ """ - assert inspect(~V[-0.0001], custom_options: [nx_precision: 3]) == + assert inspect(~VEC[-0.0001], custom_options: [nx_precision: 3]) == """ #Nx.Tensor< f32[1] @@ -186,7 +186,7 @@ defmodule Nx.TensorTest do >\ """ - assert inspect(~V[-0.0001], custom_options: [nx_precision: 8]) == + assert inspect(~VEC[-0.0001], custom_options: [nx_precision: 8]) == """ #Nx.Tensor< f32[1] diff --git a/nx/test/nx/vectorize_test.exs b/nx/test/nx/vectorize_test.exs index 4562240f91..683e8f4ed9 100644 --- a/nx/test/nx/vectorize_test.exs +++ b/nx/test/nx/vectorize_test.exs @@ -310,54 +310,54 @@ defmodule Nx.VectorizeTest do end test "conjugate" do - input = ~V[-1i 1i 10-i] |> Nx.vectorize(:rows) - result = Nx.vectorize(~V[1i -1i 10+i], :rows) + input = ~VEC[-1i 1i 10-i] |> Nx.vectorize(:rows) + result = Nx.vectorize(~VEC[1i -1i 10+i], :rows) assert result == Nx.conjugate(input) end test "phase" do - input = ~V[1i 0 10] |> Nx.vectorize(:rows) + input = ~VEC[1i 0 10] |> Nx.vectorize(:rows) result = Nx.vectorize(Nx.tensor([:math.pi() / 2, 0, 0]), :rows) assert result == Nx.phase(input) end test "real" do - input = ~V[-1i 0 10] |> Nx.vectorize(:rows) - result = Nx.vectorize(~V[0 0 10]f32, :rows) + input = ~VEC[-1i 0 10] |> Nx.vectorize(:rows) + result = Nx.vectorize(~VEC[0 0 10]f32, :rows) assert result == Nx.real(input) end test "imag" do - input = ~V[-1i 0 10] |> Nx.vectorize(:rows) - result = Nx.vectorize(~V[-1 0 0]f32, :rows) + input = ~VEC[-1i 0 10] |> Nx.vectorize(:rows) + result = Nx.vectorize(~VEC[-1 0 0]f32, :rows) assert result == Nx.imag(input) end test "bitwise_not" do - input = ~V[15 240 0]u8 |> Nx.vectorize(:rows) - result = Nx.vectorize(~V[240 15 255]u8, :rows) + input = ~VEC[15 240 0]u8 |> Nx.vectorize(:rows) + result = Nx.vectorize(~VEC[240 15 255]u8, :rows) assert result == Nx.bitwise_not(input) end test "population_count" do - input = ~V[15 240 3]u8 |> Nx.vectorize(:rows) - result = Nx.vectorize(~V[4 4 2]u8, :rows) + input = ~VEC[15 240 3]u8 |> Nx.vectorize(:rows) + result = Nx.vectorize(~VEC[4 4 2]u8, :rows) assert result == Nx.population_count(input) end test "count_leading_zeros" do - input = ~V[15 240 3]u8 |> Nx.vectorize(:rows) - result = Nx.vectorize(~V[4 0 6]u8, :rows) + input = ~VEC[15 240 3]u8 |> Nx.vectorize(:rows) + result = Nx.vectorize(~VEC[4 0 6]u8, :rows) assert result == Nx.count_leading_zeros(input) end test "sort" do - input = ~M[ + input = ~MAT[ 1 2 3 3 2 1 ] |> Nx.vectorize(:rows) - result = ~M[ + result = ~MAT[ 1 2 3 1 2 3 ] |> Nx.vectorize(:rows) @@ -366,12 +366,12 @@ defmodule Nx.VectorizeTest do end test "argsort" do - input = ~M[ + input = ~MAT[ 1 2 3 3 2 1 ] |> Nx.vectorize(:rows) - result = ~M[ + result = ~MAT[ 0 1 2 2 1 0 ] |> Nx.vectorize(:rows) @@ -405,12 +405,12 @@ defmodule Nx.VectorizeTest do end test "reflect" do - input = ~M[ + input = ~MAT[ 0 1 2 5 4 3 ] |> Nx.vectorize(:rows) - result = ~M[ + result = ~MAT[ 1 2 1 0 1 2 1 4 3 4 5 4 3 4 ] |> Nx.vectorize(:rows) @@ -447,19 +447,19 @@ defmodule Nx.VectorizeTest do test "simple" do assert double_n_times(Nx.tensor(3), Nx.tensor(5)) == Nx.tensor(96) - x = Nx.vectorize(~V[1 2 3], :x) - n = Nx.vectorize(~V[5 6 3], :x) + x = Nx.vectorize(~VEC[1 2 3], :x) + n = Nx.vectorize(~VEC[5 6 3], :x) - assert double_n_times(x, n) == Nx.vectorize(~V[32 128 24], :x) + assert double_n_times(x, n) == Nx.vectorize(~VEC[32 128 24], :x) end test "different axes" do - x = Nx.vectorize(~V[1 
2 3], :init) - n = Nx.vectorize(~V[4 5], :pred) + x = Nx.vectorize(~VEC[1 2 3], :init) + n = Nx.vectorize(~VEC[4 5], :pred) assert double_n_times(x, n) == Nx.vectorize( - ~M[ + ~MAT[ 16 32 48 32 64 96 ], @@ -471,7 +471,7 @@ defmodule Nx.VectorizeTest do test "mix of common and different axes" do x = Nx.vectorize( - ~M[ + ~MAT[ 1 2 3 4 5 6 @@ -480,32 +480,32 @@ defmodule Nx.VectorizeTest do pred: 2 ) - y = Nx.vectorize(~V[1 2], :pred) - n = Nx.vectorize(~V[3 4], :pred) + y = Nx.vectorize(~VEC[1 2], :pred) + n = Nx.vectorize(~VEC[3 4], :pred) assert double_x_triple_y_n_times(x, y, n) == { Nx.vectorize( - ~M[ + ~MAT[ 8 16 24 64 80 96 ], pred: 2, other: 3 ), - Nx.vectorize(~V[27 162], pred: 2) + Nx.vectorize(~VEC[27 162], pred: 2) } end test "simple with multiple pred axes" do - x = Nx.vectorize(~V[1 2 3], :x) - n = Nx.vectorize(~M[ + x = Nx.vectorize(~VEC[1 2 3], :x) + n = Nx.vectorize(~MAT[ 0 1 2 5 6 3 ], y: 2, x: 3) assert double_n_times(x, n) == Nx.vectorize( - ~M[ + ~MAT[ 1 4 12 32 128 24 ], @@ -558,11 +558,11 @@ defmodule Nx.VectorizeTest do test "simple if" do # this tests the case where we have a single vectorized predicate - pred = Nx.vectorize(~V[0 1 0], :pred) + pred = Nx.vectorize(~VEC[0 1 0], :pred) io = ExUnit.CaptureIO.capture_io(fn -> - assert vectorized_if(pred, 1, 2) == Nx.vectorize(~V[2 1 2], :pred) + assert vectorized_if(pred, 1, 2) == Nx.vectorize(~VEC[2 1 2], :pred) end) assert io == @@ -578,12 +578,12 @@ defmodule Nx.VectorizeTest do test "simple cond" do # this tests the case where we have a two vectorized predicates - pred1 = Nx.vectorize(~V[1 0 0], :pred) - pred2 = Nx.vectorize(~V[0 0 0], :pred) + pred1 = Nx.vectorize(~VEC[1 0 0], :pred) + pred2 = Nx.vectorize(~VEC[0 0 0], :pred) io = ExUnit.CaptureIO.capture_io(fn -> - assert vectorized_cond(pred1, 1, pred2, 2, 3) == Nx.vectorize(~V[1 3 3], :pred) + assert vectorized_cond(pred1, 1, pred2, 2, 3) == Nx.vectorize(~VEC[1 3 3], :pred) end) # This assertion ensures that the clause for pred2 is never executed @@ -595,7 +595,7 @@ defmodule Nx.VectorizeTest do end test "if with container result" do - pred1 = Nx.vectorize(~V[2 0 0], :pred) + pred1 = Nx.vectorize(~VEC[2 0 0], :pred) io = ExUnit.CaptureIO.capture_io(fn -> @@ -603,13 +603,13 @@ defmodule Nx.VectorizeTest do vectorized_if( pred1, {1, 2, 3}, - {7, 8, Nx.vectorize(~V[9 10 11], :x)} + {7, 8, Nx.vectorize(~VEC[9 10 11], :x)} ) assert result == { - Nx.vectorize(~V[1 7 7], :pred), - Nx.vectorize(~V[2 8 8], :pred), - Nx.vectorize(~M[ + Nx.vectorize(~VEC[1 7 7], :pred), + Nx.vectorize(~VEC[2 8 8], :pred), + Nx.vectorize(~MAT[ 3 3 3 9 10 11 9 10 11 @@ -643,8 +643,8 @@ defmodule Nx.VectorizeTest do end test "only executes selected branches" do - t = Nx.vectorize(~V[1], :pred) - f = Nx.vectorize(~V[0], :pred) + t = Nx.vectorize(~VEC[1], :pred) + f = Nx.vectorize(~VEC[0], :pred) assert = fn res, val, clause -> t = Nx.tensor(val) @@ -660,58 +660,58 @@ defmodule Nx.VectorizeTest do end test "1 vectorized pred in the beginning" do - assert cond4(Nx.vectorize(~V[0 1], :pred), 10, 0, 20, 0, 30, 40) == - Nx.vectorize(~V[40 10], :pred) + assert cond4(Nx.vectorize(~VEC[0 1], :pred), 10, 0, 20, 0, 30, 40) == + Nx.vectorize(~VEC[40 10], :pred) - assert cond4(Nx.vectorize(~V[0 0], :pred), 10, 1, 20, 0, 30, 40) == - Nx.vectorize(~V[20 20], :pred) + assert cond4(Nx.vectorize(~VEC[0 0], :pred), 10, 1, 20, 0, 30, 40) == + Nx.vectorize(~VEC[20 20], :pred) - assert cond4(Nx.vectorize(~V[0 0], :pred), 10, 0, 20, 1, 30, 40) == - Nx.vectorize(~V[30 30], :pred) + assert cond4(Nx.vectorize(~VEC[0 
0], :pred), 10, 0, 20, 1, 30, 40) == + Nx.vectorize(~VEC[30 30], :pred) - assert cond4(Nx.vectorize(~V[0 0], :pred), 10, 0, 20, 0, 30, 40) == - Nx.vectorize(~V[40 40], :pred) + assert cond4(Nx.vectorize(~VEC[0 0], :pred), 10, 0, 20, 0, 30, 40) == + Nx.vectorize(~VEC[40 40], :pred) end test "1 vectorized pred in the second but not last position" do - assert cond4(0, 10, Nx.vectorize(~V[0 1], :pred), 20, 0, 30, 40) == - Nx.vectorize(~V[40 20], :pred) + assert cond4(0, 10, Nx.vectorize(~VEC[0 1], :pred), 20, 0, 30, 40) == + Nx.vectorize(~VEC[40 20], :pred) - assert cond4(1, 10, Nx.vectorize(~V[0 1], :pred), 20, 0, 30, 40) == - Nx.vectorize(~V[10 10], :pred) + assert cond4(1, 10, Nx.vectorize(~VEC[0 1], :pred), 20, 0, 30, 40) == + Nx.vectorize(~VEC[10 10], :pred) - assert cond4(0, 10, Nx.vectorize(~V[0 0], :pred), 20, 1, 30, 40) == - Nx.vectorize(~V[30 30], :pred) + assert cond4(0, 10, Nx.vectorize(~VEC[0 0], :pred), 20, 1, 30, 40) == + Nx.vectorize(~VEC[30 30], :pred) - assert cond4(0, 10, Nx.vectorize(~V[0 0], :pred), 20, 0, 30, 40) == - Nx.vectorize(~V[40 40], :pred) + assert cond4(0, 10, Nx.vectorize(~VEC[0 0], :pred), 20, 0, 30, 40) == + Nx.vectorize(~VEC[40 40], :pred) end test "1 vectorized pred in the last position" do - assert cond4(0, 10, 0, 20, Nx.vectorize(~V[0 1], :pred), 30, 40) == - Nx.vectorize(~V[40 30], :pred) + assert cond4(0, 10, 0, 20, Nx.vectorize(~VEC[0 1], :pred), 30, 40) == + Nx.vectorize(~VEC[40 30], :pred) - assert cond4(1, 10, 0, 20, Nx.vectorize(~V[0 1], :pred), 30, 40) == - Nx.vectorize(~V[10 10], :pred) + assert cond4(1, 10, 0, 20, Nx.vectorize(~VEC[0 1], :pred), 30, 40) == + Nx.vectorize(~VEC[10 10], :pred) - assert cond4(0, 10, 1, 20, Nx.vectorize(~V[0 1], :pred), 30, 40) == - Nx.vectorize(~V[20 20], :pred) + assert cond4(0, 10, 1, 20, Nx.vectorize(~VEC[0 1], :pred), 30, 40) == + Nx.vectorize(~VEC[20 20], :pred) - assert cond4(0, 10, 0, 20, Nx.vectorize(~V[0 0], :pred), 30, 40) == - Nx.vectorize(~V[40 40], :pred) + assert cond4(0, 10, 0, 20, Nx.vectorize(~VEC[0 0], :pred), 30, 40) == + Nx.vectorize(~VEC[40 40], :pred) end test "2 vectorized preds with different axes" do assert cond4( - Nx.vectorize(~V[0 1 0], :pred1), + Nx.vectorize(~VEC[0 1 0], :pred1), 10, - Nx.vectorize(~V[1 0], :pred2), + Nx.vectorize(~VEC[1 0], :pred2), 20, 0, 30, 40 ) == - Nx.vectorize(~M[ + Nx.vectorize(~MAT[ 20 40 10 10 20 40 @@ -720,15 +720,15 @@ defmodule Nx.VectorizeTest do test "2 vectorized preds with different axes + clauses that match either" do assert cond4( - Nx.vectorize(~V[0 1 0], :pred1), - Nx.vectorize(~V[10 100], :pred2), - Nx.vectorize(~V[1 0], :pred2), - Nx.vectorize(~V[20 200 2000], :pred1), + Nx.vectorize(~VEC[0 1 0], :pred1), + Nx.vectorize(~VEC[10 100], :pred2), + Nx.vectorize(~VEC[1 0], :pred2), + Nx.vectorize(~VEC[20 200 2000], :pred1), 0, 30, 40 ) == - Nx.vectorize(~M[ + Nx.vectorize(~MAT[ 20 40 10 100 2000 40 diff --git a/nx/test/nx_test.exs b/nx/test/nx_test.exs index b8af1c560d..2da1ca42ed 100644 --- a/nx/test/nx_test.exs +++ b/nx/test/nx_test.exs @@ -1242,11 +1242,11 @@ defmodule NxTest do test "non-composite types" do refute Nx.compatible?(Complex.new(0, 2), 2) refute Nx.compatible?(2, Complex.new(0, 2)) - refute Nx.compatible?(Complex.new(0, 2), ~V[2]) - refute Nx.compatible?(~V[2], Complex.new(0, 2)) - refute Nx.compatible?(2, ~V[2i]) - refute Nx.compatible?(~V[2i], 2) - refute Nx.compatible?(~V[2], ~V[2i]) + refute Nx.compatible?(Complex.new(0, 2), ~VEC[2]) + refute Nx.compatible?(~VEC[2], Complex.new(0, 2)) + refute Nx.compatible?(2, ~VEC[2i]) + refute 
Nx.compatible?(~VEC[2i], 2) + refute Nx.compatible?(~VEC[2], ~VEC[2i]) assert Nx.compatible?(Complex.new(2), Complex.new(0, 2)) assert Nx.compatible?(2, 0) end @@ -2538,17 +2538,17 @@ defmodule NxTest do test "evaluates to tensor" do import Nx - assert ~M[-1 2 3 4] == Nx.tensor([[-1, 2, 3, 4]]) + assert ~MAT[-1 2 3 4] == Nx.tensor([[-1, 2, 3, 4]]) - assert ~M[1 + assert ~MAT[1 2 3 4] == Nx.tensor([[1], [2], [3], [4]]) - assert ~M[1.0 2 3 + assert ~MAT[1.0 2 3 11 12 13] == Nx.tensor([[1.0, 2, 3], [11, 12, 13]]) - assert ~V[4 3 2 1] == Nx.tensor([4, 3, 2, 1]) + assert ~VEC[4 3 2 1] == Nx.tensor([4, 3, 2, 1]) end test "raises when vector has more than one dimension" do @@ -2556,17 +2556,17 @@ defmodule NxTest do ArgumentError, "must be one-dimensional", fn -> - eval(~S[~V<0 0 0 1 + eval(~S[~VEC<0 0 0 1 1 0 0 0>]) end ) end test "evaluates with proper type" do - assert eval("~M[1 2 3 4]f32") == Nx.tensor([[1, 2, 3, 4]], type: {:f, 32}) - assert eval("~M[4 3 2 1]u8") == Nx.tensor([[4, 3, 2, 1]], type: {:u, 8}) + assert eval("~MAT[1 2 3 4]f32") == Nx.tensor([[1, 2, 3, 4]], type: {:f, 32}) + assert eval("~MAT[4 3 2 1]u8") == Nx.tensor([[4, 3, 2, 1]], type: {:u, 8}) - assert eval("~V[0 1 0 1]u8") == Nx.tensor([0, 1, 0, 1], type: {:u, 8}) + assert eval("~VEC[0 1 0 1]u8") == Nx.tensor([0, 1, 0, 1], type: {:u, 8}) end test "raises on invalid type" do @@ -2574,7 +2574,7 @@ defmodule NxTest do ArgumentError, "invalid numerical type: {:f, 8} (see Nx.Type docs for all supported types)", fn -> - eval("~M[1 2 3 4]f8") + eval("~MAT[1 2 3 4]f8") end ) end @@ -2584,7 +2584,7 @@ defmodule NxTest do ArgumentError, "expected a numerical value for tensor, got x", fn -> - eval("~V[1 2 x 4]u8") + eval("~VEC[1 2 x 4]u8") end ) end @@ -3054,14 +3054,14 @@ defmodule NxTest do test "Bluestein clause" do # we need a tensor which isn't a power of 2 and is > than 1024 to actually # validate the bluestein implementation. No doctests check for it. 
- x = Nx.tile(~V[1 1 1 0 0 0], [400]) + x = Nx.tile(~VEC[1 1 1 0 0 0], [400]) x_fft = Nx.fft(x) # From numpy we expect the following indices to be the only non-zero values in the tensor non_zero_idx = Nx.tensor([0, 400, 1200, 2000]) - assert Nx.take(x_fft, non_zero_idx) == ~V[1200 400-692.820323i 400 400+692.820323i] + assert Nx.take(x_fft, non_zero_idx) == ~VEC[1200 400-692.820323i 400 400+692.820323i] - zeros_check = Nx.indexed_put(x_fft, Nx.new_axis(non_zero_idx, 1), ~V[0 0 0 0]) + zeros_check = Nx.indexed_put(x_fft, Nx.new_axis(non_zero_idx, 1), ~VEC[0 0 0 0]) zeros = Nx.broadcast(0, x_fft) assert_all_close(Nx.real(zeros_check), zeros, atol: 1.0e-9) assert_all_close(Nx.imag(zeros_check), zeros, atol: 1.0e-9) diff --git a/torchx/test/torchx/complex_test.exs b/torchx/test/torchx/complex_test.exs index ecfb1a3397..0994bdde07 100644 --- a/torchx/test/torchx/complex_test.exs +++ b/torchx/test/torchx/complex_test.exs @@ -201,13 +201,13 @@ defmodule Torchx.ComplexTest do end test "invert" do - a = ~M[ + a = ~MAT[ 1 0 i 0 -1i 0 0 0 2 ] - expected_result = ~M[ + expected_result = ~MAT[ 1 0 -0.5i 0 1i 0 0 0 0.5 @@ -222,15 +222,15 @@ defmodule Torchx.ComplexTest do end test "solve" do - a = ~M[ + a = ~MAT[ 1 0 i -1i 0 1i 1 1 1 ] - b = ~V[3+i 4 2-2i] + b = ~VEC[3+i 4 2-2i] - result = ~V[i 2 -3i] + result = ~VEC[i 2 -3i] assert_all_close(Nx.LinAlg.solve(a, b), result) end @@ -238,7 +238,7 @@ defmodule Torchx.ComplexTest do describe "matrix_power" do test "supports complex with positive exponent" do - a = ~M[ + a = ~MAT[ 1 1i -1i 1 ] @@ -249,7 +249,7 @@ defmodule Torchx.ComplexTest do end test "supports complex with 0 exponent" do - a = ~M[ + a = ~MAT[ 1 1i -1i 1 ] @@ -258,12 +258,12 @@ defmodule Torchx.ComplexTest do end test "supports complex with negative exponent" do - a = ~M[ + a = ~MAT[ 1 -0.5i 0 0.5 ] - result = ~M[ + result = ~MAT[ 1 15i 0 16 ] diff --git a/torchx/test/torchx/nx_test.exs b/torchx/test/torchx/nx_test.exs index 2c0736c3c6..b350841e57 100644 --- a/torchx/test/torchx/nx_test.exs +++ b/torchx/test/torchx/nx_test.exs @@ -138,17 +138,17 @@ defmodule Torchx.NxTest do test "fft" do assert_all_close( Nx.fft(Nx.tensor([1, 1, 0, 0]), length: 5), - ~V[2.0+0.0i 1.3090-0.9511i 0.1909-0.5877i 0.1909+0.5877i 1.3090+0.9510i] + ~VEC[2.0+0.0i 1.3090-0.9511i 0.1909-0.5877i 0.1909+0.5877i 1.3090+0.9510i] ) assert_all_close( Nx.fft(Nx.tensor([1, 1, 0, 0, 2, 3]), length: 4), - ~V[2.0+0.0i 1.0-1.0i 0.0+0.0i 1.0+1.0i] + ~VEC[2.0+0.0i 1.0-1.0i 0.0+0.0i 1.0+1.0i] ) assert_all_close( Nx.fft(Nx.tensor([1, 1, 0]), length: :power_of_two), - ~V[2.0+0.0i 1.0-1.0i 0.0+0.0i 1.0+1.0i] + ~VEC[2.0+0.0i 1.0-1.0i 0.0+0.0i 1.0+1.0i] ) end @@ -170,12 +170,12 @@ defmodule Torchx.NxTest do length: :power_of_two ), Nx.stack([ - ~M[ + ~MAT[ 2 1.0-1.0i 0 1.0+1.0i 1 1 1 1 1 -1i -1 1i ], - ~M[ + ~MAT[ 1 -1i -1 1i 1 1 1 1 2 1.0-1.0i 0 1.0+1.0i @@ -200,12 +200,12 @@ defmodule Torchx.NxTest do length: 4 ), Nx.stack([ - ~M[ + ~MAT[ 2 1.0-1.0i 0 1.0+1.0i 1 1 1 1 1 -1i -1 1i ], - ~M[ + ~MAT[ 1 -1i -1 1i 1 1 1 1 2 1.0-1.0i 0 1.0+1.0i @@ -230,12 +230,12 @@ defmodule Torchx.NxTest do length: 4 ), Nx.stack([ - ~M[ + ~MAT[ 2 1.0-1.0i 0 1.0+1.0i 1 1 1 1 1 -1i -1 1i ], - ~M[ + ~MAT[ 1 -1i -1 1i 1 1 1 1 2 1.0-1.0i 0 1.0+1.0i @@ -246,19 +246,19 @@ defmodule Torchx.NxTest do test "ifft" do assert_all_close( - Nx.ifft(~V[5 5 5 5 5], + Nx.ifft(~VEC[5 5 5 5 5], length: 5 ), Nx.tensor([5, 0, 0, 0, 0]) ) assert_all_close( - Nx.ifft(~V[2.0+0.0i 1.0-1.0i 0.0+0.0i 1.0+1.0i 5 6], length: 4), + Nx.ifft(~VEC[2.0+0.0i 1.0-1.0i 0.0+0.0i 1.0+1.0i 5 6], 
length: 4), Nx.tensor([1, 1, 0, 0]) ) assert_all_close( - Nx.ifft(~V[2 0 0], length: :power_of_two), + Nx.ifft(~VEC[2 0 0], length: :power_of_two), Nx.tensor([0.5, 0.5, 0.5, 0.5]) ) end @@ -267,12 +267,12 @@ defmodule Torchx.NxTest do assert_all_close( Nx.ifft( Nx.stack([ - ~M[ + ~MAT[ 2 1.0-1.0i 0 1.0+1.0i 1 1 1 1 1 -1i -1 1i ], - ~M[ + ~MAT[ 1 -1i -1 1i 1 1 1 1 2 1.0-1.0i 0 1.0+1.0i @@ -311,12 +311,12 @@ defmodule Torchx.NxTest do length: 4 ), Nx.stack([ - ~M[ + ~MAT[ 2 1.0+1.0i 0 1.0-1.0i 1 1 1 1 1 1i -1 -1i ], - ~M[ + ~MAT[ 1 1i -1 -1i 1 1 1 1 2 1.0+1.0i 0 1.0-1.0i @@ -341,12 +341,12 @@ defmodule Torchx.NxTest do length: 4 ), Nx.stack([ - ~M[ + ~MAT[ 2 1.0+1.0i 0 1.0-1.0i 1 1 1 1 1 1i -1 -1i ], - ~M[ + ~MAT[ 1 1i -1 -1i 1 1 1 1 2 1.0+1.0i 0 1.0-1.0i
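
For downstream code, the migration is purely mechanical. Below is a minimal sketch, assuming a hypothetical module (the `MyApp.Tensors` name and its functions are illustrative, not part of this patch) that previously imported the old sigils; per the deprecation macros added in `nx/lib/nx.ex`, `~V` and `~M` keep working but trigger compile-time deprecation warnings, so switching the import and the sigil names is all that is required:

```elixir
defmodule MyApp.Tensors do
  # Before this patch: import Nx, only: [sigil_V: 2, sigil_M: 2]
  import Nx, only: [sigil_VEC: 2, sigil_MAT: 2]

  # Before: ~V[1 2 3]f32
  def weights, do: ~VEC[1 2 3]f32

  # Before: ~M[...] — the bracket/multi-line syntax and modifiers are unchanged,
  # only the sigil name differs.
  def rotation do
    ~MAT[
      0 -1
      1  0
    ]
  end
end
```

`import Nx, only: :sigils` continues to bring in both the old and the new sigils, so existing code compiles (with warnings) until it is updated.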