From 58ddd6567741ae3950b7397b3be2abde04404c49 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jos=C3=A9=20Valim?=
Date: Tue, 19 Nov 2024 10:40:32 +0100
Subject: [PATCH] Update to latest Nx (#311)

---
 lib/scholar/cluster/affinity_propagation.ex  | 37 +++++++-------
 lib/scholar/cluster/optics.ex                | 12 ++---
 lib/scholar/linear/isotonic_regression.ex    | 19 ++++---
 lib/scholar/linear/logistic_regression.ex    |  4 +-
 lib/scholar/linear/polynomial_regression.ex  | 10 ++--
 lib/scholar/linear/svm.ex                    |  2 +-
 lib/scholar/manifold/trimap.ex               | 18 ++++---
 lib/scholar/metrics/classification.ex        | 30 +++++------
 lib/scholar/metrics/distance.ex              |  4 +-
 lib/scholar/naive_bayes/complement.ex        |  2 +-
 lib/scholar/naive_bayes/gaussian.ex          |  2 +-
 lib/scholar/naive_bayes/multinomial.ex       |  2 +-
 lib/scholar/neighbors/brute_knn.ex           |  2 +-
 lib/scholar/neighbors/nn_descent.ex          | 20 ++++----
 lib/scholar/neighbors/rnn_classifier.ex      |  2 +-
 lib/scholar/preprocessing/ordinal_encoder.ex |  4 +-
 mix.lock                                     | 14 +++---
 notebooks/k_means.livemd                     | 52 ++++++++++----------
 notebooks/k_nearest_neighbors.livemd         |  6 +--
 notebooks/linear_regression.livemd           |  2 +-
 notebooks/nearest_neighbors.livemd           |  4 +-
 test/scholar/neighbors/kd_tree_test.exs      | 10 ++--
 22 files changed, 133 insertions(+), 125 deletions(-)

diff --git a/lib/scholar/cluster/affinity_propagation.ex b/lib/scholar/cluster/affinity_propagation.ex
index 759ac04a..f58600ae 100644
--- a/lib/scholar/cluster/affinity_propagation.ex
+++ b/lib/scholar/cluster/affinity_propagation.ex
@@ -103,9 +103,9 @@ defmodule Scholar.Cluster.AffinityPropagation do
       iex> x = Nx.tensor([[12,5,78,2], [9,3,81,-2], [-1,3,6,1], [1,-2,5,2]])
       iex> Scholar.Cluster.AffinityPropagation.fit(x, key: key)
       %Scholar.Cluster.AffinityPropagation{
-        labels: Nx.tensor([0, 0, 2, 2]),
-        cluster_centers_indices: Nx.tensor([0, -1, 2, -1]),
-        cluster_centers: Nx.tensor(
+        labels: Nx.s32([0, 0, 2, 2]),
+        cluster_centers_indices: Nx.s32([0, -1, 2, -1]),
+        cluster_centers: Nx.f32(
           [
             [12.0, 5.0, 78.0, 2.0],
             [:infinity, :infinity, :infinity, :infinity],
@@ -113,8 +113,8 @@ defmodule Scholar.Cluster.AffinityPropagation do
             [:infinity, :infinity, :infinity, :infinity]
           ]
         ),
-        num_clusters: Nx.tensor(2, type: :u64),
-        iterations: Nx.tensor(22, type: :s64)
+        num_clusters: Nx.u32(2),
+        iterations: Nx.u32(22)
       }
   """
   deftransform fit(data, opts \\ []) do
@@ -125,7 +125,7 @@ defmodule Scholar.Cluster.AffinityPropagation do

   defnp fit_n(data, key, opts) do
     data = to_float(data)
-    iterations = opts[:iterations]
+    iterations = opts[:iterations] |> Nx.as_type(:u32)
     damping_factor = opts[:damping_factor]
     converge_after = opts[:converge_after]
     n = Nx.axis_size(data, 0)
@@ -146,7 +146,7 @@ defmodule Scholar.Cluster.AffinityPropagation do
     stop = Nx.u8(0)

     {{a, r, it}, _} =
-      while {{a = zero_n, r = zero_n, i = 0}, {s, range, stop, e}},
+      while {{a = zero_n, r = zero_n, i = Nx.u32(0)}, {s, range, stop, e}},
             i < iterations and not stop do
         temp = a + s
         indices = Nx.argmax(temp, axis: 1)
@@ -204,7 +204,7 @@ defmodule Scholar.Cluster.AffinityPropagation do

       indices =
         Nx.select(mask, Nx.iota(Nx.shape(diagonals)), -1)
-        |> Nx.as_type({:s, 64})
+        |> Nx.as_type(:s32)

       cluster_centers =
         Nx.select(
@@ -216,15 +216,14 @@ defmodule Scholar.Cluster.AffinityPropagation do
       labels =
         Nx.broadcast(mask, Nx.shape(s))
         |> Nx.select(s, Nx.Constants.neg_infinity(Nx.type(s)))
-        |> Nx.argmax(axis: 1)
-        |> Nx.as_type({:s, 64})
+        |> Nx.argmax(axis: 1, type: :s32)

       labels = Nx.select(mask, Nx.iota(Nx.shape(labels)), labels)

       {cluster_centers, indices, labels}
     else
-      {Nx.tensor(-1, type: Nx.type(data)), Nx.broadcast(Nx.tensor(-1, type: :s64), {n}),
-       Nx.broadcast(Nx.tensor(-1, type: :s64), {n})}
+      {Nx.tensor(-1, type: Nx.type(data)), Nx.broadcast(Nx.tensor(-1, type: :s32), {n}),
+       Nx.broadcast(Nx.tensor(-1, type: :s32), {n})}
     end

     %__MODULE__{
@@ -262,16 +261,16 @@ defmodule Scholar.Cluster.AffinityPropagation do
       iex> model = Scholar.Cluster.AffinityPropagation.fit(x, key: key)
       iex> Scholar.Cluster.AffinityPropagation.prune(model)
       %Scholar.Cluster.AffinityPropagation{
-        labels: Nx.tensor([0, 0, 1, 1]),
-        cluster_centers_indices: Nx.tensor([0, 2]),
+        labels: Nx.s32([0, 0, 1, 1]),
+        cluster_centers_indices: Nx.s32([0, 2]),
         cluster_centers: Nx.tensor(
           [
             [12.0, 5.0, 78.0, 2.0],
             [-1.0, 3.0, 6.0, 1.0]
           ]
         ),
-        num_clusters: Nx.tensor(2, type: :u64),
-        iterations: Nx.tensor(22, type: :s64)
+        num_clusters: Nx.u32(2),
+        iterations: Nx.u32(22)
       }
   """
   def prune(
@@ -293,7 +292,9 @@ defmodule Scholar.Cluster.AffinityPropagation do
       end)

     mapping = Map.new(mapping)
-    cluster_centers_indices = Nx.tensor(Enum.reverse(indices))
+
+    cluster_centers_indices =
+      Nx.tensor(Enum.reverse(indices), type: Nx.type(cluster_centers_indices))

     %__MODULE__{
       model
@@ -314,7 +315,7 @@ defmodule Scholar.Cluster.AffinityPropagation do
       iex> model = Scholar.Cluster.AffinityPropagation.prune(model)
       iex> Scholar.Cluster.AffinityPropagation.predict(model, Nx.tensor([[10,3,50,6], [8,3,8,2]]))
       #Nx.Tensor<
-        s64[2]
+        s32[2]
         [0, 1]
       >
   """
diff --git a/lib/scholar/cluster/optics.ex b/lib/scholar/cluster/optics.ex
index c6548b78..4d991b2f 100644
--- a/lib/scholar/cluster/optics.ex
+++ b/lib/scholar/cluster/optics.ex
@@ -75,32 +75,32 @@ defmodule Scholar.Cluster.OPTICS do
       iex> x = Nx.tensor([[1, 2], [2, 5], [3, 6], [8, 7], [8, 8], [7, 3]])
       iex> Scholar.Cluster.OPTICS.fit(x, min_samples: 2).labels
       #Nx.Tensor<
-        s64[6]
+        s32[6]
         [-1, -1, -1, -1, -1, -1]
       >
       iex> Scholar.Cluster.OPTICS.fit(x, eps: 4.5, min_samples: 2).labels
       #Nx.Tensor<
-        s64[6]
+        s32[6]
         [0, 0, 0, 1, 1, 1]
       >
       iex> Scholar.Cluster.OPTICS.fit(x, eps: 2, min_samples: 2).labels
       #Nx.Tensor<
-        s64[6]
+        s32[6]
         [-1, 0, 0, 1, 1, -1]
       >
       iex> Scholar.Cluster.OPTICS.fit(x, eps: 2, min_samples: 2, algorithm: :kd_tree, metric: {:minkowski, 1}).labels
       #Nx.Tensor<
-        s64[6]
+        s32[6]
         [-1, 0, 0, 1, 1, -1]
       >
       iex> Scholar.Cluster.OPTICS.fit(x, eps: 1, min_samples: 2).labels
       #Nx.Tensor<
-        s64[6]
+        s32[6]
         [-1, -1, -1, 0, 0, -1]
       >
       iex> Scholar.Cluster.OPTICS.fit(x, eps: 4.5, min_samples: 3).labels
       #Nx.Tensor<
-        s64[6]
+        s32[6]
         [0, 0, 0, 1, 1, -1]
       >
   """
diff --git a/lib/scholar/linear/isotonic_regression.ex b/lib/scholar/linear/isotonic_regression.ex
index d1ebf9af..c0b09503 100644
--- a/lib/scholar/linear/isotonic_regression.ex
+++ b/lib/scholar/linear/isotonic_regression.ex
@@ -158,7 +158,6 @@ defmodule Scholar.Linear.IsotonicRegression do
     {sample_weights, opts} = Keyword.pop(opts, :sample_weights, 1.0)
     x_type = to_float_type(x)
     x = to_float(x)
-
     y = to_float(y)

     sample_weights =
@@ -202,7 +201,7 @@ defmodule Scholar.Linear.IsotonicRegression do
   @doc """
   Makes predictions with the given `model` on input `x` and interpolating `function`.

-  Output predictions have shape `{n_samples}` when train target is shaped either `{n_samples}` or `{n_samples, 1}`.
+  Output predictions have shape `{n_samples}` when train target is shaped either `{n_samples}` or `{n_samples, 1}`.
   Otherwise, predictions match train target shape.

  ## Examples
@@ -443,19 +442,19 @@ defmodule Scholar.Linear.IsotonicRegression do
   end

   defnp contiguous_isotonic_regression(y, sample_weights, max_size, increasing) do
-    y_size = if increasing, do: max_size, else: Nx.axis_size(y, 0) - 1
+    y_size = if(increasing, do: max_size, else: Nx.axis_size(y, 0) - 1) |> Nx.as_type(:u32)
     y = if increasing, do: y, else: Nx.reverse(y)
     sample_weights = if increasing, do: sample_weights, else: Nx.reverse(sample_weights)
-    target = Nx.iota({Nx.axis_size(y, 0)}, type: :s64)
+    target = Nx.iota({Nx.axis_size(y, 0)}, type: :u32)
     type_wy = Nx.Type.merge(Nx.type(y), Nx.type(sample_weights))
-    i = if increasing, do: 0, else: Nx.axis_size(y, 0) - 1 - max_size
+    i = if(increasing, do: 0, else: Nx.axis_size(y, 0) - 1 - max_size) |> Nx.as_type(:u32)

     {{y, target}, _} =
       while {{y, target},
             {i, sample_weights, sum_w = Nx.tensor(0, type: Nx.type(sample_weights)),
-              sum_wy = Nx.tensor(0, type: type_wy), prev_y = Nx.tensor(0, type: type_wy), _k = 0,
-              terminating_flag = 0, y_size}},
+              sum_wy = Nx.tensor(0, type: type_wy), prev_y = Nx.tensor(0, type: type_wy),
+              _k = Nx.u32(0), terminating_flag = Nx.u8(0), y_size}},
            i < y_size + 1 and not terminating_flag do
        k = target[i] + 1
@@ -509,12 +508,12 @@ defmodule Scholar.Linear.IsotonicRegression do
       end
     end

-    i = if increasing, do: 0, else: Nx.axis_size(y, 0) - 1 - max_size
+    i = if(increasing, do: 0, else: Nx.axis_size(y, 0) - 1 - max_size) |> Nx.as_type(:u32)

     {y, _} =
-      while {y, {target, i, _k = 0, max_size}}, i < max_size + 1 do
+      while {y, {target, i, _k = Nx.u32(0), max_size}}, i < max_size + 1 do
         k = target[i] + 1
-        indices = Nx.iota({Nx.axis_size(y, 0)})
+        indices = Nx.iota({Nx.axis_size(y, 0)}, type: :u32)
         in_range? = Nx.logical_and(i + 1 <= indices, indices < k)
         y = Nx.select(in_range?, y[i], y)
         i = k
diff --git a/lib/scholar/linear/logistic_regression.ex b/lib/scholar/linear/logistic_regression.ex
index 97706ff8..c49887e2 100644
--- a/lib/scholar/linear/logistic_regression.ex
+++ b/lib/scholar/linear/logistic_regression.ex
@@ -211,7 +211,7 @@ defmodule Scholar.Linear.LogisticRegression do
   @doc """
   Makes predictions with the given `model` on inputs `x`.

-  Output predictions have shape `{n_samples}` when train target is shaped either `{n_samples}` or `{n_samples, 1}`.
+  Output predictions have shape `{n_samples}` when train target is shaped either `{n_samples}` or `{n_samples, 1}`.

  ## Examples
@@ -220,7 +220,7 @@ defmodule Scholar.Linear.LogisticRegression do
      iex> model = Scholar.Linear.LogisticRegression.fit(x, y, num_classes: 2)
      iex> Scholar.Linear.LogisticRegression.predict(model, Nx.tensor([[-3.0, 5.0]]))
      #Nx.Tensor<
-       s64[1]
+       s32[1]
        [1]
      >
  """
diff --git a/lib/scholar/linear/polynomial_regression.ex b/lib/scholar/linear/polynomial_regression.ex
index 3df96bb0..ce027a52 100644
--- a/lib/scholar/linear/polynomial_regression.ex
+++ b/lib/scholar/linear/polynomial_regression.ex
@@ -110,8 +110,8 @@ defmodule Scholar.Linear.PolynomialRegression do
   @doc """
   Makes predictions with the given `model` on input `x`.

-  Output predictions have shape `{n_samples}` when train target is shaped either `{n_samples}` or `{n_samples, 1}`.
-  Otherwise, predictions match train target shape.
+  Output predictions have shape `{n_samples}` when train target is shaped either `{n_samples}` or `{n_samples, 1}`.
+  Otherwise, predictions match train target shape.

  ## Examples
@@ -148,7 +148,7 @@ defmodule Scholar.Linear.PolynomialRegression do
      iex> x = Nx.tensor([[2]])
      iex> Scholar.Linear.PolynomialRegression.transform(x, degree: 5, fit_intercept?: false)
      #Nx.Tensor<
-       s64[1][5]
+       s32[1][5]
        [
          [2, 4, 8, 16, 32]
        ]
      >
@@ -157,7 +157,7 @@ defmodule Scholar.Linear.PolynomialRegression do
      iex> x = Nx.tensor([[2, 3]])
      iex> Scholar.Linear.PolynomialRegression.transform(x)
      #Nx.Tensor<
-       s64[1][6]
+       s32[1][6]
        [
          [1, 2, 3, 4, 6, 9]
        ]
      >
@@ -166,7 +166,7 @@ defmodule Scholar.Linear.PolynomialRegression do
      iex> x = Nx.iota({3, 2})
      iex> Scholar.Linear.PolynomialRegression.transform(x, fit_intercept?: false)
      #Nx.Tensor<
-       s64[3][5]
+       s32[3][5]
        [
          [0, 1, 0, 0, 1],
          [2, 3, 4, 6, 9],
diff --git a/lib/scholar/linear/svm.ex b/lib/scholar/linear/svm.ex
index c8997bb0..d1d47607 100644
--- a/lib/scholar/linear/svm.ex
+++ b/lib/scholar/linear/svm.ex
@@ -259,7 +259,7 @@ defmodule Scholar.Linear.SVM do
      iex> model = Scholar.Linear.SVM.fit(x, y, num_classes: 2)
      iex> Scholar.Linear.SVM.predict(model, Nx.tensor([[-3.0, 5.0]]))
      #Nx.Tensor<
-       s64[1]
+       s32[1]
        [1]
      >
  """
diff --git a/lib/scholar/manifold/trimap.ex b/lib/scholar/manifold/trimap.ex
index f18ac8ba..754f5df5 100644
--- a/lib/scholar/manifold/trimap.ex
+++ b/lib/scholar/manifold/trimap.ex
@@ -153,14 +153,14 @@ defmodule Scholar.Manifold.Trimap do
     # binsearch which checks if the elements of tensor1 are in tensor2
     {is_in, _} =
-      while {is_in, {tensor1, tensor2, prev = Nx.s64(-1), i = Nx.s64(0)}}, i < Nx.size(tensor1) do
+      while {is_in, {tensor1, tensor2, prev = Nx.s64(-1), i = Nx.u32(0)}}, i < Nx.size(tensor1) do
         if i > 0 and prev == tensor1[i] do
           is_in = Nx.indexed_put(is_in, Nx.new_axis(i, 0), is_in[i - 1])
           {is_in, {tensor1, tensor2, prev, i + 1}}
         else
           {found?, _} =
             while {stop = Nx.u8(0),
-                   {tensor1, tensor2, left = Nx.s64(0), right = Nx.size(tensor2) - 1, i}},
+                   {tensor1, tensor2, left = Nx.s64(0), right = Nx.s64(Nx.size(tensor2) - 1), i}},
                   left <= right and not stop do
               mid = div(left + right, 2)
@@ -188,13 +188,19 @@ defmodule Scholar.Manifold.Trimap do
     final_samples = Nx.broadcast(Nx.s64(0), shape)

     {final_samples, key, _, _} =
-      while {final_samples, key, rejects, i = Nx.s64(0)}, i < elem(shape, 0) do
-        {samples, key} = Nx.Random.randint(key, 0, opts[:maxval], shape: {elem(shape, 1)})
+      while {final_samples, key, rejects, i = Nx.u32(0)}, i < elem(shape, 0) do
+        # TODO: See if we can relax the samples to u32
+        {samples, key} =
+          Nx.Random.randint(key, 0, opts[:maxval], shape: {elem(shape, 1)}, type: :s64)
+
         discard = in1d(samples, rejects[i])

         {samples, key, _, _, _} =
           while {samples, key, discard, rejects, i}, Nx.any(discard) do
-            {new_samples, key} = Nx.Random.randint(key, 0, opts[:maxval], shape: {elem(shape, 1)})
+            # TODO: See if we can relax the samples to u32
+            {new_samples, key} =
+              Nx.Random.randint(key, 0, opts[:maxval], shape: {elem(shape, 1)}, type: :s64)
+
             discard = in1d(new_samples, rejects[i]) or in1d(new_samples, samples)
             samples = Nx.select(discard, samples, new_samples)
             {samples, key, in1d(samples, rejects[i]), rejects, i}
@@ -552,7 +558,7 @@ defmodule Scholar.Manifold.Trimap do
     gain = Nx.broadcast(Nx.tensor(1.0, type: to_float_type(embeddings)), Nx.shape(embeddings))

     {embeddings, _} =
-      while {embeddings, {vel, gain, lr, triplets, weights, i = Nx.s64(0)}},
+      while {embeddings, {vel, gain, lr, triplets, weights, i = Nx.u32(0)}},
            i < opts[:num_iters] do
        gamma = if i < @switch_iter, do: @init_momentum, else: @final_momentum
diff --git a/lib/scholar/metrics/classification.ex b/lib/scholar/metrics/classification.ex
index 7a1f33c8..f1ccd002 100644
--- a/lib/scholar/metrics/classification.ex
+++ b/lib/scholar/metrics/classification.ex
@@ -253,7 +253,7 @@ defmodule Scholar.Metrics.Classification do
      iex> y_pred = Nx.tensor([0, 2, 1, 1, 2, 2, 2, 0, 0, 1], type: :u32)
      iex> Scholar.Metrics.Classification.accuracy(y_true, y_pred, normalize: false)
      #Nx.Tensor<
-       u64
+       u32
        6
      >
  """
@@ -530,7 +530,7 @@ defmodule Scholar.Metrics.Classification do
      iex> y_pred = Nx.tensor([0, 1, 0, 2, 2, 2], type: :u32)
      iex> Scholar.Metrics.Classification.confusion_matrix(y_true, y_pred, num_classes: 3)
      #Nx.Tensor<
-       u64[3][3]
+       u32[3][3]
        [
          [1, 1, 0],
          [1, 0, 1],
@@ -538,8 +538,8 @@ defmodule Scholar.Metrics.Classification do
        ]
      >

-     iex> y_true = Nx.tensor([0, 0, 1, 1, 2, 2], type: {:u, 32})
-     iex> y_pred = Nx.tensor([0, 1, 0, 2, 2, 2], type: {:u, 32})
+     iex> y_true = Nx.tensor([0, 0, 1, 1, 2, 2], type: :u64)
+     iex> y_pred = Nx.tensor([0, 1, 0, 2, 2, 2], type: :u64)
      iex> sample_weights = [2, 5, 1, 1.5, 2, 8]
      iex> Scholar.Metrics.Classification.confusion_matrix(y_true, y_pred, num_classes: 3, sample_weights: sample_weights, normalize: :predicted)
      #Nx.Tensor<
@@ -556,7 +556,7 @@ defmodule Scholar.Metrics.Classification do
     weights =
       if opts[:sample_weights] == nil,
-        do: Nx.u64(1),
+        do: Nx.u32(1),
         else: validate_weights(opts[:sample_weights], Nx.axis_size(y_true, 0))

     confusion_matrix_n(y_true, y_pred, weights, opts)
@@ -567,9 +567,9 @@ defmodule Scholar.Metrics.Classification do
     num_classes = check_num_classes(opts[:num_classes])

-    zeros = Nx.broadcast(Nx.u64(0), {num_classes, num_classes})
+    zeros = Nx.broadcast(Nx.u32(0), {num_classes, num_classes})
     indices = Nx.stack([y_true, y_pred], axis: 1)
-    updates = Nx.broadcast(Nx.u64(1), y_true) * weights
+    updates = Nx.broadcast(Nx.u32(1), y_true) * weights

     cm = Nx.indexed_add(zeros, indices, updates)
@@ -597,15 +597,15 @@ defmodule Scholar.Metrics.Classification do

  ## Examples

-     iex> y_true = Nx.tensor([0, 1, 2, 0, 1, 2], type: {:u, 32})
-     iex> y_pred = Nx.tensor([0, 2, 1, 0, 0, 1], type: {:u, 32})
+     iex> y_true = Nx.tensor([0, 1, 2, 0, 1, 2], type: :u64)
+     iex> y_pred = Nx.tensor([0, 2, 1, 0, 0, 1], type: :u64)
      iex> Scholar.Metrics.Classification.balanced_accuracy_score(y_true, y_pred, num_classes: 3)
      #Nx.Tensor<
        f32
        0.3333333432674408
      >

-     iex> y_true = Nx.tensor([0, 1, 2, 0, 1, 2], type: {:u, 32})
-     iex> y_pred = Nx.tensor([0, 2, 1, 0, 0, 1], type: {:u, 32})
+     iex> y_true = Nx.tensor([0, 1, 2, 0, 1, 2], type: :u64)
+     iex> y_pred = Nx.tensor([0, 2, 1, 0, 0, 1], type: :u64)
      iex> sample_weights = [1, 1, 1, 2, 2, 2]
      iex> Scholar.Metrics.Classification.balanced_accuracy_score(y_true, y_pred, num_classes: 3, sample_weights: sample_weights, adjusted: true)
      #Nx.Tensor<
@@ -749,7 +749,7 @@ defmodule Scholar.Metrics.Classification do
      {Nx.f32([0.6666666865348816, 1.0, 0.25]),
       Nx.f32([0.6666666865348816, 0.5, 1.0]),
       Nx.f32([0.6666666865348816, 0.6666666865348816, 0.4000000059604645]),
-      Nx.u64([3, 6, 1])}
+      Nx.u32([3, 6, 1])}
      iex> Scholar.Metrics.Classification.precision_recall_fscore_support(y_true, y_pred, num_classes: 3, average: :macro)
      {Nx.f32([0.6666666865348816, 1.0, 0.25]),
       Nx.f32([0.6666666865348816, 0.5, 1.0]),
@@ -773,7 +773,7 @@ defmodule Scholar.Metrics.Classification do
      {Nx.f32([0.0, 0.0]),
       Nx.f32([0.0, 0.0]),
       Nx.f32([0.0, 0.0]),
-      Nx.u64([2, 2])}
+      Nx.u32([2, 2])}
  """
  deftransform precision_recall_fscore_support(y_true, y_pred, opts) do
    opts = NimbleOptions.validate!(opts, @precision_recall_fscore_support_schema)
@@ -902,7 +902,7 @@ defmodule Scholar.Metrics.Classification do
      iex> y_true = Nx.tensor([2, 2, 3, 4])
      iex> Scholar.Metrics.Classification.zero_one_loss(y_true, y_pred, normalize: false)
      #Nx.Tensor<
-       u64
+       u32
        1
      >
  """
@@ -1370,7 +1370,7 @@ defmodule Scholar.Metrics.Classification do
      iex> y_score = Nx.tensor([[0.5, 0.2, 0.1], [0.3, 0.4, 0.5], [0.4, 0.3, 0.2], [0.1, 0.3, 0.6], [0.9, 0.1, 0.0]])
      iex> Scholar.Metrics.Classification.top_k_accuracy_score(y_true, y_score, k: 2, num_classes: 3, normalize: false)
      #Nx.Tensor<
-       u64
+       u32
        4
      >
diff --git a/lib/scholar/metrics/distance.ex b/lib/scholar/metrics/distance.ex
index ae8c2599..36bd0429 100644
--- a/lib/scholar/metrics/distance.ex
+++ b/lib/scholar/metrics/distance.ex
@@ -593,7 +593,7 @@ defmodule Scholar.Metrics.Distance do
      iex> y = Nx.reverse(x)
      iex> Scholar.Metrics.Distance.pairwise_squared_euclidean(x, y)
      #Nx.Tensor<
-       s64[6][6]
+       s32[6][6]
        [
          [5470, 3526, 2014, 934, 286, 70],
          [3526, 2014, 934, 286, 70, 286],
@@ -619,7 +619,7 @@ defmodule Scholar.Metrics.Distance do
      iex> x = Nx.iota({6, 6})
      iex> Scholar.Metrics.Distance.pairwise_squared_euclidean(x)
      #Nx.Tensor<
-       s64[6][6]
+       s32[6][6]
        [
          [0, 216, 864, 1944, 3456, 5400],
          [216, 0, 216, 864, 1944, 3456],
diff --git a/lib/scholar/naive_bayes/complement.ex b/lib/scholar/naive_bayes/complement.ex
index 7204b0cf..268692df 100644
--- a/lib/scholar/naive_bayes/complement.ex
+++ b/lib/scholar/naive_bayes/complement.ex
@@ -203,7 +203,7 @@ defmodule Scholar.NaiveBayes.Complement do
      iex> model = Scholar.NaiveBayes.Complement.fit(x, y, num_classes: 3)
      iex> Scholar.NaiveBayes.Complement.predict(model, Nx.tensor([[6, 2, 4], [8, 5, 9]]))
      #Nx.Tensor<
-       s64[2]
+       s32[2]
        [2, 2]
      >
  """
diff --git a/lib/scholar/naive_bayes/gaussian.ex b/lib/scholar/naive_bayes/gaussian.ex
index 5efab37d..75245aed 100644
--- a/lib/scholar/naive_bayes/gaussian.ex
+++ b/lib/scholar/naive_bayes/gaussian.ex
@@ -161,7 +161,7 @@ defmodule Scholar.NaiveBayes.Gaussian do
      iex> model = Scholar.NaiveBayes.Gaussian.fit(x, y, num_classes: 3)
      iex> Scholar.NaiveBayes.Gaussian.predict(model, Nx.tensor([[6, 2, 4], [8, 5, 9]]))
      #Nx.Tensor<
-       s64[2]
+       s32[2]
        [2, 2]
      >
  """
diff --git a/lib/scholar/naive_bayes/multinomial.ex b/lib/scholar/naive_bayes/multinomial.ex
index f3c81151..44988b53 100644
--- a/lib/scholar/naive_bayes/multinomial.ex
+++ b/lib/scholar/naive_bayes/multinomial.ex
@@ -292,7 +292,7 @@ defmodule Scholar.NaiveBayes.Multinomial do
      iex> model = Scholar.NaiveBayes.Multinomial.fit(x, y, num_classes: 3)
      iex> Scholar.NaiveBayes.Multinomial.predict(model, Nx.tensor([[6, 2, 4], [8, 5, 9]]))
      #Nx.Tensor<
-       s64[2]
+       s32[2]
        [2, 2]
      >
  """
diff --git a/lib/scholar/neighbors/brute_knn.ex b/lib/scholar/neighbors/brute_knn.ex
index 707c8d67..ace50fb7 100644
--- a/lib/scholar/neighbors/brute_knn.ex
+++ b/lib/scholar/neighbors/brute_knn.ex
@@ -64,7 +64,7 @@ defmodule Scholar.Neighbors.BruteKNN do
      2
      iex> model.data
      #Nx.Tensor<
-       s64[5][2]
+       s32[5][2]
        [
          [1, 2],
          [2, 3],
diff --git a/lib/scholar/neighbors/nn_descent.ex b/lib/scholar/neighbors/nn_descent.ex
index fc3218b1..77e456c2 100644
--- a/lib/scholar/neighbors/nn_descent.ex
+++ b/lib/scholar/neighbors/nn_descent.ex
@@ -192,14 +192,14 @@ defmodule Scholar.Neighbors.NNDescent do
     # binsearch which checks if the elements of tensor1 are in tensor2
     {is_in, _} =
-      while {is_in, {tensor1, tensor2, prev = Nx.s64(-1), i = Nx.s64(0)}}, i < Nx.size(tensor1) do
+      while {is_in, {tensor1, tensor2, prev = Nx.s32(-1), i = Nx.u32(0)}}, i < Nx.size(tensor1) do
         if i > 0 and prev == tensor1[i] do
           is_in = Nx.indexed_put(is_in, Nx.new_axis(i, 0), is_in[i - 1])
           {is_in, {tensor1, tensor2, prev, i + 1}}
         else
           {found?, _} =
             while {stop = Nx.u8(0),
-                   {tensor1, tensor2, left = Nx.s64(0), right = Nx.size(tensor2) - 1, i}},
+                   {tensor1, tensor2, left = Nx.s32(0), right = Nx.s32(Nx.size(tensor2) - 1), i}},
                   left <= right and not stop do
               mid = div(left + right, 2)
@@ -224,10 +224,10 @@ defmodule Scholar.Neighbors.NNDescent do
   end

   defnp unique_random_sample(key, shape, opts \\ []) do
-    final_samples = Nx.broadcast(Nx.s64(0), shape)
+    final_samples = Nx.broadcast(0, shape)

     {final_samples, key, _} =
-      while {final_samples, key, i = Nx.s64(0)}, i < elem(shape, 0) do
+      while {final_samples, key, i = Nx.u32(0)}, i < elem(shape, 0) do
         {samples, key} = Nx.Random.randint(key, 0, opts[:maxval], shape: {elem(shape, 1)})
         samples = Nx.sort(samples)
         discard = Nx.broadcast(Nx.u8(0), {elem(shape, 1)})
@@ -292,11 +292,11 @@ defmodule Scholar.Neighbors.NNDescent do
     )

     {{indices, keys, flags}, _} =
-      while {{indices, keys, flags}, {index0 = Nx.s64(0), data, missing, random_indices, d}},
+      while {{indices, keys, flags}, {index0 = Nx.u32(0), data, missing, random_indices, d}},
            index0 < num_heaps do
        {{indices, keys, flags}, _} =
          while {{indices, keys, flags},
-                {index0, j = Nx.s64(0), data, missing, random_indices, d}},
+                {index0, j = Nx.u32(0), data, missing, random_indices, d}},
                j < missing[index0] do
            {add_neighbor(
               {indices, keys, flags},
@@ -357,21 +357,21 @@ defmodule Scholar.Neighbors.NNDescent do
     {indices, keys, flags} = curr_graph

     {curr_graph, _} =
-      while {{indices, keys, flags}, {i = Nx.s64(0), data, leaves}},
+      while {{indices, keys, flags}, {i = Nx.u32(0), data, leaves}},
            i < num_leaves do
        {{indices, keys, flags}, _} =
-          while {{indices, keys, flags}, {i, j = Nx.s64(0), data, leaves, stop = Nx.u8(0)}},
+          while {{indices, keys, flags}, {i, j = Nx.u32(0), data, leaves, stop = Nx.u8(0)}},
                j < leaf_size and not stop do
            index0 = leaves[[i, j]]

-            if index0 != Nx.s64(-1) do
+            if index0 != -1 do
              {{indices, keys, flags}, _} =
                while {{indices, keys, flags},
                       {i, j, k = j + 1, index0, data, leaves, stop_inner = Nx.u8(0)}},
                      k < leaf_size and not stop_inner do
                  index1 = leaves[[i, k]]

-                  if index1 != Nx.s64(-1) do
+                  if index1 != -1 do
                    d = handle_dist(data[index0], data[index1], opts)

                    {indices, keys, flags} =
diff --git a/lib/scholar/neighbors/rnn_classifier.ex b/lib/scholar/neighbors/rnn_classifier.ex
index ddc241e4..4013a7c8 100644
--- a/lib/scholar/neighbors/rnn_classifier.ex
+++ b/lib/scholar/neighbors/rnn_classifier.ex
@@ -146,7 +146,7 @@ defmodule Scholar.Neighbors.RadiusNNClassifier do
      iex> model = Scholar.Neighbors.RadiusNNClassifier.fit(x, y, num_classes: 2)
      iex> Scholar.Neighbors.RadiusNNClassifier.predict(model, Nx.tensor([[1.9, 4.3], [1.1, 2.0]]))
      #Nx.Tensor<
-       s64[2]
+       s32[2]
        [0, 1]
      >
  """
diff --git a/lib/scholar/preprocessing/ordinal_encoder.ex b/lib/scholar/preprocessing/ordinal_encoder.ex
index e4053c74..61c6c25f 100644
--- a/lib/scholar/preprocessing/ordinal_encoder.ex
+++ b/lib/scholar/preprocessing/ordinal_encoder.ex
@@ -87,7 +87,7 @@ defmodule Scholar.Preprocessing.OrdinalEncoder do
      iex> encoder = Scholar.Preprocessing.OrdinalEncoder.fit(tensor, num_categories: 4)
      iex> Scholar.Preprocessing.OrdinalEncoder.transform(encoder, tensor)
      #Nx.Tensor<
-       s64[7]
+       s32[7]
        [1, 0, 2, 3, 0, 2, 0]
      >

@@ -96,7 +96,7 @@ defmodule Scholar.Preprocessing.OrdinalEncoder do
      iex> new_tensor = Nx.tensor([2, 3, 4, 5, 4, 56, 2])
      iex> Scholar.Preprocessing.OrdinalEncoder.transform(encoder, new_tensor)
      #Nx.Tensor<
-       s64[7]
+       s32[7]
        [0, 1, 2, -1, 2, 3, 0]
      >
  """
diff --git a/mix.lock b/mix.lock
index 39712046..36baed81 100644
--- a/mix.lock
+++ b/mix.lock
@@ -4,21 +4,21 @@
   "complex": {:hex, :complex, "0.5.0", "af2d2331ff6170b61bb738695e481b27a66780e18763e066ee2cd863d0b1dd92", [:mix], [], "hexpm", "2683bd3c184466cfb94fad74cbfddfaa94b860e27ad4ca1bffe3bff169d91ef1"},
   "deep_merge": {:hex, :deep_merge, "1.0.0", "b4aa1a0d1acac393bdf38b2291af38cb1d4a52806cf7a4906f718e1feb5ee961", [:mix], [], "hexpm", "ce708e5f094b9cd4e8f2be4f00d2f4250c4095be93f8cd6d018c753894885430"},
   "earmark_parser": {:hex, :earmark_parser, "1.4.41", "ab34711c9dc6212dda44fcd20ecb87ac3f3fce6f0ca2f28d4a00e4154f8cd599", [:mix], [], "hexpm", "a81a04c7e34b6617c2792e291b5a2e57ab316365c2644ddc553bb9ed863ebefa"},
-  "elixir_make": {:hex, :elixir_make, "0.8.4", "4960a03ce79081dee8fe119d80ad372c4e7badb84c493cc75983f9d3bc8bde0f", [:mix], [{:castore, "~> 0.1 or ~> 1.0", [hex: :castore, repo: "hexpm", optional: true]}, {:certifi, "~> 2.0", [hex: :certifi, repo: "hexpm", optional: true]}], "hexpm", "6e7f1d619b5f61dfabd0a20aa268e575572b542ac31723293a4c1a567d5ef040"},
+  "elixir_make": {:hex, :elixir_make, "0.9.0", "6484b3cd8c0cee58f09f05ecaf1a140a8c97670671a6a0e7ab4dc326c3109726", [:mix], [], "hexpm", "db23d4fd8b757462ad02f8aa73431a426fe6671c80b200d9710caf3d1dd0ffdb"},
   "ex_doc": {:hex, :ex_doc, "0.34.2", "13eedf3844ccdce25cfd837b99bea9ad92c4e511233199440488d217c92571e8", [:mix], [{:earmark_parser, "~> 1.4.39", [hex: :earmark_parser, repo: "hexpm", optional: false]}, {:makeup_c, ">= 0.1.0", [hex: :makeup_c, repo: "hexpm", optional: true]}, {:makeup_elixir, "~> 0.14 or ~> 1.0", [hex: :makeup_elixir, repo: "hexpm", optional: false]}, {:makeup_erlang, "~> 0.1 or ~> 1.0", [hex: :makeup_erlang, repo: "hexpm", optional: false]}, {:makeup_html, ">= 0.1.0", [hex: :makeup_html, repo: "hexpm", optional: true]}], "hexpm", "5ce5f16b41208a50106afed3de6a2ed34f4acfd65715b82a0b84b49d995f95c1"},
-  "exla": {:hex, :exla, "0.7.3", "51310270a0976974fc758f7b28ebd6ca8e099b3d6fc78b0d484c808e977cb914", [:make, :mix], [{:elixir_make, "~> 0.6", [hex: :elixir_make, repo: "hexpm", optional: false]}, {:nimble_pool, "~> 1.0", [hex: :nimble_pool, repo: "hexpm", optional: false]}, {:nx, "~> 0.7.1", [hex: :nx, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4.0 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}, {:xla, "~> 0.6.0", [hex: :xla, repo: "hexpm", optional: false]}], "hexpm", "5b3d5741a24aada21d3b0feb4b99d1fc3c8457f995a63ea16684d8d5678b96ff"},
+  "exla": {:hex, :exla, "0.9.1", "1e8ecd2a6106e86ec1d132fd80cc3992c6c5a8b3b6b1867abd12bf650e6ccd67", [:make, :mix], [{:elixir_make, "~> 0.6", [hex: :elixir_make, repo: "hexpm", optional: false]}, {:nimble_pool, "~> 1.0", [hex: :nimble_pool, repo: "hexpm", optional: false]}, {:nx, "~> 0.9.0", [hex: :nx, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4.0 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}, {:xla, "~> 0.8.0", [hex: :xla, repo: "hexpm", optional: false]}], "hexpm", "a44f10f2eafe802dab325b86eaf746ec578a408467731a83f4ddec9b05d50667"},
   "jason": {:hex, :jason, "1.4.4", "b9226785a9aa77b6857ca22832cffa5d5011a667207eb2a0ad56adb5db443b8a", [:mix], [{:decimal, "~> 1.0 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: true]}], "hexpm", "c5eb0cab91f094599f94d55bc63409236a8ec69a21a67814529e8d5f6cc90b3b"},
-  "makeup": {:hex, :makeup, "1.1.2", "9ba8837913bdf757787e71c1581c21f9d2455f4dd04cfca785c70bbfff1a76a3", [:mix], [{:nimble_parsec, "~> 1.2.2 or ~> 1.3", [hex: :nimble_parsec, repo: "hexpm", optional: false]}], "hexpm", "cce1566b81fbcbd21eca8ffe808f33b221f9eee2cbc7a1706fc3da9ff18e6cac"},
-  "makeup_elixir": {:hex, :makeup_elixir, "0.16.2", "627e84b8e8bf22e60a2579dad15067c755531fea049ae26ef1020cad58fe9578", [:mix], [{:makeup, "~> 1.0", [hex: :makeup, repo: "hexpm", optional: false]}, {:nimble_parsec, "~> 1.2.3 or ~> 1.3", [hex: :nimble_parsec, repo: "hexpm", optional: false]}], "hexpm", "41193978704763f6bbe6cc2758b84909e62984c7752b3784bd3c218bb341706b"},
+  "makeup": {:hex, :makeup, "1.2.1", "e90ac1c65589ef354378def3ba19d401e739ee7ee06fb47f94c687016e3713d1", [:mix], [{:nimble_parsec, "~> 1.4", [hex: :nimble_parsec, repo: "hexpm", optional: false]}], "hexpm", "d36484867b0bae0fea568d10131197a4c2e47056a6fbe84922bf6ba71c8d17ce"},
+  "makeup_elixir": {:hex, :makeup_elixir, "1.0.0", "74bb8348c9b3a51d5c589bf5aebb0466a84b33274150e3b6ece1da45584afc82", [:mix], [{:makeup, "~> 1.0", [hex: :makeup, repo: "hexpm", optional: false]}, {:nimble_parsec, "~> 1.2.3 or ~> 1.3", [hex: :nimble_parsec, repo: "hexpm", optional: false]}], "hexpm", "49159b7d7d999e836bedaf09dcf35ca18b312230cf901b725a64f3f42e407983"},
   "makeup_erlang": {:hex, :makeup_erlang, "1.0.1", "c7f58c120b2b5aa5fd80d540a89fdf866ed42f1f3994e4fe189abebeab610839", [:mix], [{:makeup, "~> 1.0", [hex: :makeup, repo: "hexpm", optional: false]}], "hexpm", "8a89a1eeccc2d798d6ea15496a6e4870b75e014d1af514b1b71fa33134f57814"},
   "nimble_csv": {:hex, :nimble_csv, "1.2.0", "4e26385d260c61eba9d4412c71cea34421f296d5353f914afe3f2e71cce97722", [:mix], [], "hexpm", "d0628117fcc2148178b034044c55359b26966c6eaa8e2ce15777be3bbc91b12a"},
   "nimble_options": {:hex, :nimble_options, "1.1.1", "e3a492d54d85fc3fd7c5baf411d9d2852922f66e69476317787a7b2bb000a61b", [:mix], [], "hexpm", "821b2470ca9442c4b6984882fe9bb0389371b8ddec4d45a9504f00a66f650b44"},
   "nimble_parsec": {:hex, :nimble_parsec, "1.4.0", "51f9b613ea62cfa97b25ccc2c1b4216e81df970acd8e16e8d1bdc58fef21370d", [:mix], [], "hexpm", "9c565862810fb383e9838c1dd2d7d2c437b3d13b267414ba6af33e50d2d1cf28"},
   "nimble_pool": {:hex, :nimble_pool, "1.1.0", "bf9c29fbdcba3564a8b800d1eeb5a3c58f36e1e11d7b7fb2e084a643f645f06b", [:mix], [], "hexpm", "af2e4e6b34197db81f7aad230c1118eac993acc0dae6bc83bac0126d4ae0813a"},
-  "nx": {:hex, :nx, "0.7.3", "51ff45d9f9ff58b616f4221fa54ccddda98f30319bb8caaf86695234a469017a", [:mix], [{:complex, "~> 0.5", [hex: :complex, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4.0 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "5ff29af84f08db9bda66b8ef7ce92ab583ab4f983629fe00b479f1e5c7c705a6"},
+  "nx": {:hex, :nx, "0.9.1", "b5296f178d24ded118d5fd5c3977bb65c7f6ad8113eff4cb1401ac1770eb837a", [:mix], [{:complex, "~> 0.5", [hex: :complex, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4.0 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "f67ca8fcf09f73000e9a59a19f93ad8e0e581f4993e008527a4a6f280c71c467"},
   "polaris": {:hex, :polaris, "0.1.0", "dca61b18e3e801ecdae6ac9f0eca5f19792b44a5cb4b8d63db50fc40fc038d22", [:mix], [{:nx, "~> 0.5", [hex: :nx, repo: "hexpm", optional: false]}], "hexpm", "13ef2b166650e533cb24b10e2f3b8ab4f2f449ba4d63156e8c569527f206e2c2"},
   "scidata": {:hex, :scidata, "0.1.11", "fe3358bac7d740374b4f2a7eff6a1cb02e5ee7f87f7cdb1e8648ad93c533165f", [:mix], [{:castore, "~> 0.1", [hex: :castore, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}, {:nimble_csv, "~> 1.1", [hex: :nimble_csv, repo: "hexpm", optional: false]}, {:stb_image, "~> 0.4", [hex: :stb_image, repo: "hexpm", optional: true]}], "hexpm", "90873337a9d5fe880d640517efa93d3c07e46c8ba436de44117f581800549f93"},
   "statistex": {:hex, :statistex, "1.0.0", "f3dc93f3c0c6c92e5f291704cf62b99b553253d7969e9a5fa713e5481cd858a5", [:mix], [], "hexpm", "ff9d8bee7035028ab4742ff52fc80a2aa35cece833cf5319009b52f1b5a86c27"},
-  "telemetry": {:hex, :telemetry, "1.2.1", "68fdfe8d8f05a8428483a97d7aab2f268aaff24b49e0f599faa091f1d4e7f61c", [:rebar3], [], "hexpm", "dad9ce9d8effc621708f99eac538ef1cbe05d6a874dd741de2e689c47feafed5"},
-  "xla": {:hex, :xla, "0.6.0", "67bb7695efa4a23b06211dc212de6a72af1ad5a9e17325e05e0a87e4c241feb8", [:make, :mix], [{:elixir_make, "~> 0.4", [hex: :elixir_make, repo: "hexpm", optional: false]}], "hexpm", "dd074daf942312c6da87c7ed61b62fb1a075bced157f1cc4d47af2d7c9f44fb7"},
+  "telemetry": {:hex, :telemetry, "1.3.0", "fedebbae410d715cf8e7062c96a1ef32ec22e764197f70cda73d82778d61e7a2", [:rebar3], [], "hexpm", "7015fc8919dbe63764f4b4b87a95b7c0996bd539e0d499be6ec9d7f3875b79e6"},
+  "xla": {:hex, :xla, "0.8.0", "fef314d085dd3ee16a0816c095239938f80769150e15db16dfaa435553d7cb16", [:make, :mix], [{:elixir_make, "~> 0.4", [hex: :elixir_make, repo: "hexpm", optional: false]}], "hexpm", "739c61c8d93b97e12ba0369d10e76130224c208f1a76ad293e3581f056833e57"},
 }
diff --git a/notebooks/k_means.livemd b/notebooks/k_means.livemd
index 4200c85a..90dc1834 100644
--- a/notebooks/k_means.livemd
+++ b/notebooks/k_means.livemd
@@ -91,7 +91,7 @@ y =
   ]
 >,
 #Nx.Tensor<
-  s64[150]
+  s32[150]
   EXLA.Backend
   [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...]
 >}
@@ -380,7 +380,7 @@ models =
     609477056.0
   >,
   labels: #Nx.Tensor<
-    s64[426400]
+    s32[426400]
     EXLA.Backend
     [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, ...]
   >
@@ -413,7 +413,7 @@ models =
     271123392.0
   >,
   labels: #Nx.Tensor<
-    s64[426400]
+    s32[426400]
     EXLA.Backend
     [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, ...]
   >
@@ -451,7 +451,7 @@ models =
     177419888.0
   >,
   labels: #Nx.Tensor<
-    s64[426400]
+    s32[426400]
     EXLA.Backend
     [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, ...]
   >
@@ -490,7 +490,7 @@ models =
     131876120.0
   >,
   labels: #Nx.Tensor<
-    s64[426400]
+    s32[426400]
     EXLA.Backend
     [15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...]
   >
@@ -529,7 +529,7 @@ models =
     64511020.0
   >,
   labels: #Nx.Tensor<
-    s64[426400]
+    s32[426400]
     EXLA.Backend
     [23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, ...]
   >
@@ -699,7 +699,7 @@ models =
     10950.6201171875
   >,
   labels: #Nx.Tensor<
-    s64[200]
+    s32[200]
     EXLA.Backend
     [0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, ...]
   >
@@ -724,7 +724,7 @@ models =
     9246.3125
   >,
   labels: #Nx.Tensor<
-    s64[200]
+    s32[200]
     EXLA.Backend
     [0, 0, 2, 0, 1, 1, 2, 1, 1, 1, 0, 0, 2, 0, 0, 1, 2, 1, 1, 2, 0, 0, 2, 0, 2, 1, 1, 1, 0, 1, 0, 0, 1, 0, 2, 1, 2, 1, 1, 1, 0, 0, 2, 0, ...]
   >
@@ -749,7 +749,7 @@ models =
     8447.5419921875
   >,
   labels: #Nx.Tensor<
-    s64[200]
+    s32[200]
     EXLA.Backend
     [3, 3, 0, 3, 1, 1, 0, 1, 2, 2, 3, 3, 0, 3, 3, 1, 0, 1, 1, 2, 3, 3, 0, 3, 0, 1, 1, 1, 3, 2, 3, 3, 1, 3, 0, 1, 0, 1, 2, 2, 3, 3, 0, ...]
   >
@@ -774,7 +774,7 @@ models =
     7935.5498046875
   >,
   labels: #Nx.Tensor<
-    s64[200]
+    s32[200]
     EXLA.Backend
     [0, 3, 2, 3, 4, 4, 2, 4, 1, 1, 3, 3, 2, 3, 3, 4, 2, 4, 4, 2, 3, 3, 2, 3, 2, 4, 4, 4, 3, 1, 0, 3, 4, 3, 2, 4, 2, 4, 1, 1, 0, 3, ...]
   >
@@ -799,7 +799,7 @@ models =
     7484.12109375
   >,
   labels: #Nx.Tensor<
-    s64[200]
+    s32[200]
     EXLA.Backend
     [0, 3, 2, 3, 4, 4, 2, 5, 1, 1, 3, 3, 2, 3, 3, 4, 2, 4, 4, 1, 3, 3, 2, 3, 2, 5, 4, 5, 3, 1, 0, 3, 4, 3, 2, 4, 2, 5, 1, 1, 0, ...]
   >
@@ -824,7 +824,7 @@ models =
     7091.55810546875
   >,
   labels: #Nx.Tensor<
-    s64[200]
+    s32[200]
     EXLA.Backend
     [2, 1, 0, 1, 6, 6, 0, 3, 4, 5, 1, 1, 0, 2, 1, 6, 0, 3, 6, 5, 2, 1, 0, 2, 0, 3, 6, 3, 1, 4, 2, 1, 6, 1, 0, 3, 0, 3, 4, 4, ...]
   >
@@ -849,7 +849,7 @@ models =
     6868.54296875
   >,
   labels: #Nx.Tensor<
-    s64[200]
+    s32[200]
     EXLA.Backend
     [3, 4, 1, 6, 6, 6, 1, 7, 0, 5, 6, 4, 1, 4, 2, 6, 2, 7, 6, 5, 4, 4, 1, 4, 2, 7, 6, 7, 6, 0, 3, 4, 6, 6, 2, 7, 1, 7, 0, ...]
   >
@@ -874,7 +874,7 @@ models =
     6582.2734375
   >,
   labels: #Nx.Tensor<
-    s64[200]
+    s32[200]
     EXLA.Backend
     [3, 4, 1, 6, 6, 8, 1, 7, 0, 5, 6, 4, 1, 4, 2, 8, 2, 7, 6, 5, 4, 4, 1, 4, 2, 8, 8, 7, 6, 0, 3, 4, 6, 6, 2, 8, 1, 7, ...]
   >
@@ -899,7 +899,7 @@ models =
     6426.0517578125
   >,
   labels: #Nx.Tensor<
-    s64[200]
+    s32[200]
     EXLA.Backend
     [3, 4, 9, 6, 6, 8, 9, 7, 0, 5, 6, 4, 1, 4, 2, 8, 9, 7, 2, 5, 4, 4, 9, 4, 9, 8, 6, 7, 2, 0, 3, 4, 6, 6, 2, 8, 9, ...]
   >
@@ -924,7 +924,7 @@ models =
     6238.46923828125
   >,
   labels: #Nx.Tensor<
-    s64[200]
+    s32[200]
     EXLA.Backend
     [3, 4, 9, 6, 6, 8, 9, 7, 0, 5, 6, 4, 1, 4, 2, 8, 9, 7, 2, 5, 4, 10, 9, 4, 9, 8, 6, 7, 2, 0, 3, 10, 6, 6, 2, 8, ...]
   >
@@ -949,7 +949,7 @@ models =
     6081.9736328125
   >,
   labels: #Nx.Tensor<
-    s64[200]
+    s32[200]
     EXLA.Backend
     [7, 3, 2, 3, 5, 5, 6, 1, 8, 4, 5, 0, 2, 0, 3, 10, 6, 1, 5, 4, 0, 11, 2, 0, 2, 1, 5, 1, 9, 10, 7, 11, 3, 9, 3, ...]
   >
@@ -974,7 +974,7 @@ models =
     5876.912109375
   >,
   labels: #Nx.Tensor<
-    s64[200]
+    s32[200]
     EXLA.Backend
     [8, 10, 2, 1, 1, 5, 2, 3, 4, 12, 1, 10, 2, 10, 11, 5, 11, 0, 6, 7, 10, 10, 2, 10, 2, 0, 0, 3, 1, 12, 8, 10, 1, 1, ...]
   >
@@ -999,7 +999,7 @@ models =
     5797.251953125
   >,
   labels: #Nx.Tensor<
-    s64[200]
+    s32[200]
     EXLA.Backend
     [8, 10, 13, 1, 1, 5, 2, 3, 4, 12, 1, 10, 13, 10, 11, 5, 11, 0, 6, 7, 10, 10, 13, 10, 13, 0, 0, 3, 1, 12, 8, 10, 1, ...]
   >
@@ -1024,7 +1024,7 @@ models =
     5662.34521484375
   >,
   labels: #Nx.Tensor<
-    s64[200]
+    s32[200]
     EXLA.Backend
     [8, 11, 2, 5, 0, 0, 2, 10, 13, 6, 5, 11, 14, 12, 5, 9, 2, 10, 0, 4, 12, 11, 14, 12, 1, 7, 0, 10, 5, 6, 8, 11, ...]
   >
@@ -1049,7 +1049,7 @@ models =
     5568.4345703125
   >,
   labels: #Nx.Tensor<
-    s64[200]
+    s32[200]
     EXLA.Backend
     [8, 11, 2, 5, 0, 0, 2, 10, 13, 6, 5, 11, 14, 12, 5, 9, 2, 10, 0, 4, 12, 11, 14, 15, 1, 7, 0, 10, 5, 6, 8, ...]
   >
@@ -1074,7 +1074,7 @@ models =
     5398.72412109375
   >,
   labels: #Nx.Tensor<
-    s64[200]
+    s32[200]
     EXLA.Backend
     [8, 11, 2, 5, 0, 0, 2, 10, 13, 6, 5, 11, 14, 12, 5, 9, 2, 10, 16, 4, 12, 11, 14, 15, 1, 7, 0, 10, 5, 6, ...]
   >
@@ -1099,7 +1099,7 @@ models =
     5336.95263671875
   >,
   labels: #Nx.Tensor<
-    s64[200]
+    s32[200]
     EXLA.Backend
     [8, 11, 2, 5, 0, 0, 2, 10, 17, 6, 5, 11, 14, 12, 5, 9, 2, 10, 16, 4, 12, 11, 14, 15, 1, 7, 0, 10, 5, ...]
   >
@@ -1124,7 +1124,7 @@ models =
     5233.1396484375
   >,
   labels: #Nx.Tensor<
-    s64[200]
+    s32[200]
     EXLA.Backend
     [8, 11, 2, 5, 0, 0, 2, 10, 17, 6, 0, 11, 14, 12, 18, 9, 2, 10, 18, 4, 12, 11, 14, 15, 1, 7, 0, 10, ...]
   >
@@ -1149,7 +1149,7 @@ models =
     5173.603515625
   >,
   labels: #Nx.Tensor<
-    s64[200]
+    s32[200]
     EXLA.Backend
     [8, 19, 2, 5, 0, 0, 2, 10, 17, 6, 0, 11, 14, 12, 18, 9, 2, 10, 18, 4, 12, 11, 14, 15, 1, 7, 0, ...]
   >
@@ -1249,7 +1249,7 @@ best_model = Enum.at(models, 1)
     9246.3125
   >,
   labels: #Nx.Tensor<
-    s64[200]
+    s32[200]
     EXLA.Backend
     [0, 0, 2, 0, 1, 1, 2, 1, 1, 1, 0, 0, 2, 0, 0, 1, 2, 1, 1, 2, 0, 0, 2, 0, 2, 1, 1, 1, 0, 1, 0, 0, 1, 0, 2, 1, 2, 1, 1, 1, 0, 0, 2, 0, 2, 1, ...]
   >
diff --git a/notebooks/k_nearest_neighbors.livemd b/notebooks/k_nearest_neighbors.livemd
index d6ad95b1..1f695dc8 100644
--- a/notebooks/k_nearest_neighbors.livemd
+++ b/notebooks/k_nearest_neighbors.livemd
@@ -150,7 +150,7 @@ KNNClassifier.predict(model, x_pred)
 ```

 #Nx.Tensor<
-  s64[1]
+  s32[1]
   EXLA.Backend
   [1]
 >
@@ -167,7 +167,7 @@ KNNClassifier.predict(model, x_pred)
 ```

 #Nx.Tensor<
-  s64[1]
+  s32[1]
   EXLA.Backend
   [0]
 >
@@ -409,7 +409,7 @@ x = Scholar.Preprocessing.standard_scale(x)
   ]
 >,
 #Nx.Tensor<
-  s64[4898]
+  s32[4898]
   EXLA.Backend
   [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 2, 1, 2, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 0, 1, ...]
 >}
diff --git a/notebooks/linear_regression.livemd b/notebooks/linear_regression.livemd
index 81c700ff..49b80bc7 100644
--- a/notebooks/linear_regression.livemd
+++ b/notebooks/linear_regression.livemd
@@ -368,7 +368,7 @@ Nx.iota({n_samples, n_variables})
 ```

 #Nx.Tensor<
-  s64[5][3]
+  s32[5][3]
   EXLA.Backend
   [
     [0, 0, 0],
diff --git a/notebooks/nearest_neighbors.livemd b/notebooks/nearest_neighbors.livemd
index 22b05596..2ebd4095 100644
--- a/notebooks/nearest_neighbors.livemd
+++ b/notebooks/nearest_neighbors.livemd
@@ -258,7 +258,7 @@ Now, we need to run a query to calculate the nearest neighbors for a given set o
 ```

 {#Nx.Tensor<
-   s64[2278][6]
+   s32[2278][6]
   EXLA.Backend
   [
     [0, 1367, 370, 119, 1158, 1423],
@@ -497,7 +497,7 @@ x = Scholar.Preprocessing.StandardScaler.fit_transform(x)
   ]
 >,
 #Nx.Tensor<
-  s64[39644][1]
+  s32[39644][1]
   EXLA.Backend
   [
     [593],
diff --git a/test/scholar/neighbors/kd_tree_test.exs b/test/scholar/neighbors/kd_tree_test.exs
index bf0c9f63..f90d9468 100644
--- a/test/scholar/neighbors/kd_tree_test.exs
+++ b/test/scholar/neighbors/kd_tree_test.exs
@@ -65,7 +65,7 @@ defmodule Scholar.Neighbors.KDTreeTest do
      kdtree = KDTree.fit(x())
      {indices, distances} = KDTree.predict(kdtree, x_pred())

-      assert indices == Nx.tensor([[0, 6, 4], [5, 2, 9], [0, 9, 2], [5, 2, 7]])
+      assert indices == Nx.tensor([[0, 6, 4], [5, 2, 9], [0, 9, 2], [5, 2, 7]], type: :s64)

      assert_all_close(
        distances,
@@ -82,7 +82,7 @@ defmodule Scholar.Neighbors.KDTreeTest do
      kdtree = KDTree.fit(x(), metric: {:minkowski, 1.5})
      {indices, distances} = KDTree.predict(kdtree, x_pred())

-      assert indices == Nx.tensor([[0, 6, 2], [5, 2, 9], [0, 9, 2], [5, 2, 7]])
+      assert indices == Nx.tensor([[0, 6, 2], [5, 2, 9], [0, 9, 2], [5, 2, 7]], type: :s64)

      assert_all_close(
        distances,
@@ -99,7 +99,8 @@ defmodule Scholar.Neighbors.KDTreeTest do
      kdtree = KDTree.fit(x(), num_neighbors: 4)
      {indices, distances} = KDTree.predict(kdtree, x_pred())

-      assert indices == Nx.tensor([[0, 6, 4, 2], [5, 2, 9, 0], [0, 9, 2, 5], [5, 2, 7, 4]])
+      assert indices ==
+               Nx.tensor([[0, 6, 4, 2], [5, 2, 9, 0], [0, 9, 2, 5], [5, 2, 7, 4]], type: :s64)

      assert_all_close(
        distances,
@@ -116,7 +117,8 @@ defmodule Scholar.Neighbors.KDTreeTest do
      kdtree = KDTree.fit(x() |> Nx.as_type(:f64), num_neighbors: 4)
      {indices, distances} = KDTree.predict(kdtree, x_pred())

-      assert indices == Nx.tensor([[0, 6, 4, 2], [5, 2, 9, 0], [0, 9, 2, 5], [5, 2, 7, 4]])
+      assert indices ==
+               Nx.tensor([[0, 6, 4, 2], [5, 2, 9, 0], [0, 9, 2, 5], [5, 2, 7, 4]], type: :s64)

      assert_all_close(
        distances,