Commit: Rename

nathanielsimard committed Jul 18, 2024
1 parent 0761717 commit 5729849
Showing 57 changed files with 102 additions and 89 deletions.
18 changes: 9 additions & 9 deletions crates/cubecl-core/src/codegen/execution.rs
@@ -1,5 +1,5 @@
use crate::compute::{CubeCount, KernelTask};
-use crate::frontend::TensorHandle;
+use crate::frontend::TensorHandleRef;
use crate::ir::Elem;
use crate::pod::CubeElement;
use crate::{calculate_cube_count_elemwise, Kernel, Runtime, SUBCUBE_DIM_APPROX};
@@ -17,8 +17,8 @@ pub struct Execution<'h, K, R: Runtime, Scalars> {
scalars: Scalars,
client: ComputeClient<R::Server, R::Channel>,
kernel: K,
-inputs: &'h [TensorHandle<'h, R>],
-outputs: &'h [TensorHandle<'h, R>],
+inputs: &'h [TensorHandleRef<'h, R>],
+outputs: &'h [TensorHandleRef<'h, R>],
}

impl<'h, K, R: Runtime> Execution<'h, K, R, ()> {
@@ -36,7 +36,7 @@ impl<'h, K, R: Runtime> Execution<'h, K, R, ()> {
}

#[allow(unused)]
-pub fn inputs(self, inputs: &'h [TensorHandle<'h, R>]) -> Execution<'h, K, R, ()> {
+pub fn inputs(self, inputs: &'h [TensorHandleRef<'h, R>]) -> Execution<'h, K, R, ()> {
Execution {
scalars: self.scalars,
client: self.client,
@@ -46,7 +46,7 @@ impl<'h, K, R: Runtime> Execution<'h, K, R, ()> {
}
}

-pub fn outputs(self, outputs: &'h [TensorHandle<'h, R>]) -> Execution<'h, K, R, ()> {
+pub fn outputs(self, outputs: &'h [TensorHandleRef<'h, R>]) -> Execution<'h, K, R, ()> {
Execution {
scalars: self.scalars,
client: self.client,
@@ -188,8 +188,8 @@ where

#[allow(clippy::too_many_arguments)]
fn execute_dynamic<R, K, E1, E2, E3>(
-inputs: &[TensorHandle<R>],
-outputs: &[TensorHandle<R>],
+inputs: &[TensorHandleRef<R>],
+outputs: &[TensorHandleRef<R>],
scalars_1: Option<&[E1]>,
scalars_2: Option<&[E2]>,
scalars_3: Option<&[E3]>,
@@ -225,8 +225,8 @@ struct ExecuteSettings<R: Runtime> {
}

fn execute_settings<'a, R: Runtime, E1: CubeElement, E2: CubeElement, E3: CubeElement>(
-inputs: &'a [TensorHandle<R>],
-outputs: &'a [TensorHandle<R>],
+inputs: &'a [TensorHandleRef<R>],
+outputs: &'a [TensorHandleRef<R>],
scalars_1: Option<&[E1]>,
scalars_2: Option<&[E2]>,
scalars_3: Option<&[E3]>,
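For callers, the net effect of these hunks is that the `Execution` builder now borrows slices of `TensorHandleRef` instead of the old borrowed `TensorHandle`. A minimal sketch of a post-rename call site; only the `inputs`/`outputs` signatures are shown in the diff, so the `Execution::start` constructor and terminal `execute` call below are assumptions:

```rust
use cubecl_core::frontend::TensorHandleRef;
use cubecl_core::Runtime;

// A minimal sketch, assuming `Execution::start(kernel, client)` and an
// `execute(...)` terminal call exist (neither appears in this diff).
fn launch_elemwise<'h, R: Runtime>(
    inputs: &'h [TensorHandleRef<'h, R>],
    outputs: &'h [TensorHandleRef<'h, R>],
) {
    // Execution::start(kernel, client) // hypothetical constructor
    //     .inputs(inputs)              // takes &'h [TensorHandleRef<'h, R>]
    //     .outputs(outputs)            // takes &'h [TensorHandleRef<'h, R>]
    //     .execute(...);               // hypothetical terminal call
    let _ = (inputs, outputs);
}
```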
10 changes: 5 additions & 5 deletions crates/cubecl-core/src/compute/launcher.rs
@@ -1,8 +1,8 @@
use crate::compute::{CubeCount, KernelTask};
use crate::ir::{Elem, FloatKind, IntKind};
-use crate::prelude::ArrayHandle;
+use crate::prelude::ArrayHandleRef;
use crate::KernelSettings;
-use crate::{calculate_num_elems_dyn_rank, frontend::TensorHandle, Kernel, Runtime};
+use crate::{calculate_num_elems_dyn_rank, frontend::TensorHandleRef, Kernel, Runtime};
use bytemuck::NoUninit;
use cubecl_runtime::client::ComputeClient;
use cubecl_runtime::server::Binding;
@@ -24,12 +24,12 @@ pub struct KernelLauncher<R: Runtime> {

impl<R: Runtime> KernelLauncher<R> {
/// Register a tensor to be launched.
-pub fn register_tensor(&mut self, tensor: &TensorHandle<'_, R>) {
+pub fn register_tensor(&mut self, tensor: &TensorHandleRef<'_, R>) {
self.tensors.push(tensor);
}

/// Register an array to be launched.
-pub fn register_array(&mut self, array: &ArrayHandle<'_, R>) {
+pub fn register_array(&mut self, array: &ArrayHandleRef<'_, R>) {
self.tensors.push(&array.as_tensor());
}

@@ -154,7 +154,7 @@ pub enum ScalarState<T> {

impl<R: Runtime> TensorState<R> {
/// Push a new tensor to the state.
-pub fn push(&mut self, tensor: &TensorHandle<'_, R>) {
+pub fn push(&mut self, tensor: &TensorHandleRef<'_, R>) {
if let TensorState::Empty = self {
*self = TensorState::Some {
bindings: Vec::with_capacity(1),
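The launcher mirrors the same rename. A sketch of registering kernel arguments; the `register_*` signatures come from the hunks above, while the `cubecl_core::compute::KernelLauncher` import path is assumed from this file's location:

```rust
use cubecl_core::compute::KernelLauncher; // path assumed from src/compute/launcher.rs
use cubecl_core::frontend::TensorHandleRef;
use cubecl_core::prelude::ArrayHandleRef;
use cubecl_core::Runtime;

fn register_args<R: Runtime>(
    launcher: &mut KernelLauncher<R>,
    tensor: &TensorHandleRef<'_, R>,
    array: &ArrayHandleRef<'_, R>,
) {
    launcher.register_tensor(tensor); // now takes &TensorHandleRef
    launcher.register_array(array);   // converted internally via array.as_tensor()
}
```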
16 changes: 8 additions & 8 deletions crates/cubecl-core/src/frontend/element/array.rs
@@ -13,7 +13,7 @@ use crate::{

use super::{
ArgSettings, CubePrimitive, ExpandElement, ExpandElementBaseInit, ExpandElementTyped,
-LaunchArg, LaunchArgExpand, TensorHandle, UInt,
+LaunchArg, LaunchArgExpand, TensorHandleRef, UInt,
};

/// A contiguous array of elements.
@@ -142,7 +142,7 @@ impl<C: CubePrimitive> LaunchArgExpand for Array<C> {
}

/// Tensor representation with a reference to the [server handle](cubecl_runtime::server::Handle).
-pub struct ArrayHandle<'a, R: Runtime> {
+pub struct ArrayHandleRef<'a, R: Runtime> {
pub handle: &'a cubecl_runtime::server::Handle<R::Server>,
pub length: [usize; 1],
}
@@ -151,7 +151,7 @@ pub enum ArrayArg<'a, R: Runtime> {
/// The array is passed with an array handle.
Handle {
/// The array handle.
-handle: ArrayHandle<'a, R>,
+handle: ArrayHandleRef<'a, R>,
/// The vectorization factor.
vectorization_factor: u8,
},
@@ -209,7 +209,7 @@ impl<'a, R: Runtime> ArrayArg<'a, R> {
/// factor of 1.
pub fn new(handle: &'a cubecl_runtime::server::Handle<R::Server>, length: usize) -> Self {
ArrayArg::Handle {
-handle: ArrayHandle::new(handle, length),
+handle: ArrayHandleRef::new(handle, length),
vectorization_factor: 1,
}
}
@@ -220,24 +220,24 @@ impl<'a, R: Runtime> ArrayArg<'a, R> {
length: usize,
) -> Self {
ArrayArg::Handle {
-handle: ArrayHandle::new(handle, length),
+handle: ArrayHandleRef::new(handle, length),
vectorization_factor,
}
}
}

-impl<'a, R: Runtime> ArrayHandle<'a, R> {
+impl<'a, R: Runtime> ArrayHandleRef<'a, R> {
pub fn new(handle: &'a cubecl_runtime::server::Handle<R::Server>, length: usize) -> Self {
Self {
handle,
length: [length],
}
}

-pub fn as_tensor(&self) -> TensorHandle<'_, R> {
+pub fn as_tensor(&self) -> TensorHandleRef<'_, R> {
let shape = &self.length;

-TensorHandle {
+TensorHandleRef {
handle: self.handle,
strides: &[1],
shape,
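Both `ArrayHandleRef::new` and `as_tensor` are fully visible above, so their composition can be sketched directly; only the origin of the server `Handle` (some earlier `ComputeClient` allocation) is assumed:

```rust
use cubecl_core::prelude::{ArrayHandleRef, Runtime};
use cubecl_runtime::server::Handle;

fn as_rank_one<R: Runtime>(handle: &Handle<R::Server>) {
    // Wrap a 1024-element buffer, then view it as a rank-1 tensor.
    let array = ArrayHandleRef::<R>::new(handle, 1024);
    let tensor = array.as_tensor();
    assert_eq!(tensor.shape, &[1024]); // `shape` borrows `length`
    assert_eq!(tensor.strides, &[1]);  // contiguous by construction
}
```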
8 changes: 4 additions & 4 deletions crates/cubecl-core/src/frontend/element/tensor.rs
@@ -53,7 +53,7 @@ impl<C: CubePrimitive> LaunchArg for Tensor<C> {
/// Tensor representation with a reference to the [server handle](cubecl_runtime::server::Handle),
/// the strides and the shape.
#[derive(new)]
-pub struct TensorHandle<'a, R: Runtime> {
+pub struct TensorHandleRef<'a, R: Runtime> {
pub handle: &'a cubecl_runtime::server::Handle<R::Server>,
pub strides: &'a [usize],
pub shape: &'a [usize],
@@ -64,7 +64,7 @@ pub enum TensorArg<'a, R: Runtime> {
/// The tensor is passed with a tensor handle.
Handle {
/// The tensor handle.
-handle: TensorHandle<'a, R>,
+handle: TensorHandleRef<'a, R>,
/// The vectorization factor.
vectorization_factor: u8,
},
@@ -86,7 +86,7 @@ impl<'a, R: Runtime> TensorArg<'a, R> {
shape: &'a [usize],
) -> Self {
Self::Handle {
-handle: TensorHandle::new(handle, strides, shape),
+handle: TensorHandleRef::new(handle, strides, shape),
vectorization_factor: 1,
}
}
@@ -98,7 +98,7 @@ impl<'a, R: Runtime> TensorArg<'a, R> {
shape: &'a [usize],
) -> Self {
Self::Handle {
-handle: TensorHandle::new(handle, strides, shape),
+handle: TensorHandleRef::new(handle, strides, shape),
vectorization_factor: factor,
}
}
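A sketch of building a kernel argument from raw parts after the rename. This hunk collapses the constructor's name, so `new` is assumed by analogy with `ArrayArg::new` in array.rs; per the diff it wraps the borrowed parts in a `TensorHandleRef` with a vectorization factor of 1:

```rust
use cubecl_core::prelude::{Runtime, TensorArg};
use cubecl_runtime::server::Handle;

fn arg_from_parts<'a, R: Runtime>(
    handle: &'a Handle<R::Server>,
    strides: &'a [usize],
    shape: &'a [usize],
) -> TensorArg<'a, R> {
    // Assumed constructor name (`new`); only its parameter list is visible above.
    TensorArg::new(handle, strides, shape)
}
```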
4 changes: 2 additions & 2 deletions crates/cubecl-core/src/prelude.rs
@@ -11,8 +11,8 @@ pub use crate::runtime::Runtime;

/// Elements
pub use crate::frontend::{
-Array, ArrayHandle, Bool, Float, LaunchArg, Slice, SliceMut, Tensor, TensorArg, UInt, F16, F32,
-F64, I32, I64,
+Array, ArrayHandleRef, Bool, Float, LaunchArg, Slice, SliceMut, Tensor, TensorArg, UInt, F16,
+F32, F64, I32, I64,
};
pub use crate::pod::CubeElement;

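For downstream code, the prelude change is a one-name migration at the import site:

```rust
// Before this commit:
//     use cubecl_core::prelude::ArrayHandle;
// After it, only the re-exported name changes:
use cubecl_core::prelude::ArrayHandleRef;
```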
2 changes: 1 addition & 1 deletion crates/cubecl-cuda/Cargo.toml
@@ -37,7 +37,7 @@ derive-new = { workspace = true }
cubecl-core = { path = "../cubecl-core", version = "0.1.0", features = [
"export_tests",
] }
-cubecl-lac = { path = "../cubecl-lac", version = "0.1.0", features = [
+cubecl-linalg = { path = "../cubecl-linalg", version = "0.1.0", features = [
"export_tests",
] }

2 changes: 1 addition & 1 deletion crates/cubecl-cuda/src/lib.rs
@@ -16,5 +16,5 @@ mod tests {
pub type TestRuntime = crate::CudaRuntime;

cubecl_core::testgen_all!();
-cubecl_lac::testgen_all!();
+cubecl_linalg::testgen_all!();
}
crates/{cubecl-lac → cubecl-linalg}/Cargo.toml
@@ -8,9 +8,9 @@ description = "CubeCL Linear Algebra Components"
edition.workspace = true
keywords = []
license.workspace = true
name = "cubecl-lac"
name = "cubecl-linalg"
readme.workspace = true
repository = "https://github.com/tracel-ai/cubecl/tree/main/cubecl-lac"
repository = "https://github.com/tracel-ai/cubecl/tree/main/cubecl-linalg"
version.workspace = true

[features]
File renamed without changes.
@@ -69,7 +69,7 @@ fn get_dims<F: Float>(lhs: &Tensor<F>, rhs: &Tensor<F>) -> Dimensions {
let k = lhs.shape(second_dim);
let n = rhs.shape(second_dim);

-Dimensions { m: m, k, n }
+Dimensions { m, k, n }
}

#[cube]
@@ -20,6 +20,7 @@ pub(crate) trait BlockLoader<F: Float, FC: Float>: Send + Sync + 'static {

#[cube]
pub(crate) trait BlockWriter<F: Float>: Send + Sync + 'static {
+#[allow(clippy::too_many_arguments)]
fn write_output(
out: &mut Tensor<F>,
accumulator_sm: SharedMemory<F>,
@@ -98,9 +98,7 @@ fn compute_tile<F: Float, FC: Float>(
pub mod tests {
use crate::matmul::{
cmma::base::{make_accumulators, SharedMemoriesExpand},
-test_utils::{
-assert_equals, cmma_available, create_empty, range_tensor_f16,
-},
+test_utils::{assert_equals, cmma_available, create_empty, range_tensor_f16},
};

use super::*;
File renamed without changes.
@@ -10,24 +10,24 @@ use crate::{
base::cmma_kernel,
config::{cmma_cube_count, cmma_cube_dim, CmmaConfig, CmmaLaunchConfig},
},
-tensor::{MatrixLayout, Tensor},
+tensor::{MatrixLayout, TensorHandle},
};

/// Matrix multiplication using tiling 2d algorithm
pub fn matmul_cmma<R: Runtime, F: Float>(
-lhs: Tensor<R, F>,
-rhs: Tensor<R, F>,
-out: Tensor<R, F>,
+lhs: TensorHandle<R, F>,
+rhs: TensorHandle<R, F>,
+out: TensorHandle<R, F>,
device: &R::Device,
-) -> Tensor<R, F> {
+) -> TensorHandle<R, F> {
let rank = lhs.rank();
let m = lhs.shape[rank - 2];
let k = lhs.shape[rank - 1];
let n = rhs.shape[rank - 1];

let client = R::client(device);

-let check_layout = |tensor: &Tensor<R, F>| match tensor.matrix_layout() {
+let check_layout = |tensor: &TensorHandle<R, F>| match tensor.matrix_layout() {
MatrixLayout::Contiguous => {}
MatrixLayout::MildlyPermuted {
transposed: _,
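Note the contrast with cubecl-core here: linalg's `TensorHandle<R, F>` owns its buffer, shape, and strides (see the test-utils hunk below), unlike core's borrowed `TensorHandleRef`. A sketch of a call into the renamed crate; the `cubecl_linalg::matmul::matmul_cmma` and `cubecl_linalg::tensor::TensorHandle` paths are assumptions extrapolated from this file's crate-internal imports:

```rust
use cubecl_core::frontend::F32;
use cubecl_core::Runtime;
use cubecl_linalg::tensor::TensorHandle; // public visibility assumed

fn product<R: Runtime>(
    lhs: TensorHandle<R, F32>,
    rhs: TensorHandle<R, F32>,
    out: TensorHandle<R, F32>,
    device: &R::Device,
) -> TensorHandle<R, F32> {
    // Checks rank/layout internally, launches the CMMA kernel, and returns
    // (presumably) `out`. Module path assumed.
    cubecl_linalg::matmul::matmul_cmma(lhs, rhs, out, device)
}
```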
File renamed without changes.
@@ -1,6 +1,6 @@
use std::marker::PhantomData;

-use crate::{matmul::test_utils::range_tensor_with_factor, tensor::Tensor};
+use crate::{matmul::test_utils::range_tensor_with_factor, tensor::TensorHandle};
use cubecl_core::{frontend::F32, CubeElement, Runtime};

use super::{
@@ -78,9 +78,11 @@ impl MatmulTestCase {
return;
}

-let tensor_1 = range_tensor_with_factor::<R>(self.batch, self.m, self.k, self.factor, device);
-let tensor_2 = range_tensor_with_factor::<R>(self.batch, self.k, self.n, self.factor, device);
-let out = Tensor {
+let tensor_1 =
+    range_tensor_with_factor::<R>(self.batch, self.m, self.k, self.factor, device);
+let tensor_2 =
+    range_tensor_with_factor::<R>(self.batch, self.k, self.n, self.factor, device);
+let out = TensorHandle {
handle: create_empty::<R>(self.batch * self.m, self.n, device),
shape: vec![self.batch, self.m, self.n],
strides: vec![self.m * self.n, self.n, 1],
File renamed without changes.