diff --git a/src/drivers/fs/virtio_fs.rs b/src/drivers/fs/virtio_fs.rs
index f4502a4608..d5c401db7e 100644
--- a/src/drivers/fs/virtio_fs.rs
+++ b/src/drivers/fs/virtio_fs.rs
@@ -13,8 +13,9 @@ use crate::drivers::virtio::error::VirtioFsError;
 use crate::drivers::virtio::transport::mmio::{ComCfg, IsrStatus, NotifCfg};
 #[cfg(feature = "pci")]
 use crate::drivers::virtio::transport::pci::{ComCfg, IsrStatus, NotifCfg};
+use crate::drivers::virtio::virtqueue::split::SplitVq;
 use crate::drivers::virtio::virtqueue::{
-	AsSliceU8, BuffSpec, BufferToken, Bytes, Virtq, VqIndex, VqSize, VqType,
+	AsSliceU8, BuffSpec, BufferToken, Bytes, Virtq, VqIndex, VqSize,
 };
 use crate::fs::fuse::{self, FuseInterface};
@@ -37,7 +38,7 @@ pub(crate) struct VirtioFsDriver {
 	pub(super) com_cfg: ComCfg,
 	pub(super) isr_stat: IsrStatus,
 	pub(super) notif_cfg: NotifCfg,
-	pub(super) vqueues: Vec<Rc<Virtq>>,
+	pub(super) vqueues: Vec<Rc<dyn Virtq>>,
 	pub(super) ready_queue: Vec<BufferToken>,
 	pub(super) irq: InterruptLine,
 }
@@ -126,23 +127,21 @@ impl VirtioFsDriver {
 		// create the queues and tell device about them
 		for i in 0..vqnum as u16 {
-			let vq = Virtq::new(
+			let vq = SplitVq::new(
 				&mut self.com_cfg,
 				&self.notif_cfg,
 				VqSize::from(VIRTIO_MAX_QUEUE_SIZE),
-				VqType::Split,
 				VqIndex::from(i),
 				self.dev_cfg.features.into(),
-			);
+			)
+			.unwrap();
 			self.vqueues.push(Rc::new(vq));
 		}

 		let cmd_spec = Some(BuffSpec::Single(Bytes::new(64 * 1024 + 128).unwrap()));
 		let rsp_spec = Some(BuffSpec::Single(Bytes::new(64 * 1024 + 128).unwrap()));

-		if let Ok(buff_tkn) =
-			self.vqueues[1].prep_buffer(Rc::clone(&self.vqueues[1]), cmd_spec, rsp_spec)
-		{
+		if let Ok(buff_tkn) = self.vqueues[1].clone().prep_buffer(cmd_spec, rsp_spec) {
 			self.ready_queue.push(buff_tkn);
 			// At this point the device is "live"
 			self.com_cfg.drv_ok();
diff --git a/src/drivers/net/virtio_mmio.rs b/src/drivers/net/virtio_mmio.rs
index a954042965..2ebc5b0073 100644
--- a/src/drivers/net/virtio_mmio.rs
+++ b/src/drivers/net/virtio_mmio.rs
@@ -139,12 +139,12 @@ impl VirtioNetDriver {
 			notif_cfg,
 			ctrl_vq: CtrlQueue::new(None),
 			recv_vqs: RxQueues::new(
-				Vec::<Rc<Virtq>>::new(),
+				Vec::<Rc<dyn Virtq>>::new(),
 				Rc::new(RefCell::new(VecDeque::new())),
 				false,
 			),
 			send_vqs: TxQueues::new(
-				Vec::<Rc<Virtq>>::new(),
+				Vec::<Rc<dyn Virtq>>::new(),
 				Rc::new(RefCell::new(VecDeque::new())),
 				Vec::new(),
 				false,
diff --git a/src/drivers/net/virtio_net.rs b/src/drivers/net/virtio_net.rs
index 818b9230df..a94f4e73fe 100644
--- a/src/drivers/net/virtio_net.rs
+++ b/src/drivers/net/virtio_net.rs
@@ -30,8 +30,10 @@ use crate::drivers::net::NetworkDriver;
 use crate::drivers::virtio::transport::mmio::{ComCfg, IsrStatus, NotifCfg};
 #[cfg(feature = "pci")]
 use crate::drivers::virtio::transport::pci::{ComCfg, IsrStatus, NotifCfg};
+use crate::drivers::virtio::virtqueue::packed::PackedVq;
+use crate::drivers::virtio::virtqueue::split::SplitVq;
 use crate::drivers::virtio::virtqueue::{
-	BuffSpec, BufferToken, Bytes, Transfer, Virtq, VqIndex, VqSize, VqType,
+	BuffSpec, BufferToken, Bytes, Transfer, Virtq, VqIndex, VqSize,
 };
 use crate::executor::device::{RxToken, TxToken};
@@ -77,10 +79,10 @@ impl Default for VirtioNetHdr {
 	}
 }

-pub struct CtrlQueue(Option<Rc<Virtq>>);
+pub struct CtrlQueue(Option<Rc<dyn Virtq>>);

 impl CtrlQueue {
-	pub fn new(vq: Option<Rc<Virtq>>) -> Self {
+	pub fn new(vq: Option<Rc<dyn Virtq>>) -> Self {
 		CtrlQueue(vq)
 	}
 }
@@ -153,14 +155,14 @@ enum MqCmd {
 }

 pub struct RxQueues {
-	vqs: Vec<Rc<Virtq>>,
+	vqs: Vec<Rc<dyn Virtq>>,
 	poll_queue: Rc<RefCell<VecDeque<Transfer>>>,
 	is_multi: bool,
 }

 impl RxQueues {
 	pub fn new(
-		vqs: Vec<Rc<Virtq>>,
+		vqs: Vec<Rc<dyn Virtq>>,
 		poll_queue: Rc<RefCell<VecDeque<Transfer>>>,
 		is_multi: bool,
 	) -> Self {
@@ -188,10 +190,7 @@ impl RxQueues {
 	/// Adds a given queue to the underlying vector and populates the queue with RecvBuffers.
 	///
 	/// Queues are all populated according to Virtio specification v1.1. - 5.1.6.3.1
-	fn add(&mut self, vq: Virtq, dev_cfg: &NetDevCfg) {
-		// Safe virtqueue
-		let rc_vq = Rc::new(vq);
-		let vq = &rc_vq;
+	fn add(&mut self, vq: Rc<dyn Virtq>, dev_cfg: &NetDevCfg) {
 		let num_buff: u16 = vq.size().into();

 		let rx_size = if dev_cfg
@@ -208,7 +207,7 @@ impl RxQueues {
 		// let spec = BuffSpec::Single(Bytes::new(rx_size).unwrap());
 		for _ in 0..num_buff {
-			let buff_tkn = match vq.prep_buffer(Rc::clone(vq), None, Some(spec.clone())) {
+			let buff_tkn = match vq.clone().prep_buffer(None, Some(spec.clone())) {
 				Ok(tkn) => tkn,
 				Err(_vq_err) => {
 					error!("Setup of network queue failed, which should not happen!");
@@ -225,7 +224,7 @@ impl RxQueues {
 		}

 		// Safe virtqueue
-		self.vqs.push(rc_vq);
+		self.vqs.push(vq);

 		if self.vqs.len() > 1 {
 			self.is_multi = true;
@@ -277,7 +276,7 @@ impl RxQueues {
 /// Structure which handles transmission of packets and delegation
 /// to the respective queue structures.
 pub struct TxQueues {
-	vqs: Vec<Rc<Virtq>>,
+	vqs: Vec<Rc<dyn Virtq>>,
 	poll_queue: Rc<RefCell<VecDeque<Transfer>>>,
 	ready_queue: Vec<BufferToken>,
 	/// Indicates, whether the Driver/Device are using multiple
@@ -287,7 +286,7 @@ pub struct TxQueues {

 impl TxQueues {
 	pub fn new(
-		vqs: Vec<Rc<Virtq>>,
+		vqs: Vec<Rc<dyn Virtq>>,
 		poll_queue: Rc<RefCell<VecDeque<Transfer>>>,
 		ready_queue: Vec<BufferToken>,
 		is_multi: bool,
@@ -331,9 +330,9 @@ impl TxQueues {
 		}
 	}

-	fn add(&mut self, vq: Virtq, dev_cfg: &NetDevCfg) {
+	fn add(&mut self, vq: Rc<dyn Virtq>, dev_cfg: &NetDevCfg) {
 		// Safe virtqueue
-		self.vqs.push(Rc::new(vq));
+		self.vqs.push(vq.clone());
 		if self.vqs.len() == 1 {
 			// Unwrapping is safe, as one virtq will be definitely in the vector.
 			let vq = self.vqs.first().unwrap();
@@ -359,7 +358,8 @@ impl TxQueues {

 		for _ in 0..num_buff {
 			self.ready_queue.push(
-				vq.prep_buffer(Rc::clone(vq), Some(spec.clone()), None)
+				vq.clone()
+					.prep_buffer(Some(spec.clone()), None)
 					.unwrap()
 					.write_seq(Some(&VirtioNetHdr::default()), None::<&VirtioNetHdr>)
 					.unwrap(),
@@ -379,7 +379,8 @@ impl TxQueues {

 		for _ in 0..num_buff {
 			self.ready_queue.push(
-				vq.prep_buffer(Rc::clone(vq), Some(spec.clone()), None)
+				vq.clone()
+					.prep_buffer(Some(spec.clone()), None)
 					.unwrap()
 					.write_seq(Some(&VirtioNetHdr::default()), None::<&VirtioNetHdr>)
 					.unwrap(),
@@ -436,7 +437,7 @@ impl TxQueues {
 		// As usize is currently safe as the minimal usize is defined as 16bit in rust.
 		let spec = BuffSpec::Single(Bytes::new(len).unwrap());

-		match self.vqs[0].prep_buffer(Rc::clone(&self.vqs[0]), Some(spec), None) {
+		match self.vqs[0].clone().prep_buffer(Some(spec), None) {
 			Ok(tkn) => Some((tkn, 0)),
 			Err(_) => {
 				// Here it is possible if multiple queues are enabled to get another buffertoken from them!
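A minimal sketch of the new calling convention, for orientation (illustrative only, not part of the patch; `com_cfg`, `notif_cfg`, and `dev_features` stand in for the driver's transport state). `Virtq` is now a trait whose buffer methods take `self: Rc<Self>`, so callers clone the `Rc<dyn Virtq>` handle and invoke the method on the clone instead of passing a second `Rc` argument alongside `&self`:

    // Queue creation now goes through the concrete type and returns a Result.
    let vq: Rc<dyn Virtq> = Rc::new(
        SplitVq::new(
            &mut com_cfg,
            &notif_cfg,
            VqSize::from(VIRTIO_MAX_QUEUE_SIZE),
            VqIndex::from(0),
            dev_features,
        )
        .unwrap(),
    );

    // Old: vq.prep_buffer(Rc::clone(&vq), None, Some(spec))
    // New: the Rc itself is cloned and consumed by the call.
    let spec = BuffSpec::Single(Bytes::new(1514).unwrap()); // hypothetical MTU-sized buffer
    let buff_tkn = vq.clone().prep_buffer(None, Some(spec)).unwrap();

This removes the redundant `rc_self`/`master` parameter the enum-based API needed so that tokens could refer back to their origin queue.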
@@ -975,23 +976,27 @@ impl VirtioNetDriver { .features .is_feature(Features::VIRTIO_F_RING_PACKED) { - self.ctrl_vq = CtrlQueue(Some(Rc::new(Virtq::new( - &mut self.com_cfg, - &self.notif_cfg, - VqSize::from(VIRTIO_MAX_QUEUE_SIZE), - VqType::Packed, - VqIndex::from(self.num_vqs), - self.dev_cfg.features.into(), - )))); + self.ctrl_vq = CtrlQueue(Some(Rc::new( + PackedVq::new( + &mut self.com_cfg, + &self.notif_cfg, + VqSize::from(VIRTIO_MAX_QUEUE_SIZE), + VqIndex::from(self.num_vqs), + self.dev_cfg.features.into(), + ) + .unwrap(), + ))); } else { - self.ctrl_vq = CtrlQueue(Some(Rc::new(Virtq::new( - &mut self.com_cfg, - &self.notif_cfg, - VqSize::from(VIRTIO_MAX_QUEUE_SIZE), - VqType::Split, - VqIndex::from(self.num_vqs), - self.dev_cfg.features.into(), - )))); + self.ctrl_vq = CtrlQueue(Some(Rc::new( + SplitVq::new( + &mut self.com_cfg, + &self.notif_cfg, + VqSize::from(VIRTIO_MAX_QUEUE_SIZE), + VqIndex::from(self.num_vqs), + self.dev_cfg.features.into(), + ) + .unwrap(), + ))); } self.ctrl_vq.0.as_ref().unwrap().enable_notifs(); @@ -1038,57 +1043,57 @@ impl VirtioNetDriver { .features .is_feature(Features::VIRTIO_F_RING_PACKED) { - let vq = Virtq::new( + let vq = PackedVq::new( &mut self.com_cfg, &self.notif_cfg, VqSize::from(VIRTIO_MAX_QUEUE_SIZE), - VqType::Packed, VqIndex::from(2 * i), self.dev_cfg.features.into(), - ); + ) + .unwrap(); // Interrupt for receiving packets is wanted vq.enable_notifs(); - self.recv_vqs.add(vq, &self.dev_cfg); + self.recv_vqs.add(Rc::from(vq), &self.dev_cfg); - let vq = Virtq::new( + let vq = PackedVq::new( &mut self.com_cfg, &self.notif_cfg, VqSize::from(VIRTIO_MAX_QUEUE_SIZE), - VqType::Packed, VqIndex::from(2 * i + 1), self.dev_cfg.features.into(), - ); + ) + .unwrap(); // Interrupt for comunicating that a sended packet left, is not needed vq.disable_notifs(); - self.send_vqs.add(vq, &self.dev_cfg); + self.send_vqs.add(Rc::from(vq), &self.dev_cfg); } else { - let vq = Virtq::new( + let vq = SplitVq::new( &mut self.com_cfg, &self.notif_cfg, VqSize::from(VIRTIO_MAX_QUEUE_SIZE), - VqType::Split, VqIndex::from(2 * i), self.dev_cfg.features.into(), - ); + ) + .unwrap(); // Interrupt for receiving packets is wanted vq.enable_notifs(); - self.recv_vqs.add(vq, &self.dev_cfg); + self.recv_vqs.add(Rc::from(vq), &self.dev_cfg); - let vq = Virtq::new( + let vq = SplitVq::new( &mut self.com_cfg, &self.notif_cfg, VqSize::from(VIRTIO_MAX_QUEUE_SIZE), - VqType::Split, VqIndex::from(2 * i + 1), self.dev_cfg.features.into(), - ); + ) + .unwrap(); // Interrupt for comunicating that a sended packet left, is not needed vq.disable_notifs(); - self.send_vqs.add(vq, &self.dev_cfg); + self.send_vqs.add(Rc::from(vq), &self.dev_cfg); } } diff --git a/src/drivers/net/virtio_pci.rs b/src/drivers/net/virtio_pci.rs index 49a31c6783..e2b66c48e0 100644 --- a/src/drivers/net/virtio_pci.rs +++ b/src/drivers/net/virtio_pci.rs @@ -17,7 +17,6 @@ use crate::drivers::pci::{PciCommand, PciDevice}; use crate::drivers::virtio::error::{self, VirtioError}; use crate::drivers::virtio::transport::pci; use crate::drivers::virtio::transport::pci::{PciCap, UniCapsColl}; -use crate::drivers::virtio::virtqueue::Virtq; /// Virtio's network device configuration structure. /// See specification v1.1. 
- 5.1.4
@@ -147,13 +146,9 @@ impl VirtioNetDriver {
 			notif_cfg,

 			ctrl_vq: CtrlQueue::new(None),
-			recv_vqs: RxQueues::new(
-				Vec::<Rc<Virtq>>::new(),
-				Rc::new(RefCell::new(VecDeque::new())),
-				false,
-			),
+			recv_vqs: RxQueues::new(Vec::new(), Rc::new(RefCell::new(VecDeque::new())), false),
 			send_vqs: TxQueues::new(
-				Vec::<Rc<Virtq>>::new(),
+				Vec::new(),
 				Rc::new(RefCell::new(VecDeque::new())),
 				Vec::new(),
 				false,
diff --git a/src/drivers/virtio/virtqueue/mod.rs b/src/drivers/virtio/virtqueue/mod.rs
index 4477b7e913..9db89d0093 100644
--- a/src/drivers/virtio/virtqueue/mod.rs
+++ b/src/drivers/virtio/virtqueue/mod.rs
@@ -1,7 +1,7 @@
 //! This module contains Virtio's virtqueue.
 //!
 //! The virtqueue is available in two forms.
-//! [SplitVq] and [PackedVq].
+//! [split::SplitVq] and [packed::PackedVq].
 //! Both queues are wrapped inside an enum [Virtq] in
 //! order to provide a unified interface.
 //!
@@ -25,8 +25,6 @@ use align_address::Align;
 use zerocopy::AsBytes;

 use self::error::{BufferError, VirtqError};
-use self::packed::PackedVq;
-use self::split::SplitVq;
 #[cfg(not(feature = "pci"))]
 use super::transport::mmio::{ComCfg, NotifCfg};
 #[cfg(feature = "pci")]
@@ -92,12 +90,6 @@ impl From for u16 {
 	}
 }

-/// Enum that defines which virtqueue shall be created when used via the `Virtq::new()` function.
-pub enum VqType {
-	Packed,
-	Split,
-}
-
 /// The General Descriptor struct for both Packed and SplitVq.
 #[repr(C, align(16))]
 struct Descriptor {
 	address: u64,
 	len: u32,
 	buff_id: u16,
 	flags: u16,
 }

-/// The Virtq enum unifies access to the two different Virtqueue types
-/// [PackedVq] and [SplitVq].
+// Public interface of Virtq
+
+/// The Virtq trait unifies access to the two different Virtqueue types
+/// [packed::PackedVq] and [split::SplitVq].
 ///
-/// The enum provides a common interface for both types. Which in some case
+/// The trait provides a common interface for both types, which in some cases
 /// might not provide the complete feature set of each queue. Drivers who
 /// do need these features should refrain from providing support for both
 /// Virtqueue types and use the structs directly instead.
-pub enum Virtq {
-	Packed(PackedVq),
-	Split(SplitVq),
-}
-
-// Private Interface of the Virtq
-impl Virtq {
+#[allow(private_bounds)]
+pub trait Virtq: VirtqPrivate {
 	/// Entry function which the TransferTokens can use, when they are dispatching
 	/// themselves via their `Rc` reference
 	///
 	/// The `notif` parameter indicates if the driver wants to have a notification for this specific
 	/// transfer. This is only for performance optimization. As it is NOT ensured, that the device sees the
 	/// updated notification flags before finishing transfers!
-	fn dispatch(&self, tkn: TransferToken, notif: bool) {
-		match self {
-			Virtq::Packed(vq) => vq.dispatch(tkn, notif),
-			Virtq::Split(vq) => vq.dispatch(tkn, notif),
-		};
-	}
-}
-
-// Public Interface solely for page boundary checking and other convenience functions
-impl Virtq {
-	/// Allows to check, if a given structure crosses a physical page boundary.
-	/// Returns true, if the structure does NOT cross a boundary or crosses only
-	/// contiguous physical page boundaries.
-	///
-	/// Structures provided to the Queue must pass this test, otherwise the queue
-	/// currently panics.
-	pub fn check_bounds<T: AsSliceU8>(data: &T) -> bool {
-		let slice = data.as_slice_u8();
+	fn dispatch(&self, tkn: TransferToken, notif: bool);

-		let start_virt = ptr::from_ref(slice.first().unwrap()).addr();
-		let end_virt = ptr::from_ref(slice.last().unwrap()).addr();
-		let end_phy_calc = paging::virt_to_phys(VirtAddr::from(start_virt)) + (slice.len() - 1);
-		let end_phy = paging::virt_to_phys(VirtAddr::from(end_virt));
-
-		end_phy == end_phy_calc
-	}
-
-	/// Allows to check, if a given slice crosses a physical page boundary.
-	/// Returns true, if the slice does NOT cross a boundary or crosses only
-	/// contiguous physical page boundaries.
-	/// Slice MUST come from a boxed value. Otherwise the slice might be moved and
-	/// the test of this function is no longer valid.
-	///
-	/// This check is especially useful if one wants to check if slices
-	/// into which the queue will destructure a structure are valid for the queue.
-	///
-	/// Slices provided to the Queue must pass this test, otherwise the queue
-	/// currently panics.
-	pub fn check_bounds_slice(slice: &[u8]) -> bool {
-		let start_virt = ptr::from_ref(slice.first().unwrap()).addr();
-		let end_virt = ptr::from_ref(slice.last().unwrap()).addr();
-		let end_phy_calc = paging::virt_to_phys(VirtAddr::from(start_virt)) + (slice.len() - 1);
-		let end_phy = paging::virt_to_phys(VirtAddr::from(end_virt));
-
-		end_phy == end_phy_calc
-	}
-
-	/// Frees memory regions gained access to via `Transfer.ret_raw()`.
-	pub fn free_raw(ptr: *mut u8, len: usize) {
-		crate::mm::deallocate(VirtAddr::from(ptr as usize), len);
-	}
-}
-
-// Public interface of Virtq
-impl Virtq {
 	/// Enables interrupts for this virtqueue upon receiving a transfer
-	pub fn enable_notifs(&self) {
-		match self {
-			Virtq::Packed(vq) => vq.enable_notifs(),
-			Virtq::Split(vq) => vq.enable_notifs(),
-		}
-	}
+	fn enable_notifs(&self);

 	/// Disables interrupts for this virtqueue upon receiving a transfer
-	pub fn disable_notifs(&self) {
-		match self {
-			Virtq::Packed(vq) => vq.disable_notifs(),
-			Virtq::Split(vq) => vq.disable_notifs(),
-		}
-	}
+	fn disable_notifs(&self);

 	/// Checks if new used descriptors have been written by the device.
 	/// This activates the queue and polls the descriptor ring of the queue.
 	///
 	/// * `TransferTokens` which hold an `await_queue` will be placed into
 	/// these queues.
 	/// * All finished `TransferTokens` will have a state of `TransferState::Finished`.
@@ -204,166 +130,57 @@ impl Virtq {
-	pub fn poll(&self) {
-		match self {
-			Virtq::Packed(vq) => vq.poll(),
-			Virtq::Split(vq) => vq.poll(),
-		}
-	}
+	fn poll(&self);

-	/// Dispatches a batch of TransferTokens. The actual behaviour depends on the respective
-	/// virtqueue implementation. Please see the respective docs for details.
-	///
-	/// **INFO:**
-	/// Due to the missing HashMap implementation in the kernel, this function currently uses a nested
-	/// for-loop. The first iteration is over the number of dispatched tokens. Inside this loop, the
-	/// function iterates over a list of all already "used" virtqueues. If the given token belongs to an
-	/// existing queue it is inserted into the corresponding list of tokens, if it belongs to no queue,
-	/// a new entry in the "used" virtqueues list is made.
-	/// This procedure can possibly be very slow.
+	/// Dispatches a batch of transfer tokens. The buffers of the respective transfers are provided to the queue in
+	/// sequence. After the last buffer has been written, the queue marks the first buffer as available and triggers
+	/// a device notification if wanted by the device.
 	///
 	/// The `notif` parameter indicates if the driver wants to have a notification for this specific
 	/// transfer. This is only for performance optimization. As it is NOT ensured, that the device sees the
 	/// updated notification flags before finishing transfers!
-	pub fn dispatch_batch(tkns: Vec<TransferToken>, notif: bool) {
-		let mut used_vqs: Vec<(Rc<Virtq>, Vec<TransferToken>)> = Vec::new();
-
-		// Sort the TransferTokens depending on the queue they're coming from,
-		// then call dispatch_batch of that queue
-		for tkn in tkns {
-			let index = tkn.get_vq().index();
-			let mut used = false;
-			let mut index_used = 0usize;
-
-			for (pos, (vq, _)) in used_vqs.iter_mut().enumerate() {
-				if index == vq.index() {
-					index_used = pos;
-					used = true;
-					break;
-				}
-			}
-
-			if used {
-				let (_, tkn_lst) = &mut used_vqs[index_used];
-				tkn_lst.push(tkn);
-			} else {
-				let mut new_tkn_lst = Vec::new();
-				let vq = tkn.get_vq();
-				new_tkn_lst.push(tkn);
-
-				used_vqs.push((vq, new_tkn_lst))
-			}
-		}
-
-		for (vq_ref, tkn_lst) in used_vqs {
-			match vq_ref.as_ref() {
-				Virtq::Packed(vq) => vq.dispatch_batch(tkn_lst, notif),
-				Virtq::Split(vq) => vq.dispatch_batch(tkn_lst, notif),
-			}
-		}
-	}
+	fn dispatch_batch(&self, tkns: Vec<TransferToken>, notif: bool);

 	/// Dispatches a batch of TransferTokens. The Transfers will be placed into the `await_queue`
 	/// upon finish.
 	///
-	/// **INFO:**
-	/// Due to the missing HashMap implementation in the kernel, this function currently uses a nested
-	/// for-loop. The first iteration is over the number of dispatched tokens. Inside this loop, the
-	/// function iterates over a list of all already "used" virtqueues. If the given token belongs to an
-	/// existing queue it is inserted into the corresponding list of tokens, if it belongs to no queue,
-	/// a new entry in the "used" virtqueues list is made.
-	/// This procedure can possibly be very slow.
-	///
 	/// The `notif` parameter indicates if the driver wants to have a notification for this specific
 	/// transfer. This is only for performance optimization. As it is NOT ensured, that the device sees the
 	/// updated notification flags before finishing transfers!
-	pub fn dispatch_batch_await(
+	///
+	/// Dispatches a batch of transfer tokens. The buffers of the respective transfers are provided to the queue in
+	/// sequence. After the last buffer has been written, the queue marks the first buffer as available and triggers
+	/// a device notification if wanted by the device.
+	///
+	/// The tokens get a reference to the provided await_queue, where they will be placed upon finish.
+	fn dispatch_batch_await(
+		&self,
 		tkns: Vec<TransferToken>,
 		await_queue: Rc<RefCell<VecDeque<Transfer>>>,
 		notif: bool,
-	) {
-		let mut used_vqs: Vec<(Rc<Virtq>, Vec<TransferToken>)> = Vec::new();
-
-		// Sort the TransferTokens depending on the queue they're coming from,
-		// then call dispatch_batch of that queue
-		for tkn in tkns {
-			let index = tkn.get_vq().index();
-			let mut used = false;
-			let mut index_used = 0usize;
-
-			for (pos, (vq, _)) in used_vqs.iter_mut().enumerate() {
-				if index == vq.index() {
-					index_used = pos;
-					used = true;
-					break;
-				}
-			}
+	);

-			if used {
-				let (_, tkn_lst) = &mut used_vqs[index_used];
-				tkn_lst.push(tkn);
-			} else {
-				let mut new_tkn_lst = Vec::new();
-				let vq = tkn.get_vq();
-				new_tkn_lst.push(tkn);
-
-				used_vqs.push((vq, new_tkn_lst))
-			}
-		}
-
-		for (vq, tkn_lst) in used_vqs {
-			match vq.as_ref() {
-				Virtq::Packed(vq) => {
-					vq.dispatch_batch_await(tkn_lst, Rc::clone(&await_queue), notif)
-				}
-				Virtq::Split(vq) => {
-					vq.dispatch_batch_await(tkn_lst, Rc::clone(&await_queue), notif)
-				}
-			}
-		}
-	}
-
-	/// Creates a new Virtq of the specified (VqType)[VqType], (VqSize)[VqSize] and the (VqIndex)[VqIndex].
+	/// Creates a new Virtq of the specified [VqSize] and the [VqIndex].
 	/// The index represents the "ID" of the virtqueue.
 	/// Upon creation the virtqueue is "registered" at the device via the `ComCfg` struct.
 	///
 	/// Be aware, that devices define a maximum number of queues and a maximal size they can handle.
-	pub fn new(
+	fn new(
 		com_cfg: &mut ComCfg,
 		notif_cfg: &NotifCfg,
 		size: VqSize,
-		vq_type: VqType,
 		index: VqIndex,
 		feats: u64,
-	) -> Self {
-		match vq_type {
-			VqType::Packed => match PackedVq::new(com_cfg, notif_cfg, size, index, feats) {
-				Ok(packed_vq) => Virtq::Packed(packed_vq),
-				Err(_vq_error) => panic!("Currently panics if queue fails to be created"),
-			},
-			VqType::Split => match SplitVq::new(com_cfg, notif_cfg, size, index, feats) {
-				Ok(split_vq) => Virtq::Split(split_vq),
-				Err(_vq_error) => panic!("Currently panics if queue fails to be created"),
-			},
-		}
-	}
+	) -> Result<Self, VirtqError>
+	where
+		Self: Sized;

 	/// Returns the size of a Virtqueue. This represents the overall size and not the capacity the
 	/// queue currently has for new descriptors.
-	pub fn size(&self) -> VqSize {
-		match self {
-			Virtq::Packed(vq) => vq.size(),
-			Virtq::Split(vq) => vq.size(),
-		}
-	}
+	fn size(&self) -> VqSize;

 	// Returns the index (ID) of a Virtqueue.
-	pub fn index(&self) -> VqIndex {
-		match self {
-			Virtq::Packed(vq) => vq.index(),
-			Virtq::Split(vq) => vq.index(),
-		}
-	}
+	fn index(&self) -> VqIndex;

 	/// Provides the caller with a TransferToken. Fails upon multiple circumstances.
 	///
 	/// **INFO:**
 	/// * Data behind the respective raw pointers will NOT be deallocated. Under no circumstances.
 	/// * Caller is responsible for ensuring the raw pointers will remain valid from start till end of transfer.
 	///   * start: call of `fn prep_transfer_from_raw()`
-	///   * end: closing of [Transfer] via `Transfer.close()`.
+	///   * end: return of the [Transfer] via [TransferToken::dispatch_blocking] or its push to the [TransferToken::await_queue].
 	///   * In case the underlying BufferToken is reused, the raw pointers MUST still be valid all the time
 	///     BufferToken exists.
 	/// * Transfers created from these TransferTokens will ONLY allow to return a copy of the data.
@@ -427,15 +244,580 @@ impl Virtq {
 	/// ```
 	/// Then he must split the structure after the send part and provide the respective part via the send argument and the respective other
 	/// part via the recv argument.
- pub fn prep_transfer_from_raw( - &self, - rc_self: Rc, - send: Option<(*mut T, BuffSpec<'_>)>, - recv: Option<(*mut K, BuffSpec<'_>)>, - ) -> Result { - match self { - Virtq::Packed(vq) => vq.prep_transfer_from_raw(rc_self, send, recv), - Virtq::Split(vq) => vq.prep_transfer_from_raw(rc_self, send, recv), + fn prep_transfer_from_raw( + self: Rc, + send: Option<(&[u8], BuffSpec<'_>)>, + recv: Option<(&mut [u8], BuffSpec<'_>)>, + ) -> Result; + + /// The implementation of the method requires constraints that are incompatible with a trait object. + /// Because of this, we constrain it to static objects (via Sized) and call it from the implementation + /// of [Self::prep_buffer] inside the implementor. + fn prep_transfer_from_raw_static( + self: Rc, + send: Option<(&[u8], BuffSpec<'_>)>, + recv: Option<(&mut [u8], BuffSpec<'_>)>, + ) -> Result + where + Self: Sized + 'static, + { + match (send, recv) { + (None, None) => Err(VirtqError::BufferNotSpecified), + (Some((send_data, send_spec)), None) => { + match send_spec { + BuffSpec::Single(size) => { + // Buffer must have the right size + if send_data.len() != size.into() { + return Err(VirtqError::BufferSizeWrong(send_data.len())); + } + + let desc = match self + .mem_pool() + .pull_from_raw(Rc::clone(&self.mem_pool()), send_data) + { + Ok(desc) => desc, + Err(vq_err) => return Err(vq_err), + }; + + Ok(TransferToken { + state: TransferState::Ready, + buff_tkn: Some(BufferToken { + send_buff: Some(Buffer::Single { + desc_lst: vec![desc].into_boxed_slice(), + len: send_data.len(), + next_write: 0, + }), + recv_buff: None, + vq: self, + ret_send: false, + ret_recv: false, + reusable: false, + }), + await_queue: None, + }) + } + BuffSpec::Multiple(size_lst) => { + let mut desc_lst: Vec = Vec::with_capacity(size_lst.len()); + let mut index = 0usize; + + for byte in size_lst { + let end_index = index + usize::from(*byte); + let next_slice = match send_data.get(index..end_index) { + Some(slice) => slice, + None => return Err(VirtqError::BufferSizeWrong(send_data.len())), + }; + + match self + .mem_pool() + .pull_from_raw(Rc::clone(&self.mem_pool()), next_slice) + { + Ok(desc) => desc_lst.push(desc), + Err(vq_err) => return Err(vq_err), + }; + + // update the starting index for the next iteration + index += usize::from(*byte); + } + + Ok(TransferToken { + state: TransferState::Ready, + buff_tkn: Some(BufferToken { + send_buff: Some(Buffer::Multiple { + desc_lst: desc_lst.into_boxed_slice(), + len: send_data.len(), + next_write: 0, + }), + recv_buff: None, + vq: self, + ret_send: false, + ret_recv: false, + reusable: false, + }), + await_queue: None, + }) + } + BuffSpec::Indirect(size_lst) => { + let mut desc_lst: Vec = Vec::with_capacity(size_lst.len()); + let mut index = 0usize; + + for byte in size_lst { + let end_index = index + usize::from(*byte); + let next_slice = match send_data.get(index..end_index) { + Some(slice) => slice, + None => return Err(VirtqError::BufferSizeWrong(send_data.len())), + }; + + desc_lst.push( + self.mem_pool().pull_from_raw_untracked( + Rc::clone(&self.mem_pool()), + next_slice, + ), + ); + + // update the starting index for the next iteration + index += usize::from(*byte); + } + + let ctrl_desc = match self.create_indirect_ctrl(Some(&desc_lst), None) { + Ok(desc) => desc, + Err(vq_err) => return Err(vq_err), + }; + + Ok(TransferToken { + state: TransferState::Ready, + buff_tkn: Some(BufferToken { + send_buff: Some(Buffer::Indirect { + desc_lst: desc_lst.into_boxed_slice(), + ctrl_desc, + len: send_data.len(), + 
next_write: 0, + }), + recv_buff: None, + vq: self, + ret_send: false, + ret_recv: false, + reusable: false, + }), + await_queue: None, + }) + } + } + } + (None, Some((recv_data, recv_spec))) => { + match recv_spec { + BuffSpec::Single(size) => { + // Buffer must have the right size + if recv_data.len() != size.into() { + return Err(VirtqError::BufferSizeWrong(recv_data.len())); + } + + let desc = match self + .mem_pool() + .pull_from_raw(Rc::clone(&self.mem_pool()), recv_data) + { + Ok(desc) => desc, + Err(vq_err) => return Err(vq_err), + }; + + Ok(TransferToken { + state: TransferState::Ready, + buff_tkn: Some(BufferToken { + send_buff: None, + recv_buff: Some(Buffer::Single { + desc_lst: vec![desc].into_boxed_slice(), + len: recv_data.len(), + next_write: 0, + }), + vq: self, + ret_send: false, + ret_recv: false, + reusable: false, + }), + await_queue: None, + }) + } + BuffSpec::Multiple(size_lst) => { + let mut desc_lst: Vec = Vec::with_capacity(size_lst.len()); + let mut index = 0usize; + + for byte in size_lst { + let end_index = index + usize::from(*byte); + let next_slice = match recv_data.get(index..end_index) { + Some(slice) => slice, + None => return Err(VirtqError::BufferSizeWrong(recv_data.len())), + }; + + match self + .mem_pool() + .pull_from_raw(Rc::clone(&self.mem_pool()), next_slice) + { + Ok(desc) => desc_lst.push(desc), + Err(vq_err) => return Err(vq_err), + }; + + // update the starting index for the next iteration + index += usize::from(*byte); + } + + Ok(TransferToken { + state: TransferState::Ready, + buff_tkn: Some(BufferToken { + send_buff: None, + recv_buff: Some(Buffer::Multiple { + desc_lst: desc_lst.into_boxed_slice(), + len: recv_data.len(), + next_write: 0, + }), + vq: self, + ret_send: false, + ret_recv: false, + reusable: false, + }), + await_queue: None, + }) + } + BuffSpec::Indirect(size_lst) => { + let mut desc_lst: Vec = Vec::with_capacity(size_lst.len()); + let mut index = 0usize; + + for byte in size_lst { + let end_index = index + usize::from(*byte); + let next_slice = match recv_data.get(index..end_index) { + Some(slice) => slice, + None => return Err(VirtqError::BufferSizeWrong(recv_data.len())), + }; + + desc_lst.push( + self.mem_pool().pull_from_raw_untracked( + Rc::clone(&self.mem_pool()), + next_slice, + ), + ); + + // update the starting index for the next iteration + index += usize::from(*byte); + } + + let ctrl_desc = match self.create_indirect_ctrl(None, Some(&desc_lst)) { + Ok(desc) => desc, + Err(vq_err) => return Err(vq_err), + }; + + Ok(TransferToken { + state: TransferState::Ready, + buff_tkn: Some(BufferToken { + send_buff: None, + recv_buff: Some(Buffer::Indirect { + desc_lst: desc_lst.into_boxed_slice(), + ctrl_desc, + len: recv_data.len(), + next_write: 0, + }), + vq: self, + ret_send: false, + ret_recv: false, + reusable: false, + }), + await_queue: None, + }) + } + } + } + (Some((send_data, send_spec)), Some((recv_data, recv_spec))) => { + match (send_spec, recv_spec) { + (BuffSpec::Single(send_size), BuffSpec::Single(recv_size)) => { + // Buffer must have the right size + if send_data.len() != send_size.into() { + return Err(VirtqError::BufferSizeWrong(send_data.len())); + } + + let send_desc = match self + .mem_pool() + .pull_from_raw(Rc::clone(&self.mem_pool()), send_data) + { + Ok(desc) => desc, + Err(vq_err) => return Err(vq_err), + }; + + // Buffer must have the right size + if recv_data.len() != recv_size.into() { + return Err(VirtqError::BufferSizeWrong(recv_data.len())); + } + + let recv_desc = match self + .mem_pool() 
+ .pull_from_raw(Rc::clone(&self.mem_pool()), recv_data) + { + Ok(desc) => desc, + Err(vq_err) => return Err(vq_err), + }; + + Ok(TransferToken { + state: TransferState::Ready, + buff_tkn: Some(BufferToken { + send_buff: Some(Buffer::Single { + desc_lst: vec![send_desc].into_boxed_slice(), + len: send_data.len(), + next_write: 0, + }), + recv_buff: Some(Buffer::Single { + desc_lst: vec![recv_desc].into_boxed_slice(), + len: recv_data.len(), + next_write: 0, + }), + vq: self, + ret_send: false, + ret_recv: false, + reusable: false, + }), + await_queue: None, + }) + } + (BuffSpec::Single(send_size), BuffSpec::Multiple(recv_size_lst)) => { + // Buffer must have the right size + if send_data.len() != send_size.into() { + return Err(VirtqError::BufferSizeWrong(send_data.len())); + } + + let send_desc = match self + .mem_pool() + .pull_from_raw(Rc::clone(&self.mem_pool()), send_data) + { + Ok(desc) => desc, + Err(vq_err) => return Err(vq_err), + }; + + let mut recv_desc_lst: Vec = + Vec::with_capacity(recv_size_lst.len()); + let mut index = 0usize; + + for byte in recv_size_lst { + let end_index = index + usize::from(*byte); + let next_slice = match recv_data.get(index..end_index) { + Some(slice) => slice, + None => return Err(VirtqError::BufferSizeWrong(recv_data.len())), + }; + + match self + .mem_pool() + .pull_from_raw(Rc::clone(&self.mem_pool()), next_slice) + { + Ok(desc) => recv_desc_lst.push(desc), + Err(vq_err) => return Err(vq_err), + }; + + // update the starting index for the next iteration + index += usize::from(*byte); + } + + Ok(TransferToken { + state: TransferState::Ready, + buff_tkn: Some(BufferToken { + send_buff: Some(Buffer::Single { + desc_lst: vec![send_desc].into_boxed_slice(), + len: send_data.len(), + next_write: 0, + }), + recv_buff: Some(Buffer::Multiple { + desc_lst: recv_desc_lst.into_boxed_slice(), + len: recv_data.len(), + next_write: 0, + }), + vq: self, + ret_send: false, + ret_recv: false, + reusable: false, + }), + await_queue: None, + }) + } + (BuffSpec::Multiple(send_size_lst), BuffSpec::Multiple(recv_size_lst)) => { + let mut send_desc_lst: Vec = + Vec::with_capacity(send_size_lst.len()); + let mut index = 0usize; + + for byte in send_size_lst { + let end_index = index + usize::from(*byte); + let next_slice = match send_data.get(index..end_index) { + Some(slice) => slice, + None => return Err(VirtqError::BufferSizeWrong(send_data.len())), + }; + + match self + .mem_pool() + .pull_from_raw(Rc::clone(&self.mem_pool()), next_slice) + { + Ok(desc) => send_desc_lst.push(desc), + Err(vq_err) => return Err(vq_err), + }; + + // update the starting index for the next iteration + index += usize::from(*byte); + } + + let mut recv_desc_lst: Vec = + Vec::with_capacity(recv_size_lst.len()); + let mut index = 0usize; + + for byte in recv_size_lst { + let end_index = index + usize::from(*byte); + let next_slice = match recv_data.get(index..end_index) { + Some(slice) => slice, + None => return Err(VirtqError::BufferSizeWrong(recv_data.len())), + }; + + match self + .mem_pool() + .pull_from_raw(Rc::clone(&self.mem_pool()), next_slice) + { + Ok(desc) => recv_desc_lst.push(desc), + Err(vq_err) => return Err(vq_err), + }; + + // update the starting index for the next iteration + index += usize::from(*byte); + } + + Ok(TransferToken { + state: TransferState::Ready, + buff_tkn: Some(BufferToken { + send_buff: Some(Buffer::Multiple { + desc_lst: send_desc_lst.into_boxed_slice(), + len: send_data.len(), + next_write: 0, + }), + recv_buff: Some(Buffer::Multiple { + desc_lst: 
recv_desc_lst.into_boxed_slice(), + len: recv_data.len(), + next_write: 0, + }), + vq: self, + ret_send: false, + ret_recv: false, + reusable: false, + }), + await_queue: None, + }) + } + (BuffSpec::Multiple(send_size_lst), BuffSpec::Single(recv_size)) => { + let mut send_desc_lst: Vec = + Vec::with_capacity(send_size_lst.len()); + let mut index = 0usize; + + for byte in send_size_lst { + let end_index = index + usize::from(*byte); + let next_slice = match send_data.get(index..end_index) { + Some(slice) => slice, + None => return Err(VirtqError::BufferSizeWrong(send_data.len())), + }; + + match self + .mem_pool() + .pull_from_raw(Rc::clone(&self.mem_pool()), next_slice) + { + Ok(desc) => send_desc_lst.push(desc), + Err(vq_err) => return Err(vq_err), + }; + + // update the starting index for the next iteration + index += usize::from(*byte); + } + + // Buffer must have the right size + if recv_data.len() != recv_size.into() { + return Err(VirtqError::BufferSizeWrong(recv_data.len())); + } + + let recv_desc = match self + .mem_pool() + .pull_from_raw(Rc::clone(&self.mem_pool()), recv_data) + { + Ok(desc) => desc, + Err(vq_err) => return Err(vq_err), + }; + + Ok(TransferToken { + state: TransferState::Ready, + buff_tkn: Some(BufferToken { + send_buff: Some(Buffer::Multiple { + desc_lst: send_desc_lst.into_boxed_slice(), + len: send_data.len(), + next_write: 0, + }), + recv_buff: Some(Buffer::Single { + desc_lst: vec![recv_desc].into_boxed_slice(), + len: recv_data.len(), + next_write: 0, + }), + vq: self, + ret_send: false, + ret_recv: false, + reusable: false, + }), + await_queue: None, + }) + } + (BuffSpec::Indirect(send_size_lst), BuffSpec::Indirect(recv_size_lst)) => { + let mut send_desc_lst: Vec = + Vec::with_capacity(send_size_lst.len()); + let mut index = 0usize; + + for byte in send_size_lst { + let end_index = index + usize::from(*byte); + let next_slice = match send_data.get(index..end_index) { + Some(slice) => slice, + None => return Err(VirtqError::BufferSizeWrong(send_data.len())), + }; + + send_desc_lst.push( + self.mem_pool().pull_from_raw_untracked( + Rc::clone(&self.mem_pool()), + next_slice, + ), + ); + + // update the starting index for the next iteration + index += usize::from(*byte); + } + + let mut recv_desc_lst: Vec = + Vec::with_capacity(recv_size_lst.len()); + let mut index = 0usize; + + for byte in recv_size_lst { + let end_index = index + usize::from(*byte); + let next_slice = match recv_data.get(index..end_index) { + Some(slice) => slice, + None => return Err(VirtqError::BufferSizeWrong(recv_data.len())), + }; + + recv_desc_lst.push( + self.mem_pool().pull_from_raw_untracked( + Rc::clone(&self.mem_pool()), + next_slice, + ), + ); + + // update the starting index for the next iteration + index += usize::from(*byte); + } + + let ctrl_desc = match self + .create_indirect_ctrl(Some(&send_desc_lst), Some(&recv_desc_lst)) + { + Ok(desc) => desc, + Err(vq_err) => return Err(vq_err), + }; + + Ok(TransferToken { + state: TransferState::Ready, + buff_tkn: Some(BufferToken { + recv_buff: Some(Buffer::Indirect { + desc_lst: recv_desc_lst.into_boxed_slice(), + ctrl_desc: ctrl_desc.no_dealloc_clone(), + len: recv_data.len(), + next_write: 0, + }), + send_buff: Some(Buffer::Indirect { + desc_lst: send_desc_lst.into_boxed_slice(), + ctrl_desc, + len: send_data.len(), + next_write: 0, + }), + vq: self, + ret_send: false, + ret_recv: false, + reusable: false, + }), + await_queue: None, + }) + } + (BuffSpec::Indirect(_), BuffSpec::Single(_)) + | (BuffSpec::Indirect(_), 
BuffSpec::Multiple(_)) => Err(VirtqError::BufferInWithDirect), + (BuffSpec::Single(_), BuffSpec::Indirect(_)) + | (BuffSpec::Multiple(_), BuffSpec::Indirect(_)) => Err(VirtqError::BufferInWithDirect), + } + } } } @@ -480,21 +862,582 @@ impl Virtq { /// // ++++++++++++++++++++++++++ /// ``` /// As a result indirect descriptors result in a single descriptor consumption in the actual queue. - pub fn prep_buffer( - &self, - rc_self: Rc, + fn prep_buffer( + self: Rc, send: Option>, recv: Option>, - ) -> Result { - match self { - Virtq::Packed(vq) => vq.prep_buffer(rc_self, send, recv), - Virtq::Split(vq) => vq.prep_buffer(rc_self, send, recv), + ) -> Result; + + /// The implementation of the method requires constraints that are incompatible with a trait object. + /// Because of this, we constrain it to static objects (via Sized) and call it from the implementation + /// of [Self::prep_buffer] inside the implementor. + fn prep_buffer_static( + self: Rc, + send: Option>, + recv: Option>, + ) -> Result + where + Self: Sized + 'static, + { + match (send, recv) { + // No buffers specified + (None, None) => Err(VirtqError::BufferNotSpecified), + // Send buffer specified, No recv buffer + (Some(spec), None) => { + match spec { + BuffSpec::Single(size) => { + match self.mem_pool().pull(Rc::clone(&self.mem_pool()), size) { + Ok(desc) => { + let buffer = Buffer::Single { + desc_lst: vec![desc].into_boxed_slice(), + len: size.into(), + next_write: 0, + }; + + Ok(BufferToken { + send_buff: Some(buffer), + recv_buff: None, + vq: self.clone(), + ret_send: true, + ret_recv: false, + reusable: true, + }) + } + Err(vq_err) => Err(vq_err), + } + } + BuffSpec::Multiple(size_lst) => { + let mut desc_lst: Vec = Vec::with_capacity(size_lst.len()); + let mut len = 0usize; + + for size in size_lst { + match self.mem_pool().pull(Rc::clone(&self.mem_pool()), *size) { + Ok(desc) => desc_lst.push(desc), + Err(vq_err) => return Err(vq_err), + } + len += usize::from(*size); + } + + let buffer = Buffer::Multiple { + desc_lst: desc_lst.into_boxed_slice(), + len, + next_write: 0, + }; + + Ok(BufferToken { + send_buff: Some(buffer), + recv_buff: None, + vq: self.clone(), + ret_send: true, + ret_recv: false, + reusable: true, + }) + } + BuffSpec::Indirect(size_lst) => { + let mut desc_lst: Vec = Vec::with_capacity(size_lst.len()); + let mut len = 0usize; + + for size in size_lst { + // As the indirect list does only consume one descriptor for the + // control descriptor, the actual list is untracked + desc_lst.push( + self.mem_pool() + .pull_untracked(Rc::clone(&self.mem_pool()), *size), + ); + len += usize::from(*size); + } + + let ctrl_desc = match self.create_indirect_ctrl(Some(&desc_lst), None) { + Ok(desc) => desc, + Err(vq_err) => return Err(vq_err), + }; + + let buffer = Buffer::Indirect { + desc_lst: desc_lst.into_boxed_slice(), + ctrl_desc, + len, + next_write: 0, + }; + + Ok(BufferToken { + send_buff: Some(buffer), + recv_buff: None, + vq: self.clone(), + ret_send: true, + ret_recv: false, + reusable: true, + }) + } + } + } + // No send buffer, recv buffer is specified + (None, Some(spec)) => { + match spec { + BuffSpec::Single(size) => { + match self.mem_pool().pull(Rc::clone(&self.mem_pool()), size) { + Ok(desc) => { + let buffer = Buffer::Single { + desc_lst: vec![desc].into_boxed_slice(), + len: size.into(), + next_write: 0, + }; + + Ok(BufferToken { + send_buff: None, + recv_buff: Some(buffer), + vq: self.clone(), + ret_send: false, + ret_recv: true, + reusable: true, + }) + } + Err(vq_err) => Err(vq_err), + } + } 
+ BuffSpec::Multiple(size_lst) => { + let mut desc_lst: Vec = Vec::with_capacity(size_lst.len()); + let mut len = 0usize; + + for size in size_lst { + match self.mem_pool().pull(Rc::clone(&self.mem_pool()), *size) { + Ok(desc) => desc_lst.push(desc), + Err(vq_err) => return Err(vq_err), + } + len += usize::from(*size); + } + + let buffer = Buffer::Multiple { + desc_lst: desc_lst.into_boxed_slice(), + len, + next_write: 0, + }; + + Ok(BufferToken { + send_buff: None, + recv_buff: Some(buffer), + vq: self.clone(), + ret_send: false, + ret_recv: true, + reusable: true, + }) + } + BuffSpec::Indirect(size_lst) => { + let mut desc_lst: Vec = Vec::with_capacity(size_lst.len()); + let mut len = 0usize; + + for size in size_lst { + // As the indirect list does only consume one descriptor for the + // control descriptor, the actual list is untracked + desc_lst.push( + self.mem_pool() + .pull_untracked(Rc::clone(&self.mem_pool()), *size), + ); + len += usize::from(*size); + } + + let ctrl_desc = match self.create_indirect_ctrl(None, Some(&desc_lst)) { + Ok(desc) => desc, + Err(vq_err) => return Err(vq_err), + }; + + let buffer = Buffer::Indirect { + desc_lst: desc_lst.into_boxed_slice(), + ctrl_desc, + len, + next_write: 0, + }; + + Ok(BufferToken { + send_buff: None, + recv_buff: Some(buffer), + vq: self.clone(), + ret_send: false, + ret_recv: true, + reusable: true, + }) + } + } + } + // Send buffer specified, recv buffer specified + (Some(send_spec), Some(recv_spec)) => { + match (send_spec, recv_spec) { + (BuffSpec::Single(send_size), BuffSpec::Single(recv_size)) => { + let send_buff = + match self.mem_pool().pull(Rc::clone(&self.mem_pool()), send_size) { + Ok(send_desc) => Some(Buffer::Single { + desc_lst: vec![send_desc].into_boxed_slice(), + len: send_size.into(), + next_write: 0, + }), + Err(vq_err) => return Err(vq_err), + }; + + let recv_buff = + match self.mem_pool().pull(Rc::clone(&self.mem_pool()), recv_size) { + Ok(recv_desc) => Some(Buffer::Single { + desc_lst: vec![recv_desc].into_boxed_slice(), + len: recv_size.into(), + next_write: 0, + }), + Err(vq_err) => return Err(vq_err), + }; + + Ok(BufferToken { + send_buff, + recv_buff, + vq: self.clone(), + ret_send: true, + ret_recv: true, + reusable: true, + }) + } + (BuffSpec::Single(send_size), BuffSpec::Multiple(recv_size_lst)) => { + let send_buff = + match self.mem_pool().pull(Rc::clone(&self.mem_pool()), send_size) { + Ok(send_desc) => Some(Buffer::Single { + desc_lst: vec![send_desc].into_boxed_slice(), + len: send_size.into(), + next_write: 0, + }), + Err(vq_err) => return Err(vq_err), + }; + + let mut recv_desc_lst: Vec = + Vec::with_capacity(recv_size_lst.len()); + let mut recv_len = 0usize; + + for size in recv_size_lst { + match self.mem_pool().pull(Rc::clone(&self.mem_pool()), *size) { + Ok(desc) => recv_desc_lst.push(desc), + Err(vq_err) => return Err(vq_err), + } + recv_len += usize::from(*size); + } + + let recv_buff = Some(Buffer::Multiple { + desc_lst: recv_desc_lst.into_boxed_slice(), + len: recv_len, + next_write: 0, + }); + + Ok(BufferToken { + send_buff, + recv_buff, + vq: self.clone(), + ret_send: true, + ret_recv: true, + reusable: true, + }) + } + (BuffSpec::Multiple(send_size_lst), BuffSpec::Multiple(recv_size_lst)) => { + let mut send_desc_lst: Vec = + Vec::with_capacity(send_size_lst.len()); + let mut send_len = 0usize; + for size in send_size_lst { + match self.mem_pool().pull(Rc::clone(&self.mem_pool()), *size) { + Ok(desc) => send_desc_lst.push(desc), + Err(vq_err) => return Err(vq_err), + } + send_len += 
usize::from(*size); + } + + let send_buff = Some(Buffer::Multiple { + desc_lst: send_desc_lst.into_boxed_slice(), + len: send_len, + next_write: 0, + }); + + let mut recv_desc_lst: Vec = + Vec::with_capacity(recv_size_lst.len()); + let mut recv_len = 0usize; + + for size in recv_size_lst { + match self.mem_pool().pull(Rc::clone(&self.mem_pool()), *size) { + Ok(desc) => recv_desc_lst.push(desc), + Err(vq_err) => return Err(vq_err), + } + recv_len += usize::from(*size); + } + + let recv_buff = Some(Buffer::Multiple { + desc_lst: recv_desc_lst.into_boxed_slice(), + len: recv_len, + next_write: 0, + }); + + Ok(BufferToken { + send_buff, + recv_buff, + vq: self.clone(), + ret_send: true, + ret_recv: true, + reusable: true, + }) + } + (BuffSpec::Multiple(send_size_lst), BuffSpec::Single(recv_size)) => { + let mut send_desc_lst: Vec = + Vec::with_capacity(send_size_lst.len()); + let mut send_len = 0usize; + + for size in send_size_lst { + match self.mem_pool().pull(Rc::clone(&self.mem_pool()), *size) { + Ok(desc) => send_desc_lst.push(desc), + Err(vq_err) => return Err(vq_err), + } + send_len += usize::from(*size); + } + + let send_buff = Some(Buffer::Multiple { + desc_lst: send_desc_lst.into_boxed_slice(), + len: send_len, + next_write: 0, + }); + + let recv_buff = + match self.mem_pool().pull(Rc::clone(&self.mem_pool()), recv_size) { + Ok(recv_desc) => Some(Buffer::Single { + desc_lst: vec![recv_desc].into_boxed_slice(), + len: recv_size.into(), + next_write: 0, + }), + Err(vq_err) => return Err(vq_err), + }; + + Ok(BufferToken { + send_buff, + recv_buff, + vq: self.clone(), + ret_send: true, + ret_recv: true, + reusable: true, + }) + } + (BuffSpec::Indirect(send_size_lst), BuffSpec::Indirect(recv_size_lst)) => { + let mut send_desc_lst: Vec = + Vec::with_capacity(send_size_lst.len()); + let mut send_len = 0usize; + + for size in send_size_lst { + // As the indirect list does only consume one descriptor for the + // control descriptor, the actual list is untracked + send_desc_lst.push( + self.mem_pool() + .pull_untracked(Rc::clone(&self.mem_pool()), *size), + ); + send_len += usize::from(*size); + } + + let mut recv_desc_lst: Vec = + Vec::with_capacity(recv_size_lst.len()); + let mut recv_len = 0usize; + + for size in recv_size_lst { + // As the indirect list does only consume one descriptor for the + // control descriptor, the actual list is untracked + recv_desc_lst.push( + self.mem_pool() + .pull_untracked(Rc::clone(&self.mem_pool()), *size), + ); + recv_len += usize::from(*size); + } + + let ctrl_desc = match self + .create_indirect_ctrl(Some(&send_desc_lst), Some(&recv_desc_lst)) + { + Ok(desc) => desc, + Err(vq_err) => return Err(vq_err), + }; + + let recv_buff = Some(Buffer::Indirect { + desc_lst: recv_desc_lst.into_boxed_slice(), + ctrl_desc: ctrl_desc.no_dealloc_clone(), + len: recv_len, + next_write: 0, + }); + let send_buff = Some(Buffer::Indirect { + desc_lst: send_desc_lst.into_boxed_slice(), + ctrl_desc, + len: send_len, + next_write: 0, + }); + + Ok(BufferToken { + send_buff, + recv_buff, + vq: self.clone(), + ret_send: true, + ret_recv: true, + reusable: true, + }) + } + (BuffSpec::Indirect(_), BuffSpec::Single(_)) + | (BuffSpec::Indirect(_), BuffSpec::Multiple(_)) => Err(VirtqError::BufferInWithDirect), + (BuffSpec::Single(_), BuffSpec::Indirect(_)) + | (BuffSpec::Multiple(_), BuffSpec::Indirect(_)) => Err(VirtqError::BufferInWithDirect), + } + } } } } -/// The trait needs to be implemented on structures which are to be used via the `prep_transfer()` function of virtqueues and 
for
-/// structures which are to be used to write data into buffers of a [BufferToken] via `BufferToken.write()` or
+/// These methods are an implementation detail and are meant only for consumption by the default method
+/// implementations in [Virtq].
+trait VirtqPrivate {
+	fn create_indirect_ctrl(
+		&self,
+		send: Option<&Vec<MemDescr>>,
+		recv: Option<&Vec<MemDescr>>,
+	) -> Result<MemDescr, VirtqError>;
+
+	fn mem_pool(&self) -> Rc<MemPool>;
+}
+
+/// Allows to check, if a given structure crosses a physical page boundary.
+/// Returns true, if the structure does NOT cross a boundary or crosses only
+/// contiguous physical page boundaries.
+///
+/// Structures provided to the Queue must pass this test, otherwise the queue
+/// currently panics.
+pub fn check_bounds<T: AsSliceU8>(data: &T) -> bool {
+	let slice = data.as_slice_u8();
+
+	let start_virt = ptr::from_ref(slice.first().unwrap()).addr();
+	let end_virt = ptr::from_ref(slice.last().unwrap()).addr();
+	let end_phy_calc = paging::virt_to_phys(VirtAddr::from(start_virt)) + (slice.len() - 1);
+	let end_phy = paging::virt_to_phys(VirtAddr::from(end_virt));
+
+	end_phy == end_phy_calc
+}
+
+/// Allows to check, if a given slice crosses a physical page boundary.
+/// Returns true, if the slice does NOT cross a boundary or crosses only
+/// contiguous physical page boundaries.
+/// Slice MUST come from a boxed value. Otherwise the slice might be moved and
+/// the test of this function is no longer valid.
+///
+/// This check is especially useful if one wants to check if slices
+/// into which the queue will destructure a structure are valid for the queue.
+///
+/// Slices provided to the Queue must pass this test, otherwise the queue
+/// currently panics.
+pub fn check_bounds_slice(slice: &[u8]) -> bool {
+	let start_virt = ptr::from_ref(slice.first().unwrap()).addr();
+	let end_virt = ptr::from_ref(slice.last().unwrap()).addr();
+	let end_phy_calc = paging::virt_to_phys(VirtAddr::from(start_virt)) + (slice.len() - 1);
+	let end_phy = paging::virt_to_phys(VirtAddr::from(end_virt));
+
+	end_phy == end_phy_calc
+}
+
+/// Frees memory regions gained access to via `Transfer.ret_raw()`.
+pub fn free_raw(ptr: *mut u8, len: usize) {
+	crate::mm::deallocate(VirtAddr::from(ptr as usize), len);
+}
+
+/// Dispatches a batch of TransferTokens. The actual behaviour depends on the respective
+/// virtqueue implementation. Please see the respective docs for details.
+///
+/// **INFO:**
+/// Due to the missing HashMap implementation in the kernel, this function currently uses a nested
+/// for-loop. The first iteration is over the number of dispatched tokens. Inside this loop, the
+/// function iterates over a list of all already "used" virtqueues. If the given token belongs to an
+/// existing queue it is inserted into the corresponding list of tokens, if it belongs to no queue,
+/// a new entry in the "used" virtqueues list is made.
+/// This procedure can possibly be very slow.
+///
+/// The `notif` parameter indicates if the driver wants to have a notification for this specific
+/// transfer. This is only for performance optimization. As it is NOT ensured, that the device sees the
+/// updated notification flags before finishing transfers!
+pub fn dispatch_batch(tkns: Vec<TransferToken>, notif: bool) {
+	let mut used_vqs: Vec<(Rc<dyn Virtq>, Vec<TransferToken>)> = Vec::new();
+
+	// Sort the TransferTokens depending on the queue they're coming from,
+	// then call dispatch_batch of that queue
+	for tkn in tkns {
+		let index = tkn.get_vq().index();
+		let mut used = false;
+		let mut index_used = 0usize;
+
+		for (pos, (vq, _)) in used_vqs.iter_mut().enumerate() {
+			if index == vq.index() {
+				index_used = pos;
+				used = true;
+				break;
+			}
+		}
+
+		if used {
+			let (_, tkn_lst) = &mut used_vqs[index_used];
+			tkn_lst.push(tkn);
+		} else {
+			let mut new_tkn_lst = Vec::new();
+			let vq = tkn.get_vq();
+			new_tkn_lst.push(tkn);
+
+			used_vqs.push((vq, new_tkn_lst))
+		}
+	}
+
+	for (vq_ref, tkn_lst) in used_vqs {
+		vq_ref.dispatch_batch(tkn_lst, notif);
+	}
+}
+
+/// Dispatches a batch of TransferTokens. The Transfers will be placed into the `await_queue`
+/// upon finish.
+///
+/// **INFO:**
+/// Due to the missing HashMap implementation in the kernel, this function currently uses a nested
+/// for-loop. The first iteration is over the number of dispatched tokens. Inside this loop, the
+/// function iterates over a list of all already "used" virtqueues. If the given token belongs to an
+/// existing queue it is inserted into the corresponding list of tokens, if it belongs to no queue,
+/// a new entry in the "used" virtqueues list is made.
+/// This procedure can possibly be very slow.
+///
+/// The `notif` parameter indicates if the driver wants to have a notification for this specific
+/// transfer. This is only for performance optimization. As it is NOT ensured, that the device sees the
+/// updated notification flags before finishing transfers!
+pub fn dispatch_batch_await(
+	tkns: Vec<TransferToken>,
+	await_queue: Rc<RefCell<VecDeque<Transfer>>>,
+	notif: bool,
+) {
+	let mut used_vqs: Vec<(Rc<dyn Virtq>, Vec<TransferToken>)> = Vec::new();
+
+	// Sort the TransferTokens depending on the queue they're coming from,
+	// then call dispatch_batch of that queue
+	for tkn in tkns {
+		let index = tkn.get_vq().index();
+		let mut used = false;
+		let mut index_used = 0usize;
+
+		for (pos, (vq, _)) in used_vqs.iter_mut().enumerate() {
+			if index == vq.index() {
+				index_used = pos;
+				used = true;
+				break;
+			}
+		}
+
+		if used {
+			let (_, tkn_lst) = &mut used_vqs[index_used];
+			tkn_lst.push(tkn);
+		} else {
+			let mut new_tkn_lst = Vec::new();
+			let vq = tkn.get_vq();
+			new_tkn_lst.push(tkn);
+
+			used_vqs.push((vq, new_tkn_lst))
+		}
+	}
+
+	for (vq, tkn_lst) in used_vqs {
+		vq.dispatch_batch_await(tkn_lst, Rc::clone(&await_queue), notif);
+	}
+}
+
+/// The trait needs to be implemented for
+/// structures which are to be used to write data into buffers of a [BufferToken] via [BufferToken::write] or
 /// `BufferToken.write_seq()`.
 ///
 /// **INFO:**
@@ -709,7 +1652,7 @@ impl Transfer {
 	/// The returned data is of type `Box<[Box<[u8]>]>`. This function therefore preserves
 	/// the scattered structure of the buffer,
 	///
-	/// If one create this buffer via a `Virtq.prep_transfer()` or `Virtq.prep_transfer_from_raw()`
+	/// If one creates this buffer via a `Virtq.prep_transfer_from_raw()`
 	/// call, a casting back to the original structure `T` is NOT possible.
 	/// In these cases please use `Transfer.ret_cpy()` or use 'BuffSpec::Single' only!
	pub fn ret_scat_cpy(
@@ -961,7 +1904,7 @@ pub struct TransferToken {
 /// Public Interface for TransferToken
 impl TransferToken {
 	/// Returns a reference to the holding virtqueue
-	pub fn get_vq(&self) -> Rc<Virtq> {
+	pub fn get_vq(&self) -> Rc<dyn Virtq> {
 		// Unwrapping is okay here, as TransferToken must hold a BufferToken
 		Rc::clone(&self.buff_tkn.as_ref().unwrap().vq)
 	}
@@ -1046,7 +1989,7 @@ pub struct BufferToken {
 	//send_desc_lst: Option<Vec<MemDescr>>,
 	recv_buff: Option<Buffer>,
 	//recv_desc_lst: Option<Vec<MemDescr>>,
-	vq: Rc<Virtq>,
+	vq: Rc<dyn Virtq>,
 	/// Indicates whether the buff is returnable
 	ret_send: bool,
 	ret_recv: bool,
@@ -1525,7 +2468,7 @@ impl BufferToken {
 		match self.send_buff.as_mut() {
 			Some(buff) => {
 				if buff.len() < data.as_slice_u8().len() {
-					return Err(VirtqError::WriteToLarge(self));
+					return Err(VirtqError::WriteTooLarge);
 				} else {
 					let data_slc = data.as_slice_u8();
 					let mut from = 0usize;
@@ -1554,7 +2497,7 @@ impl BufferToken {
 				let data_slc = data.as_slice_u8();

 				if buff.len() < data_slc.len() {
-					return Err(VirtqError::WriteToLarge(self));
+					return Err(VirtqError::WriteTooLarge);
 				} else {
 					let mut from = 0usize;
@@ -1608,7 +2551,7 @@ impl BufferToken {
 					Err(_) => {
 						// Need no match here, as result is the same, but for the future one could
 						// pass on the actual BufferError wrapped inside a VirtqError, for better recovery
-						return Err(VirtqError::WriteToLarge(self));
+						return Err(VirtqError::WriteTooLarge);
 					}
 				}
 			}
@@ -1624,7 +2567,7 @@ impl BufferToken {
 					Err(_) => {
 						// Need no match here, as result is the same, but for the future one could
 						// pass on the actual BufferError wrapped inside a VirtqError, for better recovery
-						return Err(VirtqError::WriteToLarge(self));
+						return Err(VirtqError::WriteTooLarge);
 					}
 				}
 			}
@@ -2363,7 +3306,7 @@ impl From for u16 {
/// This module unifies errors provided to users of a virtqueue, independent of the underlying
/// virtqueue implementation, realized via the different enum variants.
pub mod error {
-	use super::{BufferToken, Transfer};
+	use super::Transfer;

 	#[derive(Debug)]
 	// Internal Error Handling for Buffers
@@ -2415,12 +3358,14 @@ pub mod error {
 		NoBufferAvail,
 		/// Indicates that a write to a Buffer happened and the data to be written into
 		/// the buffer/descriptor was too large for the buffer.
-		WriteToLarge(BufferToken),
+		WriteTooLarge,
 		/// Indicates that a Bytes::new() call failed or generally that a buffer is too large to
 		/// be transferred as one. The maximum size is u32::MAX. This also is the maximum for indirect
 		/// descriptors (both the one placed in the queue, as also the ones the indirect descriptor is
 		/// referring to).
 		BufferToLarge,
+		QueueSizeNotAllowed(u16),
+		FeatNotSupported(u64),
 	}

 	impl core::fmt::Debug for VirtqError {
@@ -2435,8 +3380,10 @@ pub mod error {
 				VirtqError::BufferSizeWrong(_) => write!(f, "Specified Buffer is too small for write!"),
 				VirtqError::NoReuseBuffer => write!(f, "Buffer can not be reused!"),
 				VirtqError::OngoingTransfer(_) => write!(f, "Transfer is ongoing and can not be used currently!"),
-				VirtqError::WriteToLarge(_) => write!(f, "Write is to large for BufferToken!"),
+				VirtqError::WriteTooLarge => write!(f, "Write is too large for BufferToken!"),
 				VirtqError::BufferToLarge => write!(f, "Buffer too large for queue! 
u32::MAX exceeded."), + VirtqError::QueueSizeNotAllowed(_) => write!(f, "The requested queue size is not valid."), + VirtqError:: FeatNotSupported(_) => write!(f, "An unsupported feature was requested from the queue."), } } } diff --git a/src/drivers/virtio/virtqueue/packed.rs b/src/drivers/virtio/virtqueue/packed.rs index fbd3b262ab..e93944a7b1 100644 --- a/src/drivers/virtio/virtqueue/packed.rs +++ b/src/drivers/virtio/virtqueue/packed.rs @@ -12,7 +12,6 @@ use core::sync::atomic::{fence, Ordering}; use align_address::Align; -use self::error::VqPackedError; use super::super::features::Features; #[cfg(not(feature = "pci"))] use super::super::transport::mmio::{ComCfg, NotifCfg, NotifCtrl}; @@ -20,8 +19,8 @@ use super::super::transport::mmio::{ComCfg, NotifCfg, NotifCtrl}; use super::super::transport::pci::{ComCfg, NotifCfg, NotifCtrl}; use super::error::VirtqError; use super::{ - AsSliceU8, BuffSpec, Buffer, BufferToken, Bytes, DescrFlags, MemDescr, MemPool, Transfer, - TransferState, TransferToken, Virtq, VqIndex, VqSize, + BuffSpec, Buffer, BufferToken, Bytes, DescrFlags, MemDescr, MemPool, Transfer, TransferState, + TransferToken, Virtq, VirtqPrivate, VqIndex, VqSize, }; use crate::arch::mm::paging::{BasePageSize, PageSize}; use crate::arch::mm::{paging, VirtAddr}; @@ -974,30 +973,20 @@ pub struct PackedVq { // This interface is also public in order to allow people to use the PackedVq directly! // This is currently unlikely, as the Tokens hold a Rc for refering to their origin // queue. This could be eased -impl PackedVq { - /// Enables interrupts for this virtqueue upon receiving a transfer - pub fn enable_notifs(&self) { +impl Virtq for PackedVq { + fn enable_notifs(&self) { self.drv_event.borrow_mut().enable_notif(); } - /// Disables interrupts for this virtqueue upon receiving a transfer - pub fn disable_notifs(&self) { + fn disable_notifs(&self) { self.drv_event.borrow_mut().disable_notif(); } - /// See `Virtq.poll()` documentation - pub fn poll(&self) { + fn poll(&self) { self.descr_ring.borrow_mut().poll(); } - /// Dispatches a batch of transfer token. The buffers of the respective transfers are provided to the queue in - /// sequence. After the last buffer has been written, the queue marks the first buffer as available and triggers - /// a device notification if wanted by the device. - /// - /// The `notif` parameter indicates if the driver wants to have a notification for this specific - /// transfer. This is only for performance optimization. As it is NOT ensured, that the device sees the - /// updated notification flags before finishing transfers! - pub fn dispatch_batch(&self, tkns: Vec, notif: bool) { + fn dispatch_batch(&self, tkns: Vec, notif: bool) { // Zero transfers are not allowed assert!(!tkns.is_empty()); @@ -1030,19 +1019,7 @@ impl PackedVq { } } - /// Dispatches a batch of TransferTokens. The Transfers will be placed in to the `await_queue` - /// upon finish. - /// - /// The `notif` parameter indicates if the driver wants to have a notification for this specific - /// transfer. This is only for performance optimization. As it is NOT ensured, that the device sees the - /// updated notification flags before finishing transfers! - /// - /// Dispatches a batch of transfer token. The buffers of the respective transfers are provided to the queue in - /// sequence. After the last buffer has been written, the queue marks the first buffer as available and triggers - /// a device notification if wanted by the device. 
- /// - /// Tokens to get a reference to the provided await_queue, where they will be placed upon finish. - pub fn dispatch_batch_await( + fn dispatch_batch_await( &self, mut tkns: Vec, await_queue: Rc>>, @@ -1085,12 +1062,7 @@ impl PackedVq { } } - /// See `Virtq.prep_transfer()` documentation. - /// - /// The `notif` parameter indicates if the driver wants to have a notification for this specific - /// transfer. This is only for performance optimization. As it is NOT ensured, that the device sees the - /// updated notification flags before finishing transfers! - pub fn dispatch(&self, tkn: TransferToken, notif: bool) { + fn dispatch(&self, tkn: TransferToken, notif: bool) { let (next_off, next_wrap) = self.descr_ring.borrow_mut().push(tkn); if notif { @@ -1120,19 +1092,17 @@ impl PackedVq { } } - /// See `Virtq.index()` documentation - pub fn index(&self) -> VqIndex { + fn index(&self) -> VqIndex { self.index } - /// See `Virtq::new()` documentation - pub fn new( + fn new( com_cfg: &mut ComCfg, notif_cfg: &NotifCfg, size: VqSize, index: VqIndex, feats: u64, - ) -> Result { + ) -> Result { // Currently we do not have support for in order use. // This steems from the fact, that the packedVq ReadCtrl currently is not // able to derive other finished transfer from a used-buffer notification. @@ -1142,7 +1112,7 @@ impl PackedVq { // and adjust its ReadCtrl accordingly. if feats & Features::VIRTIO_F_IN_ORDER == Features::VIRTIO_F_IN_ORDER { info!("PackedVq has no support for VIRTIO_F_IN_ORDER. Aborting..."); - return Err(VqPackedError::FeatNotSupported( + return Err(VirtqError::FeatNotSupported( feats & Features::VIRTIO_F_IN_ORDER, )); } @@ -1150,7 +1120,7 @@ impl PackedVq { // Get a handler to the queues configuration area. let mut vq_handler = match com_cfg.select_vq(index.into()) { Some(handler) => handler, - None => return Err(VqPackedError::QueueNotExisting(index.into())), + None => return Err(VirtqError::QueueNotExisting(index.into())), }; // Must catch zero size as it is not allowed for packed queues. @@ -1158,7 +1128,7 @@ impl PackedVq { // // See Virtio specification v1.1. - 4.1.4.3.2 let vq_size = if (size.0 == 0) | (size.0 > 32768) { - return Err(VqPackedError::SizeNotAllowed(size.0)); + return Err(VirtqError::QueueSizeNotAllowed(size.0)); } else { vq_handler.set_vq_size(size.0) }; @@ -1226,1013 +1196,32 @@ impl PackedVq { }) } - /// See `Virtq.prep_transfer_from_raw()` documentation. 
- pub fn prep_transfer_from_raw( - &self, - master: Rc, - send: Option<(*mut T, BuffSpec<'_>)>, - recv: Option<(*mut K, BuffSpec<'_>)>, + fn prep_transfer_from_raw( + self: Rc, + send: Option<(&[u8], BuffSpec<'_>)>, + recv: Option<(&mut [u8], BuffSpec<'_>)>, ) -> Result { - match (send, recv) { - (None, None) => Err(VirtqError::BufferNotSpecified), - (Some((send_data, send_spec)), None) => { - match send_spec { - BuffSpec::Single(size) => { - let data_slice = unsafe { (*send_data).as_slice_u8() }; - - // Buffer must have the right size - if data_slice.len() != size.into() { - return Err(VirtqError::BufferSizeWrong(data_slice.len())); - } - - let desc = match self - .mem_pool - .pull_from_raw(Rc::clone(&self.mem_pool), data_slice) - { - Ok(desc) => desc, - Err(vq_err) => return Err(vq_err), - }; - - Ok(TransferToken { - state: TransferState::Ready, - buff_tkn: Some(BufferToken { - send_buff: Some(Buffer::Single { - desc_lst: vec![desc].into_boxed_slice(), - len: data_slice.len(), - next_write: 0, - }), - recv_buff: None, - vq: master, - ret_send: false, - ret_recv: false, - reusable: false, - }), - await_queue: None, - }) - } - BuffSpec::Multiple(size_lst) => { - let data_slice = unsafe { (*send_data).as_slice_u8() }; - let mut desc_lst: Vec = Vec::with_capacity(size_lst.len()); - let mut index = 0usize; - - for byte in size_lst { - let end_index = index + usize::from(*byte); - let next_slice = match data_slice.get(index..end_index) { - Some(slice) => slice, - None => return Err(VirtqError::BufferSizeWrong(data_slice.len())), - }; - - match self - .mem_pool - .pull_from_raw(Rc::clone(&self.mem_pool), next_slice) - { - Ok(desc) => desc_lst.push(desc), - Err(vq_err) => return Err(vq_err), - }; - - // update the starting index for the next iteration - index += usize::from(*byte); - } - - Ok(TransferToken { - state: TransferState::Ready, - buff_tkn: Some(BufferToken { - send_buff: Some(Buffer::Multiple { - desc_lst: desc_lst.into_boxed_slice(), - len: data_slice.len(), - next_write: 0, - }), - recv_buff: None, - vq: master, - ret_send: false, - ret_recv: false, - reusable: false, - }), - await_queue: None, - }) - } - BuffSpec::Indirect(size_lst) => { - let data_slice = unsafe { (*send_data).as_slice_u8() }; - let mut desc_lst: Vec = Vec::with_capacity(size_lst.len()); - let mut index = 0usize; - - for byte in size_lst { - let end_index = index + usize::from(*byte); - let next_slice = match data_slice.get(index..end_index) { - Some(slice) => slice, - None => return Err(VirtqError::BufferSizeWrong(data_slice.len())), - }; - - desc_lst.push( - self.mem_pool - .pull_from_raw_untracked(Rc::clone(&self.mem_pool), next_slice), - ); - - // update the starting index for the next iteration - index += usize::from(*byte); - } - - let ctrl_desc = match self.create_indirect_ctrl(Some(&desc_lst), None) { - Ok(desc) => desc, - Err(vq_err) => return Err(vq_err), - }; - - Ok(TransferToken { - state: TransferState::Ready, - buff_tkn: Some(BufferToken { - send_buff: Some(Buffer::Indirect { - desc_lst: desc_lst.into_boxed_slice(), - ctrl_desc, - len: data_slice.len(), - next_write: 0, - }), - recv_buff: None, - vq: master, - ret_send: false, - ret_recv: false, - reusable: false, - }), - await_queue: None, - }) - } - } - } - (None, Some((recv_data, recv_spec))) => { - match recv_spec { - BuffSpec::Single(size) => { - let data_slice = unsafe { (*recv_data).as_slice_u8() }; - - // Buffer must have the right size - if data_slice.len() != size.into() { - return Err(VirtqError::BufferSizeWrong(data_slice.len())); - } - - 
let desc = match self - .mem_pool - .pull_from_raw(Rc::clone(&self.mem_pool), data_slice) - { - Ok(desc) => desc, - Err(vq_err) => return Err(vq_err), - }; - - Ok(TransferToken { - state: TransferState::Ready, - buff_tkn: Some(BufferToken { - send_buff: None, - recv_buff: Some(Buffer::Single { - desc_lst: vec![desc].into_boxed_slice(), - len: data_slice.len(), - next_write: 0, - }), - vq: master, - ret_send: false, - ret_recv: false, - reusable: false, - }), - await_queue: None, - }) - } - BuffSpec::Multiple(size_lst) => { - let data_slice = unsafe { (*recv_data).as_slice_u8() }; - let mut desc_lst: Vec = Vec::with_capacity(size_lst.len()); - let mut index = 0usize; - - for byte in size_lst { - let end_index = index + usize::from(*byte); - let next_slice = match data_slice.get(index..end_index) { - Some(slice) => slice, - None => return Err(VirtqError::BufferSizeWrong(data_slice.len())), - }; - - match self - .mem_pool - .pull_from_raw(Rc::clone(&self.mem_pool), next_slice) - { - Ok(desc) => desc_lst.push(desc), - Err(vq_err) => return Err(vq_err), - }; - - // update the starting index for the next iteration - index += usize::from(*byte); - } - - Ok(TransferToken { - state: TransferState::Ready, - buff_tkn: Some(BufferToken { - send_buff: None, - recv_buff: Some(Buffer::Multiple { - desc_lst: desc_lst.into_boxed_slice(), - len: data_slice.len(), - next_write: 0, - }), - vq: master, - ret_send: false, - ret_recv: false, - reusable: false, - }), - await_queue: None, - }) - } - BuffSpec::Indirect(size_lst) => { - let data_slice = unsafe { (*recv_data).as_slice_u8() }; - let mut desc_lst: Vec = Vec::with_capacity(size_lst.len()); - let mut index = 0usize; - - for byte in size_lst { - let end_index = index + usize::from(*byte); - let next_slice = match data_slice.get(index..end_index) { - Some(slice) => slice, - None => return Err(VirtqError::BufferSizeWrong(data_slice.len())), - }; - - desc_lst.push( - self.mem_pool - .pull_from_raw_untracked(Rc::clone(&self.mem_pool), next_slice), - ); - - // update the starting index for the next iteration - index += usize::from(*byte); - } - - let ctrl_desc = match self.create_indirect_ctrl(None, Some(&desc_lst)) { - Ok(desc) => desc, - Err(vq_err) => return Err(vq_err), - }; - - Ok(TransferToken { - state: TransferState::Ready, - buff_tkn: Some(BufferToken { - send_buff: None, - recv_buff: Some(Buffer::Indirect { - desc_lst: desc_lst.into_boxed_slice(), - ctrl_desc, - len: data_slice.len(), - next_write: 0, - }), - vq: master, - ret_send: false, - ret_recv: false, - reusable: false, - }), - await_queue: None, - }) - } - } - } - (Some((send_data, send_spec)), Some((recv_data, recv_spec))) => { - match (send_spec, recv_spec) { - (BuffSpec::Single(send_size), BuffSpec::Single(recv_size)) => { - let send_data_slice = unsafe { (*send_data).as_slice_u8() }; - - // Buffer must have the right size - if send_data_slice.len() != send_size.into() { - return Err(VirtqError::BufferSizeWrong(send_data_slice.len())); - } - - let send_desc = match self - .mem_pool - .pull_from_raw(Rc::clone(&self.mem_pool), send_data_slice) - { - Ok(desc) => desc, - Err(vq_err) => return Err(vq_err), - }; - - let recv_data_slice = unsafe { (*recv_data).as_slice_u8() }; - - // Buffer must have the right size - if recv_data_slice.len() != recv_size.into() { - return Err(VirtqError::BufferSizeWrong(recv_data_slice.len())); - } - - let recv_desc = match self - .mem_pool - .pull_from_raw(Rc::clone(&self.mem_pool), recv_data_slice) - { - Ok(desc) => desc, - Err(vq_err) => return Err(vq_err), - 
}; - - Ok(TransferToken { - state: TransferState::Ready, - buff_tkn: Some(BufferToken { - send_buff: Some(Buffer::Single { - desc_lst: vec![send_desc].into_boxed_slice(), - len: send_data_slice.len(), - next_write: 0, - }), - recv_buff: Some(Buffer::Single { - desc_lst: vec![recv_desc].into_boxed_slice(), - len: recv_data_slice.len(), - next_write: 0, - }), - vq: master, - ret_send: false, - ret_recv: false, - reusable: false, - }), - await_queue: None, - }) - } - (BuffSpec::Single(send_size), BuffSpec::Multiple(recv_size_lst)) => { - let send_data_slice = unsafe { (*send_data).as_slice_u8() }; - - // Buffer must have the right size - if send_data_slice.len() != send_size.into() { - return Err(VirtqError::BufferSizeWrong(send_data_slice.len())); - } - - let send_desc = match self - .mem_pool - .pull_from_raw(Rc::clone(&self.mem_pool), send_data_slice) - { - Ok(desc) => desc, - Err(vq_err) => return Err(vq_err), - }; - - let recv_data_slice = unsafe { (*recv_data).as_slice_u8() }; - let mut recv_desc_lst: Vec = - Vec::with_capacity(recv_size_lst.len()); - let mut index = 0usize; - - for byte in recv_size_lst { - let end_index = index + usize::from(*byte); - let next_slice = match recv_data_slice.get(index..end_index) { - Some(slice) => slice, - None => { - return Err(VirtqError::BufferSizeWrong(recv_data_slice.len())) - } - }; - - match self - .mem_pool - .pull_from_raw(Rc::clone(&self.mem_pool), next_slice) - { - Ok(desc) => recv_desc_lst.push(desc), - Err(vq_err) => return Err(vq_err), - }; - - // update the starting index for the next iteration - index += usize::from(*byte); - } - - Ok(TransferToken { - state: TransferState::Ready, - buff_tkn: Some(BufferToken { - send_buff: Some(Buffer::Single { - desc_lst: vec![send_desc].into_boxed_slice(), - len: send_data_slice.len(), - next_write: 0, - }), - recv_buff: Some(Buffer::Multiple { - desc_lst: recv_desc_lst.into_boxed_slice(), - len: recv_data_slice.len(), - next_write: 0, - }), - vq: master, - ret_send: false, - ret_recv: false, - reusable: false, - }), - await_queue: None, - }) - } - (BuffSpec::Multiple(send_size_lst), BuffSpec::Multiple(recv_size_lst)) => { - let send_data_slice = unsafe { (*send_data).as_slice_u8() }; - let mut send_desc_lst: Vec = - Vec::with_capacity(send_size_lst.len()); - let mut index = 0usize; - - for byte in send_size_lst { - let end_index = index + usize::from(*byte); - let next_slice = match send_data_slice.get(index..end_index) { - Some(slice) => slice, - None => { - return Err(VirtqError::BufferSizeWrong(send_data_slice.len())) - } - }; - - match self - .mem_pool - .pull_from_raw(Rc::clone(&self.mem_pool), next_slice) - { - Ok(desc) => send_desc_lst.push(desc), - Err(vq_err) => return Err(vq_err), - }; - - // update the starting index for the next iteration - index += usize::from(*byte); - } - - let recv_data_slice = unsafe { (*recv_data).as_slice_u8() }; - let mut recv_desc_lst: Vec = - Vec::with_capacity(recv_size_lst.len()); - let mut index = 0usize; - - for byte in recv_size_lst { - let end_index = index + usize::from(*byte); - let next_slice = match recv_data_slice.get(index..end_index) { - Some(slice) => slice, - None => { - return Err(VirtqError::BufferSizeWrong(recv_data_slice.len())) - } - }; - - match self - .mem_pool - .pull_from_raw(Rc::clone(&self.mem_pool), next_slice) - { - Ok(desc) => recv_desc_lst.push(desc), - Err(vq_err) => return Err(vq_err), - }; - - // update the starting index for the next iteration - index += usize::from(*byte); - } - - Ok(TransferToken { - state: 
TransferState::Ready, - buff_tkn: Some(BufferToken { - send_buff: Some(Buffer::Multiple { - desc_lst: send_desc_lst.into_boxed_slice(), - len: send_data_slice.len(), - next_write: 0, - }), - recv_buff: Some(Buffer::Multiple { - desc_lst: recv_desc_lst.into_boxed_slice(), - len: recv_data_slice.len(), - next_write: 0, - }), - vq: master, - ret_send: false, - ret_recv: false, - reusable: false, - }), - await_queue: None, - }) - } - (BuffSpec::Multiple(send_size_lst), BuffSpec::Single(recv_size)) => { - let send_data_slice = unsafe { (*send_data).as_slice_u8() }; - let mut send_desc_lst: Vec = - Vec::with_capacity(send_size_lst.len()); - let mut index = 0usize; - - for byte in send_size_lst { - let end_index = index + usize::from(*byte); - let next_slice = match send_data_slice.get(index..end_index) { - Some(slice) => slice, - None => { - return Err(VirtqError::BufferSizeWrong(send_data_slice.len())) - } - }; - - match self - .mem_pool - .pull_from_raw(Rc::clone(&self.mem_pool), next_slice) - { - Ok(desc) => send_desc_lst.push(desc), - Err(vq_err) => return Err(vq_err), - }; - - // update the starting index for the next iteration - index += usize::from(*byte); - } - - let recv_data_slice = unsafe { (*recv_data).as_slice_u8() }; - - // Buffer must have the right size - if recv_data_slice.len() != recv_size.into() { - return Err(VirtqError::BufferSizeWrong(recv_data_slice.len())); - } - - let recv_desc = match self - .mem_pool - .pull_from_raw(Rc::clone(&self.mem_pool), recv_data_slice) - { - Ok(desc) => desc, - Err(vq_err) => return Err(vq_err), - }; - - Ok(TransferToken { - state: TransferState::Ready, - buff_tkn: Some(BufferToken { - send_buff: Some(Buffer::Multiple { - desc_lst: send_desc_lst.into_boxed_slice(), - len: send_data_slice.len(), - next_write: 0, - }), - recv_buff: Some(Buffer::Single { - desc_lst: vec![recv_desc].into_boxed_slice(), - len: recv_data_slice.len(), - next_write: 0, - }), - vq: master, - ret_send: false, - ret_recv: false, - reusable: false, - }), - await_queue: None, - }) - } - (BuffSpec::Indirect(send_size_lst), BuffSpec::Indirect(recv_size_lst)) => { - let send_data_slice = unsafe { (*send_data).as_slice_u8() }; - let mut send_desc_lst: Vec = - Vec::with_capacity(send_size_lst.len()); - let mut index = 0usize; - - for byte in send_size_lst { - let end_index = index + usize::from(*byte); - let next_slice = match send_data_slice.get(index..end_index) { - Some(slice) => slice, - None => { - return Err(VirtqError::BufferSizeWrong(send_data_slice.len())) - } - }; - - send_desc_lst.push( - self.mem_pool - .pull_from_raw_untracked(Rc::clone(&self.mem_pool), next_slice), - ); - - // update the starting index for the next iteration - index += usize::from(*byte); - } - - let recv_data_slice = unsafe { (*recv_data).as_slice_u8() }; - let mut recv_desc_lst: Vec = - Vec::with_capacity(recv_size_lst.len()); - let mut index = 0usize; - - for byte in recv_size_lst { - let end_index = index + usize::from(*byte); - let next_slice = match recv_data_slice.get(index..end_index) { - Some(slice) => slice, - None => { - return Err(VirtqError::BufferSizeWrong(recv_data_slice.len())) - } - }; - - recv_desc_lst.push( - self.mem_pool - .pull_from_raw_untracked(Rc::clone(&self.mem_pool), next_slice), - ); - - // update the starting index for the next iteration - index += usize::from(*byte); - } - - let ctrl_desc = match self - .create_indirect_ctrl(Some(&send_desc_lst), Some(&recv_desc_lst)) - { - Ok(desc) => desc, - Err(vq_err) => return Err(vq_err), - }; - - Ok(TransferToken { - state: 
TransferState::Ready, - buff_tkn: Some(BufferToken { - recv_buff: Some(Buffer::Indirect { - desc_lst: recv_desc_lst.into_boxed_slice(), - ctrl_desc: ctrl_desc.no_dealloc_clone(), - len: recv_data_slice.len(), - next_write: 0, - }), - send_buff: Some(Buffer::Indirect { - desc_lst: send_desc_lst.into_boxed_slice(), - ctrl_desc, - len: send_data_slice.len(), - next_write: 0, - }), - vq: master, - ret_send: false, - ret_recv: false, - reusable: false, - }), - await_queue: None, - }) - } - (BuffSpec::Indirect(_), BuffSpec::Single(_)) - | (BuffSpec::Indirect(_), BuffSpec::Multiple(_)) => Err(VirtqError::BufferInWithDirect), - (BuffSpec::Single(_), BuffSpec::Indirect(_)) - | (BuffSpec::Multiple(_), BuffSpec::Indirect(_)) => Err(VirtqError::BufferInWithDirect), - } - } - } + self.prep_transfer_from_raw_static(send, recv) } - /// See `Virtq.prep_buffer()` documentation. - pub fn prep_buffer( - &self, - master: Rc, + fn prep_buffer( + self: Rc, send: Option>, recv: Option>, ) -> Result { - match (send, recv) { - // No buffers specified - (None, None) => Err(VirtqError::BufferNotSpecified), - // Send buffer specified, No recv buffer - (Some(spec), None) => { - match spec { - BuffSpec::Single(size) => { - match self.mem_pool.pull(Rc::clone(&self.mem_pool), size) { - Ok(desc) => { - let buffer = Buffer::Single { - desc_lst: vec![desc].into_boxed_slice(), - len: size.into(), - next_write: 0, - }; - - Ok(BufferToken { - send_buff: Some(buffer), - recv_buff: None, - vq: master, - ret_send: true, - ret_recv: false, - reusable: true, - }) - } - Err(vq_err) => Err(vq_err), - } - } - BuffSpec::Multiple(size_lst) => { - let mut desc_lst: Vec = Vec::with_capacity(size_lst.len()); - let mut len = 0usize; - - for size in size_lst { - match self.mem_pool.pull(Rc::clone(&self.mem_pool), *size) { - Ok(desc) => desc_lst.push(desc), - Err(vq_err) => return Err(vq_err), - } - len += usize::from(*size); - } - - let buffer = Buffer::Multiple { - desc_lst: desc_lst.into_boxed_slice(), - len, - next_write: 0, - }; - - Ok(BufferToken { - send_buff: Some(buffer), - recv_buff: None, - vq: master, - ret_send: true, - ret_recv: false, - reusable: true, - }) - } - BuffSpec::Indirect(size_lst) => { - let mut desc_lst: Vec = Vec::with_capacity(size_lst.len()); - let mut len = 0usize; - - for size in size_lst { - // As the indirect list does only consume one descriptor for the - // control descriptor, the actual list is untracked - desc_lst.push( - self.mem_pool - .pull_untracked(Rc::clone(&self.mem_pool), *size), - ); - len += usize::from(*size); - } - - let ctrl_desc = match self.create_indirect_ctrl(Some(&desc_lst), None) { - Ok(desc) => desc, - Err(vq_err) => return Err(vq_err), - }; - - let buffer = Buffer::Indirect { - desc_lst: desc_lst.into_boxed_slice(), - ctrl_desc, - len, - next_write: 0, - }; - - Ok(BufferToken { - send_buff: Some(buffer), - recv_buff: None, - vq: master, - ret_send: true, - ret_recv: false, - reusable: true, - }) - } - } - } - // No send buffer, recv buffer is specified - (None, Some(spec)) => { - match spec { - BuffSpec::Single(size) => { - match self.mem_pool.pull(Rc::clone(&self.mem_pool), size) { - Ok(desc) => { - let buffer = Buffer::Single { - desc_lst: vec![desc].into_boxed_slice(), - len: size.into(), - next_write: 0, - }; - - Ok(BufferToken { - send_buff: None, - recv_buff: Some(buffer), - vq: master, - ret_send: false, - ret_recv: true, - reusable: true, - }) - } - Err(vq_err) => Err(vq_err), - } - } - BuffSpec::Multiple(size_lst) => { - let mut desc_lst: Vec = 
Vec::with_capacity(size_lst.len()); - let mut len = 0usize; - - for size in size_lst { - match self.mem_pool.pull(Rc::clone(&self.mem_pool), *size) { - Ok(desc) => desc_lst.push(desc), - Err(vq_err) => return Err(vq_err), - } - len += usize::from(*size); - } - - let buffer = Buffer::Multiple { - desc_lst: desc_lst.into_boxed_slice(), - len, - next_write: 0, - }; - - Ok(BufferToken { - send_buff: None, - recv_buff: Some(buffer), - vq: master, - ret_send: false, - ret_recv: true, - reusable: true, - }) - } - BuffSpec::Indirect(size_lst) => { - let mut desc_lst: Vec = Vec::with_capacity(size_lst.len()); - let mut len = 0usize; - - for size in size_lst { - // As the indirect list does only consume one descriptor for the - // control descriptor, the actual list is untracked - desc_lst.push( - self.mem_pool - .pull_untracked(Rc::clone(&self.mem_pool), *size), - ); - len += usize::from(*size); - } - - let ctrl_desc = match self.create_indirect_ctrl(None, Some(&desc_lst)) { - Ok(desc) => desc, - Err(vq_err) => return Err(vq_err), - }; - - let buffer = Buffer::Indirect { - desc_lst: desc_lst.into_boxed_slice(), - ctrl_desc, - len, - next_write: 0, - }; - - Ok(BufferToken { - send_buff: None, - recv_buff: Some(buffer), - vq: master, - ret_send: false, - ret_recv: true, - reusable: true, - }) - } - } - } - // Send buffer specified, recv buffer specified - (Some(send_spec), Some(recv_spec)) => { - match (send_spec, recv_spec) { - (BuffSpec::Single(send_size), BuffSpec::Single(recv_size)) => { - let send_buff = - match self.mem_pool.pull(Rc::clone(&self.mem_pool), send_size) { - Ok(send_desc) => Some(Buffer::Single { - desc_lst: vec![send_desc].into_boxed_slice(), - len: send_size.into(), - next_write: 0, - }), - Err(vq_err) => return Err(vq_err), - }; - - let recv_buff = - match self.mem_pool.pull(Rc::clone(&self.mem_pool), recv_size) { - Ok(recv_desc) => Some(Buffer::Single { - desc_lst: vec![recv_desc].into_boxed_slice(), - len: recv_size.into(), - next_write: 0, - }), - Err(vq_err) => return Err(vq_err), - }; - - Ok(BufferToken { - send_buff, - recv_buff, - vq: master, - ret_send: true, - ret_recv: true, - reusable: true, - }) - } - (BuffSpec::Single(send_size), BuffSpec::Multiple(recv_size_lst)) => { - let send_buff = - match self.mem_pool.pull(Rc::clone(&self.mem_pool), send_size) { - Ok(send_desc) => Some(Buffer::Single { - desc_lst: vec![send_desc].into_boxed_slice(), - len: send_size.into(), - next_write: 0, - }), - Err(vq_err) => return Err(vq_err), - }; - - let mut recv_desc_lst: Vec = - Vec::with_capacity(recv_size_lst.len()); - let mut recv_len = 0usize; - - for size in recv_size_lst { - match self.mem_pool.pull(Rc::clone(&self.mem_pool), *size) { - Ok(desc) => recv_desc_lst.push(desc), - Err(vq_err) => return Err(vq_err), - } - recv_len += usize::from(*size); - } - - let recv_buff = Some(Buffer::Multiple { - desc_lst: recv_desc_lst.into_boxed_slice(), - len: recv_len, - next_write: 0, - }); - - Ok(BufferToken { - send_buff, - recv_buff, - vq: master, - ret_send: true, - ret_recv: true, - reusable: true, - }) - } - (BuffSpec::Multiple(send_size_lst), BuffSpec::Multiple(recv_size_lst)) => { - let mut send_desc_lst: Vec = - Vec::with_capacity(send_size_lst.len()); - let mut send_len = 0usize; - for size in send_size_lst { - match self.mem_pool.pull(Rc::clone(&self.mem_pool), *size) { - Ok(desc) => send_desc_lst.push(desc), - Err(vq_err) => return Err(vq_err), - } - send_len += usize::from(*size); - } - - let send_buff = Some(Buffer::Multiple { - desc_lst: send_desc_lst.into_boxed_slice(), - 
len: send_len, - next_write: 0, - }); - - let mut recv_desc_lst: Vec = - Vec::with_capacity(recv_size_lst.len()); - let mut recv_len = 0usize; - - for size in recv_size_lst { - match self.mem_pool.pull(Rc::clone(&self.mem_pool), *size) { - Ok(desc) => recv_desc_lst.push(desc), - Err(vq_err) => return Err(vq_err), - } - recv_len += usize::from(*size); - } - - let recv_buff = Some(Buffer::Multiple { - desc_lst: recv_desc_lst.into_boxed_slice(), - len: recv_len, - next_write: 0, - }); - - Ok(BufferToken { - send_buff, - recv_buff, - vq: master, - ret_send: true, - ret_recv: true, - reusable: true, - }) - } - (BuffSpec::Multiple(send_size_lst), BuffSpec::Single(recv_size)) => { - let mut send_desc_lst: Vec = - Vec::with_capacity(send_size_lst.len()); - let mut send_len = 0usize; - - for size in send_size_lst { - match self.mem_pool.pull(Rc::clone(&self.mem_pool), *size) { - Ok(desc) => send_desc_lst.push(desc), - Err(vq_err) => return Err(vq_err), - } - send_len += usize::from(*size); - } - - let send_buff = Some(Buffer::Multiple { - desc_lst: send_desc_lst.into_boxed_slice(), - len: send_len, - next_write: 0, - }); - - let recv_buff = - match self.mem_pool.pull(Rc::clone(&self.mem_pool), recv_size) { - Ok(recv_desc) => Some(Buffer::Single { - desc_lst: vec![recv_desc].into_boxed_slice(), - len: recv_size.into(), - next_write: 0, - }), - Err(vq_err) => return Err(vq_err), - }; - - Ok(BufferToken { - send_buff, - recv_buff, - vq: master, - ret_send: true, - ret_recv: true, - reusable: true, - }) - } - (BuffSpec::Indirect(send_size_lst), BuffSpec::Indirect(recv_size_lst)) => { - let mut send_desc_lst: Vec = - Vec::with_capacity(send_size_lst.len()); - let mut send_len = 0usize; - - for size in send_size_lst { - // As the indirect list does only consume one descriptor for the - // control descriptor, the actual list is untracked - send_desc_lst.push( - self.mem_pool - .pull_untracked(Rc::clone(&self.mem_pool), *size), - ); - send_len += usize::from(*size); - } - - let mut recv_desc_lst: Vec = - Vec::with_capacity(recv_size_lst.len()); - let mut recv_len = 0usize; - - for size in recv_size_lst { - // As the indirect list does only consume one descriptor for the - // control descriptor, the actual list is untracked - recv_desc_lst.push( - self.mem_pool - .pull_untracked(Rc::clone(&self.mem_pool), *size), - ); - recv_len += usize::from(*size); - } - - let ctrl_desc = match self - .create_indirect_ctrl(Some(&send_desc_lst), Some(&recv_desc_lst)) - { - Ok(desc) => desc, - Err(vq_err) => return Err(vq_err), - }; - - let recv_buff = Some(Buffer::Indirect { - desc_lst: recv_desc_lst.into_boxed_slice(), - ctrl_desc: ctrl_desc.no_dealloc_clone(), - len: recv_len, - next_write: 0, - }); - let send_buff = Some(Buffer::Indirect { - desc_lst: send_desc_lst.into_boxed_slice(), - ctrl_desc, - len: send_len, - next_write: 0, - }); - - Ok(BufferToken { - send_buff, - recv_buff, - vq: master, - ret_send: true, - ret_recv: true, - reusable: true, - }) - } - (BuffSpec::Indirect(_), BuffSpec::Single(_)) - | (BuffSpec::Indirect(_), BuffSpec::Multiple(_)) => Err(VirtqError::BufferInWithDirect), - (BuffSpec::Single(_), BuffSpec::Indirect(_)) - | (BuffSpec::Multiple(_), BuffSpec::Indirect(_)) => Err(VirtqError::BufferInWithDirect), - } - } - } + self.prep_buffer_static(send, recv) } - pub fn size(&self) -> VqSize { + fn size(&self) -> VqSize { self.size } } -// Private Interface for PackedVq -impl PackedVq { +impl VirtqPrivate for PackedVq { + fn mem_pool(&self) -> Rc { + self.mem_pool.clone() + } + fn 
create_indirect_ctrl( &self, send: Option<&Vec>, @@ -2326,12 +1315,3 @@ impl PackedVq { } } } - -pub mod error { - pub enum VqPackedError { - General, - SizeNotAllowed(u16), - QueueNotExisting(u16), - FeatNotSupported(u64), - } -} diff --git a/src/drivers/virtio/virtqueue/split.rs b/src/drivers/virtio/virtqueue/split.rs index 010723f850..3906f3d83b 100644 --- a/src/drivers/virtio/virtqueue/split.rs +++ b/src/drivers/virtio/virtqueue/split.rs @@ -17,8 +17,8 @@ use super::super::transport::mmio::{ComCfg, NotifCfg, NotifCtrl}; use super::super::transport::pci::{ComCfg, NotifCfg, NotifCtrl}; use super::error::VirtqError; use super::{ - AsSliceU8, BuffSpec, Buffer, BufferToken, Bytes, DescrFlags, MemDescr, MemPool, Transfer, - TransferState, TransferToken, Virtq, VqIndex, VqSize, + BuffSpec, BufferToken, Bytes, DescrFlags, MemDescr, MemPool, Transfer, TransferState, + TransferToken, Virtq, VirtqPrivate, VqIndex, VqSize, }; use crate::arch::memory_barrier; use crate::arch::mm::paging::{BasePageSize, PageSize}; @@ -247,46 +247,24 @@ pub struct SplitVq { notif_ctrl: NotifCtrl, } -impl SplitVq { - /// Enables interrupts for this virtqueue upon receiving a transfer - pub fn enable_notifs(&self) { +impl Virtq for SplitVq { + fn enable_notifs(&self) { self.ring.borrow_mut().drv_enable_notif(); } - /// Disables interrupts for this virtqueue upon receiving a transfer - pub fn disable_notifs(&self) { + fn disable_notifs(&self) { self.ring.borrow_mut().drv_disable_notif(); } - /// See `Virtq.poll()` documentation - pub fn poll(&self) { + fn poll(&self) { self.ring.borrow_mut().poll() } - /// Dispatches a batch of transfer token. The buffers of the respective transfers are provided to the queue in - /// sequence. After the last buffer has been written, the queue marks the first buffer as available and triggers - /// a device notification if wanted by the device. - /// - /// The `notif` parameter indicates if the driver wants to have a notification for this specific - /// transfer. This is only for performance optimization. As it is NOT ensured, that the device sees the - /// updated notification flags before finishing transfers! - pub fn dispatch_batch(&self, _tkns: Vec, _notif: bool) { + fn dispatch_batch(&self, _tkns: Vec, _notif: bool) { unimplemented!(); } - /// Dispatches a batch of TransferTokens. The Transfers will be placed in to the `await_queue` - /// upon finish. - /// - /// The `notif` parameter indicates if the driver wants to have a notification for this specific - /// transfer. This is only for performance optimization. As it is NOT ensured, that the device sees the - /// updated notification flags before finishing transfers! - /// - /// Dispatches a batch of transfer token. The buffers of the respective transfers are provided to the queue in - /// sequence. After the last buffer has been written, the queue marks the first buffer as available and triggers - /// a device notification if wanted by the device. - /// - /// Tokens to get a reference to the provided await_queue, where they will be placed upon finish. - pub fn dispatch_batch_await( + fn dispatch_batch_await( &self, _tkns: Vec, _await_queue: Rc>>, @@ -295,12 +273,7 @@ impl SplitVq { unimplemented!() } - /// See `Virtq.prep_transfer()` documentation. - /// - /// The `notif` parameter indicates if the driver wants to have a notification for this specific - /// transfer. This is only for performance optimization. As it is NOT ensured, that the device sees the - /// updated notification flags before finishing transfers! 
- pub fn dispatch(&self, tkn: TransferToken, notif: bool) { + fn dispatch(&self, tkn: TransferToken, notif: bool) { let (next_off, next_wrap) = self.ring.borrow_mut().push(tkn); if notif { @@ -330,23 +303,21 @@ impl SplitVq { } } - /// See `Virtq.index()` documentation - pub fn index(&self) -> VqIndex { + fn index(&self) -> VqIndex { self.index } - /// See `Virtq::new()` documentation - pub fn new( + fn new( com_cfg: &mut ComCfg, notif_cfg: &NotifCfg, size: VqSize, index: VqIndex, _feats: u64, - ) -> Result { + ) -> Result { // Get a handler to the queues configuration area. let mut vq_handler = match com_cfg.select_vq(index.into()) { Some(handler) => handler, - None => return Err(()), + None => return Err(VirtqError::QueueNotExisting(index.into())), }; let size = vq_handler.set_vq_size(size.0); @@ -442,1013 +413,28 @@ impl SplitVq { }) } - /// See `Virtq.prep_transfer_from_raw()` documentation. - pub fn prep_transfer_from_raw( - &self, - master: Rc, - send: Option<(*mut T, BuffSpec<'_>)>, - recv: Option<(*mut K, BuffSpec<'_>)>, + fn prep_transfer_from_raw( + self: Rc, + send: Option<(&[u8], BuffSpec<'_>)>, + recv: Option<(&mut [u8], BuffSpec<'_>)>, ) -> Result { - match (send, recv) { - (None, None) => Err(VirtqError::BufferNotSpecified), - (Some((send_data, send_spec)), None) => { - match send_spec { - BuffSpec::Single(size) => { - let data_slice = unsafe { (*send_data).as_slice_u8() }; - - // Buffer must have the right size - if data_slice.len() != size.into() { - return Err(VirtqError::BufferSizeWrong(data_slice.len())); - } - - let desc = match self - .mem_pool - .pull_from_raw(Rc::clone(&self.mem_pool), data_slice) - { - Ok(desc) => desc, - Err(vq_err) => return Err(vq_err), - }; - - Ok(TransferToken { - state: TransferState::Ready, - buff_tkn: Some(BufferToken { - send_buff: Some(Buffer::Single { - desc_lst: vec![desc].into_boxed_slice(), - len: data_slice.len(), - next_write: 0, - }), - recv_buff: None, - vq: master, - ret_send: false, - ret_recv: false, - reusable: false, - }), - await_queue: None, - }) - } - BuffSpec::Multiple(size_lst) => { - let data_slice = unsafe { (*send_data).as_slice_u8() }; - let mut desc_lst: Vec = Vec::with_capacity(size_lst.len()); - let mut index = 0usize; - - for byte in size_lst { - let end_index = index + usize::from(*byte); - let next_slice = match data_slice.get(index..end_index) { - Some(slice) => slice, - None => return Err(VirtqError::BufferSizeWrong(data_slice.len())), - }; - - match self - .mem_pool - .pull_from_raw(Rc::clone(&self.mem_pool), next_slice) - { - Ok(desc) => desc_lst.push(desc), - Err(vq_err) => return Err(vq_err), - }; - - // update the starting index for the next iteration - index += usize::from(*byte); - } - - Ok(TransferToken { - state: TransferState::Ready, - buff_tkn: Some(BufferToken { - send_buff: Some(Buffer::Multiple { - desc_lst: desc_lst.into_boxed_slice(), - len: data_slice.len(), - next_write: 0, - }), - recv_buff: None, - vq: master, - ret_send: false, - ret_recv: false, - reusable: false, - }), - await_queue: None, - }) - } - BuffSpec::Indirect(size_lst) => { - let data_slice = unsafe { (*send_data).as_slice_u8() }; - let mut desc_lst: Vec = Vec::with_capacity(size_lst.len()); - let mut index = 0usize; - - for byte in size_lst { - let end_index = index + usize::from(*byte); - let next_slice = match data_slice.get(index..end_index) { - Some(slice) => slice, - None => return Err(VirtqError::BufferSizeWrong(data_slice.len())), - }; - - desc_lst.push( - self.mem_pool - 
.pull_from_raw_untracked(Rc::clone(&self.mem_pool), next_slice), - ); - - // update the starting index for the next iteration - index += usize::from(*byte); - } - - let ctrl_desc = match self.create_indirect_ctrl(Some(&desc_lst), None) { - Ok(desc) => desc, - Err(vq_err) => return Err(vq_err), - }; - - Ok(TransferToken { - state: TransferState::Ready, - buff_tkn: Some(BufferToken { - send_buff: Some(Buffer::Indirect { - desc_lst: desc_lst.into_boxed_slice(), - ctrl_desc, - len: data_slice.len(), - next_write: 0, - }), - recv_buff: None, - vq: master, - ret_send: false, - ret_recv: false, - reusable: false, - }), - await_queue: None, - }) - } - } - } - (None, Some((recv_data, recv_spec))) => { - match recv_spec { - BuffSpec::Single(size) => { - let data_slice = unsafe { (*recv_data).as_slice_u8() }; - - // Buffer must have the right size - if data_slice.len() != size.into() { - return Err(VirtqError::BufferSizeWrong(data_slice.len())); - } - - let desc = match self - .mem_pool - .pull_from_raw(Rc::clone(&self.mem_pool), data_slice) - { - Ok(desc) => desc, - Err(vq_err) => return Err(vq_err), - }; - - Ok(TransferToken { - state: TransferState::Ready, - buff_tkn: Some(BufferToken { - send_buff: None, - recv_buff: Some(Buffer::Single { - desc_lst: vec![desc].into_boxed_slice(), - len: data_slice.len(), - next_write: 0, - }), - vq: master, - ret_send: false, - ret_recv: false, - reusable: false, - }), - await_queue: None, - }) - } - BuffSpec::Multiple(size_lst) => { - let data_slice = unsafe { (*recv_data).as_slice_u8() }; - let mut desc_lst: Vec = Vec::with_capacity(size_lst.len()); - let mut index = 0usize; - - for byte in size_lst { - let end_index = index + usize::from(*byte); - let next_slice = match data_slice.get(index..end_index) { - Some(slice) => slice, - None => return Err(VirtqError::BufferSizeWrong(data_slice.len())), - }; - - match self - .mem_pool - .pull_from_raw(Rc::clone(&self.mem_pool), next_slice) - { - Ok(desc) => desc_lst.push(desc), - Err(vq_err) => return Err(vq_err), - }; - - // update the starting index for the next iteration - index += usize::from(*byte); - } - - Ok(TransferToken { - state: TransferState::Ready, - buff_tkn: Some(BufferToken { - send_buff: None, - recv_buff: Some(Buffer::Multiple { - desc_lst: desc_lst.into_boxed_slice(), - len: data_slice.len(), - next_write: 0, - }), - vq: master, - ret_send: false, - ret_recv: false, - reusable: false, - }), - await_queue: None, - }) - } - BuffSpec::Indirect(size_lst) => { - let data_slice = unsafe { (*recv_data).as_slice_u8() }; - let mut desc_lst: Vec = Vec::with_capacity(size_lst.len()); - let mut index = 0usize; - - for byte in size_lst { - let end_index = index + usize::from(*byte); - let next_slice = match data_slice.get(index..end_index) { - Some(slice) => slice, - None => return Err(VirtqError::BufferSizeWrong(data_slice.len())), - }; - - desc_lst.push( - self.mem_pool - .pull_from_raw_untracked(Rc::clone(&self.mem_pool), next_slice), - ); - - // update the starting index for the next iteration - index += usize::from(*byte); - } - - let ctrl_desc = match self.create_indirect_ctrl(None, Some(&desc_lst)) { - Ok(desc) => desc, - Err(vq_err) => return Err(vq_err), - }; - - Ok(TransferToken { - state: TransferState::Ready, - buff_tkn: Some(BufferToken { - send_buff: None, - recv_buff: Some(Buffer::Indirect { - desc_lst: desc_lst.into_boxed_slice(), - ctrl_desc, - len: data_slice.len(), - next_write: 0, - }), - vq: master, - ret_send: false, - ret_recv: false, - reusable: false, - }), - await_queue: None, - }) - } 
- } - } - (Some((send_data, send_spec)), Some((recv_data, recv_spec))) => { - match (send_spec, recv_spec) { - (BuffSpec::Single(send_size), BuffSpec::Single(recv_size)) => { - let send_data_slice = unsafe { (*send_data).as_slice_u8() }; - - // Buffer must have the right size - if send_data_slice.len() != send_size.into() { - return Err(VirtqError::BufferSizeWrong(send_data_slice.len())); - } - - let send_desc = match self - .mem_pool - .pull_from_raw(Rc::clone(&self.mem_pool), send_data_slice) - { - Ok(desc) => desc, - Err(vq_err) => return Err(vq_err), - }; - - let recv_data_slice = unsafe { (*recv_data).as_slice_u8() }; - - // Buffer must have the right size - if recv_data_slice.len() != recv_size.into() { - return Err(VirtqError::BufferSizeWrong(recv_data_slice.len())); - } - - let recv_desc = match self - .mem_pool - .pull_from_raw(Rc::clone(&self.mem_pool), recv_data_slice) - { - Ok(desc) => desc, - Err(vq_err) => return Err(vq_err), - }; - - Ok(TransferToken { - state: TransferState::Ready, - buff_tkn: Some(BufferToken { - send_buff: Some(Buffer::Single { - desc_lst: vec![send_desc].into_boxed_slice(), - len: send_data_slice.len(), - next_write: 0, - }), - recv_buff: Some(Buffer::Single { - desc_lst: vec![recv_desc].into_boxed_slice(), - len: recv_data_slice.len(), - next_write: 0, - }), - vq: master, - ret_send: false, - ret_recv: false, - reusable: false, - }), - await_queue: None, - }) - } - (BuffSpec::Single(send_size), BuffSpec::Multiple(recv_size_lst)) => { - let send_data_slice = unsafe { (*send_data).as_slice_u8() }; - - // Buffer must have the right size - if send_data_slice.len() != send_size.into() { - return Err(VirtqError::BufferSizeWrong(send_data_slice.len())); - } - - let send_desc = match self - .mem_pool - .pull_from_raw(Rc::clone(&self.mem_pool), send_data_slice) - { - Ok(desc) => desc, - Err(vq_err) => return Err(vq_err), - }; - - let recv_data_slice = unsafe { (*recv_data).as_slice_u8() }; - let mut recv_desc_lst: Vec = - Vec::with_capacity(recv_size_lst.len()); - let mut index = 0usize; - - for byte in recv_size_lst { - let end_index = index + usize::from(*byte); - let next_slice = match recv_data_slice.get(index..end_index) { - Some(slice) => slice, - None => { - return Err(VirtqError::BufferSizeWrong(recv_data_slice.len())) - } - }; - - match self - .mem_pool - .pull_from_raw(Rc::clone(&self.mem_pool), next_slice) - { - Ok(desc) => recv_desc_lst.push(desc), - Err(vq_err) => return Err(vq_err), - }; - - // update the starting index for the next iteration - index += usize::from(*byte); - } - - Ok(TransferToken { - state: TransferState::Ready, - buff_tkn: Some(BufferToken { - send_buff: Some(Buffer::Single { - desc_lst: vec![send_desc].into_boxed_slice(), - len: send_data_slice.len(), - next_write: 0, - }), - recv_buff: Some(Buffer::Multiple { - desc_lst: recv_desc_lst.into_boxed_slice(), - len: recv_data_slice.len(), - next_write: 0, - }), - vq: master, - ret_send: false, - ret_recv: false, - reusable: false, - }), - await_queue: None, - }) - } - (BuffSpec::Multiple(send_size_lst), BuffSpec::Multiple(recv_size_lst)) => { - let send_data_slice = unsafe { (*send_data).as_slice_u8() }; - let mut send_desc_lst: Vec = - Vec::with_capacity(send_size_lst.len()); - let mut index = 0usize; - - for byte in send_size_lst { - let end_index = index + usize::from(*byte); - let next_slice = match send_data_slice.get(index..end_index) { - Some(slice) => slice, - None => { - return Err(VirtqError::BufferSizeWrong(send_data_slice.len())) - } - }; - - match self - .mem_pool - 
.pull_from_raw(Rc::clone(&self.mem_pool), next_slice) - { - Ok(desc) => send_desc_lst.push(desc), - Err(vq_err) => return Err(vq_err), - }; - - // update the starting index for the next iteration - index += usize::from(*byte); - } - - let recv_data_slice = unsafe { (*recv_data).as_slice_u8() }; - let mut recv_desc_lst: Vec = - Vec::with_capacity(recv_size_lst.len()); - let mut index = 0usize; - - for byte in recv_size_lst { - let end_index = index + usize::from(*byte); - let next_slice = match recv_data_slice.get(index..end_index) { - Some(slice) => slice, - None => { - return Err(VirtqError::BufferSizeWrong(recv_data_slice.len())) - } - }; - - match self - .mem_pool - .pull_from_raw(Rc::clone(&self.mem_pool), next_slice) - { - Ok(desc) => recv_desc_lst.push(desc), - Err(vq_err) => return Err(vq_err), - }; - - // update the starting index for the next iteration - index += usize::from(*byte); - } - - Ok(TransferToken { - state: TransferState::Ready, - buff_tkn: Some(BufferToken { - send_buff: Some(Buffer::Multiple { - desc_lst: send_desc_lst.into_boxed_slice(), - len: send_data_slice.len(), - next_write: 0, - }), - recv_buff: Some(Buffer::Multiple { - desc_lst: recv_desc_lst.into_boxed_slice(), - len: recv_data_slice.len(), - next_write: 0, - }), - vq: master, - ret_send: false, - ret_recv: false, - reusable: false, - }), - await_queue: None, - }) - } - (BuffSpec::Multiple(send_size_lst), BuffSpec::Single(recv_size)) => { - let send_data_slice = unsafe { (*send_data).as_slice_u8() }; - let mut send_desc_lst: Vec = - Vec::with_capacity(send_size_lst.len()); - let mut index = 0usize; - - for byte in send_size_lst { - let end_index = index + usize::from(*byte); - let next_slice = match send_data_slice.get(index..end_index) { - Some(slice) => slice, - None => { - return Err(VirtqError::BufferSizeWrong(send_data_slice.len())) - } - }; - - match self - .mem_pool - .pull_from_raw(Rc::clone(&self.mem_pool), next_slice) - { - Ok(desc) => send_desc_lst.push(desc), - Err(vq_err) => return Err(vq_err), - }; - - // update the starting index for the next iteration - index += usize::from(*byte); - } - - let recv_data_slice = unsafe { (*recv_data).as_slice_u8() }; - - // Buffer must have the right size - if recv_data_slice.len() != recv_size.into() { - return Err(VirtqError::BufferSizeWrong(recv_data_slice.len())); - } - - let recv_desc = match self - .mem_pool - .pull_from_raw(Rc::clone(&self.mem_pool), recv_data_slice) - { - Ok(desc) => desc, - Err(vq_err) => return Err(vq_err), - }; - - Ok(TransferToken { - state: TransferState::Ready, - buff_tkn: Some(BufferToken { - send_buff: Some(Buffer::Multiple { - desc_lst: send_desc_lst.into_boxed_slice(), - len: send_data_slice.len(), - next_write: 0, - }), - recv_buff: Some(Buffer::Single { - desc_lst: vec![recv_desc].into_boxed_slice(), - len: recv_data_slice.len(), - next_write: 0, - }), - vq: master, - ret_send: false, - ret_recv: false, - reusable: false, - }), - await_queue: None, - }) - } - (BuffSpec::Indirect(send_size_lst), BuffSpec::Indirect(recv_size_lst)) => { - let send_data_slice = unsafe { (*send_data).as_slice_u8() }; - let mut send_desc_lst: Vec = - Vec::with_capacity(send_size_lst.len()); - let mut index = 0usize; - - for byte in send_size_lst { - let end_index = index + usize::from(*byte); - let next_slice = match send_data_slice.get(index..end_index) { - Some(slice) => slice, - None => { - return Err(VirtqError::BufferSizeWrong(send_data_slice.len())) - } - }; - - send_desc_lst.push( - self.mem_pool - 
.pull_from_raw_untracked(Rc::clone(&self.mem_pool), next_slice), - ); - - // update the starting index for the next iteration - index += usize::from(*byte); - } - - let recv_data_slice = unsafe { (*recv_data).as_slice_u8() }; - let mut recv_desc_lst: Vec = - Vec::with_capacity(recv_size_lst.len()); - let mut index = 0usize; - - for byte in recv_size_lst { - let end_index = index + usize::from(*byte); - let next_slice = match recv_data_slice.get(index..end_index) { - Some(slice) => slice, - None => { - return Err(VirtqError::BufferSizeWrong(recv_data_slice.len())) - } - }; - - recv_desc_lst.push( - self.mem_pool - .pull_from_raw_untracked(Rc::clone(&self.mem_pool), next_slice), - ); - - // update the starting index for the next iteration - index += usize::from(*byte); - } - - let ctrl_desc = match self - .create_indirect_ctrl(Some(&send_desc_lst), Some(&recv_desc_lst)) - { - Ok(desc) => desc, - Err(vq_err) => return Err(vq_err), - }; - - Ok(TransferToken { - state: TransferState::Ready, - buff_tkn: Some(BufferToken { - recv_buff: Some(Buffer::Indirect { - desc_lst: recv_desc_lst.into_boxed_slice(), - ctrl_desc: ctrl_desc.no_dealloc_clone(), - len: recv_data_slice.len(), - next_write: 0, - }), - send_buff: Some(Buffer::Indirect { - desc_lst: send_desc_lst.into_boxed_slice(), - ctrl_desc, - len: send_data_slice.len(), - next_write: 0, - }), - vq: master, - ret_send: false, - ret_recv: false, - reusable: false, - }), - await_queue: None, - }) - } - (BuffSpec::Indirect(_), BuffSpec::Single(_)) - | (BuffSpec::Indirect(_), BuffSpec::Multiple(_)) => Err(VirtqError::BufferInWithDirect), - (BuffSpec::Single(_), BuffSpec::Indirect(_)) - | (BuffSpec::Multiple(_), BuffSpec::Indirect(_)) => Err(VirtqError::BufferInWithDirect), - } - } - } + self.prep_transfer_from_raw_static(send, recv) } - /// See `Virtq.prep_buffer()` documentation. 
- pub fn prep_buffer( - &self, - master: Rc, + fn prep_buffer( + self: Rc, send: Option>, recv: Option>, ) -> Result { - match (send, recv) { - // No buffers specified - (None, None) => Err(VirtqError::BufferNotSpecified), - // Send buffer specified, No recv buffer - (Some(spec), None) => { - match spec { - BuffSpec::Single(size) => { - match self.mem_pool.pull(Rc::clone(&self.mem_pool), size) { - Ok(desc) => { - let buffer = Buffer::Single { - desc_lst: vec![desc].into_boxed_slice(), - len: size.into(), - next_write: 0, - }; - - Ok(BufferToken { - send_buff: Some(buffer), - recv_buff: None, - vq: master, - ret_send: true, - ret_recv: false, - reusable: true, - }) - } - Err(vq_err) => Err(vq_err), - } - } - BuffSpec::Multiple(size_lst) => { - let mut desc_lst: Vec = Vec::with_capacity(size_lst.len()); - let mut len = 0usize; - - for size in size_lst { - match self.mem_pool.pull(Rc::clone(&self.mem_pool), *size) { - Ok(desc) => desc_lst.push(desc), - Err(vq_err) => return Err(vq_err), - } - len += usize::from(*size); - } - - let buffer = Buffer::Multiple { - desc_lst: desc_lst.into_boxed_slice(), - len, - next_write: 0, - }; - - Ok(BufferToken { - send_buff: Some(buffer), - recv_buff: None, - vq: master, - ret_send: true, - ret_recv: false, - reusable: true, - }) - } - BuffSpec::Indirect(size_lst) => { - let mut desc_lst: Vec = Vec::with_capacity(size_lst.len()); - let mut len = 0usize; - - for size in size_lst { - // As the indirect list does only consume one descriptor for the - // control descriptor, the actual list is untracked - desc_lst.push( - self.mem_pool - .pull_untracked(Rc::clone(&self.mem_pool), *size), - ); - len += usize::from(*size); - } - - let ctrl_desc = match self.create_indirect_ctrl(Some(&desc_lst), None) { - Ok(desc) => desc, - Err(vq_err) => return Err(vq_err), - }; - - let buffer = Buffer::Indirect { - desc_lst: desc_lst.into_boxed_slice(), - ctrl_desc, - len, - next_write: 0, - }; - - Ok(BufferToken { - send_buff: Some(buffer), - recv_buff: None, - vq: master, - ret_send: true, - ret_recv: false, - reusable: true, - }) - } - } - } - // No send buffer, recv buffer is specified - (None, Some(spec)) => { - match spec { - BuffSpec::Single(size) => { - match self.mem_pool.pull(Rc::clone(&self.mem_pool), size) { - Ok(desc) => { - let buffer = Buffer::Single { - desc_lst: vec![desc].into_boxed_slice(), - len: size.into(), - next_write: 0, - }; - - Ok(BufferToken { - send_buff: None, - recv_buff: Some(buffer), - vq: master, - ret_send: false, - ret_recv: true, - reusable: true, - }) - } - Err(vq_err) => Err(vq_err), - } - } - BuffSpec::Multiple(size_lst) => { - let mut desc_lst: Vec = Vec::with_capacity(size_lst.len()); - let mut len = 0usize; - - for size in size_lst { - match self.mem_pool.pull(Rc::clone(&self.mem_pool), *size) { - Ok(desc) => desc_lst.push(desc), - Err(vq_err) => return Err(vq_err), - } - len += usize::from(*size); - } - - let buffer = Buffer::Multiple { - desc_lst: desc_lst.into_boxed_slice(), - len, - next_write: 0, - }; - - Ok(BufferToken { - send_buff: None, - recv_buff: Some(buffer), - vq: master, - ret_send: false, - ret_recv: true, - reusable: true, - }) - } - BuffSpec::Indirect(size_lst) => { - let mut desc_lst: Vec = Vec::with_capacity(size_lst.len()); - let mut len = 0usize; - - for size in size_lst { - // As the indirect list does only consume one descriptor for the - // control descriptor, the actual list is untracked - desc_lst.push( - self.mem_pool - .pull_untracked(Rc::clone(&self.mem_pool), *size), - ); - len += usize::from(*size); - } - 
- let ctrl_desc = match self.create_indirect_ctrl(None, Some(&desc_lst)) { - Ok(desc) => desc, - Err(vq_err) => return Err(vq_err), - }; - - let buffer = Buffer::Indirect { - desc_lst: desc_lst.into_boxed_slice(), - ctrl_desc, - len, - next_write: 0, - }; - - Ok(BufferToken { - send_buff: None, - recv_buff: Some(buffer), - vq: master, - ret_send: false, - ret_recv: true, - reusable: true, - }) - } - } - } - // Send buffer specified, recv buffer specified - (Some(send_spec), Some(recv_spec)) => { - match (send_spec, recv_spec) { - (BuffSpec::Single(send_size), BuffSpec::Single(recv_size)) => { - let send_buff = - match self.mem_pool.pull(Rc::clone(&self.mem_pool), send_size) { - Ok(send_desc) => Some(Buffer::Single { - desc_lst: vec![send_desc].into_boxed_slice(), - len: send_size.into(), - next_write: 0, - }), - Err(vq_err) => return Err(vq_err), - }; - - let recv_buff = - match self.mem_pool.pull(Rc::clone(&self.mem_pool), recv_size) { - Ok(recv_desc) => Some(Buffer::Single { - desc_lst: vec![recv_desc].into_boxed_slice(), - len: recv_size.into(), - next_write: 0, - }), - Err(vq_err) => return Err(vq_err), - }; - - Ok(BufferToken { - send_buff, - recv_buff, - vq: master, - ret_send: true, - ret_recv: true, - reusable: true, - }) - } - (BuffSpec::Single(send_size), BuffSpec::Multiple(recv_size_lst)) => { - let send_buff = - match self.mem_pool.pull(Rc::clone(&self.mem_pool), send_size) { - Ok(send_desc) => Some(Buffer::Single { - desc_lst: vec![send_desc].into_boxed_slice(), - len: send_size.into(), - next_write: 0, - }), - Err(vq_err) => return Err(vq_err), - }; - - let mut recv_desc_lst: Vec = - Vec::with_capacity(recv_size_lst.len()); - let mut recv_len = 0usize; - - for size in recv_size_lst { - match self.mem_pool.pull(Rc::clone(&self.mem_pool), *size) { - Ok(desc) => recv_desc_lst.push(desc), - Err(vq_err) => return Err(vq_err), - } - recv_len += usize::from(*size); - } - - let recv_buff = Some(Buffer::Multiple { - desc_lst: recv_desc_lst.into_boxed_slice(), - len: recv_len, - next_write: 0, - }); - - Ok(BufferToken { - send_buff, - recv_buff, - vq: master, - ret_send: true, - ret_recv: true, - reusable: true, - }) - } - (BuffSpec::Multiple(send_size_lst), BuffSpec::Multiple(recv_size_lst)) => { - let mut send_desc_lst: Vec = - Vec::with_capacity(send_size_lst.len()); - let mut send_len = 0usize; - for size in send_size_lst { - match self.mem_pool.pull(Rc::clone(&self.mem_pool), *size) { - Ok(desc) => send_desc_lst.push(desc), - Err(vq_err) => return Err(vq_err), - } - send_len += usize::from(*size); - } - - let send_buff = Some(Buffer::Multiple { - desc_lst: send_desc_lst.into_boxed_slice(), - len: send_len, - next_write: 0, - }); - - let mut recv_desc_lst: Vec = - Vec::with_capacity(recv_size_lst.len()); - let mut recv_len = 0usize; - - for size in recv_size_lst { - match self.mem_pool.pull(Rc::clone(&self.mem_pool), *size) { - Ok(desc) => recv_desc_lst.push(desc), - Err(vq_err) => return Err(vq_err), - } - recv_len += usize::from(*size); - } - - let recv_buff = Some(Buffer::Multiple { - desc_lst: recv_desc_lst.into_boxed_slice(), - len: recv_len, - next_write: 0, - }); - - Ok(BufferToken { - send_buff, - recv_buff, - vq: master, - ret_send: true, - ret_recv: true, - reusable: true, - }) - } - (BuffSpec::Multiple(send_size_lst), BuffSpec::Single(recv_size)) => { - let mut send_desc_lst: Vec = - Vec::with_capacity(send_size_lst.len()); - let mut send_len = 0usize; - - for size in send_size_lst { - match self.mem_pool.pull(Rc::clone(&self.mem_pool), *size) { - Ok(desc) => 
send_desc_lst.push(desc), - Err(vq_err) => return Err(vq_err), - } - send_len += usize::from(*size); - } - - let send_buff = Some(Buffer::Multiple { - desc_lst: send_desc_lst.into_boxed_slice(), - len: send_len, - next_write: 0, - }); - - let recv_buff = - match self.mem_pool.pull(Rc::clone(&self.mem_pool), recv_size) { - Ok(recv_desc) => Some(Buffer::Single { - desc_lst: vec![recv_desc].into_boxed_slice(), - len: recv_size.into(), - next_write: 0, - }), - Err(vq_err) => return Err(vq_err), - }; - - Ok(BufferToken { - send_buff, - recv_buff, - vq: master, - ret_send: true, - ret_recv: true, - reusable: true, - }) - } - (BuffSpec::Indirect(send_size_lst), BuffSpec::Indirect(recv_size_lst)) => { - let mut send_desc_lst: Vec = - Vec::with_capacity(send_size_lst.len()); - let mut send_len = 0usize; - - for size in send_size_lst { - // As the indirect list does only consume one descriptor for the - // control descriptor, the actual list is untracked - send_desc_lst.push( - self.mem_pool - .pull_untracked(Rc::clone(&self.mem_pool), *size), - ); - send_len += usize::from(*size); - } - - let mut recv_desc_lst: Vec = - Vec::with_capacity(recv_size_lst.len()); - let mut recv_len = 0usize; - - for size in recv_size_lst { - // As the indirect list does only consume one descriptor for the - // control descriptor, the actual list is untracked - recv_desc_lst.push( - self.mem_pool - .pull_untracked(Rc::clone(&self.mem_pool), *size), - ); - recv_len += usize::from(*size); - } - - let ctrl_desc = match self - .create_indirect_ctrl(Some(&send_desc_lst), Some(&recv_desc_lst)) - { - Ok(desc) => desc, - Err(vq_err) => return Err(vq_err), - }; - - let recv_buff = Some(Buffer::Indirect { - desc_lst: recv_desc_lst.into_boxed_slice(), - ctrl_desc: ctrl_desc.no_dealloc_clone(), - len: recv_len, - next_write: 0, - }); - let send_buff = Some(Buffer::Indirect { - desc_lst: send_desc_lst.into_boxed_slice(), - ctrl_desc, - len: send_len, - next_write: 0, - }); - - Ok(BufferToken { - send_buff, - recv_buff, - vq: master, - ret_send: true, - ret_recv: true, - reusable: true, - }) - } - (BuffSpec::Indirect(_), BuffSpec::Single(_)) - | (BuffSpec::Indirect(_), BuffSpec::Multiple(_)) => Err(VirtqError::BufferInWithDirect), - (BuffSpec::Single(_), BuffSpec::Indirect(_)) - | (BuffSpec::Multiple(_), BuffSpec::Indirect(_)) => Err(VirtqError::BufferInWithDirect), - } - } - } + self.prep_buffer_static(send, recv) } - pub fn size(&self) -> VqSize { + fn size(&self) -> VqSize { self.size } } -// Private Interface for PackedVq -impl SplitVq { +impl VirtqPrivate for SplitVq { fn create_indirect_ctrl( &self, send: Option<&Vec>, @@ -1582,4 +568,7 @@ impl SplitVq { } } } + fn mem_pool(&self) -> Rc { + self.mem_pool.clone() + } }
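Taken together, packed.rs and split.rs now implement a common Virtq trait, with the shared descriptor plumbing (create_indirect_ctrl, mem_pool) moved behind the crate-internal VirtqPrivate trait and the duplicated prep_* bodies delegated to the *_static helpers. The prep_* methods take self: Rc<Self>, which is what lets driver code write vq.clone().prep_buffer(..) instead of passing a separate master handle, as seen in the driver hunks at the top of this patch. Below is a toy sketch of that receiver pattern only; the types are stand-ins, nothing here is kernel API, and std's Rc stands in for the alloc::rc::Rc used in the no_std build:

use std::rc::Rc;

// Toy stand-ins for the kernel types; only the ownership shape matters.
struct BufferToken {
    // The token keeps an owning handle to its origin queue, as in the patch.
    vq: Rc<dyn Virtq>,
}

trait Virtq {
    // `self: Rc<Self>` consumes one owning handle and stashes it inside the
    // returned token; this receiver remains object-safe, so Rc<dyn Virtq>
    // call sites dispatch through it directly.
    fn prep_buffer(self: Rc<Self>) -> BufferToken;
}

struct SplitVq;

impl Virtq for SplitVq {
    fn prep_buffer(self: Rc<Self>) -> BufferToken {
        // Rc<SplitVq> coerces to Rc<dyn Virtq> on assignment.
        BufferToken { vq: self }
    }
}

fn main() {
    let vq: Rc<dyn Virtq> = Rc::new(SplitVq);
    // Mirrors the driver call sites: clone the Rc, hand one copy in.
    let _tkn = vq.clone().prep_buffer();
    // `vq` is still usable here for further prep or dispatch calls.
}

The payoff of this design is that tokens and transfers keep their queue alive without lifetime parameters, at the cost of explicit Rc cloning at every call site.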