#![allow(clippy::upper_case_acronyms)]
use alloc::{vec::Vec, boxed::Box};
use memory::{PhysicalAddress, MappedPages, create_contiguous_mapping, BorrowedSliceMappedPages, Mutable, MMIO_FLAGS};
use volatile::{ReadOnly,Volatile};
use bit_field::BitField;
use zerocopy::{U32, FromBytes};
use byteorder::BigEndian;
use kernel_config::memory::PAGE_SIZE;
use core::fmt;
use num_enum::TryFromPrimitive;
use core::{convert::TryFrom, marker::ConstParamTy};
use crate::{
Rqn, Sqn, Cqn, Pd, Td, Lkey, Eqn, Tirn, Tisn, FtId, FgId,
initialization_segment::InitializationSegment,
event_queue::EventQueueContext,
completion_queue::CompletionQueueContext,
send_queue::{SendQueueContext, SendQueueState, TransportInterfaceSendContext},
work_queue::WorkQueue,
receive_queue::{ReceiveQueueContext, ReceiveQueueState, TransportInterfaceReceiveContext},
flow_table::{FlowContext, FlowEntryInput, FlowGroupInput, FlowTableContext, FlowTableType, FlowContextAction, MatchCriteriaEnable, DestinationEntry, DestinationType}
};
#[allow(dead_code)]
/// Full size of one command mailbox: 512 data bytes plus trailing control fields.
const MAILBOX_SIZE_IN_BYTES: usize = 576;
/// Number of payload bytes each mailbox can carry before chaining to the next one.
const MAILBOX_DATA_SIZE_IN_BYTES: usize = 512;
/// Byte offset within its backing page at which a mailbox is placed.
const DEFAULT_MAILBOX_OFFSET_IN_PAGE: usize = 0;
/// Size of one physical-address entry (upper + lower 32 bits) as written into mailboxes.
const SIZE_PADDR_IN_BYTES: usize = 8;
/// Transport carrying command queue entries to the device.
/// The value is pre-shifted so it can be OR'ed directly into the dword
/// whose bits 31:24 hold the transport type.
pub enum CommandTransportType {
PCIe = 0x7 << 24
}
/// Errors that can occur while building, posting, or retrieving the results
/// of a command on the HCA command queue.
pub enum CommandQueueError {
/// All command queue entries are currently in use.
NoCommandEntryAvailable,
/// A command that requires a list of page addresses was given none.
MissingInputPages,
/// A required builder field was not set for the requested opcode.
MissingInput,
IncorrectCommandOpcode,
InvalidCommandOpcode,
InvalidCommandDeliveryStatus,
InvalidCommandReturnStatus,
/// The entry is still owned by hardware.
CommandNotCompleted,
InvalidMailboxOffset,
PageAllocationFailed,
UnimplementedOpcode,
NotImplemented,
InvalidPortType,
InvalidSQState,
}
/// Converts a [`CommandQueueError`] into a human-readable static description,
/// used when propagating driver errors as `&'static str`.
impl From<CommandQueueError> for &'static str {
fn from(error: CommandQueueError) -> Self {
match error {
CommandQueueError::NoCommandEntryAvailable => "No command entry is available",
CommandQueueError::MissingInputPages => "No pages were passed to the command",
CommandQueueError::MissingInput => "An input was not passed to a command that required it",
CommandQueueError::IncorrectCommandOpcode => "Incorrect command opcode",
CommandQueueError::InvalidCommandOpcode => "Invalid command opcode. This could be because the value is invalid,
or because the driver currently doesn't support the opcode.",
CommandQueueError::InvalidCommandDeliveryStatus => "Invalid command delivery status",
CommandQueueError::InvalidCommandReturnStatus => "Invalid command return status",
CommandQueueError::CommandNotCompleted => "Command not complete yet",
CommandQueueError::InvalidMailboxOffset => "Invalid offset for mailbox in a page",
CommandQueueError::PageAllocationFailed => "Failed to allocate MappedPages",
CommandQueueError::UnimplementedOpcode => "Opcode is not implemented",
CommandQueueError::NotImplemented => "Function not implemented for the given opcode",
CommandQueueError::InvalidPortType => "Invalid port type",
CommandQueueError::InvalidSQState => "Invalid SQ State"
}
}
}
/// Status reported by hardware describing whether a command entry was
/// successfully delivered and parsed (distinct from the command's own
/// execution result, [`CommandReturnStatus`]).
#[derive(Debug, TryFromPrimitive)]
#[repr(u32)]
pub enum CommandDeliveryStatus {
Success = 0x0,
SignatureErr = 0x1,
TokenErr = 0x2,
BadBlockNumber = 0x3,
BadOutputPointer = 0x4,
BadInputPointer = 0x5,
InternalErr = 0x6,
InputLenErr = 0x7,
OutputLenErr = 0x8,
ReservedNotZero = 0x9,
// Note: value jumps to 0x10 here; the intermediate values are not defined.
BadCommandType = 0x10,
}
/// Opcodes of the HCA commands this driver issues through the command queue.
/// Each value is the 16-bit opcode written into the command entry.
#[derive(PartialEq, Debug, TryFromPrimitive, Copy, Clone)]
#[repr(u32)]
pub enum CommandOpcode {
QueryHcaCap = 0x100,
InitHca = 0x102,
EnableHca = 0x104,
QueryPages = 0x107,
ManagePages = 0x108,
QueryIssi = 0x10A,
SetIssi = 0x10B,
SetDriverVersion = 0x10D,
QuerySpecialContexts = 0x203,
CreateEq = 0x301,
CreateCq = 0x400,
QueryVportState = 0x750,
QueryNicVportContext = 0x754,
ModifyNicVportContext = 0x755,
AllocPd = 0x800,
AllocUar = 0x802,
AccessRegister = 0x805,
AllocTransportDomain = 0x816,
CreateTir = 0x900,
CreateSq = 0x904,
ModifySq = 0x905,
QuerySq = 0x907,
CreateRq = 0x908,
ModifyRq = 0x909,
QueryRq = 0x90B,
CreateTis = 0x912,
SetFlowTableRoot = 0x92f,
CreateFlowTable = 0x930,
CreateFlowGroup = 0x933,
SetFlowTableEntry = 0x936
}
impl CommandOpcode {
    /// Total number of input bytes for this command: the 16 bytes carried
    /// inline in the command entry plus any data placed in input mailboxes.
    ///
    /// `num_pages` must be `Some` for commands that append a list of page
    /// addresses (`ManagePages`, `CreateEq`, `CreateCq`, `CreateSq`, `CreateRq`).
    fn input_bytes(&self, num_pages: Option<usize>) -> Result<u32, CommandQueueError> {
        let len = match self {
            Self::QueryHcaCap => 16,
            Self::InitHca => 16,
            Self::EnableHca => 16,
            Self::QueryPages => 16,
            Self::ManagePages => {
                // 0x10 inline bytes followed by one 8-byte physical address per page.
                let num_pages = num_pages.ok_or(CommandQueueError::MissingInput)? as u32;
                0x10 + num_pages * SIZE_PADDR_IN_BYTES as u32
            }
            Self::QueryIssi => 8,
            Self::SetIssi => 16,
            Self::QuerySpecialContexts => 16,
            Self::CreateEq => {
                // EQ context and event bitmask occupy bytes up to 0x110,
                // then the page address list follows.
                let num_pages = num_pages.ok_or(CommandQueueError::MissingInput)? as u32;
                0x110 + num_pages * SIZE_PADDR_IN_BYTES as u32
            }
            Self::CreateCq => {
                let num_pages = num_pages.ok_or(CommandQueueError::MissingInput)? as u32;
                0x110 + num_pages * SIZE_PADDR_IN_BYTES as u32
            }
            Self::QueryVportState => 16,
            Self::QueryNicVportContext => 16,
            Self::ModifyNicVportContext => 0x200,
            Self::AllocPd => 16,
            Self::AllocUar => 16,
            Self::AccessRegister => 0x20,
            Self::AllocTransportDomain => 16,
            Self::CreateTir => 0x20 + 0xF0,
            Self::CreateSq => {
                // 0x20 inline + 0x30 SQ context + 0xC0 work queue + page list.
                let num_pages = num_pages.ok_or(CommandQueueError::MissingInput)? as u32;
                0x20 + 0x30 + 0xC0 + (num_pages * SIZE_PADDR_IN_BYTES as u32)
            }
            Self::ModifySq => 0x118,
            Self::QuerySq => 12,
            Self::CreateRq => {
                let num_pages = num_pages.ok_or(CommandQueueError::MissingInput)? as u32;
                0x20 + 0x30 + 0xC0 + num_pages * SIZE_PADDR_IN_BYTES as u32
            },
            Self::ModifyRq => 0x118,
            Self::QueryRq => 12,
            Self::CreateTis => 0x20 + 0xA0,
            Self::SetFlowTableRoot => 0x40,
            Self::CreateFlowTable => 0x40,
            Self::CreateFlowGroup => 0x400,
            Self::SetFlowTableEntry => 0x40 + 0x300 + 8,
            // Remaining opcodes (e.g. SetDriverVersion) have no recorded lengths yet.
            _ => return Err(CommandQueueError::NotImplemented)
        };
        Ok(len)
    }
    /// Total number of output bytes the device writes for this command:
    /// the inline output bytes in the command entry plus any output mailbox data.
    fn output_bytes(&self) -> Result<u32, CommandQueueError> {
        let len = match self {
            Self::QueryHcaCap => 16 + 0x100,
            Self::InitHca => 16,
            Self::EnableHca => 12,
            Self::QueryPages => 16,
            Self::ManagePages => 16,
            Self::QueryIssi => 0x70,
            Self::SetIssi => 16,
            Self::QuerySpecialContexts => 16,
            Self::CreateEq => 16,
            Self::CreateCq => 16,
            Self::QueryVportState => 16,
            Self::QueryNicVportContext => 16 + 0x108,
            Self::ModifyNicVportContext => 16,
            Self::AllocPd => 16,
            Self::AllocUar => 16,
            Self::AccessRegister => 16,
            Self::AllocTransportDomain => 16,
            Self::CreateTir => 16,
            Self::CreateSq => 16,
            Self::ModifySq => 8,
            Self::QuerySq => 0x10 + MAILBOX_DATA_SIZE_IN_BYTES as u32,
            Self::CreateRq => 16,
            Self::ModifyRq => 16,
            Self::QueryRq => 0x10 + MAILBOX_DATA_SIZE_IN_BYTES as u32,
            Self::CreateTis => 16,
            Self::SetFlowTableRoot => 0x10,
            Self::CreateFlowTable => 0x10,
            Self::CreateFlowGroup => 0x10,
            Self::SetFlowTableEntry => 0x10,
            _ => return Err(CommandQueueError::NotImplemented)
        };
        Ok(len)
    }
    /// Number of input mailboxes needed for this command's non-inline input data.
    /// Returns 0 for commands whose input fits entirely in the command entry.
    fn num_input_mailboxes(&self, num_pages: Option<usize>) -> Result<usize, CommandQueueError> {
        let num = match self {
            Self::QueryHcaCap => 0,
            Self::InitHca => 0,
            Self::EnableHca => 0,
            Self::QueryPages => 0,
            Self::ManagePages => {
                let num_pages = num_pages.ok_or(CommandQueueError::MissingInput)?;
                Self::num_mailboxes(num_pages * SIZE_PADDR_IN_BYTES)
            },
            Self::QueryIssi => 0,
            Self::SetIssi => 0,
            Self::QuerySpecialContexts => 0,
            Self::CreateEq => {
                // Mailbox data = everything past the 0x10 inline bytes.
                let num_pages = num_pages.ok_or(CommandQueueError::MissingInput)?;
                let size_of_mailbox_data = (0x110 - 0x10) + SIZE_PADDR_IN_BYTES * num_pages;
                Self::num_mailboxes(size_of_mailbox_data)
            },
            Self::CreateCq => {
                let num_pages = num_pages.ok_or(CommandQueueError::MissingInput)?;
                let size_of_mailbox_data = (0x110 - 0x10) + SIZE_PADDR_IN_BYTES * num_pages;
                Self::num_mailboxes(size_of_mailbox_data)
            },
            Self::QueryVportState => 0,
            Self::QueryNicVportContext => 0,
            Self::ModifyNicVportContext => 1,
            Self::AllocPd => 0,
            Self::AllocUar => 0,
            Self::AccessRegister => 1,
            Self::AllocTransportDomain => 0,
            Self::CreateTir => 1,
            Self::CreateSq => {
                let num_pages = num_pages.ok_or(CommandQueueError::MissingInput)?;
                let size_of_mailbox_data = 0x10 + 0x30 + 0xC0 + SIZE_PADDR_IN_BYTES * num_pages;
                Self::num_mailboxes(size_of_mailbox_data)
            },
            Self::ModifySq => 1,
            Self::QuerySq => 0,
            Self::CreateRq => {
                let num_pages = num_pages.ok_or(CommandQueueError::MissingInput)?;
                let size_of_mailbox_data = 0x10 + 0x30 + 0xC0 + SIZE_PADDR_IN_BYTES * num_pages;
                Self::num_mailboxes(size_of_mailbox_data)
            },
            Self::ModifyRq => 1,
            // Added for consistency with `input_bytes`: QueryRq's 12 input bytes
            // fit inline, exactly like QuerySq.
            Self::QueryRq => 0,
            Self::CreateTis => 1,
            Self::SetFlowTableRoot => 1,
            Self::CreateFlowTable => 1,
            Self::CreateFlowGroup => 1,
            Self::SetFlowTableEntry => {
                const NUM_DEST_ENTRIES: usize = 1;
                let size_of_mailbox_data = 0x30 + 0x300 + (NUM_DEST_ENTRIES * core::mem::size_of::<DestinationEntry>());
                Self::num_mailboxes(size_of_mailbox_data)
            },
            _ => return Err(CommandQueueError::NotImplemented)
        };
        Ok(num)
    }
    /// Number of output mailboxes needed for this command's non-inline output data.
    fn num_output_mailboxes(&self) -> Result<usize, CommandQueueError> {
        let num = match self {
            Self::QueryHcaCap => 1,
            Self::InitHca => 0,
            Self::EnableHca => 0,
            Self::QueryPages => 0,
            Self::ManagePages => 0,
            Self::QueryIssi => 1,
            Self::SetIssi => 0,
            Self::QuerySpecialContexts => 0,
            Self::CreateEq => 0,
            Self::CreateCq => 0,
            Self::QueryVportState => 0,
            Self::QueryNicVportContext => 1,
            Self::ModifyNicVportContext => 0,
            Self::AllocPd => 0,
            Self::AllocUar => 0,
            Self::AccessRegister => 1,
            Self::AllocTransportDomain => 0,
            Self::CreateTir => 0,
            Self::CreateSq => 0,
            Self::ModifySq => 0,
            Self::QuerySq => 1,
            Self::CreateRq => 0,
            Self::ModifyRq => 0,
            // Added for consistency with `output_bytes`: QueryRq returns a full
            // mailbox of data beyond the inline bytes, exactly like QuerySq.
            Self::QueryRq => 1,
            Self::CreateTis => 0,
            Self::SetFlowTableRoot => 0,
            Self::CreateFlowTable => 0,
            Self::CreateFlowGroup => 0,
            Self::SetFlowTableEntry => 0,
            _ => return Err(CommandQueueError::NotImplemented)
        };
        Ok(num)
    }
    /// Number of mailboxes needed to hold `size_of_data_in_bytes` bytes of data,
    /// at `MAILBOX_DATA_SIZE_IN_BYTES` per mailbox.
    fn num_mailboxes(size_of_data_in_bytes: usize) -> usize {
        // Integer ceiling division. The previous implementation round-tripped
        // through `libm::ceilf` on f32, which needlessly pulls in float math
        // and loses precision for sizes above f32's 24-bit mantissa.
        (size_of_data_in_bytes + MAILBOX_DATA_SIZE_IN_BYTES - 1) / MAILBOX_DATA_SIZE_IN_BYTES
    }
}
/// Execution result of a completed command, reported by the device in the
/// command entry's status byte (distinct from [`CommandDeliveryStatus`]).
#[derive(Debug, TryFromPrimitive)]
#[repr(u8)]
pub enum CommandReturnStatus {
OK = 0x00,
InternalError = 0x01,
BadOp = 0x02,
BadParam = 0x03,
BadSysState = 0x04,
BadResource = 0x05,
ResourceBusy = 0x06,
ExceedLim = 0x08,
BadResState = 0x09,
BadIndex = 0x0A,
NoResources = 0x0F,
BadInputLen = 0x50,
BadOutputLen = 0x51,
BadResourceState = 0x10,
BadPkt = 0x30,
BadSize = 0x40,
}
/// Opcode modifiers for the `ManagePages` command, selecting the
/// direction/outcome of the page transfer between driver and device.
pub enum ManagePagesOpMod {
AllocationFail = 0,
AllocationSuccess = 1,
HcaReturnPages = 2
}
/// Opcode modifiers for the `QueryPages` command, selecting which phase's
/// page requirement is being queried.
pub enum QueryPagesOpMod {
BootPages = 1,
InitPages = 2,
RegularPages = 3
}
/// Opcode modifiers for the `QueryVportState` command, selecting which
/// kind of virtual port is being queried.
pub enum QueryVportStateOpMod {
VnicVport = 0,
EswVport = 1,
Uplink = 2,
}
/// Opcode modifiers for `QueryHcaCap` when querying the device's *maximum*
/// capabilities (bit 0 clear; bits above select the capability group).
#[derive(Copy, Clone)]
pub enum QueryHcaCapMaxOpMod {
GeneralDeviceCapabilities = (0x0 << 1),
EthernetOffloadCapabilities = (0x1 << 1)
}
/// Opcode modifiers for `QueryHcaCap` when querying the *currently enabled*
/// capabilities (bit 0 set; bits above select the capability group).
#[derive(Copy, Clone)]
pub enum QueryHcaCapCurrentOpMod {
#[allow(clippy::identity_op)]
GeneralDeviceCapabilities = (0x0 << 1) | 0x1,
EthernetOffloadCapabilities = (0x1 << 1) | 0x1,
}
/// Opcode modifiers for the `AccessRegister` command: write to or read from
/// the selected network port register.
#[derive(Copy, Clone)]
pub enum AccessRegisterOpMod{
Write = 0,
Read = 1
}
/// Physical port type of the HCA, as reported by `QueryHcaCap`.
#[derive(Debug, TryFromPrimitive)]
#[repr(u8)]
pub enum HcaPortType {
IB = 0x0,
Ethernet = 0x1
}
/// Layout of the fields of the NIC vport context that this driver accesses,
/// overlaid onto mailbox memory. The `_unused` paddings position each field
/// at its hardware-defined byte offset; all fields are big-endian.
#[derive(FromBytes)]
#[repr(C)]
struct NicVportContext {
_unused0: [u8; 36],
// MTU at byte offset 36.
mtu: Volatile<U32<BigEndian>>,
_unused1: [u8; 200],
// Allowed-address-list size/control word at byte offset 240.
allowed_list_size: Volatile<U32<BigEndian>>,
// Permanent MAC address, high then low dword.
permanent_address_h: Volatile<U32<BigEndian>>,
permanent_address_l: Volatile<U32<BigEndian>>,
}
// Compile-time guard that the paddings keep the struct at its expected size.
const _: () = assert!(core::mem::size_of::<NicVportContext>() == 252);
/// A page of physically-contiguous memory used as a command mailbox,
/// together with its starting physical address (which is what gets
/// written into command entries and mailbox chain pointers).
struct MailboxBuffer {
mp: MappedPages,
addr: PhysicalAddress
}
impl MailboxBuffer {
    /// Overwrites the value of type `T` located `offset` bytes into this
    /// mailbox's backing page with `data`.
    ///
    /// Returns [`CommandQueueError::InvalidMailboxOffset`] if a `T` at that
    /// offset does not fit within the mapped pages.
    fn write_to_mailbox<T: FromBytes>(&mut self, data: T, offset: usize) -> Result<(), CommandQueueError> {
        let slot = self
            .mp
            .as_type_mut::<T>(offset)
            .map_err(|_| CommandQueueError::InvalidMailboxOffset)?;
        *slot = data;
        Ok(())
    }
}
/// A physical address in the split big-endian format the device expects:
/// upper 32 bits followed by lower 32 bits.
#[derive(FromBytes, Default)]
#[repr(C)]
struct PhysicalAddressLayout {
upper: U32<BigEndian>,
lower: U32<BigEndian>,
}
impl PhysicalAddressLayout {
    /// Splits `addr` into big-endian upper/lower 32-bit halves for the device.
    fn new(addr: PhysicalAddress) -> PhysicalAddressLayout {
        let value = addr.value();
        PhysicalAddressLayout {
            upper: U32::new((value >> 32) as u32),
            lower: U32::new((value & 0xFFFF_FFFF) as u32),
        }
    }
}
/// IDs of network port registers accessed via the `AccessRegister` command.
enum NetworkPortRegisters {
// Port MTU register.
PMTU = 0x5003,
}
/// Lifecycle states of a command, used as a const-generic parameter on
/// [`Command`] so that invalid transitions (e.g. completing a command that
/// was never posted) are rejected at compile time.
#[derive(PartialEq, Eq)]
#[derive(ConstParamTy)]
pub enum CmdState {
Initialized,
Posted,
Completed
}
/// A command occupying one command queue entry, parameterized by its
/// lifecycle state. Holding the mailbox buffers here keeps their physical
/// pages alive for as long as the command may be accessed by hardware.
pub struct Command<const S: CmdState> {
// Index of this command's entry within the command queue.
pub(crate) entry_num: usize,
input_mailbox_buffers: Box<[MailboxBuffer]>,
output_mailbox_buffers: Box<[MailboxBuffer]>,
}
impl Command<{CmdState::Initialized}> {
/// Installs the prepared `entry` into slot `entry_num` of the command queue
/// and wraps the bookkeeping (entry index + mailbox buffers) in a `Command`.
fn new(
entry_num: usize,
mut entry: CommandQueueEntry,
input_mailbox_buffers: Box<[MailboxBuffer]>,
output_mailbox_buffers: Box<[MailboxBuffer]>,
command_queue: &mut BorrowedSliceMappedPages<CommandQueueEntry, Mutable>,
) -> Command<{CmdState::Initialized}> {
// Swap the fully-initialized entry into the HW-visible queue slot;
// the slot's previous contents end up in the local `entry` and are dropped.
core::mem::swap(&mut entry, &mut command_queue[entry_num]);
Command {
entry_num,
input_mailbox_buffers,
output_mailbox_buffers
}
}
/// Posts this command to hardware via the initialization segment,
/// transitioning it to the `Posted` state.
pub fn post(self, init_segment: &mut InitializationSegment) -> Command<{CmdState::Posted}> {
init_segment.post_command(&self);
Command {
entry_num: self.entry_num,
input_mailbox_buffers: self.input_mailbox_buffers,
output_mailbox_buffers: self.output_mailbox_buffers
}
}
}
impl Command<{CmdState::Posted}> {
    /// Blocks until hardware releases ownership of this command's queue entry,
    /// then transitions the command to the `Completed` state.
    pub fn complete(self, cmdq: &CommandQueue) -> Command<{CmdState::Completed}> {
        cmdq.wait_for_command_completion(&self);
        let Command { entry_num, input_mailbox_buffers, output_mailbox_buffers } = self;
        Command { entry_num, input_mailbox_buffers, output_mailbox_buffers }
    }
}
/// The two-level result of a completed command: whether it was delivered,
/// and — if so — how its execution went.
#[allow(dead_code)]
#[derive(Debug)]
pub struct CommandCompletionStatus {
delivery_status: CommandDeliveryStatus,
return_status: CommandReturnStatus
}
/// Collects the inputs for one command before it is written into the queue.
/// Only `opcode` is mandatory; each command's `create_command` handling
/// extracts the optional fields it requires and errors if one is missing.
pub struct CommandBuilder {
opcode: CommandOpcode,
opmod: Option<u16>,
allocated_pages: Option<Vec<PhysicalAddress>>,
user_access_region: Option<u32>,
queue_size: Option<u32>,
event_queue_num: Option<Eqn>,
doorbell_page: Option<PhysicalAddress>,
transport_domain: Option<Td>,
completion_queue_num: Option<Cqn>,
transport_interface_send_num: Option<Tisn>,
protection_domain: Option<Pd>,
send_queue_num: Option<Sqn>,
collapsed_cq: bool,
receive_queue_num: Option<Rqn>,
mtu: Option<u16>,
flow_table_id: Option<FtId>,
flow_group_id: Option<FgId>,
transport_interface_receive_num: Option<Tirn>,
}
impl CommandBuilder {
pub fn new(opcode: CommandOpcode) -> CommandBuilder {
CommandBuilder {
opcode,
opmod: None,
allocated_pages: None,
user_access_region: None,
queue_size: None,
event_queue_num: None,
doorbell_page: None,
transport_domain: None,
completion_queue_num: None,
transport_interface_send_num: None,
protection_domain: None,
send_queue_num: None,
collapsed_cq: false,
receive_queue_num: None,
mtu: None,
flow_table_id: None,
flow_group_id: None,
transport_interface_receive_num: None,
}
}
pub fn opmod(mut self, opmod: u16) -> CommandBuilder {
self.opmod = Some(opmod);
self
}
pub fn allocated_pages(mut self, allocated_pages: Vec<PhysicalAddress>) -> CommandBuilder {
self.allocated_pages = Some(allocated_pages);
self
}
pub fn uar(mut self, uar: u32) -> CommandBuilder {
self.user_access_region = Some(uar);
self
}
pub fn queue_size(mut self, size: u32) -> CommandBuilder {
self.queue_size = Some(size);
self
}
pub fn eqn(mut self, eqn: Eqn) -> CommandBuilder {
self.event_queue_num = Some(eqn);
self
}
pub fn db_page(mut self, db_page: PhysicalAddress) -> CommandBuilder {
self.doorbell_page = Some(db_page);
self
}
pub fn td(mut self, td: Td) -> CommandBuilder {
self.transport_domain = Some(td);
self
}
pub fn cqn(mut self, cqn: Cqn) -> CommandBuilder {
self.completion_queue_num = Some(cqn);
self
}
pub fn tisn(mut self, tisn: Tisn) -> CommandBuilder {
self.transport_interface_send_num = Some(tisn);
self
}
pub fn pd(mut self, pd: Pd) -> CommandBuilder {
self.protection_domain = Some(pd);
self
}
pub fn sqn(mut self, sqn: Sqn) -> CommandBuilder {
self.send_queue_num = Some(sqn);
self
}
pub fn collapsed_cq(mut self) -> CommandBuilder {
self.collapsed_cq = true;
self
}
pub fn rqn(mut self, rqn: Rqn) -> CommandBuilder {
self.receive_queue_num = Some(rqn);
self
}
pub fn mtu(mut self, mtu: u16) -> CommandBuilder {
self.mtu = Some(mtu);
self
}
pub fn flow_table_id(mut self, id: FtId) -> CommandBuilder {
self.flow_table_id = Some(id);
self
}
pub fn flow_group_id(mut self, id: FgId) -> CommandBuilder {
self.flow_group_id = Some(id);
self
}
pub fn tirn(mut self, tirn: Tirn) -> CommandBuilder {
self.transport_interface_receive_num = Some(tirn);
self
}
}
/// The driver-side state of the HCA command queue: the MMIO-backed entries,
/// a free/used bitmap over them, the next command token, and a pool of
/// pre-allocated mailbox pages to draw from.
pub struct CommandQueue {
entries: BorrowedSliceMappedPages<CommandQueueEntry, Mutable>,
available_entries: Box<[bool]>,
// Token written into each command; incremented per command for matching.
token: u8,
// Reusable pool of mailbox pages, one pre-allocated per queue entry.
mailbox_buffers: Vec<MailboxBuffer>,
}
impl CommandQueue {
/// Creates the command queue state over the given MMIO-backed `entries`,
/// marking all `num_cmdq_entries` slots free and pre-allocating one
/// physically-contiguous mailbox page per entry.
pub fn create(
    entries: BorrowedSliceMappedPages<CommandQueueEntry, Mutable>,
    num_cmdq_entries: usize,
) -> Result<CommandQueue, &'static str> {
    // Pre-allocate one mailbox page per entry so common commands need no
    // allocation at issue time.
    let mailbox_buffers = (0..num_cmdq_entries)
        .map(|_| {
            create_contiguous_mapping(PAGE_SIZE, MMIO_FLAGS)
                .map(|(mp, addr)| MailboxBuffer { mp, addr })
        })
        .collect::<Result<Vec<_>, _>>()?;
    Ok(CommandQueue {
        entries,
        available_entries: vec![true; num_cmdq_entries].into_boxed_slice(),
        // Arbitrary non-zero starting token; incremented per command.
        token: 0xAA,
        mailbox_buffers,
    })
}
/// Returns the index of the first free command queue entry, if any.
fn find_free_command_entry(&self) -> Option<usize> {
    self.available_entries
        .iter()
        .enumerate()
        .find_map(|(i, &free)| free.then_some(i))
}
/// Convenience wrapper: builds the command described by `parameters`,
/// posts it to hardware, and waits for it to complete.
pub fn create_and_execute_command(&mut self, parameters: CommandBuilder, init_segment: &mut InitializationSegment) -> Result<Command<{CmdState::Completed}>, CommandQueueError> {
    let initialized = self.create_command(parameters)?;
    let posted = initialized.post(init_segment);
    Ok(posted.complete(self))
}
/// Builds a command for `parameters.opcode`: claims a free queue entry,
/// allocates and chains the required input/output mailboxes, fills the
/// opcode-specific inline data and mailbox contents, and installs the
/// entry into the queue.
///
/// Returns the command in the `Initialized` state (not yet posted to HW),
/// or an error if a required builder field is missing or the opcode is
/// unsupported.
fn create_command(&mut self, parameters: CommandBuilder) -> Result<Command<{CmdState::Initialized}>, CommandQueueError>
{
let entry_num = self.find_free_command_entry().ok_or(CommandQueueError::NoCommandEntryAvailable)?;
// Page count drives the input length / mailbox count for page-carrying commands.
let num_pages = parameters.allocated_pages.as_ref().map(|pages| pages.len());
#[cfg(mlx_verbose_log)]
{
if let Some(pages) = parameters.allocated_pages.as_ref() {
debug!("pages: {:?}", pages);
}
if let Some(pages) = parameters.doorbell_page.as_ref() {
debug!("db page: {:?}", pages);
}
}
let mut cmdq_entry = CommandQueueEntry::init(parameters.opcode, parameters.opmod, self.token, num_pages)?;
// Allocate and chain the mailboxes this opcode needs (may be zero).
let mut input_mailbox_buffers: Box<[MailboxBuffer]> = self.initialize_mailboxes(parameters.opcode.num_input_mailboxes(num_pages)?)?;
let output_mailbox_buffers: Box<[MailboxBuffer]> = self.initialize_mailboxes(parameters.opcode.num_output_mailboxes()?)?;
// Opcode-specific setup: fill inline data words and/or input mailbox contents.
// Opcodes with empty arms need only the inline fields already set by `init`.
match parameters.opcode {
CommandOpcode::EnableHca => {}
CommandOpcode::InitHca => {}
CommandOpcode::QuerySpecialContexts => {}
CommandOpcode::QueryPages => {}
CommandOpcode::AllocUar => {},
CommandOpcode::QueryVportState => { },
CommandOpcode::AllocPd => {},
CommandOpcode::AllocTransportDomain => {},
CommandOpcode::QueryHcaCap => {},
CommandOpcode::QueryIssi => {}
CommandOpcode::SetIssi => {
// The driver only supports ISSI version 1.
const ISSI_VERSION_1: u32 = 1;
cmdq_entry.set_input_inline_data_0(ISSI_VERSION_1);
}
CommandOpcode::ManagePages => {
// Inline word 1 = number of pages; the addresses go into the mailboxes.
let pages_pa = parameters.allocated_pages.ok_or(CommandQueueError::MissingInputPages)?;
cmdq_entry.set_input_inline_data_1(pages_pa.len() as u32);
Self::write_page_addrs_to_mailboxes(&mut input_mailbox_buffers, pages_pa, 0)?;
}
CommandOpcode::QueryNicVportContext => { },
CommandOpcode::ModifyNicVportContext => {
// Field-select bits 6 and 4 choose which vport context fields to modify.
cmdq_entry.set_input_inline_data_1((1 << 6) | (1 << 4)); const NIC_VPORT_CONTEXT_OFFSET: usize = 0x100 - 0x10;
let context= input_mailbox_buffers[0].mp.as_type_mut::<NicVportContext>(NIC_VPORT_CONTEXT_OFFSET)
.map_err(|_e| CommandQueueError::InvalidMailboxOffset)?;
context.mtu.write(U32::new(parameters.mtu.ok_or(CommandQueueError::MissingInput)? as u32));
// Top three bits of the allowed-list word; presumably enable flags —
// NOTE(review): confirm against the vport context field definitions.
context.allowed_list_size.write(U32::new((1 << 31) | (1 << 30) | (1 << 29))); }
CommandOpcode::AccessRegister => {
cmdq_entry.set_input_inline_data_0(NetworkPortRegisters::PMTU as u32);
let register_data = input_mailbox_buffers[0].mp.as_type_mut::<[U32<BigEndian>;3]>(0)
.map_err(|_e| CommandQueueError::InvalidMailboxOffset)?;
// First dword: local port 1.
register_data[0] = U32::new(1 << 16);
if parameters.opmod.ok_or(CommandQueueError::MissingInput)? == AccessRegisterOpMod::Write as u16 {
// Writes carry the MTU in the upper half of the third dword.
register_data[2] = U32::new((parameters.mtu.ok_or(CommandQueueError::MissingInput)? as u32) << 16);
} else {
// Reads use shorter input and longer output than the defaults.
cmdq_entry.set_input_length(20);
cmdq_entry.set_output_length(24);
}
}
CommandOpcode::CreateEq => {
let pages_pa = parameters.allocated_pages.ok_or(CommandQueueError::MissingInputPages)?;
Self::write_event_queue_context_to_mailbox(
&mut input_mailbox_buffers,
pages_pa,
parameters.user_access_region.ok_or(CommandQueueError::MissingInput)?,
parameters.queue_size.ok_or(CommandQueueError::MissingInput)?
)?;
},
CommandOpcode::CreateCq => {
let pages_pa = parameters.allocated_pages.ok_or(CommandQueueError::MissingInputPages)?;
Self::write_completion_queue_context_to_mailbox(
&mut input_mailbox_buffers,
pages_pa,
parameters.user_access_region.ok_or(CommandQueueError::MissingInput)?,
parameters.queue_size.ok_or(CommandQueueError::MissingInput)?,
parameters.event_queue_num.ok_or(CommandQueueError::MissingInput)?.0,
parameters.doorbell_page.ok_or(CommandQueueError::MissingInput)?,
parameters.collapsed_cq
)?;
},
CommandOpcode::CreateTis => {
const TIS_MAILBOX_INDEX: usize = 0;
Self::write_transport_interface_send_context_to_mailbox(
&mut input_mailbox_buffers[TIS_MAILBOX_INDEX],
parameters.transport_domain.ok_or(CommandQueueError::MissingInput)?.0
)?;
},
CommandOpcode::CreateSq => {
let pages_pa = parameters.allocated_pages.ok_or(CommandQueueError::MissingInputPages)?;
Self::write_send_queue_context_to_mailbox(
&mut input_mailbox_buffers,
pages_pa,
parameters.completion_queue_num.ok_or(CommandQueueError::MissingInput)?.0,
parameters.transport_interface_send_num.ok_or(CommandQueueError::MissingInput)?.0,
parameters.protection_domain.ok_or(CommandQueueError::MissingInput)?.0,
parameters.user_access_region.ok_or(CommandQueueError::MissingInput)?,
parameters.doorbell_page.ok_or(CommandQueueError::MissingInput)?,
parameters.queue_size.ok_or(CommandQueueError::MissingInput)?
)?;
},
CommandOpcode::ModifySq => {
// Inline word 0: current SQ state (RST = 0) in bits 31:28, SQ number below.
let sq_state = 0 << 28;
cmdq_entry.set_input_inline_data_0(sq_state | parameters.send_queue_num.ok_or(CommandQueueError::MissingInput)?.0);
const SQ_CONTEXT_MAILBOX_INDEX: usize = 0;
Self::modify_sq_state(
&mut input_mailbox_buffers[SQ_CONTEXT_MAILBOX_INDEX],
)?;
},
CommandOpcode::QuerySq => {
cmdq_entry.set_input_inline_data_0(parameters.send_queue_num.ok_or(CommandQueueError::MissingInput)?.0);
},
CommandOpcode::CreateRq => {
let pages_pa = parameters.allocated_pages.ok_or(CommandQueueError::MissingInputPages)?;
Self::write_receive_queue_context_to_mailbox(
&mut input_mailbox_buffers,
pages_pa,
parameters.completion_queue_num.ok_or(CommandQueueError::MissingInput)?.0,
parameters.protection_domain.ok_or(CommandQueueError::MissingInput)?.0,
parameters.doorbell_page.ok_or(CommandQueueError::MissingInput)?,
parameters.queue_size.ok_or(CommandQueueError::MissingInput)?
)?;
}
CommandOpcode::ModifyRq => {
// Inline word 0: current RQ state (RST = 0) in bits 31:28, RQ number below.
let rq_state = 0 << 28;
cmdq_entry.set_input_inline_data_0(rq_state | parameters.receive_queue_num.ok_or(CommandQueueError::MissingInput)?.0);
const RQ_CONTEXT_MAILBOX_INDEX: usize = 0;
Self::modify_rq_state(
&mut input_mailbox_buffers[RQ_CONTEXT_MAILBOX_INDEX]
)?;
},
CommandOpcode::CreateFlowTable => {
const FLOW_TABLE_CTXT_MAILBOX_INDEX: usize = 0;
Self::write_flow_table_context_to_mailbox(
&mut input_mailbox_buffers[FLOW_TABLE_CTXT_MAILBOX_INDEX],
parameters.queue_size.ok_or(CommandQueueError::MissingInput)?
)?;
},
CommandOpcode::CreateFlowGroup => {
const FLOW_GROUP_MAILBOX_INDEX: usize = 0;
Self::write_flow_group_info_to_mailbox(
&mut input_mailbox_buffers[FLOW_GROUP_MAILBOX_INDEX],
parameters.flow_table_id.ok_or(CommandQueueError::MissingInput)?.0
)?;
},
CommandOpcode::CreateTir => {
const TIR_MAILBOX_INDEX: usize = 0;
Self::write_transport_interface_receive_context_to_mailbox(
&mut input_mailbox_buffers[TIR_MAILBOX_INDEX],
parameters.receive_queue_num.ok_or(CommandQueueError::MissingInput)?.0,
parameters.transport_domain.ok_or(CommandQueueError::MissingInput)?.0
)?;
},
CommandOpcode::SetFlowTableEntry => {
Self::write_flow_entry_info_to_mailbox(
&mut input_mailbox_buffers,
parameters.flow_table_id.ok_or(CommandQueueError::MissingInput)?.0,
parameters.flow_group_id.ok_or(CommandQueueError::MissingInput)?.0,
parameters.transport_interface_receive_num.ok_or(CommandQueueError::MissingInput)?.0
)?;
},
CommandOpcode::SetFlowTableRoot => {
const FT_ROOT_MAILBOX_INDEX: usize = 0;
Self::write_flow_table_root_to_mailbox(
&mut input_mailbox_buffers[FT_ROOT_MAILBOX_INDEX],
parameters.flow_table_id.ok_or(CommandQueueError::MissingInput)?.0
)?;
}
_=> {
error!("unimplemented opcode");
return Err(CommandQueueError::UnimplementedOpcode);
}
}
// Point the command entry at the head of each mailbox chain, if any.
if !input_mailbox_buffers.is_empty() {
cmdq_entry.set_input_mailbox_pointer(input_mailbox_buffers[0].addr);
}
if !output_mailbox_buffers.is_empty() {
cmdq_entry.set_output_mailbox_pointer(output_mailbox_buffers[0].addr);
}
// Install the entry into the HW-visible queue and claim the slot.
let initialized_entry = Command::new(entry_num, cmdq_entry, input_mailbox_buffers, output_mailbox_buffers, &mut self.entries);
self.token = self.token.wrapping_add(1);
self.available_entries[entry_num] = false;
#[cfg(mlx_verbose_log)]
{
debug!("command INPUT: {:?}", parameters.opcode);
self.entries[entry_num].dump_command();
for (i,mb) in initialized_entry.input_mailbox_buffers.iter().enumerate() {
mb.mp.as_type::<CommandInterfaceMailbox>(0)
.map_err(|_e| CommandQueueError::InvalidMailboxOffset)?
.dump_mailbox(i)
}
}
Ok(initialized_entry)
}
/// Acquires `num_mailboxes` mailbox pages — reusing the pre-allocated pool
/// where possible, allocating fresh contiguous pages otherwise — and chains
/// them together: each mailbox's header records its block number, the
/// command token, and the physical address of the next mailbox (0 for the last).
fn initialize_mailboxes(&mut self, num_mailboxes: usize) -> Result<Box<[MailboxBuffer]>, CommandQueueError> {
    let mut buffers = Vec::with_capacity(num_mailboxes);
    for _ in 0..num_mailboxes {
        let buffer = match self.mailbox_buffers.pop() {
            // Reuse a page from the pool when one is available.
            Some(pooled) => pooled,
            // Otherwise allocate a fresh physically-contiguous page.
            None => {
                let (mp, addr) = create_contiguous_mapping(PAGE_SIZE, MMIO_FLAGS)
                    .map_err(|_| CommandQueueError::PageAllocationFailed)?;
                MailboxBuffer { mp, addr }
            }
        };
        buffers.push(buffer);
    }
    for block_num in 0..num_mailboxes {
        // Physical address of the next mailbox in the chain; 0 terminates it.
        let next_mb_addr = buffers
            .get(block_num + 1)
            .map(|next| next.addr.value())
            .unwrap_or(0);
        let mailbox = buffers[block_num]
            .mp
            .as_type_mut::<CommandInterfaceMailbox>(DEFAULT_MAILBOX_OFFSET_IN_PAGE)
            .map_err(|_| CommandQueueError::InvalidMailboxOffset)?;
        mailbox.init(block_num as u32, self.token, next_mb_addr);
    }
    Ok(buffers.into_boxed_slice())
}
/// Writes the physical addresses in `pages` into the data sections of the
/// given mailboxes, starting at `first_mailbox_offset` within the first
/// mailbox and at offset 0 in every subsequent one. Stops once `pages`
/// is exhausted.
///
/// NOTE(review): addresses are taken with `pages.pop()`, i.e. written in
/// REVERSE of the vector's order — confirm callers pass pages in an order
/// that tolerates this.
fn write_page_addrs_to_mailboxes(
input_mailbox_buffers: &mut [MailboxBuffer],
mut pages: Vec<PhysicalAddress>,
first_mailbox_offset: usize
) -> Result<(), CommandQueueError> {
for (idx, mb_buffer) in input_mailbox_buffers.iter_mut().enumerate() {
// Only the first mailbox carries a leading non-address region.
let offset = if idx == 0 {
first_mailbox_offset
} else {
DEFAULT_MAILBOX_OFFSET_IN_PAGE
};
// How many 8-byte address entries fit in this mailbox's data area.
let paddr_per_buffer = (MAILBOX_DATA_SIZE_IN_BYTES - offset) / SIZE_PADDR_IN_BYTES;
for paddr_index in 0..paddr_per_buffer {
if let Some(paddr) = pages.pop() {
mb_buffer.write_to_mailbox(
PhysicalAddressLayout::new(paddr),
(paddr_index * SIZE_PADDR_IN_BYTES) + offset
)?;
} else {
break;
}
}
}
Ok(())
}
/// Fills the input mailboxes for `CreateEq`: the EQ context, the event
/// bitmask (page-request events only), and the EQ's page addresses.
fn write_event_queue_context_to_mailbox(
    input_mailbox_buffers: &mut [MailboxBuffer],
    pages: Vec<PhysicalAddress>,
    uar: u32,
    eq_size: u32
) -> Result<(), CommandQueueError>
{
    // The EQ context sits at its fixed offset in the first mailbox.
    input_mailbox_buffers[0].write_to_mailbox(
        EventQueueContext::init(uar, eq_size),
        EventQueueContext::mailbox_offset(),
    )?;
    // Subscribe this EQ only to the "page request" event (bit 0xB).
    const EVENT_BITMASK_OFFSET: usize = 0x6C - 0x10;
    const PAGE_REQUEST_BIT: u32 = 1 << 0xB;
    let subscribed_events: U32<BigEndian> = U32::new(PAGE_REQUEST_BIT);
    input_mailbox_buffers[0].write_to_mailbox(subscribed_events, EVENT_BITMASK_OFFSET)?;
    // Page addresses start at byte 0x110 of the command input (0x10 of it is inline).
    const EQ_PADDR_OFFSET: usize = 0x110 - 0x10;
    Self::write_page_addrs_to_mailboxes(input_mailbox_buffers, pages, EQ_PADDR_OFFSET)
}
/// Fills the input mailboxes for `CreateCq`: the CQ context followed by
/// the CQ's page addresses.
fn write_completion_queue_context_to_mailbox(
    input_mailbox_buffers: &mut [MailboxBuffer],
    pages: Vec<PhysicalAddress>,
    uar: u32,
    cq_size: u32,
    c_eqn: u8,
    doorbell_pa: PhysicalAddress,
    collapsed: bool
) -> Result<(), CommandQueueError> {
    let context = CompletionQueueContext::init(uar, cq_size, c_eqn, doorbell_pa, collapsed);
    input_mailbox_buffers[0].write_to_mailbox(context, CompletionQueueContext::mailbox_offset())?;
    // Page addresses start at byte 0x110 of the command input (0x10 of it is inline).
    const CQ_PADDR_OFFSET: usize = 0x110 - 0x10;
    Self::write_page_addrs_to_mailboxes(input_mailbox_buffers, pages, CQ_PADDR_OFFSET)
}
/// Fills the input mailbox for `CreateTis` with a TIS context bound to
/// transport domain `td`.
fn write_transport_interface_send_context_to_mailbox(input_mailbox_buffer: &mut MailboxBuffer, td: u32) -> Result<(), CommandQueueError> {
    input_mailbox_buffer.write_to_mailbox(
        TransportInterfaceSendContext::init(td),
        TransportInterfaceSendContext::mailbox_offset(),
    )
}
#[allow(clippy::too_many_arguments)]
/// Fills the input mailboxes for `CreateSq`: the SQ context, its work
/// queue description, and the SQ's page addresses.
fn write_send_queue_context_to_mailbox(
    input_mailbox_buffers: &mut [MailboxBuffer],
    pages: Vec<PhysicalAddress>,
    cqn: u32,
    tisn: u32,
    pd: u32,
    uar_page: u32,
    db_addr: PhysicalAddress,
    wq_size: u32
) -> Result<(), CommandQueueError> {
    {
        let first_mailbox = &mut input_mailbox_buffers[0];
        first_mailbox.write_to_mailbox(
            SendQueueContext::init(cqn, tisn),
            SendQueueContext::mailbox_offset(),
        )?;
        first_mailbox.write_to_mailbox(
            WorkQueue::init_sq(pd, uar_page, db_addr, wq_size),
            WorkQueue::mailbox_offset(),
        )?;
    }
    // Page addresses follow the 0x10 inline + 0x30 SQ context + 0xC0 work queue.
    const SQ_PADDR_OFFSET: usize = 0x10 + 0x30 + 0xC0;
    Self::write_page_addrs_to_mailboxes(input_mailbox_buffers, pages, SQ_PADDR_OFFSET)
}
/// Fills the input mailbox for `ModifySq` with an SQ context whose state
/// field is set to `Ready` (all other fields default).
fn modify_sq_state(input_mailbox_buffer: &mut MailboxBuffer) -> Result<(), CommandQueueError> {
    let mut context = SendQueueContext::default();
    context.set_state(SendQueueState::Ready);
    input_mailbox_buffer.write_to_mailbox(context, SendQueueContext::mailbox_offset())
}
/// Fills the input mailbox for `ModifyRq` with an RQ context whose state
/// field is set to `Ready` (all other fields default).
fn modify_rq_state(input_mailbox_buffer: &mut MailboxBuffer) -> Result<(), CommandQueueError> {
    let mut context = ReceiveQueueContext::default();
    context.set_state(ReceiveQueueState::Ready);
    input_mailbox_buffer.write_to_mailbox(context, ReceiveQueueContext::mailbox_offset())
}
/// Fills the input mailboxes for `CreateRq`: the RQ context, its work
/// queue description, and the RQ's page addresses.
fn write_receive_queue_context_to_mailbox(
    input_mailbox_buffers: &mut [MailboxBuffer],
    pages: Vec<PhysicalAddress>,
    cqn: u32,
    pd: u32,
    db_addr: PhysicalAddress,
    wq_size: u32
) -> Result<(), CommandQueueError> {
    {
        let first_mailbox = &mut input_mailbox_buffers[0];
        first_mailbox.write_to_mailbox(
            ReceiveQueueContext::init(cqn),
            ReceiveQueueContext::mailbox_offset(),
        )?;
        first_mailbox.write_to_mailbox(
            WorkQueue::init_rq(pd, db_addr, wq_size),
            WorkQueue::mailbox_offset(),
        )?;
    }
    // Page addresses follow the 0x10 inline + 0x30 RQ context + 0xC0 work queue.
    const RQ_PADDR_OFFSET: usize = 0x10 + 0x30 + 0xC0;
    Self::write_page_addrs_to_mailboxes(input_mailbox_buffers, pages, RQ_PADDR_OFFSET)
}
/// Fills the input mailbox for `CreateFlowTable`: the table type (NIC RX)
/// in bits 31:24 of the first dword, followed by the flow table context.
fn write_flow_table_context_to_mailbox(input_mailbox_buffer: &mut MailboxBuffer, num_ft_entries: u32) -> Result<(), CommandQueueError> {
    let table_type: U32<BigEndian> = U32::new((FlowTableType::NicRx as u32) << 24);
    input_mailbox_buffer.write_to_mailbox(table_type, 0)?;
    input_mailbox_buffer.write_to_mailbox(
        FlowTableContext::init(num_ft_entries),
        FlowTableContext::mailbox_offset(),
    )
}
/// Fills the input mailbox for `CreateFlowGroup`: a group in NIC RX table
/// `ft_id` covering entry indices 0..=0 with no match criteria enabled.
fn write_flow_group_info_to_mailbox(input_mailbox_buffer: &mut MailboxBuffer, ft_id: u32) -> Result<(), CommandQueueError> {
    let group = FlowGroupInput::init(
        FlowTableType::NicRx,
        ft_id,
        0, // start flow index
        0, // end flow index
        MatchCriteriaEnable::None,
    );
    input_mailbox_buffer.write_to_mailbox(group, FlowGroupInput::mailbox_offset())
}
/// Fills the input mailbox for `CreateTir` with a TIR context directing
/// traffic to RQ `rqn` within transport domain `td`.
fn write_transport_interface_receive_context_to_mailbox(input_mailbox_buffer: &mut MailboxBuffer, rqn: u32, td: u32) -> Result<(), CommandQueueError> {
    input_mailbox_buffer.write_to_mailbox(
        TransportInterfaceReceiveContext::init(rqn, td),
        TransportInterfaceReceiveContext::mailbox_offset(),
    )
}
/// Fills the input mailboxes for `SetFlowTableEntry`: the entry input
/// (NIC RX table `ft_id`, entry index 0), the flow context (group `fg_id`,
/// forward-to-destination action, one destination), and a single destination
/// entry pointing at TIR `tirn`.
fn write_flow_entry_info_to_mailbox(input_mailbox_buffers: &mut [MailboxBuffer], ft_id: u32, fg_id: u32, tirn: u32) -> Result<(), CommandQueueError> {
    let flow_entry_input = FlowEntryInput::init(FlowTableType::NicRx, ft_id, 0);
    input_mailbox_buffers[0].write_to_mailbox(flow_entry_input, FlowEntryInput::mailbox_offset())?;
    let flow_context = FlowContext::init(fg_id, FlowContextAction::FwdDest, 1);
    input_mailbox_buffers[0].write_to_mailbox(flow_context, FlowContext::mailbox_offset())?;
    // The destination list starts 0x30 + 0x300 = 816 bytes into the mailbox
    // data, which falls in the 2nd mailbox at byte 816 - 512 = 304.
    // Computed with integer ceiling division; the previous code round-tripped
    // this compile-time constant through `libm::ceilf` on f32 at runtime.
    const DEST_LIST_START: usize = 0x30 + 0x300;
    let dest_list_mb: usize = (DEST_LIST_START + MAILBOX_DATA_SIZE_IN_BYTES - 1) / MAILBOX_DATA_SIZE_IN_BYTES;
    const DEST_LIST_MB_OFFSET: usize = DEST_LIST_START - MAILBOX_DATA_SIZE_IN_BYTES;
    let dest_list = DestinationEntry::init(DestinationType::Tir, tirn);
    input_mailbox_buffers[dest_list_mb - 1].write_to_mailbox(dest_list, DEST_LIST_MB_OFFSET)?;
    Ok(())
}
/// Fills the input mailbox for `SetFlowTableRoot`: the NIC RX table type
/// in bits 31:24 of the first dword, then the 24-bit flow table id.
fn write_flow_table_root_to_mailbox(input_mailbox_buffer: &mut MailboxBuffer, ft_id: u32) -> Result<(), CommandQueueError> {
    let table_type = U32::new((FlowTableType::NicRx as u32) << 24);
    let table_id = U32::new(ft_id & 0xFF_FFFF);
    let ft_root: [U32<BigEndian>; 2] = [table_type, table_id];
    input_mailbox_buffer.write_to_mailbox(ft_root, 0)
}
/// Busy-waits until hardware releases ownership of the command's queue entry,
/// i.e. until the command has completed.
pub fn wait_for_command_completion(&self, command: &Command<{CmdState::Posted}>) {
    while self.entries[command.entry_num].owned_by_hw() {
        // Tell the CPU this is a spin-wait loop; this can reduce power usage and
        // sibling-thread contention without changing behavior.
        core::hint::spin_loop();
    }
}
/// Retires a completed command: marks its queue entry as reusable and returns
/// the command's delivery status and return status.
///
/// Consumes the [`Command`], so a completed command can only be retired once.
pub fn get_command_status(&mut self, command: Command<{CmdState::Completed}>) -> Result<CommandCompletionStatus, CommandQueueError> {
// Optional verbose logging: dump the raw entry and all output mailboxes.
#[cfg(mlx_verbose_log)]
{
debug!("command OUTPUT");
self.entries[command.entry_num].dump_command();
for (i, mb) in command.output_mailbox_buffers.iter().enumerate() {
mb.mp.as_type::<CommandInterfaceMailbox>(0)
.map_err(|_e| CommandQueueError::InvalidMailboxOffset)?
.dump_mailbox(i)
}
}
// The entry can now be reused for a new command.
self.available_entries[command.entry_num] = true;
let delivery_status = self.entries[command.entry_num].get_delivery_status()?;
let return_status = self.entries[command.entry_num].get_return_status()?;
Ok(CommandCompletionStatus{ delivery_status, return_status })
}
fn check_command_output_validity(&self, entry_num: usize, cmd_opcode: CommandOpcode) -> Result<(), CommandQueueError> {
if self.entries[entry_num].owned_by_hw() {
error!("the command hasn't completed yet!");
return Err(CommandQueueError::CommandNotCompleted);
}
if self.entries[entry_num].get_command_opcode()? != cmd_opcode {
error!("Incorrect Command!: {:?}", self.entries[entry_num].get_command_opcode()?);
return Err(CommandQueueError::IncorrectCommandOpcode);
}
Ok(())
}
/// Extracts the HCA port type from the output of a QUERY_HCA_CAP command,
/// then retires the command and returns its completion status as well.
pub fn get_port_type(&mut self, command: Command<{CmdState::Completed}>) -> Result<(HcaPortType, CommandCompletionStatus), CommandQueueError> {
    self.check_command_output_validity(command.entry_num, CommandOpcode::QueryHcaCap)?;
    // The port type is in bits 9:8 of the dword at offset 0x34 of the output mailbox.
    const DATA_OFFSET_IN_MAILBOX: usize = 0x34;
    let raw = command.output_mailbox_buffers[0].mp
        .as_type::<U32<BigEndian>>(DATA_OFFSET_IN_MAILBOX)
        .map_err(|_e| CommandQueueError::InvalidMailboxOffset)?
        .get();
    let port_type = HcaPortType::try_from(((raw & 0x300) >> 8) as u8)
        .map_err(|_e| CommandQueueError::InvalidPortType)?;
    Ok((port_type, self.get_command_status(command)?))
}
/// Parses the device capability page from the output of a QUERY_HCA_CAP
/// command, then retires the command and returns its completion status.
pub fn get_device_capabilities(&mut self, command: Command<{CmdState::Completed}>) -> Result<(HCACapabilities, CommandCompletionStatus), CommandQueueError> {
    self.check_command_output_validity(command.entry_num, CommandOpcode::QueryHcaCap)?;
    // The capability layout starts at the beginning of the first output mailbox.
    let layout = command.output_mailbox_buffers[0].mp
        .as_type::<HCACapabilitiesLayout>(0)
        .map_err(|_e| CommandQueueError::InvalidMailboxOffset)?;
    let capabilities = layout.get_capabilities();
    Ok((capabilities, self.get_command_status(command)?))
}
/// Returns `(current_issi, supported_issi, status)` from a completed QUERY_ISSI
/// command, then retires the command.
pub fn get_query_issi_command_output(&mut self, command: Command<{CmdState::Completed}>) -> Result<(u16, u8, CommandCompletionStatus), CommandQueueError> {
    self.check_command_output_validity(command.entry_num, CommandOpcode::QueryIssi)?;
    // Offset of the supported-ISSI dword within the output mailbox data:
    // 0x6C from the start of the command output, minus the 0x10 inline output bytes.
    const DATA_OFFSET_IN_MAILBOX: usize = 0x6C - 0x10;
    let supported_issi = command.output_mailbox_buffers[0].mp
        .as_type::<U32<BigEndian>>(DATA_OFFSET_IN_MAILBOX)
        .map_err(|_e| CommandQueueError::InvalidMailboxOffset)?
        .get();
    let current_issi = self.entries[command.entry_num].get_output_inline_data_0();
    // NOTE(review): both values are truncated (u32 -> u16 / u8) exactly as the
    // original did; confirm the supported-ISSI value really fits in 8 bits.
    Ok((current_issi as u16, supported_issi as u8, self.get_command_status(command)?))
}
/// Returns the number of pages requested by the device in a completed
/// QUERY_PAGES command, then retires the command.
pub fn get_query_pages_command_output(&mut self, command: Command<{CmdState::Completed}>) -> Result<(u32, CommandCompletionStatus), CommandQueueError> {
    self.check_command_output_validity(command.entry_num, CommandOpcode::QueryPages)?;
    // The page count is returned inline in the second output dword.
    let num_pages = self.entries[command.entry_num].get_output_inline_data_1();
    let status = self.get_command_status(command)?;
    Ok((num_pages, status))
}
/// Extracts the maximum MTU from the output of a completed ACCESS_REGISTER
/// command, then retires the command.
pub fn get_max_mtu(&mut self, command: Command<{CmdState::Completed}>) -> Result<(u16, CommandCompletionStatus), CommandQueueError> {
    self.check_command_output_validity(command.entry_num, CommandOpcode::AccessRegister)?;
    // The register data starts at the beginning of the output mailbox;
    // the max MTU occupies the upper 16 bits of the second dword.
    let dwords = command.output_mailbox_buffers[0].mp
        .as_type::<[U32<BigEndian>; 2]>(0x0)
        .map_err(|_e| CommandQueueError::InvalidMailboxOffset)?;
    let max_mtu = (dwords[1].get() >> 16) as u16;
    Ok((max_mtu, self.get_command_status(command)?))
}
/// Returns the UAR index allocated by a completed ALLOC_UAR command,
/// then retires the command.
pub fn get_uar(&mut self, command: Command<{CmdState::Completed}>) -> Result<(u32, CommandCompletionStatus), CommandQueueError> {
    self.check_command_output_validity(command.entry_num, CommandOpcode::AllocUar)?;
    // The UAR index is the low 24 bits of the first inline output dword.
    let raw = self.entries[command.entry_num].get_output_inline_data_0();
    let status = self.get_command_status(command)?;
    Ok((raw & 0xFF_FFFF, status))
}
/// Returns the protection domain number allocated by a completed ALLOC_PD
/// command, then retires the command.
pub fn get_protection_domain(&mut self, command: Command<{CmdState::Completed}>) -> Result<(Pd, CommandCompletionStatus), CommandQueueError> {
    self.check_command_output_validity(command.entry_num, CommandOpcode::AllocPd)?;
    // The PD number is the low 24 bits of the first inline output dword.
    let raw = self.entries[command.entry_num].get_output_inline_data_0();
    let status = self.get_command_status(command)?;
    Ok((Pd(raw & 0xFF_FFFF), status))
}
/// Returns the transport domain number allocated by a completed
/// ALLOC_TRANSPORT_DOMAIN command, then retires the command.
pub fn get_transport_domain(&mut self, command: Command<{CmdState::Completed}>) -> Result<(Td, CommandCompletionStatus), CommandQueueError> {
    self.check_command_output_validity(command.entry_num, CommandOpcode::AllocTransportDomain)?;
    // The TD number is the low 24 bits of the first inline output dword.
    let raw = self.entries[command.entry_num].get_output_inline_data_0();
    let status = self.get_command_status(command)?;
    Ok((Td(raw & 0xFF_FFFF), status))
}
/// Returns the reserved lkey from a completed QUERY_SPECIAL_CONTEXTS command,
/// then retires the command.
pub fn get_reserved_lkey(&mut self, command: Command<{CmdState::Completed}>) -> Result<(Lkey, CommandCompletionStatus), CommandQueueError> {
    self.check_command_output_validity(command.entry_num, CommandOpcode::QuerySpecialContexts)?;
    // The reserved lkey is returned inline in the second output dword.
    let raw = self.entries[command.entry_num].get_output_inline_data_1();
    let status = self.get_command_status(command)?;
    Ok((Lkey(raw), status))
}
/// Unpacks the output of a completed QUERY_VPORT_STATE command into
/// `(upper 16 bits, high nibble of the low byte, low nibble of the low byte, status)`,
/// then retires the command.
pub fn get_vport_state(&mut self, command: Command<{CmdState::Completed}>) -> Result<(u16, u8, u8, CommandCompletionStatus), CommandQueueError> {
    self.check_command_output_validity(command.entry_num, CommandOpcode::QueryVportState)?;
    // All three values are packed into the second inline output dword.
    let raw = self.entries[command.entry_num].get_output_inline_data_1();
    let upper = (raw >> 16) as u16;
    let hi_nibble = (raw as u8) >> 4;
    let lo_nibble = raw as u8 & 0xF;
    Ok((upper, hi_nibble, lo_nibble, self.get_command_status(command)?))
}
/// Assembles the vport's permanent MAC address from the output of a completed
/// QUERY_NIC_VPORT_CONTEXT command, then retires the command.
pub fn get_vport_mac_address(&mut self, command: Command<{CmdState::Completed}>) -> Result<([u8; 6], CommandCompletionStatus), CommandQueueError> {
    self.check_command_output_validity(command.entry_num, CommandOpcode::QueryNicVportContext)?;
    // The NIC vport context starts at the beginning of the output mailbox.
    let vport_ctx = command.output_mailbox_buffers[0].mp
        .as_type::<NicVportContext>(0x0)
        .map_err(|_e| CommandQueueError::InvalidMailboxOffset)?;
    let hi = vport_ctx.permanent_address_h.read().get();
    let lo = vport_ctx.permanent_address_l.read().get();
    // Most-significant byte first: the top 2 bytes come from the low 16 bits of
    // `hi`, the remaining 4 from `lo`.
    let mac = [
        (hi >> 8) as u8,
        hi as u8,
        (lo >> 24) as u8,
        (lo >> 16) as u8,
        (lo >> 8) as u8,
        lo as u8,
    ];
    Ok((mac, self.get_command_status(command)?))
}
/// Returns the event queue number created by a completed CREATE_EQ command,
/// then retires the command.
pub fn get_eq_number(&mut self, command: Command<{CmdState::Completed}>) -> Result<(Eqn, CommandCompletionStatus), CommandQueueError> {
    self.check_command_output_validity(command.entry_num, CommandOpcode::CreateEq)?;
    // The EQ number is the low byte of the first inline output dword.
    let raw = self.entries[command.entry_num].get_output_inline_data_0();
    let status = self.get_command_status(command)?;
    Ok((Eqn(raw as u8), status))
}
/// Returns the completion queue number created by a completed CREATE_CQ
/// command, then retires the command.
pub fn get_cq_number(&mut self, command: Command<{CmdState::Completed}>) -> Result<(Cqn, CommandCompletionStatus), CommandQueueError> {
    self.check_command_output_validity(command.entry_num, CommandOpcode::CreateCq)?;
    // The CQ number is the low 24 bits of the first inline output dword.
    let raw = self.entries[command.entry_num].get_output_inline_data_0();
    let status = self.get_command_status(command)?;
    Ok((Cqn(raw & 0xFF_FFFF), status))
}
/// Returns the TIS number created by a completed CREATE_TIS command,
/// then retires the command.
pub fn get_tis_context_number(&mut self, command: Command<{CmdState::Completed}>) -> Result<(Tisn, CommandCompletionStatus), CommandQueueError> {
    self.check_command_output_validity(command.entry_num, CommandOpcode::CreateTis)?;
    // The TIS number is the low 24 bits of the first inline output dword.
    let raw = self.entries[command.entry_num].get_output_inline_data_0();
    let status = self.get_command_status(command)?;
    Ok((Tisn(raw & 0xFF_FFFF), status))
}
/// Returns the send queue number created by a completed CREATE_SQ command,
/// then retires the command.
pub fn get_send_queue_number(&mut self, command: Command<{CmdState::Completed}>) -> Result<(Sqn, CommandCompletionStatus), CommandQueueError> {
    self.check_command_output_validity(command.entry_num, CommandOpcode::CreateSq)?;
    // The SQ number is the low 24 bits of the first inline output dword.
    let raw = self.entries[command.entry_num].get_output_inline_data_0();
    let status = self.get_command_status(command)?;
    Ok((Sqn(raw & 0xFF_FFFF), status))
}
/// Returns the receive queue number created by a completed CREATE_RQ command,
/// then retires the command.
pub fn get_receive_queue_number(&mut self, command: Command<{CmdState::Completed}>) -> Result<(Rqn, CommandCompletionStatus), CommandQueueError> {
    self.check_command_output_validity(command.entry_num, CommandOpcode::CreateRq)?;
    // The RQ number is the low 24 bits of the first inline output dword.
    let raw = self.entries[command.entry_num].get_output_inline_data_0();
    let status = self.get_command_status(command)?;
    Ok((Rqn(raw & 0xFF_FFFF), status))
}
/// Extracts the send queue state from the output of a completed QUERY_SQ
/// command, then retires the command.
pub fn get_sq_state(&mut self, command: Command<{CmdState::Completed}>) -> Result<(SendQueueState, CommandCompletionStatus), CommandQueueError> {
    self.check_command_output_validity(command.entry_num, CommandOpcode::QuerySq)?;
    // The SQ context begins 0x10 bytes into the output mailbox data.
    let sq_context = command.output_mailbox_buffers[0].mp
        .as_type::<SendQueueContext>(0x10)
        .map_err(|_e| CommandQueueError::InvalidMailboxOffset)?;
    let state = sq_context.get_state().map_err(|_e| CommandQueueError::InvalidSQState)?;
    Ok((state, self.get_command_status(command)?))
}
/// Returns the flow table id created by a completed CREATE_FLOW_TABLE command,
/// then retires the command.
pub fn get_flow_table_id(&mut self, command: Command<{CmdState::Completed}>) -> Result<(FtId, CommandCompletionStatus), CommandQueueError> {
    self.check_command_output_validity(command.entry_num, CommandOpcode::CreateFlowTable)?;
    // The table id is the low 24 bits of the first inline output dword.
    let raw = self.entries[command.entry_num].get_output_inline_data_0();
    let status = self.get_command_status(command)?;
    Ok((FtId(raw & 0xFF_FFFF), status))
}
/// Returns the flow group id created by a completed CREATE_FLOW_GROUP command,
/// then retires the command.
pub fn get_flow_group_id(&mut self, command: Command<{CmdState::Completed}>) -> Result<(FgId, CommandCompletionStatus), CommandQueueError> {
    self.check_command_output_validity(command.entry_num, CommandOpcode::CreateFlowGroup)?;
    // The group id is the low 24 bits of the first inline output dword.
    let raw = self.entries[command.entry_num].get_output_inline_data_0();
    let status = self.get_command_status(command)?;
    Ok((FgId(raw & 0xFF_FFFF), status))
}
/// Returns the TIR number created by a completed CREATE_TIR command,
/// then retires the command.
pub fn get_tir_context_number(&mut self, command: Command<{CmdState::Completed}>) -> Result<(Tirn, CommandCompletionStatus), CommandQueueError> {
    self.check_command_output_validity(command.entry_num, CommandOpcode::CreateTir)?;
    // The TIR number is the low 24 bits of the first inline output dword.
    let raw = self.entries[command.entry_num].get_output_inline_data_0();
    let status = self.get_command_status(command)?;
    Ok((Tirn(raw & 0xFF_FFFF), status))
}
}
/// A 64-byte command queue entry, laid out as 16 big-endian dwords in the exact
/// order the hardware expects (`#[repr(C)]`; size asserted below).
///
/// The `command_input_*` fields are written by software before ownership is
/// passed to hardware; the `command_output_*` fields are written by hardware
/// upon command completion.
#[derive(FromBytes,Default)]
#[repr(C)]
pub struct CommandQueueEntry {
/// Offset 0x00: transport type of the command (e.g. PCIe).
type_of_transport: Volatile<U32<BigEndian>>,
/// Offset 0x04: total command input length in bytes.
input_length: Volatile<U32<BigEndian>>,
/// Offsets 0x08/0x0C: high/low 32 bits of the input mailbox physical address.
input_mailbox_pointer_h: Volatile<U32<BigEndian>>,
input_mailbox_pointer_l: Volatile<U32<BigEndian>>,
/// Offset 0x10: command opcode in bits 31:16.
command_input_opcode: Volatile<U32<BigEndian>>,
/// Offset 0x14: opcode modifier in the low 16 bits.
command_input_opmod: Volatile<U32<BigEndian>>,
/// Offsets 0x18/0x1C: inline input dwords (command-specific).
command_input_inline_data_0: Volatile<U32<BigEndian>>,
command_input_inline_data_1: Volatile<U32<BigEndian>>,
/// Offset 0x20: command return status in bits 31:24.
command_output_status: Volatile<U32<BigEndian>>,
/// Offset 0x24: error syndrome for failed commands.
command_output_syndrome: Volatile<U32<BigEndian>>,
/// Offsets 0x28/0x2C: inline output dwords (command-specific).
command_output_inline_data_0: Volatile<U32<BigEndian>>,
command_output_inline_data_1: Volatile<U32<BigEndian>>,
/// Offsets 0x30/0x34: high/low 32 bits of the output mailbox physical address.
output_mailbox_pointer_h: Volatile<U32<BigEndian>>,
output_mailbox_pointer_l: Volatile<U32<BigEndian>>,
/// Offset 0x38: total command output length in bytes.
output_length: Volatile<U32<BigEndian>>,
/// Offset 0x3C: token (bits 31:24), delivery status (bits 7:1), ownership (bit 0).
token_signature_status_own: Volatile<U32<BigEndian>>
}
// The hardware requires each command queue entry to be exactly 64 bytes.
const _: () = assert!(core::mem::size_of::<CommandQueueEntry>() == 64);
impl fmt::Debug for CommandQueueEntry {
    /// Formats every dword of the entry as its host-endian `u32` value.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let mut dbg = f.debug_struct("CommandQueueEntry");
        dbg.field("type of transport", &self.type_of_transport.read().get());
        dbg.field("input length", &self.input_length.read().get());
        dbg.field("input_mailbox_ptr_h", &self.input_mailbox_pointer_h.read().get());
        dbg.field("input_mailbox_ptr_l", &self.input_mailbox_pointer_l.read().get());
        dbg.field("command_input_opcode", &self.command_input_opcode.read().get());
        dbg.field("command_input_opmod", &self.command_input_opmod.read().get());
        dbg.field("command_input_inline_data_0", &self.command_input_inline_data_0.read().get());
        dbg.field("command_input_inline_data_1", &self.command_input_inline_data_1.read().get());
        dbg.field("command_output_status", &self.command_output_status.read().get());
        dbg.field("command_output_syndrome", &self.command_output_syndrome.read().get());
        dbg.field("command_output_inline_data_0", &self.command_output_inline_data_0.read().get());
        dbg.field("command_output_inline_data_1", &self.command_output_inline_data_1.read().get());
        dbg.field("output_mailbox_pointer_h", &self.output_mailbox_pointer_h.read().get());
        dbg.field("output_mailbox_pointer_l", &self.output_mailbox_pointer_l.read().get());
        dbg.field("output_length", &self.output_length.read().get());
        dbg.field("token_signature_status_own", &self.token_signature_status_own.read().get());
        dbg.finish()
    }
}
impl CommandQueueEntry {
/// Creates a command queue entry for `opcode`, already marked as owned by HW,
/// with input/output lengths derived from the opcode.
///
/// * `opmod`: optional opcode modifier; 0 is written when `None`.
/// * `token`: written into bits 31:24 of the token/signature/status/own dword.
/// * `num_pages`: used by `opcode.input_bytes` to size page-carrying commands.
///
/// NOTE(review): the entry is a plain local value here, so the order of these
/// writes (including setting the ownership bit early) cannot race with
/// hardware; it presumably only matters once the entry is placed in the queue.
fn init(opcode: CommandOpcode, opmod: Option<u16>, token: u8, num_pages: Option<usize>) -> Result<Self, CommandQueueError> {
let mut cmdq_entry = Self::default();
cmdq_entry.type_of_transport.write(U32::new(CommandTransportType::PCIe as u32));
// Merge the token into the top byte of the token/signature/status/own dword.
let val = cmdq_entry.token_signature_status_own.read().get();
cmdq_entry.token_signature_status_own.write(U32::new(val | ((token as u32) << 24)));
cmdq_entry.change_ownership_to_hw();
cmdq_entry.input_length.write(U32::new(opcode.input_bytes(num_pages)?));
cmdq_entry.output_length.write(U32::new(opcode.output_bytes()?));
// Opcode occupies bits 31:16 of its dword; opmod the low 16 bits of the next.
cmdq_entry.command_input_opcode.write(U32::new((opcode as u32) << 16));
cmdq_entry.command_input_opmod.write(U32::new(opmod.unwrap_or(0) as u32));
Ok(cmdq_entry)
}
/// Overrides the command input length (in bytes) set by `init`.
fn set_input_length(&mut self, length_in_bytes: u32) {
self.input_length.write(U32::new(length_in_bytes));
}
/// Overrides the command output length (in bytes) set by `init`.
fn set_output_length(&mut self, length_in_bytes: u32) {
self.output_length.write(U32::new(length_in_bytes));
}
/// Writes the first inline input dword (command-specific payload).
fn set_input_inline_data_0(&mut self, command0: u32) {
self.command_input_inline_data_0.write(U32::new(command0));
}
/// Writes the second inline input dword (command-specific payload).
fn set_input_inline_data_1(&mut self, command1: u32) {
self.command_input_inline_data_1.write(U32::new(command1));
}
/// Writes the physical address of the first input mailbox, split across the
/// high and low pointer dwords.
fn set_input_mailbox_pointer(&mut self, mailbox_ptr: PhysicalAddress) {
self.input_mailbox_pointer_h.write(U32::new((mailbox_ptr.value() >> 32) as u32));
self.input_mailbox_pointer_l.write(U32::new((mailbox_ptr.value() & 0xFFFF_FFFF) as u32));
}
/// Writes the physical address of the first output mailbox, split across the
/// high and low pointer dwords.
fn set_output_mailbox_pointer(&mut self, mailbox_ptr: PhysicalAddress) {
self.output_mailbox_pointer_h.write(U32::new((mailbox_ptr.value() >> 32) as u32));
self.output_mailbox_pointer_l.write(U32::new((mailbox_ptr.value() & 0xFFFF_FFFF) as u32));
}
/// Parses the opcode stored in bits 31:16 of the command input opcode dword.
fn get_command_opcode(&self) -> Result<CommandOpcode, CommandQueueError> {
let opcode = self.command_input_opcode.read().get() >> 16;
CommandOpcode::try_from(opcode).map_err(|_e| CommandQueueError::InvalidCommandOpcode)
}
/// Returns `(status, syndrome, inline_data_0, inline_data_1)` from the output
/// dwords; the status byte sits in bits 31:24 of its dword.
fn get_output_inline_data(&self) -> (u8, u32, u32, u32) {
(
(self.command_output_status.read().get() >> 24) as u8,
self.command_output_syndrome.read().get(),
self.command_output_inline_data_0.read().get(),
self.command_output_inline_data_1.read().get()
)
}
/// Returns the first inline output dword.
fn get_output_inline_data_0(&self) -> u32 {
self.command_output_inline_data_0.read().get()
}
/// Returns the second inline output dword.
fn get_output_inline_data_1(&self) -> u32 {
self.command_output_inline_data_1.read().get()
}
/// Parses the delivery status from bits 7:1 of the token/signature/status/own
/// dword (bit 0 is the ownership bit).
pub fn get_delivery_status(&self) -> Result<CommandDeliveryStatus, CommandQueueError> {
let status = (self.token_signature_status_own.read().get() & 0xFE) >> 1;
CommandDeliveryStatus::try_from(status).map_err(|_e| CommandQueueError::InvalidCommandDeliveryStatus)
}
/// Sets the ownership bit (bit 0), handing the entry to hardware.
fn change_ownership_to_hw(&mut self) {
let ownership = self.token_signature_status_own.read().get() | 0x1;
self.token_signature_status_own.write(U32::new(ownership));
}
/// Returns `true` while hardware owns this entry (command still in flight).
pub fn owned_by_hw(&self) -> bool {
self.token_signature_status_own.read().get().get_bit(0)
}
/// Parses the command return status from the status byte of the output dwords.
pub fn get_return_status(&self) -> Result<CommandReturnStatus, CommandQueueError> {
let (status, _syndrome, _, _) = self.get_output_inline_data();
CommandReturnStatus::try_from(status).map_err(|_e| CommandQueueError::InvalidCommandReturnStatus)
}
/// Dumps the raw 64-byte entry as big-endian dwords, 16 bytes per log line.
#[cfg(mlx_verbose_log)]
fn dump_command(&self) {
// SAFETY: reads 16 u32s from this 64-byte #[repr(C)] struct, which the size
// assertion above guarantees; no references escape the block.
unsafe {
let ptr = self as *const CommandQueueEntry as *const u32;
debug!("000: {:#010x} {:#010x} {:#010x} {:#010x}", (*ptr).to_be(), (*ptr.offset(1)).to_be(), (*ptr.offset(2)).to_be(), (*ptr.offset(3)).to_be());
debug!("010: {:#010x} {:#010x} {:#010x} {:#010x}", (*ptr.offset(4)).to_be(), (*ptr.offset(5)).to_be(), (*ptr.offset(6)).to_be(), (*ptr.offset(7)).to_be());
debug!("020: {:#010x} {:#010x} {:#010x} {:#010x}", (*ptr.offset(8)).to_be(), (*ptr.offset(9)).to_be(), (*ptr.offset(10)).to_be(), (*ptr.offset(11)).to_be());
debug!("030: {:#010x} {:#010x} {:#010x} {:#010x} \n", (*ptr.offset(12)).to_be(), (*ptr.offset(13)).to_be(), (*ptr.offset(14)).to_be(), (*ptr.offset(15)).to_be());
}
}
}
/// A command interface mailbox (576 bytes): a 512-byte data area followed by
/// padding and control fields linking it to the next mailbox in the chain.
#[derive(FromBytes)]
#[repr(C)]
struct CommandInterfaceMailbox {
/// The 512-byte data area carrying command input/output that does not fit inline.
mailbox_data: Volatile<[u8; 512]>,
_padding: [u8; 48],
/// High/low 32 bits of the address of the next mailbox in the chain (0 = last).
next_pointer_h: Volatile<U32<BigEndian>>,
next_pointer_l: Volatile<U32<BigEndian>>,
/// Sequence number of this mailbox within the chain.
block_number: Volatile<U32<BigEndian>>,
/// Command token in bits 23:16; remaining ctrl/signature bits left zero here.
token_ctrl_signature: Volatile<U32<BigEndian>>
}
// The layout must match the hardware-defined mailbox size exactly.
const _: () = assert!(core::mem::size_of::<CommandInterfaceMailbox>() == MAILBOX_SIZE_IN_BYTES);
impl CommandInterfaceMailbox {
fn clear_all_fields(&mut self) {
self. mailbox_data.write([0;512]);
self.next_pointer_h.write(U32::new(0));
self.next_pointer_l.write(U32::new(0));
self.block_number.write(U32::new(0));
self.token_ctrl_signature.write(U32::new(0));
}
fn init(&mut self, block_num: u32, token: u8, next_mb_addr: usize) {
self.clear_all_fields();
self.block_number.write(U32::new(block_num));
self.token_ctrl_signature.write(U32::new((token as u32) << 16));
self.next_pointer_h.write(U32::new((next_mb_addr >> 32) as u32));
self.next_pointer_l.write(U32::new((next_mb_addr & 0xFFFF_FFFF) as u32));
}
#[cfg(mlx_verbose_log)]
fn dump_mailbox(&self, block_num: usize) {
debug!("Mailbox {}", block_num);
unsafe {
let ptr = self as *const CommandInterfaceMailbox as *const u32;
for i in 0..MAILBOX_SIZE_IN_BYTES/16 {
let x = (i * 4) as isize;
debug!("{:#05x}: {:#010x} {:#010x} {:#010x} {:#010x}", i*16 + 0x40 + (block_num * MAILBOX_SIZE_IN_BYTES), (*ptr.offset(x)).to_be(), (*ptr.offset(x+1)).to_be(), (*ptr.offset(x+2)).to_be(), (*ptr.offset(x+3)).to_be());
}
debug!("")
}
}
}
impl fmt::Debug for CommandInterfaceMailbox {
    /// Formats the mailbox's data area and control dwords (host-endian values).
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let mut dbg = f.debug_struct("CommandInterfaceMailbox");
        dbg.field("mailbox_data", &self.mailbox_data.read());
        dbg.field("next pointer h", &self.next_pointer_h.read().get());
        dbg.field("next pointer l", &self.next_pointer_l.read().get());
        dbg.field("block number", &self.block_number.read().get());
        dbg.field("token ctrl signature", &self.token_ctrl_signature.read().get());
        dbg.finish()
    }
}
/// Raw big-endian dword layout of the HCA capabilities returned by a
/// QUERY_HCA_CAP command in an output mailbox (256 bytes; asserted below).
///
/// NOTE(review): each field name below refers to just one of the bit-fields
/// packed into that dword — `get_capabilities` extracts several values from
/// many of these dwords, so a field's name does not describe its whole dword.
/// Verify positions against the device's QUERY_HCA_CAP layout before editing.
#[derive(FromBytes)]
#[repr(C)]
struct HCACapabilitiesLayout {
vhca_resource_manager: ReadOnly<U32<BigEndian>>,
transpose_max_element_size: ReadOnly<U32<BigEndian>>,
transpose_max_size: ReadOnly<U32<BigEndian>>,
_padding0: u32,
log_max_qp: ReadOnly<U32<BigEndian>>,
scatter_fcs: ReadOnly<U32<BigEndian>>,
log_max_cq: ReadOnly<U32<BigEndian>>,
log_max_eq: ReadOnly<U32<BigEndian>>,
log_max_klm: ReadOnly<U32<BigEndian>>,
log_max_ra_res_dc: ReadOnly<U32<BigEndian>>,
log_max_ra_res_qp: ReadOnly<U32<BigEndian>>,
gid_table_size: ReadOnly<U32<BigEndian>>,
pkey_table_size: ReadOnly<U32<BigEndian>>,
num_ports: ReadOnly<U32<BigEndian>>,
wol_p: ReadOnly<U32<BigEndian>>,
cqe_version: ReadOnly<U32<BigEndian>>,
extended_retry_count: ReadOnly<U32<BigEndian>>,
rc: ReadOnly<U32<BigEndian>>,
log_pg_sz: ReadOnly<U32<BigEndian>>,
lag_native: ReadOnly<U32<BigEndian>>,
max_wqe_sz_sq: ReadOnly<U32<BigEndian>>,
max_wqe_sz_rq: ReadOnly<U32<BigEndian>>,
max_wqe_sz_sq_dc: ReadOnly<U32<BigEndian>>,
max_qp_mcg: ReadOnly<U32<BigEndian>>,
log_max_mcg: ReadOnly<U32<BigEndian>>,
log_max_xrcd: ReadOnly<U32<BigEndian>>,
max_flow_counter_15_0: ReadOnly<U32<BigEndian>>,
log_max_tis: ReadOnly<U32<BigEndian>>,
log_max_tis_per_sq: ReadOnly<U32<BigEndian>>,
log_min_stride_sz_sq: ReadOnly<U32<BigEndian>>,
log_max_wq_sz: ReadOnly<U32<BigEndian>>,
log_max_current_uc_list: ReadOnly<U32<BigEndian>>,
_padding1: u64,
create_qp_start_hint: ReadOnly<U32<BigEndian>>,
max_num_eqs: ReadOnly<U32<BigEndian>>,
log_uar_page_sz: ReadOnly<U32<BigEndian>>,
_padding2: u32,
device_frequency_mhz: ReadOnly<U32<BigEndian>>,
device_frequency_khz: ReadOnly<U32<BigEndian>>,
nvmf_target: ReadOnly<U32<BigEndian>>,
_padding3: u32,
flex_parser_protocols: ReadOnly<U32<BigEndian>>,
flex_parser_header: ReadOnly<U32<BigEndian>>,
_padding4: u32,
cqe_compression: ReadOnly<U32<BigEndian>>,
cqe_compression_max_num: ReadOnly<U32<BigEndian>>,
log_max_xrq: ReadOnly<U32<BigEndian>>,
sw_owner_id: ReadOnly<U32<BigEndian>>,
num_ppcnt: ReadOnly<U32<BigEndian>>,
num_q: ReadOnly<U32<BigEndian>>,
max_num_sf: ReadOnly<U32<BigEndian>>,
_padding5: u32,
flex_parser_id: ReadOnly<U32<BigEndian>>,
sf_base_id: ReadOnly<U32<BigEndian>>,
num_total_dynamic: ReadOnly<U32<BigEndian>>,
dynmaic_msix_table: ReadOnly<U32<BigEndian>>,
max_dynamic_vf: ReadOnly<U32<BigEndian>>,
max_flow_execute: ReadOnly<U32<BigEndian>>,
_padding6: u64,
match_definer: ReadOnly<U32<BigEndian>>,
}
// The layout must match the device's 256-byte capability page exactly.
const _: () = assert!(core::mem::size_of::<HCACapabilitiesLayout>() == 256);
/// Device capabilities unpacked from the raw [`HCACapabilitiesLayout`]
/// (see `get_capabilities`), with each packed bit-field widened to a plain
/// integer or `bool`. Currently only read for inspection/debug output.
#[allow(dead_code)]
#[derive(Debug)]
pub struct HCACapabilities {
log_max_cq_sz: u8,
log_max_cq: u8,
log_max_eq_sz: u8,
log_max_mkey: u8,
log_max_eq: u8,
max_indirection: u8,
log_max_mrw_sz: u8,
log_max_klm_list_size: u8,
end_pad: bool,
start_pad: bool,
cache_line_128byte: bool,
vport_counters: bool,
vport_group_manager: bool,
nic_flow_table: bool,
port_type: u8,
num_ports: u8,
log_max_msg: u8,
max_tc: u8,
cqe_version: u8,
cmdif_checksum: u8,
wq_signature: bool,
sctr_data_cqe: bool,
eth_net_offloads: bool,
cq_oi: bool,
cq_resize: bool,
cq_moderation: bool,
cq_eq_remap: bool,
scqe_break_moderation: bool,
cq_period_start_from_cqe: bool,
imaicl: bool,
xrc: bool,
ud: bool,
uc: bool,
rc: bool,
uar_sz: u8,
log_pg_sz: u8,
bf: bool,
driver_version: bool,
pad_tx_eth_packet: bool,
log_bf_reg_size: u8,
log_max_transport_domain: u8,
log_max_pd: u8,
max_flow_counter: u16,
log_max_rq: u8,
log_max_sq: u8,
log_max_tir: u8,
log_max_tis: u8,
basic_cyclic_rcv_wqe: bool,
log_max_rmp: u8,
log_max_rqt: u8,
log_max_rqt_size: u8,
log_max_tis_per_sq: u8,
log_max_stride_sz_rq: u8,
log_min_stride_sz_rq: u8,
log_max_stride_sz_sq: u8,
log_min_stride_sz_sq: u8,
log_max_wq_sz: u8,
log_max_vlan_list: u8,
log_max_current_mc_list: u8,
log_max_current_uc_list: u8,
log_max_l2_table: u8,
log_uar_page_sz: u16,
device_frequency_mhz: u32,
}
impl HCACapabilitiesLayout {
/// Unpacks the packed big-endian capability dwords into an [`HCACapabilities`].
///
/// Each line extracts one bit-field: the shift selects the bit position within
/// the source dword and the mask selects its width; `get_bit(n)` reads a
/// single flag bit. Note that many dwords carry several unrelated fields, so
/// the source field's name often differs from the destination field's name.
/// NOTE(review): verify shifts/masks against the device's QUERY_HCA_CAP
/// documentation before changing any of these extractions.
fn get_capabilities(&self) -> HCACapabilities {
HCACapabilities {
log_max_cq_sz: ((self.log_max_cq.read().get() >> 16) & 0xFF) as u8,
log_max_cq: (self.log_max_cq.read().get() & 0x1F) as u8,
log_max_eq_sz: ((self.log_max_eq.read().get() >> 24) & 0xFF) as u8,
log_max_mkey: ((self.log_max_eq.read().get() >> 16) & 0x3F) as u8,
log_max_eq: (self.log_max_eq.read().get() & 0xF) as u8,
max_indirection: ((self.log_max_klm.read().get() >> 24) & 0xFF) as u8,
log_max_mrw_sz: ((self.log_max_klm.read().get() >> 16) & 0x7F) as u8,
log_max_klm_list_size: (self.log_max_klm.read().get() & 0x3F) as u8,
end_pad: self.gid_table_size.read().get().get_bit(31),
start_pad: self.gid_table_size.read().get().get_bit(28),
cache_line_128byte: self.gid_table_size.read().get().get_bit(27),
vport_counters: self.pkey_table_size.read().get().get_bit(30),
vport_group_manager: self.num_ports.read().get().get_bit(31),
nic_flow_table: self.num_ports.read().get().get_bit(25),
port_type: ((self.num_ports.read().get() >> 8) & 0x3) as u8,
num_ports: (self.num_ports.read().get() & 0xFF) as u8,
log_max_msg: ((self.wol_p.read().get() >> 24) & 0x1F) as u8,
max_tc: ((self.wol_p.read().get() >> 16) & 0xF) as u8,
cqe_version: (self.cqe_version.read().get() & 0xF) as u8,
cmdif_checksum: ((self.extended_retry_count.read().get() >> 14) & 0x3) as u8,
wq_signature: self.extended_retry_count.read().get().get_bit(11),
sctr_data_cqe: self.extended_retry_count.read().get().get_bit(10),
eth_net_offloads: self.extended_retry_count.read().get().get_bit(3),
cq_oi: self.rc.read().get().get_bit(31),
cq_resize: self.rc.read().get().get_bit(30),
cq_moderation: self.rc.read().get().get_bit(29),
cq_eq_remap: self.rc.read().get().get_bit(25),
scqe_break_moderation: self.rc.read().get().get_bit(21),
cq_period_start_from_cqe: self.rc.read().get().get_bit(20),
imaicl: self.rc.read().get().get_bit(14),
xrc: self.rc.read().get().get_bit(3),
ud: self.rc.read().get().get_bit(2),
uc: self.rc.read().get().get_bit(1),
rc: self.rc.read().get().get_bit(0),
uar_sz: ((self.log_pg_sz.read().get() >> 16) & 0x3F) as u8,
log_pg_sz: (self.log_pg_sz.read().get() & 0xFF) as u8,
bf: self.lag_native.read().get().get_bit(31),
driver_version: self.lag_native.read().get().get_bit(30),
pad_tx_eth_packet: self.lag_native.read().get().get_bit(29),
log_bf_reg_size: ((self.lag_native.read().get() >> 16) & 0x1F) as u8,
log_max_transport_domain: ((self.log_max_xrcd.read().get() >> 24) & 0x1F) as u8,
log_max_pd: ((self.log_max_xrcd.read().get() >> 16) & 0x1F) as u8,
max_flow_counter: (self.max_flow_counter_15_0.read().get() & 0xFFFF) as u16,
log_max_rq: ((self.log_max_tis.read().get() >> 24) & 0x1F) as u8,
log_max_sq: ((self.log_max_tis.read().get() >> 16) & 0x1F) as u8,
log_max_tir: ((self.log_max_tis.read().get() >> 8) & 0x1F) as u8,
log_max_tis: (self.log_max_tis.read().get() & 0x1F) as u8,
basic_cyclic_rcv_wqe: self.log_max_tis_per_sq.read().get().get_bit(31),
log_max_rmp: ((self.log_max_tis_per_sq.read().get() >> 24) & 0x1F) as u8,
log_max_rqt: ((self.log_max_tis_per_sq.read().get() >> 16) & 0x1F) as u8,
log_max_rqt_size: ((self.log_max_tis_per_sq.read().get() >> 8) & 0x1F) as u8,
log_max_tis_per_sq: (self.log_max_tis_per_sq.read().get() & 0x1F) as u8,
log_max_stride_sz_rq: ((self.log_min_stride_sz_sq.read().get() >> 24) & 0x1F) as u8,
log_min_stride_sz_rq: ((self.log_min_stride_sz_sq.read().get() >> 16) & 0x1F) as u8,
log_max_stride_sz_sq: ((self.log_min_stride_sz_sq.read().get() >> 8) & 0x1F) as u8,
log_min_stride_sz_sq: (self.log_min_stride_sz_sq.read().get() & 0x1F) as u8,
log_max_wq_sz: (self.log_max_wq_sz.read().get() & 0x1F) as u8,
log_max_vlan_list: ((self.log_max_current_uc_list.read().get() >> 16) & 0x1F) as u8,
log_max_current_mc_list: ((self.log_max_current_uc_list.read().get() >> 8) & 0x1F) as u8,
log_max_current_uc_list: (self.log_max_current_uc_list.read().get() & 0x1F) as u8,
log_max_l2_table: ((self.log_uar_page_sz.read().get() >> 24) & 0x1F) as u8,
log_uar_page_sz: (self.log_uar_page_sz.read().get() & 0xFFFF) as u16,
device_frequency_mhz: self.device_frequency_mhz.read().get()
}
}
}