// SPDX-License-Identifier: GPL-2.0

//! Memory-mapped IO.
//!
//! C header: [`include/asm-generic/io.h`](srctree/include/asm-generic/io.h)

use crate::error::{code::EINVAL, Result};
use crate::{bindings, build_assert, ffi::c_void};

pub mod mem;
pub mod poll;
pub mod resource;

pub use resource::Resource;

/// Raw representation of an MMIO region.
///
/// By itself, the existence of an instance of this structure does not provide any guarantees that
/// the represented MMIO region does exist or is properly mapped.
///
/// Instead, the bus specific MMIO implementation must convert this raw representation into an
/// [`Io`] instance providing the actual memory accessors. Only through the conversion into an
/// [`Io`] structure are any guarantees given.
pub struct IoRaw<const SIZE: usize = 0> {
    // Base address of the (not yet verified) MMIO region.
    addr: usize,
    // Maximum size of the (not yet verified) MMIO region; checked to be at least `SIZE` in
    // `Self::new`.
    maxsize: usize,
}

impl<const SIZE: usize> IoRaw<SIZE> {
    /// Returns a new `IoRaw` instance on success, an error otherwise.
    ///
    /// Fails with [`EINVAL`] if `maxsize` is smaller than the compile-time minimum size `SIZE`.
    pub fn new(addr: usize, maxsize: usize) -> Result<Self> {
        if maxsize < SIZE {
            return Err(EINVAL);
        }

        Ok(Self { addr, maxsize })
    }

    /// Returns the base address of the MMIO region.
    #[inline]
    pub fn addr(&self) -> usize {
        self.addr
    }

    /// Returns the maximum size of the MMIO region.
    #[inline]
    pub fn maxsize(&self) -> usize {
        self.maxsize
    }
}

/// IO-mapped memory region.
///
/// The creator (usually a subsystem / bus such as PCI) is responsible for creating the
/// mapping, performing an additional region request etc.
///
/// # Invariant
///
/// `addr` is the start address of a valid I/O mapped memory region of size `maxsize`.
///
/// # Examples
///
/// ```no_run
/// # use kernel::{bindings, ffi::c_void, io::{Io, IoRaw}};
/// # use core::ops::Deref;
///
/// // See also [`pci::Bar`] for a real example.
/// struct IoMem<const SIZE: usize>(IoRaw<SIZE>);
///
/// impl<const SIZE: usize> IoMem<SIZE> {
///     /// # Safety
///     ///
///     /// [`paddr`, `paddr` + `SIZE`) must be a valid MMIO region that is mappable into the
///     /// CPU's virtual address space.
///     unsafe fn new(paddr: usize) -> Result<Self> {
///         // SAFETY: By the safety requirements of this function [`paddr`, `paddr` + `SIZE`) is
///         // valid for `ioremap`.
///         let addr = unsafe { bindings::ioremap(paddr as bindings::phys_addr_t, SIZE) };
///         if addr.is_null() {
///             return Err(ENOMEM);
///         }
///
///         Ok(IoMem(IoRaw::new(addr as usize, SIZE)?))
///     }
/// }
///
/// impl<const SIZE: usize> Drop for IoMem<SIZE> {
///     fn drop(&mut self) {
///         // SAFETY: `self.0.addr()` is guaranteed to be properly mapped by `Self::new`.
///         unsafe { bindings::iounmap(self.0.addr() as *mut c_void); };
///     }
/// }
///
/// impl<const SIZE: usize> Deref for IoMem<SIZE> {
///     type Target = Io<SIZE>;
///
///     fn deref(&self) -> &Self::Target {
///         // SAFETY: The memory range stored in `self` has been properly mapped in `Self::new`.
///         unsafe { Io::from_raw(&self.0) }
///     }
/// }
///
/// # fn no_run() -> Result<(), Error> {
/// // SAFETY: Invalid usage for example purposes.
/// let iomem = unsafe { IoMem::<{ core::mem::size_of::<u32>() }>::new(0xBAAAAAAD)? };
/// iomem.write32(0x42, 0x0);
/// assert!(iomem.try_write32(0x42, 0x0).is_ok());
/// assert!(iomem.try_write32(0x42, 0x4).is_err());
/// # Ok(())
/// # }
/// ```
#[repr(transparent)]
pub struct Io<const SIZE: usize = 0>(IoRaw<SIZE>);

// Generates a pair of read accessors for one width: `$name` with compile-time bounds checking
// (offset must be a constant) and `$try_name` with runtime bounds checking. `$c_fn` is the C
// accessor from `include/asm-generic/io.h` (e.g. `readb`, `readl_relaxed`).
macro_rules! define_read {
    ($(#[$attr:meta])* $name:ident, $try_name:ident, $c_fn:ident -> $type_name:ty) => {
        /// Read IO data from a given offset known at compile time.
        ///
        /// Bound checks are performed at compile time, hence if the offset is not known at
        /// compile time, the build will fail.
        $(#[$attr])*
        #[inline]
        pub fn $name(&self, offset: usize) -> $type_name {
            let addr = self.io_addr_assert::<$type_name>(offset);

            // SAFETY: By the type invariant `addr` is a valid address for MMIO operations.
            unsafe { bindings::$c_fn(addr as *const c_void) }
        }

        /// Read IO data from a given offset.
        ///
        /// Bound checks are performed at runtime, it fails if the offset (plus the type size) is
        /// out of bounds.
        $(#[$attr])*
        pub fn $try_name(&self, offset: usize) -> Result<$type_name> {
            let addr = self.io_addr::<$type_name>(offset)?;

            // SAFETY: By the type invariant `addr` is a valid address for MMIO operations.
            Ok(unsafe { bindings::$c_fn(addr as *const c_void) })
        }
    };
}

// Generates a pair of write accessors for one width, mirroring `define_read!`: `$name` with
// compile-time bounds checking and `$try_name` with runtime bounds checking. `$c_fn` is the C
// accessor from `include/asm-generic/io.h` (e.g. `writeb`, `writel_relaxed`).
macro_rules! define_write {
    ($(#[$attr:meta])* $name:ident, $try_name:ident, $c_fn:ident <- $type_name:ty) => {
        /// Write IO data from a given offset known at compile time.
        ///
        /// Bound checks are performed at compile time, hence if the offset is not known at
        /// compile time, the build will fail.
        $(#[$attr])*
        #[inline]
        pub fn $name(&self, value: $type_name, offset: usize) {
            let addr = self.io_addr_assert::<$type_name>(offset);

            // SAFETY: By the type invariant `addr` is a valid address for MMIO operations.
            unsafe { bindings::$c_fn(value, addr as *mut c_void) }
        }

        /// Write IO data from a given offset.
        ///
        /// Bound checks are performed at runtime, it fails if the offset (plus the type size) is
        /// out of bounds.
        $(#[$attr])*
        pub fn $try_name(&self, value: $type_name, offset: usize) -> Result {
            let addr = self.io_addr::<$type_name>(offset)?;

            // SAFETY: By the type invariant `addr` is a valid address for MMIO operations.
            unsafe { bindings::$c_fn(value, addr as *mut c_void) }
            Ok(())
        }
    };
}

impl<const SIZE: usize> Io<SIZE> {
    /// Converts an `IoRaw` into an `Io` instance, providing the accessors to the MMIO mapping.
    ///
    /// # Safety
    ///
    /// Callers must ensure that `addr` is the start of a valid I/O mapped memory region of size
    /// `maxsize`.
    pub unsafe fn from_raw(raw: &IoRaw<SIZE>) -> &Self {
        // SAFETY: `Io` is a transparent wrapper around `IoRaw`.
        unsafe { &*core::ptr::from_ref(raw).cast() }
    }

    /// Returns the base address of this mapping.
    #[inline]
    pub fn addr(&self) -> usize {
        self.0.addr()
    }

    /// Returns the maximum size of this mapping.
    #[inline]
    pub fn maxsize(&self) -> usize {
        self.0.maxsize()
    }

    // Returns `true` if a `U`-sized access at `offset` lies entirely within `size` bytes and
    // `offset` is naturally aligned to the access width; `false` otherwise (including on
    // `offset + size_of::<U>()` overflow).
    #[inline]
    const fn offset_valid<U>(offset: usize, size: usize) -> bool {
        let type_size = core::mem::size_of::<U>();
        if let Some(end) = offset.checked_add(type_size) {
            end <= size && offset % type_size == 0
        } else {
            false
        }
    }

    // Computes the absolute I/O address for a `U`-sized access at `offset`, bounds-checked at
    // runtime against `self.maxsize()`. Returns `EINVAL` on an out-of-bounds or misaligned
    // offset.
    #[inline]
    fn io_addr<U>(&self, offset: usize) -> Result<usize> {
        if !Self::offset_valid::<U>(offset, self.maxsize()) {
            return Err(EINVAL);
        }

        // Probably no need to check, since the type invariant (`addr` + `maxsize` is a valid
        // region) guarantees that this can't overflow.
        self.addr().checked_add(offset).ok_or(EINVAL)
    }

    // Computes the absolute I/O address for a `U`-sized access at `offset`, bounds-checked at
    // compile time against the const generic `SIZE` via `build_assert!` — a non-constant or
    // out-of-bounds offset fails the build.
    #[inline]
    fn io_addr_assert<U>(&self, offset: usize) -> usize {
        build_assert!(Self::offset_valid::<U>(offset, SIZE));

        self.addr() + offset
    }

    define_read!(read8, try_read8, readb -> u8);
    define_read!(read16, try_read16, readw -> u16);
    define_read!(read32, try_read32, readl -> u32);
    define_read!(
        #[cfg(CONFIG_64BIT)]
        read64,
        try_read64,
        readq -> u64
    );

    define_read!(read8_relaxed, try_read8_relaxed, readb_relaxed -> u8);
    define_read!(read16_relaxed, try_read16_relaxed, readw_relaxed -> u16);
    define_read!(read32_relaxed, try_read32_relaxed, readl_relaxed -> u32);
    define_read!(
        #[cfg(CONFIG_64BIT)]
        read64_relaxed,
        try_read64_relaxed,
        readq_relaxed -> u64
    );

    define_write!(write8, try_write8, writeb <- u8);
    define_write!(write16, try_write16, writew <- u16);
    define_write!(write32, try_write32, writel <- u32);
    define_write!(
        #[cfg(CONFIG_64BIT)]
        write64,
        try_write64,
        writeq <- u64
    );

    define_write!(write8_relaxed, try_write8_relaxed, writeb_relaxed <- u8);
    define_write!(write16_relaxed, try_write16_relaxed, writew_relaxed <- u16);
    define_write!(write32_relaxed, try_write32_relaxed, writel_relaxed <- u32);
    define_write!(
        #[cfg(CONFIG_64BIT)]
        write64_relaxed,
        try_write64_relaxed,
        writeq_relaxed <- u64
    );
}