/* drivers/crypto/intel/keembay/ocs-aes.c */
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel Keem Bay OCS AES Crypto Driver.
 *
 * Copyright (C) 2018-2020 Intel Corporation
 */

#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/swab.h>

#include <asm/byteorder.h>
#include <asm/errno.h>

#include <crypto/aes.h>
#include <crypto/gcm.h>

#include "ocs-aes.h"

/* Register offsets within the OCS AES MMIO region. */
#define AES_COMMAND_OFFSET			0x0000
#define AES_KEY_0_OFFSET			0x0004
#define AES_KEY_1_OFFSET			0x0008
#define AES_KEY_2_OFFSET			0x000C
#define AES_KEY_3_OFFSET			0x0010
#define AES_KEY_4_OFFSET			0x0014
#define AES_KEY_5_OFFSET			0x0018
#define AES_KEY_6_OFFSET			0x001C
#define AES_KEY_7_OFFSET			0x0020
#define AES_IV_0_OFFSET				0x0024
#define AES_IV_1_OFFSET				0x0028
#define AES_IV_2_OFFSET				0x002C
#define AES_IV_3_OFFSET				0x0030
#define AES_ACTIVE_OFFSET			0x0034
#define AES_STATUS_OFFSET			0x0038
#define AES_KEY_SIZE_OFFSET			0x0044
#define AES_IER_OFFSET				0x0048
#define AES_ISR_OFFSET				0x005C
#define AES_MULTIPURPOSE1_0_OFFSET		0x0200
#define AES_MULTIPURPOSE1_1_OFFSET		0x0204
#define AES_MULTIPURPOSE1_2_OFFSET		0x0208
#define AES_MULTIPURPOSE1_3_OFFSET		0x020C
#define AES_MULTIPURPOSE2_0_OFFSET		0x0220
#define AES_MULTIPURPOSE2_1_OFFSET		0x0224
#define AES_MULTIPURPOSE2_2_OFFSET		0x0228
#define AES_MULTIPURPOSE2_3_OFFSET		0x022C
#define AES_BYTE_ORDER_CFG_OFFSET		0x02C0
#define AES_TLEN_OFFSET				0x0300
#define AES_T_MAC_0_OFFSET			0x0304
#define AES_T_MAC_1_OFFSET			0x0308
#define AES_T_MAC_2_OFFSET			0x030C
#define AES_T_MAC_3_OFFSET			0x0310
#define AES_PLEN_OFFSET				0x0314
#define AES_A_DMA_SRC_ADDR_OFFSET		0x0400
#define AES_A_DMA_DST_ADDR_OFFSET		0x0404
#define AES_A_DMA_SRC_SIZE_OFFSET		0x0408
#define AES_A_DMA_DST_SIZE_OFFSET		0x040C
#define AES_A_DMA_DMA_MODE_OFFSET		0x0410
#define AES_A_DMA_NEXT_SRC_DESCR_OFFSET		0x0418
#define AES_A_DMA_NEXT_DST_DESCR_OFFSET		0x041C
#define AES_A_DMA_WHILE_ACTIVE_MODE_OFFSET	0x0420
#define AES_A_DMA_LOG_OFFSET			0x0424
#define AES_A_DMA_STATUS_OFFSET			0x0428
#define AES_A_DMA_PERF_CNTR_OFFSET		0x042C
#define AES_A_DMA_MSI_ISR_OFFSET		0x0480
#define AES_A_DMA_MSI_IER_OFFSET		0x0484
#define AES_A_DMA_MSI_MASK_OFFSET		0x0488
#define AES_A_DMA_INBUFFER_WRITE_FIFO_OFFSET	0x0600
#define AES_A_DMA_OUTBUFFER_READ_FIFO_OFFSET	0x0700

/*
 * AES_A_DMA_DMA_MODE register.
 * Default: 0x00000000.
 * bit[31]	ACTIVE
 *		This bit activates the DMA. When the DMA finishes, it resets
 *		this bit to zero.
 * bit[30:26]	Unused by this driver.
 * bit[25]	SRC_LINK_LIST_EN
 *		Source link list enable bit. When the linked list is terminated
 *		this bit is reset by the DMA.
 * bit[24]	DST_LINK_LIST_EN
 *		Destination link list enable bit. When the linked list is
 *		terminated this bit is reset by the DMA.
 * bit[23:0]	Unused by this driver.
 */
#define AES_A_DMA_DMA_MODE_ACTIVE		BIT(31)
#define AES_A_DMA_DMA_MODE_SRC_LINK_LIST_EN	BIT(25)
#define AES_A_DMA_DMA_MODE_DST_LINK_LIST_EN	BIT(24)

/*
 * AES_ACTIVE register
 * default 0x00000000
 * bit[31:10]	Reserved
 * bit[9]	LAST_ADATA
 * bit[8]	LAST_GCX
 * bit[7:2]	Reserved
 * bit[1]	TERMINATION
 * bit[0]	TRIGGER
 */
#define AES_ACTIVE_LAST_ADATA			BIT(9)
#define AES_ACTIVE_LAST_CCM_GCM			BIT(8)
#define AES_ACTIVE_TERMINATION			BIT(1)
#define AES_ACTIVE_TRIGGER			BIT(0)

/* Interrupt enable / status bits (AES_IER / AES_ISR / DMA_MSI_*). */
#define AES_DISABLE_INT				0x00000000
#define AES_DMA_CPD_ERR_INT			BIT(8)
#define AES_DMA_OUTBUF_RD_ERR_INT		BIT(7)
#define AES_DMA_OUTBUF_WR_ERR_INT		BIT(6)
#define AES_DMA_INBUF_RD_ERR_INT		BIT(5)
#define AES_DMA_INBUF_WR_ERR_INT		BIT(4)
#define AES_DMA_BAD_COMP_INT			BIT(3)
#define AES_DMA_SAI_INT				BIT(2)
#define AES_DMA_SRC_DONE_INT			BIT(0)
#define AES_COMPLETE_INT			BIT(1)

#define AES_DMA_MSI_MASK_CLEAR			BIT(0)

/* Values for the AES_KEY_SIZE register (bit 0 selects 128 vs 256). */
#define AES_128_BIT_KEY				0x00000000
#define AES_256_BIT_KEY				BIT(0)

#define AES_DEACTIVATE_PERF_CNTR		0x00000000
#define AES_ACTIVATE_PERF_CNTR			BIT(0)

/* Maximum tag size expressed in 32-bit words (4 * 4 = 16 bytes). */
#define AES_MAX_TAG_SIZE_U32			4

#define OCS_LL_DMA_FLAG_TERMINATE		BIT(31)

/*
 * There is an inconsistency in the documentation. This is documented as a
 * 11-bit value, but it is actually 10-bits.
 */
#define AES_DMA_STATUS_INPUT_BUFFER_OCCUPANCY_MASK	0x3FF

/*
 * During CCM decrypt, the OCS block needs to finish processing the ciphertext
 * before the tag is written. For 128-bit mode this required delay is 28 OCS
 * clock cycles. For 256-bit mode it is 36 OCS clock cycles.
 * (36 is used unconditionally, covering both key sizes.)
 */
#define CCM_DECRYPT_DELAY_TAG_CLK_COUNT		36UL

/*
 * During CCM decrypt there must be a delay of at least 42 OCS clock cycles
 * between setting the TRIGGER bit in AES_ACTIVE and setting the LAST_CCM_GCM
 * bit in the same register (as stated in the OCS databook)
 */
#define CCM_DECRYPT_DELAY_LAST_GCX_CLK_COUNT	42UL

/* See RFC3610 section 2.2 */
#define L_PRIME_MIN (1)
#define L_PRIME_MAX (7)
/*
 * CCM IV format from RFC 3610 section 2.3
 *
 * Octet Number   Contents
 * ------------   ---------
 * 0              Flags
 * 1 ... 15-L     Nonce N
 * 16-L ... 
15 Counter i160*161* Flags = L' = L - 1162*/163#define L_PRIME_IDX 0164#define COUNTER_START(lprime) (16 - ((lprime) + 1))165#define COUNTER_LEN(lprime) ((lprime) + 1)166167enum aes_counter_mode {168AES_CTR_M_NO_INC = 0,169AES_CTR_M_32_INC = 1,170AES_CTR_M_64_INC = 2,171AES_CTR_M_128_INC = 3,172};173174/**175* struct ocs_dma_linked_list - OCS DMA linked list entry.176* @src_addr: Source address of the data.177* @src_len: Length of data to be fetched.178* @next: Next dma_list to fetch.179* @ll_flags: Flags (Freeze @ terminate) for the DMA engine.180*/181struct ocs_dma_linked_list {182u32 src_addr;183u32 src_len;184u32 next;185u32 ll_flags;186} __packed;187188/*189* Set endianness of inputs and outputs190* AES_BYTE_ORDER_CFG191* default 0x00000000192* bit [10] - KEY_HI_LO_SWAP193* bit [9] - KEY_HI_SWAP_DWORDS_IN_OCTWORD194* bit [8] - KEY_HI_SWAP_BYTES_IN_DWORD195* bit [7] - KEY_LO_SWAP_DWORDS_IN_OCTWORD196* bit [6] - KEY_LO_SWAP_BYTES_IN_DWORD197* bit [5] - IV_SWAP_DWORDS_IN_OCTWORD198* bit [4] - IV_SWAP_BYTES_IN_DWORD199* bit [3] - DOUT_SWAP_DWORDS_IN_OCTWORD200* bit [2] - DOUT_SWAP_BYTES_IN_DWORD201* bit [1] - DOUT_SWAP_DWORDS_IN_OCTWORD202* bit [0] - DOUT_SWAP_BYTES_IN_DWORD203*/204static inline void aes_a_set_endianness(const struct ocs_aes_dev *aes_dev)205{206iowrite32(0x7FF, aes_dev->base_reg + AES_BYTE_ORDER_CFG_OFFSET);207}208209/* Trigger AES process start. */210static inline void aes_a_op_trigger(const struct ocs_aes_dev *aes_dev)211{212iowrite32(AES_ACTIVE_TRIGGER, aes_dev->base_reg + AES_ACTIVE_OFFSET);213}214215/* Indicate last bulk of data. 
*/216static inline void aes_a_op_termination(const struct ocs_aes_dev *aes_dev)217{218iowrite32(AES_ACTIVE_TERMINATION,219aes_dev->base_reg + AES_ACTIVE_OFFSET);220}221222/*223* Set LAST_CCM_GCM in AES_ACTIVE register and clear all other bits.224*225* Called when DMA is programmed to fetch the last batch of data.226* - For AES-CCM it is called for the last batch of Payload data and Ciphertext227* data.228* - For AES-GCM, it is called for the last batch of Plaintext data and229* Ciphertext data.230*/231static inline void aes_a_set_last_gcx(const struct ocs_aes_dev *aes_dev)232{233iowrite32(AES_ACTIVE_LAST_CCM_GCM,234aes_dev->base_reg + AES_ACTIVE_OFFSET);235}236237/* Wait for LAST_CCM_GCM bit to be unset. */238static inline void aes_a_wait_last_gcx(const struct ocs_aes_dev *aes_dev)239{240u32 aes_active_reg;241242do {243aes_active_reg = ioread32(aes_dev->base_reg +244AES_ACTIVE_OFFSET);245} while (aes_active_reg & AES_ACTIVE_LAST_CCM_GCM);246}247248/* Wait for 10 bits of input occupancy. 
*/249static void aes_a_dma_wait_input_buffer_occupancy(const struct ocs_aes_dev *aes_dev)250{251u32 reg;252253do {254reg = ioread32(aes_dev->base_reg + AES_A_DMA_STATUS_OFFSET);255} while (reg & AES_DMA_STATUS_INPUT_BUFFER_OCCUPANCY_MASK);256}257258/*259* Set LAST_CCM_GCM and LAST_ADATA bits in AES_ACTIVE register (and clear all260* other bits).261*262* Called when DMA is programmed to fetch the last batch of Associated Data263* (CCM case) or Additional Authenticated Data (GCM case).264*/265static inline void aes_a_set_last_gcx_and_adata(const struct ocs_aes_dev *aes_dev)266{267iowrite32(AES_ACTIVE_LAST_ADATA | AES_ACTIVE_LAST_CCM_GCM,268aes_dev->base_reg + AES_ACTIVE_OFFSET);269}270271/* Set DMA src and dst transfer size to 0 */272static inline void aes_a_dma_set_xfer_size_zero(const struct ocs_aes_dev *aes_dev)273{274iowrite32(0, aes_dev->base_reg + AES_A_DMA_SRC_SIZE_OFFSET);275iowrite32(0, aes_dev->base_reg + AES_A_DMA_DST_SIZE_OFFSET);276}277278/* Activate DMA for zero-byte transfer case. 
*/279static inline void aes_a_dma_active(const struct ocs_aes_dev *aes_dev)280{281iowrite32(AES_A_DMA_DMA_MODE_ACTIVE,282aes_dev->base_reg + AES_A_DMA_DMA_MODE_OFFSET);283}284285/* Activate DMA and enable src linked list */286static inline void aes_a_dma_active_src_ll_en(const struct ocs_aes_dev *aes_dev)287{288iowrite32(AES_A_DMA_DMA_MODE_ACTIVE |289AES_A_DMA_DMA_MODE_SRC_LINK_LIST_EN,290aes_dev->base_reg + AES_A_DMA_DMA_MODE_OFFSET);291}292293/* Activate DMA and enable dst linked list */294static inline void aes_a_dma_active_dst_ll_en(const struct ocs_aes_dev *aes_dev)295{296iowrite32(AES_A_DMA_DMA_MODE_ACTIVE |297AES_A_DMA_DMA_MODE_DST_LINK_LIST_EN,298aes_dev->base_reg + AES_A_DMA_DMA_MODE_OFFSET);299}300301/* Activate DMA and enable src and dst linked lists */302static inline void aes_a_dma_active_src_dst_ll_en(const struct ocs_aes_dev *aes_dev)303{304iowrite32(AES_A_DMA_DMA_MODE_ACTIVE |305AES_A_DMA_DMA_MODE_SRC_LINK_LIST_EN |306AES_A_DMA_DMA_MODE_DST_LINK_LIST_EN,307aes_dev->base_reg + AES_A_DMA_DMA_MODE_OFFSET);308}309310/* Reset PERF_CNTR to 0 and activate it */311static inline void aes_a_dma_reset_and_activate_perf_cntr(const struct ocs_aes_dev *aes_dev)312{313iowrite32(0x00000000, aes_dev->base_reg + AES_A_DMA_PERF_CNTR_OFFSET);314iowrite32(AES_ACTIVATE_PERF_CNTR,315aes_dev->base_reg + AES_A_DMA_WHILE_ACTIVE_MODE_OFFSET);316}317318/* Wait until PERF_CNTR is > delay, then deactivate it */319static inline void aes_a_dma_wait_and_deactivate_perf_cntr(const struct ocs_aes_dev *aes_dev,320int delay)321{322while (ioread32(aes_dev->base_reg + AES_A_DMA_PERF_CNTR_OFFSET) < delay)323;324iowrite32(AES_DEACTIVATE_PERF_CNTR,325aes_dev->base_reg + AES_A_DMA_WHILE_ACTIVE_MODE_OFFSET);326}327328/* Disable AES and DMA IRQ. 
 */
static void aes_irq_disable(struct ocs_aes_dev *aes_dev)
{
	u32 isr_val = 0;

	/* Disable interrupts */
	iowrite32(AES_DISABLE_INT,
		  aes_dev->base_reg + AES_A_DMA_MSI_IER_OFFSET);
	iowrite32(AES_DISABLE_INT, aes_dev->base_reg + AES_IER_OFFSET);

	/* Clear any pending interrupt (the read value is written back). */
	isr_val = ioread32(aes_dev->base_reg + AES_A_DMA_MSI_ISR_OFFSET);
	if (isr_val)
		iowrite32(isr_val,
			  aes_dev->base_reg + AES_A_DMA_MSI_ISR_OFFSET);

	isr_val = ioread32(aes_dev->base_reg + AES_A_DMA_MSI_MASK_OFFSET);
	if (isr_val)
		iowrite32(isr_val,
			  aes_dev->base_reg + AES_A_DMA_MSI_MASK_OFFSET);

	isr_val = ioread32(aes_dev->base_reg + AES_ISR_OFFSET);
	if (isr_val)
		iowrite32(isr_val, aes_dev->base_reg + AES_ISR_OFFSET);
}

/* Enable AES or DMA IRQ. IRQ is disabled once fired. */
static void aes_irq_enable(struct ocs_aes_dev *aes_dev, u8 irq)
{
	if (irq == AES_COMPLETE_INT) {
		/* Ensure DMA error interrupts are enabled */
		iowrite32(AES_DMA_CPD_ERR_INT |
			  AES_DMA_OUTBUF_RD_ERR_INT |
			  AES_DMA_OUTBUF_WR_ERR_INT |
			  AES_DMA_INBUF_RD_ERR_INT |
			  AES_DMA_INBUF_WR_ERR_INT |
			  AES_DMA_BAD_COMP_INT |
			  AES_DMA_SAI_INT,
			  aes_dev->base_reg + AES_A_DMA_MSI_IER_OFFSET);
		/*
		 * AES_IER
		 * default 0x00000000
		 * bits [31:3] - reserved
		 * bit [2] - EN_SKS_ERR
		 * bit [1] - EN_AES_COMPLETE
		 * bit [0] - reserved
		 */
		iowrite32(AES_COMPLETE_INT, aes_dev->base_reg + AES_IER_OFFSET);
		return;
	}
	if (irq == AES_DMA_SRC_DONE_INT) {
		/* Ensure AES interrupts are disabled */
		iowrite32(AES_DISABLE_INT, aes_dev->base_reg + AES_IER_OFFSET);
		/*
		 * DMA_MSI_IER
		 * default 0x00000000
		 * bits [31:9] - reserved
		 * bit [8] - CPD_ERR_INT_EN
		 * bit [7] - OUTBUF_RD_ERR_INT_EN
		 * bit [6] - OUTBUF_WR_ERR_INT_EN
		 * bit [5] - INBUF_RD_ERR_INT_EN
		 * bit [4] - INBUF_WR_ERR_INT_EN
		 * bit [3] - BAD_COMP_INT_EN
		 * bit [2] - SAI_INT_EN
		 * bit [1] - DST_DONE_INT_EN
		 * bit [0] - SRC_DONE_INT_EN
		 */
		iowrite32(AES_DMA_CPD_ERR_INT |
			  AES_DMA_OUTBUF_RD_ERR_INT |
			  AES_DMA_OUTBUF_WR_ERR_INT |
			  AES_DMA_INBUF_RD_ERR_INT |
			  AES_DMA_INBUF_WR_ERR_INT |
			  AES_DMA_BAD_COMP_INT |
			  AES_DMA_SAI_INT |
			  AES_DMA_SRC_DONE_INT,
			  aes_dev->base_reg + AES_A_DMA_MSI_IER_OFFSET);
	}
}

/*
 * Enable and wait for IRQ (either from OCS AES engine or DMA).
 * Returns -EIO if a DMA error was latched by the IRQ handler, the (negative)
 * result of an interrupted wait, or 0 on success.
 */
static int ocs_aes_irq_enable_and_wait(struct ocs_aes_dev *aes_dev, u8 irq)
{
	int rc;

	reinit_completion(&aes_dev->irq_completion);
	aes_irq_enable(aes_dev, irq);
	rc = wait_for_completion_interruptible(&aes_dev->irq_completion);
	if (rc)
		return rc;

	return aes_dev->dma_err_mask ? -EIO : 0;
}

/* Configure DMA to OCS, linked list mode */
static inline void dma_to_ocs_aes_ll(struct ocs_aes_dev *aes_dev,
				     dma_addr_t dma_list)
{
	iowrite32(0, aes_dev->base_reg + AES_A_DMA_SRC_SIZE_OFFSET);
	iowrite32(dma_list,
		  aes_dev->base_reg + AES_A_DMA_NEXT_SRC_DESCR_OFFSET);
}

/* Configure DMA from OCS, linked list mode */
static inline void dma_from_ocs_aes_ll(struct ocs_aes_dev *aes_dev,
				       dma_addr_t dma_list)
{
	iowrite32(0, aes_dev->base_reg + AES_A_DMA_DST_SIZE_OFFSET);
	iowrite32(dma_list,
		  aes_dev->base_reg + AES_A_DMA_NEXT_DST_DESCR_OFFSET);
}

/* Top-half IRQ handler: latch DMA error status and signal completion. */
irqreturn_t ocs_aes_irq_handler(int irq, void *dev_id)
{
	struct ocs_aes_dev *aes_dev = dev_id;
	u32 aes_dma_isr;

	/* Read DMA ISR status. */
	aes_dma_isr = ioread32(aes_dev->base_reg + AES_A_DMA_MSI_ISR_OFFSET);

	/* Disable and clear interrupts. */
	aes_irq_disable(aes_dev);

	/* Save DMA error status. */
	aes_dev->dma_err_mask = aes_dma_isr &
				(AES_DMA_CPD_ERR_INT |
				 AES_DMA_OUTBUF_RD_ERR_INT |
				 AES_DMA_OUTBUF_WR_ERR_INT |
				 AES_DMA_INBUF_RD_ERR_INT |
				 AES_DMA_INBUF_WR_ERR_INT |
				 AES_DMA_BAD_COMP_INT |
				 AES_DMA_SAI_INT);

	/* Signal IRQ completion. */
	complete(&aes_dev->irq_completion);

	return IRQ_HANDLED;
}

/**
 * ocs_aes_set_key() - Write key into OCS AES hardware.
 * @aes_dev: The OCS AES device to write the key to.
 * @key_size: The size of the key (in bytes).
 * @key: The key to write.
 * @cipher: The cipher the key is for.
 *
 * For AES @key_size must be either 16 or 32. For SM4 @key_size must be 16.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int ocs_aes_set_key(struct ocs_aes_dev *aes_dev, u32 key_size, const u8 *key,
		    enum ocs_cipher cipher)
{
	const u32 *key_u32;
	u32 val;
	int i;

	/* OCS AES supports 128-bit and 256-bit keys only. */
	if (cipher == OCS_AES && !(key_size == 32 || key_size == 16)) {
		dev_err(aes_dev->dev,
			"%d-bit keys not supported by AES cipher\n",
			key_size * 8);
		return -EINVAL;
	}
	/* OCS SM4 supports 128-bit keys only. */
	if (cipher == OCS_SM4 && key_size != 16) {
		dev_err(aes_dev->dev,
			"%d-bit keys not supported for SM4 cipher\n",
			key_size * 8);
		return -EINVAL;
	}

	if (!key)
		return -EINVAL;

	key_u32 = (const u32 *)key;

	/* Write key to AES_KEY[0-7] registers */
	for (i = 0; i < (key_size / sizeof(u32)); i++) {
		iowrite32(key_u32[i],
			  aes_dev->base_reg + AES_KEY_0_OFFSET +
			  (i * sizeof(u32)));
	}
	/*
	 * Write key size
	 * bits [31:1] - reserved
	 * bit [0] - AES_KEY_SIZE
	 *           0 - 128 bit key
	 *           1 - 256 bit key
	 */
	val = (key_size == 16) ? AES_128_BIT_KEY : AES_256_BIT_KEY;
	iowrite32(val, aes_dev->base_reg + AES_KEY_SIZE_OFFSET);

	return 0;
}

/* Write AES_COMMAND */
static inline void set_ocs_aes_command(struct ocs_aes_dev *aes_dev,
				       enum ocs_cipher cipher,
				       enum ocs_mode mode,
				       enum ocs_instruction instruction)
{
	u32 val;

	/* AES_COMMAND
	 * default 0x000000CC
	 * bit [14] - CIPHER_SELECT
	 *            0 - AES
	 *            1 - SM4
	 * bits [11:8] - OCS_AES_MODE
	 *               0000 - ECB
	 *               0001 - CBC
	 *               0010 - CTR
	 *               0110 - CCM
	 *               0111 - GCM
	 *               1001 - CTS
	 * bits [7:6] - AES_INSTRUCTION
	 *              00 - ENCRYPT
	 *              01 - DECRYPT
	 *              10 - EXPAND
	 *              11 - BYPASS
	 * bits [3:2] - CTR_M_BITS
	 *              00 - No increment
	 *              01 - Least significant 32 bits are incremented
	 *              10 - Least significant 64 bits are incremented
	 *              11 - Full 128 bits are incremented
	 */
	val = (cipher << 14) | (mode << 8) | (instruction << 6) |
	      (AES_CTR_M_128_INC << 2);
	iowrite32(val, aes_dev->base_reg + AES_COMMAND_OFFSET);
}

/* Common per-operation initialization: quiesce IRQs, set byte order, command. */
static void ocs_aes_init(struct ocs_aes_dev *aes_dev,
			 enum ocs_mode mode,
			 enum ocs_cipher cipher,
			 enum ocs_instruction instruction)
{
	/* Ensure interrupts are disabled and pending interrupts cleared. */
	aes_irq_disable(aes_dev);

	/* Set endianness recommended by data-sheet. */
	aes_a_set_endianness(aes_dev);

	/* Set AES_COMMAND register.
 */
	set_ocs_aes_command(aes_dev, cipher, mode, instruction);
}

/*
 * Write the byte length of the last AES/SM4 block of Payload data (without
 * zero padding and without the length of the MAC) in register AES_PLEN.
 */
static inline void ocs_aes_write_last_data_blk_len(struct ocs_aes_dev *aes_dev,
						   u32 size)
{
	u32 val;

	if (size == 0) {
		val = 0;
		goto exit;
	}

	/* An exact multiple of the block size is reported as a full block. */
	val = size % AES_BLOCK_SIZE;
	if (val == 0)
		val = AES_BLOCK_SIZE;

exit:
	iowrite32(val, aes_dev->base_reg + AES_PLEN_OFFSET);
}

/*
 * Validate inputs according to mode.
 * If OK return 0; else return -EINVAL.
 */
static int ocs_aes_validate_inputs(dma_addr_t src_dma_list, u32 src_size,
				   const u8 *iv, u32 iv_size,
				   dma_addr_t aad_dma_list, u32 aad_size,
				   const u8 *tag, u32 tag_size,
				   enum ocs_cipher cipher, enum ocs_mode mode,
				   enum ocs_instruction instruction,
				   dma_addr_t dst_dma_list)
{
	/* Ensure cipher, mode and instruction are valid. */
	if (!(cipher == OCS_AES || cipher == OCS_SM4))
		return -EINVAL;

	if (mode != OCS_MODE_ECB && mode != OCS_MODE_CBC &&
	    mode != OCS_MODE_CTR && mode != OCS_MODE_CCM &&
	    mode != OCS_MODE_GCM && mode != OCS_MODE_CTS)
		return -EINVAL;

	if (instruction != OCS_ENCRYPT && instruction != OCS_DECRYPT &&
	    instruction != OCS_EXPAND && instruction != OCS_BYPASS)
		return -EINVAL;

	/*
	 * When instruction is OCS_BYPASS, OCS simply copies data from source
	 * to destination using DMA.
	 *
	 * AES mode is irrelevant, but both source and destination DMA
	 * linked-list must be defined.
	 */
	if (instruction == OCS_BYPASS) {
		if (src_dma_list == DMA_MAPPING_ERROR ||
		    dst_dma_list == DMA_MAPPING_ERROR)
			return -EINVAL;

		return 0;
	}

	/*
	 * For performance reasons switch based on mode to limit unnecessary
	 * conditionals for each mode
	 */
	switch (mode) {
	case OCS_MODE_ECB:
		/* Ensure input length is multiple of block size */
		if (src_size % AES_BLOCK_SIZE != 0)
			return -EINVAL;

		/* Ensure source and destination linked lists are created */
		if (src_dma_list == DMA_MAPPING_ERROR ||
		    dst_dma_list == DMA_MAPPING_ERROR)
			return -EINVAL;

		return 0;

	case OCS_MODE_CBC:
		/* Ensure input length is multiple of block size */
		if (src_size % AES_BLOCK_SIZE != 0)
			return -EINVAL;

		/* Ensure source and destination linked lists are created */
		if (src_dma_list == DMA_MAPPING_ERROR ||
		    dst_dma_list == DMA_MAPPING_ERROR)
			return -EINVAL;

		/* Ensure IV is present and block size in length */
		if (!iv || iv_size != AES_BLOCK_SIZE)
			return -EINVAL;

		return 0;

	case OCS_MODE_CTR:
		/* Ensure input length of 1 byte or greater */
		if (src_size == 0)
			return -EINVAL;

		/* Ensure source and destination linked lists are created */
		if (src_dma_list == DMA_MAPPING_ERROR ||
		    dst_dma_list == DMA_MAPPING_ERROR)
			return -EINVAL;

		/* Ensure IV is present and block size in length */
		if (!iv || iv_size != AES_BLOCK_SIZE)
			return -EINVAL;

		return 0;

	case OCS_MODE_CTS:
		/* Ensure input length >= block size */
		if (src_size < AES_BLOCK_SIZE)
			return -EINVAL;

		/* Ensure source and destination linked lists are created */
		if (src_dma_list == DMA_MAPPING_ERROR ||
		    dst_dma_list == DMA_MAPPING_ERROR)
			return -EINVAL;

		/* Ensure IV is present and block size in length */
		if (!iv || iv_size != AES_BLOCK_SIZE)
			return -EINVAL;

		return 0;

	case OCS_MODE_GCM:
		/* Ensure IV is present and GCM_AES_IV_SIZE in length */
		if (!iv || iv_size != GCM_AES_IV_SIZE)
			return -EINVAL;

		/*
		 * If input data present ensure source and destination linked
		 * lists are created
		 */
		if (src_size && (src_dma_list == DMA_MAPPING_ERROR ||
				 dst_dma_list == DMA_MAPPING_ERROR))
			return -EINVAL;

		/* If aad present ensure aad linked list is created */
		if (aad_size && aad_dma_list == DMA_MAPPING_ERROR)
			return -EINVAL;

		/* Ensure tag destination is set */
		if (!tag)
			return -EINVAL;

		/* Just ensure that tag_size doesn't cause overflows. */
		if (tag_size > (AES_MAX_TAG_SIZE_U32 * sizeof(u32)))
			return -EINVAL;

		return 0;

	case OCS_MODE_CCM:
		/* Ensure IV is present and block size in length */
		if (!iv || iv_size != AES_BLOCK_SIZE)
			return -EINVAL;

		/* 2 <= L <= 8, so 1 <= L' <= 7 */
		if (iv[L_PRIME_IDX] < L_PRIME_MIN ||
		    iv[L_PRIME_IDX] > L_PRIME_MAX)
			return -EINVAL;

		/* If aad present ensure aad linked list is created */
		if (aad_size && aad_dma_list == DMA_MAPPING_ERROR)
			return -EINVAL;

		/* Just ensure that tag_size doesn't cause overflows. */
		if (tag_size > (AES_MAX_TAG_SIZE_U32 * sizeof(u32)))
			return -EINVAL;

		if (instruction == OCS_DECRYPT) {
			/*
			 * If input data present ensure source and destination
			 * linked lists are created
			 */
			if (src_size && (src_dma_list == DMA_MAPPING_ERROR ||
					 dst_dma_list == DMA_MAPPING_ERROR))
				return -EINVAL;

			/* Ensure input tag is present */
			if (!tag)
				return -EINVAL;

			return 0;
		}

		/* Instruction == OCS_ENCRYPT */

		/*
		 * Destination linked list always required (for tag even if no
		 * input data)
		 */
		if (dst_dma_list == DMA_MAPPING_ERROR)
			return -EINVAL;

		/* If input data present ensure src linked list is created */
		if (src_size && src_dma_list == DMA_MAPPING_ERROR)
			return -EINVAL;

		return 0;

	default:
		return -EINVAL;
	}
}

/**
 * ocs_aes_op() - Perform AES/SM4 operation.
 * @aes_dev: The OCS AES device to use.
 * @mode: The mode to use (ECB, CBC, CTR, or CTS).
 * @cipher: The cipher to use (AES or SM4).
 * @instruction: The instruction to perform (encrypt or decrypt).
 * @dst_dma_list: The OCS DMA list mapping output memory.
 * @src_dma_list: The OCS DMA list mapping input payload data.
 * @src_size: The amount of data mapped by @src_dma_list.
 * @iv: The IV vector.
 * @iv_size: The size (in bytes) of @iv.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int ocs_aes_op(struct ocs_aes_dev 
*aes_dev,
	       enum ocs_mode mode,
	       enum ocs_cipher cipher,
	       enum ocs_instruction instruction,
	       dma_addr_t dst_dma_list,
	       dma_addr_t src_dma_list,
	       u32 src_size,
	       u8 *iv,
	       u32 iv_size)
{
	u32 *iv32;
	int rc;

	rc = ocs_aes_validate_inputs(src_dma_list, src_size, iv, iv_size, 0, 0,
				     NULL, 0, cipher, mode, instruction,
				     dst_dma_list);
	if (rc)
		return rc;
	/*
	 * ocs_aes_validate_inputs() is a generic check, now ensure mode is not
	 * GCM or CCM.
	 */
	if (mode == OCS_MODE_GCM || mode == OCS_MODE_CCM)
		return -EINVAL;

	/* Cast IV to u32 array. */
	iv32 = (u32 *)iv;

	ocs_aes_init(aes_dev, mode, cipher, instruction);

	if (mode == OCS_MODE_CTS) {
		/* Write the byte length of the last data block to engine. */
		ocs_aes_write_last_data_blk_len(aes_dev, src_size);
	}

	/* ECB is the only mode that doesn't use IV. */
	if (mode != OCS_MODE_ECB) {
		iowrite32(iv32[0], aes_dev->base_reg + AES_IV_0_OFFSET);
		iowrite32(iv32[1], aes_dev->base_reg + AES_IV_1_OFFSET);
		iowrite32(iv32[2], aes_dev->base_reg + AES_IV_2_OFFSET);
		iowrite32(iv32[3], aes_dev->base_reg + AES_IV_3_OFFSET);
	}

	/* Set AES_ACTIVE.TRIGGER to start the operation. */
	aes_a_op_trigger(aes_dev);

	/* Configure and activate input / output DMA. */
	dma_to_ocs_aes_ll(aes_dev, src_dma_list);
	dma_from_ocs_aes_ll(aes_dev, dst_dma_list);
	aes_a_dma_active_src_dst_ll_en(aes_dev);

	if (mode == OCS_MODE_CTS) {
		/*
		 * For CTS mode, instruct engine to activate ciphertext
		 * stealing if last block of data is incomplete.
		 */
		aes_a_set_last_gcx(aes_dev);
	} else {
		/* For all other modes, just write the 'termination' bit. */
		aes_a_op_termination(aes_dev);
	}

	/* Wait for engine to complete processing. */
	rc = ocs_aes_irq_enable_and_wait(aes_dev, AES_COMPLETE_INT);
	if (rc)
		return rc;

	if (mode == OCS_MODE_CTR) {
		/* Read back IV for streaming mode */
		iv32[0] = ioread32(aes_dev->base_reg + AES_IV_0_OFFSET);
		iv32[1] = ioread32(aes_dev->base_reg + AES_IV_1_OFFSET);
		iv32[2] = ioread32(aes_dev->base_reg + AES_IV_2_OFFSET);
		iv32[3] = ioread32(aes_dev->base_reg + AES_IV_3_OFFSET);
	}

	return 0;
}

/* Compute and write J0 to engine registers. */
static void ocs_aes_gcm_write_j0(const struct ocs_aes_dev *aes_dev,
				 const u8 *iv)
{
	const u32 *j0 = (u32 *)iv;

	/*
	 * IV must be 12 bytes; Other sizes not supported as Linux crypto API
	 * does only expects/allows 12 byte IV for GCM
	 */
	iowrite32(0x00000001, aes_dev->base_reg + AES_IV_0_OFFSET);
	iowrite32(__swab32(j0[2]), aes_dev->base_reg + AES_IV_1_OFFSET);
	iowrite32(__swab32(j0[1]), aes_dev->base_reg + AES_IV_2_OFFSET);
	iowrite32(__swab32(j0[0]), aes_dev->base_reg + AES_IV_3_OFFSET);
}

/* Read GCM tag from engine registers.
 */
static inline void ocs_aes_gcm_read_tag(struct ocs_aes_dev *aes_dev,
					u8 *tag, u32 tag_size)
{
	u32 tag_u32[AES_MAX_TAG_SIZE_U32];

	/*
	 * The Authentication Tag T is stored in Little Endian order in the
	 * registers with the most significant bytes stored from AES_T_MAC[3]
	 * downward.
	 */
	tag_u32[0] = __swab32(ioread32(aes_dev->base_reg + AES_T_MAC_3_OFFSET));
	tag_u32[1] = __swab32(ioread32(aes_dev->base_reg + AES_T_MAC_2_OFFSET));
	tag_u32[2] = __swab32(ioread32(aes_dev->base_reg + AES_T_MAC_1_OFFSET));
	tag_u32[3] = __swab32(ioread32(aes_dev->base_reg + AES_T_MAC_0_OFFSET));

	/* Caller may request fewer bytes than the full 16-byte tag. */
	memcpy(tag, tag_u32, tag_size);
}

/**
 * ocs_aes_gcm_op() - Perform GCM operation.
 * @aes_dev: The OCS AES device to use.
 * @cipher: The Cipher to use (AES or SM4).
 * @instruction: The instruction to perform (encrypt or decrypt).
 * @dst_dma_list: The OCS DMA list mapping output memory.
 * @src_dma_list: The OCS DMA list mapping input payload data.
 * @src_size: The amount of data mapped by @src_dma_list.
 * @iv: The input IV vector.
 * @aad_dma_list: The OCS DMA list mapping input AAD data.
 * @aad_size: The amount of data mapped by @aad_dma_list.
 * @out_tag: Where to store computed tag.
 * @tag_size: The size (in bytes) of @out_tag.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int ocs_aes_gcm_op(struct ocs_aes_dev *aes_dev,
		   enum ocs_cipher cipher,
		   enum ocs_instruction instruction,
		   dma_addr_t dst_dma_list,
		   dma_addr_t src_dma_list,
		   u32 src_size,
		   const u8 *iv,
		   dma_addr_t aad_dma_list,
		   u32 aad_size,
		   u8 *out_tag,
		   u32 tag_size)
{
	u64 bit_len;
	u32 val;
	int rc;

	rc = ocs_aes_validate_inputs(src_dma_list, src_size, iv,
				     GCM_AES_IV_SIZE, aad_dma_list,
				     aad_size, out_tag, tag_size, cipher,
				     OCS_MODE_GCM, instruction,
				     dst_dma_list);
	if (rc)
		return rc;

	ocs_aes_init(aes_dev, OCS_MODE_GCM, cipher, instruction);

	/* Compute and write J0 to OCS HW. */
	ocs_aes_gcm_write_j0(aes_dev, iv);

	/* Write out_tag byte length */
	iowrite32(tag_size, aes_dev->base_reg + AES_TLEN_OFFSET);

	/* Write the byte length of the last plaintext / ciphertext block. */
	ocs_aes_write_last_data_blk_len(aes_dev, src_size);

	/* Write ciphertext bit length */
	bit_len = (u64)src_size * 8;
	val = bit_len & 0xFFFFFFFF;
	iowrite32(val, aes_dev->base_reg + AES_MULTIPURPOSE2_0_OFFSET);
	val = bit_len >> 32;
	iowrite32(val, aes_dev->base_reg + AES_MULTIPURPOSE2_1_OFFSET);

	/* Write aad bit length */
	bit_len = (u64)aad_size * 8;
	val = bit_len & 0xFFFFFFFF;
	iowrite32(val, aes_dev->base_reg + AES_MULTIPURPOSE2_2_OFFSET);
	val = bit_len >> 32;
	iowrite32(val, aes_dev->base_reg + AES_MULTIPURPOSE2_3_OFFSET);

	/* Set AES_ACTIVE.TRIGGER to start the operation. */
	aes_a_op_trigger(aes_dev);

	/* Process AAD. */
	if (aad_size) {
		/* If aad present, configure DMA to feed it to the engine. */
		dma_to_ocs_aes_ll(aes_dev, aad_dma_list);
		aes_a_dma_active_src_ll_en(aes_dev);

		/* Instructs engine to pad last block of aad, if needed. */
		aes_a_set_last_gcx_and_adata(aes_dev);

		/* Wait for DMA transfer to complete. */
		rc = ocs_aes_irq_enable_and_wait(aes_dev, AES_DMA_SRC_DONE_INT);
		if (rc)
			return rc;
	} else {
		aes_a_set_last_gcx_and_adata(aes_dev);
	}

	/* Wait until adata (if present) has been processed. */
	aes_a_wait_last_gcx(aes_dev);
	aes_a_dma_wait_input_buffer_occupancy(aes_dev);

	/* Now process payload. */
	if (src_size) {
		/* Configure and activate DMA for both input and output data. */
		dma_to_ocs_aes_ll(aes_dev, src_dma_list);
		dma_from_ocs_aes_ll(aes_dev, dst_dma_list);
		aes_a_dma_active_src_dst_ll_en(aes_dev);
	} else {
		aes_a_dma_set_xfer_size_zero(aes_dev);
		aes_a_dma_active(aes_dev);
	}

	/* Instruct AES/SMA4 engine payload processing is over. */
	aes_a_set_last_gcx(aes_dev);

	/* Wait for OCS AES engine to complete processing. */
	rc = ocs_aes_irq_enable_and_wait(aes_dev, AES_COMPLETE_INT);
	if (rc)
		return rc;

	ocs_aes_gcm_read_tag(aes_dev, out_tag, tag_size);

	return 0;
}

/* Write encrypted tag to AES/SM4 engine. */
static void ocs_aes_ccm_write_encrypted_tag(struct ocs_aes_dev *aes_dev,
					    const u8 *in_tag, u32 tag_size)
{
	int i;

	/* Ensure DMA input buffer is empty */
	aes_a_dma_wait_input_buffer_occupancy(aes_dev);

	/*
	 * During CCM decrypt, the OCS block needs to finish processing the
	 * ciphertext before the tag is written. So delay needed after DMA has
	 * completed writing the ciphertext
	 */
	aes_a_dma_reset_and_activate_perf_cntr(aes_dev);
	aes_a_dma_wait_and_deactivate_perf_cntr(aes_dev,
						CCM_DECRYPT_DELAY_TAG_CLK_COUNT);

	/* Write encrypted tag to AES/SM4 engine, one byte at a time. */
	for (i = 0; i < tag_size; i++) {
		iowrite8(in_tag[i], aes_dev->base_reg +
				    AES_A_DMA_INBUFFER_WRITE_FIFO_OFFSET);
	}
}

/*
 * Write B0 CCM block to OCS AES HW.
 *
 * Note: B0 format is documented in NIST Special Publication 800-38C
 * https://nvlpubs.nist.gov/nistpubs/Legacy/SP/nistspecialpublication800-38c.pdf
 * (see Section A.2.1)
 */
static int ocs_aes_ccm_write_b0(const struct ocs_aes_dev *aes_dev,
				const u8 *iv, u32 adata_size, u32 tag_size,
				u32 cryptlen)
{
	u8 b0[16]; /* CCM B0 block is 16 bytes long. */
	int i, q;

	/* Initialize B0 to 0. */
	memset(b0, 0, sizeof(b0));

	/*
	 * B0[0] is the 'Flags Octet' and has the following structure:
	 *   bit 7: Reserved
	 *   bit 6: Adata flag
	 *   bit 5-3: t value encoded as (t-2)/2
	 *   bit 2-0: q value encoded as q - 1
	 */
	/* If there is AAD data, set the Adata flag.
*/1072if (adata_size)1073b0[0] |= BIT(6);1074/*1075* t denotes the octet length of T.1076* t can only be an element of { 4, 6, 8, 10, 12, 14, 16} and is1077* encoded as (t - 2) / 21078*/1079b0[0] |= (((tag_size - 2) / 2) & 0x7) << 3;1080/*1081* q is the octet length of Q.1082* q can only be an element of {2, 3, 4, 5, 6, 7, 8} and is encoded as1083* q - 1 == iv[0] & 0x7;1084*/1085b0[0] |= iv[0] & 0x7;1086/*1087* Copy the Nonce N from IV to B0; N is located in iv[1]..iv[15 - q]1088* and must be copied to b0[1]..b0[15-q].1089* q == (iv[0] & 0x7) + 11090*/1091q = (iv[0] & 0x7) + 1;1092for (i = 1; i <= 15 - q; i++)1093b0[i] = iv[i];1094/*1095* The rest of B0 must contain Q, i.e., the message length.1096* Q is encoded in q octets, in big-endian order, so to write it, we1097* start from the end of B0 and we move backward.1098*/1099i = sizeof(b0) - 1;1100while (q) {1101b0[i] = cryptlen & 0xff;1102cryptlen >>= 8;1103i--;1104q--;1105}1106/*1107* If cryptlen is not zero at this point, it means that its original1108* value was too big.1109*/1110if (cryptlen)1111return -EOVERFLOW;1112/* Now write B0 to OCS AES input buffer. */1113for (i = 0; i < sizeof(b0); i++)1114iowrite8(b0[i], aes_dev->base_reg +1115AES_A_DMA_INBUFFER_WRITE_FIFO_OFFSET);1116return 0;1117}11181119/*1120* Write adata length to OCS AES HW.1121*1122* Note: adata len encoding is documented in NIST Special Publication 800-38C1123* https://nvlpubs.nist.gov/nistpubs/Legacy/SP/nistspecialpublication800-38c.pdf1124* (see Section A.2.2)1125*/1126static void ocs_aes_ccm_write_adata_len(const struct ocs_aes_dev *aes_dev,1127u64 adata_len)1128{1129u8 enc_a[10]; /* Maximum encoded size: 10 octets. 
 */
	int i, len;

	/*
	 * adata_len ('a') is encoded as follows:
	 * If 0 < a < 2^16 - 2^8  ==> 'a' encoded as [a]16, i.e., two octets
	 *	(big endian).
	 * If 2^16 - 2^8 <= a < 2^32 ==> 'a' encoded as 0xff || 0xfe || [a]32,
	 *	i.e., six octets (big endian).
	 * If 2^32 <= a < 2^64 ==> 'a' encoded as 0xff || 0xff || [a]64,
	 *	i.e., ten octets (big endian).
	 *
	 * NOTE(review): the __be32/__be64 stores below go through
	 * &enc_a[2], which is only 2-byte aligned; on architectures that
	 * fault on unaligned accesses put_unaligned_be32/64() would be the
	 * safe idiom -- confirm target architecture tolerates this.
	 */
	if (adata_len < 65280) {
		len = 2;
		*(__be16 *)enc_a = cpu_to_be16(adata_len);
	} else if (adata_len <= 0xFFFFFFFF) {
		len = 6;
		*(__be16 *)enc_a = cpu_to_be16(0xfffe);
		*(__be32 *)&enc_a[2] = cpu_to_be32(adata_len);
	} else { /* adata_len >= 2^32 */
		len = 10;
		*(__be16 *)enc_a = cpu_to_be16(0xffff);
		*(__be64 *)&enc_a[2] = cpu_to_be64(adata_len);
	}
	/* Stream the encoded length into the engine's input FIFO. */
	for (i = 0; i < len; i++)
		iowrite8(enc_a[i],
			 aes_dev->base_reg +
			 AES_A_DMA_INBUFFER_WRITE_FIFO_OFFSET);
}

/*
 * Process the Associated Data (adata) phase of a CCM operation: write the
 * encoded adata length, DMA in the adata itself (if any), and wait until the
 * engine has consumed it.
 *
 * Returns 0 on success, or the negative error code from the DMA-done wait.
 */
static int ocs_aes_ccm_do_adata(struct ocs_aes_dev *aes_dev,
				dma_addr_t adata_dma_list, u32 adata_size)
{
	int rc;

	if (!adata_size) {
		/* Since no aad the LAST_GCX bit can be set now */
		aes_a_set_last_gcx_and_adata(aes_dev);
		goto exit;
	}

	/* Adata case. */

	/*
	 * Form the encoding of the Associated data length and write it
	 * to the AES/SM4 input buffer.
	 */
	ocs_aes_ccm_write_adata_len(aes_dev, adata_size);

	/* Configure the AES/SM4 DMA to fetch the Associated Data */
	dma_to_ocs_aes_ll(aes_dev, adata_dma_list);

	/* Activate DMA to fetch Associated data. */
	aes_a_dma_active_src_ll_en(aes_dev);

	/* Set LAST_GCX and LAST_ADATA in AES ACTIVE register. */
	aes_a_set_last_gcx_and_adata(aes_dev);

	/* Wait for DMA transfer to complete. */
	rc = ocs_aes_irq_enable_and_wait(aes_dev, AES_DMA_SRC_DONE_INT);
	if (rc)
		return rc;

exit:
	/* Wait until adata (if present) has been processed. */
	aes_a_wait_last_gcx(aes_dev);
	aes_a_dma_wait_input_buffer_occupancy(aes_dev);

	return 0;
}

/*
 * Process the payload phase of a CCM encrypt: DMA the plaintext in (if any),
 * DMA the ciphertext+tag out, and wait for the engine to complete.
 */
static int ocs_aes_ccm_encrypt_do_payload(struct ocs_aes_dev *aes_dev,
					  dma_addr_t dst_dma_list,
					  dma_addr_t src_dma_list,
					  u32 src_size)
{
	if (src_size) {
		/*
		 * Configure and activate DMA for both input and output
		 * data.
		 */
		dma_to_ocs_aes_ll(aes_dev, src_dma_list);
		dma_from_ocs_aes_ll(aes_dev, dst_dma_list);
		aes_a_dma_active_src_dst_ll_en(aes_dev);
	} else {
		/* Configure and activate DMA for output data only. */
		dma_from_ocs_aes_ll(aes_dev, dst_dma_list);
		aes_a_dma_active_dst_ll_en(aes_dev);
	}

	/*
	 * Set the LAST GCX bit in AES_ACTIVE Register to instruct
	 * AES/SM4 engine to pad the last block of data.
	 */
	aes_a_set_last_gcx(aes_dev);

	/* We are done, wait for IRQ and return. */
	return ocs_aes_irq_enable_and_wait(aes_dev, AES_COMPLETE_INT);
}

/*
 * Process the payload phase of a CCM decrypt: DMA the ciphertext in and the
 * plaintext out, then wait for the source-DMA-done interrupt (the tag is
 * processed afterwards by the caller).
 */
static int ocs_aes_ccm_decrypt_do_payload(struct ocs_aes_dev *aes_dev,
					  dma_addr_t dst_dma_list,
					  dma_addr_t src_dma_list,
					  u32 src_size)
{
	if (!src_size) {
		/* Let engine process 0-length input. */
		aes_a_dma_set_xfer_size_zero(aes_dev);
		aes_a_dma_active(aes_dev);
		aes_a_set_last_gcx(aes_dev);

		return 0;
	}

	/*
	 * Configure and activate DMA for both input and output
	 * data.
	 */
	dma_to_ocs_aes_ll(aes_dev, src_dma_list);
	dma_from_ocs_aes_ll(aes_dev, dst_dma_list);
	aes_a_dma_active_src_dst_ll_en(aes_dev);
	/*
	 * Set the LAST GCX bit in AES_ACTIVE Register; this allows the
	 * AES/SM4 engine to differentiate between encrypted data and
	 * encrypted MAC.
	 */
	aes_a_set_last_gcx(aes_dev);
	/*
	 * Enable DMA DONE interrupt; once DMA transfer is over,
	 * interrupt handler will process the MAC/tag.
	 */
	return ocs_aes_irq_enable_and_wait(aes_dev, AES_DMA_SRC_DONE_INT);
}

/*
 * Compare Tag to Yr.
 *
 * Only used at the end of CCM decrypt.
If tag == yr, message authentication1267* has succeeded.1268*/1269static inline int ccm_compare_tag_to_yr(struct ocs_aes_dev *aes_dev,1270u8 tag_size_bytes)1271{1272u32 tag[AES_MAX_TAG_SIZE_U32];1273u32 yr[AES_MAX_TAG_SIZE_U32];1274u8 i;12751276/* Read Tag and Yr from AES registers. */1277for (i = 0; i < AES_MAX_TAG_SIZE_U32; i++) {1278tag[i] = ioread32(aes_dev->base_reg +1279AES_T_MAC_0_OFFSET + (i * sizeof(u32)));1280yr[i] = ioread32(aes_dev->base_reg +1281AES_MULTIPURPOSE2_0_OFFSET +1282(i * sizeof(u32)));1283}12841285return memcmp(tag, yr, tag_size_bytes) ? -EBADMSG : 0;1286}12871288/**1289* ocs_aes_ccm_op() - Perform CCM operation.1290* @aes_dev: The OCS AES device to use.1291* @cipher: The Cipher to use (AES or SM4).1292* @instruction: The instruction to perform (encrypt or decrypt).1293* @dst_dma_list: The OCS DMA list mapping output memory.1294* @src_dma_list: The OCS DMA list mapping input payload data.1295* @src_size: The amount of data mapped by @src_dma_list.1296* @iv: The input IV vector.1297* @adata_dma_list: The OCS DMA list mapping input A-data.1298* @adata_size: The amount of data mapped by @adata_dma_list.1299* @in_tag: Input tag.1300* @tag_size: The size (in bytes) of @in_tag.1301*1302* Note: for encrypt the tag is appended to the ciphertext (in the memory1303* mapped by @dst_dma_list).1304*1305* Return: 0 on success, negative error code otherwise.1306*/1307int ocs_aes_ccm_op(struct ocs_aes_dev *aes_dev,1308enum ocs_cipher cipher,1309enum ocs_instruction instruction,1310dma_addr_t dst_dma_list,1311dma_addr_t src_dma_list,1312u32 src_size,1313u8 *iv,1314dma_addr_t adata_dma_list,1315u32 adata_size,1316u8 *in_tag,1317u32 tag_size)1318{1319u32 *iv_32;1320u8 lprime;1321int rc;13221323rc = ocs_aes_validate_inputs(src_dma_list, src_size, iv,1324AES_BLOCK_SIZE, adata_dma_list, adata_size,1325in_tag, tag_size, cipher, OCS_MODE_CCM,1326instruction, dst_dma_list);1327if (rc)1328return rc;13291330ocs_aes_init(aes_dev, OCS_MODE_CCM, cipher, 
instruction);13311332/*1333* Note: rfc 3610 and NIST 800-38C require counter of zero to encrypt1334* auth tag so ensure this is the case1335*/1336lprime = iv[L_PRIME_IDX];1337memset(&iv[COUNTER_START(lprime)], 0, COUNTER_LEN(lprime));13381339/*1340* Nonce is already converted to ctr0 before being passed into this1341* function as iv.1342*/1343iv_32 = (u32 *)iv;1344iowrite32(__swab32(iv_32[0]),1345aes_dev->base_reg + AES_MULTIPURPOSE1_3_OFFSET);1346iowrite32(__swab32(iv_32[1]),1347aes_dev->base_reg + AES_MULTIPURPOSE1_2_OFFSET);1348iowrite32(__swab32(iv_32[2]),1349aes_dev->base_reg + AES_MULTIPURPOSE1_1_OFFSET);1350iowrite32(__swab32(iv_32[3]),1351aes_dev->base_reg + AES_MULTIPURPOSE1_0_OFFSET);13521353/* Write MAC/tag length in register AES_TLEN */1354iowrite32(tag_size, aes_dev->base_reg + AES_TLEN_OFFSET);1355/*1356* Write the byte length of the last AES/SM4 block of Payload data1357* (without zero padding and without the length of the MAC) in register1358* AES_PLEN.1359*/1360ocs_aes_write_last_data_blk_len(aes_dev, src_size);13611362/* Set AES_ACTIVE.TRIGGER to start the operation. */1363aes_a_op_trigger(aes_dev);13641365aes_a_dma_reset_and_activate_perf_cntr(aes_dev);13661367/* Form block B0 and write it to the AES/SM4 input buffer. */1368rc = ocs_aes_ccm_write_b0(aes_dev, iv, adata_size, tag_size, src_size);1369if (rc)1370return rc;1371/*1372* Ensure there has been at least CCM_DECRYPT_DELAY_LAST_GCX_CLK_COUNT1373* clock cycles since TRIGGER bit was set1374*/1375aes_a_dma_wait_and_deactivate_perf_cntr(aes_dev,1376CCM_DECRYPT_DELAY_LAST_GCX_CLK_COUNT);13771378/* Process Adata. */1379ocs_aes_ccm_do_adata(aes_dev, adata_dma_list, adata_size);13801381/* For Encrypt case we just process the payload and return. */1382if (instruction == OCS_ENCRYPT) {1383return ocs_aes_ccm_encrypt_do_payload(aes_dev, dst_dma_list,1384src_dma_list, src_size);1385}1386/* For Decypt we need to process the payload and then the tag. 
*/1387rc = ocs_aes_ccm_decrypt_do_payload(aes_dev, dst_dma_list,1388src_dma_list, src_size);1389if (rc)1390return rc;13911392/* Process MAC/tag directly: feed tag to engine and wait for IRQ. */1393ocs_aes_ccm_write_encrypted_tag(aes_dev, in_tag, tag_size);1394rc = ocs_aes_irq_enable_and_wait(aes_dev, AES_COMPLETE_INT);1395if (rc)1396return rc;13971398return ccm_compare_tag_to_yr(aes_dev, tag_size);1399}14001401/**1402* ocs_create_linked_list_from_sg() - Create OCS DMA linked list from SG list.1403* @aes_dev: The OCS AES device the list will be created for.1404* @sg: The SG list OCS DMA linked list will be created from. When1405* passed to this function, @sg must have been already mapped1406* with dma_map_sg().1407* @sg_dma_count: The number of DMA-mapped entries in @sg. This must be the1408* value returned by dma_map_sg() when @sg was mapped.1409* @dll_desc: The OCS DMA dma_list to use to store information about the1410* created linked list.1411* @data_size: The size of the data (from the SG list) to be mapped into the1412* OCS DMA linked list.1413* @data_offset: The offset (within the SG list) of the data to be mapped.1414*1415* Return: 0 on success, negative error code otherwise.1416*/1417int ocs_create_linked_list_from_sg(const struct ocs_aes_dev *aes_dev,1418struct scatterlist *sg,1419int sg_dma_count,1420struct ocs_dll_desc *dll_desc,1421size_t data_size, size_t data_offset)1422{1423struct ocs_dma_linked_list *ll = NULL;1424struct scatterlist *sg_tmp;1425unsigned int tmp;1426int dma_nents;1427int i;14281429if (!dll_desc || !sg || !aes_dev)1430return -EINVAL;14311432/* Default values for when no ddl_desc is created. */1433dll_desc->vaddr = NULL;1434dll_desc->dma_addr = DMA_MAPPING_ERROR;1435dll_desc->size = 0;14361437if (data_size == 0)1438return 0;14391440/* Loop over sg_list until we reach entry at specified offset. 
 */
	while (data_offset >= sg_dma_len(sg)) {
		data_offset -= sg_dma_len(sg);
		sg_dma_count--;
		sg = sg_next(sg);
		/* If we reach the end of the list, offset was invalid. */
		if (!sg || sg_dma_count == 0)
			return -EINVAL;
	}

	/* Compute number of DMA-mapped SG entries to add into OCS DMA list. */
	dma_nents = 0;
	tmp = 0;
	sg_tmp = sg;
	while (tmp < data_offset + data_size) {
		/* If we reach the end of the list, data_size was invalid. */
		if (!sg_tmp)
			return -EINVAL;
		tmp += sg_dma_len(sg_tmp);
		dma_nents++;
		sg_tmp = sg_next(sg_tmp);
	}
	/* Cannot map more entries than the caller DMA-mapped. */
	if (dma_nents > sg_dma_count)
		return -EINVAL;

	/* Allocate the DMA list, one entry for each SG entry. */
	dll_desc->size = sizeof(struct ocs_dma_linked_list) * dma_nents;
	dll_desc->vaddr = dma_alloc_coherent(aes_dev->dev, dll_desc->size,
					     &dll_desc->dma_addr, GFP_KERNEL);
	if (!dll_desc->vaddr)
		return -ENOMEM;

	/*
	 * Populate DMA linked list entries.
	 * Note: data_size > 0 here (checked above), so the loop executes at
	 * least once and the ll[i - 1] accesses below are always valid.
	 */
	ll = dll_desc->vaddr;
	for (i = 0; i < dma_nents; i++, sg = sg_next(sg)) {
		/* data_offset is non-zero only for the first entry. */
		ll[i].src_addr = sg_dma_address(sg) + data_offset;
		ll[i].src_len = min(sg_dma_len(sg) - data_offset, data_size);
		data_offset = 0;
		data_size -= ll[i].src_len;
		/* Current element points to the DMA address of the next one. */
		ll[i].next = dll_desc->dma_addr + (sizeof(*ll) * (i + 1));
		ll[i].ll_flags = 0;
	}
	/* Terminate last element. */
	ll[i - 1].next = 0;
	ll[i - 1].ll_flags = OCS_LL_DMA_FLAG_TERMINATE;

	return 0;
}