/* drivers/crypto/hisilicon/hpre/hpre_crypto.c */
// SPDX-License-Identifier: GPL-2.01/* Copyright (c) 2019 HiSilicon Limited. */2#include <crypto/akcipher.h>3#include <crypto/dh.h>4#include <crypto/ecc_curve.h>5#include <crypto/ecdh.h>6#include <crypto/rng.h>7#include <crypto/internal/akcipher.h>8#include <crypto/internal/kpp.h>9#include <crypto/internal/rsa.h>10#include <crypto/kpp.h>11#include <crypto/scatterwalk.h>12#include <linux/dma-mapping.h>13#include <linux/fips.h>14#include <linux/module.h>15#include <linux/time.h>16#include "hpre.h"1718struct hpre_ctx;1920#define HPRE_CRYPTO_ALG_PRI 100021#define HPRE_ALIGN_SZ 6422#define HPRE_BITS_2_BYTES_SHIFT 323#define HPRE_RSA_512BITS_KSZ 6424#define HPRE_RSA_1536BITS_KSZ 19225#define HPRE_CRT_PRMS 526#define HPRE_CRT_Q 227#define HPRE_CRT_P 328#define HPRE_CRT_INV 429#define HPRE_DH_G_FLAG 0x0230#define HPRE_TRY_SEND_TIMES 10031#define HPRE_INVLD_REQ_ID (-1)3233#define HPRE_SQE_ALG_BITS 534#define HPRE_SQE_DONE_SHIFT 3035#define HPRE_DH_MAX_P_SZ 5123637#define HPRE_DFX_SEC_TO_US 100000038#define HPRE_DFX_US_TO_NS 10003940#define HPRE_ENABLE_HPCORE_SHIFT 74142/* due to nist p521 */43#define HPRE_ECC_MAX_KSZ 664445/* size in bytes of the n prime */46#define HPRE_ECC_NIST_P192_N_SIZE 2447#define HPRE_ECC_NIST_P256_N_SIZE 3248#define HPRE_ECC_NIST_P384_N_SIZE 484950/* size in bytes */51#define HPRE_ECC_HW256_KSZ_B 3252#define HPRE_ECC_HW384_KSZ_B 485354/* capability register mask of driver */55#define HPRE_DRV_RSA_MASK_CAP BIT(0)56#define HPRE_DRV_DH_MASK_CAP BIT(1)57#define HPRE_DRV_ECDH_MASK_CAP BIT(2)58#define HPRE_DRV_X25519_MASK_CAP BIT(5)5960static DEFINE_MUTEX(hpre_algs_lock);61static unsigned int hpre_available_devs;6263typedef void (*hpre_cb)(struct hpre_ctx *ctx, void *sqe);6465struct hpre_rsa_ctx {66/* low address: e--->n */67char *pubkey;68dma_addr_t dma_pubkey;6970/* low address: d--->n */71char *prikey;72dma_addr_t dma_prikey;7374/* low address: dq->dp->q->p->qinv */75char *crt_prikey;76dma_addr_t dma_crt_prikey;7778struct crypto_akcipher 
*soft_tfm;79};8081struct hpre_dh_ctx {82/*83* If base is g we compute the public key84* ya = g^xa mod p; [RFC2631 sec 2.1.1]85* else if base if the counterpart public key we86* compute the shared secret87* ZZ = yb^xa mod p; [RFC2631 sec 2.1.1]88* low address: d--->n, please refer to Hisilicon HPRE UM89*/90char *xa_p;91dma_addr_t dma_xa_p;9293char *g; /* m */94dma_addr_t dma_g;95};9697struct hpre_ecdh_ctx {98/* low address: p->a->k->b */99unsigned char *p;100dma_addr_t dma_p;101102/* low address: x->y */103unsigned char *g;104dma_addr_t dma_g;105};106107struct hpre_ctx {108struct hisi_qp *qp;109struct device *dev;110struct hpre_asym_request **req_list;111struct hpre *hpre;112spinlock_t req_lock;113unsigned int key_sz;114bool crt_g2_mode;115struct idr req_idr;116union {117struct hpre_rsa_ctx rsa;118struct hpre_dh_ctx dh;119struct hpre_ecdh_ctx ecdh;120};121/* for ecc algorithms */122unsigned int curve_id;123/* for high performance core */124u8 enable_hpcore;125};126127struct hpre_asym_request {128char *src;129char *dst;130struct hpre_sqe req;131struct hpre_ctx *ctx;132union {133struct akcipher_request *rsa;134struct kpp_request *dh;135struct kpp_request *ecdh;136} areq;137int err;138int req_id;139hpre_cb cb;140struct timespec64 req_time;141};142143static inline unsigned int hpre_align_sz(void)144{145return ((crypto_dma_align() - 1) | (HPRE_ALIGN_SZ - 1)) + 1;146}147148static inline unsigned int hpre_align_pd(void)149{150return (hpre_align_sz() - 1) & ~(crypto_tfm_ctx_alignment() - 1);151}152153static int hpre_alloc_req_id(struct hpre_ctx *ctx)154{155unsigned long flags;156int id;157158spin_lock_irqsave(&ctx->req_lock, flags);159id = idr_alloc(&ctx->req_idr, NULL, 0, ctx->qp->sq_depth, GFP_ATOMIC);160spin_unlock_irqrestore(&ctx->req_lock, flags);161162return id;163}164165static void hpre_free_req_id(struct hpre_ctx *ctx, int req_id)166{167unsigned long flags;168169spin_lock_irqsave(&ctx->req_lock, flags);170idr_remove(&ctx->req_idr, 
req_id);171spin_unlock_irqrestore(&ctx->req_lock, flags);172}173174static int hpre_add_req_to_ctx(struct hpre_asym_request *hpre_req)175{176struct hpre_ctx *ctx;177struct hpre_dfx *dfx;178int id;179180ctx = hpre_req->ctx;181id = hpre_alloc_req_id(ctx);182if (unlikely(id < 0))183return -EINVAL;184185ctx->req_list[id] = hpre_req;186hpre_req->req_id = id;187188dfx = ctx->hpre->debug.dfx;189if (atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value))190ktime_get_ts64(&hpre_req->req_time);191192return id;193}194195static void hpre_rm_req_from_ctx(struct hpre_asym_request *hpre_req)196{197struct hpre_ctx *ctx = hpre_req->ctx;198int id = hpre_req->req_id;199200if (hpre_req->req_id >= 0) {201hpre_req->req_id = HPRE_INVLD_REQ_ID;202ctx->req_list[id] = NULL;203hpre_free_req_id(ctx, id);204}205}206207static struct hisi_qp *hpre_get_qp_and_start(u8 type)208{209struct hisi_qp *qp;210int ret;211212qp = hpre_create_qp(type);213if (!qp) {214pr_err("Can not create hpre qp!\n");215return ERR_PTR(-ENODEV);216}217218ret = hisi_qm_start_qp(qp, 0);219if (ret < 0) {220hisi_qm_free_qps(&qp, 1);221pci_err(qp->qm->pdev, "Can not start qp!\n");222return ERR_PTR(-EINVAL);223}224225return qp;226}227228static int hpre_get_data_dma_addr(struct hpre_asym_request *hpre_req,229struct scatterlist *data, unsigned int len,230int is_src, dma_addr_t *tmp)231{232struct device *dev = hpre_req->ctx->dev;233enum dma_data_direction dma_dir;234235if (is_src) {236hpre_req->src = NULL;237dma_dir = DMA_TO_DEVICE;238} else {239hpre_req->dst = NULL;240dma_dir = DMA_FROM_DEVICE;241}242*tmp = dma_map_single(dev, sg_virt(data), len, dma_dir);243if (unlikely(dma_mapping_error(dev, *tmp))) {244dev_err(dev, "dma map data err!\n");245return -ENOMEM;246}247248return 0;249}250251static int hpre_prepare_dma_buf(struct hpre_asym_request *hpre_req,252struct scatterlist *data, unsigned int len,253int is_src, dma_addr_t *tmp)254{255struct hpre_ctx *ctx = hpre_req->ctx;256struct device *dev = ctx->dev;257void *ptr;258int 
shift;259260shift = ctx->key_sz - len;261if (unlikely(shift < 0))262return -EINVAL;263264ptr = dma_alloc_coherent(dev, ctx->key_sz, tmp, GFP_ATOMIC);265if (unlikely(!ptr))266return -ENOMEM;267268if (is_src) {269scatterwalk_map_and_copy(ptr + shift, data, 0, len, 0);270hpre_req->src = ptr;271} else {272hpre_req->dst = ptr;273}274275return 0;276}277278static int hpre_hw_data_init(struct hpre_asym_request *hpre_req,279struct scatterlist *data, unsigned int len,280int is_src, int is_dh)281{282struct hpre_sqe *msg = &hpre_req->req;283struct hpre_ctx *ctx = hpre_req->ctx;284dma_addr_t tmp = 0;285int ret;286287/* when the data is dh's source, we should format it */288if ((sg_is_last(data) && len == ctx->key_sz) &&289((is_dh && !is_src) || !is_dh))290ret = hpre_get_data_dma_addr(hpre_req, data, len, is_src, &tmp);291else292ret = hpre_prepare_dma_buf(hpre_req, data, len, is_src, &tmp);293294if (unlikely(ret))295return ret;296297if (is_src)298msg->in = cpu_to_le64(tmp);299else300msg->out = cpu_to_le64(tmp);301302return 0;303}304305static void hpre_hw_data_clr_all(struct hpre_ctx *ctx,306struct hpre_asym_request *req,307struct scatterlist *dst,308struct scatterlist *src)309{310struct device *dev = ctx->dev;311struct hpre_sqe *sqe = &req->req;312dma_addr_t tmp;313314tmp = le64_to_cpu(sqe->in);315if (unlikely(dma_mapping_error(dev, tmp)))316return;317318if (src) {319if (req->src)320dma_free_coherent(dev, ctx->key_sz, req->src, tmp);321else322dma_unmap_single(dev, tmp, ctx->key_sz, DMA_TO_DEVICE);323}324325tmp = le64_to_cpu(sqe->out);326if (unlikely(dma_mapping_error(dev, tmp)))327return;328329if (req->dst) {330if (dst)331scatterwalk_map_and_copy(req->dst, dst, 0,332ctx->key_sz, 1);333dma_free_coherent(dev, ctx->key_sz, req->dst, tmp);334} else {335dma_unmap_single(dev, tmp, ctx->key_sz, DMA_FROM_DEVICE);336}337}338339static int hpre_alg_res_post_hf(struct hpre_ctx *ctx, struct hpre_sqe *sqe,340void **kreq)341{342struct hpre_asym_request *req;343unsigned int err, done, 
alg;344int id;345346#define HPRE_NO_HW_ERR 0347#define HPRE_HW_TASK_DONE 3348#define HREE_HW_ERR_MASK GENMASK(10, 0)349#define HREE_SQE_DONE_MASK GENMASK(1, 0)350#define HREE_ALG_TYPE_MASK GENMASK(4, 0)351id = (int)le16_to_cpu(sqe->tag);352req = ctx->req_list[id];353hpre_rm_req_from_ctx(req);354*kreq = req;355356err = (le32_to_cpu(sqe->dw0) >> HPRE_SQE_ALG_BITS) &357HREE_HW_ERR_MASK;358359done = (le32_to_cpu(sqe->dw0) >> HPRE_SQE_DONE_SHIFT) &360HREE_SQE_DONE_MASK;361362if (likely(err == HPRE_NO_HW_ERR && done == HPRE_HW_TASK_DONE))363return 0;364365alg = le32_to_cpu(sqe->dw0) & HREE_ALG_TYPE_MASK;366dev_err_ratelimited(ctx->dev, "alg[0x%x] error: done[0x%x], etype[0x%x]\n",367alg, done, err);368369return -EINVAL;370}371372static int hpre_ctx_set(struct hpre_ctx *ctx, struct hisi_qp *qp, int qlen)373{374struct hpre *hpre;375376if (!ctx || !qp || qlen < 0)377return -EINVAL;378379spin_lock_init(&ctx->req_lock);380ctx->qp = qp;381ctx->dev = &qp->qm->pdev->dev;382383hpre = container_of(ctx->qp->qm, struct hpre, qm);384ctx->hpre = hpre;385ctx->req_list = kcalloc(qlen, sizeof(void *), GFP_KERNEL);386if (!ctx->req_list)387return -ENOMEM;388ctx->key_sz = 0;389ctx->crt_g2_mode = false;390idr_init(&ctx->req_idr);391392return 0;393}394395static void hpre_ctx_clear(struct hpre_ctx *ctx, bool is_clear_all)396{397if (is_clear_all) {398idr_destroy(&ctx->req_idr);399kfree(ctx->req_list);400hisi_qm_free_qps(&ctx->qp, 1);401}402403ctx->crt_g2_mode = false;404ctx->key_sz = 0;405}406407static bool hpre_is_bd_timeout(struct hpre_asym_request *req,408u64 overtime_thrhld)409{410struct timespec64 reply_time;411u64 time_use_us;412413ktime_get_ts64(&reply_time);414time_use_us = (reply_time.tv_sec - req->req_time.tv_sec) *415HPRE_DFX_SEC_TO_US +416(reply_time.tv_nsec - req->req_time.tv_nsec) /417HPRE_DFX_US_TO_NS;418419if (time_use_us <= overtime_thrhld)420return false;421422return true;423}424425static void hpre_dh_cb(struct hpre_ctx *ctx, void *resp)426{427struct hpre_dfx *dfx = 
ctx->hpre->debug.dfx;428struct hpre_asym_request *req;429struct kpp_request *areq;430u64 overtime_thrhld;431int ret;432433ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);434areq = req->areq.dh;435areq->dst_len = ctx->key_sz;436437overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);438if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))439atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);440441hpre_hw_data_clr_all(ctx, req, areq->dst, areq->src);442kpp_request_complete(areq, ret);443atomic64_inc(&dfx[HPRE_RECV_CNT].value);444}445446static void hpre_rsa_cb(struct hpre_ctx *ctx, void *resp)447{448struct hpre_dfx *dfx = ctx->hpre->debug.dfx;449struct hpre_asym_request *req;450struct akcipher_request *areq;451u64 overtime_thrhld;452int ret;453454ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);455456overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);457if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))458atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);459460areq = req->areq.rsa;461areq->dst_len = ctx->key_sz;462hpre_hw_data_clr_all(ctx, req, areq->dst, areq->src);463akcipher_request_complete(areq, ret);464atomic64_inc(&dfx[HPRE_RECV_CNT].value);465}466467static void hpre_alg_cb(struct hisi_qp *qp, void *resp)468{469struct hpre_ctx *ctx = qp->qp_ctx;470struct hpre_dfx *dfx = ctx->hpre->debug.dfx;471struct hpre_sqe *sqe = resp;472struct hpre_asym_request *req = ctx->req_list[le16_to_cpu(sqe->tag)];473474if (unlikely(!req)) {475atomic64_inc(&dfx[HPRE_INVALID_REQ_CNT].value);476return;477}478479req->cb(ctx, resp);480}481482static void hpre_stop_qp_and_put(struct hisi_qp *qp)483{484hisi_qm_stop_qp(qp);485hisi_qm_free_qps(&qp, 1);486}487488static int hpre_ctx_init(struct hpre_ctx *ctx, u8 type)489{490struct hisi_qp *qp;491int ret;492493qp = hpre_get_qp_and_start(type);494if (IS_ERR(qp))495return PTR_ERR(qp);496497qp->qp_ctx = ctx;498qp->req_cb = hpre_alg_cb;499500ret = hpre_ctx_set(ctx, qp, qp->sq_depth);501if 
(ret)502hpre_stop_qp_and_put(qp);503504return ret;505}506507static int hpre_msg_request_set(struct hpre_ctx *ctx, void *req, bool is_rsa)508{509struct hpre_asym_request *h_req;510struct hpre_sqe *msg;511int req_id;512void *tmp;513514if (is_rsa) {515struct akcipher_request *akreq = req;516517if (akreq->dst_len < ctx->key_sz) {518akreq->dst_len = ctx->key_sz;519return -EOVERFLOW;520}521522tmp = akcipher_request_ctx(akreq);523h_req = PTR_ALIGN(tmp, hpre_align_sz());524h_req->cb = hpre_rsa_cb;525h_req->areq.rsa = akreq;526msg = &h_req->req;527memset(msg, 0, sizeof(*msg));528} else {529struct kpp_request *kreq = req;530531if (kreq->dst_len < ctx->key_sz) {532kreq->dst_len = ctx->key_sz;533return -EOVERFLOW;534}535536tmp = kpp_request_ctx(kreq);537h_req = PTR_ALIGN(tmp, hpre_align_sz());538h_req->cb = hpre_dh_cb;539h_req->areq.dh = kreq;540msg = &h_req->req;541memset(msg, 0, sizeof(*msg));542msg->key = cpu_to_le64(ctx->dh.dma_xa_p);543}544545msg->in = cpu_to_le64(DMA_MAPPING_ERROR);546msg->out = cpu_to_le64(DMA_MAPPING_ERROR);547msg->dw0 |= cpu_to_le32(0x1 << HPRE_SQE_DONE_SHIFT);548msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;549h_req->ctx = ctx;550551req_id = hpre_add_req_to_ctx(h_req);552if (req_id < 0)553return -EBUSY;554555msg->tag = cpu_to_le16((u16)req_id);556557return 0;558}559560static int hpre_send(struct hpre_ctx *ctx, struct hpre_sqe *msg)561{562struct hpre_dfx *dfx = ctx->hpre->debug.dfx;563int ctr = 0;564int ret;565566do {567atomic64_inc(&dfx[HPRE_SEND_CNT].value);568spin_lock_bh(&ctx->req_lock);569ret = hisi_qp_send(ctx->qp, msg);570spin_unlock_bh(&ctx->req_lock);571if (ret != -EBUSY)572break;573atomic64_inc(&dfx[HPRE_SEND_BUSY_CNT].value);574} while (ctr++ < HPRE_TRY_SEND_TIMES);575576if (likely(!ret))577return ret;578579if (ret != -EBUSY)580atomic64_inc(&dfx[HPRE_SEND_FAIL_CNT].value);581582return ret;583}584585static int hpre_dh_compute_value(struct kpp_request *req)586{587struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);588struct 
hpre_ctx *ctx = kpp_tfm_ctx(tfm);589void *tmp = kpp_request_ctx(req);590struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, hpre_align_sz());591struct hpre_sqe *msg = &hpre_req->req;592int ret;593594ret = hpre_msg_request_set(ctx, req, false);595if (unlikely(ret))596return ret;597598if (req->src) {599ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 1);600if (unlikely(ret))601goto clear_all;602} else {603msg->in = cpu_to_le64(ctx->dh.dma_g);604}605606ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 1);607if (unlikely(ret))608goto clear_all;609610if (ctx->crt_g2_mode && !req->src)611msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH_G2);612else613msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH);614615/* success */616ret = hpre_send(ctx, msg);617if (likely(!ret))618return -EINPROGRESS;619620clear_all:621hpre_rm_req_from_ctx(hpre_req);622hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);623624return ret;625}626627static int hpre_is_dh_params_length_valid(unsigned int key_sz)628{629#define _HPRE_DH_GRP1 768630#define _HPRE_DH_GRP2 1024631#define _HPRE_DH_GRP5 1536632#define _HPRE_DH_GRP14 2048633#define _HPRE_DH_GRP15 3072634#define _HPRE_DH_GRP16 4096635switch (key_sz) {636case _HPRE_DH_GRP1:637case _HPRE_DH_GRP2:638case _HPRE_DH_GRP5:639case _HPRE_DH_GRP14:640case _HPRE_DH_GRP15:641case _HPRE_DH_GRP16:642return 0;643default:644return -EINVAL;645}646}647648static int hpre_dh_set_params(struct hpre_ctx *ctx, struct dh *params)649{650struct device *dev = ctx->dev;651unsigned int sz;652653if (params->p_size > HPRE_DH_MAX_P_SZ)654return -EINVAL;655656if (hpre_is_dh_params_length_valid(params->p_size <<657HPRE_BITS_2_BYTES_SHIFT))658return -EINVAL;659660sz = ctx->key_sz = params->p_size;661ctx->dh.xa_p = dma_alloc_coherent(dev, sz << 1,662&ctx->dh.dma_xa_p, GFP_KERNEL);663if (!ctx->dh.xa_p)664return -ENOMEM;665666memcpy(ctx->dh.xa_p + sz, params->p, sz);667668/* If g equals 2 don't copy it */669if (params->g_size == 1 && 
*(char *)params->g == HPRE_DH_G_FLAG) {670ctx->crt_g2_mode = true;671return 0;672}673674ctx->dh.g = dma_alloc_coherent(dev, sz, &ctx->dh.dma_g, GFP_KERNEL);675if (!ctx->dh.g) {676dma_free_coherent(dev, sz << 1, ctx->dh.xa_p,677ctx->dh.dma_xa_p);678ctx->dh.xa_p = NULL;679return -ENOMEM;680}681682memcpy(ctx->dh.g + (sz - params->g_size), params->g, params->g_size);683684return 0;685}686687static void hpre_dh_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)688{689struct device *dev = ctx->dev;690unsigned int sz = ctx->key_sz;691692if (is_clear_all)693hisi_qm_stop_qp(ctx->qp);694695if (ctx->dh.g) {696dma_free_coherent(dev, sz, ctx->dh.g, ctx->dh.dma_g);697ctx->dh.g = NULL;698}699700if (ctx->dh.xa_p) {701memzero_explicit(ctx->dh.xa_p, sz);702dma_free_coherent(dev, sz << 1, ctx->dh.xa_p,703ctx->dh.dma_xa_p);704ctx->dh.xa_p = NULL;705}706707hpre_ctx_clear(ctx, is_clear_all);708}709710static int hpre_dh_set_secret(struct crypto_kpp *tfm, const void *buf,711unsigned int len)712{713struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);714struct dh params;715int ret;716717if (crypto_dh_decode_key(buf, len, ¶ms) < 0)718return -EINVAL;719720/* Free old secret if any */721hpre_dh_clear_ctx(ctx, false);722723ret = hpre_dh_set_params(ctx, ¶ms);724if (ret < 0)725goto err_clear_ctx;726727memcpy(ctx->dh.xa_p + (ctx->key_sz - params.key_size), params.key,728params.key_size);729730return 0;731732err_clear_ctx:733hpre_dh_clear_ctx(ctx, false);734return ret;735}736737static unsigned int hpre_dh_max_size(struct crypto_kpp *tfm)738{739struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);740741return ctx->key_sz;742}743744static int hpre_dh_init_tfm(struct crypto_kpp *tfm)745{746struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);747748kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd());749750return hpre_ctx_init(ctx, HPRE_V2_ALG_TYPE);751}752753static void hpre_dh_exit_tfm(struct crypto_kpp *tfm)754{755struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);756757hpre_dh_clear_ctx(ctx, true);758}759760static void 
hpre_rsa_drop_leading_zeros(const char **ptr, size_t *len)761{762while (!**ptr && *len) {763(*ptr)++;764(*len)--;765}766}767768static bool hpre_rsa_key_size_is_support(unsigned int len)769{770unsigned int bits = len << HPRE_BITS_2_BYTES_SHIFT;771772#define _RSA_1024BITS_KEY_WDTH 1024773#define _RSA_2048BITS_KEY_WDTH 2048774#define _RSA_3072BITS_KEY_WDTH 3072775#define _RSA_4096BITS_KEY_WDTH 4096776777switch (bits) {778case _RSA_1024BITS_KEY_WDTH:779case _RSA_2048BITS_KEY_WDTH:780case _RSA_3072BITS_KEY_WDTH:781case _RSA_4096BITS_KEY_WDTH:782return true;783default:784return false;785}786}787788static int hpre_rsa_enc(struct akcipher_request *req)789{790struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);791struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);792void *tmp = akcipher_request_ctx(req);793struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, hpre_align_sz());794struct hpre_sqe *msg = &hpre_req->req;795int ret;796797/* For 512 and 1536 bits key size, use soft tfm instead */798if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||799ctx->key_sz == HPRE_RSA_1536BITS_KSZ) {800akcipher_request_set_tfm(req, ctx->rsa.soft_tfm);801ret = crypto_akcipher_encrypt(req);802akcipher_request_set_tfm(req, tfm);803return ret;804}805806if (unlikely(!ctx->rsa.pubkey))807return -EINVAL;808809ret = hpre_msg_request_set(ctx, req, true);810if (unlikely(ret))811return ret;812813msg->dw0 |= cpu_to_le32(HPRE_ALG_NC_NCRT);814msg->key = cpu_to_le64(ctx->rsa.dma_pubkey);815816ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 0);817if (unlikely(ret))818goto clear_all;819820ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 0);821if (unlikely(ret))822goto clear_all;823824/* success */825ret = hpre_send(ctx, msg);826if (likely(!ret))827return -EINPROGRESS;828829clear_all:830hpre_rm_req_from_ctx(hpre_req);831hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);832833return ret;834}835836static int hpre_rsa_dec(struct akcipher_request *req)837{838struct crypto_akcipher *tfm = 
crypto_akcipher_reqtfm(req);839struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);840void *tmp = akcipher_request_ctx(req);841struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, hpre_align_sz());842struct hpre_sqe *msg = &hpre_req->req;843int ret;844845/* For 512 and 1536 bits key size, use soft tfm instead */846if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||847ctx->key_sz == HPRE_RSA_1536BITS_KSZ) {848akcipher_request_set_tfm(req, ctx->rsa.soft_tfm);849ret = crypto_akcipher_decrypt(req);850akcipher_request_set_tfm(req, tfm);851return ret;852}853854if (unlikely(!ctx->rsa.prikey))855return -EINVAL;856857ret = hpre_msg_request_set(ctx, req, true);858if (unlikely(ret))859return ret;860861if (ctx->crt_g2_mode) {862msg->key = cpu_to_le64(ctx->rsa.dma_crt_prikey);863msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) |864HPRE_ALG_NC_CRT);865} else {866msg->key = cpu_to_le64(ctx->rsa.dma_prikey);867msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) |868HPRE_ALG_NC_NCRT);869}870871ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 0);872if (unlikely(ret))873goto clear_all;874875ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 0);876if (unlikely(ret))877goto clear_all;878879/* success */880ret = hpre_send(ctx, msg);881if (likely(!ret))882return -EINPROGRESS;883884clear_all:885hpre_rm_req_from_ctx(hpre_req);886hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);887888return ret;889}890891static int hpre_rsa_set_n(struct hpre_ctx *ctx, const char *value,892size_t vlen, bool private)893{894const char *ptr = value;895896hpre_rsa_drop_leading_zeros(&ptr, &vlen);897898ctx->key_sz = vlen;899900/* if invalid key size provided, we use software tfm */901if (!hpre_rsa_key_size_is_support(ctx->key_sz))902return 0;903904ctx->rsa.pubkey = dma_alloc_coherent(ctx->dev, vlen << 1,905&ctx->rsa.dma_pubkey,906GFP_KERNEL);907if (!ctx->rsa.pubkey)908return -ENOMEM;909910if (private) {911ctx->rsa.prikey = dma_alloc_coherent(ctx->dev, vlen << 1,912&ctx->rsa.dma_prikey,913GFP_KERNEL);914if 
(!ctx->rsa.prikey) {915dma_free_coherent(ctx->dev, vlen << 1,916ctx->rsa.pubkey,917ctx->rsa.dma_pubkey);918ctx->rsa.pubkey = NULL;919return -ENOMEM;920}921memcpy(ctx->rsa.prikey + vlen, ptr, vlen);922}923memcpy(ctx->rsa.pubkey + vlen, ptr, vlen);924925/* Using hardware HPRE to do RSA */926return 1;927}928929static int hpre_rsa_set_e(struct hpre_ctx *ctx, const char *value,930size_t vlen)931{932const char *ptr = value;933934hpre_rsa_drop_leading_zeros(&ptr, &vlen);935936if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)937return -EINVAL;938939memcpy(ctx->rsa.pubkey + ctx->key_sz - vlen, ptr, vlen);940941return 0;942}943944static int hpre_rsa_set_d(struct hpre_ctx *ctx, const char *value,945size_t vlen)946{947const char *ptr = value;948949hpre_rsa_drop_leading_zeros(&ptr, &vlen);950951if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)952return -EINVAL;953954memcpy(ctx->rsa.prikey + ctx->key_sz - vlen, ptr, vlen);955956return 0;957}958959static int hpre_crt_para_get(char *para, size_t para_sz,960const char *raw, size_t raw_sz)961{962const char *ptr = raw;963size_t len = raw_sz;964965hpre_rsa_drop_leading_zeros(&ptr, &len);966if (!len || len > para_sz)967return -EINVAL;968969memcpy(para + para_sz - len, ptr, len);970971return 0;972}973974static int hpre_rsa_setkey_crt(struct hpre_ctx *ctx, struct rsa_key *rsa_key)975{976unsigned int hlf_ksz = ctx->key_sz >> 1;977struct device *dev = ctx->dev;978u64 offset;979int ret;980981ctx->rsa.crt_prikey = dma_alloc_coherent(dev, hlf_ksz * HPRE_CRT_PRMS,982&ctx->rsa.dma_crt_prikey,983GFP_KERNEL);984if (!ctx->rsa.crt_prikey)985return -ENOMEM;986987ret = hpre_crt_para_get(ctx->rsa.crt_prikey, hlf_ksz,988rsa_key->dq, rsa_key->dq_sz);989if (ret)990goto free_key;991992offset = hlf_ksz;993ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,994rsa_key->dp, rsa_key->dp_sz);995if (ret)996goto free_key;997998offset = hlf_ksz * HPRE_CRT_Q;999ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,1000rsa_key->q, 
rsa_key->q_sz);1001if (ret)1002goto free_key;10031004offset = hlf_ksz * HPRE_CRT_P;1005ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,1006rsa_key->p, rsa_key->p_sz);1007if (ret)1008goto free_key;10091010offset = hlf_ksz * HPRE_CRT_INV;1011ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,1012rsa_key->qinv, rsa_key->qinv_sz);1013if (ret)1014goto free_key;10151016ctx->crt_g2_mode = true;10171018return 0;10191020free_key:1021offset = hlf_ksz * HPRE_CRT_PRMS;1022memzero_explicit(ctx->rsa.crt_prikey, offset);1023dma_free_coherent(dev, hlf_ksz * HPRE_CRT_PRMS, ctx->rsa.crt_prikey,1024ctx->rsa.dma_crt_prikey);1025ctx->rsa.crt_prikey = NULL;1026ctx->crt_g2_mode = false;10271028return ret;1029}10301031/* If it is clear all, all the resources of the QP will be cleaned. */1032static void hpre_rsa_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)1033{1034unsigned int half_key_sz = ctx->key_sz >> 1;1035struct device *dev = ctx->dev;10361037if (is_clear_all)1038hisi_qm_stop_qp(ctx->qp);10391040if (ctx->rsa.pubkey) {1041dma_free_coherent(dev, ctx->key_sz << 1,1042ctx->rsa.pubkey, ctx->rsa.dma_pubkey);1043ctx->rsa.pubkey = NULL;1044}10451046if (ctx->rsa.crt_prikey) {1047memzero_explicit(ctx->rsa.crt_prikey,1048half_key_sz * HPRE_CRT_PRMS);1049dma_free_coherent(dev, half_key_sz * HPRE_CRT_PRMS,1050ctx->rsa.crt_prikey, ctx->rsa.dma_crt_prikey);1051ctx->rsa.crt_prikey = NULL;1052}10531054if (ctx->rsa.prikey) {1055memzero_explicit(ctx->rsa.prikey, ctx->key_sz);1056dma_free_coherent(dev, ctx->key_sz << 1, ctx->rsa.prikey,1057ctx->rsa.dma_prikey);1058ctx->rsa.prikey = NULL;1059}10601061hpre_ctx_clear(ctx, is_clear_all);1062}10631064/*1065* we should judge if it is CRT or not,1066* CRT: return true, N-CRT: return false .1067*/1068static bool hpre_is_crt_key(struct rsa_key *key)1069{1070u16 len = key->p_sz + key->q_sz + key->dp_sz + key->dq_sz +1071key->qinv_sz;10721073#define LEN_OF_NCRT_PARA 510741075/* N-CRT less than 5 parameters */1076return len > 
LEN_OF_NCRT_PARA;1077}10781079static int hpre_rsa_setkey(struct hpre_ctx *ctx, const void *key,1080unsigned int keylen, bool private)1081{1082struct rsa_key rsa_key;1083int ret;10841085hpre_rsa_clear_ctx(ctx, false);10861087if (private)1088ret = rsa_parse_priv_key(&rsa_key, key, keylen);1089else1090ret = rsa_parse_pub_key(&rsa_key, key, keylen);1091if (ret < 0)1092return ret;10931094ret = hpre_rsa_set_n(ctx, rsa_key.n, rsa_key.n_sz, private);1095if (ret <= 0)1096return ret;10971098if (private) {1099ret = hpre_rsa_set_d(ctx, rsa_key.d, rsa_key.d_sz);1100if (ret < 0)1101goto free;11021103if (hpre_is_crt_key(&rsa_key)) {1104ret = hpre_rsa_setkey_crt(ctx, &rsa_key);1105if (ret < 0)1106goto free;1107}1108}11091110ret = hpre_rsa_set_e(ctx, rsa_key.e, rsa_key.e_sz);1111if (ret < 0)1112goto free;11131114if ((private && !ctx->rsa.prikey) || !ctx->rsa.pubkey) {1115ret = -EINVAL;1116goto free;1117}11181119return 0;11201121free:1122hpre_rsa_clear_ctx(ctx, false);1123return ret;1124}11251126static int hpre_rsa_setpubkey(struct crypto_akcipher *tfm, const void *key,1127unsigned int keylen)1128{1129struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);1130int ret;11311132ret = crypto_akcipher_set_pub_key(ctx->rsa.soft_tfm, key, keylen);1133if (ret)1134return ret;11351136return hpre_rsa_setkey(ctx, key, keylen, false);1137}11381139static int hpre_rsa_setprivkey(struct crypto_akcipher *tfm, const void *key,1140unsigned int keylen)1141{1142struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);1143int ret;11441145ret = crypto_akcipher_set_priv_key(ctx->rsa.soft_tfm, key, keylen);1146if (ret)1147return ret;11481149return hpre_rsa_setkey(ctx, key, keylen, true);1150}11511152static unsigned int hpre_rsa_max_size(struct crypto_akcipher *tfm)1153{1154struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);11551156/* For 512 and 1536 bits key size, use soft tfm instead */1157if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||1158ctx->key_sz == HPRE_RSA_1536BITS_KSZ)1159return 
crypto_akcipher_maxsize(ctx->rsa.soft_tfm);11601161return ctx->key_sz;1162}11631164static int hpre_rsa_init_tfm(struct crypto_akcipher *tfm)1165{1166struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);1167int ret;11681169ctx->rsa.soft_tfm = crypto_alloc_akcipher("rsa-generic", 0, 0);1170if (IS_ERR(ctx->rsa.soft_tfm)) {1171pr_err("Can not alloc_akcipher!\n");1172return PTR_ERR(ctx->rsa.soft_tfm);1173}11741175akcipher_set_reqsize(tfm, sizeof(struct hpre_asym_request) +1176hpre_align_pd());11771178ret = hpre_ctx_init(ctx, HPRE_V2_ALG_TYPE);1179if (ret)1180crypto_free_akcipher(ctx->rsa.soft_tfm);11811182return ret;1183}11841185static void hpre_rsa_exit_tfm(struct crypto_akcipher *tfm)1186{1187struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);11881189hpre_rsa_clear_ctx(ctx, true);1190crypto_free_akcipher(ctx->rsa.soft_tfm);1191}11921193static void hpre_key_to_big_end(u8 *data, int len)1194{1195int i, j;11961197for (i = 0; i < len / 2; i++) {1198j = len - i - 1;1199swap(data[j], data[i]);1200}1201}12021203static void hpre_ecc_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)1204{1205struct device *dev = ctx->dev;1206unsigned int sz = ctx->key_sz;1207unsigned int shift = sz << 1;12081209if (is_clear_all)1210hisi_qm_stop_qp(ctx->qp);12111212if (ctx->ecdh.p) {1213/* ecdh: p->a->k->b */1214memzero_explicit(ctx->ecdh.p + shift, sz);1215dma_free_coherent(dev, sz << 3, ctx->ecdh.p, ctx->ecdh.dma_p);1216ctx->ecdh.p = NULL;1217}12181219hpre_ctx_clear(ctx, is_clear_all);1220}12211222/*1223* The bits of 192/224/256/384/521 are supported by HPRE,1224* and convert the bits like:1225* bits<=256, bits=256; 256<bits<=384, bits=384; 384<bits<=576, bits=576;1226* If the parameter bit width is insufficient, then we fill in the1227* high-order zeros by soft, so TASK_LENGTH1 is 0x3/0x5/0x8;1228*/1229static unsigned int hpre_ecdh_supported_curve(unsigned short id)1230{1231switch (id) {1232case ECC_CURVE_NIST_P192:1233case ECC_CURVE_NIST_P256:1234return HPRE_ECC_HW256_KSZ_B;1235case 
ECC_CURVE_NIST_P384:1236return HPRE_ECC_HW384_KSZ_B;1237default:1238break;1239}12401241return 0;1242}12431244static void fill_curve_param(void *addr, u64 *param, unsigned int cur_sz, u8 ndigits)1245{1246unsigned int sz = cur_sz - (ndigits - 1) * sizeof(u64);1247u8 i = 0;12481249while (i < ndigits - 1) {1250memcpy(addr + sizeof(u64) * i, ¶m[i], sizeof(u64));1251i++;1252}12531254memcpy(addr + sizeof(u64) * i, ¶m[ndigits - 1], sz);1255hpre_key_to_big_end((u8 *)addr, cur_sz);1256}12571258static int hpre_ecdh_fill_curve(struct hpre_ctx *ctx, struct ecdh *params,1259unsigned int cur_sz)1260{1261unsigned int shifta = ctx->key_sz << 1;1262unsigned int shiftb = ctx->key_sz << 2;1263void *p = ctx->ecdh.p + ctx->key_sz - cur_sz;1264void *a = ctx->ecdh.p + shifta - cur_sz;1265void *b = ctx->ecdh.p + shiftb - cur_sz;1266void *x = ctx->ecdh.g + ctx->key_sz - cur_sz;1267void *y = ctx->ecdh.g + shifta - cur_sz;1268const struct ecc_curve *curve = ecc_get_curve(ctx->curve_id);1269char *n;12701271if (unlikely(!curve))1272return -EINVAL;12731274n = kzalloc(ctx->key_sz, GFP_KERNEL);1275if (!n)1276return -ENOMEM;12771278fill_curve_param(p, curve->p, cur_sz, curve->g.ndigits);1279fill_curve_param(a, curve->a, cur_sz, curve->g.ndigits);1280fill_curve_param(b, curve->b, cur_sz, curve->g.ndigits);1281fill_curve_param(x, curve->g.x, cur_sz, curve->g.ndigits);1282fill_curve_param(y, curve->g.y, cur_sz, curve->g.ndigits);1283fill_curve_param(n, curve->n, cur_sz, curve->g.ndigits);12841285if (params->key_size == cur_sz && memcmp(params->key, n, cur_sz) >= 0) {1286kfree(n);1287return -EINVAL;1288}12891290kfree(n);1291return 0;1292}12931294static unsigned int hpre_ecdh_get_curvesz(unsigned short id)1295{1296switch (id) {1297case ECC_CURVE_NIST_P192:1298return HPRE_ECC_NIST_P192_N_SIZE;1299case ECC_CURVE_NIST_P256:1300return HPRE_ECC_NIST_P256_N_SIZE;1301case ECC_CURVE_NIST_P384:1302return HPRE_ECC_NIST_P384_N_SIZE;1303default:1304break;1305}13061307return 0;1308}13091310static int 
hpre_ecdh_set_param(struct hpre_ctx *ctx, struct ecdh *params)1311{1312struct device *dev = ctx->dev;1313unsigned int sz, shift, curve_sz;1314int ret;13151316ctx->key_sz = hpre_ecdh_supported_curve(ctx->curve_id);1317if (!ctx->key_sz)1318return -EINVAL;13191320curve_sz = hpre_ecdh_get_curvesz(ctx->curve_id);1321if (!curve_sz || params->key_size > curve_sz)1322return -EINVAL;13231324sz = ctx->key_sz;13251326if (!ctx->ecdh.p) {1327ctx->ecdh.p = dma_alloc_coherent(dev, sz << 3, &ctx->ecdh.dma_p,1328GFP_KERNEL);1329if (!ctx->ecdh.p)1330return -ENOMEM;1331}13321333shift = sz << 2;1334ctx->ecdh.g = ctx->ecdh.p + shift;1335ctx->ecdh.dma_g = ctx->ecdh.dma_p + shift;13361337ret = hpre_ecdh_fill_curve(ctx, params, curve_sz);1338if (ret) {1339dev_err(dev, "failed to fill curve_param, ret = %d!\n", ret);1340dma_free_coherent(dev, sz << 3, ctx->ecdh.p, ctx->ecdh.dma_p);1341ctx->ecdh.p = NULL;1342return ret;1343}13441345return 0;1346}13471348static bool hpre_key_is_zero(char *key, unsigned short key_sz)1349{1350int i;13511352for (i = 0; i < key_sz; i++)1353if (key[i])1354return false;13551356return true;1357}13581359static int ecdh_gen_privkey(struct hpre_ctx *ctx, struct ecdh *params)1360{1361struct device *dev = ctx->dev;1362int ret;13631364ret = crypto_get_default_rng();1365if (ret) {1366dev_err(dev, "failed to get default rng, ret = %d!\n", ret);1367return ret;1368}13691370ret = crypto_rng_get_bytes(crypto_default_rng, (u8 *)params->key,1371params->key_size);1372crypto_put_default_rng();1373if (ret)1374dev_err(dev, "failed to get rng, ret = %d!\n", ret);13751376return ret;1377}13781379static int hpre_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,1380unsigned int len)1381{1382struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);1383unsigned int sz, sz_shift, curve_sz;1384struct device *dev = ctx->dev;1385char key[HPRE_ECC_MAX_KSZ];1386struct ecdh params;1387int ret;13881389if (crypto_ecdh_decode_key(buf, len, ¶ms) < 0) {1390dev_err(dev, "failed to decode ecdh key!\n");1391return 
-EINVAL;1392}13931394/* Use stdrng to generate private key */1395if (!params.key || !params.key_size) {1396params.key = key;1397curve_sz = hpre_ecdh_get_curvesz(ctx->curve_id);1398if (!curve_sz) {1399dev_err(dev, "Invalid curve size!\n");1400return -EINVAL;1401}14021403params.key_size = curve_sz - 1;1404ret = ecdh_gen_privkey(ctx, ¶ms);1405if (ret)1406return ret;1407}14081409if (hpre_key_is_zero(params.key, params.key_size)) {1410dev_err(dev, "Invalid hpre key!\n");1411return -EINVAL;1412}14131414hpre_ecc_clear_ctx(ctx, false);14151416ret = hpre_ecdh_set_param(ctx, ¶ms);1417if (ret < 0) {1418dev_err(dev, "failed to set hpre param, ret = %d!\n", ret);1419return ret;1420}14211422sz = ctx->key_sz;1423sz_shift = (sz << 1) + sz - params.key_size;1424memcpy(ctx->ecdh.p + sz_shift, params.key, params.key_size);14251426return 0;1427}14281429static void hpre_ecdh_hw_data_clr_all(struct hpre_ctx *ctx,1430struct hpre_asym_request *req,1431struct scatterlist *dst,1432struct scatterlist *src)1433{1434struct device *dev = ctx->dev;1435struct hpre_sqe *sqe = &req->req;1436dma_addr_t dma;14371438dma = le64_to_cpu(sqe->in);1439if (unlikely(dma_mapping_error(dev, dma)))1440return;14411442if (src && req->src)1443dma_free_coherent(dev, ctx->key_sz << 2, req->src, dma);14441445dma = le64_to_cpu(sqe->out);1446if (unlikely(dma_mapping_error(dev, dma)))1447return;14481449if (req->dst)1450dma_free_coherent(dev, ctx->key_sz << 1, req->dst, dma);1451if (dst)1452dma_unmap_single(dev, dma, ctx->key_sz << 1, DMA_FROM_DEVICE);1453}14541455static void hpre_ecdh_cb(struct hpre_ctx *ctx, void *resp)1456{1457unsigned int curve_sz = hpre_ecdh_get_curvesz(ctx->curve_id);1458struct hpre_dfx *dfx = ctx->hpre->debug.dfx;1459struct hpre_asym_request *req = NULL;1460struct kpp_request *areq;1461u64 overtime_thrhld;1462char *p;1463int ret;14641465ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);1466areq = req->areq.ecdh;1467areq->dst_len = ctx->key_sz << 1;14681469overtime_thrhld = 
atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);1470if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))1471atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);14721473/* Do unmap before data processing */1474hpre_ecdh_hw_data_clr_all(ctx, req, areq->dst, areq->src);14751476p = sg_virt(areq->dst);1477memmove(p, p + ctx->key_sz - curve_sz, curve_sz);1478memmove(p + curve_sz, p + areq->dst_len - curve_sz, curve_sz);14791480kpp_request_complete(areq, ret);14811482atomic64_inc(&dfx[HPRE_RECV_CNT].value);1483}14841485static int hpre_ecdh_msg_request_set(struct hpre_ctx *ctx,1486struct kpp_request *req)1487{1488struct hpre_asym_request *h_req;1489struct hpre_sqe *msg;1490int req_id;1491void *tmp;14921493if (req->dst_len < ctx->key_sz << 1) {1494req->dst_len = ctx->key_sz << 1;1495return -EINVAL;1496}14971498tmp = kpp_request_ctx(req);1499h_req = PTR_ALIGN(tmp, hpre_align_sz());1500h_req->cb = hpre_ecdh_cb;1501h_req->areq.ecdh = req;1502msg = &h_req->req;1503memset(msg, 0, sizeof(*msg));1504msg->in = cpu_to_le64(DMA_MAPPING_ERROR);1505msg->out = cpu_to_le64(DMA_MAPPING_ERROR);1506msg->key = cpu_to_le64(ctx->ecdh.dma_p);15071508msg->dw0 |= cpu_to_le32(0x1U << HPRE_SQE_DONE_SHIFT);1509msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;1510h_req->ctx = ctx;15111512req_id = hpre_add_req_to_ctx(h_req);1513if (req_id < 0)1514return -EBUSY;15151516msg->tag = cpu_to_le16((u16)req_id);1517return 0;1518}15191520static int hpre_ecdh_src_data_init(struct hpre_asym_request *hpre_req,1521struct scatterlist *data, unsigned int len)1522{1523struct hpre_sqe *msg = &hpre_req->req;1524struct hpre_ctx *ctx = hpre_req->ctx;1525struct device *dev = ctx->dev;1526unsigned int tmpshift;1527dma_addr_t dma = 0;1528void *ptr;1529int shift;15301531/* Src_data include gx and gy. 
*/1532shift = ctx->key_sz - (len >> 1);1533if (unlikely(shift < 0))1534return -EINVAL;15351536ptr = dma_alloc_coherent(dev, ctx->key_sz << 2, &dma, GFP_KERNEL);1537if (unlikely(!ptr))1538return -ENOMEM;15391540tmpshift = ctx->key_sz << 1;1541scatterwalk_map_and_copy(ptr + tmpshift, data, 0, len, 0);1542memcpy(ptr + shift, ptr + tmpshift, len >> 1);1543memcpy(ptr + ctx->key_sz + shift, ptr + tmpshift + (len >> 1), len >> 1);15441545hpre_req->src = ptr;1546msg->in = cpu_to_le64(dma);1547return 0;1548}15491550static int hpre_ecdh_dst_data_init(struct hpre_asym_request *hpre_req,1551struct scatterlist *data, unsigned int len)1552{1553struct hpre_sqe *msg = &hpre_req->req;1554struct hpre_ctx *ctx = hpre_req->ctx;1555struct device *dev = ctx->dev;1556dma_addr_t dma;15571558if (unlikely(!data || !sg_is_last(data) || len != ctx->key_sz << 1)) {1559dev_err(dev, "data or data length is illegal!\n");1560return -EINVAL;1561}15621563hpre_req->dst = NULL;1564dma = dma_map_single(dev, sg_virt(data), len, DMA_FROM_DEVICE);1565if (unlikely(dma_mapping_error(dev, dma))) {1566dev_err(dev, "dma map data err!\n");1567return -ENOMEM;1568}15691570msg->out = cpu_to_le64(dma);1571return 0;1572}15731574static int hpre_ecdh_compute_value(struct kpp_request *req)1575{1576struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);1577struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);1578struct device *dev = ctx->dev;1579void *tmp = kpp_request_ctx(req);1580struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, hpre_align_sz());1581struct hpre_sqe *msg = &hpre_req->req;1582int ret;15831584ret = hpre_ecdh_msg_request_set(ctx, req);1585if (unlikely(ret)) {1586dev_err(dev, "failed to set ecdh request, ret = %d!\n", ret);1587return ret;1588}15891590if (req->src) {1591ret = hpre_ecdh_src_data_init(hpre_req, req->src, req->src_len);1592if (unlikely(ret)) {1593dev_err(dev, "failed to init src data, ret = %d!\n", ret);1594goto clear_all;1595}1596} else {1597msg->in = cpu_to_le64(ctx->ecdh.dma_g);1598}15991600ret = 
hpre_ecdh_dst_data_init(hpre_req, req->dst, req->dst_len);1601if (unlikely(ret)) {1602dev_err(dev, "failed to init dst data, ret = %d!\n", ret);1603goto clear_all;1604}16051606msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_ECC_MUL);1607msg->resv1 = ctx->enable_hpcore << HPRE_ENABLE_HPCORE_SHIFT;16081609ret = hpre_send(ctx, msg);1610if (likely(!ret))1611return -EINPROGRESS;16121613clear_all:1614hpre_rm_req_from_ctx(hpre_req);1615hpre_ecdh_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);1616return ret;1617}16181619static unsigned int hpre_ecdh_max_size(struct crypto_kpp *tfm)1620{1621struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);16221623/* max size is the pub_key_size, include x and y */1624return ctx->key_sz << 1;1625}16261627static int hpre_ecdh_nist_p192_init_tfm(struct crypto_kpp *tfm)1628{1629struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);16301631ctx->curve_id = ECC_CURVE_NIST_P192;16321633kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd());16341635return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);1636}16371638static int hpre_ecdh_nist_p256_init_tfm(struct crypto_kpp *tfm)1639{1640struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);16411642ctx->curve_id = ECC_CURVE_NIST_P256;1643ctx->enable_hpcore = 1;16441645kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd());16461647return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);1648}16491650static int hpre_ecdh_nist_p384_init_tfm(struct crypto_kpp *tfm)1651{1652struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);16531654ctx->curve_id = ECC_CURVE_NIST_P384;16551656kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd());16571658return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);1659}16601661static void hpre_ecdh_exit_tfm(struct crypto_kpp *tfm)1662{1663struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);16641665hpre_ecc_clear_ctx(ctx, true);1666}16671668static struct akcipher_alg rsa = {1669.encrypt = hpre_rsa_enc,1670.decrypt = hpre_rsa_dec,1671.set_pub_key = hpre_rsa_setpubkey,1672.set_priv_key = 
hpre_rsa_setprivkey,1673.max_size = hpre_rsa_max_size,1674.init = hpre_rsa_init_tfm,1675.exit = hpre_rsa_exit_tfm,1676.base = {1677.cra_ctxsize = sizeof(struct hpre_ctx),1678.cra_priority = HPRE_CRYPTO_ALG_PRI,1679.cra_name = "rsa",1680.cra_driver_name = "hpre-rsa",1681.cra_module = THIS_MODULE,1682},1683};16841685static struct kpp_alg dh = {1686.set_secret = hpre_dh_set_secret,1687.generate_public_key = hpre_dh_compute_value,1688.compute_shared_secret = hpre_dh_compute_value,1689.max_size = hpre_dh_max_size,1690.init = hpre_dh_init_tfm,1691.exit = hpre_dh_exit_tfm,1692.base = {1693.cra_ctxsize = sizeof(struct hpre_ctx),1694.cra_priority = HPRE_CRYPTO_ALG_PRI,1695.cra_name = "dh",1696.cra_driver_name = "hpre-dh",1697.cra_module = THIS_MODULE,1698},1699};17001701static struct kpp_alg ecdh_curves[] = {1702{1703.set_secret = hpre_ecdh_set_secret,1704.generate_public_key = hpre_ecdh_compute_value,1705.compute_shared_secret = hpre_ecdh_compute_value,1706.max_size = hpre_ecdh_max_size,1707.init = hpre_ecdh_nist_p192_init_tfm,1708.exit = hpre_ecdh_exit_tfm,1709.base = {1710.cra_ctxsize = sizeof(struct hpre_ctx),1711.cra_priority = HPRE_CRYPTO_ALG_PRI,1712.cra_name = "ecdh-nist-p192",1713.cra_driver_name = "hpre-ecdh-nist-p192",1714.cra_module = THIS_MODULE,1715},1716}, {1717.set_secret = hpre_ecdh_set_secret,1718.generate_public_key = hpre_ecdh_compute_value,1719.compute_shared_secret = hpre_ecdh_compute_value,1720.max_size = hpre_ecdh_max_size,1721.init = hpre_ecdh_nist_p256_init_tfm,1722.exit = hpre_ecdh_exit_tfm,1723.base = {1724.cra_ctxsize = sizeof(struct hpre_ctx),1725.cra_priority = HPRE_CRYPTO_ALG_PRI,1726.cra_name = "ecdh-nist-p256",1727.cra_driver_name = "hpre-ecdh-nist-p256",1728.cra_module = THIS_MODULE,1729},1730}, {1731.set_secret = hpre_ecdh_set_secret,1732.generate_public_key = hpre_ecdh_compute_value,1733.compute_shared_secret = hpre_ecdh_compute_value,1734.max_size = hpre_ecdh_max_size,1735.init = hpre_ecdh_nist_p384_init_tfm,1736.exit = 
hpre_ecdh_exit_tfm,1737.base = {1738.cra_ctxsize = sizeof(struct hpre_ctx),1739.cra_priority = HPRE_CRYPTO_ALG_PRI,1740.cra_name = "ecdh-nist-p384",1741.cra_driver_name = "hpre-ecdh-nist-p384",1742.cra_module = THIS_MODULE,1743},1744}1745};17461747static int hpre_register_rsa(struct hisi_qm *qm)1748{1749int ret;17501751if (!hpre_check_alg_support(qm, HPRE_DRV_RSA_MASK_CAP))1752return 0;17531754rsa.base.cra_flags = 0;1755ret = crypto_register_akcipher(&rsa);1756if (ret)1757dev_err(&qm->pdev->dev, "failed to register rsa (%d)!\n", ret);17581759return ret;1760}17611762static void hpre_unregister_rsa(struct hisi_qm *qm)1763{1764if (!hpre_check_alg_support(qm, HPRE_DRV_RSA_MASK_CAP))1765return;17661767crypto_unregister_akcipher(&rsa);1768}17691770static int hpre_register_dh(struct hisi_qm *qm)1771{1772int ret;17731774if (!hpre_check_alg_support(qm, HPRE_DRV_DH_MASK_CAP))1775return 0;17761777ret = crypto_register_kpp(&dh);1778if (ret)1779dev_err(&qm->pdev->dev, "failed to register dh (%d)!\n", ret);17801781return ret;1782}17831784static void hpre_unregister_dh(struct hisi_qm *qm)1785{1786if (!hpre_check_alg_support(qm, HPRE_DRV_DH_MASK_CAP))1787return;17881789crypto_unregister_kpp(&dh);1790}17911792static int hpre_register_ecdh(struct hisi_qm *qm)1793{1794int ret, i;17951796if (!hpre_check_alg_support(qm, HPRE_DRV_ECDH_MASK_CAP))1797return 0;17981799for (i = 0; i < ARRAY_SIZE(ecdh_curves); i++) {1800ret = crypto_register_kpp(&ecdh_curves[i]);1801if (ret) {1802dev_err(&qm->pdev->dev, "failed to register %s (%d)!\n",1803ecdh_curves[i].base.cra_name, ret);1804goto unreg_kpp;1805}1806}18071808return 0;18091810unreg_kpp:1811for (--i; i >= 0; --i)1812crypto_unregister_kpp(&ecdh_curves[i]);18131814return ret;1815}18161817static void hpre_unregister_ecdh(struct hisi_qm *qm)1818{1819int i;18201821if (!hpre_check_alg_support(qm, HPRE_DRV_ECDH_MASK_CAP))1822return;18231824for (i = ARRAY_SIZE(ecdh_curves) - 1; i >= 0; --i)1825crypto_unregister_kpp(&ecdh_curves[i]);1826}18271828int 
hpre_algs_register(struct hisi_qm *qm)1829{1830int ret = 0;18311832mutex_lock(&hpre_algs_lock);1833if (hpre_available_devs) {1834hpre_available_devs++;1835goto unlock;1836}18371838ret = hpre_register_rsa(qm);1839if (ret)1840goto unlock;18411842ret = hpre_register_dh(qm);1843if (ret)1844goto unreg_rsa;18451846ret = hpre_register_ecdh(qm);1847if (ret)1848goto unreg_dh;18491850hpre_available_devs++;1851mutex_unlock(&hpre_algs_lock);18521853return ret;18541855unreg_dh:1856hpre_unregister_dh(qm);1857unreg_rsa:1858hpre_unregister_rsa(qm);1859unlock:1860mutex_unlock(&hpre_algs_lock);1861return ret;1862}18631864void hpre_algs_unregister(struct hisi_qm *qm)1865{1866mutex_lock(&hpre_algs_lock);1867if (--hpre_available_devs)1868goto unlock;18691870hpre_unregister_ecdh(qm);1871hpre_unregister_dh(qm);1872hpre_unregister_rsa(qm);18731874unlock:1875mutex_unlock(&hpre_algs_lock);1876}187718781879