From 619e464ae22a17e6a060527c8591ccf78eb368ba Mon Sep 17 00:00:00 2001 From: Shukun Tan Date: Thu, 2 Apr 2020 14:53:01 +0800 Subject: crypto: hisilicon - put vfs_num into struct hisi_qm We plan to move vfs_num related code into qm.c, put the param vfs_num into struct hisi_qm first. Signed-off-by: Shukun Tan Reviewed-by: Zhou Wang Reviewed-by: Zaibo Xu Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/hpre/hpre.h | 1 - drivers/crypto/hisilicon/hpre/hpre_main.c | 12 +++++------- drivers/crypto/hisilicon/qm.h | 1 + drivers/crypto/hisilicon/sec2/sec.h | 1 - drivers/crypto/hisilicon/sec2/sec_main.c | 17 ++++++++--------- drivers/crypto/hisilicon/zip/zip_main.c | 19 ++++++++----------- 6 files changed, 22 insertions(+), 29 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/hisilicon/hpre/hpre.h b/drivers/crypto/hisilicon/hpre/hpre.h index 03d512ec6336..0a8ba468e2be 100644 --- a/drivers/crypto/hisilicon/hpre/hpre.h +++ b/drivers/crypto/hisilicon/hpre/hpre.h @@ -47,7 +47,6 @@ struct hpre_debug { struct hpre { struct hisi_qm qm; struct hpre_debug debug; - u32 num_vfs; unsigned long status; }; diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c index 88be53bf4a38..5269e5b9be73 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_main.c +++ b/drivers/crypto/hisilicon/hpre/hpre_main.c @@ -354,9 +354,7 @@ static u32 hpre_current_qm_read(struct hpre_debugfs_file *file) static int hpre_current_qm_write(struct hpre_debugfs_file *file, u32 val) { struct hisi_qm *qm = hpre_file_to_qm(file); - struct hpre_debug *debug = file->debug; - struct hpre *hpre = container_of(debug, struct hpre, debug); - u32 num_vfs = hpre->num_vfs; + u32 num_vfs = qm->vfs_num; u32 vfq_num, tmp; @@ -827,7 +825,7 @@ static int hpre_vf_q_assign(struct hpre *hpre, int num_vfs) static int hpre_clear_vft_config(struct hpre *hpre) { struct hisi_qm *qm = &hpre->qm; - u32 num_vfs = hpre->num_vfs; + u32 num_vfs = qm->vfs_num; int ret; u32 i; @@ -836,7 +834,7 @@ static int hpre_clear_vft_config(struct hpre *hpre) if (ret) return ret; } - hpre->num_vfs = 0; + qm->vfs_num = 0; return 0; } @@ -860,7 +858,7 @@ static int hpre_sriov_enable(struct pci_dev *pdev, int max_vfs) return ret; } - hpre->num_vfs = num_vfs; + hpre->qm.vfs_num = num_vfs; ret = pci_enable_sriov(pdev, num_vfs); if (ret) { @@ -903,7 +901,7 @@ static void hpre_remove(struct pci_dev *pdev) hpre_algs_unregister(); hisi_qm_del_from_list(qm, &hpre_devices); - if (qm->fun_type == QM_HW_PF && hpre->num_vfs != 0) { + if (qm->fun_type == QM_HW_PF && qm->vfs_num) { ret = hpre_sriov_disable(pdev); if (ret) { pci_err(pdev, "Disable SRIOV fail!\n"); diff --git a/drivers/crypto/hisilicon/qm.h b/drivers/crypto/hisilicon/qm.h index ec5b6f48db6c..33c5a8edd2ae 100644 --- a/drivers/crypto/hisilicon/qm.h +++ b/drivers/crypto/hisilicon/qm.h @@ -161,6 +161,7 @@ struct hisi_qm { u32 qp_num; u32 qp_in_used; u32 ctrl_qp_num; + u32 vfs_num; struct list_head list; struct qm_dma qdma; diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h index 3598fa17beb2..2326634a1d71 100644 --- a/drivers/crypto/hisilicon/sec2/sec.h +++ b/drivers/crypto/hisilicon/sec2/sec.h @@ -172,7 +172,6 @@ struct sec_dev { struct sec_debug debug; u32 ctx_q_num; bool iommu_used; - u32 num_vfs; unsigned long status; }; diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index 1f54ebe164b6..ef26239ec360 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ 
b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -424,23 +424,22 @@ static u32 sec_current_qm_read(struct sec_debug_file *file) static int sec_current_qm_write(struct sec_debug_file *file, u32 val) { struct hisi_qm *qm = file->qm; - struct sec_dev *sec = container_of(qm, struct sec_dev, qm); u32 vfq_num; u32 tmp; - if (val > sec->num_vfs) + if (val > qm->vfs_num) return -EINVAL; /* According PF or VF Dev ID to calculation curr_qm_qp_num and store */ if (!val) { qm->debug.curr_qm_qp_num = qm->qp_num; } else { - vfq_num = (qm->ctrl_qp_num - qm->qp_num) / sec->num_vfs; + vfq_num = (qm->ctrl_qp_num - qm->qp_num) / qm->vfs_num; - if (val == sec->num_vfs) + if (val == qm->vfs_num) qm->debug.curr_qm_qp_num = qm->ctrl_qp_num - qm->qp_num - - (sec->num_vfs - 1) * vfq_num; + (qm->vfs_num - 1) * vfq_num; else qm->debug.curr_qm_qp_num = vfq_num; } @@ -926,7 +925,7 @@ static int sec_vf_q_assign(struct sec_dev *sec, u32 num_vfs) static int sec_clear_vft_config(struct sec_dev *sec) { struct hisi_qm *qm = &sec->qm; - u32 num_vfs = sec->num_vfs; + u32 num_vfs = qm->vfs_num; int ret; u32 i; @@ -936,7 +935,7 @@ static int sec_clear_vft_config(struct sec_dev *sec) return ret; } - sec->num_vfs = 0; + qm->vfs_num = 0; return 0; } @@ -962,7 +961,7 @@ static int sec_sriov_enable(struct pci_dev *pdev, int max_vfs) return ret; } - sec->num_vfs = num_vfs; + sec->qm.vfs_num = num_vfs; ret = pci_enable_sriov(pdev, num_vfs); if (ret) { @@ -1006,7 +1005,7 @@ static void sec_remove(struct pci_dev *pdev) hisi_qm_del_from_list(qm, &sec_devices); - if (qm->fun_type == QM_HW_PF && sec->num_vfs) + if (qm->fun_type == QM_HW_PF && qm->vfs_num) (void)sec_sriov_disable(pdev); sec_debugfs_exit(sec); diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c index fcc85d2dbd07..f5ffa0297730 100644 --- a/drivers/crypto/hisilicon/zip/zip_main.c +++ b/drivers/crypto/hisilicon/zip/zip_main.c @@ -134,7 +134,6 @@ struct ctrl_debug_file { * Just relevant for PF. 
*/ struct hisi_zip_ctrl { - u32 num_vfs; struct hisi_zip *hisi_zip; struct dentry *debug_root; struct ctrl_debug_file files[HZIP_DEBUG_FILE_NUM]; @@ -342,21 +341,20 @@ static u32 current_qm_read(struct ctrl_debug_file *file) static int current_qm_write(struct ctrl_debug_file *file, u32 val) { struct hisi_qm *qm = file_to_qm(file); - struct hisi_zip_ctrl *ctrl = file->ctrl; u32 vfq_num; u32 tmp; - if (val > ctrl->num_vfs) + if (val > qm->vfs_num) return -EINVAL; /* Calculate curr_qm_qp_num and store */ if (val == 0) { qm->debug.curr_qm_qp_num = qm->qp_num; } else { - vfq_num = (qm->ctrl_qp_num - qm->qp_num) / ctrl->num_vfs; - if (val == ctrl->num_vfs) + vfq_num = (qm->ctrl_qp_num - qm->qp_num) / qm->vfs_num; + if (val == qm->vfs_num) qm->debug.curr_qm_qp_num = qm->ctrl_qp_num - - qm->qp_num - (ctrl->num_vfs - 1) * vfq_num; + qm->qp_num - (qm->vfs_num - 1) * vfq_num; else qm->debug.curr_qm_qp_num = vfq_num; } @@ -686,9 +684,8 @@ static int hisi_zip_vf_q_assign(struct hisi_zip *hisi_zip, int num_vfs) static int hisi_zip_clear_vft_config(struct hisi_zip *hisi_zip) { - struct hisi_zip_ctrl *ctrl = hisi_zip->ctrl; struct hisi_qm *qm = &hisi_zip->qm; - u32 i, num_vfs = ctrl->num_vfs; + u32 i, num_vfs = qm->vfs_num; int ret; for (i = 1; i <= num_vfs; i++) { @@ -697,7 +694,7 @@ static int hisi_zip_clear_vft_config(struct hisi_zip *hisi_zip) return ret; } - ctrl->num_vfs = 0; + qm->vfs_num = 0; return 0; } @@ -723,7 +720,7 @@ static int hisi_zip_sriov_enable(struct pci_dev *pdev, int max_vfs) return ret; } - hisi_zip->ctrl->num_vfs = num_vfs; + hisi_zip->qm.vfs_num = num_vfs; ret = pci_enable_sriov(pdev, num_vfs); if (ret) { @@ -852,7 +849,7 @@ static void hisi_zip_remove(struct pci_dev *pdev) struct hisi_zip *hisi_zip = pci_get_drvdata(pdev); struct hisi_qm *qm = &hisi_zip->qm; - if (qm->fun_type == QM_HW_PF && hisi_zip->ctrl->num_vfs != 0) + if (qm->fun_type == QM_HW_PF && qm->vfs_num) hisi_zip_sriov_disable(pdev); hisi_zip_debugfs_exit(hisi_zip); -- cgit v1.2.3 From cd1b7ae3435cc428579615241adeee36d217320c Mon Sep 17 00:00:00 2001 From: Shukun Tan Date: Thu, 2 Apr 2020 14:53:02 +0800 Subject: crypto: hisilicon - unify SR-IOV related codes into QM Clean the duplicate SR-IOV related codes, put all into qm.c. Signed-off-by: Shukun Tan Reviewed-by: Zhou Wang Reviewed-by: Zaibo Xu Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/hpre/hpre_main.c | 105 +---------------------- drivers/crypto/hisilicon/qm.c | 136 ++++++++++++++++++++++++++++-- drivers/crypto/hisilicon/qm.h | 4 +- drivers/crypto/hisilicon/sec2/sec_main.c | 108 +----------------------- drivers/crypto/hisilicon/zip/zip_main.c | 109 +----------------------- 5 files changed, 138 insertions(+), 324 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c index 5269e5b9be73..4e41d308f0a9 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_main.c +++ b/drivers/crypto/hisilicon/hpre/hpre_main.c @@ -792,107 +792,6 @@ err_with_qm_init: return ret; } -static int hpre_vf_q_assign(struct hpre *hpre, int num_vfs) -{ - struct hisi_qm *qm = &hpre->qm; - u32 qp_num = qm->qp_num; - int q_num, remain_q_num, i; - u32 q_base = qp_num; - int ret; - - if (!num_vfs) - return -EINVAL; - - remain_q_num = qm->ctrl_qp_num - qp_num; - - /* If remaining queues are not enough, return error. 
*/ - if (remain_q_num < num_vfs) - return -EINVAL; - - q_num = remain_q_num / num_vfs; - for (i = 1; i <= num_vfs; i++) { - if (i == num_vfs) - q_num += remain_q_num % num_vfs; - ret = hisi_qm_set_vft(qm, i, q_base, (u32)q_num); - if (ret) - return ret; - q_base += q_num; - } - - return 0; -} - -static int hpre_clear_vft_config(struct hpre *hpre) -{ - struct hisi_qm *qm = &hpre->qm; - u32 num_vfs = qm->vfs_num; - int ret; - u32 i; - - for (i = 1; i <= num_vfs; i++) { - ret = hisi_qm_set_vft(qm, i, 0, 0); - if (ret) - return ret; - } - qm->vfs_num = 0; - - return 0; -} - -static int hpre_sriov_enable(struct pci_dev *pdev, int max_vfs) -{ - struct hpre *hpre = pci_get_drvdata(pdev); - int pre_existing_vfs, num_vfs, ret; - - pre_existing_vfs = pci_num_vf(pdev); - if (pre_existing_vfs) { - pci_err(pdev, - "Can't enable VF. Please disable pre-enabled VFs!\n"); - return 0; - } - - num_vfs = min_t(int, max_vfs, HPRE_VF_NUM); - ret = hpre_vf_q_assign(hpre, num_vfs); - if (ret) { - pci_err(pdev, "Can't assign queues for VF!\n"); - return ret; - } - - hpre->qm.vfs_num = num_vfs; - - ret = pci_enable_sriov(pdev, num_vfs); - if (ret) { - pci_err(pdev, "Can't enable VF!\n"); - hpre_clear_vft_config(hpre); - return ret; - } - - return num_vfs; -} - -static int hpre_sriov_disable(struct pci_dev *pdev) -{ - struct hpre *hpre = pci_get_drvdata(pdev); - - if (pci_vfs_assigned(pdev)) { - pci_err(pdev, "Failed to disable VFs while VFs are assigned!\n"); - return -EPERM; - } - - /* remove in hpre_pci_driver will be called to free VF resources */ - pci_disable_sriov(pdev); - - return hpre_clear_vft_config(hpre); -} - -static int hpre_sriov_configure(struct pci_dev *pdev, int num_vfs) -{ - if (num_vfs) - return hpre_sriov_enable(pdev, num_vfs); - else - return hpre_sriov_disable(pdev); -} - static void hpre_remove(struct pci_dev *pdev) { struct hpre *hpre = pci_get_drvdata(pdev); @@ -902,7 +801,7 @@ static void hpre_remove(struct pci_dev *pdev) hpre_algs_unregister(); hisi_qm_del_from_list(qm, &hpre_devices); if (qm->fun_type == QM_HW_PF && qm->vfs_num) { - ret = hpre_sriov_disable(pdev); + ret = hisi_qm_sriov_disable(pdev); if (ret) { pci_err(pdev, "Disable SRIOV fail!\n"); return; @@ -929,7 +828,7 @@ static struct pci_driver hpre_pci_driver = { .id_table = hpre_dev_ids, .probe = hpre_probe, .remove = hpre_remove, - .sriov_configure = hpre_sriov_configure, + .sriov_configure = hisi_qm_sriov_configure, .err_handler = &hpre_err_handler, }; diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index f795fb557630..7c2dedc12d13 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -1781,12 +1781,6 @@ int hisi_qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number) EXPORT_SYMBOL_GPL(hisi_qm_get_vft); /** - * hisi_qm_set_vft() - Set "virtual function table" for a qm. - * @fun_num: Number of operated function. - * @qm: The qm in which to set vft, alway in a PF. - * @base: The base number of queue in vft. - * @number: The number of queues in vft. 0 means invalid vft. - * * This function is alway called in PF driver, it is used to assign queues * among PF and VFs. 
* @@ -1794,7 +1788,7 @@ EXPORT_SYMBOL_GPL(hisi_qm_get_vft); * Assign queues A~B to VF: hisi_qm_set_vft(qm, 2, A, B - A + 1) * (VF function number 0x2) */ -int hisi_qm_set_vft(struct hisi_qm *qm, u32 fun_num, u32 base, +static int hisi_qm_set_vft(struct hisi_qm *qm, u32 fun_num, u32 base, u32 number) { u32 max_q_num = qm->ctrl_qp_num; @@ -1805,7 +1799,6 @@ int hisi_qm_set_vft(struct hisi_qm *qm, u32 fun_num, u32 base, return qm_set_sqc_cqc_vft(qm, fun_num, base, number); } -EXPORT_SYMBOL_GPL(hisi_qm_set_vft); static void qm_init_eq_aeq_status(struct hisi_qm *qm) { @@ -2299,6 +2292,133 @@ err: } EXPORT_SYMBOL_GPL(hisi_qm_alloc_qps_node); +static int qm_vf_q_assign(struct hisi_qm *qm, u32 num_vfs) +{ + u32 remain_q_num, q_num, i, j; + u32 q_base = qm->qp_num; + int ret; + + if (!num_vfs) + return -EINVAL; + + remain_q_num = qm->ctrl_qp_num - qm->qp_num; + + /* If remain queues not enough, return error. */ + if (qm->ctrl_qp_num < qm->qp_num || remain_q_num < num_vfs) + return -EINVAL; + + q_num = remain_q_num / num_vfs; + for (i = 1; i <= num_vfs; i++) { + if (i == num_vfs) + q_num += remain_q_num % num_vfs; + ret = hisi_qm_set_vft(qm, i, q_base, q_num); + if (ret) { + for (j = i; j > 0; j--) + hisi_qm_set_vft(qm, j, 0, 0); + return ret; + } + q_base += q_num; + } + + return 0; +} + +static int qm_clear_vft_config(struct hisi_qm *qm) +{ + int ret; + u32 i; + + for (i = 1; i <= qm->vfs_num; i++) { + ret = hisi_qm_set_vft(qm, i, 0, 0); + if (ret) + return ret; + } + qm->vfs_num = 0; + + return 0; +} + +/** + * hisi_qm_sriov_enable() - enable virtual functions + * @pdev: the PCIe device + * @max_vfs: the number of virtual functions to enable + * + * Returns the number of enabled VFs. If there are VFs enabled already or + * max_vfs is more than the total number of device can be enabled, returns + * failure. + */ +int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs) +{ + struct hisi_qm *qm = pci_get_drvdata(pdev); + int pre_existing_vfs, num_vfs, total_vfs, ret; + + total_vfs = pci_sriov_get_totalvfs(pdev); + pre_existing_vfs = pci_num_vf(pdev); + if (pre_existing_vfs) { + pci_err(pdev, "%d VFs already enabled. Please disable pre-enabled VFs!\n", + pre_existing_vfs); + return 0; + } + + num_vfs = min_t(int, max_vfs, total_vfs); + ret = qm_vf_q_assign(qm, num_vfs); + if (ret) { + pci_err(pdev, "Can't assign queues for VF!\n"); + return ret; + } + + qm->vfs_num = num_vfs; + + ret = pci_enable_sriov(pdev, num_vfs); + if (ret) { + pci_err(pdev, "Can't enable VF!\n"); + qm_clear_vft_config(qm); + return ret; + } + + pci_info(pdev, "VF enabled, vfs_num(=%d)!\n", num_vfs); + + return num_vfs; +} +EXPORT_SYMBOL_GPL(hisi_qm_sriov_enable); + +/** + * hisi_qm_sriov_disable - disable virtual functions + * @pdev: the PCI device + * + * Return failure if there are VFs assigned already. + */ +int hisi_qm_sriov_disable(struct pci_dev *pdev) +{ + struct hisi_qm *qm = pci_get_drvdata(pdev); + + if (pci_vfs_assigned(pdev)) { + pci_err(pdev, "Failed to disable VFs as VFs are assigned!\n"); + return -EPERM; + } + + /* remove in hpre_pci_driver will be called to free VF resources */ + pci_disable_sriov(pdev); + return qm_clear_vft_config(qm); +} +EXPORT_SYMBOL_GPL(hisi_qm_sriov_disable); + +/** + * hisi_qm_sriov_configure - configure the number of VFs + * @pdev: The PCI device + * @num_vfs: The number of VFs need enabled + * + * Enable SR-IOV according to num_vfs, 0 means disable. 
+ */ +int hisi_qm_sriov_configure(struct pci_dev *pdev, int num_vfs) +{ + if (num_vfs == 0) + return hisi_qm_sriov_disable(pdev); + else + return hisi_qm_sriov_enable(pdev, num_vfs); +} +EXPORT_SYMBOL_GPL(hisi_qm_sriov_configure); + static pci_ers_result_t qm_dev_err_handle(struct hisi_qm *qm) { u32 err_sts; diff --git a/drivers/crypto/hisilicon/qm.h b/drivers/crypto/hisilicon/qm.h index 33c5a8edd2ae..665e53d8d958 100644 --- a/drivers/crypto/hisilicon/qm.h +++ b/drivers/crypto/hisilicon/qm.h @@ -268,10 +268,12 @@ void hisi_qm_release_qp(struct hisi_qp *qp); int hisi_qp_send(struct hisi_qp *qp, const void *msg); int hisi_qm_get_free_qp_num(struct hisi_qm *qm); int hisi_qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number); -int hisi_qm_set_vft(struct hisi_qm *qm, u32 fun_num, u32 base, u32 number); int hisi_qm_debug_init(struct hisi_qm *qm); enum qm_hw_ver hisi_qm_get_hw_version(struct pci_dev *pdev); void hisi_qm_debug_regs_clear(struct hisi_qm *qm); +int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs); +int hisi_qm_sriov_disable(struct pci_dev *pdev); +int hisi_qm_sriov_configure(struct pci_dev *pdev, int num_vfs); void hisi_qm_dev_err_init(struct hisi_qm *qm); void hisi_qm_dev_err_uninit(struct hisi_qm *qm); pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev, diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index ef26239ec360..129648a49114 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -892,110 +892,6 @@ err_qm_uninit: return ret; } -/* now we only support equal assignment */ -static int sec_vf_q_assign(struct sec_dev *sec, u32 num_vfs) -{ - struct hisi_qm *qm = &sec->qm; - u32 qp_num = qm->qp_num; - u32 q_base = qp_num; - u32 q_num, remain_q_num; - int i, j, ret; - - if (!num_vfs) - return -EINVAL; - - remain_q_num = qm->ctrl_qp_num - qp_num; - q_num = remain_q_num / num_vfs; - - for (i = 1; i <= num_vfs; i++) { - if (i == num_vfs) - q_num += remain_q_num % num_vfs; - ret = hisi_qm_set_vft(qm, i, q_base, q_num); - if (ret) { - for (j = i; j > 0; j--) - hisi_qm_set_vft(qm, j, 0, 0); - return ret; - } - q_base += q_num; - } - - return 0; -} - -static int sec_clear_vft_config(struct sec_dev *sec) -{ - struct hisi_qm *qm = &sec->qm; - u32 num_vfs = qm->vfs_num; - int ret; - u32 i; - - for (i = 1; i <= num_vfs; i++) { - ret = hisi_qm_set_vft(qm, i, 0, 0); - if (ret) - return ret; - } - - qm->vfs_num = 0; - - return 0; -} - -static int sec_sriov_enable(struct pci_dev *pdev, int max_vfs) -{ - struct sec_dev *sec = pci_get_drvdata(pdev); - int pre_existing_vfs, ret; - u32 num_vfs; - - pre_existing_vfs = pci_num_vf(pdev); - - if (pre_existing_vfs) { - pci_err(pdev, "Can't enable VF. 
Please disable at first!\n"); - return 0; - } - - num_vfs = min_t(u32, max_vfs, SEC_VF_NUM); - - ret = sec_vf_q_assign(sec, num_vfs); - if (ret) { - pci_err(pdev, "Can't assign queues for VF!\n"); - return ret; - } - - sec->qm.vfs_num = num_vfs; - - ret = pci_enable_sriov(pdev, num_vfs); - if (ret) { - pci_err(pdev, "Can't enable VF!\n"); - sec_clear_vft_config(sec); - return ret; - } - - return num_vfs; -} - -static int sec_sriov_disable(struct pci_dev *pdev) -{ - struct sec_dev *sec = pci_get_drvdata(pdev); - - if (pci_vfs_assigned(pdev)) { - pci_err(pdev, "Can't disable VFs while VFs are assigned!\n"); - return -EPERM; - } - - /* remove in sec_pci_driver will be called to free VF resources */ - pci_disable_sriov(pdev); - - return sec_clear_vft_config(sec); -} - -static int sec_sriov_configure(struct pci_dev *pdev, int num_vfs) -{ - if (num_vfs) - return sec_sriov_enable(pdev, num_vfs); - else - return sec_sriov_disable(pdev); -} - static void sec_remove(struct pci_dev *pdev) { struct sec_dev *sec = pci_get_drvdata(pdev); @@ -1006,7 +902,7 @@ static void sec_remove(struct pci_dev *pdev) hisi_qm_del_from_list(qm, &sec_devices); if (qm->fun_type == QM_HW_PF && qm->vfs_num) - (void)sec_sriov_disable(pdev); + hisi_qm_sriov_disable(pdev); sec_debugfs_exit(sec); @@ -1030,7 +926,7 @@ static struct pci_driver sec_pci_driver = { .probe = sec_probe, .remove = sec_remove, .err_handler = &sec_err_handler, - .sriov_configure = sec_sriov_configure, + .sriov_configure = hisi_qm_sriov_configure, }; static void sec_register_debugfs(void) diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c index f5ffa0297730..5dcda7b9856c 100644 --- a/drivers/crypto/hisilicon/zip/zip_main.c +++ b/drivers/crypto/hisilicon/zip/zip_main.c @@ -653,101 +653,6 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip) return 0; } -/* Currently we only support equal assignment */ -static int hisi_zip_vf_q_assign(struct hisi_zip *hisi_zip, int num_vfs) -{ - struct hisi_qm *qm = &hisi_zip->qm; - u32 qp_num = qm->qp_num; - u32 q_base = qp_num; - u32 q_num, remain_q_num, i; - int ret; - - if (!num_vfs) - return -EINVAL; - - remain_q_num = qm->ctrl_qp_num - qp_num; - if (remain_q_num < num_vfs) - return -EINVAL; - - q_num = remain_q_num / num_vfs; - for (i = 1; i <= num_vfs; i++) { - if (i == num_vfs) - q_num += remain_q_num % num_vfs; - ret = hisi_qm_set_vft(qm, i, q_base, q_num); - if (ret) - return ret; - q_base += q_num; - } - - return 0; -} - -static int hisi_zip_clear_vft_config(struct hisi_zip *hisi_zip) -{ - struct hisi_qm *qm = &hisi_zip->qm; - u32 i, num_vfs = qm->vfs_num; - int ret; - - for (i = 1; i <= num_vfs; i++) { - ret = hisi_qm_set_vft(qm, i, 0, 0); - if (ret) - return ret; - } - - qm->vfs_num = 0; - - return 0; -} - -static int hisi_zip_sriov_enable(struct pci_dev *pdev, int max_vfs) -{ - struct hisi_zip *hisi_zip = pci_get_drvdata(pdev); - int pre_existing_vfs, num_vfs, ret; - - pre_existing_vfs = pci_num_vf(pdev); - - if (pre_existing_vfs) { - dev_err(&pdev->dev, - "Can't enable VF. 
Please disable pre-enabled VFs!\n"); - return 0; - } - - num_vfs = min_t(int, max_vfs, HZIP_VF_NUM); - - ret = hisi_zip_vf_q_assign(hisi_zip, num_vfs); - if (ret) { - dev_err(&pdev->dev, "Can't assign queues for VF!\n"); - return ret; - } - - hisi_zip->qm.vfs_num = num_vfs; - - ret = pci_enable_sriov(pdev, num_vfs); - if (ret) { - dev_err(&pdev->dev, "Can't enable VF!\n"); - hisi_zip_clear_vft_config(hisi_zip); - return ret; - } - - return num_vfs; -} - -static int hisi_zip_sriov_disable(struct pci_dev *pdev) -{ - struct hisi_zip *hisi_zip = pci_get_drvdata(pdev); - - if (pci_vfs_assigned(pdev)) { - dev_err(&pdev->dev, - "Can't disable VFs while VFs are assigned!\n"); - return -EPERM; - } - - /* remove in hisi_zip_pci_driver will be called to free VF resources */ - pci_disable_sriov(pdev); - - return hisi_zip_clear_vft_config(hisi_zip); -} - static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct hisi_zip *hisi_zip; @@ -820,7 +725,7 @@ static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id) } if (qm->fun_type == QM_HW_PF && vfs_num > 0) { - ret = hisi_zip_sriov_enable(pdev, vfs_num); + ret = hisi_qm_sriov_enable(pdev, vfs_num); if (ret < 0) goto err_remove_from_list; } @@ -836,21 +741,13 @@ err_qm_uninit: return ret; } -static int hisi_zip_sriov_configure(struct pci_dev *pdev, int num_vfs) -{ - if (num_vfs == 0) - return hisi_zip_sriov_disable(pdev); - else - return hisi_zip_sriov_enable(pdev, num_vfs); -} - static void hisi_zip_remove(struct pci_dev *pdev) { struct hisi_zip *hisi_zip = pci_get_drvdata(pdev); struct hisi_qm *qm = &hisi_zip->qm; if (qm->fun_type == QM_HW_PF && qm->vfs_num) - hisi_zip_sriov_disable(pdev); + hisi_qm_sriov_disable(pdev); hisi_zip_debugfs_exit(hisi_zip); hisi_qm_stop(qm); @@ -870,7 +767,7 @@ static struct pci_driver hisi_zip_pci_driver = { .probe = hisi_zip_probe, .remove = hisi_zip_remove, .sriov_configure = IS_ENABLED(CONFIG_PCI_IOV) ? - hisi_zip_sriov_configure : NULL, + hisi_qm_sriov_configure : NULL, .err_handler = &hisi_zip_err_handler, }; -- cgit v1.2.3 From 35ee280fb1fb97ffa32a19953457becb4f45579b Mon Sep 17 00:00:00 2001 From: Hao Fang Date: Thu, 2 Apr 2020 14:53:03 +0800 Subject: crypto: hisilicon - add vfs_num module parameter for hpre/sec The vfs_num module parameter has been used in zip driver, this patch adds this for HPRE and SEC driver. 
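
For readers skimming the diffstat, what HPRE and SEC gain here is the same bounded-parameter pattern the zip driver already uses; a minimal sketch follows (the "example_" names are placeholders only — the real helper this series adds is vfs_num_set() in qm.h, with QM_MAX_VFS_NUM_V2 = 63):

#include <linux/module.h>
#include <linux/kernel.h>

/* Reject anything that is not a number in [0, 63] before storing it. */
static int example_vfs_num_set(const char *val, const struct kernel_param *kp)
{
	u32 n;
	int ret;

	ret = kstrtou32(val, 10, &n);
	if (ret)
		return ret;

	if (n > 63)			/* hardware limit, QM_MAX_VFS_NUM_V2 */
		return -EINVAL;

	return param_set_int(val, kp);	/* store it like a plain int parameter */
}

static const struct kernel_param_ops example_vfs_num_ops = {
	.set = example_vfs_num_set,
	.get = param_get_int,
};

static u32 example_vfs_num;
module_param_cb(vfs_num, &example_vfs_num_ops, &example_vfs_num, 0444);
MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)");

Because the permission is 0444, the value can only be supplied at load time (kernel command line or modprobe); probe() then passes it to hisi_qm_sriov_enable() when running as a PF.
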
Signed-off-by: Hao Fang Signed-off-by: Shukun Tan Reviewed-by: Zhou Wang Reviewed-by: Zaibo Xu Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/hpre/hpre_main.c | 19 +++++++++++++++++++ drivers/crypto/hisilicon/qm.h | 20 ++++++++++++++++++++ drivers/crypto/hisilicon/sec2/sec_main.c | 18 ++++++++++++++++++ drivers/crypto/hisilicon/zip/zip_main.c | 9 +++++++-- 4 files changed, 64 insertions(+), 2 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c index 4e41d308f0a9..9cff5c1b7c9b 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_main.c +++ b/drivers/crypto/hisilicon/hpre/hpre_main.c @@ -195,6 +195,15 @@ static u32 hpre_pf_q_num = HPRE_PF_DEF_Q_NUM; module_param_cb(hpre_pf_q_num, &hpre_pf_q_num_ops, &hpre_pf_q_num, 0444); MODULE_PARM_DESC(hpre_pf_q_num, "Number of queues in PF of CS(1-1024)"); +static const struct kernel_param_ops vfs_num_ops = { + .set = vfs_num_set, + .get = param_get_int, +}; + +static u32 vfs_num; +module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444); +MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)"); + struct hisi_qp *hpre_create_qp(void) { int node = cpu_to_node(smp_processor_id()); @@ -777,8 +786,18 @@ static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id) pci_err(pdev, "fail to register algs to crypto!\n"); goto err_with_qm_start; } + + if (qm->fun_type == QM_HW_PF && vfs_num) { + ret = hisi_qm_sriov_enable(pdev, vfs_num); + if (ret < 0) + goto err_with_crypto_register; + } + return 0; +err_with_crypto_register: + hpre_algs_unregister(); + err_with_qm_start: hisi_qm_del_from_list(qm, &hpre_devices); hisi_qm_stop(qm); diff --git a/drivers/crypto/hisilicon/qm.h b/drivers/crypto/hisilicon/qm.h index 665e53d8d958..1b5171b91141 100644 --- a/drivers/crypto/hisilicon/qm.h +++ b/drivers/crypto/hisilicon/qm.h @@ -8,6 +8,8 @@ #include #include +#define QM_MAX_VFS_NUM_V2 63 + /* qm user domain */ #define QM_ARUSER_M_CFG_1 0x100088 #define AXUSER_SNOOP_ENABLE BIT(30) @@ -235,6 +237,24 @@ struct hisi_qp { struct uacce_queue *uacce_q; }; +static inline int vfs_num_set(const char *val, const struct kernel_param *kp) +{ + u32 n; + int ret; + + if (!val) + return -EINVAL; + + ret = kstrtou32(val, 10, &n); + if (ret < 0) + return ret; + + if (n > QM_MAX_VFS_NUM_V2) + return -EINVAL; + + return param_set_int(val, kp); +} + static inline void hisi_qm_init_list(struct hisi_qm_list *qm_list) { INIT_LIST_HEAD(&qm_list->list); diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index 129648a49114..c76c49ed883c 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -207,6 +207,15 @@ static u32 ctx_q_num = SEC_CTX_Q_NUM_DEF; module_param_cb(ctx_q_num, &sec_ctx_q_num_ops, &ctx_q_num, 0444); MODULE_PARM_DESC(ctx_q_num, "Queue num in ctx (24 default, 2, 4, ..., 32)"); +static const struct kernel_param_ops vfs_num_ops = { + .set = vfs_num_set, + .get = param_get_int, +}; + +static u32 vfs_num; +module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444); +MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)"); + void sec_destroy_qps(struct hisi_qp **qps, int qp_num) { hisi_qm_free_qps(qps, qp_num); @@ -876,8 +885,17 @@ static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto err_remove_from_list; } + if (qm->fun_type == QM_HW_PF && vfs_num) { + ret = hisi_qm_sriov_enable(pdev, vfs_num); + if (ret < 0) + goto err_crypto_unregister; + } 
+ return 0; +err_crypto_unregister: + sec_unregister_from_crypto(); + err_remove_from_list: hisi_qm_del_from_list(qm, &sec_devices); sec_debugfs_exit(sec); diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c index 5dcda7b9856c..fe9d6d29eed2 100644 --- a/drivers/crypto/hisilicon/zip/zip_main.c +++ b/drivers/crypto/hisilicon/zip/zip_main.c @@ -231,9 +231,14 @@ static u32 pf_q_num = HZIP_PF_DEF_Q_NUM; module_param_cb(pf_q_num, &pf_q_num_ops, &pf_q_num, 0444); MODULE_PARM_DESC(pf_q_num, "Number of queues in PF(v1 1-4096, v2 1-1024)"); +static const struct kernel_param_ops vfs_num_ops = { + .set = vfs_num_set, + .get = param_get_int, +}; + static u32 vfs_num; -module_param(vfs_num, uint, 0444); -MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63)"); +module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444); +MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)"); static const struct pci_device_id hisi_zip_dev_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_ZIP_PF) }, -- cgit v1.2.3 From 6c6dd5802c2d6769fa589c0e8de54299def199a7 Mon Sep 17 00:00:00 2001 From: Shukun Tan Date: Fri, 3 Apr 2020 16:16:38 +0800 Subject: crypto: hisilicon/qm - add controller reset interface Add the main implementation of the controller reset interface, which is roughly divided into three parts, stop, reset, and reinitialization. Signed-off-by: Shukun Tan Reviewed-by: Zhou Wang Reviewed-by: Zaibo Xu Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/qm.c | 544 ++++++++++++++++++++++++++++++++++++++++++ drivers/crypto/hisilicon/qm.h | 16 ++ 2 files changed, 560 insertions(+) (limited to 'drivers/crypto') diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index 7c2dedc12d13..98e65c5b0c4a 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -1,6 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2019 HiSilicon Limited. 
*/ #include +#include +#include #include #include #include @@ -122,9 +124,11 @@ #define QM_DFX_CNT_CLR_CE 0x100118 #define QM_ABNORMAL_INT_SOURCE 0x100000 +#define QM_ABNORMAL_INT_SOURCE_CLR GENMASK(12, 0) #define QM_ABNORMAL_INT_MASK 0x100004 #define QM_ABNORMAL_INT_MASK_VALUE 0x1fff #define QM_ABNORMAL_INT_STATUS 0x100008 +#define QM_ABNORMAL_INT_SET 0x10000c #define QM_ABNORMAL_INF00 0x100010 #define QM_FIFO_OVERFLOW_TYPE 0xc0 #define QM_FIFO_OVERFLOW_TYPE_SHIFT 6 @@ -140,6 +144,25 @@ #define QM_RAS_CE_TIMES_PER_IRQ 1 #define QM_RAS_MSI_INT_SEL 0x1040f4 +#define QM_DEV_RESET_FLAG 0 +#define QM_RESET_WAIT_TIMEOUT 400 +#define QM_PEH_VENDOR_ID 0x1000d8 +#define ACC_VENDOR_ID_VALUE 0x5a5a +#define QM_PEH_DFX_INFO0 0x1000fc +#define ACC_PEH_SRIOV_CTRL_VF_MSE_SHIFT 3 +#define ACC_PEH_MSI_DISABLE GENMASK(31, 0) +#define ACC_MASTER_GLOBAL_CTRL_SHUTDOWN 0x1 +#define ACC_MASTER_TRANS_RETURN_RW 3 +#define ACC_MASTER_TRANS_RETURN 0x300150 +#define ACC_MASTER_GLOBAL_CTRL 0x300000 +#define ACC_AM_CFG_PORT_WR_EN 0x30001c +#define QM_RAS_NFE_MBIT_DISABLE ~QM_ECC_MBIT +#define ACC_AM_ROB_ECC_INT_STS 0x300104 +#define ACC_ROB_ECC_ERR_MULTPL BIT(1) + +#define POLL_PERIOD 10 +#define POLL_TIMEOUT 1000 +#define MAX_WAIT_COUNTS 1000 #define QM_CACHE_WB_START 0x204 #define QM_CACHE_WB_DONE 0x208 @@ -1012,10 +1035,18 @@ static void qm_hw_error_init_v2(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe, { u32 irq_enable = ce | nfe | fe | msi; u32 irq_unmask = ~irq_enable; + u32 error_status; qm->error_mask = ce | nfe | fe; qm->msi_mask = msi; + /* clear QM hw residual error source */ + error_status = readl(qm->io_base + QM_ABNORMAL_INT_STATUS); + if (error_status) { + error_status &= qm->error_mask; + writel(error_status, qm->io_base + QM_ABNORMAL_INT_SOURCE); + } + /* configure error type */ writel(ce, qm->io_base + QM_RAS_CE_ENABLE); writel(QM_RAS_CE_TIMES_PER_IRQ, qm->io_base + QM_RAS_CE_THRESHOLD); @@ -1080,6 +1111,9 @@ static pci_ers_result_t qm_hw_error_handle_v2(struct hisi_qm *qm) error_status = qm->error_mask & tmp; if (error_status) { + if (error_status & QM_ECC_MBIT) + qm->err_status.is_qm_ecc_mbit = true; + qm_log_hw_error(qm, error_status); /* clear err sts */ @@ -1971,6 +2005,52 @@ int hisi_qm_start(struct hisi_qm *qm) } EXPORT_SYMBOL_GPL(hisi_qm_start); +static int qm_restart(struct hisi_qm *qm) +{ + struct device *dev = &qm->pdev->dev; + struct hisi_qp *qp; + int ret, i; + + ret = hisi_qm_start(qm); + if (ret < 0) + return ret; + + write_lock(&qm->qps_lock); + for (i = 0; i < qm->qp_num; i++) { + qp = qm->qp_array[i]; + if (qp) { + ret = hisi_qm_start_qp(qp, 0); + if (ret < 0) { + dev_err(dev, "Failed to start qp%d!\n", i); + + write_unlock(&qm->qps_lock); + return ret; + } + } + } + write_unlock(&qm->qps_lock); + + return 0; +} + +/** + * This function clears all queues memory in a qm. Reset of accelerator can + * use this to clear queues. + */ +static void qm_clear_queues(struct hisi_qm *qm) +{ + struct hisi_qp *qp; + int i; + + for (i = 0; i < qm->qp_num; i++) { + qp = qm->qp_array[i]; + if (qp) + memset(qp->qdma.va, 0, qp->qdma.size); + } + + memset(qm->qdma.va, 0, qm->qdma.size); +} + /** * hisi_qm_stop() - Stop a qm. * @qm: The qm which will be stopped. 
@@ -2014,6 +2094,8 @@ int hisi_qm_stop(struct hisi_qm *qm) dev_err(dev, "Failed to set vft!\n"); } + qm_clear_queues(qm); + return ret; } EXPORT_SYMBOL_GPL(hisi_qm_stop); @@ -2431,6 +2513,9 @@ static pci_ers_result_t qm_dev_err_handle(struct hisi_qm *qm) /* get device hardware error status */ err_sts = qm->err_ini->get_dev_hw_err_status(qm); if (err_sts) { + if (err_sts & qm->err_ini->err_info.ecc_2bits_mask) + qm->err_status.is_dev_ecc_mbit = true; + if (!qm->err_ini->log_dev_hw_err) { dev_err(&qm->pdev->dev, "Device doesn't support log hw error!\n"); return PCI_ERS_RESULT_NEED_RESET; @@ -2481,6 +2566,465 @@ pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev, } EXPORT_SYMBOL_GPL(hisi_qm_dev_err_detected); +static int qm_check_req_recv(struct hisi_qm *qm) +{ + struct pci_dev *pdev = qm->pdev; + int ret; + u32 val; + + writel(ACC_VENDOR_ID_VALUE, qm->io_base + QM_PEH_VENDOR_ID); + ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_VENDOR_ID, val, + (val == ACC_VENDOR_ID_VALUE), + POLL_PERIOD, POLL_TIMEOUT); + if (ret) { + dev_err(&pdev->dev, "Fails to read QM reg!\n"); + return ret; + } + + writel(PCI_VENDOR_ID_HUAWEI, qm->io_base + QM_PEH_VENDOR_ID); + ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_VENDOR_ID, val, + (val == PCI_VENDOR_ID_HUAWEI), + POLL_PERIOD, POLL_TIMEOUT); + if (ret) + dev_err(&pdev->dev, "Fails to read QM reg in the second time!\n"); + + return ret; +} + +static int qm_set_pf_mse(struct hisi_qm *qm, bool set) +{ + struct pci_dev *pdev = qm->pdev; + u16 cmd; + int i; + + pci_read_config_word(pdev, PCI_COMMAND, &cmd); + if (set) + cmd |= PCI_COMMAND_MEMORY; + else + cmd &= ~PCI_COMMAND_MEMORY; + + pci_write_config_word(pdev, PCI_COMMAND, cmd); + for (i = 0; i < MAX_WAIT_COUNTS; i++) { + pci_read_config_word(pdev, PCI_COMMAND, &cmd); + if (set == ((cmd & PCI_COMMAND_MEMORY) >> 1)) + return 0; + + udelay(1); + } + + return -ETIMEDOUT; +} + +static int qm_set_vf_mse(struct hisi_qm *qm, bool set) +{ + struct pci_dev *pdev = qm->pdev; + u16 sriov_ctrl; + int pos; + int i; + + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); + pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &sriov_ctrl); + if (set) + sriov_ctrl |= PCI_SRIOV_CTRL_MSE; + else + sriov_ctrl &= ~PCI_SRIOV_CTRL_MSE; + pci_write_config_word(pdev, pos + PCI_SRIOV_CTRL, sriov_ctrl); + + for (i = 0; i < MAX_WAIT_COUNTS; i++) { + pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &sriov_ctrl); + if (set == (sriov_ctrl & PCI_SRIOV_CTRL_MSE) >> + ACC_PEH_SRIOV_CTRL_VF_MSE_SHIFT) + return 0; + + udelay(1); + } + + return -ETIMEDOUT; +} + +static int qm_set_msi(struct hisi_qm *qm, bool set) +{ + struct pci_dev *pdev = qm->pdev; + + if (set) { + pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_MASK_64, + 0); + } else { + pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_MASK_64, + ACC_PEH_MSI_DISABLE); + if (qm->err_status.is_qm_ecc_mbit || + qm->err_status.is_dev_ecc_mbit) + return 0; + + mdelay(1); + if (readl(qm->io_base + QM_PEH_DFX_INFO0)) + return -EFAULT; + } + + return 0; +} + +static int qm_vf_reset_prepare(struct hisi_qm *qm) +{ + struct hisi_qm_list *qm_list = qm->qm_list; + struct pci_dev *pdev = qm->pdev; + struct pci_dev *virtfn; + struct hisi_qm *vf_qm; + int ret = 0; + + mutex_lock(&qm_list->lock); + list_for_each_entry(vf_qm, &qm_list->list, list) { + virtfn = vf_qm->pdev; + if (virtfn == pdev) + continue; + + if (pci_physfn(virtfn) == pdev) { + ret = hisi_qm_stop(vf_qm); + if (ret) + goto stop_fail; + } + } + +stop_fail: + mutex_unlock(&qm_list->lock); + return ret; +} + 
+static int qm_reset_prepare_ready(struct hisi_qm *qm) +{ + struct pci_dev *pdev = qm->pdev; + struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev)); + int delay = 0; + + /* All reset requests need to be queued for processing */ + while (test_and_set_bit(QM_DEV_RESET_FLAG, &pf_qm->reset_flag)) { + msleep(++delay); + if (delay > QM_RESET_WAIT_TIMEOUT) + return -EBUSY; + } + + return 0; +} + +static int qm_controller_reset_prepare(struct hisi_qm *qm) +{ + struct pci_dev *pdev = qm->pdev; + int ret; + + ret = qm_reset_prepare_ready(qm); + if (ret) { + pci_err(pdev, "Controller reset not ready!\n"); + return ret; + } + + if (qm->vfs_num) { + ret = qm_vf_reset_prepare(qm); + if (ret) { + pci_err(pdev, "Fails to stop VFs!\n"); + return ret; + } + } + + ret = hisi_qm_stop(qm); + if (ret) { + pci_err(pdev, "Fails to stop QM!\n"); + return ret; + } + + return 0; +} + +static void qm_dev_ecc_mbit_handle(struct hisi_qm *qm) +{ + u32 nfe_enb = 0; + + if (!qm->err_status.is_dev_ecc_mbit && + qm->err_status.is_qm_ecc_mbit && + qm->err_ini->close_axi_master_ooo) { + + qm->err_ini->close_axi_master_ooo(qm); + + } else if (qm->err_status.is_dev_ecc_mbit && + !qm->err_status.is_qm_ecc_mbit && + !qm->err_ini->close_axi_master_ooo) { + + nfe_enb = readl(qm->io_base + QM_RAS_NFE_ENABLE); + writel(nfe_enb & QM_RAS_NFE_MBIT_DISABLE, + qm->io_base + QM_RAS_NFE_ENABLE); + writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SET); + } +} + +static int qm_soft_reset(struct hisi_qm *qm) +{ + struct pci_dev *pdev = qm->pdev; + int ret; + u32 val; + + /* Ensure all doorbells and mailboxes received by QM */ + ret = qm_check_req_recv(qm); + if (ret) + return ret; + + if (qm->vfs_num) { + ret = qm_set_vf_mse(qm, false); + if (ret) { + pci_err(pdev, "Fails to disable vf MSE bit.\n"); + return ret; + } + } + + ret = qm_set_msi(qm, false); + if (ret) { + pci_err(pdev, "Fails to disable PEH MSI bit.\n"); + return ret; + } + + qm_dev_ecc_mbit_handle(qm); + + /* OOO register set and check */ + writel(ACC_MASTER_GLOBAL_CTRL_SHUTDOWN, + qm->io_base + ACC_MASTER_GLOBAL_CTRL); + + /* If bus lock, reset chip */ + ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN, + val, + (val == ACC_MASTER_TRANS_RETURN_RW), + POLL_PERIOD, POLL_TIMEOUT); + if (ret) { + pci_emerg(pdev, "Bus lock! 
Please reset system.\n"); + return ret; + } + + ret = qm_set_pf_mse(qm, false); + if (ret) { + pci_err(pdev, "Fails to disable pf MSE bit.\n"); + return ret; + } + + /* The reset related sub-control registers are not in PCI BAR */ + if (ACPI_HANDLE(&pdev->dev)) { + unsigned long long value = 0; + acpi_status s; + + s = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev), + qm->err_ini->err_info.acpi_rst, + NULL, &value); + if (ACPI_FAILURE(s)) { + pci_err(pdev, "NO controller reset method!\n"); + return -EIO; + } + + if (value) { + pci_err(pdev, "Reset step %llu failed!\n", value); + return -EIO; + } + } else { + pci_err(pdev, "No reset method!\n"); + return -EINVAL; + } + + return 0; +} + +static int qm_vf_reset_done(struct hisi_qm *qm) +{ + struct hisi_qm_list *qm_list = qm->qm_list; + struct pci_dev *pdev = qm->pdev; + struct pci_dev *virtfn; + struct hisi_qm *vf_qm; + int ret = 0; + + mutex_lock(&qm_list->lock); + list_for_each_entry(vf_qm, &qm_list->list, list) { + virtfn = vf_qm->pdev; + if (virtfn == pdev) + continue; + + if (pci_physfn(virtfn) == pdev) { + ret = qm_restart(vf_qm); + if (ret) + goto restart_fail; + } + } + +restart_fail: + mutex_unlock(&qm_list->lock); + return ret; +} + +static int qm_get_dev_err_status(struct hisi_qm *qm) +{ + + return(qm->err_ini->get_dev_hw_err_status(qm) & + qm->err_ini->err_info.ecc_2bits_mask); +} + +static int qm_dev_hw_init(struct hisi_qm *qm) +{ + return qm->err_ini->hw_init(qm); +} + +static void qm_restart_prepare(struct hisi_qm *qm) +{ + u32 value; + + if (!qm->err_status.is_qm_ecc_mbit && + !qm->err_status.is_dev_ecc_mbit) + return; + + /* temporarily close the OOO port used for PEH to write out MSI */ + value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN); + writel(value & ~qm->err_ini->err_info.msi_wr_port, + qm->io_base + ACC_AM_CFG_PORT_WR_EN); + + /* clear dev ecc 2bit error source if having */ + value = qm_get_dev_err_status(qm); + if (value && qm->err_ini->clear_dev_hw_err_status) + qm->err_ini->clear_dev_hw_err_status(qm, value); + + /* clear QM ecc mbit error source */ + writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SOURCE); + + /* clear AM Reorder Buffer ecc mbit source */ + writel(ACC_ROB_ECC_ERR_MULTPL, qm->io_base + ACC_AM_ROB_ECC_INT_STS); + + if (qm->err_ini->open_axi_master_ooo) + qm->err_ini->open_axi_master_ooo(qm); +} + +static void qm_restart_done(struct hisi_qm *qm) +{ + u32 value; + + if (!qm->err_status.is_qm_ecc_mbit && + !qm->err_status.is_dev_ecc_mbit) + return; + + /* open the OOO port for PEH to write out MSI */ + value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN); + value |= qm->err_ini->err_info.msi_wr_port; + writel(value, qm->io_base + ACC_AM_CFG_PORT_WR_EN); + + qm->err_status.is_qm_ecc_mbit = false; + qm->err_status.is_dev_ecc_mbit = false; +} + +static int qm_controller_reset_done(struct hisi_qm *qm) +{ + struct pci_dev *pdev = qm->pdev; + int ret; + + ret = qm_set_msi(qm, true); + if (ret) { + pci_err(pdev, "Fails to enable PEH MSI bit!\n"); + return ret; + } + + ret = qm_set_pf_mse(qm, true); + if (ret) { + pci_err(pdev, "Fails to enable pf MSE bit!\n"); + return ret; + } + + if (qm->vfs_num) { + ret = qm_set_vf_mse(qm, true); + if (ret) { + pci_err(pdev, "Fails to enable vf MSE bit!\n"); + return ret; + } + } + + ret = qm_dev_hw_init(qm); + if (ret) { + pci_err(pdev, "Failed to init device\n"); + return ret; + } + + qm_restart_prepare(qm); + + ret = qm_restart(qm); + if (ret) { + pci_err(pdev, "Failed to start QM!\n"); + return ret; + } + + if (qm->vfs_num) { + ret = qm_vf_q_assign(qm, qm->vfs_num); + 
if (ret) { + pci_err(pdev, "Failed to assign queue!\n"); + return ret; + } + } + + ret = qm_vf_reset_done(qm); + if (ret) { + pci_err(pdev, "Failed to start VFs!\n"); + return -EPERM; + } + + hisi_qm_dev_err_init(qm); + qm_restart_done(qm); + + clear_bit(QM_DEV_RESET_FLAG, &qm->reset_flag); + + return 0; +} + +int qm_controller_reset(struct hisi_qm *qm) +{ + struct pci_dev *pdev = qm->pdev; + int ret; + + pci_info(pdev, "Controller resetting...\n"); + + ret = qm_controller_reset_prepare(qm); + if (ret) + return ret; + + ret = qm_soft_reset(qm); + if (ret) { + pci_err(pdev, "Controller reset failed (%d)\n", ret); + return ret; + } + + ret = qm_controller_reset_done(qm); + if (ret) + return ret; + + pci_info(pdev, "Controller reset complete\n"); + + return 0; +} + +/** + * hisi_qm_dev_slot_reset() - slot reset + * @pdev: the PCIe device + * + * This function offers QM relate PCIe device reset interface. Drivers which + * use QM can use this function as slot_reset in its struct pci_error_handlers. + */ +pci_ers_result_t hisi_qm_dev_slot_reset(struct pci_dev *pdev) +{ + struct hisi_qm *qm = pci_get_drvdata(pdev); + int ret; + + if (pdev->is_virtfn) + return PCI_ERS_RESULT_RECOVERED; + + pci_aer_clear_nonfatal_status(pdev); + + /* reset pcie device controller */ + ret = qm_controller_reset(qm); + if (ret) { + pci_err(pdev, "Controller reset failed (%d)\n", ret); + return PCI_ERS_RESULT_DISCONNECT; + } + + return PCI_ERS_RESULT_RECOVERED; +} +EXPORT_SYMBOL_GPL(hisi_qm_dev_slot_reset); + MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Zhou Wang "); MODULE_DESCRIPTION("HiSilicon Accelerator queue manager driver"); diff --git a/drivers/crypto/hisilicon/qm.h b/drivers/crypto/hisilicon/qm.h index 1b5171b91141..9d17167840f8 100644 --- a/drivers/crypto/hisilicon/qm.h +++ b/drivers/crypto/hisilicon/qm.h @@ -133,16 +133,28 @@ struct hisi_qm_status { struct hisi_qm; struct hisi_qm_err_info { + char *acpi_rst; + u32 msi_wr_port; + u32 ecc_2bits_mask; u32 ce; u32 nfe; u32 fe; u32 msi; }; +struct hisi_qm_err_status { + u32 is_qm_ecc_mbit; + u32 is_dev_ecc_mbit; +}; + struct hisi_qm_err_ini { + int (*hw_init)(struct hisi_qm *qm); void (*hw_err_enable)(struct hisi_qm *qm); void (*hw_err_disable)(struct hisi_qm *qm); u32 (*get_dev_hw_err_status)(struct hisi_qm *qm); + void (*clear_dev_hw_err_status)(struct hisi_qm *qm, u32 err_sts); + void (*open_axi_master_ooo)(struct hisi_qm *qm); + void (*close_axi_master_ooo)(struct hisi_qm *qm); void (*log_dev_hw_err)(struct hisi_qm *qm, u32 err_sts); struct hisi_qm_err_info err_info; }; @@ -165,6 +177,7 @@ struct hisi_qm { u32 ctrl_qp_num; u32 vfs_num; struct list_head list; + struct hisi_qm_list *qm_list; struct qm_dma qdma; struct qm_sqc *sqc; @@ -178,6 +191,8 @@ struct hisi_qm { struct hisi_qm_status status; const struct hisi_qm_err_ini *err_ini; + struct hisi_qm_err_status err_status; + unsigned long reset_flag; rwlock_t qps_lock; unsigned long *qp_bitmap; @@ -298,6 +313,7 @@ void hisi_qm_dev_err_init(struct hisi_qm *qm); void hisi_qm_dev_err_uninit(struct hisi_qm *qm); pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev, pci_channel_state_t state); +pci_ers_result_t hisi_qm_dev_slot_reset(struct pci_dev *pdev); struct hisi_acc_sgl_pool; struct hisi_acc_hw_sgl *hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev, -- cgit v1.2.3 From 84c9b7802b02a0b13de1db262122e0c59f4abd77 Mon Sep 17 00:00:00 2001 From: Shukun Tan Date: Fri, 3 Apr 2020 16:16:39 +0800 Subject: crypto: hisilicon/zip - add controller reset support for zip Register controller reset handle with PCIe AER. 
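
In code terms, the hookup this patch (and the HPRE/SEC patches that follow) adds is simply the extra .slot_reset callback in each driver's struct pci_error_handlers; a minimal sketch, with placeholder "example_" names standing in for the per-driver structures:

#include <linux/pci.h>

/*
 * error_detected handles/logs the reported hardware error state;
 * slot_reset runs the QM controller reset added in the previous patch.
 */
static const struct pci_error_handlers example_err_handler = {
	.error_detected	= hisi_qm_dev_err_detected,
	.slot_reset	= hisi_qm_dev_slot_reset,
};

static struct pci_driver example_pci_driver = {
	.name		= "example",
	.err_handler	= &example_err_handler,
	/* .id_table, .probe, .remove as in the real drivers */
};
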
Signed-off-by: Shukun Tan Reviewed-by: Zhou Wang Reviewed-by: Zaibo Xu Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/zip/zip_main.c | 57 +++++++++++++++++++++++++++++---- 1 file changed, 51 insertions(+), 6 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c index fe9d6d29eed2..37db11f96fab 100644 --- a/drivers/crypto/hisilicon/zip/zip_main.c +++ b/drivers/crypto/hisilicon/zip/zip_main.c @@ -62,6 +62,7 @@ #define HZIP_CORE_INT_SOURCE 0x3010A0 #define HZIP_CORE_INT_MASK_REG 0x3010A4 +#define HZIP_CORE_INT_SET 0x3010A8 #define HZIP_CORE_INT_STATUS 0x3010AC #define HZIP_CORE_INT_STATUS_M_ECC BIT(1) #define HZIP_CORE_SRAM_ECC_ERR_INFO 0x301148 @@ -83,6 +84,9 @@ #define HZIP_SOFT_CTRL_CNT_CLR_CE 0x301000 #define SOFT_CTRL_CNT_CLR_CE_BIT BIT(0) +#define HZIP_SOFT_CTRL_ZIP_CONTROL 0x30100C +#define HZIP_AXI_SHUTDOWN_ENABLE BIT(14) +#define HZIP_WR_PORT BIT(11) #define HZIP_BUF_SIZE 22 @@ -254,9 +258,9 @@ int zip_create_qps(struct hisi_qp **qps, int qp_num) return hisi_qm_alloc_qps_node(&zip_devices, qp_num, 0, node, qps); } -static void hisi_zip_set_user_domain_and_cache(struct hisi_zip *hisi_zip) +static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm) { - void __iomem *base = hisi_zip->qm.io_base; + void __iomem *base = qm->io_base; /* qm user domain */ writel(AXUSER_BASE, base + QM_ARUSER_M_CFG_1); @@ -283,7 +287,7 @@ static void hisi_zip_set_user_domain_and_cache(struct hisi_zip *hisi_zip) writel(AXUSER_BASE, base + HZIP_SGL_RUSER_32_63); writel(AXUSER_BASE, base + HZIP_BD_WUSER_32_63); - if (hisi_zip->qm.use_sva) { + if (qm->use_sva) { writel(AXUSER_BASE | AXUSER_SSV, base + HZIP_DATA_RUSER_32_63); writel(AXUSER_BASE | AXUSER_SSV, base + HZIP_DATA_WUSER_32_63); } else { @@ -299,6 +303,8 @@ static void hisi_zip_set_user_domain_and_cache(struct hisi_zip *hisi_zip) writel(SQC_CACHE_ENABLE | CQC_CACHE_ENABLE | SQC_CACHE_WB_ENABLE | CQC_CACHE_WB_ENABLE | FIELD_PREP(SQC_CACHE_WB_THRD, 1) | FIELD_PREP(CQC_CACHE_WB_THRD, 1), base + QM_CACHE_CTL); + + return 0; } static void hisi_zip_hw_error_enable(struct hisi_qm *qm) @@ -601,8 +607,6 @@ static void hisi_zip_log_hw_error(struct hisi_qm *qm, u32 err_sts) } err++; } - - writel(err_sts, qm->io_base + HZIP_CORE_INT_SOURCE); } static u32 hisi_zip_get_hw_err_status(struct hisi_qm *qm) @@ -610,17 +614,56 @@ static u32 hisi_zip_get_hw_err_status(struct hisi_qm *qm) return readl(qm->io_base + HZIP_CORE_INT_STATUS); } +static void hisi_zip_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts) +{ + writel(err_sts, qm->io_base + HZIP_CORE_INT_SOURCE); +} + +static void hisi_zip_open_axi_master_ooo(struct hisi_qm *qm) +{ + u32 val; + + val = readl(qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL); + + writel(val & ~HZIP_AXI_SHUTDOWN_ENABLE, + qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL); + + writel(val | HZIP_AXI_SHUTDOWN_ENABLE, + qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL); +} + +static void hisi_zip_close_axi_master_ooo(struct hisi_qm *qm) +{ + u32 nfe_enb; + + /* Disable ECC Mbit error report. */ + nfe_enb = readl(qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB); + writel(nfe_enb & ~HZIP_CORE_INT_STATUS_M_ECC, + qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB); + + /* Inject zip ECC Mbit error to block master ooo. 
*/ + writel(HZIP_CORE_INT_STATUS_M_ECC, + qm->io_base + HZIP_CORE_INT_SET); +} + static const struct hisi_qm_err_ini hisi_zip_err_ini = { + .hw_init = hisi_zip_set_user_domain_and_cache, .hw_err_enable = hisi_zip_hw_error_enable, .hw_err_disable = hisi_zip_hw_error_disable, .get_dev_hw_err_status = hisi_zip_get_hw_err_status, + .clear_dev_hw_err_status = hisi_zip_clear_hw_err_status, .log_dev_hw_err = hisi_zip_log_hw_error, + .open_axi_master_ooo = hisi_zip_open_axi_master_ooo, + .close_axi_master_ooo = hisi_zip_close_axi_master_ooo, .err_info = { .ce = QM_BASE_CE, .nfe = QM_BASE_NFE | QM_ACC_WB_NOT_READY_TIMEOUT, .fe = 0, .msi = QM_DB_RANDOM_INVALID, + .ecc_2bits_mask = HZIP_CORE_INT_STATUS_M_ECC, + .msi_wr_port = HZIP_WR_PORT, + .acpi_rst = "ZRST", } }; @@ -651,7 +694,7 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip) qm->err_ini = &hisi_zip_err_ini; - hisi_zip_set_user_domain_and_cache(hisi_zip); + hisi_zip_set_user_domain_and_cache(qm); hisi_qm_dev_err_init(qm); hisi_zip_debug_regs_clear(hisi_zip); @@ -697,6 +740,7 @@ static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id) qm->qp_base = HZIP_PF_DEF_Q_BASE; qm->qp_num = pf_q_num; + qm->qm_list = &zip_devices; } else if (qm->fun_type == QM_HW_VF) { /* * have no way to get qm configure in VM in v1 hardware, @@ -764,6 +808,7 @@ static void hisi_zip_remove(struct pci_dev *pdev) static const struct pci_error_handlers hisi_zip_err_handler = { .error_detected = hisi_qm_dev_err_detected, + .slot_reset = hisi_qm_dev_slot_reset, }; static struct pci_driver hisi_zip_pci_driver = { -- cgit v1.2.3 From 1f5c9f34f0cc78e058088090b9d2abca45690e6b Mon Sep 17 00:00:00 2001 From: Hui Tang Date: Fri, 3 Apr 2020 16:16:40 +0800 Subject: crypto: hisilicon/hpre - add controller reset support for HPRE Add support for the controller reset in HPRE driver. 
Signed-off-by: Hui Tang Signed-off-by: Shukun Tan Reviewed-by: Zhou Wang Reviewed-by: Zaibo Xu Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/hpre/hpre_main.c | 46 +++++++++++++++++++++++++------ 1 file changed, 37 insertions(+), 9 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c index 9cff5c1b7c9b..0d63666ba373 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_main.c +++ b/drivers/crypto/hisilicon/hpre/hpre_main.c @@ -59,10 +59,6 @@ #define HPRE_HAC_ECC2_CNT 0x301a08 #define HPRE_HAC_INT_STATUS 0x301800 #define HPRE_HAC_SOURCE_INT 0x301600 -#define MASTER_GLOBAL_CTRL_SHUTDOWN 1 -#define MASTER_TRANS_RETURN_RW 3 -#define HPRE_MASTER_TRANS_RETURN 0x300150 -#define HPRE_MASTER_GLOBAL_CTRL 0x300000 #define HPRE_CLSTR_ADDR_INTRVL 0x1000 #define HPRE_CLUSTER_INQURY 0x100 #define HPRE_CLSTR_ADDR_INQRY_RSLT 0x104 @@ -80,6 +76,13 @@ #define HPRE_BD_USR_MASK 0x3 #define HPRE_CLUSTER_CORE_MASK 0xf +#define HPRE_AM_OOO_SHUTDOWN_ENB 0x301044 +#define HPRE_AM_OOO_SHUTDOWN_ENABLE BIT(0) +#define HPRE_WR_MSI_PORT BIT(2) + +#define HPRE_CORE_ECC_2BIT_ERR BIT(1) +#define HPRE_OOO_ECC_2BIT_ERR BIT(5) + #define HPRE_VIA_MSI_DSM 1 static struct hisi_qm_list hpre_devices; @@ -241,9 +244,8 @@ static int hpre_cfg_by_dsm(struct hisi_qm *qm) return 0; } -static int hpre_set_user_domain_and_cache(struct hpre *hpre) +static int hpre_set_user_domain_and_cache(struct hisi_qm *qm) { - struct hisi_qm *qm = &hpre->qm; struct device *dev = &qm->pdev->dev; unsigned long offset; int ret, i; @@ -339,6 +341,9 @@ static void hpre_hw_error_disable(struct hisi_qm *qm) static void hpre_hw_error_enable(struct hisi_qm *qm) { + /* clear HPRE hw error source if having */ + writel(HPRE_CORE_INT_DISABLE, qm->io_base + HPRE_HAC_SOURCE_INT); + /* enable hpre hw error interrupts */ writel(HPRE_CORE_INT_ENABLE, qm->io_base + HPRE_INT_MASK); writel(HPRE_HAC_RAS_CE_ENABLE, qm->io_base + HPRE_RAS_CE_ENB); @@ -700,8 +705,6 @@ static void hpre_log_hw_error(struct hisi_qm *qm, u32 err_sts) err->msg, err->int_msk); err++; } - - writel(err_sts, qm->io_base + HPRE_HAC_SOURCE_INT); } static u32 hpre_get_hw_err_status(struct hisi_qm *qm) @@ -709,16 +712,39 @@ static u32 hpre_get_hw_err_status(struct hisi_qm *qm) return readl(qm->io_base + HPRE_HAC_INT_STATUS); } +static void hpre_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts) +{ + writel(err_sts, qm->io_base + HPRE_HAC_SOURCE_INT); +} + +static void hpre_open_axi_master_ooo(struct hisi_qm *qm) +{ + u32 value; + + value = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB); + writel(value & ~HPRE_AM_OOO_SHUTDOWN_ENABLE, + HPRE_ADDR(qm, HPRE_AM_OOO_SHUTDOWN_ENB)); + writel(value | HPRE_AM_OOO_SHUTDOWN_ENABLE, + HPRE_ADDR(qm, HPRE_AM_OOO_SHUTDOWN_ENB)); +} + static const struct hisi_qm_err_ini hpre_err_ini = { + .hw_init = hpre_set_user_domain_and_cache, .hw_err_enable = hpre_hw_error_enable, .hw_err_disable = hpre_hw_error_disable, .get_dev_hw_err_status = hpre_get_hw_err_status, + .clear_dev_hw_err_status = hpre_clear_hw_err_status, .log_dev_hw_err = hpre_log_hw_error, + .open_axi_master_ooo = hpre_open_axi_master_ooo, .err_info = { .ce = QM_BASE_CE, .nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT, .fe = 0, .msi = QM_DB_RANDOM_INVALID, + .ecc_2bits_mask = HPRE_CORE_ECC_2BIT_ERR | + HPRE_OOO_ECC_2BIT_ERR, + .msi_wr_port = HPRE_WR_MSI_PORT, + .acpi_rst = "HRST", } }; @@ -729,10 +755,11 @@ static int hpre_pf_probe_init(struct hpre *hpre) qm->ctrl_qp_num = HPRE_QUEUE_NUM_V2; - ret = 
hpre_set_user_domain_and_cache(hpre); + ret = hpre_set_user_domain_and_cache(qm); if (ret) return ret; + qm->qm_list = &hpre_devices; qm->err_ini = &hpre_err_ini; hisi_qm_dev_err_init(qm); @@ -840,6 +867,7 @@ static void hpre_remove(struct pci_dev *pdev) static const struct pci_error_handlers hpre_err_handler = { .error_detected = hisi_qm_dev_err_detected, + .slot_reset = hisi_qm_dev_slot_reset, }; static struct pci_driver hpre_pci_driver = { -- cgit v1.2.3 From 141876c252a461818c39d45ca30ef1cb7c71953a Mon Sep 17 00:00:00 2001 From: Yang Shen Date: Fri, 3 Apr 2020 16:16:41 +0800 Subject: crypto: hisilicon/sec2 - add controller reset support for SEC2 Add support for controller reset in SEC driver. Signed-off-by: Yang Shen Signed-off-by: Shukun Tan Reviewed-by: Zhou Wang Reviewed-by: Zaibo Xu Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/sec2/sec_main.c | 40 ++++++++++++++++++++++---------- 1 file changed, 28 insertions(+), 12 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index c76c49ed883c..07a5f4eb96ff 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -249,9 +249,8 @@ static const struct pci_device_id sec_dev_ids[] = { }; MODULE_DEVICE_TABLE(pci, sec_dev_ids); -static u8 sec_get_endian(struct sec_dev *sec) +static u8 sec_get_endian(struct hisi_qm *qm) { - struct hisi_qm *qm = &sec->qm; u32 reg; /* @@ -279,9 +278,8 @@ static u8 sec_get_endian(struct sec_dev *sec) return SEC_64BE; } -static int sec_engine_init(struct sec_dev *sec) +static int sec_engine_init(struct hisi_qm *qm) { - struct hisi_qm *qm = &sec->qm; int ret; u32 reg; @@ -324,7 +322,7 @@ static int sec_engine_init(struct sec_dev *sec) /* config endian */ reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG)); - reg |= sec_get_endian(sec); + reg |= sec_get_endian(qm); writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG)); /* Enable sm4 xts mode multiple iv */ @@ -334,10 +332,8 @@ static int sec_engine_init(struct sec_dev *sec) return 0; } -static int sec_set_user_domain_and_cache(struct sec_dev *sec) +static int sec_set_user_domain_and_cache(struct hisi_qm *qm) { - struct hisi_qm *qm = &sec->qm; - /* qm user domain */ writel(AXUSER_BASE, qm->io_base + QM_ARUSER_M_CFG_1); writel(ARUSER_M_CFG_ENABLE, qm->io_base + QM_ARUSER_M_CFG_ENABLE); @@ -358,7 +354,7 @@ static int sec_set_user_domain_and_cache(struct sec_dev *sec) CQC_CACHE_WB_ENABLE | FIELD_PREP(SQC_CACHE_WB_THRD, 1) | FIELD_PREP(CQC_CACHE_WB_THRD, 1), qm->io_base + QM_CACHE_CTL); - return sec_engine_init(sec); + return sec_engine_init(qm); } /* sec_debug_regs_clear() - clear the sec debug regs */ @@ -683,8 +679,6 @@ static void sec_log_hw_error(struct hisi_qm *qm, u32 err_sts) } errs++; } - - writel(err_sts, qm->io_base + SEC_CORE_INT_SOURCE); } static u32 sec_get_hw_err_status(struct hisi_qm *qm) @@ -692,17 +686,37 @@ static u32 sec_get_hw_err_status(struct hisi_qm *qm) return readl(qm->io_base + SEC_CORE_INT_STATUS); } +static void sec_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts) +{ + writel(err_sts, qm->io_base + SEC_CORE_INT_SOURCE); +} + +static void sec_open_axi_master_ooo(struct hisi_qm *qm) +{ + u32 val; + + val = readl(SEC_ADDR(qm, SEC_CONTROL_REG)); + writel(val & SEC_AXI_SHUTDOWN_DISABLE, SEC_ADDR(qm, SEC_CONTROL_REG)); + writel(val | SEC_AXI_SHUTDOWN_ENABLE, SEC_ADDR(qm, SEC_CONTROL_REG)); +} + static const struct hisi_qm_err_ini sec_err_ini = { + .hw_init = sec_set_user_domain_and_cache, .hw_err_enable = 
sec_hw_error_enable, .hw_err_disable = sec_hw_error_disable, .get_dev_hw_err_status = sec_get_hw_err_status, + .clear_dev_hw_err_status = sec_clear_hw_err_status, .log_dev_hw_err = sec_log_hw_error, + .open_axi_master_ooo = sec_open_axi_master_ooo, .err_info = { .ce = QM_BASE_CE, .nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT | QM_ACC_WB_NOT_READY_TIMEOUT, .fe = 0, .msi = QM_DB_RANDOM_INVALID, + .ecc_2bits_mask = SEC_CORE_INT_STATUS_M_ECC, + .msi_wr_port = BIT(0), + .acpi_rst = "SRST", } }; @@ -726,7 +740,7 @@ static int sec_pf_probe_init(struct sec_dev *sec) qm->err_ini = &sec_err_ini; - ret = sec_set_user_domain_and_cache(sec); + ret = sec_set_user_domain_and_cache(qm); if (ret) return ret; @@ -783,6 +797,7 @@ static int sec_probe_init(struct hisi_qm *qm, struct sec_dev *sec) qm->qp_base = SEC_PF_DEF_Q_BASE; qm->qp_num = pf_q_num; qm->debug.curr_qm_qp_num = pf_q_num; + qm->qm_list = &sec_devices; ret = sec_pf_probe_init(sec); if (ret) @@ -936,6 +951,7 @@ static void sec_remove(struct pci_dev *pdev) static const struct pci_error_handlers sec_err_handler = { .error_detected = hisi_qm_dev_err_detected, + .slot_reset = hisi_qm_dev_slot_reset, }; static struct pci_driver sec_pci_driver = { -- cgit v1.2.3 From f037fc5f93f4e3c973f8c3b7ebd6ccd123f0944b Mon Sep 17 00:00:00 2001 From: Yang Shen Date: Fri, 3 Apr 2020 16:16:42 +0800 Subject: crypto: hisilicon/qm - stop qp by judging sq and cq tail It is not working well to determine whether the queue is empty based on whether the used count is 0. It is more stable to get if the queue is stopping by checking if the tail pointer of the send queue and the completion queue are equal. Signed-off-by: Yang Shen Signed-off-by: Shukun Tan Reviewed-by: Zhou Wang Reviewed-by: Zaibo Xu Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/qm.c | 123 ++++++++++++++++++++++++++++++++++++++---- 1 file changed, 114 insertions(+), 9 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index 98e65c5b0c4a..80c552523ac4 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -55,6 +55,7 @@ #define QM_SQ_TYPE_SHIFT 8 #define QM_SQ_TYPE_MASK GENMASK(3, 0) +#define QM_SQ_TAIL_IDX(sqc) ((le16_to_cpu((sqc)->w11) >> 6) & 0x1) /* cqc shift */ #define QM_CQ_HOP_NUM_SHIFT 0 @@ -66,6 +67,7 @@ #define QM_CQE_PHASE(cqe) (le16_to_cpu((cqe)->w7) & 0x1) #define QM_QC_CQE_SIZE 4 +#define QM_CQ_TAIL_IDX(cqc) ((le16_to_cpu((cqc)->w11) >> 6) & 0x1) /* eqc shift */ #define QM_EQE_AEQE_SIZE (2UL << 12) @@ -162,6 +164,8 @@ #define POLL_PERIOD 10 #define POLL_TIMEOUT 1000 +#define WAIT_PERIOD_US_MAX 200 +#define WAIT_PERIOD_US_MIN 100 #define MAX_WAIT_COUNTS 1000 #define QM_CACHE_WB_START 0x204 #define QM_CACHE_WB_DONE 0x208 @@ -1362,6 +1366,107 @@ int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg) } EXPORT_SYMBOL_GPL(hisi_qm_start_qp); +static void *qm_ctx_alloc(struct hisi_qm *qm, size_t ctx_size, + dma_addr_t *dma_addr) +{ + struct device *dev = &qm->pdev->dev; + void *ctx_addr; + + ctx_addr = kzalloc(ctx_size, GFP_KERNEL); + if (!ctx_addr) + return ERR_PTR(-ENOMEM); + + *dma_addr = dma_map_single(dev, ctx_addr, ctx_size, DMA_FROM_DEVICE); + if (dma_mapping_error(dev, *dma_addr)) { + dev_err(dev, "DMA mapping error!\n"); + kfree(ctx_addr); + return ERR_PTR(-ENOMEM); + } + + return ctx_addr; +} + +static void qm_ctx_free(struct hisi_qm *qm, size_t ctx_size, + const void *ctx_addr, dma_addr_t *dma_addr) +{ + struct device *dev = &qm->pdev->dev; + + dma_unmap_single(dev, *dma_addr, ctx_size, 
DMA_FROM_DEVICE); + kfree(ctx_addr); +} + +static int qm_dump_sqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id) +{ + return qm_mb(qm, QM_MB_CMD_SQC, dma_addr, qp_id, 1); +} + +static int qm_dump_cqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id) +{ + return qm_mb(qm, QM_MB_CMD_CQC, dma_addr, qp_id, 1); +} + +/** + * Determine whether the queue is cleared by judging the tail pointers of + * sq and cq. + */ +static int qm_drain_qp(struct hisi_qp *qp) +{ + size_t size = sizeof(struct qm_sqc) + sizeof(struct qm_cqc); + struct hisi_qm *qm = qp->qm; + struct device *dev = &qm->pdev->dev; + struct qm_sqc *sqc; + struct qm_cqc *cqc; + dma_addr_t dma_addr; + int ret = 0, i = 0; + void *addr; + + /* + * No need to judge if ECC multi-bit error occurs because the + * master OOO will be blocked. + */ + if (qm->err_status.is_qm_ecc_mbit || qm->err_status.is_dev_ecc_mbit) + return 0; + + addr = qm_ctx_alloc(qm, size, &dma_addr); + if (IS_ERR(addr)) { + dev_err(dev, "Failed to alloc ctx for sqc and cqc!\n"); + return -ENOMEM; + } + + while (++i) { + ret = qm_dump_sqc_raw(qm, dma_addr, qp->qp_id); + if (ret) { + dev_err_ratelimited(dev, "Failed to dump sqc!\n"); + break; + } + sqc = addr; + + ret = qm_dump_cqc_raw(qm, (dma_addr + sizeof(struct qm_sqc)), + qp->qp_id); + if (ret) { + dev_err_ratelimited(dev, "Failed to dump cqc!\n"); + break; + } + cqc = addr + sizeof(struct qm_sqc); + + if ((sqc->tail == cqc->tail) && + (QM_SQ_TAIL_IDX(sqc) == QM_CQ_TAIL_IDX(cqc))) + break; + + if (i == MAX_WAIT_COUNTS) { + dev_err(dev, "Fail to empty queue %u!\n", qp->qp_id); + ret = -EBUSY; + break; + } + + usleep_range(WAIT_PERIOD_US_MIN, WAIT_PERIOD_US_MAX); + } + + qm_ctx_free(qm, size, addr, &dma_addr); + + return ret; +} + /** * hisi_qm_stop_qp() - Stop a qp in qm. * @qp: The qp we want to stop. @@ -1371,20 +1476,20 @@ EXPORT_SYMBOL_GPL(hisi_qm_start_qp); int hisi_qm_stop_qp(struct hisi_qp *qp) { struct device *dev = &qp->qm->pdev->dev; - int i = 0; + int ret; /* it is stopped */ if (test_bit(QP_STOP, &qp->qp_status.flags)) return 0; - while (atomic_read(&qp->qp_status.used)) { - i++; - msleep(20); - if (i == 10) { - dev_err(dev, "Cannot drain out data for stopping, Force to stop!\n"); - return 0; - } - } + ret = qm_drain_qp(qp); + if (ret) + dev_err(dev, "Failed to drain out data for stopping!\n"); + + if (qp->qm->wq) + flush_workqueue(qp->qm->wq); + else + flush_work(&qp->qm->work); set_bit(QP_STOP, &qp->qp_status.flags); -- cgit v1.2.3 From 56e0b6273ec8791ffe1c3cdc5d32fe5d001fd520 Mon Sep 17 00:00:00 2001 From: Tang Bin Date: Sat, 4 Apr 2020 06:07:54 +0800 Subject: crypto: amlogic - Delete duplicate dev_err in meson_crypto_probe() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When something goes wrong, platform_get_irq() will print an error message, so in order to avoid the situation of repeat output,we should remove dev_err here. 
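As an illustrative aside, a minimal sketch of the resulting probe pattern with hypothetical names (platform_get_irq() itself reports the failure, so the caller only propagates the negative error code):

    /* Illustrative sketch only; the "example" names are hypothetical. */
    #include <linux/interrupt.h>
    #include <linux/platform_device.h>

    static irqreturn_t example_irq_handler(int irq, void *data)
    {
            return IRQ_HANDLED;
    }

    static int example_probe(struct platform_device *pdev)
    {
            int irq;

            irq = platform_get_irq(pdev, 0);
            if (irq < 0)
                    return irq;     /* platform_get_irq() already logged why */

            return devm_request_irq(&pdev->dev, irq, example_irq_handler, 0,
                                    "example", NULL);
    }

The hunk below applies the same shape to the driver's per-flow IRQ loop.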
Signed-off-by: Tang Bin Signed-off-by: Herbert Xu --- drivers/crypto/amlogic/amlogic-gxl-core.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/amlogic/amlogic-gxl-core.c b/drivers/crypto/amlogic/amlogic-gxl-core.c index 9d4ead2f7ebb..411857fad8ba 100644 --- a/drivers/crypto/amlogic/amlogic-gxl-core.c +++ b/drivers/crypto/amlogic/amlogic-gxl-core.c @@ -253,10 +253,8 @@ static int meson_crypto_probe(struct platform_device *pdev) mc->irqs = devm_kcalloc(mc->dev, MAXFLOW, sizeof(int), GFP_KERNEL); for (i = 0; i < MAXFLOW; i++) { mc->irqs[i] = platform_get_irq(pdev, i); - if (mc->irqs[i] < 0) { - dev_err(mc->dev, "Cannot get IRQ for flow %d\n", i); + if (mc->irqs[i] < 0) return mc->irqs[i]; - } err = devm_request_irq(&pdev->dev, mc->irqs[i], meson_irq_handler, 0, "gxl-crypto", mc); -- cgit v1.2.3 From 56b80bdee4a16cf330562801667a1e62fe7b9255 Mon Sep 17 00:00:00 2001 From: Markus Elfring Date: Sat, 4 Apr 2020 17:34:53 +0200 Subject: crypto: sun8i-ss - Delete an error message in sun8i_ss_probe() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The function “platform_get_irq” can log an error already. Thus omit a redundant message for the exception handling in the calling function. This issue was detected by using the Coccinelle software. Signed-off-by: Markus Elfring Acked-by: Corentin Labbe Signed-off-by: Herbert Xu --- drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c index 6b301afffd11..a1fb2fbdbe7b 100644 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c @@ -537,10 +537,8 @@ static int sun8i_ss_probe(struct platform_device *pdev) return err; irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_err(ss->dev, "Cannot get SecuritySystem IRQ\n"); + if (irq < 0) return irq; - } ss->reset = devm_reset_control_get(&pdev->dev, NULL); if (IS_ERR(ss->reset)) { -- cgit v1.2.3 From eebac678556d6927f09a992872f4464cf3aecc76 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 8 Apr 2020 18:26:48 +0200 Subject: crypto: ccp -- don't "select" CONFIG_DMADEVICES DMADEVICES is the top-level option for the slave DMA subsystem, and should not be selected by device drivers, as this can cause circular dependencies such as: drivers/net/ethernet/freescale/Kconfig:6:error: recursive dependency detected! drivers/net/ethernet/freescale/Kconfig:6: symbol NET_VENDOR_FREESCALE depends on PPC_BESTCOMM drivers/dma/bestcomm/Kconfig:6: symbol PPC_BESTCOMM depends on DMADEVICES drivers/dma/Kconfig:6: symbol DMADEVICES is selected by CRYPTO_DEV_SP_CCP drivers/crypto/ccp/Kconfig:10: symbol CRYPTO_DEV_SP_CCP depends on CRYPTO crypto/Kconfig:16: symbol CRYPTO is selected by LIBCRC32C lib/Kconfig:222: symbol LIBCRC32C is selected by LIQUIDIO drivers/net/ethernet/cavium/Kconfig:65: symbol LIQUIDIO depends on PTP_1588_CLOCK drivers/ptp/Kconfig:8: symbol PTP_1588_CLOCK is implied by FEC drivers/net/ethernet/freescale/Kconfig:23: symbol FEC depends on NET_VENDOR_FREESCALE The LIQUIDIO driver causing this problem is addressed in a separate patch, but this change is needed to prevent it from happening again. Using "depends on DMADEVICES" is what we do for all other implementations of slave DMA controllers as well. 
Fixes: b3c2fee5d66b ("crypto: ccp - Ensure all dependencies are specified") Signed-off-by: Arnd Bergmann Acked-by: Tom Lendacky Signed-off-by: Herbert Xu --- drivers/crypto/ccp/Kconfig | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig index e0a8bd15aa74..32268e239bf1 100644 --- a/drivers/crypto/ccp/Kconfig +++ b/drivers/crypto/ccp/Kconfig @@ -10,10 +10,9 @@ config CRYPTO_DEV_CCP_DD config CRYPTO_DEV_SP_CCP bool "Cryptographic Coprocessor device" default y - depends on CRYPTO_DEV_CCP_DD + depends on CRYPTO_DEV_CCP_DD && DMADEVICES select HW_RANDOM select DMA_ENGINE - select DMADEVICES select CRYPTO_SHA1 select CRYPTO_SHA256 help -- cgit v1.2.3 From 0a8f5989e03476cfb2a7756e33fa4d0163cb4375 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Sat, 11 Apr 2020 14:06:33 +0200 Subject: crypto: marvell/octeontx - Add missing '\n' in log messages Message logged by 'dev_xxx()' or 'pr_xxx()' should end with a '\n'. While at it, I've introduced a few pr_cont that looked logical to me. Fixes: 10b4f09491bf ("crypto: marvell - add the Virtual Function driver for CPT") Fixes: d9110b0b01ff ("crypto: marvell - add support for OCTEON TX CPT engine") Signed-off-by: Christophe JAILLET Signed-off-by: Herbert Xu --- drivers/crypto/marvell/octeontx/otx_cptpf_main.c | 4 +- drivers/crypto/marvell/octeontx/otx_cptpf_mbox.c | 12 +-- drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c | 95 +++++++++++----------- drivers/crypto/marvell/octeontx/otx_cptvf_algs.c | 6 +- drivers/crypto/marvell/octeontx/otx_cptvf_main.c | 12 +-- drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.c | 10 +-- 6 files changed, 70 insertions(+), 69 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/marvell/octeontx/otx_cptpf_main.c b/drivers/crypto/marvell/octeontx/otx_cptpf_main.c index 200fb3303db0..34bb3063eb70 100644 --- a/drivers/crypto/marvell/octeontx/otx_cptpf_main.c +++ b/drivers/crypto/marvell/octeontx/otx_cptpf_main.c @@ -79,13 +79,13 @@ static int otx_cpt_device_init(struct otx_cpt_device *cpt) /* Check BIST status */ bist = (u64)otx_cpt_check_bist_status(cpt); if (bist) { - dev_err(dev, "RAM BIST failed with code 0x%llx", bist); + dev_err(dev, "RAM BIST failed with code 0x%llx\n", bist); return -ENODEV; } bist = otx_cpt_check_exe_bist_status(cpt); if (bist) { - dev_err(dev, "Engine BIST failed with code 0x%llx", bist); + dev_err(dev, "Engine BIST failed with code 0x%llx\n", bist); return -ENODEV; } diff --git a/drivers/crypto/marvell/octeontx/otx_cptpf_mbox.c b/drivers/crypto/marvell/octeontx/otx_cptpf_mbox.c index a6774232e9a3..a9e3de65875a 100644 --- a/drivers/crypto/marvell/octeontx/otx_cptpf_mbox.c +++ b/drivers/crypto/marvell/octeontx/otx_cptpf_mbox.c @@ -63,11 +63,11 @@ static void dump_mbox_msg(struct otx_cpt_mbox *mbox_msg, int vf_id) hex_dump_to_buffer(mbox_msg, sizeof(struct otx_cpt_mbox), 16, 8, raw_data_str, OTX_CPT_MAX_MBOX_DATA_STR_SIZE, false); if (vf_id >= 0) - pr_debug("MBOX opcode %s received from VF%d raw_data %s", + pr_debug("MBOX opcode %s received from VF%d raw_data %s\n", get_mbox_opcode_str(mbox_msg->msg), vf_id, raw_data_str); else - pr_debug("MBOX opcode %s received from PF raw_data %s", + pr_debug("MBOX opcode %s received from PF raw_data %s\n", get_mbox_opcode_str(mbox_msg->msg), raw_data_str); } @@ -140,20 +140,20 @@ static int otx_cpt_bind_vq_to_grp(struct otx_cpt_device *cpt, u8 q, u8 grp) struct otx_cpt_ucode *ucode; if (q >= cpt->max_vfs) { - dev_err(dev, "Requested queue %d is 
> than maximum avail %d", + dev_err(dev, "Requested queue %d is > than maximum avail %d\n", q, cpt->max_vfs); return -EINVAL; } if (grp >= OTX_CPT_MAX_ENGINE_GROUPS) { - dev_err(dev, "Requested group %d is > than maximum avail %d", + dev_err(dev, "Requested group %d is > than maximum avail %d\n", grp, OTX_CPT_MAX_ENGINE_GROUPS); return -EINVAL; } eng_grp = &cpt->eng_grps.grp[grp]; if (!eng_grp->is_enabled) { - dev_err(dev, "Requested engine group %d is disabled", grp); + dev_err(dev, "Requested engine group %d is disabled\n", grp); return -EINVAL; } @@ -212,7 +212,7 @@ static void otx_cpt_handle_mbox_intr(struct otx_cpt_device *cpt, int vf) vftype = otx_cpt_bind_vq_to_grp(cpt, vf, (u8)mbx.data); if ((vftype != OTX_CPT_AE_TYPES) && (vftype != OTX_CPT_SE_TYPES)) { - dev_err(dev, "VF%d binding to eng group %llu failed", + dev_err(dev, "VF%d binding to eng group %llu failed\n", vf, mbx.data); otx_cptpf_mbox_send_nack(cpt, vf, &mbx); } else { diff --git a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c index d04baa319592..fec8f3b9b112 100644 --- a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c +++ b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c @@ -62,7 +62,7 @@ static struct otx_cpt_bitmap get_cores_bmap(struct device *dev, int i; if (eng_grp->g->engs_num > OTX_CPT_MAX_ENGINES) { - dev_err(dev, "unsupported number of engines %d on octeontx", + dev_err(dev, "unsupported number of engines %d on octeontx\n", eng_grp->g->engs_num); return bmap; } @@ -78,7 +78,7 @@ static struct otx_cpt_bitmap get_cores_bmap(struct device *dev, } if (!found) - dev_err(dev, "No engines reserved for engine group %d", + dev_err(dev, "No engines reserved for engine group %d\n", eng_grp->idx); return bmap; } @@ -306,7 +306,7 @@ static int process_tar_file(struct device *dev, ucode_size = ntohl(ucode_hdr->code_length) * 2; if (!ucode_size || (size < round_up(ucode_size, 16) + sizeof(struct otx_cpt_ucode_hdr) + OTX_CPT_UCODE_SIGN_LEN)) { - dev_err(dev, "Ucode %s invalid size", filename); + dev_err(dev, "Ucode %s invalid size\n", filename); return -EINVAL; } @@ -379,18 +379,18 @@ static void print_tar_dbg_info(struct tar_arch_info_t *tar_arch, { struct tar_ucode_info_t *curr; - pr_debug("Tar archive filename %s", tar_filename); - pr_debug("Tar archive pointer %p, size %ld", tar_arch->fw->data, + pr_debug("Tar archive filename %s\n", tar_filename); + pr_debug("Tar archive pointer %p, size %ld\n", tar_arch->fw->data, tar_arch->fw->size); list_for_each_entry(curr, &tar_arch->ucodes, list) { - pr_debug("Ucode filename %s", curr->ucode.filename); - pr_debug("Ucode version string %s", curr->ucode.ver_str); - pr_debug("Ucode version %d.%d.%d.%d", + pr_debug("Ucode filename %s\n", curr->ucode.filename); + pr_debug("Ucode version string %s\n", curr->ucode.ver_str); + pr_debug("Ucode version %d.%d.%d.%d\n", curr->ucode.ver_num.nn, curr->ucode.ver_num.xx, curr->ucode.ver_num.yy, curr->ucode.ver_num.zz); - pr_debug("Ucode type (%d) %s", curr->ucode.type, + pr_debug("Ucode type (%d) %s\n", curr->ucode.type, get_ucode_type_str(curr->ucode.type)); - pr_debug("Ucode size %d", curr->ucode.size); + pr_debug("Ucode size %d\n", curr->ucode.size); pr_debug("Ucode ptr %p\n", curr->ucode_ptr); } } @@ -417,14 +417,14 @@ static struct tar_arch_info_t *load_tar_archive(struct device *dev, goto release_tar_arch; if (tar_arch->fw->size < TAR_BLOCK_LEN) { - dev_err(dev, "Invalid tar archive %s ", tar_filename); + dev_err(dev, "Invalid tar archive %s\n", tar_filename); goto release_tar_arch; 
} tar_size = tar_arch->fw->size; tar_blk = (struct tar_blk_t *) tar_arch->fw->data; if (strncmp(tar_blk->hdr.magic, TAR_MAGIC, TAR_MAGIC_LEN - 1)) { - dev_err(dev, "Unsupported format of tar archive %s", + dev_err(dev, "Unsupported format of tar archive %s\n", tar_filename); goto release_tar_arch; } @@ -437,7 +437,7 @@ static struct tar_arch_info_t *load_tar_archive(struct device *dev, if (tar_offs + cur_size > tar_size || tar_offs + 2*TAR_BLOCK_LEN > tar_size) { - dev_err(dev, "Invalid tar archive %s ", tar_filename); + dev_err(dev, "Invalid tar archive %s\n", tar_filename); goto release_tar_arch; } @@ -458,7 +458,7 @@ static struct tar_arch_info_t *load_tar_archive(struct device *dev, /* Check for the end of the archive */ if (tar_offs + 2*TAR_BLOCK_LEN > tar_size) { - dev_err(dev, "Invalid tar archive %s ", tar_filename); + dev_err(dev, "Invalid tar archive %s\n", tar_filename); goto release_tar_arch; } @@ -563,13 +563,13 @@ static void print_engs_info(struct otx_cpt_eng_grp_info *eng_grp, static void print_ucode_dbg_info(struct otx_cpt_ucode *ucode) { - pr_debug("Ucode info"); - pr_debug("Ucode version string %s", ucode->ver_str); - pr_debug("Ucode version %d.%d.%d.%d", ucode->ver_num.nn, + pr_debug("Ucode info\n"); + pr_debug("Ucode version string %s\n", ucode->ver_str); + pr_debug("Ucode version %d.%d.%d.%d\n", ucode->ver_num.nn, ucode->ver_num.xx, ucode->ver_num.yy, ucode->ver_num.zz); - pr_debug("Ucode type %s", get_ucode_type_str(ucode->type)); - pr_debug("Ucode size %d", ucode->size); - pr_debug("Ucode virt address %16.16llx", (u64)ucode->align_va); + pr_debug("Ucode type %s\n", get_ucode_type_str(ucode->type)); + pr_debug("Ucode size %d\n", ucode->size); + pr_debug("Ucode virt address %16.16llx\n", (u64)ucode->align_va); pr_debug("Ucode phys address %16.16llx\n", ucode->align_dma); } @@ -600,19 +600,19 @@ static void print_dbg_info(struct device *dev, u32 mask[4]; int i, j; - pr_debug("Engine groups global info"); - pr_debug("max SE %d, max AE %d", + pr_debug("Engine groups global info\n"); + pr_debug("max SE %d, max AE %d\n", eng_grps->avail.max_se_cnt, eng_grps->avail.max_ae_cnt); - pr_debug("free SE %d", eng_grps->avail.se_cnt); - pr_debug("free AE %d", eng_grps->avail.ae_cnt); + pr_debug("free SE %d\n", eng_grps->avail.se_cnt); + pr_debug("free AE %d\n", eng_grps->avail.ae_cnt); for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++) { grp = &eng_grps->grp[i]; - pr_debug("engine_group%d, state %s", i, grp->is_enabled ? + pr_debug("engine_group%d, state %s\n", i, grp->is_enabled ? "enabled" : "disabled"); if (grp->is_enabled) { mirrored_grp = &eng_grps->grp[grp->mirror.idx]; - pr_debug("Ucode0 filename %s, version %s", + pr_debug("Ucode0 filename %s, version %s\n", grp->mirror.is_ena ? 
mirrored_grp->ucode[0].filename : grp->ucode[0].filename, @@ -626,18 +626,18 @@ static void print_dbg_info(struct device *dev, if (engs->type) { print_engs_info(grp, engs_info, 2*OTX_CPT_UCODE_NAME_LENGTH, j); - pr_debug("Slot%d: %s", j, engs_info); + pr_debug("Slot%d: %s\n", j, engs_info); bitmap_to_arr32(mask, engs->bmap, eng_grps->engs_num); - pr_debug("Mask: %8.8x %8.8x %8.8x %8.8x", + pr_debug("Mask: %8.8x %8.8x %8.8x %8.8x\n", mask[3], mask[2], mask[1], mask[0]); } else - pr_debug("Slot%d not used", j); + pr_debug("Slot%d not used\n", j); } if (grp->is_enabled) { cpt_print_engines_mask(grp, dev, engs_mask, OTX_CPT_UCODE_NAME_LENGTH); - pr_debug("Cmask: %s", engs_mask); + pr_debug("Cmask: %s\n", engs_mask); } } } @@ -766,7 +766,7 @@ static int check_engines_availability(struct device *dev, if (avail_cnt < req_eng->count) { dev_err(dev, - "Error available %s engines %d < than requested %d", + "Error available %s engines %d < than requested %d\n", get_eng_type_str(req_eng->type), avail_cnt, req_eng->count); return -EBUSY; @@ -867,7 +867,7 @@ static int copy_ucode_to_dma_mem(struct device *dev, OTX_CPT_UCODE_ALIGNMENT, &ucode->dma, GFP_KERNEL); if (!ucode->va) { - dev_err(dev, "Unable to allocate space for microcode"); + dev_err(dev, "Unable to allocate space for microcode\n"); return -ENOMEM; } ucode->align_va = PTR_ALIGN(ucode->va, OTX_CPT_UCODE_ALIGNMENT); @@ -905,15 +905,15 @@ static int ucode_load(struct device *dev, struct otx_cpt_ucode *ucode, ucode->size = ntohl(ucode_hdr->code_length) * 2; if (!ucode->size || (fw->size < round_up(ucode->size, 16) + sizeof(struct otx_cpt_ucode_hdr) + OTX_CPT_UCODE_SIGN_LEN)) { - dev_err(dev, "Ucode %s invalid size", ucode_filename); + dev_err(dev, "Ucode %s invalid size\n", ucode_filename); ret = -EINVAL; goto release_fw; } ret = get_ucode_type(ucode_hdr, &ucode->type); if (ret) { - dev_err(dev, "Microcode %s unknown type 0x%x", ucode->filename, - ucode->type); + dev_err(dev, "Microcode %s unknown type 0x%x\n", + ucode->filename, ucode->type); goto release_fw; } @@ -1083,7 +1083,7 @@ static int eng_grp_update_masks(struct device *dev, break; default: - dev_err(dev, "Invalid engine type %d", engs->type); + dev_err(dev, "Invalid engine type %d\n", engs->type); return -EINVAL; } @@ -1142,13 +1142,14 @@ static int delete_engine_group(struct device *dev, return -EINVAL; if (eng_grp->mirror.ref_count) { - dev_err(dev, "Can't delete engine_group%d as it is used by:", + dev_err(dev, "Can't delete engine_group%d as it is used by engine_group(s):", eng_grp->idx); for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++) { if (eng_grp->g->grp[i].mirror.is_ena && eng_grp->g->grp[i].mirror.idx == eng_grp->idx) - dev_err(dev, "engine_group%d", i); + pr_cont(" %d", i); } + pr_cont("\n"); return -EINVAL; } @@ -1182,7 +1183,7 @@ static int validate_1_ucode_scenario(struct device *dev, if (!otx_cpt_uc_supports_eng_type(&eng_grp->ucode[0], engs[i].type)) { dev_err(dev, - "Microcode %s does not support %s engines", + "Microcode %s does not support %s engines\n", eng_grp->ucode[0].filename, get_eng_type_str(engs[i].type)); return -EINVAL; @@ -1220,7 +1221,7 @@ static int create_engine_group(struct device *dev, /* Validate if requested engine types are supported by this device */ for (i = 0; i < engs_cnt; i++) if (!dev_supports_eng_type(eng_grps, engs[i].type)) { - dev_err(dev, "Device does not support %s engines", + dev_err(dev, "Device does not support %s engines\n", get_eng_type_str(engs[i].type)); return -EPERM; } @@ -1228,7 +1229,7 @@ static int create_engine_group(struct 
device *dev, /* Find engine group which is not used */ eng_grp = find_unused_eng_grp(eng_grps); if (!eng_grp) { - dev_err(dev, "Error all engine groups are being used"); + dev_err(dev, "Error all engine groups are being used\n"); return -ENOSPC; } @@ -1298,11 +1299,11 @@ static int create_engine_group(struct device *dev, eng_grp->is_enabled = true; if (eng_grp->mirror.is_ena) dev_info(dev, - "Engine_group%d: reuse microcode %s from group %d", + "Engine_group%d: reuse microcode %s from group %d\n", eng_grp->idx, mirrored_eng_grp->ucode[0].ver_str, mirrored_eng_grp->idx); else - dev_info(dev, "Engine_group%d: microcode loaded %s", + dev_info(dev, "Engine_group%d: microcode loaded %s\n", eng_grp->idx, eng_grp->ucode[0].ver_str); return 0; @@ -1412,14 +1413,14 @@ static ssize_t ucode_load_store(struct device *dev, } else { if (del_grp_idx < 0 || del_grp_idx >= OTX_CPT_MAX_ENGINE_GROUPS) { - dev_err(dev, "Invalid engine group index %d", + dev_err(dev, "Invalid engine group index %d\n", del_grp_idx); ret = -EINVAL; return ret; } if (!eng_grps->grp[del_grp_idx].is_enabled) { - dev_err(dev, "Error engine_group%d is not configured", + dev_err(dev, "Error engine_group%d is not configured\n", del_grp_idx); ret = -EINVAL; return ret; @@ -1568,7 +1569,7 @@ void otx_cpt_disable_all_cores(struct otx_cpt_device *cpt) udelay(CSR_DELAY); reg = readq(cpt->reg_base + OTX_CPT_PF_EXEC_BUSY); if (timeout--) { - dev_warn(&cpt->pdev->dev, "Cores still busy"); + dev_warn(&cpt->pdev->dev, "Cores still busy\n"); break; } } @@ -1626,7 +1627,7 @@ int otx_cpt_init_eng_grps(struct pci_dev *pdev, eng_grps->avail.max_ae_cnt; if (eng_grps->engs_num > OTX_CPT_MAX_ENGINES) { dev_err(&pdev->dev, - "Number of engines %d > than max supported %d", + "Number of engines %d > than max supported %d\n", eng_grps->engs_num, OTX_CPT_MAX_ENGINES); ret = -EINVAL; goto err; diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c index 06202bcffb33..60e744f680d3 100644 --- a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c +++ b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c @@ -1660,7 +1660,7 @@ int otx_cpt_crypto_init(struct pci_dev *pdev, struct module *mod, case OTX_CPT_SE_TYPES: count = atomic_read(&se_devices.count); if (count >= CPT_MAX_VF_NUM) { - dev_err(&pdev->dev, "No space to add a new device"); + dev_err(&pdev->dev, "No space to add a new device\n"); ret = -ENOSPC; goto err; } @@ -1687,7 +1687,7 @@ int otx_cpt_crypto_init(struct pci_dev *pdev, struct module *mod, case OTX_CPT_AE_TYPES: count = atomic_read(&ae_devices.count); if (count >= CPT_MAX_VF_NUM) { - dev_err(&pdev->dev, "No space to a add new device"); + dev_err(&pdev->dev, "No space to a add new device\n"); ret = -ENOSPC; goto err; } @@ -1728,7 +1728,7 @@ void otx_cpt_crypto_exit(struct pci_dev *pdev, struct module *mod, } if (!dev_found) { - dev_err(&pdev->dev, "%s device not found", __func__); + dev_err(&pdev->dev, "%s device not found\n", __func__); goto exit; } diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_main.c b/drivers/crypto/marvell/octeontx/otx_cptvf_main.c index a91860b5dc77..ce3168327a39 100644 --- a/drivers/crypto/marvell/octeontx/otx_cptvf_main.c +++ b/drivers/crypto/marvell/octeontx/otx_cptvf_main.c @@ -584,7 +584,7 @@ static irqreturn_t cptvf_done_intr_handler(int __always_unused irq, cptvf_write_vq_done_ack(cptvf, intr); wqe = get_cptvf_vq_wqe(cptvf, 0); if (unlikely(!wqe)) { - dev_err(&pdev->dev, "No work to schedule for VF (%d)", + dev_err(&pdev->dev, "No work to schedule for VF 
(%d)\n", cptvf->vfid); return IRQ_NONE; } @@ -602,7 +602,7 @@ static void cptvf_set_irq_affinity(struct otx_cptvf *cptvf, int vec) if (!zalloc_cpumask_var(&cptvf->affinity_mask[vec], GFP_KERNEL)) { dev_err(&pdev->dev, - "Allocation failed for affinity_mask for VF %d", + "Allocation failed for affinity_mask for VF %d\n", cptvf->vfid); return; } @@ -691,7 +691,7 @@ static ssize_t vf_engine_group_store(struct device *dev, return -EINVAL; if (val >= OTX_CPT_MAX_ENGINE_GROUPS) { - dev_err(dev, "Engine group >= than max available groups %d", + dev_err(dev, "Engine group >= than max available groups %d\n", OTX_CPT_MAX_ENGINE_GROUPS); return -EINVAL; } @@ -837,7 +837,7 @@ static int otx_cptvf_probe(struct pci_dev *pdev, cptvf_misc_intr_handler, 0, "CPT VF misc intr", cptvf); if (err) { - dev_err(dev, "Failed to request misc irq"); + dev_err(dev, "Failed to request misc irq\n"); goto free_vectors; } @@ -854,7 +854,7 @@ static int otx_cptvf_probe(struct pci_dev *pdev, cptvf->cqinfo.qchunksize = OTX_CPT_CMD_QCHUNK_SIZE; err = cptvf_sw_init(cptvf, OTX_CPT_CMD_QLEN, OTX_CPT_NUM_QS_PER_VF); if (err) { - dev_err(dev, "cptvf_sw_init() failed"); + dev_err(dev, "cptvf_sw_init() failed\n"); goto free_misc_irq; } /* Convey VQ LEN to PF */ @@ -946,7 +946,7 @@ static void otx_cptvf_remove(struct pci_dev *pdev) /* Convey DOWN to PF */ if (otx_cptvf_send_vf_down(cptvf)) { - dev_err(&pdev->dev, "PF not responding to DOWN msg"); + dev_err(&pdev->dev, "PF not responding to DOWN msg\n"); } else { sysfs_remove_group(&pdev->dev.kobj, &otx_cptvf_sysfs_group); otx_cpt_crypto_exit(pdev, THIS_MODULE, cptvf->vftype); diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.c b/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.c index df839b880354..239195cccf93 100644 --- a/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.c +++ b/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.c @@ -314,7 +314,7 @@ static int process_request(struct pci_dev *pdev, struct otx_cpt_req_info *req, GFP_ATOMIC; ret = setup_sgio_list(pdev, &info, req, gfp); if (unlikely(ret)) { - dev_err(&pdev->dev, "Setting up SG list failed"); + dev_err(&pdev->dev, "Setting up SG list failed\n"); goto request_cleanup; } cpt_req->dlen = info->dlen; @@ -410,17 +410,17 @@ int otx_cpt_do_request(struct pci_dev *pdev, struct otx_cpt_req_info *req, struct otx_cptvf *cptvf = pci_get_drvdata(pdev); if (!otx_cpt_device_ready(cptvf)) { - dev_err(&pdev->dev, "CPT Device is not ready"); + dev_err(&pdev->dev, "CPT Device is not ready\n"); return -ENODEV; } if ((cptvf->vftype == OTX_CPT_SE_TYPES) && (!req->ctrl.s.se_req)) { - dev_err(&pdev->dev, "CPTVF-%d of SE TYPE got AE request", + dev_err(&pdev->dev, "CPTVF-%d of SE TYPE got AE request\n", cptvf->vfid); return -EINVAL; } else if ((cptvf->vftype == OTX_CPT_AE_TYPES) && (req->ctrl.s.se_req)) { - dev_err(&pdev->dev, "CPTVF-%d of AE TYPE got SE request", + dev_err(&pdev->dev, "CPTVF-%d of AE TYPE got SE request\n", cptvf->vfid); return -EINVAL; } @@ -461,7 +461,7 @@ static int cpt_process_ccode(struct pci_dev *pdev, /* check for timeout */ if (time_after_eq(jiffies, cpt_info->time_in + OTX_CPT_COMMAND_TIMEOUT * HZ)) - dev_warn(&pdev->dev, "Request timed out 0x%p", req); + dev_warn(&pdev->dev, "Request timed out 0x%p\n", req); else if (cpt_info->extra_time < OTX_CPT_TIME_IN_RESET_COUNT) { cpt_info->time_in = jiffies; cpt_info->extra_time++; -- cgit v1.2.3 From f88480e300ac13141aa84f0f70b745df2e11b203 Mon Sep 17 00:00:00 2001 From: Shukun Tan Date: Fri, 17 Apr 2020 15:08:31 +0800 Subject: crypto: hisilicon/qm - fix build 
failure with ACPI off Add Kconfig dependency to fix kbuild warnings. Fixes: 6c6dd5802c2d ("crypto: hisilicon/qm - add controller reset interface") Reported-by: kbuild test robot Reported-by: Stephen Rothwell Signed-off-by: Shukun Tan Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/Kconfig | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/crypto') diff --git a/drivers/crypto/hisilicon/Kconfig b/drivers/crypto/hisilicon/Kconfig index f09c6cf7823e..99e962e39f36 100644 --- a/drivers/crypto/hisilicon/Kconfig +++ b/drivers/crypto/hisilicon/Kconfig @@ -42,6 +42,7 @@ config CRYPTO_DEV_HISI_QM depends on ARM64 || COMPILE_TEST depends on PCI && PCI_MSI depends on UACCE || UACCE=n + depends on ACPI help HiSilicon accelerator engines use a common queue management interface. Specific engine driver may use this module. -- cgit v1.2.3 From c73d1871503713c7ee37da9fde155322dc50280f Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Tue, 21 Apr 2020 14:56:49 +1000 Subject: crypto: hisilicon/qm - add more ACPI dependencies due to the selects of CRYPTO_DEV_HISI_QM which now depends on ACPI Fixes: 6c6dd5802c2d ("crypto: hisilicon/qm - add controller reset...") Signed-off-by: Stephen Rothwell Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/Kconfig | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers/crypto') diff --git a/drivers/crypto/hisilicon/Kconfig b/drivers/crypto/hisilicon/Kconfig index 99e962e39f36..9c3b3ca815e6 100644 --- a/drivers/crypto/hisilicon/Kconfig +++ b/drivers/crypto/hisilicon/Kconfig @@ -29,6 +29,7 @@ config CRYPTO_DEV_HISI_SEC2 depends on PCI && PCI_MSI depends on UACCE || UACCE=n depends on ARM64 || (COMPILE_TEST && 64BIT) + depends on ACPI help Support for HiSilicon SEC Engine of version 2 in crypto subsystem. It provides AES, SM4, and 3DES algorithms with ECB @@ -53,6 +54,7 @@ config CRYPTO_DEV_HISI_ZIP depends on ARM64 || (COMPILE_TEST && 64BIT) depends on !CPU_BIG_ENDIAN || COMPILE_TEST depends on UACCE || UACCE=n + depends on ACPI select CRYPTO_DEV_HISI_QM help Support for HiSilicon ZIP Driver @@ -62,6 +64,7 @@ config CRYPTO_DEV_HISI_HPRE depends on PCI && PCI_MSI depends on UACCE || UACCE=n depends on ARM64 || (COMPILE_TEST && 64BIT) + depends on ACPI select CRYPTO_DEV_HISI_QM select CRYPTO_DH select CRYPTO_RSA -- cgit v1.2.3 From 8a656a48f75f193b901efe14326663505874c37a Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Wed, 15 Apr 2020 23:49:47 +0100 Subject: crypto: chelsio - remove redundant assignment to variable error The variable error is being initialized with a value that is never read and it is being updated later with a new value. The initialization is redundant and can be removed. 
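A tiny self-contained sketch of the dead-store pattern being removed (the helper is hypothetical):

    static int example_helper(void)         /* hypothetical helper */
    {
            return 0;
    }

    static int example_caller(void)
    {
            int error;      /* an "= -EINVAL" initializer here would never be read */

            error = example_helper();
            if (error)
                    return error;

            return 0;
    }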
Addresses-Coverity: ("Unused value") Signed-off-by: Colin Ian King Signed-off-by: Herbert Xu --- drivers/crypto/chelsio/chcr_algo.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c index c29b80dd30d8..5d3000fdd5f4 100644 --- a/drivers/crypto/chelsio/chcr_algo.c +++ b/drivers/crypto/chelsio/chcr_algo.c @@ -1757,7 +1757,7 @@ static int chcr_ahash_final(struct ahash_request *req) struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm)); struct chcr_context *ctx = h_ctx(rtfm); u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm)); - int error = -EINVAL; + int error; unsigned int cpu; cpu = get_cpu(); -- cgit v1.2.3 From 9c3d6497fbfa0911d4cd6f261762a0a7af29566a Mon Sep 17 00:00:00 2001 From: Tang Bin Date: Sun, 19 Apr 2020 15:12:45 +0800 Subject: crypto: bcm - Delete redundant variable definition The variable "i" is redundant to be assigned a value of zero,because it's assigned in the for loop, so remove redundant one here. Signed-off-by: Shengju Zhang Signed-off-by: Tang Bin Signed-off-by: Herbert Xu --- drivers/crypto/bcm/cipher.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c index c8b9408541a9..5db23c18c81f 100644 --- a/drivers/crypto/bcm/cipher.c +++ b/drivers/crypto/bcm/cipher.c @@ -4724,7 +4724,6 @@ static int spu_dt_read(struct platform_device *pdev) spu->spu_type = matched_spu_type->type; spu->spu_subtype = matched_spu_type->subtype; - i = 0; for (i = 0; (i < MAX_SPUS) && ((spu_ctrl_regs = platform_get_resource(pdev, IORESOURCE_MEM, i)) != NULL); i++) { -- cgit v1.2.3 From 97f9ac3db6612f14ac0c509e1a63ce14fd4cc0eb Mon Sep 17 00:00:00 2001 From: Tom Lendacky Date: Tue, 21 Apr 2020 12:44:49 -0500 Subject: crypto: ccp - Add support for SEV-ES to the PSP driver To provide support for SEV-ES, the hypervisor must provide an area of memory to the PSP. Once this Trusted Memory Region (TMR) is provided to the PSP, the contents of this area of memory are no longer available to the x86. Update the PSP driver to allocate a 1MB region for the TMR that is 1MB aligned and then provide it to the PSP through the SEV INIT command. Signed-off-by: Tom Lendacky Reviewed-by: Brijesh Singh Reviewed-by: Joerg Roedel Signed-off-by: Herbert Xu --- drivers/crypto/ccp/sev-dev.c | 43 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) (limited to 'drivers/crypto') diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index 896f190b9a50..439cd737076e 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -20,6 +20,7 @@ #include #include #include +#include #include @@ -44,6 +45,14 @@ MODULE_PARM_DESC(psp_probe_timeout, " default timeout value, in seconds, during static bool psp_dead; static int psp_timeout; +/* Trusted Memory Region (TMR): + * The TMR is a 1MB area that must be 1MB aligned. Use the page allocator + * to allocate the memory, which will return aligned memory for the specified + * allocation order. 
+ */ +#define SEV_ES_TMR_SIZE (1024 * 1024) +static void *sev_es_tmr; + static inline bool sev_version_greater_or_equal(u8 maj, u8 min) { struct sev_device *sev = psp_master->sev_data; @@ -214,6 +223,20 @@ static int __sev_platform_init_locked(int *error) if (sev->state == SEV_STATE_INIT) return 0; + if (sev_es_tmr) { + u64 tmr_pa; + + /* + * Do not include the encryption mask on the physical + * address of the TMR (firmware should clear it anyway). + */ + tmr_pa = __pa(sev_es_tmr); + + sev->init_cmd_buf.flags |= SEV_INIT_FLAGS_SEV_ES; + sev->init_cmd_buf.tmr_address = tmr_pa; + sev->init_cmd_buf.tmr_len = SEV_ES_TMR_SIZE; + } + rc = __sev_do_cmd_locked(SEV_CMD_INIT, &sev->init_cmd_buf, error); if (rc) return rc; @@ -1012,6 +1035,7 @@ EXPORT_SYMBOL_GPL(sev_issue_cmd_external_user); void sev_pci_init(void) { struct sev_device *sev = psp_master->sev_data; + struct page *tmr_page; int error, rc; if (!sev) @@ -1041,6 +1065,16 @@ void sev_pci_init(void) sev_update_firmware(sev->dev) == 0) sev_get_api_version(); + /* Obtain the TMR memory area for SEV-ES use */ + tmr_page = alloc_pages(GFP_KERNEL, get_order(SEV_ES_TMR_SIZE)); + if (tmr_page) { + sev_es_tmr = page_address(tmr_page); + } else { + sev_es_tmr = NULL; + dev_warn(sev->dev, + "SEV: TMR allocation failed, SEV-ES support unavailable\n"); + } + /* Initialize the platform */ rc = sev_platform_init(&error); if (rc && (error == SEV_RET_SECURE_DATA_INVALID)) { @@ -1075,4 +1109,13 @@ void sev_pci_exit(void) return; sev_platform_shutdown(NULL); + + if (sev_es_tmr) { + /* The TMR area was encrypted, flush it from the cache */ + wbinvd_on_all_cpus(); + + free_pages((unsigned long)sev_es_tmr, + get_order(SEV_ES_TMR_SIZE)); + sev_es_tmr = NULL; + } } -- cgit v1.2.3 From d0f6223c0dc5196ef684657f6b77d4fa5fca2020 Mon Sep 17 00:00:00 2001 From: Zou Wei Date: Thu, 23 Apr 2020 10:22:36 +0800 Subject: crypto: hisilicon/qm - Make qm_controller_reset() static Fix the following sparse warning: drivers/crypto/hisilicon/qm.c:3079:5: warning: symbol 'qm_controller_reset' was not declared. Should it be static? Reported-by: Hulk Robot Signed-off-by: Zou Wei Reviewed-by: Zhou Wang Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/qm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index 80c552523ac4..69d02cb40e4b 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -3076,7 +3076,7 @@ static int qm_controller_reset_done(struct hisi_qm *qm) return 0; } -int qm_controller_reset(struct hisi_qm *qm) +static int qm_controller_reset(struct hisi_qm *qm) { struct pci_dev *pdev = qm->pdev; int ret; -- cgit v1.2.3 From 42a13ddbab00455504d50ef159360f7451d597e4 Mon Sep 17 00:00:00 2001 From: Tang Bin Date: Sat, 25 Apr 2020 22:22:58 +0800 Subject: crypto: bcm - Remove the unnecessary cast for PTR_ERR(). It's not necessary to specify 'int' casting for PTR_ERR(). 
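A short sketch of the idiom, assuming a mailbox channel request similar to the driver's (names are illustrative):

    #include <linux/err.h>
    #include <linux/mailbox_client.h>

    static int example_request_channel(struct mbox_client *cl, int index)
    {
            struct mbox_chan *chan;

            chan = mbox_request_channel(cl, index);
            if (IS_ERR(chan))
                    return PTR_ERR(chan);   /* no (int) cast needed */

            /* ... use the channel ... */
            return 0;
    }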
Signed-off-by: Zhang Shengju Signed-off-by: Tang Bin Signed-off-by: Herbert Xu --- drivers/crypto/bcm/cipher.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c index 5db23c18c81f..36a1f4e2aff9 100644 --- a/drivers/crypto/bcm/cipher.c +++ b/drivers/crypto/bcm/cipher.c @@ -4436,7 +4436,7 @@ static int spu_mb_init(struct device *dev) for (i = 0; i < iproc_priv.spu.num_chan; i++) { iproc_priv.mbox[i] = mbox_request_channel(mcl, i); if (IS_ERR(iproc_priv.mbox[i])) { - err = (int)PTR_ERR(iproc_priv.mbox[i]); + err = PTR_ERR(iproc_priv.mbox[i]); dev_err(dev, "Mbox channel %d request failed with err %d", i, err); -- cgit v1.2.3 From 12b3cf9093542d9f752a4968815ece836159013f Mon Sep 17 00:00:00 2001 From: Tang Bin Date: Sat, 25 Apr 2020 22:24:14 +0800 Subject: crypto: bcm - Fix unused assignment Delete unused initialized value in cipher.c file. Signed-off-by: Zhang Shengju Signed-off-by: Tang Bin Signed-off-by: Herbert Xu --- drivers/crypto/bcm/cipher.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c index 36a1f4e2aff9..7bdecf813940 100644 --- a/drivers/crypto/bcm/cipher.c +++ b/drivers/crypto/bcm/cipher.c @@ -308,9 +308,9 @@ static int handle_skcipher_req(struct iproc_reqctx_s *rctx) container_of(areq, struct skcipher_request, base); struct iproc_ctx_s *ctx = rctx->ctx; struct spu_cipher_parms cipher_parms; - int err = 0; - unsigned int chunksize = 0; /* Num bytes of request to submit */ - int remaining = 0; /* Bytes of request still to process */ + int err; + unsigned int chunksize; /* Num bytes of request to submit */ + int remaining; /* Bytes of request still to process */ int chunk_start; /* Beginning of data for current SPU msg */ /* IV or ctr value to use in this SPU msg */ @@ -698,7 +698,7 @@ static int handle_ahash_req(struct iproc_reqctx_s *rctx) /* number of bytes still to be hashed in this req */ unsigned int nbytes_to_hash = 0; - int err = 0; + int err; unsigned int chunksize = 0; /* length of hash carry + new data */ /* * length of new data, not from hash carry, to be submitted in @@ -1664,7 +1664,7 @@ static void spu_rx_callback(struct mbox_client *cl, void *msg) struct spu_hw *spu = &iproc_priv.spu; struct brcm_message *mssg = msg; struct iproc_reqctx_s *rctx; - int err = 0; + int err; rctx = mssg->ctx; if (unlikely(!rctx)) { @@ -1967,7 +1967,7 @@ static int ahash_enqueue(struct ahash_request *req) struct iproc_reqctx_s *rctx = ahash_request_ctx(req); struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm); - int err = 0; + int err; const char *alg_name; flow_log("ahash_enqueue() nbytes:%u\n", req->nbytes); @@ -2299,7 +2299,7 @@ ahash_finup_exit: static int ahash_digest(struct ahash_request *req) { - int err = 0; + int err; flow_log("ahash_digest() nbytes:%u\n", req->nbytes); @@ -4746,7 +4746,7 @@ static int bcm_spu_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct spu_hw *spu = &iproc_priv.spu; - int err = 0; + int err; iproc_priv.pdev = pdev; platform_set_drvdata(iproc_priv.pdev, -- cgit v1.2.3 From 43b05ce76733164e1c1b33ab16eda14130646c96 Mon Sep 17 00:00:00 2001 From: Etienne Carriere Date: Mon, 27 Apr 2020 08:42:24 +0200 Subject: crypto: stm32/hash - defer probe for reset controller Change stm32 HASH driver to defer its probe operation when reset controller device is registered but 
has not been probed yet. Signed-off-by: Etienne Carriere Reviewed-by: Lionel DEBIEVE Signed-off-by: Herbert Xu --- drivers/crypto/stm32/stm32-hash.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c index 167b80eec437..fad6190be088 100644 --- a/drivers/crypto/stm32/stm32-hash.c +++ b/drivers/crypto/stm32/stm32-hash.c @@ -1482,7 +1482,12 @@ static int stm32_hash_probe(struct platform_device *pdev) pm_runtime_enable(dev); hdev->rst = devm_reset_control_get(&pdev->dev, NULL); - if (!IS_ERR(hdev->rst)) { + if (IS_ERR(hdev->rst)) { + if (PTR_ERR(hdev->rst) == -EPROBE_DEFER) { + ret = -EPROBE_DEFER; + goto err_reset; + } + } else { reset_control_assert(hdev->rst); udelay(2); reset_control_deassert(hdev->rst); @@ -1535,7 +1540,7 @@ err_engine: if (hdev->dma_lch) dma_release_channel(hdev->dma_lch); - +err_reset: pm_runtime_disable(dev); pm_runtime_put_noidle(dev); -- cgit v1.2.3 From 45dafed6c5ecd01400766ce99a49b2d8ad351ce6 Mon Sep 17 00:00:00 2001 From: Etienne Carriere Date: Mon, 27 Apr 2020 08:42:25 +0200 Subject: crypto: stm32/hash - defer probe for dma device Change stm32 HASH driver to defer its probe operation when DMA channel device is registered but has not been probed yet. Signed-off-by: Etienne Carriere Reviewed-by: Lionel DEBIEVE Signed-off-by: Herbert Xu --- drivers/crypto/stm32/stm32-hash.c | 22 +++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c index fad6190be088..0d592f55a271 100644 --- a/drivers/crypto/stm32/stm32-hash.c +++ b/drivers/crypto/stm32/stm32-hash.c @@ -507,6 +507,7 @@ static int stm32_hash_hmac_dma_send(struct stm32_hash_dev *hdev) static int stm32_hash_dma_init(struct stm32_hash_dev *hdev) { struct dma_slave_config dma_conf; + struct dma_chan *chan; int err; memset(&dma_conf, 0, sizeof(dma_conf)); @@ -518,11 +519,11 @@ static int stm32_hash_dma_init(struct stm32_hash_dev *hdev) dma_conf.dst_maxburst = hdev->dma_maxburst; dma_conf.device_fc = false; - hdev->dma_lch = dma_request_chan(hdev->dev, "in"); - if (IS_ERR(hdev->dma_lch)) { - dev_err(hdev->dev, "Couldn't acquire a slave DMA channel.\n"); - return PTR_ERR(hdev->dma_lch); - } + chan = dma_request_chan(hdev->dev, "in"); + if (IS_ERR(chan)) + return PTR_ERR(chan); + + hdev->dma_lch = chan; err = dmaengine_slave_config(hdev->dma_lch, &dma_conf); if (err) { @@ -1498,8 +1499,15 @@ static int stm32_hash_probe(struct platform_device *pdev) platform_set_drvdata(pdev, hdev); ret = stm32_hash_dma_init(hdev); - if (ret) + switch (ret) { + case 0: + break; + case -ENOENT: dev_dbg(dev, "DMA mode not available\n"); + break; + default: + goto err_dma; + } spin_lock(&stm32_hash.lock); list_add_tail(&hdev->list, &stm32_hash.dev_list); @@ -1537,7 +1545,7 @@ err_engine: spin_lock(&stm32_hash.lock); list_del(&hdev->list); spin_unlock(&stm32_hash.lock); - +err_dma: if (hdev->dma_lch) dma_release_channel(hdev->dma_lch); err_reset: -- cgit v1.2.3 From 79cd691f609c3f5b9603063099502b70ab32ae87 Mon Sep 17 00:00:00 2001 From: Lionel Debieve Date: Mon, 27 Apr 2020 08:42:26 +0200 Subject: crypto: stm32/hash - don't print error on probe deferral Change driver to not print an error message when the device probe is deferred for a clock resource. 
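A minimal sketch of the pattern for a hypothetical driver with a single clock (-EPROBE_DEFER only means the probe will be retried later, so it is not reported as an error):

    #include <linux/clk.h>
    #include <linux/err.h>
    #include <linux/platform_device.h>

    static int example_get_clk(struct platform_device *pdev, struct clk **clk)
    {
            *clk = devm_clk_get(&pdev->dev, NULL);
            if (IS_ERR(*clk)) {
                    if (PTR_ERR(*clk) != -EPROBE_DEFER)
                            dev_err(&pdev->dev, "failed to get clock (%ld)\n",
                                    PTR_ERR(*clk));
                    return PTR_ERR(*clk);
            }

            return 0;
    }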
Signed-off-by: Lionel Debieve Signed-off-by: Herbert Xu --- drivers/crypto/stm32/stm32-hash.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c index 0d592f55a271..03c5e6683805 100644 --- a/drivers/crypto/stm32/stm32-hash.c +++ b/drivers/crypto/stm32/stm32-hash.c @@ -1464,8 +1464,11 @@ static int stm32_hash_probe(struct platform_device *pdev) hdev->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(hdev->clk)) { - dev_err(dev, "failed to get clock for hash (%lu)\n", - PTR_ERR(hdev->clk)); + if (PTR_ERR(hdev->clk) != -EPROBE_DEFER) { + dev_err(dev, "failed to get clock for hash (%lu)\n", + PTR_ERR(hdev->clk)); + } + return PTR_ERR(hdev->clk); } -- cgit v1.2.3 From 3f7819bd42153b6df04af40dfa3439ce71ce4ad1 Mon Sep 17 00:00:00 2001 From: Tang Bin Date: Mon, 27 Apr 2020 16:22:18 +0800 Subject: crypto: bcm - Use the defined variable to clean code Use the defined variable "dev" to make the code cleaner. Signed-off-by: Zhang Shengju Signed-off-by: Tang Bin Signed-off-by: Herbert Xu --- drivers/crypto/bcm/cipher.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c index 7bdecf813940..a353217a0d33 100644 --- a/drivers/crypto/bcm/cipher.c +++ b/drivers/crypto/bcm/cipher.c @@ -4717,7 +4717,7 @@ static int spu_dt_read(struct platform_device *pdev) matched_spu_type = of_device_get_match_data(dev); if (!matched_spu_type) { - dev_err(&pdev->dev, "Failed to match device\n"); + dev_err(dev, "Failed to match device\n"); return -ENODEV; } @@ -4730,7 +4730,7 @@ static int spu_dt_read(struct platform_device *pdev) spu->reg_vbase[i] = devm_ioremap_resource(dev, spu_ctrl_regs); if (IS_ERR(spu->reg_vbase[i])) { err = PTR_ERR(spu->reg_vbase[i]); - dev_err(&pdev->dev, "Failed to map registers: %d\n", + dev_err(dev, "Failed to map registers: %d\n", err); spu->reg_vbase[i] = NULL; return err; @@ -4756,7 +4756,7 @@ static int bcm_spu_probe(struct platform_device *pdev) if (err < 0) goto failure; - err = spu_mb_init(&pdev->dev); + err = spu_mb_init(dev); if (err < 0) goto failure; @@ -4765,7 +4765,7 @@ static int bcm_spu_probe(struct platform_device *pdev) else if (spu->spu_type == SPU_TYPE_SPU2) iproc_priv.bcm_hdr_len = 0; - spu_functions_register(&pdev->dev, spu->spu_type, spu->spu_subtype); + spu_functions_register(dev, spu->spu_type, spu->spu_subtype); spu_counters_init(); -- cgit v1.2.3 From 7e3e48d86b7c96f12896dd7111e4e5ae51c1b7da Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 1 May 2020 22:31:06 -0700 Subject: crypto: artpec6 - use crypto_shash_tfm_digest() Instead of manually allocating a 'struct shash_desc' on the stack and calling crypto_shash_digest(), switch to using the new helper function crypto_shash_tfm_digest() which does this for us. 
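A before/after sketch of the conversion that this and the following patches apply (the two wrapper functions are hypothetical; SHASH_DESC_ON_STACK(), crypto_shash_digest() and the new crypto_shash_tfm_digest() helper are the real crypto API):

    #include <crypto/hash.h>

    /* Before: every caller open-codes an on-stack descriptor. */
    static int example_digest_old(struct crypto_shash *tfm, const u8 *data,
                                  unsigned int len, u8 *out)
    {
            SHASH_DESC_ON_STACK(desc, tfm);

            desc->tfm = tfm;
            return crypto_shash_digest(desc, data, len, out);
    }

    /* After: the helper builds the descriptor internally. */
    static int example_digest_new(struct crypto_shash *tfm, const u8 *data,
                                  unsigned int len, u8 *out)
    {
            return crypto_shash_tfm_digest(tfm, data, len, out);
    }

Callers such as the HMAC setkey paths below become one-liners and lose the easy-to-forget desc->tfm assignment.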
Cc: Jesper Nilsson Cc: Lars Persson Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- drivers/crypto/axis/artpec6_crypto.c | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c index fcf1effc7661..62ba0325a618 100644 --- a/drivers/crypto/axis/artpec6_crypto.c +++ b/drivers/crypto/axis/artpec6_crypto.c @@ -2239,16 +2239,12 @@ artpec6_crypto_hash_set_key(struct crypto_ahash *tfm, blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); if (keylen > blocksize) { - SHASH_DESC_ON_STACK(hdesc, tfm_ctx->child_hash); - - hdesc->tfm = tfm_ctx->child_hash; - tfm_ctx->hmac_key_length = blocksize; - ret = crypto_shash_digest(hdesc, key, keylen, - tfm_ctx->hmac_key); + + ret = crypto_shash_tfm_digest(tfm_ctx->child_hash, key, keylen, + tfm_ctx->hmac_key); if (ret) return ret; - } else { memcpy(tfm_ctx->hmac_key, key, keylen); tfm_ctx->hmac_key_length = keylen; -- cgit v1.2.3 From f32b6775c795b125361ea9181ca4fcfa261a91dd Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 1 May 2020 22:31:07 -0700 Subject: crypto: ccp - use crypto_shash_tfm_digest() Instead of manually allocating a 'struct shash_desc' on the stack and calling crypto_shash_digest(), switch to using the new helper function crypto_shash_tfm_digest() which does this for us. Cc: Tom Lendacky Signed-off-by: Eric Biggers Acked-by: Tom Lendacky Signed-off-by: Herbert Xu --- drivers/crypto/ccp/ccp-crypto-sha.c | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c index 474e6f1a6a84..b0cc2bd73af8 100644 --- a/drivers/crypto/ccp/ccp-crypto-sha.c +++ b/drivers/crypto/ccp/ccp-crypto-sha.c @@ -272,9 +272,6 @@ static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key, { struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm)); struct crypto_shash *shash = ctx->u.sha.hmac_tfm; - - SHASH_DESC_ON_STACK(sdesc, shash); - unsigned int block_size = crypto_shash_blocksize(shash); unsigned int digest_size = crypto_shash_digestsize(shash); int i, ret; @@ -289,10 +286,8 @@ static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key, if (key_len > block_size) { /* Must hash the input key */ - sdesc->tfm = shash; - - ret = crypto_shash_digest(sdesc, key, key_len, - ctx->u.sha.key); + ret = crypto_shash_tfm_digest(shash, key, key_len, + ctx->u.sha.key); if (ret) return -EINVAL; -- cgit v1.2.3 From 8cbb809794b14703c960b5e2941c9e7fd97b765a Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 1 May 2020 22:31:08 -0700 Subject: crypto: ccree - use crypto_shash_tfm_digest() Instead of manually allocating a 'struct shash_desc' on the stack and calling crypto_shash_digest(), switch to using the new helper function crypto_shash_tfm_digest() which does this for us. 
Cc: Gilad Ben-Yossef Signed-off-by: Eric Biggers Acked-by: Gilad Ben-Yossef Signed-off-by: Herbert Xu --- drivers/crypto/ccree/cc_cipher.c | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c index a84335328f37..872ea3ff1c6b 100644 --- a/drivers/crypto/ccree/cc_cipher.c +++ b/drivers/crypto/ccree/cc_cipher.c @@ -427,12 +427,9 @@ static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key, int key_len = keylen >> 1; int err; - SHASH_DESC_ON_STACK(desc, ctx_p->shash_tfm); - - desc->tfm = ctx_p->shash_tfm; - - err = crypto_shash_digest(desc, ctx_p->user.key, key_len, - ctx_p->user.key + key_len); + err = crypto_shash_tfm_digest(ctx_p->shash_tfm, + ctx_p->user.key, key_len, + ctx_p->user.key + key_len); if (err) { dev_err(dev, "Failed to hash ESSIV key.\n"); return err; -- cgit v1.2.3 From 61c38e3a94f26035104aec643ee17e80e29ff329 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 1 May 2020 22:31:09 -0700 Subject: crypto: hisilicon/sec2 - use crypto_shash_tfm_digest() Instead of manually allocating a 'struct shash_desc' on the stack and calling crypto_shash_digest(), switch to using the new helper function crypto_shash_tfm_digest() which does this for us. Cc: Zaibo Xu Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/sec2/sec_crypto.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c index 7f1c6a31b82f..848ab492d26e 100644 --- a/drivers/crypto/hisilicon/sec2/sec_crypto.c +++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c @@ -832,7 +832,6 @@ static int sec_aead_auth_set_key(struct sec_auth_ctx *ctx, struct crypto_authenc_keys *keys) { struct crypto_shash *hash_tfm = ctx->hash_tfm; - SHASH_DESC_ON_STACK(shash, hash_tfm); int blocksize, ret; if (!keys->authkeylen) { @@ -842,8 +841,8 @@ static int sec_aead_auth_set_key(struct sec_auth_ctx *ctx, blocksize = crypto_shash_blocksize(hash_tfm); if (keys->authkeylen > blocksize) { - ret = crypto_shash_digest(shash, keys->authkey, - keys->authkeylen, ctx->a_key); + ret = crypto_shash_tfm_digest(hash_tfm, keys->authkey, + keys->authkeylen, ctx->a_key); if (ret) { pr_err("hisi_sec2: aead auth digest error!\n"); return -EINVAL; -- cgit v1.2.3 From e0077ea8ee1774cb99cf9adf10dd4e6dcbf363b0 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 1 May 2020 22:31:10 -0700 Subject: crypto: mediatek - use crypto_shash_tfm_digest() Instead of manually allocating a 'struct shash_desc' on the stack and calling crypto_shash_digest(), switch to using the new helper function crypto_shash_tfm_digest() which does this for us. 
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- drivers/crypto/mediatek/mtk-sha.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/mediatek/mtk-sha.c b/drivers/crypto/mediatek/mtk-sha.c index bd6309e57ab8..da3f0b8814aa 100644 --- a/drivers/crypto/mediatek/mtk-sha.c +++ b/drivers/crypto/mediatek/mtk-sha.c @@ -805,12 +805,9 @@ static int mtk_sha_setkey(struct crypto_ahash *tfm, const u8 *key, size_t ds = crypto_shash_digestsize(bctx->shash); int err, i; - SHASH_DESC_ON_STACK(shash, bctx->shash); - - shash->tfm = bctx->shash; - if (keylen > bs) { - err = crypto_shash_digest(shash, key, keylen, bctx->ipad); + err = crypto_shash_tfm_digest(bctx->shash, key, keylen, + bctx->ipad); if (err) return err; keylen = ds; -- cgit v1.2.3 From ce8e04888dd873179bc3979b0574cc7827071df5 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 1 May 2020 22:31:11 -0700 Subject: crypto: n2 - use crypto_shash_tfm_digest() Instead of manually allocating a 'struct shash_desc' on the stack and calling crypto_shash_digest(), switch to using the new helper function crypto_shash_tfm_digest() which does this for us. Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- drivers/crypto/n2_core.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c index f5c468f2cc82..6a828bbecea4 100644 --- a/drivers/crypto/n2_core.c +++ b/drivers/crypto/n2_core.c @@ -462,7 +462,6 @@ static int n2_hmac_async_setkey(struct crypto_ahash *tfm, const u8 *key, struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm); struct crypto_shash *child_shash = ctx->child_shash; struct crypto_ahash *fallback_tfm; - SHASH_DESC_ON_STACK(shash, child_shash); int err, bs, ds; fallback_tfm = ctx->base.fallback_tfm; @@ -470,14 +469,12 @@ static int n2_hmac_async_setkey(struct crypto_ahash *tfm, const u8 *key, if (err) return err; - shash->tfm = child_shash; - bs = crypto_shash_blocksize(child_shash); ds = crypto_shash_digestsize(child_shash); BUG_ON(ds > N2_HASH_KEY_MAX); if (keylen > bs) { - err = crypto_shash_digest(shash, key, keylen, - ctx->hash_key); + err = crypto_shash_tfm_digest(child_shash, key, keylen, + ctx->hash_key); if (err) return err; keylen = ds; -- cgit v1.2.3 From e29ba412bdfe15233abff3b49a46763d4a6dd7d9 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 1 May 2020 22:31:12 -0700 Subject: crypto: omap-sham - use crypto_shash_tfm_digest() Instead of manually allocating a 'struct shash_desc' on the stack and calling crypto_shash_digest(), switch to using the new helper function crypto_shash_tfm_digest() which does this for us. 
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- drivers/crypto/omap-sham.c | 20 ++++---------------- 1 file changed, 4 insertions(+), 16 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index e4072cd38585..d600c5b3fdd3 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c @@ -1245,16 +1245,6 @@ static int omap_sham_update(struct ahash_request *req) return omap_sham_enqueue(req, OP_UPDATE); } -static int omap_sham_shash_digest(struct crypto_shash *tfm, u32 flags, - const u8 *data, unsigned int len, u8 *out) -{ - SHASH_DESC_ON_STACK(shash, tfm); - - shash->tfm = tfm; - - return crypto_shash_digest(shash, data, len, out); -} - static int omap_sham_final_shash(struct ahash_request *req) { struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm); @@ -1270,9 +1260,8 @@ static int omap_sham_final_shash(struct ahash_request *req) !test_bit(FLAGS_AUTO_XOR, &ctx->dd->flags)) offset = get_block_size(ctx); - return omap_sham_shash_digest(tctx->fallback, req->base.flags, - ctx->buffer + offset, - ctx->bufcnt - offset, req->result); + return crypto_shash_tfm_digest(tctx->fallback, ctx->buffer + offset, + ctx->bufcnt - offset, req->result); } static int omap_sham_final(struct ahash_request *req) @@ -1351,9 +1340,8 @@ static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key, return err; if (keylen > bs) { - err = omap_sham_shash_digest(bctx->shash, - crypto_shash_get_flags(bctx->shash), - key, keylen, bctx->ipad); + err = crypto_shash_tfm_digest(bctx->shash, key, keylen, + bctx->ipad); if (err) return err; keylen = ds; -- cgit v1.2.3 From ecca1ad60cdfc061c1242490637e9e806ff1d884 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 1 May 2020 22:31:13 -0700 Subject: crypto: s5p-sss - use crypto_shash_tfm_digest() Instead of manually allocating a 'struct shash_desc' on the stack and calling crypto_shash_digest(), switch to using the new helper function crypto_shash_tfm_digest() which does this for us. 
Cc: Krzysztof Kozlowski Cc: Vladimir Zapolskiy Cc: Kamil Konieczny Signed-off-by: Eric Biggers Acked-by: Krzysztof Kozlowski Signed-off-by: Herbert Xu --- drivers/crypto/s5p-sss.c | 39 ++++++--------------------------------- 1 file changed, 6 insertions(+), 33 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c index 2a16800d2579..341433fbcc4a 100644 --- a/drivers/crypto/s5p-sss.c +++ b/drivers/crypto/s5p-sss.c @@ -1520,37 +1520,6 @@ static int s5p_hash_update(struct ahash_request *req) return s5p_hash_enqueue(req, true); /* HASH_OP_UPDATE */ } -/** - * s5p_hash_shash_digest() - calculate shash digest - * @tfm: crypto transformation - * @flags: tfm flags - * @data: input data - * @len: length of data - * @out: output buffer - */ -static int s5p_hash_shash_digest(struct crypto_shash *tfm, u32 flags, - const u8 *data, unsigned int len, u8 *out) -{ - SHASH_DESC_ON_STACK(shash, tfm); - - shash->tfm = tfm; - - return crypto_shash_digest(shash, data, len, out); -} - -/** - * s5p_hash_final_shash() - calculate shash digest - * @req: AHASH request - */ -static int s5p_hash_final_shash(struct ahash_request *req) -{ - struct s5p_hash_ctx *tctx = crypto_tfm_ctx(req->base.tfm); - struct s5p_hash_reqctx *ctx = ahash_request_ctx(req); - - return s5p_hash_shash_digest(tctx->fallback, req->base.flags, - ctx->buffer, ctx->bufcnt, req->result); -} - /** * s5p_hash_final() - close up hash and calculate digest * @req: AHASH request @@ -1582,8 +1551,12 @@ static int s5p_hash_final(struct ahash_request *req) if (ctx->error) return -EINVAL; /* uncompleted hash is not needed */ - if (!ctx->digcnt && ctx->bufcnt < BUFLEN) - return s5p_hash_final_shash(req); + if (!ctx->digcnt && ctx->bufcnt < BUFLEN) { + struct s5p_hash_ctx *tctx = crypto_tfm_ctx(req->base.tfm); + + return crypto_shash_tfm_digest(tctx->fallback, ctx->buffer, + ctx->bufcnt, req->result); + } return s5p_hash_enqueue(req, false); /* HASH_OP_FINAL */ } -- cgit v1.2.3 From 2aaba014b55be46affcae78edff356c5e3389081 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Sat, 2 May 2020 11:24:26 -0700 Subject: crypto: lib/sha1 - remove unnecessary includes of linux/cryptohash.h sounds very generic and important, like it's the header to include if you're doing cryptographic hashing in the kernel. But actually it only includes the library implementation of the SHA-1 compression function (not even the full SHA-1). This should basically never be used anymore; SHA-1 is no longer considered secure, and there are much better ways to do cryptographic hashing in the kernel. Most files that include this header don't actually need it. So in preparation for removing it, remove all these unneeded includes of it. 
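As a concrete illustration of the "much better ways" mentioned above, here is a minimal sketch of hashing a buffer through the regular crypto API; the function name and the choice of sha256 are examples only and are not part of this patch.

#include <crypto/hash.h>
#include <linux/err.h>

/* Allocate a modern shash transform and digest a buffer with it,
 * instead of open-coding the bare SHA-1 compression function. */
static int sketch_hash_buffer(const u8 *data, unsigned int len, u8 *out)
{
	struct crypto_shash *tfm;
	int err;

	tfm = crypto_alloc_shash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_shash_tfm_digest(tfm, data, len, out);
	crypto_free_shash(tfm);
	return err;
}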
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- drivers/crypto/atmel-sha.c | 1 - drivers/crypto/chelsio/chcr_algo.c | 1 - drivers/crypto/chelsio/chcr_ipsec.c | 1 - drivers/crypto/omap-sham.c | 1 - 4 files changed, 4 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c index e536e2a6bbd8..75ccf41a7cb9 100644 --- a/drivers/crypto/atmel-sha.c +++ b/drivers/crypto/atmel-sha.c @@ -31,7 +31,6 @@ #include #include #include -#include #include #include #include diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c index 5d3000fdd5f4..caf1136e7ef9 100644 --- a/drivers/crypto/chelsio/chcr_algo.c +++ b/drivers/crypto/chelsio/chcr_algo.c @@ -44,7 +44,6 @@ #include #include #include -#include #include #include #include diff --git a/drivers/crypto/chelsio/chcr_ipsec.c b/drivers/crypto/chelsio/chcr_ipsec.c index 9fd3b9d1ec2f..25bf6d963066 100644 --- a/drivers/crypto/chelsio/chcr_ipsec.c +++ b/drivers/crypto/chelsio/chcr_ipsec.c @@ -40,7 +40,6 @@ #include #include #include -#include #include #include #include diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index d600c5b3fdd3..063ad5d03f33 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c @@ -33,7 +33,6 @@ #include #include #include -#include #include #include #include -- cgit v1.2.3 From 1036bb50c80561bab85a0dc57e87e6c7645c97b7 Mon Sep 17 00:00:00 2001 From: Rikard Falkeborn Date: Sat, 9 May 2020 00:34:59 +0200 Subject: crypto: ccree - constify struct debugfs_reg32 pid_cd_regs and debug_regs are never changed and can therefore be made const. This allows the compiler to put it in the text section instead of the data section. Before: text data bss dec hex filename 2871 2320 64 5255 1487 drivers/crypto/ccree/cc_debugfs.o After: text data bss dec hex filename 3255 1936 64 5255 1487 drivers/crypto/ccree/cc_debugfs.o Signed-off-by: Rikard Falkeborn Signed-off-by: Herbert Xu --- drivers/crypto/ccree/cc_debugfs.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/ccree/cc_debugfs.c b/drivers/crypto/ccree/cc_debugfs.c index c454afce7781..7083767602fc 100644 --- a/drivers/crypto/ccree/cc_debugfs.c +++ b/drivers/crypto/ccree/cc_debugfs.c @@ -26,7 +26,7 @@ static struct debugfs_reg32 ver_sig_regs[] = { { .name = "VERSION" }, /* Must be 1st */ }; -static struct debugfs_reg32 pid_cid_regs[] = { +static const struct debugfs_reg32 pid_cid_regs[] = { CC_DEBUG_REG(PERIPHERAL_ID_0), CC_DEBUG_REG(PERIPHERAL_ID_1), CC_DEBUG_REG(PERIPHERAL_ID_2), @@ -38,7 +38,7 @@ static struct debugfs_reg32 pid_cid_regs[] = { CC_DEBUG_REG(COMPONENT_ID_3), }; -static struct debugfs_reg32 debug_regs[] = { +static const struct debugfs_reg32 debug_regs[] = { CC_DEBUG_REG(HOST_IRR), CC_DEBUG_REG(HOST_POWER_DOWN_EN), CC_DEBUG_REG(AXIM_MON_ERR), -- cgit v1.2.3 From 2c2207aee52b6c6627f91aa2c7f5316c4087363a Mon Sep 17 00:00:00 2001 From: Rikard Falkeborn Date: Sat, 9 May 2020 00:35:00 +0200 Subject: crypto: hisilicon/hpre - constify struct debugfs_reg32 hpre_cluster_dfx_regs and hpre_com_dfx_regs are never changed and can therefore be made const. This allows the compiler to put it in the text section instead of the data section. 
Before: text data bss dec hex filename 16455 6288 480 23223 5ab7 drivers/crypto/hisilicon/hpre/hpre_main.o After: text data bss dec hex filename 16839 5904 480 23223 5ab7 drivers/crypto/hisilicon/hpre/hpre_main.o Signed-off-by: Rikard Falkeborn Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/hpre/hpre_main.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c index 0d63666ba373..840e16c14570 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_main.c +++ b/drivers/crypto/hisilicon/hpre/hpre_main.c @@ -134,7 +134,7 @@ static const u64 hpre_cluster_offsets[] = { HPRE_CLSTR_BASE + HPRE_CLUSTER3 * HPRE_CLSTR_ADDR_INTRVL, }; -static struct debugfs_reg32 hpre_cluster_dfx_regs[] = { +static const struct debugfs_reg32 hpre_cluster_dfx_regs[] = { {"CORES_EN_STATUS ", HPRE_CORE_EN_OFFSET}, {"CORES_INI_CFG ", HPRE_CORE_INI_CFG_OFFSET}, {"CORES_INI_STATUS ", HPRE_CORE_INI_STATUS_OFFSET}, @@ -142,7 +142,7 @@ static struct debugfs_reg32 hpre_cluster_dfx_regs[] = { {"CORES_IS_SCHD ", HPRE_CORE_IS_SCHD_OFFSET}, }; -static struct debugfs_reg32 hpre_com_dfx_regs[] = { +static const struct debugfs_reg32 hpre_com_dfx_regs[] = { {"READ_CLR_EN ", HPRE_CTRL_CNT_CLR_CE}, {"AXQOS ", HPRE_VFG_AXQOS}, {"AWUSR_CFG ", HPRE_AWUSR_FP_CFG}, -- cgit v1.2.3 From 8f68659bac1da933bf5526d4eeec46504d68457b Mon Sep 17 00:00:00 2001 From: Rikard Falkeborn Date: Sat, 9 May 2020 00:35:01 +0200 Subject: crypto: hisilicon/zip - constify struct debugfs_reg32 hzip_dfx_regs is never changed and can be made const. This allows the compiler to put it in the text section instead of the data section. Before: text data bss dec hex filename 15236 6160 480 21876 5574 drivers/crypto/hisilicon/zip/zip_main.o After: text data bss dec hex filename 15620 5776 480 21876 5574 drivers/crypto/hisilicon/zip/zip_main.o Signed-off-by: Rikard Falkeborn Reviewed-by: Zhou Wang Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/zip/zip_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c index 37db11f96fab..6934a03d21e1 100644 --- a/drivers/crypto/hisilicon/zip/zip_main.c +++ b/drivers/crypto/hisilicon/zip/zip_main.c @@ -165,7 +165,7 @@ static const u64 core_offsets[] = { [HZIP_DECOMP_CORE5] = 0x309000, }; -static struct debugfs_reg32 hzip_dfx_regs[] = { +static const struct debugfs_reg32 hzip_dfx_regs[] = { {"HZIP_GET_BD_NUM ", 0x00ull}, {"HZIP_GET_RIGHT_BD ", 0x04ull}, {"HZIP_GET_ERROR_BD ", 0x08ull}, -- cgit v1.2.3 From c549e8127213bbebba76b88a148875a80561e51d Mon Sep 17 00:00:00 2001 From: Rikard Falkeborn Date: Sat, 9 May 2020 00:35:02 +0200 Subject: crypto: hisilicon/sec2 - constify sec_dfx_regs sec_dfx_regs is never changed and can therefore be made const. This allows the compiler to put it in the text section instead of the data section. 
Before: text data bss dec hex filename 17982 7312 480 25774 64ae drivers/crypto/hisilicon/sec2/sec_main.o After: text data bss dec hex filename 18366 6928 480 25774 64ae drivers/crypto/hisilicon/sec2/sec_main.o Signed-off-by: Rikard Falkeborn Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/sec2/sec_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index 07a5f4eb96ff..6f577b34098f 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -110,7 +110,7 @@ static const char * const sec_dbg_file_name[] = { [SEC_CLEAR_ENABLE] = "clear_enable", }; -static struct debugfs_reg32 sec_dfx_regs[] = { +static const struct debugfs_reg32 sec_dfx_regs[] = { {"SEC_PF_ABNORMAL_INT_SOURCE ", 0x301010}, {"SEC_SAA_EN ", 0x301270}, {"SEC_BD_LATENCY_MIN ", 0x301600}, -- cgit v1.2.3 From 18614230f430a1108aad5ba2be2dd158b664081b Mon Sep 17 00:00:00 2001 From: Longfang Liu Date: Sat, 9 May 2020 17:43:54 +0800 Subject: crypto: hisilicon/sec2 - modify the SEC probe process Adjust the position of SMMU status check and SEC queue initialization in SEC probe Signed-off-by: Longfang Liu Signed-off-by: Zaibo Xu Signed-off-by: Shukun Tan Reviewed-by: Zhou Wang Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/sec2/sec_main.c | 67 ++++++++++++++------------------ 1 file changed, 30 insertions(+), 37 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index 6f577b34098f..5853a0695459 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -765,6 +765,21 @@ static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) qm->dev_name = sec_name; qm->fun_type = (pdev->device == SEC_PF_PCI_DEVICE_ID) ? QM_HW_PF : QM_HW_VF; + if (qm->fun_type == QM_HW_PF) { + qm->qp_base = SEC_PF_DEF_Q_BASE; + qm->qp_num = pf_q_num; + qm->debug.curr_qm_qp_num = pf_q_num; + qm->qm_list = &sec_devices; + } else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V1) { + /* + * have no way to get qm configure in VM in v1 hardware, + * so currently force PF to uses SEC_PF_DEF_Q_NUM, and force + * to trigger only one VF in v1 hardware. + * v2 hardware has no such problem. + */ + qm->qp_base = SEC_PF_DEF_Q_NUM; + qm->qp_num = SEC_QUEUE_NUM_V1 - SEC_PF_DEF_Q_NUM; + } qm->use_dma_api = true; return hisi_qm_init(qm); @@ -775,8 +790,9 @@ static void sec_qm_uninit(struct hisi_qm *qm) hisi_qm_uninit(qm); } -static int sec_probe_init(struct hisi_qm *qm, struct sec_dev *sec) +static int sec_probe_init(struct sec_dev *sec) { + struct hisi_qm *qm = &sec->qm; int ret; /* @@ -793,40 +809,18 @@ static int sec_probe_init(struct hisi_qm *qm, struct sec_dev *sec) return -ENOMEM; } - if (qm->fun_type == QM_HW_PF) { - qm->qp_base = SEC_PF_DEF_Q_BASE; - qm->qp_num = pf_q_num; - qm->debug.curr_qm_qp_num = pf_q_num; - qm->qm_list = &sec_devices; - + if (qm->fun_type == QM_HW_PF) ret = sec_pf_probe_init(sec); - if (ret) - goto err_probe_uninit; - } else if (qm->fun_type == QM_HW_VF) { - /* - * have no way to get qm configure in VM in v1 hardware, - * so currently force PF to uses SEC_PF_DEF_Q_NUM, and force - * to trigger only one VF in v1 hardware. - * v2 hardware has no such problem. 
- */ - if (qm->ver == QM_HW_V1) { - qm->qp_base = SEC_PF_DEF_Q_NUM; - qm->qp_num = SEC_QUEUE_NUM_V1 - SEC_PF_DEF_Q_NUM; - } else if (qm->ver == QM_HW_V2) { - /* v2 starts to support get vft by mailbox */ - ret = hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num); - if (ret) - goto err_probe_uninit; - } - } else { - ret = -ENODEV; - goto err_probe_uninit; + else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V2) + /* v2 starts to support get vft by mailbox */ + ret = hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num); + + if (ret) { + destroy_workqueue(qm->wq); + return ret; } return 0; -err_probe_uninit: - destroy_workqueue(qm->wq); - return ret; } static void sec_probe_uninit(struct hisi_qm *qm) @@ -865,18 +859,17 @@ static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id) pci_set_drvdata(pdev, sec); - sec->ctx_q_num = ctx_q_num; - sec_iommu_used_check(sec); - qm = &sec->qm; - ret = sec_qm_init(qm, pdev); if (ret) { - pci_err(pdev, "Failed to pre init qm!\n"); + pci_err(pdev, "Failed to init SEC QM (%d)!\n", ret); return ret; } - ret = sec_probe_init(qm, sec); + sec->ctx_q_num = ctx_q_num; + sec_iommu_used_check(sec); + + ret = sec_probe_init(sec); if (ret) { pci_err(pdev, "Failed to probe!\n"); goto err_qm_uninit; -- cgit v1.2.3 From 5f3a2a5d37ff9058e3fa253cb8445f7780513635 Mon Sep 17 00:00:00 2001 From: Longfang Liu Date: Sat, 9 May 2020 17:43:55 +0800 Subject: crypto: hisilicon/hpre - modify the HPRE probe process Misc fixes on coding style: 1.Merge pre-initialization and initialization of QM 2.Package the initialization of QM's PF and VF into a function Signed-off-by: Longfang Liu Signed-off-by: Zaibo Xu Signed-off-by: Shukun Tan Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/hpre/hpre_main.c | 42 ++++++++++++++++++------------- 1 file changed, 25 insertions(+), 17 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c index 840e16c14570..e7585efa1596 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_main.c +++ b/drivers/crypto/hisilicon/hpre/hpre_main.c @@ -666,7 +666,7 @@ static void hpre_debugfs_exit(struct hpre *hpre) debugfs_remove_recursive(qm->debug.debug_root); } -static int hpre_qm_pre_init(struct hisi_qm *qm, struct pci_dev *pdev) +static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) { enum qm_hw_ver rev_id; @@ -685,13 +685,14 @@ static int hpre_qm_pre_init(struct hisi_qm *qm, struct pci_dev *pdev) qm->dev_name = hpre_name; qm->fun_type = (pdev->device == HPRE_PCI_DEVICE_ID) ? 
QM_HW_PF : QM_HW_VF; + if (pdev->is_physfn) { qm->qp_base = HPRE_PF_DEF_Q_BASE; qm->qp_num = hpre_pf_q_num; } qm->use_dma_api = true; - return 0; + return hisi_qm_init(qm); } static void hpre_log_hw_error(struct hisi_qm *qm, u32 err_sts) @@ -766,6 +767,20 @@ static int hpre_pf_probe_init(struct hpre *hpre) return 0; } +static int hpre_probe_init(struct hpre *hpre) +{ + struct hisi_qm *qm = &hpre->qm; + int ret = -ENODEV; + + if (qm->fun_type == QM_HW_PF) + ret = hpre_pf_probe_init(hpre); + else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V2) + /* v2 starts to support get vft by mailbox */ + ret = hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num); + + return ret; +} + static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct hisi_qm *qm; @@ -779,23 +794,16 @@ static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id) pci_set_drvdata(pdev, hpre); qm = &hpre->qm; - ret = hpre_qm_pre_init(qm, pdev); - if (ret) - return ret; - - ret = hisi_qm_init(qm); - if (ret) + ret = hpre_qm_init(qm, pdev); + if (ret) { + pci_err(pdev, "Failed to init HPRE QM (%d)!\n", ret); return ret; + } - if (pdev->is_physfn) { - ret = hpre_pf_probe_init(hpre); - if (ret) - goto err_with_qm_init; - } else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V2) { - /* v2 starts to support get vft by mailbox */ - ret = hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num); - if (ret) - goto err_with_qm_init; + ret = hpre_probe_init(hpre); + if (ret) { + pci_err(pdev, "Failed to probe (%d)!\n", ret); + goto err_with_qm_init; } ret = hisi_qm_start(qm); -- cgit v1.2.3 From cfd66a660f73adfd388666f122e998691763aa55 Mon Sep 17 00:00:00 2001 From: Longfang Liu Date: Sat, 9 May 2020 17:43:56 +0800 Subject: crypto: hisilicon/zip - modify the ZIP probe process Misc fixes on coding style: 1.Merge QM initialization code into a function 2.Merge QM's PF and VF initialization into a function Signed-off-by: Longfang Liu Signed-off-by: Zaibo Xu Signed-off-by: Shukun Tan Reviewed-by: Zhou Wang Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/zip/zip_main.c | 60 +++++++++++++++++++++++---------- 1 file changed, 42 insertions(+), 18 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c index 6934a03d21e1..3132c4e6a9da 100644 --- a/drivers/crypto/hisilicon/zip/zip_main.c +++ b/drivers/crypto/hisilicon/zip/zip_main.c @@ -701,23 +701,14 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip) return 0; } -static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id) +static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) { - struct hisi_zip *hisi_zip; enum qm_hw_ver rev_id; - struct hisi_qm *qm; - int ret; rev_id = hisi_qm_get_hw_version(pdev); if (rev_id == QM_HW_UNKNOWN) return -EINVAL; - hisi_zip = devm_kzalloc(&pdev->dev, sizeof(*hisi_zip), GFP_KERNEL); - if (!hisi_zip) - return -ENOMEM; - pci_set_drvdata(pdev, hisi_zip); - - qm = &hisi_zip->qm; qm->use_dma_api = true; qm->pdev = pdev; qm->ver = rev_id; @@ -725,13 +716,16 @@ static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id) qm->algs = "zlib\ngzip"; qm->sqe_size = HZIP_SQE_SIZE; qm->dev_name = hisi_zip_name; - qm->fun_type = (pdev->device == PCI_DEVICE_ID_ZIP_PF) ? QM_HW_PF : - QM_HW_VF; - ret = hisi_qm_init(qm); - if (ret) { - dev_err(&pdev->dev, "Failed to init qm!\n"); - return ret; - } + qm->fun_type = (pdev->device == PCI_DEVICE_ID_ZIP_PF) ? 
+ QM_HW_PF : QM_HW_VF; + + return hisi_qm_init(qm); +} + +static int hisi_zip_probe_init(struct hisi_zip *hisi_zip) +{ + struct hisi_qm *qm = &hisi_zip->qm; + int ret; if (qm->fun_type == QM_HW_PF) { ret = hisi_zip_pf_probe_init(hisi_zip); @@ -754,7 +748,36 @@ static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id) qm->qp_num = HZIP_QUEUE_NUM_V1 - HZIP_PF_DEF_Q_NUM; } else if (qm->ver == QM_HW_V2) /* v2 starts to support get vft by mailbox */ - hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num); + return hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num); + } + + return 0; +} + +static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct hisi_zip *hisi_zip; + struct hisi_qm *qm; + int ret; + + hisi_zip = devm_kzalloc(&pdev->dev, sizeof(*hisi_zip), GFP_KERNEL); + if (!hisi_zip) + return -ENOMEM; + + pci_set_drvdata(pdev, hisi_zip); + + qm = &hisi_zip->qm; + + ret = hisi_zip_qm_init(qm, pdev); + if (ret) { + pci_err(pdev, "Failed to init ZIP QM (%d)!\n", ret); + return ret; + } + + ret = hisi_zip_probe_init(hisi_zip); + if (ret) { + pci_err(pdev, "Failed to probe (%d)!\n", ret); + goto err_qm_uninit; } ret = hisi_qm_start(qm); @@ -787,6 +810,7 @@ err_remove_from_list: hisi_qm_stop(qm); err_qm_uninit: hisi_qm_uninit(qm); + return ret; } -- cgit v1.2.3 From 20b291f51802b35d84e20efbf110e8c9a853a22c Mon Sep 17 00:00:00 2001 From: Shukun Tan Date: Sat, 9 May 2020 17:43:57 +0800 Subject: crypto: hisilicon - refactor module parameter pf_q_num related code put q_num_set similar code into qm to reduce the redundancy. Signed-off-by: Shukun Tan Reviewed-by: Zhou Wang Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/hpre/hpre_main.c | 39 ++++++------------------------- drivers/crypto/hisilicon/qm.h | 39 +++++++++++++++++++++++++++++++ drivers/crypto/hisilicon/sec2/sec_main.c | 35 ++------------------------- drivers/crypto/hisilicon/zip/zip_main.c | 33 +------------------------- 4 files changed, 49 insertions(+), 97 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c index e7585efa1596..18b3bb1ae950 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_main.c +++ b/drivers/crypto/hisilicon/hpre/hpre_main.c @@ -159,44 +159,19 @@ static const struct debugfs_reg32 hpre_com_dfx_regs[] = { {"INT_STATUS ", HPRE_INT_STATUS}, }; -static int hpre_pf_q_num_set(const char *val, const struct kernel_param *kp) +static int pf_q_num_set(const char *val, const struct kernel_param *kp) { - struct pci_dev *pdev; - u32 n, q_num; - u8 rev_id; - int ret; - - if (!val) - return -EINVAL; - - pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI, HPRE_PCI_DEVICE_ID, NULL); - if (!pdev) { - q_num = HPRE_QUEUE_NUM_V2; - pr_info("No device found currently, suppose queue number is %d\n", - q_num); - } else { - rev_id = pdev->revision; - if (rev_id != QM_HW_V2) - return -EINVAL; - - q_num = HPRE_QUEUE_NUM_V2; - } - - ret = kstrtou32(val, 10, &n); - if (ret != 0 || n == 0 || n > q_num) - return -EINVAL; - - return param_set_int(val, kp); + return q_num_set(val, kp, HPRE_PCI_DEVICE_ID); } static const struct kernel_param_ops hpre_pf_q_num_ops = { - .set = hpre_pf_q_num_set, + .set = pf_q_num_set, .get = param_get_int, }; -static u32 hpre_pf_q_num = HPRE_PF_DEF_Q_NUM; -module_param_cb(hpre_pf_q_num, &hpre_pf_q_num_ops, &hpre_pf_q_num, 0444); -MODULE_PARM_DESC(hpre_pf_q_num, "Number of queues in PF of CS(1-1024)"); +static u32 pf_q_num = HPRE_PF_DEF_Q_NUM; +module_param_cb(pf_q_num, &hpre_pf_q_num_ops, 
&pf_q_num, 0444); +MODULE_PARM_DESC(pf_q_num, "Number of queues in PF of CS(1-1024)"); static const struct kernel_param_ops vfs_num_ops = { .set = vfs_num_set, @@ -688,7 +663,7 @@ static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) if (pdev->is_physfn) { qm->qp_base = HPRE_PF_DEF_Q_BASE; - qm->qp_num = hpre_pf_q_num; + qm->qp_num = pf_q_num; } qm->use_dma_api = true; diff --git a/drivers/crypto/hisilicon/qm.h b/drivers/crypto/hisilicon/qm.h index 9d17167840f8..d1be8cdc99a3 100644 --- a/drivers/crypto/hisilicon/qm.h +++ b/drivers/crypto/hisilicon/qm.h @@ -8,6 +8,8 @@ #include #include +#define QM_QNUM_V1 4096 +#define QM_QNUM_V2 1024 #define QM_MAX_VFS_NUM_V2 63 /* qm user domain */ @@ -252,6 +254,43 @@ struct hisi_qp { struct uacce_queue *uacce_q; }; +static inline int q_num_set(const char *val, const struct kernel_param *kp, + unsigned int device) +{ + struct pci_dev *pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI, + device, NULL); + u32 n, q_num; + u8 rev_id; + int ret; + + if (!val) + return -EINVAL; + + if (!pdev) { + q_num = min_t(u32, QM_QNUM_V1, QM_QNUM_V2); + pr_info("No device found currently, suppose queue number is %d\n", + q_num); + } else { + rev_id = pdev->revision; + switch (rev_id) { + case QM_HW_V1: + q_num = QM_QNUM_V1; + break; + case QM_HW_V2: + q_num = QM_QNUM_V2; + break; + default: + return -EINVAL; + } + } + + ret = kstrtou32(val, 10, &n); + if (ret || !n || n > q_num) + return -EINVAL; + + return param_set_int(val, kp); +} + static inline int vfs_num_set(const char *val, const struct kernel_param *kp) { u32 n; diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index 5853a0695459..06f840c397f2 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -136,45 +136,14 @@ static const struct debugfs_reg32 sec_dfx_regs[] = { static int sec_pf_q_num_set(const char *val, const struct kernel_param *kp) { - struct pci_dev *pdev; - u32 n, q_num; - u8 rev_id; - int ret; - - if (!val) - return -EINVAL; - - pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI, - SEC_PF_PCI_DEVICE_ID, NULL); - if (!pdev) { - q_num = min_t(u32, SEC_QUEUE_NUM_V1, SEC_QUEUE_NUM_V2); - pr_info("No device, suppose queue number is %d!\n", q_num); - } else { - rev_id = pdev->revision; - - switch (rev_id) { - case QM_HW_V1: - q_num = SEC_QUEUE_NUM_V1; - break; - case QM_HW_V2: - q_num = SEC_QUEUE_NUM_V2; - break; - default: - return -EINVAL; - } - } - - ret = kstrtou32(val, 10, &n); - if (ret || !n || n > q_num) - return -EINVAL; - - return param_set_int(val, kp); + return q_num_set(val, kp, SEC_PF_PCI_DEVICE_ID); } static const struct kernel_param_ops sec_pf_q_num_ops = { .set = sec_pf_q_num_set, .get = param_get_int, }; + static u32 pf_q_num = SEC_PF_DEF_Q_NUM; module_param_cb(pf_q_num, &sec_pf_q_num_ops, &pf_q_num, 0444); MODULE_PARM_DESC(pf_q_num, "Number of queues in PF(v1 0-4096, v2 0-1024)"); diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c index 3132c4e6a9da..a8cb699c67b7 100644 --- a/drivers/crypto/hisilicon/zip/zip_main.c +++ b/drivers/crypto/hisilicon/zip/zip_main.c @@ -192,38 +192,7 @@ static const struct debugfs_reg32 hzip_dfx_regs[] = { static int pf_q_num_set(const char *val, const struct kernel_param *kp) { - struct pci_dev *pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI, - PCI_DEVICE_ID_ZIP_PF, NULL); - u32 n, q_num; - u8 rev_id; - int ret; - - if (!val) - return -EINVAL; - - if (!pdev) { - q_num = min_t(u32, HZIP_QUEUE_NUM_V1, HZIP_QUEUE_NUM_V2); - 
pr_info("No device found currently, suppose queue number is %d\n", - q_num); - } else { - rev_id = pdev->revision; - switch (rev_id) { - case QM_HW_V1: - q_num = HZIP_QUEUE_NUM_V1; - break; - case QM_HW_V2: - q_num = HZIP_QUEUE_NUM_V2; - break; - default: - return -EINVAL; - } - } - - ret = kstrtou32(val, 10, &n); - if (ret != 0 || n > q_num || n == 0) - return -EINVAL; - - return param_set_int(val, kp); + return q_num_set(val, kp, PCI_DEVICE_ID_ZIP_PF); } static const struct kernel_param_ops pf_q_num_ops = { -- cgit v1.2.3 From b67202e8ed30bfa07b07a6f8fc762417a9a4e6de Mon Sep 17 00:00:00 2001 From: Zhou Wang Date: Sat, 9 May 2020 17:43:58 +0800 Subject: crypto: hisilicon/qm - add state machine for QM Add specific states for qm and qp, every state change under critical region to prevent from race condition. Meanwhile, qp state change will also depend on qm state. Due to the introduction of these states, it is necessary to pay attention to the calls of public logic, such as concurrent scenarios resetting and releasing queue will call hisi_qm_stop, which needs to add additional status to distinguish and process. Signed-off-by: Zhou Wang Signed-off-by: Shukun Tan Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/qm.c | 366 +++++++++++++++++++++++++++++++++--------- drivers/crypto/hisilicon/qm.h | 24 ++- 2 files changed, 307 insertions(+), 83 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index 69d02cb40e4b..e42097e1d8a7 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -352,6 +352,93 @@ static const char * const qm_fifo_overflow[] = { "cq", "eq", "aeq", }; +static const char * const qm_s[] = { + "init", "start", "close", "stop", +}; + +static const char * const qp_s[] = { + "none", "init", "start", "stop", "close", +}; + +static bool qm_avail_state(struct hisi_qm *qm, enum qm_state new) +{ + enum qm_state curr = atomic_read(&qm->status.flags); + bool avail = false; + + switch (curr) { + case QM_INIT: + if (new == QM_START || new == QM_CLOSE) + avail = true; + break; + case QM_START: + if (new == QM_STOP) + avail = true; + break; + case QM_STOP: + if (new == QM_CLOSE || new == QM_START) + avail = true; + break; + default: + break; + } + + dev_dbg(&qm->pdev->dev, "change qm state from %s to %s\n", + qm_s[curr], qm_s[new]); + + if (!avail) + dev_warn(&qm->pdev->dev, "Can not change qm state from %s to %s\n", + qm_s[curr], qm_s[new]); + + return avail; +} + +static bool qm_qp_avail_state(struct hisi_qm *qm, struct hisi_qp *qp, + enum qp_state new) +{ + enum qm_state qm_curr = atomic_read(&qm->status.flags); + enum qp_state qp_curr = 0; + bool avail = false; + + if (qp) + qp_curr = atomic_read(&qp->qp_status.flags); + + switch (new) { + case QP_INIT: + if (qm_curr == QM_START || qm_curr == QM_INIT) + avail = true; + break; + case QP_START: + if ((qm_curr == QM_START && qp_curr == QP_INIT) || + (qm_curr == QM_START && qp_curr == QP_STOP)) + avail = true; + break; + case QP_STOP: + if ((qm_curr == QM_START && qp_curr == QP_START) || + (qp_curr == QP_INIT)) + avail = true; + break; + case QP_CLOSE: + if ((qm_curr == QM_START && qp_curr == QP_INIT) || + (qm_curr == QM_START && qp_curr == QP_STOP) || + (qm_curr == QM_STOP && qp_curr == QP_STOP) || + (qm_curr == QM_STOP && qp_curr == QP_INIT)) + avail = true; + break; + default: + break; + } + + dev_dbg(&qm->pdev->dev, "change qp state from %s to %s in QM %s\n", + qp_s[qp_curr], qp_s[new], qm_s[qm_curr]); + + if (!avail) + dev_warn(&qm->pdev->dev, 
+ "Can not change qp state from %s to %s in QM %s\n", + qp_s[qp_curr], qp_s[new], qm_s[qm_curr]); + + return avail; +} + /* return 0 mailbox ready, -ETIMEDOUT hardware timeout */ static int qm_wait_mb_ready(struct hisi_qm *qm) { @@ -699,7 +786,7 @@ static void qm_init_qp_status(struct hisi_qp *qp) qp_status->sq_tail = 0; qp_status->cq_head = 0; qp_status->cqc_phase = true; - qp_status->flags = 0; + atomic_set(&qp_status->flags, 0); } static void qm_vft_data_cfg(struct hisi_qm *qm, enum vft_type type, u32 base, @@ -1155,29 +1242,21 @@ static void *qm_get_avail_sqe(struct hisi_qp *qp) return qp->sqe + sq_tail * qp->qm->sqe_size; } -/** - * hisi_qm_create_qp() - Create a queue pair from qm. - * @qm: The qm we create a qp from. - * @alg_type: Accelerator specific algorithm type in sqc. - * - * return created qp, -EBUSY if all qps in qm allocated, -ENOMEM if allocating - * qp memory fails. - */ -struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type) +static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type) { struct device *dev = &qm->pdev->dev; struct hisi_qp *qp; int qp_id, ret; + if (!qm_qp_avail_state(qm, NULL, QP_INIT)) + return ERR_PTR(-EPERM); + qp = kzalloc(sizeof(*qp), GFP_KERNEL); if (!qp) return ERR_PTR(-ENOMEM); - write_lock(&qm->qps_lock); - qp_id = find_first_zero_bit(qm->qp_bitmap, qm->qp_num); if (qp_id >= qm->qp_num) { - write_unlock(&qm->qps_lock); dev_info(&qm->pdev->dev, "QM all queues are busy!\n"); ret = -EBUSY; goto err_free_qp; @@ -1185,9 +1264,6 @@ struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type) set_bit(qp_id, qm->qp_bitmap); qm->qp_array[qp_id] = qp; qm->qp_in_used++; - - write_unlock(&qm->qps_lock); - qp->qm = qm; if (qm->use_dma_api) { @@ -1206,18 +1282,36 @@ struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type) qp->qp_id = qp_id; qp->alg_type = alg_type; + atomic_set(&qp->qp_status.flags, QP_INIT); return qp; err_clear_bit: - write_lock(&qm->qps_lock); qm->qp_array[qp_id] = NULL; clear_bit(qp_id, qm->qp_bitmap); - write_unlock(&qm->qps_lock); err_free_qp: kfree(qp); return ERR_PTR(ret); } + +/** + * hisi_qm_create_qp() - Create a queue pair from qm. + * @qm: The qm we create a qp from. + * @alg_type: Accelerator specific algorithm type in sqc. + * + * return created qp, -EBUSY if all qps in qm allocated, -ENOMEM if allocating + * qp memory fails. + */ +struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type) +{ + struct hisi_qp *qp; + + down_write(&qm->qps_lock); + qp = qm_create_qp_nolock(qm, alg_type); + up_write(&qm->qps_lock); + + return qp; +} EXPORT_SYMBOL_GPL(hisi_qm_create_qp); /** @@ -1232,16 +1326,23 @@ void hisi_qm_release_qp(struct hisi_qp *qp) struct qm_dma *qdma = &qp->qdma; struct device *dev = &qm->pdev->dev; + down_write(&qm->qps_lock); + + if (!qm_qp_avail_state(qm, qp, QP_CLOSE)) { + up_write(&qm->qps_lock); + return; + } + if (qm->use_dma_api && qdma->va) dma_free_coherent(dev, qdma->size, qdma->va, qdma->dma); - write_lock(&qm->qps_lock); qm->qp_array[qp->qp_id] = NULL; clear_bit(qp->qp_id, qm->qp_bitmap); qm->qp_in_used--; - write_unlock(&qm->qps_lock); kfree(qp); + + up_write(&qm->qps_lock); } EXPORT_SYMBOL_GPL(hisi_qm_release_qp); @@ -1312,15 +1413,7 @@ static int qm_qp_ctx_cfg(struct hisi_qp *qp, int qp_id, int pasid) return ret; } -/** - * hisi_qm_start_qp() - Start a qp into running. - * @qp: The qp we want to start to run. - * @arg: Accelerator specific argument. - * - * After this function, qp can receive request from user. 
Return 0 if - * successful, Return -EBUSY if failed. - */ -int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg) +static int qm_start_qp_nolock(struct hisi_qp *qp, unsigned long arg) { struct hisi_qm *qm = qp->qm; struct device *dev = &qm->pdev->dev; @@ -1330,6 +1423,9 @@ int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg) size_t off = 0; int ret; + if (!qm_qp_avail_state(qm, qp, QP_START)) + return -EPERM; + #define QP_INIT_BUF(qp, type, size) do { \ (qp)->type = ((qp)->qdma.va + (off)); \ (qp)->type##_dma = (qp)->qdma.dma + (off); \ @@ -1360,10 +1456,31 @@ int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg) if (ret) return ret; + atomic_set(&qp->qp_status.flags, QP_START); dev_dbg(dev, "queue %d started\n", qp_id); return 0; } + +/** + * hisi_qm_start_qp() - Start a qp into running. + * @qp: The qp we want to start to run. + * @arg: Accelerator specific argument. + * + * After this function, qp can receive request from user. Return 0 if + * successful, Return -EBUSY if failed. + */ +int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg) +{ + struct hisi_qm *qm = qp->qm; + int ret; + + down_write(&qm->qps_lock); + ret = qm_start_qp_nolock(qp, arg); + up_write(&qm->qps_lock); + + return ret; +} EXPORT_SYMBOL_GPL(hisi_qm_start_qp); static void *qm_ctx_alloc(struct hisi_qm *qm, size_t ctx_size, @@ -1467,20 +1584,26 @@ static int qm_drain_qp(struct hisi_qp *qp) return ret; } -/** - * hisi_qm_stop_qp() - Stop a qp in qm. - * @qp: The qp we want to stop. - * - * This function is reverse of hisi_qm_start_qp. Return 0 if successful. - */ -int hisi_qm_stop_qp(struct hisi_qp *qp) +static int qm_stop_qp_nolock(struct hisi_qp *qp) { struct device *dev = &qp->qm->pdev->dev; int ret; - /* it is stopped */ - if (test_bit(QP_STOP, &qp->qp_status.flags)) + /* + * It is allowed to stop and release qp when reset, If the qp is + * stopped when reset but still want to be released then, the + * is_resetting flag should be set negative so that this qp will not + * be restarted after reset. + */ + if (atomic_read(&qp->qp_status.flags) == QP_STOP) { + qp->is_resetting = false; return 0; + } + + if (!qm_qp_avail_state(qp->qm, qp, QP_STOP)) + return -EPERM; + + atomic_set(&qp->qp_status.flags, QP_STOP); ret = qm_drain_qp(qp); if (ret) @@ -1491,12 +1614,27 @@ int hisi_qm_stop_qp(struct hisi_qp *qp) else flush_work(&qp->qm->work); - set_bit(QP_STOP, &qp->qp_status.flags); - dev_dbg(dev, "stop queue %u!", qp->qp_id); return 0; } + +/** + * hisi_qm_stop_qp() - Stop a qp in qm. + * @qp: The qp we want to stop. + * + * This function is reverse of hisi_qm_start_qp. Return 0 if successful. + */ +int hisi_qm_stop_qp(struct hisi_qp *qp) +{ + int ret; + + down_write(&qp->qm->qps_lock); + ret = qm_stop_qp_nolock(qp); + up_write(&qp->qm->qps_lock); + + return ret; +} EXPORT_SYMBOL_GPL(hisi_qm_stop_qp); /** @@ -1506,6 +1644,13 @@ EXPORT_SYMBOL_GPL(hisi_qm_stop_qp); * * This function will return -EBUSY if qp is currently full, and -EAGAIN * if qp related qm is resetting. + * + * Note: This function may run with qm_irq_thread and ACC reset at same time. + * It has no race with qm_irq_thread. However, during hisi_qp_send, ACC + * reset may happen, we have no lock here considering performance. This + * causes current qm_db sending fail or can not receive sended sqe. QM + * sync/async receive function should handle the error sqe. ACC reset + * done function should clear used sqe to 0. 
*/ int hisi_qp_send(struct hisi_qp *qp, const void *msg) { @@ -1514,7 +1659,9 @@ int hisi_qp_send(struct hisi_qp *qp, const void *msg) u16 sq_tail_next = (sq_tail + 1) % QM_Q_DEPTH; void *sqe = qm_get_avail_sqe(qp); - if (unlikely(test_bit(QP_STOP, &qp->qp_status.flags))) { + if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP || + atomic_read(&qp->qm->status.flags) == QM_STOP || + qp->is_resetting)) { dev_info(&qp->qm->pdev->dev, "QP is stopped or resetting\n"); return -EAGAIN; } @@ -1554,11 +1701,11 @@ static int hisi_qm_get_available_instances(struct uacce_device *uacce) int i, ret; struct hisi_qm *qm = uacce->priv; - read_lock(&qm->qps_lock); + down_read(&qm->qps_lock); for (i = 0, ret = 0; i < qm->qp_num; i++) if (!qm->qp_array[i]) ret++; - read_unlock(&qm->qps_lock); + up_read(&qm->qps_lock); return ret; } @@ -1658,9 +1805,9 @@ static int qm_set_sqctype(struct uacce_queue *q, u16 type) struct hisi_qm *qm = q->uacce->priv; struct hisi_qp *qp = q->priv; - write_lock(&qm->qps_lock); + down_write(&qm->qps_lock); qp->alg_type = type; - write_unlock(&qm->qps_lock); + up_write(&qm->qps_lock); return 0; } @@ -1762,9 +1909,9 @@ int hisi_qm_get_free_qp_num(struct hisi_qm *qm) { int ret; - read_lock(&qm->qps_lock); + down_read(&qm->qps_lock); ret = qm->qp_num - qm->qp_in_used; - read_unlock(&qm->qps_lock); + up_read(&qm->qps_lock); return ret; } @@ -1840,9 +1987,10 @@ int hisi_qm_init(struct hisi_qm *qm) qm->qp_in_used = 0; mutex_init(&qm->mailbox_lock); - rwlock_init(&qm->qps_lock); + init_rwsem(&qm->qps_lock); INIT_WORK(&qm->work, qm_work_process); + atomic_set(&qm->status.flags, QM_INIT); dev_dbg(dev, "init qm %s with %s\n", pdev->is_physfn ? "pf" : "vf", qm->use_dma_api ? "dma api" : "iommu api"); @@ -1875,6 +2023,13 @@ void hisi_qm_uninit(struct hisi_qm *qm) struct pci_dev *pdev = qm->pdev; struct device *dev = &pdev->dev; + down_write(&qm->qps_lock); + + if (!qm_avail_state(qm, QM_CLOSE)) { + up_write(&qm->qps_lock); + return; + } + uacce_remove(qm->uacce); qm->uacce = NULL; @@ -1890,6 +2045,8 @@ void hisi_qm_uninit(struct hisi_qm *qm) iounmap(qm->io_base); pci_release_mem_regions(pdev); pci_disable_device(pdev); + + up_write(&qm->qps_lock); } EXPORT_SYMBOL_GPL(hisi_qm_uninit); @@ -2072,12 +2229,21 @@ static int __hisi_qm_start(struct hisi_qm *qm) int hisi_qm_start(struct hisi_qm *qm) { struct device *dev = &qm->pdev->dev; + int ret = 0; + + down_write(&qm->qps_lock); + + if (!qm_avail_state(qm, QM_START)) { + up_write(&qm->qps_lock); + return -EPERM; + } dev_dbg(dev, "qm start with %d queue pairs\n", qm->qp_num); if (!qm->qp_num) { dev_err(dev, "qp_num should not be 0\n"); - return -EINVAL; + ret = -EINVAL; + goto err_unlock; } if (!qm->qp_bitmap) { @@ -2086,12 +2252,15 @@ int hisi_qm_start(struct hisi_qm *qm) qm->qp_array = devm_kcalloc(dev, qm->qp_num, sizeof(struct hisi_qp *), GFP_KERNEL); - if (!qm->qp_bitmap || !qm->qp_array) - return -ENOMEM; + if (!qm->qp_bitmap || !qm->qp_array) { + ret = -ENOMEM; + goto err_unlock; + } } if (!qm->use_dma_api) { dev_dbg(&qm->pdev->dev, "qm delay start\n"); + up_write(&qm->qps_lock); return 0; } else if (!qm->qdma.va) { qm->qdma.size = QMC_ALIGN(sizeof(struct qm_eqe) * QM_Q_DEPTH) + @@ -2102,11 +2271,19 @@ int hisi_qm_start(struct hisi_qm *qm) &qm->qdma.dma, GFP_KERNEL); dev_dbg(dev, "allocate qm dma buf(va=%pK, dma=%pad, size=%zx)\n", qm->qdma.va, &qm->qdma.dma, qm->qdma.size); - if (!qm->qdma.va) - return -ENOMEM; + if (!qm->qdma.va) { + ret = -ENOMEM; + goto err_unlock; + } } - return __hisi_qm_start(qm); + ret = __hisi_qm_start(qm); + if 
(!ret) + atomic_set(&qm->status.flags, QM_START); + +err_unlock: + up_write(&qm->qps_lock); + return ret; } EXPORT_SYMBOL_GPL(hisi_qm_start); @@ -2120,20 +2297,44 @@ static int qm_restart(struct hisi_qm *qm) if (ret < 0) return ret; - write_lock(&qm->qps_lock); + down_write(&qm->qps_lock); for (i = 0; i < qm->qp_num; i++) { qp = qm->qp_array[i]; - if (qp) { - ret = hisi_qm_start_qp(qp, 0); + if (qp && atomic_read(&qp->qp_status.flags) == QP_STOP && + qp->is_resetting == true) { + ret = qm_start_qp_nolock(qp, 0); if (ret < 0) { dev_err(dev, "Failed to start qp%d!\n", i); - write_unlock(&qm->qps_lock); + up_write(&qm->qps_lock); + return ret; + } + qp->is_resetting = false; + } + } + up_write(&qm->qps_lock); + + return 0; +} + +/* Stop started qps in reset flow */ +static int qm_stop_started_qp(struct hisi_qm *qm) +{ + struct device *dev = &qm->pdev->dev; + struct hisi_qp *qp; + int i, ret; + + for (i = 0; i < qm->qp_num; i++) { + qp = qm->qp_array[i]; + if (qp && atomic_read(&qp->qp_status.flags) == QP_START) { + qp->is_resetting = true; + ret = qm_stop_qp_nolock(qp); + if (ret < 0) { + dev_err(dev, "Failed to stop qp%d!\n", i); return ret; } } } - write_unlock(&qm->qps_lock); return 0; } @@ -2149,7 +2350,7 @@ static void qm_clear_queues(struct hisi_qm *qm) for (i = 0; i < qm->qp_num; i++) { qp = qm->qp_array[i]; - if (qp) + if (qp && qp->is_resetting) memset(qp->qdma.va, 0, qp->qdma.size); } @@ -2166,41 +2367,43 @@ static void qm_clear_queues(struct hisi_qm *qm) */ int hisi_qm_stop(struct hisi_qm *qm) { - struct device *dev; - struct hisi_qp *qp; - int ret = 0, i; + struct device *dev = &qm->pdev->dev; + int ret = 0; - if (!qm || !qm->pdev) { - WARN_ON(1); - return -EINVAL; + down_write(&qm->qps_lock); + + if (!qm_avail_state(qm, QM_STOP)) { + ret = -EPERM; + goto err_unlock; } - dev = &qm->pdev->dev; + if (qm->status.stop_reason == QM_SOFT_RESET || + qm->status.stop_reason == QM_FLR) { + ret = qm_stop_started_qp(qm); + if (ret < 0) { + dev_err(dev, "Failed to stop started qp!\n"); + goto err_unlock; + } + } /* Mask eq and aeq irq */ writel(0x1, qm->io_base + QM_VF_EQ_INT_MASK); writel(0x1, qm->io_base + QM_VF_AEQ_INT_MASK); - /* Stop all qps belong to this qm */ - for (i = 0; i < qm->qp_num; i++) { - qp = qm->qp_array[i]; - if (qp) { - ret = hisi_qm_stop_qp(qp); - if (ret < 0) { - dev_err(dev, "Failed to stop qp%d!\n", i); - return -EBUSY; - } - } - } - if (qm->fun_type == QM_HW_PF) { ret = hisi_qm_set_vft(qm, 0, 0, 0); - if (ret < 0) + if (ret < 0) { dev_err(dev, "Failed to set vft!\n"); + ret = -EBUSY; + goto err_unlock; + } } qm_clear_queues(qm); + atomic_set(&qm->status.flags, QM_STOP); +err_unlock: + up_write(&qm->qps_lock); return ret; } EXPORT_SYMBOL_GPL(hisi_qm_stop); @@ -2772,6 +2975,7 @@ static int qm_set_msi(struct hisi_qm *qm, bool set) static int qm_vf_reset_prepare(struct hisi_qm *qm) { struct hisi_qm_list *qm_list = qm->qm_list; + int stop_reason = qm->status.stop_reason; struct pci_dev *pdev = qm->pdev; struct pci_dev *virtfn; struct hisi_qm *vf_qm; @@ -2784,6 +2988,7 @@ static int qm_vf_reset_prepare(struct hisi_qm *qm) continue; if (pci_physfn(virtfn) == pdev) { + vf_qm->status.stop_reason = stop_reason; ret = hisi_qm_stop(vf_qm); if (ret) goto stop_fail; @@ -2830,6 +3035,7 @@ static int qm_controller_reset_prepare(struct hisi_qm *qm) } } + qm->status.stop_reason = QM_SOFT_RESET; ret = hisi_qm_stop(qm); if (ret) { pci_err(pdev, "Fails to stop QM!\n"); diff --git a/drivers/crypto/hisilicon/qm.h b/drivers/crypto/hisilicon/qm.h index d1be8cdc99a3..eff156a26f48 100644 --- 
a/drivers/crypto/hisilicon/qm.h +++ b/drivers/crypto/hisilicon/qm.h @@ -84,8 +84,24 @@ /* page number for queue file region */ #define QM_DOORBELL_PAGE_NR 1 +enum qm_stop_reason { + QM_NORMAL, + QM_SOFT_RESET, + QM_FLR, +}; + +enum qm_state { + QM_INIT = 0, + QM_START, + QM_CLOSE, + QM_STOP, +}; + enum qp_state { + QP_INIT = 1, + QP_START, QP_STOP, + QP_CLOSE, }; enum qm_hw_ver { @@ -129,7 +145,8 @@ struct hisi_qm_status { bool eqc_phase; u32 aeq_head; bool aeqc_phase; - unsigned long flags; + atomic_t flags; + int stop_reason; }; struct hisi_qm; @@ -196,7 +213,7 @@ struct hisi_qm { struct hisi_qm_err_status err_status; unsigned long reset_flag; - rwlock_t qps_lock; + struct rw_semaphore qps_lock; unsigned long *qp_bitmap; struct hisi_qp **qp_array; @@ -225,7 +242,7 @@ struct hisi_qp_status { u16 sq_tail; u16 cq_head; bool cqc_phase; - unsigned long flags; + atomic_t flags; }; struct hisi_qp_ops { @@ -250,6 +267,7 @@ struct hisi_qp { void (*event_cb)(struct hisi_qp *qp); struct hisi_qm *qm; + bool is_resetting; u16 pasid; struct uacce_queue *uacce_q; }; -- cgit v1.2.3 From 7ce396fa12a96a0e709a7b55cd5ab24161259634 Mon Sep 17 00:00:00 2001 From: Shukun Tan Date: Sat, 9 May 2020 17:43:59 +0800 Subject: crypto: hisilicon - add FLR support Add callback reset_prepare and reset_done in QM, The callback reset_prepare will uninit device error configuration and stop the QM, the callback reset_done will init the device error configuration and restart the QM. Uninit the error configuration will disable device block master OOO when Multi-bit ECC error occurs to avoid the request of FLR will not return. Signed-off-by: Shukun Tan Reviewed-by: Zhou Wang Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/hpre/hpre_main.c | 16 ++++ drivers/crypto/hisilicon/qm.c | 133 +++++++++++++++++++++++++++++- drivers/crypto/hisilicon/qm.h | 2 + drivers/crypto/hisilicon/sec2/sec_main.c | 2 + drivers/crypto/hisilicon/zip/zip_main.c | 16 ++++ 5 files changed, 165 insertions(+), 4 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c index 18b3bb1ae950..3475b1999635 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_main.c +++ b/drivers/crypto/hisilicon/hpre/hpre_main.c @@ -310,12 +310,21 @@ static void hpre_cnt_regs_clear(struct hisi_qm *qm) static void hpre_hw_error_disable(struct hisi_qm *qm) { + u32 val; + /* disable hpre hw error interrupts */ writel(HPRE_CORE_INT_DISABLE, qm->io_base + HPRE_INT_MASK); + + /* disable HPRE block master OOO when m-bit error occur */ + val = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB); + val &= ~HPRE_AM_OOO_SHUTDOWN_ENABLE; + writel(val, qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB); } static void hpre_hw_error_enable(struct hisi_qm *qm) { + u32 val; + /* clear HPRE hw error source if having */ writel(HPRE_CORE_INT_DISABLE, qm->io_base + HPRE_HAC_SOURCE_INT); @@ -324,6 +333,11 @@ static void hpre_hw_error_enable(struct hisi_qm *qm) writel(HPRE_HAC_RAS_CE_ENABLE, qm->io_base + HPRE_RAS_CE_ENB); writel(HPRE_HAC_RAS_NFE_ENABLE, qm->io_base + HPRE_RAS_NFE_ENB); writel(HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_RAS_FE_ENB); + + /* enable HPRE block master OOO when m-bit error occur */ + val = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB); + val |= HPRE_AM_OOO_SHUTDOWN_ENABLE; + writel(val, qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB); } static inline struct hisi_qm *hpre_file_to_qm(struct hpre_debugfs_file *file) @@ -851,6 +865,8 @@ static void hpre_remove(struct pci_dev *pdev) static const struct 
pci_error_handlers hpre_err_handler = { .error_detected = hisi_qm_dev_err_detected, .slot_reset = hisi_qm_dev_slot_reset, + .reset_prepare = hisi_qm_reset_prepare, + .reset_done = hisi_qm_reset_done, }; static struct pci_driver hpre_pci_driver = { diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index e42097e1d8a7..c30df080b9d0 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -175,6 +175,7 @@ #define QMC_ALIGN(sz) ALIGN(sz, 32) #define QM_DBG_TMP_BUF_LEN 22 +#define QM_PCI_COMMAND_INVALID ~0 #define QM_MK_CQC_DW3_V1(hop_num, pg_sz, buf_sz, cqe_sz) \ (((hop_num) << QM_CQ_HOP_NUM_SHIFT) | \ @@ -2874,6 +2875,11 @@ pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev, } EXPORT_SYMBOL_GPL(hisi_qm_dev_err_detected); +static int qm_get_hw_error_status(struct hisi_qm *qm) +{ + return readl(qm->io_base + QM_ABNORMAL_INT_STATUS); +} + static int qm_check_req_recv(struct hisi_qm *qm) { struct pci_dev *pdev = qm->pdev; @@ -3166,9 +3172,7 @@ restart_fail: static int qm_get_dev_err_status(struct hisi_qm *qm) { - - return(qm->err_ini->get_dev_hw_err_status(qm) & - qm->err_ini->err_info.ecc_2bits_mask); + return qm->err_ini->get_dev_hw_err_status(qm); } static int qm_dev_hw_init(struct hisi_qm *qm) @@ -3190,7 +3194,8 @@ static void qm_restart_prepare(struct hisi_qm *qm) qm->io_base + ACC_AM_CFG_PORT_WR_EN); /* clear dev ecc 2bit error source if having */ - value = qm_get_dev_err_status(qm); + value = qm_get_dev_err_status(qm) & + qm->err_ini->err_info.ecc_2bits_mask; if (value && qm->err_ini->clear_dev_hw_err_status) qm->err_ini->clear_dev_hw_err_status(qm, value); @@ -3336,6 +3341,126 @@ pci_ers_result_t hisi_qm_dev_slot_reset(struct pci_dev *pdev) } EXPORT_SYMBOL_GPL(hisi_qm_dev_slot_reset); +/* check the interrupt is ecc-mbit error or not */ +static int qm_check_dev_error(struct hisi_qm *qm) +{ + int ret; + + if (qm->fun_type == QM_HW_VF) + return 0; + + ret = qm_get_hw_error_status(qm) & QM_ECC_MBIT; + if (ret) + return ret; + + return (qm_get_dev_err_status(qm) & + qm->err_ini->err_info.ecc_2bits_mask); +} + +void hisi_qm_reset_prepare(struct pci_dev *pdev) +{ + struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev)); + struct hisi_qm *qm = pci_get_drvdata(pdev); + u32 delay = 0; + int ret; + + hisi_qm_dev_err_uninit(pf_qm); + + /* + * Check whether there is an ECC mbit error, If it occurs, need to + * wait for soft reset to fix it. 
+ */ + while (qm_check_dev_error(pf_qm)) { + msleep(++delay); + if (delay > QM_RESET_WAIT_TIMEOUT) + return; + } + + ret = qm_reset_prepare_ready(qm); + if (ret) { + pci_err(pdev, "FLR not ready!\n"); + return; + } + + if (qm->vfs_num) { + ret = qm_vf_reset_prepare(qm); + if (ret) { + pci_err(pdev, "Failed to prepare reset, ret = %d.\n", + ret); + return; + } + } + + ret = hisi_qm_stop(qm); + if (ret) { + pci_err(pdev, "Failed to stop QM, ret = %d.\n", ret); + return; + } + + pci_info(pdev, "FLR resetting...\n"); +} +EXPORT_SYMBOL_GPL(hisi_qm_reset_prepare); + +static bool qm_flr_reset_complete(struct pci_dev *pdev) +{ + struct pci_dev *pf_pdev = pci_physfn(pdev); + struct hisi_qm *qm = pci_get_drvdata(pf_pdev); + u32 id; + + pci_read_config_dword(qm->pdev, PCI_COMMAND, &id); + if (id == QM_PCI_COMMAND_INVALID) { + pci_err(pdev, "Device can not be used!\n"); + return false; + } + + clear_bit(QM_DEV_RESET_FLAG, &qm->reset_flag); + + return true; +} + +void hisi_qm_reset_done(struct pci_dev *pdev) +{ + struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev)); + struct hisi_qm *qm = pci_get_drvdata(pdev); + int ret; + + hisi_qm_dev_err_init(pf_qm); + + ret = qm_restart(qm); + if (ret) { + pci_err(pdev, "Failed to start QM, ret = %d.\n", ret); + goto flr_done; + } + + if (qm->fun_type == QM_HW_PF) { + ret = qm_dev_hw_init(qm); + if (ret) { + pci_err(pdev, "Failed to init PF, ret = %d.\n", ret); + goto flr_done; + } + + if (!qm->vfs_num) + goto flr_done; + + ret = qm_vf_q_assign(qm, qm->vfs_num); + if (ret) { + pci_err(pdev, "Failed to assign VFs, ret = %d.\n", ret); + goto flr_done; + } + + ret = qm_vf_reset_done(qm); + if (ret) { + pci_err(pdev, "Failed to start VFs, ret = %d.\n", ret); + goto flr_done; + } + } + +flr_done: + if (qm_flr_reset_complete(pdev)) + pci_info(pdev, "FLR reset complete\n"); +} +EXPORT_SYMBOL_GPL(hisi_qm_reset_done); + MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Zhou Wang "); MODULE_DESCRIPTION("HiSilicon Accelerator queue manager driver"); diff --git a/drivers/crypto/hisilicon/qm.h b/drivers/crypto/hisilicon/qm.h index eff156a26f48..25934e3c7acd 100644 --- a/drivers/crypto/hisilicon/qm.h +++ b/drivers/crypto/hisilicon/qm.h @@ -371,6 +371,8 @@ void hisi_qm_dev_err_uninit(struct hisi_qm *qm); pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev, pci_channel_state_t state); pci_ers_result_t hisi_qm_dev_slot_reset(struct pci_dev *pdev); +void hisi_qm_reset_prepare(struct pci_dev *pdev); +void hisi_qm_reset_done(struct pci_dev *pdev); struct hisi_acc_sgl_pool; struct hisi_acc_hw_sgl *hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev, diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index 06f840c397f2..067d1c22fc00 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -914,6 +914,8 @@ static void sec_remove(struct pci_dev *pdev) static const struct pci_error_handlers sec_err_handler = { .error_detected = hisi_qm_dev_err_detected, .slot_reset = hisi_qm_dev_slot_reset, + .reset_prepare = hisi_qm_reset_prepare, + .reset_done = hisi_qm_reset_done, }; static struct pci_driver sec_pci_driver = { diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c index a8cb699c67b7..da90218207cb 100644 --- a/drivers/crypto/hisilicon/zip/zip_main.c +++ b/drivers/crypto/hisilicon/zip/zip_main.c @@ -278,6 +278,8 @@ static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm) static void hisi_zip_hw_error_enable(struct hisi_qm *qm) { + u32 val; + if 
(qm->ver == QM_HW_V1) { writel(HZIP_CORE_INT_MASK_ALL, qm->io_base + HZIP_CORE_INT_MASK_REG); @@ -296,12 +298,24 @@ static void hisi_zip_hw_error_enable(struct hisi_qm *qm) /* enable ZIP hw error interrupts */ writel(0, qm->io_base + HZIP_CORE_INT_MASK_REG); + + /* enable ZIP block master OOO when m-bit error occur */ + val = readl(qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL); + val = val | HZIP_AXI_SHUTDOWN_ENABLE; + writel(val, qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL); } static void hisi_zip_hw_error_disable(struct hisi_qm *qm) { + u32 val; + /* disable ZIP hw error interrupts */ writel(HZIP_CORE_INT_MASK_ALL, qm->io_base + HZIP_CORE_INT_MASK_REG); + + /* disable ZIP block master OOO when m-bit error occur */ + val = readl(qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL); + val = val & ~HZIP_AXI_SHUTDOWN_ENABLE; + writel(val, qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL); } static inline struct hisi_qm *file_to_qm(struct ctrl_debug_file *file) @@ -802,6 +816,8 @@ static void hisi_zip_remove(struct pci_dev *pdev) static const struct pci_error_handlers hisi_zip_err_handler = { .error_detected = hisi_qm_dev_err_detected, .slot_reset = hisi_qm_dev_slot_reset, + .reset_prepare = hisi_qm_reset_prepare, + .reset_done = hisi_qm_reset_done, }; static struct pci_driver hisi_zip_pci_driver = { -- cgit v1.2.3 From b977e03005127b1cbfef05517fbedaa7c5a177a1 Mon Sep 17 00:00:00 2001 From: Shukun Tan Date: Sat, 9 May 2020 17:44:00 +0800 Subject: crypto: hisilicon - remove use_dma_api related codes The codes related use_dma_api is useless which should be removed. Signed-off-by: Shukun Tan Reviewed-by: Zhou Wang Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/hpre/hpre_main.c | 1 - drivers/crypto/hisilicon/qm.c | 34 ++++++++++++------------------- drivers/crypto/hisilicon/qm.h | 1 - drivers/crypto/hisilicon/sec2/sec_main.c | 1 - drivers/crypto/hisilicon/zip/zip_main.c | 1 - 5 files changed, 13 insertions(+), 25 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c index 3475b1999635..dba7b6012c5d 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_main.c +++ b/drivers/crypto/hisilicon/hpre/hpre_main.c @@ -679,7 +679,6 @@ static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) qm->qp_base = HPRE_PF_DEF_Q_BASE; qm->qp_num = pf_q_num; } - qm->use_dma_api = true; return hisi_qm_init(qm); } diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index c30df080b9d0..800beef2639f 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -1267,20 +1267,18 @@ static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type) qm->qp_in_used++; qp->qm = qm; - if (qm->use_dma_api) { - qp->qdma.size = qm->sqe_size * QM_Q_DEPTH + - sizeof(struct qm_cqe) * QM_Q_DEPTH; - qp->qdma.va = dma_alloc_coherent(dev, qp->qdma.size, - &qp->qdma.dma, GFP_KERNEL); - if (!qp->qdma.va) { - ret = -ENOMEM; - goto err_clear_bit; - } - - dev_dbg(dev, "allocate qp dma buf(va=%pK, dma=%pad, size=%zx)\n", - qp->qdma.va, &qp->qdma.dma, qp->qdma.size); + qp->qdma.size = qm->sqe_size * QM_Q_DEPTH + + sizeof(struct qm_cqe) * QM_Q_DEPTH; + qp->qdma.va = dma_alloc_coherent(dev, qp->qdma.size, + &qp->qdma.dma, GFP_KERNEL); + if (!qp->qdma.va) { + ret = -ENOMEM; + goto err_clear_bit; } + dev_dbg(dev, "allocate qp dma buf(va=%pK, dma=%pad, size=%zx)\n", + qp->qdma.va, &qp->qdma.dma, qp->qdma.size); + qp->qp_id = qp_id; qp->alg_type = alg_type; atomic_set(&qp->qp_status.flags, QP_INIT); @@ -1334,7 +1332,7 @@ void 
hisi_qm_release_qp(struct hisi_qp *qp) return; } - if (qm->use_dma_api && qdma->va) + if (qdma->va) dma_free_coherent(dev, qdma->size, qdma->va, qdma->dma); qm->qp_array[qp->qp_id] = NULL; @@ -1992,8 +1990,6 @@ int hisi_qm_init(struct hisi_qm *qm) INIT_WORK(&qm->work, qm_work_process); atomic_set(&qm->status.flags, QM_INIT); - dev_dbg(dev, "init qm %s with %s\n", pdev->is_physfn ? "pf" : "vf", - qm->use_dma_api ? "dma api" : "iommu api"); return 0; @@ -2034,7 +2030,7 @@ void hisi_qm_uninit(struct hisi_qm *qm) uacce_remove(qm->uacce); qm->uacce = NULL; - if (qm->use_dma_api && qm->qdma.va) { + if (qm->qdma.va) { hisi_qm_cache_wb(qm); dma_free_coherent(dev, qm->qdma.size, qm->qdma.va, qm->qdma.dma); @@ -2259,11 +2255,7 @@ int hisi_qm_start(struct hisi_qm *qm) } } - if (!qm->use_dma_api) { - dev_dbg(&qm->pdev->dev, "qm delay start\n"); - up_write(&qm->qps_lock); - return 0; - } else if (!qm->qdma.va) { + if (!qm->qdma.va) { qm->qdma.size = QMC_ALIGN(sizeof(struct qm_eqe) * QM_Q_DEPTH) + QMC_ALIGN(sizeof(struct qm_aeqe) * QM_Q_DEPTH) + QMC_ALIGN(sizeof(struct qm_sqc) * qm->qp_num) + diff --git a/drivers/crypto/hisilicon/qm.h b/drivers/crypto/hisilicon/qm.h index 25934e3c7acd..743cb63d2de6 100644 --- a/drivers/crypto/hisilicon/qm.h +++ b/drivers/crypto/hisilicon/qm.h @@ -230,7 +230,6 @@ struct hisi_qm { struct work_struct work; const char *algs; - bool use_dma_api; bool use_sva; resource_size_t phys_base; resource_size_t phys_size; diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index 067d1c22fc00..8ff6e52a58d6 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -749,7 +749,6 @@ static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) qm->qp_base = SEC_PF_DEF_Q_NUM; qm->qp_num = SEC_QUEUE_NUM_V1 - SEC_PF_DEF_Q_NUM; } - qm->use_dma_api = true; return hisi_qm_init(qm); } diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c index da90218207cb..903dff968c40 100644 --- a/drivers/crypto/hisilicon/zip/zip_main.c +++ b/drivers/crypto/hisilicon/zip/zip_main.c @@ -692,7 +692,6 @@ static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) if (rev_id == QM_HW_UNKNOWN) return -EINVAL; - qm->use_dma_api = true; qm->pdev = pdev; qm->ver = rev_id; -- cgit v1.2.3 From d9701f8d9b12903bf212f542235659477024a43f Mon Sep 17 00:00:00 2001 From: Weili Qian Date: Sat, 9 May 2020 17:44:01 +0800 Subject: crypto: hisilicon - unify initial value assignment into QM Some initial value assignment of struct hisi_qm could put into QM. Signed-off-by: Weili Qian Signed-off-by: Shukun Tan Reviewed-by: Zhou Wang Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/hpre/hpre_main.c | 22 +++++++------- drivers/crypto/hisilicon/qm.c | 44 +++++++++++++++++++-------- drivers/crypto/hisilicon/sec2/sec_main.c | 50 +++++++++++++++---------------- drivers/crypto/hisilicon/zip/zip_main.c | 37 ++++++++++------------- 4 files changed, 81 insertions(+), 72 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c index dba7b6012c5d..bfae7fb23e4c 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_main.c +++ b/drivers/crypto/hisilicon/hpre/hpre_main.c @@ -672,12 +672,13 @@ static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) qm->ver = rev_id; qm->sqe_size = HPRE_SQE_SIZE; qm->dev_name = hpre_name; - qm->fun_type = (pdev->device == HPRE_PCI_DEVICE_ID) ? 
- QM_HW_PF : QM_HW_VF; - if (pdev->is_physfn) { + qm->fun_type = (pdev->device == HPRE_PCI_DEVICE_ID) ? + QM_HW_PF : QM_HW_VF; + if (qm->fun_type == QM_HW_PF) { qm->qp_base = HPRE_PF_DEF_Q_BASE; qm->qp_num = pf_q_num; + qm->qm_list = &hpre_devices; } return hisi_qm_init(qm); @@ -748,7 +749,6 @@ static int hpre_pf_probe_init(struct hpre *hpre) if (ret) return ret; - qm->qm_list = &hpre_devices; qm->err_ini = &hpre_err_ini; hisi_qm_dev_err_init(qm); @@ -758,15 +758,15 @@ static int hpre_pf_probe_init(struct hpre *hpre) static int hpre_probe_init(struct hpre *hpre) { struct hisi_qm *qm = &hpre->qm; - int ret = -ENODEV; + int ret; - if (qm->fun_type == QM_HW_PF) + if (qm->fun_type == QM_HW_PF) { ret = hpre_pf_probe_init(hpre); - else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V2) - /* v2 starts to support get vft by mailbox */ - ret = hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num); + if (ret) + return ret; + } - return ret; + return 0; } static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id) @@ -779,8 +779,6 @@ static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (!hpre) return -ENOMEM; - pci_set_drvdata(pdev, hpre); - qm = &hpre->qm; ret = hpre_qm_init(qm, pdev); if (ret) { diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index 800beef2639f..e40163835346 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -1916,6 +1916,27 @@ int hisi_qm_get_free_qp_num(struct hisi_qm *qm) } EXPORT_SYMBOL_GPL(hisi_qm_get_free_qp_num); +static void hisi_qm_pre_init(struct hisi_qm *qm) +{ + struct pci_dev *pdev = qm->pdev; + + switch (qm->ver) { + case QM_HW_V1: + qm->ops = &qm_hw_ops_v1; + break; + case QM_HW_V2: + qm->ops = &qm_hw_ops_v2; + break; + default: + return; + } + + pci_set_drvdata(pdev, qm); + mutex_init(&qm->mailbox_lock); + init_rwsem(&qm->qps_lock); + qm->qp_in_used = 0; +} + /** * hisi_qm_init() - Initialize configures about qm. * @qm: The qm needing init. 
@@ -1929,16 +1950,7 @@ int hisi_qm_init(struct hisi_qm *qm) unsigned int num_vec; int ret; - switch (qm->ver) { - case QM_HW_V1: - qm->ops = &qm_hw_ops_v1; - break; - case QM_HW_V2: - qm->ops = &qm_hw_ops_v2; - break; - default: - return -EINVAL; - } + hisi_qm_pre_init(qm); ret = qm_alloc_uacce(qm); if (ret < 0) @@ -1984,15 +1996,21 @@ int hisi_qm_init(struct hisi_qm *qm) if (ret) goto err_free_irq_vectors; - qm->qp_in_used = 0; - mutex_init(&qm->mailbox_lock); - init_rwsem(&qm->qps_lock); + if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V2) { + /* v2 starts to support get vft by mailbox */ + ret = hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num); + if (ret) + goto err_irq_unregister; + } + INIT_WORK(&qm->work, qm_work_process); atomic_set(&qm->status.flags, QM_INIT); return 0; +err_irq_unregister: + qm_irq_unregister(qm); err_free_irq_vectors: pci_free_irq_vectors(pdev); err_iounmap: diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index 8ff6e52a58d6..74e806fd9ff9 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -722,6 +722,7 @@ static int sec_pf_probe_init(struct sec_dev *sec) static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) { enum qm_hw_ver rev_id; + int ret; rev_id = hisi_qm_get_hw_version(pdev); if (rev_id == QM_HW_UNKNOWN) @@ -729,9 +730,9 @@ static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) qm->pdev = pdev; qm->ver = rev_id; - qm->sqe_size = SEC_SQE_SIZE; qm->dev_name = sec_name; + qm->fun_type = (pdev->device == SEC_PF_PCI_DEVICE_ID) ? QM_HW_PF : QM_HW_VF; if (qm->fun_type == QM_HW_PF) { @@ -750,7 +751,25 @@ static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) qm->qp_num = SEC_QUEUE_NUM_V1 - SEC_PF_DEF_Q_NUM; } - return hisi_qm_init(qm); + /* + * WQ_HIGHPRI: SEC request must be low delayed, + * so need a high priority workqueue. + * WQ_UNBOUND: SEC task is likely with long + * running CPU intensive workloads. + */ + qm->wq = alloc_workqueue("%s", WQ_HIGHPRI | WQ_MEM_RECLAIM | + WQ_UNBOUND, num_online_cpus(), + pci_name(qm->pdev)); + if (!qm->wq) { + pci_err(qm->pdev, "fail to alloc workqueue\n"); + return -ENOMEM; + } + + ret = hisi_qm_init(qm); + if (ret) + destroy_workqueue(qm->wq); + + return ret; } static void sec_qm_uninit(struct hisi_qm *qm) @@ -763,29 +782,10 @@ static int sec_probe_init(struct sec_dev *sec) struct hisi_qm *qm = &sec->qm; int ret; - /* - * WQ_HIGHPRI: SEC request must be low delayed, - * so need a high priority workqueue. - * WQ_UNBOUND: SEC task is likely with long - * running CPU intensive workloads. 
- */ - qm->wq = alloc_workqueue("%s", WQ_HIGHPRI | - WQ_MEM_RECLAIM | WQ_UNBOUND, num_online_cpus(), - pci_name(qm->pdev)); - if (!qm->wq) { - pci_err(qm->pdev, "fail to alloc workqueue\n"); - return -ENOMEM; - } - - if (qm->fun_type == QM_HW_PF) + if (qm->fun_type == QM_HW_PF) { ret = sec_pf_probe_init(sec); - else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V2) - /* v2 starts to support get vft by mailbox */ - ret = hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num); - - if (ret) { - destroy_workqueue(qm->wq); - return ret; + if (ret) + return ret; } return 0; @@ -825,8 +825,6 @@ static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (!sec) return -ENOMEM; - pci_set_drvdata(pdev, sec); - qm = &sec->qm; ret = sec_qm_init(qm, pdev); if (ret) { diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c index 903dff968c40..0ddd56a0a075 100644 --- a/drivers/crypto/hisilicon/zip/zip_main.c +++ b/drivers/crypto/hisilicon/zip/zip_main.c @@ -694,12 +694,27 @@ static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) qm->pdev = pdev; qm->ver = rev_id; - qm->algs = "zlib\ngzip"; qm->sqe_size = HZIP_SQE_SIZE; qm->dev_name = hisi_zip_name; + qm->fun_type = (pdev->device == PCI_DEVICE_ID_ZIP_PF) ? QM_HW_PF : QM_HW_VF; + if (qm->fun_type == QM_HW_PF) { + qm->qp_base = HZIP_PF_DEF_Q_BASE; + qm->qp_num = pf_q_num; + qm->qm_list = &zip_devices; + } else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V1) { + /* + * have no way to get qm configure in VM in v1 hardware, + * so currently force PF to uses HZIP_PF_DEF_Q_NUM, and force + * to trigger only one VF in v1 hardware. + * + * v2 hardware has no such problem. + */ + qm->qp_base = HZIP_PF_DEF_Q_NUM; + qm->qp_num = HZIP_QUEUE_NUM_V1 - HZIP_PF_DEF_Q_NUM; + } return hisi_qm_init(qm); } @@ -713,24 +728,6 @@ static int hisi_zip_probe_init(struct hisi_zip *hisi_zip) ret = hisi_zip_pf_probe_init(hisi_zip); if (ret) return ret; - - qm->qp_base = HZIP_PF_DEF_Q_BASE; - qm->qp_num = pf_q_num; - qm->qm_list = &zip_devices; - } else if (qm->fun_type == QM_HW_VF) { - /* - * have no way to get qm configure in VM in v1 hardware, - * so currently force PF to uses HZIP_PF_DEF_Q_NUM, and force - * to trigger only one VF in v1 hardware. - * - * v2 hardware has no such problem. - */ - if (qm->ver == QM_HW_V1) { - qm->qp_base = HZIP_PF_DEF_Q_NUM; - qm->qp_num = HZIP_QUEUE_NUM_V1 - HZIP_PF_DEF_Q_NUM; - } else if (qm->ver == QM_HW_V2) - /* v2 starts to support get vft by mailbox */ - return hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num); } return 0; @@ -746,8 +743,6 @@ static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (!hisi_zip) return -ENOMEM; - pci_set_drvdata(pdev, hisi_zip); - qm = &hisi_zip->qm; ret = hisi_zip_qm_init(qm, pdev); -- cgit v1.2.3 From 5308f6600a393ee848ed9d9f77b167aa6b202e9c Mon Sep 17 00:00:00 2001 From: Weili Qian Date: Sat, 9 May 2020 17:44:02 +0800 Subject: crypto: hisilicon - QM memory management optimization Put all the code for the memory allocation into the QM initialization process. Before, The qp memory was allocated when the qp was created, and released when the qp was released, It is now changed to allocate all the qp memory once. 
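In outline, the new scheme looks like the fragment below (simplified from the diff that follows; declarations, locking and error handling trimmed): all per-qp DMA buffers are carved out once at init time, and creating a qp later only hands out a free index from an IDR.

	/* init time: one DMA region per qp, sized and mapped up front */
	qp_dma_size = PAGE_ALIGN(qm->sqe_size * QM_Q_DEPTH +
				 sizeof(struct qm_cqe) * QM_Q_DEPTH);
	for (i = 0; i < qm->qp_num; i++)
		ret = hisi_qp_memory_init(qm, qp_dma_size, i);

	/* create time: no dma_alloc_coherent(), just pick a free ID */
	qp_id = idr_alloc_cyclic(&qm->qp_idr, NULL, 0, qm->qp_num, GFP_ATOMIC);
	qp = &qm->qp_array[qp_id];
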
Signed-off-by: Weili Qian Signed-off-by: Shukun Tan Reviewed-by: Zhou Wang Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/qm.c | 265 ++++++++++++++++++++---------------------- drivers/crypto/hisilicon/qm.h | 4 +- 2 files changed, 128 insertions(+), 141 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index e40163835346..e988124e732a 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -6,6 +6,7 @@ #include #include #include +#include #include #include #include @@ -575,7 +576,7 @@ static struct hisi_qp *qm_to_hisi_qp(struct hisi_qm *qm, struct qm_eqe *eqe) { u16 cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK; - return qm->qp_array[cqn]; + return &qm->qp_array[cqn]; } static void qm_cq_head_update(struct hisi_qp *qp) @@ -625,8 +626,7 @@ static void qm_work_process(struct work_struct *work) while (QM_EQE_PHASE(eqe) == qm->status.eqc_phase) { eqe_num++; qp = qm_to_hisi_qp(qm, eqe); - if (qp) - qm_poll_qp(qp, qm); + qm_poll_qp(qp, qm); if (qm->status.eq_head == QM_Q_DEPTH - 1) { qm->status.eqc_phase = !qm->status.eqc_phase; @@ -1247,50 +1247,36 @@ static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type) { struct device *dev = &qm->pdev->dev; struct hisi_qp *qp; - int qp_id, ret; + int qp_id; if (!qm_qp_avail_state(qm, NULL, QP_INIT)) return ERR_PTR(-EPERM); - qp = kzalloc(sizeof(*qp), GFP_KERNEL); - if (!qp) - return ERR_PTR(-ENOMEM); - - qp_id = find_first_zero_bit(qm->qp_bitmap, qm->qp_num); - if (qp_id >= qm->qp_num) { - dev_info(&qm->pdev->dev, "QM all queues are busy!\n"); - ret = -EBUSY; - goto err_free_qp; + if (qm->qp_in_used == qm->qp_num) { + dev_info_ratelimited(dev, "All %u queues of QM are busy!\n", + qm->qp_num); + return ERR_PTR(-EBUSY); } - set_bit(qp_id, qm->qp_bitmap); - qm->qp_array[qp_id] = qp; - qm->qp_in_used++; - qp->qm = qm; - qp->qdma.size = qm->sqe_size * QM_Q_DEPTH + - sizeof(struct qm_cqe) * QM_Q_DEPTH; - qp->qdma.va = dma_alloc_coherent(dev, qp->qdma.size, - &qp->qdma.dma, GFP_KERNEL); - if (!qp->qdma.va) { - ret = -ENOMEM; - goto err_clear_bit; + qp_id = idr_alloc_cyclic(&qm->qp_idr, NULL, 0, qm->qp_num, GFP_ATOMIC); + if (qp_id < 0) { + dev_info_ratelimited(dev, "All %u queues of QM are busy!\n", + qm->qp_num); + return ERR_PTR(-EBUSY); } - dev_dbg(dev, "allocate qp dma buf(va=%pK, dma=%pad, size=%zx)\n", - qp->qdma.va, &qp->qdma.dma, qp->qdma.size); + qp = &qm->qp_array[qp_id]; + + memset(qp->cqe, 0, sizeof(struct qm_cqe) * QM_Q_DEPTH); + qp->event_cb = NULL; + qp->req_cb = NULL; qp->qp_id = qp_id; qp->alg_type = alg_type; + qm->qp_in_used++; atomic_set(&qp->qp_status.flags, QP_INIT); return qp; - -err_clear_bit: - qm->qp_array[qp_id] = NULL; - clear_bit(qp_id, qm->qp_bitmap); -err_free_qp: - kfree(qp); - return ERR_PTR(ret); } /** @@ -1322,8 +1308,6 @@ EXPORT_SYMBOL_GPL(hisi_qm_create_qp); void hisi_qm_release_qp(struct hisi_qp *qp) { struct hisi_qm *qm = qp->qm; - struct qm_dma *qdma = &qp->qdma; - struct device *dev = &qm->pdev->dev; down_write(&qm->qps_lock); @@ -1332,14 +1316,8 @@ void hisi_qm_release_qp(struct hisi_qp *qp) return; } - if (qdma->va) - dma_free_coherent(dev, qdma->size, qdma->va, qdma->dma); - - qm->qp_array[qp->qp_id] = NULL; - clear_bit(qp->qp_id, qm->qp_bitmap); qm->qp_in_used--; - - kfree(qp); + idr_remove(&qm->qp_idr, qp->qp_id); up_write(&qm->qps_lock); } @@ -1416,41 +1394,13 @@ static int qm_start_qp_nolock(struct hisi_qp *qp, unsigned long arg) { struct hisi_qm *qm = qp->qm; struct device *dev = &qm->pdev->dev; - 
enum qm_hw_ver ver = qm->ver; int qp_id = qp->qp_id; int pasid = arg; - size_t off = 0; int ret; if (!qm_qp_avail_state(qm, qp, QP_START)) return -EPERM; -#define QP_INIT_BUF(qp, type, size) do { \ - (qp)->type = ((qp)->qdma.va + (off)); \ - (qp)->type##_dma = (qp)->qdma.dma + (off); \ - off += (size); \ -} while (0) - - if (!qp->qdma.dma) { - dev_err(dev, "cannot get qm dma buffer\n"); - return -EINVAL; - } - - /* sq need 128 bytes alignment */ - if (qp->qdma.dma & QM_SQE_DATA_ALIGN_MASK) { - dev_err(dev, "qm sq is not aligned to 128 byte\n"); - return -EINVAL; - } - - QP_INIT_BUF(qp, sqe, qm->sqe_size * QM_Q_DEPTH); - QP_INIT_BUF(qp, cqe, sizeof(struct qm_cqe) * QM_Q_DEPTH); - - dev_dbg(dev, "init qp buffer(v%d):\n" - " sqe (%pK, %lx)\n" - " cqe (%pK, %lx)\n", - ver, qp->sqe, (unsigned long)qp->sqe_dma, - qp->cqe, (unsigned long)qp->cqe_dma); - ret = qm_qp_ctx_cfg(qp, qp_id, pasid); if (ret) return ret; @@ -1697,16 +1647,7 @@ static void qm_qp_event_notifier(struct hisi_qp *qp) static int hisi_qm_get_available_instances(struct uacce_device *uacce) { - int i, ret; - struct hisi_qm *qm = uacce->priv; - - down_read(&qm->qps_lock); - for (i = 0, ret = 0; i < qm->qp_num; i++) - if (!qm->qp_array[i]) - ret++; - up_read(&qm->qps_lock); - - return ret; + return hisi_qm_get_free_qp_num(uacce->priv); } static int hisi_qm_uacce_get_queue(struct uacce_device *uacce, @@ -1916,6 +1857,99 @@ int hisi_qm_get_free_qp_num(struct hisi_qm *qm) } EXPORT_SYMBOL_GPL(hisi_qm_get_free_qp_num); +static void hisi_qp_memory_uninit(struct hisi_qm *qm, int num) +{ + struct device *dev = &qm->pdev->dev; + struct qm_dma *qdma; + int i; + + for (i = num - 1; i >= 0; i--) { + qdma = &qm->qp_array[i].qdma; + dma_free_coherent(dev, qdma->size, qdma->va, qdma->dma); + } + + kfree(qm->qp_array); +} + +static int hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id) +{ + struct device *dev = &qm->pdev->dev; + size_t off = qm->sqe_size * QM_Q_DEPTH; + struct hisi_qp *qp; + + qp = &qm->qp_array[id]; + qp->qdma.va = dma_alloc_coherent(dev, dma_size, &qp->qdma.dma, + GFP_KERNEL); + if (!qp->qdma.va) + return -ENOMEM; + + qp->sqe = qp->qdma.va; + qp->sqe_dma = qp->qdma.dma; + qp->cqe = qp->qdma.va + off; + qp->cqe_dma = qp->qdma.dma + off; + qp->qdma.size = dma_size; + qp->qm = qm; + qp->qp_id = id; + + return 0; +} + +static int hisi_qm_memory_init(struct hisi_qm *qm) +{ + struct device *dev = &qm->pdev->dev; + size_t qp_dma_size, off = 0; + int i, ret = 0; + +#define QM_INIT_BUF(qm, type, num) do { \ + (qm)->type = ((qm)->qdma.va + (off)); \ + (qm)->type##_dma = (qm)->qdma.dma + (off); \ + off += QMC_ALIGN(sizeof(struct qm_##type) * (num)); \ +} while (0) + + idr_init(&qm->qp_idr); + qm->qdma.size = QMC_ALIGN(sizeof(struct qm_eqe) * QM_Q_DEPTH) + + QMC_ALIGN(sizeof(struct qm_aeqe) * QM_Q_DEPTH) + + QMC_ALIGN(sizeof(struct qm_sqc) * qm->qp_num) + + QMC_ALIGN(sizeof(struct qm_cqc) * qm->qp_num); + qm->qdma.va = dma_alloc_coherent(dev, qm->qdma.size, &qm->qdma.dma, + GFP_ATOMIC); + dev_dbg(dev, "allocate qm dma buf size=%zx)\n", qm->qdma.size); + if (!qm->qdma.va) + return -ENOMEM; + + QM_INIT_BUF(qm, eqe, QM_Q_DEPTH); + QM_INIT_BUF(qm, aeqe, QM_Q_DEPTH); + QM_INIT_BUF(qm, sqc, qm->qp_num); + QM_INIT_BUF(qm, cqc, qm->qp_num); + + qm->qp_array = kcalloc(qm->qp_num, sizeof(struct hisi_qp), GFP_KERNEL); + if (!qm->qp_array) { + ret = -ENOMEM; + goto err_alloc_qp_array; + } + + /* one more page for device or qp statuses */ + qp_dma_size = qm->sqe_size * QM_Q_DEPTH + + sizeof(struct qm_cqe) * QM_Q_DEPTH; + qp_dma_size = 
PAGE_ALIGN(qp_dma_size); + for (i = 0; i < qm->qp_num; i++) { + ret = hisi_qp_memory_init(qm, qp_dma_size, i); + if (ret) + goto err_init_qp_mem; + + dev_dbg(dev, "allocate qp dma buf size=%zx)\n", qp_dma_size); + } + + return ret; + +err_init_qp_mem: + hisi_qp_memory_uninit(qm, i); +err_alloc_qp_array: + dma_free_coherent(dev, qm->qdma.size, qm->qdma.va, qm->qdma.dma); + + return ret; +} + static void hisi_qm_pre_init(struct hisi_qm *qm) { struct pci_dev *pdev = qm->pdev; @@ -2003,6 +2037,10 @@ int hisi_qm_init(struct hisi_qm *qm) goto err_irq_unregister; } + ret = hisi_qm_memory_init(qm); + if (ret) + goto err_irq_unregister; + INIT_WORK(&qm->work, qm_work_process); atomic_set(&qm->status.flags, QM_INIT); @@ -2048,6 +2086,9 @@ void hisi_qm_uninit(struct hisi_qm *qm) uacce_remove(qm->uacce); qm->uacce = NULL; + hisi_qp_memory_uninit(qm, qm->qp_num); + idr_destroy(&qm->qp_idr); + if (qm->qdma.va) { hisi_qm_cache_wb(qm); dma_free_coherent(dev, qm->qdma.size, @@ -2176,22 +2217,10 @@ static int qm_eq_ctx_cfg(struct hisi_qm *qm) static int __hisi_qm_start(struct hisi_qm *qm) { - struct pci_dev *pdev = qm->pdev; - struct device *dev = &pdev->dev; - size_t off = 0; int ret; -#define QM_INIT_BUF(qm, type, num) do { \ - (qm)->type = ((qm)->qdma.va + (off)); \ - (qm)->type##_dma = (qm)->qdma.dma + (off); \ - off += QMC_ALIGN(sizeof(struct qm_##type) * (num)); \ -} while (0) - WARN_ON(!qm->qdma.dma); - if (qm->qp_num == 0) - return -EINVAL; - if (qm->fun_type == QM_HW_PF) { ret = qm_dev_mem_reset(qm); if (ret) @@ -2202,21 +2231,6 @@ static int __hisi_qm_start(struct hisi_qm *qm) return ret; } - QM_INIT_BUF(qm, eqe, QM_Q_DEPTH); - QM_INIT_BUF(qm, aeqe, QM_Q_DEPTH); - QM_INIT_BUF(qm, sqc, qm->qp_num); - QM_INIT_BUF(qm, cqc, qm->qp_num); - - dev_dbg(dev, "init qm buffer:\n" - " eqe (%pK, %lx)\n" - " aeqe (%pK, %lx)\n" - " sqc (%pK, %lx)\n" - " cqc (%pK, %lx)\n", - qm->eqe, (unsigned long)qm->eqe_dma, - qm->aeqe, (unsigned long)qm->aeqe_dma, - qm->sqc, (unsigned long)qm->sqc_dma, - qm->cqc, (unsigned long)qm->cqc_dma); - ret = qm_eq_ctx_cfg(qm); if (ret) return ret; @@ -2261,33 +2275,6 @@ int hisi_qm_start(struct hisi_qm *qm) goto err_unlock; } - if (!qm->qp_bitmap) { - qm->qp_bitmap = devm_kcalloc(dev, BITS_TO_LONGS(qm->qp_num), - sizeof(long), GFP_KERNEL); - qm->qp_array = devm_kcalloc(dev, qm->qp_num, - sizeof(struct hisi_qp *), - GFP_KERNEL); - if (!qm->qp_bitmap || !qm->qp_array) { - ret = -ENOMEM; - goto err_unlock; - } - } - - if (!qm->qdma.va) { - qm->qdma.size = QMC_ALIGN(sizeof(struct qm_eqe) * QM_Q_DEPTH) + - QMC_ALIGN(sizeof(struct qm_aeqe) * QM_Q_DEPTH) + - QMC_ALIGN(sizeof(struct qm_sqc) * qm->qp_num) + - QMC_ALIGN(sizeof(struct qm_cqc) * qm->qp_num); - qm->qdma.va = dma_alloc_coherent(dev, qm->qdma.size, - &qm->qdma.dma, GFP_KERNEL); - dev_dbg(dev, "allocate qm dma buf(va=%pK, dma=%pad, size=%zx)\n", - qm->qdma.va, &qm->qdma.dma, qm->qdma.size); - if (!qm->qdma.va) { - ret = -ENOMEM; - goto err_unlock; - } - } - ret = __hisi_qm_start(qm); if (!ret) atomic_set(&qm->status.flags, QM_START); @@ -2310,8 +2297,8 @@ static int qm_restart(struct hisi_qm *qm) down_write(&qm->qps_lock); for (i = 0; i < qm->qp_num; i++) { - qp = qm->qp_array[i]; - if (qp && atomic_read(&qp->qp_status.flags) == QP_STOP && + qp = &qm->qp_array[i]; + if (atomic_read(&qp->qp_status.flags) == QP_STOP && qp->is_resetting == true) { ret = qm_start_qp_nolock(qp, 0); if (ret < 0) { @@ -2336,7 +2323,7 @@ static int qm_stop_started_qp(struct hisi_qm *qm) int i, ret; for (i = 0; i < qm->qp_num; i++) { - qp = qm->qp_array[i]; + 
qp = &qm->qp_array[i]; if (qp && atomic_read(&qp->qp_status.flags) == QP_START) { qp->is_resetting = true; ret = qm_stop_qp_nolock(qp); @@ -2360,8 +2347,8 @@ static void qm_clear_queues(struct hisi_qm *qm) int i; for (i = 0; i < qm->qp_num; i++) { - qp = qm->qp_array[i]; - if (qp && qp->is_resetting) + qp = &qm->qp_array[i]; + if (qp->is_resetting) memset(qp->qdma.va, 0, qp->qdma.size); } diff --git a/drivers/crypto/hisilicon/qm.h b/drivers/crypto/hisilicon/qm.h index 743cb63d2de6..80b9746dfe19 100644 --- a/drivers/crypto/hisilicon/qm.h +++ b/drivers/crypto/hisilicon/qm.h @@ -214,8 +214,8 @@ struct hisi_qm { unsigned long reset_flag; struct rw_semaphore qps_lock; - unsigned long *qp_bitmap; - struct hisi_qp **qp_array; + struct idr qp_idr; + struct hisi_qp *qp_array; struct mutex mailbox_lock; -- cgit v1.2.3 From 3176637ac10eddffdc3bd75281fa354a0d5a0c1e Mon Sep 17 00:00:00 2001 From: Shukun Tan Date: Sat, 9 May 2020 17:44:03 +0800 Subject: crypto: hisilicon - remove codes of directly report device errors through MSI The hardware device can be configured to report directly through MSI, but this method will not go through RAS, configure all hardware errors that should be processed by driver to NFE. Signed-off-by: Shukun Tan Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/hpre/hpre_main.c | 1 - drivers/crypto/hisilicon/qm.c | 54 +++++++------------------------ drivers/crypto/hisilicon/qm.h | 4 +-- drivers/crypto/hisilicon/sec2/sec_main.c | 1 - drivers/crypto/hisilicon/zip/zip_main.c | 1 - 5 files changed, 13 insertions(+), 48 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c index bfae7fb23e4c..c5ddd3a8ec85 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_main.c +++ b/drivers/crypto/hisilicon/hpre/hpre_main.c @@ -730,7 +730,6 @@ static const struct hisi_qm_err_ini hpre_err_ini = { .ce = QM_BASE_CE, .nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT, .fe = 0, - .msi = QM_DB_RANDOM_INVALID, .ecc_2bits_mask = HPRE_CORE_ECC_2BIT_ERR | HPRE_OOO_ECC_2BIT_ERR, .msi_wr_port = HPRE_WR_MSI_PORT, diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index e988124e732a..80935d661ed3 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -313,8 +313,7 @@ struct hisi_qm_hw_ops { u8 cmd, u16 index, u8 priority); u32 (*get_irq_num)(struct hisi_qm *qm); int (*debug_init)(struct hisi_qm *qm); - void (*hw_error_init)(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe, - u32 msi); + void (*hw_error_init)(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe); void (*hw_error_uninit)(struct hisi_qm *qm); pci_ers_result_t (*hw_error_handle)(struct hisi_qm *qm); }; @@ -707,26 +706,6 @@ static irqreturn_t qm_aeq_irq(int irq, void *data) static irqreturn_t qm_abnormal_irq(int irq, void *data) { - const struct hisi_qm_hw_error *err = qm_hw_error; - struct hisi_qm *qm = data; - struct device *dev = &qm->pdev->dev; - u32 error_status, tmp; - - /* read err sts */ - tmp = readl(qm->io_base + QM_ABNORMAL_INT_STATUS); - error_status = qm->msi_mask & tmp; - - while (err->msg) { - if (err->int_msk & error_status) - dev_err(dev, "%s [error status=0x%x] found\n", - err->msg, err->int_msk); - - err++; - } - - /* clear err sts */ - writel(error_status, qm->io_base + QM_ABNORMAL_INT_SOURCE); - return IRQ_HANDLED; } @@ -1116,28 +1095,21 @@ static int qm_create_debugfs_file(struct hisi_qm *qm, enum qm_debug_file index) return 0; } -static void qm_hw_error_init_v1(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe, - u32 
msi) +static void qm_hw_error_init_v1(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe) { writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK); } -static void qm_hw_error_init_v2(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe, - u32 msi) +static void qm_hw_error_init_v2(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe) { - u32 irq_enable = ce | nfe | fe | msi; + u32 irq_enable = ce | nfe | fe; u32 irq_unmask = ~irq_enable; - u32 error_status; qm->error_mask = ce | nfe | fe; - qm->msi_mask = msi; /* clear QM hw residual error source */ - error_status = readl(qm->io_base + QM_ABNORMAL_INT_STATUS); - if (error_status) { - error_status &= qm->error_mask; - writel(error_status, qm->io_base + QM_ABNORMAL_INT_SOURCE); - } + writel(QM_ABNORMAL_INT_SOURCE_CLR, + qm->io_base + QM_ABNORMAL_INT_SOURCE); /* configure error type */ writel(ce, qm->io_base + QM_RAS_CE_ENABLE); @@ -1145,9 +1117,6 @@ static void qm_hw_error_init_v2(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe, writel(nfe, qm->io_base + QM_RAS_NFE_ENABLE); writel(fe, qm->io_base + QM_RAS_FE_ENABLE); - /* use RAS irq default, so only set QM_RAS_MSI_INT_SEL for MSI */ - writel(msi, qm->io_base + QM_RAS_MSI_INT_SEL); - irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK); writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK); } @@ -1207,9 +1176,11 @@ static pci_ers_result_t qm_hw_error_handle_v2(struct hisi_qm *qm) qm->err_status.is_qm_ecc_mbit = true; qm_log_hw_error(qm, error_status); - - /* clear err sts */ - writel(error_status, qm->io_base + QM_ABNORMAL_INT_SOURCE); + if (error_status == QM_DB_RANDOM_INVALID) { + writel(error_status, qm->io_base + + QM_ABNORMAL_INT_SOURCE); + return PCI_ERS_RESULT_RECOVERED; + } return PCI_ERS_RESULT_NEED_RESET; } @@ -2476,8 +2447,7 @@ static void qm_hw_error_init(struct hisi_qm *qm) return; } - qm->ops->hw_error_init(qm, err_info->ce, err_info->nfe, - err_info->fe, err_info->msi); + qm->ops->hw_error_init(qm, err_info->ce, err_info->nfe, err_info->fe); } static void qm_hw_error_uninit(struct hisi_qm *qm) diff --git a/drivers/crypto/hisilicon/qm.h b/drivers/crypto/hisilicon/qm.h index 80b9746dfe19..fc5e96a02399 100644 --- a/drivers/crypto/hisilicon/qm.h +++ b/drivers/crypto/hisilicon/qm.h @@ -74,7 +74,7 @@ #define QM_BASE_NFE (QM_AXI_RRESP | QM_AXI_BRESP | QM_ECC_MBIT | \ QM_ACC_GET_TASK_TIMEOUT | QM_DB_TIMEOUT | \ - QM_OF_FIFO_OF) + QM_OF_FIFO_OF | QM_DB_RANDOM_INVALID) #define QM_BASE_CE QM_ECC_1BIT #define QM_Q_DEPTH 1024 @@ -158,7 +158,6 @@ struct hisi_qm_err_info { u32 ce; u32 nfe; u32 fe; - u32 msi; }; struct hisi_qm_err_status { @@ -224,7 +223,6 @@ struct hisi_qm { struct qm_debug debug; u32 error_mask; - u32 msi_mask; struct workqueue_struct *wq; struct work_struct work; diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index 74e806fd9ff9..c3381f253d55 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -682,7 +682,6 @@ static const struct hisi_qm_err_ini sec_err_ini = { .nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT | QM_ACC_WB_NOT_READY_TIMEOUT, .fe = 0, - .msi = QM_DB_RANDOM_INVALID, .ecc_2bits_mask = SEC_CORE_INT_STATUS_M_ECC, .msi_wr_port = BIT(0), .acpi_rst = "SRST", diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c index 0ddd56a0a075..6161b1025b7f 100644 --- a/drivers/crypto/hisilicon/zip/zip_main.c +++ b/drivers/crypto/hisilicon/zip/zip_main.c @@ -643,7 +643,6 @@ static const struct hisi_qm_err_ini hisi_zip_err_ini = { .nfe = QM_BASE_NFE | 
QM_ACC_WB_NOT_READY_TIMEOUT, .fe = 0, - .msi = QM_DB_RANDOM_INVALID, .ecc_2bits_mask = HZIP_CORE_INT_STATUS_M_ECC, .msi_wr_port = HZIP_WR_PORT, .acpi_rst = "ZRST", -- cgit v1.2.3 From dbdc1ec31fc05c118eedb4211f502e6352c915b9 Mon Sep 17 00:00:00 2001 From: Shukun Tan Date: Sat, 9 May 2020 17:44:04 +0800 Subject: crypto: hisilicon - add device error report through abnormal irq By configuring the device error in firmware to report through abnormal interruption, process all NFE errors in irq handler. Signed-off-by: Shukun Tan Reviewed-by: Zhou Wang Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/qm.c | 339 +++++++++++++++++++++++------------------- drivers/crypto/hisilicon/qm.h | 1 + 2 files changed, 187 insertions(+), 153 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index 80935d661ed3..6365f931bb73 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -219,6 +219,12 @@ enum vft_type { CQC_VFT, }; +enum acc_err_result { + ACC_ERR_NONE, + ACC_ERR_NEED_RESET, + ACC_ERR_RECOVERED, +}; + struct qm_cqe { __le32 rsvd0; __le16 cmd_id; @@ -315,7 +321,7 @@ struct hisi_qm_hw_ops { int (*debug_init)(struct hisi_qm *qm); void (*hw_error_init)(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe); void (*hw_error_uninit)(struct hisi_qm *qm); - pci_ers_result_t (*hw_error_handle)(struct hisi_qm *qm); + enum acc_err_result (*hw_error_handle)(struct hisi_qm *qm); }; static const char * const qm_debug_file_name[] = { @@ -704,46 +710,6 @@ static irqreturn_t qm_aeq_irq(int irq, void *data) return IRQ_HANDLED; } -static irqreturn_t qm_abnormal_irq(int irq, void *data) -{ - return IRQ_HANDLED; -} - -static int qm_irq_register(struct hisi_qm *qm) -{ - struct pci_dev *pdev = qm->pdev; - int ret; - - ret = request_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR), - qm_irq, IRQF_SHARED, qm->dev_name, qm); - if (ret) - return ret; - - if (qm->ver == QM_HW_V2) { - ret = request_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), - qm_aeq_irq, IRQF_SHARED, qm->dev_name, qm); - if (ret) - goto err_aeq_irq; - - if (qm->fun_type == QM_HW_PF) { - ret = request_irq(pci_irq_vector(pdev, - QM_ABNORMAL_EVENT_IRQ_VECTOR), - qm_abnormal_irq, IRQF_SHARED, - qm->dev_name, qm); - if (ret) - goto err_abonormal_irq; - } - } - - return 0; - -err_abonormal_irq: - free_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), qm); -err_aeq_irq: - free_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR), qm); - return ret; -} - static void qm_irq_unregister(struct hisi_qm *qm) { struct pci_dev *pdev = qm->pdev; @@ -1163,7 +1129,7 @@ static void qm_log_hw_error(struct hisi_qm *qm, u32 error_status) } } -static pci_ers_result_t qm_hw_error_handle_v2(struct hisi_qm *qm) +static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm) { u32 error_status, tmp; @@ -1179,13 +1145,13 @@ static pci_ers_result_t qm_hw_error_handle_v2(struct hisi_qm *qm) if (error_status == QM_DB_RANDOM_INVALID) { writel(error_status, qm->io_base + QM_ABNORMAL_INT_SOURCE); - return PCI_ERS_RESULT_RECOVERED; + return ACC_ERR_RECOVERED; } - return PCI_ERS_RESULT_NEED_RESET; + return ACC_ERR_NEED_RESET; } - return PCI_ERS_RESULT_RECOVERED; + return ACC_ERR_RECOVERED; } static const struct hisi_qm_hw_ops qm_hw_ops_v1 = { @@ -1942,100 +1908,6 @@ static void hisi_qm_pre_init(struct hisi_qm *qm) qm->qp_in_used = 0; } -/** - * hisi_qm_init() - Initialize configures about qm. - * @qm: The qm needing init. 
- * - * This function init qm, then we can call hisi_qm_start to put qm into work. - */ -int hisi_qm_init(struct hisi_qm *qm) -{ - struct pci_dev *pdev = qm->pdev; - struct device *dev = &pdev->dev; - unsigned int num_vec; - int ret; - - hisi_qm_pre_init(qm); - - ret = qm_alloc_uacce(qm); - if (ret < 0) - dev_warn(&pdev->dev, "fail to alloc uacce (%d)\n", ret); - - ret = pci_enable_device_mem(pdev); - if (ret < 0) { - dev_err(&pdev->dev, "Failed to enable device mem!\n"); - goto err_remove_uacce; - } - - ret = pci_request_mem_regions(pdev, qm->dev_name); - if (ret < 0) { - dev_err(&pdev->dev, "Failed to request mem regions!\n"); - goto err_disable_pcidev; - } - - qm->phys_base = pci_resource_start(pdev, PCI_BAR_2); - qm->phys_size = pci_resource_len(qm->pdev, PCI_BAR_2); - qm->io_base = ioremap(qm->phys_base, qm->phys_size); - if (!qm->io_base) { - ret = -EIO; - goto err_release_mem_regions; - } - - ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); - if (ret < 0) - goto err_iounmap; - pci_set_master(pdev); - - if (!qm->ops->get_irq_num) { - ret = -EOPNOTSUPP; - goto err_iounmap; - } - num_vec = qm->ops->get_irq_num(qm); - ret = pci_alloc_irq_vectors(pdev, num_vec, num_vec, PCI_IRQ_MSI); - if (ret < 0) { - dev_err(dev, "Failed to enable MSI vectors!\n"); - goto err_iounmap; - } - - ret = qm_irq_register(qm); - if (ret) - goto err_free_irq_vectors; - - if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V2) { - /* v2 starts to support get vft by mailbox */ - ret = hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num); - if (ret) - goto err_irq_unregister; - } - - ret = hisi_qm_memory_init(qm); - if (ret) - goto err_irq_unregister; - - INIT_WORK(&qm->work, qm_work_process); - - atomic_set(&qm->status.flags, QM_INIT); - - return 0; - -err_irq_unregister: - qm_irq_unregister(qm); -err_free_irq_vectors: - pci_free_irq_vectors(pdev); -err_iounmap: - iounmap(qm->io_base); -err_release_mem_regions: - pci_release_mem_regions(pdev); -err_disable_pcidev: - pci_disable_device(pdev); -err_remove_uacce: - uacce_remove(qm->uacce); - qm->uacce = NULL; - - return ret; -} -EXPORT_SYMBOL_GPL(hisi_qm_init); - /** * hisi_qm_uninit() - Uninitialize qm. * @qm: The qm needed uninit. 
@@ -2460,11 +2332,11 @@ static void qm_hw_error_uninit(struct hisi_qm *qm) qm->ops->hw_error_uninit(qm); } -static pci_ers_result_t qm_hw_error_handle(struct hisi_qm *qm) +static enum acc_err_result qm_hw_error_handle(struct hisi_qm *qm) { if (!qm->ops->hw_error_handle) { dev_err(&qm->pdev->dev, "QM doesn't support hw error report!\n"); - return PCI_ERS_RESULT_NONE; + return ACC_ERR_NONE; } return qm->ops->hw_error_handle(qm); @@ -2777,13 +2649,13 @@ int hisi_qm_sriov_configure(struct pci_dev *pdev, int num_vfs) } EXPORT_SYMBOL_GPL(hisi_qm_sriov_configure); -static pci_ers_result_t qm_dev_err_handle(struct hisi_qm *qm) +static enum acc_err_result qm_dev_err_handle(struct hisi_qm *qm) { u32 err_sts; if (!qm->err_ini->get_dev_hw_err_status) { dev_err(&qm->pdev->dev, "Device doesn't support get hw error status!\n"); - return PCI_ERS_RESULT_NONE; + return ACC_ERR_NONE; } /* get device hardware error status */ @@ -2794,20 +2666,19 @@ static pci_ers_result_t qm_dev_err_handle(struct hisi_qm *qm) if (!qm->err_ini->log_dev_hw_err) { dev_err(&qm->pdev->dev, "Device doesn't support log hw error!\n"); - return PCI_ERS_RESULT_NEED_RESET; + return ACC_ERR_NEED_RESET; } qm->err_ini->log_dev_hw_err(qm, err_sts); - return PCI_ERS_RESULT_NEED_RESET; + return ACC_ERR_NEED_RESET; } - return PCI_ERS_RESULT_RECOVERED; + return ACC_ERR_RECOVERED; } -static pci_ers_result_t qm_process_dev_error(struct pci_dev *pdev) +static enum acc_err_result qm_process_dev_error(struct hisi_qm *qm) { - struct hisi_qm *qm = pci_get_drvdata(pdev); - pci_ers_result_t qm_ret, dev_ret; + enum acc_err_result qm_ret, dev_ret; /* log qm error */ qm_ret = qm_hw_error_handle(qm); @@ -2815,9 +2686,9 @@ static pci_ers_result_t qm_process_dev_error(struct pci_dev *pdev) /* log device error */ dev_ret = qm_dev_err_handle(qm); - return (qm_ret == PCI_ERS_RESULT_NEED_RESET || - dev_ret == PCI_ERS_RESULT_NEED_RESET) ? - PCI_ERS_RESULT_NEED_RESET : PCI_ERS_RESULT_RECOVERED; + return (qm_ret == ACC_ERR_NEED_RESET || + dev_ret == ACC_ERR_NEED_RESET) ? 
+ ACC_ERR_NEED_RESET : ACC_ERR_RECOVERED; } /** @@ -2831,6 +2702,9 @@ static pci_ers_result_t qm_process_dev_error(struct pci_dev *pdev) pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev, pci_channel_state_t state) { + struct hisi_qm *qm = pci_get_drvdata(pdev); + enum acc_err_result ret; + if (pdev->is_virtfn) return PCI_ERS_RESULT_NONE; @@ -2838,7 +2712,11 @@ pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev, if (state == pci_channel_io_perm_failure) return PCI_ERS_RESULT_DISCONNECT; - return qm_process_dev_error(pdev); + ret = qm_process_dev_error(qm); + if (ret == ACC_ERR_NEED_RESET) + return PCI_ERS_RESULT_NEED_RESET; + + return PCI_ERS_RESULT_RECOVERED; } EXPORT_SYMBOL_GPL(hisi_qm_dev_err_detected); @@ -3428,6 +3306,161 @@ flr_done: } EXPORT_SYMBOL_GPL(hisi_qm_reset_done); +static irqreturn_t qm_abnormal_irq(int irq, void *data) +{ + struct hisi_qm *qm = data; + enum acc_err_result ret; + + ret = qm_process_dev_error(qm); + if (ret == ACC_ERR_NEED_RESET) + schedule_work(&qm->rst_work); + + return IRQ_HANDLED; +} + +static int qm_irq_register(struct hisi_qm *qm) +{ + struct pci_dev *pdev = qm->pdev; + int ret; + + ret = request_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR), + qm_irq, IRQF_SHARED, qm->dev_name, qm); + if (ret) + return ret; + + if (qm->ver == QM_HW_V2) { + ret = request_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), + qm_aeq_irq, IRQF_SHARED, qm->dev_name, qm); + if (ret) + goto err_aeq_irq; + + if (qm->fun_type == QM_HW_PF) { + ret = request_irq(pci_irq_vector(pdev, + QM_ABNORMAL_EVENT_IRQ_VECTOR), + qm_abnormal_irq, IRQF_SHARED, + qm->dev_name, qm); + if (ret) + goto err_abonormal_irq; + } + } + + return 0; + +err_abonormal_irq: + free_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), qm); +err_aeq_irq: + free_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR), qm); + return ret; +} + +static void hisi_qm_controller_reset(struct work_struct *rst_work) +{ + struct hisi_qm *qm = container_of(rst_work, struct hisi_qm, rst_work); + int ret; + + /* reset pcie device controller */ + ret = qm_controller_reset(qm); + if (ret) + dev_err(&qm->pdev->dev, "controller reset failed (%d)\n", ret); + +} + +/** + * hisi_qm_init() - Initialize configures about qm. + * @qm: The qm needing init. + * + * This function init qm, then we can call hisi_qm_start to put qm into work. 
+ */ +int hisi_qm_init(struct hisi_qm *qm) +{ + struct pci_dev *pdev = qm->pdev; + struct device *dev = &pdev->dev; + unsigned int num_vec; + int ret; + + hisi_qm_pre_init(qm); + + ret = qm_alloc_uacce(qm); + if (ret < 0) + dev_warn(&pdev->dev, "fail to alloc uacce (%d)\n", ret); + + ret = pci_enable_device_mem(pdev); + if (ret < 0) { + dev_err(&pdev->dev, "Failed to enable device mem!\n"); + goto err_remove_uacce; + } + + ret = pci_request_mem_regions(pdev, qm->dev_name); + if (ret < 0) { + dev_err(&pdev->dev, "Failed to request mem regions!\n"); + goto err_disable_pcidev; + } + + qm->phys_base = pci_resource_start(pdev, PCI_BAR_2); + qm->phys_size = pci_resource_len(qm->pdev, PCI_BAR_2); + qm->io_base = ioremap(qm->phys_base, qm->phys_size); + if (!qm->io_base) { + ret = -EIO; + goto err_release_mem_regions; + } + + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); + if (ret < 0) + goto err_iounmap; + pci_set_master(pdev); + + if (!qm->ops->get_irq_num) { + ret = -EOPNOTSUPP; + goto err_iounmap; + } + num_vec = qm->ops->get_irq_num(qm); + ret = pci_alloc_irq_vectors(pdev, num_vec, num_vec, PCI_IRQ_MSI); + if (ret < 0) { + dev_err(dev, "Failed to enable MSI vectors!\n"); + goto err_iounmap; + } + + ret = qm_irq_register(qm); + if (ret) + goto err_free_irq_vectors; + + if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V2) { + /* v2 starts to support get vft by mailbox */ + ret = hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num); + if (ret) + goto err_irq_unregister; + } + + ret = hisi_qm_memory_init(qm); + if (ret) + goto err_irq_unregister; + + INIT_WORK(&qm->work, qm_work_process); + if (qm->fun_type == QM_HW_PF) + INIT_WORK(&qm->rst_work, hisi_qm_controller_reset); + + atomic_set(&qm->status.flags, QM_INIT); + + return 0; + +err_irq_unregister: + qm_irq_unregister(qm); +err_free_irq_vectors: + pci_free_irq_vectors(pdev); +err_iounmap: + iounmap(qm->io_base); +err_release_mem_regions: + pci_release_mem_regions(pdev); +err_disable_pcidev: + pci_disable_device(pdev); +err_remove_uacce: + uacce_remove(qm->uacce); + qm->uacce = NULL; + return ret; +} +EXPORT_SYMBOL_GPL(hisi_qm_init); + + MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Zhou Wang "); MODULE_DESCRIPTION("HiSilicon Accelerator queue manager driver"); diff --git a/drivers/crypto/hisilicon/qm.h b/drivers/crypto/hisilicon/qm.h index fc5e96a02399..a431ff2fac3c 100644 --- a/drivers/crypto/hisilicon/qm.h +++ b/drivers/crypto/hisilicon/qm.h @@ -226,6 +226,7 @@ struct hisi_qm { struct workqueue_struct *wq; struct work_struct work; + struct work_struct rst_work; const char *algs; bool use_sva; -- cgit v1.2.3 From 2c959a33f8630a008c7047e90312ba10ea2c78b7 Mon Sep 17 00:00:00 2001 From: Zhou Wang Date: Sat, 9 May 2020 17:44:05 +0800 Subject: crypto: hisilicon/zip - Use temporary sqe when doing work Currently zip sqe is stored in hisi_zip_qp_ctx, which will bring corruption with multiple parallel users of the crypto tfm. This patch removes the zip_sqe in hisi_zip_qp_ctx and uses a temporary sqe instead. 
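The hazard and the fix, in outline (simplified from the diff that follows): the descriptor used to live in the qp_ctx shared by every request on the tfm, so parallel submitters could overwrite each other's fields; each request now builds its descriptor on the stack before handing it to hisi_qp_send(), which copies it into the submission queue.

	/* per-request scratch instead of a field shared by all requests */
	struct hisi_zip_sqe zip_sqe;

	hisi_zip_fill_sqe(&zip_sqe, qp->req_type, input, output, a_req->slen,
			  a_req->dlen, req->sskip, req->dskip);
	hisi_zip_config_buf_type(&zip_sqe, HZIP_SGL);
	hisi_zip_config_tag(&zip_sqe, req->req_id);
	ret = hisi_qp_send(qp, &zip_sqe);
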
Signed-off-by: Zhou Wang Signed-off-by: Jonathan Cameron Signed-off-by: Shukun Tan Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/zip/zip_crypto.c | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c index 369ec3220574..5fb9d4b41bb9 100644 --- a/drivers/crypto/hisilicon/zip/zip_crypto.c +++ b/drivers/crypto/hisilicon/zip/zip_crypto.c @@ -64,7 +64,6 @@ struct hisi_zip_req_q { struct hisi_zip_qp_ctx { struct hisi_qp *qp; - struct hisi_zip_sqe zip_sqe; struct hisi_zip_req_q req_q; struct hisi_acc_sgl_pool *sgl_pool; struct hisi_zip *zip_dev; @@ -484,11 +483,11 @@ static struct hisi_zip_req *hisi_zip_create_req(struct acomp_req *req, static int hisi_zip_do_work(struct hisi_zip_req *req, struct hisi_zip_qp_ctx *qp_ctx) { - struct hisi_zip_sqe *zip_sqe = &qp_ctx->zip_sqe; struct acomp_req *a_req = req->req; struct hisi_qp *qp = qp_ctx->qp; struct device *dev = &qp->qm->pdev->dev; struct hisi_acc_sgl_pool *pool = qp_ctx->sgl_pool; + struct hisi_zip_sqe zip_sqe; dma_addr_t input; dma_addr_t output; int ret; @@ -511,13 +510,13 @@ static int hisi_zip_do_work(struct hisi_zip_req *req, } req->dma_dst = output; - hisi_zip_fill_sqe(zip_sqe, qp->req_type, input, output, a_req->slen, + hisi_zip_fill_sqe(&zip_sqe, qp->req_type, input, output, a_req->slen, a_req->dlen, req->sskip, req->dskip); - hisi_zip_config_buf_type(zip_sqe, HZIP_SGL); - hisi_zip_config_tag(zip_sqe, req->req_id); + hisi_zip_config_buf_type(&zip_sqe, HZIP_SGL); + hisi_zip_config_tag(&zip_sqe, req->req_id); /* send command to start a task */ - ret = hisi_qp_send(qp, zip_sqe); + ret = hisi_qp_send(qp, &zip_sqe); if (ret < 0) goto err_unmap_output; -- cgit v1.2.3 From 49c2c082e00e0bc4f5cbb7c21c7f0f873b35ab09 Mon Sep 17 00:00:00 2001 From: Nicolas Toromanoff Date: Tue, 12 May 2020 16:11:09 +0200 Subject: crypto: stm32/crc32 - fix ext4 chksum BUG_ON() Allow use of crc_update without prior call to crc_init. And change (and fix) driver to use CRC device even on unaligned buffers. 
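The unaligned handling boils down to three passes over the buffer, sketched below from the diff that follows (the CRC_CR switches between byte and word input mode are omitted for brevity):

	/* head: byte-wise until the pointer is 32-bit aligned */
	while (d8 != PTR_ALIGN(d8, sizeof(u32)) && length) {
		writeb_relaxed(*d8++, crc->regs + CRC_DR);
		length--;
	}

	/* body: whole 32-bit words */
	for (; length >= sizeof(u32); d8 += sizeof(u32), length -= sizeof(u32))
		writel_relaxed(*(u32 *)d8, crc->regs + CRC_DR);

	/* tail: whatever bytes remain */
	while (length--)
		writeb_relaxed(*d8++, crc->regs + CRC_DR);
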
Fixes: b51dbe90912a ("crypto: stm32 - Support for STM32 CRC32 crypto module") Signed-off-by: Nicolas Toromanoff Signed-off-by: Herbert Xu --- drivers/crypto/stm32/stm32-crc32.c | 98 +++++++++++++++++++------------------- 1 file changed, 48 insertions(+), 50 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/stm32/stm32-crc32.c b/drivers/crypto/stm32/stm32-crc32.c index 8e92e4ac79f1..c6156bf6c603 100644 --- a/drivers/crypto/stm32/stm32-crc32.c +++ b/drivers/crypto/stm32/stm32-crc32.c @@ -28,8 +28,10 @@ /* Registers values */ #define CRC_CR_RESET BIT(0) -#define CRC_CR_REVERSE (BIT(7) | BIT(6) | BIT(5)) #define CRC_INIT_DEFAULT 0xFFFFFFFF +#define CRC_CR_REV_IN_WORD (BIT(6) | BIT(5)) +#define CRC_CR_REV_IN_BYTE BIT(5) +#define CRC_CR_REV_OUT BIT(7) #define CRC_AUTOSUSPEND_DELAY 50 @@ -38,8 +40,6 @@ struct stm32_crc { struct device *dev; void __iomem *regs; struct clk *clk; - u8 pending_data[sizeof(u32)]; - size_t nb_pending_bytes; }; struct stm32_crc_list { @@ -59,7 +59,6 @@ struct stm32_crc_ctx { struct stm32_crc_desc_ctx { u32 partial; /* crc32c: partial in first 4 bytes of that struct */ - struct stm32_crc *crc; }; static int stm32_crc32_cra_init(struct crypto_tfm *tfm) @@ -99,25 +98,22 @@ static int stm32_crc_init(struct shash_desc *desc) struct stm32_crc *crc; spin_lock_bh(&crc_list.lock); - list_for_each_entry(crc, &crc_list.dev_list, list) { - ctx->crc = crc; - break; - } + crc = list_first_entry(&crc_list.dev_list, struct stm32_crc, list); spin_unlock_bh(&crc_list.lock); - pm_runtime_get_sync(ctx->crc->dev); + pm_runtime_get_sync(crc->dev); /* Reset, set key, poly and configure in bit reverse mode */ - writel_relaxed(bitrev32(mctx->key), ctx->crc->regs + CRC_INIT); - writel_relaxed(bitrev32(mctx->poly), ctx->crc->regs + CRC_POL); - writel_relaxed(CRC_CR_RESET | CRC_CR_REVERSE, ctx->crc->regs + CRC_CR); + writel_relaxed(bitrev32(mctx->key), crc->regs + CRC_INIT); + writel_relaxed(bitrev32(mctx->poly), crc->regs + CRC_POL); + writel_relaxed(CRC_CR_RESET | CRC_CR_REV_IN_WORD | CRC_CR_REV_OUT, + crc->regs + CRC_CR); /* Store partial result */ - ctx->partial = readl_relaxed(ctx->crc->regs + CRC_DR); - ctx->crc->nb_pending_bytes = 0; + ctx->partial = readl_relaxed(crc->regs + CRC_DR); - pm_runtime_mark_last_busy(ctx->crc->dev); - pm_runtime_put_autosuspend(ctx->crc->dev); + pm_runtime_mark_last_busy(crc->dev); + pm_runtime_put_autosuspend(crc->dev); return 0; } @@ -126,31 +122,49 @@ static int stm32_crc_update(struct shash_desc *desc, const u8 *d8, unsigned int length) { struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc); - struct stm32_crc *crc = ctx->crc; - u32 *d32; - unsigned int i; + struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm); + struct stm32_crc *crc; + + spin_lock_bh(&crc_list.lock); + crc = list_first_entry(&crc_list.dev_list, struct stm32_crc, list); + spin_unlock_bh(&crc_list.lock); pm_runtime_get_sync(crc->dev); - if (unlikely(crc->nb_pending_bytes)) { - while (crc->nb_pending_bytes != sizeof(u32) && length) { - /* Fill in pending data */ - crc->pending_data[crc->nb_pending_bytes++] = *(d8++); + /* + * Restore previously calculated CRC for this context as init value + * Restore polynomial configuration + * Configure in register for word input data, + * Configure out register in reversed bit mode data. 
+ */ + writel_relaxed(bitrev32(ctx->partial), crc->regs + CRC_INIT); + writel_relaxed(bitrev32(mctx->poly), crc->regs + CRC_POL); + writel_relaxed(CRC_CR_RESET | CRC_CR_REV_IN_WORD | CRC_CR_REV_OUT, + crc->regs + CRC_CR); + + if (d8 != PTR_ALIGN(d8, sizeof(u32))) { + /* Configure for byte data */ + writel_relaxed(CRC_CR_REV_IN_BYTE | CRC_CR_REV_OUT, + crc->regs + CRC_CR); + while (d8 != PTR_ALIGN(d8, sizeof(u32)) && length) { + writeb_relaxed(*d8++, crc->regs + CRC_DR); length--; } - - if (crc->nb_pending_bytes == sizeof(u32)) { - /* Process completed pending data */ - writel_relaxed(*(u32 *)crc->pending_data, - crc->regs + CRC_DR); - crc->nb_pending_bytes = 0; - } + /* Configure for word data */ + writel_relaxed(CRC_CR_REV_IN_WORD | CRC_CR_REV_OUT, + crc->regs + CRC_CR); } - d32 = (u32 *)d8; - for (i = 0; i < length >> 2; i++) - /* Process 32 bits data */ - writel_relaxed(*(d32++), crc->regs + CRC_DR); + for (; length >= sizeof(u32); d8 += sizeof(u32), length -= sizeof(u32)) + writel_relaxed(*((u32 *)d8), crc->regs + CRC_DR); + + if (length) { + /* Configure for byte data */ + writel_relaxed(CRC_CR_REV_IN_BYTE | CRC_CR_REV_OUT, + crc->regs + CRC_CR); + while (length--) + writeb_relaxed(*d8++, crc->regs + CRC_DR); + } /* Store partial result */ ctx->partial = readl_relaxed(crc->regs + CRC_DR); @@ -158,22 +172,6 @@ static int stm32_crc_update(struct shash_desc *desc, const u8 *d8, pm_runtime_mark_last_busy(crc->dev); pm_runtime_put_autosuspend(crc->dev); - /* Check for pending data (non 32 bits) */ - length &= 3; - if (likely(!length)) - return 0; - - if ((crc->nb_pending_bytes + length) >= sizeof(u32)) { - /* Shall not happen */ - dev_err(crc->dev, "Pending data overflow\n"); - return -EINVAL; - } - - d8 = (const u8 *)d32; - for (i = 0; i < length; i++) - /* Store pending data */ - crc->pending_data[crc->nb_pending_bytes++] = *(d8++); - return 0; } -- cgit v1.2.3 From a8cc3128bf2c01c4d448fe17149e87132113b445 Mon Sep 17 00:00:00 2001 From: Nicolas Toromanoff Date: Tue, 12 May 2020 16:11:10 +0200 Subject: crypto: stm32/crc32 - fix run-time self test issue. Fix wrong crc32 initialisation value: "alg: shash: stm32_crc32 test failed (wrong result) on test vector 0, cfg="init+update+final aligned buffer" cra_name="crc32c" expects an init value of 0XFFFFFFFF, cra_name="crc32" expects an init value of 0. 
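For reference, a rough software model of what the self test expects from the two algorithms, assuming the conventions of the generic crc32/crc32c shash drivers (lib/crc32 helpers; "crc32" returns the raw register value, "crc32c" inverts the final value); buf and len stand for any test vector:

	#include <linux/crc32.h>

	u32 crc32_ref  = crc32_le(0x00000000, buf, len);      /* "crc32":  seed 0 */
	u32 crc32c_ref = ~__crc32c_le(0xFFFFFFFF, buf, len);  /* "crc32c": seed ~0 */
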
Fixes: b51dbe90912a ("crypto: stm32 - Support for STM32 CRC32 crypto module") Signed-off-by: Nicolas Toromanoff Signed-off-by: Herbert Xu --- drivers/crypto/stm32/stm32-crc32.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/stm32/stm32-crc32.c b/drivers/crypto/stm32/stm32-crc32.c index c6156bf6c603..1c3e411b7acb 100644 --- a/drivers/crypto/stm32/stm32-crc32.c +++ b/drivers/crypto/stm32/stm32-crc32.c @@ -28,10 +28,10 @@ /* Registers values */ #define CRC_CR_RESET BIT(0) -#define CRC_INIT_DEFAULT 0xFFFFFFFF #define CRC_CR_REV_IN_WORD (BIT(6) | BIT(5)) #define CRC_CR_REV_IN_BYTE BIT(5) #define CRC_CR_REV_OUT BIT(7) +#define CRC32C_INIT_DEFAULT 0xFFFFFFFF #define CRC_AUTOSUSPEND_DELAY 50 @@ -65,7 +65,7 @@ static int stm32_crc32_cra_init(struct crypto_tfm *tfm) { struct stm32_crc_ctx *mctx = crypto_tfm_ctx(tfm); - mctx->key = CRC_INIT_DEFAULT; + mctx->key = 0; mctx->poly = CRC32_POLY_LE; return 0; } @@ -74,7 +74,7 @@ static int stm32_crc32c_cra_init(struct crypto_tfm *tfm) { struct stm32_crc_ctx *mctx = crypto_tfm_ctx(tfm); - mctx->key = CRC_INIT_DEFAULT; + mctx->key = CRC32C_INIT_DEFAULT; mctx->poly = CRC32C_POLY_LE; return 0; } -- cgit v1.2.3 From 10b89c43a64eb0d236903b79a3bc9d8f6cbfd9c7 Mon Sep 17 00:00:00 2001 From: Nicolas Toromanoff Date: Tue, 12 May 2020 16:11:11 +0200 Subject: crypto: stm32/crc32 - fix multi-instance Ensure CRC algorithm is registered only once in crypto framework when there are several instances of CRC devices. Update the CRC device list management to avoid that only the first CRC instance is used. Fixes: b51dbe90912a ("crypto: stm32 - Support for STM32 CRC32 crypto module") Signed-off-by: Nicolas Toromanoff Signed-off-by: Herbert Xu --- drivers/crypto/stm32/stm32-crc32.c | 48 ++++++++++++++++++++++++++++---------- 1 file changed, 36 insertions(+), 12 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/stm32/stm32-crc32.c b/drivers/crypto/stm32/stm32-crc32.c index 1c3e411b7acb..10304511f9b4 100644 --- a/drivers/crypto/stm32/stm32-crc32.c +++ b/drivers/crypto/stm32/stm32-crc32.c @@ -91,16 +91,29 @@ static int stm32_crc_setkey(struct crypto_shash *tfm, const u8 *key, return 0; } -static int stm32_crc_init(struct shash_desc *desc) +static struct stm32_crc *stm32_crc_get_next_crc(void) { - struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc); - struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm); struct stm32_crc *crc; spin_lock_bh(&crc_list.lock); crc = list_first_entry(&crc_list.dev_list, struct stm32_crc, list); + if (crc) + list_move_tail(&crc->list, &crc_list.dev_list); spin_unlock_bh(&crc_list.lock); + return crc; +} + +static int stm32_crc_init(struct shash_desc *desc) +{ + struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc); + struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm); + struct stm32_crc *crc; + + crc = stm32_crc_get_next_crc(); + if (!crc) + return -ENODEV; + pm_runtime_get_sync(crc->dev); /* Reset, set key, poly and configure in bit reverse mode */ @@ -125,9 +138,9 @@ static int stm32_crc_update(struct shash_desc *desc, const u8 *d8, struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm); struct stm32_crc *crc; - spin_lock_bh(&crc_list.lock); - crc = list_first_entry(&crc_list.dev_list, struct stm32_crc, list); - spin_unlock_bh(&crc_list.lock); + crc = stm32_crc_get_next_crc(); + if (!crc) + return -ENODEV; pm_runtime_get_sync(crc->dev); @@ -200,6 +213,8 @@ static int stm32_crc_digest(struct shash_desc *desc, const u8 *data, return 
stm32_crc_init(desc) ?: stm32_crc_finup(desc, data, length, out); } +static unsigned int refcnt; +static DEFINE_MUTEX(refcnt_lock); static struct shash_alg algs[] = { /* CRC-32 */ { @@ -290,12 +305,18 @@ static int stm32_crc_probe(struct platform_device *pdev) list_add(&crc->list, &crc_list.dev_list); spin_unlock(&crc_list.lock); - ret = crypto_register_shashes(algs, ARRAY_SIZE(algs)); - if (ret) { - dev_err(dev, "Failed to register\n"); - clk_disable_unprepare(crc->clk); - return ret; + mutex_lock(&refcnt_lock); + if (!refcnt) { + ret = crypto_register_shashes(algs, ARRAY_SIZE(algs)); + if (ret) { + mutex_unlock(&refcnt_lock); + dev_err(dev, "Failed to register\n"); + clk_disable_unprepare(crc->clk); + return ret; + } } + refcnt++; + mutex_unlock(&refcnt_lock); dev_info(dev, "Initialized\n"); @@ -316,7 +337,10 @@ static int stm32_crc_remove(struct platform_device *pdev) list_del(&crc->list); spin_unlock(&crc_list.lock); - crypto_unregister_shashes(algs, ARRAY_SIZE(algs)); + mutex_lock(&refcnt_lock); + if (!--refcnt) + crypto_unregister_shashes(algs, ARRAY_SIZE(algs)); + mutex_unlock(&refcnt_lock); pm_runtime_disable(crc->dev); pm_runtime_put_noidle(crc->dev); -- cgit v1.2.3 From 100f84beee4874234d04a1ea642b8c9738d7020d Mon Sep 17 00:00:00 2001 From: Nicolas Toromanoff Date: Tue, 12 May 2020 16:11:12 +0200 Subject: crypto: stm32/crc32 - don't sleep in runtime pm Ensure stm32_crc_update() and stm32_crc_init() can be called in atomic context and can't sleep. Add pm_runtime_irq_safe() to make pm_runtime_get_sync() atomic. Change runtime pm to call clk_enable()/clk_disable() and change system pm to unprepare/prepare the clock and force runtime pm suspend/resume. Signed-off-by: Nicolas Toromanoff Signed-off-by: Herbert Xu --- drivers/crypto/stm32/stm32-crc32.c | 45 ++++++++++++++++++++++++++++++-------- 1 file changed, 36 insertions(+), 9 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/stm32/stm32-crc32.c b/drivers/crypto/stm32/stm32-crc32.c index 10304511f9b4..413415c216ef 100644 --- a/drivers/crypto/stm32/stm32-crc32.c +++ b/drivers/crypto/stm32/stm32-crc32.c @@ -297,6 +297,7 @@ static int stm32_crc_probe(struct platform_device *pdev) pm_runtime_get_noresume(dev); pm_runtime_set_active(dev); + pm_runtime_irq_safe(dev); pm_runtime_enable(dev); platform_set_drvdata(pdev, crc); @@ -350,34 +351,60 @@ static int stm32_crc_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM -static int stm32_crc_runtime_suspend(struct device *dev) +static int __maybe_unused stm32_crc_suspend(struct device *dev) { struct stm32_crc *crc = dev_get_drvdata(dev); + int ret; - clk_disable_unprepare(crc->clk); + ret = pm_runtime_force_suspend(dev); + if (ret) + return ret; + + clk_unprepare(crc->clk); return 0; } -static int stm32_crc_runtime_resume(struct device *dev) +static int __maybe_unused stm32_crc_resume(struct device *dev) { struct stm32_crc *crc = dev_get_drvdata(dev); int ret; - ret = clk_prepare_enable(crc->clk); + ret = clk_prepare(crc->clk); if (ret) { - dev_err(crc->dev, "Failed to prepare_enable clock\n"); + dev_err(crc->dev, "Failed to prepare clock\n"); + return ret; + } + + return pm_runtime_force_resume(dev); +} + +static int __maybe_unused stm32_crc_runtime_suspend(struct device *dev) +{ + struct stm32_crc *crc = dev_get_drvdata(dev); + + clk_disable(crc->clk); + + return 0; +} + +static int __maybe_unused stm32_crc_runtime_resume(struct device *dev) +{ + struct stm32_crc *crc = dev_get_drvdata(dev); + int ret; + + ret = clk_enable(crc->clk); + if (ret) { + 
dev_err(crc->dev, "Failed to enable clock\n"); return ret; } return 0; } -#endif static const struct dev_pm_ops stm32_crc_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, - pm_runtime_force_resume) + SET_SYSTEM_SLEEP_PM_OPS(stm32_crc_suspend, + stm32_crc_resume) SET_RUNTIME_PM_OPS(stm32_crc_runtime_suspend, stm32_crc_runtime_resume, NULL) }; -- cgit v1.2.3 From 7795c0baf5ac25e104fec8677ad134066a8fb8d3 Mon Sep 17 00:00:00 2001 From: Nicolas Toromanoff Date: Tue, 12 May 2020 16:11:13 +0200 Subject: crypto: stm32/crc32 - protect from concurrent accesses Protect STM32 CRC device from concurrent accesses. As we create a spinlocked section that increase with buffer size, we provide a module parameter to release the pressure by splitting critical section in chunks. Size of each chunk is defined in burst_size module parameter. By default burst_size=0, i.e. don't split incoming buffer. Signed-off-by: Nicolas Toromanoff Signed-off-by: Herbert Xu --- drivers/crypto/stm32/stm32-crc32.c | 47 ++++++++++++++++++++++++++++++++++++-- 1 file changed, 45 insertions(+), 2 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/stm32/stm32-crc32.c b/drivers/crypto/stm32/stm32-crc32.c index 413415c216ef..3ba41148c2a4 100644 --- a/drivers/crypto/stm32/stm32-crc32.c +++ b/drivers/crypto/stm32/stm32-crc32.c @@ -35,11 +35,16 @@ #define CRC_AUTOSUSPEND_DELAY 50 +static unsigned int burst_size; +module_param(burst_size, uint, 0644); +MODULE_PARM_DESC(burst_size, "Select burst byte size (0 unlimited)"); + struct stm32_crc { struct list_head list; struct device *dev; void __iomem *regs; struct clk *clk; + spinlock_t lock; }; struct stm32_crc_list { @@ -109,6 +114,7 @@ static int stm32_crc_init(struct shash_desc *desc) struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc); struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm); struct stm32_crc *crc; + unsigned long flags; crc = stm32_crc_get_next_crc(); if (!crc) @@ -116,6 +122,8 @@ static int stm32_crc_init(struct shash_desc *desc) pm_runtime_get_sync(crc->dev); + spin_lock_irqsave(&crc->lock, flags); + /* Reset, set key, poly and configure in bit reverse mode */ writel_relaxed(bitrev32(mctx->key), crc->regs + CRC_INIT); writel_relaxed(bitrev32(mctx->poly), crc->regs + CRC_POL); @@ -125,18 +133,21 @@ static int stm32_crc_init(struct shash_desc *desc) /* Store partial result */ ctx->partial = readl_relaxed(crc->regs + CRC_DR); + spin_unlock_irqrestore(&crc->lock, flags); + pm_runtime_mark_last_busy(crc->dev); pm_runtime_put_autosuspend(crc->dev); return 0; } -static int stm32_crc_update(struct shash_desc *desc, const u8 *d8, - unsigned int length) +static int burst_update(struct shash_desc *desc, const u8 *d8, + size_t length) { struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc); struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm); struct stm32_crc *crc; + unsigned long flags; crc = stm32_crc_get_next_crc(); if (!crc) @@ -144,6 +155,8 @@ static int stm32_crc_update(struct shash_desc *desc, const u8 *d8, pm_runtime_get_sync(crc->dev); + spin_lock_irqsave(&crc->lock, flags); + /* * Restore previously calculated CRC for this context as init value * Restore polynomial configuration @@ -182,12 +195,40 @@ static int stm32_crc_update(struct shash_desc *desc, const u8 *d8, /* Store partial result */ ctx->partial = readl_relaxed(crc->regs + CRC_DR); + spin_unlock_irqrestore(&crc->lock, flags); + pm_runtime_mark_last_busy(crc->dev); pm_runtime_put_autosuspend(crc->dev); return 0; } +static int stm32_crc_update(struct shash_desc *desc, const 
u8 *d8, + unsigned int length) +{ + const unsigned int burst_sz = burst_size; + unsigned int rem_sz; + const u8 *cur; + size_t size; + int ret; + + if (!burst_sz) + return burst_update(desc, d8, length); + + /* Digest first bytes not 32bit aligned at first pass in the loop */ + size = min(length, + burst_sz + (unsigned int)d8 - ALIGN_DOWN((unsigned int)d8, + sizeof(u32))); + for (rem_sz = length, cur = d8; rem_sz; + rem_sz -= size, cur += size, size = min(rem_sz, burst_sz)) { + ret = burst_update(desc, cur, size); + if (ret) + return ret; + } + + return 0; +} + static int stm32_crc_final(struct shash_desc *desc, u8 *out) { struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc); @@ -300,6 +341,8 @@ static int stm32_crc_probe(struct platform_device *pdev) pm_runtime_irq_safe(dev); pm_runtime_enable(dev); + spin_lock_init(&crc->lock); + platform_set_drvdata(pdev, crc); spin_lock(&crc_list.lock); -- cgit v1.2.3 From 8502652542c6684dd142f74c1bd1772730f653bd Mon Sep 17 00:00:00 2001 From: Longfang Liu Date: Fri, 15 May 2020 17:13:54 +0800 Subject: crypto: hisilicon/qm - add debugfs for QM Add DebugFS method to get the information of IRQ/Requests/QP .etc of QM for HPRE/ZIP/SEC drivers. Signed-off-by: Longfang Liu Signed-off-by: Shukun Tan Reviewed-by: Zaibo Xu Reviewed-by: Zhou Wang Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/qm.c | 51 +++++++++++++++++++++++++++++++++++++++++++ drivers/crypto/hisilicon/qm.h | 9 ++++++++ 2 files changed, 60 insertions(+) (limited to 'drivers/crypto') diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index 6365f931bb73..744d1310f6a9 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -324,6 +324,19 @@ struct hisi_qm_hw_ops { enum acc_err_result (*hw_error_handle)(struct hisi_qm *qm); }; +struct qm_dfx_item { + const char *name; + u32 offset; +}; + +static struct qm_dfx_item qm_dfx_files[] = { + {"err_irq", offsetof(struct qm_dfx, err_irq_cnt)}, + {"aeq_irq", offsetof(struct qm_dfx, aeq_irq_cnt)}, + {"abnormal_irq", offsetof(struct qm_dfx, abnormal_irq_cnt)}, + {"create_qp_err", offsetof(struct qm_dfx, create_qp_err_cnt)}, + {"mb_err", offsetof(struct qm_dfx, mb_err_cnt)}, +}; + static const char * const qm_debug_file_name[] = { [CURRENT_Q] = "current_q", [CLEAR_ENABLE] = "clear_enable", @@ -514,6 +527,8 @@ static int qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue, busy_unlock: mutex_unlock(&qm->mailbox_lock); + if (ret) + atomic64_inc(&qm->debug.dfx.mb_err_cnt); return ret; } @@ -671,6 +686,7 @@ static irqreturn_t qm_irq(int irq, void *data) if (readl(qm->io_base + QM_VF_EQ_INT_SOURCE)) return do_qm_irq(irq, data); + atomic64_inc(&qm->debug.dfx.err_irq_cnt); dev_err(&qm->pdev->dev, "invalid int source\n"); qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0); @@ -683,6 +699,7 @@ static irqreturn_t qm_aeq_irq(int irq, void *data) struct qm_aeqe *aeqe = qm->aeqe + qm->status.aeq_head; u32 type; + atomic64_inc(&qm->debug.dfx.aeq_irq_cnt); if (!readl(qm->io_base + QM_VF_AEQ_INT_SOURCE)) return IRQ_NONE; @@ -1192,6 +1209,7 @@ static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type) if (qm->qp_in_used == qm->qp_num) { dev_info_ratelimited(dev, "All %u queues of QM are busy!\n", qm->qp_num); + atomic64_inc(&qm->debug.dfx.create_qp_err_cnt); return ERR_PTR(-EBUSY); } @@ -1199,6 +1217,7 @@ static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type) if (qp_id < 0) { dev_info_ratelimited(dev, "All %u queues of QM are busy!\n", qm->qp_num); + 
atomic64_inc(&qm->debug.dfx.create_qp_err_cnt); return ERR_PTR(-EBUSY); } @@ -2249,6 +2268,26 @@ err_unlock: } EXPORT_SYMBOL_GPL(hisi_qm_stop); +static int qm_debugfs_atomic64_set(void *data, u64 val) +{ + if (val) + return -EINVAL; + + atomic64_set((atomic64_t *)data, 0); + + return 0; +} + +static int qm_debugfs_atomic64_get(void *data, u64 *val) +{ + *val = atomic64_read((atomic64_t *)data); + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(qm_atomic64_ops, qm_debugfs_atomic64_get, + qm_debugfs_atomic64_set, "%llu\n"); + /** * hisi_qm_debug_init() - Initialize qm related debugfs files. * @qm: The qm for which we want to add debugfs files. @@ -2257,7 +2296,9 @@ EXPORT_SYMBOL_GPL(hisi_qm_stop); */ int hisi_qm_debug_init(struct hisi_qm *qm) { + struct qm_dfx *dfx = &qm->debug.dfx; struct dentry *qm_d; + void *data; int i, ret; qm_d = debugfs_create_dir("qm", qm->debug.debug_root); @@ -2273,6 +2314,15 @@ int hisi_qm_debug_init(struct hisi_qm *qm) debugfs_create_file("qm_regs", 0444, qm->debug.qm_d, qm, &qm_regs_fops); + for (i = 0; i < ARRAY_SIZE(qm_dfx_files); i++) { + data = (atomic64_t *)((uintptr_t)dfx + qm_dfx_files[i].offset); + debugfs_create_file(qm_dfx_files[i].name, + 0644, + qm_d, + data, + &qm_atomic64_ops); + } + return 0; failed_to_create: @@ -3311,6 +3361,7 @@ static irqreturn_t qm_abnormal_irq(int irq, void *data) struct hisi_qm *qm = data; enum acc_err_result ret; + atomic64_inc(&qm->debug.dfx.abnormal_irq_cnt); ret = qm_process_dev_error(qm); if (ret == ACC_ERR_NEED_RESET) schedule_work(&qm->rst_work); diff --git a/drivers/crypto/hisilicon/qm.h b/drivers/crypto/hisilicon/qm.h index a431ff2fac3c..e4b46a7b10ef 100644 --- a/drivers/crypto/hisilicon/qm.h +++ b/drivers/crypto/hisilicon/qm.h @@ -121,6 +121,14 @@ enum qm_debug_file { DEBUG_FILE_NUM, }; +struct qm_dfx { + atomic64_t err_irq_cnt; + atomic64_t aeq_irq_cnt; + atomic64_t abnormal_irq_cnt; + atomic64_t create_qp_err_cnt; + atomic64_t mb_err_cnt; +}; + struct debugfs_file { enum qm_debug_file index; struct mutex lock; @@ -129,6 +137,7 @@ struct debugfs_file { struct qm_debug { u32 curr_qm_qp_num; + struct qm_dfx dfx; struct dentry *debug_root; struct dentry *qm_d; struct debugfs_file files[DEBUG_FILE_NUM]; -- cgit v1.2.3 From 0a3a3960210b4bc6cfe5db45b4af714ee4a010e1 Mon Sep 17 00:00:00 2001 From: Longfang Liu Date: Fri, 15 May 2020 17:13:55 +0800 Subject: crypto: hisilicon/qm - add debugfs to the QM state machine The QM driver uses debugfs to provides the current state of the QM state machine Signed-off-by: Longfang Liu Signed-off-by: Shukun Tan Reviewed-by: Zhou Wang Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/qm.c | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) (limited to 'drivers/crypto') diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index 744d1310f6a9..7c1982fd2156 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -175,6 +175,7 @@ #define QM_SQE_DATA_ALIGN_MASK GENMASK(6, 0) #define QMC_ALIGN(sz) ALIGN(sz, 32) +#define QM_DBG_READ_LEN 256 #define QM_DBG_TMP_BUF_LEN 22 #define QM_PCI_COMMAND_INVALID ~0 @@ -2268,6 +2269,37 @@ err_unlock: } EXPORT_SYMBOL_GPL(hisi_qm_stop); +static ssize_t qm_status_read(struct file *filp, char __user *buffer, + size_t count, loff_t *pos) +{ + struct hisi_qm *qm = filp->private_data; + char buf[QM_DBG_READ_LEN]; + int val, cp_len, len; + + if (*pos) + return 0; + + if (count < QM_DBG_READ_LEN) + return -ENOSPC; + + val = atomic_read(&qm->status.flags); + len = snprintf(buf, QM_DBG_READ_LEN, 
"%s\n", qm_s[val]); + if (!len) + return -EFAULT; + + cp_len = copy_to_user(buffer, buf, len); + if (cp_len) + return -EFAULT; + + return (*pos = len); +} + +static const struct file_operations qm_status_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = qm_status_read, +}; + static int qm_debugfs_atomic64_set(void *data, u64 val) { if (val) @@ -2314,6 +2346,8 @@ int hisi_qm_debug_init(struct hisi_qm *qm) debugfs_create_file("qm_regs", 0444, qm->debug.qm_d, qm, &qm_regs_fops); + debugfs_create_file("status", 0444, qm->debug.qm_d, qm, + &qm_status_fops); for (i = 0; i < ARRAY_SIZE(qm_dfx_files); i++) { data = (atomic64_t *)((uintptr_t)dfx + qm_dfx_files[i].offset); debugfs_create_file(qm_dfx_files[i].name, -- cgit v1.2.3 From 8213a1a60c5329501ff903339b248ceb84278cc3 Mon Sep 17 00:00:00 2001 From: Kai Ye Date: Fri, 15 May 2020 17:13:56 +0800 Subject: crypto: hisilicon/sec2 - add debugfs for Hisilicon SEC Hisilicon SEC engine driver uses debugfs to provides IO operation debug information Signed-off-by: Kai Ye Signed-off-by: Longfang Liu Signed-off-by: Shukun Tan Reviewed-by: Zhou Wang Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/sec2/sec.h | 4 +++ drivers/crypto/hisilicon/sec2/sec_crypto.c | 15 ++++++++--- drivers/crypto/hisilicon/sec2/sec_main.c | 43 +++++++++++++++++++++++++----- 3 files changed, 52 insertions(+), 10 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h index 2326634a1d71..7b64aca704d6 100644 --- a/drivers/crypto/hisilicon/sec2/sec.h +++ b/drivers/crypto/hisilicon/sec2/sec.h @@ -160,6 +160,10 @@ struct sec_debug_file { struct sec_dfx { atomic64_t send_cnt; atomic64_t recv_cnt; + atomic64_t send_busy_cnt; + atomic64_t err_bd_cnt; + atomic64_t invalid_req_cnt; + atomic64_t done_flag_cnt; }; struct sec_debug { diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c index 848ab492d26e..64614a9bdf21 100644 --- a/drivers/crypto/hisilicon/sec2/sec_crypto.c +++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c @@ -148,6 +148,7 @@ static int sec_aead_verify(struct sec_req *req) static void sec_req_cb(struct hisi_qp *qp, void *resp) { struct sec_qp_ctx *qp_ctx = qp->qp_ctx; + struct sec_dfx *dfx = &qp_ctx->ctx->sec->debug.dfx; struct sec_sqe *bd = resp; struct sec_ctx *ctx; struct sec_req *req; @@ -157,11 +158,16 @@ static void sec_req_cb(struct hisi_qp *qp, void *resp) type = bd->type_cipher_auth & SEC_TYPE_MASK; if (unlikely(type != SEC_BD_TYPE2)) { + atomic64_inc(&dfx->err_bd_cnt); pr_err("err bd type [%d]\n", type); return; } req = qp_ctx->req_list[le16_to_cpu(bd->type2.tag)]; + if (unlikely(!req)) { + atomic64_inc(&dfx->invalid_req_cnt); + return; + } req->err_type = bd->type2.error_type; ctx = req->ctx; done = le16_to_cpu(bd->type2.done_flag) & SEC_DONE_MASK; @@ -174,12 +180,13 @@ static void sec_req_cb(struct hisi_qp *qp, void *resp) "err_type[%d],done[%d],flag[%d]\n", req->err_type, done, flag); err = -EIO; + atomic64_inc(&dfx->done_flag_cnt); } if (ctx->alg_type == SEC_AEAD && !req->c_req.encrypt) err = sec_aead_verify(req); - atomic64_inc(&ctx->sec->debug.dfx.recv_cnt); + atomic64_inc(&dfx->recv_cnt); ctx->req_op->buf_unmap(ctx, req); @@ -200,10 +207,12 @@ static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req) return -ENOBUFS; if (!ret) { - if (req->fake_busy) + if (req->fake_busy) { + atomic64_inc(&ctx->sec->debug.dfx.send_busy_cnt); ret = -EBUSY; - else + } else { ret = -EINPROGRESS; + } } return ret; diff --git 
a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index c3381f253d55..5ea44ad8d51c 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -88,6 +88,11 @@ struct sec_hw_error { const char *msg; }; +struct sec_dfx_item { + const char *name; + u32 offset; +}; + static const char sec_name[] = "hisi_sec2"; static struct dentry *sec_debugfs_root; static struct hisi_qm_list sec_devices; @@ -110,6 +115,15 @@ static const char * const sec_dbg_file_name[] = { [SEC_CLEAR_ENABLE] = "clear_enable", }; +static struct sec_dfx_item sec_dfx_labels[] = { + {"send_cnt", offsetof(struct sec_dfx, send_cnt)}, + {"recv_cnt", offsetof(struct sec_dfx, recv_cnt)}, + {"send_busy_cnt", offsetof(struct sec_dfx, send_busy_cnt)}, + {"err_bd_cnt", offsetof(struct sec_dfx, err_bd_cnt)}, + {"invalid_req_cnt", offsetof(struct sec_dfx, invalid_req_cnt)}, + {"done_flag_cnt", offsetof(struct sec_dfx, done_flag_cnt)}, +}; + static const struct debugfs_reg32 sec_dfx_regs[] = { {"SEC_PF_ABNORMAL_INT_SOURCE ", 0x301010}, {"SEC_SAA_EN ", 0x301270}, @@ -543,10 +557,22 @@ static const struct file_operations sec_dbg_fops = { static int sec_debugfs_atomic64_get(void *data, u64 *val) { *val = atomic64_read((atomic64_t *)data); + + return 0; +} + +static int sec_debugfs_atomic64_set(void *data, u64 val) +{ + if (val) + return -EINVAL; + + atomic64_set((atomic64_t *)data, 0); + return 0; } + DEFINE_DEBUGFS_ATTRIBUTE(sec_atomic64_ops, sec_debugfs_atomic64_get, - NULL, "%lld\n"); + sec_debugfs_atomic64_set, "%lld\n"); static int sec_core_debug_init(struct sec_dev *sec) { @@ -555,6 +581,7 @@ static int sec_core_debug_init(struct sec_dev *sec) struct sec_dfx *dfx = &sec->debug.dfx; struct debugfs_regset32 *regset; struct dentry *tmp_d; + int i; tmp_d = debugfs_create_dir("sec_dfx", sec->qm.debug.debug_root); @@ -566,13 +593,15 @@ static int sec_core_debug_init(struct sec_dev *sec) regset->nregs = ARRAY_SIZE(sec_dfx_regs); regset->base = qm->io_base; - debugfs_create_regset32("regs", 0444, tmp_d, regset); + if (qm->pdev->device == SEC_PF_PCI_DEVICE_ID) + debugfs_create_regset32("regs", 0444, tmp_d, regset); - debugfs_create_file("send_cnt", 0444, tmp_d, - &dfx->send_cnt, &sec_atomic64_ops); - - debugfs_create_file("recv_cnt", 0444, tmp_d, - &dfx->recv_cnt, &sec_atomic64_ops); + for (i = 0; i < ARRAY_SIZE(sec_dfx_labels); i++) { + atomic64_t *data = (atomic64_t *)((uintptr_t)dfx + + sec_dfx_labels[i].offset); + debugfs_create_file(sec_dfx_labels[i].name, 0644, + tmp_d, data, &sec_atomic64_ops); + } return 0; } -- cgit v1.2.3 From 64a6301ebee769073e84daa14eeee01125aef79d Mon Sep 17 00:00:00 2001 From: Hui Tang Date: Fri, 15 May 2020 17:13:57 +0800 Subject: crypto: hisilicon/hpre - add debugfs for Hisilicon HPRE Add debugfs to provides IO operation debug information and add BD processing timeout count function Signed-off-by: Hui Tang Signed-off-by: Longfang Liu Signed-off-by: Shukun Tan Reviewed-by: Zaibo Xu Reviewed-by: Zhou Wang Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/hpre/hpre.h | 17 +++++ drivers/crypto/hisilicon/hpre/hpre_crypto.c | 99 ++++++++++++++++++++++++----- drivers/crypto/hisilicon/hpre/hpre_main.c | 56 ++++++++++++++++ 3 files changed, 157 insertions(+), 15 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/hisilicon/hpre/hpre.h b/drivers/crypto/hisilicon/hpre/hpre.h index 0a8ba468e2be..ed730d173e95 100644 --- a/drivers/crypto/hisilicon/hpre/hpre.h +++ b/drivers/crypto/hisilicon/hpre/hpre.h @@ -25,6 +25,17 @@ enum 
hpre_ctrl_dbgfs_file { HPRE_DEBUG_FILE_NUM, }; +enum hpre_dfx_dbgfs_file { + HPRE_SEND_CNT, + HPRE_RECV_CNT, + HPRE_SEND_FAIL_CNT, + HPRE_SEND_BUSY_CNT, + HPRE_OVER_THRHLD_CNT, + HPRE_OVERTIME_THRHLD, + HPRE_INVALID_REQ_CNT, + HPRE_DFX_FILE_NUM +}; + #define HPRE_DEBUGFS_FILE_NUM (HPRE_DEBUG_FILE_NUM + HPRE_CLUSTERS_NUM - 1) struct hpre_debugfs_file { @@ -34,6 +45,11 @@ struct hpre_debugfs_file { struct hpre_debug *debug; }; +struct hpre_dfx { + atomic64_t value; + enum hpre_dfx_dbgfs_file type; +}; + /* * One HPRE controller has one PF and multiple VFs, some global configurations * which PF has need this structure. @@ -41,6 +57,7 @@ struct hpre_debugfs_file { */ struct hpre_debug { struct dentry *debug_root; + struct hpre_dfx dfx[HPRE_DFX_FILE_NUM]; struct hpre_debugfs_file files[HPRE_DEBUGFS_FILE_NUM]; }; diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c index 65425250b2e9..7b5cb27d473d 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c +++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c @@ -10,6 +10,7 @@ #include #include #include +#include #include "hpre.h" struct hpre_ctx; @@ -32,6 +33,9 @@ struct hpre_ctx; #define HPRE_SQE_DONE_SHIFT 30 #define HPRE_DH_MAX_P_SZ 512 +#define HPRE_DFX_SEC_TO_US 1000000 +#define HPRE_DFX_US_TO_NS 1000 + typedef void (*hpre_cb)(struct hpre_ctx *ctx, void *sqe); struct hpre_rsa_ctx { @@ -68,6 +72,7 @@ struct hpre_dh_ctx { struct hpre_ctx { struct hisi_qp *qp; struct hpre_asym_request **req_list; + struct hpre *hpre; spinlock_t req_lock; unsigned int key_sz; bool crt_g2_mode; @@ -90,6 +95,7 @@ struct hpre_asym_request { int err; int req_id; hpre_cb cb; + struct timespec64 req_time; }; static DEFINE_MUTEX(hpre_alg_lock); @@ -119,6 +125,7 @@ static void hpre_free_req_id(struct hpre_ctx *ctx, int req_id) static int hpre_add_req_to_ctx(struct hpre_asym_request *hpre_req) { struct hpre_ctx *ctx; + struct hpre_dfx *dfx; int id; ctx = hpre_req->ctx; @@ -129,6 +136,10 @@ static int hpre_add_req_to_ctx(struct hpre_asym_request *hpre_req) ctx->req_list[id] = hpre_req; hpre_req->req_id = id; + dfx = ctx->hpre->debug.dfx; + if (atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value)) + ktime_get_ts64(&hpre_req->req_time); + return id; } @@ -309,12 +320,16 @@ static int hpre_alg_res_post_hf(struct hpre_ctx *ctx, struct hpre_sqe *sqe, static int hpre_ctx_set(struct hpre_ctx *ctx, struct hisi_qp *qp, int qlen) { + struct hpre *hpre; + if (!ctx || !qp || qlen < 0) return -EINVAL; spin_lock_init(&ctx->req_lock); ctx->qp = qp; + hpre = container_of(ctx->qp->qm, struct hpre, qm); + ctx->hpre = hpre; ctx->req_list = kcalloc(qlen, sizeof(void *), GFP_KERNEL); if (!ctx->req_list) return -ENOMEM; @@ -337,38 +352,80 @@ static void hpre_ctx_clear(struct hpre_ctx *ctx, bool is_clear_all) ctx->key_sz = 0; } +static bool hpre_is_bd_timeout(struct hpre_asym_request *req, + u64 overtime_thrhld) +{ + struct timespec64 reply_time; + u64 time_use_us; + + ktime_get_ts64(&reply_time); + time_use_us = (reply_time.tv_sec - req->req_time.tv_sec) * + HPRE_DFX_SEC_TO_US + + (reply_time.tv_nsec - req->req_time.tv_nsec) / + HPRE_DFX_US_TO_NS; + + if (time_use_us <= overtime_thrhld) + return false; + + return true; +} + static void hpre_dh_cb(struct hpre_ctx *ctx, void *resp) { + struct hpre_dfx *dfx = ctx->hpre->debug.dfx; struct hpre_asym_request *req; struct kpp_request *areq; + u64 overtime_thrhld; int ret; ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req); areq = req->areq.dh; areq->dst_len = ctx->key_sz; + + overtime_thrhld = 
atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value); + if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld)) + atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value); + hpre_hw_data_clr_all(ctx, req, areq->dst, areq->src); kpp_request_complete(areq, ret); + atomic64_inc(&dfx[HPRE_RECV_CNT].value); } static void hpre_rsa_cb(struct hpre_ctx *ctx, void *resp) { + struct hpre_dfx *dfx = ctx->hpre->debug.dfx; struct hpre_asym_request *req; struct akcipher_request *areq; + u64 overtime_thrhld; int ret; ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req); + + overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value); + if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld)) + atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value); + areq = req->areq.rsa; areq->dst_len = ctx->key_sz; hpre_hw_data_clr_all(ctx, req, areq->dst, areq->src); akcipher_request_complete(areq, ret); + atomic64_inc(&dfx[HPRE_RECV_CNT].value); } static void hpre_alg_cb(struct hisi_qp *qp, void *resp) { struct hpre_ctx *ctx = qp->qp_ctx; + struct hpre_dfx *dfx = ctx->hpre->debug.dfx; struct hpre_sqe *sqe = resp; + struct hpre_asym_request *req = ctx->req_list[le16_to_cpu(sqe->tag)]; - ctx->req_list[le16_to_cpu(sqe->tag)]->cb(ctx, resp); + + if (unlikely(!req)) { + atomic64_inc(&dfx[HPRE_INVALID_REQ_CNT].value); + return; + } + + req->cb(ctx, resp); } static int hpre_ctx_init(struct hpre_ctx *ctx) @@ -436,6 +493,29 @@ static int hpre_msg_request_set(struct hpre_ctx *ctx, void *req, bool is_rsa) return 0; } +static int hpre_send(struct hpre_ctx *ctx, struct hpre_sqe *msg) +{ + struct hpre_dfx *dfx = ctx->hpre->debug.dfx; + int ctr = 0; + int ret; + + do { + atomic64_inc(&dfx[HPRE_SEND_CNT].value); + ret = hisi_qp_send(ctx->qp, msg); + if (ret != -EBUSY) + break; + atomic64_inc(&dfx[HPRE_SEND_BUSY_CNT].value); + } while (ctr++ < HPRE_TRY_SEND_TIMES); + + if (likely(!ret)) + return ret; + + if (ret != -EBUSY) + atomic64_inc(&dfx[HPRE_SEND_FAIL_CNT].value); + + return ret; +} + #ifdef CONFIG_CRYPTO_DH static int hpre_dh_compute_value(struct kpp_request *req) { @@ -444,7 +524,6 @@ static int hpre_dh_compute_value(struct kpp_request *req) void *tmp = kpp_request_ctx(req); struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ); struct hpre_sqe *msg = &hpre_req->req; - int ctr = 0; int ret; ret = hpre_msg_request_set(ctx, req, false); @@ -465,11 +544,9 @@ static int hpre_dh_compute_value(struct kpp_request *req) msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH_G2); else msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH); - do { - ret = hisi_qp_send(ctx->qp, msg); - } while (ret == -EBUSY && ctr++ < HPRE_TRY_SEND_TIMES); /* success */ + ret = hpre_send(ctx, msg); if (likely(!ret)) return -EINPROGRESS; @@ -647,7 +724,6 @@ static int hpre_rsa_enc(struct akcipher_request *req) void *tmp = akcipher_request_ctx(req); struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ); struct hpre_sqe *msg = &hpre_req->req; - int ctr = 0; int ret; /* For 512 and 1536 bits key size, use soft tfm instead */ @@ -677,11 +753,8 @@ static int hpre_rsa_enc(struct akcipher_request *req) if (unlikely(ret)) goto clear_all; - do { - ret = hisi_qp_send(ctx->qp, msg); - } while (ret == -EBUSY && ctr++ < HPRE_TRY_SEND_TIMES); - /* success */ + ret = hpre_send(ctx, msg); if (likely(!ret)) return -EINPROGRESS; @@ -699,7 +772,6 @@ static int hpre_rsa_dec(struct akcipher_request *req) void *tmp = akcipher_request_ctx(req); struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ); struct hpre_sqe *msg = 
&hpre_req->req; - int ctr = 0; int ret; /* For 512 and 1536 bits key size, use soft tfm instead */ @@ -736,11 +808,8 @@ static int hpre_rsa_dec(struct akcipher_request *req) if (unlikely(ret)) goto clear_all; - do { - ret = hisi_qp_send(ctx->qp, msg); - } while (ret == -EBUSY && ctr++ < HPRE_TRY_SEND_TIMES); - /* success */ + ret = hpre_send(ctx, msg); if (likely(!ret)) return -EINPROGRESS; diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c index c5ddd3a8ec85..fb3988f347f6 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_main.c +++ b/drivers/crypto/hisilicon/hpre/hpre_main.c @@ -159,6 +159,16 @@ static const struct debugfs_reg32 hpre_com_dfx_regs[] = { {"INT_STATUS ", HPRE_INT_STATUS}, }; +static const char *hpre_dfx_files[HPRE_DFX_FILE_NUM] = { + "send_cnt", + "recv_cnt", + "send_fail_cnt", + "send_busy_cnt", + "over_thrhld_cnt", + "overtime_thrhld", + "invalid_req_cnt" +}; + static int pf_q_num_set(const char *val, const struct kernel_param *kp) { return q_num_set(val, kp, HPRE_PCI_DEVICE_ID); @@ -524,6 +534,33 @@ static const struct file_operations hpre_ctrl_debug_fops = { .write = hpre_ctrl_debug_write, }; +static int hpre_debugfs_atomic64_get(void *data, u64 *val) +{ + struct hpre_dfx *dfx_item = data; + + *val = atomic64_read(&dfx_item->value); + + return 0; +} + +static int hpre_debugfs_atomic64_set(void *data, u64 val) +{ + struct hpre_dfx *dfx_item = data; + struct hpre_dfx *hpre_dfx = dfx_item - HPRE_OVERTIME_THRHLD; + + if (val) + return -EINVAL; + + if (dfx_item->type == HPRE_OVERTIME_THRHLD) + atomic64_set(&hpre_dfx[HPRE_OVER_THRHLD_CNT].value, 0); + atomic64_set(&dfx_item->value, val); + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(hpre_atomic64_ops, hpre_debugfs_atomic64_get, + hpre_debugfs_atomic64_set, "%llu\n"); + static int hpre_create_debugfs_file(struct hpre_debug *dbg, struct dentry *dir, enum hpre_ctrl_dbgfs_file type, int indx) { @@ -621,6 +658,22 @@ static int hpre_ctrl_debug_init(struct hpre_debug *debug) return hpre_cluster_debugfs_init(debug); } +static void hpre_dfx_debug_init(struct hpre_debug *debug) +{ + struct hpre *hpre = container_of(debug, struct hpre, debug); + struct hpre_dfx *dfx = hpre->debug.dfx; + struct hisi_qm *qm = &hpre->qm; + struct dentry *parent; + int i; + + parent = debugfs_create_dir("hpre_dfx", qm->debug.debug_root); + for (i = 0; i < HPRE_DFX_FILE_NUM; i++) { + dfx[i].type = i; + debugfs_create_file(hpre_dfx_files[i], 0644, parent, &dfx[i], + &hpre_atomic64_ops); + } +} + static int hpre_debugfs_init(struct hpre *hpre) { struct hisi_qm *qm = &hpre->qm; @@ -641,6 +694,9 @@ static int hpre_debugfs_init(struct hpre *hpre) if (ret) goto failed_to_create; } + + hpre_dfx_debug_init(&hpre->debug); + return 0; failed_to_create: -- cgit v1.2.3 From 6621e6492fbdf55d25ea7dd09c8a4cd520c0028d Mon Sep 17 00:00:00 2001 From: Longfang Liu Date: Fri, 15 May 2020 17:13:58 +0800 Subject: crypto: hisilicon/zip - add debugfs for Hisilicon ZIP Hisilicon ZIP engine driver uses debugfs to provides IO operation debug information Signed-off-by: Longfang Liu Signed-off-by: Shukun Tan Reviewed-by: Zaibo Xu Reviewed-by: Zhou Wang Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/zip/zip.h | 8 +++++ drivers/crypto/hisilicon/zip/zip_crypto.c | 9 +++++- drivers/crypto/hisilicon/zip/zip_main.c | 54 +++++++++++++++++++++++++++++++ 3 files changed, 70 insertions(+), 1 deletion(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/hisilicon/zip/zip.h b/drivers/crypto/hisilicon/zip/zip.h index 
82dc6f867171..f3ed4c0e5493 100644 --- a/drivers/crypto/hisilicon/zip/zip.h +++ b/drivers/crypto/hisilicon/zip/zip.h @@ -28,12 +28,20 @@ enum hisi_zip_error_type { HZIP_NC_ERR = 0x0d, }; +struct hisi_zip_dfx { + atomic64_t send_cnt; + atomic64_t recv_cnt; + atomic64_t send_busy_cnt; + atomic64_t err_bd_cnt; +}; + struct hisi_zip_ctrl; struct hisi_zip { struct hisi_qm qm; struct list_head list; struct hisi_zip_ctrl *ctrl; + struct hisi_zip_dfx dfx; }; struct hisi_zip_sqe { diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c index 5fb9d4b41bb9..c73707c2e539 100644 --- a/drivers/crypto/hisilicon/zip/zip_crypto.c +++ b/drivers/crypto/hisilicon/zip/zip_crypto.c @@ -332,6 +332,7 @@ static void hisi_zip_acomp_cb(struct hisi_qp *qp, void *data) { struct hisi_zip_sqe *sqe = data; struct hisi_zip_qp_ctx *qp_ctx = qp->qp_ctx; + struct hisi_zip_dfx *dfx = &qp_ctx->zip_dev->dfx; struct hisi_zip_req_q *req_q = &qp_ctx->req_q; struct hisi_zip_req *req = req_q->q + sqe->tag; struct acomp_req *acomp_req = req->req; @@ -339,12 +340,14 @@ static void hisi_zip_acomp_cb(struct hisi_qp *qp, void *data) u32 status, dlen, head_size; int err = 0; + atomic64_inc(&dfx->recv_cnt); status = sqe->dw3 & HZIP_BD_STATUS_M; if (status != 0 && status != HZIP_NC_ERR) { dev_err(dev, "%scompress fail in qp%u: %u, output: %u\n", (qp->alg_type == 0) ? "" : "de", qp->qp_id, status, sqe->produced); + atomic64_inc(&dfx->err_bd_cnt); err = -EIO; } dlen = sqe->produced; @@ -487,6 +490,7 @@ static int hisi_zip_do_work(struct hisi_zip_req *req, struct hisi_qp *qp = qp_ctx->qp; struct device *dev = &qp->qm->pdev->dev; struct hisi_acc_sgl_pool *pool = qp_ctx->sgl_pool; + struct hisi_zip_dfx *dfx = &qp_ctx->zip_dev->dfx; struct hisi_zip_sqe zip_sqe; dma_addr_t input; dma_addr_t output; @@ -516,9 +520,12 @@ static int hisi_zip_do_work(struct hisi_zip_req *req, hisi_zip_config_tag(&zip_sqe, req->req_id); /* send command to start a task */ + atomic64_inc(&dfx->send_cnt); ret = hisi_qp_send(qp, &zip_sqe); - if (ret < 0) + if (ret < 0) { + atomic64_inc(&dfx->send_busy_cnt); goto err_unmap_output; + } return -EINPROGRESS; diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c index 6161b1025b7f..cb3ed6bd3d86 100644 --- a/drivers/crypto/hisilicon/zip/zip_main.c +++ b/drivers/crypto/hisilicon/zip/zip_main.c @@ -99,6 +99,18 @@ struct hisi_zip_hw_error { const char *msg; }; +struct zip_dfx_item { + const char *name; + u32 offset; +}; + +static struct zip_dfx_item zip_dfx_files[] = { + {"send_cnt", offsetof(struct hisi_zip_dfx, send_cnt)}, + {"recv_cnt", offsetof(struct hisi_zip_dfx, recv_cnt)}, + {"send_busy_cnt", offsetof(struct hisi_zip_dfx, send_busy_cnt)}, + {"err_bd_cnt", offsetof(struct hisi_zip_dfx, err_bd_cnt)}, +}; + static const struct hisi_zip_hw_error zip_hw_error[] = { { .int_msk = BIT(0), .msg = "zip_ecc_1bitt_err" }, { .int_msk = BIT(1), .msg = "zip_ecc_2bit_err" }, @@ -469,6 +481,27 @@ static const struct file_operations ctrl_debug_fops = { .write = ctrl_debug_write, }; + +static int zip_debugfs_atomic64_set(void *data, u64 val) +{ + if (val) + return -EINVAL; + + atomic64_set((atomic64_t *)data, 0); + + return 0; +} + +static int zip_debugfs_atomic64_get(void *data, u64 *val) +{ + *val = atomic64_read((atomic64_t *)data); + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(zip_atomic64_ops, zip_debugfs_atomic64_get, + zip_debugfs_atomic64_set, "%llu\n"); + static int hisi_zip_core_debug_init(struct hisi_zip_ctrl *ctrl) { struct hisi_zip *hisi_zip = 
ctrl->hisi_zip; @@ -500,6 +533,25 @@ static int hisi_zip_core_debug_init(struct hisi_zip_ctrl *ctrl) return 0; } +static void hisi_zip_dfx_debug_init(struct hisi_qm *qm) +{ + struct hisi_zip *zip = container_of(qm, struct hisi_zip, qm); + struct hisi_zip_dfx *dfx = &zip->dfx; + struct dentry *tmp_dir; + void *data; + int i; + + tmp_dir = debugfs_create_dir("zip_dfx", qm->debug.debug_root); + for (i = 0; i < ARRAY_SIZE(zip_dfx_files); i++) { + data = (atomic64_t *)((uintptr_t)dfx + zip_dfx_files[i].offset); + debugfs_create_file(zip_dfx_files[i].name, + 0644, + tmp_dir, + data, + &zip_atomic64_ops); + } +} + static int hisi_zip_ctrl_debug_init(struct hisi_zip_ctrl *ctrl) { int i; @@ -538,6 +590,8 @@ static int hisi_zip_debugfs_init(struct hisi_zip *hisi_zip) goto failed_to_create; } + hisi_zip_dfx_debug_init(qm); + return 0; failed_to_create: -- cgit v1.2.3 From c31dc9fe165d1b53c0494e0260a798d491de7bb4 Mon Sep 17 00:00:00 2001 From: Shukun Tan Date: Fri, 15 May 2020 17:13:59 +0800 Subject: crypto: hisilicon/qm - add DebugFS for xQC and xQE dump Add dump information of SQC/CQC/EQC/AEQC/SQE/CQE/EQE/AEQE. Signed-off-by: Shukun Tan Reviewed-by: Zhou Wang Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/hpre/hpre_main.c | 4 + drivers/crypto/hisilicon/qm.c | 511 +++++++++++++++++++++++++++--- drivers/crypto/hisilicon/qm.h | 2 + drivers/crypto/hisilicon/sec2/sec_main.c | 6 + drivers/crypto/hisilicon/zip/zip_main.c | 4 + 5 files changed, 488 insertions(+), 39 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c index fb3988f347f6..38405b5cb884 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_main.c +++ b/drivers/crypto/hisilicon/hpre/hpre_main.c @@ -84,6 +84,8 @@ #define HPRE_OOO_ECC_2BIT_ERR BIT(5) #define HPRE_VIA_MSI_DSM 1 +#define HPRE_SQE_MASK_OFFSET 8 +#define HPRE_SQE_MASK_LEN 24 static struct hisi_qm_list hpre_devices; static const char hpre_name[] = "hisi_hpre"; @@ -683,6 +685,8 @@ static int hpre_debugfs_init(struct hpre *hpre) dir = debugfs_create_dir(dev_name(dev), hpre_debugfs_root); qm->debug.debug_root = dir; + qm->debug.sqe_mask_offset = HPRE_SQE_MASK_OFFSET; + qm->debug.sqe_mask_len = HPRE_SQE_MASK_LEN; ret = hisi_qm_debug_init(qm); if (ret) diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index 7c1982fd2156..57ad13149e05 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -176,9 +176,12 @@ #define QMC_ALIGN(sz) ALIGN(sz, 32) #define QM_DBG_READ_LEN 256 +#define QM_DBG_WRITE_LEN 1024 #define QM_DBG_TMP_BUF_LEN 22 #define QM_PCI_COMMAND_INVALID ~0 +#define QM_SQE_ADDR_MASK GENMASK(7, 0) + #define QM_MK_CQC_DW3_V1(hop_num, pg_sz, buf_sz, cqe_sz) \ (((hop_num) << QM_CQ_HOP_NUM_SHIFT) | \ ((pg_sz) << QM_CQ_PAGE_SIZE_SHIFT) | \ @@ -1064,6 +1067,473 @@ static const struct file_operations qm_regs_fops = { .release = single_release, }; +static ssize_t qm_cmd_read(struct file *filp, char __user *buffer, + size_t count, loff_t *pos) +{ + char buf[QM_DBG_READ_LEN]; + int len; + + if (*pos) + return 0; + + if (count < QM_DBG_READ_LEN) + return -ENOSPC; + + len = snprintf(buf, QM_DBG_READ_LEN, "%s\n", + "Please echo help to cmd to get help information"); + + if (copy_to_user(buffer, buf, len)) + return -EFAULT; + + return (*pos = len); +} + +static void *qm_ctx_alloc(struct hisi_qm *qm, size_t ctx_size, + dma_addr_t *dma_addr) +{ + struct device *dev = &qm->pdev->dev; + void *ctx_addr; + + ctx_addr = kzalloc(ctx_size, GFP_KERNEL); + if (!ctx_addr) 
+ return ERR_PTR(-ENOMEM); + + *dma_addr = dma_map_single(dev, ctx_addr, ctx_size, DMA_FROM_DEVICE); + if (dma_mapping_error(dev, *dma_addr)) { + dev_err(dev, "DMA mapping error!\n"); + kfree(ctx_addr); + return ERR_PTR(-ENOMEM); + } + + return ctx_addr; +} + +static void qm_ctx_free(struct hisi_qm *qm, size_t ctx_size, + const void *ctx_addr, dma_addr_t *dma_addr) +{ + struct device *dev = &qm->pdev->dev; + + dma_unmap_single(dev, *dma_addr, ctx_size, DMA_FROM_DEVICE); + kfree(ctx_addr); +} + +static int dump_show(struct hisi_qm *qm, void *info, + unsigned int info_size, char *info_name) +{ + struct device *dev = &qm->pdev->dev; + u8 *info_buf, *info_curr = info; + u32 i; +#define BYTE_PER_DW 4 + + info_buf = kzalloc(info_size, GFP_KERNEL); + if (!info_buf) + return -ENOMEM; + + for (i = 0; i < info_size; i++, info_curr++) { + if (i % BYTE_PER_DW == 0) + info_buf[i + 3UL] = *info_curr; + else if (i % BYTE_PER_DW == 1) + info_buf[i + 1UL] = *info_curr; + else if (i % BYTE_PER_DW == 2) + info_buf[i - 1] = *info_curr; + else if (i % BYTE_PER_DW == 3) + info_buf[i - 3] = *info_curr; + } + + dev_info(dev, "%s DUMP\n", info_name); + for (i = 0; i < info_size; i += BYTE_PER_DW) { + pr_info("DW%d: %02X%02X %02X%02X\n", i / BYTE_PER_DW, + info_buf[i], info_buf[i + 1UL], + info_buf[i + 2UL], info_buf[i + 3UL]); + } + + kfree(info_buf); + + return 0; +} + +static int qm_dump_sqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id) +{ + return qm_mb(qm, QM_MB_CMD_SQC, dma_addr, qp_id, 1); +} + +static int qm_dump_cqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id) +{ + return qm_mb(qm, QM_MB_CMD_CQC, dma_addr, qp_id, 1); +} + +static int qm_sqc_dump(struct hisi_qm *qm, const char *s) +{ + struct device *dev = &qm->pdev->dev; + struct qm_sqc *sqc, *sqc_curr; + dma_addr_t sqc_dma; + u32 qp_id; + int ret; + + if (!s) + return -EINVAL; + + ret = kstrtou32(s, 0, &qp_id); + if (ret || qp_id >= qm->qp_num) { + dev_err(dev, "Please input qp num (0-%d)", qm->qp_num - 1); + return -EINVAL; + } + + sqc = qm_ctx_alloc(qm, sizeof(*sqc), &sqc_dma); + if (IS_ERR(sqc)) + return PTR_ERR(sqc); + + ret = qm_dump_sqc_raw(qm, sqc_dma, qp_id); + if (ret) { + down_read(&qm->qps_lock); + if (qm->sqc) { + sqc_curr = qm->sqc + qp_id; + + ret = dump_show(qm, sqc_curr, sizeof(*sqc), + "SOFT SQC"); + if (ret) + dev_info(dev, "Show soft sqc failed!\n"); + } + up_read(&qm->qps_lock); + + goto err_free_ctx; + } + + ret = dump_show(qm, sqc, sizeof(*sqc), "SQC"); + if (ret) + dev_info(dev, "Show hw sqc failed!\n"); + +err_free_ctx: + qm_ctx_free(qm, sizeof(*sqc), sqc, &sqc_dma); + return ret; +} + +static int qm_cqc_dump(struct hisi_qm *qm, const char *s) +{ + struct device *dev = &qm->pdev->dev; + struct qm_cqc *cqc, *cqc_curr; + dma_addr_t cqc_dma; + u32 qp_id; + int ret; + + if (!s) + return -EINVAL; + + ret = kstrtou32(s, 0, &qp_id); + if (ret || qp_id >= qm->qp_num) { + dev_err(dev, "Please input qp num (0-%d)", qm->qp_num - 1); + return -EINVAL; + } + + cqc = qm_ctx_alloc(qm, sizeof(*cqc), &cqc_dma); + if (IS_ERR(cqc)) + return PTR_ERR(cqc); + + ret = qm_dump_cqc_raw(qm, cqc_dma, qp_id); + if (ret) { + down_read(&qm->qps_lock); + if (qm->cqc) { + cqc_curr = qm->cqc + qp_id; + + ret = dump_show(qm, cqc_curr, sizeof(*cqc), + "SOFT CQC"); + if (ret) + dev_info(dev, "Show soft cqc failed!\n"); + } + up_read(&qm->qps_lock); + + goto err_free_ctx; + } + + ret = dump_show(qm, cqc, sizeof(*cqc), "CQC"); + if (ret) + dev_info(dev, "Show hw cqc failed!\n"); + +err_free_ctx: + qm_ctx_free(qm, sizeof(*cqc), cqc, &cqc_dma); + 
return ret; +} + +static int qm_eqc_aeqc_dump(struct hisi_qm *qm, char *s, size_t size, + int cmd, char *name) +{ + struct device *dev = &qm->pdev->dev; + dma_addr_t xeqc_dma; + void *xeqc; + int ret; + + if (strsep(&s, " ")) { + dev_err(dev, "Please do not input extra characters!\n"); + return -EINVAL; + } + + xeqc = qm_ctx_alloc(qm, size, &xeqc_dma); + if (IS_ERR(xeqc)) + return PTR_ERR(xeqc); + + ret = qm_mb(qm, cmd, xeqc_dma, 0, 1); + if (ret) + goto err_free_ctx; + + ret = dump_show(qm, xeqc, size, name); + if (ret) + dev_info(dev, "Show hw %s failed!\n", name); + +err_free_ctx: + qm_ctx_free(qm, size, xeqc, &xeqc_dma); + return ret; +} + +static int q_dump_param_parse(struct hisi_qm *qm, char *s, + u32 *e_id, u32 *q_id) +{ + struct device *dev = &qm->pdev->dev; + unsigned int qp_num = qm->qp_num; + char *presult; + int ret; + + presult = strsep(&s, " "); + if (!presult) { + dev_err(dev, "Please input qp number!\n"); + return -EINVAL; + } + + ret = kstrtou32(presult, 0, q_id); + if (ret || *q_id >= qp_num) { + dev_err(dev, "Please input qp num (0-%d)", qp_num - 1); + return -EINVAL; + } + + presult = strsep(&s, " "); + if (!presult) { + dev_err(dev, "Please input sqe number!\n"); + return -EINVAL; + } + + ret = kstrtou32(presult, 0, e_id); + if (ret || *e_id >= QM_Q_DEPTH) { + dev_err(dev, "Please input sqe num (0-%d)", QM_Q_DEPTH - 1); + return -EINVAL; + } + + if (strsep(&s, " ")) { + dev_err(dev, "Please do not input extra characters!\n"); + return -EINVAL; + } + + return 0; +} + +static int qm_sq_dump(struct hisi_qm *qm, char *s) +{ + struct device *dev = &qm->pdev->dev; + void *sqe, *sqe_curr; + struct hisi_qp *qp; + u32 qp_id, sqe_id; + int ret; + + ret = q_dump_param_parse(qm, s, &sqe_id, &qp_id); + if (ret) + return ret; + + sqe = kzalloc(qm->sqe_size * QM_Q_DEPTH, GFP_KERNEL); + if (!sqe) + return -ENOMEM; + + qp = &qm->qp_array[qp_id]; + memcpy(sqe, qp->sqe, qm->sqe_size * QM_Q_DEPTH); + sqe_curr = sqe + (u32)(sqe_id * qm->sqe_size); + memset(sqe_curr + qm->debug.sqe_mask_offset, QM_SQE_ADDR_MASK, + qm->debug.sqe_mask_len); + + ret = dump_show(qm, sqe_curr, qm->sqe_size, "SQE"); + if (ret) + dev_info(dev, "Show sqe failed!\n"); + + kfree(sqe); + + return ret; +} + +static int qm_cq_dump(struct hisi_qm *qm, char *s) +{ + struct device *dev = &qm->pdev->dev; + struct qm_cqe *cqe_curr; + struct hisi_qp *qp; + u32 qp_id, cqe_id; + int ret; + + ret = q_dump_param_parse(qm, s, &cqe_id, &qp_id); + if (ret) + return ret; + + qp = &qm->qp_array[qp_id]; + cqe_curr = qp->cqe + cqe_id; + ret = dump_show(qm, cqe_curr, sizeof(struct qm_cqe), "CQE"); + if (ret) + dev_info(dev, "Show cqe failed!\n"); + + return ret; +} + +static int qm_eq_aeq_dump(struct hisi_qm *qm, const char *s, + size_t size, char *name) +{ + struct device *dev = &qm->pdev->dev; + void *xeqe; + u32 xeqe_id; + int ret; + + if (!s) + return -EINVAL; + + ret = kstrtou32(s, 0, &xeqe_id); + if (ret || xeqe_id >= QM_Q_DEPTH) { + dev_err(dev, "Please input aeqe num (0-%d)", QM_Q_DEPTH - 1); + return -EINVAL; + } + + down_read(&qm->qps_lock); + + if (qm->eqe && !strcmp(name, "EQE")) { + xeqe = qm->eqe + xeqe_id; + } else if (qm->aeqe && !strcmp(name, "AEQE")) { + xeqe = qm->aeqe + xeqe_id; + } else { + ret = -EINVAL; + goto err_unlock; + } + + ret = dump_show(qm, xeqe, size, name); + if (ret) + dev_info(dev, "Show %s failed!\n", name); + +err_unlock: + up_read(&qm->qps_lock); + return ret; +} + +static int qm_dbg_help(struct hisi_qm *qm, char *s) +{ + struct device *dev = &qm->pdev->dev; + + if (strsep(&s, " ")) { + 
dev_err(dev, "Please do not input extra characters!\n"); + return -EINVAL; + } + + dev_info(dev, "available commands:\n"); + dev_info(dev, "sqc \n"); + dev_info(dev, "cqc \n"); + dev_info(dev, "eqc\n"); + dev_info(dev, "aeqc\n"); + dev_info(dev, "sq \n"); + dev_info(dev, "cq \n"); + dev_info(dev, "eq \n"); + dev_info(dev, "aeq \n"); + + return 0; +} + +static int qm_cmd_write_dump(struct hisi_qm *qm, const char *cmd_buf) +{ + struct device *dev = &qm->pdev->dev; + char *presult, *s; + int ret; + + s = kstrdup(cmd_buf, GFP_KERNEL); + if (!s) + return -ENOMEM; + + presult = strsep(&s, " "); + if (!presult) { + kfree(s); + return -EINVAL; + } + + if (!strcmp(presult, "sqc")) + ret = qm_sqc_dump(qm, s); + else if (!strcmp(presult, "cqc")) + ret = qm_cqc_dump(qm, s); + else if (!strcmp(presult, "eqc")) + ret = qm_eqc_aeqc_dump(qm, s, sizeof(struct qm_eqc), + QM_MB_CMD_EQC, "EQC"); + else if (!strcmp(presult, "aeqc")) + ret = qm_eqc_aeqc_dump(qm, s, sizeof(struct qm_aeqc), + QM_MB_CMD_AEQC, "AEQC"); + else if (!strcmp(presult, "sq")) + ret = qm_sq_dump(qm, s); + else if (!strcmp(presult, "cq")) + ret = qm_cq_dump(qm, s); + else if (!strcmp(presult, "eq")) + ret = qm_eq_aeq_dump(qm, s, sizeof(struct qm_eqe), "EQE"); + else if (!strcmp(presult, "aeq")) + ret = qm_eq_aeq_dump(qm, s, sizeof(struct qm_aeqe), "AEQE"); + else if (!strcmp(presult, "help")) + ret = qm_dbg_help(qm, s); + else + ret = -EINVAL; + + if (ret) + dev_info(dev, "Please echo help\n"); + + kfree(s); + + return ret; +} + +static ssize_t qm_cmd_write(struct file *filp, const char __user *buffer, + size_t count, loff_t *pos) +{ + struct hisi_qm *qm = filp->private_data; + char *cmd_buf, *cmd_buf_tmp; + int ret; + + if (*pos) + return 0; + + /* Judge if the instance is being reset. */ + if (unlikely(atomic_read(&qm->status.flags) == QM_STOP)) + return 0; + + if (count > QM_DBG_WRITE_LEN) + return -ENOSPC; + + cmd_buf = kzalloc(count + 1, GFP_KERNEL); + if (!cmd_buf) + return -ENOMEM; + + if (copy_from_user(cmd_buf, buffer, count)) { + kfree(cmd_buf); + return -EFAULT; + } + + cmd_buf[count] = '\0'; + + cmd_buf_tmp = strchr(cmd_buf, '\n'); + if (cmd_buf_tmp) { + *cmd_buf_tmp = '\0'; + count = cmd_buf_tmp - cmd_buf + 1; + } + + ret = qm_cmd_write_dump(qm, cmd_buf); + if (ret) { + kfree(cmd_buf); + return ret; + } + + kfree(cmd_buf); + + return count; +} + +static const struct file_operations qm_cmd_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = qm_cmd_read, + .write = qm_cmd_write, +}; + static int qm_create_debugfs_file(struct hisi_qm *qm, enum qm_debug_file index) { struct dentry *qm_d = qm->debug.qm_d; @@ -1389,45 +1859,6 @@ int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg) } EXPORT_SYMBOL_GPL(hisi_qm_start_qp); -static void *qm_ctx_alloc(struct hisi_qm *qm, size_t ctx_size, - dma_addr_t *dma_addr) -{ - struct device *dev = &qm->pdev->dev; - void *ctx_addr; - - ctx_addr = kzalloc(ctx_size, GFP_KERNEL); - if (!ctx_addr) - return ERR_PTR(-ENOMEM); - - *dma_addr = dma_map_single(dev, ctx_addr, ctx_size, DMA_FROM_DEVICE); - if (dma_mapping_error(dev, *dma_addr)) { - dev_err(dev, "DMA mapping error!\n"); - kfree(ctx_addr); - return ERR_PTR(-ENOMEM); - } - - return ctx_addr; -} - -static void qm_ctx_free(struct hisi_qm *qm, size_t ctx_size, - const void *ctx_addr, dma_addr_t *dma_addr) -{ - struct device *dev = &qm->pdev->dev; - - dma_unmap_single(dev, *dma_addr, ctx_size, DMA_FROM_DEVICE); - kfree(ctx_addr); -} - -static int qm_dump_sqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id) -{ - return 
qm_mb(qm, QM_MB_CMD_SQC, dma_addr, qp_id, 1); -} - -static int qm_dump_cqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id) -{ - return qm_mb(qm, QM_MB_CMD_CQC, dma_addr, qp_id, 1); -} - /** * Determine whether the queue is cleared by judging the tail pointers of * sq and cq. @@ -2346,6 +2777,8 @@ int hisi_qm_debug_init(struct hisi_qm *qm) debugfs_create_file("qm_regs", 0444, qm->debug.qm_d, qm, &qm_regs_fops); + debugfs_create_file("cmd", 0444, qm->debug.qm_d, qm, &qm_cmd_fops); + debugfs_create_file("status", 0444, qm->debug.qm_d, qm, &qm_status_fops); for (i = 0; i < ARRAY_SIZE(qm_dfx_files); i++) { diff --git a/drivers/crypto/hisilicon/qm.h b/drivers/crypto/hisilicon/qm.h index e4b46a7b10ef..632674423c04 100644 --- a/drivers/crypto/hisilicon/qm.h +++ b/drivers/crypto/hisilicon/qm.h @@ -137,6 +137,8 @@ struct debugfs_file { struct qm_debug { u32 curr_qm_qp_num; + u32 sqe_mask_offset; + u32 sqe_mask_len; struct qm_dfx dfx; struct dentry *debug_root; struct dentry *qm_d; diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index 5ea44ad8d51c..829959bef8fb 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -80,6 +80,9 @@ #define SEC_VF_CNT_MASK 0xffffffc0 #define SEC_DBGFS_VAL_MAX_LEN 20 +#define SEC_SQE_MASK_OFFSET 64 +#define SEC_SQE_MASK_LEN 48 + #define SEC_ADDR(qm, offset) ((qm)->io_base + (offset) + \ SEC_ENGINE_PF_CFG_OFF + SEC_ACC_COMMON_REG_OFF) @@ -632,6 +635,9 @@ static int sec_debugfs_init(struct sec_dev *sec) qm->debug.debug_root = debugfs_create_dir(dev_name(dev), sec_debugfs_root); + + qm->debug.sqe_mask_offset = SEC_SQE_MASK_OFFSET; + qm->debug.sqe_mask_len = SEC_SQE_MASK_LEN; ret = hisi_qm_debug_init(qm); if (ret) goto failed_to_create; diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c index cb3ed6bd3d86..87db2e1f5d37 100644 --- a/drivers/crypto/hisilicon/zip/zip_main.c +++ b/drivers/crypto/hisilicon/zip/zip_main.c @@ -89,6 +89,8 @@ #define HZIP_WR_PORT BIT(11) #define HZIP_BUF_SIZE 22 +#define HZIP_SQE_MASK_OFFSET 64 +#define HZIP_SQE_MASK_LEN 48 static const char hisi_zip_name[] = "hisi_zip"; static struct dentry *hzip_debugfs_root; @@ -578,6 +580,8 @@ static int hisi_zip_debugfs_init(struct hisi_zip *hisi_zip) dev_d = debugfs_create_dir(dev_name(dev), hzip_debugfs_root); + qm->debug.sqe_mask_offset = HZIP_SQE_MASK_OFFSET; + qm->debug.sqe_mask_len = HZIP_SQE_MASK_LEN; qm->debug.debug_root = dev_d; ret = hisi_qm_debug_init(qm); if (ret) -- cgit v1.2.3 From 988453fb2f18be3f3915220e4c6f18018186aa89 Mon Sep 17 00:00:00 2001 From: Shukun Tan Date: Fri, 15 May 2020 17:14:00 +0800 Subject: crypto: hisilicon/qm - change debugfs file name from qm_regs to regs The debugfs qm_regs file is already in the qm directory, so no qm_ prefix is required. 
Signed-off-by: Shukun Tan Reviewed-by: Zhou Wang Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/qm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index 57ad13149e05..a781c0225198 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -2775,7 +2775,7 @@ int hisi_qm_debug_init(struct hisi_qm *qm) goto failed_to_create; } - debugfs_create_file("qm_regs", 0444, qm->debug.qm_d, qm, &qm_regs_fops); + debugfs_create_file("regs", 0444, qm->debug.qm_d, qm, &qm_regs_fops); debugfs_create_file("cmd", 0444, qm->debug.qm_d, qm, &qm_cmd_fops); -- cgit v1.2.3 From ae4052c59c2d12ee68b3e48eeef1d5ef202b1a0a Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Tue, 19 May 2020 22:36:54 +0200 Subject: crypto: cavium/nitrox - Fix a typo in a comment s/NITORX/NITROX/ Signed-off-by: Christophe JAILLET Signed-off-by: Herbert Xu --- drivers/crypto/cavium/nitrox/nitrox_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/cavium/nitrox/nitrox_main.c b/drivers/crypto/cavium/nitrox/nitrox_main.c index e91be9b8b083..788c6607078b 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_main.c +++ b/drivers/crypto/cavium/nitrox/nitrox_main.c @@ -346,7 +346,7 @@ static void nitrox_pf_sw_cleanup(struct nitrox_device *ndev) } /** - * nitrox_bist_check - Check NITORX BIST registers status + * nitrox_bist_check - Check NITROX BIST registers status * @ndev: NITROX device */ static int nitrox_bist_check(struct nitrox_device *ndev) -- cgit v1.2.3 From 58ca0060ec4e51208d2eee12198fc55fd9e4feb3 Mon Sep 17 00:00:00 2001 From: Weili Qian Date: Wed, 20 May 2020 17:19:50 +0800 Subject: crypto: hisilicon - fix driver compatibility issue with different versions of devices In order to be compatible with devices of different versions, V1 in the accelerator driver is now isolated, and other versions are the previous V2 processing flow. 
Signed-off-by: Weili Qian Signed-off-by: Shukun Tan Reviewed-by: Zhou Wang Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/hpre/hpre_main.c | 10 +--- drivers/crypto/hisilicon/qm.c | 89 ++++++++++--------------------- drivers/crypto/hisilicon/qm.h | 13 ++--- drivers/crypto/hisilicon/sec2/sec_main.c | 19 ++----- drivers/crypto/hisilicon/zip/zip_main.c | 20 ++----- 5 files changed, 39 insertions(+), 112 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c index 38405b5cb884..a3ee127a70e3 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_main.c +++ b/drivers/crypto/hisilicon/hpre/hpre_main.c @@ -717,19 +717,13 @@ static void hpre_debugfs_exit(struct hpre *hpre) static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) { - enum qm_hw_ver rev_id; - - rev_id = hisi_qm_get_hw_version(pdev); - if (rev_id < 0) - return -ENODEV; - - if (rev_id == QM_HW_V1) { + if (pdev->revision == QM_HW_V1) { pci_warn(pdev, "HPRE version 1 is not supported!\n"); return -EINVAL; } qm->pdev = pdev; - qm->ver = rev_id; + qm->ver = pdev->revision; qm->sqe_size = HPRE_SQE_SIZE; qm->dev_name = hpre_name; diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index a781c0225198..9bb263cec6c3 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -737,13 +737,14 @@ static void qm_irq_unregister(struct hisi_qm *qm) free_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR), qm); - if (qm->ver == QM_HW_V2) { - free_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), qm); + if (qm->ver == QM_HW_V1) + return; - if (qm->fun_type == QM_HW_PF) - free_irq(pci_irq_vector(pdev, - QM_ABNORMAL_EVENT_IRQ_VECTOR), qm); - } + free_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), qm); + + if (qm->fun_type == QM_HW_PF) + free_irq(pci_irq_vector(pdev, + QM_ABNORMAL_EVENT_IRQ_VECTOR), qm); } static void qm_init_qp_status(struct hisi_qp *qp) @@ -764,36 +765,26 @@ static void qm_vft_data_cfg(struct hisi_qm *qm, enum vft_type type, u32 base, if (number > 0) { switch (type) { case SQC_VFT: - switch (qm->ver) { - case QM_HW_V1: + if (qm->ver == QM_HW_V1) { tmp = QM_SQC_VFT_BUF_SIZE | QM_SQC_VFT_SQC_SIZE | QM_SQC_VFT_INDEX_NUMBER | QM_SQC_VFT_VALID | (u64)base << QM_SQC_VFT_START_SQN_SHIFT; - break; - case QM_HW_V2: + } else { tmp = (u64)base << QM_SQC_VFT_START_SQN_SHIFT | QM_SQC_VFT_VALID | (u64)(number - 1) << QM_SQC_VFT_SQN_SHIFT; - break; - case QM_HW_UNKNOWN: - break; } break; case CQC_VFT: - switch (qm->ver) { - case QM_HW_V1: + if (qm->ver == QM_HW_V1) { tmp = QM_CQC_VFT_BUF_SIZE | QM_CQC_VFT_SQC_SIZE | QM_CQC_VFT_INDEX_NUMBER | QM_CQC_VFT_VALID; - break; - case QM_HW_V2: + } else { tmp = QM_CQC_VFT_VALID; - break; - case QM_HW_UNKNOWN: - break; } break; } @@ -1777,7 +1768,7 @@ static int qm_qp_ctx_cfg(struct hisi_qp *qp, int qp_id, int pasid) if (ver == QM_HW_V1) { sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V1(0, 0, 0, qm->sqe_size)); sqc->w8 = cpu_to_le16(QM_Q_DEPTH - 1); - } else if (ver == QM_HW_V2) { + } else { sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V2(qm->sqe_size)); sqc->w8 = 0; /* rand_qc */ } @@ -1804,7 +1795,7 @@ static int qm_qp_ctx_cfg(struct hisi_qp *qp, int qp_id, int pasid) if (ver == QM_HW_V1) { cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V1(0, 0, 0, 4)); cqc->w8 = cpu_to_le16(QM_Q_DEPTH - 1); - } else if (ver == QM_HW_V2) { + } else { cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V2(4)); cqc->w8 = 0; } @@ -2020,12 +2011,13 @@ static void hisi_qm_cache_wb(struct hisi_qm *qm) { unsigned int val; - if 
(qm->ver == QM_HW_V2) { - writel(0x1, qm->io_base + QM_CACHE_WB_START); - if (readl_relaxed_poll_timeout(qm->io_base + QM_CACHE_WB_DONE, - val, val & BIT(0), 10, 1000)) - dev_err(&qm->pdev->dev, "QM writeback sqc cache fail!\n"); - } + if (qm->ver == QM_HW_V1) + return; + + writel(0x1, qm->io_base + QM_CACHE_WB_START); + if (readl_relaxed_poll_timeout(qm->io_base + QM_CACHE_WB_DONE, + val, val & BIT(0), 10, 1000)) + dev_err(&qm->pdev->dev, "QM writeback sqc cache fail!\n"); } static void qm_qp_event_notifier(struct hisi_qp *qp) @@ -2082,12 +2074,12 @@ static int hisi_qm_uacce_mmap(struct uacce_queue *q, switch (qfr->type) { case UACCE_QFRT_MMIO: - if (qm->ver == QM_HW_V2) { - if (sz > PAGE_SIZE * (QM_DOORBELL_PAGE_NR + - QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE)) + if (qm->ver == QM_HW_V1) { + if (sz > PAGE_SIZE * QM_DOORBELL_PAGE_NR) return -EINVAL; } else { - if (sz > PAGE_SIZE * QM_DOORBELL_PAGE_NR) + if (sz > PAGE_SIZE * (QM_DOORBELL_PAGE_NR + + QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE)) return -EINVAL; } @@ -2342,16 +2334,10 @@ static void hisi_qm_pre_init(struct hisi_qm *qm) { struct pci_dev *pdev = qm->pdev; - switch (qm->ver) { - case QM_HW_V1: + if (qm->ver == QM_HW_V1) qm->ops = &qm_hw_ops_v1; - break; - case QM_HW_V2: + else qm->ops = &qm_hw_ops_v2; - break; - default: - return; - } pci_set_drvdata(pdev, qm); mutex_init(&qm->mailbox_lock); @@ -2859,25 +2845,6 @@ static enum acc_err_result qm_hw_error_handle(struct hisi_qm *qm) return qm->ops->hw_error_handle(qm); } -/** - * hisi_qm_get_hw_version() - Get hardware version of a qm. - * @pdev: The device which hardware version we want to get. - * - * This function gets the hardware version of a qm. Return QM_HW_UNKNOWN - * if the hardware version is not supported. - */ -enum qm_hw_ver hisi_qm_get_hw_version(struct pci_dev *pdev) -{ - switch (pdev->revision) { - case QM_HW_V1: - case QM_HW_V2: - return pdev->revision; - default: - return QM_HW_UNKNOWN; - } -} -EXPORT_SYMBOL_GPL(hisi_qm_get_hw_version); - /** * hisi_qm_dev_err_init() - Initialize device error configuration. * @qm: The qm for which we want to do error initialization. 
@@ -3846,7 +3813,7 @@ static int qm_irq_register(struct hisi_qm *qm) if (ret) return ret; - if (qm->ver == QM_HW_V2) { + if (qm->ver != QM_HW_V1) { ret = request_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), qm_aeq_irq, IRQF_SHARED, qm->dev_name, qm); if (ret) @@ -3942,7 +3909,7 @@ int hisi_qm_init(struct hisi_qm *qm) if (ret) goto err_free_irq_vectors; - if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V2) { + if (qm->fun_type == QM_HW_VF && qm->ver != QM_HW_V1) { /* v2 starts to support get vft by mailbox */ ret = hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num); if (ret) diff --git a/drivers/crypto/hisilicon/qm.h b/drivers/crypto/hisilicon/qm.h index 632674423c04..0a351de8d838 100644 --- a/drivers/crypto/hisilicon/qm.h +++ b/drivers/crypto/hisilicon/qm.h @@ -108,6 +108,7 @@ enum qm_hw_ver { QM_HW_UNKNOWN = -1, QM_HW_V1 = 0x20, QM_HW_V2 = 0x21, + QM_HW_V3 = 0x30, }; enum qm_fun_type { @@ -287,7 +288,6 @@ static inline int q_num_set(const char *val, const struct kernel_param *kp, struct pci_dev *pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI, device, NULL); u32 n, q_num; - u8 rev_id; int ret; if (!val) @@ -298,17 +298,10 @@ static inline int q_num_set(const char *val, const struct kernel_param *kp, pr_info("No device found currently, suppose queue number is %d\n", q_num); } else { - rev_id = pdev->revision; - switch (rev_id) { - case QM_HW_V1: + if (pdev->revision == QM_HW_V1) q_num = QM_QNUM_V1; - break; - case QM_HW_V2: + else q_num = QM_QNUM_V2; - break; - default: - return -EINVAL; - } } ret = kstrtou32(val, 10, &n); diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index 829959bef8fb..a4cb58b54b25 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -728,18 +728,10 @@ static int sec_pf_probe_init(struct sec_dev *sec) struct hisi_qm *qm = &sec->qm; int ret; - switch (qm->ver) { - case QM_HW_V1: + if (qm->ver == QM_HW_V1) qm->ctrl_qp_num = SEC_QUEUE_NUM_V1; - break; - - case QM_HW_V2: + else qm->ctrl_qp_num = SEC_QUEUE_NUM_V2; - break; - - default: - return -EINVAL; - } qm->err_ini = &sec_err_ini; @@ -755,15 +747,10 @@ static int sec_pf_probe_init(struct sec_dev *sec) static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) { - enum qm_hw_ver rev_id; int ret; - rev_id = hisi_qm_get_hw_version(pdev); - if (rev_id == QM_HW_UNKNOWN) - return -ENODEV; - qm->pdev = pdev; - qm->ver = rev_id; + qm->ver = pdev->revision; qm->sqe_size = SEC_SQE_SIZE; qm->dev_name = sec_name; diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c index 87db2e1f5d37..2229a21ae7c8 100644 --- a/drivers/crypto/hisilicon/zip/zip_main.c +++ b/drivers/crypto/hisilicon/zip/zip_main.c @@ -719,18 +719,10 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip) hisi_zip->ctrl = ctrl; ctrl->hisi_zip = hisi_zip; - switch (qm->ver) { - case QM_HW_V1: + if (qm->ver == QM_HW_V1) qm->ctrl_qp_num = HZIP_QUEUE_NUM_V1; - break; - - case QM_HW_V2: + else qm->ctrl_qp_num = HZIP_QUEUE_NUM_V2; - break; - - default: - return -EINVAL; - } qm->err_ini = &hisi_zip_err_ini; @@ -743,14 +735,8 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip) static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) { - enum qm_hw_ver rev_id; - - rev_id = hisi_qm_get_hw_version(pdev); - if (rev_id == QM_HW_UNKNOWN) - return -EINVAL; - qm->pdev = pdev; - qm->ver = rev_id; + qm->ver = pdev->revision; qm->algs = "zlib\ngzip"; qm->sqe_size = HZIP_SQE_SIZE; qm->dev_name = hisi_zip_name; -- 
cgit v1.2.3
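
For reference, the debugfs counter pattern shared by the QM, SEC, ZIP and HPRE patches above reduces to one shape: a struct of atomic64_t counters, a name/offset table, and a DEFINE_DEBUGFS_ATTRIBUTE pair where reads return the counter and only a write of 0 (a reset) is accepted. A minimal sketch of that pattern follows; every name in it (struct my_dfx, my_dfx_files, my_dfx_debugfs_init) is illustrative and assumed, not taken from the driver sources.

/*
 * Minimal sketch of the DFX debugfs counter pattern (illustrative names).
 * Each entry in my_dfx_files maps a file name to the offset of an
 * atomic64_t inside struct my_dfx; reading the file returns the counter,
 * writing anything other than 0 is rejected, writing 0 clears it.
 */
#include <linux/atomic.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>

struct my_dfx {
	atomic64_t send_cnt;
	atomic64_t recv_cnt;
};

struct my_dfx_item {
	const char *name;
	u32 offset;
};

static struct my_dfx_item my_dfx_files[] = {
	{"send_cnt", offsetof(struct my_dfx, send_cnt)},
	{"recv_cnt", offsetof(struct my_dfx, recv_cnt)},
};

static int my_debugfs_atomic64_get(void *data, u64 *val)
{
	*val = atomic64_read((atomic64_t *)data);

	return 0;
}

static int my_debugfs_atomic64_set(void *data, u64 val)
{
	/* Only a write of 0 is allowed: it resets the counter. */
	if (val)
		return -EINVAL;

	atomic64_set((atomic64_t *)data, 0);

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(my_atomic64_ops, my_debugfs_atomic64_get,
			 my_debugfs_atomic64_set, "%llu\n");

static void my_dfx_debugfs_init(struct my_dfx *dfx, struct dentry *root)
{
	atomic64_t *data;
	int i;

	for (i = 0; i < ARRAY_SIZE(my_dfx_files); i++) {
		data = (atomic64_t *)((uintptr_t)dfx + my_dfx_files[i].offset);
		debugfs_create_file(my_dfx_files[i].name, 0644, root, data,
				    &my_atomic64_ops);
	}
}

In the drivers above the root dentry passed to the creation loop is the per-device directory under qm->debug.debug_root (sec_dfx, zip_dfx, hpre_dfx, or the qm directory itself), and the counters are bumped with atomic64_inc() on the relevant fast paths.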