/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2019 HiSilicon Limited. */
#ifndef HISI_ACC_QM_H
#define HISI_ACC_QM_H

#include <linux/bitfield.h>
#include <linux/debugfs.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/pci.h>

#define QM_QNUM_V1	4096
#define QM_QNUM_V2	1024
#define QM_MAX_VFS_NUM_V2	63

/* qm user domain */
#define QM_ARUSER_M_CFG_1	0x100088
#define AXUSER_SNOOP_ENABLE	BIT(30)
#define AXUSER_CMD_TYPE	GENMASK(14, 12)
#define AXUSER_CMD_SMMU_NORMAL	1
#define AXUSER_NS	BIT(6)
#define AXUSER_NO	BIT(5)
#define AXUSER_FP	BIT(4)
#define AXUSER_SSV	BIT(0)
#define AXUSER_BASE	(AXUSER_SNOOP_ENABLE |		\
			FIELD_PREP(AXUSER_CMD_TYPE,	\
			AXUSER_CMD_SMMU_NORMAL) |	\
			AXUSER_NS | AXUSER_NO | AXUSER_FP)
#define QM_ARUSER_M_CFG_ENABLE	0x100090
#define ARUSER_M_CFG_ENABLE	0xfffffffe
#define QM_AWUSER_M_CFG_1	0x100098
#define QM_AWUSER_M_CFG_ENABLE	0x1000a0
#define AWUSER_M_CFG_ENABLE	0xfffffffe
#define QM_WUSER_M_CFG_ENABLE	0x1000a8
#define WUSER_M_CFG_ENABLE	0xffffffff

/* mailbox */
#define QM_MB_CMD_SQC	0x0
#define QM_MB_CMD_CQC	0x1
#define QM_MB_CMD_EQC	0x2
#define QM_MB_CMD_AEQC	0x3
#define QM_MB_CMD_SQC_BT	0x4
#define QM_MB_CMD_CQC_BT	0x5
#define QM_MB_CMD_SQC_VFT_V2	0x6
#define QM_MB_CMD_STOP_QP	0x8
#define QM_MB_CMD_SRC	0xc
#define QM_MB_CMD_DST	0xd

#define QM_MB_CMD_SEND_BASE	0x300
#define QM_MB_EVENT_SHIFT	8
#define QM_MB_BUSY_SHIFT	13
#define QM_MB_OP_SHIFT	14
#define QM_MB_CMD_DATA_ADDR_L	0x304
#define QM_MB_CMD_DATA_ADDR_H	0x308
#define QM_MB_MAX_WAIT_CNT	6000

/* doorbell */
#define QM_DOORBELL_CMD_SQ	0
#define QM_DOORBELL_CMD_CQ	1
#define QM_DOORBELL_CMD_EQ	2
#define QM_DOORBELL_CMD_AEQ	3

#define QM_DOORBELL_SQ_CQ_BASE_V2	0x1000
#define QM_DOORBELL_EQ_AEQ_BASE_V2	0x2000
#define QM_QP_MAX_NUM_SHIFT	11
#define QM_DB_CMD_SHIFT_V2	12
#define QM_DB_RAND_SHIFT_V2	16
#define QM_DB_INDEX_SHIFT_V2	32
#define QM_DB_PRIORITY_SHIFT_V2	48
#define QM_VF_STATE	0x60

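/*
 * Illustrative sketch (assumption, not part of this header): the shift macros
 * above describe how a V2 doorbell is packed into a single 64-bit write. The
 * authoritative code lives in qm.c; the layout below is inferred from the
 * macro names only.
 *
 *	u64 db = qn |
 *		 ((u64)cmd << QM_DB_CMD_SHIFT_V2) |
 *		 ((u64)randata << QM_DB_RAND_SHIFT_V2) |
 *		 ((u64)index << QM_DB_INDEX_SHIFT_V2) |
 *		 ((u64)priority << QM_DB_PRIORITY_SHIFT_V2);
 *	writeq(db, qm->db_io_base + (u64)qn * qm->db_interval +
 *	       QM_DOORBELL_SQ_CQ_BASE_V2);
 */
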
/* qm cache */
#define QM_CACHE_CTL	0x100050
#define SQC_CACHE_ENABLE	BIT(0)
#define CQC_CACHE_ENABLE	BIT(1)
#define SQC_CACHE_WB_ENABLE	BIT(4)
#define SQC_CACHE_WB_THRD	GENMASK(10, 5)
#define CQC_CACHE_WB_ENABLE	BIT(11)
#define CQC_CACHE_WB_THRD	GENMASK(17, 12)
#define QM_AXI_M_CFG	0x1000ac
#define AXI_M_CFG	0xffff
#define QM_AXI_M_CFG_ENABLE	0x1000b0
#define AM_CFG_SINGLE_PORT_MAX_TRANS	0x300014
#define AXI_M_CFG_ENABLE	0xffffffff
#define QM_PEH_AXUSER_CFG	0x1000cc
#define QM_PEH_AXUSER_CFG_ENABLE	0x1000d0
#define PEH_AXUSER_CFG	0x401001
#define PEH_AXUSER_CFG_ENABLE	0xffffffff

#define QM_MIN_QNUM	2
#define HISI_ACC_SGL_SGE_NR_MAX	255
#define QM_SHAPER_CFG	0x100164
#define QM_SHAPER_ENABLE	BIT(30)
#define QM_SHAPER_TYPE1_OFFSET	10

/* page number for queue file region */
#define QM_DOORBELL_PAGE_NR	1

/* uacce mode of the driver */
#define UACCE_MODE_NOUACCE	0 /* don't use uacce */
#define UACCE_MODE_SVA	1 /* use uacce sva mode */
#define UACCE_MODE_DESC	"0(default) means only register to crypto, 1 means both register to crypto and uacce"

enum qm_stop_reason {
	QM_NORMAL,
	QM_SOFT_RESET,
	QM_FLR,
};

enum qm_state {
	QM_INIT = 0,
	QM_START,
	QM_CLOSE,
	QM_STOP,
};

enum qp_state {
	QP_INIT = 1,
	QP_START,
	QP_STOP,
	QP_CLOSE,
};

enum qm_hw_ver {
	QM_HW_UNKNOWN = -1,
	QM_HW_V1 = 0x20,
	QM_HW_V2 = 0x21,
	QM_HW_V3 = 0x30,
};

enum qm_fun_type {
	QM_HW_PF,
	QM_HW_VF,
};

enum qm_debug_file {
	CURRENT_QM,
	CURRENT_Q,
	CLEAR_ENABLE,
	DEBUG_FILE_NUM,
};

enum qm_vf_state {
	QM_READY = 0,
	QM_NOT_READY,
};

enum qm_cap_bits {
	QM_SUPPORT_DB_ISOLATION = 0x0,
	QM_SUPPORT_FUNC_QOS,
	QM_SUPPORT_STOP_QP,
	QM_SUPPORT_MB_COMMAND,
	QM_SUPPORT_SVA_PREFETCH,
	QM_SUPPORT_RPM,
};

struct dfx_diff_registers {
	u32 *regs;
	u32 reg_offset;
	u32 reg_len;
};

struct qm_dfx {
	atomic64_t err_irq_cnt;
	atomic64_t aeq_irq_cnt;
	atomic64_t abnormal_irq_cnt;
	atomic64_t create_qp_err_cnt;
	atomic64_t mb_err_cnt;
};

struct debugfs_file {
	enum qm_debug_file index;
	struct mutex lock;
	struct qm_debug *debug;
};

struct qm_debug {
	u32 curr_qm_qp_num;
	u32 sqe_mask_offset;
	u32 sqe_mask_len;
	struct qm_dfx dfx;
	struct dentry *debug_root;
	struct dentry *qm_d;
	struct debugfs_file files[DEBUG_FILE_NUM];
	unsigned int *qm_last_words;
	/* ACC engines recording last regs */
	unsigned int *last_words;
	struct dfx_diff_registers *qm_diff_regs;
	struct dfx_diff_registers *acc_diff_regs;
};

struct qm_shaper_factor {
	u32 func_qos;
	u64 cir_b;
	u64 cir_u;
	u64 cir_s;
	u64 cbs_s;
};

struct qm_dma {
	void *va;
	dma_addr_t dma;
	size_t size;
};

struct hisi_qm_status {
	u32 eq_head;
	bool eqc_phase;
	u32 aeq_head;
	bool aeqc_phase;
	atomic_t flags;
	int stop_reason;
};

struct hisi_qm;

struct hisi_qm_err_info {
	char *acpi_rst;
	u32 msi_wr_port;
	u32 ecc_2bits_mask;
	u32 qm_shutdown_mask;
	u32 dev_shutdown_mask;
	u32 qm_reset_mask;
	u32 dev_reset_mask;
	u32 ce;
	u32 nfe;
	u32 fe;
};

struct hisi_qm_err_status {
	u32 is_qm_ecc_mbit;
	u32 is_dev_ecc_mbit;
};

struct hisi_qm_err_ini {
	int (*hw_init)(struct hisi_qm *qm);
	void (*hw_err_enable)(struct hisi_qm *qm);
	void (*hw_err_disable)(struct hisi_qm *qm);
	u32 (*get_dev_hw_err_status)(struct hisi_qm *qm);
	void (*clear_dev_hw_err_status)(struct hisi_qm *qm, u32 err_sts);
	void (*open_axi_master_ooo)(struct hisi_qm *qm);
	void (*close_axi_master_ooo)(struct hisi_qm *qm);
	void (*open_sva_prefetch)(struct hisi_qm *qm);
	void (*close_sva_prefetch)(struct hisi_qm *qm);
	void (*log_dev_hw_err)(struct hisi_qm *qm, u32 err_sts);
	void (*show_last_dfx_regs)(struct hisi_qm *qm);
	void (*err_info_init)(struct hisi_qm *qm);
};

struct hisi_qm_cap_info {
	u32 type;
	/* Register offset */
	u32 offset;
	/* Bit offset in register */
	u32 shift;
	u32 mask;
	u32 v1_val;
	u32 v2_val;
	u32 v3_val;
};

struct hisi_qm_list {
	struct mutex lock;
	struct list_head list;
	int (*register_to_crypto)(struct hisi_qm *qm);
	void (*unregister_from_crypto)(struct hisi_qm *qm);
};

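/*
 * Usage sketch (hypothetical driver, not from this header): each accelerator
 * driver keeps one hisi_qm_list shared by all of its devices and points the
 * crypto (un)registration callbacks at its own handlers, e.g.:
 *
 *	static struct hisi_qm_list my_acc_devices = {
 *		.register_to_crypto	= my_acc_register_to_crypto,
 *		.unregister_from_crypto	= my_acc_unregister_from_crypto,
 *	};
 *
 * The list is set up with hisi_qm_init_list() and devices are added or
 * removed through hisi_qm_alg_register()/hisi_qm_alg_unregister(), both
 * declared below; the "my_acc_*" names are placeholders.
 */
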
struct hisi_qm_poll_data {
	struct hisi_qm *qm;
	struct work_struct work;
	u16 *qp_finish_id;
};

struct hisi_qm {
	enum qm_hw_ver ver;
	enum qm_fun_type fun_type;
	const char *dev_name;
	struct pci_dev *pdev;
	void __iomem *io_base;
	void __iomem *db_io_base;

	/* Capability version, 0: not supported */
	u32 cap_ver;
	u32 sqe_size;
	u32 qp_base;
	u32 qp_num;
	u32 qp_in_used;
	u32 ctrl_qp_num;
	u32 max_qp_num;
	u32 vfs_num;
	u32 db_interval;
	u16 eq_depth;
	u16 aeq_depth;
	struct list_head list;
	struct hisi_qm_list *qm_list;

	struct qm_dma qdma;
	struct qm_sqc *sqc;
	struct qm_cqc *cqc;
	struct qm_eqe *eqe;
	struct qm_aeqe *aeqe;
	dma_addr_t sqc_dma;
	dma_addr_t cqc_dma;
	dma_addr_t eqe_dma;
	dma_addr_t aeqe_dma;

	struct hisi_qm_status status;
	const struct hisi_qm_err_ini *err_ini;
	struct hisi_qm_err_info err_info;
	struct hisi_qm_err_status err_status;
	unsigned long misc_ctl; /* driver removing and reset sched */
	/* Device capability bit */
	unsigned long caps;

	struct rw_semaphore qps_lock;
	struct idr qp_idr;
	struct hisi_qp *qp_array;
	struct hisi_qm_poll_data *poll_data;

	struct mutex mailbox_lock;

	const struct hisi_qm_hw_ops *ops;

	struct qm_debug debug;

	u32 error_mask;

	struct workqueue_struct *wq;
	struct work_struct rst_work;
	struct work_struct cmd_process;

	const char *algs;
	bool use_sva;
	bool is_frozen;

	resource_size_t phys_base;
	resource_size_t db_phys_base;
	struct uacce_device *uacce;
	int mode;
	struct qm_shaper_factor *factor;
	u32 mb_qos;
	u32 type_rate;
};

struct hisi_qp_status {
	atomic_t used;
	u16 sq_tail;
	u16 cq_head;
	bool cqc_phase;
	atomic_t flags;
};

struct hisi_qp_ops {
	int (*fill_sqe)(void *sqe, void *q_parm, void *d_parm);
};

struct hisi_qp {
	u32 qp_id;
	u16 sq_depth;
	u16 cq_depth;
	u8 alg_type;
	u8 req_type;

	struct qm_dma qdma;
	void *sqe;
	struct qm_cqe *cqe;
	dma_addr_t sqe_dma;
	dma_addr_t cqe_dma;

	struct hisi_qp_status qp_status;
	struct hisi_qp_ops *hw_ops;
	void *qp_ctx;
	void (*req_cb)(struct hisi_qp *qp, void *data);
	void (*event_cb)(struct hisi_qp *qp);

	struct hisi_qm *qm;
	bool is_resetting;
	bool is_in_kernel;
	u16 pasid;
	struct uacce_queue *uacce_q;
};

static inline int q_num_set(const char *val, const struct kernel_param *kp,
			    unsigned int device)
{
	struct pci_dev *pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI,
					      device, NULL);
	u32 n, q_num;
	int ret;

	if (!val)
		return -EINVAL;

	if (!pdev) {
		q_num = min_t(u32, QM_QNUM_V1, QM_QNUM_V2);
		pr_info("No device found currently, suppose queue number is %u\n",
			q_num);
	} else {
		if (pdev->revision == QM_HW_V1)
			q_num = QM_QNUM_V1;
		else
			q_num = QM_QNUM_V2;
	}

	ret = kstrtou32(val, 10, &n);
	if (ret || n < QM_MIN_QNUM || n > q_num)
		return -EINVAL;

	return param_set_int(val, kp);
}

static inline int vfs_num_set(const char *val, const struct kernel_param *kp)
{
	u32 n;
	int ret;

	if (!val)
		return -EINVAL;

	ret = kstrtou32(val, 10, &n);
	if (ret < 0)
		return ret;

	if (n > QM_MAX_VFS_NUM_V2)
		return -EINVAL;

	return param_set_int(val, kp);
}

static inline int mode_set(const char *val, const struct kernel_param *kp)
{
	u32 n;
	int ret;

	if (!val)
		return -EINVAL;

	ret = kstrtou32(val, 10, &n);
	if (ret != 0 || (n != UACCE_MODE_SVA &&
			 n != UACCE_MODE_NOUACCE))
		return -EINVAL;

	return param_set_int(val, kp);
}

static inline int uacce_mode_set(const char *val, const struct kernel_param *kp)
{
	return mode_set(val, kp);
}

static inline void hisi_qm_init_list(struct hisi_qm_list *qm_list)
{
	INIT_LIST_HEAD(&qm_list->list);
	mutex_init(&qm_list->lock);
}

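/*
 * Usage sketch (hypothetical, not from this header): q_num_set(),
 * vfs_num_set() and uacce_mode_set() are intended to back a driver's module
 * parameters via kernel_param_ops, e.g.:
 *
 *	static int pf_q_num_set(const char *val, const struct kernel_param *kp)
 *	{
 *		return q_num_set(val, kp, PCI_DEVICE_ID_MY_ACC_PF);
 *	}
 *
 *	static const struct kernel_param_ops pf_q_num_ops = {
 *		.set = pf_q_num_set,
 *		.get = param_get_int,
 *	};
 *
 *	static u32 pf_q_num = 64;
 *	module_param_cb(pf_q_num, &pf_q_num_ops, &pf_q_num, 0444);
 *
 * uacce_mode_set() is typically wired up the same way, with
 * MODULE_PARM_DESC(uacce_mode, UACCE_MODE_DESC) documenting it.
 * PCI_DEVICE_ID_MY_ACC_PF and the default of 64 are placeholders; each
 * driver supplies its own PF device ID and default queue count.
 */
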
int hisi_qm_init(struct hisi_qm *qm);
void hisi_qm_uninit(struct hisi_qm *qm);
int hisi_qm_start(struct hisi_qm *qm);
int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r);
int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg);
int hisi_qm_stop_qp(struct hisi_qp *qp);
int hisi_qp_send(struct hisi_qp *qp, const void *msg);
void hisi_qm_debug_init(struct hisi_qm *qm);
void hisi_qm_debug_regs_clear(struct hisi_qm *qm);
int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs);
int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen);
int hisi_qm_sriov_configure(struct pci_dev *pdev, int num_vfs);
void hisi_qm_dev_err_init(struct hisi_qm *qm);
void hisi_qm_dev_err_uninit(struct hisi_qm *qm);
int hisi_qm_diff_regs_init(struct hisi_qm *qm,
			   struct dfx_diff_registers *dregs, int reg_len);
void hisi_qm_diff_regs_uninit(struct hisi_qm *qm, int reg_len);
void hisi_qm_acc_diff_regs_dump(struct hisi_qm *qm, struct seq_file *s,
				struct dfx_diff_registers *dregs, int regs_len);

pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev,
					  pci_channel_state_t state);
pci_ers_result_t hisi_qm_dev_slot_reset(struct pci_dev *pdev);
void hisi_qm_reset_prepare(struct pci_dev *pdev);
void hisi_qm_reset_done(struct pci_dev *pdev);

int hisi_qm_wait_mb_ready(struct hisi_qm *qm);
int hisi_qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue,
	       bool op);

struct hisi_acc_sgl_pool;
struct hisi_acc_hw_sgl *hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
	struct scatterlist *sgl, struct hisi_acc_sgl_pool *pool,
	u32 index, dma_addr_t *hw_sgl_dma);
void hisi_acc_sg_buf_unmap(struct device *dev, struct scatterlist *sgl,
			   struct hisi_acc_hw_sgl *hw_sgl);
struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev,
						   u32 count, u32 sge_nr);
void hisi_acc_free_sgl_pool(struct device *dev,
			    struct hisi_acc_sgl_pool *pool);
int hisi_qm_alloc_qps_node(struct hisi_qm_list *qm_list, int qp_num,
			   u8 alg_type, int node, struct hisi_qp **qps);
void hisi_qm_free_qps(struct hisi_qp **qps, int qp_num);
void hisi_qm_dev_shutdown(struct pci_dev *pdev);
void hisi_qm_wait_task_finish(struct hisi_qm *qm, struct hisi_qm_list *qm_list);
int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list);
void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list);
int hisi_qm_resume(struct device *dev);
int hisi_qm_suspend(struct device *dev);
void hisi_qm_pm_uninit(struct hisi_qm *qm);
void hisi_qm_pm_init(struct hisi_qm *qm);
int hisi_qm_get_dfx_access(struct hisi_qm *qm);
void hisi_qm_put_dfx_access(struct hisi_qm *qm);
void hisi_qm_regs_dump(struct seq_file *s, struct debugfs_regset32 *regset);
u32 hisi_qm_get_hw_info(struct hisi_qm *qm,
			const struct hisi_qm_cap_info *info_table,
			u32 index, bool is_read);

/* Used by VFIO ACC live migration driver */
struct pci_driver *hisi_sec_get_pf_driver(void);
struct pci_driver *hisi_hpre_get_pf_driver(void);
struct pci_driver *hisi_zip_get_pf_driver(void);
#endif