1 /* SPDX-License-Identifier: BSD-3-Clause */
2 /* Copyright(c) 2007-2022 Intel Corporation */
3 #ifndef ADF_ACCEL_DEVICES_H_
4 #define ADF_ACCEL_DEVICES_H_
5
6 #include "qat_freebsd.h"
7 #include "adf_cfg_common.h"
8 #include "adf_pfvf_msg.h"
9
10 #define ADF_CFG_NUM_SERVICES 4
11
12 #define ADF_DH895XCC_DEVICE_NAME "dh895xcc"
13 #define ADF_DH895XCCVF_DEVICE_NAME "dh895xccvf"
14 #define ADF_C62X_DEVICE_NAME "c6xx"
15 #define ADF_C62XVF_DEVICE_NAME "c6xxvf"
16 #define ADF_C3XXX_DEVICE_NAME "c3xxx"
17 #define ADF_C3XXXVF_DEVICE_NAME "c3xxxvf"
18 #define ADF_200XX_DEVICE_NAME "200xx"
19 #define ADF_200XXVF_DEVICE_NAME "200xxvf"
20 #define ADF_C4XXX_DEVICE_NAME "c4xxx"
21 #define ADF_C4XXXVF_DEVICE_NAME "c4xxxvf"
22 #define ADF_4XXX_DEVICE_NAME "4xxx"
23 #define ADF_4XXXVF_DEVICE_NAME "4xxxvf"
24 #define ADF_DH895XCC_PCI_DEVICE_ID 0x435
25 #define ADF_DH895XCCIOV_PCI_DEVICE_ID 0x443
26 #define ADF_C62X_PCI_DEVICE_ID 0x37c8
27 #define ADF_C62XIOV_PCI_DEVICE_ID 0x37c9
28 #define ADF_C3XXX_PCI_DEVICE_ID 0x19e2
29 #define ADF_C3XXXIOV_PCI_DEVICE_ID 0x19e3
30 #define ADF_200XX_PCI_DEVICE_ID 0x18ee
31 #define ADF_200XXIOV_PCI_DEVICE_ID 0x18ef
32 #define ADF_D15XX_PCI_DEVICE_ID 0x6f54
33 #define ADF_D15XXIOV_PCI_DEVICE_ID 0x6f55
34 #define ADF_C4XXX_PCI_DEVICE_ID 0x18a0
35 #define ADF_C4XXXIOV_PCI_DEVICE_ID 0x18a1
36 #define ADF_4XXX_PCI_DEVICE_ID 0x4940
37 #define ADF_4XXXIOV_PCI_DEVICE_ID 0x4941
38 #define ADF_401XX_PCI_DEVICE_ID 0x4942
39 #define ADF_401XXIOV_PCI_DEVICE_ID 0x4943
40
41 #define IS_QAT_GEN3(ID) ({ (ID == ADF_C4XXX_PCI_DEVICE_ID); })
42 static inline bool
IS_QAT_GEN4(const unsigned int id)43 IS_QAT_GEN4(const unsigned int id)
44 {
45 return (id == ADF_4XXX_PCI_DEVICE_ID || id == ADF_401XX_PCI_DEVICE_ID ||
46 id == ADF_4XXXIOV_PCI_DEVICE_ID ||
47 id == ADF_401XXIOV_PCI_DEVICE_ID);
48 }
49
#define IS_QAT_GEN3_OR_GEN4(ID) (IS_QAT_GEN3(ID) || IS_QAT_GEN4(ID))
/* VF-to-PF interrupt CSRs are grouped into sets covering 32 VFs each. */
#define ADF_VF2PF_SET_SIZE 32
#define ADF_MAX_VF2PF_SET 4
/* Byte offset of a given 32-VF set. */
#define ADF_VF2PF_SET_OFFSET(set_nr) ((set_nr)*ADF_VF2PF_SET_SIZE)
/* Which 32-VF set a VF number falls into. */
#define ADF_VF2PF_VFNR_TO_SET(vf_nr) ((vf_nr) / ADF_VF2PF_SET_SIZE)
/* Bit mask for a VF within its own 32-VF set. */
#define ADF_VF2PF_VFNR_TO_MASK(vf_nr) \
	({ \
		u32 vf_nr_ = (vf_nr); \
		BIT((vf_nr_)-ADF_VF2PF_SET_SIZE *ADF_VF2PF_VFNR_TO_SET( \
		    vf_nr_)); \
	})
61
/* Fuse register offsets in PCI config space and the fuse-control bit. */
#define ADF_DEVICE_FUSECTL_OFFSET 0x40
#define ADF_DEVICE_LEGFUSE_OFFSET 0x4C
#define ADF_DEVICE_FUSECTL_MASK 0x80000000
#define ADF_PCI_MAX_BARS 3
#define ADF_DEVICE_NAME_LENGTH 32
#define ADF_ETR_MAX_RINGS_PER_BANK 16
#define ADF_MAX_MSIX_VECTOR_NAME 32
#define ADF_DEVICE_NAME_PREFIX "qat_"
#define ADF_STOP_RETRY 50
/* Acceleration-engine thread layout. */
#define ADF_NUM_THREADS_PER_AE (8)
#define ADF_AE_ADMIN_THREAD (7)
#define ADF_NUM_PKE_STRAND (2)
#define ADF_AE_STRAND0_THREAD (8)
#define ADF_AE_STRAND1_THREAD (9)
/* NOTE(review): identical redefinition of ADF_CFG_NUM_SERVICES from the
 * top of this header -- harmless (same replacement list) but a
 * candidate for removal. */
#define ADF_CFG_NUM_SERVICES 4
#define ADF_SRV_TYPE_BIT_LEN 3
#define ADF_SRV_TYPE_MASK 0x7
#define ADF_RINGS_PER_SRV_TYPE 2
#define ADF_THRD_ABILITY_BIT_LEN 4
#define ADF_THRD_ABILITY_MASK 0xf
#define ADF_VF_OFFSET 0x8
#define ADF_MAX_FUNC_PER_DEV 0x7
#define ADF_PCI_DEV_OFFSET 0x3

/* NOTE(review): identical redefinitions of ADF_SRV_TYPE_BIT_LEN and
 * ADF_SRV_TYPE_MASK just above -- candidates for removal. */
#define ADF_SRV_TYPE_BIT_LEN 3
#define ADF_SRV_TYPE_MASK 0x7

/* Extract the 3-bit service type of ring pair @srv from @ena_srv_mask. */
#define GET_SRV_TYPE(ena_srv_mask, srv) \
	(((ena_srv_mask) >> (ADF_SRV_TYPE_BIT_LEN * (srv))) & ADF_SRV_TYPE_MASK)

#define GET_CSR_OPS(accel_dev) (&(accel_dev)->hw_device->csr_info.csr_ops)
#define GET_PFVF_OPS(accel_dev) (&(accel_dev)->hw_device->csr_info.pfvf_ops)
/* Default ring-pair to service map: pairs 0/1 crypto, 2 unused, 3 comp. */
#define ADF_DEFAULT_RING_TO_SRV_MAP \
	(CRYPTO | CRYPTO << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \
	 NA << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \
	 COMP << ADF_CFG_SERV_RING_PAIR_3_SHIFT)
98
/*
 * Accelerator capability flags; power-of-two values meant to be OR-ed
 * into a capability mask.
 * NOTE(review): bit 4 (value 16) is unassigned here -- presumably
 * reserved; confirm against the firmware interface definition.
 */
enum adf_accel_capabilities {
	ADF_ACCEL_CAPABILITIES_NULL = 0,
	ADF_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC = 1,
	ADF_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC = 2,
	ADF_ACCEL_CAPABILITIES_CIPHER = 4,
	ADF_ACCEL_CAPABILITIES_AUTHENTICATION = 8,
	ADF_ACCEL_CAPABILITIES_COMPRESSION = 32,
	ADF_ACCEL_CAPABILITIES_DEPRECATED = 64,
	ADF_ACCEL_CAPABILITIES_RANDOM_NUMBER = 128
};
109
/* One PCI BAR: bus base address, mapped resource handle and length. */
struct adf_bar {
	rman_res_t base_addr;
	struct resource *virt_addr;
	rman_res_t size;
} __packed;

/* MSI-X vector table allocated for the device. */
struct adf_accel_msix {
	struct msix_entry *entries;
	u32 num_entries;
} __packed;

/* PCI-level view of an accelerator device. */
struct adf_accel_pci {
	device_t pci_dev;
	struct adf_accel_msix msix_entries;
	struct adf_bar pci_bars[ADF_PCI_MAX_BARS];
	uint8_t revid; /* PCI revision ID */
	uint8_t sku;   /* device SKU; see enum dev_sku_info */
	int node;      /* NOTE(review): presumably the NUMA node -- confirm */
} __packed;
129
/* Device lifecycle state. */
enum dev_state { DEV_DOWN = 0, DEV_UP };

/* Device SKU identifiers; DEV_SKU_UNKNOWN doubles as a count sentinel. */
enum dev_sku_info {
	DEV_SKU_1 = 0,
	DEV_SKU_2,
	DEV_SKU_3,
	DEV_SKU_4,
	DEV_SKU_VF,
	DEV_SKU_1_CY,
	DEV_SKU_2_CY,
	DEV_SKU_3_CY,
	DEV_SKU_UNKNOWN
};

/*
 * Map a SKU identifier to its printable name.  Any value outside the
 * named SKUs (including DEV_SKU_UNKNOWN) yields "Unknown SKU".
 */
static inline const char *
get_sku_info(enum dev_sku_info info)
{
	static const char *const sku_names[DEV_SKU_UNKNOWN] = {
		[DEV_SKU_1] = "SKU1",       [DEV_SKU_2] = "SKU2",
		[DEV_SKU_3] = "SKU3",       [DEV_SKU_4] = "SKU4",
		[DEV_SKU_VF] = "SKUVF",     [DEV_SKU_1_CY] = "SKU1CY",
		[DEV_SKU_2_CY] = "SKU2CY",  [DEV_SKU_3_CY] = "SKU3CY",
	};

	if ((unsigned int)info < DEV_SKU_UNKNOWN)
		return sku_names[info];
	return "Unknown SKU";
}
170
/* Services an accel unit can be provisioned for (OR-able bit flags). */
enum adf_accel_unit_services {
	ADF_ACCEL_SERVICE_NULL = 0,
	ADF_ACCEL_INLINE_CRYPTO = 1,
	ADF_ACCEL_CRYPTO = 2,
	ADF_ACCEL_COMPRESSION = 4,
	ADF_ACCEL_ASYM = 8,
	ADF_ACCEL_ADMIN = 16
};

/* Per-AE thread counts for asym, sym and compression work. */
struct adf_ae_info {
	u32 num_asym_thd;
	u32 num_sym_thd;
	u32 num_dc_thd;
} __packed;

/* One accelerator unit: engine/accel masks and its assigned service. */
struct adf_accel_unit {
	u8 au_mask;
	u32 accel_mask;
	u64 ae_mask;
	u64 comp_ae_mask;
	u32 num_ae;
	enum adf_accel_unit_services services;
} __packed;

/* Aggregate accel-unit provisioning for a device. */
struct adf_accel_unit_info {
	u32 inline_ingress_msk;
	u32 inline_egress_msk;
	u32 sym_ae_msk;
	u32 asym_ae_msk;
	u32 dc_ae_msk;
	u8 num_cy_au;
	u8 num_dc_au;
	u8 num_asym_au;
	u8 num_inline_au;
	struct adf_accel_unit *au;
	const struct adf_ae_info *ae_info;
} __packed;
208
/* Layout of the device's on-chip ARAM regions, as sent to firmware. */
struct adf_hw_aram_info {
	/* Inline Egress mask. "1" = AE is working with egress traffic */
	u32 inline_direction_egress_mask;
	/* Inline congestion management profiles set in config file */
	u32 inline_congest_mngt_profile;
	/* Initialise CY AE mask, "1" = AE is used for CY operations */
	u32 cy_ae_mask;
	/* Initialise DC AE mask, "1" = AE is used for DC operations */
	u32 dc_ae_mask;
	/* Number of long words used to define the ARAM regions */
	u32 num_aram_lw_entries;
	/* ARAM region definitions */
	u32 mmp_region_size;
	u32 mmp_region_offset;
	u32 skm_region_size;
	u32 skm_region_offset;
	/*
	 * Defines size and offset of compression intermediate buffers stored
	 * in ARAM (device's on-chip memory).
	 */
	u32 inter_buff_aram_region_size;
	u32 inter_buff_aram_region_offset;
	u32 sadb_region_size;
	u32 sadb_region_offset;
} __packed;
234
/* Device family: printable name, type and count of discovered instances. */
struct adf_hw_device_class {
	const char *name;
	const enum adf_device_type type;
	uint32_t instances;
} __packed;

/* Arbiter CSR offsets for this hardware generation. */
struct arb_info {
	u32 arbiter_offset;
	u32 wrk_thd_2_srv_arb_map;
	u32 wrk_cfg_offset;
} __packed;

/* Admin message and mailbox CSR offsets for this hardware generation. */
struct admin_info {
	u32 admin_msg_ur;
	u32 admin_msg_lr;
	u32 mailbox_offset;
} __packed;

/*
 * Generation-specific accessors for transport-ring and bank CSRs.
 * The common transport code goes through this table only; each device
 * generation fills in its own register layout.
 */
struct adf_hw_csr_ops {
	u64 (*build_csr_ring_base_addr)(bus_addr_t addr, u32 size);
	u32 (*read_csr_ring_head)(struct resource *csr_base_addr,
				  u32 bank,
				  u32 ring);
	void (*write_csr_ring_head)(struct resource *csr_base_addr,
				    u32 bank,
				    u32 ring,
				    u32 value);
	u32 (*read_csr_ring_tail)(struct resource *csr_base_addr,
				  u32 bank,
				  u32 ring);
	void (*write_csr_ring_tail)(struct resource *csr_base_addr,
				    u32 bank,
				    u32 ring,
				    u32 value);
	u32 (*read_csr_e_stat)(struct resource *csr_base_addr, u32 bank);
	void (*write_csr_ring_config)(struct resource *csr_base_addr,
				      u32 bank,
				      u32 ring,
				      u32 value);
	bus_addr_t (*read_csr_ring_base)(struct resource *csr_base_addr,
					 u32 bank,
					 u32 ring);
	void (*write_csr_ring_base)(struct resource *csr_base_addr,
				    u32 bank,
				    u32 ring,
				    bus_addr_t addr);
	void (*write_csr_int_flag)(struct resource *csr_base_addr,
				   u32 bank,
				   u32 value);
	void (*write_csr_int_srcsel)(struct resource *csr_base_addr, u32 bank);
	void (*write_csr_int_col_en)(struct resource *csr_base_addr,
				     u32 bank,
				     u32 value);
	void (*write_csr_int_col_ctl)(struct resource *csr_base_addr,
				      u32 bank,
				      u32 value);
	void (*write_csr_int_flag_and_col)(struct resource *csr_base_addr,
					   u32 bank,
					   u32 value);
	u32 (*read_csr_ring_srv_arb_en)(struct resource *csr_base_addr,
					u32 bank);
	void (*write_csr_ring_srv_arb_en)(struct resource *csr_base_addr,
					  u32 bank,
					  u32 value);
	u32 (*get_src_sel_mask)(void);
	u32 (*get_int_col_ctl_enable_mask)(void);
	u32 (*get_bank_irq_mask)(u32 irq_mask);
};

struct adf_cfg_device_data;
struct adf_accel_dev;
struct adf_etr_data;
struct adf_etr_ring_data;

/* Generation-specific PF<->VF messaging primitives. */
struct adf_pfvf_ops {
	int (*enable_comms)(struct adf_accel_dev *accel_dev);
	u32 (*get_pf2vf_offset)(u32 i);
	u32 (*get_vf2pf_offset)(u32 i);
	void (*enable_vf2pf_interrupts)(struct resource *pmisc_addr,
					u32 vf_mask);
	void (*disable_all_vf2pf_interrupts)(struct resource *pmisc_addr);
	u32 (*disable_pending_vf2pf_interrupts)(struct resource *pmisc_addr);
	int (*send_msg)(struct adf_accel_dev *accel_dev,
			struct pfvf_message msg,
			u32 pfvf_offset,
			struct mutex *csr_lock);
	struct pfvf_message (*recv_msg)(struct adf_accel_dev *accel_dev,
					u32 pfvf_offset,
					u8 compat_ver);
};

/* CSR op tables plus generation-specific layout constants. */
struct adf_hw_csr_info {
	struct adf_hw_csr_ops csr_ops;
	struct adf_pfvf_ops pfvf_ops;
	u32 csr_addr_offset;
	u32 ring_bundle_size;
	u32 bank_int_flag_clear_mask;
	u32 num_rings_per_int_srcsel;
	u32 arb_enable_mask;
};
335
/*
 * Per-generation hardware description and operations table.  Filled in
 * by the device-specific probe/init code; the common driver drives the
 * hardware exclusively through these callbacks and mask/count fields.
 */
struct adf_hw_device_data {
	struct adf_hw_device_class *dev_class;
	/* Topology discovery: masks, BAR ids and engine/accel counts. */
	uint32_t (*get_accel_mask)(struct adf_accel_dev *accel_dev);
	uint32_t (*get_ae_mask)(struct adf_accel_dev *accel_dev);
	uint32_t (*get_sram_bar_id)(struct adf_hw_device_data *self);
	uint32_t (*get_misc_bar_id)(struct adf_hw_device_data *self);
	uint32_t (*get_etr_bar_id)(struct adf_hw_device_data *self);
	uint32_t (*get_num_aes)(struct adf_hw_device_data *self);
	uint32_t (*get_num_accels)(struct adf_hw_device_data *self);
	void (*notify_and_wait_ethernet)(struct adf_accel_dev *accel_dev);
	bool (*get_eth_doorbell_msg)(struct adf_accel_dev *accel_dev);
	void (*get_arb_info)(struct arb_info *arb_csrs_info);
	void (*get_admin_info)(struct admin_info *admin_csrs_info);
	void (*get_errsou_offset)(u32 *errsou3, u32 *errsou5);
	uint32_t (*get_num_accel_units)(struct adf_hw_device_data *self);
	int (*init_accel_units)(struct adf_accel_dev *accel_dev);
	void (*exit_accel_units)(struct adf_accel_dev *accel_dev);
	uint32_t (*get_clock_speed)(struct adf_hw_device_data *self);
	enum dev_sku_info (*get_sku)(struct adf_hw_device_data *self);
	bool (*check_prod_sku)(struct adf_accel_dev *accel_dev);
	int (*alloc_irq)(struct adf_accel_dev *accel_dev);
	void (*free_irq)(struct adf_accel_dev *accel_dev);
	/* RAS / error handling. */
	void (*enable_error_correction)(struct adf_accel_dev *accel_dev);
	int (*check_uncorrectable_error)(struct adf_accel_dev *accel_dev);
	void (*print_err_registers)(struct adf_accel_dev *accel_dev);
	void (*disable_error_interrupts)(struct adf_accel_dev *accel_dev);
	int (*init_ras)(struct adf_accel_dev *accel_dev);
	void (*exit_ras)(struct adf_accel_dev *accel_dev);
	void (*disable_arb)(struct adf_accel_dev *accel_dev);
	void (*update_ras_errors)(struct adf_accel_dev *accel_dev, int error);
	bool (*ras_interrupts)(struct adf_accel_dev *accel_dev,
			       bool *reset_required);
	/* Admin interface and arbiter setup. */
	int (*init_admin_comms)(struct adf_accel_dev *accel_dev);
	void (*exit_admin_comms)(struct adf_accel_dev *accel_dev);
	int (*send_admin_init)(struct adf_accel_dev *accel_dev);
	void (*set_asym_rings_mask)(struct adf_accel_dev *accel_dev);
	int (*get_ring_to_svc_map)(struct adf_accel_dev *accel_dev,
				   u16 *ring_to_svc_map);
	uint32_t (*get_accel_cap)(struct adf_accel_dev *accel_dev);
	int (*init_arb)(struct adf_accel_dev *accel_dev);
	void (*exit_arb)(struct adf_accel_dev *accel_dev);
	void (*get_arb_mapping)(struct adf_accel_dev *accel_dev,
				const uint32_t **cfg);
	int (*init_device)(struct adf_accel_dev *accel_dev);
	int (*get_heartbeat_status)(struct adf_accel_dev *accel_dev);
	int (*int_timer_init)(struct adf_accel_dev *accel_dev);
	void (*int_timer_exit)(struct adf_accel_dev *accel_dev);
	uint32_t (*get_ae_clock)(struct adf_hw_device_data *self);
	uint32_t (*get_hb_clock)(struct adf_hw_device_data *self);
	/* SR-IOV and interrupt management. */
	void (*disable_iov)(struct adf_accel_dev *accel_dev);
	void (*configure_iov_threads)(struct adf_accel_dev *accel_dev,
				      bool enable);
	void (*enable_ints)(struct adf_accel_dev *accel_dev);
	bool (*check_slice_hang)(struct adf_accel_dev *accel_dev);
	int (*set_ssm_wdtimer)(struct adf_accel_dev *accel_dev);
	void (*enable_pf2vf_interrupt)(struct adf_accel_dev *accel_dev);
	void (*disable_pf2vf_interrupt)(struct adf_accel_dev *accel_dev);
	int (*interrupt_active_pf2vf)(struct adf_accel_dev *accel_dev);
	int (*get_int_active_bundles)(struct adf_accel_dev *accel_dev);
	/* Reset and clock measurement. */
	void (*reset_device)(struct adf_accel_dev *accel_dev);
	void (*reset_hw_units)(struct adf_accel_dev *accel_dev);
	int (*measure_clock)(struct adf_accel_dev *accel_dev);
	void (*restore_device)(struct adf_accel_dev *accel_dev);
	/* Firmware object / accel-unit configuration and statistics. */
	uint32_t (*get_obj_cfg_ae_mask)(struct adf_accel_dev *accel_dev,
					enum adf_accel_unit_services services);
	enum adf_accel_unit_services (
	    *get_service_type)(struct adf_accel_dev *accel_dev, s32 obj_num);
	int (*add_pke_stats)(struct adf_accel_dev *accel_dev);
	void (*remove_pke_stats)(struct adf_accel_dev *accel_dev);
	int (*add_misc_error)(struct adf_accel_dev *accel_dev);
	int (*count_ras_event)(struct adf_accel_dev *accel_dev,
			       u32 *ras_event,
			       char *aeidstr);
	void (*remove_misc_error)(struct adf_accel_dev *accel_dev);
	int (*configure_accel_units)(struct adf_accel_dev *accel_dev);
	int (*ring_pair_reset)(struct adf_accel_dev *accel_dev,
			       u32 bank_number);
	void (*config_ring_irq)(struct adf_accel_dev *accel_dev,
				u32 bank_number,
				u16 ring_mask);
	uint32_t (*get_objs_num)(struct adf_accel_dev *accel_dev);
	const char *(*get_obj_name)(struct adf_accel_dev *accel_dev,
				    enum adf_accel_unit_services services);
	void (*pre_reset)(struct adf_accel_dev *accel_dev);
	void (*post_reset)(struct adf_accel_dev *accel_dev);
	void (*set_msix_rttable)(struct adf_accel_dev *accel_dev);
	void (*get_ring_svc_map_data)(int ring_pair_index,
				      u16 ring_to_svc_map,
				      u8 *serv_type,
				      int *ring_index,
				      int *num_rings_per_srv,
				      int bundle_num);
	/* Data fields filled in per generation. */
	struct adf_hw_csr_info csr_info;
	const char *fw_name;
	const char *fw_mmp_name;
	bool reset_ack;
	uint32_t fuses;
	uint32_t accel_capabilities_mask;
	uint32_t instance_id;
	uint16_t accel_mask;
	u32 aerucm_mask;
	u32 ae_mask;
	u32 admin_ae_mask;
	u32 service_mask;
	u32 service_to_load_mask;
	u32 heartbeat_ctr_num;
	uint16_t tx_rings_mask;
	uint8_t tx_rx_gap;
	uint8_t num_banks;
	u8 num_rings_per_bank;
	uint8_t num_accel;
	uint8_t num_logical_accel;
	uint8_t num_engines;
	int (*get_storage_enabled)(struct adf_accel_dev *accel_dev,
				   uint32_t *storage_enabled);
	u8 query_storage_cap;
	u32 clock_frequency;
	u8 storage_enable;
	u32 extended_dc_capabilities;
	int (*config_device)(struct adf_accel_dev *accel_dev);
	u32 asym_ae_active_thd_mask;
	u16 asym_rings_mask;
	int (*get_fw_image_type)(struct adf_accel_dev *accel_dev,
				 enum adf_cfg_fw_image_type *fw_image_type);
	u16 ring_to_svc_map;
} __packed;
462
/* helper enum for performing CSR operations */
enum operation {
	AND,
	OR,
};

/* 32-bit CSR write macro */
#define ADF_CSR_WR(csr_base, csr_offset, val) \
	bus_write_4(csr_base, csr_offset, val)

/* 64-bit CSR write macro */
#ifdef __x86_64__
#define ADF_CSR_WR64(csr_base, csr_offset, val) \
	bus_write_8(csr_base, csr_offset, val)
#else
/*
 * 32-bit fallback: split the 64-bit value into two 4-byte writes --
 * low dword at @offset first, then high dword at @offset + 4.
 */
static __inline void
adf_csr_wr64(struct resource *csr_base, bus_size_t offset, uint64_t value)
{
	bus_write_4(csr_base, offset, (uint32_t)value);
	bus_write_4(csr_base, offset + 4, (uint32_t)(value >> 32));
}
#define ADF_CSR_WR64(csr_base, csr_offset, val) \
	adf_csr_wr64(csr_base, csr_offset, val)
#endif
487
/* 32-bit CSR read macro */
#define ADF_CSR_RD(csr_base, csr_offset) bus_read_4(csr_base, csr_offset)

/* 64-bit CSR read macro */
#ifdef __x86_64__
#define ADF_CSR_RD64(csr_base, csr_offset) bus_read_8(csr_base, csr_offset)
#else
/*
 * 32-bit fallback: assemble the quadword from two 4-byte reads.
 * NOTE(review): the evaluation order of the two bus_read_4() calls in
 * one expression is unspecified in C; harmless unless the device
 * latches state on a particular read order -- confirm against the
 * hardware spec.
 */
static __inline uint64_t
adf_csr_rd64(struct resource *csr_base, bus_size_t offset)
{
	return (((uint64_t)bus_read_4(csr_base, offset)) |
	    (((uint64_t)bus_read_4(csr_base, offset + 4)) << 32));
}
#define ADF_CSR_RD64(csr_base, csr_offset) adf_csr_rd64(csr_base, csr_offset)
#endif
503
/* Convenience accessors into struct adf_accel_dev. */
#define GET_DEV(accel_dev) ((accel_dev)->accel_pci_dev.pci_dev)
#define GET_BARS(accel_dev) ((accel_dev)->accel_pci_dev.pci_bars)
#define GET_HW_DATA(accel_dev) (accel_dev->hw_device)
#define GET_MAX_BANKS(accel_dev) (GET_HW_DATA(accel_dev)->num_banks)
#define GET_DEV_SKU(accel_dev) (accel_dev->accel_pci_dev.sku)
#define GET_NUM_RINGS_PER_BANK(accel_dev) \
	(GET_HW_DATA(accel_dev)->num_rings_per_bank)
#define GET_MAX_ACCELENGINES(accel_dev) (GET_HW_DATA(accel_dev)->num_engines)
#define accel_to_pci_dev(accel_ptr) accel_ptr->accel_pci_dev.pci_dev
/* NOTE(review): identical redefinition of GET_SRV_TYPE from earlier in
 * this header -- harmless but a candidate for removal. */
#define GET_SRV_TYPE(ena_srv_mask, srv) \
	(((ena_srv_mask) >> (ADF_SRV_TYPE_BIT_LEN * (srv))) & ADF_SRV_TYPE_MASK)
/* Set both ring bits of service slot @srv in @asym_mask. */
#define SET_ASYM_MASK(asym_mask, srv) \
	({ \
		typeof(srv) srv_ = (srv); \
		(asym_mask) |= ((1 << (srv_)*ADF_RINGS_PER_SRV_TYPE) | \
				(1 << ((srv_)*ADF_RINGS_PER_SRV_TYPE + 1))); \
	})

/* NOTE(review): identical redefinition of GET_NUM_RINGS_PER_BANK above. */
#define GET_NUM_RINGS_PER_BANK(accel_dev) \
	(GET_HW_DATA(accel_dev)->num_rings_per_bank)
/* Processes per device: one ring pair (2 rings) per process. */
#define GET_MAX_PROCESSES(accel_dev) \
	({ \
		typeof(accel_dev) dev = (accel_dev); \
		(GET_MAX_BANKS(dev) * (GET_NUM_RINGS_PER_BANK(dev) / 2)); \
	})
#define GET_DU_TABLE(accel_dev) (accel_dev->du_table)
530
/* Read-modify-write: clear every bit of the CSR at @offs not set in @mask. */
static inline void
adf_csr_fetch_and_and(struct resource *csr, size_t offs, unsigned long mask)
{
	const unsigned int updated = ADF_CSR_RD(csr, offs) & mask;

	ADF_CSR_WR(csr, offs, updated);
}
539
/* Read-modify-write: set every bit of @mask in the CSR at @offs. */
static inline void
adf_csr_fetch_and_or(struct resource *csr, size_t offs, unsigned long mask)
{
	const unsigned int updated = ADF_CSR_RD(csr, offs) | mask;

	ADF_CSR_WR(csr, offs, updated);
}
548
549 static inline void
adf_csr_fetch_and_update(enum operation op,struct resource * csr,size_t offs,unsigned long mask)550 adf_csr_fetch_and_update(enum operation op,
551 struct resource *csr,
552 size_t offs,
553 unsigned long mask)
554 {
555 switch (op) {
556 case AND:
557 adf_csr_fetch_and_and(csr, offs, mask);
558 break;
559 case OR:
560 adf_csr_fetch_and_or(csr, offs, mask);
561 break;
562 }
563 }
564
/* Per-endpoint PF<->VF message statistics. */
struct pfvf_stats {
	struct dentry *stats_file;
	/* Messages put in CSR */
	unsigned int tx;
	/* Messages read from CSR */
	unsigned int rx;
	/* Interrupt fired but int bit was clear */
	unsigned int spurious;
	/* Block messages sent */
	unsigned int blk_tx;
	/* Block messages received */
	unsigned int blk_rx;
	/* Blocks received with CRC errors */
	unsigned int crc_err;
	/* CSR in use by other side */
	unsigned int busy;
	/* Receiver did not acknowledge */
	unsigned int no_ack;
	/* Collision detected */
	unsigned int collision;
	/* Couldn't send a response */
	unsigned int tx_timeout;
	/* Didn't receive a response */
	unsigned int rx_timeout;
	/* Responses received */
	unsigned int rx_rsp;
	/* Messages re-transmitted */
	unsigned int retry;
	/* Event put timeout */
	unsigned int event_timeout;
};

/* Must equal the number of counter fields in struct pfvf_stats
 * (every member except stats_file). */
#define NUM_PFVF_COUNTERS 14

void adf_get_admin_info(struct admin_info *admin_csrs_info);
/* DMA buffers, mailbox mapping and lock for the admin interface. */
struct adf_admin_comms {
	bus_addr_t phy_addr;
	bus_addr_t const_tbl_addr;
	bus_addr_t aram_map_phys_addr;
	bus_addr_t phy_hb_addr;
	bus_dmamap_t aram_map;
	bus_dmamap_t const_tbl_map;
	bus_dmamap_t hb_map;
	char *virt_addr;
	char *virt_hb_addr;
	struct resource *mailbox_addr;
	struct sx lock;
	struct bus_dmamem dma_mem;
	struct bus_dmamem dma_hb;
};

struct icp_qat_fw_loader_handle;
/* Firmware loader handle plus the UOF and MMP firmware images. */
struct adf_fw_loader_data {
	struct icp_qat_fw_loader_handle *fw_loader;
	const struct firmware *uof_fw;
	const struct firmware *mmp_fw;
};

/* PF-side state tracked for one VF. */
struct adf_accel_vf_info {
	struct adf_accel_dev *accel_dev;
	struct mutex pf2vf_lock; /* protect CSR access for PF2VF messages */
	u32 vf_nr;
	bool init;
	u8 compat_ver;
	struct pfvf_stats pfvf_counters;
};

/* Versions reported by the loaded firmware and MMP images. */
struct adf_fw_versions {
	u8 fw_version_major;
	u8 fw_version_minor;
	u8 fw_version_patch;
	u8 mmp_version_major;
	u8 mmp_version_minor;
	u8 mmp_version_patch;
};

/* Periodic interrupt timer state and its deferred-work queue. */
struct adf_int_timer {
	struct adf_accel_dev *accel_dev;
	struct workqueue_struct *timer_irq_wq;
	struct timer_list timer;
	u32 timeout_val;
	u32 int_cnt;
	bool enabled;
};

#define ADF_COMPAT_CHECKER_MAX 8
/* Callback checking whether a VF's compat version is acceptable. */
typedef int (*adf_iov_compat_checker_t)(struct adf_accel_dev *accel_dev,
					u8 vf_compat_ver);
/* Registered IOV compatibility checkers. */
struct adf_accel_compat_manager {
	u8 num_chker;
	adf_iov_compat_checker_t iov_compat_checkers[ADF_COMPAT_CHECKER_MAX];
};

struct adf_heartbeat;
/*
 * Central per-device state.  The u1 union holds PF-only state
 * (vf_info/num_vfs) or VF-only state, selected by is_vf.
 */
struct adf_accel_dev {
	struct adf_hw_aram_info *aram_info;
	struct adf_accel_unit_info *au_info;
	struct adf_etr_data *transport;
	struct adf_hw_device_data *hw_device;
	struct adf_cfg_device_data *cfg;
	struct adf_fw_loader_data *fw_loader;
	struct adf_admin_comms *admin;
	struct adf_uio_control_accel *accel;
	struct adf_heartbeat *heartbeat;
	struct adf_int_timer *int_timer;
	struct adf_fw_versions fw_versions;
	unsigned int autoreset_on_error;
	struct adf_fw_counters_data *fw_counters_data;
	struct sysctl_oid *debugfs_ae_config;
	struct list_head crypto_list;
	atomic_t *ras_counters;
	unsigned long status;
	atomic_t ref_count;
	bus_dma_tag_t dma_tag;
	struct sysctl_ctx_list sysctl_ctx;
	struct sysctl_oid *ras_correctable;
	struct sysctl_oid *ras_uncorrectable;
	struct sysctl_oid *ras_fatal;
	struct sysctl_oid *ras_reset;
	struct sysctl_oid *pke_replay_dbgfile;
	struct sysctl_oid *misc_error_dbgfile;
	struct list_head list;
	struct adf_accel_pci accel_pci_dev;
	struct adf_accel_compat_manager *cm;
	u8 compat_ver;
	union {
		struct {
			/* vf_info is non-zero when SR-IOV is init'ed */
			struct adf_accel_vf_info *vf_info;
			int num_vfs;
		} pf;
		struct {
			bool irq_enabled;
			struct resource *irq;
			void *cookie;
			struct task pf2vf_bh_tasklet;
			struct mutex vf2pf_lock; /* protect CSR access */
			struct completion msg_received;
			struct pfvf_message
			    response; /* temp field holding pf2vf response */
			enum ring_reset_result rpreset_sts;
			struct mutex rpreset_lock; /* protect rpreset_sts */
			struct pfvf_stats pfvf_counters;
			u8 pf_compat_ver;
		} vf;
	} u1;
	bool is_vf;
	u32 accel_id;
	void *lac_dev;
};
715 #endif
716