1 /* SPDX-License-Identifier: BSD-3-Clause */
2 /* Copyright(c) 2007-2022 Intel Corporation */
3 /* $FreeBSD$ */
4 #include "qat_freebsd.h"
5 #include "adf_cfg.h"
6 #include "adf_common_drv.h"
7 #include "adf_accel_devices.h"
8 #include "icp_qat_uclo.h"
9 #include "icp_qat_fw.h"
10 #include "icp_qat_fw_init_admin.h"
11 #include "adf_cfg_strings.h"
12 #include "adf_transport_access_macros.h"
13 #include "adf_transport_internal.h"
14 #include "adf_accel_devices.h"
15 #include "adf_common_drv.h"
16 #include "adf_transport_internal.h"
17 
/* Number of service arbiter configuration (SARCONFIG) registers. */
#define ADF_ARB_NUM 4
/* Stride between consecutive arbiter registers, in bytes. */
#define ADF_ARB_REG_SIZE 0x4
#define ADF_ARB_WTR_SIZE 0x20
/* Base offset of the arbiter CSR block within the device BAR. */
#define ADF_ARB_OFFSET 0x30000
/* Per-bank spacing of the ring-service-arbiter enable registers. */
#define ADF_ARB_REG_SLOT 0x1000
#define ADF_ARB_WTR_OFFSET 0x010
#define ADF_ARB_RO_EN_OFFSET 0x090
#define ADF_ARB_WQCFG_OFFSET 0x100
/* Offset of the worker-thread to service-arbiter map registers. */
#define ADF_ARB_WRK_2_SER_MAP_OFFSET 0x180
#define ADF_ARB_RINGSRVARBEN_OFFSET 0x19C

/* Write the ring-service-arbiter enable CSR for bank 'index'. */
#define WRITE_CSR_ARB_RINGSRVARBEN(csr_addr, index, value)                     \
	ADF_CSR_WR(csr_addr,                                                   \
		   ADF_ARB_RINGSRVARBEN_OFFSET + (ADF_ARB_REG_SLOT * (index)), \
		   value)

/* Write service arbiter configuration register 'index' at 'csr_offset'. */
#define WRITE_CSR_ARB_SARCONFIG(csr_addr, csr_offset, index, value)            \
	ADF_CSR_WR(csr_addr, (csr_offset) + (ADF_ARB_REG_SIZE * (index)), value)
/* Read back the ring-service-arbiter enable CSR for bank 'index'. */
#define READ_CSR_ARB_RINGSRVARBEN(csr_addr, index)                             \
	ADF_CSR_RD(csr_addr,                                                   \
		   ADF_ARB_RINGSRVARBEN_OFFSET + (ADF_ARB_REG_SLOT * (index)))

/* Serializes read-modify-write sequences on the arbitration enable CSRs. */
static DEFINE_MUTEX(csr_arb_lock);

/* Write worker-thread to service-arbiter map register 'index'. */
#define WRITE_CSR_ARB_WRK_2_SER_MAP(                                           \
    csr_addr, csr_offset, wrk_to_ser_map_offset, index, value)                 \
	ADF_CSR_WR(csr_addr,                                                   \
		   ((csr_offset) + (wrk_to_ser_map_offset)) +                  \
		       (ADF_ARB_REG_SIZE * (index)),                           \
		   value)
48 
49 int
50 adf_init_arb(struct adf_accel_dev *accel_dev)
51 {
52 	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
53 	struct arb_info info;
54 	struct resource *csr = accel_dev->transport->banks[0].csr_addr;
55 	u32 arb_cfg = 0x1 << 31 | 0x4 << 4 | 0x1;
56 	u32 arb;
57 
58 	hw_data->get_arb_info(&info);
59 
60 	/* Service arb configured for 32 bytes responses and
61 	 * ring flow control check enabled.
62 	 */
63 	for (arb = 0; arb < ADF_ARB_NUM; arb++)
64 		WRITE_CSR_ARB_SARCONFIG(csr, info.arbiter_offset, arb, arb_cfg);
65 
66 	return 0;
67 }
68 
69 int
70 adf_init_gen2_arb(struct adf_accel_dev *accel_dev)
71 {
72 	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
73 	struct arb_info info;
74 	struct resource *csr = accel_dev->transport->banks[0].csr_addr;
75 	u32 i;
76 	const u32 *thd_2_arb_cfg;
77 
78 	/* invoke common adf_init_arb */
79 	adf_init_arb(accel_dev);
80 
81 	hw_data->get_arb_info(&info);
82 
83 	/* Map worker threads to service arbiters */
84 	hw_data->get_arb_mapping(accel_dev, &thd_2_arb_cfg);
85 	if (!thd_2_arb_cfg)
86 		return EFAULT;
87 
88 	for (i = 0; i < hw_data->num_engines; i++)
89 		WRITE_CSR_ARB_WRK_2_SER_MAP(csr,
90 					    info.arbiter_offset,
91 					    info.wrk_thd_2_srv_arb_map,
92 					    i,
93 					    *(thd_2_arb_cfg + i));
94 	return 0;
95 }
96 
97 void
98 adf_update_ring_arb(struct adf_etr_ring_data *ring)
99 {
100 	WRITE_CSR_ARB_RINGSRVARBEN(ring->bank->csr_addr,
101 				   ring->bank->bank_number,
102 				   ring->bank->ring_mask & 0xFF);
103 }
104 
105 void
106 adf_enable_ring_arb(void *csr_addr, unsigned int bank_nr, unsigned int mask)
107 {
108 	struct resource *csr = csr_addr;
109 	u32 arbenable;
110 
111 	if (!csr)
112 		return;
113 
114 	mutex_lock(&csr_arb_lock);
115 	arbenable = READ_CSR_ARB_RINGSRVARBEN(csr, bank_nr);
116 	arbenable |= mask & 0xFF;
117 	WRITE_CSR_ARB_RINGSRVARBEN(csr, bank_nr, arbenable);
118 
119 	mutex_unlock(&csr_arb_lock);
120 }
121 
122 void
123 adf_disable_ring_arb(void *csr_addr, unsigned int bank_nr, unsigned int mask)
124 {
125 	struct resource *csr = csr_addr;
126 	u32 arbenable;
127 
128 	if (!csr_addr)
129 		return;
130 
131 	mutex_lock(&csr_arb_lock);
132 	arbenable = READ_CSR_ARB_RINGSRVARBEN(csr, bank_nr);
133 	arbenable &= ~mask & 0xFF;
134 	WRITE_CSR_ARB_RINGSRVARBEN(csr, bank_nr, arbenable);
135 	mutex_unlock(&csr_arb_lock);
136 }
137 
138 void
139 adf_exit_arb(struct adf_accel_dev *accel_dev)
140 {
141 	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
142 	struct arb_info info;
143 	struct resource *csr;
144 	unsigned int i;
145 
146 	if (!accel_dev->transport)
147 		return;
148 
149 	csr = accel_dev->transport->banks[0].csr_addr;
150 
151 	hw_data->get_arb_info(&info);
152 
153 	/* Reset arbiter configuration */
154 	for (i = 0; i < ADF_ARB_NUM; i++)
155 		WRITE_CSR_ARB_SARCONFIG(csr, info.arbiter_offset, i, 0);
156 
157 	/* Unmap worker threads to service arbiters */
158 	if (hw_data->get_arb_mapping) {
159 		for (i = 0; i < hw_data->num_engines; i++)
160 			WRITE_CSR_ARB_WRK_2_SER_MAP(csr,
161 						    info.arbiter_offset,
162 						    info.wrk_thd_2_srv_arb_map,
163 						    i,
164 						    0);
165 	}
166 
167 	/* Disable arbitration on all rings */
168 	for (i = 0; i < GET_MAX_BANKS(accel_dev); i++)
169 		WRITE_CSR_ARB_RINGSRVARBEN(csr, i, 0);
170 }
171 
172 void
173 adf_disable_arb(struct adf_accel_dev *accel_dev)
174 {
175 	struct resource *csr;
176 	unsigned int i;
177 
178 	if (!accel_dev || !accel_dev->transport)
179 		return;
180 
181 	csr = accel_dev->transport->banks[0].csr_addr;
182 
183 	/* Disable arbitration on all rings */
184 	for (i = 0; i < GET_MAX_BANKS(accel_dev); i++)
185 		WRITE_CSR_ARB_RINGSRVARBEN(csr, i, 0);
186 }
187