/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2022 Intel Corporation */
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_cfg_common.h"
#include "adf_transport_internal.h"
#include "icp_qat_hw.h"
#include "adf_c4xxx_hw_data.h"

#define ADF_C4XXX_PARTITION_SHIFT 8
#define ADF_C4XXX_PARTITION(svc, ring)                                         \
	((svc) << ((ring)*ADF_C4XXX_PARTITION_SHIFT))

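/* Derive the partition mask from the ring-to-service map. Each ring
 * pair owns an 8-bit field in the mask holding the partition type that
 * serves it. For example, a map with ASYM on ring pair 0 and DC on
 * ring pair 1 would yield:
 *
 *   mask = ADF_C4XXX_PARTITION(ADF_C4XXX_PART_ASYM, 0) |
 *          ADF_C4XXX_PARTITION(ADF_C4XXX_PART_DC, 1);
 */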
static void
adf_get_partitions_mask(struct adf_accel_dev *accel_dev, u32 *partitions_mask)
{
	device_t dev = accel_to_pci_dev(accel_dev);
	u32 enabled_partitions_msk = 0;
	u8 ring_pair = 0;
	enum adf_cfg_service_type serv_type = 0;
	u16 ring_to_svc_map = accel_dev->hw_device->ring_to_svc_map;

	for (ring_pair = 0; ring_pair < ADF_CFG_NUM_SERVICES; ring_pair++) {
		serv_type = GET_SRV_TYPE(ring_to_svc_map, ring_pair);
		switch (serv_type) {
		case CRYPTO: {
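			/* A CRYPTO service spans two ring pairs: ASYM is
			 * assigned to the current ring pair and SYM to the
			 * next one, which is consumed here as well.
			 */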
			enabled_partitions_msk |=
			    ADF_C4XXX_PARTITION(ADF_C4XXX_PART_ASYM,
						ring_pair++);
			if (ring_pair < ADF_CFG_NUM_SERVICES)
				enabled_partitions_msk |=
				    ADF_C4XXX_PARTITION(ADF_C4XXX_PART_SYM,
							ring_pair);
			else
				device_printf(
				    dev, "Failed to enable SYM partition.\n");
			break;
		}
		case COMP:
			enabled_partitions_msk |=
			    ADF_C4XXX_PARTITION(ADF_C4XXX_PART_DC, ring_pair);
			break;
		case SYM:
			enabled_partitions_msk |=
			    ADF_C4XXX_PARTITION(ADF_C4XXX_PART_SYM, ring_pair);
			break;
		case ASYM:
			enabled_partitions_msk |=
			    ADF_C4XXX_PARTITION(ADF_C4XXX_PART_ASYM, ring_pair);
			break;
		default:
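			/* Ring pairs with no service assigned are mapped
			 * to the unused partition.
			 */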
			enabled_partitions_msk |=
			    ADF_C4XXX_PARTITION(ADF_C4XXX_PART_UNUSED,
						ring_pair);
			break;
		}
	}
	*partitions_mask = enabled_partitions_msk;
}

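/* Point every symmetric crypto worker thread of accel engine 'ae' at
 * 'partition' by programming the per-thread WRKTHD2PARTMAP CSRs.
 */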
static void
adf_enable_sym_threads(struct adf_accel_dev *accel_dev, u32 ae, u32 partition)
{
	struct resource *csr = accel_dev->transport->banks[0].csr_addr;
	const struct adf_ae_info *ae_info = accel_dev->au_info->ae_info;
	u32 num_sym_thds = ae_info[ae].num_sym_thd;
	u32 i;
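	/* A WRKTHD2PARTMAP entry holds the partition group number in the
	 * upper bits and a one-hot selector for the partition within that
	 * group in the low ADF_C4XXX_PARTS_PER_GRP bits.
	 */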
	u32 part_group = partition / ADF_C4XXX_PARTS_PER_GRP;
	u32 wkrthd2_partmap = part_group << ADF_C4XXX_PARTS_PER_GRP |
	    (BIT(partition % ADF_C4XXX_PARTS_PER_GRP));

	for (i = 0; i < num_sym_thds; i++)
		WRITE_CSR_WQM(csr,
			      ADF_C4XXX_WRKTHD2PARTMAP,
			      (ae * ADF_NUM_THREADS_PER_AE + i),
			      wkrthd2_partmap);
}

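/* Point the asymmetric crypto worker threads of accel engine 'ae' at
 * 'partition'. Asym threads occupy the top of the per-AE thread range,
 * so the mapping walks down from the highest usable thread index.
 */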
static void
adf_enable_asym_threads(struct adf_accel_dev *accel_dev, u32 ae, u32 partition)
{
	struct resource *csr = accel_dev->transport->banks[0].csr_addr;
	const struct adf_ae_info *ae_info = accel_dev->au_info->ae_info;
	u32 num_asym_thds = ae_info[ae].num_asym_thd;
	u32 i;
	u32 part_group = partition / ADF_C4XXX_PARTS_PER_GRP;
	u32 wkrthd2_partmap = part_group << ADF_C4XXX_PARTS_PER_GRP |
	    (BIT(partition % ADF_C4XXX_PARTS_PER_GRP));
	/* The asymmetric cryptography SKU has one thread less per AE, so
	 * the highest usable thread index is ADF_NUM_THREADS_PER_AE - 2.
	 */
	u32 num_all_thds = ADF_NUM_THREADS_PER_AE - 2;

	for (i = num_all_thds; i > (num_all_thds - num_asym_thds); i--)
		WRITE_CSR_WQM(csr,
			      ADF_C4XXX_WRKTHD2PARTMAP,
			      (ae * ADF_NUM_THREADS_PER_AE + i),
			      wkrthd2_partmap);
}

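/* Point every compression (DC) worker thread of accel engine 'ae' at
 * 'partition' by programming the per-thread WRKTHD2PARTMAP CSRs.
 */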
static void
adf_enable_dc_threads(struct adf_accel_dev *accel_dev, u32 ae, u32 partition)
{
	struct resource *csr = accel_dev->transport->banks[0].csr_addr;
	const struct adf_ae_info *ae_info = accel_dev->au_info->ae_info;
	u32 num_dc_thds = ae_info[ae].num_dc_thd;
	u32 i;
	u32 part_group = partition / ADF_C4XXX_PARTS_PER_GRP;
	u32 wkrthd2_partmap = part_group << ADF_C4XXX_PARTS_PER_GRP |
	    (BIT(partition % ADF_C4XXX_PARTS_PER_GRP));

	for (i = 0; i < num_dc_thds; i++)
		WRITE_CSR_WQM(csr,
			      ADF_C4XXX_WRKTHD2PARTMAP,
			      (ae * ADF_NUM_THREADS_PER_AE + i),
			      wkrthd2_partmap);
}

/* Initialise resource partitioning.
 * Set up a default set of 4 partitions to arbitrate
 * request rings per bundle.
 */
int
adf_init_arb_c4xxx(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct resource *csr = accel_dev->transport->banks[0].csr_addr;
	struct adf_accel_unit_info *au_info = accel_dev->au_info;
	u32 i;
	unsigned long ae_mask;
	u32 partitions_mask = 0;

	/* invoke common adf_init_arb */
	adf_init_arb(accel_dev);

	adf_get_partitions_mask(accel_dev, &partitions_mask);
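	/* Program the mask into the partition LUT entry of every bank. */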
	for (i = 0; i < hw_data->num_banks; i++)
		WRITE_CSR_WQM(csr,
			      ADF_C4XXX_PARTITION_LUT_OFFSET,
			      i,
			      partitions_mask);

	ae_mask = hw_data->ae_mask;

	/* Assign default partitions to the accel engine worker threads */
	for_each_set_bit(i, &ae_mask, ADF_C4XXX_MAX_ACCELENGINES)
	{
		if (BIT(i) & au_info->sym_ae_msk)
			adf_enable_sym_threads(accel_dev,
					       i,
					       ADF_C4XXX_PART_SYM);
		if (BIT(i) & au_info->asym_ae_msk)
			adf_enable_asym_threads(accel_dev,
						i,
						ADF_C4XXX_PART_ASYM);
		if (BIT(i) & au_info->dc_ae_msk)
			adf_enable_dc_threads(accel_dev, i, ADF_C4XXX_PART_DC);
	}

	return 0;
}

/* Disable the resource partitioning feature
 * and restore the default partitioning scheme
 */
void
adf_exit_arb_c4xxx(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct resource *csr;
	u32 i;
	unsigned long ae_mask;

	if (!accel_dev->transport)
		return;
	csr = accel_dev->transport->banks[0].csr_addr;

	/* Restore the default partition LUT registers */
	for (i = 0; i < hw_data->num_banks; i++)
		WRITE_CSR_WQM(csr,
			      ADF_C4XXX_PARTITION_LUT_OFFSET,
			      i,
			      ADF_C4XXX_DEFAULT_PARTITIONS);

	ae_mask = hw_data->ae_mask;

	/* Reset worker thread to partition mapping */
	for (i = 0; i < hw_data->num_engines * ADF_NUM_THREADS_PER_AE; i++) {
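		/* Skip threads belonging to accel engines that are not
		 * enabled in ae_mask.
		 */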
		if (!test_bit((u32)(i / ADF_NUM_THREADS_PER_AE), &ae_mask))
			continue;

		WRITE_CSR_WQM(csr, ADF_C4XXX_WRKTHD2PARTMAP, i, 0);
	}
}