// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2018-2023 Intel Corporation
 */
#include "iwl-trans.h"
#include "iwl-fh.h"
#include "iwl-context-info-gen3.h"
#include "internal.h"
#include "iwl-prph.h"

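/*
 * Configure the debug destination in the PRPH scratch: without ini (TLV)
 * debug data this points the hardware monitor at the legacy fw_mon DRAM
 * buffer; with ini debug it selects SMEM, NPK or DRAM according to the
 * DBGC1 allocation TLV.  Note that for the DRAM path only the first
 * fragment is programmed here.
 */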
static void
iwl_pcie_ctxt_info_dbg_enable(struct iwl_trans *trans,
                              struct iwl_prph_scratch_hwm_cfg *dbg_cfg,
                              u32 *control_flags)
{
        enum iwl_fw_ini_allocation_id alloc_id = IWL_FW_INI_ALLOCATION_ID_DBGC1;
        struct iwl_fw_ini_allocation_tlv *fw_mon_cfg;
        u32 dbg_flags = 0;

        if (!iwl_trans_dbg_ini_valid(trans)) {
                struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;

                iwl_pcie_alloc_fw_monitor(trans, 0);

                if (fw_mon->size) {
                        dbg_flags |= IWL_PRPH_SCRATCH_EDBG_DEST_DRAM;

                        IWL_DEBUG_FW(trans,
                                     "WRT: Applying DRAM buffer destination\n");

                        dbg_cfg->hwm_base_addr = cpu_to_le64(fw_mon->physical);
                        dbg_cfg->hwm_size = cpu_to_le32(fw_mon->size);
                }

                goto out;
        }

        fw_mon_cfg = &trans->dbg.fw_mon_cfg[alloc_id];

        switch (le32_to_cpu(fw_mon_cfg->buf_location)) {
        case IWL_FW_INI_LOCATION_SRAM_PATH:
                dbg_flags |= IWL_PRPH_SCRATCH_EDBG_DEST_INTERNAL;
                IWL_DEBUG_FW(trans,
                             "WRT: Applying SMEM buffer destination\n");
                break;

        case IWL_FW_INI_LOCATION_NPK_PATH:
                dbg_flags |= IWL_PRPH_SCRATCH_EDBG_DEST_TB22DTF;
                IWL_DEBUG_FW(trans,
                             "WRT: Applying NPK buffer destination\n");
                break;

        case IWL_FW_INI_LOCATION_DRAM_PATH:
                if (trans->dbg.fw_mon_ini[alloc_id].num_frags) {
                        struct iwl_dram_data *frag =
                                &trans->dbg.fw_mon_ini[alloc_id].frags[0];
                        dbg_flags |= IWL_PRPH_SCRATCH_EDBG_DEST_DRAM;
                        dbg_cfg->hwm_base_addr = cpu_to_le64(frag->physical);
                        dbg_cfg->hwm_size = cpu_to_le32(frag->size);
                        dbg_cfg->debug_token_config = cpu_to_le32(trans->dbg.ucode_preset);
                        IWL_DEBUG_FW(trans,
                                     "WRT: Applying DRAM destination (debug_token_config=%u)\n",
                                     dbg_cfg->debug_token_config);
                        IWL_DEBUG_FW(trans,
                                     "WRT: Applying DRAM destination (alloc_id=%u, num_frags=%u)\n",
                                     alloc_id,
                                     trans->dbg.fw_mon_ini[alloc_id].num_frags);
                }
                break;
        default:
#if defined(__linux__)
                IWL_ERR(trans, "WRT: Invalid buffer destination\n");
#elif defined(__FreeBSD__)
                IWL_ERR(trans, "WRT: Invalid buffer destination: %d\n",
                        le32_to_cpu(fw_mon_cfg->buf_location));
#endif
        }
out:
        if (dbg_flags)
                *control_flags |= IWL_PRPH_SCRATCH_EARLY_DEBUG_EN | dbg_flags;
}

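/*
 * Set up the gen3 (AX210+) context info: allocate the PRPH scratch, PRPH
 * info and context info DMA regions, copy the firmware sections and the
 * IML into DRAM, and then let the device boot itself by writing the
 * context info address and setting CSR_AUTO_FUNC_BOOT_ENA.
 */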
int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
                                 const struct fw_img *fw)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_context_info_gen3 *ctxt_info_gen3;
        struct iwl_prph_scratch *prph_scratch;
        struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl;
        struct iwl_prph_info *prph_info;
        u32 control_flags = 0;
        int ret;
        int cmdq_size = max_t(u32, IWL_CMD_QUEUE_SIZE,
                              trans->cfg->min_txq_size);

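        /*
         * Translate the RX buffer size into control flags: 2K needs no
         * flag, 4K sets the 4K flag, and 8K/12K keep the 4K flag while
         * additionally advertising the extended size to firmware that
         * supports it.  IWL_AMSDU_DEF is not a valid setting here.
         */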
        switch (trans_pcie->rx_buf_size) {
        case IWL_AMSDU_DEF:
                return -EINVAL;
        case IWL_AMSDU_2K:
                break;
        case IWL_AMSDU_4K:
                control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_4K;
                break;
        case IWL_AMSDU_8K:
                control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_4K;
                /* if firmware supports the ext size, tell it */
                control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_EXT_8K;
                break;
        case IWL_AMSDU_12K:
                control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_4K;
                /* if firmware supports the ext size, tell it */
                control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_EXT_16K;
                break;
        }

        /* Allocate prph scratch */
        prph_scratch = dma_alloc_coherent(trans->dev, sizeof(*prph_scratch),
                                          &trans_pcie->prph_scratch_dma_addr,
                                          GFP_KERNEL);
        if (!prph_scratch)
                return -ENOMEM;

        prph_sc_ctrl = &prph_scratch->ctrl_cfg;

        prph_sc_ctrl->version.version = 0;
        prph_sc_ctrl->version.mac_id =
                cpu_to_le16((u16)iwl_read32(trans, CSR_HW_REV));
        prph_sc_ctrl->version.size = cpu_to_le16(sizeof(*prph_scratch) / 4);

        control_flags |= IWL_PRPH_SCRATCH_MTR_MODE;
        control_flags |= IWL_PRPH_MTR_FORMAT_256B & IWL_PRPH_SCRATCH_MTR_FORMAT;

        if (trans->trans_cfg->imr_enabled)
                control_flags |= IWL_PRPH_SCRATCH_IMR_DEBUG_EN;

        /* initialize RX default queue */
        prph_sc_ctrl->rbd_cfg.free_rbd_addr =
                cpu_to_le64(trans_pcie->rxq->bd_dma);

        iwl_pcie_ctxt_info_dbg_enable(trans, &prph_sc_ctrl->hwm_cfg,
                                      &control_flags);
        prph_sc_ctrl->control.control_flags = cpu_to_le32(control_flags);

        /* initialize the Step equalizer data */
        prph_sc_ctrl->step_cfg.mbx_addr_0 = cpu_to_le32(trans->mbx_addr_0_step);
        prph_sc_ctrl->step_cfg.mbx_addr_1 = cpu_to_le32(trans->mbx_addr_1_step);

        /* allocate ucode sections in dram and set addresses */
        ret = iwl_pcie_init_fw_sec(trans, fw, &prph_scratch->dram);
        if (ret)
                goto err_free_prph_scratch;

        /* Allocate prph information.
         * Currently we don't assign anything to the prph info; it will get
         * assigned later.
         *
         * We also use the second half of this page to give the device some
         * dummy TR/CR tail pointers - which shouldn't be necessary as we
         * don't use this, but the hardware still reads/writes there and we
         * can't let it go do that with a NULL pointer.
         */
        BUILD_BUG_ON(sizeof(*prph_info) > PAGE_SIZE / 2);
        prph_info = dma_alloc_coherent(trans->dev, PAGE_SIZE,
                                       &trans_pcie->prph_info_dma_addr,
                                       GFP_KERNEL);
        if (!prph_info) {
                ret = -ENOMEM;
                goto err_free_prph_scratch;
        }

        /* Allocate context info */
        ctxt_info_gen3 = dma_alloc_coherent(trans->dev,
                                            sizeof(*ctxt_info_gen3),
                                            &trans_pcie->ctxt_info_dma_addr,
                                            GFP_KERNEL);
        if (!ctxt_info_gen3) {
                ret = -ENOMEM;
                goto err_free_prph_info;
        }

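        /*
         * Point the device at the DMA regions set up above.  The dummy
         * TR/CR tail pointer arrays live in the second half of the
         * prph_info page: TR tails at PAGE_SIZE / 2 and CR tails at
         * 3 * PAGE_SIZE / 4, matching the layout described in the comment
         * above the prph_info allocation.
         */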
        ctxt_info_gen3->prph_info_base_addr =
                cpu_to_le64(trans_pcie->prph_info_dma_addr);
        ctxt_info_gen3->prph_scratch_base_addr =
                cpu_to_le64(trans_pcie->prph_scratch_dma_addr);
        ctxt_info_gen3->prph_scratch_size =
                cpu_to_le32(sizeof(*prph_scratch));
        ctxt_info_gen3->cr_head_idx_arr_base_addr =
                cpu_to_le64(trans_pcie->rxq->rb_stts_dma);
        ctxt_info_gen3->tr_tail_idx_arr_base_addr =
                cpu_to_le64(trans_pcie->prph_info_dma_addr + PAGE_SIZE / 2);
        ctxt_info_gen3->cr_tail_idx_arr_base_addr =
                cpu_to_le64(trans_pcie->prph_info_dma_addr + 3 * PAGE_SIZE / 4);
        ctxt_info_gen3->mtr_base_addr =
                cpu_to_le64(trans->txqs.txq[trans->txqs.cmd.q_id]->dma_addr);
        ctxt_info_gen3->mcr_base_addr =
                cpu_to_le64(trans_pcie->rxq->used_bd_dma);
        ctxt_info_gen3->mtr_size =
                cpu_to_le16(TFD_QUEUE_CB_SIZE(cmdq_size));
        ctxt_info_gen3->mcr_size =
                cpu_to_le16(RX_QUEUE_CB_SIZE(trans->cfg->num_rbds));

        trans_pcie->ctxt_info_gen3 = ctxt_info_gen3;
        trans_pcie->prph_info = prph_info;
        trans_pcie->prph_scratch = prph_scratch;

        /* Allocate IML */
        trans_pcie->iml = dma_alloc_coherent(trans->dev, trans->iml_len,
                                             &trans_pcie->iml_dma_addr,
                                             GFP_KERNEL);
        if (!trans_pcie->iml) {
                ret = -ENOMEM;
                goto err_free_ctxt_info;
        }

        memcpy(trans_pcie->iml, trans->iml, trans->iml_len);

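        /*
         * Enable the interrupts used while the context info and IML are
         * being loaded, then hand everything to the device: context info
         * address, IML address and size, and finally the auto-boot bit
         * that kicks the firmware self load.
         */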
        iwl_enable_fw_load_int_ctx_info(trans);

        /* kick FW self load */
        iwl_write64(trans, CSR_CTXT_INFO_ADDR,
                    trans_pcie->ctxt_info_dma_addr);
        iwl_write64(trans, CSR_IML_DATA_ADDR,
                    trans_pcie->iml_dma_addr);
        iwl_write32(trans, CSR_IML_SIZE_ADDR, trans->iml_len);

        iwl_set_bit(trans, CSR_CTXT_INFO_BOOT_CTRL,
                    CSR_AUTO_FUNC_BOOT_ENA);

        return 0;

err_free_ctxt_info:
        dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info_gen3),
                          trans_pcie->ctxt_info_gen3,
                          trans_pcie->ctxt_info_dma_addr);
        trans_pcie->ctxt_info_gen3 = NULL;
err_free_prph_info:
        dma_free_coherent(trans->dev, PAGE_SIZE, prph_info,
                          trans_pcie->prph_info_dma_addr);

err_free_prph_scratch:
        dma_free_coherent(trans->dev,
                          sizeof(*prph_scratch),
                          prph_scratch,
                          trans_pcie->prph_scratch_dma_addr);
        return ret;
}

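/*
 * Free the gen3 context info structures.  While the firmware is alive only
 * the IML copy and the firmware section images are released; ctxt_info_gen3,
 * prph_scratch and prph_info stay mapped because the device may still use
 * them (e.g. for the PNVM load), and are freed on the !alive path only.
 */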
void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans, bool alive)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        if (trans_pcie->iml) {
                dma_free_coherent(trans->dev, trans->iml_len, trans_pcie->iml,
                                  trans_pcie->iml_dma_addr);
                trans_pcie->iml_dma_addr = 0;
                trans_pcie->iml = NULL;
        }

        iwl_pcie_ctxt_info_free_fw_img(trans);

        if (alive)
                return;

        if (!trans_pcie->ctxt_info_gen3)
                return;

        /* ctxt_info_gen3 and prph_scratch are still needed for PNVM load */
        dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info_gen3),
                          trans_pcie->ctxt_info_gen3,
                          trans_pcie->ctxt_info_dma_addr);
        trans_pcie->ctxt_info_dma_addr = 0;
        trans_pcie->ctxt_info_gen3 = NULL;

        dma_free_coherent(trans->dev, sizeof(*trans_pcie->prph_scratch),
                          trans_pcie->prph_scratch,
                          trans_pcie->prph_scratch_dma_addr);
        trans_pcie->prph_scratch_dma_addr = 0;
        trans_pcie->prph_scratch = NULL;

        /* this is needed for the entire lifetime */
        dma_free_coherent(trans->dev, PAGE_SIZE, trans_pcie->prph_info,
                          trans_pcie->prph_info_dma_addr);
        trans_pcie->prph_info_dma_addr = 0;
        trans_pcie->prph_info = NULL;
}

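/*
 * Load an unfragmented PNVM image: the (exactly two) payload chunks are
 * copied back to back into a single DMA region, with an overflow check on
 * the combined length.
 */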
static int iwl_pcie_load_payloads_continuously(struct iwl_trans *trans,
                                               const struct iwl_pnvm_image *pnvm_data,
                                               struct iwl_dram_data *dram)
{
        u32 len, len0, len1;

        if (pnvm_data->n_chunks != UNFRAGMENTED_PNVM_PAYLOADS_NUMBER) {
                IWL_DEBUG_FW(trans, "expected 2 payloads, got %d.\n",
                             pnvm_data->n_chunks);
                return -EINVAL;
        }

        len0 = pnvm_data->chunks[0].len;
        len1 = pnvm_data->chunks[1].len;
        if (len1 > 0xFFFFFFFF - len0) {
                IWL_DEBUG_FW(trans, "sizes of payloads overflow.\n");
                return -EINVAL;
        }
        len = len0 + len1;

        dram->block = iwl_pcie_ctxt_info_dma_alloc_coherent(trans, len,
                                                            &dram->physical);
        if (!dram->block) {
                IWL_DEBUG_FW(trans, "Failed to allocate PNVM DMA.\n");
                return -ENOMEM;
        }

        dram->size = len;
        memcpy(dram->block, pnvm_data->chunks[0].data, len0);
        memcpy((u8 *)dram->block + len0, pnvm_data->chunks[1].data, len1);

        return 0;
}

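/*
 * Load a fragmented PNVM image: every payload chunk gets its own DMA
 * region, and an additional descriptor region is filled with the DMA
 * addresses of all chunks so the firmware can find them.
 */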
static int iwl_pcie_load_payloads_segments(struct iwl_trans *trans,
                                           struct iwl_dram_regions *dram_regions,
                                           const struct iwl_pnvm_image *pnvm_data)
{
        struct iwl_dram_data *cur_payload_dram = &dram_regions->drams[0];
        struct iwl_dram_data *desc_dram = &dram_regions->prph_scratch_mem_desc;
        struct iwl_prph_scrath_mem_desc_addr_array *addresses;
        const void *data;
        u32 len;
        int i;

        /* allocate and init DRAM descriptors array */
        len = sizeof(struct iwl_prph_scrath_mem_desc_addr_array);
        desc_dram->block = iwl_pcie_ctxt_info_dma_alloc_coherent(trans, len,
                                                                 &desc_dram->physical);
        if (!desc_dram->block) {
                IWL_DEBUG_FW(trans, "Failed to allocate PNVM DMA.\n");
                return -ENOMEM;
        }
        desc_dram->size = len;
        memset(desc_dram->block, 0, len);

        /* allocate DRAM region for each payload */
        dram_regions->n_regions = 0;
        for (i = 0; i < pnvm_data->n_chunks; i++) {
                len = pnvm_data->chunks[i].len;
                data = pnvm_data->chunks[i].data;

                if (iwl_pcie_ctxt_info_alloc_dma(trans, data, len,
                                                 cur_payload_dram)) {
                        iwl_trans_pcie_free_pnvm_dram_regions(dram_regions,
                                                              trans->dev);
                        return -ENOMEM;
                }

                dram_regions->n_regions++;
                cur_payload_dram++;
        }

        /* fill desc with the DRAM payloads addresses */
        addresses = desc_dram->block;
        for (i = 0; i < pnvm_data->n_chunks; i++) {
                addresses->mem_descs[i] =
                        cpu_to_le64(dram_regions->drams[i].physical);
        }

        return 0;
}

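/*
 * Load the PNVM payloads into DRAM.  Depending on the
 * IWL_UCODE_TLV_CAPA_FRAGMENTED_PNVM_IMG capability the image is stored
 * either as separate per-chunk regions plus a descriptor array, or as one
 * continuous region.  The DRAM is only allocated once; later calls are
 * no-ops while trans->pnvm_loaded is set.
 */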
int iwl_trans_pcie_ctx_info_gen3_load_pnvm(struct iwl_trans *trans,
                                           const struct iwl_pnvm_image *pnvm_payloads,
                                           const struct iwl_ucode_capabilities *capa)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl =
                &trans_pcie->prph_scratch->ctrl_cfg;
        struct iwl_dram_regions *dram_regions = &trans_pcie->pnvm_data;
        int ret = 0;

        /* only allocate the DRAM if not allocated yet */
        if (trans->pnvm_loaded)
                return 0;

        if (WARN_ON(prph_sc_ctrl->pnvm_cfg.pnvm_size))
                return -EBUSY;

        if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
                return 0;

        if (!pnvm_payloads->n_chunks) {
                IWL_DEBUG_FW(trans, "no payloads\n");
                return -EINVAL;
        }

        /* save payloads in several DRAM sections */
        if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_FRAGMENTED_PNVM_IMG)) {
                ret = iwl_pcie_load_payloads_segments(trans,
                                                      dram_regions,
                                                      pnvm_payloads);
                if (!ret)
                        trans->pnvm_loaded = true;
        } else {
                /* save only in one DRAM section */
                ret = iwl_pcie_load_payloads_continuously(trans,
                                                          pnvm_payloads,
                                                          &dram_regions->drams[0]);
                if (!ret) {
                        dram_regions->n_regions = 1;
                        trans->pnvm_loaded = true;
                }
        }

        return ret;
}

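/* Sum of the sizes of all allocated payload regions. */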
static inline size_t
iwl_dram_regions_size(const struct iwl_dram_regions *dram_regions)
{
        size_t total_size = 0;
        int i;

        for (i = 0; i < dram_regions->n_regions; i++)
                total_size += dram_regions->drams[i].size;

        return total_size;
}

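/*
 * For a fragmented image, the PRPH scratch points at the descriptor array
 * and carries the total payload size.
 */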
static void iwl_pcie_set_pnvm_segments(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl =
                &trans_pcie->prph_scratch->ctrl_cfg;
        struct iwl_dram_regions *dram_regions = &trans_pcie->pnvm_data;

        prph_sc_ctrl->pnvm_cfg.pnvm_base_addr =
                cpu_to_le64(dram_regions->prph_scratch_mem_desc.physical);
        prph_sc_ctrl->pnvm_cfg.pnvm_size =
                cpu_to_le32(iwl_dram_regions_size(dram_regions));
}

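/* For an unfragmented image, point directly at the single payload region. */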
static void iwl_pcie_set_continuous_pnvm(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl =
                &trans_pcie->prph_scratch->ctrl_cfg;

        prph_sc_ctrl->pnvm_cfg.pnvm_base_addr =
                cpu_to_le64(trans_pcie->pnvm_data.drams[0].physical);
        prph_sc_ctrl->pnvm_cfg.pnvm_size =
                cpu_to_le32(trans_pcie->pnvm_data.drams[0].size);
}

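/*
 * Tell the device where the PNVM lives: called after the payloads were
 * loaded, this writes the matching (fragmented or continuous) layout into
 * the PRPH scratch.
 */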
void iwl_trans_pcie_ctx_info_gen3_set_pnvm(struct iwl_trans *trans,
                                           const struct iwl_ucode_capabilities *capa)
{
        if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
                return;

        if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_FRAGMENTED_PNVM_IMG))
                iwl_pcie_set_pnvm_segments(trans);
        else
                iwl_pcie_set_continuous_pnvm(trans);
}

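/*
 * Load the reduce-power tables into DRAM.  This mirrors the PNVM load
 * above, but fills trans_pcie->reduced_tables_data and is guarded by
 * trans->reduce_power_loaded and reduce_power_cfg.size instead.
 */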
int iwl_trans_pcie_ctx_info_gen3_load_reduce_power(struct iwl_trans *trans,
                                                   const struct iwl_pnvm_image *payloads,
                                                   const struct iwl_ucode_capabilities *capa)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl =
                &trans_pcie->prph_scratch->ctrl_cfg;
        struct iwl_dram_regions *dram_regions = &trans_pcie->reduced_tables_data;
        int ret = 0;

        /* only allocate the DRAM if not allocated yet */
        if (trans->reduce_power_loaded)
                return 0;

        if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
                return 0;

        if (WARN_ON(prph_sc_ctrl->reduce_power_cfg.size))
                return -EBUSY;

        if (!payloads->n_chunks) {
                IWL_DEBUG_FW(trans, "no payloads\n");
                return -EINVAL;
        }

        /* save payloads in several DRAM sections */
        if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_FRAGMENTED_PNVM_IMG)) {
                ret = iwl_pcie_load_payloads_segments(trans,
                                                      dram_regions,
                                                      payloads);
                if (!ret)
                        trans->reduce_power_loaded = true;
        } else {
                /* save only in one DRAM section */
                ret = iwl_pcie_load_payloads_continuously(trans,
                                                          payloads,
                                                          &dram_regions->drams[0]);
                if (!ret) {
                        dram_regions->n_regions = 1;
                        trans->reduce_power_loaded = true;
                }
        }

        return ret;
}

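/*
 * As with the PNVM setters above: program the reduce-power table location
 * into the PRPH scratch, using the descriptor array for fragmented images.
 */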
static void iwl_pcie_set_reduce_power_segments(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl =
                &trans_pcie->prph_scratch->ctrl_cfg;
        struct iwl_dram_regions *dram_regions = &trans_pcie->reduced_tables_data;

        prph_sc_ctrl->reduce_power_cfg.base_addr =
                cpu_to_le64(dram_regions->prph_scratch_mem_desc.physical);
        prph_sc_ctrl->reduce_power_cfg.size =
                cpu_to_le32(iwl_dram_regions_size(dram_regions));
}

static void iwl_pcie_set_continuous_reduce_power(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl =
                &trans_pcie->prph_scratch->ctrl_cfg;

        prph_sc_ctrl->reduce_power_cfg.base_addr =
                cpu_to_le64(trans_pcie->reduced_tables_data.drams[0].physical);
        prph_sc_ctrl->reduce_power_cfg.size =
                cpu_to_le32(trans_pcie->reduced_tables_data.drams[0].size);
}

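/*
 * Entry point matching iwl_trans_pcie_ctx_info_gen3_set_pnvm(): pick the
 * fragmented or continuous layout based on the firmware capability.
 */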
void
iwl_trans_pcie_ctx_info_gen3_set_reduce_power(struct iwl_trans *trans,
                                              const struct iwl_ucode_capabilities *capa)
{
        if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
                return;

        if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_FRAGMENTED_PNVM_IMG))
                iwl_pcie_set_reduce_power_segments(trans);
        else
                iwl_pcie_set_continuous_reduce_power(trans);
}