// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2007-2015, 2018-2020 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 */
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/wait.h>
#include <linux/seq_file.h>

#include "iwl-drv.h"
#include "iwl-trans.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-scd.h"
#include "iwl-agn-hw.h"
#include "fw/error-dump.h"
#include "fw/dbg.h"
#include "fw/api/tx.h"
#include "internal.h"
#include "iwl-fh.h"
#include "iwl-context-info-gen3.h"

/* extended range in FW SRAM */
#define IWL_FW_MEM_EXTENDED_START	0x40000
#define IWL_FW_MEM_EXTENDED_END		0x57FFF

void iwl_trans_pcie_dump_regs(struct iwl_trans *trans)
{
#define PCI_DUMP_SIZE		352
#define PCI_MEM_DUMP_SIZE	64
#define PCI_PARENT_DUMP_SIZE	524
#define PREFIX_LEN		32
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct pci_dev *pdev = trans_pcie->pci_dev;
	u32 i, pos, alloc_size, *ptr, *buf;
	char *prefix;

	if (trans_pcie->pcie_dbg_dumped_once)
		return;

	/* Should be a multiple of 4 */
	BUILD_BUG_ON(PCI_DUMP_SIZE > 4096 || PCI_DUMP_SIZE & 0x3);
	BUILD_BUG_ON(PCI_MEM_DUMP_SIZE > 4096 || PCI_MEM_DUMP_SIZE & 0x3);
	BUILD_BUG_ON(PCI_PARENT_DUMP_SIZE > 4096 || PCI_PARENT_DUMP_SIZE & 0x3);

	/* Alloc a max size buffer */
	alloc_size = PCI_ERR_ROOT_ERR_SRC + 4 + PREFIX_LEN;
	alloc_size = max_t(u32, alloc_size, PCI_DUMP_SIZE + PREFIX_LEN);
	alloc_size = max_t(u32, alloc_size, PCI_MEM_DUMP_SIZE + PREFIX_LEN);
	alloc_size = max_t(u32, alloc_size, PCI_PARENT_DUMP_SIZE + PREFIX_LEN);

	buf = kmalloc(alloc_size, GFP_ATOMIC);
	if (!buf)
		return;
	prefix = (char *)buf + alloc_size - PREFIX_LEN;

	IWL_ERR(trans, "iwlwifi transaction failed, dumping registers\n");

	/* Print wifi device registers */
	sprintf(prefix, "iwlwifi %s: ", pci_name(pdev));
	IWL_ERR(trans, "iwlwifi device config registers:\n");
	for (i = 0, ptr = buf; i < PCI_DUMP_SIZE; i += 4, ptr++)
		if (pci_read_config_dword(pdev, i, ptr))
			goto err_read;
	print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);

	IWL_ERR(trans, "iwlwifi device memory mapped registers:\n");
	for (i = 0, ptr = buf; i < PCI_MEM_DUMP_SIZE; i += 4, ptr++)
		*ptr = iwl_read32(trans, i);
	print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
	if (pos) {
		IWL_ERR(trans, "iwlwifi device AER capability structure:\n");
		for (i = 0, ptr = buf; i < PCI_ERR_ROOT_COMMAND; i += 4, ptr++)
			if (pci_read_config_dword(pdev, pos + i, ptr))
				goto err_read;
		print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET,
			       32, 4, buf, i, 0);
	}

	/* Print parent device registers next */
	if (!pdev->bus->self)
		goto out;

	pdev = pdev->bus->self;
	sprintf(prefix, "iwlwifi %s: ", pci_name(pdev));

	IWL_ERR(trans, "iwlwifi parent port (%s) config registers:\n",
		pci_name(pdev));
	for (i = 0, ptr = buf; i < PCI_PARENT_DUMP_SIZE; i += 4, ptr++)
		if (pci_read_config_dword(pdev, i, ptr))
			goto err_read;
	print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);

	/* Print root port AER registers */
	pos = 0;
	pdev = pcie_find_root_port(pdev);
	if (pdev)
		pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
	if (pos) {
		IWL_ERR(trans, "iwlwifi root port (%s) AER cap structure:\n",
			pci_name(pdev));
		sprintf(prefix, "iwlwifi %s: ", pci_name(pdev));
		for (i = 0, ptr = buf; i <= PCI_ERR_ROOT_ERR_SRC; i += 4, ptr++)
			if (pci_read_config_dword(pdev, pos + i, ptr))
				goto err_read;
		print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32,
			       4, buf, i, 0);
	}
	goto out;

err_read:
	print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);
	IWL_ERR(trans, "Read failed at 0x%X\n", i);
out:
	trans_pcie->pcie_dbg_dumped_once = 1;
	kfree(buf);
}

static void iwl_trans_pcie_sw_reset(struct iwl_trans *trans)
{
	/* Reset entire device - do controller reset (results in SHRD_HW_RST) */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
	usleep_range(5000, 6000);
}

static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans)
{
	struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;

	if (!fw_mon->size)
		return;

	dma_free_coherent(trans->dev, fw_mon->size, fw_mon->block,
			  fw_mon->physical);

	fw_mon->block = NULL;
	fw_mon->physical = 0;
	fw_mon->size = 0;
}

static void iwl_pcie_alloc_fw_monitor_block(struct iwl_trans *trans,
					    u8 max_power, u8 min_power)
{
	struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
	void *block = NULL;
	dma_addr_t physical = 0;
	u32 size = 0;
	u8 power;

	if (fw_mon->size)
		return;

	for (power = max_power; power >= min_power; power--) {
		size = BIT(power);
		block = dma_alloc_coherent(trans->dev, size, &physical,
					   GFP_KERNEL | __GFP_NOWARN);
		if (!block)
			continue;

		IWL_INFO(trans,
			 "Allocated 0x%08x bytes for firmware monitor.\n",
			 size);
		break;
	}

	if (WARN_ON_ONCE(!block))
		return;

	if (power != max_power)
		IWL_ERR(trans,
			"Sorry - debug buffer is only %luK while you requested %luK\n",
			(unsigned long)BIT(power - 10),
			(unsigned long)BIT(max_power - 10));

	fw_mon->block = block;
	fw_mon->physical = physical;
	fw_mon->size = size;
}

void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power)
{
	if (!max_power) {
		/* default max_power is maximum */
		max_power = 26;
	} else {
		max_power += 11;
	}

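	/*
	 * max_power is now the log2 of the requested buffer size in
	 * bytes: the default of 26 asks for a 64 MB buffer, and a FW
	 * TLV value of e.g. 9 maps to 9 + 11 = 20, i.e. 1 MB. The
	 * block allocator halves the request until it fits, down to
	 * the BIT(11) = 2 KB minimum passed below.
	 */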
	if (WARN(max_power > 26,
		 "External buffer size for monitor is too big %d, check the FW TLV\n",
		 max_power))
		return;

	if (trans->dbg.fw_mon.size)
		return;

	iwl_pcie_alloc_fw_monitor_block(trans, max_power, 11);
}

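/*
 * SHR ("shared") registers are reached through an indirect access
 * window: the low 16 bits of the control word select the register
 * and the top nibble selects the operation (2 = read, 3 = write),
 * as the two accessors below show.
 */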
static u32 iwl_trans_pcie_read_shr(struct iwl_trans *trans, u32 reg)
{
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
		    ((reg & 0x0000ffff) | (2 << 28)));
	return iwl_read32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG);
}

static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val)
{
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG, val);
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
		    ((reg & 0x0000ffff) | (3 << 28)));
}

static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
{
	if (trans->cfg->apmg_not_supported)
		return;

	if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
		iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
				       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
				       ~APMG_PS_CTRL_MSK_PWR_SRC);
	else
		iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
				       APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
				       ~APMG_PS_CTRL_MSK_PWR_SRC);
}

/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT	0x041

void iwl_pcie_apm_config(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u16 lctl;
	u16 cap;

	/*
	 * L0S states have been found to be unstable with our devices
	 * and in newer hardware they are not officially supported at
	 * all, so we must always set the L0S_DISABLED bit.
	 */
	iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_DISABLED);

	pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl);
	trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);

	pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap);
	trans->ltr_enabled = cap & PCI_EXP_DEVCTL2_LTR_EN;
	IWL_DEBUG_POWER(trans, "L1 %sabled - LTR %sabled\n",
			(lctl & PCI_EXP_LNKCTL_ASPM_L1) ? "En" : "Dis",
			trans->ltr_enabled ? "En" : "Dis");
}

/*
 * Start up NIC's basic functionality after it has been reset
 * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop())
 * NOTE: This does not load uCode nor start the embedded processor
 */
static int iwl_pcie_apm_init(struct iwl_trans *trans)
{
	int ret;

	IWL_DEBUG_INFO(trans, "Init card's basic functions\n");

	/*
	 * Use "set_bit" below rather than "write", to preserve any hardware
	 * bits already set by default after reset.
	 */

	/* Disable L0S exit timer (platform NMI Work/Around) */
	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000)
		iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
			    CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	/*
	 * Disable L0s without affecting L1;
	 * don't wait for ICH L0s (ICH bug W/A)
	 */
	iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress W/A) */
	iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);

	/*
	 * Enable HAP INTA (interrupt from management bus) to
	 * wake device's PCI Express link L1a -> L0s
	 */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	iwl_pcie_apm_config(trans);

	/* Configure analog phase-lock-loop before activating to D0A */
	if (trans->trans_cfg->base_params->pll_cfg)
		iwl_set_bit(trans, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL);

	ret = iwl_finish_nic_init(trans, trans->trans_cfg);
	if (ret)
		return ret;

	if (trans->cfg->host_interrupt_operation_mode) {
		/*
		 * This is a bit of an abuse: host_interrupt_operation_mode
		 * is only set for 7260 / 3160 devices, so we key this
		 * 7260 / 3160-specific oscillator workaround on it, even
		 * though the workaround itself is unrelated to the
		 * interrupt operation mode.
		 *
		 * Enable the oscillator to count wake up time for L1 exit.
		 * This consumes slightly more power (100uA) - but allows
		 * to be sure that we wake up from L1 on time.
		 *
		 * This looks weird: read twice the same register, discard the
		 * value, set a bit, and yet again, read that same register
		 * just to discard the value. But that's the way the hardware
		 * seems to like it.
		 */
		iwl_read_prph(trans, OSC_CLK);
		iwl_read_prph(trans, OSC_CLK);
		iwl_set_bits_prph(trans, OSC_CLK, OSC_CLK_FORCE_CONTROL);
		iwl_read_prph(trans, OSC_CLK);
		iwl_read_prph(trans, OSC_CLK);
	}

	/*
	 * Enable DMA clock and wait for it to stabilize.
	 *
	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0"
	 * bits do not disable clocks. This preserves any hardware
	 * bits already set by default in "CLK_CTRL_REG" after reset.
	 */
	if (!trans->cfg->apmg_not_supported) {
		iwl_write_prph(trans, APMG_CLK_EN_REG,
			       APMG_CLK_VAL_DMA_CLK_RQT);
		udelay(20);

		/* Disable L1-Active */
		iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
				  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

		/* Clear the interrupt in APMG if the NIC is in RFKILL */
		iwl_write_prph(trans, APMG_RTC_INT_STT_REG,
			       APMG_RTC_INT_STT_RFKILL);
	}

	set_bit(STATUS_DEVICE_ENABLED, &trans->status);

	return 0;
}

/*
 * Enable LP XTAL to avoid HW bug where device may consume much power if
 * FW is not loaded after device reset. LP XTAL is disabled by default
 * after device HW reset. Do it only if XTAL is fed by internal source.
 * Configure device's "persistence" mode to avoid resetting XTAL again when
 * SHRD_HW_RST occurs in S3.
 */
static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
{
	int ret;
	u32 apmg_gp1_reg;
	u32 apmg_xtal_cfg_reg;
	u32 dl_cfg_reg;

	/* Force XTAL ON */
	__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
				 CSR_GP_CNTRL_REG_FLAG_XTAL_ON);

	iwl_trans_pcie_sw_reset(trans);

	ret = iwl_finish_nic_init(trans, trans->trans_cfg);
	if (WARN_ON(ret)) {
		/* Release XTAL ON request */
		__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
					   CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
		return;
	}

	/*
	 * Clear "disable persistence" to avoid LP XTAL resetting when
	 * SHRD_HW_RST is applied in S3.
	 */
	iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
			    APMG_PCIDEV_STT_VAL_PERSIST_DIS);

	/*
	 * Force APMG XTAL to be active to prevent its disabling by HW
	 * caused by APMG idle state.
	 */
	apmg_xtal_cfg_reg = iwl_trans_pcie_read_shr(trans,
						    SHR_APMG_XTAL_CFG_REG);
	iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
				 apmg_xtal_cfg_reg |
				 SHR_APMG_XTAL_CFG_XTAL_ON_REQ);

	iwl_trans_pcie_sw_reset(trans);

	/* Enable LP XTAL by indirect access through CSR */
	apmg_gp1_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_GP1_REG);
	iwl_trans_pcie_write_shr(trans, SHR_APMG_GP1_REG, apmg_gp1_reg |
				 SHR_APMG_GP1_WF_XTAL_LP_EN |
				 SHR_APMG_GP1_CHICKEN_BIT_SELECT);

	/* Clear delay line clock power up */
	dl_cfg_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_DL_CFG_REG);
	iwl_trans_pcie_write_shr(trans, SHR_APMG_DL_CFG_REG, dl_cfg_reg &
				 ~SHR_APMG_DL_CFG_DL_CLOCK_POWER_UP);

	/*
	 * Enable persistence mode to avoid LP XTAL resetting when
	 * SHRD_HW_RST is applied in S3.
	 */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_PERSIST_MODE);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/* Activates XTAL resources monitor */
	__iwl_trans_pcie_set_bit(trans, CSR_MONITOR_CFG_REG,
				 CSR_MONITOR_XTAL_RESOURCES);

	/* Release XTAL ON request */
	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
	udelay(10);

	/* Release APMG XTAL */
	iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
				 apmg_xtal_cfg_reg &
				 ~SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
}

void iwl_pcie_apm_stop_master(struct iwl_trans *trans)
{
	int ret;

	/* stop device's busmaster DMA activity */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);

	ret = iwl_poll_bit(trans, CSR_RESET,
			   CSR_RESET_REG_FLAG_MASTER_DISABLED,
			   CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
	if (ret < 0)
		IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");

	IWL_DEBUG_INFO(trans, "stop master\n");
}

static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
{
	IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");

	if (op_mode_leave) {
		if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
			iwl_pcie_apm_init(trans);

		/* inform ME that we are leaving */
		if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000)
			iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
					  APMG_PCIDEV_STT_VAL_WAKE_ME);
		else if (trans->trans_cfg->device_family >=
			 IWL_DEVICE_FAMILY_8000) {
			iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
				    CSR_RESET_LINK_PWR_MGMT_DISABLED);
			iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
				    CSR_HW_IF_CONFIG_REG_PREPARE |
				    CSR_HW_IF_CONFIG_REG_ENABLE_PME);
			mdelay(1);
			iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
				      CSR_RESET_LINK_PWR_MGMT_DISABLED);
		}
		mdelay(5);
	}

	clear_bit(STATUS_DEVICE_ENABLED, &trans->status);

	/* Stop device's DMA activity */
	iwl_pcie_apm_stop_master(trans);

	if (trans->cfg->lp_xtal_workaround) {
		iwl_pcie_apm_lp_xtal_enable(trans);
		return;
	}

	iwl_trans_pcie_sw_reset(trans);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}

static int iwl_pcie_nic_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	/* nic_init */
	spin_lock_bh(&trans_pcie->irq_lock);
	ret = iwl_pcie_apm_init(trans);
	spin_unlock_bh(&trans_pcie->irq_lock);

	if (ret)
		return ret;

	iwl_pcie_set_pwr(trans, false);

	iwl_op_mode_nic_config(trans->op_mode);

	/* Allocate the RX queue, or reset if it is already allocated */
	ret = iwl_pcie_rx_init(trans);
	if (ret)
		return ret;

	/* Allocate or reset and init all Tx and Command queues */
	if (iwl_pcie_tx_init(trans)) {
		iwl_pcie_rx_free(trans);
		return -ENOMEM;
	}

	if (trans->trans_cfg->base_params->shadow_reg_enable) {
		/* enable shadow regs in HW */
		iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
		IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n");
	}

	return 0;
}

#define HW_READY_TIMEOUT (50)

/* Note: returns poll_bit return value, which is >= 0 if success */
static int iwl_pcie_set_hw_ready(struct iwl_trans *trans)
{
	int ret;

	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	/* See if we got it */
	ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   HW_READY_TIMEOUT);

	if (ret >= 0)
		iwl_set_bit(trans, CSR_MBOX_SET_REG, CSR_MBOX_SET_REG_OS_ALIVE);

	IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");
	return ret;
}

/* Note: returns standard 0/-ERROR code */
int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
{
	int ret;
	int t = 0;
	int iter;

	IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");

	ret = iwl_pcie_set_hw_ready(trans);
	/* If the card is ready, exit 0 */
	if (ret >= 0)
		return 0;

	iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
		    CSR_RESET_LINK_PWR_MGMT_DISABLED);
	usleep_range(1000, 2000);

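	/*
	 * Note that t is never reset between the 10 rounds below, so
	 * only the first round polls for the full ~150 ms (counting
	 * the 200 us minimum of each usleep_range()); the remaining
	 * rounds make a single attempt each, 25 ms apart.
	 */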
	for (iter = 0; iter < 10; iter++) {
		/* If HW is not ready, prepare the conditions to check again */
		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
			    CSR_HW_IF_CONFIG_REG_PREPARE);

		do {
			ret = iwl_pcie_set_hw_ready(trans);
			if (ret >= 0)
				return 0;

			usleep_range(200, 1000);
			t += 200;
		} while (t < 150000);
		msleep(25);
	}

	IWL_ERR(trans, "Couldn't prepare the card\n");

	return ret;
}

/*
 * ucode
 */
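
/*
 * Program the FH (flow handler) service channel to DMA one firmware
 * chunk from host memory (phy_addr) into device SRAM (dst_addr):
 * pause the channel, set source, destination and byte count, then
 * re-enable it. The end-of-TFD interrupt requested here is what
 * eventually wakes the waiter in iwl_pcie_load_firmware_chunk().
 */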
static void iwl_pcie_load_firmware_chunk_fh(struct iwl_trans *trans,
					    u32 dst_addr, dma_addr_t phy_addr,
					    u32 byte_cnt)
{
	iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
		    FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

	iwl_write32(trans, FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL),
		    dst_addr);

	iwl_write32(trans, FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
		    phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);

	iwl_write32(trans, FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
		    (iwl_get_dma_hi_addr(phy_addr)
			<< FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);

	iwl_write32(trans, FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
		    BIT(FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM) |
		    BIT(FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX) |
		    FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

	iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
		    FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		    FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
		    FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
}

static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans,
					u32 dst_addr, dma_addr_t phy_addr,
					u32 byte_cnt)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	trans_pcie->ucode_write_complete = false;

	if (!iwl_trans_grab_nic_access(trans))
		return -EIO;

	iwl_pcie_load_firmware_chunk_fh(trans, dst_addr, phy_addr,
					byte_cnt);
	iwl_trans_release_nic_access(trans);

	ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
				 trans_pcie->ucode_write_complete, 5 * HZ);
	if (!ret) {
		IWL_ERR(trans, "Failed to load firmware chunk!\n");
		iwl_trans_pcie_dump_regs(trans);
		return -ETIMEDOUT;
	}

	return 0;
}

static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
				 const struct fw_desc *section)
{
	u8 *v_addr;
	dma_addr_t p_addr;
	u32 offset, chunk_sz = min_t(u32, FH_MEM_TB_MAX_LENGTH, section->len);
	int ret = 0;

	IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
		     section_num);

	v_addr = dma_alloc_coherent(trans->dev, chunk_sz, &p_addr,
				    GFP_KERNEL | __GFP_NOWARN);
	if (!v_addr) {
		IWL_DEBUG_INFO(trans, "Falling back to small chunks of DMA\n");
		chunk_sz = PAGE_SIZE;
		v_addr = dma_alloc_coherent(trans->dev, chunk_sz,
					    &p_addr, GFP_KERNEL);
		if (!v_addr)
			return -ENOMEM;
	}

	for (offset = 0; offset < section->len; offset += chunk_sz) {
		u32 copy_size, dst_addr;
		bool extended_addr = false;

		copy_size = min_t(u32, chunk_sz, section->len - offset);
		dst_addr = section->offset + offset;

		if (dst_addr >= IWL_FW_MEM_EXTENDED_START &&
		    dst_addr <= IWL_FW_MEM_EXTENDED_END)
			extended_addr = true;

		if (extended_addr)
			iwl_set_bits_prph(trans, LMPM_CHICK,
					  LMPM_CHICK_EXTENDED_ADDR_SPACE);

		memcpy(v_addr, (u8 *)section->data + offset, copy_size);
		ret = iwl_pcie_load_firmware_chunk(trans, dst_addr, p_addr,
						   copy_size);

		if (extended_addr)
			iwl_clear_bits_prph(trans, LMPM_CHICK,
					    LMPM_CHICK_EXTENDED_ADDR_SPACE);

		if (ret) {
			IWL_ERR(trans,
				"Could not load the [%d] uCode section\n",
				section_num);
			break;
		}
	}

	dma_free_coherent(trans->dev, chunk_sz, v_addr, p_addr);
	return ret;
}

static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
					   const struct fw_img *image,
					   int cpu,
					   int *first_ucode_section)
{
	int shift_param;
	int i, ret = 0, sec_num = 0x1;
	u32 val, last_read_idx = 0;

	if (cpu == 1) {
		shift_param = 0;
		*first_ucode_section = 0;
	} else {
		shift_param = 16;
		(*first_ucode_section)++;
	}

	for (i = *first_ucode_section; i < image->num_sec; i++) {
		last_read_idx = i;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
		 * CPU1 to CPU2.
		 * PAGING_SEPARATOR_SECTION delimiter - separate between
		 * CPU2 non paged to CPU2 paging sec.
		 */
		if (!image->sec[i].data ||
		    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
		    image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
			IWL_DEBUG_FW(trans,
				     "Break since Data not valid or Empty section, sec = %d\n",
				     i);
			break;
		}

		ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
		if (ret)
			return ret;

		/* Notify ucode of loaded section number and status */
		val = iwl_read_direct32(trans, FH_UCODE_LOAD_STATUS);
		val = val | (sec_num << shift_param);
		iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, val);

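		/* sec_num accumulates one set bit per section loaded */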
		sec_num = (sec_num << 1) | 0x1;
	}

	*first_ucode_section = last_read_idx;

	iwl_enable_interrupts(trans);

	if (trans->trans_cfg->use_tfh) {
		if (cpu == 1)
			iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS,
				       0xFFFF);
		else
			iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS,
				       0xFFFFFFFF);
	} else {
		if (cpu == 1)
			iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS,
					   0xFFFF);
		else
			iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS,
					   0xFFFFFFFF);
	}

	return 0;
}

static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
				      const struct fw_img *image,
				      int cpu,
				      int *first_ucode_section)
{
	int i, ret = 0;
	u32 last_read_idx = 0;

	if (cpu == 1)
		*first_ucode_section = 0;
	else
		(*first_ucode_section)++;

	for (i = *first_ucode_section; i < image->num_sec; i++) {
		last_read_idx = i;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
		 * CPU1 to CPU2.
		 * PAGING_SEPARATOR_SECTION delimiter - separate between
		 * CPU2 non paged to CPU2 paging sec.
		 */
		if (!image->sec[i].data ||
		    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
		    image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
			IWL_DEBUG_FW(trans,
				     "Break since Data not valid or Empty section, sec = %d\n",
				     i);
			break;
		}

		ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
		if (ret)
			return ret;
	}

	*first_ucode_section = last_read_idx;

	return 0;
}

static void iwl_pcie_apply_destination_ini(struct iwl_trans *trans)
{
	enum iwl_fw_ini_allocation_id alloc_id = IWL_FW_INI_ALLOCATION_ID_DBGC1;
	struct iwl_fw_ini_allocation_tlv *fw_mon_cfg =
		&trans->dbg.fw_mon_cfg[alloc_id];
	struct iwl_dram_data *frag;

	if (!iwl_trans_dbg_ini_valid(trans))
		return;

	if (le32_to_cpu(fw_mon_cfg->buf_location) ==
	    IWL_FW_INI_LOCATION_SRAM_PATH) {
		IWL_DEBUG_FW(trans, "WRT: Applying SMEM buffer destination\n");
		/* set sram monitor by enabling bit 7 */
		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
			    CSR_HW_IF_CONFIG_REG_BIT_MONITOR_SRAM);

		return;
	}

	if (le32_to_cpu(fw_mon_cfg->buf_location) !=
	    IWL_FW_INI_LOCATION_DRAM_PATH ||
	    !trans->dbg.fw_mon_ini[alloc_id].num_frags)
		return;

	frag = &trans->dbg.fw_mon_ini[alloc_id].frags[0];

	IWL_DEBUG_FW(trans, "WRT: Applying DRAM destination (alloc_id=%u)\n",
		     alloc_id);

	iwl_write_umac_prph(trans, MON_BUFF_BASE_ADDR_VER2,
			    frag->physical >> MON_BUFF_SHIFT_VER2);
	iwl_write_umac_prph(trans, MON_BUFF_END_ADDR_VER2,
			    (frag->physical + frag->size - 256) >>
			    MON_BUFF_SHIFT_VER2);
}

void iwl_pcie_apply_destination(struct iwl_trans *trans)
{
	const struct iwl_fw_dbg_dest_tlv_v1 *dest = trans->dbg.dest_tlv;
	const struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
	int i;

	if (iwl_trans_dbg_ini_valid(trans)) {
		iwl_pcie_apply_destination_ini(trans);
		return;
	}

	IWL_INFO(trans, "Applying debug destination %s\n",
		 get_fw_dbg_mode_string(dest->monitor_mode));

	if (dest->monitor_mode == EXTERNAL_MODE)
		iwl_pcie_alloc_fw_monitor(trans, dest->size_power);
	else
		IWL_WARN(trans, "PCI should have external buffer debug\n");

	for (i = 0; i < trans->dbg.n_dest_reg; i++) {
		u32 addr = le32_to_cpu(dest->reg_ops[i].addr);
		u32 val = le32_to_cpu(dest->reg_ops[i].val);

		switch (dest->reg_ops[i].op) {
		case CSR_ASSIGN:
			iwl_write32(trans, addr, val);
			break;
		case CSR_SETBIT:
			iwl_set_bit(trans, addr, BIT(val));
			break;
		case CSR_CLEARBIT:
			iwl_clear_bit(trans, addr, BIT(val));
			break;
		case PRPH_ASSIGN:
			iwl_write_prph(trans, addr, val);
			break;
		case PRPH_SETBIT:
			iwl_set_bits_prph(trans, addr, BIT(val));
			break;
		case PRPH_CLEARBIT:
			iwl_clear_bits_prph(trans, addr, BIT(val));
			break;
		case PRPH_BLOCKBIT:
			if (iwl_read_prph(trans, addr) & BIT(val)) {
				IWL_ERR(trans,
					"BIT(%u) in address 0x%x is 1, stopping FW configuration\n",
					val, addr);
				goto monitor;
			}
			break;
		default:
			IWL_ERR(trans, "FW debug - unknown OP %d\n",
				dest->reg_ops[i].op);
			break;
		}
	}

monitor:
	if (dest->monitor_mode == EXTERNAL_MODE && fw_mon->size) {
		iwl_write_prph(trans, le32_to_cpu(dest->base_reg),
			       fw_mon->physical >> dest->base_shift);
		if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
			iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
				       (fw_mon->physical + fw_mon->size -
					256) >> dest->end_shift);
		else
			iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
				       (fw_mon->physical + fw_mon->size) >>
				       dest->end_shift);
	}
}

static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
				     const struct fw_img *image)
{
	int ret = 0;
	int first_ucode_section;

	IWL_DEBUG_FW(trans, "working with %s CPU\n",
		     image->is_dual_cpus ? "Dual" : "Single");

	/* load to FW the binary non secured sections of CPU1 */
	ret = iwl_pcie_load_cpu_sections(trans, image, 1, &first_ucode_section);
	if (ret)
		return ret;

	if (image->is_dual_cpus) {
		/* set CPU2 header address */
		iwl_write_prph(trans,
			       LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
			       LMPM_SECURE_CPU2_HDR_MEM_SPACE);

		/* load to FW the binary sections of CPU2 */
		ret = iwl_pcie_load_cpu_sections(trans, image, 2,
						 &first_ucode_section);
		if (ret)
			return ret;
	}

	if (iwl_pcie_dbg_on(trans))
		iwl_pcie_apply_destination(trans);

	iwl_enable_interrupts(trans);

	/* release CPU reset */
	iwl_write32(trans, CSR_RESET, 0);

	return 0;
}

static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans,
					  const struct fw_img *image)
{
	int ret = 0;
	int first_ucode_section;

	IWL_DEBUG_FW(trans, "working with %s CPU\n",
		     image->is_dual_cpus ? "Dual" : "Single");

	if (iwl_pcie_dbg_on(trans))
		iwl_pcie_apply_destination(trans);

	IWL_DEBUG_POWER(trans, "Original WFPM value = 0x%08X\n",
			iwl_read_prph(trans, WFPM_GP2));

	/*
	 * Set default value. On resume, reading back the values that
	 * were zeroed can provide debug data on the resume flow.
	 * This is for debugging only and has no functional impact.
	 */
	iwl_write_prph(trans, WFPM_GP2, 0x01010101);

	/* configure the ucode to be ready to get the secured image */
	/* release CPU reset */
	iwl_write_prph(trans, RELEASE_CPU_RESET, RELEASE_CPU_RESET_BIT);

	/* load to FW the binary Secured sections of CPU1 */
	ret = iwl_pcie_load_cpu_sections_8000(trans, image, 1,
					      &first_ucode_section);
	if (ret)
		return ret;

	/* load to FW the binary sections of CPU2 */
	return iwl_pcie_load_cpu_sections_8000(trans, image, 2,
					       &first_ucode_section);
}

bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool hw_rfkill = iwl_is_rfkill_set(trans);
	bool prev = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
	bool report;

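	/*
	 * STATUS_RFKILL_OPMODE is sticky: once set it is only cleared
	 * when the switch is off and the op_mode has already brought
	 * the device down, so the op_mode cannot miss a kill
	 * notification that arrived while it was up.
	 */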
	if (hw_rfkill) {
		set_bit(STATUS_RFKILL_HW, &trans->status);
		set_bit(STATUS_RFKILL_OPMODE, &trans->status);
	} else {
		clear_bit(STATUS_RFKILL_HW, &trans->status);
		if (trans_pcie->opmode_down)
			clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
	}

	report = test_bit(STATUS_RFKILL_OPMODE, &trans->status);

	if (prev != report)
		iwl_trans_pcie_rf_kill(trans, report);

	return hw_rfkill;
}

struct iwl_causes_list {
	u32 cause_num;
	u32 mask_reg;
	u8 addr;
};

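/*
 * Static mapping of interrupt causes to the MSI-X IVAR table:
 * cause_num is the bit to unmask in the corresponding mask register,
 * and addr is the byte offset of that cause's entry in the IVAR table.
 */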
static struct iwl_causes_list causes_list[] = {
	{MSIX_FH_INT_CAUSES_D2S_CH0_NUM,	CSR_MSIX_FH_INT_MASK_AD, 0},
	{MSIX_FH_INT_CAUSES_D2S_CH1_NUM,	CSR_MSIX_FH_INT_MASK_AD, 0x1},
	{MSIX_FH_INT_CAUSES_S2D,		CSR_MSIX_FH_INT_MASK_AD, 0x3},
	{MSIX_FH_INT_CAUSES_FH_ERR,		CSR_MSIX_FH_INT_MASK_AD, 0x5},
	{MSIX_HW_INT_CAUSES_REG_ALIVE,		CSR_MSIX_HW_INT_MASK_AD, 0x10},
	{MSIX_HW_INT_CAUSES_REG_WAKEUP,		CSR_MSIX_HW_INT_MASK_AD, 0x11},
	{MSIX_HW_INT_CAUSES_REG_RESET_DONE,	CSR_MSIX_HW_INT_MASK_AD, 0x12},
	{MSIX_HW_INT_CAUSES_REG_CT_KILL,	CSR_MSIX_HW_INT_MASK_AD, 0x16},
	{MSIX_HW_INT_CAUSES_REG_RF_KILL,	CSR_MSIX_HW_INT_MASK_AD, 0x17},
	{MSIX_HW_INT_CAUSES_REG_PERIODIC,	CSR_MSIX_HW_INT_MASK_AD, 0x18},
	{MSIX_HW_INT_CAUSES_REG_SW_ERR,		CSR_MSIX_HW_INT_MASK_AD, 0x29},
	{MSIX_HW_INT_CAUSES_REG_SCD,		CSR_MSIX_HW_INT_MASK_AD, 0x2A},
	{MSIX_HW_INT_CAUSES_REG_FH_TX,		CSR_MSIX_HW_INT_MASK_AD, 0x2B},
	{MSIX_HW_INT_CAUSES_REG_HW_ERR,		CSR_MSIX_HW_INT_MASK_AD, 0x2D},
	{MSIX_HW_INT_CAUSES_REG_HAP,		CSR_MSIX_HW_INT_MASK_AD, 0x2E},
};

static void iwl_pcie_map_non_rx_causes(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int val = trans_pcie->def_irq | MSIX_NON_AUTO_CLEAR_CAUSE;
	int i, arr_size = ARRAY_SIZE(causes_list);
	struct iwl_causes_list *causes = causes_list;

	/*
	 * Access all non RX causes and map them to the default irq.
	 * In case we are missing at least one interrupt vector,
	 * the first interrupt vector will serve non-RX and FBQ causes.
	 */
	for (i = 0; i < arr_size; i++) {
		iwl_write8(trans, CSR_MSIX_IVAR(causes[i].addr), val);
		iwl_clear_bit(trans, causes[i].mask_reg,
			      causes[i].cause_num);
	}
}

static void iwl_pcie_map_rx_causes(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 offset =
		trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;
	u32 val, idx;

	/*
	 * The first RX queue - the fallback queue, which is designated
	 * for management frames, command responses, etc. - is always
	 * mapped to the first interrupt vector. The other RX queues are
	 * mapped to the other (N - 2) interrupt vectors.
	 */
	val = BIT(MSIX_FH_INT_CAUSES_Q(0));
	for (idx = 1; idx < trans->num_rx_queues; idx++) {
		iwl_write8(trans, CSR_MSIX_RX_IVAR(idx),
			   MSIX_FH_INT_CAUSES_Q(idx - offset));
		val |= BIT(MSIX_FH_INT_CAUSES_Q(idx));
	}
	iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~val);

	val = MSIX_FH_INT_CAUSES_Q(0);
	if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX)
		val |= MSIX_NON_AUTO_CLEAR_CAUSE;
	iwl_write8(trans, CSR_MSIX_RX_IVAR(0), val);

	if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS)
		iwl_write8(trans, CSR_MSIX_RX_IVAR(1), val);
}

void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie)
{
	struct iwl_trans *trans = trans_pcie->trans;

	if (!trans_pcie->msix_enabled) {
		if (trans->trans_cfg->mq_rx_supported &&
		    test_bit(STATUS_DEVICE_ENABLED, &trans->status))
			iwl_write_umac_prph(trans, UREG_CHICK,
					    UREG_CHICK_MSI_ENABLE);
		return;
	}
	/*
	 * The IVAR table needs to be configured again after reset,
	 * but if the device is disabled, we can't write to
	 * prph.
	 */
	if (test_bit(STATUS_DEVICE_ENABLED, &trans->status))
		iwl_write_umac_prph(trans, UREG_CHICK, UREG_CHICK_MSIX_ENABLE);

	/*
	 * Each cause from the causes list above and the RX causes is
	 * represented as a byte in the IVAR table. The first nibble
	 * represents the bound interrupt vector of the cause, the second
	 * represents no auto clear for this cause. This will be set if its
	 * interrupt vector is bound to serve other causes.
	 */
	iwl_pcie_map_rx_causes(trans);

	iwl_pcie_map_non_rx_causes(trans);
}

static void iwl_pcie_init_msix(struct iwl_trans_pcie *trans_pcie)
{
	struct iwl_trans *trans = trans_pcie->trans;

	iwl_pcie_conf_msix_hw(trans_pcie);

	if (!trans_pcie->msix_enabled)
		return;

	trans_pcie->fh_init_mask = ~iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD);
	trans_pcie->fh_mask = trans_pcie->fh_init_mask;
	trans_pcie->hw_init_mask = ~iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD);
	trans_pcie->hw_mask = trans_pcie->hw_init_mask;
}

static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->mutex);

	if (trans_pcie->is_down)
		return;

	trans_pcie->is_down = true;

	/* tell the device to stop sending interrupts */
	iwl_disable_interrupts(trans);

	/* device going down, Stop using ICT table */
	iwl_pcie_disable_ict(trans);

	/*
	 * If a HW restart happens during firmware loading,
	 * then the firmware loading might call this function
	 * and later it might be called again due to the
	 * restart. So don't process again if the device is
	 * already dead.
	 */
	if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
		IWL_DEBUG_INFO(trans,
			       "DEVICE_ENABLED bit was set and is now cleared\n");
		iwl_pcie_tx_stop(trans);
		iwl_pcie_rx_stop(trans);

		/* Power-down device's busmaster DMA clocks */
		if (!trans->cfg->apmg_not_supported) {
			iwl_write_prph(trans, APMG_CLK_DIS_REG,
				       APMG_CLK_VAL_DMA_CLK_RQT);
			udelay(5);
		}
	}

	/* Make sure (redundant) we've released our request to stay awake */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwl_pcie_apm_stop(trans, false);

	iwl_trans_pcie_sw_reset(trans);

	/*
	 * Upon stop, the IVAR table gets erased, so msi-x won't
	 * work. This causes a bug in RF-KILL flows, since the interrupt
	 * that enables radio won't fire on the correct irq, and the
	 * driver won't be able to handle the interrupt.
	 * Configure the IVAR table again after reset.
	 */
	iwl_pcie_conf_msix_hw(trans_pcie);

	/*
	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * This is a bug in certain versions of the hardware.
	 * Certain devices also keep sending HW RF kill interrupt all
	 * the time, unless the interrupt is ACKed even if the interrupt
	 * should be masked. Re-ACK all the interrupts here.
	 */
	iwl_disable_interrupts(trans);

	/* clear all status bits */
	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
	clear_bit(STATUS_INT_ENABLED, &trans->status);
	clear_bit(STATUS_TPOWER_PMI, &trans->status);

	/*
	 * Even if we stop the HW, we still want the RF kill
	 * interrupt
	 */
	iwl_enable_rfkill_int(trans);

	/* re-take ownership to prevent other users from stealing the device */
	iwl_pcie_prepare_card_hw(trans);
}

void iwl_pcie_synchronize_irqs(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans_pcie->msix_enabled) {
		int i;

		for (i = 0; i < trans_pcie->alloc_vecs; i++)
			synchronize_irq(trans_pcie->msix_entries[i].vector);
	} else {
		synchronize_irq(trans_pcie->pci_dev->irq);
	}
}

static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
				   const struct fw_img *fw, bool run_in_rfkill)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool hw_rfkill;
	int ret;

	/* This may fail if AMT took ownership of the device */
	if (iwl_pcie_prepare_card_hw(trans)) {
		IWL_WARN(trans, "Exit HW not ready\n");
		ret = -EIO;
		goto out;
	}

	iwl_enable_rfkill_int(trans);

	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

	/*
	 * We enabled the RF-Kill interrupt and the handler may very
	 * well be running. Disable the interrupts to make sure no other
	 * interrupt can be fired.
	 */
	iwl_disable_interrupts(trans);

	/* Make sure it finished running */
	iwl_pcie_synchronize_irqs(trans);

	mutex_lock(&trans_pcie->mutex);

	/* If platform's RF_KILL switch is NOT set to KILL */
	hw_rfkill = iwl_pcie_check_hw_rf_kill(trans);
	if (hw_rfkill && !run_in_rfkill) {
		ret = -ERFKILL;
		goto out;
	}

	/* Someone called stop_device, don't try to start_fw */
	if (trans_pcie->is_down) {
		IWL_WARN(trans,
			 "Can't start_fw since the HW hasn't been started\n");
		ret = -EIO;
		goto out;
	}

	/* make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

	ret = iwl_pcie_nic_init(trans);
	if (ret) {
		IWL_ERR(trans, "Unable to init nic\n");
		goto out;
	}

	/*
	 * Now, we load the firmware and don't want to be interrupted, even
	 * by the RF-Kill interrupt (hence mask all the interrupt besides the
	 * FH_TX interrupt which is needed to load the firmware). If the
	 * RF-Kill switch is toggled, we will find out after having loaded
	 * the firmware and return the proper value to the caller.
	 */
	iwl_enable_fw_load_int(trans);

	/* really make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	/* Load the given image to the HW */
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
		ret = iwl_pcie_load_given_ucode_8000(trans, fw);
	else
		ret = iwl_pcie_load_given_ucode(trans, fw);

	/* re-check RF-Kill state since we may have missed the interrupt */
	hw_rfkill = iwl_pcie_check_hw_rf_kill(trans);
	if (hw_rfkill && !run_in_rfkill)
		ret = -ERFKILL;

out:
	mutex_unlock(&trans_pcie->mutex);
	return ret;
}

static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
	iwl_pcie_reset_ict(trans);
	iwl_pcie_tx_start(trans, scd_addr);
}

void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
				       bool was_in_rfkill)
{
	bool hw_rfkill;

	/*
	 * Check again since the RF kill state may have changed while
	 * all the interrupts were disabled, in this case we couldn't
	 * receive the RF kill interrupt and update the state in the
	 * op_mode.
	 * Don't call the op_mode if the rfkill state hasn't changed.
	 * This allows the op_mode to call stop_device from the rfkill
	 * notification without endless recursion. Under very rare
	 * circumstances, we might have a small recursion if the rfkill
	 * state changed exactly now while we were called from stop_device.
	 * This is very unlikely but can happen and is supported.
	 */
	hw_rfkill = iwl_is_rfkill_set(trans);
	if (hw_rfkill) {
		set_bit(STATUS_RFKILL_HW, &trans->status);
		set_bit(STATUS_RFKILL_OPMODE, &trans->status);
	} else {
		clear_bit(STATUS_RFKILL_HW, &trans->status);
		clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
	}
	if (hw_rfkill != was_in_rfkill)
		iwl_trans_pcie_rf_kill(trans, hw_rfkill);
}

static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool was_in_rfkill;

	iwl_op_mode_time_point(trans->op_mode,
			       IWL_FW_INI_TIME_POINT_HOST_DEVICE_DISABLE,
			       NULL);

	mutex_lock(&trans_pcie->mutex);
	trans_pcie->opmode_down = true;
	was_in_rfkill = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
	_iwl_trans_pcie_stop_device(trans);
	iwl_trans_pcie_handle_stop_rfkill(trans, was_in_rfkill);
	mutex_unlock(&trans_pcie->mutex);
}

void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
{
	struct iwl_trans_pcie __maybe_unused *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->mutex);

	IWL_WARN(trans, "reporting RF_KILL (radio %s)\n",
		 state ? "disabled" : "enabled");
	if (iwl_op_mode_hw_rf_kill(trans->op_mode, state)) {
		if (trans->trans_cfg->gen2)
			_iwl_trans_pcie_gen2_stop_device(trans);
		else
			_iwl_trans_pcie_stop_device(trans);
	}
}

void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans,
				  bool test, bool reset)
{
	iwl_disable_interrupts(trans);

	/*
	 * in testing mode, the host stays awake and the
	 * hardware won't be reset (not even partially)
	 */
	if (test)
		return;

	iwl_pcie_disable_ict(trans);

	iwl_pcie_synchronize_irqs(trans);

	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	if (reset) {
		/*
		 * reset TX queues -- some of their registers reset during S3
		 * so if we don't reset everything here the D3 image would try
		 * to execute some invalid memory upon resume
		 */
		iwl_trans_pcie_tx_reset(trans);
	}

	iwl_pcie_set_pwr(trans, true);
}

static int iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test,
				     bool reset)
{
	int ret;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!reset)
		/* Enable persistence mode to avoid reset */
		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
			    CSR_HW_IF_CONFIG_REG_PERSIST_MODE);

	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
		iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6,
				    UREG_DOORBELL_TO_ISR6_SUSPEND);

		ret = wait_event_timeout(trans_pcie->sx_waitq,
					 trans_pcie->sx_complete, 2 * HZ);
		/*
		 * Invalidate it toward resume.
		 */
		trans_pcie->sx_complete = false;

		if (!ret) {
			IWL_ERR(trans, "Timeout entering D3\n");
			return -ETIMEDOUT;
		}
	}
	iwl_pcie_d3_complete_suspend(trans, test, reset);

	return 0;
}

static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
				    enum iwl_d3_status *status,
				    bool test, bool reset)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 val;
	int ret;

	if (test) {
		iwl_enable_interrupts(trans);
		*status = IWL_D3_STATUS_ALIVE;
		goto out;
	}

	iwl_set_bit(trans, CSR_GP_CNTRL,
		    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	ret = iwl_finish_nic_init(trans, trans->trans_cfg);
	if (ret)
		return ret;

	/*
	 * Reconfigure IVAR table in case of MSIX or reset ict table in
	 * MSI mode since HW reset erased it.
	 * Also enables interrupts - none will happen as
	 * the device doesn't know we're waking it up, only when
	 * the opmode actually tells it after this call.
	 */
	iwl_pcie_conf_msix_hw(trans_pcie);
	if (!trans_pcie->msix_enabled)
		iwl_pcie_reset_ict(trans);
	iwl_enable_interrupts(trans);

	iwl_pcie_set_pwr(trans, false);

	if (!reset) {
		iwl_clear_bit(trans, CSR_GP_CNTRL,
			      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	} else {
		iwl_trans_pcie_tx_reset(trans);

		ret = iwl_pcie_rx_init(trans);
		if (ret) {
			IWL_ERR(trans,
				"Failed to resume the device (RX reset)\n");
			return ret;
		}
	}

	IWL_DEBUG_POWER(trans, "WFPM value upon resume = 0x%08X\n",
			iwl_read_umac_prph(trans, WFPM_GP2));

	val = iwl_read32(trans, CSR_RESET);
	if (val & CSR_RESET_REG_FLAG_NEVO_RESET)
		*status = IWL_D3_STATUS_RESET;
	else
		*status = IWL_D3_STATUS_ALIVE;

out:
	if (*status == IWL_D3_STATUS_ALIVE &&
	    trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
		trans_pcie->sx_complete = false;
		iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6,
				    UREG_DOORBELL_TO_ISR6_RESUME);

		ret = wait_event_timeout(trans_pcie->sx_waitq,
					 trans_pcie->sx_complete, 2 * HZ);
		/*
		 * Invalidate it toward next suspend.
		 */
		trans_pcie->sx_complete = false;

		if (!ret) {
			IWL_ERR(trans, "Timeout exiting D3\n");
			return -ETIMEDOUT;
		}
	}
	return 0;
}

static void
iwl_pcie_set_interrupt_capa(struct pci_dev *pdev,
			    struct iwl_trans *trans,
			    const struct iwl_cfg_trans_params *cfg_trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int max_irqs, num_irqs, i, ret;
	u16 pci_cmd;
	u32 max_rx_queues = IWL_MAX_RX_HW_QUEUES;

	if (!cfg_trans->mq_rx_supported)
		goto enable_msi;

	if (cfg_trans->device_family <= IWL_DEVICE_FAMILY_9000)
		max_rx_queues = IWL_9000_MAX_RX_HW_QUEUES;

	max_irqs = min_t(u32, num_online_cpus() + 2, max_rx_queues);
	for (i = 0; i < max_irqs; i++)
		trans_pcie->msix_entries[i].entry = i;

	num_irqs = pci_enable_msix_range(pdev, trans_pcie->msix_entries,
					 MSIX_MIN_INTERRUPT_VECTORS,
					 max_irqs);
	if (num_irqs < 0) {
		IWL_DEBUG_INFO(trans,
			       "Failed to enable msi-x mode (ret %d). Moving to msi mode.\n",
			       num_irqs);
		goto enable_msi;
	}
	trans_pcie->def_irq = (num_irqs == max_irqs) ? num_irqs - 1 : 0;

	IWL_DEBUG_INFO(trans,
		       "MSI-X enabled. %d interrupt vectors were allocated\n",
		       num_irqs);

	/*
	 * In case the OS provides fewer interrupts than requested, different
	 * causes will share the same interrupt vector as follows:
	 * One interrupt less: non rx causes shared with FBQ.
	 * Two interrupts less: non rx causes shared with FBQ and RSS.
	 * More than two interrupts: we will use fewer RSS queues.
	 */
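	/*
	 * Worked example (hypothetical vector counts): if max_irqs is
	 * 6 but the OS grants only 4 vectors, num_irqs (4) is
	 * <= max_irqs - 2, so the first vector is shared by the non-RX
	 * causes, the FBQ and the first RSS queue, and num_rx_queues
	 * becomes num_irqs + 1 = 5.
	 */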
1597 if (num_irqs <= max_irqs - 2) {
1598 trans_pcie->trans->num_rx_queues = num_irqs + 1;
1599 trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX |
1600 IWL_SHARED_IRQ_FIRST_RSS;
1601 } else if (num_irqs == max_irqs - 1) {
1602 trans_pcie->trans->num_rx_queues = num_irqs;
1603 trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX;
1604 } else {
1605 trans_pcie->trans->num_rx_queues = num_irqs - 1;
1606 }
1607
1608 IWL_DEBUG_INFO(trans,
1609 "MSI-X enabled with rx queues %d, vec mask 0x%x\n",
1610 trans_pcie->trans->num_rx_queues, trans_pcie->shared_vec_mask);
1611
1612 WARN_ON(trans_pcie->trans->num_rx_queues > IWL_MAX_RX_HW_QUEUES);
1613
1614 trans_pcie->alloc_vecs = num_irqs;
1615 trans_pcie->msix_enabled = true;
1616 return;
1617
1618 enable_msi:
1619 ret = pci_enable_msi(pdev);
1620 if (ret) {
1621 dev_err(&pdev->dev, "pci_enable_msi failed - %d\n", ret);
1622 /* enable rfkill interrupt: hw bug w/a */
1623 pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
1624 if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
1625 pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
1626 pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
1627 }
1628 }
1629 }
1630
iwl_pcie_irq_set_affinity(struct iwl_trans * trans)1631 static void iwl_pcie_irq_set_affinity(struct iwl_trans *trans)
1632 {
1633 int iter_rx_q, i, ret, cpu, offset;
1634 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1635
1636 i = trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 0 : 1;
1637 iter_rx_q = trans_pcie->trans->num_rx_queues - 1 + i;
1638 offset = 1 + i;
1639 for (; i < iter_rx_q ; i++) {
1640 /*
1641 * Get the cpu prior to the place to search
1642 * (i.e. return will be > i - 1).
1643 */
1644 cpu = cpumask_next(i - offset, cpu_online_mask);
1645 cpumask_set_cpu(cpu, &trans_pcie->affinity_mask[i]);
1646 ret = irq_set_affinity_hint(trans_pcie->msix_entries[i].vector,
1647 &trans_pcie->affinity_mask[i]);
1648 if (ret)
1649 IWL_ERR(trans_pcie->trans,
1650 "Failed to set affinity mask for IRQ %d\n",
1651 i);
1652 }
1653 }
1654
iwl_pcie_init_msix_handler(struct pci_dev * pdev,struct iwl_trans_pcie * trans_pcie)1655 static int iwl_pcie_init_msix_handler(struct pci_dev *pdev,
1656 struct iwl_trans_pcie *trans_pcie)
1657 {
1658 int i;
1659
1660 for (i = 0; i < trans_pcie->alloc_vecs; i++) {
1661 int ret;
1662 struct msix_entry *msix_entry;
1663 const char *qname = queue_name(&pdev->dev, trans_pcie, i);
1664
1665 if (!qname)
1666 return -ENOMEM;
1667
1668 msix_entry = &trans_pcie->msix_entries[i];
1669 ret = devm_request_threaded_irq(&pdev->dev,
1670 msix_entry->vector,
1671 iwl_pcie_msix_isr,
1672 (i == trans_pcie->def_irq) ?
1673 iwl_pcie_irq_msix_handler :
1674 iwl_pcie_irq_rx_msix_handler,
1675 IRQF_SHARED,
1676 qname,
1677 msix_entry);
1678 if (ret) {
1679 IWL_ERR(trans_pcie->trans,
1680 "Error allocating IRQ %d\n", i);
1681
1682 return ret;
1683 }
1684 }
1685 iwl_pcie_irq_set_affinity(trans_pcie->trans);
1686
1687 return 0;
1688 }
1689
iwl_trans_pcie_clear_persistence_bit(struct iwl_trans * trans)1690 static int iwl_trans_pcie_clear_persistence_bit(struct iwl_trans *trans)
1691 {
1692 u32 hpm, wprot;
1693
1694 switch (trans->trans_cfg->device_family) {
1695 case IWL_DEVICE_FAMILY_9000:
1696 wprot = PREG_PRPH_WPROT_9000;
1697 break;
1698 case IWL_DEVICE_FAMILY_22000:
1699 wprot = PREG_PRPH_WPROT_22000;
1700 break;
1701 default:
1702 return 0;
1703 }
1704
1705 hpm = iwl_read_umac_prph_no_grab(trans, HPM_DEBUG);
1706 if (hpm != 0xa5a5a5a0 && (hpm & PERSISTENCE_BIT)) {
1707 u32 wprot_val = iwl_read_umac_prph_no_grab(trans, wprot);
1708
1709 if (wprot_val & PREG_WFPM_ACCESS) {
1710 IWL_ERR(trans,
1711 "Error, can not clear persistence bit\n");
1712 return -EPERM;
1713 }
1714 iwl_write_umac_prph_no_grab(trans, HPM_DEBUG,
1715 hpm & ~PERSISTENCE_BIT);
1716 }
1717
1718 return 0;
1719 }
1720
iwl_pcie_gen2_force_power_gating(struct iwl_trans * trans)1721 static int iwl_pcie_gen2_force_power_gating(struct iwl_trans *trans)
1722 {
1723 int ret;
1724
1725 ret = iwl_finish_nic_init(trans, trans->trans_cfg);
1726 if (ret < 0)
1727 return ret;
1728
1729 iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG,
1730 HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
1731 udelay(20);
1732 iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG,
1733 HPM_HIPM_GEN_CFG_CR_PG_EN |
1734 HPM_HIPM_GEN_CFG_CR_SLP_EN);
1735 udelay(20);
1736 iwl_clear_bits_prph(trans, HPM_HIPM_GEN_CFG,
1737 HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
1738
1739 iwl_trans_pcie_sw_reset(trans);
1740
1741 return 0;
1742 }
1743
_iwl_trans_pcie_start_hw(struct iwl_trans * trans)1744 static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans)
1745 {
1746 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1747 int err;
1748
1749 lockdep_assert_held(&trans_pcie->mutex);
1750
1751 err = iwl_pcie_prepare_card_hw(trans);
1752 if (err) {
1753 IWL_ERR(trans, "Error while preparing HW: %d\n", err);
1754 return err;
1755 }
1756
1757 err = iwl_trans_pcie_clear_persistence_bit(trans);
1758 if (err)
1759 return err;
1760
1761 iwl_trans_pcie_sw_reset(trans);
1762
1763 if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000 &&
1764 trans->trans_cfg->integrated) {
1765 err = iwl_pcie_gen2_force_power_gating(trans);
1766 if (err)
1767 return err;
1768 }
1769
1770 err = iwl_pcie_apm_init(trans);
1771 if (err)
1772 return err;
1773
1774 iwl_pcie_init_msix(trans_pcie);
1775
1776 /* From now on, the op_mode will be kept updated about RF kill state */
1777 iwl_enable_rfkill_int(trans);
1778
1779 trans_pcie->opmode_down = false;
1780
1781 	/* Set is_down to false here so that... */
1782 trans_pcie->is_down = false;
1783
1784 	/* ...rfkill can call stop_device and set it back to true if needed */
1785 iwl_pcie_check_hw_rf_kill(trans);
1786
1787 return 0;
1788 }
1789
1790 static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
1791 {
1792 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1793 int ret;
1794
1795 mutex_lock(&trans_pcie->mutex);
1796 ret = _iwl_trans_pcie_start_hw(trans);
1797 mutex_unlock(&trans_pcie->mutex);
1798
1799 return ret;
1800 }
1801
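/*
 * Called when the op_mode detaches from the transport: disable all
 * interrupts, stop the APM and tear down the ICT, then wait for any
 * in-flight IRQ handlers to finish.
 */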
1802 static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans)
1803 {
1804 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1805
1806 mutex_lock(&trans_pcie->mutex);
1807
1808 /* disable interrupts - don't enable HW RF kill interrupt */
1809 iwl_disable_interrupts(trans);
1810
1811 iwl_pcie_apm_stop(trans, true);
1812
1813 iwl_disable_interrupts(trans);
1814
1815 iwl_pcie_disable_ict(trans);
1816
1817 mutex_unlock(&trans_pcie->mutex);
1818
1819 iwl_pcie_synchronize_irqs(trans);
1820 }
1821
1822 static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
1823 {
1824 writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
1825 }
1826
1827 static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val)
1828 {
1829 writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
1830 }
1831
1832 static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
1833 {
1834 return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
1835 }
1836
1837 static u32 iwl_trans_pcie_prph_msk(struct iwl_trans *trans)
1838 {
1839 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
1840 return 0x00FFFFFF;
1841 else
1842 return 0x000FFFFF;
1843 }
1844
1845 static u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg)
1846 {
1847 u32 mask = iwl_trans_pcie_prph_msk(trans);
1848
1849 iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_RADDR,
1850 ((reg & mask) | (3 << 24)));
1851 return iwl_trans_pcie_read32(trans, HBUS_TARG_PRPH_RDAT);
1852 }
1853
1854 static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr,
1855 u32 val)
1856 {
1857 u32 mask = iwl_trans_pcie_prph_msk(trans);
1858
1859 iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WADDR,
1860 ((addr & mask) | (3 << 24)));
1861 iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
1862 }
1863
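/*
 * Apply the op_mode's transport configuration: command queue
 * parameters, the no-reclaim command list, RX buffer sizing, the
 * supported DMA mask, and the dummy netdev used for NAPI.
 */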
1864 static void iwl_trans_pcie_configure(struct iwl_trans *trans,
1865 const struct iwl_trans_config *trans_cfg)
1866 {
1867 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1868
1869 trans->txqs.cmd.q_id = trans_cfg->cmd_queue;
1870 trans->txqs.cmd.fifo = trans_cfg->cmd_fifo;
1871 trans->txqs.cmd.wdg_timeout = trans_cfg->cmd_q_wdg_timeout;
1872 trans->txqs.page_offs = trans_cfg->cb_data_offs;
1873 trans->txqs.dev_cmd_offs = trans_cfg->cb_data_offs + sizeof(void *);
1874
1875 if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
1876 trans_pcie->n_no_reclaim_cmds = 0;
1877 else
1878 trans_pcie->n_no_reclaim_cmds = trans_cfg->n_no_reclaim_cmds;
1879 if (trans_pcie->n_no_reclaim_cmds)
1880 memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds,
1881 trans_pcie->n_no_reclaim_cmds * sizeof(u8));
1882
1883 trans_pcie->rx_buf_size = trans_cfg->rx_buf_size;
1884 trans_pcie->rx_page_order =
1885 iwl_trans_get_rb_size_order(trans_pcie->rx_buf_size);
1886 trans_pcie->rx_buf_bytes =
1887 iwl_trans_get_rb_size(trans_pcie->rx_buf_size);
1888 trans_pcie->supported_dma_mask = DMA_BIT_MASK(12);
1889 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
1890 trans_pcie->supported_dma_mask = DMA_BIT_MASK(11);
1891
1892 trans->txqs.bc_table_dword = trans_cfg->bc_table_dword;
1893 trans_pcie->scd_set_active = trans_cfg->scd_set_active;
1894
1895 trans->command_groups = trans_cfg->command_groups;
1896 trans->command_groups_size = trans_cfg->command_groups_size;
1897
1898 	/* Initialize NAPI here - it should be done before registering
1899 	 * with mac80211 in the opmode but after the HW struct is allocated.
1900 	 * As this function may be called again in some corner cases, don't
1901 	 * do anything if NAPI was already initialized.
1902 	 */
1903 if (trans_pcie->napi_dev.reg_state != NETREG_DUMMY)
1904 init_dummy_netdev(&trans_pcie->napi_dev);
1905
1906 trans_pcie->fw_reset_handshake = trans_cfg->fw_reset_handshake;
1907 }
1908
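/*
 * Tear down everything set up by iwl_trans_pcie_alloc() and the
 * start_hw path: TX/RX resources, the RB allocator workqueue, MSI-X
 * affinity hints (or the ICT for MSI/INTx), the firmware monitor and
 * the PNVM DRAM block.
 */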
1909 void iwl_trans_pcie_free(struct iwl_trans *trans)
1910 {
1911 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1912 int i;
1913
1914 iwl_pcie_synchronize_irqs(trans);
1915
1916 if (trans->trans_cfg->gen2)
1917 iwl_txq_gen2_tx_free(trans);
1918 else
1919 iwl_pcie_tx_free(trans);
1920 iwl_pcie_rx_free(trans);
1921
1922 if (trans_pcie->rba.alloc_wq) {
1923 destroy_workqueue(trans_pcie->rba.alloc_wq);
1924 trans_pcie->rba.alloc_wq = NULL;
1925 }
1926
1927 if (trans_pcie->msix_enabled) {
1928 for (i = 0; i < trans_pcie->alloc_vecs; i++) {
1929 irq_set_affinity_hint(
1930 trans_pcie->msix_entries[i].vector,
1931 NULL);
1932 }
1933
1934 trans_pcie->msix_enabled = false;
1935 } else {
1936 iwl_pcie_free_ict(trans);
1937 }
1938
1939 iwl_pcie_free_fw_monitor(trans);
1940
1941 if (trans_pcie->pnvm_dram.size)
1942 dma_free_coherent(trans->dev, trans_pcie->pnvm_dram.size,
1943 trans_pcie->pnvm_dram.block,
1944 trans_pcie->pnvm_dram.physical);
1945
1946 mutex_destroy(&trans_pcie->mutex);
1947 iwl_trans_free(trans);
1948 }
1949
1950 static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state)
1951 {
1952 if (state)
1953 set_bit(STATUS_TPOWER_PMI, &trans->status);
1954 else
1955 clear_bit(STATUS_TPOWER_PMI, &trans->status);
1956 }
1957
1958 struct iwl_trans_pcie_removal {
1959 struct pci_dev *pdev;
1960 struct work_struct work;
1961 };
1962
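/*
 * Worker that removes a PCI device which has become inaccessible:
 * notify user space with an INACCESSIBLE uevent, then detach the
 * device under the rescan/remove lock.
 */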
1963 static void iwl_trans_pcie_removal_wk(struct work_struct *wk)
1964 {
1965 struct iwl_trans_pcie_removal *removal =
1966 container_of(wk, struct iwl_trans_pcie_removal, work);
1967 struct pci_dev *pdev = removal->pdev;
1968 static char *prop[] = {"EVENT=INACCESSIBLE", NULL};
1969
1970 dev_err(&pdev->dev, "Device gone - attempting removal\n");
1971 kobject_uevent_env(&pdev->dev.kobj, KOBJ_CHANGE, prop);
1972 pci_lock_rescan_remove();
1973 pci_dev_put(pdev);
1974 pci_stop_and_remove_bus_device(pdev);
1975 pci_unlock_rescan_remove();
1976
1977 kfree(removal);
1978 module_put(THIS_MODULE);
1979 }
1980
1981 /*
1982 * This version doesn't disable BHs but rather assumes they're
1983 * already disabled.
1984 */
1985 bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans)
1986 {
1987 int ret;
1988 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1989
1990 spin_lock(&trans_pcie->reg_lock);
1991
1992 if (trans_pcie->cmd_hold_nic_awake)
1993 goto out;
1994
1995 /* this bit wakes up the NIC */
1996 __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
1997 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1998 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
1999 udelay(2);
2000
2001 /*
2002 * These bits say the device is running, and should keep running for
2003 * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
2004 * but they do not indicate that embedded SRAM is restored yet;
2005 * HW with volatile SRAM must save/restore contents to/from
2006 * host DRAM when sleeping/waking for power-saving.
2007 * Each direction takes approximately 1/4 millisecond; with this
2008 * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
2009 * series of register accesses are expected (e.g. reading Event Log),
2010 * to keep device from sleeping.
2011 *
2012 * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
2013 * SRAM is okay/restored. We don't check that here because this call
2014 * is just for hardware register access; but GP1 MAC_SLEEP
2015 * check is a good idea before accessing the SRAM of HW with
2016 * volatile SRAM (e.g. reading Event Log).
2017 *
2018 * 5000 series and later (including 1000 series) have non-volatile SRAM,
2019 * and do not save/restore SRAM when power cycling.
2020 */
2021 ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
2022 CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
2023 (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
2024 CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
2025 if (unlikely(ret < 0)) {
2026 u32 cntrl = iwl_read32(trans, CSR_GP_CNTRL);
2027
2028 WARN_ONCE(1,
2029 "Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n",
2030 cntrl);
2031
2032 iwl_trans_pcie_dump_regs(trans);
2033
2034 if (iwlwifi_mod_params.remove_when_gone && cntrl == ~0U) {
2035 struct iwl_trans_pcie_removal *removal;
2036
2037 if (test_bit(STATUS_TRANS_DEAD, &trans->status))
2038 goto err;
2039
2040 IWL_ERR(trans, "Device gone - scheduling removal!\n");
2041
2042 			/*
2043 			 * take a module reference to avoid doing this
2044 			 * while unloading anyway, and to avoid
2045 			 * scheduling work that runs code which is
2046 			 * being removed.
2047 			 */
2048 if (!try_module_get(THIS_MODULE)) {
2049 IWL_ERR(trans,
2050 "Module is being unloaded - abort\n");
2051 goto err;
2052 }
2053
2054 removal = kzalloc(sizeof(*removal), GFP_ATOMIC);
2055 if (!removal) {
2056 module_put(THIS_MODULE);
2057 goto err;
2058 }
2059 /*
2060 * we don't need to clear this flag, because
2061 * the trans will be freed and reallocated.
2062 */
2063 set_bit(STATUS_TRANS_DEAD, &trans->status);
2064
2065 removal->pdev = to_pci_dev(trans->dev);
2066 INIT_WORK(&removal->work, iwl_trans_pcie_removal_wk);
2067 pci_dev_get(removal->pdev);
2068 schedule_work(&removal->work);
2069 } else {
2070 iwl_write32(trans, CSR_RESET,
2071 CSR_RESET_REG_FLAG_FORCE_NMI);
2072 }
2073
2074 err:
2075 spin_unlock(&trans_pcie->reg_lock);
2076 return false;
2077 }
2078
2079 out:
2080 	/*
2081 	 * Fool sparse by faking that we release the lock - sparse will
2082 	 * track nic_access anyway.
2083 	 */
2084 __release(&trans_pcie->reg_lock);
2085 return true;
2086 }
2087
2088 static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans)
2089 {
2090 bool ret;
2091
2092 local_bh_disable();
2093 ret = __iwl_trans_pcie_grab_nic_access(trans);
2094 if (ret) {
2095 /* keep BHs disabled until iwl_trans_pcie_release_nic_access */
2096 return ret;
2097 }
2098 local_bh_enable();
2099 return false;
2100 }
2101
2102 static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans)
2103 {
2104 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2105
2106 lockdep_assert_held(&trans_pcie->reg_lock);
2107
2108 	/*
2109 	 * Fool sparse by faking that we acquire the lock - sparse will
2110 	 * track nic_access anyway.
2111 	 */
2112 __acquire(&trans_pcie->reg_lock);
2113
2114 if (trans_pcie->cmd_hold_nic_awake)
2115 goto out;
2116
2117 __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
2118 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
2119 /*
2120 * Above we read the CSR_GP_CNTRL register, which will flush
2121 * any previous writes, but we need the write that clears the
2122 * MAC_ACCESS_REQ bit to be performed before any other writes
2123 * scheduled on different CPUs (after we drop reg_lock).
2124 */
2125 out:
2126 spin_unlock_bh(&trans_pcie->reg_lock);
2127 }
2128
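/*
 * Read a block of device memory (in dwords) through the HBUS target
 * memory window. Reads are chunked so that NIC access (and the lock
 * it implies) is never held for more than ~0.5s at a time.
 */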
2129 static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
2130 void *buf, int dwords)
2131 {
2132 int offs = 0;
2133 u32 *vals = buf;
2134
2135 while (offs < dwords) {
2136 /* limit the time we spin here under lock to 1/2s */
2137 unsigned long end = jiffies + HZ / 2;
2138 bool resched = false;
2139
2140 if (iwl_trans_grab_nic_access(trans)) {
2141 iwl_write32(trans, HBUS_TARG_MEM_RADDR,
2142 addr + 4 * offs);
2143
2144 while (offs < dwords) {
2145 vals[offs] = iwl_read32(trans,
2146 HBUS_TARG_MEM_RDAT);
2147 offs++;
2148
2149 if (time_after(jiffies, end)) {
2150 resched = true;
2151 break;
2152 }
2153 }
2154 iwl_trans_release_nic_access(trans);
2155
2156 if (resched)
2157 cond_resched();
2158 } else {
2159 return -EBUSY;
2160 }
2161 }
2162
2163 return 0;
2164 }
2165
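/*
 * Write a block of dwords through the HBUS target memory window;
 * a NULL buffer zeroes the destination instead.
 */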
2166 static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
2167 const void *buf, int dwords)
2168 {
2169 int offs, ret = 0;
2170 const u32 *vals = buf;
2171
2172 if (iwl_trans_grab_nic_access(trans)) {
2173 iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr);
2174 for (offs = 0; offs < dwords; offs++)
2175 iwl_write32(trans, HBUS_TARG_MEM_WDAT,
2176 vals ? vals[offs] : 0);
2177 iwl_trans_release_nic_access(trans);
2178 } else {
2179 ret = -EBUSY;
2180 }
2181 return ret;
2182 }
2183
2184 static int iwl_trans_pcie_read_config32(struct iwl_trans *trans, u32 ofs,
2185 u32 *val)
2186 {
2187 return pci_read_config_dword(IWL_TRANS_GET_PCIE_TRANS(trans)->pci_dev,
2188 ofs, val);
2189 }
2190
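/*
 * Block or unblock hardware write-pointer updates on all data queues
 * (the command queue is skipped). Unblocking restores the write
 * pointer to the hardware once the per-queue block count reaches zero.
 */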
2191 static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block)
2192 {
2193 int i;
2194
2195 for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
2196 struct iwl_txq *txq = trans->txqs.txq[i];
2197
2198 if (i == trans->txqs.cmd.q_id)
2199 continue;
2200
2201 spin_lock_bh(&txq->lock);
2202
2203 if (!block && !(WARN_ON_ONCE(!txq->block))) {
2204 txq->block--;
2205 if (!txq->block) {
2206 iwl_write32(trans, HBUS_TARG_WRPTR,
2207 txq->write_ptr | (i << 8));
2208 }
2209 } else if (block) {
2210 txq->block++;
2211 }
2212
2213 spin_unlock_bh(&txq->lock);
2214 }
2215 }
2216
2217 #define IWL_FLUSH_WAIT_MS 2000
2218
2219 static int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue,
2220 struct iwl_trans_rxq_dma_data *data)
2221 {
2222 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2223
2224 if (queue >= trans->num_rx_queues || !trans_pcie->rxq)
2225 return -EINVAL;
2226
2227 data->fr_bd_cb = trans_pcie->rxq[queue].bd_dma;
2228 data->urbd_stts_wrptr = trans_pcie->rxq[queue].rb_stts_dma;
2229 data->ur_bd_cb = trans_pcie->rxq[queue].used_bd_dma;
2230 data->fr_bd_wid = 0;
2231
2232 return 0;
2233 }
2234
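/*
 * Poll (for up to IWL_FLUSH_WAIT_MS) until the given TX queue drains.
 * Warns and fails if the write pointer moves during the wait for TX
 * that originated in the op mode.
 */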
2235 static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx)
2236 {
2237 struct iwl_txq *txq;
2238 unsigned long now = jiffies;
2239 bool overflow_tx;
2240 u8 wr_ptr;
2241
2242 /* Make sure the NIC is still alive in the bus */
2243 if (test_bit(STATUS_TRANS_DEAD, &trans->status))
2244 return -ENODEV;
2245
2246 if (!test_bit(txq_idx, trans->txqs.queue_used))
2247 return -EINVAL;
2248
2249 IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", txq_idx);
2250 txq = trans->txqs.txq[txq_idx];
2251
2252 spin_lock_bh(&txq->lock);
2253 overflow_tx = txq->overflow_tx ||
2254 !skb_queue_empty(&txq->overflow_q);
2255 spin_unlock_bh(&txq->lock);
2256
2257 wr_ptr = READ_ONCE(txq->write_ptr);
2258
2259 while ((txq->read_ptr != READ_ONCE(txq->write_ptr) ||
2260 overflow_tx) &&
2261 !time_after(jiffies,
2262 now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) {
2263 u8 write_ptr = READ_ONCE(txq->write_ptr);
2264
2265 		/*
2266 		 * If the write pointer moved during the wait, warn only
2267 		 * if the TX came from the op mode. If the TX came from
2268 		 * the trans layer (overflow TX), don't warn.
2269 		 */
2270 if (WARN_ONCE(wr_ptr != write_ptr && !overflow_tx,
2271 "WR pointer moved while flushing %d -> %d\n",
2272 wr_ptr, write_ptr))
2273 return -ETIMEDOUT;
2274 wr_ptr = write_ptr;
2275
2276 usleep_range(1000, 2000);
2277
2278 spin_lock_bh(&txq->lock);
2279 overflow_tx = txq->overflow_tx ||
2280 !skb_queue_empty(&txq->overflow_q);
2281 spin_unlock_bh(&txq->lock);
2282 }
2283
2284 if (txq->read_ptr != txq->write_ptr) {
2285 IWL_ERR(trans,
2286 "fail to flush all tx fifo queues Q %d\n", txq_idx);
2287 iwl_txq_log_scd_error(trans, txq);
2288 return -ETIMEDOUT;
2289 }
2290
2291 IWL_DEBUG_TX_QUEUES(trans, "Queue %d is now empty.\n", txq_idx);
2292
2293 return 0;
2294 }
2295
2296 static int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm)
2297 {
2298 int cnt;
2299 int ret = 0;
2300
2301 	/* waiting for all the tx frames to complete might take a while */
2302 for (cnt = 0;
2303 cnt < trans->trans_cfg->base_params->num_of_queues;
2304 cnt++) {
2305
2306 if (cnt == trans->txqs.cmd.q_id)
2307 continue;
2308 if (!test_bit(cnt, trans->txqs.queue_used))
2309 continue;
2310 if (!(BIT(cnt) & txq_bm))
2311 continue;
2312
2313 ret = iwl_trans_pcie_wait_txq_empty(trans, cnt);
2314 if (ret)
2315 break;
2316 }
2317
2318 return ret;
2319 }
2320
2321 static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
2322 u32 mask, u32 value)
2323 {
2324 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2325
2326 spin_lock_bh(&trans_pcie->reg_lock);
2327 __iwl_trans_pcie_set_bits_mask(trans, reg, mask, value);
2328 spin_unlock_bh(&trans_pcie->reg_lock);
2329 }
2330
2331 static const char *get_csr_string(int cmd)
2332 {
2333 #define IWL_CMD(x) case x: return #x
2334 switch (cmd) {
2335 IWL_CMD(CSR_HW_IF_CONFIG_REG);
2336 IWL_CMD(CSR_INT_COALESCING);
2337 IWL_CMD(CSR_INT);
2338 IWL_CMD(CSR_INT_MASK);
2339 IWL_CMD(CSR_FH_INT_STATUS);
2340 IWL_CMD(CSR_GPIO_IN);
2341 IWL_CMD(CSR_RESET);
2342 IWL_CMD(CSR_GP_CNTRL);
2343 IWL_CMD(CSR_HW_REV);
2344 IWL_CMD(CSR_EEPROM_REG);
2345 IWL_CMD(CSR_EEPROM_GP);
2346 IWL_CMD(CSR_OTP_GP_REG);
2347 IWL_CMD(CSR_GIO_REG);
2348 IWL_CMD(CSR_GP_UCODE_REG);
2349 IWL_CMD(CSR_GP_DRIVER_REG);
2350 IWL_CMD(CSR_UCODE_DRV_GP1);
2351 IWL_CMD(CSR_UCODE_DRV_GP2);
2352 IWL_CMD(CSR_LED_REG);
2353 IWL_CMD(CSR_DRAM_INT_TBL_REG);
2354 IWL_CMD(CSR_GIO_CHICKEN_BITS);
2355 IWL_CMD(CSR_ANA_PLL_CFG);
2356 IWL_CMD(CSR_HW_REV_WA_REG);
2357 IWL_CMD(CSR_MONITOR_STATUS_REG);
2358 IWL_CMD(CSR_DBG_HPET_MEM_REG);
2359 default:
2360 return "UNKNOWN";
2361 }
2362 #undef IWL_CMD
2363 }
2364
2365 void iwl_pcie_dump_csr(struct iwl_trans *trans)
2366 {
2367 int i;
2368 static const u32 csr_tbl[] = {
2369 CSR_HW_IF_CONFIG_REG,
2370 CSR_INT_COALESCING,
2371 CSR_INT,
2372 CSR_INT_MASK,
2373 CSR_FH_INT_STATUS,
2374 CSR_GPIO_IN,
2375 CSR_RESET,
2376 CSR_GP_CNTRL,
2377 CSR_HW_REV,
2378 CSR_EEPROM_REG,
2379 CSR_EEPROM_GP,
2380 CSR_OTP_GP_REG,
2381 CSR_GIO_REG,
2382 CSR_GP_UCODE_REG,
2383 CSR_GP_DRIVER_REG,
2384 CSR_UCODE_DRV_GP1,
2385 CSR_UCODE_DRV_GP2,
2386 CSR_LED_REG,
2387 CSR_DRAM_INT_TBL_REG,
2388 CSR_GIO_CHICKEN_BITS,
2389 CSR_ANA_PLL_CFG,
2390 CSR_MONITOR_STATUS_REG,
2391 CSR_HW_REV_WA_REG,
2392 CSR_DBG_HPET_MEM_REG
2393 };
2394 IWL_ERR(trans, "CSR values:\n");
2395 IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is "
2396 "CSR_INT_PERIODIC_REG)\n");
2397 for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
2398 IWL_ERR(trans, " %25s: 0X%08x\n",
2399 get_csr_string(csr_tbl[i]),
2400 iwl_read32(trans, csr_tbl[i]));
2401 }
2402 }
2403
2404 #ifdef CONFIG_IWLWIFI_DEBUGFS
2405 /* creation and removal of debugfs files */
2406 #define DEBUGFS_ADD_FILE(name, parent, mode) do { \
2407 debugfs_create_file(#name, mode, parent, trans, \
2408 &iwl_dbgfs_##name##_ops); \
2409 } while (0)
2410
2411 /* file operations */
2412 #define DEBUGFS_READ_FILE_OPS(name) \
2413 static const struct file_operations iwl_dbgfs_##name##_ops = { \
2414 .read = iwl_dbgfs_##name##_read, \
2415 .open = simple_open, \
2416 .llseek = generic_file_llseek, \
2417 };
2418
2419 #define DEBUGFS_WRITE_FILE_OPS(name) \
2420 static const struct file_operations iwl_dbgfs_##name##_ops = { \
2421 .write = iwl_dbgfs_##name##_write, \
2422 .open = simple_open, \
2423 .llseek = generic_file_llseek, \
2424 };
2425
2426 #define DEBUGFS_READ_WRITE_FILE_OPS(name) \
2427 static const struct file_operations iwl_dbgfs_##name##_ops = { \
2428 .write = iwl_dbgfs_##name##_write, \
2429 .read = iwl_dbgfs_##name##_read, \
2430 .open = simple_open, \
2431 .llseek = generic_file_llseek, \
2432 };
2433
2434 struct iwl_dbgfs_tx_queue_priv {
2435 struct iwl_trans *trans;
2436 };
2437
2438 struct iwl_dbgfs_tx_queue_state {
2439 loff_t pos;
2440 };
2441
2442 static void *iwl_dbgfs_tx_queue_seq_start(struct seq_file *seq, loff_t *pos)
2443 {
2444 struct iwl_dbgfs_tx_queue_priv *priv = seq->private;
2445 struct iwl_dbgfs_tx_queue_state *state;
2446
2447 if (*pos >= priv->trans->trans_cfg->base_params->num_of_queues)
2448 return NULL;
2449
2450 state = kmalloc(sizeof(*state), GFP_KERNEL);
2451 if (!state)
2452 return NULL;
2453 state->pos = *pos;
2454 return state;
2455 }
2456
2457 static void *iwl_dbgfs_tx_queue_seq_next(struct seq_file *seq,
2458 void *v, loff_t *pos)
2459 {
2460 struct iwl_dbgfs_tx_queue_priv *priv = seq->private;
2461 struct iwl_dbgfs_tx_queue_state *state = v;
2462
2463 *pos = ++state->pos;
2464
2465 if (*pos >= priv->trans->trans_cfg->base_params->num_of_queues)
2466 return NULL;
2467
2468 return state;
2469 }
2470
2471 static void iwl_dbgfs_tx_queue_seq_stop(struct seq_file *seq, void *v)
2472 {
2473 kfree(v);
2474 }
2475
2476 static int iwl_dbgfs_tx_queue_seq_show(struct seq_file *seq, void *v)
2477 {
2478 struct iwl_dbgfs_tx_queue_priv *priv = seq->private;
2479 struct iwl_dbgfs_tx_queue_state *state = v;
2480 struct iwl_trans *trans = priv->trans;
2481 struct iwl_txq *txq = trans->txqs.txq[state->pos];
2482
2483 seq_printf(seq, "hwq %.3u: used=%d stopped=%d ",
2484 (unsigned int)state->pos,
2485 !!test_bit(state->pos, trans->txqs.queue_used),
2486 !!test_bit(state->pos, trans->txqs.queue_stopped));
2487 if (txq)
2488 seq_printf(seq,
2489 "read=%u write=%u need_update=%d frozen=%d n_window=%d ampdu=%d",
2490 txq->read_ptr, txq->write_ptr,
2491 txq->need_update, txq->frozen,
2492 txq->n_window, txq->ampdu);
2493 else
2494 seq_puts(seq, "(unallocated)");
2495
2496 if (state->pos == trans->txqs.cmd.q_id)
2497 seq_puts(seq, " (HCMD)");
2498 seq_puts(seq, "\n");
2499
2500 return 0;
2501 }
2502
2503 static const struct seq_operations iwl_dbgfs_tx_queue_seq_ops = {
2504 .start = iwl_dbgfs_tx_queue_seq_start,
2505 .next = iwl_dbgfs_tx_queue_seq_next,
2506 .stop = iwl_dbgfs_tx_queue_seq_stop,
2507 .show = iwl_dbgfs_tx_queue_seq_show,
2508 };
2509
2510 static int iwl_dbgfs_tx_queue_open(struct inode *inode, struct file *filp)
2511 {
2512 struct iwl_dbgfs_tx_queue_priv *priv;
2513
2514 priv = __seq_open_private(filp, &iwl_dbgfs_tx_queue_seq_ops,
2515 sizeof(*priv));
2516
2517 if (!priv)
2518 return -ENOMEM;
2519
2520 priv->trans = inode->i_private;
2521 return 0;
2522 }
2523
2524 static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
2525 char __user *user_buf,
2526 size_t count, loff_t *ppos)
2527 {
2528 struct iwl_trans *trans = file->private_data;
2529 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2530 char *buf;
2531 int pos = 0, i, ret;
2532 size_t bufsz;
2533
2534 bufsz = sizeof(char) * 121 * trans->num_rx_queues;
2535
2536 if (!trans_pcie->rxq)
2537 return -EAGAIN;
2538
2539 buf = kzalloc(bufsz, GFP_KERNEL);
2540 if (!buf)
2541 return -ENOMEM;
2542
2543 for (i = 0; i < trans->num_rx_queues && pos < bufsz; i++) {
2544 struct iwl_rxq *rxq = &trans_pcie->rxq[i];
2545
2546 pos += scnprintf(buf + pos, bufsz - pos, "queue#: %2d\n",
2547 i);
2548 pos += scnprintf(buf + pos, bufsz - pos, "\tread: %u\n",
2549 rxq->read);
2550 pos += scnprintf(buf + pos, bufsz - pos, "\twrite: %u\n",
2551 rxq->write);
2552 pos += scnprintf(buf + pos, bufsz - pos, "\twrite_actual: %u\n",
2553 rxq->write_actual);
2554 pos += scnprintf(buf + pos, bufsz - pos, "\tneed_update: %2d\n",
2555 rxq->need_update);
2556 pos += scnprintf(buf + pos, bufsz - pos, "\tfree_count: %u\n",
2557 rxq->free_count);
2558 if (rxq->rb_stts) {
2559 u32 r = __le16_to_cpu(iwl_get_closed_rb_stts(trans,
2560 rxq));
2561 pos += scnprintf(buf + pos, bufsz - pos,
2562 "\tclosed_rb_num: %u\n",
2563 r & 0x0FFF);
2564 } else {
2565 pos += scnprintf(buf + pos, bufsz - pos,
2566 "\tclosed_rb_num: Not Allocated\n");
2567 }
2568 }
2569 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
2570 kfree(buf);
2571
2572 return ret;
2573 }
2574
2575 static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
2576 char __user *user_buf,
2577 size_t count, loff_t *ppos)
2578 {
2579 struct iwl_trans *trans = file->private_data;
2580 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2581 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
2582
2583 int pos = 0;
2584 char *buf;
2585 int bufsz = 24 * 64; /* 24 items * 64 char per item */
2586 ssize_t ret;
2587
2588 buf = kzalloc(bufsz, GFP_KERNEL);
2589 if (!buf)
2590 return -ENOMEM;
2591
2592 pos += scnprintf(buf + pos, bufsz - pos,
2593 "Interrupt Statistics Report:\n");
2594
2595 pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
2596 isr_stats->hw);
2597 pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
2598 isr_stats->sw);
2599 if (isr_stats->sw || isr_stats->hw) {
2600 pos += scnprintf(buf + pos, bufsz - pos,
2601 "\tLast Restarting Code: 0x%X\n",
2602 isr_stats->err_code);
2603 }
2604 #ifdef CONFIG_IWLWIFI_DEBUG
2605 pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
2606 isr_stats->sch);
2607 pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
2608 isr_stats->alive);
2609 #endif
2610 pos += scnprintf(buf + pos, bufsz - pos,
2611 "HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill);
2612
2613 pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
2614 isr_stats->ctkill);
2615
2616 pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
2617 isr_stats->wakeup);
2618
2619 pos += scnprintf(buf + pos, bufsz - pos,
2620 "Rx command responses:\t\t %u\n", isr_stats->rx);
2621
2622 pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
2623 isr_stats->tx);
2624
2625 pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
2626 isr_stats->unhandled);
2627
2628 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
2629 kfree(buf);
2630 return ret;
2631 }
2632
2633 static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
2634 const char __user *user_buf,
2635 size_t count, loff_t *ppos)
2636 {
2637 struct iwl_trans *trans = file->private_data;
2638 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2639 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
2640 u32 reset_flag;
2641 int ret;
2642
2643 ret = kstrtou32_from_user(user_buf, count, 16, &reset_flag);
2644 if (ret)
2645 return ret;
2646 if (reset_flag == 0)
2647 memset(isr_stats, 0, sizeof(*isr_stats));
2648
2649 return count;
2650 }
2651
2652 static ssize_t iwl_dbgfs_csr_write(struct file *file,
2653 const char __user *user_buf,
2654 size_t count, loff_t *ppos)
2655 {
2656 struct iwl_trans *trans = file->private_data;
2657
2658 iwl_pcie_dump_csr(trans);
2659
2660 return count;
2661 }
2662
2663 static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
2664 char __user *user_buf,
2665 size_t count, loff_t *ppos)
2666 {
2667 struct iwl_trans *trans = file->private_data;
2668 char *buf = NULL;
2669 ssize_t ret;
2670
2671 ret = iwl_dump_fh(trans, &buf);
2672 if (ret < 0)
2673 return ret;
2674 if (!buf)
2675 return -EINVAL;
2676 ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
2677 kfree(buf);
2678 return ret;
2679 }
2680
2681 static ssize_t iwl_dbgfs_rfkill_read(struct file *file,
2682 char __user *user_buf,
2683 size_t count, loff_t *ppos)
2684 {
2685 struct iwl_trans *trans = file->private_data;
2686 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2687 char buf[100];
2688 int pos;
2689
2690 pos = scnprintf(buf, sizeof(buf), "debug: %d\nhw: %d\n",
2691 trans_pcie->debug_rfkill,
2692 !(iwl_read32(trans, CSR_GP_CNTRL) &
2693 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW));
2694
2695 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
2696 }
2697
2698 static ssize_t iwl_dbgfs_rfkill_write(struct file *file,
2699 const char __user *user_buf,
2700 size_t count, loff_t *ppos)
2701 {
2702 struct iwl_trans *trans = file->private_data;
2703 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2704 bool new_value;
2705 int ret;
2706
2707 ret = kstrtobool_from_user(user_buf, count, &new_value);
2708 if (ret)
2709 return ret;
2710 if (new_value == trans_pcie->debug_rfkill)
2711 return count;
2712 IWL_WARN(trans, "changing debug rfkill %d->%d\n",
2713 trans_pcie->debug_rfkill, new_value);
2714 trans_pcie->debug_rfkill = new_value;
2715 iwl_pcie_handle_rfkill_irq(trans);
2716
2717 return count;
2718 }
2719
2720 static int iwl_dbgfs_monitor_data_open(struct inode *inode,
2721 struct file *file)
2722 {
2723 struct iwl_trans *trans = inode->i_private;
2724 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2725
2726 if (!trans->dbg.dest_tlv ||
2727 trans->dbg.dest_tlv->monitor_mode != EXTERNAL_MODE) {
2728 IWL_ERR(trans, "Debug destination is not set to DRAM\n");
2729 return -ENOENT;
2730 }
2731
2732 if (trans_pcie->fw_mon_data.state != IWL_FW_MON_DBGFS_STATE_CLOSED)
2733 return -EBUSY;
2734
2735 trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_OPEN;
2736 return simple_open(inode, file);
2737 }
2738
2739 static int iwl_dbgfs_monitor_data_release(struct inode *inode,
2740 struct file *file)
2741 {
2742 struct iwl_trans_pcie *trans_pcie =
2743 IWL_TRANS_GET_PCIE_TRANS(inode->i_private);
2744
2745 if (trans_pcie->fw_mon_data.state == IWL_FW_MON_DBGFS_STATE_OPEN)
2746 trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_CLOSED;
2747 return 0;
2748 }
2749
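/*
 * Copy up to *size bytes to user space, clamped to the space left in
 * the user buffer rounded down to a dword multiple; update *size and
 * *bytes_copied with what was actually copied. Returns true when the
 * user buffer is full.
 */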
2750 static bool iwl_write_to_user_buf(char __user *user_buf, ssize_t count,
2751 void *buf, ssize_t *size,
2752 ssize_t *bytes_copied)
2753 {
2754 int buf_size_left = count - *bytes_copied;
2755
2756 buf_size_left = buf_size_left - (buf_size_left % sizeof(u32));
2757 if (*size > buf_size_left)
2758 *size = buf_size_left;
2759
2760 *size -= copy_to_user(user_buf, buf, *size);
2761 *bytes_copied += *size;
2762
2763 if (buf_size_left == *size)
2764 return true;
2765 return false;
2766 }
2767
2768 static ssize_t iwl_dbgfs_monitor_data_read(struct file *file,
2769 char __user *user_buf,
2770 size_t count, loff_t *ppos)
2771 {
2772 struct iwl_trans *trans = file->private_data;
2773 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2774 void *cpu_addr = (void *)trans->dbg.fw_mon.block, *curr_buf;
2775 struct cont_rec *data = &trans_pcie->fw_mon_data;
2776 u32 write_ptr_addr, wrap_cnt_addr, write_ptr, wrap_cnt;
2777 ssize_t size, bytes_copied = 0;
2778 bool b_full;
2779
2780 if (trans->dbg.dest_tlv) {
2781 write_ptr_addr =
2782 le32_to_cpu(trans->dbg.dest_tlv->write_ptr_reg);
2783 wrap_cnt_addr = le32_to_cpu(trans->dbg.dest_tlv->wrap_count);
2784 } else {
2785 write_ptr_addr = MON_BUFF_WRPTR;
2786 wrap_cnt_addr = MON_BUFF_CYCLE_CNT;
2787 }
2788
2789 if (unlikely(!trans->dbg.rec_on))
2790 return 0;
2791
2792 mutex_lock(&data->mutex);
2793 if (data->state ==
2794 IWL_FW_MON_DBGFS_STATE_DISABLED) {
2795 mutex_unlock(&data->mutex);
2796 return 0;
2797 }
2798
2799 	/* write_ptr position in bytes rather than in DWs */
2800 write_ptr = iwl_read_prph(trans, write_ptr_addr) * sizeof(u32);
2801 wrap_cnt = iwl_read_prph(trans, wrap_cnt_addr);
2802
2803 if (data->prev_wrap_cnt == wrap_cnt) {
2804 size = write_ptr - data->prev_wr_ptr;
2805 curr_buf = cpu_addr + data->prev_wr_ptr;
2806 b_full = iwl_write_to_user_buf(user_buf, count,
2807 curr_buf, &size,
2808 &bytes_copied);
2809 data->prev_wr_ptr += size;
2810
2811 } else if (data->prev_wrap_cnt == wrap_cnt - 1 &&
2812 write_ptr < data->prev_wr_ptr) {
2813 size = trans->dbg.fw_mon.size - data->prev_wr_ptr;
2814 curr_buf = cpu_addr + data->prev_wr_ptr;
2815 b_full = iwl_write_to_user_buf(user_buf, count,
2816 curr_buf, &size,
2817 &bytes_copied);
2818 data->prev_wr_ptr += size;
2819
2820 if (!b_full) {
2821 size = write_ptr;
2822 b_full = iwl_write_to_user_buf(user_buf, count,
2823 cpu_addr, &size,
2824 &bytes_copied);
2825 data->prev_wr_ptr = size;
2826 data->prev_wrap_cnt++;
2827 }
2828 } else {
2829 if (data->prev_wrap_cnt == wrap_cnt - 1 &&
2830 write_ptr > data->prev_wr_ptr)
2831 IWL_WARN(trans,
2832 "write pointer passed previous write pointer, start copying from the beginning\n");
2833 else if (!unlikely(data->prev_wrap_cnt == 0 &&
2834 data->prev_wr_ptr == 0))
2835 IWL_WARN(trans,
2836 "monitor data is out of sync, start copying from the beginning\n");
2837
2838 size = write_ptr;
2839 b_full = iwl_write_to_user_buf(user_buf, count,
2840 cpu_addr, &size,
2841 &bytes_copied);
2842 data->prev_wr_ptr = size;
2843 data->prev_wrap_cnt = wrap_cnt;
2844 }
2845
2846 mutex_unlock(&data->mutex);
2847
2848 return bytes_copied;
2849 }
2850
2851 DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
2852 DEBUGFS_READ_FILE_OPS(fh_reg);
2853 DEBUGFS_READ_FILE_OPS(rx_queue);
2854 DEBUGFS_WRITE_FILE_OPS(csr);
2855 DEBUGFS_READ_WRITE_FILE_OPS(rfkill);
2856 static const struct file_operations iwl_dbgfs_tx_queue_ops = {
2857 .owner = THIS_MODULE,
2858 .open = iwl_dbgfs_tx_queue_open,
2859 .read = seq_read,
2860 .llseek = seq_lseek,
2861 .release = seq_release_private,
2862 };
2863
2864 static const struct file_operations iwl_dbgfs_monitor_data_ops = {
2865 .read = iwl_dbgfs_monitor_data_read,
2866 .open = iwl_dbgfs_monitor_data_open,
2867 .release = iwl_dbgfs_monitor_data_release,
2868 };
2869
2870 /* Create the debugfs files and directories */
2871 void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
2872 {
2873 struct dentry *dir = trans->dbgfs_dir;
2874
2875 DEBUGFS_ADD_FILE(rx_queue, dir, 0400);
2876 DEBUGFS_ADD_FILE(tx_queue, dir, 0400);
2877 DEBUGFS_ADD_FILE(interrupt, dir, 0600);
2878 DEBUGFS_ADD_FILE(csr, dir, 0200);
2879 DEBUGFS_ADD_FILE(fh_reg, dir, 0400);
2880 DEBUGFS_ADD_FILE(rfkill, dir, 0600);
2881 DEBUGFS_ADD_FILE(monitor_data, dir, 0400);
2882 }
2883
2884 static void iwl_trans_pcie_debugfs_cleanup(struct iwl_trans *trans)
2885 {
2886 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2887 struct cont_rec *data = &trans_pcie->fw_mon_data;
2888
2889 mutex_lock(&data->mutex);
2890 data->state = IWL_FW_MON_DBGFS_STATE_DISABLED;
2891 mutex_unlock(&data->mutex);
2892 }
2893 #endif /*CONFIG_IWLWIFI_DEBUGFS */
2894
2895 static u32 iwl_trans_pcie_get_cmdlen(struct iwl_trans *trans, void *tfd)
2896 {
2897 u32 cmdlen = 0;
2898 int i;
2899
2900 for (i = 0; i < trans->txqs.tfd.max_tbs; i++)
2901 cmdlen += iwl_txq_gen1_tfd_tb_get_len(trans, tfd, i);
2902
2903 return cmdlen;
2904 }
2905
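/*
 * Dump the receive buffers the firmware has filled but the driver has
 * not yet processed (from the read pointer up to the closed RB
 * number). Each page is unmapped for the CPU copy and then remapped
 * for the device.
 */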
2906 static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
2907 struct iwl_fw_error_dump_data **data,
2908 int allocated_rb_nums)
2909 {
2910 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2911 int max_len = trans_pcie->rx_buf_bytes;
2912 	/* Dumping RBs is supported only for pre-9000 devices (1 queue) */
2913 struct iwl_rxq *rxq = &trans_pcie->rxq[0];
2914 u32 i, r, j, rb_len = 0;
2915
2916 spin_lock(&rxq->lock);
2917
2918 r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF;
2919
2920 for (i = rxq->read, j = 0;
2921 i != r && j < allocated_rb_nums;
2922 i = (i + 1) & RX_QUEUE_MASK, j++) {
2923 struct iwl_rx_mem_buffer *rxb = rxq->queue[i];
2924 struct iwl_fw_error_dump_rb *rb;
2925
2926 dma_unmap_page(trans->dev, rxb->page_dma, max_len,
2927 DMA_FROM_DEVICE);
2928
2929 rb_len += sizeof(**data) + sizeof(*rb) + max_len;
2930
2931 (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RB);
2932 (*data)->len = cpu_to_le32(sizeof(*rb) + max_len);
2933 rb = (void *)(*data)->data;
2934 rb->index = cpu_to_le32(i);
2935 memcpy(rb->data, page_address(rxb->page), max_len);
2936 		/* remap the page for the benefit of the RX free path */
2937 rxb->page_dma = dma_map_page(trans->dev, rxb->page,
2938 rxb->offset, max_len,
2939 DMA_FROM_DEVICE);
2940
2941 *data = iwl_fw_error_next_data(*data);
2942 }
2943
2944 spin_unlock(&rxq->lock);
2945
2946 return rb_len;
2947 }
2948 #define IWL_CSR_TO_DUMP (0x250)
2949
2950 static u32 iwl_trans_pcie_dump_csr(struct iwl_trans *trans,
2951 struct iwl_fw_error_dump_data **data)
2952 {
2953 u32 csr_len = sizeof(**data) + IWL_CSR_TO_DUMP;
2954 __le32 *val;
2955 int i;
2956
2957 (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_CSR);
2958 (*data)->len = cpu_to_le32(IWL_CSR_TO_DUMP);
2959 val = (void *)(*data)->data;
2960
2961 for (i = 0; i < IWL_CSR_TO_DUMP; i += 4)
2962 *val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i));
2963
2964 *data = iwl_fw_error_next_data(*data);
2965
2966 return csr_len;
2967 }
2968
2969 static u32 iwl_trans_pcie_fh_regs_dump(struct iwl_trans *trans,
2970 struct iwl_fw_error_dump_data **data)
2971 {
2972 u32 fh_regs_len = FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND;
2973 __le32 *val;
2974 int i;
2975
2976 if (!iwl_trans_grab_nic_access(trans))
2977 return 0;
2978
2979 (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FH_REGS);
2980 (*data)->len = cpu_to_le32(fh_regs_len);
2981 val = (void *)(*data)->data;
2982
2983 if (!trans->trans_cfg->gen2)
2984 for (i = FH_MEM_LOWER_BOUND; i < FH_MEM_UPPER_BOUND;
2985 i += sizeof(u32))
2986 *val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i));
2987 else
2988 for (i = iwl_umac_prph(trans, FH_MEM_LOWER_BOUND_GEN2);
2989 i < iwl_umac_prph(trans, FH_MEM_UPPER_BOUND_GEN2);
2990 i += sizeof(u32))
2991 *val++ = cpu_to_le32(iwl_trans_pcie_read_prph(trans,
2992 i));
2993
2994 iwl_trans_release_nic_access(trans);
2995
2996 *data = iwl_fw_error_next_data(*data);
2997
2998 return sizeof(**data) + fh_regs_len;
2999 }
3000
3001 static u32
3002 iwl_trans_pci_dump_marbh_monitor(struct iwl_trans *trans,
3003 struct iwl_fw_error_dump_fw_mon *fw_mon_data,
3004 u32 monitor_len)
3005 {
3006 u32 buf_size_in_dwords = (monitor_len >> 2);
3007 u32 *buffer = (u32 *)fw_mon_data->data;
3008 u32 i;
3009
3010 if (!iwl_trans_grab_nic_access(trans))
3011 return 0;
3012
3013 iwl_write_umac_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x1);
3014 for (i = 0; i < buf_size_in_dwords; i++)
3015 buffer[i] = iwl_read_umac_prph_no_grab(trans,
3016 MON_DMARB_RD_DATA_ADDR);
3017 iwl_write_umac_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x0);
3018
3019 iwl_trans_release_nic_access(trans);
3020
3021 return monitor_len;
3022 }
3023
3024 static void
3025 iwl_trans_pcie_dump_pointers(struct iwl_trans *trans,
3026 struct iwl_fw_error_dump_fw_mon *fw_mon_data)
3027 {
3028 u32 base, base_high, write_ptr, write_ptr_val, wrap_cnt;
3029
3030 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
3031 base = DBGC_CUR_DBGBUF_BASE_ADDR_LSB;
3032 base_high = DBGC_CUR_DBGBUF_BASE_ADDR_MSB;
3033 write_ptr = DBGC_CUR_DBGBUF_STATUS;
3034 wrap_cnt = DBGC_DBGBUF_WRAP_AROUND;
3035 } else if (trans->dbg.dest_tlv) {
3036 write_ptr = le32_to_cpu(trans->dbg.dest_tlv->write_ptr_reg);
3037 wrap_cnt = le32_to_cpu(trans->dbg.dest_tlv->wrap_count);
3038 base = le32_to_cpu(trans->dbg.dest_tlv->base_reg);
3039 } else {
3040 base = MON_BUFF_BASE_ADDR;
3041 write_ptr = MON_BUFF_WRPTR;
3042 wrap_cnt = MON_BUFF_CYCLE_CNT;
3043 }
3044
3045 write_ptr_val = iwl_read_prph(trans, write_ptr);
3046 fw_mon_data->fw_mon_cycle_cnt =
3047 cpu_to_le32(iwl_read_prph(trans, wrap_cnt));
3048 fw_mon_data->fw_mon_base_ptr =
3049 cpu_to_le32(iwl_read_prph(trans, base));
3050 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
3051 fw_mon_data->fw_mon_base_high_ptr =
3052 cpu_to_le32(iwl_read_prph(trans, base_high));
3053 write_ptr_val &= DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK;
3054 /* convert wrtPtr to DWs, to align with all HWs */
3055 write_ptr_val >>= 2;
3056 }
3057 fw_mon_data->fw_mon_wr_ptr = cpu_to_le32(write_ptr_val);
3058 }
3059
3060 static u32
3061 iwl_trans_pcie_dump_monitor(struct iwl_trans *trans,
3062 struct iwl_fw_error_dump_data **data,
3063 u32 monitor_len)
3064 {
3065 struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
3066 u32 len = 0;
3067
3068 if (trans->dbg.dest_tlv ||
3069 (fw_mon->size &&
3070 (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000 ||
3071 trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210))) {
3072 struct iwl_fw_error_dump_fw_mon *fw_mon_data;
3073
3074 (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR);
3075 fw_mon_data = (void *)(*data)->data;
3076
3077 iwl_trans_pcie_dump_pointers(trans, fw_mon_data);
3078
3079 len += sizeof(**data) + sizeof(*fw_mon_data);
3080 if (fw_mon->size) {
3081 memcpy(fw_mon_data->data, fw_mon->block, fw_mon->size);
3082 monitor_len = fw_mon->size;
3083 } else if (trans->dbg.dest_tlv->monitor_mode == SMEM_MODE) {
3084 u32 base = le32_to_cpu(fw_mon_data->fw_mon_base_ptr);
3085 /*
3086 * Update pointers to reflect actual values after
3087 * shifting
3088 */
3089 if (trans->dbg.dest_tlv->version) {
3090 base = (iwl_read_prph(trans, base) &
3091 IWL_LDBG_M2S_BUF_BA_MSK) <<
3092 trans->dbg.dest_tlv->base_shift;
3093 base *= IWL_M2S_UNIT_SIZE;
3094 base += trans->cfg->smem_offset;
3095 } else {
3096 base = iwl_read_prph(trans, base) <<
3097 trans->dbg.dest_tlv->base_shift;
3098 }
3099
3100 iwl_trans_read_mem(trans, base, fw_mon_data->data,
3101 monitor_len / sizeof(u32));
3102 } else if (trans->dbg.dest_tlv->monitor_mode == MARBH_MODE) {
3103 monitor_len =
3104 iwl_trans_pci_dump_marbh_monitor(trans,
3105 fw_mon_data,
3106 monitor_len);
3107 } else {
3108 /* Didn't match anything - output no monitor data */
3109 monitor_len = 0;
3110 }
3111
3112 len += monitor_len;
3113 (*data)->len = cpu_to_le32(monitor_len + sizeof(*fw_mon_data));
3114 }
3115
3116 return len;
3117 }
3118
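/*
 * Account for the firmware monitor in the dump size (*len) and return
 * the length of the monitor data itself.
 */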
3119 static int iwl_trans_get_fw_monitor_len(struct iwl_trans *trans, u32 *len)
3120 {
3121 if (trans->dbg.fw_mon.size) {
3122 *len += sizeof(struct iwl_fw_error_dump_data) +
3123 sizeof(struct iwl_fw_error_dump_fw_mon) +
3124 trans->dbg.fw_mon.size;
3125 return trans->dbg.fw_mon.size;
3126 } else if (trans->dbg.dest_tlv) {
3127 u32 base, end, cfg_reg, monitor_len;
3128
3129 if (trans->dbg.dest_tlv->version == 1) {
3130 cfg_reg = le32_to_cpu(trans->dbg.dest_tlv->base_reg);
3131 cfg_reg = iwl_read_prph(trans, cfg_reg);
3132 base = (cfg_reg & IWL_LDBG_M2S_BUF_BA_MSK) <<
3133 trans->dbg.dest_tlv->base_shift;
3134 base *= IWL_M2S_UNIT_SIZE;
3135 base += trans->cfg->smem_offset;
3136
3137 monitor_len =
3138 (cfg_reg & IWL_LDBG_M2S_BUF_SIZE_MSK) >>
3139 trans->dbg.dest_tlv->end_shift;
3140 monitor_len *= IWL_M2S_UNIT_SIZE;
3141 } else {
3142 base = le32_to_cpu(trans->dbg.dest_tlv->base_reg);
3143 end = le32_to_cpu(trans->dbg.dest_tlv->end_reg);
3144
3145 base = iwl_read_prph(trans, base) <<
3146 trans->dbg.dest_tlv->base_shift;
3147 end = iwl_read_prph(trans, end) <<
3148 trans->dbg.dest_tlv->end_shift;
3149
3150 /* Make "end" point to the actual end */
3151 if (trans->trans_cfg->device_family >=
3152 IWL_DEVICE_FAMILY_8000 ||
3153 trans->dbg.dest_tlv->monitor_mode == MARBH_MODE)
3154 end += (1 << trans->dbg.dest_tlv->end_shift);
3155 monitor_len = end - base;
3156 }
3157 *len += sizeof(struct iwl_fw_error_dump_data) +
3158 sizeof(struct iwl_fw_error_dump_fw_mon) +
3159 monitor_len;
3160 return monitor_len;
3161 }
3162 return 0;
3163 }
3164
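/*
 * Build the transport's part of a firmware error dump according to
 * dump_mask: host commands, firmware monitor, CSR and FH registers,
 * RBs (pre-9000 only) and gen2 paging blocks.
 */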
3165 static struct iwl_trans_dump_data
3166 *iwl_trans_pcie_dump_data(struct iwl_trans *trans,
3167 u32 dump_mask)
3168 {
3169 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
3170 struct iwl_fw_error_dump_data *data;
3171 struct iwl_txq *cmdq = trans->txqs.txq[trans->txqs.cmd.q_id];
3172 struct iwl_fw_error_dump_txcmd *txcmd;
3173 struct iwl_trans_dump_data *dump_data;
3174 u32 len, num_rbs = 0, monitor_len = 0;
3175 int i, ptr;
3176 bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status) &&
3177 !trans->trans_cfg->mq_rx_supported &&
3178 dump_mask & BIT(IWL_FW_ERROR_DUMP_RB);
3179
3180 if (!dump_mask)
3181 return NULL;
3182
3183 /* transport dump header */
3184 len = sizeof(*dump_data);
3185
3186 /* host commands */
3187 if (dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD) && cmdq)
3188 len += sizeof(*data) +
3189 cmdq->n_window * (sizeof(*txcmd) +
3190 TFD_MAX_PAYLOAD_SIZE);
3191
3192 /* FW monitor */
3193 if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FW_MONITOR))
3194 monitor_len = iwl_trans_get_fw_monitor_len(trans, &len);
3195
3196 /* CSR registers */
3197 if (dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR))
3198 len += sizeof(*data) + IWL_CSR_TO_DUMP;
3199
3200 /* FH registers */
3201 if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS)) {
3202 if (trans->trans_cfg->gen2)
3203 len += sizeof(*data) +
3204 (iwl_umac_prph(trans, FH_MEM_UPPER_BOUND_GEN2) -
3205 iwl_umac_prph(trans, FH_MEM_LOWER_BOUND_GEN2));
3206 else
3207 len += sizeof(*data) +
3208 (FH_MEM_UPPER_BOUND -
3209 FH_MEM_LOWER_BOUND);
3210 }
3211
3212 if (dump_rbs) {
3213 		/* Dumping RBs is supported only for pre-9000 devices (1 queue) */
3214 struct iwl_rxq *rxq = &trans_pcie->rxq[0];
3215 /* RBs */
3216 num_rbs =
3217 le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq))
3218 & 0x0FFF;
3219 num_rbs = (num_rbs - rxq->read) & RX_QUEUE_MASK;
3220 len += num_rbs * (sizeof(*data) +
3221 sizeof(struct iwl_fw_error_dump_rb) +
3222 (PAGE_SIZE << trans_pcie->rx_page_order));
3223 }
3224
3225 /* Paged memory for gen2 HW */
3226 if (trans->trans_cfg->gen2 && dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING))
3227 for (i = 0; i < trans->init_dram.paging_cnt; i++)
3228 len += sizeof(*data) +
3229 sizeof(struct iwl_fw_error_dump_paging) +
3230 trans->init_dram.paging[i].size;
3231
3232 dump_data = vzalloc(len);
3233 if (!dump_data)
3234 return NULL;
3235
3236 len = 0;
3237 data = (void *)dump_data->data;
3238
3239 if (dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD) && cmdq) {
3240 u16 tfd_size = trans->txqs.tfd.size;
3241
3242 data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD);
3243 txcmd = (void *)data->data;
3244 spin_lock_bh(&cmdq->lock);
3245 ptr = cmdq->write_ptr;
3246 for (i = 0; i < cmdq->n_window; i++) {
3247 u8 idx = iwl_txq_get_cmd_index(cmdq, ptr);
3248 u8 tfdidx;
3249 u32 caplen, cmdlen;
3250
3251 if (trans->trans_cfg->use_tfh)
3252 tfdidx = idx;
3253 else
3254 tfdidx = ptr;
3255
3256 cmdlen = iwl_trans_pcie_get_cmdlen(trans,
3257 (u8 *)cmdq->tfds +
3258 tfd_size * tfdidx);
3259 caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen);
3260
3261 if (cmdlen) {
3262 len += sizeof(*txcmd) + caplen;
3263 txcmd->cmdlen = cpu_to_le32(cmdlen);
3264 txcmd->caplen = cpu_to_le32(caplen);
3265 memcpy(txcmd->data, cmdq->entries[idx].cmd,
3266 caplen);
3267 txcmd = (void *)((u8 *)txcmd->data + caplen);
3268 }
3269
3270 ptr = iwl_txq_dec_wrap(trans, ptr);
3271 }
3272 spin_unlock_bh(&cmdq->lock);
3273
3274 data->len = cpu_to_le32(len);
3275 len += sizeof(*data);
3276 data = iwl_fw_error_next_data(data);
3277 }
3278
3279 if (dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR))
3280 len += iwl_trans_pcie_dump_csr(trans, &data);
3281 if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS))
3282 len += iwl_trans_pcie_fh_regs_dump(trans, &data);
3283 if (dump_rbs)
3284 len += iwl_trans_pcie_dump_rbs(trans, &data, num_rbs);
3285
3286 /* Paged memory for gen2 HW */
3287 if (trans->trans_cfg->gen2 &&
3288 dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING)) {
3289 for (i = 0; i < trans->init_dram.paging_cnt; i++) {
3290 struct iwl_fw_error_dump_paging *paging;
3291 u32 page_len = trans->init_dram.paging[i].size;
3292
3293 data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING);
3294 data->len = cpu_to_le32(sizeof(*paging) + page_len);
3295 paging = (void *)data->data;
3296 paging->index = cpu_to_le32(i);
3297 memcpy(paging->data,
3298 trans->init_dram.paging[i].block, page_len);
3299 data = iwl_fw_error_next_data(data);
3300
3301 len += sizeof(*data) + sizeof(*paging) + page_len;
3302 }
3303 }
3304 if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FW_MONITOR))
3305 len += iwl_trans_pcie_dump_monitor(trans, &data, monitor_len);
3306
3307 dump_data->len = len;
3308
3309 return dump_data;
3310 }
3311
3312 static void iwl_trans_pci_interrupts(struct iwl_trans *trans, bool enable)
3313 {
3314 if (enable)
3315 iwl_enable_interrupts(trans);
3316 else
3317 iwl_disable_interrupts(trans);
3318 }
3319
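/* Pick the interrupt-cause register and SW error bit for MSI-X vs. INTx. */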
3320 static void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans)
3321 {
3322 u32 inta_addr, sw_err_bit;
3323 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
3324
3325 if (trans_pcie->msix_enabled) {
3326 inta_addr = CSR_MSIX_HW_INT_CAUSES_AD;
3327 sw_err_bit = MSIX_HW_INT_CAUSES_REG_SW_ERR;
3328 } else {
3329 inta_addr = CSR_INT;
3330 sw_err_bit = CSR_INT_BIT_SW_ERR;
3331 }
3332
3333 iwl_trans_sync_nmi_with_addr(trans, inta_addr, sw_err_bit);
3334 }
3335
3336 #define IWL_TRANS_COMMON_OPS \
3337 .op_mode_leave = iwl_trans_pcie_op_mode_leave, \
3338 .write8 = iwl_trans_pcie_write8, \
3339 .write32 = iwl_trans_pcie_write32, \
3340 .read32 = iwl_trans_pcie_read32, \
3341 .read_prph = iwl_trans_pcie_read_prph, \
3342 .write_prph = iwl_trans_pcie_write_prph, \
3343 .read_mem = iwl_trans_pcie_read_mem, \
3344 .write_mem = iwl_trans_pcie_write_mem, \
3345 .read_config32 = iwl_trans_pcie_read_config32, \
3346 .configure = iwl_trans_pcie_configure, \
3347 .set_pmi = iwl_trans_pcie_set_pmi, \
3348 .sw_reset = iwl_trans_pcie_sw_reset, \
3349 .grab_nic_access = iwl_trans_pcie_grab_nic_access, \
3350 .release_nic_access = iwl_trans_pcie_release_nic_access, \
3351 .set_bits_mask = iwl_trans_pcie_set_bits_mask, \
3352 .dump_data = iwl_trans_pcie_dump_data, \
3353 .d3_suspend = iwl_trans_pcie_d3_suspend, \
3354 .d3_resume = iwl_trans_pcie_d3_resume, \
3355 .interrupts = iwl_trans_pci_interrupts, \
3356 .sync_nmi = iwl_trans_pcie_sync_nmi \
3357
3358 static const struct iwl_trans_ops trans_ops_pcie = {
3359 IWL_TRANS_COMMON_OPS,
3360 .start_hw = iwl_trans_pcie_start_hw,
3361 .fw_alive = iwl_trans_pcie_fw_alive,
3362 .start_fw = iwl_trans_pcie_start_fw,
3363 .stop_device = iwl_trans_pcie_stop_device,
3364
3365 .send_cmd = iwl_pcie_enqueue_hcmd,
3366
3367 .tx = iwl_trans_pcie_tx,
3368 .reclaim = iwl_txq_reclaim,
3369
3370 .txq_disable = iwl_trans_pcie_txq_disable,
3371 .txq_enable = iwl_trans_pcie_txq_enable,
3372
3373 .txq_set_shared_mode = iwl_trans_pcie_txq_set_shared_mode,
3374
3375 .wait_tx_queues_empty = iwl_trans_pcie_wait_txqs_empty,
3376
3377 .freeze_txq_timer = iwl_trans_txq_freeze_timer,
3378 .block_txq_ptrs = iwl_trans_pcie_block_txq_ptrs,
3379 #ifdef CONFIG_IWLWIFI_DEBUGFS
3380 .debugfs_cleanup = iwl_trans_pcie_debugfs_cleanup,
3381 #endif
3382 };
3383
3384 static const struct iwl_trans_ops trans_ops_pcie_gen2 = {
3385 IWL_TRANS_COMMON_OPS,
3386 .start_hw = iwl_trans_pcie_start_hw,
3387 .fw_alive = iwl_trans_pcie_gen2_fw_alive,
3388 .start_fw = iwl_trans_pcie_gen2_start_fw,
3389 .stop_device = iwl_trans_pcie_gen2_stop_device,
3390
3391 .send_cmd = iwl_pcie_gen2_enqueue_hcmd,
3392
3393 .tx = iwl_txq_gen2_tx,
3394 .reclaim = iwl_txq_reclaim,
3395
3396 .set_q_ptrs = iwl_txq_set_q_ptrs,
3397
3398 .txq_alloc = iwl_txq_dyn_alloc,
3399 .txq_free = iwl_txq_dyn_free,
3400 .wait_txq_empty = iwl_trans_pcie_wait_txq_empty,
3401 .rxq_dma_data = iwl_trans_pcie_rxq_dma_data,
3402 .set_pnvm = iwl_trans_pcie_ctx_info_gen3_set_pnvm,
3403 #ifdef CONFIG_IWLWIFI_DEBUGFS
3404 .debugfs_cleanup = iwl_trans_pcie_debugfs_cleanup,
3405 #endif
3406 };
3407
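/*
 * Allocate and initialize the PCIe transport: enable the PCI device,
 * set the DMA masks, map BAR0, read and normalize the HW revision,
 * set up interrupts (MSI-X when available, otherwise ICT + INTx) and
 * initialize the debugfs monitor state.
 */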
3408 struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
3409 const struct pci_device_id *ent,
3410 const struct iwl_cfg_trans_params *cfg_trans)
3411 {
3412 struct iwl_trans_pcie *trans_pcie;
3413 struct iwl_trans *trans;
3414 int ret, addr_size;
3415 const struct iwl_trans_ops *ops = &trans_ops_pcie_gen2;
3416
3417 if (!cfg_trans->gen2)
3418 ops = &trans_ops_pcie;
3419
3420 ret = pcim_enable_device(pdev);
3421 if (ret)
3422 return ERR_PTR(ret);
3423
3424 trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), &pdev->dev, ops,
3425 cfg_trans);
3426 if (!trans)
3427 return ERR_PTR(-ENOMEM);
3428
3429 trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
3430
3431 trans_pcie->trans = trans;
3432 trans_pcie->opmode_down = true;
3433 spin_lock_init(&trans_pcie->irq_lock);
3434 spin_lock_init(&trans_pcie->reg_lock);
3435 spin_lock_init(&trans_pcie->alloc_page_lock);
3436 mutex_init(&trans_pcie->mutex);
3437 init_waitqueue_head(&trans_pcie->ucode_write_waitq);
3438 init_waitqueue_head(&trans_pcie->fw_reset_waitq);
3439
3440 trans_pcie->rba.alloc_wq = alloc_workqueue("rb_allocator",
3441 WQ_HIGHPRI | WQ_UNBOUND, 1);
3442 if (!trans_pcie->rba.alloc_wq) {
3443 ret = -ENOMEM;
3444 goto out_free_trans;
3445 }
3446 INIT_WORK(&trans_pcie->rba.rx_alloc, iwl_pcie_rx_allocator_work);
3447
3448 trans_pcie->debug_rfkill = -1;
3449
3450 if (!cfg_trans->base_params->pcie_l1_allowed) {
3451 /*
3452 * W/A - seems to solve weird behavior. We need to remove this
3453 * if we don't want to stay in L1 all the time. This wastes a
3454 * lot of power.
3455 */
3456 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
3457 PCIE_LINK_STATE_L1 |
3458 PCIE_LINK_STATE_CLKPM);
3459 }
3460
3461 trans_pcie->def_rx_queue = 0;
3462
3463 pci_set_master(pdev);
3464
3465 addr_size = trans->txqs.tfd.addr_size;
3466 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(addr_size));
3467 if (!ret)
3468 ret = pci_set_consistent_dma_mask(pdev,
3469 DMA_BIT_MASK(addr_size));
3470 if (ret) {
3471 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
3472 if (!ret)
3473 ret = pci_set_consistent_dma_mask(pdev,
3474 DMA_BIT_MASK(32));
3475 /* both attempts failed: */
3476 if (ret) {
3477 dev_err(&pdev->dev, "No suitable DMA available\n");
3478 goto out_no_pci;
3479 }
3480 }
3481
3482 ret = pcim_iomap_regions_request_all(pdev, BIT(0), DRV_NAME);
3483 if (ret) {
3484 dev_err(&pdev->dev, "pcim_iomap_regions_request_all failed\n");
3485 goto out_no_pci;
3486 }
3487
3488 trans_pcie->hw_base = pcim_iomap_table(pdev)[0];
3489 if (!trans_pcie->hw_base) {
3490 dev_err(&pdev->dev, "pcim_iomap_table failed\n");
3491 ret = -ENODEV;
3492 goto out_no_pci;
3493 }
3494
3495 /* We disable the RETRY_TIMEOUT register (0x41) to keep
3496 * PCI Tx retries from interfering with C3 CPU state */
3497 pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
3498
3499 trans_pcie->pci_dev = pdev;
3500 iwl_disable_interrupts(trans);
3501
3502 trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
3503 if (trans->hw_rev == 0xffffffff) {
3504 dev_err(&pdev->dev, "HW_REV=0xFFFFFFFF, PCI issues?\n");
3505 ret = -EIO;
3506 goto out_no_pci;
3507 }
3508
3509 /*
3510 	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV has
3511 	 * changed, and now the revision step also includes bits 0-1 (no more
3512 * "dash" value). To keep hw_rev backwards compatible - we'll store it
3513 * in the old format.
3514 */
3515 if (cfg_trans->device_family >= IWL_DEVICE_FAMILY_8000)
3516 trans->hw_rev = (trans->hw_rev & 0xfff0) |
3517 (CSR_HW_REV_STEP(trans->hw_rev << 2) << 2);
3518
3519 IWL_DEBUG_INFO(trans, "HW REV: 0x%0x\n", trans->hw_rev);
3520
3521 iwl_pcie_set_interrupt_capa(pdev, trans, cfg_trans);
3522 trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
3523 snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
3524 "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device);
3525
3526 init_waitqueue_head(&trans_pcie->sx_waitq);
3527
3528
3529 if (trans_pcie->msix_enabled) {
3530 ret = iwl_pcie_init_msix_handler(pdev, trans_pcie);
3531 if (ret)
3532 goto out_no_pci;
3533 } else {
3534 ret = iwl_pcie_alloc_ict(trans);
3535 if (ret)
3536 goto out_no_pci;
3537
3538 ret = devm_request_threaded_irq(&pdev->dev, pdev->irq,
3539 iwl_pcie_isr,
3540 iwl_pcie_irq_handler,
3541 IRQF_SHARED, DRV_NAME, trans);
3542 if (ret) {
3543 IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
3544 goto out_free_ict;
3545 }
3546 }
3547
3548 #ifdef CONFIG_IWLWIFI_DEBUGFS
3549 trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_CLOSED;
3550 mutex_init(&trans_pcie->fw_mon_data.mutex);
3551 #endif
3552
3553 iwl_dbg_tlv_init(trans);
3554
3555 return trans;
3556
3557 out_free_ict:
3558 iwl_pcie_free_ict(trans);
3559 out_no_pci:
3560 destroy_workqueue(trans_pcie->rba.alloc_wq);
3561 out_free_trans:
3562 iwl_trans_free(trans);
3563 return ERR_PTR(ret);
3564 }
3565