// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2007-2015, 2018-2023 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 */
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/wait.h>
#include <linux/seq_file.h>
#if defined(__FreeBSD__)
#include <sys/rman.h>
#include <linux/delay.h>
#endif

#include "iwl-drv.h"
#include "iwl-trans.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-scd.h"
#include "iwl-agn-hw.h"
#include "fw/error-dump.h"
#include "fw/dbg.h"
#include "fw/api/tx.h"
#include "mei/iwl-mei.h"
#include "internal.h"
#include "iwl-fh.h"
#include "iwl-context-info-gen3.h"

/* extended range in FW SRAM */
#define IWL_FW_MEM_EXTENDED_START	0x40000
#define IWL_FW_MEM_EXTENDED_END		0x57FFF

void iwl_trans_pcie_dump_regs(struct iwl_trans *trans)
{
#define PCI_DUMP_SIZE		352
#define PCI_MEM_DUMP_SIZE	64
#define PCI_PARENT_DUMP_SIZE	524
#define PREFIX_LEN		32
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct pci_dev *pdev = trans_pcie->pci_dev;
	u32 i, pos, alloc_size, *ptr, *buf;
	char *prefix;

	if (trans_pcie->pcie_dbg_dumped_once)
		return;

	/* Should be a multiple of 4 */
	BUILD_BUG_ON(PCI_DUMP_SIZE > 4096 || PCI_DUMP_SIZE & 0x3);
	BUILD_BUG_ON(PCI_MEM_DUMP_SIZE > 4096 || PCI_MEM_DUMP_SIZE & 0x3);
	BUILD_BUG_ON(PCI_PARENT_DUMP_SIZE > 4096 || PCI_PARENT_DUMP_SIZE & 0x3);

	/* Alloc a max size buffer */
	alloc_size = PCI_ERR_ROOT_ERR_SRC + 4 + PREFIX_LEN;
	alloc_size = max_t(u32, alloc_size, PCI_DUMP_SIZE + PREFIX_LEN);
	alloc_size = max_t(u32, alloc_size, PCI_MEM_DUMP_SIZE + PREFIX_LEN);
	alloc_size = max_t(u32, alloc_size, PCI_PARENT_DUMP_SIZE + PREFIX_LEN);

	buf = kmalloc(alloc_size, GFP_ATOMIC);
	if (!buf)
		return;
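	/*
	 * The register dump is written from the start of the buffer; the
	 * last PREFIX_LEN bytes are reused for the per-device log prefix.
	 */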
	prefix = (char *)buf + alloc_size - PREFIX_LEN;

	IWL_ERR(trans, "iwlwifi transaction failed, dumping registers\n");

	/* Print wifi device registers */
	sprintf(prefix, "iwlwifi %s: ", pci_name(pdev));
	IWL_ERR(trans, "iwlwifi device config registers:\n");
	for (i = 0, ptr = buf; i < PCI_DUMP_SIZE; i += 4, ptr++)
		if (pci_read_config_dword(pdev, i, ptr))
			goto err_read;
#if defined(__linux__)
	print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);
#elif defined(__FreeBSD__)
	iwl_print_hex_dump(NULL, IWL_DL_ANY, prefix, (u8 *)buf, i);
#endif

	IWL_ERR(trans, "iwlwifi device memory mapped registers:\n");
	for (i = 0, ptr = buf; i < PCI_MEM_DUMP_SIZE; i += 4, ptr++)
		*ptr = iwl_read32(trans, i);
#if defined(__linux__)
	print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);
#elif defined(__FreeBSD__)
	iwl_print_hex_dump(NULL, IWL_DL_ANY, prefix, (u8 *)buf, i);
#endif

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
	if (pos) {
		IWL_ERR(trans, "iwlwifi device AER capability structure:\n");
		for (i = 0, ptr = buf; i < PCI_ERR_ROOT_COMMAND; i += 4, ptr++)
			if (pci_read_config_dword(pdev, pos + i, ptr))
				goto err_read;
#if defined(__linux__)
		print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET,
			       32, 4, buf, i, 0);
#elif defined(__FreeBSD__)
		iwl_print_hex_dump(NULL, IWL_DL_ANY, prefix, (u8 *)buf, i);
#endif
	}

	/* Print parent device registers next */
	if (!pdev->bus->self)
		goto out;

	pdev = pdev->bus->self;
	sprintf(prefix, "iwlwifi %s: ", pci_name(pdev));

	IWL_ERR(trans, "iwlwifi parent port (%s) config registers:\n",
		pci_name(pdev));
	for (i = 0, ptr = buf; i < PCI_PARENT_DUMP_SIZE; i += 4, ptr++)
		if (pci_read_config_dword(pdev, i, ptr))
			goto err_read;
#if defined(__linux__)
	print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);
#elif defined(__FreeBSD__)
	iwl_print_hex_dump(NULL, IWL_DL_ANY, prefix, (u8 *)buf, i);
#endif

	/* Print root port AER registers */
	pos = 0;
	pdev = pcie_find_root_port(pdev);
	if (pdev)
		pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
	if (pos) {
		IWL_ERR(trans, "iwlwifi root port (%s) AER cap structure:\n",
			pci_name(pdev));
		sprintf(prefix, "iwlwifi %s: ", pci_name(pdev));
		for (i = 0, ptr = buf; i <= PCI_ERR_ROOT_ERR_SRC; i += 4, ptr++)
			if (pci_read_config_dword(pdev, pos + i, ptr))
				goto err_read;
#if defined(__linux__)
		print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32,
			       4, buf, i, 0);
#elif defined(__FreeBSD__)
		iwl_print_hex_dump(NULL, IWL_DL_ANY, prefix, (u8 *)buf, i);
#endif
	}
	goto out;

err_read:
#if defined(__linux__)
	print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);
#elif defined(__FreeBSD__)
	iwl_print_hex_dump(NULL, IWL_DL_ANY, prefix, (u8 *)buf, i);
#endif
	IWL_ERR(trans, "Read failed at 0x%X\n", i);
out:
	trans_pcie->pcie_dbg_dumped_once = 1;
	kfree(buf);
}

static int iwl_trans_pcie_sw_reset(struct iwl_trans *trans,
				   bool retake_ownership)
{
	/* Reset entire device - do controller reset (results in SHRD_HW_RST) */
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
		iwl_set_bit(trans, CSR_GP_CNTRL,
			    CSR_GP_CNTRL_REG_FLAG_SW_RESET);
		usleep_range(10000, 20000);
	} else {
		iwl_set_bit(trans, CSR_RESET,
			    CSR_RESET_REG_FLAG_SW_RESET);
		usleep_range(5000, 6000);
	}

	if (retake_ownership)
		return iwl_pcie_prepare_card_hw(trans);

	return 0;
}

static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans)
{
	struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;

	if (!fw_mon->size)
		return;

	dma_free_coherent(trans->dev, fw_mon->size, fw_mon->block,
			  fw_mon->physical);

	fw_mon->block = NULL;
	fw_mon->physical = 0;
	fw_mon->size = 0;
}

static void iwl_pcie_alloc_fw_monitor_block(struct iwl_trans *trans,
					    u8 max_power)
{
	struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
	void *block = NULL;
	dma_addr_t physical = 0;
	u32 size = 0;
	u8 power;

	if (fw_mon->size) {
		memset(fw_mon->block, 0, fw_mon->size);
		return;
	}

	/* need at least 2 KiB, so stop at 11 */
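	/*
	 * Walk down from the requested power of two: power == 11 yields
	 * BIT(11) == 2 KiB, the default maximum of 26 yields 64 MiB, and
	 * every failed allocation halves the request.
	 */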
	for (power = max_power; power >= 11; power--) {
		size = BIT(power);
		block = dma_alloc_coherent(trans->dev, size, &physical,
					   GFP_KERNEL | __GFP_NOWARN);
		if (!block)
			continue;

		IWL_INFO(trans,
			 "Allocated 0x%08x bytes for firmware monitor.\n",
			 size);
		break;
	}

	if (WARN_ON_ONCE(!block))
		return;

	if (power != max_power)
		IWL_ERR(trans,
			"Sorry - debug buffer is only %luK while you requested %luK\n",
			(unsigned long)BIT(power - 10),
			(unsigned long)BIT(max_power - 10));

	fw_mon->block = block;
	fw_mon->physical = physical;
	fw_mon->size = size;
}

void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power)
{
	if (!max_power) {
		/* default max_power is maximum */
		max_power = 26;
	} else {
		max_power += 11;
	}

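	/*
	 * The FW TLV encodes the external buffer size as a power of two
	 * offset by 11, so e.g. a TLV value of 15 requests
	 * BIT(15 + 11) == 64 MiB; anything above 26 is rejected below.
	 */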
	if (WARN(max_power > 26,
		 "External buffer size for monitor is too big %d, check the FW TLV\n",
		 max_power))
		return;

	iwl_pcie_alloc_fw_monitor_block(trans, max_power);
}

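/*
 * Shared (SHR) registers are reached indirectly through a control/data
 * register pair: the low 16 bits of the control word carry the target
 * address and the top nibble the opcode (2 == read, 3 == write), as the
 * two helpers below encode.
 */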
static u32 iwl_trans_pcie_read_shr(struct iwl_trans *trans, u32 reg)
{
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
		    ((reg & 0x0000ffff) | (2 << 28)));
	return iwl_read32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG);
}

static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val)
{
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG, val);
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
		    ((reg & 0x0000ffff) | (3 << 28)));
}

static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
{
	if (trans->cfg->apmg_not_supported)
		return;

	if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
		iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
				       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
				       ~APMG_PS_CTRL_MSK_PWR_SRC);
	else
		iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
				       APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
				       ~APMG_PS_CTRL_MSK_PWR_SRC);
}

/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT	0x041

void iwl_pcie_apm_config(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u16 lctl;
	u16 cap;

	/*
	 * L0S states have been found to be unstable with our devices
	 * and in newer hardware they are not officially supported at
	 * all, so we must always set the L0S_DISABLED bit.
	 */
	iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_DISABLED);

	pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl);
	trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);

	pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap);
	trans->ltr_enabled = cap & PCI_EXP_DEVCTL2_LTR_EN;
	IWL_DEBUG_POWER(trans, "L1 %sabled - LTR %sabled\n",
			(lctl & PCI_EXP_LNKCTL_ASPM_L1) ? "En" : "Dis",
			trans->ltr_enabled ? "En" : "Dis");
}

/*
 * Start up NIC's basic functionality after it has been reset
 * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop())
 * NOTE: This does not load uCode nor start the embedded processor
 */
static int iwl_pcie_apm_init(struct iwl_trans *trans)
{
	int ret;

	IWL_DEBUG_INFO(trans, "Init card's basic functions\n");

	/*
	 * Use "set_bit" below rather than "write", to preserve any hardware
	 * bits already set by default after reset.
	 */

	/* Disable L0S exit timer (platform NMI Work/Around) */
	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000)
		iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
			    CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	/*
	 * Disable L0s without affecting L1;
	 * don't wait for ICH L0s (ICH bug W/A)
	 */
	iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress W/A) */
	iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);

	/*
	 * Enable HAP INTA (interrupt from management bus) to
	 * wake device's PCI Express link L1a -> L0s
	 */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	iwl_pcie_apm_config(trans);

	/* Configure analog phase-lock-loop before activating to D0A */
	if (trans->trans_cfg->base_params->pll_cfg)
		iwl_set_bit(trans, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL);

	ret = iwl_finish_nic_init(trans);
	if (ret)
		return ret;

	if (trans->cfg->host_interrupt_operation_mode) {
		/*
		 * This is a bit of an abuse - the oscillator workaround
		 * below is only needed for 7260 / 3160, and we check
		 * host_interrupt_operation_mode merely because it
		 * identifies those devices; the flag is not otherwise
		 * related to the workaround.
		 *
		 * Enable the oscillator to count wake up time for L1 exit. This
		 * consumes slightly more power (100uA) - but allows to be sure
		 * that we wake up from L1 on time.
		 *
		 * This looks weird: read twice the same register, discard the
		 * value, set a bit, and yet again, read that same register
		 * just to discard the value. But that's the way the hardware
		 * seems to like it.
		 */
		iwl_read_prph(trans, OSC_CLK);
		iwl_read_prph(trans, OSC_CLK);
		iwl_set_bits_prph(trans, OSC_CLK, OSC_CLK_FORCE_CONTROL);
		iwl_read_prph(trans, OSC_CLK);
		iwl_read_prph(trans, OSC_CLK);
	}

	/*
	 * Enable DMA clock and wait for it to stabilize.
	 *
	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0"
	 * bits do not disable clocks. This preserves any hardware
	 * bits already set by default in "CLK_CTRL_REG" after reset.
	 */
	if (!trans->cfg->apmg_not_supported) {
		iwl_write_prph(trans, APMG_CLK_EN_REG,
			       APMG_CLK_VAL_DMA_CLK_RQT);
		udelay(20);

		/* Disable L1-Active */
		iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
				  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

		/* Clear the interrupt in APMG if the NIC is in RFKILL */
		iwl_write_prph(trans, APMG_RTC_INT_STT_REG,
			       APMG_RTC_INT_STT_RFKILL);
	}

	set_bit(STATUS_DEVICE_ENABLED, &trans->status);

	return 0;
}

/*
 * Enable LP XTAL to avoid HW bug where device may consume much power if
 * FW is not loaded after device reset. LP XTAL is disabled by default
 * after device HW reset. Do it only if XTAL is fed by internal source.
 * Configure device's "persistence" mode to avoid resetting XTAL again when
 * SHRD_HW_RST occurs in S3.
 */
static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
{
	int ret;
	u32 apmg_gp1_reg;
	u32 apmg_xtal_cfg_reg;
	u32 dl_cfg_reg;

	/* Force XTAL ON */
	__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
				 CSR_GP_CNTRL_REG_FLAG_XTAL_ON);

	ret = iwl_trans_pcie_sw_reset(trans, true);

	if (!ret)
		ret = iwl_finish_nic_init(trans);

	if (WARN_ON(ret)) {
		/* Release XTAL ON request */
		__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
					   CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
		return;
	}

	/*
	 * Clear "disable persistence" to avoid LP XTAL resetting when
	 * SHRD_HW_RST is applied in S3.
	 */
	iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
			    APMG_PCIDEV_STT_VAL_PERSIST_DIS);

	/*
	 * Force APMG XTAL to be active to prevent its disabling by HW
	 * caused by APMG idle state.
	 */
	apmg_xtal_cfg_reg = iwl_trans_pcie_read_shr(trans,
						    SHR_APMG_XTAL_CFG_REG);
	iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
				 apmg_xtal_cfg_reg |
				 SHR_APMG_XTAL_CFG_XTAL_ON_REQ);

	ret = iwl_trans_pcie_sw_reset(trans, true);
	if (ret)
		IWL_ERR(trans,
			"iwl_pcie_apm_lp_xtal_enable: failed to retake NIC ownership\n");

	/* Enable LP XTAL by indirect access through CSR */
	apmg_gp1_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_GP1_REG);
	iwl_trans_pcie_write_shr(trans, SHR_APMG_GP1_REG, apmg_gp1_reg |
				 SHR_APMG_GP1_WF_XTAL_LP_EN |
				 SHR_APMG_GP1_CHICKEN_BIT_SELECT);

	/* Clear delay line clock power up */
	dl_cfg_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_DL_CFG_REG);
	iwl_trans_pcie_write_shr(trans, SHR_APMG_DL_CFG_REG, dl_cfg_reg &
				 ~SHR_APMG_DL_CFG_DL_CLOCK_POWER_UP);

	/*
	 * Enable persistence mode to avoid LP XTAL resetting when
	 * SHRD_HW_RST is applied in S3.
	 */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_PERSIST_MODE);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/* Activates XTAL resources monitor */
	__iwl_trans_pcie_set_bit(trans, CSR_MONITOR_CFG_REG,
				 CSR_MONITOR_XTAL_RESOURCES);

	/* Release XTAL ON request */
	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
	udelay(10);

	/* Release APMG XTAL */
	iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
				 apmg_xtal_cfg_reg &
				 ~SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
}

void iwl_pcie_apm_stop_master(struct iwl_trans *trans)
{
	int ret;

	/* stop device's busmaster DMA activity */

	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
		iwl_set_bit(trans, CSR_GP_CNTRL,
			    CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_REQ);

		ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_STATUS,
				   CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_STATUS,
				   100);
		usleep_range(10000, 20000);
	} else {
		iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);

		ret = iwl_poll_bit(trans, CSR_RESET,
				   CSR_RESET_REG_FLAG_MASTER_DISABLED,
				   CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
	}

	if (ret < 0)
		IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");

	IWL_DEBUG_INFO(trans, "stop master\n");
}

static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
{
	IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");

	if (op_mode_leave) {
		if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
			iwl_pcie_apm_init(trans);

		/* inform ME that we are leaving */
		if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000)
			iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
					  APMG_PCIDEV_STT_VAL_WAKE_ME);
		else if (trans->trans_cfg->device_family >=
			 IWL_DEVICE_FAMILY_8000) {
			iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
				    CSR_RESET_LINK_PWR_MGMT_DISABLED);
			iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
				    CSR_HW_IF_CONFIG_REG_PREPARE |
				    CSR_HW_IF_CONFIG_REG_ENABLE_PME);
			mdelay(1);
			iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
				      CSR_RESET_LINK_PWR_MGMT_DISABLED);
		}
		mdelay(5);
	}

	clear_bit(STATUS_DEVICE_ENABLED, &trans->status);

	/* Stop device's DMA activity */
	iwl_pcie_apm_stop_master(trans);

	if (trans->cfg->lp_xtal_workaround) {
		iwl_pcie_apm_lp_xtal_enable(trans);
		return;
	}

	iwl_trans_pcie_sw_reset(trans, false);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}

static int iwl_pcie_nic_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	/* nic_init */
	spin_lock_bh(&trans_pcie->irq_lock);
	ret = iwl_pcie_apm_init(trans);
	spin_unlock_bh(&trans_pcie->irq_lock);

	if (ret)
		return ret;

	iwl_pcie_set_pwr(trans, false);

	iwl_op_mode_nic_config(trans->op_mode);

	/* Allocate the RX queue, or reset if it is already allocated */
	ret = iwl_pcie_rx_init(trans);
	if (ret)
		return ret;

	/* Allocate or reset and init all Tx and Command queues */
	if (iwl_pcie_tx_init(trans)) {
		iwl_pcie_rx_free(trans);
		return -ENOMEM;
	}

	if (trans->trans_cfg->base_params->shadow_reg_enable) {
		/* enable shadow regs in HW */
		iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
		IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n");
	}

	return 0;
}

#define HW_READY_TIMEOUT (50)

/* Note: returns poll_bit return value, which is >= 0 if success */
static int iwl_pcie_set_hw_ready(struct iwl_trans *trans)
{
	int ret;

	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	/* See if we got it */
	ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   HW_READY_TIMEOUT);

	if (ret >= 0)
		iwl_set_bit(trans, CSR_MBOX_SET_REG, CSR_MBOX_SET_REG_OS_ALIVE);

	IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");
	return ret;
}

/* Note: returns standard 0/-ERROR code */
int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
{
	int ret;
	int iter;

	IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");

	ret = iwl_pcie_set_hw_ready(trans);
	/* If the card is ready, exit 0 */
	if (ret >= 0) {
		trans->csme_own = false;
		return 0;
	}

	iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
		    CSR_RESET_LINK_PWR_MGMT_DISABLED);
	usleep_range(1000, 2000);

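	/*
	 * Retry up to 10 times; each attempt polls for HW_READY for up to
	 * ~150 ms (counted in 200 us steps below) before backing off 25 ms.
	 */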
	for (iter = 0; iter < 10; iter++) {
		int t = 0;

		/* If HW is not ready, prepare the conditions to check again */
		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
			    CSR_HW_IF_CONFIG_REG_PREPARE);

		do {
			ret = iwl_pcie_set_hw_ready(trans);
			if (ret >= 0) {
				trans->csme_own = false;
				return 0;
			}

			if (iwl_mei_is_connected()) {
				IWL_DEBUG_INFO(trans,
					       "Couldn't prepare the card but SAP is connected\n");
				trans->csme_own = true;
				if (trans->trans_cfg->device_family !=
				    IWL_DEVICE_FAMILY_9000)
					IWL_ERR(trans,
						"SAP not supported for this NIC family\n");

				return -EBUSY;
			}

			usleep_range(200, 1000);
			t += 200;
		} while (t < 150000);
		msleep(25);
	}

	IWL_ERR(trans, "Couldn't prepare the card\n");

	return ret;
}

/*
 * ucode
 */
static void iwl_pcie_load_firmware_chunk_fh(struct iwl_trans *trans,
					    u32 dst_addr, dma_addr_t phy_addr,
					    u32 byte_cnt)
{
	iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
		    FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

	iwl_write32(trans, FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL),
		    dst_addr);

	iwl_write32(trans, FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
		    phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);

	iwl_write32(trans, FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
		    (iwl_get_dma_hi_addr(phy_addr)
			<< FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);

	iwl_write32(trans, FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
		    BIT(FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM) |
		    BIT(FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX) |
		    FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

	iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
		    FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		    FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
		    FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
}

static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans,
					u32 dst_addr, dma_addr_t phy_addr,
					u32 byte_cnt)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	trans_pcie->ucode_write_complete = false;

	if (!iwl_trans_grab_nic_access(trans))
		return -EIO;

	iwl_pcie_load_firmware_chunk_fh(trans, dst_addr, phy_addr,
					byte_cnt);
	iwl_trans_release_nic_access(trans);

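	/*
	 * The FH-TX interrupt path sets ucode_write_complete and wakes this
	 * queue; allow up to five seconds for the chunk DMA to finish.
	 */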
	ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
				 trans_pcie->ucode_write_complete, 5 * HZ);
	if (!ret) {
		IWL_ERR(trans, "Failed to load firmware chunk!\n");
		iwl_trans_pcie_dump_regs(trans);
		return -ETIMEDOUT;
	}

	return 0;
}

static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
				 const struct fw_desc *section)
{
	u8 *v_addr;
	dma_addr_t p_addr;
	u32 offset, chunk_sz = min_t(u32, FH_MEM_TB_MAX_LENGTH, section->len);
	int ret = 0;

	IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
		     section_num);

	v_addr = dma_alloc_coherent(trans->dev, chunk_sz, &p_addr,
				    GFP_KERNEL | __GFP_NOWARN);
	if (!v_addr) {
		IWL_DEBUG_INFO(trans, "Falling back to small chunks of DMA\n");
		chunk_sz = PAGE_SIZE;
		v_addr = dma_alloc_coherent(trans->dev, chunk_sz,
					    &p_addr, GFP_KERNEL);
		if (!v_addr)
			return -ENOMEM;
	}

	for (offset = 0; offset < section->len; offset += chunk_sz) {
		u32 copy_size, dst_addr;
		bool extended_addr = false;

		copy_size = min_t(u32, chunk_sz, section->len - offset);
		dst_addr = section->offset + offset;

		if (dst_addr >= IWL_FW_MEM_EXTENDED_START &&
		    dst_addr <= IWL_FW_MEM_EXTENDED_END)
			extended_addr = true;

		if (extended_addr)
			iwl_set_bits_prph(trans, LMPM_CHICK,
					  LMPM_CHICK_EXTENDED_ADDR_SPACE);

		memcpy(v_addr, (const u8 *)section->data + offset, copy_size);
		ret = iwl_pcie_load_firmware_chunk(trans, dst_addr, p_addr,
						   copy_size);

		if (extended_addr)
			iwl_clear_bits_prph(trans, LMPM_CHICK,
					    LMPM_CHICK_EXTENDED_ADDR_SPACE);

		if (ret) {
			IWL_ERR(trans,
				"Could not load the [%d] uCode section\n",
				section_num);
			break;
		}
	}

	dma_free_coherent(trans->dev, chunk_sz, v_addr, p_addr);
	return ret;
}

static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
					   const struct fw_img *image,
					   int cpu,
					   int *first_ucode_section)
{
	int shift_param;
	int i, ret = 0, sec_num = 0x1;
	u32 val, last_read_idx = 0;

	if (cpu == 1) {
		shift_param = 0;
		*first_ucode_section = 0;
	} else {
		shift_param = 16;
		(*first_ucode_section)++;
	}

	for (i = *first_ucode_section; i < image->num_sec; i++) {
		last_read_idx = i;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
		 * CPU1 to CPU2.
		 * PAGING_SEPARATOR_SECTION delimiter - separate between
		 * CPU2 non paged to CPU2 paging sec.
		 */
		if (!image->sec[i].data ||
		    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
		    image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
			IWL_DEBUG_FW(trans,
				     "Break since Data not valid or Empty section, sec = %d\n",
				     i);
			break;
		}

		ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
		if (ret)
			return ret;

		/* Notify ucode of loaded section number and status */
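		/*
		 * sec_num accumulates one bit per loaded section, so after
		 * N sections the low N bits are set (shifted up by 16 when
		 * loading CPU2).
		 */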
		val = iwl_read_direct32(trans, FH_UCODE_LOAD_STATUS);
		val = val | (sec_num << shift_param);
		iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, val);

		sec_num = (sec_num << 1) | 0x1;
	}

	*first_ucode_section = last_read_idx;

	iwl_enable_interrupts(trans);

	if (trans->trans_cfg->gen2) {
		if (cpu == 1)
			iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS,
				       0xFFFF);
		else
			iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS,
				       0xFFFFFFFF);
	} else {
		if (cpu == 1)
			iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS,
					   0xFFFF);
		else
			iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS,
					   0xFFFFFFFF);
	}

	return 0;
}

static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
				      const struct fw_img *image,
				      int cpu,
				      int *first_ucode_section)
{
	int i, ret = 0;
	u32 last_read_idx = 0;

	if (cpu == 1)
		*first_ucode_section = 0;
	else
		(*first_ucode_section)++;

	for (i = *first_ucode_section; i < image->num_sec; i++) {
		last_read_idx = i;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
		 * CPU1 to CPU2.
		 * PAGING_SEPARATOR_SECTION delimiter - separate between
		 * CPU2 non paged to CPU2 paging sec.
		 */
		if (!image->sec[i].data ||
		    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
		    image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
			IWL_DEBUG_FW(trans,
				     "Break since Data not valid or Empty section, sec = %d\n",
				     i);
			break;
		}

		ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
		if (ret)
			return ret;
	}

	*first_ucode_section = last_read_idx;

	return 0;
}

static void iwl_pcie_apply_destination_ini(struct iwl_trans *trans)
{
	enum iwl_fw_ini_allocation_id alloc_id = IWL_FW_INI_ALLOCATION_ID_DBGC1;
	struct iwl_fw_ini_allocation_tlv *fw_mon_cfg =
		&trans->dbg.fw_mon_cfg[alloc_id];
	struct iwl_dram_data *frag;

	if (!iwl_trans_dbg_ini_valid(trans))
		return;

	if (le32_to_cpu(fw_mon_cfg->buf_location) ==
	    IWL_FW_INI_LOCATION_SRAM_PATH) {
		IWL_DEBUG_FW(trans, "WRT: Applying SMEM buffer destination\n");
		/* set sram monitor by enabling bit 7 */
		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
			    CSR_HW_IF_CONFIG_REG_BIT_MONITOR_SRAM);

		return;
	}

	if (le32_to_cpu(fw_mon_cfg->buf_location) !=
	    IWL_FW_INI_LOCATION_DRAM_PATH ||
	    !trans->dbg.fw_mon_ini[alloc_id].num_frags)
		return;

	frag = &trans->dbg.fw_mon_ini[alloc_id].frags[0];

	IWL_DEBUG_FW(trans, "WRT: Applying DRAM destination (alloc_id=%u)\n",
		     alloc_id);

	iwl_write_umac_prph(trans, MON_BUFF_BASE_ADDR_VER2,
			    frag->physical >> MON_BUFF_SHIFT_VER2);
	iwl_write_umac_prph(trans, MON_BUFF_END_ADDR_VER2,
			    (frag->physical + frag->size - 256) >>
			    MON_BUFF_SHIFT_VER2);
}

void iwl_pcie_apply_destination(struct iwl_trans *trans)
{
	const struct iwl_fw_dbg_dest_tlv_v1 *dest = trans->dbg.dest_tlv;
	const struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
	int i;

	if (iwl_trans_dbg_ini_valid(trans)) {
		iwl_pcie_apply_destination_ini(trans);
		return;
	}

	IWL_INFO(trans, "Applying debug destination %s\n",
		 get_fw_dbg_mode_string(dest->monitor_mode));

	if (dest->monitor_mode == EXTERNAL_MODE)
		iwl_pcie_alloc_fw_monitor(trans, dest->size_power);
	else
		IWL_WARN(trans, "PCI should have external buffer debug\n");

	for (i = 0; i < trans->dbg.n_dest_reg; i++) {
		u32 addr = le32_to_cpu(dest->reg_ops[i].addr);
		u32 val = le32_to_cpu(dest->reg_ops[i].val);

		switch (dest->reg_ops[i].op) {
		case CSR_ASSIGN:
			iwl_write32(trans, addr, val);
			break;
		case CSR_SETBIT:
			iwl_set_bit(trans, addr, BIT(val));
			break;
		case CSR_CLEARBIT:
			iwl_clear_bit(trans, addr, BIT(val));
			break;
		case PRPH_ASSIGN:
			iwl_write_prph(trans, addr, val);
			break;
		case PRPH_SETBIT:
			iwl_set_bits_prph(trans, addr, BIT(val));
			break;
		case PRPH_CLEARBIT:
			iwl_clear_bits_prph(trans, addr, BIT(val));
			break;
		case PRPH_BLOCKBIT:
			if (iwl_read_prph(trans, addr) & BIT(val)) {
				IWL_ERR(trans,
					"BIT(%u) in address 0x%x is 1, stopping FW configuration\n",
					val, addr);
				goto monitor;
			}
			break;
		default:
			IWL_ERR(trans, "FW debug - unknown OP %d\n",
				dest->reg_ops[i].op);
			break;
		}
	}

monitor:
	if (dest->monitor_mode == EXTERNAL_MODE && fw_mon->size) {
		iwl_write_prph(trans, le32_to_cpu(dest->base_reg),
			       fw_mon->physical >> dest->base_shift);
		if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
			iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
				       (fw_mon->physical + fw_mon->size -
					256) >> dest->end_shift);
		else
			iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
				       (fw_mon->physical + fw_mon->size) >>
				       dest->end_shift);
	}
}

static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
				     const struct fw_img *image)
{
	int ret = 0;
	int first_ucode_section;

	IWL_DEBUG_FW(trans, "working with %s CPU\n",
		     image->is_dual_cpus ? "Dual" : "Single");

	/* load to FW the binary non secured sections of CPU1 */
	ret = iwl_pcie_load_cpu_sections(trans, image, 1, &first_ucode_section);
	if (ret)
		return ret;

	if (image->is_dual_cpus) {
		/* set CPU2 header address */
		iwl_write_prph(trans,
			       LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
			       LMPM_SECURE_CPU2_HDR_MEM_SPACE);

		/* load to FW the binary sections of CPU2 */
		ret = iwl_pcie_load_cpu_sections(trans, image, 2,
						 &first_ucode_section);
		if (ret)
			return ret;
	}

	if (iwl_pcie_dbg_on(trans))
		iwl_pcie_apply_destination(trans);

	iwl_enable_interrupts(trans);

	/* release CPU reset */
	iwl_write32(trans, CSR_RESET, 0);

	return 0;
}

static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans,
					  const struct fw_img *image)
{
	int ret = 0;
	int first_ucode_section;

	IWL_DEBUG_FW(trans, "working with %s CPU\n",
		     image->is_dual_cpus ? "Dual" : "Single");

	if (iwl_pcie_dbg_on(trans))
		iwl_pcie_apply_destination(trans);

	IWL_DEBUG_POWER(trans, "Original WFPM value = 0x%08X\n",
			iwl_read_prph(trans, WFPM_GP2));

	/*
	 * Set default value. On resume reading the values that were
	 * zeroed can provide debug data on the resume flow.
	 * This is for debugging only and has no functional impact.
	 */
	iwl_write_prph(trans, WFPM_GP2, 0x01010101);

	/* configure the ucode to be ready to get the secured image */
	/* release CPU reset */
	iwl_write_prph(trans, RELEASE_CPU_RESET, RELEASE_CPU_RESET_BIT);

	/* load to FW the binary Secured sections of CPU1 */
	ret = iwl_pcie_load_cpu_sections_8000(trans, image, 1,
					      &first_ucode_section);
	if (ret)
		return ret;

	/* load to FW the binary sections of CPU2 */
	return iwl_pcie_load_cpu_sections_8000(trans, image, 2,
					       &first_ucode_section);
}

bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool hw_rfkill = iwl_is_rfkill_set(trans);
	bool prev = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
	bool report;

	if (hw_rfkill) {
		set_bit(STATUS_RFKILL_HW, &trans->status);
		set_bit(STATUS_RFKILL_OPMODE, &trans->status);
	} else {
		clear_bit(STATUS_RFKILL_HW, &trans->status);
		if (trans_pcie->opmode_down)
			clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
	}

	report = test_bit(STATUS_RFKILL_OPMODE, &trans->status);

	if (prev != report)
		iwl_trans_pcie_rf_kill(trans, report);

	return hw_rfkill;
}

struct iwl_causes_list {
	u16 mask_reg;
	u8 bit;
	u8 addr;
};

#define IWL_CAUSE(reg, mask)						\
	{								\
		.mask_reg = reg,					\
		.bit = ilog2(mask),					\
		.addr = ilog2(mask) +					\
			((reg) == CSR_MSIX_FH_INT_MASK_AD ? -16 :	\
			 (reg) == CSR_MSIX_HW_INT_MASK_AD ? 16 :	\
			 0xffff),	/* causes overflow warning */	\
	}
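/*
 * The resulting IVAR table has one byte per cause: FH (DMA) causes fill
 * the first half and HW causes the second half, which is what the
 * -16/+16 offsets above encode (the non-RX FH cause bits start at bit 16
 * of their mask register). Any other register falls into the 0xffff arm,
 * which deliberately trips a compile-time overflow warning.
 */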

static const struct iwl_causes_list causes_list_common[] = {
	IWL_CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_D2S_CH0_NUM),
	IWL_CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_D2S_CH1_NUM),
	IWL_CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_S2D),
	IWL_CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_FH_ERR),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_ALIVE),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_WAKEUP),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_RESET_DONE),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_CT_KILL),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_RF_KILL),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_PERIODIC),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_SCD),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_FH_TX),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_HW_ERR),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_HAP),
};

static const struct iwl_causes_list causes_list_pre_bz[] = {
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_SW_ERR),
};

static const struct iwl_causes_list causes_list_bz[] = {
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ),
};

static void iwl_pcie_map_list(struct iwl_trans *trans,
			      const struct iwl_causes_list *causes,
			      int arr_size, int val)
{
	int i;

	for (i = 0; i < arr_size; i++) {
		iwl_write8(trans, CSR_MSIX_IVAR(causes[i].addr), val);
		iwl_clear_bit(trans, causes[i].mask_reg,
			      BIT(causes[i].bit));
	}
}

static void iwl_pcie_map_non_rx_causes(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int val = trans_pcie->def_irq | MSIX_NON_AUTO_CLEAR_CAUSE;
	/*
	 * Access all non RX causes and map them to the default irq.
	 * In case we are missing at least one interrupt vector,
	 * the first interrupt vector will serve non-RX and FBQ causes.
	 */
	iwl_pcie_map_list(trans, causes_list_common,
			  ARRAY_SIZE(causes_list_common), val);
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
		iwl_pcie_map_list(trans, causes_list_bz,
				  ARRAY_SIZE(causes_list_bz), val);
	else
		iwl_pcie_map_list(trans, causes_list_pre_bz,
				  ARRAY_SIZE(causes_list_pre_bz), val);
}

static void iwl_pcie_map_rx_causes(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 offset =
		trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;
	u32 val, idx;

	/*
	 * The first RX queue - fallback queue, which is designated for
	 * management frame, command responses etc, is always mapped to the
	 * first interrupt vector. The other RX queues are mapped to
	 * the other (N - 2) interrupt vectors.
	 */
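	/*
	 * When the first vector also serves the first RSS queue, the
	 * offset computed above shifts the cause numbering down by one,
	 * so that queue 1 reuses cause Q(0).
	 */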
	val = BIT(MSIX_FH_INT_CAUSES_Q(0));
	for (idx = 1; idx < trans->num_rx_queues; idx++) {
		iwl_write8(trans, CSR_MSIX_RX_IVAR(idx),
			   MSIX_FH_INT_CAUSES_Q(idx - offset));
		val |= BIT(MSIX_FH_INT_CAUSES_Q(idx));
	}
	iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~val);

	val = MSIX_FH_INT_CAUSES_Q(0);
	if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX)
		val |= MSIX_NON_AUTO_CLEAR_CAUSE;
	iwl_write8(trans, CSR_MSIX_RX_IVAR(0), val);

	if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS)
		iwl_write8(trans, CSR_MSIX_RX_IVAR(1), val);
}

void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie)
{
	struct iwl_trans *trans = trans_pcie->trans;

	if (!trans_pcie->msix_enabled) {
		if (trans->trans_cfg->mq_rx_supported &&
		    test_bit(STATUS_DEVICE_ENABLED, &trans->status))
			iwl_write_umac_prph(trans, UREG_CHICK,
					    UREG_CHICK_MSI_ENABLE);
		return;
	}
	/*
	 * The IVAR table needs to be configured again after reset,
	 * but if the device is disabled, we can't write to
	 * prph.
	 */
	if (test_bit(STATUS_DEVICE_ENABLED, &trans->status))
		iwl_write_umac_prph(trans, UREG_CHICK, UREG_CHICK_MSIX_ENABLE);

	/*
	 * Each cause from the causes list above and the RX causes is
	 * represented as a byte in the IVAR table. The first nibble
	 * represents the bound interrupt vector of the cause, the second
	 * represents no auto clear for this cause. This will be set if its
	 * interrupt vector is bound to serve other causes.
	 */
	iwl_pcie_map_rx_causes(trans);

	iwl_pcie_map_non_rx_causes(trans);
}

static void iwl_pcie_init_msix(struct iwl_trans_pcie *trans_pcie)
{
	struct iwl_trans *trans = trans_pcie->trans;

	iwl_pcie_conf_msix_hw(trans_pcie);

	if (!trans_pcie->msix_enabled)
		return;

	trans_pcie->fh_init_mask = ~iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD);
	trans_pcie->fh_mask = trans_pcie->fh_init_mask;
	trans_pcie->hw_init_mask = ~iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD);
	trans_pcie->hw_mask = trans_pcie->hw_init_mask;
}

static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->mutex);

	if (trans_pcie->is_down)
		return;

	trans_pcie->is_down = true;

	/* tell the device to stop sending interrupts */
	iwl_disable_interrupts(trans);

	/* device going down, Stop using ICT table */
	iwl_pcie_disable_ict(trans);

	/*
	 * If a HW restart happens during firmware loading,
	 * then the firmware loading might call this function
	 * and later it might be called again due to the
	 * restart. So don't process again if the device is
	 * already dead.
	 */
	if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
		IWL_DEBUG_INFO(trans,
			       "DEVICE_ENABLED bit was set and is now cleared\n");
		iwl_pcie_rx_napi_sync(trans);
		iwl_pcie_tx_stop(trans);
		iwl_pcie_rx_stop(trans);

		/* Power-down device's busmaster DMA clocks */
		if (!trans->cfg->apmg_not_supported) {
			iwl_write_prph(trans, APMG_CLK_DIS_REG,
				       APMG_CLK_VAL_DMA_CLK_RQT);
			udelay(5);
		}
	}

	/* Make sure (redundant) we've released our request to stay awake */
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
		iwl_clear_bit(trans, CSR_GP_CNTRL,
			      CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ);
	else
		iwl_clear_bit(trans, CSR_GP_CNTRL,
			      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwl_pcie_apm_stop(trans, false);

	/* re-take ownership to prevent other users from stealing the device */
	iwl_trans_pcie_sw_reset(trans, true);

	/*
	 * Upon stop, the IVAR table gets erased, so msi-x won't
	 * work. This causes a bug in RF-KILL flows, since the interrupt
	 * that enables radio won't fire on the correct irq, and the
	 * driver won't be able to handle the interrupt.
	 * Configure the IVAR table again after reset.
	 */
	iwl_pcie_conf_msix_hw(trans_pcie);

	/*
	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * This is a bug in certain versions of the hardware.
	 * Certain devices also keep sending HW RF kill interrupt all
	 * the time, unless the interrupt is ACKed even if the interrupt
	 * should be masked. Re-ACK all the interrupts here.
	 */
	iwl_disable_interrupts(trans);

	/* clear all status bits */
	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
	clear_bit(STATUS_INT_ENABLED, &trans->status);
	clear_bit(STATUS_TPOWER_PMI, &trans->status);

	/*
	 * Even if we stop the HW, we still want the RF kill
	 * interrupt
	 */
	iwl_enable_rfkill_int(trans);
}

void iwl_pcie_synchronize_irqs(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans_pcie->msix_enabled) {
		int i;

		for (i = 0; i < trans_pcie->alloc_vecs; i++)
			synchronize_irq(trans_pcie->msix_entries[i].vector);
	} else {
		synchronize_irq(trans_pcie->pci_dev->irq);
	}
}

static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
				   const struct fw_img *fw, bool run_in_rfkill)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool hw_rfkill;
	int ret;

	/* This may fail if AMT took ownership of the device */
	if (iwl_pcie_prepare_card_hw(trans)) {
		IWL_WARN(trans, "Exit HW not ready\n");
		return -EIO;
	}

	iwl_enable_rfkill_int(trans);

	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

	/*
	 * We enabled the RF-Kill interrupt and the handler may very
	 * well be running. Disable the interrupts to make sure no other
	 * interrupt can be fired.
	 */
	iwl_disable_interrupts(trans);

	/* Make sure it finished running */
	iwl_pcie_synchronize_irqs(trans);

	mutex_lock(&trans_pcie->mutex);

	/* If platform's RF_KILL switch is NOT set to KILL */
	hw_rfkill = iwl_pcie_check_hw_rf_kill(trans);
	if (hw_rfkill && !run_in_rfkill) {
		ret = -ERFKILL;
		goto out;
	}

	/* Someone called stop_device, don't try to start_fw */
	if (trans_pcie->is_down) {
		IWL_WARN(trans,
			 "Can't start_fw since the HW hasn't been started\n");
		ret = -EIO;
		goto out;
	}

	/* make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

	ret = iwl_pcie_nic_init(trans);
	if (ret) {
		IWL_ERR(trans, "Unable to init nic\n");
		goto out;
	}

	/*
	 * Now, we load the firmware and don't want to be interrupted, even
	 * by the RF-Kill interrupt (hence mask all the interrupt besides the
	 * FH_TX interrupt which is needed to load the firmware). If the
	 * RF-Kill switch is toggled, we will find out after having loaded
	 * the firmware and return the proper value to the caller.
	 */
	iwl_enable_fw_load_int(trans);

	/* really make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	/* Load the given image to the HW */
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
		ret = iwl_pcie_load_given_ucode_8000(trans, fw);
	else
		ret = iwl_pcie_load_given_ucode(trans, fw);

	/* re-check RF-Kill state since we may have missed the interrupt */
	hw_rfkill = iwl_pcie_check_hw_rf_kill(trans);
	if (hw_rfkill && !run_in_rfkill)
		ret = -ERFKILL;

out:
	mutex_unlock(&trans_pcie->mutex);
	return ret;
}

static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
	iwl_pcie_reset_ict(trans);
	iwl_pcie_tx_start(trans, scd_addr);
}

void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
				       bool was_in_rfkill)
{
	bool hw_rfkill;

	/*
	 * Check again since the RF kill state may have changed while
	 * all the interrupts were disabled, in this case we couldn't
	 * receive the RF kill interrupt and update the state in the
	 * op_mode.
	 * Don't call the op_mode if the rfkill state hasn't changed.
	 * This allows the op_mode to call stop_device from the rfkill
	 * notification without endless recursion. Under very rare
	 * circumstances, we might have a small recursion if the rfkill
	 * state changed exactly now while we were called from stop_device.
	 * This is very unlikely but can happen and is supported.
	 */
	hw_rfkill = iwl_is_rfkill_set(trans);
	if (hw_rfkill) {
		set_bit(STATUS_RFKILL_HW, &trans->status);
		set_bit(STATUS_RFKILL_OPMODE, &trans->status);
	} else {
		clear_bit(STATUS_RFKILL_HW, &trans->status);
		clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
	}
	if (hw_rfkill != was_in_rfkill)
		iwl_trans_pcie_rf_kill(trans, hw_rfkill);
}

static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool was_in_rfkill;

	iwl_op_mode_time_point(trans->op_mode,
			       IWL_FW_INI_TIME_POINT_HOST_DEVICE_DISABLE,
			       NULL);

	mutex_lock(&trans_pcie->mutex);
	trans_pcie->opmode_down = true;
	was_in_rfkill = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
	_iwl_trans_pcie_stop_device(trans);
	iwl_trans_pcie_handle_stop_rfkill(trans, was_in_rfkill);
	mutex_unlock(&trans_pcie->mutex);
}

void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
{
	struct iwl_trans_pcie __maybe_unused *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->mutex);

	IWL_WARN(trans, "reporting RF_KILL (radio %s)\n",
		 state ? "disabled" : "enabled");
	if (iwl_op_mode_hw_rf_kill(trans->op_mode, state)) {
		if (trans->trans_cfg->gen2)
			_iwl_trans_pcie_gen2_stop_device(trans);
		else
			_iwl_trans_pcie_stop_device(trans);
	}
}

void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans,
				  bool test, bool reset)
{
	iwl_disable_interrupts(trans);

	/*
	 * in testing mode, the host stays awake and the
	 * hardware won't be reset (not even partially)
	 */
	if (test)
		return;

	iwl_pcie_disable_ict(trans);

	iwl_pcie_synchronize_irqs(trans);

	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	if (reset) {
		/*
		 * reset TX queues -- some of their registers reset during S3
		 * so if we don't reset everything here the D3 image would try
		 * to execute some invalid memory upon resume
		 */
		iwl_trans_pcie_tx_reset(trans);
	}

	iwl_pcie_set_pwr(trans, true);
}

static int iwl_pcie_d3_handshake(struct iwl_trans *trans, bool suspend)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210)
		iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6,
				    suspend ? UREG_DOORBELL_TO_ISR6_SUSPEND :
					      UREG_DOORBELL_TO_ISR6_RESUME);
	else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
		iwl_write32(trans, CSR_IPC_SLEEP_CONTROL,
			    suspend ? CSR_IPC_SLEEP_CONTROL_SUSPEND :
				      CSR_IPC_SLEEP_CONTROL_RESUME);
	else
		return 0;

	ret = wait_event_timeout(trans_pcie->sx_waitq,
				 trans_pcie->sx_complete, 2 * HZ);

	/* Invalidate it toward next suspend or resume */
	trans_pcie->sx_complete = false;

	if (!ret) {
		IWL_ERR(trans, "Timeout %s D3\n",
			suspend ? "entering" : "exiting");
		return -ETIMEDOUT;
	}

	return 0;
}

static int iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test,
				     bool reset)
{
	int ret;

	if (!reset)
		/* Enable persistence mode to avoid reset */
		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
			    CSR_HW_IF_CONFIG_REG_PERSIST_MODE);

	ret = iwl_pcie_d3_handshake(trans, true);
	if (ret)
		return ret;

	iwl_pcie_d3_complete_suspend(trans, test, reset);

	return 0;
}

static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
				    enum iwl_d3_status *status,
				    bool test, bool reset)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 val;
	int ret;

	if (test) {
		iwl_enable_interrupts(trans);
		*status = IWL_D3_STATUS_ALIVE;
		ret = 0;
		goto out;
	}

	iwl_set_bit(trans, CSR_GP_CNTRL,
		    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	ret = iwl_finish_nic_init(trans);
	if (ret)
		return ret;

	/*
	 * Reconfigure IVAR table in case of MSIX or reset ict table in
	 * MSI mode since HW reset erased it.
	 * Also enables interrupts - none will happen as
	 * the device doesn't know we're waking it up, only when
	 * the opmode actually tells it after this call.
	 */
	iwl_pcie_conf_msix_hw(trans_pcie);
	if (!trans_pcie->msix_enabled)
		iwl_pcie_reset_ict(trans);
	iwl_enable_interrupts(trans);

	iwl_pcie_set_pwr(trans, false);

	if (!reset) {
		iwl_clear_bit(trans, CSR_GP_CNTRL,
			      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	} else {
		iwl_trans_pcie_tx_reset(trans);

		ret = iwl_pcie_rx_init(trans);
		if (ret) {
			IWL_ERR(trans,
				"Failed to resume the device (RX reset)\n");
			return ret;
		}
	}

	IWL_DEBUG_POWER(trans, "WFPM value upon resume = 0x%08X\n",
			iwl_read_umac_prph(trans, WFPM_GP2));

	val = iwl_read32(trans, CSR_RESET);
	if (val & CSR_RESET_REG_FLAG_NEVO_RESET)
		*status = IWL_D3_STATUS_RESET;
	else
		*status = IWL_D3_STATUS_ALIVE;

out:
	if (*status == IWL_D3_STATUS_ALIVE)
		ret = iwl_pcie_d3_handshake(trans, false);

	return ret;
}

static void
iwl_pcie_set_interrupt_capa(struct pci_dev *pdev,
			    struct iwl_trans *trans,
			    const struct iwl_cfg_trans_params *cfg_trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int max_irqs, num_irqs, i, ret;
	u16 pci_cmd;
	u32 max_rx_queues = IWL_MAX_RX_HW_QUEUES;

	if (!cfg_trans->mq_rx_supported)
		goto enable_msi;

	if (cfg_trans->device_family <= IWL_DEVICE_FAMILY_9000)
		max_rx_queues = IWL_9000_MAX_RX_HW_QUEUES;

	max_irqs = min_t(u32, num_online_cpus() + 2, max_rx_queues);
	for (i = 0; i < max_irqs; i++)
		trans_pcie->msix_entries[i].entry = i;

	num_irqs = pci_enable_msix_range(pdev, trans_pcie->msix_entries,
					 MSIX_MIN_INTERRUPT_VECTORS,
					 max_irqs);
	if (num_irqs < 0) {
		IWL_DEBUG_INFO(trans,
			       "Failed to enable msi-x mode (ret %d). Moving to msi mode.\n",
			       num_irqs);
		goto enable_msi;
	}
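	/*
	 * Route the default (non-RX) causes to the last vector when the
	 * full allotment was granted, otherwise share them with vector 0.
	 */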
	trans_pcie->def_irq = (num_irqs == max_irqs) ? num_irqs - 1 : 0;

	IWL_DEBUG_INFO(trans,
		       "MSI-X enabled. %d interrupt vectors were allocated\n",
		       num_irqs);

	/*
	 * In case the OS provides fewer interrupts than requested, different
	 * causes will share the same interrupt vector as follows:
	 * One interrupt less: non rx causes shared with FBQ.
	 * Two interrupts less: non rx causes shared with FBQ and RSS.
	 * More than two interrupts: we will use fewer RSS queues.
	 */
	if (num_irqs <= max_irqs - 2) {
		trans_pcie->trans->num_rx_queues = num_irqs + 1;
		trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX |
			IWL_SHARED_IRQ_FIRST_RSS;
	} else if (num_irqs == max_irqs - 1) {
		trans_pcie->trans->num_rx_queues = num_irqs;
		trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX;
	} else {
		trans_pcie->trans->num_rx_queues = num_irqs - 1;
	}

	IWL_DEBUG_INFO(trans,
		       "MSI-X enabled with rx queues %d, vec mask 0x%x\n",
		       trans_pcie->trans->num_rx_queues, trans_pcie->shared_vec_mask);

	WARN_ON(trans_pcie->trans->num_rx_queues > IWL_MAX_RX_HW_QUEUES);

	trans_pcie->alloc_vecs = num_irqs;
	trans_pcie->msix_enabled = true;
	return;

enable_msi:
	ret = pci_enable_msi(pdev);
	if (ret) {
		dev_err(&pdev->dev, "pci_enable_msi failed - %d\n", ret);
		/* enable rfkill interrupt: hw bug w/a */
		pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
		if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
			pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
			pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
		}
	}
}

static void iwl_pcie_irq_set_affinity(struct iwl_trans *trans)
{
	int iter_rx_q, i, ret, cpu, offset;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	i = trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 0 : 1;
	iter_rx_q = trans_pcie->trans->num_rx_queues - 1 + i;
	offset = 1 + i;
1752 for (; i < iter_rx_q ; i++) {
1753 /*
1754 * cpumask_next() returns the first online CPU strictly after its
1755 * argument, so (i - offset) selects the CPU to pin this vector to.
1756 */
1757 cpu = cpumask_next(i - offset, cpu_online_mask);
1758 cpumask_set_cpu(cpu, &trans_pcie->affinity_mask[i]);
1759 ret = irq_set_affinity_hint(trans_pcie->msix_entries[i].vector,
1760 &trans_pcie->affinity_mask[i]);
1761 if (ret)
1762 IWL_ERR(trans_pcie->trans,
1763 "Failed to set affinity mask for IRQ %d\n",
1764 trans_pcie->msix_entries[i].vector);
1765 }
1766 }
1767
1768 static int iwl_pcie_init_msix_handler(struct pci_dev *pdev,
1769 struct iwl_trans_pcie *trans_pcie)
1770 {
1771 int i;
1772
1773 for (i = 0; i < trans_pcie->alloc_vecs; i++) {
1774 int ret;
1775 struct msix_entry *msix_entry;
1776 const char *qname = queue_name(&pdev->dev, trans_pcie, i);
1777
1778 if (!qname)
1779 return -ENOMEM;
1780
1781 msix_entry = &trans_pcie->msix_entries[i];
1782 ret = devm_request_threaded_irq(&pdev->dev,
1783 msix_entry->vector,
1784 iwl_pcie_msix_isr,
1785 (i == trans_pcie->def_irq) ?
1786 iwl_pcie_irq_msix_handler :
1787 iwl_pcie_irq_rx_msix_handler,
1788 IRQF_SHARED,
1789 qname,
1790 msix_entry);
1791 if (ret) {
1792 IWL_ERR(trans_pcie->trans,
1793 "Error allocating IRQ %d\n", i);
1794
1795 return ret;
1796 }
1797 }
1798 iwl_pcie_irq_set_affinity(trans_pcie->trans);
1799
1800 return 0;
1801 }
1802
1803 static int iwl_trans_pcie_clear_persistence_bit(struct iwl_trans *trans)
1804 {
1805 u32 hpm, wprot;
1806
1807 switch (trans->trans_cfg->device_family) {
1808 case IWL_DEVICE_FAMILY_9000:
1809 wprot = PREG_PRPH_WPROT_9000;
1810 break;
1811 case IWL_DEVICE_FAMILY_22000:
1812 wprot = PREG_PRPH_WPROT_22000;
1813 break;
1814 default:
1815 return 0;
1816 }
1817
1818 hpm = iwl_read_umac_prph_no_grab(trans, HPM_DEBUG);
1819 if (!iwl_trans_is_hw_error_value(hpm) && (hpm & PERSISTENCE_BIT)) {
1820 u32 wprot_val = iwl_read_umac_prph_no_grab(trans, wprot);
1821
1822 if (wprot_val & PREG_WFPM_ACCESS) {
1823 IWL_ERR(trans,
1824 "Error, can not clear persistence bit\n");
1825 return -EPERM;
1826 }
1827 iwl_write_umac_prph_no_grab(trans, HPM_DEBUG,
1828 hpm & ~PERSISTENCE_BIT);
1829 }
1830
1831 return 0;
1832 }
1833
1834 static int iwl_pcie_gen2_force_power_gating(struct iwl_trans *trans)
1835 {
1836 int ret;
1837
1838 ret = iwl_finish_nic_init(trans);
1839 if (ret < 0)
1840 return ret;
1841
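/*
 * Work-around sequence: force the clock regulator active, enable
 * power-gating and sleep while the force is held, then release the
 * force and issue a SW reset so the device settles into the new
 * power-gating configuration.
 */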
1842 iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG,
1843 HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
1844 udelay(20);
1845 iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG,
1846 HPM_HIPM_GEN_CFG_CR_PG_EN |
1847 HPM_HIPM_GEN_CFG_CR_SLP_EN);
1848 udelay(20);
1849 iwl_clear_bits_prph(trans, HPM_HIPM_GEN_CFG,
1850 HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
1851
1852 return iwl_trans_pcie_sw_reset(trans, true);
1853 }
1854
1855 static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans)
1856 {
1857 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1858 int err;
1859
1860 lockdep_assert_held(&trans_pcie->mutex);
1861
1862 err = iwl_pcie_prepare_card_hw(trans);
1863 if (err) {
1864 IWL_ERR(trans, "Error while preparing HW: %d\n", err);
1865 return err;
1866 }
1867
1868 err = iwl_trans_pcie_clear_persistence_bit(trans);
1869 if (err)
1870 return err;
1871
1872 err = iwl_trans_pcie_sw_reset(trans, true);
1873 if (err)
1874 return err;
1875
1876 if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000 &&
1877 trans->trans_cfg->integrated) {
1878 err = iwl_pcie_gen2_force_power_gating(trans);
1879 if (err)
1880 return err;
1881 }
1882
1883 err = iwl_pcie_apm_init(trans);
1884 if (err)
1885 return err;
1886
1887 iwl_pcie_init_msix(trans_pcie);
1888
1889 /* From now on, the op_mode will be kept updated about RF kill state */
1890 iwl_enable_rfkill_int(trans);
1891
1892 trans_pcie->opmode_down = false;
1893
1894 /* Set is_down to false here so that...*/
1895 trans_pcie->is_down = false;
1896
1897 /* ...rfkill can call stop_device and set it false if needed */
1898 iwl_pcie_check_hw_rf_kill(trans);
1899
1900 return 0;
1901 }
1902
1903 static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
1904 {
1905 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1906 int ret;
1907
1908 mutex_lock(&trans_pcie->mutex);
1909 ret = _iwl_trans_pcie_start_hw(trans);
1910 mutex_unlock(&trans_pcie->mutex);
1911
1912 return ret;
1913 }
1914
1915 static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans)
1916 {
1917 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1918
1919 mutex_lock(&trans_pcie->mutex);
1920
1921 /* disable interrupts - don't enable HW RF kill interrupt */
1922 iwl_disable_interrupts(trans);
1923
1924 iwl_pcie_apm_stop(trans, true);
1925
1926 iwl_disable_interrupts(trans);
1927
1928 iwl_pcie_disable_ict(trans);
1929
1930 mutex_unlock(&trans_pcie->mutex);
1931
1932 iwl_pcie_synchronize_irqs(trans);
1933 }
1934
1935 #if defined(__linux__)
1936 static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
1937 {
1938 writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
1939 }
1940
1941 static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val)
1942 {
1943 writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
1944 }
1945
1946 static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
1947 {
1948 return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
1949 }
1950 #elif defined(__FreeBSD__)
1951 static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
1952 {
1953
1954 IWL_DEBUG_PCI_RW(trans, "W1 %#010x %#04x\n", ofs, val);
1955 bus_write_1((struct resource *)IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base, ofs, val);
1956 }
1957
1958 static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val)
1959 {
1960
1961 IWL_DEBUG_PCI_RW(trans, "W4 %#010x %#010x\n", ofs, val);
1962 bus_write_4((struct resource *)IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base, ofs, val);
1963 }
1964
1965 static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
1966 {
1967 u32 v;
1968
1969 v = bus_read_4((struct resource *)IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base, ofs);
1970 IWL_DEBUG_PCI_RW(trans, "R4 %#010x %#010x\n", ofs, v);
1971 return (v);
1972 }
1973 #endif
1974
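/*
 * PRPH addresses are 20 bits wide on pre-AX210 devices and 24 bits wide
 * from AX210 on; the mask returned here keeps the address in range before
 * the helpers below combine it with the HBUS target-access control bits
 * (the (3 << 24) in the callers).
 */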
1975 static u32 iwl_trans_pcie_prph_msk(struct iwl_trans *trans)
1976 {
1977 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
1978 return 0x00FFFFFF;
1979 else
1980 return 0x000FFFFF;
1981 }
1982
1983 static u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg)
1984 {
1985 u32 mask = iwl_trans_pcie_prph_msk(trans);
1986
1987 iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_RADDR,
1988 ((reg & mask) | (3 << 24)));
1989 return iwl_trans_pcie_read32(trans, HBUS_TARG_PRPH_RDAT);
1990 }
1991
1992 static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr,
1993 u32 val)
1994 {
1995 u32 mask = iwl_trans_pcie_prph_msk(trans);
1996
1997 iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WADDR,
1998 ((addr & mask) | (3 << 24)));
1999 iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
2000 }
2001
2002 static void iwl_trans_pcie_configure(struct iwl_trans *trans,
2003 const struct iwl_trans_config *trans_cfg)
2004 {
2005 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2006
2007 /* free all first - we might be reconfigured for a different size */
2008 iwl_pcie_free_rbs_pool(trans);
2009
2010 trans->txqs.cmd.q_id = trans_cfg->cmd_queue;
2011 trans->txqs.cmd.fifo = trans_cfg->cmd_fifo;
2012 trans->txqs.cmd.wdg_timeout = trans_cfg->cmd_q_wdg_timeout;
2013 trans->txqs.page_offs = trans_cfg->cb_data_offs;
2014 trans->txqs.dev_cmd_offs = trans_cfg->cb_data_offs + sizeof(void *);
2015 trans->txqs.queue_alloc_cmd_ver = trans_cfg->queue_alloc_cmd_ver;
2016
2017 if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
2018 trans_pcie->n_no_reclaim_cmds = 0;
2019 else
2020 trans_pcie->n_no_reclaim_cmds = trans_cfg->n_no_reclaim_cmds;
2021 if (trans_pcie->n_no_reclaim_cmds)
2022 memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds,
2023 trans_pcie->n_no_reclaim_cmds * sizeof(u8));
2024
2025 trans_pcie->rx_buf_size = trans_cfg->rx_buf_size;
2026 trans_pcie->rx_page_order =
2027 iwl_trans_get_rb_size_order(trans_pcie->rx_buf_size);
2028 trans_pcie->rx_buf_bytes =
2029 iwl_trans_get_rb_size(trans_pcie->rx_buf_size);
2030 trans_pcie->supported_dma_mask = DMA_BIT_MASK(12);
2031 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
2032 trans_pcie->supported_dma_mask = DMA_BIT_MASK(11);
2033
2034 trans->txqs.bc_table_dword = trans_cfg->bc_table_dword;
2035 trans_pcie->scd_set_active = trans_cfg->scd_set_active;
2036
2037 trans->command_groups = trans_cfg->command_groups;
2038 trans->command_groups_size = trans_cfg->command_groups_size;
2039
2040 /* Initialize NAPI here - it should be before registering to mac80211
2041 * in the opmode but after the HW struct is allocated.
2042 * As this function may be called again in some corner cases, don't
2043 * do anything if NAPI was already initialized.
2044 */
2045 if (trans_pcie->napi_dev.reg_state != NETREG_DUMMY)
2046 init_dummy_netdev(&trans_pcie->napi_dev);
2047
2048 trans_pcie->fw_reset_handshake = trans_cfg->fw_reset_handshake;
2049 }
2050
2051 void iwl_trans_pcie_free_pnvm_dram_regions(struct iwl_dram_regions *dram_regions,
2052 struct device *dev)
2053 {
2054 u8 i;
2055 struct iwl_dram_data *desc_dram = &dram_regions->prph_scratch_mem_desc;
2056
2057 /* free DRAM payloads */
2058 for (i = 0; i < dram_regions->n_regions; i++) {
2059 dma_free_coherent(dev, dram_regions->drams[i].size,
2060 dram_regions->drams[i].block,
2061 dram_regions->drams[i].physical);
2062 }
2063 dram_regions->n_regions = 0;
2064
2065 /* free DRAM addresses array */
2066 if (desc_dram->block) {
2067 dma_free_coherent(dev, desc_dram->size,
2068 desc_dram->block,
2069 desc_dram->physical);
2070 }
2071 memset(desc_dram, 0, sizeof(*desc_dram));
2072 }
2073
2074 void iwl_trans_pcie_free(struct iwl_trans *trans)
2075 {
2076 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2077 int i;
2078
2079 iwl_pcie_synchronize_irqs(trans);
2080
2081 if (trans->trans_cfg->gen2)
2082 iwl_txq_gen2_tx_free(trans);
2083 else
2084 iwl_pcie_tx_free(trans);
2085 iwl_pcie_rx_free(trans);
2086
2087 if (trans_pcie->rba.alloc_wq) {
2088 destroy_workqueue(trans_pcie->rba.alloc_wq);
2089 trans_pcie->rba.alloc_wq = NULL;
2090 }
2091
2092 if (trans_pcie->msix_enabled) {
2093 for (i = 0; i < trans_pcie->alloc_vecs; i++) {
2094 irq_set_affinity_hint(
2095 trans_pcie->msix_entries[i].vector,
2096 NULL);
2097 }
2098
2099 trans_pcie->msix_enabled = false;
2100 } else {
2101 iwl_pcie_free_ict(trans);
2102 }
2103
2104 iwl_pcie_free_fw_monitor(trans);
2105
2106 iwl_trans_pcie_free_pnvm_dram_regions(&trans_pcie->pnvm_data,
2107 trans->dev);
2108 iwl_trans_pcie_free_pnvm_dram_regions(&trans_pcie->reduced_tables_data,
2109 trans->dev);
2110
2111 mutex_destroy(&trans_pcie->mutex);
2112 iwl_trans_free(trans);
2113 }
2114
2115 static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state)
2116 {
2117 if (state)
2118 set_bit(STATUS_TPOWER_PMI, &trans->status);
2119 else
2120 clear_bit(STATUS_TPOWER_PMI, &trans->status);
2121 }
2122
2123 struct iwl_trans_pcie_removal {
2124 struct pci_dev *pdev;
2125 struct work_struct work;
2126 bool rescan;
2127 };
2128
2129 static void iwl_trans_pcie_removal_wk(struct work_struct *wk)
2130 {
2131 struct iwl_trans_pcie_removal *removal =
2132 container_of(wk, struct iwl_trans_pcie_removal, work);
2133 struct pci_dev *pdev = removal->pdev;
2134 static char *prop[] = {"EVENT=INACCESSIBLE", NULL};
2135 struct pci_bus *bus = pdev->bus;
2136
2137 dev_err(&pdev->dev, "Device gone - attempting removal\n");
2138 kobject_uevent_env(&pdev->dev.kobj, KOBJ_CHANGE, prop);
2139 pci_lock_rescan_remove();
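/* drop the reference that iwl_trans_pcie_remove() took on pdev */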
2140 pci_dev_put(pdev);
2141 pci_stop_and_remove_bus_device(pdev);
2142 if (removal->rescan)
2143 #if defined(__linux__)
2144 pci_rescan_bus(bus->parent);
2145 #elif defined(__FreeBSD__)
2146 pci_rescan_bus(bus);
2147 #endif
2148 pci_unlock_rescan_remove();
2149
2150 kfree(removal);
2151 module_put(THIS_MODULE);
2152 }
2153
2154 void iwl_trans_pcie_remove(struct iwl_trans *trans, bool rescan)
2155 {
2156 struct iwl_trans_pcie_removal *removal;
2157
2158 if (test_bit(STATUS_TRANS_DEAD, &trans->status))
2159 return;
2160
2161 IWL_ERR(trans, "Device gone - scheduling removal!\n");
2162
2163 /*
2164 * Take a module reference so this work is never scheduled
2165 * while the module is unloading, i.e. we never queue work
2166 * whose code is about to be removed.
2168 */
2169 if (!try_module_get(THIS_MODULE)) {
2170 IWL_ERR(trans,
2171 "Module is being unloaded - abort\n");
2172 return;
2173 }
2174
2175 removal = kzalloc(sizeof(*removal), GFP_ATOMIC);
2176 if (!removal) {
2177 module_put(THIS_MODULE);
2178 return;
2179 }
2180 /*
2181 * we don't need to clear this flag, because
2182 * the trans will be freed and reallocated.
2183 */
2184 set_bit(STATUS_TRANS_DEAD, &trans->status);
2185
2186 removal->pdev = to_pci_dev(trans->dev);
2187 removal->rescan = rescan;
2188 INIT_WORK(&removal->work, iwl_trans_pcie_removal_wk);
2189 pci_dev_get(removal->pdev);
2190 schedule_work(&removal->work);
2191 }
2192 EXPORT_SYMBOL(iwl_trans_pcie_remove);
2193
2194 /*
2195 * This version doesn't disable BHs but rather assumes they're
2196 * already disabled.
2197 */
2198 bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans)
2199 {
2200 int ret;
2201 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2202 u32 write = CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ;
2203 u32 mask = CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
2204 CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP;
2205 u32 poll = CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN;
2206
2207 spin_lock(&trans_pcie->reg_lock);
2208
2209 if (trans_pcie->cmd_hold_nic_awake)
2210 goto out;
2211
2212 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
2213 write = CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ;
2214 mask = CSR_GP_CNTRL_REG_FLAG_MAC_STATUS;
2215 poll = CSR_GP_CNTRL_REG_FLAG_MAC_STATUS;
2216 }
2217
2218 /* this bit wakes up the NIC */
2219 __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, write);
2220 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
2221 udelay(2);
2222
2223 /*
2224 * These bits say the device is running, and should keep running for
2225 * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
2226 * but they do not indicate that embedded SRAM is restored yet;
2227 * HW with volatile SRAM must save/restore contents to/from
2228 * host DRAM when sleeping/waking for power-saving.
2229 * Each direction takes approximately 1/4 millisecond; with this
2230 * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
2231 * series of register accesses are expected (e.g. reading Event Log),
2232 * to keep device from sleeping.
2233 *
2234 * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
2235 * SRAM is okay/restored. We don't check that here because this call
2236 * is just for hardware register access; but GP1 MAC_SLEEP
2237 * check is a good idea before accessing the SRAM of HW with
2238 * volatile SRAM (e.g. reading Event Log).
2239 *
2240 * 5000 series and later (including 1000 series) have non-volatile SRAM,
2241 * and do not save/restore SRAM when power cycling.
2242 */
2243 ret = iwl_poll_bit(trans, CSR_GP_CNTRL, poll, mask, 15000);
2244 if (unlikely(ret < 0)) {
2245 u32 cntrl = iwl_read32(trans, CSR_GP_CNTRL);
2246
2247 WARN_ONCE(1,
2248 "Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n",
2249 cntrl);
2250
2251 iwl_trans_pcie_dump_regs(trans);
2252
2253 if (iwlwifi_mod_params.remove_when_gone && cntrl == ~0U)
2254 iwl_trans_pcie_remove(trans, false);
2255 else
2256 iwl_write32(trans, CSR_RESET,
2257 CSR_RESET_REG_FLAG_FORCE_NMI);
2258
2259 spin_unlock(&trans_pcie->reg_lock);
2260 return false;
2261 }
2262
2263 out:
2264 /*
2265 * Fool sparse by faking that we release the lock - sparse will
2266 * track nic_access anyway.
2267 */
2268 __release(&trans_pcie->reg_lock);
2269 return true;
2270 }
2271
2272 static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans)
2273 {
2274 bool ret;
2275
2276 local_bh_disable();
2277 ret = __iwl_trans_pcie_grab_nic_access(trans);
2278 if (ret) {
2279 /* keep BHs disabled until iwl_trans_pcie_release_nic_access */
2280 return ret;
2281 }
2282 local_bh_enable();
2283 return false;
2284 }
2285
2286 static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans)
2287 {
2288 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2289
2290 lockdep_assert_held(&trans_pcie->reg_lock);
2291
2292 /*
2293 * Fool sparse by faking that we acquire the lock - sparse will
2294 * track nic_access anyway.
2295 */
2296 __acquire(&trans_pcie->reg_lock);
2297
2298 if (trans_pcie->cmd_hold_nic_awake)
2299 goto out;
2300 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
2301 __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
2302 CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ);
2303 else
2304 __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
2305 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
2306 /*
2307 * Above we read the CSR_GP_CNTRL register, which will flush
2308 * any previous writes, but we need the write that clears the
2309 * MAC_ACCESS_REQ bit to be performed before any other writes
2310 * scheduled on different CPUs (after we drop reg_lock).
2311 */
2312 out:
2313 spin_unlock_bh(&trans_pcie->reg_lock);
2314 }
2315
2316 static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
2317 void *buf, int dwords)
2318 {
2319 int offs = 0;
2320 u32 *vals = buf;
2321
2322 while (offs < dwords) {
2323 /* limit the time we spin here under lock to 1/2s */
2324 unsigned long end = jiffies + HZ / 2;
2325 bool resched = false;
2326
2327 if (iwl_trans_grab_nic_access(trans)) {
2328 iwl_write32(trans, HBUS_TARG_MEM_RADDR,
2329 addr + 4 * offs);
2330
2331 while (offs < dwords) {
2332 vals[offs] = iwl_read32(trans,
2333 HBUS_TARG_MEM_RDAT);
2334 offs++;
2335
2336 if (time_after(jiffies, end)) {
2337 resched = true;
2338 break;
2339 }
2340 }
2341 iwl_trans_release_nic_access(trans);
2342
2343 if (resched)
2344 cond_resched();
2345 } else {
2346 return -EBUSY;
2347 }
2348 }
2349
2350 return 0;
2351 }
2352
2353 static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
2354 const void *buf, int dwords)
2355 {
2356 int offs, ret = 0;
2357 const u32 *vals = buf;
2358
2359 if (iwl_trans_grab_nic_access(trans)) {
2360 iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr);
2361 for (offs = 0; offs < dwords; offs++)
2362 iwl_write32(trans, HBUS_TARG_MEM_WDAT,
2363 vals ? vals[offs] : 0);
2364 iwl_trans_release_nic_access(trans);
2365 } else {
2366 ret = -EBUSY;
2367 }
2368 return ret;
2369 }
2370
2371 static int iwl_trans_pcie_read_config32(struct iwl_trans *trans, u32 ofs,
2372 u32 *val)
2373 {
2374 return pci_read_config_dword(IWL_TRANS_GET_PCIE_TRANS(trans)->pci_dev,
2375 ofs, val);
2376 }
2377
2378 static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block)
2379 {
2380 int i;
2381
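/*
 * txq->block is a nesting counter: each block request increments it,
 * each unblock decrements it, and the queue's scheduler write pointer
 * is only re-armed once the counter drops back to zero.
 */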
2382 for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
2383 struct iwl_txq *txq = trans->txqs.txq[i];
2384
2385 if (i == trans->txqs.cmd.q_id)
2386 continue;
2387
2388 spin_lock_bh(&txq->lock);
2389
2390 if (!block && !(WARN_ON_ONCE(!txq->block))) {
2391 txq->block--;
2392 if (!txq->block) {
2393 iwl_write32(trans, HBUS_TARG_WRPTR,
2394 txq->write_ptr | (i << 8));
2395 }
2396 } else if (block) {
2397 txq->block++;
2398 }
2399
2400 spin_unlock_bh(&txq->lock);
2401 }
2402 }
2403
2404 #define IWL_FLUSH_WAIT_MS 2000
2405
2406 static int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue,
2407 struct iwl_trans_rxq_dma_data *data)
2408 {
2409 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2410
2411 if (queue >= trans->num_rx_queues || !trans_pcie->rxq)
2412 return -EINVAL;
2413
2414 data->fr_bd_cb = trans_pcie->rxq[queue].bd_dma;
2415 data->urbd_stts_wrptr = trans_pcie->rxq[queue].rb_stts_dma;
2416 data->ur_bd_cb = trans_pcie->rxq[queue].used_bd_dma;
2417 data->fr_bd_wid = 0;
2418
2419 return 0;
2420 }
2421
2422 static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx)
2423 {
2424 struct iwl_txq *txq;
2425 unsigned long now = jiffies;
2426 bool overflow_tx;
2427 u8 wr_ptr;
2428
2429 /* Make sure the NIC is still alive in the bus */
2430 if (test_bit(STATUS_TRANS_DEAD, &trans->status))
2431 return -ENODEV;
2432
2433 if (!test_bit(txq_idx, trans->txqs.queue_used))
2434 return -EINVAL;
2435
2436 IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", txq_idx);
2437 txq = trans->txqs.txq[txq_idx];
2438
2439 spin_lock_bh(&txq->lock);
2440 overflow_tx = txq->overflow_tx ||
2441 !skb_queue_empty(&txq->overflow_q);
2442 spin_unlock_bh(&txq->lock);
2443
2444 wr_ptr = READ_ONCE(txq->write_ptr);
2445
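/*
 * Poll for up to IWL_FLUSH_WAIT_MS until the queue drains; a write
 * pointer that moves while we wait means new frames were queued,
 * which is only legitimate for the transport's own overflow TX.
 */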
2446 while ((txq->read_ptr != READ_ONCE(txq->write_ptr) ||
2447 overflow_tx) &&
2448 !time_after(jiffies,
2449 now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) {
2450 u8 write_ptr = READ_ONCE(txq->write_ptr);
2451
2452 /*
2453 * If the write pointer moved during the wait, warn only
2454 * if the TX came from the op mode; if it came from the
2455 * trans layer itself (overflow TX), don't warn.
2456 */
2457 if (WARN_ONCE(wr_ptr != write_ptr && !overflow_tx,
2458 "WR pointer moved while flushing %d -> %d\n",
2459 wr_ptr, write_ptr))
2460 return -ETIMEDOUT;
2461 wr_ptr = write_ptr;
2462
2463 usleep_range(1000, 2000);
2464
2465 spin_lock_bh(&txq->lock);
2466 overflow_tx = txq->overflow_tx ||
2467 !skb_queue_empty(&txq->overflow_q);
2468 spin_unlock_bh(&txq->lock);
2469 }
2470
2471 if (txq->read_ptr != txq->write_ptr) {
2472 IWL_ERR(trans,
2473 "fail to flush all tx fifo queues Q %d\n", txq_idx);
2474 iwl_txq_log_scd_error(trans, txq);
2475 return -ETIMEDOUT;
2476 }
2477
2478 IWL_DEBUG_TX_QUEUES(trans, "Queue %d is now empty.\n", txq_idx);
2479
2480 return 0;
2481 }
2482
2483 static int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm)
2484 {
2485 int cnt;
2486 int ret = 0;
2487
2488 /* waiting for all the tx frames to complete might take a while */
2489 for (cnt = 0;
2490 cnt < trans->trans_cfg->base_params->num_of_queues;
2491 cnt++) {
2492
2493 if (cnt == trans->txqs.cmd.q_id)
2494 continue;
2495 if (!test_bit(cnt, trans->txqs.queue_used))
2496 continue;
2497 if (!(BIT(cnt) & txq_bm))
2498 continue;
2499
2500 ret = iwl_trans_pcie_wait_txq_empty(trans, cnt);
2501 if (ret)
2502 break;
2503 }
2504
2505 return ret;
2506 }
2507
2508 static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
2509 u32 mask, u32 value)
2510 {
2511 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2512
2513 spin_lock_bh(&trans_pcie->reg_lock);
2514 __iwl_trans_pcie_set_bits_mask(trans, reg, mask, value);
2515 spin_unlock_bh(&trans_pcie->reg_lock);
2516 }
2517
2518 static const char *get_csr_string(int cmd)
2519 {
2520 #define IWL_CMD(x) case x: return #x
2521 switch (cmd) {
2522 IWL_CMD(CSR_HW_IF_CONFIG_REG);
2523 IWL_CMD(CSR_INT_COALESCING);
2524 IWL_CMD(CSR_INT);
2525 IWL_CMD(CSR_INT_MASK);
2526 IWL_CMD(CSR_FH_INT_STATUS);
2527 IWL_CMD(CSR_GPIO_IN);
2528 IWL_CMD(CSR_RESET);
2529 IWL_CMD(CSR_GP_CNTRL);
2530 IWL_CMD(CSR_HW_REV);
2531 IWL_CMD(CSR_EEPROM_REG);
2532 IWL_CMD(CSR_EEPROM_GP);
2533 IWL_CMD(CSR_OTP_GP_REG);
2534 IWL_CMD(CSR_GIO_REG);
2535 IWL_CMD(CSR_GP_UCODE_REG);
2536 IWL_CMD(CSR_GP_DRIVER_REG);
2537 IWL_CMD(CSR_UCODE_DRV_GP1);
2538 IWL_CMD(CSR_UCODE_DRV_GP2);
2539 IWL_CMD(CSR_LED_REG);
2540 IWL_CMD(CSR_DRAM_INT_TBL_REG);
2541 IWL_CMD(CSR_GIO_CHICKEN_BITS);
2542 IWL_CMD(CSR_ANA_PLL_CFG);
2543 IWL_CMD(CSR_HW_REV_WA_REG);
2544 IWL_CMD(CSR_MONITOR_STATUS_REG);
2545 IWL_CMD(CSR_DBG_HPET_MEM_REG);
2546 default:
2547 return "UNKNOWN";
2548 }
2549 #undef IWL_CMD
2550 }
2551
2552 void iwl_pcie_dump_csr(struct iwl_trans *trans)
2553 {
2554 int i;
2555 static const u32 csr_tbl[] = {
2556 CSR_HW_IF_CONFIG_REG,
2557 CSR_INT_COALESCING,
2558 CSR_INT,
2559 CSR_INT_MASK,
2560 CSR_FH_INT_STATUS,
2561 CSR_GPIO_IN,
2562 CSR_RESET,
2563 CSR_GP_CNTRL,
2564 CSR_HW_REV,
2565 CSR_EEPROM_REG,
2566 CSR_EEPROM_GP,
2567 CSR_OTP_GP_REG,
2568 CSR_GIO_REG,
2569 CSR_GP_UCODE_REG,
2570 CSR_GP_DRIVER_REG,
2571 CSR_UCODE_DRV_GP1,
2572 CSR_UCODE_DRV_GP2,
2573 CSR_LED_REG,
2574 CSR_DRAM_INT_TBL_REG,
2575 CSR_GIO_CHICKEN_BITS,
2576 CSR_ANA_PLL_CFG,
2577 CSR_MONITOR_STATUS_REG,
2578 CSR_HW_REV_WA_REG,
2579 CSR_DBG_HPET_MEM_REG
2580 };
2581 IWL_ERR(trans, "CSR values:\n");
2582 IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is "
2583 "CSR_INT_PERIODIC_REG)\n");
2584 for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
2585 IWL_ERR(trans, " %25s: 0X%08x\n",
2586 get_csr_string(csr_tbl[i]),
2587 iwl_read32(trans, csr_tbl[i]));
2588 }
2589 }
2590
2591 #ifdef CONFIG_IWLWIFI_DEBUGFS
2592 /* create and remove of files */
2593 #define DEBUGFS_ADD_FILE(name, parent, mode) do { \
2594 debugfs_create_file(#name, mode, parent, trans, \
2595 &iwl_dbgfs_##name##_ops); \
2596 } while (0)
2597
2598 /* file operation */
2599 #define DEBUGFS_READ_FILE_OPS(name) \
2600 static const struct file_operations iwl_dbgfs_##name##_ops = { \
2601 .read = iwl_dbgfs_##name##_read, \
2602 .open = simple_open, \
2603 .llseek = generic_file_llseek, \
2604 };
2605
2606 #define DEBUGFS_WRITE_FILE_OPS(name) \
2607 static const struct file_operations iwl_dbgfs_##name##_ops = { \
2608 .write = iwl_dbgfs_##name##_write, \
2609 .open = simple_open, \
2610 .llseek = generic_file_llseek, \
2611 };
2612
2613 #define DEBUGFS_READ_WRITE_FILE_OPS(name) \
2614 static const struct file_operations iwl_dbgfs_##name##_ops = { \
2615 .write = iwl_dbgfs_##name##_write, \
2616 .read = iwl_dbgfs_##name##_read, \
2617 .open = simple_open, \
2618 .llseek = generic_file_llseek, \
2619 };
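/*
 * For example, DEBUGFS_READ_WRITE_FILE_OPS(interrupt) (instantiated below)
 * defines a "struct file_operations iwl_dbgfs_interrupt_ops" wired to
 * iwl_dbgfs_interrupt_read()/iwl_dbgfs_interrupt_write(), which
 * DEBUGFS_ADD_FILE(interrupt, dir, 0600) later registers as the debugfs
 * file "interrupt".
 */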
2620
2621 struct iwl_dbgfs_tx_queue_priv {
2622 struct iwl_trans *trans;
2623 };
2624
2625 struct iwl_dbgfs_tx_queue_state {
2626 loff_t pos;
2627 };
2628
2629 static void *iwl_dbgfs_tx_queue_seq_start(struct seq_file *seq, loff_t *pos)
2630 {
2631 struct iwl_dbgfs_tx_queue_priv *priv = seq->private;
2632 struct iwl_dbgfs_tx_queue_state *state;
2633
2634 if (*pos >= priv->trans->trans_cfg->base_params->num_of_queues)
2635 return NULL;
2636
2637 state = kmalloc(sizeof(*state), GFP_KERNEL);
2638 if (!state)
2639 return NULL;
2640 state->pos = *pos;
2641 return state;
2642 }
2643
2644 static void *iwl_dbgfs_tx_queue_seq_next(struct seq_file *seq,
2645 void *v, loff_t *pos)
2646 {
2647 struct iwl_dbgfs_tx_queue_priv *priv = seq->private;
2648 struct iwl_dbgfs_tx_queue_state *state = v;
2649
2650 *pos = ++state->pos;
2651
2652 if (*pos >= priv->trans->trans_cfg->base_params->num_of_queues)
2653 return NULL;
2654
2655 return state;
2656 }
2657
2658 static void iwl_dbgfs_tx_queue_seq_stop(struct seq_file *seq, void *v)
2659 {
2660 kfree(v);
2661 }
2662
2663 static int iwl_dbgfs_tx_queue_seq_show(struct seq_file *seq, void *v)
2664 {
2665 struct iwl_dbgfs_tx_queue_priv *priv = seq->private;
2666 struct iwl_dbgfs_tx_queue_state *state = v;
2667 struct iwl_trans *trans = priv->trans;
2668 struct iwl_txq *txq = trans->txqs.txq[state->pos];
2669
2670 seq_printf(seq, "hwq %.3u: used=%d stopped=%d ",
2671 (unsigned int)state->pos,
2672 !!test_bit(state->pos, trans->txqs.queue_used),
2673 !!test_bit(state->pos, trans->txqs.queue_stopped));
2674 if (txq)
2675 seq_printf(seq,
2676 "read=%u write=%u need_update=%d frozen=%d n_window=%d ampdu=%d",
2677 txq->read_ptr, txq->write_ptr,
2678 txq->need_update, txq->frozen,
2679 txq->n_window, txq->ampdu);
2680 else
2681 seq_puts(seq, "(unallocated)");
2682
2683 if (state->pos == trans->txqs.cmd.q_id)
2684 seq_puts(seq, " (HCMD)");
2685 seq_puts(seq, "\n");
2686
2687 return 0;
2688 }
2689
2690 static const struct seq_operations iwl_dbgfs_tx_queue_seq_ops = {
2691 .start = iwl_dbgfs_tx_queue_seq_start,
2692 .next = iwl_dbgfs_tx_queue_seq_next,
2693 .stop = iwl_dbgfs_tx_queue_seq_stop,
2694 .show = iwl_dbgfs_tx_queue_seq_show,
2695 };
2696
2697 static int iwl_dbgfs_tx_queue_open(struct inode *inode, struct file *filp)
2698 {
2699 struct iwl_dbgfs_tx_queue_priv *priv;
2700
2701 priv = __seq_open_private(filp, &iwl_dbgfs_tx_queue_seq_ops,
2702 sizeof(*priv));
2703
2704 if (!priv)
2705 return -ENOMEM;
2706
2707 priv->trans = inode->i_private;
2708 return 0;
2709 }
2710
2711 static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
2712 char __user *user_buf,
2713 size_t count, loff_t *ppos)
2714 {
2715 struct iwl_trans *trans = file->private_data;
2716 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2717 char *buf;
2718 int pos = 0, i, ret;
2719 size_t bufsz;
2720
2721 bufsz = 121 * trans->num_rx_queues;
2722
2723 if (!trans_pcie->rxq)
2724 return -EAGAIN;
2725
2726 buf = kzalloc(bufsz, GFP_KERNEL);
2727 if (!buf)
2728 return -ENOMEM;
2729
2730 for (i = 0; i < trans->num_rx_queues && pos < bufsz; i++) {
2731 struct iwl_rxq *rxq = &trans_pcie->rxq[i];
2732
2733 pos += scnprintf(buf + pos, bufsz - pos, "queue#: %2d\n",
2734 i);
2735 pos += scnprintf(buf + pos, bufsz - pos, "\tread: %u\n",
2736 rxq->read);
2737 pos += scnprintf(buf + pos, bufsz - pos, "\twrite: %u\n",
2738 rxq->write);
2739 pos += scnprintf(buf + pos, bufsz - pos, "\twrite_actual: %u\n",
2740 rxq->write_actual);
2741 pos += scnprintf(buf + pos, bufsz - pos, "\tneed_update: %2d\n",
2742 rxq->need_update);
2743 pos += scnprintf(buf + pos, bufsz - pos, "\tfree_count: %u\n",
2744 rxq->free_count);
2745 if (rxq->rb_stts) {
2746 u32 r = __le16_to_cpu(iwl_get_closed_rb_stts(trans,
2747 rxq));
2748 pos += scnprintf(buf + pos, bufsz - pos,
2749 "\tclosed_rb_num: %u\n",
2750 r & 0x0FFF);
2751 } else {
2752 pos += scnprintf(buf + pos, bufsz - pos,
2753 "\tclosed_rb_num: Not Allocated\n");
2754 }
2755 }
2756 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
2757 kfree(buf);
2758
2759 return ret;
2760 }
2761
2762 static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
2763 char __user *user_buf,
2764 size_t count, loff_t *ppos)
2765 {
2766 struct iwl_trans *trans = file->private_data;
2767 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2768 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
2769
2770 int pos = 0;
2771 char *buf;
2772 int bufsz = 24 * 64; /* 24 items * 64 char per item */
2773 ssize_t ret;
2774
2775 buf = kzalloc(bufsz, GFP_KERNEL);
2776 if (!buf)
2777 return -ENOMEM;
2778
2779 pos += scnprintf(buf + pos, bufsz - pos,
2780 "Interrupt Statistics Report:\n");
2781
2782 pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
2783 isr_stats->hw);
2784 pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
2785 isr_stats->sw);
2786 if (isr_stats->sw || isr_stats->hw) {
2787 pos += scnprintf(buf + pos, bufsz - pos,
2788 "\tLast Restarting Code: 0x%X\n",
2789 isr_stats->err_code);
2790 }
2791 #ifdef CONFIG_IWLWIFI_DEBUG
2792 pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
2793 isr_stats->sch);
2794 pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
2795 isr_stats->alive);
2796 #endif
2797 pos += scnprintf(buf + pos, bufsz - pos,
2798 "HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill);
2799
2800 pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
2801 isr_stats->ctkill);
2802
2803 pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
2804 isr_stats->wakeup);
2805
2806 pos += scnprintf(buf + pos, bufsz - pos,
2807 "Rx command responses:\t\t %u\n", isr_stats->rx);
2808
2809 pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
2810 isr_stats->tx);
2811
2812 pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
2813 isr_stats->unhandled);
2814
2815 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
2816 kfree(buf);
2817 return ret;
2818 }
2819
2820 static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
2821 const char __user *user_buf,
2822 size_t count, loff_t *ppos)
2823 {
2824 struct iwl_trans *trans = file->private_data;
2825 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2826 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
2827 u32 reset_flag;
2828 int ret;
2829
2830 ret = kstrtou32_from_user(user_buf, count, 16, &reset_flag);
2831 if (ret)
2832 return ret;
2833 if (reset_flag == 0)
2834 memset(isr_stats, 0, sizeof(*isr_stats));
2835
2836 return count;
2837 }
2838
2839 static ssize_t iwl_dbgfs_csr_write(struct file *file,
2840 const char __user *user_buf,
2841 size_t count, loff_t *ppos)
2842 {
2843 struct iwl_trans *trans = file->private_data;
2844
2845 iwl_pcie_dump_csr(trans);
2846
2847 return count;
2848 }
2849
2850 static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
2851 char __user *user_buf,
2852 size_t count, loff_t *ppos)
2853 {
2854 struct iwl_trans *trans = file->private_data;
2855 char *buf = NULL;
2856 ssize_t ret;
2857
2858 ret = iwl_dump_fh(trans, &buf);
2859 if (ret < 0)
2860 return ret;
2861 if (!buf)
2862 return -EINVAL;
2863 ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
2864 kfree(buf);
2865 return ret;
2866 }
2867
2868 static ssize_t iwl_dbgfs_rfkill_read(struct file *file,
2869 char __user *user_buf,
2870 size_t count, loff_t *ppos)
2871 {
2872 struct iwl_trans *trans = file->private_data;
2873 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2874 char buf[100];
2875 int pos;
2876
2877 pos = scnprintf(buf, sizeof(buf), "debug: %d\nhw: %d\n",
2878 trans_pcie->debug_rfkill,
2879 !(iwl_read32(trans, CSR_GP_CNTRL) &
2880 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW));
2881
2882 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
2883 }
2884
2885 static ssize_t iwl_dbgfs_rfkill_write(struct file *file,
2886 const char __user *user_buf,
2887 size_t count, loff_t *ppos)
2888 {
2889 struct iwl_trans *trans = file->private_data;
2890 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2891 bool new_value;
2892 int ret;
2893
2894 ret = kstrtobool_from_user(user_buf, count, &new_value);
2895 if (ret)
2896 return ret;
2897 if (new_value == trans_pcie->debug_rfkill)
2898 return count;
2899 IWL_WARN(trans, "changing debug rfkill %d->%d\n",
2900 trans_pcie->debug_rfkill, new_value);
2901 trans_pcie->debug_rfkill = new_value;
2902 iwl_pcie_handle_rfkill_irq(trans);
2903
2904 return count;
2905 }
2906
2907 static int iwl_dbgfs_monitor_data_open(struct inode *inode,
2908 struct file *file)
2909 {
2910 struct iwl_trans *trans = inode->i_private;
2911 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2912
2913 if (!trans->dbg.dest_tlv ||
2914 trans->dbg.dest_tlv->monitor_mode != EXTERNAL_MODE) {
2915 IWL_ERR(trans, "Debug destination is not set to DRAM\n");
2916 return -ENOENT;
2917 }
2918
2919 if (trans_pcie->fw_mon_data.state != IWL_FW_MON_DBGFS_STATE_CLOSED)
2920 return -EBUSY;
2921
2922 trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_OPEN;
2923 return simple_open(inode, file);
2924 }
2925
2926 static int iwl_dbgfs_monitor_data_release(struct inode *inode,
2927 struct file *file)
2928 {
2929 struct iwl_trans_pcie *trans_pcie =
2930 IWL_TRANS_GET_PCIE_TRANS(inode->i_private);
2931
2932 if (trans_pcie->fw_mon_data.state == IWL_FW_MON_DBGFS_STATE_OPEN)
2933 trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_CLOSED;
2934 return 0;
2935 }
2936
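/*
 * Copy up to *size bytes from buf to user_buf, clamped to the dword-aligned
 * space still left in the user buffer.  On return *size holds the number of
 * bytes actually copied and *bytes_copied is advanced by the same amount;
 * returns true once the user buffer is full.
 */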
2937 static bool iwl_write_to_user_buf(char __user *user_buf, ssize_t count,
2938 void *buf, ssize_t *size,
2939 ssize_t *bytes_copied)
2940 {
2941 ssize_t buf_size_left = count - *bytes_copied;
2942
2943 buf_size_left = buf_size_left - (buf_size_left % sizeof(u32));
2944 if (*size > buf_size_left)
2945 *size = buf_size_left;
2946
2947 *size -= copy_to_user(user_buf, buf, *size);
2948 *bytes_copied += *size;
2949
2950 if (buf_size_left == *size)
2951 return true;
2952 return false;
2953 }
2954
2955 static ssize_t iwl_dbgfs_monitor_data_read(struct file *file,
2956 char __user *user_buf,
2957 size_t count, loff_t *ppos)
2958 {
2959 struct iwl_trans *trans = file->private_data;
2960 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2961 u8 *cpu_addr = (void *)trans->dbg.fw_mon.block, *curr_buf;
2962 struct cont_rec *data = &trans_pcie->fw_mon_data;
2963 u32 write_ptr_addr, wrap_cnt_addr, write_ptr, wrap_cnt;
2964 ssize_t size, bytes_copied = 0;
2965 bool b_full;
2966
2967 if (trans->dbg.dest_tlv) {
2968 write_ptr_addr =
2969 le32_to_cpu(trans->dbg.dest_tlv->write_ptr_reg);
2970 wrap_cnt_addr = le32_to_cpu(trans->dbg.dest_tlv->wrap_count);
2971 } else {
2972 write_ptr_addr = MON_BUFF_WRPTR;
2973 wrap_cnt_addr = MON_BUFF_CYCLE_CNT;
2974 }
2975
2976 if (unlikely(!trans->dbg.rec_on))
2977 return 0;
2978
2979 mutex_lock(&data->mutex);
2980 if (data->state ==
2981 IWL_FW_MON_DBGFS_STATE_DISABLED) {
2982 mutex_unlock(&data->mutex);
2983 return 0;
2984 }
2985
2986 /* write_ptr position in bytes rather than DWs */
2987 write_ptr = iwl_read_prph(trans, write_ptr_addr) * sizeof(u32);
2988 wrap_cnt = iwl_read_prph(trans, wrap_cnt_addr);
2989
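/*
 * Three cases follow: same wrap count as the previous read - copy the
 * bytes between the previous and current write pointers; exactly one
 * wrap since then with the write pointer behind the previous one -
 * copy the buffer tail and then, if room remains, its beginning;
 * anything else - we lost sync, restart from the beginning.
 */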
2990 if (data->prev_wrap_cnt == wrap_cnt) {
2991 size = write_ptr - data->prev_wr_ptr;
2992 curr_buf = cpu_addr + data->prev_wr_ptr;
2993 b_full = iwl_write_to_user_buf(user_buf, count,
2994 curr_buf, &size,
2995 &bytes_copied);
2996 data->prev_wr_ptr += size;
2997
2998 } else if (data->prev_wrap_cnt == wrap_cnt - 1 &&
2999 write_ptr < data->prev_wr_ptr) {
3000 size = trans->dbg.fw_mon.size - data->prev_wr_ptr;
3001 curr_buf = cpu_addr + data->prev_wr_ptr;
3002 b_full = iwl_write_to_user_buf(user_buf, count,
3003 curr_buf, &size,
3004 &bytes_copied);
3005 data->prev_wr_ptr += size;
3006
3007 if (!b_full) {
3008 size = write_ptr;
3009 b_full = iwl_write_to_user_buf(user_buf, count,
3010 cpu_addr, &size,
3011 &bytes_copied);
3012 data->prev_wr_ptr = size;
3013 data->prev_wrap_cnt++;
3014 }
3015 } else {
3016 if (data->prev_wrap_cnt == wrap_cnt - 1 &&
3017 write_ptr > data->prev_wr_ptr)
3018 IWL_WARN(trans,
3019 "write pointer passed previous write pointer, start copying from the beginning\n");
3020 else if (!unlikely(data->prev_wrap_cnt == 0 &&
3021 data->prev_wr_ptr == 0))
3022 IWL_WARN(trans,
3023 "monitor data is out of sync, start copying from the beginning\n");
3024
3025 size = write_ptr;
3026 b_full = iwl_write_to_user_buf(user_buf, count,
3027 cpu_addr, &size,
3028 &bytes_copied);
3029 data->prev_wr_ptr = size;
3030 data->prev_wrap_cnt = wrap_cnt;
3031 }
3032
3033 mutex_unlock(&data->mutex);
3034
3035 return bytes_copied;
3036 }
3037
3038 static ssize_t iwl_dbgfs_rf_read(struct file *file,
3039 char __user *user_buf,
3040 size_t count, loff_t *ppos)
3041 {
3042 struct iwl_trans *trans = file->private_data;
3043 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
3044
3045 if (!trans_pcie->rf_name[0])
3046 return -ENODEV;
3047
3048 return simple_read_from_buffer(user_buf, count, ppos,
3049 trans_pcie->rf_name,
3050 strlen(trans_pcie->rf_name));
3051 }
3052
3053 DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
3054 DEBUGFS_READ_FILE_OPS(fh_reg);
3055 DEBUGFS_READ_FILE_OPS(rx_queue);
3056 DEBUGFS_WRITE_FILE_OPS(csr);
3057 DEBUGFS_READ_WRITE_FILE_OPS(rfkill);
3058 DEBUGFS_READ_FILE_OPS(rf);
3059
3060 static const struct file_operations iwl_dbgfs_tx_queue_ops = {
3061 .owner = THIS_MODULE,
3062 .open = iwl_dbgfs_tx_queue_open,
3063 .read = seq_read,
3064 .llseek = seq_lseek,
3065 .release = seq_release_private,
3066 };
3067
3068 static const struct file_operations iwl_dbgfs_monitor_data_ops = {
3069 .read = iwl_dbgfs_monitor_data_read,
3070 .open = iwl_dbgfs_monitor_data_open,
3071 .release = iwl_dbgfs_monitor_data_release,
3072 };
3073
3074 /* Create the debugfs files and directories */
3075 void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
3076 {
3077 struct dentry *dir = trans->dbgfs_dir;
3078
3079 DEBUGFS_ADD_FILE(rx_queue, dir, 0400);
3080 DEBUGFS_ADD_FILE(tx_queue, dir, 0400);
3081 DEBUGFS_ADD_FILE(interrupt, dir, 0600);
3082 DEBUGFS_ADD_FILE(csr, dir, 0200);
3083 DEBUGFS_ADD_FILE(fh_reg, dir, 0400);
3084 DEBUGFS_ADD_FILE(rfkill, dir, 0600);
3085 DEBUGFS_ADD_FILE(monitor_data, dir, 0400);
3086 DEBUGFS_ADD_FILE(rf, dir, 0400);
3087 }
3088
3089 static void iwl_trans_pcie_debugfs_cleanup(struct iwl_trans *trans)
3090 {
3091 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
3092 struct cont_rec *data = &trans_pcie->fw_mon_data;
3093
3094 mutex_lock(&data->mutex);
3095 data->state = IWL_FW_MON_DBGFS_STATE_DISABLED;
3096 mutex_unlock(&data->mutex);
3097 }
3098 #endif /*CONFIG_IWLWIFI_DEBUGFS */
3099
3100 static u32 iwl_trans_pcie_get_cmdlen(struct iwl_trans *trans, void *tfd)
3101 {
3102 u32 cmdlen = 0;
3103 int i;
3104
3105 for (i = 0; i < trans->txqs.tfd.max_tbs; i++)
3106 cmdlen += iwl_txq_gen1_tfd_tb_get_len(trans, tfd, i);
3107
3108 return cmdlen;
3109 }
3110
3111 static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
3112 struct iwl_fw_error_dump_data **data,
3113 int allocated_rb_nums)
3114 {
3115 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
3116 int max_len = trans_pcie->rx_buf_bytes;
3117 /* Dump RBs is supported only for pre-9000 devices (1 queue) */
3118 struct iwl_rxq *rxq = &trans_pcie->rxq[0];
3119 u32 i, r, j, rb_len = 0;
3120
3121 spin_lock(&rxq->lock);
3122
3123 r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF;
3124
3125 for (i = rxq->read, j = 0;
3126 i != r && j < allocated_rb_nums;
3127 i = (i + 1) & RX_QUEUE_MASK, j++) {
3128 struct iwl_rx_mem_buffer *rxb = rxq->queue[i];
3129 struct iwl_fw_error_dump_rb *rb;
3130
3131 dma_sync_single_for_cpu(trans->dev, rxb->page_dma,
3132 max_len, DMA_FROM_DEVICE);
3133
3134 rb_len += sizeof(**data) + sizeof(*rb) + max_len;
3135
3136 (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RB);
3137 (*data)->len = cpu_to_le32(sizeof(*rb) + max_len);
3138 rb = (void *)(*data)->data;
3139 rb->index = cpu_to_le32(i);
3140 memcpy(rb->data, page_address(rxb->page), max_len);
3141
3142 *data = iwl_fw_error_next_data(*data);
3143 }
3144
3145 spin_unlock(&rxq->lock);
3146
3147 return rb_len;
3148 }
3149 #define IWL_CSR_TO_DUMP (0x250)
3150
3151 static u32 iwl_trans_pcie_dump_csr(struct iwl_trans *trans,
3152 struct iwl_fw_error_dump_data **data)
3153 {
3154 u32 csr_len = sizeof(**data) + IWL_CSR_TO_DUMP;
3155 __le32 *val;
3156 int i;
3157
3158 (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_CSR);
3159 (*data)->len = cpu_to_le32(IWL_CSR_TO_DUMP);
3160 val = (void *)(*data)->data;
3161
3162 for (i = 0; i < IWL_CSR_TO_DUMP; i += 4)
3163 *val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i));
3164
3165 *data = iwl_fw_error_next_data(*data);
3166
3167 return csr_len;
3168 }
3169
3170 static u32 iwl_trans_pcie_fh_regs_dump(struct iwl_trans *trans,
3171 struct iwl_fw_error_dump_data **data)
3172 {
3173 u32 fh_regs_len = FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND;
3174 __le32 *val;
3175 int i;
3176
3177 if (!iwl_trans_grab_nic_access(trans))
3178 return 0;
3179
3180 (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FH_REGS);
3181 (*data)->len = cpu_to_le32(fh_regs_len);
3182 val = (void *)(*data)->data;
3183
3184 if (!trans->trans_cfg->gen2)
3185 for (i = FH_MEM_LOWER_BOUND; i < FH_MEM_UPPER_BOUND;
3186 i += sizeof(u32))
3187 *val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i));
3188 else
3189 for (i = iwl_umac_prph(trans, FH_MEM_LOWER_BOUND_GEN2);
3190 i < iwl_umac_prph(trans, FH_MEM_UPPER_BOUND_GEN2);
3191 i += sizeof(u32))
3192 *val++ = cpu_to_le32(iwl_trans_pcie_read_prph(trans,
3193 i));
3194
3195 iwl_trans_release_nic_access(trans);
3196
3197 *data = iwl_fw_error_next_data(*data);
3198
3199 return sizeof(**data) + fh_regs_len;
3200 }
3201
3202 static u32
3203 iwl_trans_pci_dump_marbh_monitor(struct iwl_trans *trans,
3204 struct iwl_fw_error_dump_fw_mon *fw_mon_data,
3205 u32 monitor_len)
3206 {
3207 u32 buf_size_in_dwords = (monitor_len >> 2);
3208 u32 *buffer = (u32 *)fw_mon_data->data;
3209 u32 i;
3210
3211 if (!iwl_trans_grab_nic_access(trans))
3212 return 0;
3213
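/*
 * Writing 0x1 puts the monitor DMA (MARBH) into read mode; each read
 * of MON_DMARB_RD_DATA_ADDR then returns the next buffered dword, and
 * writing 0x0 afterwards takes it back out of read mode.
 */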
3214 iwl_write_umac_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x1);
3215 for (i = 0; i < buf_size_in_dwords; i++)
3216 buffer[i] = iwl_read_umac_prph_no_grab(trans,
3217 MON_DMARB_RD_DATA_ADDR);
3218 iwl_write_umac_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x0);
3219
3220 iwl_trans_release_nic_access(trans);
3221
3222 return monitor_len;
3223 }
3224
3225 static void
3226 iwl_trans_pcie_dump_pointers(struct iwl_trans *trans,
3227 struct iwl_fw_error_dump_fw_mon *fw_mon_data)
3228 {
3229 u32 base, base_high, write_ptr, write_ptr_val, wrap_cnt;
3230
3231 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
3232 base = DBGC_CUR_DBGBUF_BASE_ADDR_LSB;
3233 base_high = DBGC_CUR_DBGBUF_BASE_ADDR_MSB;
3234 write_ptr = DBGC_CUR_DBGBUF_STATUS;
3235 wrap_cnt = DBGC_DBGBUF_WRAP_AROUND;
3236 } else if (trans->dbg.dest_tlv) {
3237 write_ptr = le32_to_cpu(trans->dbg.dest_tlv->write_ptr_reg);
3238 wrap_cnt = le32_to_cpu(trans->dbg.dest_tlv->wrap_count);
3239 base = le32_to_cpu(trans->dbg.dest_tlv->base_reg);
3240 } else {
3241 base = MON_BUFF_BASE_ADDR;
3242 write_ptr = MON_BUFF_WRPTR;
3243 wrap_cnt = MON_BUFF_CYCLE_CNT;
3244 }
3245
3246 write_ptr_val = iwl_read_prph(trans, write_ptr);
3247 fw_mon_data->fw_mon_cycle_cnt =
3248 cpu_to_le32(iwl_read_prph(trans, wrap_cnt));
3249 fw_mon_data->fw_mon_base_ptr =
3250 cpu_to_le32(iwl_read_prph(trans, base));
3251 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
3252 fw_mon_data->fw_mon_base_high_ptr =
3253 cpu_to_le32(iwl_read_prph(trans, base_high));
3254 write_ptr_val &= DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK;
3255 /* convert wrtPtr to DWs, to align with all HWs */
3256 write_ptr_val >>= 2;
3257 }
3258 fw_mon_data->fw_mon_wr_ptr = cpu_to_le32(write_ptr_val);
3259 }
3260
3261 static u32
3262 iwl_trans_pcie_dump_monitor(struct iwl_trans *trans,
3263 struct iwl_fw_error_dump_data **data,
3264 u32 monitor_len)
3265 {
3266 struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
3267 u32 len = 0;
3268
3269 if (trans->dbg.dest_tlv ||
3270 (fw_mon->size &&
3271 (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000 ||
3272 trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210))) {
3273 struct iwl_fw_error_dump_fw_mon *fw_mon_data;
3274
3275 (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR);
3276 fw_mon_data = (void *)(*data)->data;
3277
3278 iwl_trans_pcie_dump_pointers(trans, fw_mon_data);
3279
3280 len += sizeof(**data) + sizeof(*fw_mon_data);
3281 if (fw_mon->size) {
3282 memcpy(fw_mon_data->data, fw_mon->block, fw_mon->size);
3283 monitor_len = fw_mon->size;
3284 } else if (trans->dbg.dest_tlv->monitor_mode == SMEM_MODE) {
3285 u32 base = le32_to_cpu(fw_mon_data->fw_mon_base_ptr);
3286 /*
3287 * Update pointers to reflect actual values after
3288 * shifting
3289 */
3290 if (trans->dbg.dest_tlv->version) {
3291 base = (iwl_read_prph(trans, base) &
3292 IWL_LDBG_M2S_BUF_BA_MSK) <<
3293 trans->dbg.dest_tlv->base_shift;
3294 base *= IWL_M2S_UNIT_SIZE;
3295 base += trans->cfg->smem_offset;
3296 } else {
3297 base = iwl_read_prph(trans, base) <<
3298 trans->dbg.dest_tlv->base_shift;
3299 }
3300
3301 iwl_trans_read_mem(trans, base, fw_mon_data->data,
3302 monitor_len / sizeof(u32));
3303 } else if (trans->dbg.dest_tlv->monitor_mode == MARBH_MODE) {
3304 monitor_len =
3305 iwl_trans_pci_dump_marbh_monitor(trans,
3306 fw_mon_data,
3307 monitor_len);
3308 } else {
3309 /* Didn't match anything - output no monitor data */
3310 monitor_len = 0;
3311 }
3312
3313 len += monitor_len;
3314 (*data)->len = cpu_to_le32(monitor_len + sizeof(*fw_mon_data));
3315 }
3316
3317 return len;
3318 }
3319
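/*
 * Returns the number of monitor payload bytes the dump will carry and adds
 * the full section size (error-dump headers included) to *len; returns 0 if
 * no firmware monitor is configured.
 */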
3320 static int iwl_trans_get_fw_monitor_len(struct iwl_trans *trans, u32 *len)
3321 {
3322 if (trans->dbg.fw_mon.size) {
3323 *len += sizeof(struct iwl_fw_error_dump_data) +
3324 sizeof(struct iwl_fw_error_dump_fw_mon) +
3325 trans->dbg.fw_mon.size;
3326 return trans->dbg.fw_mon.size;
3327 } else if (trans->dbg.dest_tlv) {
3328 u32 base, end, cfg_reg, monitor_len;
3329
3330 if (trans->dbg.dest_tlv->version == 1) {
3331 cfg_reg = le32_to_cpu(trans->dbg.dest_tlv->base_reg);
3332 cfg_reg = iwl_read_prph(trans, cfg_reg);
3333 base = (cfg_reg & IWL_LDBG_M2S_BUF_BA_MSK) <<
3334 trans->dbg.dest_tlv->base_shift;
3335 base *= IWL_M2S_UNIT_SIZE;
3336 base += trans->cfg->smem_offset;
3337
3338 monitor_len =
3339 (cfg_reg & IWL_LDBG_M2S_BUF_SIZE_MSK) >>
3340 trans->dbg.dest_tlv->end_shift;
3341 monitor_len *= IWL_M2S_UNIT_SIZE;
3342 } else {
3343 base = le32_to_cpu(trans->dbg.dest_tlv->base_reg);
3344 end = le32_to_cpu(trans->dbg.dest_tlv->end_reg);
3345
3346 base = iwl_read_prph(trans, base) <<
3347 trans->dbg.dest_tlv->base_shift;
3348 end = iwl_read_prph(trans, end) <<
3349 trans->dbg.dest_tlv->end_shift;
3350
3351 /* Make "end" point to the actual end */
3352 if (trans->trans_cfg->device_family >=
3353 IWL_DEVICE_FAMILY_8000 ||
3354 trans->dbg.dest_tlv->monitor_mode == MARBH_MODE)
3355 end += (1 << trans->dbg.dest_tlv->end_shift);
3356 monitor_len = end - base;
3357 }
3358 *len += sizeof(struct iwl_fw_error_dump_data) +
3359 sizeof(struct iwl_fw_error_dump_fw_mon) +
3360 monitor_len;
3361 return monitor_len;
3362 }
3363 return 0;
3364 }
3365
3366 static struct iwl_trans_dump_data *
3367 iwl_trans_pcie_dump_data(struct iwl_trans *trans,
3368 u32 dump_mask,
3369 const struct iwl_dump_sanitize_ops *sanitize_ops,
3370 void *sanitize_ctx)
3371 {
3372 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
3373 struct iwl_fw_error_dump_data *data;
3374 struct iwl_txq *cmdq = trans->txqs.txq[trans->txqs.cmd.q_id];
3375 struct iwl_fw_error_dump_txcmd *txcmd;
3376 struct iwl_trans_dump_data *dump_data;
3377 u32 len, num_rbs = 0, monitor_len = 0;
3378 int i, ptr;
3379 bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status) &&
3380 !trans->trans_cfg->mq_rx_supported &&
3381 dump_mask & BIT(IWL_FW_ERROR_DUMP_RB);
3382
3383 if (!dump_mask)
3384 return NULL;
3385
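/*
 * The dump is built in two passes: first sum up the worst-case length
 * of every requested section, then allocate the buffer in one go and
 * fill it in below.
 */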
3386 /* transport dump header */
3387 len = sizeof(*dump_data);
3388
3389 /* host commands */
3390 if (dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD) && cmdq)
3391 len += sizeof(*data) +
3392 cmdq->n_window * (sizeof(*txcmd) +
3393 TFD_MAX_PAYLOAD_SIZE);
3394
3395 /* FW monitor */
3396 if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FW_MONITOR))
3397 monitor_len = iwl_trans_get_fw_monitor_len(trans, &len);
3398
3399 /* CSR registers */
3400 if (dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR))
3401 len += sizeof(*data) + IWL_CSR_TO_DUMP;
3402
3403 /* FH registers */
3404 if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS)) {
3405 if (trans->trans_cfg->gen2)
3406 len += sizeof(*data) +
3407 (iwl_umac_prph(trans, FH_MEM_UPPER_BOUND_GEN2) -
3408 iwl_umac_prph(trans, FH_MEM_LOWER_BOUND_GEN2));
3409 else
3410 len += sizeof(*data) +
3411 (FH_MEM_UPPER_BOUND -
3412 FH_MEM_LOWER_BOUND);
3413 }
3414
3415 if (dump_rbs) {
3416 /* Dump RBs is supported only for pre-9000 devices (1 queue) */
3417 struct iwl_rxq *rxq = &trans_pcie->rxq[0];
3418 /* RBs */
3419 num_rbs =
3420 le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq))
3421 & 0x0FFF;
3422 num_rbs = (num_rbs - rxq->read) & RX_QUEUE_MASK;
3423 len += num_rbs * (sizeof(*data) +
3424 sizeof(struct iwl_fw_error_dump_rb) +
3425 (PAGE_SIZE << trans_pcie->rx_page_order));
3426 }
3427
3428 /* Paged memory for gen2 HW */
3429 if (trans->trans_cfg->gen2 && dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING))
3430 for (i = 0; i < trans->init_dram.paging_cnt; i++)
3431 len += sizeof(*data) +
3432 sizeof(struct iwl_fw_error_dump_paging) +
3433 trans->init_dram.paging[i].size;
3434
3435 dump_data = vzalloc(len);
3436 if (!dump_data)
3437 return NULL;
3438
3439 len = 0;
3440 data = (void *)dump_data->data;
3441
	if (dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD) && cmdq) {
		u16 tfd_size = trans->txqs.tfd.size;

		data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD);
		txcmd = (void *)data->data;
		spin_lock_bh(&cmdq->lock);
		ptr = cmdq->write_ptr;
		for (i = 0; i < cmdq->n_window; i++) {
			u8 idx = iwl_txq_get_cmd_index(cmdq, ptr);
			u8 tfdidx;
			u32 caplen, cmdlen;

			if (trans->trans_cfg->gen2)
				tfdidx = idx;
			else
				tfdidx = ptr;

			cmdlen = iwl_trans_pcie_get_cmdlen(trans,
							   (u8 *)cmdq->tfds +
							   tfd_size * tfdidx);
			caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen);

			if (cmdlen) {
				len += sizeof(*txcmd) + caplen;
				txcmd->cmdlen = cpu_to_le32(cmdlen);
				txcmd->caplen = cpu_to_le32(caplen);
				memcpy(txcmd->data, cmdq->entries[idx].cmd,
				       caplen);
				if (sanitize_ops && sanitize_ops->frob_hcmd)
					sanitize_ops->frob_hcmd(sanitize_ctx,
								txcmd->data,
								caplen);
				txcmd = (void *)((u8 *)txcmd->data + caplen);
			}

			ptr = iwl_txq_dec_wrap(trans, ptr);
		}
		spin_unlock_bh(&cmdq->lock);

		data->len = cpu_to_le32(len);
		len += sizeof(*data);
		data = iwl_fw_error_next_data(data);
	}
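	/*
	 * Note on the loop above: the command queue is walked backwards from
	 * write_ptr through all n_window slots via iwl_txq_dec_wrap(), so the
	 * most recent host commands come first. At most TFD_MAX_PAYLOAD_SIZE
	 * bytes of each command are captured (caplen), and the optional
	 * frob_hcmd() sanitizer lets the caller scrub sensitive payload bytes
	 * before they land in the dump.
	 */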

	if (dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR))
		len += iwl_trans_pcie_dump_csr(trans, &data);
	if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS))
		len += iwl_trans_pcie_fh_regs_dump(trans, &data);
	if (dump_rbs)
		len += iwl_trans_pcie_dump_rbs(trans, &data, num_rbs);

	/* Paged memory for gen2 HW */
	if (trans->trans_cfg->gen2 &&
	    dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING)) {
		for (i = 0; i < trans->init_dram.paging_cnt; i++) {
			struct iwl_fw_error_dump_paging *paging;
			u32 page_len = trans->init_dram.paging[i].size;

			data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING);
			data->len = cpu_to_le32(sizeof(*paging) + page_len);
			paging = (void *)data->data;
			paging->index = cpu_to_le32(i);
			memcpy(paging->data,
			       trans->init_dram.paging[i].block, page_len);
			data = iwl_fw_error_next_data(data);

			len += sizeof(*data) + sizeof(*paging) + page_len;
		}
	}
	if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FW_MONITOR))
		len += iwl_trans_pcie_dump_monitor(trans, &data, monitor_len);

	dump_data->len = len;

	return dump_data;
}

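/*
 * Minimal usage sketch (an illustration, not taken from this file): the op
 * mode reaches this function through the .dump_data transport op, typically
 * via the iwl_trans_dump_data() wrapper, and owns the returned buffer:
 *
 *	struct iwl_trans_dump_data *dump;
 *
 *	dump = iwl_trans_dump_data(trans, BIT(IWL_FW_ERROR_DUMP_TXCMD) |
 *				   BIT(IWL_FW_ERROR_DUMP_CSR), NULL, NULL);
 *	if (dump) {
 *		... hand dump->data / dump->len to the debug core ...
 *		vfree(dump);
 *	}
 */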
static void iwl_trans_pci_interrupts(struct iwl_trans *trans, bool enable)
{
	if (enable)
		iwl_enable_interrupts(trans);
	else
		iwl_disable_interrupts(trans);
}

static void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans)
{
	u32 inta_addr, sw_err_bit;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans_pcie->msix_enabled) {
		inta_addr = CSR_MSIX_HW_INT_CAUSES_AD;
		if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
			sw_err_bit = MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ;
		else
			sw_err_bit = MSIX_HW_INT_CAUSES_REG_SW_ERR;
	} else {
		inta_addr = CSR_INT;
		sw_err_bit = CSR_INT_BIT_SW_ERR;
	}

	iwl_trans_sync_nmi_with_addr(trans, inta_addr, sw_err_bit);
}
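/*
 * Note: the cause register consulted after the NMI is triggered depends on
 * the interrupt mode: MSI-X reports HW causes in CSR_MSIX_HW_INT_CAUSES_AD
 * (with Bz-family devices using a relocated SW-error bit), while legacy INTA
 * reports them in CSR_INT. iwl_trans_sync_nmi_with_addr() then waits for the
 * firmware's SW-error interrupt at that address/bit.
 */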

#define IWL_TRANS_COMMON_OPS						\
	.op_mode_leave = iwl_trans_pcie_op_mode_leave,			\
	.write8 = iwl_trans_pcie_write8,				\
	.write32 = iwl_trans_pcie_write32,				\
	.read32 = iwl_trans_pcie_read32,				\
	.read_prph = iwl_trans_pcie_read_prph,				\
	.write_prph = iwl_trans_pcie_write_prph,			\
	.read_mem = iwl_trans_pcie_read_mem,				\
	.write_mem = iwl_trans_pcie_write_mem,				\
	.read_config32 = iwl_trans_pcie_read_config32,			\
	.configure = iwl_trans_pcie_configure,				\
	.set_pmi = iwl_trans_pcie_set_pmi,				\
	.sw_reset = iwl_trans_pcie_sw_reset,				\
	.grab_nic_access = iwl_trans_pcie_grab_nic_access,		\
	.release_nic_access = iwl_trans_pcie_release_nic_access,	\
	.set_bits_mask = iwl_trans_pcie_set_bits_mask,			\
	.dump_data = iwl_trans_pcie_dump_data,				\
	.d3_suspend = iwl_trans_pcie_d3_suspend,			\
	.d3_resume = iwl_trans_pcie_d3_resume,				\
	.interrupts = iwl_trans_pci_interrupts,				\
	.sync_nmi = iwl_trans_pcie_sync_nmi,				\
	.imr_dma_data = iwl_trans_pcie_copy_imr

static const struct iwl_trans_ops trans_ops_pcie = {
	IWL_TRANS_COMMON_OPS,
	.start_hw = iwl_trans_pcie_start_hw,
	.fw_alive = iwl_trans_pcie_fw_alive,
	.start_fw = iwl_trans_pcie_start_fw,
	.stop_device = iwl_trans_pcie_stop_device,

	.send_cmd = iwl_pcie_enqueue_hcmd,

	.tx = iwl_trans_pcie_tx,
	.reclaim = iwl_txq_reclaim,

	.txq_disable = iwl_trans_pcie_txq_disable,
	.txq_enable = iwl_trans_pcie_txq_enable,

	.txq_set_shared_mode = iwl_trans_pcie_txq_set_shared_mode,

	.wait_tx_queues_empty = iwl_trans_pcie_wait_txqs_empty,

	.freeze_txq_timer = iwl_trans_txq_freeze_timer,
	.block_txq_ptrs = iwl_trans_pcie_block_txq_ptrs,
#ifdef CONFIG_IWLWIFI_DEBUGFS
	.debugfs_cleanup = iwl_trans_pcie_debugfs_cleanup,
#endif
};

static const struct iwl_trans_ops trans_ops_pcie_gen2 = {
	IWL_TRANS_COMMON_OPS,
	.start_hw = iwl_trans_pcie_start_hw,
	.fw_alive = iwl_trans_pcie_gen2_fw_alive,
	.start_fw = iwl_trans_pcie_gen2_start_fw,
	.stop_device = iwl_trans_pcie_gen2_stop_device,

	.send_cmd = iwl_pcie_gen2_enqueue_hcmd,

	.tx = iwl_txq_gen2_tx,
	.reclaim = iwl_txq_reclaim,

	.set_q_ptrs = iwl_txq_set_q_ptrs,

	.txq_alloc = iwl_txq_dyn_alloc,
	.txq_free = iwl_txq_dyn_free,
	.wait_txq_empty = iwl_trans_pcie_wait_txq_empty,
	.rxq_dma_data = iwl_trans_pcie_rxq_dma_data,
	.load_pnvm = iwl_trans_pcie_ctx_info_gen3_load_pnvm,
	.set_pnvm = iwl_trans_pcie_ctx_info_gen3_set_pnvm,
	.load_reduce_power = iwl_trans_pcie_ctx_info_gen3_load_reduce_power,
	.set_reduce_power = iwl_trans_pcie_ctx_info_gen3_set_reduce_power,
#ifdef CONFIG_IWLWIFI_DEBUGFS
	.debugfs_cleanup = iwl_trans_pcie_debugfs_cleanup,
#endif
};

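/*
 * Summary of the two ops tables above: both share IWL_TRANS_COMMON_OPS.
 * trans_ops_pcie drives the legacy static TX-queue scheduler
 * (txq_enable/txq_disable), while trans_ops_pcie_gen2 uses dynamic queue
 * allocation (txq_alloc/txq_free) plus gen2-only hooks such as PNVM and
 * reduce-power table loading. iwl_trans_pcie_alloc() below picks one based
 * on cfg_trans->gen2.
 */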
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
				       const struct pci_device_id *ent,
				       const struct iwl_cfg_trans_params *cfg_trans)
{
	struct iwl_trans_pcie *trans_pcie;
	struct iwl_trans *trans;
	int ret, addr_size;
	const struct iwl_trans_ops *ops = &trans_ops_pcie_gen2;
	void __iomem * const *table;

	if (!cfg_trans->gen2)
		ops = &trans_ops_pcie;

	ret = pcim_enable_device(pdev);
	if (ret)
		return ERR_PTR(ret);

	trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), &pdev->dev, ops,
				cfg_trans);
	if (!trans)
		return ERR_PTR(-ENOMEM);

	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans_pcie->trans = trans;
	trans_pcie->opmode_down = true;
	spin_lock_init(&trans_pcie->irq_lock);
	spin_lock_init(&trans_pcie->reg_lock);
	spin_lock_init(&trans_pcie->alloc_page_lock);
	mutex_init(&trans_pcie->mutex);
	init_waitqueue_head(&trans_pcie->ucode_write_waitq);
	init_waitqueue_head(&trans_pcie->fw_reset_waitq);
	init_waitqueue_head(&trans_pcie->imr_waitq);

	trans_pcie->rba.alloc_wq = alloc_workqueue("rb_allocator",
						   WQ_HIGHPRI | WQ_UNBOUND, 0);
	if (!trans_pcie->rba.alloc_wq) {
		ret = -ENOMEM;
		goto out_free_trans;
	}
	INIT_WORK(&trans_pcie->rba.rx_alloc, iwl_pcie_rx_allocator_work);

	trans_pcie->debug_rfkill = -1;

	if (!cfg_trans->base_params->pcie_l1_allowed) {
		/*
		 * Workaround - disabling L0S/L1/CLKPM seems to resolve
		 * erratic behavior on these devices. It should be removed
		 * once the underlying issue is understood, since keeping
		 * the link out of L1 all the time wastes a lot of power.
		 */
		pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
				       PCIE_LINK_STATE_L1 |
				       PCIE_LINK_STATE_CLKPM);
	}

	trans_pcie->def_rx_queue = 0;

	pci_set_master(pdev);

	addr_size = trans->txqs.tfd.addr_size;
	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_size));
	if (ret) {
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		/* both attempts failed: */
		if (ret) {
			dev_err(&pdev->dev, "No suitable DMA available\n");
			goto out_no_pci;
		}
	}
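	/*
	 * Note on the fallback above: addr_size is taken from the TFD format
	 * (gen2 TFDs carry wider DMA addresses than legacy ones), so the
	 * first dma_set_mask_and_coherent() asks for the device's full
	 * capability and only drops to a 32-bit mask if the platform
	 * rejects it.
	 */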

	ret = pcim_iomap_regions_request_all(pdev, BIT(0), DRV_NAME);
	if (ret) {
		dev_err(&pdev->dev, "pcim_iomap_regions_request_all failed\n");
		goto out_no_pci;
	}

#if defined(__FreeBSD__)
	linuxkpi_pcim_want_to_use_bus_functions(pdev);
#endif
	table = pcim_iomap_table(pdev);
	if (!table) {
		dev_err(&pdev->dev, "pcim_iomap_table failed\n");
		ret = -ENOMEM;
		goto out_no_pci;
	}

	trans_pcie->hw_base = table[0];
	if (!trans_pcie->hw_base) {
		dev_err(&pdev->dev, "couldn't find IO mem in first BAR\n");
		ret = -ENODEV;
		goto out_no_pci;
	}
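	/*
	 * Note: the pcim_*() helpers above are managed (devres) variants, so
	 * the enabled device, the requested regions and the BAR0 mapping held
	 * in trans_pcie->hw_base are all released automatically when the
	 * driver detaches; none of the error paths below need to undo them.
	 */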

	/* We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state */
	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);

	trans_pcie->pci_dev = pdev;
	iwl_disable_interrupts(trans);

	trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
	if (trans->hw_rev == 0xffffffff) {
		dev_err(&pdev->dev, "HW_REV=0xFFFFFFFF, PCI issues?\n");
		ret = -EIO;
		goto out_no_pci;
	}

	/*
	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV has
	 * changed: the revision step now also includes bits 0-1 (there is no
	 * longer a separate "dash" value). To keep hw_rev backwards
	 * compatible, store the step in the old format.
	 */
	if (cfg_trans->device_family >= IWL_DEVICE_FAMILY_8000)
		trans->hw_rev_step = trans->hw_rev & 0xF;
	else
		trans->hw_rev_step = (trans->hw_rev & 0xC) >> 2;
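	/*
	 * Worked example (hypothetical value): for hw_rev == 0x312, a
	 * pre-8000 device stores hw_rev_step = (0x312 & 0xC) >> 2 = 0x0,
	 * whereas an 8000-family device stores hw_rev_step = 0x312 & 0xF
	 * = 0x2, since the step now occupies the low four bits.
	 */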

	IWL_DEBUG_INFO(trans, "HW REV: 0x%x\n", trans->hw_rev);

	iwl_pcie_set_interrupt_capa(pdev, trans, cfg_trans);
	trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
	snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
		 "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device);

	init_waitqueue_head(&trans_pcie->sx_waitq);

	if (trans_pcie->msix_enabled) {
		ret = iwl_pcie_init_msix_handler(pdev, trans_pcie);
		if (ret)
			goto out_no_pci;
	} else {
		ret = iwl_pcie_alloc_ict(trans);
		if (ret)
			goto out_no_pci;

		ret = devm_request_threaded_irq(&pdev->dev, pdev->irq,
						iwl_pcie_isr,
						iwl_pcie_irq_handler,
						IRQF_SHARED, DRV_NAME, trans);
		if (ret) {
			IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
			goto out_free_ict;
		}
	}
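	/*
	 * Note on the branch above: with MSI-X each cause gets its own vector
	 * via iwl_pcie_init_msix_handler(); otherwise a single shared legacy
	 * interrupt is used, backed by the ICT (interrupt cause table) that
	 * iwl_pcie_alloc_ict() sets up, with iwl_pcie_isr() as the hard-IRQ
	 * half and iwl_pcie_irq_handler() as the threaded half.
	 */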

#ifdef CONFIG_IWLWIFI_DEBUGFS
	trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_CLOSED;
	mutex_init(&trans_pcie->fw_mon_data.mutex);
#endif

	iwl_dbg_tlv_init(trans);

	return trans;

out_free_ict:
	iwl_pcie_free_ict(trans);
out_no_pci:
	destroy_workqueue(trans_pcie->rba.alloc_wq);
out_free_trans:
	iwl_trans_free(trans);
	return ERR_PTR(ret);
}

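/*
 * Program the IMR DMA engine for a single DRAM-to-SRAM copy. As the register
 * and bit names suggest: halt the UMAC permanently via the CHICK register,
 * load the SRAM destination, the split 64-bit DRAM source address and the
 * byte count, then enable the channel with an interrupt requested on
 * completion.
 */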
void iwl_trans_pcie_copy_imr_fh(struct iwl_trans *trans,
				u32 dst_addr, u64 src_addr, u32 byte_cnt)
{
	iwl_write_prph(trans, IMR_UREG_CHICK,
		       iwl_read_prph(trans, IMR_UREG_CHICK) |
		       IMR_UREG_CHICK_HALT_UMAC_PERMANENTLY_MSK);
	iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_SRAM_ADDR, dst_addr);
	iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_DRAM_ADDR_LSB,
		       (u32)(src_addr & 0xFFFFFFFF));
	iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_DRAM_ADDR_MSB,
		       iwl_get_dma_hi_addr(src_addr));
	iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_BC, byte_cnt);
	iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_CTRL,
		       IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_IRQ_TARGET_POS |
		       IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_DMA_EN_POS |
		       IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_RS_MSK);
}

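/*
 * Synchronous wrapper around the channel programming above: flag the
 * request, kick the DMA, then sleep on imr_waitq until another context
 * (presumably the interrupt path) moves imr_status off IMR_D2S_REQUESTED
 * or five seconds elapse. Note that both a timeout and an IMR_D2S_ERROR
 * completion are reported to the caller as -ETIMEDOUT.
 */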
int iwl_trans_pcie_copy_imr(struct iwl_trans *trans,
			    u32 dst_addr, u64 src_addr, u32 byte_cnt)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	trans_pcie->imr_status = IMR_D2S_REQUESTED;
	iwl_trans_pcie_copy_imr_fh(trans, dst_addr, src_addr, byte_cnt);
	ret = wait_event_timeout(trans_pcie->imr_waitq,
				 trans_pcie->imr_status !=
				 IMR_D2S_REQUESTED, 5 * HZ);
	if (!ret || trans_pcie->imr_status == IMR_D2S_ERROR) {
		IWL_ERR(trans, "Failed to copy IMR Memory chunk!\n");
		iwl_trans_pcie_dump_regs(trans);
		return -ETIMEDOUT;
	}
	trans_pcie->imr_status = IMR_D2S_IDLE;
	return 0;
}
