1 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
2 /*
3 * Copyright (C) 2005-2014, 2018-2022 Intel Corporation
4 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
5 * Copyright (C) 2016-2017 Intel Deutschland GmbH
6 */
7 #ifndef __iwl_trans_h__
8 #define __iwl_trans_h__
9
10 #include <linux/ieee80211.h>
11 #include <linux/mm.h> /* for page_address */
12 #include <linux/lockdep.h>
13 #include <linux/kernel.h>
14
15 #include "iwl-debug.h"
16 #include "iwl-config.h"
17 #include "fw/img.h"
18 #include "iwl-op-mode.h"
19 #include <linux/firmware.h>
20 #include "fw/api/cmdhdr.h"
21 #include "fw/api/txq.h"
22 #include "fw/api/dbg-tlv.h"
23 #include "iwl-dbg-tlv.h"
24 #if defined(__FreeBSD__)
25 #include <linux/skbuff.h>
26 #include "iwl-modparams.h"
27 #endif
28
29 /**
30 * DOC: Transport layer - what is it?
31 *
32 * The transport layer is the layer that deals with the HW directly. It provides
33 * an abstraction of the underlying HW to the upper layer. The transport layer
34 * doesn't provide any policy, algorithm or anything of this kind, but only
35 * mechanisms to make the HW do something. It is not completely stateless but
36 * close to it.
37 * We will have an implementation for each different supported bus.
38 */
39
40 /**
41 * DOC: Life cycle of the transport layer
42 *
43 * The transport layer has a very precise life cycle.
44 *
45 * 1) A helper function is called during the module initialization and
46 * registers the bus driver's ops with the transport's alloc function.
47 * 2) The bus's probe calls into the transport layer's allocation
48 * functions. Of course, these functions are bus specific.
49 * 3) These allocation functions will spawn the upper layer, which will
50 * register mac80211.
51 *
52 * 4) At some point (i.e. mac80211's start call), the op_mode will call
53 * the following sequence:
54 * start_hw
55 * start_fw
56 *
57 * 5) Then when finished (or reset):
58 * stop_device
59 *
60 * 6) Eventually, the free function will be called.
61 */
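
/*
 * Illustrative sketch (not part of the driver, compiled out): one way an
 * op_mode could drive the life cycle above through the inline wrappers
 * defined later in this file. The function name and error handling are
 * hypothetical.
 */
#if 0
static int example_op_mode_start(struct iwl_trans *trans,
				 const struct fw_img *fw)
{
	int ret;

	/* step 4: from here on the HW may issue interrupts */
	ret = iwl_trans_start_hw(trans);
	if (ret)
		return ret;

	/* kick the firmware image */
	ret = iwl_trans_start_fw(trans, fw, false);
	if (ret)
		iwl_trans_stop_device(trans);	/* step 5 on failure */

	return ret;
}
#endif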
62
63 #define IWL_TRANS_FW_DBG_DOMAIN(trans) IWL_FW_INI_DOMAIN_ALWAYS_ON
64
65 #define FH_RSCSR_FRAME_SIZE_MSK 0x00003FFF /* bits 0-13 */
66 #define FH_RSCSR_FRAME_INVALID 0x55550000
67 #define FH_RSCSR_FRAME_ALIGN 0x40
68 #define FH_RSCSR_RPA_EN BIT(25)
69 #define FH_RSCSR_RADA_EN BIT(26)
70 #define FH_RSCSR_RXQ_POS 16
71 #define FH_RSCSR_RXQ_MASK 0x3F0000
72
73 struct iwl_rx_packet {
74 /*
75 * The first 4 bytes of the RX frame header contain both the RX frame
76 * size and some flags.
77 * Bit fields:
78 * 31: flag flush RB request
79 * 30: flag ignore TC (terminal counter) request
80 * 29: flag fast IRQ request
81 * 28-27: Reserved
82 * 26: RADA enabled
83 * 25: Offload enabled
84 * 24: RPF enabled
85 * 23: RSS enabled
86 * 22: Checksum enabled
87 * 21-16: RX queue
88 * 15-14: Reserved
89 * 13-00: RX frame size
90 */
91 __le32 len_n_flags;
92 struct iwl_cmd_header hdr;
93 u8 data[];
94 } __packed;
95
96 static inline u32 iwl_rx_packet_len(const struct iwl_rx_packet *pkt)
97 {
98 return le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
99 }
100
101 static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt)
102 {
103 return iwl_rx_packet_len(pkt) - sizeof(pkt->hdr);
104 }
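
/*
 * Illustrative sketch (compiled out): the RX queue index can be
 * extracted from @len_n_flags with the FH_RSCSR_RXQ_* definitions
 * above. This helper is hypothetical, not part of the driver API.
 */
#if 0
static inline u32 example_rx_packet_rxq(const struct iwl_rx_packet *pkt)
{
	return (le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >>
	       FH_RSCSR_RXQ_POS;
}
#endif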
105
106 /**
107 * enum CMD_MODE - how to send the host commands?
108 *
109 * @CMD_ASYNC: Return right away and don't wait for the response
110 * @CMD_WANT_SKB: Not valid with CMD_ASYNC. The caller needs the buffer of
111 * the response. The caller needs to call iwl_free_resp when done.
 * @CMD_SEND_IN_RFKILL: Send the command even if the NIC is in RF-kill.
112 * @CMD_WANT_ASYNC_CALLBACK: the op_mode's async callback function must be
113 * called after this command completes. Valid only with CMD_ASYNC.
114 * @CMD_SEND_IN_D3: Allow the command to be sent in D3 mode, relevant to
115 * SUSPEND and RESUME commands. We are in D3 mode when we set
116 * trans->system_pm_mode to IWL_PLAT_PM_MODE_D3.
117 */
118 enum CMD_MODE {
119 CMD_ASYNC = BIT(0),
120 CMD_WANT_SKB = BIT(1),
121 CMD_SEND_IN_RFKILL = BIT(2),
122 CMD_WANT_ASYNC_CALLBACK = BIT(3),
123 CMD_SEND_IN_D3 = BIT(4),
124 };
125
126 #define DEF_CMD_PAYLOAD_SIZE 320
127
128 /**
129 * struct iwl_device_cmd
130 *
131 * For allocation of the command and tx queues, this establishes the overall
132 * size of the largest command we send to uCode, except for commands that
133 * aren't fully copied and use other TFD space.
134 */
135 struct iwl_device_cmd {
136 union {
137 struct {
138 struct iwl_cmd_header hdr; /* uCode API */
139 u8 payload[DEF_CMD_PAYLOAD_SIZE];
140 };
141 struct {
142 struct iwl_cmd_header_wide hdr_wide;
143 u8 payload_wide[DEF_CMD_PAYLOAD_SIZE -
144 sizeof(struct iwl_cmd_header_wide) +
145 sizeof(struct iwl_cmd_header)];
146 };
147 };
148 } __packed;
149
150 /**
151 * struct iwl_device_tx_cmd - buffer for TX command
152 * @hdr: the header
153 * @payload: the payload placeholder
154 *
155 * The actual structure is sized dynamically according to need.
156 */
157 struct iwl_device_tx_cmd {
158 struct iwl_cmd_header hdr;
159 u8 payload[];
160 } __packed;
161
162 #define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))
163
164 /*
165 * number of transfer buffers (fragments) per transmit frame descriptor;
166 * this is just the driver's idea, the hardware supports 20
167 */
168 #define IWL_MAX_CMD_TBS_PER_TFD 2
169
170 /* We need 2 entries for the TX command and header, and another one might
171 * be needed for potential data in the SKB's head. The remaining ones can
172 * be used for frags.
173 */
174 #define IWL_TRANS_MAX_FRAGS(trans) ((trans)->txqs.tfd.max_tbs - 3)
175
176 /**
177 * enum iwl_hcmd_dataflag - flag for each one of the chunks of the command
178 *
179 * @IWL_HCMD_DFL_NOCOPY: By default, the command is copied to the host command's
180 * ring. The transport layer doesn't map the command's buffer to DMA, but
181 * rather copies it to a previously allocated DMA buffer. This flag tells
182 * the transport layer not to copy the command, but to map the existing
183 * buffer (that is passed in) instead. This saves the memcpy and allows
184 * commands that are bigger than the fixed buffer to be submitted.
185 * Note that a TFD entry after a NOCOPY one cannot be a normal copied one.
186 * @IWL_HCMD_DFL_DUP: Only valid without NOCOPY, duplicate the memory for this
187 * chunk internally and free it again after the command completes. This
188 * can (currently) be used only once per command.
189 * Note that a TFD entry after a DUP one cannot be a normal copied one.
190 */
191 enum iwl_hcmd_dataflag {
192 IWL_HCMD_DFL_NOCOPY = BIT(0),
193 IWL_HCMD_DFL_DUP = BIT(1),
194 };
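
/*
 * Illustrative sketch (compiled out): a command with a small copied
 * header chunk and a large NOCOPY chunk, as described above. Command
 * id, buffers and sizes are placeholders.
 */
#if 0
static int example_send_nocopy(struct iwl_trans *trans, u32 cmd_id,
			       const void *hdr, u16 hdr_len,
			       const void *big_buf, u16 big_len)
{
	struct iwl_host_cmd hcmd = {
		.id = cmd_id,
		.data = { hdr, big_buf },
		.len = { hdr_len, big_len },
		/* first chunk copied, second mapped directly for DMA */
		.dataflags = { 0, IWL_HCMD_DFL_NOCOPY },
	};

	return iwl_trans_send_cmd(trans, &hcmd);
}
#endif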
195
196 enum iwl_error_event_table_status {
197 IWL_ERROR_EVENT_TABLE_LMAC1 = BIT(0),
198 IWL_ERROR_EVENT_TABLE_LMAC2 = BIT(1),
199 IWL_ERROR_EVENT_TABLE_UMAC = BIT(2),
200 IWL_ERROR_EVENT_TABLE_TCM1 = BIT(3),
201 IWL_ERROR_EVENT_TABLE_TCM2 = BIT(4),
202 IWL_ERROR_EVENT_TABLE_RCM1 = BIT(5),
203 IWL_ERROR_EVENT_TABLE_RCM2 = BIT(6),
204 };
205
206 /**
207 * struct iwl_host_cmd - Host command to the uCode
208 *
209 * @data: array of chunks that composes the data of the host command
210 * @resp_pkt: response packet, if %CMD_WANT_SKB was set
211 * @_rx_page_order: (internally used to free response packet)
212 * @_rx_page_addr: (internally used to free response packet);
213 * on FreeBSD, @_page is used instead
214 * @flags: can be CMD_*
215 * @len: array of the lengths of the chunks in data
216 * @dataflags: IWL_HCMD_DFL_*
217 * @id: command id of the host command, for wide commands encoding the
218 * version and group as well
219 */
220 struct iwl_host_cmd {
221 const void *data[IWL_MAX_CMD_TBS_PER_TFD];
222 struct iwl_rx_packet *resp_pkt;
223 #if defined(__linux__)
224 unsigned long _rx_page_addr;
225 #elif defined(__FreeBSD__)
226 struct page *_page;
227 #endif
228 u32 _rx_page_order;
229
230 u32 flags;
231 u32 id;
232 u16 len[IWL_MAX_CMD_TBS_PER_TFD];
233 u8 dataflags[IWL_MAX_CMD_TBS_PER_TFD];
234 };
235
236 static inline void iwl_free_resp(struct iwl_host_cmd *cmd)
237 {
238 #if defined(__linux__)
239 free_pages(cmd->_rx_page_addr, cmd->_rx_page_order);
240 #elif defined(__FreeBSD__)
241 __free_pages(cmd->_page, cmd->_rx_page_order);
242 #endif
243 }
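
/*
 * Illustrative sketch (compiled out): a synchronous command that wants
 * the response buffer; with %CMD_WANT_SKB the caller owns the response
 * and must release it with iwl_free_resp(). Command id is a
 * placeholder.
 */
#if 0
static int example_send_sync(struct iwl_trans *trans, u32 cmd_id)
{
	struct iwl_host_cmd hcmd = {
		.id = cmd_id,
		.flags = CMD_WANT_SKB,
	};
	int ret;

	ret = iwl_trans_send_cmd(trans, &hcmd);
	if (ret)
		return ret;

	/* ... inspect hcmd.resp_pkt, e.g. iwl_rx_packet_payload_len() ... */

	iwl_free_resp(&hcmd);
	return 0;
}
#endif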
244
245 struct iwl_rx_cmd_buffer {
246 struct page *_page;
247 int _offset;
248 bool _page_stolen;
249 u32 _rx_page_order;
250 unsigned int truesize;
251 };
252
253 static inline void *rxb_addr(struct iwl_rx_cmd_buffer *r)
254 {
255 return (void *)((unsigned long)page_address(r->_page) + r->_offset);
256 }
257
258 static inline int rxb_offset(struct iwl_rx_cmd_buffer *r)
259 {
260 return r->_offset;
261 }
262
263 static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r)
264 {
265 r->_page_stolen = true;
266 get_page(r->_page);
267 return r->_page;
268 }
269
270 static inline void iwl_free_rxb(struct iwl_rx_cmd_buffer *r)
271 {
272 __free_pages(r->_page, r->_rx_page_order);
273 }
274
275 #define MAX_NO_RECLAIM_CMDS 6
276
277 #define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
278
279 /*
280 * Maximum number of HW queues the transport layer
281 * currently supports
282 */
283 #define IWL_MAX_HW_QUEUES 32
284 #define IWL_MAX_TVQM_QUEUES 512
285
286 #define IWL_MAX_TID_COUNT 8
287 #define IWL_MGMT_TID 15
288 #define IWL_FRAME_LIMIT 64
289 #define IWL_MAX_RX_HW_QUEUES 16
290 #define IWL_9000_MAX_RX_HW_QUEUES 6
291
292 /**
293 * enum iwl_d3_status - WoWLAN image/device status
294 * @IWL_D3_STATUS_ALIVE: firmware is still running after resume
295 * @IWL_D3_STATUS_RESET: device was reset while suspended
296 */
297 enum iwl_d3_status {
298 IWL_D3_STATUS_ALIVE,
299 IWL_D3_STATUS_RESET,
300 };
301
302 /**
303 * enum iwl_trans_status - transport status flags
304 * @STATUS_SYNC_HCMD_ACTIVE: a SYNC command is being processed
305 * @STATUS_DEVICE_ENABLED: APM is enabled
306 * @STATUS_TPOWER_PMI: the device might be asleep (need to wake it up)
307 * @STATUS_INT_ENABLED: interrupts are enabled
308 * @STATUS_RFKILL_HW: the actual HW state of the RF-kill switch
309 * @STATUS_RFKILL_OPMODE: RF-kill state reported to opmode
310 * @STATUS_FW_ERROR: the fw is in error state
311 * @STATUS_TRANS_GOING_IDLE: shutting down the trans, only special commands
312 * are sent
313 * @STATUS_TRANS_IDLE: the trans is idle - general commands are not to be sent
314 * @STATUS_TRANS_DEAD: trans is dead - avoid any read/write operation
315 * @STATUS_SUPPRESS_CMD_ERROR_ONCE: suppress "FW error in SYNC CMD" once,
316 * e.g. for testing
317 */
318 enum iwl_trans_status {
319 STATUS_SYNC_HCMD_ACTIVE,
320 STATUS_DEVICE_ENABLED,
321 STATUS_TPOWER_PMI,
322 STATUS_INT_ENABLED,
323 STATUS_RFKILL_HW,
324 STATUS_RFKILL_OPMODE,
325 STATUS_FW_ERROR,
326 STATUS_TRANS_GOING_IDLE,
327 STATUS_TRANS_IDLE,
328 STATUS_TRANS_DEAD,
329 STATUS_SUPPRESS_CMD_ERROR_ONCE,
330 };
331
332 static inline int
333 iwl_trans_get_rb_size_order(enum iwl_amsdu_size rb_size)
334 {
335 switch (rb_size) {
336 case IWL_AMSDU_2K:
337 return get_order(2 * 1024);
338 case IWL_AMSDU_4K:
339 return get_order(4 * 1024);
340 case IWL_AMSDU_8K:
341 return get_order(8 * 1024);
342 case IWL_AMSDU_12K:
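		/* a 12K A-MSDU needs a 16K (next power-of-two) buffer */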
343 return get_order(16 * 1024);
344 default:
345 WARN_ON(1);
346 return -1;
347 }
348 }
349
350 static inline int
351 iwl_trans_get_rb_size(enum iwl_amsdu_size rb_size)
352 {
353 switch (rb_size) {
354 case IWL_AMSDU_2K:
355 return 2 * 1024;
356 case IWL_AMSDU_4K:
357 return 4 * 1024;
358 case IWL_AMSDU_8K:
359 return 8 * 1024;
360 case IWL_AMSDU_12K:
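		/* as above, the 12K setting actually uses 16K buffers */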
361 return 16 * 1024;
362 default:
363 WARN_ON(1);
364 return 0;
365 }
366 }
367
368 struct iwl_hcmd_names {
369 u8 cmd_id;
370 const char *const cmd_name;
371 };
372
373 #define HCMD_NAME(x) \
374 { .cmd_id = x, .cmd_name = #x }
375
376 struct iwl_hcmd_arr {
377 const struct iwl_hcmd_names *arr;
378 int size;
379 };
380
381 #define HCMD_ARR(x) \
382 { .arr = x, .size = ARRAY_SIZE(x) }
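
/*
 * Illustrative sketch (compiled out): how an op_mode might build the
 * command-name tables referenced from &struct iwl_trans_config below.
 * The command ids are hypothetical.
 */
#if 0
static const struct iwl_hcmd_names example_names[] = {
	HCMD_NAME(EXAMPLE_CMD_A),	/* placeholder ids */
	HCMD_NAME(EXAMPLE_CMD_B),
};

static const struct iwl_hcmd_arr example_groups[] = {
	[0] = HCMD_ARR(example_names),
};
#endif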
383
384 /**
385 * struct iwl_dump_sanitize_ops - dump sanitization operations
386 * @frob_txf: Scrub the TX FIFO data
387 * @frob_hcmd: Scrub a host command, the %hcmd pointer is to the header
388 * but that might be short or long (&struct iwl_cmd_header or
389 * &struct iwl_cmd_header_wide)
390 * @frob_mem: Scrub memory data
391 */
392 struct iwl_dump_sanitize_ops {
393 void (*frob_txf)(void *ctx, void *buf, size_t buflen);
394 void (*frob_hcmd)(void *ctx, void *hcmd, size_t buflen);
395 void (*frob_mem)(void *ctx, u32 mem_addr, void *mem, size_t buflen);
396 };
397
398 /**
399 * struct iwl_trans_config - transport configuration
400 *
401 * @op_mode: pointer to the upper layer.
402 * @cmd_queue: the index of the command queue.
403 * Must be set before start_fw.
404 * @cmd_fifo: the fifo for host commands
405 * @cmd_q_wdg_timeout: the timeout of the watchdog timer for the command queue.
406 * @no_reclaim_cmds: Some devices erroneously don't set the
407 * SEQ_RX_FRAME bit on some notifications, this is the
408 * list of such notifications to filter. Max length is
409 * %MAX_NO_RECLAIM_CMDS.
410 * @n_no_reclaim_cmds: # of commands in list
411 * @rx_buf_size: RX buffer size needed for A-MSDUs;
412 * if unset, 4k will be the RX buffer size
413 * @bc_table_dword: set to true if the BC table expects the byte count to be
414 * in DWORD (as opposed to bytes)
415 * @scd_set_active: should the transport configure the SCD for HCMD queue
416 * @command_groups: array of command groups, each member is an array of the
417 * commands in the group; for debugging only
418 * @command_groups_size: number of command groups, to avoid illegal access
419 * @cb_data_offs: offset inside skb->cb to store transport data at, must have
420 * space for at least two pointers
421 * @fw_reset_handshake: firmware supports reset flow handshake
422 * @queue_alloc_cmd_ver: queue allocation command version, set to 0
423 * for using the older SCD_QUEUE_CFG, set to the version of
424 * SCD_QUEUE_CONFIG_CMD otherwise.
425 */
426 struct iwl_trans_config {
427 struct iwl_op_mode *op_mode;
428
429 u8 cmd_queue;
430 u8 cmd_fifo;
431 unsigned int cmd_q_wdg_timeout;
432 const u8 *no_reclaim_cmds;
433 unsigned int n_no_reclaim_cmds;
434
435 enum iwl_amsdu_size rx_buf_size;
436 bool bc_table_dword;
437 bool scd_set_active;
438 const struct iwl_hcmd_arr *command_groups;
439 int command_groups_size;
440
441 u8 cb_data_offs;
442 bool fw_reset_handshake;
443 u8 queue_alloc_cmd_ver;
444 };
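
/*
 * Illustrative sketch (compiled out): an op_mode filling the transport
 * configuration; the values are placeholders, see the field
 * documentation above.
 */
#if 0
static void example_configure(struct iwl_trans *trans,
			      struct iwl_op_mode *op_mode)
{
	struct iwl_trans_config trans_cfg = {
		.op_mode = op_mode,
		.cmd_queue = 0,		/* placeholder queue/fifo indices */
		.cmd_fifo = 7,
		.rx_buf_size = IWL_AMSDU_4K,
	};

	iwl_trans_configure(trans, &trans_cfg);
}
#endif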
445
446 struct iwl_trans_dump_data {
447 u32 len;
448 u8 data[];
449 };
450
451 struct iwl_trans;
452
453 struct iwl_trans_txq_scd_cfg {
454 u8 fifo;
455 u8 sta_id;
456 u8 tid;
457 bool aggregate;
458 int frame_limit;
459 };
460
461 /**
462 * struct iwl_trans_rxq_dma_data - RX queue DMA data
463 * @fr_bd_cb: DMA address of free BD cyclic buffer
464 * @fr_bd_wid: Initial write index of the free BD cyclic buffer
465 * @urbd_stts_wrptr: DMA address of urbd_stts_wrptr
466 * @ur_bd_cb: DMA address of used BD cyclic buffer
467 */
468 struct iwl_trans_rxq_dma_data {
469 u64 fr_bd_cb;
470 u32 fr_bd_wid;
471 u64 urbd_stts_wrptr;
472 u64 ur_bd_cb;
473 };
474
475 /* maximal number of DRAM MAP entries supported by FW */
476 #define IPC_DRAM_MAP_ENTRY_NUM_MAX 64
477
478 /**
479 * struct iwl_pnvm_image - contains info about the parsed pnvm image
480 * @chunks: array of pointers to pnvm payloads and their sizes
481 * @n_chunks: the number of pnvm payloads.
482 * @version: the version of the loaded PNVM image
483 */
484 struct iwl_pnvm_image {
485 struct {
486 const void *data;
487 u32 len;
488 } chunks[IPC_DRAM_MAP_ENTRY_NUM_MAX];
489 u32 n_chunks;
490 u32 version;
491 };
492
493 /**
494 * struct iwl_trans_ops - transport specific operations
495 *
496 * All the handlers MUST be implemented
497 *
498 * @start_hw: starts the HW. From that point on, the HW can send interrupts.
499 * May sleep.
500 * @op_mode_leave: Turn off the HW RF kill indication if on
501 * May sleep
502 * @start_fw: allocates and inits all the resources for the transport
503 * layer. Also kick a fw image.
504 * May sleep
505 * @fw_alive: called when the fw sends alive notification. If the fw provides
506 * the SCD base address in SRAM, then provide it here, or 0 otherwise.
507 * May sleep
508 * @stop_device: stops the whole device (embedded CPU put to reset) and stops
509 * the HW. From that point on, the HW will be stopped but will still issue
510 * an interrupt if the HW RF kill switch is triggered.
511 * This callback must do the right thing and not crash even if %start_hw()
512 * was called but not %start_fw(). May sleep.
513 * @d3_suspend: put the device into the correct mode for WoWLAN during
514 * suspend. This is optional, if not implemented WoWLAN will not be
515 * supported. This callback may sleep.
516 * @d3_resume: resume the device after WoWLAN, enabling the opmode to
517 * talk to the WoWLAN image to get its status. This is optional, if not
518 * implemented WoWLAN will not be supported. This callback may sleep.
519 * @send_cmd: send a host command. Must return -ERFKILL if RFkill is asserted.
520 * If RFkill is asserted in the middle of a SYNC host command, it must
521 * return -ERFKILL straight away.
522 * May sleep only if CMD_ASYNC is not set
523 * @tx: send an skb. The transport relies on the op_mode to zero the
524 * ieee80211_tx_info->driver_data. If the MPDU is an A-MSDU, all
525 * the CSUM will be taken care of (TCP CSUM and IP header in case of
526 * IPv4). If the MPDU is a single MSDU, the op_mode must compute the IP
527 * header checksum if it is IPv4.
528 * Must be atomic
529 * @reclaim: free packets up to ssn. Returns a list of freed packets.
530 * Must be atomic
531 * @txq_enable: setup a queue. To setup an AC queue, use the
532 * iwl_trans_ac_txq_enable wrapper. fw_alive must have been called before
533 * this one. The op_mode must not configure the HCMD queue. The scheduler
534 * configuration may be %NULL, in which case the hardware will not be
535 * configured. If true is returned, the operation mode needs to increment
536 * the sequence number of the packets routed to this queue because of a
537 * hardware scheduler bug. May sleep.
538 * @txq_disable: de-configure a Tx queue to send AMPDUs
539 * Must be atomic
540 * @txq_set_shared_mode: change Tx queue shared/unshared marking
541 * @wait_tx_queues_empty: wait until tx queues are empty. May sleep.
542 * @wait_txq_empty: wait until specific tx queue is empty. May sleep.
543 * @freeze_txq_timer: prevents the timer of the queue from firing until the
544 * queue is set to awake. Must be atomic.
545 * @block_txq_ptrs: stop updating the write pointers of the Tx queues. Note
546 * that the transport needs to refcount the calls since this function
547 * will be called several times with block = true, and then the queues
548 * need to be unblocked only after the same number of calls with
549 * block = false.
550 * @write8: write a u8 to a register at offset ofs from the BAR
551 * @write32: write a u32 to a register at offset ofs from the BAR
552 * @read32: read a u32 register at offset ofs from the BAR
553 * @read_prph: read a DWORD from a periphery register
554 * @write_prph: write a DWORD to a periphery register
555 * @read_mem: read device's SRAM in DWORD
556 * @write_mem: write device's SRAM in DWORD. If %buf is %NULL, then the memory
557 * will be zeroed.
558 * @read_config32: read a u32 value from the device's config space at
559 * the given offset.
560 * @configure: configure parameters required by the transport layer from
561 * the op_mode. May be called several times before start_fw, can't be
562 * called after that.
563 * @set_pmi: set the power pmi state
564 * @grab_nic_access: wake the NIC to be able to access non-HBUS regs.
565 * Sleeping is not allowed between grab_nic_access and
566 * release_nic_access.
567 * @release_nic_access: let the NIC go to sleep. The "flags" parameter
568 * must be the same one that was sent before to the grab_nic_access.
569 * @set_bits_mask: set SRAM register according to value and mask.
570 * @dump_data: return a vmalloc'ed buffer with debug data, maybe containing last
571 * TX'ed commands and similar. The buffer will be vfree'd by the caller.
572 * Note that the transport must fill in the proper file headers.
573 * @debugfs_cleanup: used in the driver unload flow to make a proper cleanup
574 * of the trans debugfs
575 * @load_pnvm: save the pnvm data in DRAM
576 * @set_pnvm: set the pnvm data in the prph scratch buffer, inside the
577 * context info.
578 * @load_reduce_power: copy reduce power table to the corresponding DRAM memory
579 * @set_reduce_power: set reduce power table addresses in the scratch buffer
580 * @interrupts: disable/enable interrupts to transport
581 */
582 struct iwl_trans_ops {
583
584 int (*start_hw)(struct iwl_trans *iwl_trans);
585 void (*op_mode_leave)(struct iwl_trans *iwl_trans);
586 int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw,
587 bool run_in_rfkill);
588 void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr);
589 void (*stop_device)(struct iwl_trans *trans);
590
591 int (*d3_suspend)(struct iwl_trans *trans, bool test, bool reset);
592 int (*d3_resume)(struct iwl_trans *trans, enum iwl_d3_status *status,
593 bool test, bool reset);
594
595 int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
596
597 int (*tx)(struct iwl_trans *trans, struct sk_buff *skb,
598 struct iwl_device_tx_cmd *dev_cmd, int queue);
599 void (*reclaim)(struct iwl_trans *trans, int queue, int ssn,
600 struct sk_buff_head *skbs);
601
602 void (*set_q_ptrs)(struct iwl_trans *trans, int queue, int ptr);
603
604 bool (*txq_enable)(struct iwl_trans *trans, int queue, u16 ssn,
605 const struct iwl_trans_txq_scd_cfg *cfg,
606 unsigned int queue_wdg_timeout);
607 void (*txq_disable)(struct iwl_trans *trans, int queue,
608 bool configure_scd);
609 /* 22000 functions */
610 int (*txq_alloc)(struct iwl_trans *trans, u32 flags,
611 u32 sta_mask, u8 tid,
612 int size, unsigned int queue_wdg_timeout);
613 void (*txq_free)(struct iwl_trans *trans, int queue);
614 int (*rxq_dma_data)(struct iwl_trans *trans, int queue,
615 struct iwl_trans_rxq_dma_data *data);
616
617 void (*txq_set_shared_mode)(struct iwl_trans *trans, u32 txq_id,
618 bool shared);
619
620 int (*wait_tx_queues_empty)(struct iwl_trans *trans, u32 txq_bm);
621 int (*wait_txq_empty)(struct iwl_trans *trans, int queue);
622 void (*freeze_txq_timer)(struct iwl_trans *trans, unsigned long txqs,
623 bool freeze);
624 void (*block_txq_ptrs)(struct iwl_trans *trans, bool block);
625
626 void (*write8)(struct iwl_trans *trans, u32 ofs, u8 val);
627 void (*write32)(struct iwl_trans *trans, u32 ofs, u32 val);
628 u32 (*read32)(struct iwl_trans *trans, u32 ofs);
629 u32 (*read_prph)(struct iwl_trans *trans, u32 ofs);
630 void (*write_prph)(struct iwl_trans *trans, u32 ofs, u32 val);
631 int (*read_mem)(struct iwl_trans *trans, u32 addr,
632 void *buf, int dwords);
633 int (*write_mem)(struct iwl_trans *trans, u32 addr,
634 const void *buf, int dwords);
635 int (*read_config32)(struct iwl_trans *trans, u32 ofs, u32 *val);
636 void (*configure)(struct iwl_trans *trans,
637 const struct iwl_trans_config *trans_cfg);
638 void (*set_pmi)(struct iwl_trans *trans, bool state);
639 int (*sw_reset)(struct iwl_trans *trans, bool retake_ownership);
640 bool (*grab_nic_access)(struct iwl_trans *trans);
641 void (*release_nic_access)(struct iwl_trans *trans);
642 void (*set_bits_mask)(struct iwl_trans *trans, u32 reg, u32 mask,
643 u32 value);
644
645 struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans,
646 u32 dump_mask,
647 const struct iwl_dump_sanitize_ops *sanitize_ops,
648 void *sanitize_ctx);
649 void (*debugfs_cleanup)(struct iwl_trans *trans);
650 void (*sync_nmi)(struct iwl_trans *trans);
651 int (*load_pnvm)(struct iwl_trans *trans,
652 const struct iwl_pnvm_image *pnvm_payloads,
653 const struct iwl_ucode_capabilities *capa);
654 void (*set_pnvm)(struct iwl_trans *trans,
655 const struct iwl_ucode_capabilities *capa);
656 int (*load_reduce_power)(struct iwl_trans *trans,
657 const struct iwl_pnvm_image *payloads,
658 const struct iwl_ucode_capabilities *capa);
659 void (*set_reduce_power)(struct iwl_trans *trans,
660 const struct iwl_ucode_capabilities *capa);
661
662 void (*interrupts)(struct iwl_trans *trans, bool enable);
663 int (*imr_dma_data)(struct iwl_trans *trans,
664 u32 dst_addr, u64 src_addr,
665 u32 byte_cnt);
666
667 };
668
669 /**
670 * enum iwl_trans_state - state of the transport layer
671 *
672 * @IWL_TRANS_NO_FW: firmware wasn't started yet, or crashed
673 * @IWL_TRANS_FW_STARTED: FW was started, but not alive yet
674 * @IWL_TRANS_FW_ALIVE: FW has sent an alive response
675 */
676 enum iwl_trans_state {
677 IWL_TRANS_NO_FW,
678 IWL_TRANS_FW_STARTED,
679 IWL_TRANS_FW_ALIVE,
680 };
681
682 /**
683 * DOC: Platform power management
684 *
685 * In system-wide power management the entire platform goes into a low
686 * power state (e.g. idle or suspend to RAM) at the same time and the
687 * device is configured as a wakeup source for the entire platform.
688 * This is usually triggered by userspace activity (e.g. the user
689 * presses the suspend button or a power management daemon decides to
690 * put the platform in low power mode). The device's behavior in this
691 * mode is dictated by the wake-on-WLAN configuration.
692 *
693 * The terms used for the device's behavior are as follows:
694 *
695 * - D0: the device is fully powered and the host is awake;
696 * - D3: the device is in low power mode and only reacts to
697 * specific events (e.g. magic-packet received or scan
698 * results found);
699 *
700 * These terms reflect the power modes in the firmware and are not to
701 * be confused with the physical device power state.
702 */
703
704 /**
705 * enum iwl_plat_pm_mode - platform power management mode
706 *
707 * This enumeration describes the device's platform power management
708 * behavior when in system-wide suspend (i.e. WoWLAN).
709 *
710 * @IWL_PLAT_PM_MODE_DISABLED: power management is disabled for this
711 * device. In system-wide suspend mode, it means that all
712 * connections will be closed automatically by mac80211 before
713 * the platform is suspended.
714 * @IWL_PLAT_PM_MODE_D3: the device goes into D3 mode (i.e. WoWLAN).
715 */
716 enum iwl_plat_pm_mode {
717 IWL_PLAT_PM_MODE_DISABLED,
718 IWL_PLAT_PM_MODE_D3,
719 };
720
721 /**
722 * enum iwl_ini_cfg_state
723 * @IWL_INI_CFG_STATE_NOT_LOADED: no debug cfg was given
724 * @IWL_INI_CFG_STATE_LOADED: debug cfg was found and loaded
725 * @IWL_INI_CFG_STATE_CORRUPTED: debug cfg was found and some of the TLVs
726 * are corrupted. The rest of the debug TLVs will still be used
727 */
728 enum iwl_ini_cfg_state {
729 IWL_INI_CFG_STATE_NOT_LOADED,
730 IWL_INI_CFG_STATE_LOADED,
731 IWL_INI_CFG_STATE_CORRUPTED,
732 };
733
734 /* Max time to wait for nmi interrupt */
735 #define IWL_TRANS_NMI_TIMEOUT (HZ / 4)
736
737 /**
738 * struct iwl_dram_data
739 * @physical: page phy pointer
740 * @block: pointer to the allocated block/page
741 * @size: size of the block/page
742 */
743 struct iwl_dram_data {
744 dma_addr_t physical;
745 void *block;
746 int size;
747 };
748
749 /**
750 * struct iwl_dram_regions - DRAM regions container structure
 * @drams: array of several DRAM areas that contain the pnvm and power
751 * reduction table payloads.
752 * @n_regions: number of DRAM regions that were allocated
753 * @prph_scratch_mem_desc: points to a structure allocated in dram,
754 * designed to show FW where all the payloads are.
755 */
756 struct iwl_dram_regions {
757 struct iwl_dram_data drams[IPC_DRAM_MAP_ENTRY_NUM_MAX];
758 struct iwl_dram_data prph_scratch_mem_desc;
759 u8 n_regions;
760 };
761
762 /**
763 * struct iwl_fw_mon - fw monitor per allocation id
764 * @num_frags: number of fragments
765 * @frags: an array of DRAM buffer fragments
766 */
767 struct iwl_fw_mon {
768 u32 num_frags;
769 struct iwl_dram_data *frags;
770 };
771
772 /**
773 * struct iwl_self_init_dram - dram data used by self init process
774 * @fw: lmac and umac dram data
775 * @fw_cnt: total number of items in array
776 * @paging: paging dram data
777 * @paging_cnt: total number of items in array
778 */
779 struct iwl_self_init_dram {
780 struct iwl_dram_data *fw;
781 int fw_cnt;
782 struct iwl_dram_data *paging;
783 int paging_cnt;
784 };
785
786 /**
787 * struct iwl_imr_data - imr dram data used during debug process
788 * @imr_enable: imr enable status received from fw
789 * @imr_size: imr dram size received from fw
790 * @sram_addr: sram address from debug tlv
791 * @sram_size: sram size from debug tlv
792 * @imr2sram_remainbyte: size remaining after each DMA transfer
793 * @imr_curr_addr: current dst address used during dma transfer
794 * @imr_base_addr: imr address received from fw
795 */
796 struct iwl_imr_data {
797 u32 imr_enable;
798 u32 imr_size;
799 u32 sram_addr;
800 u32 sram_size;
801 u32 imr2sram_remainbyte;
802 u64 imr_curr_addr;
803 __le64 imr_base_addr;
804 };
805
806 #define IWL_TRANS_CURRENT_PC_NAME_MAX_BYTES 32
807
808 /**
809 * struct iwl_pc_data - program counter details
810 * @pc_name: cpu name
811 * @pc_address: cpu program counter
812 */
813 struct iwl_pc_data {
814 u8 pc_name[IWL_TRANS_CURRENT_PC_NAME_MAX_BYTES];
815 u32 pc_address;
816 };
817
818 /**
819 * struct iwl_trans_debug - transport debug related data
820 *
821 * @n_dest_reg: num of reg_ops in %dbg_dest_tlv
822 * @rec_on: true iff there is a fw debug recording currently active
823 * @dest_tlv: points to the destination TLV for debug
824 * @conf_tlv: array of pointers to configuration TLVs for debug
825 * @trigger_tlv: array of pointers to triggers TLVs for debug
826 * @lmac_error_event_table: addrs of lmacs error tables
827 * @umac_error_event_table: addr of umac error table
828 * @tcm_error_event_table: address(es) of TCM error table(s)
829 * @rcm_error_event_table: address(es) of RCM error table(s)
830 * @error_event_table_tlv_status: bitmap that indicates what error table
831 * pointers were received via TLV. Uses &enum iwl_error_event_table_status
832 * @internal_ini_cfg: internal debug cfg state. Uses &enum iwl_ini_cfg_state
833 * @external_ini_cfg: external debug cfg state. Uses &enum iwl_ini_cfg_state
834 * @fw_mon_cfg: debug buffer allocation configuration
835 * @fw_mon_ini: DRAM buffer fragments per allocation id
836 * @fw_mon: DRAM buffer for firmware monitor
837 * @hw_error: equals true if hw error interrupt was received from the FW
838 * @ini_dest: debug monitor destination uses &enum iwl_fw_ini_buffer_location
839 * @active_regions: active regions
840 * @debug_info_tlv_list: list of debug info TLVs
841 * @time_point: array of debug time points
842 * @periodic_trig_list: periodic triggers list
843 * @domains_bitmap: bitmap of active domains other than &IWL_FW_INI_DOMAIN_ALWAYS_ON
844 * @ucode_preset: preset based on ucode
845 * @dump_file_name_ext: dump file name extension
846 * @dump_file_name_ext_valid: whether the dump file name extension is valid
847 * @num_pc: number of program counters for the cpu
848 * @pc_data: details of the program counter
849 */
850 struct iwl_trans_debug {
851 u8 n_dest_reg;
852 bool rec_on;
853
854 const struct iwl_fw_dbg_dest_tlv_v1 *dest_tlv;
855 const struct iwl_fw_dbg_conf_tlv *conf_tlv[FW_DBG_CONF_MAX];
856 struct iwl_fw_dbg_trigger_tlv * const *trigger_tlv;
857
858 u32 lmac_error_event_table[2];
859 u32 umac_error_event_table;
860 u32 tcm_error_event_table[2];
861 u32 rcm_error_event_table[2];
862 unsigned int error_event_table_tlv_status;
863
864 enum iwl_ini_cfg_state internal_ini_cfg;
865 enum iwl_ini_cfg_state external_ini_cfg;
866
867 struct iwl_fw_ini_allocation_tlv fw_mon_cfg[IWL_FW_INI_ALLOCATION_NUM];
868 struct iwl_fw_mon fw_mon_ini[IWL_FW_INI_ALLOCATION_NUM];
869
870 struct iwl_dram_data fw_mon;
871
872 bool hw_error;
873 enum iwl_fw_ini_buffer_location ini_dest;
874
875 u64 unsupported_region_msk;
876 struct iwl_ucode_tlv *active_regions[IWL_FW_INI_MAX_REGION_ID];
877 struct list_head debug_info_tlv_list;
878 struct iwl_dbg_tlv_time_point_data
879 time_point[IWL_FW_INI_TIME_POINT_NUM];
880 struct list_head periodic_trig_list;
881
882 u32 domains_bitmap;
883 u32 ucode_preset;
884 bool restart_required;
885 u32 last_tp_resetfw;
886 struct iwl_imr_data imr_data;
887 u8 dump_file_name_ext[IWL_FW_INI_MAX_NAME];
888 bool dump_file_name_ext_valid;
889 u32 num_pc;
890 struct iwl_pc_data *pc_data;
891 };
892
893 struct iwl_dma_ptr {
894 dma_addr_t dma;
895 void *addr;
896 size_t size;
897 };
898
899 struct iwl_cmd_meta {
900 /* only for SYNC commands, iff the reply skb is wanted */
901 struct iwl_host_cmd *source;
902 u32 flags;
903 u32 tbs;
904 };
905
906 /*
907 * The FH will write back to the first TB only, so we need to copy some data
908 * into the buffer regardless of whether it should be mapped or not.
909 * This indicates how big the first TB must be to include the scratch buffer
910 * and the assigned PN.
911 * Since the PN is 8 bytes located at offset 12, the minimum is 20.
912 * If we made it bigger, allocations would be bigger and copies slower, so
913 * that's probably not useful.
914 */
915 #define IWL_FIRST_TB_SIZE 20
916 #define IWL_FIRST_TB_SIZE_ALIGN ALIGN(IWL_FIRST_TB_SIZE, 64)
917
918 struct iwl_pcie_txq_entry {
919 void *cmd;
920 struct sk_buff *skb;
921 /* buffer to free after command completes */
922 const void *free_buf;
923 struct iwl_cmd_meta meta;
924 };
925
926 struct iwl_pcie_first_tb_buf {
927 u8 buf[IWL_FIRST_TB_SIZE_ALIGN];
928 };
929
930 /**
931 * struct iwl_txq - Tx Queue for DMA
932 * @q: generic Rx/Tx queue descriptor
933 * @tfds: transmit frame descriptors (DMA memory)
934 * @first_tb_bufs: start of command headers, including scratch buffers, for
935 * the writeback -- this is DMA memory and an array holding one buffer
936 * for each command on the queue
937 * @first_tb_dma: DMA address for the first_tb_bufs start
938 * @entries: transmit entries (driver state)
939 * @lock: queue lock
940 * @stuck_timer: timer that fires if queue gets stuck
941 * @trans: pointer back to transport (for timer)
942 * @need_update: indicates need to update read/write index
943 * @ampdu: true if this queue is an ampdu queue for a specific RA/TID
944 * @wd_timeout: queue watchdog timeout (jiffies) - per queue
945 * @frozen: tx stuck queue timer is frozen
946 * @frozen_expiry_remainder: remember how long until the timer fires
947 * @bc_tbl: byte count table of the queue (relevant only for gen2 transport)
948 * @write_ptr: first empty entry (index), i.e. the host write pointer
949 * @read_ptr: last used entry (index), i.e. the host read pointer
950 * @dma_addr: physical addr for BD's
951 * @n_window: safe queue window
952 * @id: queue id
953 * @low_mark: low watermark, resume queue if free space more than this
954 * @high_mark: high watermark, stop queue if free space less than this
955 *
956 * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
957 * descriptors) and required locking structures.
958 *
959 * Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware
960 * always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256 (unless
961 * there might be HW changes in the future). For the normal TX
962 * queues, n_window, which is the size of the software queue data
963 * is also 256; however, for the command queue, n_window is only
964 * 32 since we don't need so many commands pending. Since the HW
965 * still uses 256 BDs for DMA though, TFD_QUEUE_SIZE_MAX stays 256.
966 * This means that we end up with the following:
967 * HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
968 * SW entries: | 0 | ... | 31 |
969 * where N is a number between 0 and 7. This means that the SW
970 * data is a window overlaid over the HW queue.
971 */
972 struct iwl_txq {
973 void *tfds;
974 struct iwl_pcie_first_tb_buf *first_tb_bufs;
975 dma_addr_t first_tb_dma;
976 struct iwl_pcie_txq_entry *entries;
977 /* lock for syncing changes on the queue */
978 spinlock_t lock;
979 unsigned long frozen_expiry_remainder;
980 struct timer_list stuck_timer;
981 struct iwl_trans *trans;
982 bool need_update;
983 bool frozen;
984 bool ampdu;
985 int block;
986 unsigned long wd_timeout;
987 struct sk_buff_head overflow_q;
988 struct iwl_dma_ptr bc_tbl;
989
990 int write_ptr;
991 int read_ptr;
992 dma_addr_t dma_addr;
993 int n_window;
994 u32 id;
995 int low_mark;
996 int high_mark;
997
998 bool overflow_tx;
999 };
1000
1001 /**
1002 * struct iwl_trans_txqs - transport tx queues data
1003 *
1004 * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
1005 * @page_offs: offset from skb->cb to mac header page pointer
1006 * @dev_cmd_offs: offset from skb->cb to iwl_device_tx_cmd pointer
1007 * @queue_used: bit mask of used queues
1008 * @queue_stopped: bit mask of stopped queues
1009 * @scd_bc_tbls: gen1 pointer to the byte count table of the scheduler
1010 * @queue_alloc_cmd_ver: queue allocation command version
1011 */
1012 struct iwl_trans_txqs {
1013 unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
1014 unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
1015 struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES];
1016 struct dma_pool *bc_pool;
1017 size_t bc_tbl_size;
1018 bool bc_table_dword;
1019 u8 page_offs;
1020 u8 dev_cmd_offs;
1021 struct iwl_tso_hdr_page __percpu *tso_hdr_page;
1022
1023 struct {
1024 u8 fifo;
1025 u8 q_id;
1026 unsigned int wdg_timeout;
1027 } cmd;
1028
1029 struct {
1030 u8 max_tbs;
1031 u16 size;
1032 u8 addr_size;
1033 } tfd;
1034
1035 struct iwl_dma_ptr scd_bc_tbls;
1036
1037 u8 queue_alloc_cmd_ver;
1038 };
1039
1040 /**
1041 * struct iwl_trans - transport common data
1042 *
1043 * @csme_own: true if we couldn't get ownership on the device
1044 * @ops: pointer to iwl_trans_ops
1045 * @op_mode: pointer to the op_mode
1046 * @trans_cfg: the trans-specific configuration part
1047 * @cfg: pointer to the configuration
1048 * @drv: pointer to iwl_drv
1049 * @status: a bit-mask of transport status flags
1050 * @dev: pointer to struct device * that represents the device
1051 * @max_skb_frags: maximum number of fragments an SKB can have when transmitted.
1052 * 0 indicates that frag SKBs (NETIF_F_SG) aren't supported.
1053 * @hw_rf_id: a u32 with the device RF ID
1054 * @hw_crf_id: a u32 with the device CRF ID
1055 * @hw_wfpm_id: a u32 with the device wfpm ID
1056 * @hw_id: a u32 with the ID of the device / sub-device.
1057 * Set during transport allocation.
1058 * @hw_id_str: a string with info about HW ID. Set during transport allocation.
1059 * @hw_rev_step: The mac step of the HW
1060 * @pm_support: set to true in start_hw if link pm is supported
1061 * @ltr_enabled: set to true if the LTR is enabled
1062 * @fail_to_parse_pnvm_image: set to true if pnvm parsing failed
1063 * @failed_to_load_reduce_power_image: set to true if pnvm loading failed
1064 * @wide_cmd_header: true when ucode supports wide command header format
1065 * @wait_command_queue: wait queue for sync commands
1066 * @num_rx_queues: number of RX queues allocated by the transport;
1067 * the transport must set this before calling iwl_drv_start()
1068 * @iml_len: the length of the image loader
1069 * @iml: a pointer to the image loader itself
1070 * @dev_cmd_pool: pool for Tx cmd allocation - for internal use only.
1071 * The user should use iwl_trans_{alloc,free}_tx_cmd.
1072 * @rx_mpdu_cmd: MPDU RX command ID, must be assigned by opmode before
1073 * starting the firmware, used for tracing
1074 * @rx_mpdu_cmd_hdr_size: used for tracing, amount of data before the
1075 * start of the 802.11 header in the @rx_mpdu_cmd
1076 * @dflt_pwr_limit: default power limit fetched from the platform (ACPI)
1077 * @system_pm_mode: the system-wide power management mode in use.
1078 * This mode is set dynamically, depending on the WoWLAN values
1079 * configured from the userspace at runtime.
1080 * @txqs: transport tx queues data.
1081 * @mbx_addr_0_step: step address data 0
1082 * @mbx_addr_1_step: step address data 1
1083 * @pcie_link_speed: current PCIe link speed (%PCI_EXP_LNKSTA_CLS_*),
1084 * only valid for discrete (not integrated) NICs
1085 */
1086 struct iwl_trans {
1087 bool csme_own;
1088 const struct iwl_trans_ops *ops;
1089 struct iwl_op_mode *op_mode;
1090 const struct iwl_cfg_trans_params *trans_cfg;
1091 const struct iwl_cfg *cfg;
1092 struct iwl_drv *drv;
1093 enum iwl_trans_state state;
1094 unsigned long status;
1095
1096 struct device *dev;
1097 u32 max_skb_frags;
1098 u32 hw_rev;
1099 u32 hw_rev_step;
1100 u32 hw_rf_id;
1101 u32 hw_crf_id;
1102 u32 hw_cnv_id;
1103 u32 hw_wfpm_id;
1104 u32 hw_id;
1105 char hw_id_str[52];
1106 u32 sku_id[3];
1107
1108 u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size;
1109
1110 bool pm_support;
1111 bool ltr_enabled;
1112 u8 pnvm_loaded:1;
1113 u8 fail_to_parse_pnvm_image:1;
1114 u8 reduce_power_loaded:1;
1115 u8 failed_to_load_reduce_power_image:1;
1116
1117 const struct iwl_hcmd_arr *command_groups;
1118 int command_groups_size;
1119 bool wide_cmd_header;
1120
1121 wait_queue_head_t wait_command_queue;
1122 u8 num_rx_queues;
1123
1124 size_t iml_len;
1125 u8 *iml;
1126
1127 /* The following fields are internal only */
1128 struct kmem_cache *dev_cmd_pool;
1129 char dev_cmd_pool_name[50];
1130
1131 struct dentry *dbgfs_dir;
1132
1133 #ifdef CONFIG_LOCKDEP
1134 struct lockdep_map sync_cmd_lockdep_map;
1135 #endif
1136
1137 struct iwl_trans_debug dbg;
1138 struct iwl_self_init_dram init_dram;
1139
1140 enum iwl_plat_pm_mode system_pm_mode;
1141
1142 const char *name;
1143 struct iwl_trans_txqs txqs;
1144 u32 mbx_addr_0_step;
1145 u32 mbx_addr_1_step;
1146
1147 u8 pcie_link_speed;
1148
1149 /* pointer to trans specific struct */
1150 /* Ensure that this pointer will always be aligned to sizeof pointer */
1151 char trans_specific[] __aligned(sizeof(void *));
1152 };
1153
1154 const char *iwl_get_cmd_string(struct iwl_trans *trans, u32 id);
1155 int iwl_cmd_groups_verify_sorted(const struct iwl_trans_config *trans);
1156
1157 static inline void iwl_trans_configure(struct iwl_trans *trans,
1158 const struct iwl_trans_config *trans_cfg)
1159 {
1160 trans->op_mode = trans_cfg->op_mode;
1161
1162 trans->ops->configure(trans, trans_cfg);
1163 WARN_ON(iwl_cmd_groups_verify_sorted(trans_cfg));
1164 }
1165
1166 static inline int iwl_trans_start_hw(struct iwl_trans *trans)
1167 {
1168 might_sleep();
1169
1170 return trans->ops->start_hw(trans);
1171 }
1172
1173 static inline void iwl_trans_op_mode_leave(struct iwl_trans *trans)
1174 {
1175 might_sleep();
1176
1177 if (trans->ops->op_mode_leave)
1178 trans->ops->op_mode_leave(trans);
1179
1180 trans->op_mode = NULL;
1181
1182 trans->state = IWL_TRANS_NO_FW;
1183 }
1184
1185 static inline void iwl_trans_fw_alive(struct iwl_trans *trans, u32 scd_addr)
1186 {
1187 might_sleep();
1188
1189 trans->state = IWL_TRANS_FW_ALIVE;
1190
1191 trans->ops->fw_alive(trans, scd_addr);
1192 }
1193
1194 static inline int iwl_trans_start_fw(struct iwl_trans *trans,
1195 const struct fw_img *fw,
1196 bool run_in_rfkill)
1197 {
1198 int ret;
1199
1200 might_sleep();
1201
1202 WARN_ON_ONCE(!trans->rx_mpdu_cmd);
1203
1204 clear_bit(STATUS_FW_ERROR, &trans->status);
1205 ret = trans->ops->start_fw(trans, fw, run_in_rfkill);
1206 if (ret == 0)
1207 trans->state = IWL_TRANS_FW_STARTED;
1208
1209 return ret;
1210 }
1211
1212 static inline void iwl_trans_stop_device(struct iwl_trans *trans)
1213 {
1214 might_sleep();
1215
1216 trans->ops->stop_device(trans);
1217
1218 trans->state = IWL_TRANS_NO_FW;
1219 }
1220
1221 static inline int iwl_trans_d3_suspend(struct iwl_trans *trans, bool test,
1222 bool reset)
1223 {
1224 might_sleep();
1225 if (!trans->ops->d3_suspend)
1226 return -EOPNOTSUPP;
1227
1228 return trans->ops->d3_suspend(trans, test, reset);
1229 }
1230
1231 static inline int iwl_trans_d3_resume(struct iwl_trans *trans,
1232 enum iwl_d3_status *status,
1233 bool test, bool reset)
1234 {
1235 might_sleep();
1236 if (!trans->ops->d3_resume)
1237 return -EOPNOTSUPP;
1238
1239 return trans->ops->d3_resume(trans, status, test, reset);
1240 }
1241
1242 static inline struct iwl_trans_dump_data *
1243 iwl_trans_dump_data(struct iwl_trans *trans, u32 dump_mask,
1244 const struct iwl_dump_sanitize_ops *sanitize_ops,
1245 void *sanitize_ctx)
1246 {
1247 if (!trans->ops->dump_data)
1248 return NULL;
1249 return trans->ops->dump_data(trans, dump_mask,
1250 sanitize_ops, sanitize_ctx);
1251 }
1252
1253 static inline struct iwl_device_tx_cmd *
1254 iwl_trans_alloc_tx_cmd(struct iwl_trans *trans)
1255 {
1256 return kmem_cache_zalloc(trans->dev_cmd_pool, GFP_ATOMIC);
1257 }
1258
1259 int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
1260
1261 static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
1262 struct iwl_device_tx_cmd *dev_cmd)
1263 {
1264 kmem_cache_free(trans->dev_cmd_pool, dev_cmd);
1265 }
1266
1267 static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
1268 struct iwl_device_tx_cmd *dev_cmd, int queue)
1269 {
1270 if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
1271 return -EIO;
1272
1273 if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
1274 IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
1275 return -EIO;
1276 }
1277
1278 return trans->ops->tx(trans, skb, dev_cmd, queue);
1279 }
1280
1281 static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue,
1282 int ssn, struct sk_buff_head *skbs)
1283 {
1284 if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
1285 IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
1286 return;
1287 }
1288
1289 trans->ops->reclaim(trans, queue, ssn, skbs);
1290 }
1291
1292 static inline void iwl_trans_set_q_ptrs(struct iwl_trans *trans, int queue,
1293 int ptr)
1294 {
1295 if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
1296 IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
1297 return;
1298 }
1299
1300 trans->ops->set_q_ptrs(trans, queue, ptr);
1301 }
1302
1303 static inline void iwl_trans_txq_disable(struct iwl_trans *trans, int queue,
1304 bool configure_scd)
1305 {
1306 trans->ops->txq_disable(trans, queue, configure_scd);
1307 }
1308
1309 static inline bool
1310 iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn,
1311 const struct iwl_trans_txq_scd_cfg *cfg,
1312 unsigned int queue_wdg_timeout)
1313 {
1314 might_sleep();
1315
1316 if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
1317 IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
1318 return false;
1319 }
1320
1321 return trans->ops->txq_enable(trans, queue, ssn,
1322 cfg, queue_wdg_timeout);
1323 }
1324
1325 static inline int
1326 iwl_trans_get_rxq_dma_data(struct iwl_trans *trans, int queue,
1327 struct iwl_trans_rxq_dma_data *data)
1328 {
1329 if (WARN_ON_ONCE(!trans->ops->rxq_dma_data))
1330 return -ENOTSUPP;
1331
1332 return trans->ops->rxq_dma_data(trans, queue, data);
1333 }
1334
1335 static inline void
1336 iwl_trans_txq_free(struct iwl_trans *trans, int queue)
1337 {
1338 if (WARN_ON_ONCE(!trans->ops->txq_free))
1339 return;
1340
1341 trans->ops->txq_free(trans, queue);
1342 }
1343
1344 static inline int
1345 iwl_trans_txq_alloc(struct iwl_trans *trans,
1346 u32 flags, u32 sta_mask, u8 tid,
1347 int size, unsigned int wdg_timeout)
1348 {
1349 might_sleep();
1350
1351 if (WARN_ON_ONCE(!trans->ops->txq_alloc))
1352 return -ENOTSUPP;
1353
1354 if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
1355 IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
1356 return -EIO;
1357 }
1358
1359 return trans->ops->txq_alloc(trans, flags, sta_mask, tid,
1360 size, wdg_timeout);
1361 }
1362
1363 static inline void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans,
1364 int queue, bool shared_mode)
1365 {
1366 if (trans->ops->txq_set_shared_mode)
1367 trans->ops->txq_set_shared_mode(trans, queue, shared_mode);
1368 }
1369
1370 static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
1371 int fifo, int sta_id, int tid,
1372 int frame_limit, u16 ssn,
1373 unsigned int queue_wdg_timeout)
1374 {
1375 struct iwl_trans_txq_scd_cfg cfg = {
1376 .fifo = fifo,
1377 .sta_id = sta_id,
1378 .tid = tid,
1379 .frame_limit = frame_limit,
1380 .aggregate = sta_id >= 0,
1381 };
1382
1383 iwl_trans_txq_enable_cfg(trans, queue, ssn, &cfg, queue_wdg_timeout);
1384 }
1385
1386 static inline
1387 void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue, int fifo,
1388 unsigned int queue_wdg_timeout)
1389 {
1390 struct iwl_trans_txq_scd_cfg cfg = {
1391 .fifo = fifo,
1392 .sta_id = -1,
1393 .tid = IWL_MAX_TID_COUNT,
1394 .frame_limit = IWL_FRAME_LIMIT,
1395 .aggregate = false,
1396 };
1397
1398 iwl_trans_txq_enable_cfg(trans, queue, 0, &cfg, queue_wdg_timeout);
1399 }
1400
1401 static inline void iwl_trans_freeze_txq_timer(struct iwl_trans *trans,
1402 unsigned long txqs,
1403 bool freeze)
1404 {
1405 if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
1406 IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
1407 return;
1408 }
1409
1410 if (trans->ops->freeze_txq_timer)
1411 trans->ops->freeze_txq_timer(trans, txqs, freeze);
1412 }
1413
1414 static inline void iwl_trans_block_txq_ptrs(struct iwl_trans *trans,
1415 bool block)
1416 {
1417 if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
1418 IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
1419 return;
1420 }
1421
1422 if (trans->ops->block_txq_ptrs)
1423 trans->ops->block_txq_ptrs(trans, block);
1424 }
1425
1426 static inline int iwl_trans_wait_tx_queues_empty(struct iwl_trans *trans,
1427 u32 txqs)
1428 {
1429 if (WARN_ON_ONCE(!trans->ops->wait_tx_queues_empty))
1430 return -ENOTSUPP;
1431
1432 /* No need to wait if the firmware is not alive */
1433 if (trans->state != IWL_TRANS_FW_ALIVE) {
1434 IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
1435 return -EIO;
1436 }
1437
1438 return trans->ops->wait_tx_queues_empty(trans, txqs);
1439 }
1440
1441 static inline int iwl_trans_wait_txq_empty(struct iwl_trans *trans, int queue)
1442 {
1443 if (WARN_ON_ONCE(!trans->ops->wait_txq_empty))
1444 return -ENOTSUPP;
1445
1446 if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
1447 IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
1448 return -EIO;
1449 }
1450
1451 return trans->ops->wait_txq_empty(trans, queue);
1452 }
1453
1454 static inline void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val)
1455 {
1456 trans->ops->write8(trans, ofs, val);
1457 }
1458
1459 static inline void iwl_trans_write32(struct iwl_trans *trans, u32 ofs, u32 val)
1460 {
1461 trans->ops->write32(trans, ofs, val);
1462 }
1463
1464 static inline u32 iwl_trans_read32(struct iwl_trans *trans, u32 ofs)
1465 {
1466 return trans->ops->read32(trans, ofs);
1467 }
1468
1469 static inline u32 iwl_trans_read_prph(struct iwl_trans *trans, u32 ofs)
1470 {
1471 return trans->ops->read_prph(trans, ofs);
1472 }
1473
1474 static inline void iwl_trans_write_prph(struct iwl_trans *trans, u32 ofs,
1475 u32 val)
1476 {
1477 return trans->ops->write_prph(trans, ofs, val);
1478 }
1479
1480 static inline int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr,
1481 void *buf, int dwords)
1482 {
1483 return trans->ops->read_mem(trans, addr, buf, dwords);
1484 }
1485
1486 #define iwl_trans_read_mem_bytes(trans, addr, buf, bufsize) \
1487 do { \
1488 if (__builtin_constant_p(bufsize)) \
1489 BUILD_BUG_ON((bufsize) % sizeof(u32)); \
1490 iwl_trans_read_mem(trans, addr, buf, (bufsize) / sizeof(u32));\
1491 } while (0)
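
/*
 * Illustrative sketch (compiled out): reading a dword-aligned blob out
 * of device SRAM with the macro above; function name and address are
 * placeholders.
 */
#if 0
static void example_read_blob(struct iwl_trans *trans)
{
	__le32 blob[4];

	/* bufsize must be a multiple of sizeof(u32) */
	iwl_trans_read_mem_bytes(trans, 0x800000, blob, sizeof(blob));
}
#endif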
1492
1493 static inline int iwl_trans_write_imr_mem(struct iwl_trans *trans,
1494 u32 dst_addr, u64 src_addr,
1495 u32 byte_cnt)
1496 {
1497 if (trans->ops->imr_dma_data)
1498 return trans->ops->imr_dma_data(trans, dst_addr, src_addr, byte_cnt);
1499 return 0;
1500 }
1501
1502 static inline u32 iwl_trans_read_mem32(struct iwl_trans *trans, u32 addr)
1503 {
1504 u32 value;
1505
1506 if (WARN_ON(iwl_trans_read_mem(trans, addr, &value, 1)))
1507 return 0xa5a5a5a5;
1508
1509 return value;
1510 }
1511
1512 static inline int iwl_trans_write_mem(struct iwl_trans *trans, u32 addr,
1513 const void *buf, int dwords)
1514 {
1515 return trans->ops->write_mem(trans, addr, buf, dwords);
1516 }
1517
1518 static inline u32 iwl_trans_write_mem32(struct iwl_trans *trans, u32 addr,
1519 u32 val)
1520 {
1521 return iwl_trans_write_mem(trans, addr, &val, 1);
1522 }
1523
1524 static inline void iwl_trans_set_pmi(struct iwl_trans *trans, bool state)
1525 {
1526 if (trans->ops->set_pmi)
1527 trans->ops->set_pmi(trans, state);
1528 }
1529
1530 static inline int iwl_trans_sw_reset(struct iwl_trans *trans,
1531 bool retake_ownership)
1532 {
1533 if (trans->ops->sw_reset)
1534 return trans->ops->sw_reset(trans, retake_ownership);
1535 return 0;
1536 }
1537
1538 static inline void
1539 iwl_trans_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value)
1540 {
1541 trans->ops->set_bits_mask(trans, reg, mask, value);
1542 }
1543
1544 #define iwl_trans_grab_nic_access(trans) \
1545 __cond_lock(nic_access, \
1546 likely((trans)->ops->grab_nic_access(trans)))
1547
1548 static inline void __releases(nic_access)
1549 iwl_trans_release_nic_access(struct iwl_trans *trans)
1550 {
1551 trans->ops->release_nic_access(trans);
1552 __release(nic_access);
1553 }
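
/*
 * Illustrative sketch (compiled out): the canonical grab/release
 * pattern; no sleeping is allowed between the two calls. The function
 * name and register offset are placeholders.
 */
#if 0
static u32 example_read_reg_safe(struct iwl_trans *trans, u32 ofs)
{
	u32 val = 0;

	if (iwl_trans_grab_nic_access(trans)) {
		val = iwl_trans_read32(trans, ofs);
		iwl_trans_release_nic_access(trans);
	}

	return val;
}
#endif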
1554
1555 static inline void iwl_trans_fw_error(struct iwl_trans *trans, bool sync)
1556 {
1557 if (WARN_ON_ONCE(!trans->op_mode))
1558 return;
1559
1560 /* prevent double restarts due to the same erroneous FW */
1561 if (!test_and_set_bit(STATUS_FW_ERROR, &trans->status)) {
1562 iwl_op_mode_nic_error(trans->op_mode, sync);
1563 trans->state = IWL_TRANS_NO_FW;
1564 }
1565 }
1566
1567 static inline bool iwl_trans_fw_running(struct iwl_trans *trans)
1568 {
1569 return trans->state == IWL_TRANS_FW_ALIVE;
1570 }
1571
1572 static inline void iwl_trans_sync_nmi(struct iwl_trans *trans)
1573 {
1574 if (trans->ops->sync_nmi)
1575 trans->ops->sync_nmi(trans);
1576 }
1577
1578 void iwl_trans_sync_nmi_with_addr(struct iwl_trans *trans, u32 inta_addr,
1579 u32 sw_err_bit);
1580
1581 static inline int iwl_trans_load_pnvm(struct iwl_trans *trans,
1582 const struct iwl_pnvm_image *pnvm_data,
1583 const struct iwl_ucode_capabilities *capa)
1584 {
1585 return trans->ops->load_pnvm(trans, pnvm_data, capa);
1586 }
1587
1588 static inline void iwl_trans_set_pnvm(struct iwl_trans *trans,
1589 const struct iwl_ucode_capabilities *capa)
1590 {
1591 if (trans->ops->set_pnvm)
1592 trans->ops->set_pnvm(trans, capa);
1593 }
1594
1595 static inline int iwl_trans_load_reduce_power
1596 (struct iwl_trans *trans,
1597 const struct iwl_pnvm_image *payloads,
1598 const struct iwl_ucode_capabilities *capa)
1599 {
1600 return trans->ops->load_reduce_power(trans, payloads, capa);
1601 }
1602
1603 static inline void
1604 iwl_trans_set_reduce_power(struct iwl_trans *trans,
1605 const struct iwl_ucode_capabilities *capa)
1606 {
1607 if (trans->ops->set_reduce_power)
1608 trans->ops->set_reduce_power(trans, capa);
1609 }
1610
1611 static inline bool iwl_trans_dbg_ini_valid(struct iwl_trans *trans)
1612 {
1613 return trans->dbg.internal_ini_cfg != IWL_INI_CFG_STATE_NOT_LOADED ||
1614 trans->dbg.external_ini_cfg != IWL_INI_CFG_STATE_NOT_LOADED;
1615 }
1616
1617 static inline void iwl_trans_interrupts(struct iwl_trans *trans, bool enable)
1618 {
1619 if (trans->ops->interrupts)
1620 trans->ops->interrupts(trans, enable);
1621 }
1622
1623 /*****************************************************
1624 * transport helper functions
1625 *****************************************************/
1626 struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
1627 struct device *dev,
1628 const struct iwl_trans_ops *ops,
1629 const struct iwl_cfg_trans_params *cfg_trans);
1630 int iwl_trans_init(struct iwl_trans *trans);
1631 void iwl_trans_free(struct iwl_trans *trans);
1632
1633 static inline bool iwl_trans_is_hw_error_value(u32 val)
1634 {
1635 return ((val & ~0xf) == 0xa5a5a5a0) || ((val & ~0xf) == 0x5a5a5a50);
1636 }
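
/*
 * Illustrative sketch (compiled out): typical use after a direct read,
 * to detect the bus-error patterns (the 0xa5a5a5a5/0x5a5a5a5a families)
 * checked above. Function name is a placeholder.
 */
#if 0
static bool example_mem_read_ok(struct iwl_trans *trans, u32 addr, u32 *val)
{
	*val = iwl_trans_read_mem32(trans, addr);
	return !iwl_trans_is_hw_error_value(*val);
}
#endif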
1637
1638 /*****************************************************
1639 * driver (transport) register/unregister functions
1640 ******************************************************/
1641 int __must_check iwl_pci_register_driver(void);
1642 void iwl_pci_unregister_driver(void);
1643 void iwl_trans_pcie_remove(struct iwl_trans *trans, bool rescan);
1644
1645 #endif /* __iwl_trans_h__ */
1646