/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
 * Copyright (C) 2005-2014, 2018-2021 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 */
#ifndef __iwl_trans_h__
#define __iwl_trans_h__

#include <linux/ieee80211.h>
#include <linux/mm.h> /* for page_address */
#include <linux/lockdep.h>
#include <linux/kernel.h>

#include "iwl-debug.h"
#include "iwl-config.h"
#include "fw/img.h"
#include "iwl-op-mode.h"
#include <linux/firmware.h>
#include "fw/api/cmdhdr.h"
#include "fw/api/txq.h"
#include "fw/api/dbg-tlv.h"
#include "iwl-dbg-tlv.h"

/**
 * DOC: Transport layer - what is it?
 *
 * The transport layer is the layer that deals with the HW directly. It
 * provides an abstraction of the underlying HW to the upper layer. The
 * transport layer doesn't provide any policy, algorithm or anything of this
 * kind, but only mechanisms to make the HW do something. It is not completely
 * stateless, but close to it.
 * There is an implementation for each supported bus.
 */

/**
 * DOC: Life cycle of the transport layer
 *
 * The transport layer has a very precise life cycle.
 *
 *	1) A helper function is called during the module initialization and
 *	   registers the bus driver's ops with the transport's alloc function.
 *	2) The bus's probe calls the transport layer's allocation function.
 *	   Of course, this function is bus specific.
 *	3) This allocation function will spawn the upper layer which will
 *	   register to mac80211.
 *
 *	4) At some point (e.g. mac80211's start call), the op_mode will call
 *	   the following sequence:
 *	   start_hw
 *	   start_fw
 *
 *	5) Then when finished (or reset):
 *	   stop_device
 *
 *	6) Eventually, the free function will be called.
 *
 * (The op_mode side of steps 4) and 5) is sketched right below.)
 */
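
/*
 * Illustrative sketch (not part of the API): the op_mode side of steps
 * 4) and 5), using the inline wrappers defined later in this file,
 * roughly looks like:
 *
 *	ret = iwl_trans_start_hw(trans);
 *	if (!ret)
 *		ret = iwl_trans_start_fw(trans, fw, false);
 *	...
 *	iwl_trans_stop_device(trans);
 */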

#define IWL_TRANS_FW_DBG_DOMAIN(trans)	IWL_FW_INI_DOMAIN_ALWAYS_ON

#define FH_RSCSR_FRAME_SIZE_MSK	0x00003FFF	/* bits 0-13 */
#define FH_RSCSR_FRAME_INVALID	0x55550000
#define FH_RSCSR_FRAME_ALIGN	0x40
#define FH_RSCSR_RPA_EN		BIT(25)
#define FH_RSCSR_RADA_EN	BIT(26)
#define FH_RSCSR_RXQ_POS	16
#define FH_RSCSR_RXQ_MASK	0x3F0000

struct iwl_rx_packet {
	/*
	 * The first 4 bytes of the RX frame header contain both the RX frame
	 * size and some flags.
	 * Bit fields:
	 * 31:    flag flush RB request
	 * 30:    flag ignore TC (terminal counter) request
	 * 29:    flag fast IRQ request
	 * 28-27: Reserved
	 * 26:    RADA enabled
	 * 25:    Offload enabled
	 * 24:    RPF enabled
	 * 23:    RSS enabled
	 * 22:    Checksum enabled
	 * 21-16: RX queue
	 * 15-14: Reserved
	 * 13-00: RX frame size
	 */
	__le32 len_n_flags;
	struct iwl_cmd_header hdr;
	u8 data[];
} __packed;

static inline u32 iwl_rx_packet_len(const struct iwl_rx_packet *pkt)
{
	return le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
}

static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt)
{
	return iwl_rx_packet_len(pkt) - sizeof(pkt->hdr);
}
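
/*
 * Example (illustrative sketch, not from the driver): a notification
 * handler normally bounds its parsing by the payload length before
 * touching pkt->data; struct my_notif stands in for a real notification
 * layout:
 *
 *	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 *	struct my_notif *notif;
 *
 *	if (iwl_rx_packet_payload_len(pkt) < sizeof(*notif))
 *		return;
 *	notif = (void *)pkt->data;
 */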

/**
 * enum CMD_MODE - how to send the host commands?
 *
 * @CMD_ASYNC: Return right away and don't wait for the response
 * @CMD_WANT_SKB: Not valid with CMD_ASYNC. The caller needs the buffer of
 *	the response. The caller needs to call iwl_free_resp when done.
 * @CMD_SEND_IN_RFKILL: Send the command even if the NIC is in RF-kill.
 * @CMD_WANT_ASYNC_CALLBACK: the op_mode's async callback function must be
 *	called after this command completes. Valid only with CMD_ASYNC.
 * @CMD_SEND_IN_D3: Allow the command to be sent in D3 mode, relevant to
 *	SUSPEND and RESUME commands. We are in D3 mode when we set
 *	trans->system_pm_mode to IWL_PLAT_PM_MODE_D3.
 */
enum CMD_MODE {
	CMD_ASYNC		= BIT(0),
	CMD_WANT_SKB		= BIT(1),
	CMD_SEND_IN_RFKILL	= BIT(2),
	CMD_WANT_ASYNC_CALLBACK	= BIT(3),
	CMD_SEND_IN_D3		= BIT(4),
};

#define DEF_CMD_PAYLOAD_SIZE 320

/**
 * struct iwl_device_cmd
 *
 * For allocation of the command and tx queues, this establishes the overall
 * size of the largest command we send to uCode, except for commands that
 * aren't fully copied and use other TFD space.
 */
struct iwl_device_cmd {
	union {
		struct {
			struct iwl_cmd_header hdr;	/* uCode API */
			u8 payload[DEF_CMD_PAYLOAD_SIZE];
		};
		struct {
			struct iwl_cmd_header_wide hdr_wide;
			u8 payload_wide[DEF_CMD_PAYLOAD_SIZE -
					sizeof(struct iwl_cmd_header_wide) +
					sizeof(struct iwl_cmd_header)];
		};
	};
} __packed;
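
/*
 * Note (added for clarity): the wide payload above is shrunk by the size
 * difference between the two headers, so both union members have the same
 * total size, DEF_CMD_PAYLOAD_SIZE plus sizeof(struct iwl_cmd_header).
 */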

/**
 * struct iwl_device_tx_cmd - buffer for TX command
 * @hdr: the header
 * @payload: the payload placeholder
 *
 * The actual structure is sized dynamically according to need.
 */
struct iwl_device_tx_cmd {
	struct iwl_cmd_header hdr;
	u8 payload[];
} __packed;

#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))

/*
 * number of transfer buffers (fragments) per transmit frame descriptor;
 * this is just the driver's idea, the hardware supports 20
 */
#define IWL_MAX_CMD_TBS_PER_TFD	2

/* We need 2 entries for the TX command and header, and another one might
 * be needed for potential data in the SKB's head. The remaining ones can
 * be used for frags.
 */
#define IWL_TRANS_MAX_FRAGS(trans) ((trans)->txqs.tfd.max_tbs - 3)

/**
 * enum iwl_hcmd_dataflag - flag for each one of the chunks of the command
 *
 * @IWL_HCMD_DFL_NOCOPY: By default, the command is copied to the host command's
 *	ring. The transport layer doesn't map the command's buffer to DMA, but
 *	rather copies it to a previously allocated DMA buffer. This flag tells
 *	the transport layer not to copy the command, but to map the existing
 *	buffer (that is passed in) instead. This saves the memcpy and allows
 *	commands that are bigger than the fixed buffer to be submitted.
 *	Note that a TFD entry after a NOCOPY one cannot be a normal copied one.
 * @IWL_HCMD_DFL_DUP: Only valid without NOCOPY, duplicate the memory for this
 *	chunk internally and free it again after the command completes. This
 *	can (currently) be used only once per command.
 *	Note that a TFD entry after a DUP one cannot be a normal copied one.
 */
enum iwl_hcmd_dataflag {
	IWL_HCMD_DFL_NOCOPY	= BIT(0),
	IWL_HCMD_DFL_DUP	= BIT(1),
};

enum iwl_error_event_table_status {
	IWL_ERROR_EVENT_TABLE_LMAC1 = BIT(0),
	IWL_ERROR_EVENT_TABLE_LMAC2 = BIT(1),
	IWL_ERROR_EVENT_TABLE_UMAC = BIT(2),
};

/**
 * struct iwl_host_cmd - Host command to the uCode
 *
 * @data: array of chunks that composes the data of the host command
 * @resp_pkt: response packet, if %CMD_WANT_SKB was set
 * @_rx_page_order: (internally used to free response packet)
 * @_rx_page_addr: (internally used to free response packet)
 * @flags: can be CMD_*
 * @len: array of the lengths of the chunks in data
 * @dataflags: IWL_HCMD_DFL_*
 * @id: command id of the host command, for wide commands encoding the
 *	version and group as well
 */
struct iwl_host_cmd {
	const void *data[IWL_MAX_CMD_TBS_PER_TFD];
	struct iwl_rx_packet *resp_pkt;
	unsigned long _rx_page_addr;
	u32 _rx_page_order;

	u32 flags;
	u32 id;
	u16 len[IWL_MAX_CMD_TBS_PER_TFD];
	u8 dataflags[IWL_MAX_CMD_TBS_PER_TFD];
};

static inline void iwl_free_resp(struct iwl_host_cmd *cmd)
{
	free_pages(cmd->_rx_page_addr, cmd->_rx_page_order);
}
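
/*
 * Example (illustrative sketch, not from the driver): a synchronous host
 * command round trip; MY_CMD_ID, struct my_cmd, big_buf and big_buf_len
 * are hypothetical stand-ins, and the second chunk shows an
 * IWL_HCMD_DFL_NOCOPY mapping:
 *
 *	struct my_cmd cmd_data = {};
 *	struct iwl_host_cmd hcmd = {
 *		.id = MY_CMD_ID,
 *		.flags = CMD_WANT_SKB,
 *		.data = { &cmd_data, big_buf, },
 *		.len = { sizeof(cmd_data), big_buf_len, },
 *		.dataflags = { 0, IWL_HCMD_DFL_NOCOPY, },
 *	};
 *	int ret = iwl_trans_send_cmd(trans, &hcmd);
 *
 *	if (!ret) {
 *		... parse hcmd.resp_pkt ...
 *		iwl_free_resp(&hcmd);
 *	}
 */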

struct iwl_rx_cmd_buffer {
	struct page *_page;
	int _offset;
	bool _page_stolen;
	u32 _rx_page_order;
	unsigned int truesize;
};

static inline void *rxb_addr(struct iwl_rx_cmd_buffer *r)
{
	return (void *)((unsigned long)page_address(r->_page) + r->_offset);
}

static inline int rxb_offset(struct iwl_rx_cmd_buffer *r)
{
	return r->_offset;
}

static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r)
{
	r->_page_stolen = true;
	get_page(r->_page);
	return r->_page;
}

static inline void iwl_free_rxb(struct iwl_rx_cmd_buffer *r)
{
	__free_pages(r->_page, r->_rx_page_order);
}
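
/*
 * Usage note (sketch, not from the driver): an RX handler that needs the
 * buffer beyond the callback takes its own page reference, e.g. to attach
 * the page to an skb; _page_stolen then tells the buffer's owner that a
 * reference was kept (hdrlen and len are hypothetical here):
 *
 *	skb_add_rx_frag(skb, 0, rxb_steal_page(rxb),
 *			rxb_offset(rxb) + hdrlen, len, rxb->truesize);
 */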

#define MAX_NO_RECLAIM_CMDS	6

#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))

/*
 * Maximum number of HW queues the transport layer
 * currently supports
 */
#define IWL_MAX_HW_QUEUES		32
#define IWL_MAX_TVQM_QUEUES		512

#define IWL_MAX_TID_COUNT	8
#define IWL_MGMT_TID		15
#define IWL_FRAME_LIMIT	64
#define IWL_MAX_RX_HW_QUEUES	16
#define IWL_9000_MAX_RX_HW_QUEUES	6

/**
 * enum iwl_d3_status - WoWLAN image/device status
 * @IWL_D3_STATUS_ALIVE: firmware is still running after resume
 * @IWL_D3_STATUS_RESET: device was reset while suspended
 */
enum iwl_d3_status {
	IWL_D3_STATUS_ALIVE,
	IWL_D3_STATUS_RESET,
};

/**
 * enum iwl_trans_status - transport status flags
 * @STATUS_SYNC_HCMD_ACTIVE: a SYNC command is being processed
 * @STATUS_DEVICE_ENABLED: APM is enabled
 * @STATUS_TPOWER_PMI: the device might be asleep (need to wake it up)
 * @STATUS_INT_ENABLED: interrupts are enabled
 * @STATUS_RFKILL_HW: the actual HW state of the RF-kill switch
 * @STATUS_RFKILL_OPMODE: RF-kill state reported to opmode
 * @STATUS_FW_ERROR: the fw is in error state
 * @STATUS_TRANS_GOING_IDLE: shutting down the trans, only special commands
 *	are sent
 * @STATUS_TRANS_IDLE: the trans is idle - general commands are not to be sent
 * @STATUS_TRANS_DEAD: trans is dead - avoid any read/write operation
 */
enum iwl_trans_status {
	STATUS_SYNC_HCMD_ACTIVE,
	STATUS_DEVICE_ENABLED,
	STATUS_TPOWER_PMI,
	STATUS_INT_ENABLED,
	STATUS_RFKILL_HW,
	STATUS_RFKILL_OPMODE,
	STATUS_FW_ERROR,
	STATUS_TRANS_GOING_IDLE,
	STATUS_TRANS_IDLE,
	STATUS_TRANS_DEAD,
};

static inline int
iwl_trans_get_rb_size_order(enum iwl_amsdu_size rb_size)
{
	switch (rb_size) {
	case IWL_AMSDU_2K:
		return get_order(2 * 1024);
	case IWL_AMSDU_4K:
		return get_order(4 * 1024);
	case IWL_AMSDU_8K:
		return get_order(8 * 1024);
	case IWL_AMSDU_12K:
		return get_order(16 * 1024);
	default:
		WARN_ON(1);
		return -1;
	}
}

static inline int
iwl_trans_get_rb_size(enum iwl_amsdu_size rb_size)
{
	switch (rb_size) {
	case IWL_AMSDU_2K:
		return 2 * 1024;
	case IWL_AMSDU_4K:
		return 4 * 1024;
	case IWL_AMSDU_8K:
		return 8 * 1024;
	case IWL_AMSDU_12K:
		return 16 * 1024;
	default:
		WARN_ON(1);
		return 0;
	}
}
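
/*
 * Note (added for clarity): IWL_AMSDU_12K maps to a 16 KiB buffer in both
 * helpers above, since page allocations round up to a power of two anyway;
 * with 4 KiB pages, get_order(16 * 1024) == 2.
 */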

struct iwl_hcmd_names {
	u8 cmd_id;
	const char *const cmd_name;
};

#define HCMD_NAME(x)	\
	{ .cmd_id = x, .cmd_name = #x }

struct iwl_hcmd_arr {
	const struct iwl_hcmd_names *arr;
	int size;
};

#define HCMD_ARR(x)	\
	{ .arr = x, .size = ARRAY_SIZE(x) }
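
/*
 * Example (illustrative sketch): op modes use these helpers to build the
 * command-name tables passed in via struct iwl_trans_config below; each
 * per-group array must be sorted by cmd_id (see
 * iwl_cmd_groups_verify_sorted()). MY_FIRST_CMD and MY_SECOND_CMD are
 * hypothetical command ids:
 *
 *	static const struct iwl_hcmd_names my_legacy_names[] = {
 *		HCMD_NAME(MY_FIRST_CMD),
 *		HCMD_NAME(MY_SECOND_CMD),
 *	};
 *
 *	static const struct iwl_hcmd_arr my_groups[] = {
 *		[0x0] = HCMD_ARR(my_legacy_names),
 *	};
 */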

/**
 * struct iwl_trans_config - transport configuration
 *
 * @op_mode: pointer to the upper layer.
 * @cmd_queue: the index of the command queue.
 *	Must be set before start_fw.
 * @cmd_fifo: the fifo for host commands
 * @cmd_q_wdg_timeout: the timeout of the watchdog timer for the command queue.
 * @no_reclaim_cmds: Some devices erroneously don't set the
 *	SEQ_RX_FRAME bit on some notifications, this is the
 *	list of such notifications to filter. Max length is
 *	%MAX_NO_RECLAIM_CMDS.
 * @n_no_reclaim_cmds: # of commands in list
 * @rx_buf_size: RX buffer size needed for A-MSDUs;
 *	if unset, 4k will be the RX buffer size
 * @bc_table_dword: set to true if the BC table expects the byte count to be
 *	in DWORD (as opposed to bytes)
 * @scd_set_active: should the transport configure the SCD for HCMD queue
 * @command_groups: array of command groups, each member is an array of the
 *	commands in the group; for debugging only
 * @command_groups_size: number of command groups, to avoid illegal access
 * @cb_data_offs: offset inside skb->cb to store transport data at, must have
 *	space for at least two pointers
 * @fw_reset_handshake: firmware supports reset flow handshake
 */
struct iwl_trans_config {
	struct iwl_op_mode *op_mode;

	u8 cmd_queue;
	u8 cmd_fifo;
	unsigned int cmd_q_wdg_timeout;
	const u8 *no_reclaim_cmds;
	unsigned int n_no_reclaim_cmds;

	enum iwl_amsdu_size rx_buf_size;
	bool bc_table_dword;
	bool scd_set_active;
	const struct iwl_hcmd_arr *command_groups;
	int command_groups_size;

	u8 cb_data_offs;
	bool fw_reset_handshake;
};
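
/*
 * Illustrative sketch (values are made up): the op_mode fills this in and
 * hands it to iwl_trans_configure() before starting the firmware:
 *
 *	struct iwl_trans_config trans_cfg = {
 *		.op_mode = op_mode,
 *		.cmd_queue = 0,
 *		.cmd_fifo = 7,
 *		.rx_buf_size = IWL_AMSDU_4K,
 *		.command_groups = my_groups,
 *		.command_groups_size = ARRAY_SIZE(my_groups),
 *	};
 *
 *	iwl_trans_configure(trans, &trans_cfg);
 */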

struct iwl_trans_dump_data {
	u32 len;
	u8 data[];
};

struct iwl_trans;

struct iwl_trans_txq_scd_cfg {
	u8 fifo;
	u8 sta_id;
	u8 tid;
	bool aggregate;
	int frame_limit;
};

/**
 * struct iwl_trans_rxq_dma_data - RX queue DMA data
 * @fr_bd_cb: DMA address of free BD cyclic buffer
 * @fr_bd_wid: Initial write index of the free BD cyclic buffer
 * @urbd_stts_wrptr: DMA address of urbd_stts_wrptr
 * @ur_bd_cb: DMA address of used BD cyclic buffer
 */
struct iwl_trans_rxq_dma_data {
	u64 fr_bd_cb;
	u32 fr_bd_wid;
	u64 urbd_stts_wrptr;
	u64 ur_bd_cb;
};

/**
 * struct iwl_trans_ops - transport specific operations
 *
 * All the handlers MUST be implemented
 *
 * @start_hw: starts the HW. From that point on, the HW can send interrupts.
 *	May sleep.
 * @op_mode_leave: Turn off the HW RF kill indication if on
 *	May sleep
 * @start_fw: allocates and inits all the resources for the transport
 *	layer. Also kick a fw image.
 *	May sleep
 * @fw_alive: called when the fw sends alive notification. If the fw provides
 *	the SCD base address in SRAM, then provide it here, or 0 otherwise.
 *	May sleep
 * @stop_device: stops the whole device (embedded CPU put to reset) and stops
 *	the HW. From that point on, the HW will be stopped but will still issue
 *	an interrupt if the HW RF kill switch is triggered.
 *	This callback must do the right thing and not crash even if &start_hw()
 *	was called but not &start_fw(). May sleep.
 * @d3_suspend: put the device into the correct mode for WoWLAN during
 *	suspend. This is optional, if not implemented WoWLAN will not be
 *	supported. This callback may sleep.
 * @d3_resume: resume the device after WoWLAN, enabling the opmode to
 *	talk to the WoWLAN image to get its status. This is optional, if not
 *	implemented WoWLAN will not be supported. This callback may sleep.
 * @send_cmd: send a host command. Must return -ERFKILL if RFkill is asserted.
 *	If RFkill is asserted in the middle of a SYNC host command, it must
 *	return -ERFKILL straight away.
 *	May sleep only if CMD_ASYNC is not set
 * @tx: send an skb. The transport relies on the op_mode to zero the
 *	ieee80211_tx_info->driver_data. If the MPDU is an A-MSDU, all
 *	the CSUM will be taken care of (TCP CSUM and IP header in case of
 *	IPv4). If the MPDU is a single MSDU, the op_mode must compute the IP
 *	header checksum if it is IPv4.
 *	Must be atomic
 * @reclaim: free packets up to ssn. Returns a list of freed packets.
 *	Must be atomic
 * @txq_enable: setup a queue. To setup an AC queue, use the
 *	iwl_trans_ac_txq_enable wrapper. fw_alive must have been called before
 *	this one. The op_mode must not configure the HCMD queue. The scheduler
 *	configuration may be %NULL, in which case the hardware will not be
 *	configured. If true is returned, the operation mode needs to increment
 *	the sequence number of the packets routed to this queue because of a
 *	hardware scheduler bug. May sleep.
 * @txq_disable: de-configure a Tx queue to send AMPDUs
 *	Must be atomic
 * @txq_set_shared_mode: change Tx queue shared/unshared marking
 * @wait_tx_queues_empty: wait until tx queues are empty. May sleep.
 * @wait_txq_empty: wait until specific tx queue is empty. May sleep.
 * @freeze_txq_timer: prevents the timer of the queue from firing until the
 *	queue is set to awake. Must be atomic.
 * @block_txq_ptrs: stop updating the write pointers of the Tx queues. Note
 *	that the transport needs to refcount the calls since this function
 *	will be called several times with block = true, and then the queues
 *	need to be unblocked only after the same number of calls with
 *	block = false.
 * @write8: write a u8 to a register at offset ofs from the BAR
 * @write32: write a u32 to a register at offset ofs from the BAR
 * @read32: read a u32 register at offset ofs from the BAR
 * @read_prph: read a DWORD from a periphery register
 * @write_prph: write a DWORD to a periphery register
 * @read_mem: read device's SRAM in DWORD
 * @write_mem: write device's SRAM in DWORD. If %buf is %NULL, then the memory
 *	will be zeroed.
 * @read_config32: read a u32 value from the device's config space at
 *	the given offset.
 * @configure: configure parameters required by the transport layer from
 *	the op_mode. May be called several times before start_fw, can't be
 *	called after that.
 * @set_pmi: set the power pmi state
 * @grab_nic_access: wake the NIC to be able to access non-HBUS regs.
 *	Sleeping is not allowed between grab_nic_access and
 *	release_nic_access.
 * @release_nic_access: let the NIC go to sleep. The "flags" parameter
 *	must be the same one that was sent before to the grab_nic_access.
 * @set_bits_mask: set SRAM register according to value and mask.
 * @dump_data: return a vmalloc'ed buffer with debug data, maybe containing last
 *	TX'ed commands and similar. The buffer will be vfree'd by the caller.
 *	Note that the transport must fill in the proper file headers.
 * @debugfs_cleanup: used in the driver unload flow to make a proper cleanup
 *	of the trans debugfs
 * @set_pnvm: set the pnvm data in the prph scratch buffer, inside the
 *	context info.
 * @interrupts: disable/enable interrupts to transport
 */
struct iwl_trans_ops {

	int (*start_hw)(struct iwl_trans *iwl_trans);
	void (*op_mode_leave)(struct iwl_trans *iwl_trans);
	int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw,
			bool run_in_rfkill);
	void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr);
	void (*stop_device)(struct iwl_trans *trans);

	int (*d3_suspend)(struct iwl_trans *trans, bool test, bool reset);
	int (*d3_resume)(struct iwl_trans *trans, enum iwl_d3_status *status,
			 bool test, bool reset);

	int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd);

	int (*tx)(struct iwl_trans *trans, struct sk_buff *skb,
		  struct iwl_device_tx_cmd *dev_cmd, int queue);
	void (*reclaim)(struct iwl_trans *trans, int queue, int ssn,
			struct sk_buff_head *skbs);

	void (*set_q_ptrs)(struct iwl_trans *trans, int queue, int ptr);

	bool (*txq_enable)(struct iwl_trans *trans, int queue, u16 ssn,
			   const struct iwl_trans_txq_scd_cfg *cfg,
			   unsigned int queue_wdg_timeout);
	void (*txq_disable)(struct iwl_trans *trans, int queue,
			    bool configure_scd);
	/* 22000 functions */
	int (*txq_alloc)(struct iwl_trans *trans,
			 __le16 flags, u8 sta_id, u8 tid,
			 int cmd_id, int size,
			 unsigned int queue_wdg_timeout);
	void (*txq_free)(struct iwl_trans *trans, int queue);
	int (*rxq_dma_data)(struct iwl_trans *trans, int queue,
			    struct iwl_trans_rxq_dma_data *data);

	void (*txq_set_shared_mode)(struct iwl_trans *trans, u32 txq_id,
				    bool shared);

	int (*wait_tx_queues_empty)(struct iwl_trans *trans, u32 txq_bm);
	int (*wait_txq_empty)(struct iwl_trans *trans, int queue);
	void (*freeze_txq_timer)(struct iwl_trans *trans, unsigned long txqs,
				 bool freeze);
	void (*block_txq_ptrs)(struct iwl_trans *trans, bool block);

	void (*write8)(struct iwl_trans *trans, u32 ofs, u8 val);
	void (*write32)(struct iwl_trans *trans, u32 ofs, u32 val);
	u32 (*read32)(struct iwl_trans *trans, u32 ofs);
	u32 (*read_prph)(struct iwl_trans *trans, u32 ofs);
	void (*write_prph)(struct iwl_trans *trans, u32 ofs, u32 val);
	int (*read_mem)(struct iwl_trans *trans, u32 addr,
			void *buf, int dwords);
	int (*write_mem)(struct iwl_trans *trans, u32 addr,
			 const void *buf, int dwords);
	int (*read_config32)(struct iwl_trans *trans, u32 ofs, u32 *val);
	void (*configure)(struct iwl_trans *trans,
			  const struct iwl_trans_config *trans_cfg);
	void (*set_pmi)(struct iwl_trans *trans, bool state);
	void (*sw_reset)(struct iwl_trans *trans);
	bool (*grab_nic_access)(struct iwl_trans *trans);
	void (*release_nic_access)(struct iwl_trans *trans);
	void (*set_bits_mask)(struct iwl_trans *trans, u32 reg, u32 mask,
			      u32 value);

	struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans,
						 u32 dump_mask);
	void (*debugfs_cleanup)(struct iwl_trans *trans);
	void (*sync_nmi)(struct iwl_trans *trans);
	int (*set_pnvm)(struct iwl_trans *trans, const void *data, u32 len);
	void (*interrupts)(struct iwl_trans *trans, bool enable);
};

/**
 * enum iwl_trans_state - state of the transport layer
 *
 * @IWL_TRANS_NO_FW: firmware wasn't started yet, or crashed
 * @IWL_TRANS_FW_STARTED: FW was started, but not alive yet
 * @IWL_TRANS_FW_ALIVE: FW has sent an alive response
 */
enum iwl_trans_state {
	IWL_TRANS_NO_FW,
	IWL_TRANS_FW_STARTED,
	IWL_TRANS_FW_ALIVE,
};

/**
 * DOC: Platform power management
 *
 * In system-wide power management the entire platform goes into a low
 * power state (e.g. idle or suspend to RAM) at the same time and the
 * device is configured as a wakeup source for the entire platform.
 * This is usually triggered by userspace activity (e.g. the user
 * presses the suspend button or a power management daemon decides to
 * put the platform in low power mode). The device's behavior in this
 * mode is dictated by the wake-on-WLAN configuration.
 *
 * The terms used for the device's behavior are as follows:
 *
 *	- D0: the device is fully powered and the host is awake;
 *	- D3: the device is in low power mode and only reacts to
 *		specific events (e.g. magic-packet received or scan
 *		results found);
 *
 * These terms reflect the power modes in the firmware and are not to
 * be confused with the physical device power state.
 */

/**
 * enum iwl_plat_pm_mode - platform power management mode
 *
 * This enumeration describes the device's platform power management
 * behavior when in system-wide suspend (i.e. WoWLAN).
 *
 * @IWL_PLAT_PM_MODE_DISABLED: power management is disabled for this
 *	device. In system-wide suspend mode, it means that all
 *	connections will be closed automatically by mac80211 before
 *	the platform is suspended.
 * @IWL_PLAT_PM_MODE_D3: the device goes into D3 mode (i.e. WoWLAN).
 */
enum iwl_plat_pm_mode {
	IWL_PLAT_PM_MODE_DISABLED,
	IWL_PLAT_PM_MODE_D3,
};

/**
 * enum iwl_ini_cfg_state
 * @IWL_INI_CFG_STATE_NOT_LOADED: no debug cfg was given
 * @IWL_INI_CFG_STATE_LOADED: debug cfg was found and loaded
 * @IWL_INI_CFG_STATE_CORRUPTED: debug cfg was found and some of the TLVs
 *	are corrupted. The rest of the debug TLVs will still be used
 */
enum iwl_ini_cfg_state {
	IWL_INI_CFG_STATE_NOT_LOADED,
	IWL_INI_CFG_STATE_LOADED,
	IWL_INI_CFG_STATE_CORRUPTED,
};

/* Max time to wait for nmi interrupt */
#define IWL_TRANS_NMI_TIMEOUT (HZ / 4)

/**
 * struct iwl_dram_data
 * @physical: page phy pointer
 * @block: pointer to the allocated block/page
 * @size: size of the block/page
 */
struct iwl_dram_data {
	dma_addr_t physical;
	void *block;
	int size;
};

/**
 * struct iwl_fw_mon - fw monitor per allocation id
 * @num_frags: number of fragments
 * @frags: an array of DRAM buffer fragments
 */
struct iwl_fw_mon {
	u32 num_frags;
	struct iwl_dram_data *frags;
};

/**
 * struct iwl_self_init_dram - dram data used by self init process
 * @fw: lmac and umac dram data
 * @fw_cnt: total number of items in array
 * @paging: paging dram data
 * @paging_cnt: total number of items in array
 */
struct iwl_self_init_dram {
	struct iwl_dram_data *fw;
	int fw_cnt;
	struct iwl_dram_data *paging;
	int paging_cnt;
};

/**
 * struct iwl_trans_debug - transport debug related data
 *
 * @n_dest_reg: num of reg_ops in %dbg_dest_tlv
 * @rec_on: true iff there is a fw debug recording currently active
 * @dest_tlv: points to the destination TLV for debug
 * @conf_tlv: array of pointers to configuration TLVs for debug
 * @trigger_tlv: array of pointers to triggers TLVs for debug
 * @lmac_error_event_table: addrs of lmacs error tables
 * @umac_error_event_table: addr of umac error table
 * @error_event_table_tlv_status: bitmap that indicates which error table
 *	pointers were received via TLV. Uses enum &iwl_error_event_table_status
 * @internal_ini_cfg: internal debug cfg state. Uses &enum iwl_ini_cfg_state
 * @external_ini_cfg: external debug cfg state. Uses &enum iwl_ini_cfg_state
 * @fw_mon_cfg: debug buffer allocation configuration
 * @fw_mon_ini: DRAM buffer fragments per allocation id
 * @fw_mon: DRAM buffer for firmware monitor
 * @hw_error: equals true if hw error interrupt was received from the FW
 * @ini_dest: debug monitor destination uses &enum iwl_fw_ini_buffer_location
 * @active_regions: active regions
 * @debug_info_tlv_list: list of debug info TLVs
 * @time_point: array of debug time points
 * @periodic_trig_list: periodic triggers list
 * @domains_bitmap: bitmap of active domains other than
 *	&IWL_FW_INI_DOMAIN_ALWAYS_ON
 */
struct iwl_trans_debug {
	u8 n_dest_reg;
	bool rec_on;

	const struct iwl_fw_dbg_dest_tlv_v1 *dest_tlv;
	const struct iwl_fw_dbg_conf_tlv *conf_tlv[FW_DBG_CONF_MAX];
	struct iwl_fw_dbg_trigger_tlv * const *trigger_tlv;

	u32 lmac_error_event_table[2];
	u32 umac_error_event_table;
	unsigned int error_event_table_tlv_status;

	enum iwl_ini_cfg_state internal_ini_cfg;
	enum iwl_ini_cfg_state external_ini_cfg;

	struct iwl_fw_ini_allocation_tlv fw_mon_cfg[IWL_FW_INI_ALLOCATION_NUM];
	struct iwl_fw_mon fw_mon_ini[IWL_FW_INI_ALLOCATION_NUM];

	struct iwl_dram_data fw_mon;

	bool hw_error;
	enum iwl_fw_ini_buffer_location ini_dest;

	u64 unsupported_region_msk;
	struct iwl_ucode_tlv *active_regions[IWL_FW_INI_MAX_REGION_ID];
	struct list_head debug_info_tlv_list;
	struct iwl_dbg_tlv_time_point_data
		time_point[IWL_FW_INI_TIME_POINT_NUM];
	struct list_head periodic_trig_list;

	u32 domains_bitmap;
};

struct iwl_dma_ptr {
	dma_addr_t dma;
	void *addr;
	size_t size;
};

struct iwl_cmd_meta {
	/* only for SYNC commands, iff the reply skb is wanted */
	struct iwl_host_cmd *source;
	u32 flags;
	u32 tbs;
};

/*
 * The FH will write back to the first TB only, so we need to copy some data
 * into the buffer regardless of whether it should be mapped or not.
 * This indicates how big the first TB must be to include the scratch buffer
 * and the assigned PN.
 * Since the PN is 8 bytes at offset 12, the size is 20 bytes.
 * If we make it bigger, then allocations will be bigger and copying slower,
 * so that's probably not useful.
 */
#define IWL_FIRST_TB_SIZE	20
#define IWL_FIRST_TB_SIZE_ALIGN ALIGN(IWL_FIRST_TB_SIZE, 64)

struct iwl_pcie_txq_entry {
	void *cmd;
	struct sk_buff *skb;
	/* buffer to free after command completes */
	const void *free_buf;
	struct iwl_cmd_meta meta;
};

struct iwl_pcie_first_tb_buf {
	u8 buf[IWL_FIRST_TB_SIZE_ALIGN];
};

/**
 * struct iwl_txq - Tx Queue for DMA
 * @q: generic Rx/Tx queue descriptor
 * @tfds: transmit frame descriptors (DMA memory)
 * @first_tb_bufs: start of command headers, including scratch buffers, for
 *	the writeback -- this is DMA memory and an array holding one buffer
 *	for each command on the queue
 * @first_tb_dma: DMA address for the first_tb_bufs start
 * @entries: transmit entries (driver state)
 * @lock: queue lock
 * @stuck_timer: timer that fires if queue gets stuck
 * @trans: pointer back to transport (for timer)
 * @need_update: indicates need to update read/write index
 * @ampdu: true if this queue is an ampdu queue for a specific RA/TID
 * @wd_timeout: queue watchdog timeout (jiffies) - per queue
 * @frozen: tx stuck queue timer is frozen
 * @frozen_expiry_remainder: remember how long until the timer fires
 * @bc_tbl: byte count table of the queue (relevant only for gen2 transport)
 * @write_ptr: first empty entry (index), host_w
 * @read_ptr: last used entry (index), host_r
 * @dma_addr: physical addr for BD's
 * @n_window: safe queue window
 * @id: queue id
 * @low_mark: low watermark, resume queue if free space more than this
 * @high_mark: high watermark, stop queue if free space less than this
 *
 * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
 * descriptors) and required locking structures.
 *
 * Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware
 * always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256 (unless
 * HW changes in the future). For the normal TX
 * queues, n_window, which is the size of the software queue data,
 * is also 256; however, for the command queue, n_window is only
 * 32 since we don't need so many commands pending. Since the HW
 * still uses 256 BDs for DMA though, TFD_QUEUE_SIZE_MAX stays 256.
 * This means that we end up with the following:
 *	HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
 *	SW entries: | 0 | ... | 31 |
 * where N is a number between 0 and 7. This means that the SW
 * data is a window overlaid over the HW queue.
 */
struct iwl_txq {
	void *tfds;
	struct iwl_pcie_first_tb_buf *first_tb_bufs;
	dma_addr_t first_tb_dma;
	struct iwl_pcie_txq_entry *entries;
	/* lock for syncing changes on the queue */
	spinlock_t lock;
	unsigned long frozen_expiry_remainder;
	struct timer_list stuck_timer;
	struct iwl_trans *trans;
	bool need_update;
	bool frozen;
	bool ampdu;
	int block;
	unsigned long wd_timeout;
	struct sk_buff_head overflow_q;
	struct iwl_dma_ptr bc_tbl;

	int write_ptr;
	int read_ptr;
	dma_addr_t dma_addr;
	int n_window;
	u32 id;
	int low_mark;
	int high_mark;

	bool overflow_tx;
};

/**
 * struct iwl_trans_txqs - transport tx queues data
 *
 * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
 * @page_offs: offset from skb->cb to mac header page pointer
 * @dev_cmd_offs: offset from skb->cb to iwl_device_tx_cmd pointer
 * @queue_used: bit mask of used queues
 * @queue_stopped: bit mask of stopped queues
 * @scd_bc_tbls: gen1 pointer to the byte count table of the scheduler
 */
struct iwl_trans_txqs {
	unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
	unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
	struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES];
	struct dma_pool *bc_pool;
	size_t bc_tbl_size;
	bool bc_table_dword;
	u8 page_offs;
	u8 dev_cmd_offs;
	struct iwl_tso_hdr_page __percpu *tso_hdr_page;

	struct {
		u8 fifo;
		u8 q_id;
		unsigned int wdg_timeout;
	} cmd;

	struct {
		u8 max_tbs;
		u16 size;
		u8 addr_size;
	} tfd;

	struct iwl_dma_ptr scd_bc_tbls;
};

/**
 * struct iwl_trans - transport common data
 *
 * @ops: pointer to iwl_trans_ops
 * @op_mode: pointer to the op_mode
 * @trans_cfg: the trans-specific configuration part
 * @cfg: pointer to the configuration
 * @drv: pointer to iwl_drv
 * @status: a bit-mask of transport status flags
 * @dev: pointer to struct device * that represents the device
 * @max_skb_frags: maximum number of fragments an SKB can have when transmitted.
 *	0 indicates that frag SKBs (NETIF_F_SG) aren't supported.
 * @hw_rf_id: a u32 with the device RF ID
 * @hw_id: a u32 with the ID of the device / sub-device.
 *	Set during transport allocation.
 * @hw_id_str: a string with info about HW ID. Set during transport allocation.
 * @pm_support: set to true in start_hw if link pm is supported
 * @ltr_enabled: set to true if the LTR is enabled
 * @wide_cmd_header: true when ucode supports wide command header format
 * @wait_command_queue: wait queue for sync commands
 * @num_rx_queues: number of RX queues allocated by the transport;
 *	the transport must set this before calling iwl_drv_start()
 * @iml_len: the length of the image loader
 * @iml: a pointer to the image loader itself
 * @dev_cmd_pool: pool for Tx cmd allocation - for internal use only.
 *	The user should use iwl_trans_{alloc,free}_tx_cmd.
 * @rx_mpdu_cmd: MPDU RX command ID, must be assigned by opmode before
 *	starting the firmware, used for tracing
 * @rx_mpdu_cmd_hdr_size: used for tracing, amount of data before the
 *	start of the 802.11 header in the @rx_mpdu_cmd
 * @dflt_pwr_limit: default power limit fetched from the platform (ACPI)
 * @system_pm_mode: the system-wide power management mode in use.
 *	This mode is set dynamically, depending on the WoWLAN values
 *	configured from the userspace at runtime.
 * @txqs: transport tx queues data.
 */
struct iwl_trans {
	const struct iwl_trans_ops *ops;
	struct iwl_op_mode *op_mode;
	const struct iwl_cfg_trans_params *trans_cfg;
	const struct iwl_cfg *cfg;
	struct iwl_drv *drv;
	enum iwl_trans_state state;
	unsigned long status;

	struct device *dev;
	u32 max_skb_frags;
	u32 hw_rev;
	u32 hw_rf_id;
	u32 hw_id;
	char hw_id_str[52];
	u32 sku_id[3];

	u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size;

	bool pm_support;
	bool ltr_enabled;
	u8 pnvm_loaded:1;

	const struct iwl_hcmd_arr *command_groups;
	int command_groups_size;
	bool wide_cmd_header;

	wait_queue_head_t wait_command_queue;
	u8 num_rx_queues;

	size_t iml_len;
	u8 *iml;

	/* The following fields are internal only */
	struct kmem_cache *dev_cmd_pool;
	char dev_cmd_pool_name[50];

	struct dentry *dbgfs_dir;

#ifdef CONFIG_LOCKDEP
	struct lockdep_map sync_cmd_lockdep_map;
#endif

	struct iwl_trans_debug dbg;
	struct iwl_self_init_dram init_dram;

	enum iwl_plat_pm_mode system_pm_mode;

	const char *name;
	struct iwl_trans_txqs txqs;

	/* pointer to trans specific struct */
	/* Ensure that this pointer will always be aligned to sizeof pointer */
	char trans_specific[] __aligned(sizeof(void *));
};

const char *iwl_get_cmd_string(struct iwl_trans *trans, u32 id);
int iwl_cmd_groups_verify_sorted(const struct iwl_trans_config *trans);

static inline void iwl_trans_configure(struct iwl_trans *trans,
				       const struct iwl_trans_config *trans_cfg)
{
	trans->op_mode = trans_cfg->op_mode;

	trans->ops->configure(trans, trans_cfg);
	WARN_ON(iwl_cmd_groups_verify_sorted(trans_cfg));
}

static inline int iwl_trans_start_hw(struct iwl_trans *trans)
{
	might_sleep();

	return trans->ops->start_hw(trans);
}

static inline void iwl_trans_op_mode_leave(struct iwl_trans *trans)
{
	might_sleep();

	if (trans->ops->op_mode_leave)
		trans->ops->op_mode_leave(trans);

	trans->op_mode = NULL;

	trans->state = IWL_TRANS_NO_FW;
}

static inline void iwl_trans_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
	might_sleep();

	trans->state = IWL_TRANS_FW_ALIVE;

	trans->ops->fw_alive(trans, scd_addr);
}

static inline int iwl_trans_start_fw(struct iwl_trans *trans,
				     const struct fw_img *fw,
				     bool run_in_rfkill)
{
	int ret;

	might_sleep();

	WARN_ON_ONCE(!trans->rx_mpdu_cmd);

	clear_bit(STATUS_FW_ERROR, &trans->status);
	ret = trans->ops->start_fw(trans, fw, run_in_rfkill);
	if (ret == 0)
		trans->state = IWL_TRANS_FW_STARTED;

	return ret;
}

static inline void iwl_trans_stop_device(struct iwl_trans *trans)
{
	might_sleep();

	trans->ops->stop_device(trans);

	trans->state = IWL_TRANS_NO_FW;
}

static inline int iwl_trans_d3_suspend(struct iwl_trans *trans, bool test,
				       bool reset)
{
	might_sleep();
	if (!trans->ops->d3_suspend)
		return 0;

	return trans->ops->d3_suspend(trans, test, reset);
}

static inline int iwl_trans_d3_resume(struct iwl_trans *trans,
				      enum iwl_d3_status *status,
				      bool test, bool reset)
{
	might_sleep();
	if (!trans->ops->d3_resume)
		return 0;

	return trans->ops->d3_resume(trans, status, test, reset);
}

static inline struct iwl_trans_dump_data *
iwl_trans_dump_data(struct iwl_trans *trans, u32 dump_mask)
{
	if (!trans->ops->dump_data)
		return NULL;
	return trans->ops->dump_data(trans, dump_mask);
}

static inline struct iwl_device_tx_cmd *
iwl_trans_alloc_tx_cmd(struct iwl_trans *trans)
{
	return kmem_cache_zalloc(trans->dev_cmd_pool, GFP_ATOMIC);
}

int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);

static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
					 struct iwl_device_tx_cmd *dev_cmd)
{
	kmem_cache_free(trans->dev_cmd_pool, dev_cmd);
}

static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
			       struct iwl_device_tx_cmd *dev_cmd, int queue)
{
	if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
		return -EIO;

	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return -EIO;
	}

	return trans->ops->tx(trans, skb, dev_cmd, queue);
}
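
/*
 * Illustrative sketch (not from the driver) of the op_mode TX path around
 * the three helpers above; on success the command is freed when the frame
 * is reclaimed, on failure the caller frees it:
 *
 *	dev_cmd = iwl_trans_alloc_tx_cmd(trans);
 *	if (!dev_cmd)
 *		return -ENOMEM;
 *	... fill dev_cmd->hdr and dev_cmd->payload ...
 *	ret = iwl_trans_tx(trans, skb, dev_cmd, queue);
 *	if (ret)
 *		iwl_trans_free_tx_cmd(trans, dev_cmd);
 */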

static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue,
				     int ssn, struct sk_buff_head *skbs)
{
	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return;
	}

	trans->ops->reclaim(trans, queue, ssn, skbs);
}

static inline void iwl_trans_set_q_ptrs(struct iwl_trans *trans, int queue,
					int ptr)
{
	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return;
	}

	trans->ops->set_q_ptrs(trans, queue, ptr);
}

static inline void iwl_trans_txq_disable(struct iwl_trans *trans, int queue,
					 bool configure_scd)
{
	trans->ops->txq_disable(trans, queue, configure_scd);
}

static inline bool
iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn,
			 const struct iwl_trans_txq_scd_cfg *cfg,
			 unsigned int queue_wdg_timeout)
{
	might_sleep();

	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return false;
	}

	return trans->ops->txq_enable(trans, queue, ssn,
				      cfg, queue_wdg_timeout);
}

static inline int
iwl_trans_get_rxq_dma_data(struct iwl_trans *trans, int queue,
			   struct iwl_trans_rxq_dma_data *data)
{
	if (WARN_ON_ONCE(!trans->ops->rxq_dma_data))
		return -ENOTSUPP;

	return trans->ops->rxq_dma_data(trans, queue, data);
}

static inline void
iwl_trans_txq_free(struct iwl_trans *trans, int queue)
{
	if (WARN_ON_ONCE(!trans->ops->txq_free))
		return;

	trans->ops->txq_free(trans, queue);
}

static inline int
iwl_trans_txq_alloc(struct iwl_trans *trans,
		    __le16 flags, u8 sta_id, u8 tid,
		    int cmd_id, int size,
		    unsigned int wdg_timeout)
{
	might_sleep();

	if (WARN_ON_ONCE(!trans->ops->txq_alloc))
		return -ENOTSUPP;

	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return -EIO;
	}

	return trans->ops->txq_alloc(trans, flags, sta_id, tid,
				     cmd_id, size, wdg_timeout);
}

static inline void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans,
						 int queue, bool shared_mode)
{
	if (trans->ops->txq_set_shared_mode)
		trans->ops->txq_set_shared_mode(trans, queue, shared_mode);
}

static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
					int fifo, int sta_id, int tid,
					int frame_limit, u16 ssn,
					unsigned int queue_wdg_timeout)
{
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = fifo,
		.sta_id = sta_id,
		.tid = tid,
		.frame_limit = frame_limit,
		.aggregate = sta_id >= 0,
	};

	iwl_trans_txq_enable_cfg(trans, queue, ssn, &cfg, queue_wdg_timeout);
}

static inline
void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue, int fifo,
			     unsigned int queue_wdg_timeout)
{
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = fifo,
		.sta_id = -1,
		.tid = IWL_MAX_TID_COUNT,
		.frame_limit = IWL_FRAME_LIMIT,
		.aggregate = false,
	};

	iwl_trans_txq_enable_cfg(trans, queue, 0, &cfg, queue_wdg_timeout);
}

static inline void iwl_trans_freeze_txq_timer(struct iwl_trans *trans,
					      unsigned long txqs,
					      bool freeze)
{
	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return;
	}

	if (trans->ops->freeze_txq_timer)
		trans->ops->freeze_txq_timer(trans, txqs, freeze);
}

static inline void iwl_trans_block_txq_ptrs(struct iwl_trans *trans,
					    bool block)
{
	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return;
	}

	if (trans->ops->block_txq_ptrs)
		trans->ops->block_txq_ptrs(trans, block);
}

static inline int iwl_trans_wait_tx_queues_empty(struct iwl_trans *trans,
						 u32 txqs)
{
	if (WARN_ON_ONCE(!trans->ops->wait_tx_queues_empty))
		return -ENOTSUPP;

	/* No need to wait if the firmware is not alive */
	if (trans->state != IWL_TRANS_FW_ALIVE) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return -EIO;
	}

	return trans->ops->wait_tx_queues_empty(trans, txqs);
}

static inline int iwl_trans_wait_txq_empty(struct iwl_trans *trans, int queue)
{
	if (WARN_ON_ONCE(!trans->ops->wait_txq_empty))
		return -ENOTSUPP;

	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return -EIO;
	}

	return trans->ops->wait_txq_empty(trans, queue);
}

static inline void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
	trans->ops->write8(trans, ofs, val);
}

static inline void iwl_trans_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
	trans->ops->write32(trans, ofs, val);
}

static inline u32 iwl_trans_read32(struct iwl_trans *trans, u32 ofs)
{
	return trans->ops->read32(trans, ofs);
}

static inline u32 iwl_trans_read_prph(struct iwl_trans *trans, u32 ofs)
{
	return trans->ops->read_prph(trans, ofs);
}

static inline void iwl_trans_write_prph(struct iwl_trans *trans, u32 ofs,
					u32 val)
{
	return trans->ops->write_prph(trans, ofs, val);
}

static inline int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr,
				     void *buf, int dwords)
{
	return trans->ops->read_mem(trans, addr, buf, dwords);
}

#define iwl_trans_read_mem_bytes(trans, addr, buf, bufsize)		      \
	do {								      \
		if (__builtin_constant_p(bufsize))			      \
			BUILD_BUG_ON((bufsize) % sizeof(u32));		      \
		iwl_trans_read_mem(trans, addr, buf, (bufsize) / sizeof(u32));\
	} while (0)
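
/*
 * Example (sketch): reading a whole structure from the device's SRAM;
 * struct my_error_table and base_addr are hypothetical, and the build-time
 * check above requires the structure size to be a multiple of 4 bytes:
 *
 *	struct my_error_table tbl;
 *
 *	iwl_trans_read_mem_bytes(trans, base_addr, &tbl, sizeof(tbl));
 */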

static inline u32 iwl_trans_read_mem32(struct iwl_trans *trans, u32 addr)
{
	u32 value;

	if (WARN_ON(iwl_trans_read_mem(trans, addr, &value, 1)))
		return 0xa5a5a5a5;

	return value;
}

static inline int iwl_trans_write_mem(struct iwl_trans *trans, u32 addr,
				      const void *buf, int dwords)
{
	return trans->ops->write_mem(trans, addr, buf, dwords);
}

static inline u32 iwl_trans_write_mem32(struct iwl_trans *trans, u32 addr,
					u32 val)
{
	return iwl_trans_write_mem(trans, addr, &val, 1);
}

static inline void iwl_trans_set_pmi(struct iwl_trans *trans, bool state)
{
	if (trans->ops->set_pmi)
		trans->ops->set_pmi(trans, state);
}

static inline void iwl_trans_sw_reset(struct iwl_trans *trans)
{
	if (trans->ops->sw_reset)
		trans->ops->sw_reset(trans);
}

static inline void
iwl_trans_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value)
{
	trans->ops->set_bits_mask(trans, reg, mask, value);
}

#define iwl_trans_grab_nic_access(trans)		\
	__cond_lock(nic_access,				\
		    likely((trans)->ops->grab_nic_access(trans)))

static inline void __releases(nic_access)
iwl_trans_release_nic_access(struct iwl_trans *trans)
{
	trans->ops->release_nic_access(trans);
	__release(nic_access);
}
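
/*
 * Typical usage pattern (sketch): non-HBUS register access is bracketed
 * by grab/release, with no sleeping in between:
 *
 *	if (iwl_trans_grab_nic_access(trans)) {
 *		val = iwl_trans_read_prph(trans, reg);
 *		iwl_trans_release_nic_access(trans);
 *	}
 */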

static inline void iwl_trans_fw_error(struct iwl_trans *trans)
{
	if (WARN_ON_ONCE(!trans->op_mode))
		return;

	/* prevent double restarts due to the same erroneous FW */
	if (!test_and_set_bit(STATUS_FW_ERROR, &trans->status)) {
		iwl_op_mode_nic_error(trans->op_mode);
		trans->state = IWL_TRANS_NO_FW;
	}
}

static inline bool iwl_trans_fw_running(struct iwl_trans *trans)
{
	return trans->state == IWL_TRANS_FW_ALIVE;
}

static inline void iwl_trans_sync_nmi(struct iwl_trans *trans)
{
	if (trans->ops->sync_nmi)
		trans->ops->sync_nmi(trans);
}

void iwl_trans_sync_nmi_with_addr(struct iwl_trans *trans, u32 inta_addr,
				  u32 sw_err_bit);

static inline int iwl_trans_set_pnvm(struct iwl_trans *trans,
				     const void *data, u32 len)
{
	if (trans->ops->set_pnvm) {
		int ret = trans->ops->set_pnvm(trans, data, len);

		if (ret)
			return ret;
	}

	trans->pnvm_loaded = true;

	return 0;
}

static inline bool iwl_trans_dbg_ini_valid(struct iwl_trans *trans)
{
	return trans->dbg.internal_ini_cfg != IWL_INI_CFG_STATE_NOT_LOADED ||
		trans->dbg.external_ini_cfg != IWL_INI_CFG_STATE_NOT_LOADED;
}

static inline void iwl_trans_interrupts(struct iwl_trans *trans, bool enable)
{
	if (trans->ops->interrupts)
		trans->ops->interrupts(trans, enable);
}

/*****************************************************
 * transport helper functions
 *****************************************************/
struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
				  struct device *dev,
				  const struct iwl_trans_ops *ops,
				  const struct iwl_cfg_trans_params *cfg_trans);
int iwl_trans_init(struct iwl_trans *trans);
void iwl_trans_free(struct iwl_trans *trans);

/*****************************************************
 * driver (transport) register/unregister functions
 ******************************************************/
int __must_check iwl_pci_register_driver(void);
void iwl_pci_unregister_driver(void);

#endif /* __iwl_trans_h__ */