/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#ifndef __iwl_trans_h__
#define __iwl_trans_h__

#include <linux/ieee80211.h>
#include <linux/mm.h> /* for page_address */
#include <linux/lockdep.h>
#include <linux/kernel.h>

#include "iwl-debug.h"
#include "iwl-config.h"
#include "fw/img.h"
#include "iwl-op-mode.h"
#include "fw/api/cmdhdr.h"
#include "fw/api/txq.h"
#include "fw/api/dbg-tlv.h"
#include "iwl-dbg-tlv.h"
/**
 * DOC: Transport layer - what is it?
 *
 * The transport layer is the layer that deals with the HW directly. It provides
 * an abstraction of the underlying HW to the upper layer. The transport layer
 * doesn't provide any policy, algorithm or anything of this kind, but only
 * mechanisms to make the HW do something. It is not completely stateless but
 * close to it.
 * There is one implementation for each supported bus.
 */

/**
 * DOC: Life cycle of the transport layer
 *
 * The transport layer has a very precise life cycle.
 *
 *	1) A helper function is called during the module initialization and
 *	   registers the bus driver's ops with the transport's alloc function.
 *	2) The bus's probe calls the transport layer's allocation function.
 *	   Of course this function is bus specific.
 *	3) This allocation function will spawn the upper layer, which will
 *	   register with mac80211.
 *
 *	4) At some point (e.g. mac80211's start call), the op_mode will call
 *	   the following sequence:
 *	   start_hw
 *	   start_fw
 *
 *	5) Then when finished (or reset):
 *	   stop_device
 *
 *	6) Eventually, the free function will be called.
 */
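
/*
 * Illustrative sketch (editor's example; "fw" and the error handling are
 * placeholders): the sequence above, expressed through the wrappers
 * defined later in this file.
 *
 *	int ret = iwl_trans_start_hw(trans);
 *
 *	if (!ret)
 *		ret = iwl_trans_start_fw(trans, fw, false);
 *	...
 *	iwl_trans_stop_device(trans);
 */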

#define IWL_TRANS_FW_DBG_DOMAIN(trans)	IWL_FW_INI_DOMAIN_ALWAYS_ON

#define FH_RSCSR_FRAME_SIZE_MSK		0x00003FFF	/* bits 0-13 */
#define FH_RSCSR_FRAME_INVALID		0x55550000
#define FH_RSCSR_FRAME_ALIGN		0x40
#define FH_RSCSR_RPA_EN			BIT(25)
#define FH_RSCSR_RADA_EN		BIT(26)
#define FH_RSCSR_RXQ_POS		16
#define FH_RSCSR_RXQ_MASK		0x3F0000

struct iwl_rx_packet {
	/*
	 * The first 4 bytes of the RX frame header contain both the RX frame
	 * size and some flags.
	 * Bit fields:
	 * 31:    flag flush RB request
	 * 30:    flag ignore TC (terminal counter) request
	 * 29:    flag fast IRQ request
	 * 28-27: Reserved
	 * 26:    RADA enabled
	 * 25:    Offload enabled
	 * 24:    RPF enabled
	 * 23:    RSS enabled
	 * 22:    Checksum enabled
	 * 21-16: RX queue
	 * 15-14: Reserved
	 * 13-00: RX frame size
	 */
	__le32 len_n_flags;
	struct iwl_cmd_header hdr;
	u8 data[];
} __packed;

static inline u32 iwl_rx_packet_len(const struct iwl_rx_packet *pkt)
{
	return le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
}

static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt)
{
	return iwl_rx_packet_len(pkt) - sizeof(pkt->hdr);
}
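
/*
 * Usage sketch (editor's illustration): validate the payload size of a
 * received packet before touching it. "struct my_notif" is a hypothetical
 * notification layout, not part of this API.
 *
 *	struct my_notif *notif = (void *)pkt->data;
 *
 *	if (iwl_rx_packet_payload_len(pkt) < sizeof(*notif))
 *		return -EINVAL;
 */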

/**
 * enum CMD_MODE - how to send the host commands?
 *
 * @CMD_ASYNC: Return right away and don't wait for the response
 * @CMD_WANT_SKB: Not valid with CMD_ASYNC. The caller needs the buffer of
 *	the response. The caller needs to call iwl_free_resp when done.
 * @CMD_SEND_IN_RFKILL: Send the command even if the NIC is in RF-kill.
 * @CMD_WANT_ASYNC_CALLBACK: the op_mode's async callback function must be
 *	called after this command completes. Valid only with CMD_ASYNC.
 */
enum CMD_MODE {
	CMD_ASYNC		= BIT(0),
	CMD_WANT_SKB		= BIT(1),
	CMD_SEND_IN_RFKILL	= BIT(2),
	CMD_WANT_ASYNC_CALLBACK	= BIT(3),
};

#define DEF_CMD_PAYLOAD_SIZE 320

/**
 * struct iwl_device_cmd
 *
 * For allocation of the command and tx queues, this establishes the overall
 * size of the largest command we send to uCode, except for commands that
 * aren't fully copied and use other TFD space.
 */
struct iwl_device_cmd {
	union {
		struct {
			struct iwl_cmd_header hdr;	/* uCode API */
			u8 payload[DEF_CMD_PAYLOAD_SIZE];
		};
		struct {
			struct iwl_cmd_header_wide hdr_wide;
			u8 payload_wide[DEF_CMD_PAYLOAD_SIZE -
					sizeof(struct iwl_cmd_header_wide) +
					sizeof(struct iwl_cmd_header)];
		};
	};
} __packed;

/**
 * struct iwl_device_tx_cmd - buffer for TX command
 * @hdr: the header
 * @payload: the payload placeholder
 *
 * The actual structure is sized dynamically according to need.
 */
struct iwl_device_tx_cmd {
	struct iwl_cmd_header hdr;
	u8 payload[];
} __packed;

#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))

/*
 * number of transfer buffers (fragments) per transmit frame descriptor;
 * this is just the driver's idea, the hardware supports 20
 */
#define IWL_MAX_CMD_TBS_PER_TFD	2

/**
 * enum iwl_hcmd_dataflag - flag for each one of the chunks of the command
 *
 * @IWL_HCMD_DFL_NOCOPY: By default, the command is copied to the host command's
 *	ring. The transport layer doesn't map the command's buffer to DMA, but
 *	rather copies it to a previously allocated DMA buffer. This flag tells
 *	the transport layer not to copy the command, but to map the existing
 *	buffer (that is passed in) instead. This saves the memcpy and allows
 *	commands that are bigger than the fixed buffer to be submitted.
 *	Note that a TFD entry after a NOCOPY one cannot be a normal copied one.
 * @IWL_HCMD_DFL_DUP: Only valid without NOCOPY, duplicate the memory for this
 *	chunk internally and free it again after the command completes. This
 *	can (currently) be used only once per command.
 *	Note that a TFD entry after a DUP one cannot be a normal copied one.
 */
enum iwl_hcmd_dataflag {
	IWL_HCMD_DFL_NOCOPY	= BIT(0),
	IWL_HCMD_DFL_DUP	= BIT(1),
};

enum iwl_error_event_table_status {
	IWL_ERROR_EVENT_TABLE_LMAC1 = BIT(0),
	IWL_ERROR_EVENT_TABLE_LMAC2 = BIT(1),
	IWL_ERROR_EVENT_TABLE_UMAC = BIT(2),
};

/**
 * struct iwl_host_cmd - Host command to the uCode
 *
 * @data: array of chunks that compose the data of the host command
 * @resp_pkt: response packet, if %CMD_WANT_SKB was set
 * @_rx_page_order: (internally used to free response packet)
 * @_rx_page_addr: (internally used to free response packet)
 * @flags: can be CMD_*
 * @len: array of the lengths of the chunks in data
 * @dataflags: IWL_HCMD_DFL_*
 * @id: command id of the host command, for wide commands encoding the
 *	version and group as well
 */
struct iwl_host_cmd {
	const void *data[IWL_MAX_CMD_TBS_PER_TFD];
	struct iwl_rx_packet *resp_pkt;
	unsigned long _rx_page_addr;
	u32 _rx_page_order;

	u32 flags;
	u32 id;
	u16 len[IWL_MAX_CMD_TBS_PER_TFD];
	u8 dataflags[IWL_MAX_CMD_TBS_PER_TFD];
};

static inline void iwl_free_resp(struct iwl_host_cmd *cmd)
{
	free_pages(cmd->_rx_page_addr, cmd->_rx_page_order);
}
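
/*
 * Illustrative sketch (editor's example; ECHO_CMD and "req" are
 * placeholders): sending a synchronous command and consuming the
 * response, via iwl_trans_send_cmd() declared later in this file.
 *
 *	struct iwl_host_cmd hcmd = {
 *		.id = ECHO_CMD,
 *		.flags = CMD_WANT_SKB,
 *		.data = { &req, },
 *		.len = { sizeof(req), },
 *	};
 *	int ret = iwl_trans_send_cmd(trans, &hcmd);
 *
 *	if (!ret) {
 *		... use hcmd.resp_pkt ...
 *		iwl_free_resp(&hcmd);
 *	}
 */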

struct iwl_rx_cmd_buffer {
	struct page *_page;
	int _offset;
	bool _page_stolen;
	u32 _rx_page_order;
	unsigned int truesize;
};

static inline void *rxb_addr(struct iwl_rx_cmd_buffer *r)
{
	return (void *)((unsigned long)page_address(r->_page) + r->_offset);
}

static inline int rxb_offset(struct iwl_rx_cmd_buffer *r)
{
	return r->_offset;
}

static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r)
{
	r->_page_stolen = true;
	get_page(r->_page);
	return r->_page;
}

static inline void iwl_free_rxb(struct iwl_rx_cmd_buffer *r)
{
	__free_pages(r->_page, r->_rx_page_order);
}
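
/*
 * Illustrative sketch (editor's example; need_to_keep_data() is a
 * hypothetical predicate): a typical RX-handler pattern. The page is
 * stolen only when the data must outlive the callback; the caller then
 * owns an extra reference to the returned page.
 *
 *	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 *
 *	if (need_to_keep_data(pkt)) {
 *		struct page *p = rxb_steal_page(rxb);
 *		...
 *	}
 */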

#define MAX_NO_RECLAIM_CMDS	6

#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
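
/*
 * Worked example (editor's note): IWL_MASK sets bits lo..hi inclusive,
 * e.g. IWL_MASK(0, 3) == 0xf and IWL_MASK(4, 7) == 0xf0.
 */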

/*
 * Maximum number of HW queues the transport layer
 * currently supports
 */
#define IWL_MAX_HW_QUEUES		32
#define IWL_MAX_TVQM_QUEUES		512

#define IWL_MAX_TID_COUNT	8
#define IWL_MGMT_TID		15
#define IWL_FRAME_LIMIT	64
#define IWL_MAX_RX_HW_QUEUES	16

/**
 * enum iwl_d3_status - WoWLAN image/device status
 * @IWL_D3_STATUS_ALIVE: firmware is still running after resume
 * @IWL_D3_STATUS_RESET: device was reset while suspended
 */
enum iwl_d3_status {
	IWL_D3_STATUS_ALIVE,
	IWL_D3_STATUS_RESET,
};

/**
 * enum iwl_trans_status - transport status flags
 * @STATUS_SYNC_HCMD_ACTIVE: a SYNC command is being processed
 * @STATUS_DEVICE_ENABLED: APM is enabled
 * @STATUS_TPOWER_PMI: the device might be asleep (need to wake it up)
 * @STATUS_INT_ENABLED: interrupts are enabled
 * @STATUS_RFKILL_HW: the actual HW state of the RF-kill switch
 * @STATUS_RFKILL_OPMODE: RF-kill state reported to opmode
 * @STATUS_FW_ERROR: the fw is in error state
 * @STATUS_TRANS_GOING_IDLE: shutting down the trans, only special commands
 *	are sent
 * @STATUS_TRANS_IDLE: the trans is idle - general commands are not to be sent
 * @STATUS_TRANS_DEAD: trans is dead - avoid any read/write operation
 */
enum iwl_trans_status {
	STATUS_SYNC_HCMD_ACTIVE,
	STATUS_DEVICE_ENABLED,
	STATUS_TPOWER_PMI,
	STATUS_INT_ENABLED,
	STATUS_RFKILL_HW,
	STATUS_RFKILL_OPMODE,
	STATUS_FW_ERROR,
	STATUS_TRANS_GOING_IDLE,
	STATUS_TRANS_IDLE,
	STATUS_TRANS_DEAD,
};

static inline int
iwl_trans_get_rb_size_order(enum iwl_amsdu_size rb_size)
{
	switch (rb_size) {
	case IWL_AMSDU_2K:
		return get_order(2 * 1024);
	case IWL_AMSDU_4K:
		return get_order(4 * 1024);
	case IWL_AMSDU_8K:
		return get_order(8 * 1024);
	case IWL_AMSDU_12K:
		return get_order(12 * 1024);
	default:
		WARN_ON(1);
		return -1;
	}
}

static inline int
iwl_trans_get_rb_size(enum iwl_amsdu_size rb_size)
{
	switch (rb_size) {
	case IWL_AMSDU_2K:
		return 2 * 1024;
	case IWL_AMSDU_4K:
		return 4 * 1024;
	case IWL_AMSDU_8K:
		return 8 * 1024;
	case IWL_AMSDU_12K:
		return 12 * 1024;
	default:
		WARN_ON(1);
		return 0;
	}
}

struct iwl_hcmd_names {
	u8 cmd_id;
	const char *const cmd_name;
};

#define HCMD_NAME(x)	\
	{ .cmd_id = x, .cmd_name = #x }

struct iwl_hcmd_arr {
	const struct iwl_hcmd_names *arr;
	int size;
};

#define HCMD_ARR(x)	\
	{ .arr = x, .size = ARRAY_SIZE(x) }
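
/*
 * Illustrative sketch (editor's example; ALIVE and ECHO_CMD are
 * placeholder command IDs): how an op_mode can describe its command names
 * for debugging. Each array must be sorted by cmd_id, which is checked by
 * iwl_cmd_groups_verify_sorted() declared later in this file.
 *
 *	static const struct iwl_hcmd_names my_legacy_names[] = {
 *		HCMD_NAME(ALIVE),
 *		HCMD_NAME(ECHO_CMD),
 *	};
 *
 *	static const struct iwl_hcmd_arr my_groups[] = {
 *		[0] = HCMD_ARR(my_legacy_names),
 *	};
 */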

/**
 * struct iwl_trans_config - transport configuration
 *
 * @op_mode: pointer to the upper layer.
 * @cmd_queue: the index of the command queue.
 *	Must be set before start_fw.
 * @cmd_fifo: the fifo for host commands
 * @cmd_q_wdg_timeout: the timeout of the watchdog timer for the command queue.
 * @no_reclaim_cmds: Some devices erroneously don't set the
 *	SEQ_RX_FRAME bit on some notifications, this is the
 *	list of such notifications to filter. Max length is
 *	%MAX_NO_RECLAIM_CMDS.
 * @n_no_reclaim_cmds: # of commands in list
 * @rx_buf_size: RX buffer size needed for A-MSDUs;
 *	if unset, 4k will be used as the RX buffer size
 * @bc_table_dword: set to true if the BC table expects the byte count to be
 *	in DWORD (as opposed to bytes)
 * @scd_set_active: should the transport configure the SCD for HCMD queue
 * @sw_csum_tx: transport should compute the TCP checksum
 * @command_groups: array of command groups, each member is an array of the
 *	commands in the group; for debugging only
 * @command_groups_size: number of command groups, to avoid illegal access
 * @cb_data_offs: offset inside skb->cb to store transport data at, must have
 *	space for at least two pointers
 */
struct iwl_trans_config {
	struct iwl_op_mode *op_mode;

	u8 cmd_queue;
	u8 cmd_fifo;
	unsigned int cmd_q_wdg_timeout;
	const u8 *no_reclaim_cmds;
	unsigned int n_no_reclaim_cmds;

	enum iwl_amsdu_size rx_buf_size;
	bool bc_table_dword;
	bool scd_set_active;
	bool sw_csum_tx;
	const struct iwl_hcmd_arr *command_groups;
	int command_groups_size;

	u8 cb_data_offs;
};
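
/*
 * Illustrative sketch (editor's example; all values are placeholders):
 * an op_mode filling the configuration and handing it to the transport
 * via iwl_trans_configure(), defined later in this file.
 *
 *	struct iwl_trans_config trans_cfg = {
 *		.op_mode = op_mode,
 *		.cmd_queue = 0,
 *		.cmd_fifo = 7,
 *		.rx_buf_size = IWL_AMSDU_4K,
 *		.command_groups = my_groups,
 *		.command_groups_size = ARRAY_SIZE(my_groups),
 *	};
 *
 *	iwl_trans_configure(trans, &trans_cfg);
 */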

struct iwl_trans_dump_data {
	u32 len;
	u8 data[];
};

struct iwl_trans;

struct iwl_trans_txq_scd_cfg {
	u8 fifo;
	u8 sta_id;
	u8 tid;
	bool aggregate;
	int frame_limit;
};

/**
 * struct iwl_trans_rxq_dma_data - RX queue DMA data
 * @fr_bd_cb: DMA address of free BD cyclic buffer
 * @fr_bd_wid: Initial write index of the free BD cyclic buffer
 * @urbd_stts_wrptr: DMA address of urbd_stts_wrptr
 * @ur_bd_cb: DMA address of used BD cyclic buffer
 */
struct iwl_trans_rxq_dma_data {
	u64 fr_bd_cb;
	u32 fr_bd_wid;
	u64 urbd_stts_wrptr;
	u64 ur_bd_cb;
};
481 
482 /**
483  * struct iwl_trans_ops - transport specific operations
484  *
485  * All the handlers MUST be implemented
486  *
487  * @start_hw: starts the HW. From that point on, the HW can send interrupts.
488  *	May sleep.
489  * @op_mode_leave: Turn off the HW RF kill indication if on
490  *	May sleep
491  * @start_fw: allocates and inits all the resources for the transport
492  *	layer. Also kick a fw image.
493  *	May sleep
494  * @fw_alive: called when the fw sends alive notification. If the fw provides
495  *	the SCD base address in SRAM, then provide it here, or 0 otherwise.
496  *	May sleep
497  * @stop_device: stops the whole device (embedded CPU put to reset) and stops
498  *	the HW. From that point on, the HW will be stopped but will still issue
499  *	an interrupt if the HW RF kill switch is triggered.
500  *	This callback must do the right thing and not crash even if %start_hw()
501  *	was called but not &start_fw(). May sleep.
502  * @d3_suspend: put the device into the correct mode for WoWLAN during
503  *	suspend. This is optional, if not implemented WoWLAN will not be
504  *	supported. This callback may sleep.
505  * @d3_resume: resume the device after WoWLAN, enabling the opmode to
506  *	talk to the WoWLAN image to get its status. This is optional, if not
507  *	implemented WoWLAN will not be supported. This callback may sleep.
508  * @send_cmd:send a host command. Must return -ERFKILL if RFkill is asserted.
509  *	If RFkill is asserted in the middle of a SYNC host command, it must
510  *	return -ERFKILL straight away.
511  *	May sleep only if CMD_ASYNC is not set
512  * @tx: send an skb. The transport relies on the op_mode to zero the
513  *	the ieee80211_tx_info->driver_data. If the MPDU is an A-MSDU, all
514  *	the CSUM will be taken care of (TCP CSUM and IP header in case of
515  *	IPv4). If the MPDU is a single MSDU, the op_mode must compute the IP
516  *	header if it is IPv4.
517  *	Must be atomic
518  * @reclaim: free packet until ssn. Returns a list of freed packets.
519  *	Must be atomic
520  * @txq_enable: setup a queue. To setup an AC queue, use the
521  *	iwl_trans_ac_txq_enable wrapper. fw_alive must have been called before
522  *	this one. The op_mode must not configure the HCMD queue. The scheduler
523  *	configuration may be %NULL, in which case the hardware will not be
524  *	configured. If true is returned, the operation mode needs to increment
525  *	the sequence number of the packets routed to this queue because of a
526  *	hardware scheduler bug. May sleep.
527  * @txq_disable: de-configure a Tx queue to send AMPDUs
528  *	Must be atomic
529  * @txq_set_shared_mode: change Tx queue shared/unshared marking
530  * @wait_tx_queues_empty: wait until tx queues are empty. May sleep.
531  * @wait_txq_empty: wait until specific tx queue is empty. May sleep.
532  * @freeze_txq_timer: prevents the timer of the queue from firing until the
533  *	queue is set to awake. Must be atomic.
534  * @block_txq_ptrs: stop updating the write pointers of the Tx queues. Note
535  *	that the transport needs to refcount the calls since this function
536  *	will be called several times with block = true, and then the queues
537  *	need to be unblocked only after the same number of calls with
538  *	block = false.
539  * @write8: write a u8 to a register at offset ofs from the BAR
540  * @write32: write a u32 to a register at offset ofs from the BAR
541  * @read32: read a u32 register at offset ofs from the BAR
542  * @read_prph: read a DWORD from a periphery register
543  * @write_prph: write a DWORD to a periphery register
544  * @read_mem: read device's SRAM in DWORD
545  * @write_mem: write device's SRAM in DWORD. If %buf is %NULL, then the memory
546  *	will be zeroed.
547  * @configure: configure parameters required by the transport layer from
548  *	the op_mode. May be called several times before start_fw, can't be
549  *	called after that.
550  * @set_pmi: set the power pmi state
551  * @grab_nic_access: wake the NIC to be able to access non-HBUS regs.
552  *	Sleeping is not allowed between grab_nic_access and
553  *	release_nic_access.
554  * @release_nic_access: let the NIC go to sleep. The "flags" parameter
555  *	must be the same one that was sent before to the grab_nic_access.
556  * @set_bits_mask - set SRAM register according to value and mask.
557  * @dump_data: return a vmalloc'ed buffer with debug data, maybe containing last
558  *	TX'ed commands and similar. The buffer will be vfree'd by the caller.
559  *	Note that the transport must fill in the proper file headers.
560  * @debugfs_cleanup: used in the driver unload flow to make a proper cleanup
561  *	of the trans debugfs
562  */
struct iwl_trans_ops {

	int (*start_hw)(struct iwl_trans *iwl_trans);
	void (*op_mode_leave)(struct iwl_trans *iwl_trans);
	int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw,
			bool run_in_rfkill);
	void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr);
	void (*stop_device)(struct iwl_trans *trans);

	int (*d3_suspend)(struct iwl_trans *trans, bool test, bool reset);
	int (*d3_resume)(struct iwl_trans *trans, enum iwl_d3_status *status,
			 bool test, bool reset);

	int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd);

	int (*tx)(struct iwl_trans *trans, struct sk_buff *skb,
		  struct iwl_device_tx_cmd *dev_cmd, int queue);
	void (*reclaim)(struct iwl_trans *trans, int queue, int ssn,
			struct sk_buff_head *skbs);

	void (*set_q_ptrs)(struct iwl_trans *trans, int queue, int ptr);

	bool (*txq_enable)(struct iwl_trans *trans, int queue, u16 ssn,
			   const struct iwl_trans_txq_scd_cfg *cfg,
			   unsigned int queue_wdg_timeout);
	void (*txq_disable)(struct iwl_trans *trans, int queue,
			    bool configure_scd);
	/* 22000 functions */
	int (*txq_alloc)(struct iwl_trans *trans,
			 __le16 flags, u8 sta_id, u8 tid,
			 int cmd_id, int size,
			 unsigned int queue_wdg_timeout);
	void (*txq_free)(struct iwl_trans *trans, int queue);
	int (*rxq_dma_data)(struct iwl_trans *trans, int queue,
			    struct iwl_trans_rxq_dma_data *data);

	void (*txq_set_shared_mode)(struct iwl_trans *trans, u32 txq_id,
				    bool shared);

	int (*wait_tx_queues_empty)(struct iwl_trans *trans, u32 txq_bm);
	int (*wait_txq_empty)(struct iwl_trans *trans, int queue);
	void (*freeze_txq_timer)(struct iwl_trans *trans, unsigned long txqs,
				 bool freeze);
	void (*block_txq_ptrs)(struct iwl_trans *trans, bool block);

	void (*write8)(struct iwl_trans *trans, u32 ofs, u8 val);
	void (*write32)(struct iwl_trans *trans, u32 ofs, u32 val);
	u32 (*read32)(struct iwl_trans *trans, u32 ofs);
	u32 (*read_prph)(struct iwl_trans *trans, u32 ofs);
	void (*write_prph)(struct iwl_trans *trans, u32 ofs, u32 val);
	int (*read_mem)(struct iwl_trans *trans, u32 addr,
			void *buf, int dwords);
	int (*write_mem)(struct iwl_trans *trans, u32 addr,
			 const void *buf, int dwords);
	void (*configure)(struct iwl_trans *trans,
			  const struct iwl_trans_config *trans_cfg);
	void (*set_pmi)(struct iwl_trans *trans, bool state);
	void (*sw_reset)(struct iwl_trans *trans);
	bool (*grab_nic_access)(struct iwl_trans *trans, unsigned long *flags);
	void (*release_nic_access)(struct iwl_trans *trans,
				   unsigned long *flags);
	void (*set_bits_mask)(struct iwl_trans *trans, u32 reg, u32 mask,
			      u32 value);
	int  (*suspend)(struct iwl_trans *trans);
	void (*resume)(struct iwl_trans *trans);

	struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans,
						 u32 dump_mask);
	void (*debugfs_cleanup)(struct iwl_trans *trans);
	void (*sync_nmi)(struct iwl_trans *trans);
};

/**
 * enum iwl_trans_state - state of the transport layer
 *
 * @IWL_TRANS_NO_FW: no fw has sent an alive response
 * @IWL_TRANS_FW_ALIVE: a fw has sent an alive response
 */
enum iwl_trans_state {
	IWL_TRANS_NO_FW = 0,
	IWL_TRANS_FW_ALIVE	= 1,
};

/**
 * DOC: Platform power management
 *
 * In system-wide power management the entire platform goes into a low
 * power state (e.g. idle or suspend to RAM) at the same time and the
 * device is configured as a wakeup source for the entire platform.
 * This is usually triggered by userspace activity (e.g. the user
 * presses the suspend button or a power management daemon decides to
 * put the platform in low power mode).  The device's behavior in this
 * mode is dictated by the wake-on-WLAN configuration.
 *
 * The terms used for the device's behavior are as follows:
 *
 *	- D0: the device is fully powered and the host is awake;
 *	- D3: the device is in low power mode and only reacts to
 *		specific events (e.g. magic-packet received or scan
 *		results found);
 *
 * These terms reflect the power modes in the firmware and are not to
 * be confused with the physical device power state.
 */

/**
 * enum iwl_plat_pm_mode - platform power management mode
 *
 * This enumeration describes the device's platform power management
 * behavior when in system-wide suspend (i.e. WoWLAN).
 *
 * @IWL_PLAT_PM_MODE_DISABLED: power management is disabled for this
 *	device.  In system-wide suspend mode, it means that all
 *	connections will be closed automatically by mac80211 before
 *	the platform is suspended.
 * @IWL_PLAT_PM_MODE_D3: the device goes into D3 mode (i.e. WoWLAN).
 */
enum iwl_plat_pm_mode {
	IWL_PLAT_PM_MODE_DISABLED,
	IWL_PLAT_PM_MODE_D3,
};

/**
 * enum iwl_ini_cfg_state
 * @IWL_INI_CFG_STATE_NOT_LOADED: no debug cfg was given
 * @IWL_INI_CFG_STATE_LOADED: debug cfg was found and loaded
 * @IWL_INI_CFG_STATE_CORRUPTED: debug cfg was found and some of the TLVs
 *	are corrupted. The rest of the debug TLVs will still be used
 */
enum iwl_ini_cfg_state {
	IWL_INI_CFG_STATE_NOT_LOADED,
	IWL_INI_CFG_STATE_LOADED,
	IWL_INI_CFG_STATE_CORRUPTED,
};

/* Max time to wait for nmi interrupt */
#define IWL_TRANS_NMI_TIMEOUT (HZ / 4)

/**
 * struct iwl_dram_data
 * @physical: page phy pointer
 * @block: pointer to the allocated block/page
 * @size: size of the block/page
 */
struct iwl_dram_data {
	dma_addr_t physical;
	void *block;
	int size;
};

/**
 * struct iwl_fw_mon - fw monitor per allocation id
 * @num_frags: number of fragments
 * @frags: an array of DRAM buffer fragments
 */
struct iwl_fw_mon {
	u32 num_frags;
	struct iwl_dram_data *frags;
};

/**
 * struct iwl_self_init_dram - dram data used by self init process
 * @fw: lmac and umac dram data
 * @fw_cnt: total number of items in array
 * @paging: paging dram data
 * @paging_cnt: total number of items in array
 */
struct iwl_self_init_dram {
	struct iwl_dram_data *fw;
	int fw_cnt;
	struct iwl_dram_data *paging;
	int paging_cnt;
};

/**
 * struct iwl_trans_debug - transport debug related data
 *
 * @n_dest_reg: num of reg_ops in %dbg_dest_tlv
 * @rec_on: true iff there is a fw debug recording currently active
 * @dest_tlv: points to the destination TLV for debug
 * @conf_tlv: array of pointers to configuration TLVs for debug
 * @trigger_tlv: array of pointers to triggers TLVs for debug
 * @lmac_error_event_table: addrs of lmacs error tables
 * @umac_error_event_table: addr of umac error table
 * @error_event_table_tlv_status: bitmap that indicates which error table
 *	pointers were received via TLV. Uses &enum iwl_error_event_table_status
 * @internal_ini_cfg: internal debug cfg state. Uses &enum iwl_ini_cfg_state
 * @external_ini_cfg: external debug cfg state. Uses &enum iwl_ini_cfg_state
 * @fw_mon_cfg: debug buffer allocation configuration
 * @fw_mon_ini: DRAM buffer fragments per allocation id
 * @fw_mon: DRAM buffer for firmware monitor
 * @hw_error: equals true if hw error interrupt was received from the FW
 * @ini_dest: debug monitor destination; uses &enum iwl_fw_ini_buffer_location
 * @active_regions: active regions
 * @debug_info_tlv_list: list of debug info TLVs
 * @time_point: array of debug time points
 * @periodic_trig_list: periodic triggers list
 * @domains_bitmap: bitmap of active domains other than
 *	&IWL_FW_INI_DOMAIN_ALWAYS_ON
 */
struct iwl_trans_debug {
	u8 n_dest_reg;
	bool rec_on;

	const struct iwl_fw_dbg_dest_tlv_v1 *dest_tlv;
	const struct iwl_fw_dbg_conf_tlv *conf_tlv[FW_DBG_CONF_MAX];
	struct iwl_fw_dbg_trigger_tlv * const *trigger_tlv;

	u32 lmac_error_event_table[2];
	u32 umac_error_event_table;
	unsigned int error_event_table_tlv_status;

	enum iwl_ini_cfg_state internal_ini_cfg;
	enum iwl_ini_cfg_state external_ini_cfg;

	struct iwl_fw_ini_allocation_tlv fw_mon_cfg[IWL_FW_INI_ALLOCATION_NUM];
	struct iwl_fw_mon fw_mon_ini[IWL_FW_INI_ALLOCATION_NUM];

	struct iwl_dram_data fw_mon;

	bool hw_error;
	enum iwl_fw_ini_buffer_location ini_dest;

	struct iwl_ucode_tlv *active_regions[IWL_FW_INI_MAX_REGION_ID];
	struct list_head debug_info_tlv_list;
	struct iwl_dbg_tlv_time_point_data
		time_point[IWL_FW_INI_TIME_POINT_NUM];
	struct list_head periodic_trig_list;

	u32 domains_bitmap;
};

/**
 * struct iwl_trans - transport common data
 *
 * @ops: pointer to iwl_trans_ops
 * @op_mode: pointer to the op_mode
 * @trans_cfg: the trans-specific configuration part
 * @cfg: pointer to the configuration
 * @drv: pointer to iwl_drv
 * @status: a bit-mask of transport status flags
 * @dev: pointer to struct device * that represents the device
 * @max_skb_frags: maximum number of fragments an SKB can have when transmitted.
 *	0 indicates that frag SKBs (NETIF_F_SG) aren't supported.
 * @hw_rf_id: a u32 with the device RF ID
 * @hw_id: a u32 with the ID of the device / sub-device.
 *	Set during transport allocation.
 * @hw_id_str: a string with info about HW ID. Set during transport allocation.
 * @pm_support: set to true in start_hw if link pm is supported
 * @ltr_enabled: set to true if the LTR is enabled
 * @wide_cmd_header: true when ucode supports wide command header format
 * @num_rx_queues: number of RX queues allocated by the transport;
 *	the transport must set this before calling iwl_drv_start()
 * @iml_len: the length of the image loader
 * @iml: a pointer to the image loader itself
 * @dev_cmd_pool: pool for Tx cmd allocation - for internal use only.
 *	The user should use iwl_trans_{alloc,free}_tx_cmd.
 * @rx_mpdu_cmd: MPDU RX command ID, must be assigned by opmode before
 *	starting the firmware, used for tracing
 * @rx_mpdu_cmd_hdr_size: used for tracing, amount of data before the
 *	start of the 802.11 header in the @rx_mpdu_cmd
 * @system_pm_mode: the system-wide power management mode in use.
 *	This mode is set dynamically, depending on the WoWLAN values
 *	configured from the userspace at runtime.
 */
struct iwl_trans {
	const struct iwl_trans_ops *ops;
	struct iwl_op_mode *op_mode;
	const struct iwl_cfg_trans_params *trans_cfg;
	const struct iwl_cfg *cfg;
	struct iwl_drv *drv;
	enum iwl_trans_state state;
	unsigned long status;

	struct device *dev;
	u32 max_skb_frags;
	u32 hw_rev;
	u32 hw_rf_id;
	u32 hw_id;
	char hw_id_str[52];

	u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size;

	bool pm_support;
	bool ltr_enabled;

	const struct iwl_hcmd_arr *command_groups;
	int command_groups_size;
	bool wide_cmd_header;

	u8 num_rx_queues;

	size_t iml_len;
	u8 *iml;

	/* The following fields are internal only */
	struct kmem_cache *dev_cmd_pool;
	char dev_cmd_pool_name[50];

	struct dentry *dbgfs_dir;

#ifdef CONFIG_LOCKDEP
	struct lockdep_map sync_cmd_lockdep_map;
#endif

	struct iwl_trans_debug dbg;
	struct iwl_self_init_dram init_dram;

	enum iwl_plat_pm_mode system_pm_mode;

	const char *name;

	/* pointer to trans specific struct */
	/* Ensure that this pointer will always be aligned to sizeof pointer */
	char trans_specific[0] __aligned(sizeof(void *));
};

const char *iwl_get_cmd_string(struct iwl_trans *trans, u32 id);
int iwl_cmd_groups_verify_sorted(const struct iwl_trans_config *trans);

static inline void iwl_trans_configure(struct iwl_trans *trans,
				       const struct iwl_trans_config *trans_cfg)
{
	trans->op_mode = trans_cfg->op_mode;

	trans->ops->configure(trans, trans_cfg);
	WARN_ON(iwl_cmd_groups_verify_sorted(trans_cfg));
}

static inline int iwl_trans_start_hw(struct iwl_trans *trans)
{
	might_sleep();

	return trans->ops->start_hw(trans);
}

static inline void iwl_trans_op_mode_leave(struct iwl_trans *trans)
{
	might_sleep();

	if (trans->ops->op_mode_leave)
		trans->ops->op_mode_leave(trans);

	trans->op_mode = NULL;

	trans->state = IWL_TRANS_NO_FW;
}

static inline void iwl_trans_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
	might_sleep();

	trans->state = IWL_TRANS_FW_ALIVE;

	trans->ops->fw_alive(trans, scd_addr);
}

static inline int iwl_trans_start_fw(struct iwl_trans *trans,
				     const struct fw_img *fw,
				     bool run_in_rfkill)
{
	might_sleep();

	WARN_ON_ONCE(!trans->rx_mpdu_cmd);

	clear_bit(STATUS_FW_ERROR, &trans->status);
	return trans->ops->start_fw(trans, fw, run_in_rfkill);
}

static inline void iwl_trans_stop_device(struct iwl_trans *trans)
{
	might_sleep();

	trans->ops->stop_device(trans);

	trans->state = IWL_TRANS_NO_FW;
}

static inline int iwl_trans_d3_suspend(struct iwl_trans *trans, bool test,
				       bool reset)
{
	might_sleep();
	if (!trans->ops->d3_suspend)
		return 0;

	return trans->ops->d3_suspend(trans, test, reset);
}

static inline int iwl_trans_d3_resume(struct iwl_trans *trans,
				      enum iwl_d3_status *status,
				      bool test, bool reset)
{
	might_sleep();
	if (!trans->ops->d3_resume)
		return 0;

	return trans->ops->d3_resume(trans, status, test, reset);
}

static inline int iwl_trans_suspend(struct iwl_trans *trans)
{
	if (!trans->ops->suspend)
		return 0;

	return trans->ops->suspend(trans);
}

static inline void iwl_trans_resume(struct iwl_trans *trans)
{
	if (trans->ops->resume)
		trans->ops->resume(trans);
}

static inline struct iwl_trans_dump_data *
iwl_trans_dump_data(struct iwl_trans *trans, u32 dump_mask)
{
	if (!trans->ops->dump_data)
		return NULL;
	return trans->ops->dump_data(trans, dump_mask);
}

static inline struct iwl_device_tx_cmd *
iwl_trans_alloc_tx_cmd(struct iwl_trans *trans)
{
	return kmem_cache_zalloc(trans->dev_cmd_pool, GFP_ATOMIC);
}

int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);

static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
					 struct iwl_device_tx_cmd *dev_cmd)
{
	kmem_cache_free(trans->dev_cmd_pool, dev_cmd);
}

static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
			       struct iwl_device_tx_cmd *dev_cmd, int queue)
{
	if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
		return -EIO;

	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return -EIO;
	}

	return trans->ops->tx(trans, skb, dev_cmd, queue);
}
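
/*
 * Illustrative TX-path sketch (editor's example; txq_id is a placeholder
 * and error handling is abbreviated): the op_mode allocates a TX command,
 * fills it, and hands the skb to the transport. If the transport rejects
 * the frame, the command must be freed by the caller.
 *
 *	struct iwl_device_tx_cmd *dev_cmd = iwl_trans_alloc_tx_cmd(trans);
 *
 *	if (!dev_cmd)
 *		return -ENOMEM;
 *	... fill dev_cmd->payload ...
 *	if (iwl_trans_tx(trans, skb, dev_cmd, txq_id))
 *		iwl_trans_free_tx_cmd(trans, dev_cmd);
 */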

static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue,
				     int ssn, struct sk_buff_head *skbs)
{
	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return;
	}

	trans->ops->reclaim(trans, queue, ssn, skbs);
}

static inline void iwl_trans_set_q_ptrs(struct iwl_trans *trans, int queue,
					int ptr)
{
	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return;
	}

	trans->ops->set_q_ptrs(trans, queue, ptr);
}

static inline void iwl_trans_txq_disable(struct iwl_trans *trans, int queue,
					 bool configure_scd)
{
	trans->ops->txq_disable(trans, queue, configure_scd);
}

static inline bool
iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn,
			 const struct iwl_trans_txq_scd_cfg *cfg,
			 unsigned int queue_wdg_timeout)
{
	might_sleep();

	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return false;
	}

	return trans->ops->txq_enable(trans, queue, ssn,
				      cfg, queue_wdg_timeout);
}

static inline int
iwl_trans_get_rxq_dma_data(struct iwl_trans *trans, int queue,
			   struct iwl_trans_rxq_dma_data *data)
{
	if (WARN_ON_ONCE(!trans->ops->rxq_dma_data))
		return -ENOTSUPP;

	return trans->ops->rxq_dma_data(trans, queue, data);
}

static inline void
iwl_trans_txq_free(struct iwl_trans *trans, int queue)
{
	if (WARN_ON_ONCE(!trans->ops->txq_free))
		return;

	trans->ops->txq_free(trans, queue);
}

static inline int
iwl_trans_txq_alloc(struct iwl_trans *trans,
		    __le16 flags, u8 sta_id, u8 tid,
		    int cmd_id, int size,
		    unsigned int wdg_timeout)
{
	might_sleep();

	if (WARN_ON_ONCE(!trans->ops->txq_alloc))
		return -ENOTSUPP;

	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return -EIO;
	}

	return trans->ops->txq_alloc(trans, flags, sta_id, tid,
				     cmd_id, size, wdg_timeout);
}

static inline void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans,
						 int queue, bool shared_mode)
{
	if (trans->ops->txq_set_shared_mode)
		trans->ops->txq_set_shared_mode(trans, queue, shared_mode);
}

static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
					int fifo, int sta_id, int tid,
					int frame_limit, u16 ssn,
					unsigned int queue_wdg_timeout)
{
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = fifo,
		.sta_id = sta_id,
		.tid = tid,
		.frame_limit = frame_limit,
		.aggregate = sta_id >= 0,
	};

	iwl_trans_txq_enable_cfg(trans, queue, ssn, &cfg, queue_wdg_timeout);
}

static inline
void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue, int fifo,
			     unsigned int queue_wdg_timeout)
{
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = fifo,
		.sta_id = -1,
		.tid = IWL_MAX_TID_COUNT,
		.frame_limit = IWL_FRAME_LIMIT,
		.aggregate = false,
	};

	iwl_trans_txq_enable_cfg(trans, queue, 0, &cfg, queue_wdg_timeout);
}

static inline void iwl_trans_freeze_txq_timer(struct iwl_trans *trans,
					      unsigned long txqs,
					      bool freeze)
{
	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return;
	}

	if (trans->ops->freeze_txq_timer)
		trans->ops->freeze_txq_timer(trans, txqs, freeze);
}

static inline void iwl_trans_block_txq_ptrs(struct iwl_trans *trans,
					    bool block)
{
	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return;
	}

	if (trans->ops->block_txq_ptrs)
		trans->ops->block_txq_ptrs(trans, block);
}

static inline int iwl_trans_wait_tx_queues_empty(struct iwl_trans *trans,
						 u32 txqs)
{
	if (WARN_ON_ONCE(!trans->ops->wait_tx_queues_empty))
		return -ENOTSUPP;

	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return -EIO;
	}

	return trans->ops->wait_tx_queues_empty(trans, txqs);
}

static inline int iwl_trans_wait_txq_empty(struct iwl_trans *trans, int queue)
{
	if (WARN_ON_ONCE(!trans->ops->wait_txq_empty))
		return -ENOTSUPP;

	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return -EIO;
	}

	return trans->ops->wait_txq_empty(trans, queue);
}

static inline void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
	trans->ops->write8(trans, ofs, val);
}

static inline void iwl_trans_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
	trans->ops->write32(trans, ofs, val);
}

static inline u32 iwl_trans_read32(struct iwl_trans *trans, u32 ofs)
{
	return trans->ops->read32(trans, ofs);
}

static inline u32 iwl_trans_read_prph(struct iwl_trans *trans, u32 ofs)
{
	return trans->ops->read_prph(trans, ofs);
}

static inline void iwl_trans_write_prph(struct iwl_trans *trans, u32 ofs,
					u32 val)
{
	return trans->ops->write_prph(trans, ofs, val);
}

static inline int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr,
				     void *buf, int dwords)
{
	return trans->ops->read_mem(trans, addr, buf, dwords);
}

#define iwl_trans_read_mem_bytes(trans, addr, buf, bufsize)		      \
	do {								      \
		if (__builtin_constant_p(bufsize))			      \
			BUILD_BUG_ON((bufsize) % sizeof(u32));		      \
		iwl_trans_read_mem(trans, addr, buf, (bufsize) / sizeof(u32));\
	} while (0)
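
/*
 * Usage sketch (editor's example; "addr" is a placeholder): reading a
 * buffer out of SRAM. For constant sizes the macro enforces at build time
 * that bufsize is a multiple of sizeof(u32).
 *
 *	u32 buf[4];
 *
 *	iwl_trans_read_mem_bytes(trans, addr, buf, sizeof(buf));
 */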

static inline u32 iwl_trans_read_mem32(struct iwl_trans *trans, u32 addr)
{
	u32 value;

	if (WARN_ON(iwl_trans_read_mem(trans, addr, &value, 1)))
		return 0xa5a5a5a5;

	return value;
}

static inline int iwl_trans_write_mem(struct iwl_trans *trans, u32 addr,
				      const void *buf, int dwords)
{
	return trans->ops->write_mem(trans, addr, buf, dwords);
}

static inline u32 iwl_trans_write_mem32(struct iwl_trans *trans, u32 addr,
					u32 val)
{
	return iwl_trans_write_mem(trans, addr, &val, 1);
}

static inline void iwl_trans_set_pmi(struct iwl_trans *trans, bool state)
{
	if (trans->ops->set_pmi)
		trans->ops->set_pmi(trans, state);
}

static inline void iwl_trans_sw_reset(struct iwl_trans *trans)
{
	if (trans->ops->sw_reset)
		trans->ops->sw_reset(trans);
}

static inline void
iwl_trans_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value)
{
	trans->ops->set_bits_mask(trans, reg, mask, value);
}

#define iwl_trans_grab_nic_access(trans, flags)	\
	__cond_lock(nic_access,				\
		    likely((trans)->ops->grab_nic_access(trans, flags)))

static inline void __releases(nic_access)
iwl_trans_release_nic_access(struct iwl_trans *trans, unsigned long *flags)
{
	trans->ops->release_nic_access(trans, flags);
	__release(nic_access);
}
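
/*
 * Illustrative sketch (editor's example; "reg" is a placeholder): the
 * grab/release bracket around register access while the NIC may be in a
 * low-power state. Sleeping is not allowed between the two calls.
 *
 *	unsigned long flags;
 *
 *	if (iwl_trans_grab_nic_access(trans, &flags)) {
 *		u32 val = iwl_trans_read_prph(trans, reg);
 *		...
 *		iwl_trans_release_nic_access(trans, &flags);
 *	}
 */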

static inline void iwl_trans_fw_error(struct iwl_trans *trans)
{
	if (WARN_ON_ONCE(!trans->op_mode))
		return;

	/* prevent double restarts due to the same erroneous FW */
	if (!test_and_set_bit(STATUS_FW_ERROR, &trans->status))
		iwl_op_mode_nic_error(trans->op_mode);
}

static inline bool iwl_trans_fw_running(struct iwl_trans *trans)
{
	return trans->state == IWL_TRANS_FW_ALIVE;
}

static inline void iwl_trans_sync_nmi(struct iwl_trans *trans)
{
	if (trans->ops->sync_nmi)
		trans->ops->sync_nmi(trans);
}

static inline bool iwl_trans_dbg_ini_valid(struct iwl_trans *trans)
{
	return trans->dbg.internal_ini_cfg != IWL_INI_CFG_STATE_NOT_LOADED ||
		trans->dbg.external_ini_cfg != IWL_INI_CFG_STATE_NOT_LOADED;
}

/*****************************************************
 * transport helper functions
 *****************************************************/
struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
				  struct device *dev,
				  const struct iwl_trans_ops *ops,
				  unsigned int cmd_pool_size,
				  unsigned int cmd_pool_align);
void iwl_trans_free(struct iwl_trans *trans);
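
/*
 * Illustrative sketch (editor's example; struct my_bus_trans and
 * my_trans_ops are hypothetical bus-driver types): how a bus driver's
 * probe might allocate the transport, reserving priv_size bytes in
 * trans_specific and sizing/aligning the dev_cmd_pool entries.
 *
 *	trans = iwl_trans_alloc(sizeof(struct my_bus_trans), &pdev->dev,
 *				&my_trans_ops,
 *				sizeof(struct iwl_device_tx_cmd),
 *				sizeof(void *));
 */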

/*****************************************************
 * driver (transport) register/unregister functions
 *****************************************************/
int __must_check iwl_pci_register_driver(void);
void iwl_pci_unregister_driver(void);

#endif /* __iwl_trans_h__ */