/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
 * Copyright (C) 2005-2014, 2018-2021, 2024 Intel Corporation
 * Copyright (C) 2013-2014 Intel Mobile Communications GmbH
 * Copyright (C) 2015 Intel Deutschland GmbH
 */
#ifndef __iwl_op_mode_h__
#define __iwl_op_mode_h__

#include <linux/netdevice.h>
#include <linux/debugfs.h>
#include "iwl-dbg-tlv.h"

struct iwl_op_mode;
struct iwl_trans;
struct sk_buff;
struct iwl_device_cmd;
struct iwl_rx_cmd_buffer;
struct iwl_fw;
struct iwl_cfg;

/**
 * DOC: Operational mode - what is it?
 *
 * The operational mode (a.k.a. op_mode) is the layer that implements
 * mac80211's handlers. It knows two APIs: mac80211's and the fw's. It uses
 * the transport API to access the HW. The op_mode doesn't need to know how
 * the underlying HW works, since the transport layer takes care of that.
 *
 * There can be several op_modes: e.g. different fw APIs will require
 * different op_modes. This is why the op_mode is virtualized.
 */

/**
 * DOC: Life cycle of the Operational mode
 *
 * The operational mode has a very simple life cycle.
 *
 * 1) The driver layer (iwl-drv.c) chooses the op_mode based on the
 *    capabilities advertised by the fw file (in TLV format).
 * 2) The driver layer starts the op_mode (ops->start).
 * 3) The op_mode registers with mac80211.
 * 4) The op_mode is governed by mac80211.
 * 5) The driver layer stops the op_mode.
 */
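
/*
 * Illustrative sketch only (not part of the API): roughly what the driver
 * layer does with the chosen op_mode ops over this life cycle. The "drv"
 * and "dbgfs_dir" variables below are hypothetical placeholders for
 * iwl-drv.c internals; error handling is reduced to the bare minimum.
 *
 *	struct iwl_op_mode *op_mode;
 *
 *	// steps 1) and 2): the driver layer picked "ops" among the
 *	// registered op_modes and starts it
 *	op_mode = ops->start(drv->trans, drv->cfg, drv->fw, dbgfs_dir);
 *	if (!op_mode)
 *		return -ENODEV;	// assumption: NULL means start failed
 *
 *	// steps 3) and 4): inside ops->start() the op_mode registered with
 *	// mac80211 and is now driven by mac80211's callbacks
 *
 *	// step 5): eventually the driver layer stops the op_mode again
 *	iwl_op_mode_stop(op_mode);
 */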

/**
 * struct iwl_op_mode_ops - op_mode specific operations
 *
 * The op_mode exports its ops so that external components can start it and
 * interact with it. The driver layer typically calls the start and stop
 * handlers, the transport layer calls the others.
 *
 * All the handlers MUST be implemented, except @rx_rss which can be left
 * out *iff* the opmode will never run on hardware with multi-queue capability.
 *
 * @start: start the op_mode. The transport layer is already allocated.
 *	May sleep.
 * @stop: stop the op_mode. Must free all the memory allocated.
 *	May sleep.
 * @rx: Rx notification to the op_mode. rxb is the Rx buffer itself. Cmd is the
 *	HCMD this Rx responds to. Can't sleep.
 * @rx_rss: data queue RX notification to the op_mode, for (data) notifications
 *	received on the RSS queue(s). The queue parameter indicates which of the
 *	RSS queues received this frame; it will always be non-zero.
 *	This method must not sleep.
 * @queue_full: notifies that a HW queue is full.
 *	Must be atomic and called with BH disabled.
 * @queue_not_full: notifies that a HW queue is not full any more.
 *	Must be atomic and called with BH disabled.
 * @hw_rf_kill: notifies of a change in the HW rf kill switch. True means that
 *	the radio is killed. Return %true if the device should be stopped by
 *	the transport immediately after the call. May sleep.
 *	Note that this must not return %true for newer devices using gen2 PCIe
 *	transport.
 * @free_skb: allows the transport layer to free skbs that haven't been
 *	reclaimed by the op_mode. This can happen when the driver is freed and
 *	there are Tx packets pending in the transport layer.
 *	Must be atomic.
 * @nic_error: error notification. Must be atomic and must be called with BH
 *	disabled, unless the sync parameter is true.
 * @cmd_queue_full: Called when the command queue gets full. Must be atomic and
 *	called with BH disabled.
 * @nic_config: configure NIC, called before firmware is started.
 *	May sleep.
 * @wimax_active: invoked when WiMax becomes active. May sleep.
 * @time_point: called when the transport layer wants to collect debug data.
 * @device_powered_off: called upon resume from hibernation, but not only then.
 *	The op_mode needs to reset its internal state because the device did
 *	not survive the system state transition. The firmware is no longer
 *	running, etc...
 */
struct iwl_op_mode_ops {
	struct iwl_op_mode *(*start)(struct iwl_trans *trans,
				     const struct iwl_cfg *cfg,
				     const struct iwl_fw *fw,
				     struct dentry *dbgfs_dir);
	void (*stop)(struct iwl_op_mode *op_mode);
	void (*rx)(struct iwl_op_mode *op_mode, struct napi_struct *napi,
		   struct iwl_rx_cmd_buffer *rxb);
	void (*rx_rss)(struct iwl_op_mode *op_mode, struct napi_struct *napi,
		       struct iwl_rx_cmd_buffer *rxb, unsigned int queue);
	void (*queue_full)(struct iwl_op_mode *op_mode, int queue);
	void (*queue_not_full)(struct iwl_op_mode *op_mode, int queue);
	bool (*hw_rf_kill)(struct iwl_op_mode *op_mode, bool state);
	void (*free_skb)(struct iwl_op_mode *op_mode, struct sk_buff *skb);
	void (*nic_error)(struct iwl_op_mode *op_mode, bool sync);
	void (*cmd_queue_full)(struct iwl_op_mode *op_mode);
	void (*nic_config)(struct iwl_op_mode *op_mode);
	void (*wimax_active)(struct iwl_op_mode *op_mode);
	void (*time_point)(struct iwl_op_mode *op_mode,
			   enum iwl_fw_ini_time_point tp_id,
			   union iwl_dbg_tlv_tp_data *tp_data);
	void (*device_powered_off)(struct iwl_op_mode *op_mode);
};
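
/*
 * Example (hypothetical, for illustration only): a minimal ops table for an
 * op_mode that never runs on multi-queue hardware, so @rx_rss is left out.
 * Every function prefixed with example_ is made up and does not exist in
 * the driver; each one would implement the corresponding handler documented
 * above.
 *
 *	static const struct iwl_op_mode_ops iwl_example_ops = {
 *		.start = example_start,
 *		.stop = example_stop,
 *		.rx = example_rx,
 *		.queue_full = example_queue_full,
 *		.queue_not_full = example_queue_not_full,
 *		.hw_rf_kill = example_hw_rf_kill,
 *		.free_skb = example_free_skb,
 *		.nic_error = example_nic_error,
 *		.cmd_queue_full = example_cmd_queue_full,
 *		.nic_config = example_nic_config,
 *		.wimax_active = example_wimax_active,
 *		.time_point = example_time_point,
 *		.device_powered_off = example_device_powered_off,
 *	};
 */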

int iwl_opmode_register(const char *name, const struct iwl_op_mode_ops *ops);
void iwl_opmode_deregister(const char *name);
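
/*
 * Example (hypothetical): how an op_mode module would register its ops with
 * the driver layer on load and remove them again on unload. The module name
 * "iwlexample" and iwl_example_ops are placeholders, not real symbols.
 *
 *	static int __init iwl_example_init(void)
 *	{
 *		return iwl_opmode_register("iwlexample", &iwl_example_ops);
 *	}
 *	module_init(iwl_example_init);
 *
 *	static void __exit iwl_example_exit(void)
 *	{
 *		iwl_opmode_deregister("iwlexample");
 *	}
 *	module_exit(iwl_example_exit);
 */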

/**
 * struct iwl_op_mode - operational mode
 * @ops: pointer to its own ops
 * @op_mode_specific: per-op_mode private data, laid out right after this
 *	struct
 *
 * This holds an implementation of the mac80211 / fw API.
 */
struct iwl_op_mode {
	const struct iwl_op_mode_ops *ops;

	char op_mode_specific[] __aligned(sizeof(void *));
};
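
/*
 * Example (hypothetical): an op_mode usually keeps its private state right
 * behind struct iwl_op_mode, in @op_mode_specific. The struct example_priv
 * and the allocation below are illustrative only; real op_modes size and
 * allocate this differently (e.g. together with the mac80211 hw).
 *
 *	struct example_priv {
 *		struct iwl_op_mode *op_mode;
 *		// ... op_mode private state ...
 *	};
 *
 *	op_mode = kzalloc(sizeof(*op_mode) + sizeof(struct example_priv),
 *			  GFP_KERNEL);
 *	if (!op_mode)
 *		return NULL;
 *	op_mode->ops = &iwl_example_ops;
 *
 *	// the private data lives in the flexible array member
 *	priv = (struct example_priv *)op_mode->op_mode_specific;
 *	priv->op_mode = op_mode;
 */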

static inline void iwl_op_mode_stop(struct iwl_op_mode *op_mode)
{
	might_sleep();
	op_mode->ops->stop(op_mode);
}

static inline void iwl_op_mode_rx(struct iwl_op_mode *op_mode,
				  struct napi_struct *napi,
				  struct iwl_rx_cmd_buffer *rxb)
{
	return op_mode->ops->rx(op_mode, napi, rxb);
}

static inline void iwl_op_mode_rx_rss(struct iwl_op_mode *op_mode,
				      struct napi_struct *napi,
				      struct iwl_rx_cmd_buffer *rxb,
				      unsigned int queue)
{
	op_mode->ops->rx_rss(op_mode, napi, rxb, queue);
}

static inline void iwl_op_mode_queue_full(struct iwl_op_mode *op_mode,
					  int queue)
{
	op_mode->ops->queue_full(op_mode, queue);
}

static inline void iwl_op_mode_queue_not_full(struct iwl_op_mode *op_mode,
					      int queue)
{
	op_mode->ops->queue_not_full(op_mode, queue);
}

static inline bool __must_check
iwl_op_mode_hw_rf_kill(struct iwl_op_mode *op_mode, bool state)
{
	might_sleep();
	return op_mode->ops->hw_rf_kill(op_mode, state);
}

static inline void iwl_op_mode_free_skb(struct iwl_op_mode *op_mode,
					struct sk_buff *skb)
{
	if (WARN_ON_ONCE(!op_mode))
		return;
	op_mode->ops->free_skb(op_mode, skb);
}

static inline void iwl_op_mode_nic_error(struct iwl_op_mode *op_mode, bool sync)
{
	op_mode->ops->nic_error(op_mode, sync);
}

static inline void iwl_op_mode_cmd_queue_full(struct iwl_op_mode *op_mode)
{
	op_mode->ops->cmd_queue_full(op_mode);
}

static inline void iwl_op_mode_nic_config(struct iwl_op_mode *op_mode)
{
	might_sleep();
	if (op_mode->ops->nic_config)
		op_mode->ops->nic_config(op_mode);
}

static inline void iwl_op_mode_wimax_active(struct iwl_op_mode *op_mode)
{
	might_sleep();
	op_mode->ops->wimax_active(op_mode);
}

static inline void iwl_op_mode_time_point(struct iwl_op_mode *op_mode,
					  enum iwl_fw_ini_time_point tp_id,
					  union iwl_dbg_tlv_tp_data *tp_data)
{
	if (!op_mode || !op_mode->ops || !op_mode->ops->time_point)
		return;
	op_mode->ops->time_point(op_mode, tp_id, tp_data);
}

static inline void iwl_op_mode_device_powered_off(struct iwl_op_mode *op_mode)
{
	if (!op_mode || !op_mode->ops || !op_mode->ops->device_powered_off)
		return;
	op_mode->ops->device_powered_off(op_mode);
}

#endif /* __iwl_op_mode_h__ */