1 /* $NetBSD: if_iwm.c,v 1.42 2016/06/10 13:27:14 ozaki-r Exp $ */
2 /* OpenBSD: if_iwm.c,v 1.41 2015/05/22 06:50:54 kettenis Exp */
3
4 /*
5 * Copyright (c) 2014 genua mbh <info@genua.de>
6 * Copyright (c) 2014 Fixup Software Ltd.
7 *
8 * Permission to use, copy, modify, and distribute this software for any
9 * purpose with or without fee is hereby granted, provided that the above
10 * copyright notice and this permission notice appear in all copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 */
20
21 /*-
22 * Based on BSD-licensed source modules in the Linux iwlwifi driver,
23 * which were used as the reference documentation for this implementation.
24 *
25 * Driver version we are currently based off of is
26 * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
27 *
28 ***********************************************************************
29 *
30 * This file is provided under a dual BSD/GPLv2 license. When using or
31 * redistributing this file, you may do so under either license.
32 *
33 * GPL LICENSE SUMMARY
34 *
35 * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
36 *
37 * This program is free software; you can redistribute it and/or modify
38 * it under the terms of version 2 of the GNU General Public License as
39 * published by the Free Software Foundation.
40 *
41 * This program is distributed in the hope that it will be useful, but
42 * WITHOUT ANY WARRANTY; without even the implied warranty of
43 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
44 * General Public License for more details.
45 *
46 * You should have received a copy of the GNU General Public License
47 * along with this program; if not, write to the Free Software
48 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
49 * USA
50 *
51 * The full GNU General Public License is included in this distribution
52 * in the file called COPYING.
53 *
54 * Contact Information:
55 * Intel Linux Wireless <ilw@linux.intel.com>
56 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
57 *
58 *
59 * BSD LICENSE
60 *
61 * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
62 * All rights reserved.
63 *
64 * Redistribution and use in source and binary forms, with or without
65 * modification, are permitted provided that the following conditions
66 * are met:
67 *
68 * * Redistributions of source code must retain the above copyright
69 * notice, this list of conditions and the following disclaimer.
70 * * Redistributions in binary form must reproduce the above copyright
71 * notice, this list of conditions and the following disclaimer in
72 * the documentation and/or other materials provided with the
73 * distribution.
74 * * Neither the name Intel Corporation nor the names of its
75 * contributors may be used to endorse or promote products derived
76 * from this software without specific prior written permission.
77 *
78 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
79 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
80 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
81 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
82 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
83 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
84 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
85 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
86 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
87 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
88 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
89 */
90
91 /*-
92 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
93 *
94 * Permission to use, copy, modify, and distribute this software for any
95 * purpose with or without fee is hereby granted, provided that the above
96 * copyright notice and this permission notice appear in all copies.
97 *
98 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
99 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
100 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
101 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
102 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
103 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
104 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
105 */
106
107 #include <sys/cdefs.h>
108 __KERNEL_RCSID(0, "$NetBSD: if_iwm.c,v 1.42 2016/06/10 13:27:14 ozaki-r Exp $");
109
110 #include <sys/param.h>
111 #include <sys/conf.h>
112 #include <sys/kernel.h>
113 #include <sys/kmem.h>
114 #include <sys/mbuf.h>
115 #include <sys/mutex.h>
116 #include <sys/proc.h>
117 #include <sys/socket.h>
118 #include <sys/sockio.h>
119 #include <sys/sysctl.h>
120 #include <sys/systm.h>
121
122 #include <sys/cpu.h>
123 #include <sys/bus.h>
124 #include <sys/workqueue.h>
125 #include <machine/endian.h>
126 #include <machine/intr.h>
127
128 #include <dev/pci/pcireg.h>
129 #include <dev/pci/pcivar.h>
130 #include <dev/pci/pcidevs.h>
131 #include <dev/firmload.h>
132
133 #include <net/bpf.h>
134 #include <net/if.h>
135 #include <net/if_arp.h>
136 #include <net/if_dl.h>
137 #include <net/if_media.h>
138 #include <net/if_types.h>
139 #include <net/if_ether.h>
140
141 #include <netinet/in.h>
142 #include <netinet/in_systm.h>
143 #include <netinet/ip.h>
144
145 #include <net80211/ieee80211_var.h>
146 #include <net80211/ieee80211_amrr.h>
147 #include <net80211/ieee80211_radiotap.h>
148
149 #define DEVNAME(_s) device_xname((_s)->sc_dev)
150 #define IC2IFP(_ic_) ((_ic_)->ic_ifp)
151
152 #define le16_to_cpup(_a_) (le16toh(*(const uint16_t *)(_a_)))
153 #define le32_to_cpup(_a_) (le32toh(*(const uint32_t *)(_a_)))
154
155 #ifdef IWM_DEBUG
156 #define DPRINTF(x) do { if (iwm_debug > 0) printf x; } while (0)
157 #define DPRINTFN(n, x) do { if (iwm_debug >= (n)) printf x; } while (0)
158 int iwm_debug = 0;
159 #else
160 #define DPRINTF(x) do { ; } while (0)
161 #define DPRINTFN(n, x) do { ; } while (0)
162 #endif
163
164 #include <dev/pci/if_iwmreg.h>
165 #include <dev/pci/if_iwmvar.h>
166
/*
 * Channel numbers the NVM can report, in NVM index order; the first
 * IWM_NUM_2GHZ_CHANNELS entries are the 2.4 GHz channels.
 */
static const uint8_t iwm_nvm_channels[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44 , 48, 52, 56, 60, 64,
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165
};
#define IWM_NUM_2GHZ_CHANNELS	14	/* count of 2.4 GHz entries above */
176
/*
 * Rate table mapping net80211 rate codes to firmware PLCP values.
 * "rate" is in 500 kb/s units (2 == 1 Mb/s, ..., 108 == 54 Mb/s).
 */
static const struct iwm_rate {
	uint8_t rate;	/* net80211 rate code, 500 kb/s units */
	uint8_t plcp;	/* matching PLCP signal value */
} iwm_rates[] = {
	{   2,	IWM_RATE_1M_PLCP  },
	{   4,	IWM_RATE_2M_PLCP  },
	{  11,	IWM_RATE_5M_PLCP  },
	{  22,	IWM_RATE_11M_PLCP },
	{  12,	IWM_RATE_6M_PLCP  },
	{  18,	IWM_RATE_9M_PLCP  },
	{  24,	IWM_RATE_12M_PLCP },
	{  36,	IWM_RATE_18M_PLCP },
	{  48,	IWM_RATE_24M_PLCP },
	{  72,	IWM_RATE_36M_PLCP },
	{  96,	IWM_RATE_48M_PLCP },
	{ 108,	IWM_RATE_54M_PLCP },
};
#define IWM_RIDX_CCK	0	/* index of first CCK entry (1 Mb/s) */
#define IWM_RIDX_OFDM	4	/* index of first OFDM entry (6 Mb/s) */
#define IWM_RIDX_MAX	(__arraycount(iwm_rates)-1)
#define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
#define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
199
/*
 * Deferred net80211 state-change request, dispatched through a
 * workqueue (see iwm_newstate_cb).
 */
struct iwm_newstate_state {
	struct work ns_wk;		/* workqueue linkage */
	enum ieee80211_state ns_nstate;	/* target state */
	int ns_arg;			/* argument for the transition */
	int ns_generation;		/* NOTE(review): appears to guard against stale requests -- confirm */
};
206
207 static int iwm_store_cscheme(struct iwm_softc *, uint8_t *, size_t);
208 static int iwm_firmware_store_section(struct iwm_softc *,
209 enum iwm_ucode_type, uint8_t *, size_t);
210 static int iwm_set_default_calib(struct iwm_softc *, const void *);
211 static int iwm_read_firmware(struct iwm_softc *);
212 static uint32_t iwm_read_prph(struct iwm_softc *, uint32_t);
213 static void iwm_write_prph(struct iwm_softc *, uint32_t, uint32_t);
214 #ifdef IWM_DEBUG
215 static int iwm_read_mem(struct iwm_softc *, uint32_t, void *, int);
216 #endif
217 static int iwm_write_mem(struct iwm_softc *, uint32_t, const void *, int);
218 static int iwm_write_mem32(struct iwm_softc *, uint32_t, uint32_t);
219 static int iwm_poll_bit(struct iwm_softc *, int, uint32_t, uint32_t, int);
220 static int iwm_nic_lock(struct iwm_softc *);
221 static void iwm_nic_unlock(struct iwm_softc *);
222 static void iwm_set_bits_mask_prph(struct iwm_softc *, uint32_t, uint32_t,
223 uint32_t);
224 static void iwm_set_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
225 static void iwm_clear_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
226 static int iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *,
227 bus_size_t, bus_size_t);
228 static void iwm_dma_contig_free(struct iwm_dma_info *);
229 static int iwm_alloc_fwmem(struct iwm_softc *);
230 static void iwm_free_fwmem(struct iwm_softc *);
231 static int iwm_alloc_sched(struct iwm_softc *);
232 static void iwm_free_sched(struct iwm_softc *);
233 static int iwm_alloc_kw(struct iwm_softc *);
234 static void iwm_free_kw(struct iwm_softc *);
235 static int iwm_alloc_ict(struct iwm_softc *);
236 static void iwm_free_ict(struct iwm_softc *);
237 static int iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
238 static void iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
239 static void iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
240 static int iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
241 int);
242 static void iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
243 static void iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
244 static void iwm_enable_rfkill_int(struct iwm_softc *);
245 static int iwm_check_rfkill(struct iwm_softc *);
246 static void iwm_enable_interrupts(struct iwm_softc *);
247 static void iwm_restore_interrupts(struct iwm_softc *);
248 static void iwm_disable_interrupts(struct iwm_softc *);
249 static void iwm_ict_reset(struct iwm_softc *);
250 static int iwm_set_hw_ready(struct iwm_softc *);
251 static int iwm_prepare_card_hw(struct iwm_softc *);
252 static void iwm_apm_config(struct iwm_softc *);
253 static int iwm_apm_init(struct iwm_softc *);
254 static void iwm_apm_stop(struct iwm_softc *);
255 static int iwm_allow_mcast(struct iwm_softc *);
256 static int iwm_start_hw(struct iwm_softc *);
257 static void iwm_stop_device(struct iwm_softc *);
258 static void iwm_set_pwr(struct iwm_softc *);
259 static void iwm_mvm_nic_config(struct iwm_softc *);
260 static int iwm_nic_rx_init(struct iwm_softc *);
261 static int iwm_nic_tx_init(struct iwm_softc *);
262 static int iwm_nic_init(struct iwm_softc *);
263 static void iwm_enable_txq(struct iwm_softc *, int, int);
264 static int iwm_post_alive(struct iwm_softc *);
265 static int iwm_is_valid_channel(uint16_t);
266 static uint8_t iwm_ch_id_to_ch_index(uint16_t);
267 static uint16_t iwm_channel_id_to_papd(uint16_t);
268 static uint16_t iwm_channel_id_to_txp(struct iwm_softc *, uint16_t);
269 static int iwm_phy_db_get_section_data(struct iwm_softc *, uint32_t,
270 uint8_t **, uint16_t *, uint16_t);
271 static int iwm_send_phy_db_cmd(struct iwm_softc *, uint16_t, uint16_t,
272 void *);
/* NB: this prototype was accidentally declared twice; keep one copy. */
static int	iwm_send_phy_db_data(struct iwm_softc *);
275 static void iwm_mvm_te_v2_to_v1(const struct iwm_time_event_cmd_v2 *,
276 struct iwm_time_event_cmd_v1 *);
277 static int iwm_mvm_send_time_event_cmd(struct iwm_softc *,
278 const struct iwm_time_event_cmd_v2 *);
279 static int iwm_mvm_time_event_send_add(struct iwm_softc *,
280 struct iwm_node *, void *, struct iwm_time_event_cmd_v2 *);
281 static void iwm_mvm_protect_session(struct iwm_softc *, struct iwm_node *,
282 uint32_t, uint32_t, uint32_t);
283 static int iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
284 uint16_t, uint8_t *, uint16_t *);
285 static int iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
286 uint16_t *);
287 static void iwm_init_channel_map(struct iwm_softc *,
288 const uint16_t * const);
289 static int iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
290 const uint16_t *, const uint16_t *, uint8_t, uint8_t);
291 static int iwm_nvm_init(struct iwm_softc *);
292 static int iwm_firmware_load_chunk(struct iwm_softc *, uint32_t,
293 const uint8_t *, uint32_t);
294 static int iwm_load_firmware(struct iwm_softc *, enum iwm_ucode_type);
295 static int iwm_start_fw(struct iwm_softc *, enum iwm_ucode_type);
296 static int iwm_fw_alive(struct iwm_softc *, uint32_t);
297 static int iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
298 static int iwm_send_phy_cfg_cmd(struct iwm_softc *);
299 static int iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
300 enum iwm_ucode_type);
301 static int iwm_run_init_mvm_ucode(struct iwm_softc *, int);
302 static int iwm_rx_addbuf(struct iwm_softc *, int, int);
303 static int iwm_mvm_calc_rssi(struct iwm_softc *, struct iwm_rx_phy_info *);
304 static int iwm_mvm_get_signal_strength(struct iwm_softc *,
305 struct iwm_rx_phy_info *);
306 static void iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *,
307 struct iwm_rx_packet *, struct iwm_rx_data *);
308 static int iwm_get_noise(const struct iwm_mvm_statistics_rx_non_phy *);
309 static void iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct iwm_rx_packet *,
310 struct iwm_rx_data *);
311 static void iwm_mvm_rx_tx_cmd_single(struct iwm_softc *,
312 struct iwm_rx_packet *, struct iwm_node *);
313 static void iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
314 struct iwm_rx_data *);
315 static int iwm_mvm_binding_cmd(struct iwm_softc *, struct iwm_node *,
316 uint32_t);
317 static int iwm_mvm_binding_update(struct iwm_softc *, struct iwm_node *,
318 int);
319 static int iwm_mvm_binding_add_vif(struct iwm_softc *, struct iwm_node *);
320 static void iwm_mvm_phy_ctxt_cmd_hdr(struct iwm_softc *,
321 struct iwm_mvm_phy_ctxt *, struct iwm_phy_context_cmd *,
322 uint32_t, uint32_t);
323 static void iwm_mvm_phy_ctxt_cmd_data(struct iwm_softc *,
324 struct iwm_phy_context_cmd *, struct ieee80211_channel *,
325 uint8_t, uint8_t);
326 static int iwm_mvm_phy_ctxt_apply(struct iwm_softc *,
327 struct iwm_mvm_phy_ctxt *, uint8_t, uint8_t, uint32_t,
328 uint32_t);
329 static int iwm_mvm_phy_ctxt_add(struct iwm_softc *,
330 struct iwm_mvm_phy_ctxt *, struct ieee80211_channel *,
331 uint8_t, uint8_t);
332 static int iwm_mvm_phy_ctxt_changed(struct iwm_softc *,
333 struct iwm_mvm_phy_ctxt *, struct ieee80211_channel *,
334 uint8_t, uint8_t);
335 static int iwm_send_cmd(struct iwm_softc *, struct iwm_host_cmd *);
336 static int iwm_mvm_send_cmd_pdu(struct iwm_softc *, uint8_t, uint32_t,
337 uint16_t, const void *);
338 static int iwm_mvm_send_cmd_status(struct iwm_softc *,
339 struct iwm_host_cmd *, uint32_t *);
340 static int iwm_mvm_send_cmd_pdu_status(struct iwm_softc *, uint8_t,
341 uint16_t, const void *, uint32_t *);
342 static void iwm_free_resp(struct iwm_softc *, struct iwm_host_cmd *);
343 static void iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
344 #if 0
345 static void iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
346 uint16_t);
347 #endif
348 static const struct iwm_rate *iwm_tx_fill_cmd(struct iwm_softc *,
349 struct iwm_node *, struct ieee80211_frame *,
350 struct iwm_tx_cmd *);
351 static int iwm_tx(struct iwm_softc *, struct mbuf *,
352 struct ieee80211_node *, int);
353 static int iwm_mvm_beacon_filter_send_cmd(struct iwm_softc *,
354 struct iwm_beacon_filter_cmd *);
355 static void iwm_mvm_beacon_filter_set_cqm_params(struct iwm_softc *,
356 struct iwm_node *, struct iwm_beacon_filter_cmd *);
357 static int iwm_mvm_update_beacon_abort(struct iwm_softc *,
358 struct iwm_node *, int);
359 static void iwm_mvm_power_log(struct iwm_softc *,
360 struct iwm_mac_power_cmd *);
361 static void iwm_mvm_power_build_cmd(struct iwm_softc *, struct iwm_node *,
362 struct iwm_mac_power_cmd *);
363 static int iwm_mvm_power_mac_update_mode(struct iwm_softc *,
364 struct iwm_node *);
365 static int iwm_mvm_power_update_device(struct iwm_softc *);
366 static int iwm_mvm_enable_beacon_filter(struct iwm_softc *,
367 struct iwm_node *);
368 static int iwm_mvm_disable_beacon_filter(struct iwm_softc *,
369 struct iwm_node *);
370 static void iwm_mvm_add_sta_cmd_v6_to_v5(struct iwm_mvm_add_sta_cmd_v6 *,
371 struct iwm_mvm_add_sta_cmd_v5 *);
372 static int iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *,
373 struct iwm_mvm_add_sta_cmd_v6 *, int *);
374 static int iwm_mvm_sta_send_to_fw(struct iwm_softc *, struct iwm_node *,
375 int);
376 static int iwm_mvm_add_sta(struct iwm_softc *, struct iwm_node *);
377 static int iwm_mvm_update_sta(struct iwm_softc *, struct iwm_node *);
378 static int iwm_mvm_add_int_sta_common(struct iwm_softc *,
379 struct iwm_int_sta *, const uint8_t *, uint16_t, uint16_t);
380 static int iwm_mvm_add_aux_sta(struct iwm_softc *);
381 static uint16_t iwm_mvm_scan_rx_chain(struct iwm_softc *);
382 static uint32_t iwm_mvm_scan_max_out_time(struct iwm_softc *, uint32_t, int);
383 static uint32_t iwm_mvm_scan_suspend_time(struct iwm_softc *, int);
384 static uint32_t iwm_mvm_scan_rxon_flags(struct iwm_softc *, int);
385 static uint32_t iwm_mvm_scan_rate_n_flags(struct iwm_softc *, int, int);
386 static uint16_t iwm_mvm_get_active_dwell(struct iwm_softc *, int, int);
387 static uint16_t iwm_mvm_get_passive_dwell(struct iwm_softc *, int);
388 static int iwm_mvm_scan_fill_channels(struct iwm_softc *,
389 struct iwm_scan_cmd *, int, int, int);
390 static uint16_t iwm_mvm_fill_probe_req(struct iwm_softc *,
391 struct ieee80211_frame *, const uint8_t *, int,
392 const uint8_t *, int, const uint8_t *, int, int);
393 static int iwm_mvm_scan_request(struct iwm_softc *, int, int, uint8_t *,
394 int);
395 static void iwm_mvm_ack_rates(struct iwm_softc *, struct iwm_node *, int *,
396 int *);
397 static void iwm_mvm_mac_ctxt_cmd_common(struct iwm_softc *,
398 struct iwm_node *, struct iwm_mac_ctx_cmd *, uint32_t);
399 static int iwm_mvm_mac_ctxt_send_cmd(struct iwm_softc *,
400 struct iwm_mac_ctx_cmd *);
401 static void iwm_mvm_mac_ctxt_cmd_fill_sta(struct iwm_softc *,
402 struct iwm_node *, struct iwm_mac_data_sta *, int);
403 static int iwm_mvm_mac_ctxt_cmd_station(struct iwm_softc *,
404 struct iwm_node *, uint32_t);
405 static int iwm_mvm_mac_ctx_send(struct iwm_softc *, struct iwm_node *,
406 uint32_t);
407 static int iwm_mvm_mac_ctxt_add(struct iwm_softc *, struct iwm_node *);
408 static int iwm_mvm_mac_ctxt_changed(struct iwm_softc *, struct iwm_node *);
409 static int iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_node *);
410 static int iwm_auth(struct iwm_softc *);
411 static int iwm_assoc(struct iwm_softc *);
412 static int iwm_release(struct iwm_softc *, struct iwm_node *);
413 static void iwm_calib_timeout(void *);
414 static void iwm_setrates(struct iwm_node *);
415 static int iwm_media_change(struct ifnet *);
416 static void iwm_newstate_cb(struct work *, void *);
417 static int iwm_newstate(struct ieee80211com *, enum ieee80211_state, int);
418 static void iwm_endscan_cb(struct work *, void *);
419 static int iwm_init_hw(struct iwm_softc *);
420 static int iwm_init(struct ifnet *);
421 static void iwm_start(struct ifnet *);
422 static void iwm_stop(struct ifnet *, int);
423 static void iwm_watchdog(struct ifnet *);
424 static int iwm_ioctl(struct ifnet *, u_long, void *);
425 #ifdef IWM_DEBUG
426 static const char *iwm_desc_lookup(uint32_t);
427 static void iwm_nic_error(struct iwm_softc *);
428 #endif
429 static void iwm_notif_intr(struct iwm_softc *);
430 static int iwm_intr(void *);
431 static int iwm_preinit(struct iwm_softc *);
432 static void iwm_attach_hook(device_t);
433 static void iwm_attach(device_t, device_t, void *);
434 #if 0
435 static void iwm_init_task(void *);
436 static int iwm_activate(device_t, enum devact);
437 static void iwm_wakeup(struct iwm_softc *);
438 #endif
439 static void iwm_radiotap_attach(struct iwm_softc *);
440 static int iwm_sysctl_fw_loaded_handler(SYSCTLFN_PROTO);
441
442 static int iwm_sysctl_root_num;
443
444 static int
iwm_firmload(struct iwm_softc * sc)445 iwm_firmload(struct iwm_softc *sc)
446 {
447 struct iwm_fw_info *fw = &sc->sc_fw;
448 firmware_handle_t fwh;
449 int error;
450
451 if (ISSET(sc->sc_flags, IWM_FLAG_FW_LOADED))
452 return 0;
453
454 /* Open firmware image. */
455 if ((error = firmware_open("if_iwm", sc->sc_fwname, &fwh)) != 0) {
456 aprint_error_dev(sc->sc_dev,
457 "could not get firmware handle %s\n", sc->sc_fwname);
458 return error;
459 }
460
461 if (fw->fw_rawdata != NULL && fw->fw_rawsize > 0) {
462 kmem_free(fw->fw_rawdata, fw->fw_rawsize);
463 fw->fw_rawdata = NULL;
464 }
465
466 fw->fw_rawsize = firmware_get_size(fwh);
467 /*
468 * Well, this is how the Linux driver checks it ....
469 */
470 if (fw->fw_rawsize < sizeof(uint32_t)) {
471 aprint_error_dev(sc->sc_dev,
472 "firmware too short: %zd bytes\n", fw->fw_rawsize);
473 error = EINVAL;
474 goto out;
475 }
476
477 /* some sanity */
478 if (fw->fw_rawsize > IWM_FWMAXSIZE) {
479 aprint_error_dev(sc->sc_dev,
480 "firmware size is ridiculous: %zd bytes\n", fw->fw_rawsize);
481 error = EINVAL;
482 goto out;
483 }
484
485 /* Read the firmware. */
486 fw->fw_rawdata = kmem_alloc(fw->fw_rawsize, KM_SLEEP);
487 if (fw->fw_rawdata == NULL) {
488 aprint_error_dev(sc->sc_dev,
489 "not enough memory to stock firmware %s\n", sc->sc_fwname);
490 error = ENOMEM;
491 goto out;
492 }
493 error = firmware_read(fwh, 0, fw->fw_rawdata, fw->fw_rawsize);
494 if (error) {
495 aprint_error_dev(sc->sc_dev,
496 "could not read firmware %s\n", sc->sc_fwname);
497 goto out;
498 }
499
500 SET(sc->sc_flags, IWM_FLAG_FW_LOADED);
501 out:
502 /* caller will release memory, if necessary */
503
504 firmware_close(fwh);
505 return error;
506 }
507
508 /*
509 * just maintaining status quo.
510 */
511 static void
iwm_fix_channel(struct ieee80211com * ic,struct mbuf * m)512 iwm_fix_channel(struct ieee80211com *ic, struct mbuf *m)
513 {
514 struct iwm_softc *sc = ic->ic_ifp->if_softc;
515 struct ieee80211_frame *wh;
516 uint8_t subtype;
517 uint8_t *frm, *efrm;
518
519 wh = mtod(m, struct ieee80211_frame *);
520
521 if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_MGT)
522 return;
523
524 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
525
526 if (subtype != IEEE80211_FC0_SUBTYPE_BEACON &&
527 subtype != IEEE80211_FC0_SUBTYPE_PROBE_RESP)
528 return;
529
530 if (sc->sc_scanband == IEEE80211_CHAN_5GHZ) {
531 int chan = le32toh(sc->sc_last_phy_info.channel);
532 if (chan < __arraycount(ic->ic_channels))
533 ic->ic_curchan = &ic->ic_channels[chan];
534 return;
535 }
536
537 frm = (uint8_t *)(wh + 1);
538 efrm = mtod(m, uint8_t *) + m->m_len;
539
540 frm += 12; /* skip tstamp, bintval and capinfo fields */
541 while (frm < efrm) {
542 if (*frm == IEEE80211_ELEMID_DSPARMS) {
543 #if IEEE80211_CHAN_MAX < 255
544 if (frm[2] <= IEEE80211_CHAN_MAX)
545 #endif
546 ic->ic_curchan = &ic->ic_channels[frm[2]];
547 }
548 frm += frm[1] + 2;
549 }
550 }
551
552 /*
553 * Firmware parser.
554 */
555
556 static int
iwm_store_cscheme(struct iwm_softc * sc,uint8_t * data,size_t dlen)557 iwm_store_cscheme(struct iwm_softc *sc, uint8_t *data, size_t dlen)
558 {
559 struct iwm_fw_cscheme_list *l = (void *)data;
560
561 if (dlen < sizeof(*l) ||
562 dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
563 return EINVAL;
564
565 /* we don't actually store anything for now, always use s/w crypto */
566
567 return 0;
568 }
569
570 static int
iwm_firmware_store_section(struct iwm_softc * sc,enum iwm_ucode_type type,uint8_t * data,size_t dlen)571 iwm_firmware_store_section(struct iwm_softc *sc,
572 enum iwm_ucode_type type, uint8_t *data, size_t dlen)
573 {
574 struct iwm_fw_sects *fws;
575 struct iwm_fw_onesect *fwone;
576
577 if (type >= IWM_UCODE_TYPE_MAX)
578 return EINVAL;
579 if (dlen < sizeof(uint32_t))
580 return EINVAL;
581
582 fws = &sc->sc_fw.fw_sects[type];
583 if (fws->fw_count >= IWM_UCODE_SECT_MAX)
584 return EINVAL;
585
586 fwone = &fws->fw_sect[fws->fw_count];
587
588 /* first 32bit are device load offset */
589 memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));
590
591 /* rest is data */
592 fwone->fws_data = data + sizeof(uint32_t);
593 fwone->fws_len = dlen - sizeof(uint32_t);
594
595 /* for freeing the buffer during driver unload */
596 fwone->fws_alloc = data;
597 fwone->fws_allocsize = dlen;
598
599 fws->fw_count++;
600 fws->fw_totlen += fwone->fws_len;
601
602 return 0;
603 }
604
605 /* iwlwifi: iwl-drv.c */
/* Wire format of an IWM_UCODE_TLV_DEF_CALIB firmware section. */
struct iwm_tlv_calib_data {
	uint32_t ucode_type;		/* target ucode image, little-endian */
	struct iwm_tlv_calib_ctrl calib;	/* flow/event calibration triggers */
} __packed;
610
611 static int
iwm_set_default_calib(struct iwm_softc * sc,const void * data)612 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
613 {
614 const struct iwm_tlv_calib_data *def_calib = data;
615 uint32_t ucode_type = le32toh(def_calib->ucode_type);
616
617 if (ucode_type >= IWM_UCODE_TYPE_MAX) {
618 DPRINTF(("%s: Wrong ucode_type %u for default "
619 "calibration.\n", DEVNAME(sc), ucode_type));
620 return EINVAL;
621 }
622
623 sc->sc_default_calib[ucode_type].flow_trigger =
624 def_calib->calib.flow_trigger;
625 sc->sc_default_calib[ucode_type].event_trigger =
626 def_calib->calib.event_trigger;
627
628 return 0;
629 }
630
631 static int
iwm_read_firmware(struct iwm_softc * sc)632 iwm_read_firmware(struct iwm_softc *sc)
633 {
634 struct iwm_fw_info *fw = &sc->sc_fw;
635 struct iwm_tlv_ucode_header *uhdr;
636 struct iwm_ucode_tlv tlv;
637 enum iwm_ucode_tlv_type tlv_type;
638 uint8_t *data;
639 int error, status;
640 size_t len;
641
642 if (fw->fw_status == IWM_FW_STATUS_NONE) {
643 fw->fw_status = IWM_FW_STATUS_INPROGRESS;
644 } else {
645 while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
646 tsleep(&sc->sc_fw, 0, "iwmfwp", 0);
647 }
648 status = fw->fw_status;
649
650 if (status == IWM_FW_STATUS_DONE)
651 return 0;
652
653 /*
654 * Load firmware into driver memory.
655 * fw_rawdata and fw_rawsize will be set.
656 */
657 error = iwm_firmload(sc);
658 if (error != 0) {
659 aprint_error_dev(sc->sc_dev,
660 "could not read firmware %s (error %d)\n",
661 sc->sc_fwname, error);
662 goto out;
663 }
664
665 /*
666 * Parse firmware contents
667 */
668
669 uhdr = (void *)fw->fw_rawdata;
670 if (*(uint32_t *)fw->fw_rawdata != 0
671 || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
672 aprint_error_dev(sc->sc_dev, "invalid firmware %s\n",
673 sc->sc_fwname);
674 error = EINVAL;
675 goto out;
676 }
677
678 sc->sc_fwver = le32toh(uhdr->ver);
679 data = uhdr->data;
680 len = fw->fw_rawsize - sizeof(*uhdr);
681
682 while (len >= sizeof(tlv)) {
683 size_t tlv_len;
684 void *tlv_data;
685
686 memcpy(&tlv, data, sizeof(tlv));
687 tlv_len = le32toh(tlv.length);
688 tlv_type = le32toh(tlv.type);
689
690 len -= sizeof(tlv);
691 data += sizeof(tlv);
692 tlv_data = data;
693
694 if (len < tlv_len) {
695 aprint_error_dev(sc->sc_dev,
696 "firmware too short: %zu bytes\n", len);
697 error = EINVAL;
698 goto parse_out;
699 }
700
701 switch ((int)tlv_type) {
702 case IWM_UCODE_TLV_PROBE_MAX_LEN:
703 if (tlv_len < sizeof(uint32_t)) {
704 error = EINVAL;
705 goto parse_out;
706 }
707 sc->sc_capa_max_probe_len
708 = le32toh(*(uint32_t *)tlv_data);
709 /* limit it to something sensible */
710 if (sc->sc_capa_max_probe_len > (1<<16)) {
711 DPRINTF(("%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
712 "ridiculous\n", DEVNAME(sc)));
713 error = EINVAL;
714 goto parse_out;
715 }
716 break;
717 case IWM_UCODE_TLV_PAN:
718 if (tlv_len) {
719 error = EINVAL;
720 goto parse_out;
721 }
722 sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
723 break;
724 case IWM_UCODE_TLV_FLAGS:
725 if (tlv_len < sizeof(uint32_t)) {
726 error = EINVAL;
727 goto parse_out;
728 }
729 /*
730 * Apparently there can be many flags, but Linux driver
731 * parses only the first one, and so do we.
732 *
733 * XXX: why does this override IWM_UCODE_TLV_PAN?
734 * Intentional or a bug? Observations from
735 * current firmware file:
736 * 1) TLV_PAN is parsed first
737 * 2) TLV_FLAGS contains TLV_FLAGS_PAN
738 * ==> this resets TLV_PAN to itself... hnnnk
739 */
740 sc->sc_capaflags = le32toh(*(uint32_t *)tlv_data);
741 break;
742 case IWM_UCODE_TLV_CSCHEME:
743 if ((error = iwm_store_cscheme(sc,
744 tlv_data, tlv_len)) != 0)
745 goto parse_out;
746 break;
747 case IWM_UCODE_TLV_NUM_OF_CPU:
748 if (tlv_len != sizeof(uint32_t)) {
749 error = EINVAL;
750 goto parse_out;
751 }
752 if (le32toh(*(uint32_t*)tlv_data) != 1) {
753 DPRINTF(("%s: driver supports "
754 "only TLV_NUM_OF_CPU == 1", DEVNAME(sc)));
755 error = EINVAL;
756 goto parse_out;
757 }
758 break;
759 case IWM_UCODE_TLV_SEC_RT:
760 if ((error = iwm_firmware_store_section(sc,
761 IWM_UCODE_TYPE_REGULAR, tlv_data, tlv_len)) != 0)
762 goto parse_out;
763 break;
764 case IWM_UCODE_TLV_SEC_INIT:
765 if ((error = iwm_firmware_store_section(sc,
766 IWM_UCODE_TYPE_INIT, tlv_data, tlv_len)) != 0)
767 goto parse_out;
768 break;
769 case IWM_UCODE_TLV_SEC_WOWLAN:
770 if ((error = iwm_firmware_store_section(sc,
771 IWM_UCODE_TYPE_WOW, tlv_data, tlv_len)) != 0)
772 goto parse_out;
773 break;
774 case IWM_UCODE_TLV_DEF_CALIB:
775 if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
776 error = EINVAL;
777 goto parse_out;
778 }
779 if ((error = iwm_set_default_calib(sc, tlv_data)) != 0)
780 goto parse_out;
781 break;
782 case IWM_UCODE_TLV_PHY_SKU:
783 if (tlv_len != sizeof(uint32_t)) {
784 error = EINVAL;
785 goto parse_out;
786 }
787 sc->sc_fw_phy_config = le32toh(*(uint32_t *)tlv_data);
788 break;
789
790 case IWM_UCODE_TLV_API_CHANGES_SET:
791 case IWM_UCODE_TLV_ENABLED_CAPABILITIES:
792 /* ignore, not used by current driver */
793 break;
794
795 default:
796 DPRINTF(("%s: unknown firmware section %d, abort\n",
797 DEVNAME(sc), tlv_type));
798 error = EINVAL;
799 goto parse_out;
800 }
801
802 len -= roundup(tlv_len, 4);
803 data += roundup(tlv_len, 4);
804 }
805
806 KASSERT(error == 0);
807
808 parse_out:
809 if (error) {
810 aprint_error_dev(sc->sc_dev,
811 "firmware parse error, section type %d\n", tlv_type);
812 }
813
814 if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
815 aprint_error_dev(sc->sc_dev,
816 "device uses unsupported power ops\n");
817 error = ENOTSUP;
818 }
819
820 out:
821 if (error)
822 fw->fw_status = IWM_FW_STATUS_NONE;
823 else
824 fw->fw_status = IWM_FW_STATUS_DONE;
825 wakeup(&sc->sc_fw);
826
827 if (error && fw->fw_rawdata != NULL) {
828 kmem_free(fw->fw_rawdata, fw->fw_rawsize);
829 fw->fw_rawdata = NULL;
830 CLR(sc->sc_flags, IWM_FLAG_FW_LOADED);
831 }
832 return error;
833 }
834
835 /*
836 * basic device access
837 */
838
839 static uint32_t
iwm_read_prph(struct iwm_softc * sc,uint32_t addr)840 iwm_read_prph(struct iwm_softc *sc, uint32_t addr)
841 {
842 IWM_WRITE(sc,
843 IWM_HBUS_TARG_PRPH_RADDR, ((addr & 0x000fffff) | (3 << 24)));
844 IWM_BARRIER_READ_WRITE(sc);
845 return IWM_READ(sc, IWM_HBUS_TARG_PRPH_RDAT);
846 }
847
848 static void
iwm_write_prph(struct iwm_softc * sc,uint32_t addr,uint32_t val)849 iwm_write_prph(struct iwm_softc *sc, uint32_t addr, uint32_t val)
850 {
851 IWM_WRITE(sc,
852 IWM_HBUS_TARG_PRPH_WADDR, ((addr & 0x000fffff) | (3 << 24)));
853 IWM_BARRIER_WRITE(sc);
854 IWM_WRITE(sc, IWM_HBUS_TARG_PRPH_WDAT, val);
855 }
856
857 #ifdef IWM_DEBUG
858 /* iwlwifi: pcie/trans.c */
859 static int
iwm_read_mem(struct iwm_softc * sc,uint32_t addr,void * buf,int dwords)860 iwm_read_mem(struct iwm_softc *sc, uint32_t addr, void *buf, int dwords)
861 {
862 int offs, ret = 0;
863 uint32_t *vals = buf;
864
865 if (iwm_nic_lock(sc)) {
866 IWM_WRITE(sc, IWM_HBUS_TARG_MEM_RADDR, addr);
867 for (offs = 0; offs < dwords; offs++)
868 vals[offs] = IWM_READ(sc, IWM_HBUS_TARG_MEM_RDAT);
869 iwm_nic_unlock(sc);
870 } else {
871 ret = EBUSY;
872 }
873 return ret;
874 }
875 #endif
876
877 /* iwlwifi: pcie/trans.c */
878 static int
iwm_write_mem(struct iwm_softc * sc,uint32_t addr,const void * buf,int dwords)879 iwm_write_mem(struct iwm_softc *sc, uint32_t addr, const void *buf, int dwords)
880 {
881 int offs;
882 const uint32_t *vals = buf;
883
884 if (iwm_nic_lock(sc)) {
885 IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WADDR, addr);
886 /* WADDR auto-increments */
887 for (offs = 0; offs < dwords; offs++) {
888 uint32_t val = vals ? vals[offs] : 0;
889 IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WDAT, val);
890 }
891 iwm_nic_unlock(sc);
892 } else {
893 DPRINTF(("%s: write_mem failed\n", DEVNAME(sc)));
894 return EBUSY;
895 }
896 return 0;
897 }
898
/*
 * Write a single 32-bit word to device memory at `addr'.
 * Returns 0 on success or EBUSY if the NIC could not be locked.
 */
static int
iwm_write_mem32(struct iwm_softc *sc, uint32_t addr, uint32_t val)
{
	return iwm_write_mem(sc, addr, &val, 1);
}
904
905 static int
iwm_poll_bit(struct iwm_softc * sc,int reg,uint32_t bits,uint32_t mask,int timo)906 iwm_poll_bit(struct iwm_softc *sc, int reg,
907 uint32_t bits, uint32_t mask, int timo)
908 {
909 for (;;) {
910 if ((IWM_READ(sc, reg) & mask) == (bits & mask)) {
911 return 1;
912 }
913 if (timo < 10) {
914 return 0;
915 }
916 timo -= 10;
917 DELAY(10);
918 }
919 }
920
/*
 * Request MAC access (keeps the device awake) and wait for the MAC
 * clock to become ready.  Returns 1 on success, 0 on timeout; on
 * timeout the device is kicked with a forced NMI in an attempt to
 * recover it.  Every successful call must be paired with
 * iwm_nic_unlock().
 */
static int
iwm_nic_lock(struct iwm_softc *sc)
{
	int rv = 0;

	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Wait up to 15ms for clock-ready and not going-to-sleep. */
	if (iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY
	     | IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP, 15000)) {
		rv = 1;
	} else {
		/* jolt */
		IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_FORCE_NMI);
	}

	return rv;
}
941
/*
 * Release the MAC access request taken by iwm_nic_lock(), allowing
 * the device to go back to sleep.
 */
static void
iwm_nic_unlock(struct iwm_softc *sc)
{
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
}
948
/*
 * Read-modify-write a PRPH register: keep only the bits in `mask',
 * OR in `bits', and write the result back.  Silently does nothing
 * if the NIC cannot be locked.
 */
static void
iwm_set_bits_mask_prph(struct iwm_softc *sc,
	uint32_t reg, uint32_t bits, uint32_t mask)
{
	uint32_t val;

	/* XXX: no error path? */
	if (iwm_nic_lock(sc)) {
		val = iwm_read_prph(sc, reg) & mask;
		val |= bits;
		iwm_write_prph(sc, reg, val);
		iwm_nic_unlock(sc);
	}
}
963
/* Set `bits' in a PRPH register, preserving all other bits. */
static void
iwm_set_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
{
	iwm_set_bits_mask_prph(sc, reg, bits, ~0);
}
969
/* Clear `bits' in a PRPH register, preserving all other bits. */
static void
iwm_clear_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
{
	iwm_set_bits_mask_prph(sc, reg, 0, ~bits);
}
975
976 /*
977 * DMA resource routines
978 */
979
980 static int
iwm_dma_contig_alloc(bus_dma_tag_t tag,struct iwm_dma_info * dma,bus_size_t size,bus_size_t alignment)981 iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
982 bus_size_t size, bus_size_t alignment)
983 {
984 int nsegs, error;
985 void *va;
986
987 dma->tag = tag;
988 dma->size = size;
989
990 error = bus_dmamap_create(tag, size, 1, size, 0, BUS_DMA_NOWAIT,
991 &dma->map);
992 if (error != 0)
993 goto fail;
994
995 error = bus_dmamem_alloc(tag, size, alignment, 0, &dma->seg, 1, &nsegs,
996 BUS_DMA_NOWAIT);
997 if (error != 0)
998 goto fail;
999
1000 error = bus_dmamem_map(tag, &dma->seg, 1, size, &va,
1001 BUS_DMA_NOWAIT);
1002 if (error != 0)
1003 goto fail;
1004 dma->vaddr = va;
1005
1006 error = bus_dmamap_load(tag, dma->map, dma->vaddr, size, NULL,
1007 BUS_DMA_NOWAIT);
1008 if (error != 0)
1009 goto fail;
1010
1011 memset(dma->vaddr, 0, size);
1012 bus_dmamap_sync(tag, dma->map, 0, size, BUS_DMASYNC_PREWRITE);
1013 dma->paddr = dma->map->dm_segs[0].ds_addr;
1014
1015 return 0;
1016
1017 fail: iwm_dma_contig_free(dma);
1018 return error;
1019 }
1020
/*
 * Release everything iwm_dma_contig_alloc() set up: sync, unload and
 * unmap the buffer, free the DMA segment and destroy the map.
 *
 * NOTE(review): the segment is freed only when dma->vaddr is set, so
 * a partially constructed iwm_dma_info (segment allocated but never
 * mapped) would leak its segment here -- verify against callers.
 */
static void
iwm_dma_contig_free(struct iwm_dma_info *dma)
{
	if (dma->map != NULL) {
		if (dma->vaddr != NULL) {
			bus_dmamap_sync(dma->tag, dma->map, 0, dma->size,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(dma->tag, dma->map);
			bus_dmamem_unmap(dma->tag, dma->vaddr, dma->size);
			bus_dmamem_free(dma->tag, &dma->seg, 1);
			dma->vaddr = NULL;
		}
		bus_dmamap_destroy(dma->tag, dma->map);
		dma->map = NULL;
	}
}
1037
1038 /* fwmem is used to load firmware onto the card */
/*
 * Allocate the DMA buffer used to stage firmware sections for
 * download to the card.  sc_fwdmasegsz is the per-chunk size.
 */
static int
iwm_alloc_fwmem(struct iwm_softc *sc)
{
	/* Must be aligned on a 16-byte boundary. */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
	    sc->sc_fwdmasegsz, 16);
}
1046
/* Release the firmware staging DMA buffer. */
static void
iwm_free_fwmem(struct iwm_softc *sc)
{
	iwm_dma_contig_free(&sc->fw_dma);
}
1052
1053 /* tx scheduler rings. not used? */
1054 static int
iwm_alloc_sched(struct iwm_softc * sc)1055 iwm_alloc_sched(struct iwm_softc *sc)
1056 {
1057 int rv;
1058
1059 /* TX scheduler rings must be aligned on a 1KB boundary. */
1060 rv = iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
1061 __arraycount(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
1062 return rv;
1063 }
1064
/* Release the TX scheduler byte-count tables. */
static void
iwm_free_sched(struct iwm_softc *sc)
{
	iwm_dma_contig_free(&sc->sched_dma);
}
1070
1071 /* keep-warm page is used internally by the card. see iwl-fh.h for more info */
/* Allocate the 4KB "keep warm" page (4KB aligned). */
static int
iwm_alloc_kw(struct iwm_softc *sc)
{
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
}
1077
/* Release the "keep warm" page. */
static void
iwm_free_kw(struct iwm_softc *sc)
{
	iwm_dma_contig_free(&sc->kw_dma);
}
1083
1084 /* interrupt cause table */
/*
 * Allocate the interrupt cause table, aligned so its base fits in
 * the DRAM_INT_TBL register after shifting by IWM_ICT_PADDR_SHIFT.
 */
static int
iwm_alloc_ict(struct iwm_softc *sc)
{
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
	    IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
}
1091
/* Release the interrupt cause table. */
static void
iwm_free_ict(struct iwm_softc *sc)
{
	iwm_dma_contig_free(&sc->ict_dma);
}
1097
/*
 * Allocate all resources backing an RX ring: the descriptor array
 * (one 32-bit DMA address per slot), the RX status area, and one DMA
 * map plus receive buffer per slot.  On any failure everything
 * allocated so far is torn down via iwm_free_rx_ring().
 */
static int
iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	bus_size_t size;
	int i, error;

	ring->cur = 0;

	/* Allocate RX descriptors (256-byte aligned). */
	size = IWM_RX_RING_COUNT * sizeof(uint32_t);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate RX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/* Allocate RX status area (16-byte aligned). */
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
	    sizeof(*ring->stat), 16);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate RX status DMA memory\n");
		goto fail;
	}
	ring->stat = ring->stat_dma.vaddr;

	/*
	 * Allocate and map RX buffers.
	 */
	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];

		memset(data, 0, sizeof(*data));
		error = bus_dmamap_create(sc->sc_dmat, IWM_RBUF_SIZE, 1,
		    IWM_RBUF_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &data->map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create RX buf DMA map\n");
			goto fail;
		}

		/* Attach an mbuf to the slot and load it into the map. */
		if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
			goto fail;
		}
	}
	return 0;

fail:	iwm_free_rx_ring(sc, ring);
	return error;
}
1151
/*
 * Stop RX DMA channel 0 and wait (up to ~10ms) for it to report
 * idle, then rewind the ring cursor.  DMA resources are kept; use
 * iwm_free_rx_ring() to release them.
 */
static void
iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	int ntries;

	if (iwm_nic_lock(sc)) {
		IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
		for (ntries = 0; ntries < 1000; ntries++) {
			if (IWM_READ(sc, IWM_FH_MEM_RSSR_RX_STATUS_REG) &
			    IWM_FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE)
				break;
			DELAY(10);
		}
		iwm_nic_unlock(sc);
	}
	ring->cur = 0;
}
1169
1170 static void
iwm_free_rx_ring(struct iwm_softc * sc,struct iwm_rx_ring * ring)1171 iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1172 {
1173 int i;
1174
1175 iwm_dma_contig_free(&ring->desc_dma);
1176 iwm_dma_contig_free(&ring->stat_dma);
1177
1178 for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1179 struct iwm_rx_data *data = &ring->data[i];
1180
1181 if (data->m != NULL) {
1182 bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1183 data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1184 bus_dmamap_unload(sc->sc_dmat, data->map);
1185 m_freem(data->m);
1186 }
1187 if (data->map != NULL)
1188 bus_dmamap_destroy(sc->sc_dmat, data->map);
1189 }
1190 }
1191
/*
 * Allocate all resources backing a TX ring: the TFD descriptor
 * array, and -- for the EDCA and command queues only -- the command
 * buffer area and one DMA map per slot.  Each slot's cmd_paddr and
 * scratch_paddr are precomputed from the command area base.  On any
 * failure everything allocated so far is torn down via
 * iwm_free_tx_ring().
 */
static int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
	bus_addr_t paddr;
	bus_size_t size;
	int i, error;

	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;

	/* Allocate TX descriptors (256-byte aligned). */
	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate TX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/*
	 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
	 * to allocate commands space for other rings.
	 */
	if (qid > IWM_MVM_CMD_QUEUE)
		return 0;

	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate TX cmd DMA memory\n");
		goto fail;
	}
	ring->cmd = ring->cmd_dma.vaddr;

	/* Walk the command area, one iwm_device_cmd per slot. */
	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		data->cmd_paddr = paddr;
		/* Scratch area lives right after the command header. */
		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
		    + offsetof(struct iwm_tx_cmd, scratch);
		paddr += sizeof(struct iwm_device_cmd);

		error = bus_dmamap_create(sc->sc_dmat, IWM_RBUF_SIZE,
		    IWM_NUM_OF_TBS - 2, IWM_RBUF_SIZE, 0, BUS_DMA_NOWAIT,
		    &data->map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create TX buf DMA map\n");
			goto fail;
		}
	}
	KASSERT(paddr == ring->cmd_dma.paddr + size);
	return 0;

fail:	iwm_free_tx_ring(sc, ring);
	return error;
}
1253
1254 static void
iwm_reset_tx_ring(struct iwm_softc * sc,struct iwm_tx_ring * ring)1255 iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1256 {
1257 int i;
1258
1259 for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1260 struct iwm_tx_data *data = &ring->data[i];
1261
1262 if (data->m != NULL) {
1263 bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1264 data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1265 bus_dmamap_unload(sc->sc_dmat, data->map);
1266 m_freem(data->m);
1267 data->m = NULL;
1268 }
1269 }
1270 /* Clear TX descriptors. */
1271 memset(ring->desc, 0, ring->desc_dma.size);
1272 bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0,
1273 ring->desc_dma.size, BUS_DMASYNC_PREWRITE);
1274 sc->qfullmsk &= ~(1 << ring->qid);
1275 ring->queued = 0;
1276 ring->cur = 0;
1277 }
1278
1279 static void
iwm_free_tx_ring(struct iwm_softc * sc,struct iwm_tx_ring * ring)1280 iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1281 {
1282 int i;
1283
1284 iwm_dma_contig_free(&ring->desc_dma);
1285 iwm_dma_contig_free(&ring->cmd_dma);
1286
1287 for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1288 struct iwm_tx_data *data = &ring->data[i];
1289
1290 if (data->m != NULL) {
1291 bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1292 data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1293 bus_dmamap_unload(sc->sc_dmat, data->map);
1294 m_freem(data->m);
1295 }
1296 if (data->map != NULL)
1297 bus_dmamap_destroy(sc->sc_dmat, data->map);
1298 }
1299 }
1300
1301 /*
1302 * High-level hardware frobbing routines
1303 */
1304
/*
 * Mask all interrupts except RF-kill, so the driver is still told
 * when the hardware kill switch changes state.
 */
static void
iwm_enable_rfkill_int(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INT_BIT_RF_KILL;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1311
1312 static int
iwm_check_rfkill(struct iwm_softc * sc)1313 iwm_check_rfkill(struct iwm_softc *sc)
1314 {
1315 uint32_t v;
1316 int s;
1317 int rv;
1318
1319 s = splnet();
1320
1321 /*
1322 * "documentation" is not really helpful here:
1323 * 27: HW_RF_KILL_SW
1324 * Indicates state of (platform's) hardware RF-Kill switch
1325 *
1326 * But apparently when it's off, it's on ...
1327 */
1328 v = IWM_READ(sc, IWM_CSR_GP_CNTRL);
1329 rv = (v & IWM_CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) == 0;
1330 if (rv) {
1331 sc->sc_flags |= IWM_FLAG_RFKILL;
1332 } else {
1333 sc->sc_flags &= ~IWM_FLAG_RFKILL;
1334 }
1335
1336 splx(s);
1337 return rv;
1338 }
1339
/* Enable the full default interrupt set and remember it in sc_intmask. */
static void
iwm_enable_interrupts(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1346
/* Re-apply the last interrupt mask stashed in sc_intmask. */
static void
iwm_restore_interrupts(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1352
/*
 * Mask all interrupt sources and acknowledge anything pending, so
 * no stale cause bits fire when interrupts are re-enabled.
 */
static void
iwm_disable_interrupts(struct iwm_softc *sc)
{
	int s = splnet();

	/* disable interrupts */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	/* acknowledge all interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);

	splx(s);
}
1367
/*
 * Clear and re-arm the interrupt cause table, point the device at
 * it, and switch the driver into ICT interrupt mode.  Interrupts
 * are disabled for the duration and re-enabled at the end.
 */
static void
iwm_ict_reset(struct iwm_softc *sc)
{
	iwm_disable_interrupts(sc);

	/* Reset ICT table. */
	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
	sc->ict_cur = 0;

	/* Set physical address of ICT table (4KB aligned). */
	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
	    IWM_CSR_DRAM_INT_TBL_ENABLE
	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWM_FLAG_USE_ICT;

	/* Re-enable interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);
}
1390
1391 #define IWM_HW_READY_TIMEOUT 50
/*
 * Assert NIC_READY and poll for the hardware to latch it.
 * Returns nonzero when the device reports ready within
 * IWM_HW_READY_TIMEOUT microseconds, 0 otherwise.
 */
static int
iwm_set_hw_ready(struct iwm_softc *sc)
{
	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	return iwm_poll_bit(sc, IWM_CSR_HW_IF_CONFIG_REG,
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
	    IWM_HW_READY_TIMEOUT);
}
1403 #undef IWM_HW_READY_TIMEOUT
1404
/*
 * Make sure the hardware is ready for register access.  If the
 * first readiness check fails, assert PREPARE and keep retrying
 * every 200us for up to 150ms.  Returns 0 when ready, ETIMEDOUT
 * otherwise.
 */
static int
iwm_prepare_card_hw(struct iwm_softc *sc)
{
	int rv = 0;
	int t = 0;

	if (iwm_set_hw_ready(sc))
		goto out;

	/* If HW is not ready, prepare the conditions to check again */
	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
	    IWM_CSR_HW_IF_CONFIG_REG_PREPARE);

	do {
		if (iwm_set_hw_ready(sc))
			goto out;
		DELAY(200);
		t += 200;
	} while (t < 150000);

	rv = ETIMEDOUT;

 out:
	return rv;
}
1430
/*
 * Mirror the PCIe ASPM L1 setting into the device's GIO register:
 * when L1 is enabled in the link control register, L0S must be
 * disabled (and vice versa).  The GIO bit naming is inverted with
 * respect to what it does, hence the odd-looking comments.
 */
static void
iwm_apm_config(struct iwm_softc *sc)
{
	pcireg_t reg;

	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
	    sc->sc_cap_off + PCIE_LCSR);
	if (reg & PCIE_LCSR_ASPM_L1) {
		/* Um the Linux driver prints "Disabling L0S for this one ... */
		IWM_SETBITS(sc, IWM_CSR_GIO_REG,
		    IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
	} else {
		/* ... and "Enabling" here */
		IWM_CLRBITS(sc, IWM_CSR_GIO_REG,
		    IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
	}
}
1448
1449 /*
1450 * Start up NIC's basic functionality after it has been reset
1451 * (e.g. after platform boot, or shutdown via iwm_pcie_apm_stop())
1452 * NOTE: This does not load uCode nor start the embedded processor
1453 */
1454 static int
iwm_apm_init(struct iwm_softc * sc)1455 iwm_apm_init(struct iwm_softc *sc)
1456 {
1457 int error = 0;
1458
1459 DPRINTF(("iwm apm start\n"));
1460
1461 /* Disable L0S exit timer (platform NMI Work/Around) */
1462 IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
1463 IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
1464
1465 /*
1466 * Disable L0s without affecting L1;
1467 * don't wait for ICH L0s (ICH bug W/A)
1468 */
1469 IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
1470 IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
1471
1472 /* Set FH wait threshold to maximum (HW error during stress W/A) */
1473 IWM_SETBITS(sc, IWM_CSR_DBG_HPET_MEM_REG, IWM_CSR_DBG_HPET_MEM_REG_VAL);
1474
1475 /*
1476 * Enable HAP INTA (interrupt from management bus) to
1477 * wake device's PCI Express link L1a -> L0s
1478 */
1479 IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1480 IWM_CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
1481
1482 iwm_apm_config(sc);
1483
1484 #if 0 /* not for 7k */
1485 /* Configure analog phase-lock-loop before activating to D0A */
1486 if (trans->cfg->base_params->pll_cfg_val)
1487 IWM_SETBITS(trans, IWM_CSR_ANA_PLL_CFG,
1488 trans->cfg->base_params->pll_cfg_val);
1489 #endif
1490
1491 /*
1492 * Set "initialization complete" bit to move adapter from
1493 * D0U* --> D0A* (powered-up active) state.
1494 */
1495 IWM_SETBITS(sc, IWM_CSR_GP_CNTRL, IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1496
1497 /*
1498 * Wait for clock stabilization; once stabilized, access to
1499 * device-internal resources is supported, e.g. iwm_write_prph()
1500 * and accesses to uCode SRAM.
1501 */
1502 if (!iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
1503 IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
1504 IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
1505 aprint_error_dev(sc->sc_dev,
1506 "timeout waiting for clock stabilization\n");
1507 goto out;
1508 }
1509
1510 if (sc->host_interrupt_operation_mode) {
1511 /*
1512 * This is a bit of an abuse - This is needed for 7260 / 3160
1513 * only check host_interrupt_operation_mode even if this is
1514 * not related to host_interrupt_operation_mode.
1515 *
1516 * Enable the oscillator to count wake up time for L1 exit. This
1517 * consumes slightly more power (100uA) - but allows to be sure
1518 * that we wake up from L1 on time.
1519 *
1520 * This looks weird: read twice the same register, discard the
1521 * value, set a bit, and yet again, read that same register
1522 * just to discard the value. But that's the way the hardware
1523 * seems to like it.
1524 */
1525 iwm_read_prph(sc, IWM_OSC_CLK);
1526 iwm_read_prph(sc, IWM_OSC_CLK);
1527 iwm_set_bits_prph(sc, IWM_OSC_CLK, IWM_OSC_CLK_FORCE_CONTROL);
1528 iwm_read_prph(sc, IWM_OSC_CLK);
1529 iwm_read_prph(sc, IWM_OSC_CLK);
1530 }
1531
1532 /*
1533 * Enable DMA clock and wait for it to stabilize.
1534 *
1535 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
1536 * do not disable clocks. This preserves any hardware bits already
1537 * set by default in "CLK_CTRL_REG" after reset.
1538 */
1539 iwm_write_prph(sc, IWM_APMG_CLK_EN_REG, IWM_APMG_CLK_VAL_DMA_CLK_RQT);
1540 //kpause("iwmapm", 0, mstohz(20), NULL);
1541 DELAY(20);
1542
1543 /* Disable L1-Active */
1544 iwm_set_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
1545 IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1546
1547 /* Clear the interrupt in APMG if the NIC is in RFKILL */
1548 iwm_write_prph(sc, IWM_APMG_RTC_INT_STT_REG,
1549 IWM_APMG_RTC_INT_STT_RFKILL);
1550
1551 out:
1552 if (error)
1553 aprint_error_dev(sc->sc_dev, "apm init error %d\n", error);
1554 return error;
1555 }
1556
1557 /* iwlwifi/pcie/trans.c */
/*
 * Halt the device's busmaster DMA and wait (up to 100us) for the
 * MASTER_DISABLED acknowledgement; logs an error on timeout.
 * From iwlwifi/pcie/trans.c.
 */
static void
iwm_apm_stop(struct iwm_softc *sc)
{
	/* stop device's busmaster DMA activity */
	IWM_SETBITS(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_STOP_MASTER);

	if (!iwm_poll_bit(sc, IWM_CSR_RESET,
	    IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED,
	    IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED, 100))
		aprint_error_dev(sc->sc_dev, "timeout waiting for master\n");
	DPRINTF(("iwm apm stop\n"));
}
1570
1571 /* iwlwifi pcie/trans.c */
/*
 * Bring the hardware up from cold: wait for readiness, issue a full
 * software reset, run APM init, and arm the RF-kill interrupt.
 * Returns 0 on success or an errno from the failing step.
 */
static int
iwm_start_hw(struct iwm_softc *sc)
{
	int error;

	if ((error = iwm_prepare_card_hw(sc)) != 0)
		return error;

	/* Reset the entire device */
	IWM_WRITE(sc, IWM_CSR_RESET,
	    IWM_CSR_RESET_REG_FLAG_SW_RESET |
	    IWM_CSR_RESET_REG_FLAG_NEVO_RESET);
	DELAY(10);

	if ((error = iwm_apm_init(sc)) != 0)
		return error;

	iwm_enable_rfkill_int(sc);
	/* Record the current RF-kill switch state in sc_flags. */
	iwm_check_rfkill(sc);

	return 0;
}
1594
1595 /* iwlwifi pcie/trans.c */
1596
/*
 * Full device shutdown: quiesce interrupts and DMA, drain the RX
 * and TX rings, power down the busmaster clocks, stop the APM and
 * reset the on-board processor.  The RF-kill interrupt is left
 * armed so switch changes are still noticed while the device is
 * down.
 */
static void
iwm_stop_device(struct iwm_softc *sc)
{
	int chnl, ntries;
	int qid;

	/* tell the device to stop sending interrupts */
	iwm_disable_interrupts(sc);

	/* device going down, Stop using ICT table */
	sc->sc_flags &= ~IWM_FLAG_USE_ICT;

	/* stop tx and rx. tx and rx bits, as usual, are from if_iwn */

	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Stop all DMA channels. */
	if (iwm_nic_lock(sc)) {
		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
			IWM_WRITE(sc,
			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
			/* Wait up to ~4ms for the channel to idle. */
			for (ntries = 0; ntries < 200; ntries++) {
				uint32_t r;

				r = IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG);
				if (r & IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(
				    chnl))
					break;
				DELAY(20);
			}
		}
		iwm_nic_unlock(sc);
	}

	/* Stop RX ring. */
	iwm_reset_rx_ring(sc, &sc->rxq);

	/* Reset all TX rings. */
	for (qid = 0; qid < __arraycount(sc->txq); qid++)
		iwm_reset_tx_ring(sc, &sc->txq[qid]);

	/*
	 * Power-down device's busmaster DMA clocks
	 */
	iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG, IWM_APMG_CLK_VAL_DMA_CLK_RQT);
	DELAY(5);

	/* Make sure (redundant) we've released our request to stay awake */
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwm_apm_stop(sc);

	/* Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * Clean again the interrupt here
	 */
	iwm_disable_interrupts(sc);
	/* stop and reset the on-board processor */
	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_NEVO_RESET);

	/*
	 * Even if we stop the HW, we still want the RF kill
	 * interrupt
	 */
	iwm_enable_rfkill_int(sc);
	iwm_check_rfkill(sc);
}
1665
1666 /* iwlwifi pcie/trans.c (always main power) */
/*
 * Select VMAIN as the device power source (always main power).
 * From iwlwifi pcie/trans.c.
 */
static void
iwm_set_pwr(struct iwm_softc *sc)
{
	iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
	    IWM_APMG_PS_CTRL_VAL_PWR_SRC_VMAIN, ~IWM_APMG_PS_CTRL_MSK_PWR_SRC);
}
1673
1674 /* iwlwifi: mvm/ops.c */
/*
 * Program the HW_IF_CONFIG register with the MAC step/dash taken
 * from the hardware revision and the radio type/step/dash taken
 * from the firmware's PHY configuration.  From iwlwifi mvm/ops.c.
 */
static void
iwm_mvm_nic_config(struct iwm_softc *sc)
{
	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
	uint32_t reg_val = 0;

	/* Extract the radio configuration fields from sc_fw_phy_config. */
	radio_cfg_type = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
	radio_cfg_step = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
	radio_cfg_dash = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
	    IWM_FW_PHY_CFG_RADIO_DASH_POS;

	/* SKU control */
	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;

	/* radio configuration */
	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);

	DPRINTF(("Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
	    radio_cfg_step, radio_cfg_dash));

	/*
	 * W/A : NIC is stuck in a reset state after Early PCIe power off
	 * (PCIe power is lost before PERST# is asserted), causing ME FW
	 * to lose ownership and not being able to obtain it back.
	 */
	iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
	    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
	    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
}
1713
/*
 * Program the RX DMA engine: stop channel 0, point it at the ring
 * descriptors and status area, then re-enable it with 4KB receive
 * buffers.  Returns 0 on success or EBUSY if the NIC could not be
 * locked.
 */
static int
iwm_nic_rx_init(struct iwm_softc *sc)
{
	if (!iwm_nic_lock(sc))
		return EBUSY;

	/*
	 * Initialize RX ring. This is from the iwn driver.
	 */
	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));

	/* stop DMA */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Set physical address of RX ring (256-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);

	/* Set physical address of RX status (16-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);

	/* Enable RX. */
	/*
	 * Note: Linux driver also sets this:
	 *  (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
	 *
	 * It causes weird behavior.  YMMV.
	 */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL		|
	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY		|  /* HW bug */
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL	|
	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K		|
	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);

	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (sc->host_interrupt_operation_mode)
		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);

	/*
	 * Thus sayeth el jefe (iwlwifi) via a comment:
	 *
	 * This value should initially be 0 (before preparing any
	 * RBs), should be 8 after preparing the first 8 RBs (for example)
	 */
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);

	iwm_nic_unlock(sc);

	return 0;
}
1772
/*
 * Program the TX side: deactivate the scheduler, install the
 * keep-warm page address, and hand every TX ring's descriptor base
 * to the hardware.  Returns 0 on success or EBUSY if the NIC could
 * not be locked.
 */
static int
iwm_nic_tx_init(struct iwm_softc *sc)
{
	int qid;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Deactivate TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Set physical address of "keep warm" page (16-byte aligned). */
	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);

	/* Initialize TX rings. */
	for (qid = 0; qid < __arraycount(sc->txq); qid++) {
		struct iwm_tx_ring *txq = &sc->txq[qid];

		/* Set physical address of TX ring (256-byte aligned). */
		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
		    txq->desc_dma.paddr >> 8);
		DPRINTF(("loading ring %d descriptors (%p) at %"PRIxMAX"\n",
		    qid, txq->desc, (uintmax_t)(txq->desc_dma.paddr >> 8)));
	}
	iwm_nic_unlock(sc);

	return 0;
}
1801
1802 static int
iwm_nic_init(struct iwm_softc * sc)1803 iwm_nic_init(struct iwm_softc *sc)
1804 {
1805 int error;
1806
1807 iwm_apm_init(sc);
1808 iwm_set_pwr(sc);
1809
1810 iwm_mvm_nic_config(sc);
1811
1812 if ((error = iwm_nic_rx_init(sc)) != 0)
1813 return error;
1814
1815 /*
1816 * Ditto for TX, from iwn
1817 */
1818 if ((error = iwm_nic_tx_init(sc)) != 0)
1819 return error;
1820
1821 DPRINTF(("shadow registers enabled\n"));
1822 IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
1823
1824 return 0;
1825 }
1826
1827 #if 0
1828 enum iwm_mvm_tx_fifo {
1829 IWM_MVM_TX_FIFO_BK = 0,
1830 IWM_MVM_TX_FIFO_BE,
1831 IWM_MVM_TX_FIFO_VI,
1832 IWM_MVM_TX_FIFO_VO,
1833 IWM_MVM_TX_FIFO_MCAST = 5,
1834 };
1835
1836 static const uint8_t iwm_mvm_ac_to_tx_fifo[] = {
1837 IWM_MVM_TX_FIFO_VO,
1838 IWM_MVM_TX_FIFO_VI,
1839 IWM_MVM_TX_FIFO_BE,
1840 IWM_MVM_TX_FIFO_BK,
1841 };
1842 #endif
1843
/*
 * Activate TX queue `qid' on scheduler FIFO `fifo': deactivate it,
 * reset its read/write pointers and scheduler context, set window
 * size and frame limit, then mark it active.  Silently bails out if
 * the NIC cannot be locked.
 */
static void
iwm_enable_txq(struct iwm_softc *sc, int qid, int fifo)
{
	if (!iwm_nic_lock(sc)) {
		DPRINTF(("%s: cannot enable txq %d\n", DEVNAME(sc), qid));
		return; /* XXX return EBUSY */
	}

	/* unactivate before configuration */
	iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
	    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
	    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));

	if (qid != IWM_MVM_CMD_QUEUE) {
		iwm_set_bits_prph(sc, IWM_SCD_QUEUECHAIN_SEL, (1 << qid));
	}

	iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));

	/* Reset write and read pointers to slot 0. */
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
	iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);

	iwm_write_mem32(sc, sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
	/* Set scheduler window size and frame limit. */
	iwm_write_mem32(sc,
	    sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
	    sizeof(uint32_t),
	    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
	    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
	    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
	    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

	iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
	    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
	    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
	    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
	    IWM_SCD_QUEUE_STTS_REG_MSK);

	iwm_nic_unlock(sc);

	DPRINTF(("enabled txq %d FIFO %d\n", qid, fifo));
}
1886
/*
 * Finish bring-up after the firmware's ALIVE notification: verify
 * the scheduler SRAM base address, reset the ICT, clear scheduler
 * context memory, install the scheduler ring base, enable the
 * command queue and all FH TX DMA channels, and re-enable
 * L1-Active.  Returns 0 on success or an errno.
 */
static int
iwm_post_alive(struct iwm_softc *sc)
{
	int nwords;
	int error, chnl;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Firmware must report the scheduler base we already recorded. */
	if (sc->sched_base != iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR)) {
		DPRINTF(("%s: sched addr mismatch\n", DEVNAME(sc)));
		error = EINVAL;
		goto out;
	}

	iwm_ict_reset(sc);

	/* Clear TX scheduler state in SRAM. */
	nwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND)
	    / sizeof(uint32_t);
	error = iwm_write_mem(sc,
	    sc->sched_base + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
	    NULL, nwords);
	if (error)
		goto out;

	/* Set physical address of TX scheduler rings (1KB aligned). */
	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);

	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);

	/* enable command channel */
	iwm_enable_txq(sc, IWM_MVM_CMD_QUEUE, 7);

	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);

	/* Enable DMA channels. */
	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
	}

	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	/* Enable L1-Active */
	iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
	    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

 out:
	iwm_nic_unlock(sc);
	return error;
}
1942
1943 /*
1944 * PHY db
1945 * iwlwifi/iwl-phy-db.c
1946 */
1947
1948 /*
1949 * BEGIN iwl-phy-db.c
1950 */
1951
/*
 * PHY DB section types, using the firmware's numbering (the `type'
 * field of struct iwm_calib_res_notif_phy_db / struct iwm_phy_db_cmd).
 */
enum iwm_phy_db_section_type {
	IWM_PHY_DB_CFG = 1,		/* configuration section */
	IWM_PHY_DB_CALIB_NCH,		/* non-channel-specific calibration */
	IWM_PHY_DB_UNUSED,
	IWM_PHY_DB_CALIB_CHG_PAPD,	/* per-channel-group PAPD calibration */
	IWM_PHY_DB_CALIB_CHG_TXP,	/* per-channel-group TX power */
	IWM_PHY_DB_MAX			/* sentinel, not a real section */
};
1960
1961 #define IWM_PHY_DB_CMD 0x6c /* TEMP API - The actual is 0x8c */
1962
1963 /*
1964 * phy db - configure operational ucode
1965 */
/*
 * phy db - configure operational ucode
 */
struct iwm_phy_db_cmd {
	uint16_t type;		/* section type, converted to device order
				 * in iwm_send_phy_db_cmd() */
	uint16_t length;	/* byte count of the payload in data[] */
	uint8_t data[];		/* section payload follows the header */
} __packed;
1971
1972 /* for parsing of tx power channel group data that comes from the firmware*/
/* for parsing of tx power channel group data that comes from the firmware*/
struct iwm_phy_db_chg_txp {
	uint32_t space;			/* not examined by this driver */
	uint16_t max_channel_idx;	/* highest channel index covered by
					 * this group (little-endian) */
} __packed;
1977
1978 /*
1979 * phy db - Receive phy db chunk after calibrations
1980 */
/*
 * phy db - Receive phy db chunk after calibrations
 */
struct iwm_calib_res_notif_phy_db {
	uint16_t type;		/* enum iwm_phy_db_section_type (LE16) */
	uint16_t length;	/* byte count of data[] */
	uint8_t data[];		/* payload; for the channel-group sections
				 * the first LE16 word is the group id */
} __packed;
1986
1987 /*
1988 * get phy db section: returns a pointer to a phy db section specified by
1989 * type and channel group id.
1990 */
1991 static struct iwm_phy_db_entry *
iwm_phy_db_get_section(struct iwm_softc * sc,enum iwm_phy_db_section_type type,uint16_t chg_id)1992 iwm_phy_db_get_section(struct iwm_softc *sc,
1993 enum iwm_phy_db_section_type type, uint16_t chg_id)
1994 {
1995 struct iwm_phy_db *phy_db = &sc->sc_phy_db;
1996
1997 if (type >= IWM_PHY_DB_MAX)
1998 return NULL;
1999
2000 switch (type) {
2001 case IWM_PHY_DB_CFG:
2002 return &phy_db->cfg;
2003 case IWM_PHY_DB_CALIB_NCH:
2004 return &phy_db->calib_nch;
2005 case IWM_PHY_DB_CALIB_CHG_PAPD:
2006 if (chg_id >= IWM_NUM_PAPD_CH_GROUPS)
2007 return NULL;
2008 return &phy_db->calib_ch_group_papd[chg_id];
2009 case IWM_PHY_DB_CALIB_CHG_TXP:
2010 if (chg_id >= IWM_NUM_TXP_CH_GROUPS)
2011 return NULL;
2012 return &phy_db->calib_ch_group_txp[chg_id];
2013 default:
2014 return NULL;
2015 }
2016 return NULL;
2017 }
2018
2019 static int
iwm_phy_db_set_section(struct iwm_softc * sc,struct iwm_calib_res_notif_phy_db * phy_db_notif,uint16_t size)2020 iwm_phy_db_set_section(struct iwm_softc *sc,
2021 struct iwm_calib_res_notif_phy_db *phy_db_notif, uint16_t size)
2022 {
2023 enum iwm_phy_db_section_type type = le16toh(phy_db_notif->type);
2024 struct iwm_phy_db_entry *entry;
2025 uint16_t chg_id = 0;
2026
2027 if (type == IWM_PHY_DB_CALIB_CHG_PAPD ||
2028 type == IWM_PHY_DB_CALIB_CHG_TXP)
2029 chg_id = le16toh(*(uint16_t *)phy_db_notif->data);
2030
2031 entry = iwm_phy_db_get_section(sc, type, chg_id);
2032 if (!entry)
2033 return EINVAL;
2034
2035 if (entry->data)
2036 kmem_intr_free(entry->data, entry->size);
2037 entry->data = kmem_intr_alloc(size, KM_NOSLEEP);
2038 if (!entry->data) {
2039 entry->size = 0;
2040 return ENOMEM;
2041 }
2042 memcpy(entry->data, phy_db_notif->data, size);
2043 entry->size = size;
2044
2045 DPRINTFN(10, ("%s(%d): [PHYDB]SET: Type %d, Size: %d, data: %p\n",
2046 __func__, __LINE__, type, size, entry->data));
2047
2048 return 0;
2049 }
2050
/*
 * Return 1 if ch_id is a channel number this driver knows about:
 * 2GHz channels up to 14, or the 5GHz channels 36-64 and 100-140
 * (multiples of 4) and 145-165 (multiples of 4 plus 1).
 */
static int
iwm_is_valid_channel(uint16_t ch_id)
{
	if (ch_id <= 14)
		return 1;
	if (36 <= ch_id && ch_id <= 64 && ch_id % 4 == 0)
		return 1;
	if (100 <= ch_id && ch_id <= 140 && ch_id % 4 == 0)
		return 1;
	if (145 <= ch_id && ch_id <= 165 && ch_id % 4 == 1)
		return 1;
	return 0;
}
2061
/*
 * Map a channel number to its dense index in the calibration tables
 * (1..14 -> 0..13, 36..64 -> 14..21, 100..140 -> 22..32,
 * 145..165 -> 33..38); 0xff for an invalid channel.
 */
static uint8_t
iwm_ch_id_to_ch_index(uint16_t ch_id)
{
	if (!iwm_is_valid_channel(ch_id))
		return 0xff;

	if (ch_id <= 14)
		return ch_id - 1;
	if (ch_id <= 64)
		return (ch_id + 20) / 4;
	if (ch_id <= 140)
		return (ch_id - 12) / 4;
	return (ch_id - 13) / 4;
}
2076
2077
/*
 * Map a channel number to its PAPD calibration channel group
 * (0-3, one group per channel range); 0xff for an invalid channel.
 */
static uint16_t
iwm_channel_id_to_papd(uint16_t ch_id)
{
	if (!iwm_is_valid_channel(ch_id))
		return 0xff;

	if (1 <= ch_id && ch_id <= 14)
		return 0;
	if (36 <= ch_id && ch_id <= 64)
		return 1;
	if (100 <= ch_id && ch_id <= 140)
		return 2;
	return 3;
}
2092
2093 static uint16_t
iwm_channel_id_to_txp(struct iwm_softc * sc,uint16_t ch_id)2094 iwm_channel_id_to_txp(struct iwm_softc *sc, uint16_t ch_id)
2095 {
2096 struct iwm_phy_db *phy_db = &sc->sc_phy_db;
2097 struct iwm_phy_db_chg_txp *txp_chg;
2098 int i;
2099 uint8_t ch_index = iwm_ch_id_to_ch_index(ch_id);
2100
2101 if (ch_index == 0xff)
2102 return 0xff;
2103
2104 for (i = 0; i < IWM_NUM_TXP_CH_GROUPS; i++) {
2105 txp_chg = (void *)phy_db->calib_ch_group_txp[i].data;
2106 if (!txp_chg)
2107 return 0xff;
2108 /*
2109 * Looking for the first channel group that its max channel is
2110 * higher then wanted channel.
2111 */
2112 if (le16toh(txp_chg->max_channel_idx) >= ch_index)
2113 return i;
2114 }
2115 return 0xff;
2116 }
2117
2118 static int
iwm_phy_db_get_section_data(struct iwm_softc * sc,uint32_t type,uint8_t ** data,uint16_t * size,uint16_t ch_id)2119 iwm_phy_db_get_section_data(struct iwm_softc *sc,
2120 uint32_t type, uint8_t **data, uint16_t *size, uint16_t ch_id)
2121 {
2122 struct iwm_phy_db_entry *entry;
2123 uint16_t ch_group_id = 0;
2124
2125 /* find wanted channel group */
2126 if (type == IWM_PHY_DB_CALIB_CHG_PAPD)
2127 ch_group_id = iwm_channel_id_to_papd(ch_id);
2128 else if (type == IWM_PHY_DB_CALIB_CHG_TXP)
2129 ch_group_id = iwm_channel_id_to_txp(sc, ch_id);
2130
2131 entry = iwm_phy_db_get_section(sc, type, ch_group_id);
2132 if (!entry)
2133 return EINVAL;
2134
2135 *data = entry->data;
2136 *size = entry->size;
2137
2138 DPRINTFN(10, ("%s(%d): [PHYDB] GET: Type %d , Size: %d\n",
2139 __func__, __LINE__, type, *size));
2140
2141 return 0;
2142 }
2143
2144 static int
iwm_send_phy_db_cmd(struct iwm_softc * sc,uint16_t type,uint16_t length,void * data)2145 iwm_send_phy_db_cmd(struct iwm_softc *sc, uint16_t type,
2146 uint16_t length, void *data)
2147 {
2148 struct iwm_phy_db_cmd phy_db_cmd;
2149 struct iwm_host_cmd cmd = {
2150 .id = IWM_PHY_DB_CMD,
2151 .flags = IWM_CMD_SYNC,
2152 };
2153
2154 DPRINTFN(10, ("Sending PHY-DB hcmd of type %d, of length %d\n",
2155 type, length));
2156
2157 /* Set phy db cmd variables */
2158 phy_db_cmd.type = le16toh(type);
2159 phy_db_cmd.length = le16toh(length);
2160
2161 /* Set hcmd variables */
2162 cmd.data[0] = &phy_db_cmd;
2163 cmd.len[0] = sizeof(struct iwm_phy_db_cmd);
2164 cmd.data[1] = data;
2165 cmd.len[1] = length;
2166 cmd.dataflags[1] = IWM_HCMD_DFL_NOCOPY;
2167
2168 return iwm_send_cmd(sc, &cmd);
2169 }
2170
2171 static int
iwm_phy_db_send_all_channel_groups(struct iwm_softc * sc,enum iwm_phy_db_section_type type,uint8_t max_ch_groups)2172 iwm_phy_db_send_all_channel_groups(struct iwm_softc *sc,
2173 enum iwm_phy_db_section_type type, uint8_t max_ch_groups)
2174 {
2175 uint16_t i;
2176 int err;
2177 struct iwm_phy_db_entry *entry;
2178
2179 /* Send all the channel-specific groups to operational fw */
2180 for (i = 0; i < max_ch_groups; i++) {
2181 entry = iwm_phy_db_get_section(sc, type, i);
2182 if (!entry)
2183 return EINVAL;
2184
2185 if (!entry->size)
2186 continue;
2187
2188 /* Send the requested PHY DB section */
2189 err = iwm_send_phy_db_cmd(sc, type, entry->size, entry->data);
2190 if (err) {
2191 DPRINTF(("%s: Can't SEND phy_db section %d (%d), "
2192 "err %d\n", DEVNAME(sc), type, i, err));
2193 return err;
2194 }
2195
2196 DPRINTFN(10, ("Sent PHY_DB HCMD, type = %d num = %d\n", type, i));
2197 }
2198
2199 return 0;
2200 }
2201
2202 static int
iwm_send_phy_db_data(struct iwm_softc * sc)2203 iwm_send_phy_db_data(struct iwm_softc *sc)
2204 {
2205 uint8_t *data = NULL;
2206 uint16_t size = 0;
2207 int err;
2208
2209 DPRINTF(("Sending phy db data and configuration to runtime image\n"));
2210
2211 /* Send PHY DB CFG section */
2212 err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CFG, &data, &size, 0);
2213 if (err) {
2214 DPRINTF(("%s: Cannot get Phy DB cfg section, %d\n",
2215 DEVNAME(sc), err));
2216 return err;
2217 }
2218
2219 err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CFG, size, data);
2220 if (err) {
2221 DPRINTF(("%s: Cannot send HCMD of Phy DB cfg section, %d\n",
2222 DEVNAME(sc), err));
2223 return err;
2224 }
2225
2226 err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CALIB_NCH,
2227 &data, &size, 0);
2228 if (err) {
2229 DPRINTF(("%s: Cannot get Phy DB non specific channel section, "
2230 "%d\n", DEVNAME(sc), err));
2231 return err;
2232 }
2233
2234 err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CALIB_NCH, size, data);
2235 if (err) {
2236 DPRINTF(("%s: Cannot send HCMD of Phy DB non specific channel "
2237 "sect, %d\n", DEVNAME(sc), err));
2238 return err;
2239 }
2240
2241 /* Send all the TXP channel specific data */
2242 err = iwm_phy_db_send_all_channel_groups(sc,
2243 IWM_PHY_DB_CALIB_CHG_PAPD, IWM_NUM_PAPD_CH_GROUPS);
2244 if (err) {
2245 DPRINTF(("%s: Cannot send channel specific PAPD groups, %d\n",
2246 DEVNAME(sc), err));
2247 return err;
2248 }
2249
2250 /* Send all the TXP channel specific data */
2251 err = iwm_phy_db_send_all_channel_groups(sc,
2252 IWM_PHY_DB_CALIB_CHG_TXP, IWM_NUM_TXP_CH_GROUPS);
2253 if (err) {
2254 DPRINTF(("%s: Cannot send channel specific TX power groups, "
2255 "%d\n", DEVNAME(sc), err));
2256 return err;
2257 }
2258
2259 DPRINTF(("Finished sending phy db non channel data\n"));
2260 return 0;
2261 }
2262
2263 /*
2264 * END iwl-phy-db.c
2265 */
2266
2267 /*
2268 * BEGIN iwlwifi/mvm/time-event.c
2269 */
2270
2271 /*
2272 * For the high priority TE use a time event type that has similar priority to
2273 * the FW's action scan priority.
2274 */
2275 #define IWM_MVM_ROC_TE_TYPE_NORMAL IWM_TE_P2P_DEVICE_DISCOVERABLE
2276 #define IWM_MVM_ROC_TE_TYPE_MGMT_TX IWM_TE_P2P_CLIENT_ASSOC
2277
2278 /* used to convert from time event API v2 to v1 */
2279 #define IWM_TE_V2_DEP_POLICY_MSK (IWM_TE_V2_DEP_OTHER | IWM_TE_V2_DEP_TSF |\
2280 IWM_TE_V2_EVENT_SOCIOPATHIC)
2281 static inline uint16_t
iwm_te_v2_get_notify(uint16_t policy)2282 iwm_te_v2_get_notify(uint16_t policy)
2283 {
2284 return le16toh(policy) & IWM_TE_V2_NOTIF_MSK;
2285 }
2286
2287 static inline uint16_t
iwm_te_v2_get_dep_policy(uint16_t policy)2288 iwm_te_v2_get_dep_policy(uint16_t policy)
2289 {
2290 return (le16toh(policy) & IWM_TE_V2_DEP_POLICY_MSK) >>
2291 IWM_TE_V2_PLACEMENT_POS;
2292 }
2293
2294 static inline uint16_t
iwm_te_v2_get_absence(uint16_t policy)2295 iwm_te_v2_get_absence(uint16_t policy)
2296 {
2297 return (le16toh(policy) & IWM_TE_V2_ABSENCE) >> IWM_TE_V2_ABSENCE_POS;
2298 }
2299
2300 static void
iwm_mvm_te_v2_to_v1(const struct iwm_time_event_cmd_v2 * cmd_v2,struct iwm_time_event_cmd_v1 * cmd_v1)2301 iwm_mvm_te_v2_to_v1(const struct iwm_time_event_cmd_v2 *cmd_v2,
2302 struct iwm_time_event_cmd_v1 *cmd_v1)
2303 {
2304 cmd_v1->id_and_color = cmd_v2->id_and_color;
2305 cmd_v1->action = cmd_v2->action;
2306 cmd_v1->id = cmd_v2->id;
2307 cmd_v1->apply_time = cmd_v2->apply_time;
2308 cmd_v1->max_delay = cmd_v2->max_delay;
2309 cmd_v1->depends_on = cmd_v2->depends_on;
2310 cmd_v1->interval = cmd_v2->interval;
2311 cmd_v1->duration = cmd_v2->duration;
2312 if (cmd_v2->repeat == IWM_TE_V2_REPEAT_ENDLESS)
2313 cmd_v1->repeat = htole32(IWM_TE_V1_REPEAT_ENDLESS);
2314 else
2315 cmd_v1->repeat = htole32(cmd_v2->repeat);
2316 cmd_v1->max_frags = htole32(cmd_v2->max_frags);
2317 cmd_v1->interval_reciprocal = 0; /* unused */
2318
2319 cmd_v1->dep_policy = htole32(iwm_te_v2_get_dep_policy(cmd_v2->policy));
2320 cmd_v1->is_present = htole32(!iwm_te_v2_get_absence(cmd_v2->policy));
2321 cmd_v1->notify = htole32(iwm_te_v2_get_notify(cmd_v2->policy));
2322 }
2323
2324 static int
iwm_mvm_send_time_event_cmd(struct iwm_softc * sc,const struct iwm_time_event_cmd_v2 * cmd)2325 iwm_mvm_send_time_event_cmd(struct iwm_softc *sc,
2326 const struct iwm_time_event_cmd_v2 *cmd)
2327 {
2328 struct iwm_time_event_cmd_v1 cmd_v1;
2329
2330 if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_TIME_EVENT_API_V2)
2331 return iwm_mvm_send_cmd_pdu(sc, IWM_TIME_EVENT_CMD,
2332 IWM_CMD_SYNC, sizeof(*cmd), cmd);
2333
2334 iwm_mvm_te_v2_to_v1(cmd, &cmd_v1);
2335 return iwm_mvm_send_cmd_pdu(sc, IWM_TIME_EVENT_CMD, IWM_CMD_SYNC,
2336 sizeof(cmd_v1), &cmd_v1);
2337 }
2338
2339 static int
iwm_mvm_time_event_send_add(struct iwm_softc * sc,struct iwm_node * in,void * te_data,struct iwm_time_event_cmd_v2 * te_cmd)2340 iwm_mvm_time_event_send_add(struct iwm_softc *sc, struct iwm_node *in,
2341 void *te_data, struct iwm_time_event_cmd_v2 *te_cmd)
2342 {
2343 int ret;
2344
2345 DPRINTF(("Add new TE, duration %d TU\n", le32toh(te_cmd->duration)));
2346
2347 ret = iwm_mvm_send_time_event_cmd(sc, te_cmd);
2348 if (ret) {
2349 DPRINTF(("%s: Couldn't send IWM_TIME_EVENT_CMD: %d\n",
2350 DEVNAME(sc), ret));
2351 }
2352
2353 return ret;
2354 }
2355
2356 static void
iwm_mvm_protect_session(struct iwm_softc * sc,struct iwm_node * in,uint32_t duration,uint32_t min_duration,uint32_t max_delay)2357 iwm_mvm_protect_session(struct iwm_softc *sc, struct iwm_node *in,
2358 uint32_t duration, uint32_t min_duration, uint32_t max_delay)
2359 {
2360 struct iwm_time_event_cmd_v2 time_cmd;
2361
2362 memset(&time_cmd, 0, sizeof(time_cmd));
2363
2364 time_cmd.action = htole32(IWM_FW_CTXT_ACTION_ADD);
2365 time_cmd.id_and_color =
2366 htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
2367 time_cmd.id = htole32(IWM_TE_BSS_STA_AGGRESSIVE_ASSOC);
2368
2369 time_cmd.apply_time = htole32(iwm_read_prph(sc,
2370 IWM_DEVICE_SYSTEM_TIME_REG));
2371
2372 time_cmd.max_frags = IWM_TE_V2_FRAG_NONE;
2373 time_cmd.max_delay = htole32(max_delay);
2374 /* TODO: why do we need to interval = bi if it is not periodic? */
2375 time_cmd.interval = htole32(1);
2376 time_cmd.duration = htole32(duration);
2377 time_cmd.repeat = 1;
2378 time_cmd.policy
2379 = htole32(IWM_TE_V2_NOTIF_HOST_EVENT_START |
2380 IWM_TE_V2_NOTIF_HOST_EVENT_END);
2381
2382 iwm_mvm_time_event_send_add(sc, in, /*te_data*/NULL, &time_cmd);
2383 }
2384
2385 /*
2386 * END iwlwifi/mvm/time-event.c
2387 */
2388
2389 /*
2390 * NVM read access and content parsing. We do not support
2391 * external NVM or writing NVM.
2392 * iwlwifi/mvm/nvm.c
2393 */
2394
2395 /* list of NVM sections we are allowed/need to read */
static const int nvm_to_read[] = {
	IWM_NVM_SECTION_TYPE_HW,	/* holds the MAC address */
	IWM_NVM_SECTION_TYPE_SW,	/* version, radio cfg, SKU, channels */
	IWM_NVM_SECTION_TYPE_CALIBRATION,
	IWM_NVM_SECTION_TYPE_PRODUCTION,
};
2402
2403 /* Default NVM size to read */
2404 #define IWM_NVM_DEFAULT_CHUNK_SIZE (2*1024)
2405 #define IWM_MAX_NVM_SECTION_SIZE 7000
2406
2407 #define IWM_NVM_WRITE_OPCODE 1
2408 #define IWM_NVM_READ_OPCODE 0
2409
2410 static int
iwm_nvm_read_chunk(struct iwm_softc * sc,uint16_t section,uint16_t offset,uint16_t length,uint8_t * data,uint16_t * len)2411 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
2412 uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
2413 {
2414 offset = 0;
2415 struct iwm_nvm_access_cmd nvm_access_cmd = {
2416 .offset = htole16(offset),
2417 .length = htole16(length),
2418 .type = htole16(section),
2419 .op_code = IWM_NVM_READ_OPCODE,
2420 };
2421 struct iwm_nvm_access_resp *nvm_resp;
2422 struct iwm_rx_packet *pkt;
2423 struct iwm_host_cmd cmd = {
2424 .id = IWM_NVM_ACCESS_CMD,
2425 .flags = IWM_CMD_SYNC | IWM_CMD_WANT_SKB |
2426 IWM_CMD_SEND_IN_RFKILL,
2427 .data = { &nvm_access_cmd, },
2428 };
2429 int ret, bytes_read, offset_read;
2430 uint8_t *resp_data;
2431
2432 cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
2433
2434 ret = iwm_send_cmd(sc, &cmd);
2435 if (ret)
2436 return ret;
2437
2438 pkt = cmd.resp_pkt;
2439 if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
2440 DPRINTF(("%s: Bad return from IWM_NVM_ACCES_COMMAND (0x%08X)\n",
2441 DEVNAME(sc), pkt->hdr.flags));
2442 ret = EIO;
2443 goto exit;
2444 }
2445
2446 /* Extract NVM response */
2447 nvm_resp = (void *)pkt->data;
2448
2449 ret = le16toh(nvm_resp->status);
2450 bytes_read = le16toh(nvm_resp->length);
2451 offset_read = le16toh(nvm_resp->offset);
2452 resp_data = nvm_resp->data;
2453 if (ret) {
2454 DPRINTF(("%s: NVM access command failed with status %d\n",
2455 DEVNAME(sc), ret));
2456 ret = EINVAL;
2457 goto exit;
2458 }
2459
2460 if (offset_read != offset) {
2461 DPRINTF(("%s: NVM ACCESS response with invalid offset %d\n",
2462 DEVNAME(sc), offset_read));
2463 ret = EINVAL;
2464 goto exit;
2465 }
2466
2467 memcpy(data + offset, resp_data, bytes_read);
2468 *len = bytes_read;
2469
2470 exit:
2471 iwm_free_resp(sc, &cmd);
2472 return ret;
2473 }
2474
2475 /*
2476 * Reads an NVM section completely.
2477 * NICs prior to 7000 family doesn't have a real NVM, but just read
2478 * section 0 which is the EEPROM. Because the EEPROM reading is unlimited
2479 * by uCode, we need to manually check in this case that we don't
2480 * overflow and try to read more than the EEPROM size.
2481 * For 7000 family NICs, we supply the maximal size we can read, and
2482 * the uCode fills the response with as much data as we can,
2483 * without overflowing, so no check is needed.
2484 */
2485 static int
iwm_nvm_read_section(struct iwm_softc * sc,uint16_t section,uint8_t * data,uint16_t * len)2486 iwm_nvm_read_section(struct iwm_softc *sc,
2487 uint16_t section, uint8_t *data, uint16_t *len)
2488 {
2489 uint16_t length, seglen;
2490 int error;
2491
2492 /* Set nvm section read length */
2493 length = seglen = IWM_NVM_DEFAULT_CHUNK_SIZE;
2494 *len = 0;
2495
2496 /* Read the NVM until exhausted (reading less than requested) */
2497 while (seglen == length) {
2498 error = iwm_nvm_read_chunk(sc,
2499 section, *len, length, data, &seglen);
2500 if (error) {
2501 aprint_error_dev(sc->sc_dev,
2502 "Cannot read NVM from section %d offset %d, "
2503 "length %d\n", section, *len, length);
2504 return error;
2505 }
2506 *len += seglen;
2507 }
2508
2509 DPRINTFN(4, ("NVM section %d read completed\n", section));
2510 return 0;
2511 }
2512
2513 /*
2514 * BEGIN IWM_NVM_PARSE
2515 */
2516
2517 /* iwlwifi/iwl-nvm-parse.c */
2518
2519 /* NVM offsets (in words) definitions */
enum wkp_nvm_offsets {
	/* NVM HW-Section offset (in words) definitions */
	IWM_HW_ADDR = 0x15,	/* MAC address (byte pairs swapped) */

	/* NVM SW-Section offset (in words) definitions */
	IWM_NVM_SW_SECTION = 0x1C0,
	IWM_NVM_VERSION = 0,	/* offsets below are relative to the
				 * start of the SW section */
	IWM_RADIO_CFG = 1,
	IWM_SKU = 2,
	IWM_N_HW_ADDRS = 3,
	IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,

	/* NVM calibration section offset (in words) definitions */
	IWM_NVM_CALIB_SECTION = 0x2B8,
	IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
};
2536
2537 /* SKU Capabilities (actual values from NVM definition) */
enum nvm_sku_bits {
	IWM_NVM_SKU_CAP_BAND_24GHZ = (1 << 0),	/* 2.4GHz band supported */
	IWM_NVM_SKU_CAP_BAND_52GHZ = (1 << 1),	/* 5.2GHz band supported */
	IWM_NVM_SKU_CAP_11N_ENABLE = (1 << 2),	/* 11n capable (the driver
						 * forces 11n off; see
						 * iwm_parse_nvm_data) */
	IWM_NVM_SKU_CAP_11AC_ENABLE = (1 << 3),
};
2544
2545 /* radio config bits (actual values from NVM definition) */
2546 #define IWM_NVM_RF_CFG_DASH_MSK(x) (x & 0x3) /* bits 0-1 */
2547 #define IWM_NVM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */
2548 #define IWM_NVM_RF_CFG_TYPE_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */
2549 #define IWM_NVM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */
2550 #define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */
2551 #define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
2552
2553 #define DEFAULT_MAX_TX_POWER 16
2554
2555 /**
2556 * enum iwm_nvm_channel_flags - channel flags in NVM
2557 * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
2558 * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
2559 * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
2560 * @IWM_NVM_CHANNEL_RADAR: radar detection required
2561 * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
2562 * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
2563 * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
2564 * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
2565 * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
2566 */
/* Only VALID and ACTIVE are examined by iwm_init_channel_map(). */
enum iwm_nvm_channel_flags {
	IWM_NVM_CHANNEL_VALID = (1 << 0),
	IWM_NVM_CHANNEL_IBSS = (1 << 1),
	IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
	IWM_NVM_CHANNEL_RADAR = (1 << 4),
	IWM_NVM_CHANNEL_DFS = (1 << 7),
	IWM_NVM_CHANNEL_WIDE = (1 << 8),
	IWM_NVM_CHANNEL_40MHZ = (1 << 9),
	IWM_NVM_CHANNEL_80MHZ = (1 << 10),
	IWM_NVM_CHANNEL_160MHZ = (1 << 11),
};
2578
2579 static void
iwm_init_channel_map(struct iwm_softc * sc,const uint16_t * const nvm_ch_flags)2580 iwm_init_channel_map(struct iwm_softc *sc, const uint16_t * const nvm_ch_flags)
2581 {
2582 struct ieee80211com *ic = &sc->sc_ic;
2583 struct iwm_nvm_data *data = &sc->sc_nvm;
2584 int ch_idx;
2585 struct ieee80211_channel *channel;
2586 uint16_t ch_flags;
2587 int is_5ghz;
2588 int flags, hw_value;
2589
2590 for (ch_idx = 0; ch_idx < __arraycount(iwm_nvm_channels); ch_idx++) {
2591 ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
2592
2593 if (ch_idx >= IWM_NUM_2GHZ_CHANNELS &&
2594 !data->sku_cap_band_52GHz_enable)
2595 ch_flags &= ~IWM_NVM_CHANNEL_VALID;
2596
2597 if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
2598 DPRINTF(("Ch. %d Flags %x [%sGHz] - No traffic\n",
2599 iwm_nvm_channels[ch_idx],
2600 ch_flags,
2601 (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
2602 "5.2" : "2.4"));
2603 continue;
2604 }
2605
2606 hw_value = iwm_nvm_channels[ch_idx];
2607 channel = &ic->ic_channels[hw_value];
2608
2609 is_5ghz = ch_idx >= IWM_NUM_2GHZ_CHANNELS;
2610 if (!is_5ghz) {
2611 flags = IEEE80211_CHAN_2GHZ;
2612 channel->ic_flags
2613 = IEEE80211_CHAN_CCK
2614 | IEEE80211_CHAN_OFDM
2615 | IEEE80211_CHAN_DYN
2616 | IEEE80211_CHAN_2GHZ;
2617 } else {
2618 flags = IEEE80211_CHAN_5GHZ;
2619 channel->ic_flags =
2620 IEEE80211_CHAN_A;
2621 }
2622 channel->ic_freq = ieee80211_ieee2mhz(hw_value, flags);
2623
2624 if (!(ch_flags & IWM_NVM_CHANNEL_ACTIVE))
2625 channel->ic_flags |= IEEE80211_CHAN_PASSIVE;
2626 }
2627 }
2628
2629 static int
iwm_parse_nvm_data(struct iwm_softc * sc,const uint16_t * nvm_hw,const uint16_t * nvm_sw,const uint16_t * nvm_calib,uint8_t tx_chains,uint8_t rx_chains)2630 iwm_parse_nvm_data(struct iwm_softc *sc,
2631 const uint16_t *nvm_hw, const uint16_t *nvm_sw,
2632 const uint16_t *nvm_calib, uint8_t tx_chains, uint8_t rx_chains)
2633 {
2634 struct iwm_nvm_data *data = &sc->sc_nvm;
2635 uint8_t hw_addr[ETHER_ADDR_LEN];
2636 uint16_t radio_cfg, sku;
2637
2638 data->nvm_version = le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
2639
2640 radio_cfg = le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
2641 data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
2642 data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
2643 data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
2644 data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
2645 data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK(radio_cfg);
2646 data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK(radio_cfg);
2647
2648 sku = le16_to_cpup(nvm_sw + IWM_SKU);
2649 data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
2650 data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
2651 data->sku_cap_11n_enable = 0;
2652
2653 if (!data->valid_tx_ant || !data->valid_rx_ant) {
2654 DPRINTF(("%s: invalid antennas (0x%x, 0x%x)\n", DEVNAME(sc),
2655 data->valid_tx_ant, data->valid_rx_ant));
2656 return EINVAL;
2657 }
2658
2659 data->n_hw_addrs = le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
2660
2661 data->xtal_calib[0] = *(nvm_calib + IWM_XTAL_CALIB);
2662 data->xtal_calib[1] = *(nvm_calib + IWM_XTAL_CALIB + 1);
2663
2664 /* The byte order is little endian 16 bit, meaning 214365 */
2665 memcpy(hw_addr, nvm_hw + IWM_HW_ADDR, ETHER_ADDR_LEN);
2666 data->hw_addr[0] = hw_addr[1];
2667 data->hw_addr[1] = hw_addr[0];
2668 data->hw_addr[2] = hw_addr[3];
2669 data->hw_addr[3] = hw_addr[2];
2670 data->hw_addr[4] = hw_addr[5];
2671 data->hw_addr[5] = hw_addr[4];
2672
2673 iwm_init_channel_map(sc, &nvm_sw[IWM_NVM_CHANNELS]);
2674 data->calib_version = 255; /* TODO:
2675 this value will prevent some checks from
2676 failing, we need to check if this
2677 field is still needed, and if it does,
2678 where is it in the NVM */
2679
2680 return 0;
2681 }
2682
2683 /*
2684 * END NVM PARSE
2685 */
2686
struct iwm_nvm_section {
	uint16_t length;	/* bytes stored at data */
	const uint8_t *data;	/* copy of the section contents, owned by
				 * the code that read the section */
};
2691
/* Extract the valid TX/RX antenna masks from the firmware PHY config. */
#define IWM_FW_VALID_TX_ANT(sc) \
    ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_TX_CHAIN) \
    >> IWM_FW_PHY_CFG_TX_CHAIN_POS)
#define IWM_FW_VALID_RX_ANT(sc) \
    ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RX_CHAIN) \
    >> IWM_FW_PHY_CFG_RX_CHAIN_POS)
2698
2699 static int
iwm_parse_nvm_sections(struct iwm_softc * sc,struct iwm_nvm_section * sections)2700 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
2701 {
2702 const uint16_t *hw, *sw, *calib;
2703
2704 /* Checking for required sections */
2705 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2706 !sections[IWM_NVM_SECTION_TYPE_HW].data) {
2707 DPRINTF(("%s: Can't parse empty NVM sections\n", DEVNAME(sc)));
2708 return ENOENT;
2709 }
2710
2711 hw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_HW].data;
2712 sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
2713 calib = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
2714 return iwm_parse_nvm_data(sc, hw, sw, calib,
2715 IWM_FW_VALID_TX_ANT(sc), IWM_FW_VALID_RX_ANT(sc));
2716 }
2717
2718 static int
iwm_nvm_init(struct iwm_softc * sc)2719 iwm_nvm_init(struct iwm_softc *sc)
2720 {
2721 struct iwm_nvm_section nvm_sections[IWM_NVM_NUM_OF_SECTIONS];
2722 int i, section, error;
2723 uint16_t len;
2724 uint8_t *nvm_buffer, *temp;
2725
2726 /* Read From FW NVM */
2727 DPRINTF(("Read NVM\n"));
2728
2729 /* TODO: find correct NVM max size for a section */
2730 nvm_buffer = kmem_alloc(IWM_OTP_LOW_IMAGE_SIZE, KM_SLEEP);
2731 for (i = 0; i < __arraycount(nvm_to_read); i++) {
2732 section = nvm_to_read[i];
2733 KASSERT(section <= __arraycount(nvm_sections));
2734
2735 error = iwm_nvm_read_section(sc, section, nvm_buffer, &len);
2736 if (error)
2737 break;
2738
2739 temp = kmem_alloc(len, KM_SLEEP);
2740 memcpy(temp, nvm_buffer, len);
2741 nvm_sections[section].data = temp;
2742 nvm_sections[section].length = len;
2743 }
2744 kmem_free(nvm_buffer, IWM_OTP_LOW_IMAGE_SIZE);
2745 if (error)
2746 return error;
2747
2748 return iwm_parse_nvm_sections(sc, nvm_sections);
2749 }
2750
2751 /*
2752 * Firmware loading gunk. This is kind of a weird hybrid between the
2753 * iwn driver and the Linux iwlwifi driver.
2754 */
2755
2756 static int
iwm_firmware_load_chunk(struct iwm_softc * sc,uint32_t dst_addr,const uint8_t * section,uint32_t byte_cnt)2757 iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr,
2758 const uint8_t *section, uint32_t byte_cnt)
2759 {
2760 struct iwm_dma_info *dma = &sc->fw_dma;
2761 int error;
2762
2763 /* Copy firmware section into pre-allocated DMA-safe memory. */
2764 memcpy(dma->vaddr, section, byte_cnt);
2765 bus_dmamap_sync(sc->sc_dmat,
2766 dma->map, 0, byte_cnt, BUS_DMASYNC_PREWRITE);
2767
2768 if (!iwm_nic_lock(sc))
2769 return EBUSY;
2770
2771 sc->sc_fw_chunk_done = 0;
2772
2773 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2774 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
2775 IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
2776 dst_addr);
2777 IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
2778 dma->paddr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
2779 IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
2780 (iwm_get_dma_hi_addr(dma->paddr)
2781 << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
2782 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
2783 1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
2784 1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
2785 IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
2786 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2787 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
2788 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
2789 IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
2790
2791 iwm_nic_unlock(sc);
2792
2793 /* wait 1s for this segment to load */
2794 while (!sc->sc_fw_chunk_done)
2795 if ((error = tsleep(&sc->sc_fw, 0, "iwmfw", hz)) != 0)
2796 break;
2797
2798 return error;
2799 }
2800
2801 static int
iwm_load_firmware(struct iwm_softc * sc,enum iwm_ucode_type ucode_type)2802 iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2803 {
2804 struct iwm_fw_sects *fws;
2805 int error, i, w;
2806 void *data;
2807 uint32_t dlen;
2808 uint32_t offset;
2809
2810 sc->sc_uc.uc_intr = 0;
2811
2812 fws = &sc->sc_fw.fw_sects[ucode_type];
2813 for (i = 0; i < fws->fw_count; i++) {
2814 data = fws->fw_sect[i].fws_data;
2815 dlen = fws->fw_sect[i].fws_len;
2816 offset = fws->fw_sect[i].fws_devoff;
2817 DPRINTF(("LOAD FIRMWARE type %d offset %u len %d\n",
2818 ucode_type, offset, dlen));
2819 error = iwm_firmware_load_chunk(sc, offset, data, dlen);
2820 if (error) {
2821 DPRINTF(("iwm_firmware_load_chunk() chunk %u of %u "
2822 "returned error %02d\n", i, fws->fw_count, error));
2823 return error;
2824 }
2825 }
2826
2827 /* wait for the firmware to load */
2828 IWM_WRITE(sc, IWM_CSR_RESET, 0);
2829
2830 for (w = 0; !sc->sc_uc.uc_intr && w < 10; w++) {
2831 error = tsleep(&sc->sc_uc, 0, "iwmuc", hz/10);
2832 }
2833
2834 return error;
2835 }
2836
2837 /* iwlwifi: pcie/trans.c */
2838 static int
iwm_start_fw(struct iwm_softc * sc,enum iwm_ucode_type ucode_type)2839 iwm_start_fw(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2840 {
2841 int error;
2842
2843 IWM_WRITE(sc, IWM_CSR_INT, ~0);
2844
2845 if ((error = iwm_nic_init(sc)) != 0) {
2846 aprint_error_dev(sc->sc_dev, "Unable to init nic\n");
2847 return error;
2848 }
2849
2850 /* make sure rfkill handshake bits are cleared */
2851 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2852 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
2853 IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
2854
2855 /* clear (again), then enable host interrupts */
2856 IWM_WRITE(sc, IWM_CSR_INT, ~0);
2857 iwm_enable_interrupts(sc);
2858
2859 /* really make sure rfkill handshake bits are cleared */
2860 /* maybe we should write a few times more? just to make sure */
2861 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2862 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2863
2864 /* Load the given image to the HW */
2865 error = iwm_load_firmware(sc, ucode_type);
2866 if (error) {
2867 aprint_error_dev(sc->sc_dev, "failed to load firmware: %d\n",
2868 error);
2869 }
2870 return error;
2871 }
2872
2873 static int
iwm_fw_alive(struct iwm_softc * sc,uint32_t sched_base)2874 iwm_fw_alive(struct iwm_softc *sc, uint32_t sched_base)
2875 {
2876 return iwm_post_alive(sc);
2877 }
2878
2879 static int
iwm_send_tx_ant_cfg(struct iwm_softc * sc,uint8_t valid_tx_ant)2880 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2881 {
2882 struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2883 .valid = htole32(valid_tx_ant),
2884 };
2885
2886 return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2887 IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2888 }
2889
2890 /* iwlwifi: mvm/fw.c */
2891 static int
iwm_send_phy_cfg_cmd(struct iwm_softc * sc)2892 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
2893 {
2894 struct iwm_phy_cfg_cmd phy_cfg_cmd;
2895 enum iwm_ucode_type ucode_type = sc->sc_uc_current;
2896
2897 /* Set parameters */
2898 phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
2899 phy_cfg_cmd.calib_control.event_trigger =
2900 sc->sc_default_calib[ucode_type].event_trigger;
2901 phy_cfg_cmd.calib_control.flow_trigger =
2902 sc->sc_default_calib[ucode_type].flow_trigger;
2903
2904 DPRINTFN(10, ("Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg));
2905 return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
2906 sizeof(phy_cfg_cmd), &phy_cfg_cmd);
2907 }
2908
2909 static int
iwm_mvm_load_ucode_wait_alive(struct iwm_softc * sc,enum iwm_ucode_type ucode_type)2910 iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
2911 enum iwm_ucode_type ucode_type)
2912 {
2913 enum iwm_ucode_type old_type = sc->sc_uc_current;
2914 int error;
2915
2916 if ((error = iwm_read_firmware(sc)) != 0)
2917 return error;
2918
2919 sc->sc_uc_current = ucode_type;
2920 error = iwm_start_fw(sc, ucode_type);
2921 if (error) {
2922 sc->sc_uc_current = old_type;
2923 return error;
2924 }
2925
2926 return iwm_fw_alive(sc, sc->sched_base);
2927 }
2928
2929 /*
2930 * mvm misc bits
2931 */
2932
2933 /*
2934 * follows iwlwifi/fw.c
2935 */
/*
 * Boot the INIT ucode image.  With justnvm != 0 this only reads the
 * device NVM (MAC address, capabilities) and preallocates the scan
 * command buffer; otherwise it triggers the firmware's internal
 * calibrations and waits for the init-complete notification.
 */
static int
iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
{
	int error;

	/* do not operate with rfkill switch turned on */
	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
		aprint_error_dev(sc->sc_dev,
		    "radio is disabled by hardware switch\n");
		return EPERM;
	}

	/* Boot the INIT image and wait for the alive handshake. */
	sc->sc_init_complete = 0;
	if ((error = iwm_mvm_load_ucode_wait_alive(sc,
	    IWM_UCODE_TYPE_INIT)) != 0)
		return error;

	if (justnvm) {
		if ((error = iwm_nvm_init(sc)) != 0) {
			aprint_error_dev(sc->sc_dev, "failed to read nvm\n");
			return error;
		}
		/* Adopt the MAC address stored in the NVM. */
		memcpy(&sc->sc_ic.ic_myaddr,
		    &sc->sc_nvm.hw_addr, ETHER_ADDR_LEN);

		/*
		 * Size the scan command for the maximum probe request
		 * length plus the per-channel entries.  KM_SLEEP cannot
		 * fail, so the result is not checked.
		 */
		sc->sc_scan_cmd_len = sizeof(struct iwm_scan_cmd)
		    + sc->sc_capa_max_probe_len
		    + IWM_MAX_NUM_SCAN_CHANNELS
		    * sizeof(struct iwm_scan_channel);
		sc->sc_scan_cmd = kmem_alloc(sc->sc_scan_cmd_len, KM_SLEEP);

		return 0;
	}

	/* Send TX valid antennas before triggering calibrations */
	if ((error = iwm_send_tx_ant_cfg(sc, IWM_FW_VALID_TX_ANT(sc))) != 0)
		return error;

	/*
	 * Send phy configurations command to init uCode
	 * to start the 16.0 uCode init image internal calibrations.
	 */
	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0 ) {
		DPRINTF(("%s: failed to run internal calibration: %d\n",
		    DEVNAME(sc), error));
		return error;
	}

	/*
	 * Nothing to do but wait for the init complete notification
	 * from the firmware.  The 2*hz timeout keeps a wedged firmware
	 * from hanging this thread forever.
	 */
	while (!sc->sc_init_complete)
		if ((error = tsleep(&sc->sc_init_complete,
		    0, "iwminit", 2*hz)) != 0)
			break;

	return error;
}
2995
2996 /*
2997 * receive side
2998 */
2999
3000 /* (re)stock rx ring, called at init-time and at runtime */
static int
iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
{
	struct iwm_rx_ring *ring = &sc->rxq;
	struct iwm_rx_data *data = &ring->data[idx];
	struct mbuf *m;
	int error;
	int fatal = 0;	/* set once the old DMA map has been torn down */

	m = m_gethdr(M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;

	/* Use a regular cluster when it fits, else external storage. */
	if (size <= MCLBYTES) {
		MCLGET(m, M_DONTWAIT);
	} else {
		MEXTMALLOC(m, IWM_RBUF_SIZE, M_DONTWAIT);
	}
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return ENOBUFS;
	}

	/*
	 * If the slot already held a buffer, unload its map first.
	 * From this point on a load failure is fatal: the hardware
	 * descriptor would otherwise reference unmapped memory.
	 */
	if (data->m != NULL) {
		bus_dmamap_unload(sc->sc_dmat, data->map);
		fatal = 1;
	}

	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	if ((error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
	    BUS_DMA_READ|BUS_DMA_NOWAIT)) != 0) {
		/* XXX */
		if (fatal)
			panic("iwm: could not load RX mbuf");
		m_freem(m);
		return error;
	}
	data->m = m;
	bus_dmamap_sync(sc->sc_dmat, data->map, 0, size, BUS_DMASYNC_PREREAD);

	/* Update RX descriptor. */
	/* The descriptor holds the buffer's physical address >> 8. */
	ring->desc[idx] = htole32(data->map->dm_segs[0].ds_addr >> 8);
	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
	    idx * sizeof(uint32_t), sizeof(uint32_t), BUS_DMASYNC_PREWRITE);

	return 0;
}
3048
3049 /* iwlwifi: mvm/rx.c */
#define IWM_RSSI_OFFSET 50
/*
 * Compute the best RSSI (in dBm) across the two receive chains using
 * the legacy (pre-energy-API) PHY info words.
 */
static int
iwm_mvm_calc_rssi(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
{
	int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
	uint32_t agc_a, agc_b;
	uint32_t val;

	/* Per-chain AGC values. */
	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_AGC_IDX]);
	agc_a = (val & IWM_OFDM_AGC_A_MSK) >> IWM_OFDM_AGC_A_POS;
	agc_b = (val & IWM_OFDM_AGC_B_MSK) >> IWM_OFDM_AGC_B_POS;

	/* Per-chain in-band RSSI values. */
	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_RSSI_AB_IDX]);
	rssi_a = (val & IWM_OFDM_RSSI_INBAND_A_MSK) >> IWM_OFDM_RSSI_A_POS;
	rssi_b = (val & IWM_OFDM_RSSI_INBAND_B_MSK) >> IWM_OFDM_RSSI_B_POS;

	/*
	 * dBm = rssi dB - agc dB - constant.
	 * Higher AGC (higher radio gain) means lower signal.
	 */
	rssi_a_dbm = rssi_a - IWM_RSSI_OFFSET - agc_a;
	rssi_b_dbm = rssi_b - IWM_RSSI_OFFSET - agc_b;
	max_rssi_dbm = MAX(rssi_a_dbm, rssi_b_dbm);

	DPRINTF(("Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
	    rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b));

	return max_rssi_dbm;
}
3079
3080 /* iwlwifi: mvm/rx.c */
3081 /*
3082 * iwm_mvm_get_signal_strength - use new rx PHY INFO API
3083 * values are reported by the fw as positive values - need to negate
3084 * to obtain their dBM. Account for missing antennas by replacing 0
3085 * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
3086 */
3087 static int
iwm_mvm_get_signal_strength(struct iwm_softc * sc,struct iwm_rx_phy_info * phy_info)3088 iwm_mvm_get_signal_strength(struct iwm_softc *sc,
3089 struct iwm_rx_phy_info *phy_info)
3090 {
3091 int energy_a, energy_b, energy_c, max_energy;
3092 uint32_t val;
3093
3094 val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
3095 energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
3096 IWM_RX_INFO_ENERGY_ANT_A_POS;
3097 energy_a = energy_a ? -energy_a : -256;
3098 energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
3099 IWM_RX_INFO_ENERGY_ANT_B_POS;
3100 energy_b = energy_b ? -energy_b : -256;
3101 energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
3102 IWM_RX_INFO_ENERGY_ANT_C_POS;
3103 energy_c = energy_c ? -energy_c : -256;
3104 max_energy = MAX(energy_a, energy_b);
3105 max_energy = MAX(max_energy, energy_c);
3106
3107 DPRINTFN(12, ("energy In A %d B %d C %d, and max %d\n",
3108 energy_a, energy_b, energy_c, max_energy));
3109
3110 return max_energy;
3111 }
3112
/*
 * Handle an RX PHY-info notification: record the PHY parameters so a
 * subsequent MPDU notification can use them (read back via
 * sc_last_phy_info in iwm_mvm_rx_rx_mpdu()).
 */
static void
iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc,
	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
{
	struct iwm_rx_phy_info *phy_info = (void *)pkt->data;

	DPRINTFN(20, ("received PHY stats\n"));
	/* Make the DMA'd payload visible to the CPU before copying. */
	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
	    sizeof(*phy_info), BUS_DMASYNC_POSTREAD);

	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
}
3125
3126 /*
3127 * Retrieve the average noise (in dBm) among receivers.
3128 */
3129 static int
iwm_get_noise(const struct iwm_mvm_statistics_rx_non_phy * stats)3130 iwm_get_noise(const struct iwm_mvm_statistics_rx_non_phy *stats)
3131 {
3132 int i, total, nbant, noise;
3133
3134 total = nbant = noise = 0;
3135 for (i = 0; i < 3; i++) {
3136 noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
3137 if (noise) {
3138 total += noise;
3139 nbant++;
3140 }
3141 }
3142
3143 /* There should be at least one antenna but check anyway. */
3144 return (nbant == 0) ? -127 : (total / nbant) - 107;
3145 }
3146
3147 /*
3148 * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
3149 *
3150 * Handles the actual data of the Rx packet from the fw
3151 */
static void
iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc,
	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_frame *wh;
	struct ieee80211_node *ni;
	struct ieee80211_channel *c = NULL;
	struct mbuf *m;
	struct iwm_rx_phy_info *phy_info;
	struct iwm_rx_mpdu_res_start *rx_res;
	int device_timestamp;
	uint32_t len;
	uint32_t rx_pkt_status;
	int rssi;

	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
	    BUS_DMASYNC_POSTREAD);

	/*
	 * The PHY info for this frame arrived in a preceding
	 * notification (see iwm_mvm_rx_rx_phy_cmd).  The packet layout
	 * is: rx_res header, 802.11 frame of rx_res->byte_count bytes,
	 * then a 32-bit status word.
	 */
	phy_info = &sc->sc_last_phy_info;
	rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
	wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
	len = le16toh(rx_res->byte_count);
	rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));

	/* Point the ring mbuf at the 802.11 frame (no copy). */
	m = data->m;
	m->m_data = pkt->data + sizeof(*rx_res);
	m->m_pkthdr.len = m->m_len = len;

	if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
		DPRINTF(("dsp size out of range [0,20]: %d\n",
		    phy_info->cfg_phy_cnt));
		return;
	}

	/* Drop frames the hardware flagged as damaged. */
	if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
	    !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
		DPRINTF(("Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status));
		return; /* drop */
	}

	device_timestamp = le32toh(phy_info->system_timestamp);

	/* Newer firmware reports signal via the energy API. */
	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_RX_ENERGY_API) {
		rssi = iwm_mvm_get_signal_strength(sc, phy_info);
	} else {
		rssi = iwm_mvm_calc_rssi(sc, phy_info);
	}
	rssi = -rssi;

	if (ic->ic_state == IEEE80211_S_SCAN)
		iwm_fix_channel(ic, m);

	/* replenish ring for the buffer we're going to feed to the sharks */
	if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0)
		return;

	m_set_rcvif(m, IC2IFP(ic));

	/*
	 * While scanning the 5GHz band, look up the channel the frame
	 * actually arrived on so the node gets the right one.
	 */
	if (sc->sc_scanband == IEEE80211_CHAN_5GHZ) {
		if (le32toh(phy_info->channel) < __arraycount(ic->ic_channels))
			c = &ic->ic_channels[le32toh(phy_info->channel)];
	}

	ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
	if (c)
		ni->ni_chan = c;

	/* Feed radiotap if a bpf listener is attached. */
	if (sc->sc_drvbpf != NULL) {
		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;

		tap->wr_flags = 0;
		if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
		tap->wr_chan_freq =
		    htole16(ic->ic_channels[phy_info->channel].ic_freq);
		tap->wr_chan_flags =
		    htole16(ic->ic_channels[phy_info->channel].ic_flags);
		tap->wr_dbm_antsignal = (int8_t)rssi;
		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
		tap->wr_tsft = phy_info->system_timestamp;
		/* Map the firmware PLCP rate code to a radiotap rate. */
		switch (phy_info->rate) {
		/* CCK rates. */
		case 10: tap->wr_rate = 2; break;
		case 20: tap->wr_rate = 4; break;
		case 55: tap->wr_rate = 11; break;
		case 110: tap->wr_rate = 22; break;
		/* OFDM rates. */
		case 0xd: tap->wr_rate = 12; break;
		case 0xf: tap->wr_rate = 18; break;
		case 0x5: tap->wr_rate = 24; break;
		case 0x7: tap->wr_rate = 36; break;
		case 0x9: tap->wr_rate = 48; break;
		case 0xb: tap->wr_rate = 72; break;
		case 0x1: tap->wr_rate = 96; break;
		case 0x3: tap->wr_rate = 108; break;
		/* Unknown rate: should not happen. */
		default: tap->wr_rate = 0;
		}

		bpf_mtap2(sc->sc_drvbpf, tap, sc->sc_rxtap_len, m);
	}
	ieee80211_input(ic, m, ni, rssi, device_timestamp);
	ieee80211_free_node(ni);
}
3257
3258 static void
iwm_mvm_rx_tx_cmd_single(struct iwm_softc * sc,struct iwm_rx_packet * pkt,struct iwm_node * in)3259 iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3260 struct iwm_node *in)
3261 {
3262 struct ieee80211com *ic = &sc->sc_ic;
3263 struct ifnet *ifp = IC2IFP(ic);
3264 struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
3265 int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3266 int failack = tx_resp->failure_frame;
3267
3268 KASSERT(tx_resp->frame_count == 1);
3269
3270 /* Update rate control statistics. */
3271 in->in_amn.amn_txcnt++;
3272 if (failack > 0) {
3273 in->in_amn.amn_retrycnt++;
3274 }
3275
3276 if (status != IWM_TX_STATUS_SUCCESS &&
3277 status != IWM_TX_STATUS_DIRECT_DONE)
3278 ifp->if_oerrors++;
3279 else
3280 ifp->if_opackets++;
3281 }
3282
/*
 * Handle a TX-command response: account the result, unmap and free the
 * transmitted mbuf, release the node reference, and restart the output
 * queue if the ring drained below the low-water mark.
 */
static void
iwm_mvm_rx_tx_cmd(struct iwm_softc *sc,
	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = IC2IFP(ic);
	struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
	int idx = cmd_hdr->idx;
	int qid = cmd_hdr->qid;
	struct iwm_tx_ring *ring = &sc->txq[qid];
	struct iwm_tx_data *txd = &ring->data[idx];
	struct iwm_node *in = txd->in;

	/* Guard against duplicate notifications for the same slot. */
	if (txd->done) {
		DPRINTF(("%s: got tx interrupt that's already been handled!\n",
		    DEVNAME(sc)));
		return;
	}

	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
	    BUS_DMASYNC_POSTREAD);

	/* A completion means the device is alive; reset the watchdog. */
	sc->sc_tx_timer = 0;

	iwm_mvm_rx_tx_cmd_single(sc, pkt, in);

	/* Unmap and free mbuf. */
	bus_dmamap_sync(sc->sc_dmat, txd->map, 0, txd->map->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, txd->map);
	m_freem(txd->m);

	DPRINTFN(8, ("free txd %p, in %p\n", txd, txd->in));
	KASSERT(txd->done == 0);
	txd->done = 1;
	KASSERT(txd->in);

	/* Drop the node reference taken at transmit time. */
	txd->m = NULL;
	txd->in = NULL;
	ieee80211_free_node(&in->in_ni);

	if (--ring->queued < IWM_TX_RING_LOMARK) {
		sc->qfullmsk &= ~(1 << ring->qid);
		if (sc->qfullmsk == 0 && (ifp->if_flags & IFF_OACTIVE)) {
			ifp->if_flags &= ~IFF_OACTIVE;
			/*
			 * Well, we're in interrupt context, but then again
			 * I guess net80211 does all sorts of stunts in
			 * interrupt context, so maybe this is no biggie.
			 */
			(*ifp->if_start)(ifp);
		}
	}
}
3337
3338 /*
3339 * BEGIN iwlwifi/mvm/binding.c
3340 */
3341
3342 static int
iwm_mvm_binding_cmd(struct iwm_softc * sc,struct iwm_node * in,uint32_t action)3343 iwm_mvm_binding_cmd(struct iwm_softc *sc, struct iwm_node *in, uint32_t action)
3344 {
3345 struct iwm_binding_cmd cmd;
3346 struct iwm_mvm_phy_ctxt *phyctxt = in->in_phyctxt;
3347 int i, ret;
3348 uint32_t status;
3349
3350 memset(&cmd, 0, sizeof(cmd));
3351
3352 cmd.id_and_color
3353 = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
3354 cmd.action = htole32(action);
3355 cmd.phy = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
3356
3357 cmd.macs[0] = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
3358 for (i = 1; i < IWM_MAX_MACS_IN_BINDING; i++)
3359 cmd.macs[i] = htole32(IWM_FW_CTXT_INVALID);
3360
3361 status = 0;
3362 ret = iwm_mvm_send_cmd_pdu_status(sc, IWM_BINDING_CONTEXT_CMD,
3363 sizeof(cmd), &cmd, &status);
3364 if (ret) {
3365 DPRINTF(("%s: Failed to send binding (action:%d): %d\n",
3366 DEVNAME(sc), action, ret));
3367 return ret;
3368 }
3369
3370 if (status) {
3371 DPRINTF(("%s: Binding command failed: %u\n", DEVNAME(sc),
3372 status));
3373 ret = EIO;
3374 }
3375
3376 return ret;
3377 }
3378
/*
 * Update the firmware binding for a node.
 *
 * NOTE(review): the "add" parameter is ignored -- the command is always
 * sent with IWM_FW_CTXT_ACTION_ADD, so removal via this path is
 * unimplemented.  Confirm against the iwlwifi reference before relying
 * on add == 0.
 */
static int
iwm_mvm_binding_update(struct iwm_softc *sc, struct iwm_node *in, int add)
{
	return iwm_mvm_binding_cmd(sc, in, IWM_FW_CTXT_ACTION_ADD);
}
3384
/*
 * Add the firmware binding for the interface represented by "in".
 *
 * NOTE(review): IWM_FW_CTXT_ACTION_ADD is passed where
 * iwm_mvm_binding_update() expects its boolean "add" flag; harmless in
 * practice since that function ignores the flag, but worth tidying.
 */
static int
iwm_mvm_binding_add_vif(struct iwm_softc *sc, struct iwm_node *in)
{
	return iwm_mvm_binding_update(sc, in, IWM_FW_CTXT_ACTION_ADD);
}
3390
3391 /*
3392 * END iwlwifi/mvm/binding.c
3393 */
3394
3395 /*
3396 * BEGIN iwlwifi/mvm/phy-ctxt.c
3397 */
3398
3399 /*
3400 * Construct the generic fields of the PHY context command
3401 */
3402 static void
iwm_mvm_phy_ctxt_cmd_hdr(struct iwm_softc * sc,struct iwm_mvm_phy_ctxt * ctxt,struct iwm_phy_context_cmd * cmd,uint32_t action,uint32_t apply_time)3403 iwm_mvm_phy_ctxt_cmd_hdr(struct iwm_softc *sc, struct iwm_mvm_phy_ctxt *ctxt,
3404 struct iwm_phy_context_cmd *cmd, uint32_t action, uint32_t apply_time)
3405 {
3406 memset(cmd, 0, sizeof(struct iwm_phy_context_cmd));
3407
3408 cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(ctxt->id,
3409 ctxt->color));
3410 cmd->action = htole32(action);
3411 cmd->apply_time = htole32(apply_time);
3412 }
3413
3414 /*
3415 * Add the phy configuration to the PHY context command
3416 */
3417 static void
iwm_mvm_phy_ctxt_cmd_data(struct iwm_softc * sc,struct iwm_phy_context_cmd * cmd,struct ieee80211_channel * chan,uint8_t chains_static,uint8_t chains_dynamic)3418 iwm_mvm_phy_ctxt_cmd_data(struct iwm_softc *sc,
3419 struct iwm_phy_context_cmd *cmd, struct ieee80211_channel *chan,
3420 uint8_t chains_static, uint8_t chains_dynamic)
3421 {
3422 struct ieee80211com *ic = &sc->sc_ic;
3423 uint8_t active_cnt, idle_cnt;
3424
3425 cmd->ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
3426 IWM_PHY_BAND_24 : IWM_PHY_BAND_5;
3427
3428 cmd->ci.channel = ieee80211_chan2ieee(ic, chan);
3429 cmd->ci.width = IWM_PHY_VHT_CHANNEL_MODE20;
3430 cmd->ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
3431
3432 /* Set rx the chains */
3433 idle_cnt = chains_static;
3434 active_cnt = chains_dynamic;
3435
3436 cmd->rxchain_info = htole32(IWM_FW_VALID_RX_ANT(sc) <<
3437 IWM_PHY_RX_CHAIN_VALID_POS);
3438 cmd->rxchain_info |= htole32(idle_cnt << IWM_PHY_RX_CHAIN_CNT_POS);
3439 cmd->rxchain_info |= htole32(active_cnt <<
3440 IWM_PHY_RX_CHAIN_MIMO_CNT_POS);
3441
3442 cmd->txchain_info = htole32(IWM_FW_VALID_TX_ANT(sc));
3443 }
3444
3445 /*
3446 * Send a command
3447 * only if something in the configuration changed: in case that this is the
3448 * first time that the phy configuration is applied or in case that the phy
3449 * configuration changed from the previous apply.
3450 */
3451 static int
iwm_mvm_phy_ctxt_apply(struct iwm_softc * sc,struct iwm_mvm_phy_ctxt * ctxt,uint8_t chains_static,uint8_t chains_dynamic,uint32_t action,uint32_t apply_time)3452 iwm_mvm_phy_ctxt_apply(struct iwm_softc *sc,
3453 struct iwm_mvm_phy_ctxt *ctxt,
3454 uint8_t chains_static, uint8_t chains_dynamic,
3455 uint32_t action, uint32_t apply_time)
3456 {
3457 struct iwm_phy_context_cmd cmd;
3458 int ret;
3459
3460 /* Set the command header fields */
3461 iwm_mvm_phy_ctxt_cmd_hdr(sc, ctxt, &cmd, action, apply_time);
3462
3463 /* Set the command data */
3464 iwm_mvm_phy_ctxt_cmd_data(sc, &cmd, ctxt->channel,
3465 chains_static, chains_dynamic);
3466
3467 ret = iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONTEXT_CMD, IWM_CMD_SYNC,
3468 sizeof(struct iwm_phy_context_cmd), &cmd);
3469 if (ret) {
3470 DPRINTF(("PHY ctxt cmd error. ret=%d\n", ret));
3471 }
3472 return ret;
3473 }
3474
3475 /*
3476 * Send a command to add a PHY context based on the current HW configuration.
3477 */
3478 static int
iwm_mvm_phy_ctxt_add(struct iwm_softc * sc,struct iwm_mvm_phy_ctxt * ctxt,struct ieee80211_channel * chan,uint8_t chains_static,uint8_t chains_dynamic)3479 iwm_mvm_phy_ctxt_add(struct iwm_softc *sc, struct iwm_mvm_phy_ctxt *ctxt,
3480 struct ieee80211_channel *chan,
3481 uint8_t chains_static, uint8_t chains_dynamic)
3482 {
3483 ctxt->channel = chan;
3484 return iwm_mvm_phy_ctxt_apply(sc, ctxt,
3485 chains_static, chains_dynamic, IWM_FW_CTXT_ACTION_ADD, 0);
3486 }
3487
3488 /*
3489 * Send a command to modify the PHY context based on the current HW
3490 * configuration. Note that the function does not check that the configuration
3491 * changed.
3492 */
3493 static int
iwm_mvm_phy_ctxt_changed(struct iwm_softc * sc,struct iwm_mvm_phy_ctxt * ctxt,struct ieee80211_channel * chan,uint8_t chains_static,uint8_t chains_dynamic)3494 iwm_mvm_phy_ctxt_changed(struct iwm_softc *sc,
3495 struct iwm_mvm_phy_ctxt *ctxt, struct ieee80211_channel *chan,
3496 uint8_t chains_static, uint8_t chains_dynamic)
3497 {
3498 ctxt->channel = chan;
3499 return iwm_mvm_phy_ctxt_apply(sc, ctxt,
3500 chains_static, chains_dynamic, IWM_FW_CTXT_ACTION_MODIFY, 0);
3501 }
3502
3503 /*
3504 * END iwlwifi/mvm/phy-ctxt.c
3505 */
3506
3507 /*
3508 * transmit side
3509 */
3510
3511 /*
3512 * Send a command to the firmware. We try to implement the Linux
3513 * driver interface for the routine.
3514 * mostly from if_iwn (iwn_cmd()).
3515 *
3516 * For now, we always copy the first part and map the second one (if it exists).
3517 */
static int
iwm_send_cmd(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
{
	struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
	struct iwm_tfd *desc;
	struct iwm_tx_data *data;
	struct iwm_device_cmd *cmd;
	struct mbuf *m;
	bus_addr_t paddr;
	uint32_t addr_lo;
	int error = 0, i, paylen, off, s;
	int code;
	int async, wantresp;

	code = hcmd->id;
	async = hcmd->flags & IWM_CMD_ASYNC;
	wantresp = hcmd->flags & IWM_CMD_WANT_SKB;

	/* Total payload size across all fragments. */
	for (i = 0, paylen = 0; i < __arraycount(hcmd->len); i++) {
		paylen += hcmd->len[i];
	}

	/* if the command wants an answer, busy sc_cmd_resp */
	if (wantresp) {
		KASSERT(!async);
		/* Only one response slot exists; serialize its users. */
		while (sc->sc_wantresp != -1)
			tsleep(&sc->sc_wantresp, 0, "iwmcmdsl", 0);
		sc->sc_wantresp = ring->qid << 16 | ring->cur;
		DPRINTFN(12, ("wantresp is %x\n", sc->sc_wantresp));
	}

	/*
	 * Is the hardware still available? (after e.g. above wait).
	 */
	s = splnet();
	if (sc->sc_flags & IWM_FLAG_STOPPED) {
		error = ENXIO;
		goto out;
	}

	desc = &ring->desc[ring->cur];
	data = &ring->data[ring->cur];

	if (paylen > sizeof(cmd->data)) {
		/* Command is too large */
		if (sizeof(cmd->hdr) + paylen > IWM_RBUF_SIZE) {
			error = EINVAL;
			goto out;
		}
		/*
		 * Payload does not fit in the ring slot; stage the
		 * command in a DMA-mapped mbuf instead.
		 */
		m = m_gethdr(M_DONTWAIT, MT_DATA);
		if (m == NULL) {
			error = ENOMEM;
			goto out;
		}
		MEXTMALLOC(m, IWM_RBUF_SIZE, M_DONTWAIT);
		if (!(m->m_flags & M_EXT)) {
			m_freem(m);
			error = ENOMEM;
			goto out;
		}
		cmd = mtod(m, struct iwm_device_cmd *);
		error = bus_dmamap_load(sc->sc_dmat, data->map, cmd,
		    IWM_RBUF_SIZE, NULL, BUS_DMA_NOWAIT | BUS_DMA_WRITE);
		if (error != 0) {
			m_freem(m);
			goto out;
		}
		data->m = m;
		paddr = data->map->dm_segs[0].ds_addr;
	} else {
		/* Small command: use the preallocated ring slot. */
		cmd = &ring->cmd[ring->cur];
		paddr = data->cmd_paddr;
	}

	cmd->hdr.code = code;
	cmd->hdr.flags = 0;
	cmd->hdr.qid = ring->qid;
	cmd->hdr.idx = ring->cur;

	/* Concatenate all payload fragments after the header. */
	for (i = 0, off = 0; i < __arraycount(hcmd->data); i++) {
		if (hcmd->len[i] == 0)
			continue;
		memcpy(cmd->data + off, hcmd->data[i], hcmd->len[i]);
		off += hcmd->len[i];
	}
	KASSERT(off == paylen);

	/* lo field is not aligned */
	addr_lo = htole32((uint32_t)paddr);
	memcpy(&desc->tbs[0].lo, &addr_lo, sizeof(uint32_t));
	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(paddr)
	    | ((sizeof(cmd->hdr) + paylen) << 4));
	desc->num_tbs = 1;

	DPRINTFN(8, ("iwm_send_cmd 0x%x size=%zu %s\n",
	    code, sizeof(cmd->hdr) + paylen, async ? " (async)" : ""));

	/* Flush command body and descriptor before handing to the device. */
	if (paylen > sizeof(cmd->data)) {
		bus_dmamap_sync(sc->sc_dmat, data->map, 0,
		    sizeof(cmd->hdr) + paylen, BUS_DMASYNC_PREWRITE);
	} else {
		bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
		    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
		    sizeof(cmd->hdr) + paylen, BUS_DMASYNC_PREWRITE);
	}
	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
	    sizeof (*desc), BUS_DMASYNC_PREWRITE);

	/* Wake the NIC and wait until its MAC clock is ready. */
	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	if (!iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
	    (IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
	     IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000)) {
		DPRINTF(("%s: acquiring device failed\n", DEVNAME(sc)));
		error = EBUSY;
		goto out;
	}

#if 0
	iwm_update_sched(sc, ring->qid, ring->cur, 0, 0);
#endif
	DPRINTF(("sending command 0x%x qid %d, idx %d\n",
	    code, ring->qid, ring->cur));

	/* Kick command ring. */
	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	if (!async) {
		/*
		 * Synchronous command: sleep until iwm_cmd_done() wakes
		 * us on the descriptor, with a one-second timeout.  A
		 * generation change means the device was reset while we
		 * slept, so any response buffer is gone.
		 */
		int generation = sc->sc_generation;
		error = tsleep(desc, PCATCH, "iwmcmd", hz);
		if (error == 0) {
			/* if hardware is no longer up, return error */
			if (generation != sc->sc_generation) {
				error = ENXIO;
			} else {
				hcmd->resp_pkt = (void *)sc->sc_cmd_resp;
			}
		}
	}
 out:
	/* On failure, release the response slot we claimed above. */
	if (wantresp && error != 0) {
		iwm_free_resp(sc, hcmd);
	}
	splx(s);

	return error;
}
3669
3670 /* iwlwifi: mvm/utils.c */
3671 static int
iwm_mvm_send_cmd_pdu(struct iwm_softc * sc,uint8_t id,uint32_t flags,uint16_t len,const void * data)3672 iwm_mvm_send_cmd_pdu(struct iwm_softc *sc, uint8_t id,
3673 uint32_t flags, uint16_t len, const void *data)
3674 {
3675 struct iwm_host_cmd cmd = {
3676 .id = id,
3677 .len = { len, },
3678 .data = { data, },
3679 .flags = flags,
3680 };
3681
3682 return iwm_send_cmd(sc, &cmd);
3683 }
3684
3685 /* iwlwifi: mvm/utils.c */
/*
 * Send a synchronous command and extract the firmware's 32-bit status
 * word from the response into *status.  Returns 0 on success (even when
 * no response arrived, e.g. under RFKILL), EIO on a failed or
 * wrong-sized response.  The response slot is always released.
 */
static int
iwm_mvm_send_cmd_status(struct iwm_softc *sc,
	struct iwm_host_cmd *cmd, uint32_t *status)
{
	struct iwm_rx_packet *pkt;
	struct iwm_cmd_response *resp;
	int error, resp_len;

	//lockdep_assert_held(&mvm->mutex);

	/* Caller must not have requested a response itself. */
	KASSERT((cmd->flags & IWM_CMD_WANT_SKB) == 0);
	cmd->flags |= IWM_CMD_SYNC | IWM_CMD_WANT_SKB;

	if ((error = iwm_send_cmd(sc, cmd)) != 0)
		return error;
	pkt = cmd->resp_pkt;

	/* Can happen if RFKILL is asserted */
	if (!pkt) {
		error = 0;
		goto out_free_resp;
	}

	if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
		error = EIO;
		goto out_free_resp;
	}

	/* The response must be exactly one status word. */
	resp_len = iwm_rx_packet_payload_len(pkt);
	if (resp_len != sizeof(*resp)) {
		error = EIO;
		goto out_free_resp;
	}

	resp = (void *)pkt->data;
	*status = le32toh(resp->status);
 out_free_resp:
	iwm_free_resp(sc, cmd);
	return error;
}
3726
3727 /* iwlwifi/mvm/utils.c */
3728 static int
iwm_mvm_send_cmd_pdu_status(struct iwm_softc * sc,uint8_t id,uint16_t len,const void * data,uint32_t * status)3729 iwm_mvm_send_cmd_pdu_status(struct iwm_softc *sc, uint8_t id,
3730 uint16_t len, const void *data, uint32_t *status)
3731 {
3732 struct iwm_host_cmd cmd = {
3733 .id = id,
3734 .len = { len, },
3735 .data = { data, },
3736 };
3737
3738 return iwm_mvm_send_cmd_status(sc, &cmd, status);
3739 }
3740
/*
 * Release ownership of the shared synchronous-command response buffer
 * and wake any thread waiting in iwm_send_cmd() to claim it.
 */
static void
iwm_free_resp(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
{
	/* Caller must hold the slot and have issued a sync WANT_SKB cmd. */
	KASSERT(sc->sc_wantresp != -1);
	KASSERT((hcmd->flags & (IWM_CMD_WANT_SKB|IWM_CMD_SYNC))
	    == (IWM_CMD_WANT_SKB|IWM_CMD_SYNC));
	sc->sc_wantresp = -1;
	wakeup(&sc->sc_wantresp);
}
3750
3751 /*
3752 * Process a "command done" firmware notification. This is where we wakeup
3753 * processes waiting for a synchronous command completion.
3754 * from if_iwn
3755 */
3756 static void
iwm_cmd_done(struct iwm_softc * sc,struct iwm_rx_packet * pkt)3757 iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3758 {
3759 struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
3760 struct iwm_tx_data *data;
3761
3762 if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
3763 return; /* Not a command ack. */
3764 }
3765
3766 data = &ring->data[pkt->hdr.idx];
3767
3768 /* If the command was mapped in an mbuf, free it. */
3769 if (data->m != NULL) {
3770 bus_dmamap_sync(sc->sc_dmat, data->map, 0,
3771 data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
3772 bus_dmamap_unload(sc->sc_dmat, data->map);
3773 m_freem(data->m);
3774 data->m = NULL;
3775 }
3776 wakeup(&ring->desc[pkt->hdr.idx]);
3777 }
3778
#if 0
/*
 * necessary only for block ack mode
 */
void
iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
	uint16_t len)
{
	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
	uint16_t w_val;

	scd_bc_tbl = sc->sched_dma.vaddr;

	len += 8; /* magic numbers came naturally from paris */
	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
		len = roundup(len, 4) / 4;

	w_val = htole16(sta_id << 12 | len);

	/* Update TX scheduler. */
	/*
	 * NOTE(review): "w" below is not declared anywhere in this
	 * function, so this disabled code does not compile as-is.  It
	 * presumably should be &scd_bc_tbl[qid].tfd_offset[idx] -- fix
	 * before removing the #if 0.
	 */
	scd_bc_tbl[qid].tfd_offset[idx] = w_val;
	bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
	    (char *)(void *)w - (char *)(void *)sc->sched_dma.vaddr,
	    sizeof(uint16_t), BUS_DMASYNC_PREWRITE);

	/* I really wonder what this is ?!? */
	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
		bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
		    (char *)(void *)(w + IWM_TFD_QUEUE_SIZE_MAX) -
		    (char *)(void *)sc->sched_dma.vaddr,
		    sizeof (uint16_t), BUS_DMASYNC_PREWRITE);
	}
}
#endif
3814
3815 /*
3816 * Fill in various bit for management frames, and leave them
3817 * unfilled for data frames (firmware takes care of that).
3818 * Return the selected TX rate.
3819 */
static const struct iwm_rate *
iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
	struct ieee80211_frame *wh, struct iwm_tx_cmd *tx)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = &in->in_ni;
	const struct iwm_rate *rinfo;
	int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	int ridx, rate_flags;
	int nrates = ni->ni_rates.rs_nrates;

	tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
	tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;

	if (type != IEEE80211_FC0_TYPE_DATA) {
		/* for non-data, use the lowest supported rate */
		ridx = (ic->ic_curmode == IEEE80211_MODE_11A) ?
		    IWM_RIDX_OFDM : IWM_RIDX_CCK;
	} else if (ic->ic_fixed_rate != -1) {
		/* User pinned a rate; honor it. */
		ridx = sc->sc_fixed_ridx;
	} else {
		/* for data frames, use RS table */
		/*
		 * Let the firmware's rate-scaling table drive the rate
		 * (IWM_TX_CMD_FLG_STA_RATE); rate_n_flags is left to
		 * the firmware, so return the rate entry immediately.
		 */
		tx->initial_rate_index = (nrates - 1) - ni->ni_txrate;
		tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
		DPRINTFN(12, ("start with txrate %d\n", tx->initial_rate_index));
		ridx = in->in_ridx[ni->ni_txrate];
		return &iwm_rates[ridx];
	}

	/* Explicit rate: encode antenna, CCK flag, and PLCP value. */
	rinfo = &iwm_rates[ridx];
	rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
	if (IWM_RIDX_IS_CCK(ridx))
		rate_flags |= IWM_RATE_MCS_CCK_MSK;
	tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);

	return rinfo;
}
3857
#define TB0_SIZE 16
/*
 * Transmit frame 'm' to node 'ni' on TX ring 'ac'.
 *
 * Builds the firmware TX command in the ring's command slot (an
 * iwm_tx_cmd immediately followed by a copy of the 802.11 header),
 * DMA-maps the frame payload, fills in the TFD descriptor and advances
 * the ring write pointer.
 *
 * Returns 0 on success or an errno.  The mbuf is consumed (freed) on
 * every error path, so the caller must not free it again.
 */
static int
iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_node *in = (void *)ni;
	struct iwm_tx_ring *ring;
	struct iwm_tx_data *data;
	struct iwm_tfd *desc;
	struct iwm_device_cmd *cmd;
	struct iwm_tx_cmd *tx;
	struct ieee80211_frame *wh;
	struct ieee80211_key *k = NULL;
	struct mbuf *m1;
	const struct iwm_rate *rinfo;
	uint32_t flags;
	u_int hdrlen;
	bus_dma_segment_t *seg;
	uint8_t tid, type;
	int i, totlen, error, pad;
	int hdrlen2;

	wh = mtod(m, struct ieee80211_frame *);
	hdrlen = ieee80211_anyhdrsize(wh);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;

	/* Sanity cross-check of the two header-length computations. */
	hdrlen2 = (ieee80211_has_qos(wh)) ?
	    sizeof (struct ieee80211_qosframe) :
	    sizeof (struct ieee80211_frame);

	if (hdrlen != hdrlen2)
		DPRINTF(("%s: hdrlen error (%d != %d)\n",
		    DEVNAME(sc), hdrlen, hdrlen2));

	/* Only TID 0 is used (no QoS/aggregation support here). */
	tid = 0;

	ring = &sc->txq[ac];
	desc = &ring->desc[ring->cur];
	memset(desc, 0, sizeof(*desc));
	data = &ring->data[ring->cur];

	/* Fill out iwm_tx_cmd to send to the firmware */
	cmd = &ring->cmd[ring->cur];
	cmd->hdr.code = IWM_TX_CMD;
	cmd->hdr.flags = 0;
	cmd->hdr.qid = ring->qid;
	cmd->hdr.idx = ring->cur;

	tx = (void *)cmd->data;
	memset(tx, 0, sizeof(*tx));

	/* Choose the TX rate and fill the rate fields of the command. */
	rinfo = iwm_tx_fill_cmd(sc, in, wh, tx);

	/* Hand a copy to radiotap listeners, if any. */
	if (sc->sc_drvbpf != NULL) {
		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;

		tap->wt_flags = 0;
		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
		tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
		tap->wt_rate = rinfo->rate;
		tap->wt_hwqueue = ac;
		if (wh->i_fc[1] & IEEE80211_FC1_WEP)
			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;

		bpf_mtap2(sc->sc_drvbpf, tap, sc->sc_txtap_len, m);
	}

	/* Encrypt the frame if need be. */
	if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
		k = ieee80211_crypto_encap(ic, ni, m);
		if (k == NULL) {
			m_freem(m);
			return ENOBUFS;
		}
		/* Packet header may have moved, reset our local pointer. */
		wh = mtod(m, struct ieee80211_frame *);
	}
	totlen = m->m_pkthdr.len;

	flags = 0;
	/* Unicast frames want an ACK from the peer. */
	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= IWM_TX_CMD_FLG_ACK;
	}

	/* RTS/CTS protection for long unicast non-data frames. */
	if (type != IEEE80211_FC0_TYPE_DATA
	    && (totlen + IEEE80211_CRC_LEN > ic->ic_rtsthreshold)
	    && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
	}

	/* Multicast and management frames go out via the aux station. */
	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
	    type != IEEE80211_FC0_TYPE_DATA)
		tx->sta_id = sc->sc_aux_sta.sta_id;
	else
		tx->sta_id = IWM_STATION_ID;

	/*
	 * Power-save frame timeout: (re)assoc requests get a slightly
	 * longer budget than other management frames.
	 */
	if (type == IEEE80211_FC0_TYPE_MGT) {
		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
			tx->pm_frame_timeout = htole16(3);
		else
			tx->pm_frame_timeout = htole16(2);
	} else {
		tx->pm_frame_timeout = htole16(0);
	}

	if (hdrlen & 3) {
		/* First segment length must be a multiple of 4. */
		flags |= IWM_TX_CMD_FLG_MH_PAD;
		pad = 4 - (hdrlen & 3);
	} else
		pad = 0;

	tx->driver_txop = 0;
	tx->next_frame_len = 0;

	tx->len = htole16(totlen);
	tx->tid_tspec = tid;
	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);

	/* Set physical address of "scratch area". */
	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);

	/* Copy 802.11 header in TX command. */
	memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);

	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;

	tx->sec_ctl = 0;
	tx->tx_flags |= htole32(flags);

	/* Trim 802.11 header. */
	m_adj(m, hdrlen);

	error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
	    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
	if (error != 0) {
		if (error != EFBIG) {
			aprint_error_dev(sc->sc_dev,
			    "can't map mbuf (error %d)\n", error);
			m_freem(m);
			return error;
		}
		/* Too many DMA segments, linearize mbuf. */
		MGETHDR(m1, M_DONTWAIT, MT_DATA);
		if (m1 == NULL) {
			m_freem(m);
			return ENOBUFS;
		}
		if (m->m_pkthdr.len > MHLEN) {
			MCLGET(m1, M_DONTWAIT);
			if (!(m1->m_flags & M_EXT)) {
				m_freem(m);
				m_freem(m1);
				return ENOBUFS;
			}
		}
		m_copydata(m, 0, m->m_pkthdr.len, mtod(m1, void *));
		m1->m_pkthdr.len = m1->m_len = m->m_pkthdr.len;
		m_freem(m);
		m = m1;

		error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
		    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "can't map mbuf (error %d)\n", error);
			m_freem(m);
			return error;
		}
	}
	data->m = m;
	data->in = in;
	data->done = 0;

	DPRINTFN(8, ("sending txd %p, in %p\n", data, data->in));
	KASSERT(data->in != NULL);

	DPRINTFN(8, ("sending data: qid=%d idx=%d len=%d nsegs=%d\n",
	    ring->qid, ring->cur, totlen, data->map->dm_nsegs));

	/* Fill TX descriptor. */
	/* TB 0/1 carry the command+header; the rest carry the payload. */
	desc->num_tbs = 2 + data->map->dm_nsegs;

	/* TB 0: first TB0_SIZE bytes of the command. */
	desc->tbs[0].lo = htole32(data->cmd_paddr);
	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    (TB0_SIZE << 4);
	/* TB 1: remainder of the command + 802.11 header + pad. */
	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
	      + hdrlen + pad - TB0_SIZE) << 4);

	/* Other DMA segments are for data payload. */
	seg = data->map->dm_segs;
	for (i = 0; i < data->map->dm_nsegs; i++, seg++) {
		desc->tbs[i+2].lo = htole32(seg->ds_addr);
		desc->tbs[i+2].hi_n_len = \
		    htole16(iwm_get_dma_hi_addr(seg->ds_addr))
		    | ((seg->ds_len) << 4);
	}

	/* Make payload, command and descriptor visible to the device. */
	bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
	    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
	    sizeof (*cmd), BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
	    sizeof (*desc), BUS_DMASYNC_PREWRITE);

#if 0
	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
#endif

	/* Kick TX ring. */
	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	/* Mark TX ring as full if we reach a certain threshold. */
	if (++ring->queued > IWM_TX_RING_HIMARK) {
		sc->qfullmsk |= 1 << ring->qid;
	}

	return 0;
}
4086
#if 0
/* not necessary? */
/*
 * Ask the firmware to flush frames pending in the TX FIFOs selected by
 * 'tfd_msk', either synchronously or asynchronously.  Currently
 * compiled out; kept for reference.
 */
static int
iwm_mvm_flush_tx_path(struct iwm_softc *sc, int tfd_msk, int sync)
{
	struct iwm_tx_path_flush_cmd flush_cmd = {
		.queues_ctl = htole32(tfd_msk),
		.flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
	};
	int ret;

	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH,
	    sync ? IWM_CMD_SYNC : IWM_CMD_ASYNC,
	    sizeof(flush_cmd), &flush_cmd);
	if (ret)
		aprint_error_dev(sc->sc_dev, "Flushing tx queue failed: %d\n",
		    ret);
	return ret;
}
#endif
4107
4108
4109 /*
4110 * BEGIN mvm/power.c
4111 */
4112
4113 #define IWM_POWER_KEEP_ALIVE_PERIOD_SEC 25
4114
4115 static int
iwm_mvm_beacon_filter_send_cmd(struct iwm_softc * sc,struct iwm_beacon_filter_cmd * cmd)4116 iwm_mvm_beacon_filter_send_cmd(struct iwm_softc *sc,
4117 struct iwm_beacon_filter_cmd *cmd)
4118 {
4119 int ret;
4120
4121 ret = iwm_mvm_send_cmd_pdu(sc, IWM_REPLY_BEACON_FILTERING_CMD,
4122 IWM_CMD_SYNC, sizeof(struct iwm_beacon_filter_cmd), cmd);
4123
4124 if (!ret) {
4125 DPRINTF(("ba_enable_beacon_abort is: %d\n",
4126 le32toh(cmd->ba_enable_beacon_abort)));
4127 DPRINTF(("ba_escape_timer is: %d\n",
4128 le32toh(cmd->ba_escape_timer)));
4129 DPRINTF(("bf_debug_flag is: %d\n",
4130 le32toh(cmd->bf_debug_flag)));
4131 DPRINTF(("bf_enable_beacon_filter is: %d\n",
4132 le32toh(cmd->bf_enable_beacon_filter)));
4133 DPRINTF(("bf_energy_delta is: %d\n",
4134 le32toh(cmd->bf_energy_delta)));
4135 DPRINTF(("bf_escape_timer is: %d\n",
4136 le32toh(cmd->bf_escape_timer)));
4137 DPRINTF(("bf_roaming_energy_delta is: %d\n",
4138 le32toh(cmd->bf_roaming_energy_delta)));
4139 DPRINTF(("bf_roaming_state is: %d\n",
4140 le32toh(cmd->bf_roaming_state)));
4141 DPRINTF(("bf_temp_threshold is: %d\n",
4142 le32toh(cmd->bf_temp_threshold)));
4143 DPRINTF(("bf_temp_fast_filter is: %d\n",
4144 le32toh(cmd->bf_temp_fast_filter)));
4145 DPRINTF(("bf_temp_slow_filter is: %d\n",
4146 le32toh(cmd->bf_temp_slow_filter)));
4147 }
4148 return ret;
4149 }
4150
/*
 * Fill the connection-quality-monitoring related fields of a beacon
 * filter command.  Only the beacon-abort enable flag is propagated
 * from the softc; 'in' is currently unused.
 */
static void
iwm_mvm_beacon_filter_set_cqm_params(struct iwm_softc *sc,
	struct iwm_node *in, struct iwm_beacon_filter_cmd *cmd)
{
	cmd->ba_enable_beacon_abort = htole32(sc->sc_bf.ba_enabled);
}
4157
4158 static int
iwm_mvm_update_beacon_abort(struct iwm_softc * sc,struct iwm_node * in,int enable)4159 iwm_mvm_update_beacon_abort(struct iwm_softc *sc, struct iwm_node *in,
4160 int enable)
4161 {
4162 struct iwm_beacon_filter_cmd cmd = {
4163 IWM_BF_CMD_CONFIG_DEFAULTS,
4164 .bf_enable_beacon_filter = htole32(1),
4165 .ba_enable_beacon_abort = htole32(enable),
4166 };
4167
4168 if (!sc->sc_bf.bf_enabled)
4169 return 0;
4170
4171 sc->sc_bf.ba_enabled = enable;
4172 iwm_mvm_beacon_filter_set_cqm_params(sc, in, &cmd);
4173 return iwm_mvm_beacon_filter_send_cmd(sc, &cmd);
4174 }
4175
/*
 * Debug-log the contents of a MAC power command.  Since the driver
 * only ever runs with power management disabled (CAM), the code after
 * the early return asserts; the detailed logging below is kept from
 * the Linux driver under #if 0 for future use.
 */
static void
iwm_mvm_power_log(struct iwm_softc *sc, struct iwm_mac_power_cmd *cmd)
{
	DPRINTF(("Sending power table command on mac id 0x%X for "
	    "power level %d, flags = 0x%X\n",
	    cmd->id_and_color, IWM_POWER_SCHEME_CAM, le16toh(cmd->flags)));
	DPRINTF(("Keep alive = %u sec\n", le16toh(cmd->keep_alive_seconds)));

	if (!(cmd->flags & htole16(IWM_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK))) {
		DPRINTF(("Disable power management\n"));
		return;
	}
	/* Power management is not supported; reaching here is a bug. */
	KASSERT(0);

#if 0
	DPRINTF(mvm, "Rx timeout = %u usec\n",
	    le32_to_cpu(cmd->rx_data_timeout));
	DPRINTF(mvm, "Tx timeout = %u usec\n",
	    le32_to_cpu(cmd->tx_data_timeout));
	if (cmd->flags & cpu_to_le16(IWM_POWER_FLAGS_SKIP_OVER_DTIM_MSK))
		DPRINTF(mvm, "DTIM periods to skip = %u\n",
		    cmd->skip_dtim_periods);
	if (cmd->flags & cpu_to_le16(IWM_POWER_FLAGS_LPRX_ENA_MSK))
		DPRINTF(mvm, "LP RX RSSI threshold = %u\n",
		    cmd->lprx_rssi_threshold);
	if (cmd->flags & cpu_to_le16(IWM_POWER_FLAGS_ADVANCE_PM_ENA_MSK)) {
		DPRINTF(mvm, "uAPSD enabled\n");
		DPRINTF(mvm, "Rx timeout (uAPSD) = %u usec\n",
		    le32_to_cpu(cmd->rx_data_timeout_uapsd));
		DPRINTF(mvm, "Tx timeout (uAPSD) = %u usec\n",
		    le32_to_cpu(cmd->tx_data_timeout_uapsd));
		DPRINTF(mvm, "QNDP TID = %d\n", cmd->qndp_tid);
		DPRINTF(mvm, "ACs flags = 0x%x\n", cmd->uapsd_ac_flags);
		DPRINTF(mvm, "Max SP = %d\n", cmd->uapsd_max_sp);
	}
#endif
}
4213
4214 static void
iwm_mvm_power_build_cmd(struct iwm_softc * sc,struct iwm_node * in,struct iwm_mac_power_cmd * cmd)4215 iwm_mvm_power_build_cmd(struct iwm_softc *sc, struct iwm_node *in,
4216 struct iwm_mac_power_cmd *cmd)
4217 {
4218 struct ieee80211com *ic = &sc->sc_ic;
4219 struct ieee80211_node *ni = &in->in_ni;
4220 int dtimper, dtimper_msec;
4221 int keep_alive;
4222
4223 cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
4224 in->in_color));
4225 dtimper = ic->ic_dtim_period ?: 1;
4226
4227 /*
4228 * Regardless of power management state the driver must set
4229 * keep alive period. FW will use it for sending keep alive NDPs
4230 * immediately after association. Check that keep alive period
4231 * is at least 3 * DTIM
4232 */
4233 dtimper_msec = dtimper * ni->ni_intval;
4234 keep_alive
4235 = MAX(3 * dtimper_msec, 1000 * IWM_POWER_KEEP_ALIVE_PERIOD_SEC);
4236 keep_alive = roundup(keep_alive, 1000) / 1000;
4237 cmd->keep_alive_seconds = htole16(keep_alive);
4238 }
4239
4240 static int
iwm_mvm_power_mac_update_mode(struct iwm_softc * sc,struct iwm_node * in)4241 iwm_mvm_power_mac_update_mode(struct iwm_softc *sc, struct iwm_node *in)
4242 {
4243 int ret;
4244 int ba_enable;
4245 struct iwm_mac_power_cmd cmd;
4246
4247 memset(&cmd, 0, sizeof(cmd));
4248
4249 iwm_mvm_power_build_cmd(sc, in, &cmd);
4250 iwm_mvm_power_log(sc, &cmd);
4251
4252 if ((ret = iwm_mvm_send_cmd_pdu(sc, IWM_MAC_PM_POWER_TABLE,
4253 IWM_CMD_SYNC, sizeof(cmd), &cmd)) != 0)
4254 return ret;
4255
4256 ba_enable = !!(cmd.flags &
4257 htole16(IWM_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
4258 return iwm_mvm_update_beacon_abort(sc, in, ba_enable);
4259 }
4260
4261 static int
iwm_mvm_power_update_device(struct iwm_softc * sc)4262 iwm_mvm_power_update_device(struct iwm_softc *sc)
4263 {
4264 struct iwm_device_power_cmd cmd = {
4265 .flags = htole16(IWM_DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK),
4266 };
4267
4268 if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DEVICE_PS_CMD))
4269 return 0;
4270
4271 cmd.flags |= htole16(IWM_DEVICE_POWER_FLAGS_CAM_MSK);
4272 DPRINTF(("Sending device power command with flags = 0x%X\n", cmd.flags));
4273
4274 return iwm_mvm_send_cmd_pdu(sc,
4275 IWM_POWER_TABLE_CMD, IWM_CMD_SYNC, sizeof(cmd), &cmd);
4276 }
4277
4278 static int
iwm_mvm_enable_beacon_filter(struct iwm_softc * sc,struct iwm_node * in)4279 iwm_mvm_enable_beacon_filter(struct iwm_softc *sc, struct iwm_node *in)
4280 {
4281 struct iwm_beacon_filter_cmd cmd = {
4282 IWM_BF_CMD_CONFIG_DEFAULTS,
4283 .bf_enable_beacon_filter = htole32(1),
4284 };
4285 int ret;
4286
4287 iwm_mvm_beacon_filter_set_cqm_params(sc, in, &cmd);
4288 ret = iwm_mvm_beacon_filter_send_cmd(sc, &cmd);
4289
4290 if (ret == 0)
4291 sc->sc_bf.bf_enabled = 1;
4292
4293 return ret;
4294 }
4295
4296 static int
iwm_mvm_disable_beacon_filter(struct iwm_softc * sc,struct iwm_node * in)4297 iwm_mvm_disable_beacon_filter(struct iwm_softc *sc, struct iwm_node *in)
4298 {
4299 struct iwm_beacon_filter_cmd cmd;
4300 int ret;
4301
4302 memset(&cmd, 0, sizeof(cmd));
4303 if ((sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_BF_UPDATED) == 0)
4304 return 0;
4305
4306 ret = iwm_mvm_beacon_filter_send_cmd(sc, &cmd);
4307 if (ret == 0)
4308 sc->sc_bf.bf_enabled = 0;
4309
4310 return ret;
4311 }
4312
#if 0
/*
 * Re-send the current beacon filter configuration, if filtering is
 * enabled.  Currently compiled out; kept for reference.
 */
static int
iwm_mvm_update_beacon_filter(struct iwm_softc *sc, struct iwm_node *in)
{
	if (!sc->sc_bf.bf_enabled)
		return 0;

	return iwm_mvm_enable_beacon_filter(sc, in);
}
#endif
4323
4324 /*
4325 * END mvm/power.c
4326 */
4327
4328 /*
4329 * BEGIN mvm/sta.c
4330 */
4331
4332 static void
iwm_mvm_add_sta_cmd_v6_to_v5(struct iwm_mvm_add_sta_cmd_v6 * cmd_v6,struct iwm_mvm_add_sta_cmd_v5 * cmd_v5)4333 iwm_mvm_add_sta_cmd_v6_to_v5(struct iwm_mvm_add_sta_cmd_v6 *cmd_v6,
4334 struct iwm_mvm_add_sta_cmd_v5 *cmd_v5)
4335 {
4336 memset(cmd_v5, 0, sizeof(*cmd_v5));
4337
4338 cmd_v5->add_modify = cmd_v6->add_modify;
4339 cmd_v5->tid_disable_tx = cmd_v6->tid_disable_tx;
4340 cmd_v5->mac_id_n_color = cmd_v6->mac_id_n_color;
4341 memcpy(cmd_v5->addr, cmd_v6->addr, ETHER_ADDR_LEN);
4342 cmd_v5->sta_id = cmd_v6->sta_id;
4343 cmd_v5->modify_mask = cmd_v6->modify_mask;
4344 cmd_v5->station_flags = cmd_v6->station_flags;
4345 cmd_v5->station_flags_msk = cmd_v6->station_flags_msk;
4346 cmd_v5->add_immediate_ba_tid = cmd_v6->add_immediate_ba_tid;
4347 cmd_v5->remove_immediate_ba_tid = cmd_v6->remove_immediate_ba_tid;
4348 cmd_v5->add_immediate_ba_ssn = cmd_v6->add_immediate_ba_ssn;
4349 cmd_v5->sleep_tx_count = cmd_v6->sleep_tx_count;
4350 cmd_v5->sleep_state_flags = cmd_v6->sleep_state_flags;
4351 cmd_v5->assoc_id = cmd_v6->assoc_id;
4352 cmd_v5->beamform_flags = cmd_v6->beamform_flags;
4353 cmd_v5->tfd_queue_msk = cmd_v6->tfd_queue_msk;
4354 }
4355
4356 static int
iwm_mvm_send_add_sta_cmd_status(struct iwm_softc * sc,struct iwm_mvm_add_sta_cmd_v6 * cmd,int * status)4357 iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *sc,
4358 struct iwm_mvm_add_sta_cmd_v6 *cmd, int *status)
4359 {
4360 struct iwm_mvm_add_sta_cmd_v5 cmd_v5;
4361
4362 if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_STA_KEY_CMD) {
4363 return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA,
4364 sizeof(*cmd), cmd, status);
4365 }
4366
4367 iwm_mvm_add_sta_cmd_v6_to_v5(cmd, &cmd_v5);
4368
4369 return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(cmd_v5),
4370 &cmd_v5, status);
4371 }
4372
4373 /* send station add/update command to firmware */
4374 static int
iwm_mvm_sta_send_to_fw(struct iwm_softc * sc,struct iwm_node * in,int update)4375 iwm_mvm_sta_send_to_fw(struct iwm_softc *sc, struct iwm_node *in, int update)
4376 {
4377 struct iwm_mvm_add_sta_cmd_v6 add_sta_cmd;
4378 int ret;
4379 uint32_t status;
4380
4381 memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
4382
4383 add_sta_cmd.sta_id = IWM_STATION_ID;
4384 add_sta_cmd.mac_id_n_color
4385 = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
4386 if (!update) {
4387 add_sta_cmd.tfd_queue_msk = htole32(0xf);
4388 IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
4389 }
4390 add_sta_cmd.add_modify = update ? 1 : 0;
4391 add_sta_cmd.station_flags_msk
4392 |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
4393
4394 status = IWM_ADD_STA_SUCCESS;
4395 ret = iwm_mvm_send_add_sta_cmd_status(sc, &add_sta_cmd, &status);
4396 if (ret)
4397 return ret;
4398
4399 switch (status) {
4400 case IWM_ADD_STA_SUCCESS:
4401 break;
4402 default:
4403 ret = EIO;
4404 DPRINTF(("IWM_ADD_STA failed\n"));
4405 break;
4406 }
4407
4408 return ret;
4409 }
4410
/*
 * Add the BSS station to the firmware (initial add, not an update).
 */
static int
iwm_mvm_add_sta(struct iwm_softc *sc, struct iwm_node *in)
{
	return iwm_mvm_sta_send_to_fw(sc, in, 0);
}
4422
/*
 * Update the already-added BSS station in the firmware.
 */
static int
iwm_mvm_update_sta(struct iwm_softc *sc, struct iwm_node *in)
{
	int err;

	err = iwm_mvm_sta_send_to_fw(sc, in, 1);
	return err;
}
4428
4429 static int
iwm_mvm_add_int_sta_common(struct iwm_softc * sc,struct iwm_int_sta * sta,const uint8_t * addr,uint16_t mac_id,uint16_t color)4430 iwm_mvm_add_int_sta_common(struct iwm_softc *sc, struct iwm_int_sta *sta,
4431 const uint8_t *addr, uint16_t mac_id, uint16_t color)
4432 {
4433 struct iwm_mvm_add_sta_cmd_v6 cmd;
4434 int ret;
4435 uint32_t status;
4436
4437 memset(&cmd, 0, sizeof(cmd));
4438 cmd.sta_id = sta->sta_id;
4439 cmd.mac_id_n_color = htole32(IWM_FW_CMD_ID_AND_COLOR(mac_id, color));
4440
4441 cmd.tfd_queue_msk = htole32(sta->tfd_queue_msk);
4442
4443 if (addr)
4444 memcpy(cmd.addr, addr, ETHER_ADDR_LEN);
4445
4446 ret = iwm_mvm_send_add_sta_cmd_status(sc, &cmd, &status);
4447 if (ret)
4448 return ret;
4449
4450 switch (status) {
4451 case IWM_ADD_STA_SUCCESS:
4452 DPRINTF(("Internal station added.\n"));
4453 return 0;
4454 default:
4455 DPRINTF(("%s: Add internal station failed, status=0x%x\n",
4456 DEVNAME(sc), status));
4457 ret = EIO;
4458 break;
4459 }
4460 return ret;
4461 }
4462
4463 static int
iwm_mvm_add_aux_sta(struct iwm_softc * sc)4464 iwm_mvm_add_aux_sta(struct iwm_softc *sc)
4465 {
4466 int ret;
4467
4468 sc->sc_aux_sta.sta_id = 3;
4469 sc->sc_aux_sta.tfd_queue_msk = 0;
4470
4471 ret = iwm_mvm_add_int_sta_common(sc,
4472 &sc->sc_aux_sta, NULL, IWM_MAC_INDEX_AUX, 0);
4473
4474 if (ret)
4475 memset(&sc->sc_aux_sta, 0, sizeof(sc->sc_aux_sta));
4476 return ret;
4477 }
4478
4479 /*
4480 * END mvm/sta.c
4481 */
4482
4483 /*
4484 * BEGIN mvm/scan.c
4485 */
4486
4487 #define IWM_PLCP_QUIET_THRESH 1
4488 #define IWM_ACTIVE_QUIET_TIME 10
4489 #define LONG_OUT_TIME_PERIOD 600
4490 #define SHORT_OUT_TIME_PERIOD 200
4491 #define SUSPEND_TIME_PERIOD 100
4492
4493 static uint16_t
iwm_mvm_scan_rx_chain(struct iwm_softc * sc)4494 iwm_mvm_scan_rx_chain(struct iwm_softc *sc)
4495 {
4496 uint16_t rx_chain;
4497 uint8_t rx_ant;
4498
4499 rx_ant = IWM_FW_VALID_RX_ANT(sc);
4500 rx_chain = rx_ant << IWM_PHY_RX_CHAIN_VALID_POS;
4501 rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_MIMO_SEL_POS;
4502 rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_SEL_POS;
4503 rx_chain |= 0x1 << IWM_PHY_RX_CHAIN_DRIVER_FORCE_POS;
4504 return htole16(rx_chain);
4505 }
4506
4507 #define ieee80211_tu_to_usec(a) (1024*(a))
4508
4509 static uint32_t
iwm_mvm_scan_max_out_time(struct iwm_softc * sc,uint32_t flags,int is_assoc)4510 iwm_mvm_scan_max_out_time(struct iwm_softc *sc, uint32_t flags, int is_assoc)
4511 {
4512 if (!is_assoc)
4513 return 0;
4514 if (flags & 0x1)
4515 return htole32(ieee80211_tu_to_usec(SHORT_OUT_TIME_PERIOD));
4516 return htole32(ieee80211_tu_to_usec(LONG_OUT_TIME_PERIOD));
4517 }
4518
4519 static uint32_t
iwm_mvm_scan_suspend_time(struct iwm_softc * sc,int is_assoc)4520 iwm_mvm_scan_suspend_time(struct iwm_softc *sc, int is_assoc)
4521 {
4522 if (!is_assoc)
4523 return 0;
4524 return htole32(ieee80211_tu_to_usec(SUSPEND_TIME_PERIOD));
4525 }
4526
4527 static uint32_t
iwm_mvm_scan_rxon_flags(struct iwm_softc * sc,int flags)4528 iwm_mvm_scan_rxon_flags(struct iwm_softc *sc, int flags)
4529 {
4530 if (flags & IEEE80211_CHAN_2GHZ)
4531 return htole32(IWM_PHY_BAND_24);
4532 else
4533 return htole32(IWM_PHY_BAND_5);
4534 }
4535
/*
 * Pick the TX rate and antenna for scan probe requests.  Antennas are
 * rotated round-robin over the valid TX antenna mask, remembering the
 * last one used in sc->sc_scan_last_antenna so consecutive scans
 * alternate chains.  Returns a little-endian rate_n_flags word.
 */
static uint32_t
iwm_mvm_scan_rate_n_flags(struct iwm_softc *sc, int flags, int no_cck)
{
	uint32_t tx_ant;
	int i, ind;

	/* Advance to the next valid antenna after the previous choice. */
	for (i = 0, ind = sc->sc_scan_last_antenna;
	    i < IWM_RATE_MCS_ANT_NUM; i++) {
		ind = (ind + 1) % IWM_RATE_MCS_ANT_NUM;
		if (IWM_FW_VALID_TX_ANT(sc) & (1 << ind)) {
			sc->sc_scan_last_antenna = ind;
			break;
		}
	}
	tx_ant = (1 << sc->sc_scan_last_antenna) << IWM_RATE_MCS_ANT_POS;

	/* 1 Mbit/s CCK on 2 GHz unless CCK is disallowed, else 6 Mbit/s OFDM. */
	if ((flags & IEEE80211_CHAN_2GHZ) && !no_cck)
		return htole32(IWM_RATE_1M_PLCP | IWM_RATE_MCS_CCK_MSK |
		    tx_ant);
	else
		return htole32(IWM_RATE_6M_PLCP | tx_ant);
}
4558
4559 /*
4560 * If req->n_ssids > 0, it means we should do an active scan.
4561 * In case of active scan w/o directed scan, we receive a zero-length SSID
4562 * just to notify that this scan is active and not passive.
4563 * In order to notify the FW of the number of SSIDs we wish to scan (including
4564 * the zero-length one), we need to set the corresponding bits in chan->type,
4565 * one for each SSID, and set the active bit (first). If the first SSID is
4566 * already included in the probe template, so we need to set only
4567 * req->n_ssids - 1 bits in addition to the first bit.
4568 */
4569 static uint16_t
iwm_mvm_get_active_dwell(struct iwm_softc * sc,int flags,int n_ssids)4570 iwm_mvm_get_active_dwell(struct iwm_softc *sc, int flags, int n_ssids)
4571 {
4572 if (flags & IEEE80211_CHAN_2GHZ)
4573 return 30 + 3 * (n_ssids + 1);
4574 return 20 + 2 * (n_ssids + 1);
4575 }
4576
4577 static uint16_t
iwm_mvm_get_passive_dwell(struct iwm_softc * sc,int flags)4578 iwm_mvm_get_passive_dwell(struct iwm_softc *sc, int flags)
4579 {
4580 return (flags & IEEE80211_CHAN_2GHZ) ? 100 + 20 : 100 + 10;
4581 }
4582
/*
 * Append one iwm_scan_channel entry per channel matching 'flags' to
 * the scan command, directly after the probe request template (whose
 * length is tx_cmd.len).  Returns the number of channels added.
 */
static int
iwm_mvm_scan_fill_channels(struct iwm_softc *sc, struct iwm_scan_cmd *cmd,
	int flags, int n_ssids, int basic_ssid)
{
	struct ieee80211com *ic = &sc->sc_ic;
	uint16_t passive_dwell = iwm_mvm_get_passive_dwell(sc, flags);
	uint16_t active_dwell = iwm_mvm_get_active_dwell(sc, flags, n_ssids);
	/* Channel entries start right after the probe request template. */
	struct iwm_scan_channel *chan = (struct iwm_scan_channel *)
		(cmd->data + le16toh(cmd->tx_cmd.len));
	/* One type bit per SSID; see the block comment above. */
	int type = (1 << n_ssids) - 1;
	struct ieee80211_channel *c;
	int nchan;

	if (!basic_ssid)
		type |= (1 << n_ssids);

	/* ic_channels[0] is not a valid channel; start at index 1. */
	for (nchan = 0, c = &ic->ic_channels[1];
	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX];
	    c++) {
		/* The channel must support every capability in 'flags'. */
		if ((c->ic_flags & flags) != flags)
			continue;

		chan->channel = htole16(ieee80211_mhz2ieee(c->ic_freq, flags));
		chan->type = htole32(type);
		/* Never probe actively on passive-only channels. */
		if (c->ic_flags & IEEE80211_CHAN_PASSIVE)
			chan->type &= htole32(~IWM_SCAN_CHANNEL_TYPE_ACTIVE);
		chan->active_dwell = htole16(active_dwell);
		chan->passive_dwell = htole16(passive_dwell);
		chan->iteration_count = htole16(1);
		chan++;
		nchan++;
	}
	if (nchan == 0)
		DPRINTF(("%s: NO CHANNEL!\n", DEVNAME(sc)));
	return nchan;
}
4619
4620 /*
4621 * Fill in probe request with the following parameters:
4622 * TA is our vif HW address, which mac80211 ensures we have.
4623 * Packet is broadcasted, so this is both SA and DA.
4624 * The probe request IE is made out of two: first comes the most prioritized
4625 * SSID if a directed scan is requested. Second comes whatever extra
4626 * information was given to us as the scan request IE.
4627 */
/*
 * Build a broadcast probe request into 'frame': 802.11 header, SSID IE
 * (for active scans) and optional extra IEs, never writing more than
 * 'left' bytes.  Returns the number of bytes written, or 0 when the
 * buffer is too small for the mandatory parts.
 */
static uint16_t
iwm_mvm_fill_probe_req(struct iwm_softc *sc, struct ieee80211_frame *frame,
	const uint8_t *ta, int n_ssids, const uint8_t *ssid, int ssid_len,
	const uint8_t *ie, int ie_len, int left)
{
	int len = 0;
	uint8_t *pos = NULL;

	/* Make sure there is enough space for the probe request,
	 * two mandatory IEs and the data */
	left -= sizeof(*frame);
	if (left < 0)
		return 0;

	/* Broadcast probe request: DA and BSSID broadcast, SA = 'ta'. */
	frame->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
	frame->i_fc[1] = IEEE80211_FC1_DIR_NODS;
	IEEE80211_ADDR_COPY(frame->i_addr1, etherbroadcastaddr);
	memcpy(frame->i_addr2, ta, ETHER_ADDR_LEN);
	IEEE80211_ADDR_COPY(frame->i_addr3, etherbroadcastaddr);

	len += sizeof(*frame);
	CTASSERT(sizeof(*frame) == 24);

	/* for passive scans, no need to fill anything */
	if (n_ssids == 0)
		return (uint16_t)len;

	/* points to the payload of the request */
	pos = (uint8_t *)frame + sizeof(*frame);

	/* fill in our SSID IE */
	left -= ssid_len + 2;
	if (left < 0)
		return 0;
	*pos++ = IEEE80211_ELEMID_SSID;
	*pos++ = ssid_len;
	if (ssid && ssid_len) { /* ssid_len may be == 0 even if ssid is valid */
		memcpy(pos, ssid, ssid_len);
		pos += ssid_len;
	}

	len += ssid_len + 2;

	/* If the extra IEs do not fit, send the frame without them. */
	if (left < ie_len)
		return len;

	if (ie && ie_len) {
		memcpy(pos, ie, ie_len);
		len += ie_len;
	}

	return (uint16_t)len;
}
4682
/*
 * Build and send a one-shot firmware scan request for the band given
 * by 'flags' (IEEE80211_CHAN_2GHZ or IEEE80211_CHAN_5GHZ).  An active
 * scan is done when n_ssids > 0, using 'ssid'/'ssid_len' for the
 * directed probe request.  The preallocated sc_scan_cmd buffer is
 * handed to the firmware without copying (NOCOPY).  Returns 0 on
 * success, EIO when the firmware rejects the request.
 */
static int
iwm_mvm_scan_request(struct iwm_softc *sc, int flags,
	int n_ssids, uint8_t *ssid, int ssid_len)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_host_cmd hcmd = {
		.id = IWM_SCAN_REQUEST_CMD,
		.len = { 0, },
		.data = { sc->sc_scan_cmd, },
		.flags = IWM_CMD_SYNC,
		.dataflags = { IWM_HCMD_DFL_NOCOPY, },
	};
	struct iwm_scan_cmd *cmd = sc->sc_scan_cmd;
	/* NOTE(review): scans are always issued as not-associated here. */
	int is_assoc = 0;
	int ret;
	uint32_t status;
	int basic_ssid = !(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_NO_BASIC_SSID);

	//lockdep_assert_held(&mvm->mutex);

	/* Remember which band this scan covers for RX-side bookkeeping. */
	sc->sc_scanband = flags & (IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_5GHZ);

	DPRINTF(("Handling ieee80211 scan request\n"));
	memset(cmd, 0, sc->sc_scan_cmd_len);

	cmd->quiet_time = htole16(IWM_ACTIVE_QUIET_TIME);
	cmd->quiet_plcp_th = htole16(IWM_PLCP_QUIET_THRESH);
	cmd->rxchain_sel_flags = iwm_mvm_scan_rx_chain(sc);
	cmd->max_out_time = iwm_mvm_scan_max_out_time(sc, 0, is_assoc);
	cmd->suspend_time = iwm_mvm_scan_suspend_time(sc, is_assoc);
	cmd->rxon_flags = iwm_mvm_scan_rxon_flags(sc, flags);
	cmd->filter_flags = htole32(IWM_MAC_FILTER_ACCEPT_GRP |
	    IWM_MAC_FILTER_IN_BEACON);

	cmd->type = htole32(IWM_SCAN_TYPE_FORCED);
	cmd->repeats = htole32(1);

	/*
	 * If the user asked for passive scan, don't change to active scan if
	 * you see any activity on the channel - remain passive.
	 */
	if (n_ssids > 0) {
		cmd->passive2active = htole16(1);
		cmd->scan_flags |= IWM_SCAN_FLAGS_PASSIVE2ACTIVE;
#if 0
		if (basic_ssid) {
			ssid = req->ssids[0].ssid;
			ssid_len = req->ssids[0].ssid_len;
		}
#endif
	} else {
		cmd->passive2active = 0;
		cmd->scan_flags &= ~IWM_SCAN_FLAGS_PASSIVE2ACTIVE;
	}

	/* Probe requests are sent via the aux station. */
	cmd->tx_cmd.tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
	    IWM_TX_CMD_FLG_BT_DIS);
	cmd->tx_cmd.sta_id = sc->sc_aux_sta.sta_id;
	cmd->tx_cmd.life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
	cmd->tx_cmd.rate_n_flags = iwm_mvm_scan_rate_n_flags(sc, flags, 1/*XXX*/);

	/* Probe request template goes right after the scan command. */
	cmd->tx_cmd.len = htole16(iwm_mvm_fill_probe_req(sc,
	    (struct ieee80211_frame *)cmd->data,
	    ic->ic_myaddr, n_ssids, ssid, ssid_len,
	    NULL, 0, sc->sc_capa_max_probe_len));

	/* Channel list follows the probe request template. */
	cmd->channel_count
	    = iwm_mvm_scan_fill_channels(sc, cmd, flags, n_ssids, basic_ssid);

	cmd->len = htole16(sizeof(struct iwm_scan_cmd) +
	    le16toh(cmd->tx_cmd.len) +
	    (cmd->channel_count * sizeof(struct iwm_scan_channel)));
	hcmd.len[0] = le16toh(cmd->len);

	status = IWM_SCAN_RESPONSE_OK;
	ret = iwm_mvm_send_cmd_status(sc, &hcmd, &status);
	if (!ret && status == IWM_SCAN_RESPONSE_OK) {
		DPRINTF(("Scan request was sent successfully\n"));
	} else {
		/*
		 * If the scan failed, it usually means that the FW was unable
		 * to allocate the time events. Warn on it, but maybe we
		 * should try to send the command again with different params.
		 */
		sc->sc_scanband = 0;
		ret = EIO;
	}
	return ret;
}
4772
4773 /*
4774 * END mvm/scan.c
4775 */
4776
4777 /*
4778 * BEGIN mvm/mac-ctxt.c
4779 */
4780
/*
 * Compute the CCK and OFDM basic-rate bitmaps for the firmware MAC
 * context.  All supported rates of each modulation class are marked
 * basic, then any mandatory rates below the lowest basic rate are
 * added, as required by 802.11-2007 9.6 (see the long comment below).
 */
static void
iwm_mvm_ack_rates(struct iwm_softc *sc, struct iwm_node *in,
	int *cck_rates, int *ofdm_rates)
{
	struct ieee80211_node *ni = &in->in_ni;
	int lowest_present_ofdm = 100;
	int lowest_present_cck = 100;
	uint8_t cck = 0;
	uint8_t ofdm = 0;
	int i;

	/* On 2 GHz, mark every CCK rate as basic. */
	if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) {
		for (i = 0; i <= IWM_LAST_CCK_RATE; i++) {
			cck |= (1 << i);
			if (lowest_present_cck > i)
				lowest_present_cck = i;
		}
	}
	/* Mark every non-HT OFDM rate as basic. */
	for (i = IWM_FIRST_OFDM_RATE; i <= IWM_LAST_NON_HT_RATE; i++) {
		int adj = i - IWM_FIRST_OFDM_RATE;
		ofdm |= (1 << adj);
		if (lowest_present_ofdm > i)
			lowest_present_ofdm = i;
	}

	/*
	 * Now we've got the basic rates as bitmaps in the ofdm and cck
	 * variables. This isn't sufficient though, as there might not
	 * be all the right rates in the bitmap. E.g. if the only basic
	 * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
	 * and 6 Mbps because the 802.11-2007 standard says in 9.6:
	 *
	 *    [...] a STA responding to a received frame shall transmit
	 *    its Control Response frame [...] at the highest rate in the
	 *    BSSBasicRateSet parameter that is less than or equal to the
	 *    rate of the immediately previous frame in the frame exchange
	 *    sequence ([...]) and that is of the same modulation class
	 *    ([...]) as the received frame. If no rate contained in the
	 *    BSSBasicRateSet parameter meets these conditions, then the
	 *    control frame sent in response to a received frame shall be
	 *    transmitted at the highest mandatory rate of the PHY that is
	 *    less than or equal to the rate of the received frame, and
	 *    that is of the same modulation class as the received frame.
	 *
	 * As a consequence, we need to add all mandatory rates that are
	 * lower than all of the basic rates to these bitmaps.
	 */

	if (IWM_RATE_24M_INDEX < lowest_present_ofdm)
		ofdm |= IWM_RATE_BIT_MSK(24) >> IWM_FIRST_OFDM_RATE;
	if (IWM_RATE_12M_INDEX < lowest_present_ofdm)
		ofdm |= IWM_RATE_BIT_MSK(12) >> IWM_FIRST_OFDM_RATE;
	/* 6M already there or needed so always add */
	ofdm |= IWM_RATE_BIT_MSK(6) >> IWM_FIRST_OFDM_RATE;

	/*
	 * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
	 * Note, however:
	 *  - if no CCK rates are basic, it must be ERP since there must
	 *    be some basic rates at all, so they're OFDM => ERP PHY
	 *    (or we're in 5 GHz, and the cck bitmap will never be used)
	 *  - if 11M is a basic rate, it must be ERP as well, so add 5.5M
	 *  - if 5.5M is basic, 1M and 2M are mandatory
	 *  - if 2M is basic, 1M is mandatory
	 *  - if 1M is basic, that's the only valid ACK rate.
	 * As a consequence, it's not as complicated as it sounds, just add
	 * any lower rates to the ACK rate bitmap.
	 */
	if (IWM_RATE_11M_INDEX < lowest_present_cck)
		cck |= IWM_RATE_BIT_MSK(11) >> IWM_FIRST_CCK_RATE;
	if (IWM_RATE_5M_INDEX < lowest_present_cck)
		cck |= IWM_RATE_BIT_MSK(5) >> IWM_FIRST_CCK_RATE;
	if (IWM_RATE_2M_INDEX < lowest_present_cck)
		cck |= IWM_RATE_BIT_MSK(2) >> IWM_FIRST_CCK_RATE;
	/* 1M already there or needed so always add */
	cck |= IWM_RATE_BIT_MSK(1) >> IWM_FIRST_CCK_RATE;

	*cck_rates = cck;
	*ofdm_rates = ofdm;
}
4861
/*
 * Fill in the fields of a MAC context command that are shared by all
 * MAC context types.  This driver only creates BSS-station contexts,
 * so the MAC type is hard-wired to IWM_FW_MAC_TYPE_BSS_STA.
 */
static void
iwm_mvm_mac_ctxt_cmd_common(struct iwm_softc *sc, struct iwm_node *in,
	struct iwm_mac_ctx_cmd *cmd, uint32_t action)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = ic->ic_bss;
	int cck_ack_rates, ofdm_ack_rates;
	int i;

	/* Identify the context (id/color) and what to do with it
	 * (add/modify/remove). */
	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
	    in->in_color));
	cmd->action = htole32(action);

	cmd->mac_type = htole32(IWM_FW_MAC_TYPE_BSS_STA);
	cmd->tsf_id = htole32(in->in_tsfid);

	IEEE80211_ADDR_COPY(cmd->node_addr, ic->ic_myaddr);
	if (in->in_assoc) {
		IEEE80211_ADDR_COPY(cmd->bssid_addr, ni->ni_bssid);
	} else {
		/* Not associated yet: no BSSID to report. */
		memset(cmd->bssid_addr, 0, sizeof(cmd->bssid_addr));
	}
	/* Basic (control-response) rate bitmaps for CCK and OFDM. */
	iwm_mvm_ack_rates(sc, in, &cck_ack_rates, &ofdm_ack_rates);
	cmd->cck_rates = htole32(cck_ack_rates);
	cmd->ofdm_rates = htole32(ofdm_ack_rates);

	cmd->cck_short_preamble
	    = htole32((ic->ic_flags & IEEE80211_F_SHPREAMBLE)
	      ? IWM_MAC_FLG_SHORT_PREAMBLE : 0);
	cmd->short_slot
	    = htole32((ic->ic_flags & IEEE80211_F_SHSLOT)
	      ? IWM_MAC_FLG_SHORT_SLOT : 0);

	/* Fixed EDCA parameters for every AC (no WME negotiation here). */
	for (i = 0; i < IWM_AC_NUM+1; i++) {
		int txf = i;

		cmd->ac[txf].cw_min = htole16(0x0f);
		cmd->ac[txf].cw_max = htole16(0x3f);
		cmd->ac[txf].aifsn = 1;
		cmd->ac[txf].fifos_mask = (1 << txf);
		cmd->ac[txf].edca_txop = 0;
	}

	/* ERP protection (11b/g coexistence). */
	if (ic->ic_flags & IEEE80211_F_USEPROT)
		cmd->protection_flags |= htole32(IWM_MAC_PROT_FLG_TGG_PROTECT);

	cmd->filter_flags = htole32(IWM_MAC_FILTER_ACCEPT_GRP);
}
4910
4911 static int
iwm_mvm_mac_ctxt_send_cmd(struct iwm_softc * sc,struct iwm_mac_ctx_cmd * cmd)4912 iwm_mvm_mac_ctxt_send_cmd(struct iwm_softc *sc, struct iwm_mac_ctx_cmd *cmd)
4913 {
4914 int ret = iwm_mvm_send_cmd_pdu(sc, IWM_MAC_CONTEXT_CMD, IWM_CMD_SYNC,
4915 sizeof(*cmd), cmd);
4916 if (ret)
4917 DPRINTF(("%s: Failed to send MAC context (action:%d): %d\n",
4918 DEVNAME(sc), le32toh(cmd->action), ret));
4919 return ret;
4920 }
4921
/*
 * Fill the specific data for mac context of type station or p2p client
 */
static void
iwm_mvm_mac_ctxt_cmd_fill_sta(struct iwm_softc *sc, struct iwm_node *in,
	struct iwm_mac_data_sta *ctxt_sta, int force_assoc_off)
{
	struct ieee80211_node *ni = &in->in_ni;
	unsigned dtim_period, dtim_count;

	dtim_period = ni->ni_dtim_period;
	dtim_count = ni->ni_dtim_count;

	/* We need the dtim_period to set the MAC as associated */
	if (in->in_assoc && dtim_period && !force_assoc_off) {
		uint64_t tsf;
		uint32_t dtim_offs;

		/*
		 * The DTIM count counts down, so when it is N that means N
		 * more beacon intervals happen until the DTIM TBTT. Therefore
		 * add this to the current time. If that ends up being in the
		 * future, the firmware will handle it.
		 *
		 * Also note that the system_timestamp (which we get here as
		 * "sync_device_ts") and TSF timestamp aren't at exactly the
		 * same offset in the frame -- the TSF is at the first symbol
		 * of the TSF, the system timestamp is at signal acquisition
		 * time. This means there's an offset between them of at most
		 * a few hundred microseconds (24 * 8 bits + PLCP time gives
		 * 384us in the longest case), this is currently not relevant
		 * as the firmware wakes up around 2ms before the TBTT.
		 */
		dtim_offs = dtim_count * ni->ni_intval;
		/* convert TU to usecs */
		dtim_offs *= 1024;

		tsf = ni->ni_tstamp.tsf;

		ctxt_sta->dtim_tsf = htole64(tsf + dtim_offs);
		/*
		 * NOTE(review): dtim_time is stored with htole64() here but
		 * read back with le32toh() in the DPRINTF below.  If the
		 * field is 32-bit (as in the Linux driver, which uses
		 * cpu_to_le32 here) this is wrong on big-endian hosts --
		 * confirm against struct iwm_mac_data_sta.
		 */
		ctxt_sta->dtim_time = htole64(ni->ni_rstamp + dtim_offs);

		DPRINTF(("DTIM TBTT is 0x%llx/0x%x, offset %d\n",
		    (long long)le64toh(ctxt_sta->dtim_tsf),
		    le32toh(ctxt_sta->dtim_time), dtim_offs));

		ctxt_sta->is_assoc = htole32(1);
	} else {
		ctxt_sta->is_assoc = htole32(0);
	}

	/* Beacon/DTIM intervals plus their fixed-point reciprocals
	 * (firmware-side detail; see iwm_mvm_reciprocal()). */
	ctxt_sta->bi = htole32(ni->ni_intval);
	ctxt_sta->bi_reciprocal = htole32(iwm_mvm_reciprocal(ni->ni_intval));
	ctxt_sta->dtim_interval = htole32(ni->ni_intval * dtim_period);
	ctxt_sta->dtim_reciprocal =
	    htole32(iwm_mvm_reciprocal(ni->ni_intval * dtim_period));

	/* 10 = CONN_MAX_LISTEN_INTERVAL */
	ctxt_sta->listen_interval = htole32(10);
	ctxt_sta->assoc_id = htole32(ni->ni_associd);
}
4983
4984 static int
iwm_mvm_mac_ctxt_cmd_station(struct iwm_softc * sc,struct iwm_node * in,uint32_t action)4985 iwm_mvm_mac_ctxt_cmd_station(struct iwm_softc *sc, struct iwm_node *in,
4986 uint32_t action)
4987 {
4988 struct iwm_mac_ctx_cmd cmd;
4989
4990 memset(&cmd, 0, sizeof(cmd));
4991
4992 /* Fill the common data for all mac context types */
4993 iwm_mvm_mac_ctxt_cmd_common(sc, in, &cmd, action);
4994
4995 /* Allow beacons to pass through as long as we are not associated,or we
4996 * do not have dtim period information */
4997 if (!in->in_assoc || !sc->sc_ic.ic_dtim_period)
4998 cmd.filter_flags |= htole32(IWM_MAC_FILTER_IN_BEACON);
4999 else
5000 cmd.filter_flags &= ~htole32(IWM_MAC_FILTER_IN_BEACON);
5001
5002 /* Fill the data specific for station mode */
5003 iwm_mvm_mac_ctxt_cmd_fill_sta(sc, in,
5004 &cmd.sta, action == IWM_FW_CTXT_ACTION_ADD);
5005
5006 return iwm_mvm_mac_ctxt_send_cmd(sc, &cmd);
5007 }
5008
5009 static int
iwm_mvm_mac_ctx_send(struct iwm_softc * sc,struct iwm_node * in,uint32_t action)5010 iwm_mvm_mac_ctx_send(struct iwm_softc *sc, struct iwm_node *in, uint32_t action)
5011 {
5012 return iwm_mvm_mac_ctxt_cmd_station(sc, in, action);
5013 }
5014
5015 static int
iwm_mvm_mac_ctxt_add(struct iwm_softc * sc,struct iwm_node * in)5016 iwm_mvm_mac_ctxt_add(struct iwm_softc *sc, struct iwm_node *in)
5017 {
5018 int ret;
5019
5020 ret = iwm_mvm_mac_ctx_send(sc, in, IWM_FW_CTXT_ACTION_ADD);
5021 if (ret)
5022 return ret;
5023
5024 return 0;
5025 }
5026
5027 static int
iwm_mvm_mac_ctxt_changed(struct iwm_softc * sc,struct iwm_node * in)5028 iwm_mvm_mac_ctxt_changed(struct iwm_softc *sc, struct iwm_node *in)
5029 {
5030 return iwm_mvm_mac_ctx_send(sc, in, IWM_FW_CTXT_ACTION_MODIFY);
5031 }
5032
#if 0
/*
 * Remove the station MAC context from the firmware.  Currently unused:
 * iwm_release() resets the whole device instead of tearing the
 * contexts down individually.
 */
static int
iwm_mvm_mac_ctxt_remove(struct iwm_softc *sc, struct iwm_node *in)
{
	struct iwm_mac_ctx_cmd cmd;
	int ret;

	if (!in->in_uploaded) {
		/* Fixed: was "print(...)", which does not exist in the
		 * kernel and would not compile if this code were enabled. */
		printf("%s: attempt to remove !uploaded node %p", DEVNAME(sc), in);
		return EIO;
	}

	memset(&cmd, 0, sizeof(cmd));

	cmd.id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
	    in->in_color));
	cmd.action = htole32(IWM_FW_CTXT_ACTION_REMOVE);

	ret = iwm_mvm_send_cmd_pdu(sc,
	    IWM_MAC_CONTEXT_CMD, IWM_CMD_SYNC, sizeof(cmd), &cmd);
	if (ret) {
		aprint_error_dev(sc->sc_dev,
		    "Failed to remove MAC context: %d\n", ret);
		return ret;
	}
	in->in_uploaded = 0;

	return 0;
}
#endif
5063
5064 #define IWM_MVM_MISSED_BEACONS_THRESHOLD 8
5065
5066 static void
iwm_mvm_rx_missed_beacons_notif(struct iwm_softc * sc,struct iwm_rx_packet * pkt,struct iwm_rx_data * data)5067 iwm_mvm_rx_missed_beacons_notif(struct iwm_softc *sc,
5068 struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
5069 {
5070 struct iwm_missed_beacons_notif *mb = (void *)pkt->data;
5071
5072 DPRINTF(("missed bcn mac_id=%u, consecutive=%u (%u, %u, %u)\n",
5073 le32toh(mb->mac_id),
5074 le32toh(mb->consec_missed_beacons),
5075 le32toh(mb->consec_missed_beacons_since_last_rx),
5076 le32toh(mb->num_recvd_beacons),
5077 le32toh(mb->num_expected_beacons)));
5078
5079 /*
5080 * TODO: the threshold should be adjusted based on latency conditions,
5081 * and/or in case of a CS flow on one of the other AP vifs.
5082 */
5083 if (le32toh(mb->consec_missed_beacons_since_last_rx) >
5084 IWM_MVM_MISSED_BEACONS_THRESHOLD)
5085 ieee80211_beacon_miss(&sc->sc_ic);
5086 }
5087
5088 /*
5089 * END mvm/mac-ctxt.c
5090 */
5091
5092 /*
5093 * BEGIN mvm/quota.c
5094 */
5095
/*
 * Distribute the firmware's scheduling-session quota among the active
 * bindings.  With this driver's single interface, the one binding
 * backing "in" (if any) gets the whole quota; pass in == NULL to
 * invalidate all quotas.
 */
static int
iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_node *in)
{
	struct iwm_time_quota_cmd cmd;
	int i, idx, ret, num_active_macs, quota, quota_rem;
	int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
	int n_ifs[IWM_MAX_BINDINGS] = {0, };
	uint16_t id;

	memset(&cmd, 0, sizeof(cmd));

	/* currently, PHY ID == binding ID */
	if (in) {
		id = in->in_phyctxt->id;
		KASSERT(id < IWM_MAX_BINDINGS);
		colors[id] = in->in_phyctxt->color;

		/* NOTE(review): "if (1)" appears to be a leftover from the
		 * ported Linux code (which tested vif activity here). */
		if (1)
			n_ifs[id] = 1;
	}

	/*
	 * The FW's scheduling session consists of
	 * IWM_MVM_MAX_QUOTA fragments. Divide these fragments
	 * equally between all the bindings that require quota
	 */
	num_active_macs = 0;
	for (i = 0; i < IWM_MAX_BINDINGS; i++) {
		cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
		num_active_macs += n_ifs[i];
	}

	quota = 0;
	quota_rem = 0;
	if (num_active_macs) {
		quota = IWM_MVM_MAX_QUOTA / num_active_macs;
		quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
	}

	/* Emit one quota entry per binding that has a color assigned. */
	for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
		if (colors[i] < 0)
			continue;

		cmd.quotas[idx].id_and_color =
		    htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));

		if (n_ifs[i] <= 0) {
			cmd.quotas[idx].quota = htole32(0);
			cmd.quotas[idx].max_duration = htole32(0);
		} else {
			cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
			cmd.quotas[idx].max_duration = htole32(0);
		}
		idx++;
	}

	/* Give the remainder of the session to the first binding */
	cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);

	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
	    sizeof(cmd), &cmd);
	if (ret)
		DPRINTF(("%s: Failed to send quota: %d\n", DEVNAME(sc), ret));
	return ret;
}
5161
5162 /*
5163 * END mvm/quota.c
5164 */
5165
/*
 * ieee80211 routines
 */
5169
/*
 * Change to AUTH state in 80211 state machine. Roughly matches what
 * Linux does in bss_info_changed().  Uploads the MAC/PHY contexts,
 * binding and station to the firmware, then requests session
 * protection (time on channel) and waits until it is granted.
 */
static int
iwm_auth(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_node *in = (void *)ic->ic_bss;
	uint32_t duration;
	uint32_t min_duration;
	int error;

	in->in_assoc = 0;

	/* Accept multicast from the BSS we are about to join. */
	if ((error = iwm_allow_mcast(sc)) != 0)
		return error;

	if ((error = iwm_mvm_mac_ctxt_add(sc, in)) != 0) {
		DPRINTF(("%s: failed to add MAC\n", DEVNAME(sc)));
		return error;
	}

	/* Point PHY context 0 at the BSS channel. */
	if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
	    in->in_ni.ni_chan, 1, 1)) != 0) {
		DPRINTF(("%s: failed add phy ctxt\n", DEVNAME(sc)));
		return error;
	}
	in->in_phyctxt = &sc->sc_phyctxt[0];

	/* Bind the MAC context to the PHY context. */
	if ((error = iwm_mvm_binding_add_vif(sc, in)) != 0) {
		DPRINTF(("%s: binding cmd\n", DEVNAME(sc)));
		return error;
	}

	if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
		DPRINTF(("%s: failed to add MAC\n", DEVNAME(sc)));
		return error;
	}

	/*
	 * sc_auth_prot is a tiny handshake with the time-event code:
	 * 1 = protection requested, 2 = time event active, 0 = window
	 * over/missed, -1 = denied (values presumably set by the
	 * time-event notification handler -- confirm there).  Wait for
	 * any previous request to drain before issuing a new one.
	 */
	/* a bit superfluous? */
	while (sc->sc_auth_prot)
		tsleep(&sc->sc_auth_prot, 0, "iwmauth", 0);
	sc->sc_auth_prot = 1;

	/* Ask firmware to stay on channel long enough to authenticate. */
	duration = min(IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS,
	    200 + in->in_ni.ni_intval);
	min_duration = min(IWM_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS,
	    100 + in->in_ni.ni_intval);
	iwm_mvm_protect_session(sc, in, duration, min_duration, 500);

	while (sc->sc_auth_prot != 2) {
		/*
		 * well, meh, but if the kernel is sleeping for half a
		 * second, we have bigger problems
		 */
		if (sc->sc_auth_prot == 0) {
			DPRINTF(("%s: missed auth window!\n", DEVNAME(sc)));
			return ETIMEDOUT;
		} else if (sc->sc_auth_prot == -1) {
			DPRINTF(("%s: no time event, denied!\n", DEVNAME(sc)));
			sc->sc_auth_prot = 0;
			return EAUTH;
		}
		tsleep(&sc->sc_auth_prot, 0, "iwmau2", 0);
	}

	return 0;
}
5239
5240 static int
iwm_assoc(struct iwm_softc * sc)5241 iwm_assoc(struct iwm_softc *sc)
5242 {
5243 struct ieee80211com *ic = &sc->sc_ic;
5244 struct iwm_node *in = (void *)ic->ic_bss;
5245 int error;
5246
5247 if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
5248 DPRINTF(("%s: failed to update STA\n", DEVNAME(sc)));
5249 return error;
5250 }
5251
5252 in->in_assoc = 1;
5253 if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
5254 DPRINTF(("%s: failed to update MAC\n", DEVNAME(sc)));
5255 return error;
5256 }
5257
5258 return 0;
5259 }
5260
static int
iwm_release(struct iwm_softc *sc, struct iwm_node *in)
{
	/*
	 * Ok, so *technically* the proper set of calls for going
	 * from RUN back to SCAN is:
	 *
	 * iwm_mvm_power_mac_disable(sc, in);
	 * iwm_mvm_mac_ctxt_changed(sc, in);
	 * iwm_mvm_rm_sta(sc, in);
	 * iwm_mvm_update_quotas(sc, NULL);
	 * iwm_mvm_mac_ctxt_changed(sc, in);
	 * iwm_mvm_binding_remove_vif(sc, in);
	 * iwm_mvm_mac_ctxt_remove(sc, in);
	 *
	 * However, that freezes the device no matter which permutations
	 * and modifications are attempted. Obviously, this driver is missing
	 * something since it works in the Linux driver, but figuring out what
	 * is missing is a little more complicated. Now, since we're going
	 * back to nothing anyway, we'll just do a complete device reset.
	 */
	//iwm_mvm_flush_tx_path(sc, 0xf, 1);
	iwm_stop_device(sc);
	iwm_init_hw(sc);
	if (in)
		in->in_assoc = 0;
	return 0;

#if 0
	/* The "proper" teardown path, kept for reference; see above. */
	int error;

	iwm_mvm_power_mac_disable(sc, in);

	if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
		aprint_error_dev(sc->sc_dev, "mac ctxt change fail 1 %d\n",
		    error);
		return error;
	}

	if ((error = iwm_mvm_rm_sta(sc, in)) != 0) {
		aprint_error_dev(sc->sc_dev, "sta remove fail %d\n", error);
		return error;
	}
	/* NOTE(review): iwm_mvm_rm_sta() was already called just above;
	 * this second, unchecked call looks like a leftover -- confirm
	 * before ever re-enabling this code. */
	error = iwm_mvm_rm_sta(sc, in);
	in->in_assoc = 0;
	iwm_mvm_update_quotas(sc, NULL);
	if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
		aprint_error_dev(sc->sc_dev, "mac ctxt change fail 2 %d\n",
		    error);
		return error;
	}
	iwm_mvm_binding_remove_vif(sc, in);

	iwm_mvm_mac_ctxt_remove(sc, in);

	return error;
#endif
}
5320
5321
5322 static struct ieee80211_node *
iwm_node_alloc(struct ieee80211_node_table * nt)5323 iwm_node_alloc(struct ieee80211_node_table *nt)
5324 {
5325 return malloc(sizeof(struct iwm_node), M_80211_NODE, M_NOWAIT | M_ZERO);
5326 }
5327
5328 static void
iwm_calib_timeout(void * arg)5329 iwm_calib_timeout(void *arg)
5330 {
5331 struct iwm_softc *sc = arg;
5332 struct ieee80211com *ic = &sc->sc_ic;
5333 int s;
5334
5335 s = splnet();
5336 if (ic->ic_fixed_rate == -1
5337 && ic->ic_opmode == IEEE80211_M_STA
5338 && ic->ic_bss) {
5339 struct iwm_node *in = (void *)ic->ic_bss;
5340 ieee80211_amrr_choose(&sc->sc_amrr, &in->in_ni, &in->in_amn);
5341 }
5342 splx(s);
5343
5344 callout_schedule(&sc->sc_calib_to, hz/2);
5345 }
5346
/*
 * Build the link-quality (rate selection) command for a node: map the
 * node's 802.11 rate set to hardware rate indices, fill the firmware
 * rate table highest-rate-first, and initialize AMRR.
 */
static void
iwm_setrates(struct iwm_node *in)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211com *ic = ni->ni_ic;
	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
	struct iwm_lq_cmd *lq = &in->in_lq;
	int nrates = ni->ni_rates.rs_nrates;
	int i, ridx, tab = 0;
	int txant = 0;

	if (nrates > __arraycount(lq->rs_table) ||
	    nrates > IEEE80211_RATE_MAXSIZE) {
		DPRINTF(("%s: node supports %d rates, driver handles only "
		    "%zu\n", DEVNAME(sc), nrates, __arraycount(lq->rs_table)));
		return;
	}

	/* first figure out which rates we should support */
	/* (filling with -1 sets every byte to 0xff, i.e. "invalid") */
	memset(&in->in_ridx, -1, sizeof(in->in_ridx));
	for (i = 0; i < nrates; i++) {
		int rate = ni->ni_rates.rs_rates[i] & IEEE80211_RATE_VAL;

		/* Map 802.11 rate to HW rate index. */
		for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
			if (iwm_rates[ridx].rate == rate)
				break;
		if (ridx > IWM_RIDX_MAX)
			DPRINTF(("%s: WARNING: device rate for %d not found!\n",
			    DEVNAME(sc), rate));
		else
			in->in_ridx[i] = ridx;
	}

	/* then construct a lq_cmd based on those */
	memset(lq, 0, sizeof(*lq));
	lq->sta_id = IWM_STATION_ID;

	/*
	 * are these used? (we don't do SISO or MIMO)
	 * need to set them to non-zero, though, or we get an error.
	 */
	lq->single_stream_ant_msk = 1;
	lq->dual_stream_ant_msk = 1;

	/*
	 * Build the actual rate selection table.
	 * The lowest bits are the rates. Additionally,
	 * CCK needs bit 9 to be set. The rest of the bits
	 * we add to the table select the tx antenna
	 * Note that we add the rates in the highest rate first
	 * (opposite of ni_rates).
	 */
	for (i = 0; i < nrates; i++) {
		int nextant;

		/* Rotate through the valid TX antennas. */
		if (txant == 0)
			txant = IWM_FW_VALID_TX_ANT(sc);
		nextant = 1<<(ffs(txant)-1);
		txant &= ~nextant;

		ridx = in->in_ridx[(nrates-1)-i];
		tab = iwm_rates[ridx].plcp;
		tab |= nextant << IWM_RATE_MCS_ANT_POS;
		if (IWM_RIDX_IS_CCK(ridx))
			tab |= IWM_RATE_MCS_CCK_MSK;
		DPRINTFN(2, ("station rate %d %x\n", i, tab));
		lq->rs_table[i] = htole32(tab);
	}
	/* then fill the rest with the lowest possible rate */
	/* ("tab" still holds the last -- i.e. lowest -- entry written) */
	for (i = nrates; i < __arraycount(lq->rs_table); i++) {
		KASSERT(tab != 0);
		lq->rs_table[i] = htole32(tab);
	}

	/* init amrr */
	ieee80211_amrr_node_init(&sc->sc_amrr, &in->in_amn);
	/* Start at lowest available bit-rate, AMRR will raise. */
	ni->ni_txrate = 0;
}
5427
5428 static int
iwm_media_change(struct ifnet * ifp)5429 iwm_media_change(struct ifnet *ifp)
5430 {
5431 struct iwm_softc *sc = ifp->if_softc;
5432 struct ieee80211com *ic = &sc->sc_ic;
5433 uint8_t rate, ridx;
5434 int error;
5435
5436 error = ieee80211_media_change(ifp);
5437 if (error != ENETRESET)
5438 return error;
5439
5440 if (ic->ic_fixed_rate != -1) {
5441 rate = ic->ic_sup_rates[ic->ic_curmode].
5442 rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL;
5443 /* Map 802.11 rate to HW rate index. */
5444 for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
5445 if (iwm_rates[ridx].rate == rate)
5446 break;
5447 sc->sc_fixed_ridx = ridx;
5448 }
5449
5450 if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
5451 (IFF_UP | IFF_RUNNING)) {
5452 iwm_stop(ifp, 0);
5453 error = iwm_init(ifp);
5454 }
5455 return error;
5456 }
5457
/*
 * Workqueue half of the net80211 state transition: performs the actual
 * (possibly sleeping) firmware work for the state change queued by
 * iwm_newstate(), then calls the stacked sc_newstate() handler.
 */
static void
iwm_newstate_cb(struct work *wk, void *v)
{
	struct iwm_softc *sc = v;
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_newstate_state *iwmns = (void *)wk;
	enum ieee80211_state nstate = iwmns->ns_nstate;
	int generation = iwmns->ns_generation;
	struct iwm_node *in;
	int arg = iwmns->ns_arg;
	int error;

	kmem_free(iwmns, sizeof(*iwmns));

	DPRINTF(("Prepare to switch state %d->%d\n", ic->ic_state, nstate));
	/* A generation bump (iwm_stop/iwm_init) invalidates queued work. */
	if (sc->sc_generation != generation) {
		DPRINTF(("newstate_cb: someone pulled the plug meanwhile\n"));
		if (nstate == IEEE80211_S_INIT) {
			DPRINTF(("newstate_cb: nstate == IEEE80211_S_INIT: calling sc_newstate()\n"));
			sc->sc_newstate(ic, nstate, arg);
		}
		return;
	}

	DPRINTF(("switching state %d->%d\n", ic->ic_state, nstate));

	/* disable beacon filtering if we're hopping out of RUN */
	if (ic->ic_state == IEEE80211_S_RUN && nstate != ic->ic_state) {
		iwm_mvm_disable_beacon_filter(sc, (void *)ic->ic_bss);

		if (((in = (void *)ic->ic_bss) != NULL))
			in->in_assoc = 0;
		/* Full device reset; see iwm_release() for why. */
		iwm_release(sc, NULL);

		/*
		 * It's impossible to directly go RUN->SCAN. If we iwm_release()
		 * above then the card will be completely reinitialized,
		 * so the driver must do everything necessary to bring the card
		 * from INIT to SCAN.
		 *
		 * Additionally, upon receiving deauth frame from AP,
		 * OpenBSD 802.11 stack puts the driver in IEEE80211_S_AUTH
		 * state. This will also fail with this driver, so bring the FSM
		 * from IEEE80211_S_RUN to IEEE80211_S_SCAN in this case as well.
		 */
		if (nstate == IEEE80211_S_SCAN ||
		    nstate == IEEE80211_S_AUTH ||
		    nstate == IEEE80211_S_ASSOC) {
			DPRINTF(("Force transition to INIT; MGT=%d\n", arg));
			sc->sc_newstate(ic, IEEE80211_S_INIT, arg);
			DPRINTF(("Going INIT->SCAN\n"));
			nstate = IEEE80211_S_SCAN;
		}
	}

	switch (nstate) {
	case IEEE80211_S_INIT:
		sc->sc_scanband = 0;
		break;

	case IEEE80211_S_SCAN:
		/* Non-zero sc_scanband means a scan is already running. */
		if (sc->sc_scanband)
			break;

		if ((error = iwm_mvm_scan_request(sc, IEEE80211_CHAN_2GHZ,
		    ic->ic_des_esslen != 0,
		    ic->ic_des_essid, ic->ic_des_esslen)) != 0) {
			DPRINTF(("%s: could not initiate scan\n", DEVNAME(sc)));
			return;
		}
		/* Scan completion (iwm_endscan_cb) advances the state. */
		ic->ic_state = nstate;
		return;

	case IEEE80211_S_AUTH:
		if ((error = iwm_auth(sc)) != 0) {
			DPRINTF(("%s: could not move to auth state: %d\n",
			    DEVNAME(sc), error));
			return;
		}

		break;

	case IEEE80211_S_ASSOC:
		if ((error = iwm_assoc(sc)) != 0) {
			DPRINTF(("%s: failed to associate: %d\n", DEVNAME(sc),
			    error));
			return;
		}
		break;

	case IEEE80211_S_RUN: {
		struct iwm_host_cmd cmd = {
			.id = IWM_LQ_CMD,
			.len = { sizeof(in->in_lq), },
			.flags = IWM_CMD_SYNC,
		};

		/* Power management, beacon filtering, quota, rates. */
		in = (struct iwm_node *)ic->ic_bss;
		iwm_mvm_power_mac_update_mode(sc, in);
		iwm_mvm_enable_beacon_filter(sc, in);
		iwm_mvm_update_quotas(sc, in);
		iwm_setrates(in);

		/* Push the link-quality table built by iwm_setrates(). */
		cmd.data[0] = &in->in_lq;
		if ((error = iwm_send_cmd(sc, &cmd)) != 0) {
			DPRINTF(("%s: IWM_LQ_CMD failed\n", DEVNAME(sc)));
		}

		/* Kick off periodic rate adaptation. */
		callout_schedule(&sc->sc_calib_to, hz/2);

		break; }

	default:
		DPRINTF(("%s: unsupported state %d\n", DEVNAME(sc), nstate));
		break;
	}

	sc->sc_newstate(ic, nstate, arg);
}
5577
5578 static int
iwm_newstate(struct ieee80211com * ic,enum ieee80211_state nstate,int arg)5579 iwm_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
5580 {
5581 struct iwm_newstate_state *iwmns;
5582 struct ifnet *ifp = IC2IFP(ic);
5583 struct iwm_softc *sc = ifp->if_softc;
5584
5585 callout_stop(&sc->sc_calib_to);
5586
5587 iwmns = kmem_intr_alloc(sizeof(*iwmns), KM_NOSLEEP);
5588 if (!iwmns) {
5589 DPRINTF(("%s: allocating state cb mem failed\n", DEVNAME(sc)));
5590 return ENOMEM;
5591 }
5592
5593 iwmns->ns_nstate = nstate;
5594 iwmns->ns_arg = arg;
5595 iwmns->ns_generation = sc->sc_generation;
5596
5597 workqueue_enqueue(sc->sc_nswq, &iwmns->ns_wk, NULL);
5598
5599 return 0;
5600 }
5601
5602 static void
iwm_endscan_cb(struct work * work __unused,void * arg)5603 iwm_endscan_cb(struct work *work __unused, void *arg)
5604 {
5605 struct iwm_softc *sc = arg;
5606 struct ieee80211com *ic = &sc->sc_ic;
5607 int done;
5608
5609 DPRINTF(("scan ended\n"));
5610
5611 if (sc->sc_scanband == IEEE80211_CHAN_2GHZ &&
5612 sc->sc_nvm.sku_cap_band_52GHz_enable) {
5613 int error;
5614 done = 0;
5615 if ((error = iwm_mvm_scan_request(sc,
5616 IEEE80211_CHAN_5GHZ, ic->ic_des_esslen != 0,
5617 ic->ic_des_essid, ic->ic_des_esslen)) != 0) {
5618 DPRINTF(("%s: could not initiate scan\n", DEVNAME(sc)));
5619 done = 1;
5620 }
5621 } else {
5622 done = 1;
5623 }
5624
5625 if (done) {
5626 if (!sc->sc_scanband) {
5627 ieee80211_cancel_scan(ic);
5628 } else {
5629 ieee80211_end_scan(ic);
5630 }
5631 sc->sc_scanband = 0;
5632 }
5633 }
5634
/*
 * Bring the hardware all the way up: run the INIT firmware for
 * calibration, restart with the regular firmware, then configure
 * antennas, PHY contexts, power and TX queues.  The ordering of these
 * steps matters; on any failure the device is stopped again.
 */
static int
iwm_init_hw(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	int error, i, qid;

	if ((error = iwm_preinit(sc)) != 0)
		return error;

	if ((error = iwm_start_hw(sc)) != 0)
		return error;

	/* Run the INIT firmware image (calibration etc.). */
	if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
		return error;
	}

	/*
	 * should stop and start HW since that INIT
	 * image just loaded
	 */
	iwm_stop_device(sc);
	if ((error = iwm_start_hw(sc)) != 0) {
		aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
		return error;
	}

	/* omstart, this time with the regular firmware */
	error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_REGULAR);
	if (error) {
		aprint_error_dev(sc->sc_dev, "could not load firmware\n");
		goto error;
	}

	if ((error = iwm_send_tx_ant_cfg(sc, IWM_FW_VALID_TX_ANT(sc))) != 0)
		goto error;

	/* Send phy db control command and then phy db calibration*/
	if ((error = iwm_send_phy_db_data(sc)) != 0)
		goto error;

	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0)
		goto error;

	/* Add auxiliary station for scanning */
	if ((error = iwm_mvm_add_aux_sta(sc)) != 0)
		goto error;

	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
		/*
		 * The channel used here isn't relevant as it's
		 * going to be overwritten in the other flows.
		 * For now use the first channel we have.
		 */
		if ((error = iwm_mvm_phy_ctxt_add(sc,
		    &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
			goto error;
	}

	error = iwm_mvm_power_update_device(sc);
	if (error)
		goto error;

	/* Mark TX rings as active. */
	for (qid = 0; qid < 4; qid++) {
		iwm_enable_txq(sc, qid, qid);
	}

	return 0;

 error:
	iwm_stop_device(sc);
	return error;
}
5708
5709 /* Allow multicast from our BSSID. */
5710 static int
iwm_allow_mcast(struct iwm_softc * sc)5711 iwm_allow_mcast(struct iwm_softc *sc)
5712 {
5713 struct ieee80211com *ic = &sc->sc_ic;
5714 struct ieee80211_node *ni = ic->ic_bss;
5715 struct iwm_mcast_filter_cmd *cmd;
5716 size_t size;
5717 int error;
5718
5719 size = roundup(sizeof(*cmd), 4);
5720 cmd = kmem_intr_zalloc(size, KM_NOSLEEP);
5721 if (cmd == NULL)
5722 return ENOMEM;
5723 cmd->filter_own = 1;
5724 cmd->port_id = 0;
5725 cmd->count = 0;
5726 cmd->pass_all = 1;
5727 IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
5728
5729 error = iwm_mvm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
5730 IWM_CMD_SYNC, size, cmd);
5731 kmem_intr_free(cmd, size);
5732 return error;
5733 }
5734
5735 /*
5736 * ifnet interfaces
5737 */
5738
5739 static int
iwm_init(struct ifnet * ifp)5740 iwm_init(struct ifnet *ifp)
5741 {
5742 struct iwm_softc *sc = ifp->if_softc;
5743 int error;
5744
5745 if (sc->sc_flags & IWM_FLAG_HW_INITED) {
5746 return 0;
5747 }
5748 sc->sc_generation++;
5749 sc->sc_flags &= ~IWM_FLAG_STOPPED;
5750
5751 if ((error = iwm_init_hw(sc)) != 0) {
5752 iwm_stop(ifp, 1);
5753 return error;
5754 }
5755
5756 /*
5757 * Ok, firmware loaded and we are jogging
5758 */
5759
5760 ifp->if_flags &= ~IFF_OACTIVE;
5761 ifp->if_flags |= IFF_RUNNING;
5762
5763 ieee80211_begin_scan(&sc->sc_ic, 0);
5764 sc->sc_flags |= IWM_FLAG_HW_INITED;
5765
5766 return 0;
5767 }
5768
/*
 * Dequeue packets from sendq and call send.
 * mostly from iwn
 */
static void
iwm_start(struct ifnet *ifp)
{
	struct iwm_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni;
	struct ether_header *eh;
	struct mbuf *m;
	int ac;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		/* why isn't this done per-queue? */
		if (sc->qfullmsk != 0) {
			/* Some TX ring is full: back off until TX completes. */
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* need to send management frames even if we're not RUNning */
		IF_DEQUEUE(&ic->ic_mgtq, m);
		if (m) {
			/* Destination node was stashed in the mbuf ctx. */
			ni = M_GETCTX(m, struct ieee80211_node *);
			ac = 0;
			goto sendit;
		}
		if (ic->ic_state != IEEE80211_S_RUN) {
			break;
		}

		/* Data frames: pull from the interface send queue. */
		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (!m)
			break;
		/* Make sure the Ethernet header is contiguous. */
		if (m->m_len < sizeof (*eh) &&
		    (m = m_pullup(m, sizeof (*eh))) == NULL) {
			ifp->if_oerrors++;
			continue;
		}
		if (ifp->if_bpf != NULL)
			bpf_mtap(ifp, m);

		eh = mtod(m, struct ether_header *);
		ni = ieee80211_find_txnode(ic, eh->ether_dhost);
		if (ni == NULL) {
			m_freem(m);
			ifp->if_oerrors++;
			continue;
		}
		/* classify mbuf so we can find which tx ring to use */
		if (ieee80211_classify(ic, m, ni) != 0) {
			m_freem(m);
			ieee80211_free_node(ni);
			ifp->if_oerrors++;
			continue;
		}

		/* No QoS encapsulation for EAPOL frames. */
		ac = (eh->ether_type != htons(ETHERTYPE_PAE)) ?
		    M_WME_GETAC(m) : WME_AC_BE;

		/* Encapsulate into an 802.11 frame. */
		if ((m = ieee80211_encap(ic, m, ni)) == NULL) {
			ieee80211_free_node(ni);
			ifp->if_oerrors++;
			continue;
		}

 sendit:
		if (ic->ic_rawbpf != NULL)
			bpf_mtap3(ic->ic_rawbpf, m);
		if (iwm_tx(sc, m, ni, ac) != 0) {
			ieee80211_free_node(ni);
			ifp->if_oerrors++;
			continue;
		}

		/* Arm the TX watchdog for this frame. */
		if (ifp->if_flags & IFF_UP) {
			sc->sc_tx_timer = 15;
			ifp->if_timer = 1;
		}
	}

	return;
}
5857
/*
 * Take the interface down and stop the device.  Bumping sc_generation
 * makes any still-queued newstate work bail out (see iwm_newstate_cb).
 * The "disable" argument is currently unused.
 */
static void
iwm_stop(struct ifnet *ifp, int disable)
{
	struct iwm_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;

	sc->sc_flags &= ~IWM_FLAG_HW_INITED;
	sc->sc_flags |= IWM_FLAG_STOPPED;
	sc->sc_generation++;
	sc->sc_scanband = 0;
	sc->sc_auth_prot = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	/* Drop the 802.11 state machine back to INIT. */
	if (ic->ic_state != IEEE80211_S_INIT)
		ieee80211_new_state(ic, IEEE80211_S_INIT, -1);

	callout_stop(&sc->sc_calib_to);
	ifp->if_timer = sc->sc_tx_timer = 0;
	iwm_stop_device(sc);
}
5878
5879 static void
iwm_watchdog(struct ifnet * ifp)5880 iwm_watchdog(struct ifnet *ifp)
5881 {
5882 struct iwm_softc *sc = ifp->if_softc;
5883
5884 ifp->if_timer = 0;
5885 if (sc->sc_tx_timer > 0) {
5886 if (--sc->sc_tx_timer == 0) {
5887 aprint_error_dev(sc->sc_dev, "device timeout\n");
5888 #ifdef IWM_DEBUG
5889 iwm_nic_error(sc);
5890 #endif
5891 ifp->if_flags &= ~IFF_UP;
5892 iwm_stop(ifp, 1);
5893 ifp->if_oerrors++;
5894 return;
5895 }
5896 ifp->if_timer = 1;
5897 }
5898
5899 ieee80211_watchdog(&sc->sc_ic);
5900 }
5901
/*
 * Interface ioctl handler: bring the device up/down to match IFF_UP,
 * maintain multicast filters, and forward everything else to the
 * net80211 (or, before attach completes, plain Ethernet) layer.
 */
static int
iwm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct iwm_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	const struct sockaddr *sa;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		/* FALLTHROUGH */
	case SIOCSIFFLAGS:
		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
			break;
		/* Bring the device state in line with IFF_UP. */
		if (ifp->if_flags & IFF_UP) {
			if (!(ifp->if_flags & IFF_RUNNING)) {
				if ((error = iwm_init(ifp)) != 0)
					ifp->if_flags &= ~IFF_UP;
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				iwm_stop(ifp, 1);
		}
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (!ISSET(sc->sc_flags, IWM_FLAG_ATTACHED)) {
			error = ENXIO;
			break;
		}
		/* NOTE(review): SIOCADDMULTI is passed for both the add and
		 * delete cases; this assumes ifreq_getaddr() yields the same
		 * address field for both requests -- confirm. */
		sa = ifreq_getaddr(SIOCADDMULTI, (struct ifreq *)data);
		error = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(sa, &sc->sc_ec) :
		    ether_delmulti(sa, &sc->sc_ec);
		if (error == ENETRESET)
			error = 0;
		break;

	default:
		/* Before attach completes, only plain Ethernet ioctls work. */
		if (!ISSET(sc->sc_flags, IWM_FLAG_ATTACHED)) {
			error = ether_ioctl(ifp, cmd, data);
			break;
		}
		error = ieee80211_ioctl(ic, cmd, data);
		break;
	}

	/* ENETRESET from a handler means "settings changed, restart". */
	if (error == ENETRESET) {
		error = 0;
		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
		    (IFF_UP | IFF_RUNNING)) {
			iwm_stop(ifp, 0);
			error = iwm_init(ifp);
		}
	}

	splx(s);
	return error;
}
5965
5966 /*
5967 * The interrupt side of things
5968 */
5969
5970 /*
5971 * error dumping routines are from iwlwifi/mvm/utils.c
5972 */
5973
5974 /*
5975 * Note: This structure is read from the device with IO accesses,
5976 * and the reading already does the endian conversion. As it is
5977 * read with uint32_t-sized accesses, any members with a different size
5978 * need to be ordered correctly though!
5979 */
struct iwm_error_event_table {
	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
	uint32_t error_id;	/* type of error */
	uint32_t pc;		/* program counter */
	uint32_t blink1;	/* branch link */
	uint32_t blink2;	/* branch link */
	uint32_t ilink1;	/* interrupt link */
	uint32_t ilink2;	/* interrupt link */
	uint32_t data1;		/* error-specific data */
	uint32_t data2;		/* error-specific data */
	uint32_t data3;		/* error-specific data */
	uint32_t bcon_time;	/* beacon timer */
	uint32_t tsf_low;	/* network timestamp function timer */
	uint32_t tsf_hi;	/* network timestamp function timer */
	uint32_t gp1;		/* GP1 timer register */
	uint32_t gp2;		/* GP2 timer register */
	uint32_t gp3;		/* GP3 timer register */
	uint32_t ucode_ver;	/* uCode version */
	uint32_t hw_ver;	/* HW Silicon version */
	uint32_t brd_ver;	/* HW board version */
	uint32_t log_pc;	/* log program counter */
	uint32_t frame_ptr;	/* frame pointer */
	uint32_t stack_ptr;	/* stack pointer */
	uint32_t hcmd;		/* last host command header */
	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
				 * rxtx_flag */
	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
				 * host_flag */
	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
				 * enc_flag */
	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
				 * time_flag */
	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
				 * wico interrupt */
	uint32_t isr_pref;	/* isr status register LMPM_NIC_PREF_STAT */
	uint32_t wait_event;	/* wait event() caller address */
	uint32_t l2p_control;	/* L2pControlField */
	uint32_t l2p_duration;	/* L2pDurationField */
	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
				 * (LMPM_PMG_SEL) */
	uint32_t u_timestamp;	/* indicate when the date and time of the
				 * compilation */
	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
} __packed;

/* Byte offsets/sizes within the firmware error log (from iwlwifi). */
#define ERROR_START_OFFSET (1 * sizeof(uint32_t))
#define ERROR_ELEM_SIZE (7 * sizeof(uint32_t))
6029
6030 #ifdef IWM_DEBUG
/*
 * Map of firmware SYSASSERT error ids to symbolic names, consulted by
 * iwm_desc_lookup() when decoding the device error log.  The final
 * entry ("ADVANCED_SYSASSERT", num 0) is the catch-all returned when
 * no id matches.
 */
static const struct {
	const char *name;
	uint8_t num;
} advanced_lookup[] = {
	{ "NMI_INTERRUPT_WDG", 0x34 },
	{ "SYSASSERT", 0x35 },
	{ "UCODE_VERSION_MISMATCH", 0x37 },
	{ "BAD_COMMAND", 0x38 },
	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
	{ "FATAL_ERROR", 0x3D },
	{ "NMI_TRM_HW_ERR", 0x46 },
	{ "NMI_INTERRUPT_TRM", 0x4C },
	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
	{ "NMI_INTERRUPT_HOST", 0x66 },
	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
	{ "ADVANCED_SYSASSERT", 0 },
};
6052
6053 static const char *
iwm_desc_lookup(uint32_t num)6054 iwm_desc_lookup(uint32_t num)
6055 {
6056 int i;
6057
6058 for (i = 0; i < __arraycount(advanced_lookup) - 1; i++)
6059 if (advanced_lookup[i].num == num)
6060 return advanced_lookup[i].name;
6061
6062 /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
6063 return advanced_lookup[i].name;
6064 }
6065
6066 /*
6067 * Support for dumping the error log seemed like a good idea ...
6068 * but it's mostly hex junk and the only sensible thing is the
6069 * hw/ucode revision (which we know anyway). Since it's here,
6070 * I'll just leave it in, just in case e.g. the Intel guys want to
6071 * help us decipher some "ADVANCED_SYSASSERT" later.
6072 */
static void
iwm_nic_error(struct iwm_softc *sc)
{
	struct iwm_error_event_table table;
	uint32_t base;

	aprint_error_dev(sc->sc_dev, "dumping device error log\n");
	/* The firmware told us where its error event table lives. */
	base = sc->sc_uc.uc_error_event_table;
	/* Reject pointers outside the expected device memory window. */
	if (base < 0x800000 || base >= 0x80C000) {
		aprint_error_dev(sc->sc_dev,
		    "Not valid error log pointer 0x%08x\n", base);
		return;
	}

	/* iwm_read_mem() takes a count of 32-bit words, not bytes. */
	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t)) != 0) {
		aprint_error_dev(sc->sc_dev, "reading errlog failed\n");
		return;
	}

	if (!table.valid) {
		aprint_error_dev(sc->sc_dev, "errlog not found, skipping\n");
		return;
	}

	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		aprint_error_dev(sc->sc_dev, "Start IWL Error Log Dump:\n");
		aprint_error_dev(sc->sc_dev, "Status: 0x%x, count: %d\n",
		    sc->sc_flags, table.valid);
	}

	/* One line per table field, mirroring the iwlwifi dump format. */
	aprint_error_dev(sc->sc_dev, "%08X | %-28s\n", table.error_id,
	    iwm_desc_lookup(table.error_id));
	aprint_error_dev(sc->sc_dev, "%08X | uPc\n", table.pc);
	aprint_error_dev(sc->sc_dev, "%08X | branchlink1\n", table.blink1);
	aprint_error_dev(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
	aprint_error_dev(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
	aprint_error_dev(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
	aprint_error_dev(sc->sc_dev, "%08X | data1\n", table.data1);
	aprint_error_dev(sc->sc_dev, "%08X | data2\n", table.data2);
	aprint_error_dev(sc->sc_dev, "%08X | data3\n", table.data3);
	aprint_error_dev(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
	aprint_error_dev(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
	aprint_error_dev(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
	aprint_error_dev(sc->sc_dev, "%08X | time gp1\n", table.gp1);
	aprint_error_dev(sc->sc_dev, "%08X | time gp2\n", table.gp2);
	aprint_error_dev(sc->sc_dev, "%08X | time gp3\n", table.gp3);
	aprint_error_dev(sc->sc_dev, "%08X | uCode version\n", table.ucode_ver);
	aprint_error_dev(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
	aprint_error_dev(sc->sc_dev, "%08X | board version\n", table.brd_ver);
	aprint_error_dev(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
	aprint_error_dev(sc->sc_dev, "%08X | isr0\n", table.isr0);
	aprint_error_dev(sc->sc_dev, "%08X | isr1\n", table.isr1);
	aprint_error_dev(sc->sc_dev, "%08X | isr2\n", table.isr2);
	aprint_error_dev(sc->sc_dev, "%08X | isr3\n", table.isr3);
	aprint_error_dev(sc->sc_dev, "%08X | isr4\n", table.isr4);
	aprint_error_dev(sc->sc_dev, "%08X | isr_pref\n", table.isr_pref);
	aprint_error_dev(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
	aprint_error_dev(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
	aprint_error_dev(sc->sc_dev, "%08X | l2p_duration\n",
	    table.l2p_duration);
	aprint_error_dev(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
	aprint_error_dev(sc->sc_dev, "%08X | l2p_addr_match\n",
	    table.l2p_addr_match);
	aprint_error_dev(sc->sc_dev, "%08X | lmpm_pmg_sel\n",
	    table.lmpm_pmg_sel);
	aprint_error_dev(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
	aprint_error_dev(sc->sc_dev, "%08X | flow_handler\n",
	    table.flow_handler);
}
6142 #endif
6143
/*
 * Sync the portion of an RX command response that follows the packet
 * header, then point _var_ at it.  The payload begins immediately
 * after the struct iwm_rx_packet header in the DMA buffer.
 */
#define SYNC_RESP_STRUCT(_var_, _pkt_)					\
do {									\
	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
	    sizeof(*(_var_)), BUS_DMASYNC_POSTREAD);			\
	_var_ = (void *)((_pkt_)+1);					\
} while (/*CONSTCOND*/0)

/*
 * Variable-length variant of SYNC_RESP_STRUCT.  Fix: sync the _len_
 * bytes the caller asked for; the old "sizeof(len)" only synced the
 * size of a call-site variable named "len" (a few bytes), leaving
 * longer payloads unsynced before CPU access.
 */
#define SYNC_RESP_PTR(_ptr_, _len_, _pkt_)				\
do {									\
	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
	    (_len_), BUS_DMASYNC_POSTREAD);				\
	_ptr_ = (void *)((_pkt_)+1);					\
} while (/*CONSTCOND*/0)

/* Advance the RX ring consumer index, wrapping at the ring size. */
#define ADVANCE_RXQ(sc) (sc->rxq.cur = (sc->rxq.cur + 1) % IWM_RX_RING_COUNT);
6159
6160 /*
6161 * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
6162 * Basic structure from if_iwn
6163 */
static void
iwm_notif_intr(struct iwm_softc *sc)
{
	uint16_t hw;

	bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
	    0, sc->rxq.stat_dma.size, BUS_DMASYNC_POSTREAD);

	/* hw = ring index of the last buffer the firmware closed. */
	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
	while (sc->rxq.cur != hw) {
		struct iwm_rx_data *data = &sc->rxq.data[sc->rxq.cur];
		struct iwm_rx_packet *pkt, tmppkt;
		struct iwm_cmd_response *cresp;
		int qid, idx;

		/* Sync just the packet header first; payload syncs are
		 * done per-notification via SYNC_RESP_STRUCT. */
		bus_dmamap_sync(sc->sc_dmat, data->map, 0, sizeof(*pkt),
		    BUS_DMASYNC_POSTREAD);
		pkt = mtod(data->m, struct iwm_rx_packet *);

		qid = pkt->hdr.qid & ~0x80;
		idx = pkt->hdr.idx;

		DPRINTFN(12, ("rx packet qid=%d idx=%d flags=%x type=%x %d %d\n",
		    pkt->hdr.qid & ~0x80, pkt->hdr.idx, pkt->hdr.flags,
		    pkt->hdr.code, sc->rxq.cur, hw));

		/*
		 * randomly get these from the firmware, no idea why.
		 * they at least seem harmless, so just ignore them for now
		 */
		if (__predict_false((pkt->hdr.code == 0 && qid == 0 && idx == 0)
		    || pkt->len_n_flags == htole32(0x55550000))) {
			ADVANCE_RXQ(sc);
			continue;
		}

		switch (pkt->hdr.code) {
		case IWM_REPLY_RX_PHY_CMD:
			iwm_mvm_rx_rx_phy_cmd(sc, pkt, data);
			break;

		case IWM_REPLY_RX_MPDU_CMD:
			tmppkt = *pkt; // XXX m is freed by ieee80211_input()
			iwm_mvm_rx_rx_mpdu(sc, pkt, data);
			pkt = &tmppkt;
			break;

		case IWM_TX_CMD:
			iwm_mvm_rx_tx_cmd(sc, pkt, data);
			break;

		case IWM_MISSED_BEACONS_NOTIFICATION:
			iwm_mvm_rx_missed_beacons_notif(sc, pkt, data);
			break;

		/* Firmware booted: record table pointers, wake waiter. */
		case IWM_MVM_ALIVE: {
			struct iwm_mvm_alive_resp *resp;
			SYNC_RESP_STRUCT(resp, pkt);

			sc->sc_uc.uc_error_event_table
			    = le32toh(resp->error_event_table_ptr);
			sc->sc_uc.uc_log_event_table
			    = le32toh(resp->log_event_table_ptr);
			sc->sched_base = le32toh(resp->scd_base_ptr);
			sc->sc_uc.uc_ok = resp->status == IWM_ALIVE_STATUS_OK;

			sc->sc_uc.uc_intr = 1;
			wakeup(&sc->sc_uc);
			break; }

		case IWM_CALIB_RES_NOTIF_PHY_DB: {
			struct iwm_calib_res_notif_phy_db *phy_db_notif;
			SYNC_RESP_STRUCT(phy_db_notif, pkt);

			/* Sync the variable-length calibration payload too. */
			uint16_t size = le16toh(phy_db_notif->length);
			bus_dmamap_sync(sc->sc_dmat, data->map,
			    sizeof(*pkt) + sizeof(*phy_db_notif),
			    size, BUS_DMASYNC_POSTREAD);
			iwm_phy_db_set_section(sc, phy_db_notif, size);

			break; }

		case IWM_STATISTICS_NOTIFICATION: {
			struct iwm_notif_statistics *stats;
			SYNC_RESP_STRUCT(stats, pkt);
			memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
			sc->sc_noise = iwm_get_noise(&stats->rx.general);
			break; }

		case IWM_NVM_ACCESS_CMD:
			/* Copy the response out for the synchronous
			 * command waiter identified by (qid, idx). */
			if (sc->sc_wantresp == ((qid << 16) | idx)) {
				bus_dmamap_sync(sc->sc_dmat, data->map, 0,
				    sizeof(sc->sc_cmd_resp),
				    BUS_DMASYNC_POSTREAD);
				memcpy(sc->sc_cmd_resp,
				    pkt, sizeof(sc->sc_cmd_resp));
			}
			break;

		/* Generic command completions: hand the status word to
		 * a waiting synchronous sender, if any. */
		case IWM_PHY_CONFIGURATION_CMD:
		case IWM_TX_ANT_CONFIGURATION_CMD:
		case IWM_ADD_STA:
		case IWM_MAC_CONTEXT_CMD:
		case IWM_REPLY_SF_CFG_CMD:
		case IWM_POWER_TABLE_CMD:
		case IWM_PHY_CONTEXT_CMD:
		case IWM_BINDING_CONTEXT_CMD:
		case IWM_TIME_EVENT_CMD:
		case IWM_SCAN_REQUEST_CMD:
		case IWM_REPLY_BEACON_FILTERING_CMD:
		case IWM_MAC_PM_POWER_TABLE:
		case IWM_TIME_QUOTA_CMD:
		case IWM_REMOVE_STA:
		case IWM_TXPATH_FLUSH:
		case IWM_LQ_CMD:
			SYNC_RESP_STRUCT(cresp, pkt);
			if (sc->sc_wantresp == ((qid << 16) | idx)) {
				memcpy(sc->sc_cmd_resp,
				    pkt, sizeof(*pkt)+sizeof(*cresp));
			}
			break;

		/* ignore */
		case 0x6c: /* IWM_PHY_DB_CMD, no idea why it's not in fw-api.h */
			break;

		case IWM_INIT_COMPLETE_NOTIF:
			sc->sc_init_complete = 1;
			wakeup(&sc->sc_init_complete);
			break;

		/* Scan finished: defer end-of-scan work to a workqueue. */
		case IWM_SCAN_COMPLETE_NOTIFICATION: {
			struct iwm_scan_complete_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);

			workqueue_enqueue(sc->sc_eswq, &sc->sc_eswk, NULL);
			break; }

		case IWM_REPLY_ERROR: {
			struct iwm_error_resp *resp;
			SYNC_RESP_STRUCT(resp, pkt);

			aprint_error_dev(sc->sc_dev,
			    "firmware error 0x%x, cmd 0x%x\n",
			    le32toh(resp->error_type), resp->cmd_id);
			break; }

		/* Time event (auth protection) status change; wake any
		 * thread sleeping on sc_auth_prot. */
		case IWM_TIME_EVENT_NOTIFICATION: {
			struct iwm_time_event_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);

			if (notif->status) {
				if (le32toh(notif->action) &
				    IWM_TE_V2_NOTIF_HOST_EVENT_START)
					sc->sc_auth_prot = 2;
				else
					sc->sc_auth_prot = 0;
			} else {
				sc->sc_auth_prot = -1;
			}
			wakeup(&sc->sc_auth_prot);
			break; }

		case IWM_MCAST_FILTER_CMD:
			break;

		default:
			aprint_error_dev(sc->sc_dev,
			    "code %02x frame %d/%d %x UNHANDLED "
			    "(this should not happen)\n",
			    pkt->hdr.code, qid, idx, pkt->len_n_flags);
			break;
		}

		/*
		 * Why test bit 0x80? The Linux driver:
		 *
		 * There is one exception: uCode sets bit 15 when it
		 * originates the response/notification, i.e. when the
		 * response/notification is not a direct response to a
		 * command sent by the driver. For example, uCode issues
		 * IWM_REPLY_RX when it sends a received frame to the driver;
		 * it is not a direct response to any driver command.
		 *
		 * Ok, so since when is 7 == 15? Well, the Linux driver
		 * uses a slightly different format for pkt->hdr, and "qid"
		 * is actually the upper byte of a two-byte field.
		 */
		if (!(pkt->hdr.qid & (1 << 7))) {
			iwm_cmd_done(sc, pkt);
		}

		ADVANCE_RXQ(sc);
	}

	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/*
	 * Tell the firmware what we have processed.
	 * Seems like the hardware gets upset unless we align
	 * the write by 8??
	 */
	hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, hw & ~7);
}
6370
static int
iwm_intr(void *arg)
{
	struct iwm_softc *sc = arg;
	struct ifnet *ifp = IC2IFP(&sc->sc_ic);
	int handled = 0;
	int r1, r2, rv = 0;
	int isperiodic = 0;

	/* Mask all interrupts while we service this one. */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
		/* ICT mode: read interrupt causes from the ICT DMA table. */
		uint32_t *ict = sc->ict_dma.vaddr;
		int tmp;

		tmp = htole32(ict[sc->ict_cur]);
		if (!tmp)
			goto out_ena;

		/*
		 * ok, there was something. keep plowing until we have all.
		 */
		r1 = r2 = 0;
		while (tmp) {
			r1 |= tmp;
			ict[sc->ict_cur] = 0;	/* consume the slot */
			sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
			tmp = htole32(ict[sc->ict_cur]);
		}

		/* this is where the fun begins. don't ask */
		if (r1 == 0xffffffff)
			r1 = 0;

		/* i am not expected to understand this */
		if (r1 & 0xc0000)
			r1 |= 0x8000;
		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
	} else {
		/* Legacy mode: read the cause registers directly. */
		r1 = IWM_READ(sc, IWM_CSR_INT);
		/* "hardware gone" (where, fishing?) */
		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
			goto out;
		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
	}
	if (r1 == 0 && r2 == 0) {
		goto out_ena;	/* spurious: not ours */
	}

	/* Ack the interrupts we are about to handle. */
	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);

	/* ignored */
	handled |= (r1 & (IWM_CSR_INT_BIT_ALIVE /*| IWM_CSR_INT_BIT_SCD*/));

	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
#ifdef IWM_DEBUG
		int i;

		iwm_nic_error(sc);

		/* Dump driver status (TX and RX rings) while we're here. */
		DPRINTF(("driver status:\n"));
		for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
			struct iwm_tx_ring *ring = &sc->txq[i];
			DPRINTF(("  tx ring %2d: qid=%-2d cur=%-3d "
			    "queued=%-3d\n",
			    i, ring->qid, ring->cur, ring->queued));
		}
		DPRINTF(("  rx ring: cur=%d\n", sc->rxq.cur));
		DPRINTF(("  802.11 state %d\n", sc->sc_ic.ic_state));
#endif

		/* Firmware asserted: take the interface down. */
		aprint_error_dev(sc->sc_dev, "fatal firmware error\n");
		ifp->if_flags &= ~IFF_UP;
		iwm_stop(ifp, 1);
		rv = 1;
		goto out;

	}

	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
		handled |= IWM_CSR_INT_BIT_HW_ERR;
		aprint_error_dev(sc->sc_dev,
		    "hardware error, stopping device\n");
		ifp->if_flags &= ~IFF_UP;
		iwm_stop(ifp, 1);
		rv = 1;
		goto out;
	}

	/* firmware chunk loaded */
	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
		handled |= IWM_CSR_INT_BIT_FH_TX;

		sc->sc_fw_chunk_done = 1;
		wakeup(&sc->sc_fw);
	}

	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
		handled |= IWM_CSR_INT_BIT_RF_KILL;
		if (iwm_check_rfkill(sc) && (ifp->if_flags & IFF_UP)) {
			DPRINTF(("%s: rfkill switch, disabling interface\n",
			    DEVNAME(sc)));
			ifp->if_flags &= ~IFF_UP;
			iwm_stop(ifp, 1);
		}
	}

	/*
	 * The Linux driver uses periodic interrupts to avoid races.
	 * We cargo-cult like it's going out of fashion.
	 */
	if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
		handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
		IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
		if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
			IWM_WRITE_1(sc,
			    IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
		isperiodic = 1;
	}

	if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
		handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);

		/* Process everything the firmware put on the RX ring. */
		iwm_notif_intr(sc);

		/* enable periodic interrupt, see above */
		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
			    IWM_CSR_INT_PERIODIC_ENA);
	}

	if (__predict_false(r1 & ~handled))
		DPRINTF(("%s: unhandled interrupts: %x\n", DEVNAME(sc), r1));
	rv = 1;

 out_ena:
	/* Re-enable the interrupts we masked on entry. */
	iwm_restore_interrupts(sc);
 out:
	return rv;
}
6514
6515 /*
6516 * Autoconf glue-sniffing
6517 */
6518
/* PCI product IDs of supported adapters (all with Intel vendor ID). */
static const pci_product_id_t iwm_devices[] = {
	PCI_PRODUCT_INTEL_WIFI_LINK_7260_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_7260_2,
	PCI_PRODUCT_INTEL_WIFI_LINK_3160_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_3160_2,
	PCI_PRODUCT_INTEL_WIFI_LINK_7265_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_7265_2,
};
6527
6528 static int
iwm_match(device_t parent,cfdata_t match __unused,void * aux)6529 iwm_match(device_t parent, cfdata_t match __unused, void *aux)
6530 {
6531 struct pci_attach_args *pa = aux;
6532
6533 if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_INTEL)
6534 return 0;
6535
6536 for (size_t i = 0; i < __arraycount(iwm_devices); i++)
6537 if (PCI_PRODUCT(pa->pa_id) == iwm_devices[i])
6538 return 1;
6539
6540 return 0;
6541 }
6542
static int
iwm_preinit(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	int error;

	/*
	 * Deferred second-stage attach, run from the mountroot hook:
	 * boots the init firmware once (to learn the MAC address and
	 * NVM contents), then attaches the 802.11 layer.  Idempotent
	 * via the IWM_FLAG_ATTACHED check.
	 */
	if (sc->sc_flags & IWM_FLAG_ATTACHED)
		return 0;

	if ((error = iwm_start_hw(sc)) != 0) {
		aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
		return error;
	}

	/* Run the init ucode just to read NVM data, then stop again. */
	error = iwm_run_init_mvm_ucode(sc, 1);
	iwm_stop_device(sc);
	if (error)
		return error;

	sc->sc_flags |= IWM_FLAG_ATTACHED;

	aprint_normal_dev(sc->sc_dev,
	    "hw rev: 0x%x, fw ver %d.%d (API ver %d), address %s\n",
	    sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
	    IWM_UCODE_MAJOR(sc->sc_fwver),
	    IWM_UCODE_MINOR(sc->sc_fwver),
	    IWM_UCODE_API(sc->sc_fwver),
	    ether_sprintf(sc->sc_nvm.hw_addr));

	/* not all hardware can do 5GHz band */
	if (sc->sc_nvm.sku_cap_band_52GHz_enable)
		ic->ic_sup_rates[IEEE80211_MODE_11A] = ieee80211_std_rateset_11a;

	ieee80211_ifattach(ic);

	ic->ic_node_alloc = iwm_node_alloc;

	/* Override 802.11 state transition machine. */
	sc->sc_newstate = ic->ic_newstate;
	ic->ic_newstate = iwm_newstate;
	ieee80211_media_init(ic, iwm_media_change, ieee80211_media_status);
	ieee80211_announce(ic);

	iwm_radiotap_attach(sc);

	return 0;
}
6590
6591 static void
iwm_attach_hook(device_t dev)6592 iwm_attach_hook(device_t dev)
6593 {
6594 struct iwm_softc *sc = device_private(dev);
6595
6596 iwm_preinit(sc);
6597 }
6598
static void
iwm_attach(device_t parent, device_t self, void *aux)
{
	struct iwm_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	pcireg_t reg, memtype;
	char intrbuf[PCI_INTRSTR_LEN];
	const char *intrstr;
	int error;
	int txq_i;
	const struct sysctlnode *node;

	sc->sc_dev = self;
	sc->sc_pct = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;
	sc->sc_dmat = pa->pa_dmat;
	sc->sc_pciid = pa->pa_id;

	pci_aprint_devinfo(pa, NULL);

	/*
	 * Get the offset of the PCI Express Capability Structure in PCI
	 * Configuration Space.
	 */
	error = pci_get_capability(sc->sc_pct, sc->sc_pcitag,
	    PCI_CAP_PCIEXPRESS, &sc->sc_cap_off, NULL);
	/* pci_get_capability() returns 0 when the capability is absent. */
	if (error == 0) {
		aprint_error_dev(self,
		    "PCIe capability structure not found!\n");
		return;
	}

	/* Clear device-specific "PCI retry timeout" register (41h). */
	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);

	/* Enable bus-mastering and hardware bug workaround. */
	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
	reg |= PCI_COMMAND_MASTER_ENABLE;
	/* if !MSI */
	if (reg & PCI_COMMAND_INTERRUPT_DISABLE) {
		reg &= ~PCI_COMMAND_INTERRUPT_DISABLE;
	}
	pci_conf_write(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, reg);

	/* Map the device registers (BAR 0). */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START);
	error = pci_mapreg_map(pa, PCI_MAPREG_START, memtype, 0,
	    &sc->sc_st, &sc->sc_sh, NULL, &sc->sc_sz);
	if (error != 0) {
		aprint_error_dev(self, "can't map mem space\n");
		return;
	}

	/* Install interrupt handler. */
	error = pci_intr_alloc(pa, &sc->sc_pihp, NULL, 0);
	if (error != 0) {
		aprint_error_dev(self, "can't allocate interrupt\n");
		return;
	}
	intrstr = pci_intr_string(sc->sc_pct, sc->sc_pihp[0], intrbuf,
	    sizeof(intrbuf));
	sc->sc_ih = pci_intr_establish(sc->sc_pct, sc->sc_pihp[0], IPL_NET,
	    iwm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(self, "can't establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(self, "interrupting at %s\n", intrstr);

	/* No synchronous command response is being waited for yet. */
	sc->sc_wantresp = -1;

	/* Pick firmware image and interrupt quirk per chip variant. */
	switch (PCI_PRODUCT(sc->sc_pciid)) {
	case PCI_PRODUCT_INTEL_WIFI_LINK_7260_1:
	case PCI_PRODUCT_INTEL_WIFI_LINK_7260_2:
		sc->sc_fwname = "iwlwifi-7260-9.ucode";
		sc->host_interrupt_operation_mode = 1;
		break;
	case PCI_PRODUCT_INTEL_WIFI_LINK_3160_1:
	case PCI_PRODUCT_INTEL_WIFI_LINK_3160_2:
		sc->sc_fwname = "iwlwifi-3160-9.ucode";
		sc->host_interrupt_operation_mode = 1;
		break;
	case PCI_PRODUCT_INTEL_WIFI_LINK_7265_1:
	case PCI_PRODUCT_INTEL_WIFI_LINK_7265_2:
		sc->sc_fwname = "iwlwifi-7265-9.ucode";
		sc->host_interrupt_operation_mode = 0;
		break;
	default:
		aprint_error_dev(self, "unknown product %#x",
		    PCI_PRODUCT(sc->sc_pciid));
		return;
	}
	DPRINTF(("%s: firmware=%s\n", DEVNAME(sc), sc->sc_fwname));
	sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;

	/*
	 * We now start fiddling with the hardware
	 */

	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
	if (iwm_prepare_card_hw(sc) != 0) {
		aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
		return;
	}

	/* Allocate DMA memory for firmware transfers. */
	if ((error = iwm_alloc_fwmem(sc)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate memory for firmware\n");
		return;
	}

	/* Allocate "Keep Warm" page. */
	if ((error = iwm_alloc_kw(sc)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate keep warm page\n");
		goto fail1;
	}

	/* We use ICT interrupts */
	if ((error = iwm_alloc_ict(sc)) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate ICT table\n");
		goto fail2;
	}

	/* Allocate TX scheduler "rings". */
	if ((error = iwm_alloc_sched(sc)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate TX scheduler rings\n");
		goto fail3;
	}

	/* Allocate TX rings */
	for (txq_i = 0; txq_i < __arraycount(sc->txq); txq_i++) {
		if ((error = iwm_alloc_tx_ring(sc,
		    &sc->txq[txq_i], txq_i)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate TX ring %d\n", txq_i);
			goto fail4;
		}
	}

	/* Allocate RX ring. */
	if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate RX ring\n");
		goto fail4;
	}

	/* Workqueues for end-of-scan and newstate deferral. */
	workqueue_create(&sc->sc_eswq, "iwmes",
	    iwm_endscan_cb, sc, PRI_NONE, IPL_NET, 0);
	workqueue_create(&sc->sc_nswq, "iwmns",
	    iwm_newstate_cb, sc, PRI_NONE, IPL_NET, 0);

	/* Clear pending interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);

	/* Per-controller sysctl subtree: hw.iwm.<device>. */
	if ((error = sysctl_createv(&sc->sc_clog, 0, NULL, &node,
	    0, CTLTYPE_NODE, device_xname(sc->sc_dev),
	    SYSCTL_DESCR("iwm per-controller controls"),
	    NULL, 0, NULL, 0,
	    CTL_HW, iwm_sysctl_root_num, CTL_CREATE,
	    CTL_EOL)) != 0) {
		aprint_normal_dev(sc->sc_dev,
		    "couldn't create iwm per-controller sysctl node\n");
	}
	if (error == 0) {
		int iwm_nodenum = node->sysctl_num;

		/* Reload firmware sysctl node */
		if ((error = sysctl_createv(&sc->sc_clog, 0, NULL, &node,
		    CTLFLAG_READWRITE, CTLTYPE_INT, "fw_loaded",
		    SYSCTL_DESCR("Reload firmware"),
		    iwm_sysctl_fw_loaded_handler, 0, (void *)sc, 0,
		    CTL_HW, iwm_sysctl_root_num, iwm_nodenum, CTL_CREATE,
		    CTL_EOL)) != 0) {
			aprint_normal_dev(sc->sc_dev,
			    "couldn't create load_fw sysctl node\n");
		}
	}

	/*
	 * Attach interface
	 */
	ic->ic_ifp = ifp;
	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
	ic->ic_state = IEEE80211_S_INIT;

	/* Set device capabilities. */
	ic->ic_caps =
	    IEEE80211_C_WEP |		/* WEP */
	    IEEE80211_C_WPA |		/* 802.11i */
	    IEEE80211_C_SHSLOT |	/* short slot time supported */
	    IEEE80211_C_SHPREAMBLE;	/* short preamble supported */

	/* all hardware can do 2.4GHz band */
	ic->ic_sup_rates[IEEE80211_MODE_11B] = ieee80211_std_rateset_11b;
	ic->ic_sup_rates[IEEE80211_MODE_11G] = ieee80211_std_rateset_11g;

	for (int i = 0; i < __arraycount(sc->sc_phyctxt); i++) {
		sc->sc_phyctxt[i].id = i;
	}

	sc->sc_amrr.amrr_min_success_threshold = 1;
	sc->sc_amrr.amrr_max_success_threshold = 15;

	/* IBSS channel undefined for now. */
	ic->ic_ibss_chan = &ic->ic_channels[1];

#if 0
	/* Max RSSI */
	ic->ic_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
#endif

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = iwm_init;
	ifp->if_stop = iwm_stop;
	ifp->if_ioctl = iwm_ioctl;
	ifp->if_start = iwm_start;
	ifp->if_watchdog = iwm_watchdog;
	IFQ_SET_READY(&ifp->if_snd);
	memcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);

	if_initialize(ifp);
#if 0
	ieee80211_ifattach(ic);
#else
	/* ieee80211_ifattach() is deferred to iwm_preinit(); see below. */
	ether_ifattach(ifp, ic->ic_myaddr);	/* XXX */
#endif
	if_register(ifp);
	/* Use common softint-based if_input */
	ifp->if_percpuq = if_percpuq_create(ifp);

	callout_init(&sc->sc_calib_to, 0);
	callout_setfunc(&sc->sc_calib_to, iwm_calib_timeout, sc);

	//task_set(&sc->init_task, iwm_init_task, sc);

	if (pmf_device_register(self, NULL, NULL))
		pmf_class_network_register(self, ifp);
	else
		aprint_error_dev(self, "couldn't establish power handler\n");

	/*
	 * We can't do normal attach before the file system is mounted
	 * because we cannot read the MAC address without loading the
	 * firmware from disk. So we postpone until mountroot is done.
	 * Notably, this will require a full driver unload/load cycle
	 * (or reboot) in case the firmware is not present when the
	 * hook runs.
	 */
	config_mountroot(self, iwm_attach_hook);

	return;

	/* Free allocated memory if something failed during attachment. */
fail4:	while (--txq_i >= 0)
		iwm_free_tx_ring(sc, &sc->txq[txq_i]);
	iwm_free_sched(sc);
fail3:	if (sc->ict_dma.vaddr != NULL)
		iwm_free_ict(sc);
fail2:	iwm_free_kw(sc);
fail1:	iwm_free_fwmem(sc);
}
6869
6870 /*
6871 * Attach the interface to 802.11 radiotap.
6872 */
void
iwm_radiotap_attach(struct iwm_softc *sc)
{
	struct ifnet *ifp = sc->sc_ic.ic_ifp;

	/* Register a bpf tap for raw 802.11 + radiotap frames. */
	bpf_attach2(ifp, DLT_IEEE802_11_RADIO,
	    sizeof (struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN,
	    &sc->sc_drvbpf);

	/* Pre-fill the constant parts of the RX radiotap header. */
	sc->sc_rxtap_len = sizeof sc->sc_rxtapu;
	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
	sc->sc_rxtap.wr_ihdr.it_present = htole32(IWM_RX_RADIOTAP_PRESENT);

	/* ... and of the TX radiotap header. */
	sc->sc_txtap_len = sizeof sc->sc_txtapu;
	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
	sc->sc_txtap.wt_ihdr.it_present = htole32(IWM_TX_RADIOTAP_PRESENT);
}
6890
#if 0
/*
 * NOTE(review): this whole region is compiled out.  It looks like
 * suspend/resume and deactivation support carried over from OpenBSD
 * that has not been wired up on NetBSD yet — confirm before enabling.
 */
static void
iwm_init_task(void *arg1)
{
	struct iwm_softc *sc = arg1;
	struct ifnet *ifp = &sc->sc_ic.ic_if;
	int s;

	s = splnet();
	/* Serialize against other init/stop activity via IWM_FLAG_BUSY. */
	while (sc->sc_flags & IWM_FLAG_BUSY)
		tsleep(&sc->sc_flags, 0, "iwmpwr", 0);
	sc->sc_flags |= IWM_FLAG_BUSY;

	iwm_stop(ifp, 0);
	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP)
		iwm_init(ifp);

	sc->sc_flags &= ~IWM_FLAG_BUSY;
	wakeup(&sc->sc_flags);
	splx(s);
}

static void
iwm_wakeup(struct iwm_softc *sc)
{
	pcireg_t reg;

	/* Clear device-specific "PCI retry timeout" register (41h). */
	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);

	iwm_init_task(sc);
}

static int
iwm_activate(device_t self, enum devact act)
{
	struct iwm_softc *sc = device_private(self);
	struct ifnet *ifp = IC2IFP(&sc->sc_ic);

	switch (act) {
	case DVACT_DEACTIVATE:
		if (ifp->if_flags & IFF_RUNNING)
			iwm_stop(ifp, 0);
		return 0;
	default:
		return EOPNOTSUPP;
	}
}
#endif
6941
6942 CFATTACH_DECL_NEW(iwm, sizeof(struct iwm_softc), iwm_match, iwm_attach,
6943 NULL, NULL);
6944
6945 static int
iwm_sysctl_fw_loaded_handler(SYSCTLFN_ARGS)6946 iwm_sysctl_fw_loaded_handler(SYSCTLFN_ARGS)
6947 {
6948 struct sysctlnode node;
6949 struct iwm_softc *sc;
6950 int error, t;
6951
6952 node = *rnode;
6953 sc = node.sysctl_data;
6954 t = ISSET(sc->sc_flags, IWM_FLAG_FW_LOADED) ? 1 : 0;
6955 node.sysctl_data = &t;
6956 error = sysctl_lookup(SYSCTLFN_CALL(&node));
6957 if (error || newp == NULL)
6958 return error;
6959
6960 if (t == 0)
6961 CLR(sc->sc_flags, IWM_FLAG_FW_LOADED);
6962 return 0;
6963 }
6964
/*
 * Create the global hw.iwm sysctl subtree.  Per-controller children
 * are added later in iwm_attach().  With IWM_DEBUG, also exposes the
 * hw.iwm.debug knob controlling the driver's debug printfs.
 */
SYSCTL_SETUP(sysctl_iwm, "sysctl iwm(4) subtree setup")
{
	const struct sysctlnode *rnode;
#ifdef IWM_DEBUG
	const struct sysctlnode *cnode;
#endif /* IWM_DEBUG */
	int rc;

	if ((rc = sysctl_createv(clog, 0, NULL, &rnode,
	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "iwm",
	    SYSCTL_DESCR("iwm global controls"),
	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
		goto err;

	/* Remember the node number for per-controller children. */
	iwm_sysctl_root_num = rnode->sysctl_num;

#ifdef IWM_DEBUG
	/* control debugging printfs */
	if ((rc = sysctl_createv(clog, 0, &rnode, &cnode,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT,
	    "debug", SYSCTL_DESCR("Enable debugging output"),
	    NULL, 0, &iwm_debug, 0, CTL_CREATE, CTL_EOL)) != 0)
		goto err;
#endif /* IWM_DEBUG */

	return;

 err:
	aprint_error("%s: sysctl_createv failed (rc = %d)\n", __func__, rc);
}
6995