xref: /linux/drivers/net/wireless/broadcom/b43/dma.c (revision 44f57d78)
// SPDX-License-Identifier: GPL-2.0-or-later
/*

  Broadcom B43 wireless driver

  DMA ringbuffer and descriptor allocation/management

  Copyright (c) 2005, 2006 Michael Buesch <m@bues.ch>

  Some code in this file is derived from the b44.c driver
  Copyright (C) 2002 David S. Miller
  Copyright (C) Pekka Pietikainen


*/

#include "b43.h"
#include "dma.h"
#include "main.h"
#include "debugfs.h"
#include "xmit.h"

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <asm/div64.h>


/* Required number of TX DMA slots per TX frame.
 * This currently is 2, because we put the header and the ieee80211 frame
 * into separate slots. */
#define TX_SLOTS_PER_FRAME	2
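/* Each TX frame therefore occupies two consecutive slots: a header slot
 * (always an even index, given the BUILD_BUG_ON below that the ring size is
 * divisible by TX_SLOTS_PER_FRAME; this is why txhdr_cache is indexed by
 * slot / TX_SLOTS_PER_FRAME), followed by the ieee80211 payload slot.
 * See dma_tx_fragment() for the layout.
 */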

static u32 b43_dma_address(struct b43_dma *dma, dma_addr_t dmaaddr,
			   enum b43_addrtype addrtype)
{
	u32 addr = 0;

	switch (addrtype) {
	case B43_DMA_ADDR_LOW:
		addr = lower_32_bits(dmaaddr);
		if (dma->translation_in_low) {
			addr &= ~SSB_DMA_TRANSLATION_MASK;
			addr |= dma->translation;
		}
		break;
	case B43_DMA_ADDR_HIGH:
		addr = upper_32_bits(dmaaddr);
		if (!dma->translation_in_low) {
			addr &= ~SSB_DMA_TRANSLATION_MASK;
			addr |= dma->translation;
		}
		break;
	case B43_DMA_ADDR_EXT:
		if (dma->translation_in_low)
			addr = lower_32_bits(dmaaddr);
		else
			addr = upper_32_bits(dmaaddr);
		addr &= SSB_DMA_TRANSLATION_MASK;
		addr >>= SSB_DMA_TRANSLATION_SHIFT;
		break;
	}

	return addr;
}
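/* Illustrative example (assuming SSB_DMA_TRANSLATION_MASK == 0xC0000000 and
 * SSB_DMA_TRANSLATION_SHIFT == 30, as defined in <linux/ssb/ssb.h>), with the
 * translation bits carried in the high word (translation_in_low == false):
 * for dmaaddr 0x1_2345_6780 and translation 0x8000_0000,
 *   ADDR_LOW  = 0x23456780
 *   ADDR_HIGH = (0x00000001 & ~0xC0000000) | 0x80000000 = 0x80000001
 *   ADDR_EXT  = (0x00000001 & 0xC0000000) >> 30         = 0
 */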

/* 32bit DMA ops. */
static
struct b43_dmadesc_generic *op32_idx2desc(struct b43_dmaring *ring,
					  int slot,
					  struct b43_dmadesc_meta **meta)
{
	struct b43_dmadesc32 *desc;

	*meta = &(ring->meta[slot]);
	desc = ring->descbase;
	desc = &(desc[slot]);

	return (struct b43_dmadesc_generic *)desc;
}

static void op32_fill_descriptor(struct b43_dmaring *ring,
				 struct b43_dmadesc_generic *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct b43_dmadesc32 *descbase = ring->descbase;
	int slot;
	u32 ctl;
	u32 addr;
	u32 addrext;

	slot = (int)(&(desc->dma32) - descbase);
	B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

	addr = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_LOW);
	addrext = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_EXT);

	ctl = bufsize & B43_DMA32_DCTL_BYTECNT;
	if (slot == ring->nr_slots - 1)
		ctl |= B43_DMA32_DCTL_DTABLEEND;
	if (start)
		ctl |= B43_DMA32_DCTL_FRAMESTART;
	if (end)
		ctl |= B43_DMA32_DCTL_FRAMEEND;
	if (irq)
		ctl |= B43_DMA32_DCTL_IRQ;
	ctl |= (addrext << B43_DMA32_DCTL_ADDREXT_SHIFT)
	    & B43_DMA32_DCTL_ADDREXT_MASK;

	desc->dma32.control = cpu_to_le32(ctl);
	desc->dma32.address = cpu_to_le32(addr);
}

static void op32_poke_tx(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA32_TXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc32)));
}

static void op32_tx_suspend(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL)
		      | B43_DMA32_TXSUSPEND);
}

static void op32_tx_resume(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL)
		      & ~B43_DMA32_TXSUSPEND);
}

static int op32_get_current_rxslot(struct b43_dmaring *ring)
{
	u32 val;

	val = b43_dma_read(ring, B43_DMA32_RXSTATUS);
	val &= B43_DMA32_RXDPTR;

	return (val / sizeof(struct b43_dmadesc32));
}

static void op32_set_current_rxslot(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA32_RXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc32)));
}

static const struct b43_dma_ops dma32_ops = {
	.idx2desc = op32_idx2desc,
	.fill_descriptor = op32_fill_descriptor,
	.poke_tx = op32_poke_tx,
	.tx_suspend = op32_tx_suspend,
	.tx_resume = op32_tx_resume,
	.get_current_rxslot = op32_get_current_rxslot,
	.set_current_rxslot = op32_set_current_rxslot,
};

/* 64bit DMA ops. */
static
struct b43_dmadesc_generic *op64_idx2desc(struct b43_dmaring *ring,
					  int slot,
					  struct b43_dmadesc_meta **meta)
{
	struct b43_dmadesc64 *desc;

	*meta = &(ring->meta[slot]);
	desc = ring->descbase;
	desc = &(desc[slot]);

	return (struct b43_dmadesc_generic *)desc;
}

static void op64_fill_descriptor(struct b43_dmaring *ring,
				 struct b43_dmadesc_generic *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct b43_dmadesc64 *descbase = ring->descbase;
	int slot;
	u32 ctl0 = 0, ctl1 = 0;
	u32 addrlo, addrhi;
	u32 addrext;

	slot = (int)(&(desc->dma64) - descbase);
	B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

	addrlo = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_LOW);
	addrhi = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_HIGH);
	addrext = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_EXT);

	if (slot == ring->nr_slots - 1)
		ctl0 |= B43_DMA64_DCTL0_DTABLEEND;
	if (start)
		ctl0 |= B43_DMA64_DCTL0_FRAMESTART;
	if (end)
		ctl0 |= B43_DMA64_DCTL0_FRAMEEND;
	if (irq)
		ctl0 |= B43_DMA64_DCTL0_IRQ;
	ctl1 |= bufsize & B43_DMA64_DCTL1_BYTECNT;
	ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT)
	    & B43_DMA64_DCTL1_ADDREXT_MASK;

	desc->dma64.control0 = cpu_to_le32(ctl0);
	desc->dma64.control1 = cpu_to_le32(ctl1);
	desc->dma64.address_low = cpu_to_le32(addrlo);
	desc->dma64.address_high = cpu_to_le32(addrhi);
}

static void op64_poke_tx(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA64_TXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc64)));
}

static void op64_tx_suspend(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL)
		      | B43_DMA64_TXSUSPEND);
}

static void op64_tx_resume(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL)
		      & ~B43_DMA64_TXSUSPEND);
}

static int op64_get_current_rxslot(struct b43_dmaring *ring)
{
	u32 val;

	val = b43_dma_read(ring, B43_DMA64_RXSTATUS);
	val &= B43_DMA64_RXSTATDPTR;

	return (val / sizeof(struct b43_dmadesc64));
}

static void op64_set_current_rxslot(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA64_RXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc64)));
}

static const struct b43_dma_ops dma64_ops = {
	.idx2desc = op64_idx2desc,
	.fill_descriptor = op64_fill_descriptor,
	.poke_tx = op64_poke_tx,
	.tx_suspend = op64_tx_suspend,
	.tx_resume = op64_tx_resume,
	.get_current_rxslot = op64_get_current_rxslot,
	.set_current_rxslot = op64_set_current_rxslot,
};

static inline int free_slots(struct b43_dmaring *ring)
{
	return (ring->nr_slots - ring->used_slots);
}

static inline int next_slot(struct b43_dmaring *ring, int slot)
{
	B43_WARN_ON(!(slot >= -1 && slot <= ring->nr_slots - 1));
	if (slot == ring->nr_slots - 1)
		return 0;
	return slot + 1;
}

static inline int prev_slot(struct b43_dmaring *ring, int slot)
{
	B43_WARN_ON(!(slot >= 0 && slot <= ring->nr_slots - 1));
	if (slot == 0)
		return ring->nr_slots - 1;
	return slot - 1;
}

#ifdef CONFIG_B43_DEBUG
static void update_max_used_slots(struct b43_dmaring *ring,
				  int current_used_slots)
{
	if (current_used_slots <= ring->max_used_slots)
		return;
	ring->max_used_slots = current_used_slots;
	if (b43_debug(ring->dev, B43_DBG_DMAVERBOSE)) {
		b43dbg(ring->dev->wl,
		       "max_used_slots increased to %d on %s ring %d\n",
		       ring->max_used_slots,
		       ring->tx ? "TX" : "RX", ring->index);
	}
}
#else
static inline
    void update_max_used_slots(struct b43_dmaring *ring, int current_used_slots)
{
}
#endif /* DEBUG */

/* Request a slot for usage. */
static inline int request_slot(struct b43_dmaring *ring)
{
	int slot;

	B43_WARN_ON(!ring->tx);
	B43_WARN_ON(ring->stopped);
	B43_WARN_ON(free_slots(ring) == 0);

	slot = next_slot(ring, ring->current_slot);
	ring->current_slot = slot;
	ring->used_slots++;

	update_max_used_slots(ring, ring->used_slots);

	return slot;
}

static u16 b43_dmacontroller_base(enum b43_dmatype type, int controller_idx)
{
	static const u16 map64[] = {
		B43_MMIO_DMA64_BASE0,
		B43_MMIO_DMA64_BASE1,
		B43_MMIO_DMA64_BASE2,
		B43_MMIO_DMA64_BASE3,
		B43_MMIO_DMA64_BASE4,
		B43_MMIO_DMA64_BASE5,
	};
	static const u16 map32[] = {
		B43_MMIO_DMA32_BASE0,
		B43_MMIO_DMA32_BASE1,
		B43_MMIO_DMA32_BASE2,
		B43_MMIO_DMA32_BASE3,
		B43_MMIO_DMA32_BASE4,
		B43_MMIO_DMA32_BASE5,
	};

	if (type == B43_DMA_64BIT) {
		B43_WARN_ON(!(controller_idx >= 0 &&
			      controller_idx < ARRAY_SIZE(map64)));
		return map64[controller_idx];
	}
	B43_WARN_ON(!(controller_idx >= 0 &&
		      controller_idx < ARRAY_SIZE(map32)));
	return map32[controller_idx];
}

static inline
    dma_addr_t map_descbuffer(struct b43_dmaring *ring,
			      unsigned char *buf, size_t len, int tx)
{
	dma_addr_t dmaaddr;

	if (tx) {
		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
					 buf, len, DMA_TO_DEVICE);
	} else {
		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
					 buf, len, DMA_FROM_DEVICE);
	}

	return dmaaddr;
}

static inline
    void unmap_descbuffer(struct b43_dmaring *ring,
			  dma_addr_t addr, size_t len, int tx)
{
	if (tx) {
		dma_unmap_single(ring->dev->dev->dma_dev,
				 addr, len, DMA_TO_DEVICE);
	} else {
		dma_unmap_single(ring->dev->dev->dma_dev,
				 addr, len, DMA_FROM_DEVICE);
	}
}

static inline
    void sync_descbuffer_for_cpu(struct b43_dmaring *ring,
				 dma_addr_t addr, size_t len)
{
	B43_WARN_ON(ring->tx);
	dma_sync_single_for_cpu(ring->dev->dev->dma_dev,
				    addr, len, DMA_FROM_DEVICE);
}

static inline
    void sync_descbuffer_for_device(struct b43_dmaring *ring,
				    dma_addr_t addr, size_t len)
{
	B43_WARN_ON(ring->tx);
	dma_sync_single_for_device(ring->dev->dev->dma_dev,
				   addr, len, DMA_FROM_DEVICE);
}

static inline
    void free_descriptor_buffer(struct b43_dmaring *ring,
				struct b43_dmadesc_meta *meta)
{
	if (meta->skb) {
		if (ring->tx)
			ieee80211_free_txskb(ring->dev->wl->hw, meta->skb);
		else
			dev_kfree_skb_any(meta->skb);
		meta->skb = NULL;
	}
}

static int alloc_ringmemory(struct b43_dmaring *ring)
{
	/* The specs call for 4K buffers for 30- and 32-bit DMA with 4K
	 * alignment and 8K buffers for 64-bit DMA with 8K alignment.
	 * In practice we could use smaller buffers for the latter, but the
	 * alignment is really important because of a hardware bug: if bit
	 * 0x00001000 is set in the DMA address, some hardware (like the
	 * BCM4331) copies that bit into B43_DMA64_RXSTATUS and we read false
	 * values from B43_DMA64_RXSTATDPTR. So just use 8K buffers, even
	 * though we don't use more than 256 slots per ring.
	 */
	u16 ring_mem_size = (ring->type == B43_DMA_64BIT) ?
				B43_DMA64_RINGMEMSIZE : B43_DMA32_RINGMEMSIZE;

	ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
					    ring_mem_size, &(ring->dmabase),
					    GFP_KERNEL);
	if (!ring->descbase)
		return -ENOMEM;

	return 0;
}

static void free_ringmemory(struct b43_dmaring *ring)
{
	u16 ring_mem_size = (ring->type == B43_DMA_64BIT) ?
				B43_DMA64_RINGMEMSIZE : B43_DMA32_RINGMEMSIZE;
	dma_free_coherent(ring->dev->dev->dma_dev, ring_mem_size,
			  ring->descbase, ring->dmabase);
}

/* Reset the RX DMA channel */
static int b43_dmacontroller_rx_reset(struct b43_wldev *dev, u16 mmio_base,
				      enum b43_dmatype type)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXCTL : B43_DMA32_RXCTL;
	b43_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXSTATUS :
						   B43_DMA32_RXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_RXSTAT;
			if (value == B43_DMA64_RXSTAT_DISABLED) {
				i = -1;
				break;
			}
		} else {
			value &= B43_DMA32_RXSTATE;
			if (value == B43_DMA32_RXSTAT_DISABLED) {
				i = -1;
				break;
			}
		}
		msleep(1);
	}
	if (i != -1) {
		b43err(dev->wl, "DMA RX reset timed out\n");
		return -ENODEV;
	}

	return 0;
}

/* Reset the TX DMA channel */
static int b43_dmacontroller_tx_reset(struct b43_wldev *dev, u16 mmio_base,
				      enum b43_dmatype type)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
						   B43_DMA32_TXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_TXSTAT;
			if (value == B43_DMA64_TXSTAT_DISABLED ||
			    value == B43_DMA64_TXSTAT_IDLEWAIT ||
			    value == B43_DMA64_TXSTAT_STOPPED)
				break;
		} else {
			value &= B43_DMA32_TXSTATE;
			if (value == B43_DMA32_TXSTAT_DISABLED ||
			    value == B43_DMA32_TXSTAT_IDLEWAIT ||
			    value == B43_DMA32_TXSTAT_STOPPED)
				break;
		}
		msleep(1);
	}
	offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXCTL : B43_DMA32_TXCTL;
	b43_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
						   B43_DMA32_TXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_TXSTAT;
			if (value == B43_DMA64_TXSTAT_DISABLED) {
				i = -1;
				break;
			}
		} else {
			value &= B43_DMA32_TXSTATE;
			if (value == B43_DMA32_TXSTAT_DISABLED) {
				i = -1;
				break;
			}
		}
		msleep(1);
	}
	if (i != -1) {
		b43err(dev->wl, "DMA TX reset timed out\n");
		return -ENODEV;
	}
	/* ensure the reset is completed. */
	msleep(1);

	return 0;
}

/* Check if a DMA mapping address is invalid. */
static bool b43_dma_mapping_error(struct b43_dmaring *ring,
				  dma_addr_t addr,
				  size_t buffersize, bool dma_to_device)
{
	if (unlikely(dma_mapping_error(ring->dev->dev->dma_dev, addr)))
		return true;

	switch (ring->type) {
	case B43_DMA_30BIT:
		if ((u64)addr + buffersize > (1ULL << 30))
			goto address_error;
		break;
	case B43_DMA_32BIT:
		if ((u64)addr + buffersize > (1ULL << 32))
			goto address_error;
		break;
	case B43_DMA_64BIT:
		/* Currently we can't have addresses beyond
		 * 64bit in the kernel. */
		break;
	}

	/* The address is OK. */
	return false;

address_error:
	/* We can't support this address. Unmap it again. */
	unmap_descbuffer(ring, addr, buffersize, dma_to_device);

	return true;
}
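/* Example: on a 30-bit engine a mapping at 0x3FFFFF00 with a 512-byte buffer
 * ends at 0x40000100, past the 1 GiB (1 << 30) limit, so it is unmapped
 * again and rejected even though dma_mapping_error() itself was happy.
 */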

static bool b43_rx_buffer_is_poisoned(struct b43_dmaring *ring, struct sk_buff *skb)
{
	unsigned char *f = skb->data + ring->frameoffset;

	return ((f[0] & f[1] & f[2] & f[3] & f[4] & f[5] & f[6] & f[7]) == 0xFF);
}

static void b43_poison_rx_buffer(struct b43_dmaring *ring, struct sk_buff *skb)
{
	struct b43_rxhdr_fw4 *rxhdr;
	unsigned char *frame;

	/* This poisons the RX buffer to detect DMA failures. */

	rxhdr = (struct b43_rxhdr_fw4 *)(skb->data);
	rxhdr->frame_len = 0;

	B43_WARN_ON(ring->rx_buffersize < ring->frameoffset + sizeof(struct b43_plcp_hdr6) + 2);
	frame = skb->data + ring->frameoffset;
	memset(frame, 0xFF, sizeof(struct b43_plcp_hdr6) + 2 /* padding */);
}
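/* The poison scheme: b43_poison_rx_buffer() zeroes rxhdr->frame_len and
 * writes 0xFF over the first sizeof(struct b43_plcp_hdr6) + 2 == 8 bytes at
 * frameoffset; b43_rx_buffer_is_poisoned() ANDs those 8 bytes together, so a
 * buffer only reads back as poisoned if the device never wrote any of them.
 */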

static int setup_rx_descbuffer(struct b43_dmaring *ring,
			       struct b43_dmadesc_generic *desc,
			       struct b43_dmadesc_meta *meta, gfp_t gfp_flags)
{
	dma_addr_t dmaaddr;
	struct sk_buff *skb;

	B43_WARN_ON(ring->tx);

	skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
	if (unlikely(!skb))
		return -ENOMEM;
	b43_poison_rx_buffer(ring, skb);
	dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
	if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
		/* ugh. try to realloc in zone_dma */
		gfp_flags |= GFP_DMA;

		dev_kfree_skb_any(skb);

		skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
		if (unlikely(!skb))
			return -ENOMEM;
		b43_poison_rx_buffer(ring, skb);
		dmaaddr = map_descbuffer(ring, skb->data,
					 ring->rx_buffersize, 0);
		if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
			b43err(ring->dev->wl, "RX DMA buffer allocation failed\n");
			dev_kfree_skb_any(skb);
			return -EIO;
		}
	}

	meta->skb = skb;
	meta->dmaaddr = dmaaddr;
	ring->ops->fill_descriptor(ring, desc, dmaaddr,
				   ring->rx_buffersize, 0, 0, 0);

	return 0;
}

/* Allocate the initial descbuffers.
 * This is used for an RX ring only.
 */
static int alloc_initial_descbuffers(struct b43_dmaring *ring)
{
	int i, err = -ENOMEM;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;

	for (i = 0; i < ring->nr_slots; i++) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
		if (err) {
			b43err(ring->dev->wl,
			       "Failed to allocate initial descbuffers\n");
			goto err_unwind;
		}
	}
	mb();
	ring->used_slots = ring->nr_slots;
	err = 0;
      out:
	return err;

      err_unwind:
	for (i--; i >= 0; i--) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
		dev_kfree_skb(meta->skb);
	}
	goto out;
}

/* Do initial setup of the DMA controller.
 * Reset the controller, write the ring busaddress
 * and switch the "enable" bit on.
 */
static int dmacontroller_setup(struct b43_dmaring *ring)
{
	int err = 0;
	u32 value;
	u32 addrext;
	bool parity = ring->dev->dma.parity;
	u32 addrlo;
	u32 addrhi;

	if (ring->tx) {
		if (ring->type == B43_DMA_64BIT) {
			u64 ringbase = (u64) (ring->dmabase);
			addrext = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_EXT);
			addrlo = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_LOW);
			addrhi = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_HIGH);

			value = B43_DMA64_TXENABLE;
			value |= (addrext << B43_DMA64_TXADDREXT_SHIFT)
			    & B43_DMA64_TXADDREXT_MASK;
			if (!parity)
				value |= B43_DMA64_TXPARITYDISABLE;
			b43_dma_write(ring, B43_DMA64_TXCTL, value);
			b43_dma_write(ring, B43_DMA64_TXRINGLO, addrlo);
			b43_dma_write(ring, B43_DMA64_TXRINGHI, addrhi);
		} else {
			u32 ringbase = (u32) (ring->dmabase);
			addrext = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_EXT);
			addrlo = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_LOW);

			value = B43_DMA32_TXENABLE;
			value |= (addrext << B43_DMA32_TXADDREXT_SHIFT)
			    & B43_DMA32_TXADDREXT_MASK;
			if (!parity)
				value |= B43_DMA32_TXPARITYDISABLE;
			b43_dma_write(ring, B43_DMA32_TXCTL, value);
			b43_dma_write(ring, B43_DMA32_TXRING, addrlo);
		}
	} else {
		err = alloc_initial_descbuffers(ring);
		if (err)
			goto out;
		if (ring->type == B43_DMA_64BIT) {
			u64 ringbase = (u64) (ring->dmabase);
			addrext = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_EXT);
			addrlo = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_LOW);
			addrhi = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_HIGH);

			value = (ring->frameoffset << B43_DMA64_RXFROFF_SHIFT);
			value |= B43_DMA64_RXENABLE;
			value |= (addrext << B43_DMA64_RXADDREXT_SHIFT)
			    & B43_DMA64_RXADDREXT_MASK;
			if (!parity)
				value |= B43_DMA64_RXPARITYDISABLE;
			b43_dma_write(ring, B43_DMA64_RXCTL, value);
			b43_dma_write(ring, B43_DMA64_RXRINGLO, addrlo);
			b43_dma_write(ring, B43_DMA64_RXRINGHI, addrhi);
			b43_dma_write(ring, B43_DMA64_RXINDEX, ring->nr_slots *
				      sizeof(struct b43_dmadesc64));
		} else {
			u32 ringbase = (u32) (ring->dmabase);
			addrext = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_EXT);
			addrlo = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_LOW);

			value = (ring->frameoffset << B43_DMA32_RXFROFF_SHIFT);
			value |= B43_DMA32_RXENABLE;
			value |= (addrext << B43_DMA32_RXADDREXT_SHIFT)
			    & B43_DMA32_RXADDREXT_MASK;
			if (!parity)
				value |= B43_DMA32_RXPARITYDISABLE;
			b43_dma_write(ring, B43_DMA32_RXCTL, value);
			b43_dma_write(ring, B43_DMA32_RXRING, addrlo);
			b43_dma_write(ring, B43_DMA32_RXINDEX, ring->nr_slots *
				      sizeof(struct b43_dmadesc32));
		}
	}

out:
	return err;
}

/* Shutdown the DMA controller. */
static void dmacontroller_cleanup(struct b43_dmaring *ring)
{
	if (ring->tx) {
		b43_dmacontroller_tx_reset(ring->dev, ring->mmio_base,
					   ring->type);
		if (ring->type == B43_DMA_64BIT) {
			b43_dma_write(ring, B43_DMA64_TXRINGLO, 0);
			b43_dma_write(ring, B43_DMA64_TXRINGHI, 0);
		} else
			b43_dma_write(ring, B43_DMA32_TXRING, 0);
	} else {
		b43_dmacontroller_rx_reset(ring->dev, ring->mmio_base,
					   ring->type);
		if (ring->type == B43_DMA_64BIT) {
			b43_dma_write(ring, B43_DMA64_RXRINGLO, 0);
			b43_dma_write(ring, B43_DMA64_RXRINGHI, 0);
		} else
			b43_dma_write(ring, B43_DMA32_RXRING, 0);
	}
}

static void free_all_descbuffers(struct b43_dmaring *ring)
{
	struct b43_dmadesc_meta *meta;
	int i;

	if (!ring->used_slots)
		return;
	for (i = 0; i < ring->nr_slots; i++) {
		/* get meta - ignore returned value */
		ring->ops->idx2desc(ring, i, &meta);

		if (!meta->skb || b43_dma_ptr_is_poisoned(meta->skb)) {
			B43_WARN_ON(!ring->tx);
			continue;
		}
		if (ring->tx) {
			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
		} else {
			unmap_descbuffer(ring, meta->dmaaddr,
					 ring->rx_buffersize, 0);
		}
		free_descriptor_buffer(ring, meta);
	}
}

static u64 supported_dma_mask(struct b43_wldev *dev)
{
	u32 tmp;
	u16 mmio_base;

	switch (dev->dev->bus_type) {
#ifdef CONFIG_B43_BCMA
	case B43_BUS_BCMA:
		tmp = bcma_aread32(dev->dev->bdev, BCMA_IOST);
		if (tmp & BCMA_IOST_DMA64)
			return DMA_BIT_MASK(64);
		break;
#endif
#ifdef CONFIG_B43_SSB
	case B43_BUS_SSB:
		tmp = ssb_read32(dev->dev->sdev, SSB_TMSHIGH);
		if (tmp & SSB_TMSHIGH_DMA64)
			return DMA_BIT_MASK(64);
		break;
#endif
	}

	mmio_base = b43_dmacontroller_base(0, 0);
	b43_write32(dev, mmio_base + B43_DMA32_TXCTL, B43_DMA32_TXADDREXT_MASK);
	tmp = b43_read32(dev, mmio_base + B43_DMA32_TXCTL);
	if (tmp & B43_DMA32_TXADDREXT_MASK)
		return DMA_BIT_MASK(32);

	return DMA_BIT_MASK(30);
}
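/* The 30-vs-32 bit probe above writes the ADDREXT mask into the first TX
 * control register and reads it back: cores implementing the address
 * extension bits retain the value, cores that don't read back 0 and are
 * limited to 30 addressable bits.
 */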

static enum b43_dmatype dma_mask_to_engine_type(u64 dmamask)
{
	if (dmamask == DMA_BIT_MASK(30))
		return B43_DMA_30BIT;
	if (dmamask == DMA_BIT_MASK(32))
		return B43_DMA_32BIT;
	if (dmamask == DMA_BIT_MASK(64))
		return B43_DMA_64BIT;
	B43_WARN_ON(1);
	return B43_DMA_30BIT;
}

/* Main initialization function. */
static
struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
				      int controller_index,
				      int for_tx,
				      enum b43_dmatype type)
{
	struct b43_dmaring *ring;
	int i, err;
	dma_addr_t dma_test;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		goto out;

	ring->nr_slots = B43_RXRING_SLOTS;
	if (for_tx)
		ring->nr_slots = B43_TXRING_SLOTS;

	ring->meta = kcalloc(ring->nr_slots, sizeof(struct b43_dmadesc_meta),
			     GFP_KERNEL);
	if (!ring->meta)
		goto err_kfree_ring;
	for (i = 0; i < ring->nr_slots; i++)
		ring->meta[i].skb = B43_DMA_PTR_POISON;

	ring->type = type;
	ring->dev = dev;
	ring->mmio_base = b43_dmacontroller_base(type, controller_index);
	ring->index = controller_index;
	if (type == B43_DMA_64BIT)
		ring->ops = &dma64_ops;
	else
		ring->ops = &dma32_ops;
	if (for_tx) {
		ring->tx = true;
		ring->current_slot = -1;
	} else {
		if (ring->index == 0) {
			switch (dev->fw.hdr_format) {
			case B43_FW_HDR_598:
				ring->rx_buffersize = B43_DMA0_RX_FW598_BUFSIZE;
				ring->frameoffset = B43_DMA0_RX_FW598_FO;
				break;
			case B43_FW_HDR_410:
			case B43_FW_HDR_351:
				ring->rx_buffersize = B43_DMA0_RX_FW351_BUFSIZE;
				ring->frameoffset = B43_DMA0_RX_FW351_FO;
				break;
			}
		} else
			B43_WARN_ON(1);
	}
#ifdef CONFIG_B43_DEBUG
	ring->last_injected_overflow = jiffies;
#endif

	if (for_tx) {
		/* Assumption: B43_TXRING_SLOTS is divisible by TX_SLOTS_PER_FRAME */
		BUILD_BUG_ON(B43_TXRING_SLOTS % TX_SLOTS_PER_FRAME != 0);

		ring->txhdr_cache = kcalloc(ring->nr_slots / TX_SLOTS_PER_FRAME,
					    b43_txhdr_size(dev),
					    GFP_KERNEL);
		if (!ring->txhdr_cache)
			goto err_kfree_meta;

		/* test for ability to dma to txhdr_cache */
		dma_test = dma_map_single(dev->dev->dma_dev,
					  ring->txhdr_cache,
					  b43_txhdr_size(dev),
					  DMA_TO_DEVICE);

		if (b43_dma_mapping_error(ring, dma_test,
					  b43_txhdr_size(dev), 1)) {
			/* ugh realloc */
			kfree(ring->txhdr_cache);
			ring->txhdr_cache = kcalloc(ring->nr_slots / TX_SLOTS_PER_FRAME,
						    b43_txhdr_size(dev),
						    GFP_KERNEL | GFP_DMA);
			if (!ring->txhdr_cache)
				goto err_kfree_meta;

			dma_test = dma_map_single(dev->dev->dma_dev,
						  ring->txhdr_cache,
						  b43_txhdr_size(dev),
						  DMA_TO_DEVICE);

			if (b43_dma_mapping_error(ring, dma_test,
						  b43_txhdr_size(dev), 1)) {

				b43err(dev->wl,
				       "TXHDR DMA allocation failed\n");
				goto err_kfree_txhdr_cache;
			}
		}

		dma_unmap_single(dev->dev->dma_dev,
				 dma_test, b43_txhdr_size(dev),
				 DMA_TO_DEVICE);
	}

	err = alloc_ringmemory(ring);
	if (err)
		goto err_kfree_txhdr_cache;
	err = dmacontroller_setup(ring);
	if (err)
		goto err_free_ringmemory;

      out:
	return ring;

      err_free_ringmemory:
	free_ringmemory(ring);
      err_kfree_txhdr_cache:
	kfree(ring->txhdr_cache);
      err_kfree_meta:
	kfree(ring->meta);
      err_kfree_ring:
	kfree(ring);
	ring = NULL;
	goto out;
}

#define divide(a, b)	({	\
	typeof(a) __a = a;	\
	do_div(__a, b);		\
	__a;			\
  })

#define modulo(a, b)	({	\
	typeof(a) __a = a;	\
	do_div(__a, b);		\
  })
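/* do_div(n, base) from <asm/div64.h> divides the 64-bit n in place, leaving
 * the quotient in n and returning the 32-bit remainder. Hence divide()
 * evaluates to the quotient and modulo() to the remainder.
 */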

/* Main cleanup function. */
static void b43_destroy_dmaring(struct b43_dmaring *ring,
				const char *ringname)
{
	if (!ring)
		return;

#ifdef CONFIG_B43_DEBUG
	{
		/* Print some statistics. */
		u64 failed_packets = ring->nr_failed_tx_packets;
		u64 succeed_packets = ring->nr_succeed_tx_packets;
		u64 nr_packets = failed_packets + succeed_packets;
		u64 permille_failed = 0, average_tries = 0;

		if (nr_packets)
			permille_failed = divide(failed_packets * 1000, nr_packets);
		if (nr_packets)
			average_tries = divide(ring->nr_total_packet_tries * 100, nr_packets);

		b43dbg(ring->dev->wl, "DMA-%u %s: "
		       "Used slots %d/%d, Failed frames %llu/%llu = %llu.%01llu%%, "
		       "Average tries %llu.%02llu\n",
		       (unsigned int)(ring->type), ringname,
		       ring->max_used_slots,
		       ring->nr_slots,
		       (unsigned long long)failed_packets,
		       (unsigned long long)nr_packets,
		       (unsigned long long)divide(permille_failed, 10),
		       (unsigned long long)modulo(permille_failed, 10),
		       (unsigned long long)divide(average_tries, 100),
		       (unsigned long long)modulo(average_tries, 100));
	}
#endif /* DEBUG */

	/* Device IRQs are disabled prior to entering this function,
	 * so there is no need to worry about concurrency with the RX handler.
	 */
	dmacontroller_cleanup(ring);
	free_all_descbuffers(ring);
	free_ringmemory(ring);

	kfree(ring->txhdr_cache);
	kfree(ring->meta);
	kfree(ring);
}

#define destroy_ring(dma, ring) do {				\
	b43_destroy_dmaring((dma)->ring, __stringify(ring));	\
	(dma)->ring = NULL;					\
    } while (0)
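/* Usage: destroy_ring(dma, rx_ring) expands to
 * b43_destroy_dmaring(dma->rx_ring, "rx_ring") and then NULLs the pointer,
 * so a ring is never torn down twice.
 */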

void b43_dma_free(struct b43_wldev *dev)
{
	struct b43_dma *dma;

	if (b43_using_pio_transfers(dev))
		return;
	dma = &dev->dma;

	destroy_ring(dma, rx_ring);
	destroy_ring(dma, tx_ring_AC_BK);
	destroy_ring(dma, tx_ring_AC_BE);
	destroy_ring(dma, tx_ring_AC_VI);
	destroy_ring(dma, tx_ring_AC_VO);
	destroy_ring(dma, tx_ring_mcast);
}

static int b43_dma_set_mask(struct b43_wldev *dev, u64 mask)
{
	u64 orig_mask = mask;
	bool fallback = false;
	int err;

	/* Try to set the DMA mask. If it fails, try falling back to a
	 * lower mask, as we can always also support a lower one. */
	while (1) {
		err = dma_set_mask_and_coherent(dev->dev->dma_dev, mask);
		if (!err)
			break;
		if (mask == DMA_BIT_MASK(64)) {
			mask = DMA_BIT_MASK(32);
			fallback = true;
			continue;
		}
		if (mask == DMA_BIT_MASK(32)) {
			mask = DMA_BIT_MASK(30);
			fallback = true;
			continue;
		}
		b43err(dev->wl, "The machine/kernel does not support "
		       "the required %u-bit DMA mask\n",
		       (unsigned int)dma_mask_to_engine_type(orig_mask));
		return -EOPNOTSUPP;
	}
	if (fallback) {
		b43info(dev->wl, "DMA mask fallback from %u-bit to %u-bit\n",
			(unsigned int)dma_mask_to_engine_type(orig_mask),
			(unsigned int)dma_mask_to_engine_type(mask));
	}

	return 0;
}

/* Some hardware with 64-bit DMA seems to be buggy and looks for the
 * translation bits in the low address word instead of the high one.
 */
static bool b43_dma_translation_in_low_word(struct b43_wldev *dev,
					    enum b43_dmatype type)
{
	if (type != B43_DMA_64BIT)
		return true;

#ifdef CONFIG_B43_SSB
	if (dev->dev->bus_type == B43_BUS_SSB &&
	    dev->dev->sdev->bus->bustype == SSB_BUSTYPE_PCI &&
	    !(pci_is_pcie(dev->dev->sdev->bus->host_pci) &&
	      ssb_read32(dev->dev->sdev, SSB_TMSHIGH) & SSB_TMSHIGH_DMA64))
			return true;
#endif
	return false;
}

int b43_dma_init(struct b43_wldev *dev)
{
	struct b43_dma *dma = &dev->dma;
	int err;
	u64 dmamask;
	enum b43_dmatype type;

	dmamask = supported_dma_mask(dev);
	type = dma_mask_to_engine_type(dmamask);
	err = b43_dma_set_mask(dev, dmamask);
	if (err)
		return err;

	switch (dev->dev->bus_type) {
#ifdef CONFIG_B43_BCMA
	case B43_BUS_BCMA:
		dma->translation = bcma_core_dma_translation(dev->dev->bdev);
		break;
#endif
#ifdef CONFIG_B43_SSB
	case B43_BUS_SSB:
		dma->translation = ssb_dma_translation(dev->dev->sdev);
		break;
#endif
	}
	dma->translation_in_low = b43_dma_translation_in_low_word(dev, type);

	dma->parity = true;
#ifdef CONFIG_B43_BCMA
	/* TODO: find out which SSB devices need disabling parity */
	if (dev->dev->bus_type == B43_BUS_BCMA)
		dma->parity = false;
#endif

	err = -ENOMEM;
	/* setup TX DMA channels. */
	dma->tx_ring_AC_BK = b43_setup_dmaring(dev, 0, 1, type);
	if (!dma->tx_ring_AC_BK)
		goto out;

	dma->tx_ring_AC_BE = b43_setup_dmaring(dev, 1, 1, type);
	if (!dma->tx_ring_AC_BE)
		goto err_destroy_bk;

	dma->tx_ring_AC_VI = b43_setup_dmaring(dev, 2, 1, type);
	if (!dma->tx_ring_AC_VI)
		goto err_destroy_be;

	dma->tx_ring_AC_VO = b43_setup_dmaring(dev, 3, 1, type);
	if (!dma->tx_ring_AC_VO)
		goto err_destroy_vi;

	dma->tx_ring_mcast = b43_setup_dmaring(dev, 4, 1, type);
	if (!dma->tx_ring_mcast)
		goto err_destroy_vo;

	/* setup RX DMA channel. */
	dma->rx_ring = b43_setup_dmaring(dev, 0, 0, type);
	if (!dma->rx_ring)
		goto err_destroy_mcast;

	/* No support for the TX status DMA ring. */
	B43_WARN_ON(dev->dev->core_rev < 5);

	b43dbg(dev->wl, "%u-bit DMA initialized\n",
	       (unsigned int)type);
	err = 0;
out:
	return err;

err_destroy_mcast:
	destroy_ring(dma, tx_ring_mcast);
err_destroy_vo:
	destroy_ring(dma, tx_ring_AC_VO);
err_destroy_vi:
	destroy_ring(dma, tx_ring_AC_VI);
err_destroy_be:
	destroy_ring(dma, tx_ring_AC_BE);
err_destroy_bk:
	destroy_ring(dma, tx_ring_AC_BK);
	return err;
}

/* Generate a cookie for the TX header. */
static u16 generate_cookie(struct b43_dmaring *ring, int slot)
{
	u16 cookie;

	/* Use the upper 4 bits of the cookie as
	 * DMA controller ID and store the slot number
	 * in the lower 12 bits.
	 * Note that the cookie must never be 0, as this
	 * is a special value used in the RX path.
	 * Nor can it be 0xFFFF, because that value is
	 * reserved for multicast frames.
	 */
	cookie = (((u16)ring->index + 1) << 12);
	B43_WARN_ON(slot & ~0x0FFF);
	cookie |= (u16)slot;

	return cookie;
}
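/* Worked example: controller index 2, slot 5 yields
 * ((2 + 1) << 12) | 5 = 0x3005; parse_cookie() below recovers the ring from
 * (cookie & 0xF000) == 0x3000 and the slot from (cookie & 0x0FFF).
 */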

/* Inspect a cookie and find out to which controller/slot it belongs. */
static
struct b43_dmaring *parse_cookie(struct b43_wldev *dev, u16 cookie, int *slot)
{
	struct b43_dma *dma = &dev->dma;
	struct b43_dmaring *ring = NULL;

	switch (cookie & 0xF000) {
	case 0x1000:
		ring = dma->tx_ring_AC_BK;
		break;
	case 0x2000:
		ring = dma->tx_ring_AC_BE;
		break;
	case 0x3000:
		ring = dma->tx_ring_AC_VI;
		break;
	case 0x4000:
		ring = dma->tx_ring_AC_VO;
		break;
	case 0x5000:
		ring = dma->tx_ring_mcast;
		break;
	}
	*slot = (cookie & 0x0FFF);
	if (unlikely(!ring || *slot < 0 || *slot >= ring->nr_slots)) {
		b43dbg(dev->wl, "TX-status contains "
		       "invalid cookie: 0x%04X\n", cookie);
		return NULL;
	}

	return ring;
}

static int dma_tx_fragment(struct b43_dmaring *ring,
			   struct sk_buff *skb)
{
	const struct b43_dma_ops *ops = ring->ops;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct b43_private_tx_info *priv_info = b43_get_priv_tx_info(info);
	u8 *header;
	int slot, old_top_slot, old_used_slots;
	int err;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	struct b43_dmadesc_meta *meta_hdr;
	u16 cookie;
	size_t hdrsize = b43_txhdr_size(ring->dev);

	/* Important note: If the number of used DMA slots per TX frame
	 * is changed here, the TX_SLOTS_PER_FRAME definition at the top of
	 * the file has to be updated, too!
	 */

	old_top_slot = ring->current_slot;
	old_used_slots = ring->used_slots;

	/* Get a slot for the header. */
	slot = request_slot(ring);
	desc = ops->idx2desc(ring, slot, &meta_hdr);
	memset(meta_hdr, 0, sizeof(*meta_hdr));

	header = &(ring->txhdr_cache[(slot / TX_SLOTS_PER_FRAME) * hdrsize]);
	cookie = generate_cookie(ring, slot);
	err = b43_generate_txhdr(ring->dev, header,
				 skb, info, cookie);
	if (unlikely(err)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return err;
	}

	meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
					   hdrsize, 1);
	if (b43_dma_mapping_error(ring, meta_hdr->dmaaddr, hdrsize, 1)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return -EIO;
	}
	ops->fill_descriptor(ring, desc, meta_hdr->dmaaddr,
			     hdrsize, 1, 0, 0);

	/* Get a slot for the payload. */
	slot = request_slot(ring);
	desc = ops->idx2desc(ring, slot, &meta);
	memset(meta, 0, sizeof(*meta));

	meta->skb = skb;
	meta->is_last_fragment = true;
	priv_info->bouncebuffer = NULL;

	meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
	/* create a bounce buffer in zone_dma on mapping failure. */
	if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
		priv_info->bouncebuffer = kmemdup(skb->data, skb->len,
						  GFP_ATOMIC | GFP_DMA);
		if (!priv_info->bouncebuffer) {
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -ENOMEM;
			goto out_unmap_hdr;
		}

		meta->dmaaddr = map_descbuffer(ring, priv_info->bouncebuffer, skb->len, 1);
		if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
			kfree(priv_info->bouncebuffer);
			priv_info->bouncebuffer = NULL;
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -EIO;
			goto out_unmap_hdr;
		}
	}

	ops->fill_descriptor(ring, desc, meta->dmaaddr, skb->len, 0, 1, 1);

	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		/* Tell the firmware about the cookie of the last
		 * mcast frame, so it can clear the more-data bit in it. */
		b43_shm_write16(ring->dev, B43_SHM_SHARED,
				B43_SHM_SH_MCASTCOOKIE, cookie);
	}
	/* Now transfer the whole frame. */
	wmb();
	ops->poke_tx(ring, next_slot(ring, slot));
	return 0;

out_unmap_hdr:
	unmap_descbuffer(ring, meta_hdr->dmaaddr,
			 hdrsize, 1);
	return err;
}

static inline int should_inject_overflow(struct b43_dmaring *ring)
{
#ifdef CONFIG_B43_DEBUG
	if (unlikely(b43_debug(ring->dev, B43_DBG_DMAOVERFLOW))) {
		/* Check if we should inject another ringbuffer overflow
		 * to test handling of this situation in the stack. */
		unsigned long next_overflow;

		next_overflow = ring->last_injected_overflow + HZ;
		if (time_after(jiffies, next_overflow)) {
			ring->last_injected_overflow = jiffies;
			b43dbg(ring->dev->wl,
			       "Injecting TX ring overflow on "
			       "DMA controller %d\n", ring->index);
			return 1;
		}
	}
#endif /* CONFIG_B43_DEBUG */
	return 0;
}

/* Static mapping of mac80211's queues (priorities) to b43 DMA rings. */
static struct b43_dmaring *select_ring_by_priority(struct b43_wldev *dev,
						   u8 queue_prio)
{
	struct b43_dmaring *ring;

	if (dev->qos_enabled) {
		/* 0 = highest priority */
		switch (queue_prio) {
		default:
			B43_WARN_ON(1);
			/* fallthrough */
		case 0:
			ring = dev->dma.tx_ring_AC_VO;
			break;
		case 1:
			ring = dev->dma.tx_ring_AC_VI;
			break;
		case 2:
			ring = dev->dma.tx_ring_AC_BE;
			break;
		case 3:
			ring = dev->dma.tx_ring_AC_BK;
			break;
		}
	} else
		ring = dev->dma.tx_ring_AC_BE;

	return ring;
}

int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
{
	struct b43_dmaring *ring;
	struct ieee80211_hdr *hdr;
	int err = 0;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	hdr = (struct ieee80211_hdr *)skb->data;
	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		/* The multicast ring will be sent after the DTIM */
		ring = dev->dma.tx_ring_mcast;
		/* Set the more-data bit. Ucode will clear it on
		 * the last frame for us. */
		hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
	} else {
		/* Decide by priority where to put this frame. */
		ring = select_ring_by_priority(
			dev, skb_get_queue_mapping(skb));
	}

	B43_WARN_ON(!ring->tx);

	if (unlikely(ring->stopped)) {
		/* We get here only because of a bug in mac80211.
		 * Because of a race, one packet may be queued after
		 * the queue is stopped, thus we got called when we shouldn't.
		 * For now, just refuse the transmit. */
		if (b43_debug(dev, B43_DBG_DMAVERBOSE))
			b43err(dev->wl, "Packet after queue stopped\n");
		err = -ENOSPC;
		goto out;
	}

	if (WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME)) {
		/* If we get here, we have a real error with the queue
		 * full, but queues not stopped. */
		b43err(dev->wl, "DMA queue overflow\n");
		err = -ENOSPC;
		goto out;
	}

	/* Assign the queue number to the ring (if not already done before)
	 * so TX status handling can use it. The queue to ring mapping is
	 * static, so we don't need to store it per frame. */
	ring->queue_prio = skb_get_queue_mapping(skb);

	err = dma_tx_fragment(ring, skb);
	if (unlikely(err == -ENOKEY)) {
		/* Drop this packet, as we don't have the encryption key
		 * anymore and must not transmit it unencrypted. */
		ieee80211_free_txskb(dev->wl->hw, skb);
		err = 0;
		goto out;
	}
	if (unlikely(err)) {
		b43err(dev->wl, "DMA tx mapping failure\n");
		goto out;
	}
	if ((free_slots(ring) < TX_SLOTS_PER_FRAME) ||
	    should_inject_overflow(ring)) {
		/* This TX ring is full. */
		unsigned int skb_mapping = skb_get_queue_mapping(skb);
		ieee80211_stop_queue(dev->wl->hw, skb_mapping);
		dev->wl->tx_queue_stopped[skb_mapping] = 1;
		ring->stopped = true;
		if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
			b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index);
		}
	}
out:

	return err;
}

void b43_dma_handle_txstatus(struct b43_wldev *dev,
			     const struct b43_txstatus *status)
{
	const struct b43_dma_ops *ops;
	struct b43_dmaring *ring;
	struct b43_dmadesc_meta *meta;
	static const struct b43_txstatus fake; /* filled with 0 */
	const struct b43_txstatus *txstat;
	int slot, firstused;
	bool frame_succeed;
	int skip;
	static u8 err_out1;

	ring = parse_cookie(dev, status->cookie, &slot);
	if (unlikely(!ring))
		return;
	B43_WARN_ON(!ring->tx);

	/* Sanity check: TX packets are processed in-order on one ring.
	 * Check if the slot deduced from the cookie really is the first
	 * used slot. */
	firstused = ring->current_slot - ring->used_slots + 1;
	if (firstused < 0)
		firstused = ring->nr_slots + firstused;
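	/* Example: with nr_slots == 256, current_slot == 2 and used_slots == 4,
	 * 2 - 4 + 1 = -1 wraps to slot 255, the oldest in-flight slot. */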

	skip = 0;
	if (unlikely(slot != firstused)) {
		/* This possibly is a firmware bug and will result in
		 * malfunction, memory leaks and/or stall of DMA functionality.
		 */
		if (slot == next_slot(ring, next_slot(ring, firstused))) {
			/* If a single header/data pair was missed, skip over
			 * the first two slots in an attempt to recover.
			 */
			slot = firstused;
			skip = 2;
			if (!err_out1) {
				/* Report the error once. */
				b43dbg(dev->wl,
				       "Skip on DMA ring %d slot %d.\n",
				       ring->index, slot);
				err_out1 = 1;
			}
		} else {
			/* More than a single header/data pair were missed.
			 * Report this error. If running with open-source
			 * firmware, then reset the controller to
			 * revive operation.
			 */
			b43dbg(dev->wl,
			       "Out of order TX status report on DMA ring %d. Expected %d, but got %d\n",
			       ring->index, firstused, slot);
			if (dev->fw.opensource)
				b43_controller_restart(dev, "Out of order TX");
			return;
		}
	}

	ops = ring->ops;
	while (1) {
		B43_WARN_ON(slot < 0 || slot >= ring->nr_slots);
		/* get meta - ignore returned value */
		ops->idx2desc(ring, slot, &meta);

		if (b43_dma_ptr_is_poisoned(meta->skb)) {
			b43dbg(dev->wl, "Poisoned TX slot %d (first=%d) "
			       "on ring %d\n",
			       slot, firstused, ring->index);
			break;
		}

		if (meta->skb) {
			struct b43_private_tx_info *priv_info =
			     b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb));

			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
			kfree(priv_info->bouncebuffer);
			priv_info->bouncebuffer = NULL;
		} else {
			unmap_descbuffer(ring, meta->dmaaddr,
					 b43_txhdr_size(dev), 1);
		}

		if (meta->is_last_fragment) {
			struct ieee80211_tx_info *info;

			if (unlikely(!meta->skb)) {
				/* The last scatter-gather fragment of a frame
				 * is the slot that owns the skb, so the
				 * pointer must not be NULL here.
				 */
				b43dbg(dev->wl, "TX status unexpected NULL skb "
				       "at slot %d (first=%d) on ring %d\n",
				       slot, firstused, ring->index);
				break;
			}

			info = IEEE80211_SKB_CB(meta->skb);

			/*
			 * Call back to inform the ieee80211 subsystem about
			 * the status of the transmission. When skipping over
			 * a missed TX status report, use a status structure
			 * filled with zeros to indicate that the frame was not
			 * sent (frame_count 0) and not acknowledged
			 */
			if (unlikely(skip))
				txstat = &fake;
			else
				txstat = status;

			frame_succeed = b43_fill_txstatus_report(dev, info,
								 txstat);
#ifdef CONFIG_B43_DEBUG
			if (frame_succeed)
				ring->nr_succeed_tx_packets++;
			else
				ring->nr_failed_tx_packets++;
			ring->nr_total_packet_tries += status->frame_count;
#endif /* DEBUG */
			ieee80211_tx_status(dev->wl->hw, meta->skb);

			/* skb will be freed by ieee80211_tx_status().
			 * Poison our pointer. */
			meta->skb = B43_DMA_PTR_POISON;
		} else {
			/* No need to call free_descriptor_buffer here, as
			 * this is only the txhdr, which is not allocated.
			 */
			if (unlikely(meta->skb)) {
				b43dbg(dev->wl, "TX status unexpected non-NULL skb "
				       "at slot %d (first=%d) on ring %d\n",
				       slot, firstused, ring->index);
				break;
			}
		}

		/* Everything unmapped and free'd. So it's not used anymore. */
		ring->used_slots--;

		if (meta->is_last_fragment && !skip) {
			/* This is the last scatter-gather
			 * fragment of the frame. We are done. */
			break;
		}
		slot = next_slot(ring, slot);
		if (skip > 0)
			--skip;
	}
	if (ring->stopped) {
		B43_WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME);
		ring->stopped = false;
	}

	if (dev->wl->tx_queue_stopped[ring->queue_prio]) {
		dev->wl->tx_queue_stopped[ring->queue_prio] = 0;
	} else {
		/* If the driver queue is running wake the corresponding
		 * mac80211 queue. */
		ieee80211_wake_queue(dev->wl->hw, ring->queue_prio);
		if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
			b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index);
		}
	}
	/* Add work to the queue. */
	ieee80211_queue_work(dev->wl->hw, &dev->wl->tx_work);
}

static void dma_rx(struct b43_dmaring *ring, int *slot)
{
	const struct b43_dma_ops *ops = ring->ops;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	struct b43_rxhdr_fw4 *rxhdr;
	struct sk_buff *skb;
	u16 len;
	int err;
	dma_addr_t dmaaddr;

	desc = ops->idx2desc(ring, *slot, &meta);

	sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
	skb = meta->skb;

	rxhdr = (struct b43_rxhdr_fw4 *)skb->data;
	len = le16_to_cpu(rxhdr->frame_len);
	if (len == 0) {
		int i = 0;

		do {
			udelay(2);
			barrier();
			len = le16_to_cpu(rxhdr->frame_len);
		} while (len == 0 && i++ < 5);
		if (unlikely(len == 0)) {
			dmaaddr = meta->dmaaddr;
			goto drop_recycle_buffer;
		}
	}
	if (unlikely(b43_rx_buffer_is_poisoned(ring, skb))) {
		/* Something went wrong with the DMA.
		 * The device did not touch the buffer and did not overwrite the poison. */
		b43dbg(ring->dev->wl, "DMA RX: Dropping poisoned buffer.\n");
		dmaaddr = meta->dmaaddr;
		goto drop_recycle_buffer;
	}
	if (unlikely(len + ring->frameoffset > ring->rx_buffersize)) {
		/* The data did not fit into one descriptor buffer
		 * and is split over multiple buffers.
		 * This should never happen, as we try to allocate buffers
		 * big enough. So simply ignore this packet.
		 */
		int cnt = 0;
		s32 tmp = len;

		while (1) {
			desc = ops->idx2desc(ring, *slot, &meta);
			/* recycle the descriptor buffer. */
			b43_poison_rx_buffer(ring, meta->skb);
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			*slot = next_slot(ring, *slot);
			cnt++;
			tmp -= ring->rx_buffersize;
			if (tmp <= 0)
				break;
		}
		b43err(ring->dev->wl, "DMA RX buffer too small "
		       "(len: %u, buffer: %u, nr-dropped: %d)\n",
		       len, ring->rx_buffersize, cnt);
		goto drop;
	}

	dmaaddr = meta->dmaaddr;
	err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
	if (unlikely(err)) {
		b43dbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer() failed\n");
		goto drop_recycle_buffer;
	}

	unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
	skb_put(skb, len + ring->frameoffset);
	skb_pull(skb, ring->frameoffset);

	b43_rx(ring->dev, skb, rxhdr);
drop:
	return;

drop_recycle_buffer:
	/* Poison and recycle the RX buffer. */
	b43_poison_rx_buffer(ring, skb);
	sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize);
}

void b43_dma_handle_rx_overflow(struct b43_dmaring *ring)
{
	int current_slot, previous_slot;

	B43_WARN_ON(ring->tx);

	/* The device has filled all buffers; drop all packets and let TCP
	 * decrease its speed.
	 * Decrementing the RX index by one lets the device see all slots
	 * as free again.
	 */
	/* TODO: How to increase rx_drop in mac80211? */
	current_slot = ring->ops->get_current_rxslot(ring);
	previous_slot = prev_slot(ring, current_slot);
	ring->ops->set_current_rxslot(ring, previous_slot);
}

void b43_dma_rx(struct b43_dmaring *ring)
{
	const struct b43_dma_ops *ops = ring->ops;
	int slot, current_slot;
	int used_slots = 0;

	B43_WARN_ON(ring->tx);
	current_slot = ops->get_current_rxslot(ring);
	B43_WARN_ON(!(current_slot >= 0 && current_slot < ring->nr_slots));

	slot = ring->current_slot;
	for (; slot != current_slot; slot = next_slot(ring, slot)) {
		dma_rx(ring, &slot);
		update_max_used_slots(ring, ++used_slots);
	}
	wmb();
	ops->set_current_rxslot(ring, slot);
	ring->current_slot = slot;
}

static void b43_dma_tx_suspend_ring(struct b43_dmaring *ring)
{
	B43_WARN_ON(!ring->tx);
	ring->ops->tx_suspend(ring);
}

static void b43_dma_tx_resume_ring(struct b43_dmaring *ring)
{
	B43_WARN_ON(!ring->tx);
	ring->ops->tx_resume(ring);
}

void b43_dma_tx_suspend(struct b43_wldev *dev)
{
	b43_power_saving_ctl_bits(dev, B43_PS_AWAKE);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_BK);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_BE);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_VI);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_VO);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_mcast);
}

void b43_dma_tx_resume(struct b43_wldev *dev)
{
	b43_dma_tx_resume_ring(dev->dma.tx_ring_mcast);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_VO);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_VI);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_BE);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_BK);
	b43_power_saving_ctl_bits(dev, 0);
}

static void direct_fifo_rx(struct b43_wldev *dev, enum b43_dmatype type,
			   u16 mmio_base, bool enable)
{
	u32 ctl;

	if (type == B43_DMA_64BIT) {
		ctl = b43_read32(dev, mmio_base + B43_DMA64_RXCTL);
		ctl &= ~B43_DMA64_RXDIRECTFIFO;
		if (enable)
			ctl |= B43_DMA64_RXDIRECTFIFO;
		b43_write32(dev, mmio_base + B43_DMA64_RXCTL, ctl);
	} else {
		ctl = b43_read32(dev, mmio_base + B43_DMA32_RXCTL);
		ctl &= ~B43_DMA32_RXDIRECTFIFO;
		if (enable)
			ctl |= B43_DMA32_RXDIRECTFIFO;
		b43_write32(dev, mmio_base + B43_DMA32_RXCTL, ctl);
	}
}

/* Enable/Disable Direct FIFO Receive Mode (PIO) on a RX engine.
 * This is called from PIO code, so DMA structures are not available. */
void b43_dma_direct_fifo_rx(struct b43_wldev *dev,
			    unsigned int engine_index, bool enable)
{
	enum b43_dmatype type;
	u16 mmio_base;

	type = dma_mask_to_engine_type(supported_dma_mask(dev));

	mmio_base = b43_dmacontroller_base(type, engine_index);
	direct_fifo_rx(dev, type, mmio_base, enable);
}