1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * MUSB OTG driver peripheral support
4  *
5  * Copyright 2005 Mentor Graphics Corporation
6  * Copyright (C) 2005-2006 by Texas Instruments
7  * Copyright (C) 2006-2007 Nokia Corporation
8  * Copyright (C) 2009 MontaVista Software, Inc. <source@mvista.com>
9  */
10 
11 #ifndef __UBOOT__
12 #include <log.h>
13 #include <dm/device_compat.h>
14 #include <dm/devres.h>
15 #include <linux/kernel.h>
16 #include <linux/list.h>
17 #include <linux/timer.h>
18 #include <linux/module.h>
19 #include <linux/smp.h>
20 #include <linux/spinlock.h>
21 #include <linux/delay.h>
22 #include <linux/dma-mapping.h>
23 #include <linux/slab.h>
24 #else
25 #include <common.h>
26 #include <dm.h>
27 #include <dm/device_compat.h>
28 #include <linux/bug.h>
29 #include <linux/usb/ch9.h>
30 #include "linux-compat.h"
31 #endif
32 
33 #include "musb_core.h"
34 
35 
36 /* MUSB PERIPHERAL status 3-mar-2006:
37  *
38  * - EP0 seems solid.  It passes both USBCV and usbtest control cases.
39  *   Minor glitches:
40  *
41  *     + remote wakeup to Linux hosts works, but USBCV failures were
42  *       seen in one test run (operator error?)
43  *     + endpoint halt tests -- in both usbtest and usbcv -- seem
44  *       to break when dma is enabled ... is something wrongly
45  *       clearing SENDSTALL?
46  *
47  * - Mass storage behaved ok when last tested.  Network traffic patterns
48  *   (with lots of short transfers etc) need retesting; they turn up the
49  *   worst cases of the DMA engine, since short packets are typical but not
50  *   required.
51  *
52  * - TX/IN
53  *     + both pio and dma behave well with network and g_zero tests
54  *     + no cppi throughput issues other than no-hw-queueing
55  *     + failed with FLAT_REG (DaVinci)
56  *     + seems to behave with double buffering, PIO -and- CPPI
57  *     + with gadgetfs + AIO, requests got lost?
58  *
59  * - RX/OUT
60  *     + both pio and dma behave well with network and g_zero tests
61  *     + dma is slow in typical case (short_not_ok is clear)
62  *     + double buffering ok with PIO
63  *     + double buffering *FAILS* with CPPI, wrong data bytes sometimes
64  *     + request lossage observed with gadgetfs
65  *
66  * - ISO not tested ... might work, but only weakly isochronous
67  *
68  * - Gadget driver disabling of softconnect during bind() is ignored; so
69  *   drivers can't hold off host requests until userspace is ready.
70  *   (Workaround:  they can turn it off later.)
71  *
72  * - PORTABILITY (assumes PIO works):
73  *     + DaVinci, basically works with cppi dma
74  *     + OMAP 2430, ditto with mentor dma
75  *     + TUSB 6010, platform-specific dma in the works
76  */
77 
78 /* ----------------------------------------------------------------------- */
79 
80 #define is_buffer_mapped(req) (is_dma_capable() && \
81 					(req->map_state != UN_MAPPED))
82 
83 #ifndef CONFIG_USB_MUSB_PIO_ONLY
84 /* Map the buffer for DMA */
85 
86 static inline void map_dma_buffer(struct musb_request *request,
87 			struct musb *musb, struct musb_ep *musb_ep)
88 {
89 	int compatible = true;
90 	struct dma_controller *dma = musb->dma_controller;
91 
92 	request->map_state = UN_MAPPED;
93 
94 	if (!is_dma_capable() || !musb_ep->dma)
95 		return;
96 
97 	/* Check if DMA engine can handle this request.
98 	 * DMA code must reject the USB request explicitly.
99 	 * Default behaviour is to map the request.
100 	 */
101 	if (dma->is_compatible)
102 		compatible = dma->is_compatible(musb_ep->dma,
103 				musb_ep->packet_sz, request->request.buf,
104 				request->request.length);
105 	if (!compatible)
106 		return;
107 
108 	if (request->request.dma == DMA_ADDR_INVALID) {
109 		request->request.dma = dma_map_single(
110 				musb->controller,
111 				request->request.buf,
112 				request->request.length,
113 				request->tx
114 					? DMA_TO_DEVICE
115 					: DMA_FROM_DEVICE);
116 		request->map_state = MUSB_MAPPED;
117 	} else {
118 		dma_sync_single_for_device(musb->controller,
119 			request->request.dma,
120 			request->request.length,
121 			request->tx
122 				? DMA_TO_DEVICE
123 				: DMA_FROM_DEVICE);
124 		request->map_state = PRE_MAPPED;
125 	}
126 }
127 
128 /* Unmap the buffer from DMA and map it back to the CPU */
129 static inline void unmap_dma_buffer(struct musb_request *request,
130 				struct musb *musb)
131 {
132 	if (!is_buffer_mapped(request))
133 		return;
134 
135 	if (request->request.dma == DMA_ADDR_INVALID) {
136 		dev_vdbg(musb->controller,
137 				"not unmapping a never mapped buffer\n");
138 		return;
139 	}
140 	if (request->map_state == MUSB_MAPPED) {
141 		dma_unmap_single(musb->controller,
142 			request->request.dma,
143 			request->request.length,
144 			request->tx
145 				? DMA_TO_DEVICE
146 				: DMA_FROM_DEVICE);
147 		request->request.dma = DMA_ADDR_INVALID;
148 	} else { /* PRE_MAPPED */
149 		dma_sync_single_for_cpu(musb->controller,
150 			request->request.dma,
151 			request->request.length,
152 			request->tx
153 				? DMA_TO_DEVICE
154 				: DMA_FROM_DEVICE);
155 	}
156 	request->map_state = UN_MAPPED;
157 }
158 #else
159 static inline void map_dma_buffer(struct musb_request *request,
160 			struct musb *musb, struct musb_ep *musb_ep)
161 {
162 }
163 
164 static inline void unmap_dma_buffer(struct musb_request *request,
165 				struct musb *musb)
166 {
167 }
168 #endif
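
/*
 * Illustrative only (kept under #if 0, not built): a minimal sketch of how
 * the helpers above pair up around a transfer. The function name
 * example_dma_mapping_lifecycle() is hypothetical; the map/unmap calls are
 * the real API, used by musb_gadget_queue() and musb_g_giveback() below.
 */
#if 0
static void example_dma_mapping_lifecycle(struct musb_request *req,
		struct musb *musb, struct musb_ep *musb_ep)
{
	/*
	 * Before programming a DMA channel: map (or sync) the buffer.
	 * map_dma_buffer() picks MUSB_MAPPED (mapped here by this driver)
	 * or PRE_MAPPED (the gadget driver supplied request.dma), and
	 * otherwise leaves the request UN_MAPPED.
	 */
	map_dma_buffer(req, musb, musb_ep);

	/* ... program the DMA channel and wait for completion ... */

	/*
	 * Afterwards (or on PIO fallback): hand the buffer back to the
	 * CPU. Safe to call unconditionally -- it is a no-op when the
	 * request was never mapped.
	 */
	unmap_dma_buffer(req, musb);
}
#endif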
169 
170 /*
171  * Immediately complete a request.
172  *
173  * @param request the request to complete
174  * @param status the status to complete the request with
175  * Context: controller locked, IRQs blocked.
176  */
177 void musb_g_giveback(
178 	struct musb_ep		*ep,
179 	struct usb_request	*request,
180 	int			status)
181 __releases(ep->musb->lock)
182 __acquires(ep->musb->lock)
183 {
184 	struct musb_request	*req;
185 	struct musb		*musb;
186 	int			busy = ep->busy;
187 
188 	req = to_musb_request(request);
189 
190 	list_del(&req->list);
191 	if (req->request.status == -EINPROGRESS)
192 		req->request.status = status;
193 	musb = req->musb;
194 
195 	ep->busy = 1;
196 	spin_unlock(&musb->lock);
197 	unmap_dma_buffer(req, musb);
198 	if (request->status == 0)
199 		dev_dbg(musb->controller, "%s done request %p,  %d/%d\n",
200 				ep->end_point.name, request,
201 				req->request.actual, req->request.length);
202 	else
203 		dev_dbg(musb->controller, "%s request %p, %d/%d fault %d\n",
204 				ep->end_point.name, request,
205 				req->request.actual, req->request.length,
206 				request->status);
207 	req->request.complete(&req->ep->end_point, &req->request);
208 	spin_lock(&musb->lock);
209 	ep->busy = busy;
210 }
211 
212 /* ----------------------------------------------------------------------- */
213 
214 /*
215  * Abort requests queued to an endpoint, using the given status. Synchronous.
216  * Caller must hold the controller lock, have IRQs blocked, and have selected this ep.
217  */
218 static void nuke(struct musb_ep *ep, const int status)
219 {
220 	struct musb		*musb = ep->musb;
221 	struct musb_request	*req = NULL;
222 	void __iomem *epio = ep->musb->endpoints[ep->current_epnum].regs;
223 
224 	ep->busy = 1;
225 
226 	if (is_dma_capable() && ep->dma) {
227 		struct dma_controller	*c = ep->musb->dma_controller;
228 		int value;
229 
230 		if (ep->is_in) {
231 			/*
232 			 * The programming guide says that we must not clear
233 			 * the DMAMODE bit before DMAENAB, so we only
234 			 * clear it in the second write...
235 			 */
236 			musb_writew(epio, MUSB_TXCSR,
237 				    MUSB_TXCSR_DMAMODE | MUSB_TXCSR_FLUSHFIFO);
238 			musb_writew(epio, MUSB_TXCSR,
239 					0 | MUSB_TXCSR_FLUSHFIFO);
240 		} else {
241 			musb_writew(epio, MUSB_RXCSR,
242 					0 | MUSB_RXCSR_FLUSHFIFO);
243 			musb_writew(epio, MUSB_RXCSR,
244 					0 | MUSB_RXCSR_FLUSHFIFO);
245 		}
246 
247 		value = c->channel_abort(ep->dma);
248 		dev_dbg(musb->controller, "%s: abort DMA --> %d\n",
249 				ep->name, value);
250 		c->channel_release(ep->dma);
251 		ep->dma = NULL;
252 	}
253 
254 	while (!list_empty(&ep->req_list)) {
255 		req = list_first_entry(&ep->req_list, struct musb_request, list);
256 		musb_g_giveback(ep, &req->request, status);
257 	}
258 }
259 
260 /* ----------------------------------------------------------------------- */
261 
262 /* Data transfers - pure PIO, pure DMA, or mixed mode */
263 
264 /*
265  * This assumes the separate CPPI engine is responding to DMA requests
266  * from the usb core ... sequenced a bit differently from mentor dma.
267  */
268 
269 static inline int max_ep_writesize(struct musb *musb, struct musb_ep *ep)
270 {
271 	if (can_bulk_split(musb, ep->type))
272 		return ep->hw_ep->max_packet_sz_tx;
273 	else
274 		return ep->packet_sz;
275 }
276 
277 
278 #ifdef CONFIG_USB_INVENTRA_DMA
279 
280 /* Peripheral tx (IN) using Mentor DMA works as follows:
281 	Only mode 0 is used for transfers <= wPktSize,
282 	mode 1 is used for larger transfers,
283 
284 	One of the following happens:
285 	- Host sends IN token which causes an endpoint interrupt
286 		-> TxAvail
287 			-> if DMA is currently busy, exit.
288 			-> if queue is non-empty, txstate().
289 
290 	- Request is queued by the gadget driver.
291 		-> if queue was previously empty, txstate()
292 
293 	txstate()
294 		-> start
295 		  /\	-> setup DMA
296 		  |     (data is transferred to the FIFO, then sent out when
297 		  |	IN token(s) are recd from Host.)
298 		  |		-> DMA interrupt on completion
299 		  |		   calls TxAvail.
300 		  |		      -> stop DMA, ~DMAENAB,
301 		  |		      -> set TxPktRdy for last short pkt or zlp
302 		  |		      -> Complete Request
303 		  |		      -> Continue next request (call txstate)
304 		  |___________________________________|
305 
306  * Non-Mentor DMA engines can of course work differently, such as by
307  * upleveling from irq-per-packet to irq-per-buffer.
308  */
309 
310 #endif
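
/*
 * Illustrative only (kept under #if 0, not built): the mode choice from the
 * flow above, condensed. example_tx_dma_mode() is a hypothetical helper;
 * txstate() below performs the real selection together with the TXCSR
 * programming.
 */
#if 0
static unsigned example_tx_dma_mode(struct musb_ep *musb_ep,
		size_t request_size)
{
	/* transfers smaller than one packet: mode 0 (irq per packet);
	 * anything larger: mode 1 (irq per buffer) */
	return (request_size < musb_ep->packet_sz) ? 0 : 1;
}
#endif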
311 
312 /*
313  * An endpoint is transmitting data. This can be called either from
314  * the IRQ routine or from ep.queue() to kickstart a request on an
315  * endpoint.
316  *
317  * Context: controller locked, IRQs blocked, endpoint selected
318  */
319 static void txstate(struct musb *musb, struct musb_request *req)
320 {
321 	u8			epnum = req->epnum;
322 	struct musb_ep		*musb_ep;
323 	void __iomem		*epio = musb->endpoints[epnum].regs;
324 	struct usb_request	*request;
325 	u16			fifo_count = 0, csr;
326 	int			use_dma = 0;
327 
328 	musb_ep = req->ep;
329 
330 	/* Check if EP is disabled */
331 	if (!musb_ep->desc) {
332 		dev_dbg(musb->controller, "ep:%s disabled - ignore request\n",
333 						musb_ep->end_point.name);
334 		return;
335 	}
336 
337 	/* we shouldn't get here while DMA is active ... but we do ... */
338 	if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
339 		dev_dbg(musb->controller, "dma pending...\n");
340 		return;
341 	}
342 
343 	/* read TXCSR before */
344 	csr = musb_readw(epio, MUSB_TXCSR);
345 
346 	request = &req->request;
347 	fifo_count = min(max_ep_writesize(musb, musb_ep),
348 			(int)(request->length - request->actual));
349 
350 	if (csr & MUSB_TXCSR_TXPKTRDY) {
351 		dev_dbg(musb->controller, "%s old packet still ready, txcsr %03x\n",
352 				musb_ep->end_point.name, csr);
353 		return;
354 	}
355 
356 	if (csr & MUSB_TXCSR_P_SENDSTALL) {
357 		dev_dbg(musb->controller, "%s stalling, txcsr %03x\n",
358 				musb_ep->end_point.name, csr);
359 		return;
360 	}
361 
362 	dev_dbg(musb->controller, "hw_ep%d, maxpacket %d, fifo count %d, txcsr %03x\n",
363 			epnum, musb_ep->packet_sz, fifo_count,
364 			csr);
365 
366 #ifndef	CONFIG_USB_MUSB_PIO_ONLY
367 	if (is_buffer_mapped(req)) {
368 		struct dma_controller	*c = musb->dma_controller;
369 		size_t request_size;
370 
371 		/* setup DMA, then program endpoint CSR */
372 		request_size = min_t(size_t, request->length - request->actual,
373 					musb_ep->dma->max_len);
374 
375 		use_dma = (request->dma != DMA_ADDR_INVALID);
376 
377 		/* MUSB_TXCSR_P_ISO is still set correctly */
378 
379 #if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA)
380 		{
381 			if (request_size < musb_ep->packet_sz)
382 				musb_ep->dma->desired_mode = 0;
383 			else
384 				musb_ep->dma->desired_mode = 1;
385 
386 			use_dma = use_dma && c->channel_program(
387 					musb_ep->dma, musb_ep->packet_sz,
388 					musb_ep->dma->desired_mode,
389 					request->dma + request->actual, request_size);
390 			if (use_dma) {
391 				if (musb_ep->dma->desired_mode == 0) {
392 					/*
393 					 * We must not clear the DMAMODE bit
394 					 * before the DMAENAB bit -- and the
395 					 * latter doesn't always get cleared
396 					 * before we get here...
397 					 */
398 					csr &= ~(MUSB_TXCSR_AUTOSET
399 						| MUSB_TXCSR_DMAENAB);
400 					musb_writew(epio, MUSB_TXCSR, csr
401 						| MUSB_TXCSR_P_WZC_BITS);
402 					csr &= ~MUSB_TXCSR_DMAMODE;
403 					csr |= (MUSB_TXCSR_DMAENAB |
404 							MUSB_TXCSR_MODE);
405 					/* against programming guide */
406 				} else {
407 					csr |= (MUSB_TXCSR_DMAENAB
408 							| MUSB_TXCSR_DMAMODE
409 							| MUSB_TXCSR_MODE);
410 					if (!musb_ep->hb_mult)
411 						csr |= MUSB_TXCSR_AUTOSET;
412 				}
413 				csr &= ~MUSB_TXCSR_P_UNDERRUN;
414 
415 				musb_writew(epio, MUSB_TXCSR, csr);
416 			}
417 		}
418 
419 #elif defined(CONFIG_USB_TI_CPPI_DMA)
420 		/* program endpoint CSR first, then setup DMA */
421 		csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
422 		csr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_DMAMODE |
423 		       MUSB_TXCSR_MODE;
424 		musb_writew(epio, MUSB_TXCSR,
425 			(MUSB_TXCSR_P_WZC_BITS & ~MUSB_TXCSR_P_UNDERRUN)
426 				| csr);
427 
428 		/* ensure writebuffer is empty */
429 		csr = musb_readw(epio, MUSB_TXCSR);
430 
431 		/* NOTE host side sets DMAENAB later than this; both are
432 		 * OK since the transfer dma glue (between CPPI and Mentor
433 		 * fifos) just tells CPPI it could start.  Data only moves
434 		 * to the USB TX fifo when both fifos are ready.
435 		 */
436 
437 		/* "mode" is irrelevant here; handle terminating ZLPs like
438 		 * PIO does, since the hardware RNDIS mode seems unreliable
439 		 * except for the last-packet-is-already-short case.
440 		 */
441 		use_dma = use_dma && c->channel_program(
442 				musb_ep->dma, musb_ep->packet_sz,
443 				0,
444 				request->dma + request->actual,
445 				request_size);
446 		if (!use_dma) {
447 			c->channel_release(musb_ep->dma);
448 			musb_ep->dma = NULL;
449 			csr &= ~MUSB_TXCSR_DMAENAB;
450 			musb_writew(epio, MUSB_TXCSR, csr);
451 			/* invariant: request->buf is non-null */
452 		}
453 #elif defined(CONFIG_USB_TUSB_OMAP_DMA)
454 		use_dma = use_dma && c->channel_program(
455 				musb_ep->dma, musb_ep->packet_sz,
456 				request->zero,
457 				request->dma + request->actual,
458 				request_size);
459 #endif
460 	}
461 #endif
462 
463 	if (!use_dma) {
464 		/*
465 		 * Unmap the dma buffer back to cpu if dma channel
466 		 * programming fails
467 		 */
468 		unmap_dma_buffer(req, musb);
469 
470 		musb_write_fifo(musb_ep->hw_ep, fifo_count,
471 				(u8 *) (request->buf + request->actual));
472 		request->actual += fifo_count;
473 		csr |= MUSB_TXCSR_TXPKTRDY;
474 		csr &= ~MUSB_TXCSR_P_UNDERRUN;
475 		musb_writew(epio, MUSB_TXCSR, csr);
476 	}
477 
478 	/* host may already have the data when this message shows... */
479 	dev_dbg(musb->controller, "%s TX/IN %s len %d/%d, txcsr %04x, fifo %d/%d\n",
480 			musb_ep->end_point.name, use_dma ? "dma" : "pio",
481 			request->actual, request->length,
482 			musb_readw(epio, MUSB_TXCSR),
483 			fifo_count,
484 			musb_readw(epio, MUSB_TXMAXP));
485 }
486 
487 /*
488  * FIFO state update (e.g. data ready).
489  * Called from IRQ,  with controller locked.
490  */
491 void musb_g_tx(struct musb *musb, u8 epnum)
492 {
493 	u16			csr;
494 	struct musb_request	*req;
495 	struct usb_request	*request;
496 	u8 __iomem		*mbase = musb->mregs;
497 	struct musb_ep		*musb_ep = &musb->endpoints[epnum].ep_in;
498 	void __iomem		*epio = musb->endpoints[epnum].regs;
499 	struct dma_channel	*dma;
500 
501 	musb_ep_select(mbase, epnum);
502 	req = next_request(musb_ep);
503 	request = &req->request;
504 
505 	csr = musb_readw(epio, MUSB_TXCSR);
506 	dev_dbg(musb->controller, "<== %s, txcsr %04x\n", musb_ep->end_point.name, csr);
507 
508 	dma = is_dma_capable() ? musb_ep->dma : NULL;
509 
510 	/*
511 	 * REVISIT: for high bandwidth, MUSB_TXCSR_P_INCOMPTX
512 	 * probably rates reporting as a host error.
513 	 */
514 	if (csr & MUSB_TXCSR_P_SENTSTALL) {
515 		csr |=	MUSB_TXCSR_P_WZC_BITS;
516 		csr &= ~MUSB_TXCSR_P_SENTSTALL;
517 		musb_writew(epio, MUSB_TXCSR, csr);
518 		return;
519 	}
520 
521 	if (csr & MUSB_TXCSR_P_UNDERRUN) {
522 		/* We NAKed, no big deal... little reason to care. */
523 		csr |=	 MUSB_TXCSR_P_WZC_BITS;
524 		csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
525 		musb_writew(epio, MUSB_TXCSR, csr);
526 		dev_vdbg(musb->controller, "underrun on ep%d, req %p\n",
527 				epnum, request);
528 	}
529 
530 	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
531 		/*
532 		 * SHOULD NOT HAPPEN... has with CPPI though, after
533 		 * changing SENDSTALL (and other cases); harmless?
534 		 */
535 		dev_dbg(musb->controller, "%s dma still busy?\n", musb_ep->end_point.name);
536 		return;
537 	}
538 
539 	if (request) {
540 		u8	is_dma = 0;
541 
542 		if (dma && (csr & MUSB_TXCSR_DMAENAB)) {
543 			is_dma = 1;
544 			csr |= MUSB_TXCSR_P_WZC_BITS;
545 			csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_P_UNDERRUN |
546 				 MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_AUTOSET);
547 			musb_writew(epio, MUSB_TXCSR, csr);
548 			/* Ensure writebuffer is empty. */
549 			csr = musb_readw(epio, MUSB_TXCSR);
550 			request->actual += musb_ep->dma->actual_len;
551 			dev_dbg(musb->controller, "TXCSR%d %04x, DMA off, len %zu, req %p\n",
552 				epnum, csr, musb_ep->dma->actual_len, request);
553 		}
554 
555 		/*
556 		 * First, maybe a terminating short packet. Some DMA
557 		 * engines might handle this by themselves.
558 		 */
559 		if ((request->zero && request->length
560 			&& (request->length % musb_ep->packet_sz == 0)
561 			&& (request->actual == request->length))
562 #if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA)
563 			|| (is_dma && (!dma->desired_mode ||
564 				(request->actual &
565 					(musb_ep->packet_sz - 1))))
566 #endif
567 		) {
568 			/*
569 			 * On DMA completion, FIFO may not be
570 			 * available yet...
571 			 */
572 			if (csr & MUSB_TXCSR_TXPKTRDY)
573 				return;
574 
575 			dev_dbg(musb->controller, "sending zero pkt\n");
576 			musb_writew(epio, MUSB_TXCSR, MUSB_TXCSR_MODE
577 					| MUSB_TXCSR_TXPKTRDY);
578 			request->zero = 0;
579 		}
580 
581 		if (request->actual == request->length) {
582 			musb_g_giveback(musb_ep, request, 0);
583 			/*
584 			 * In the giveback function the MUSB lock is
585 			 * released and later reacquired. During that
586 			 * window the INDEX register could be changed
587 			 * by the gadget_queue function, especially on
588 			 * SMP systems. Reselect the INDEX to be sure
589 			 * we are reading/modifying the right registers.
590 			 */
591 			musb_ep_select(mbase, epnum);
592 			req = musb_ep->desc ? next_request(musb_ep) : NULL;
593 			if (!req) {
594 				dev_dbg(musb->controller, "%s idle now\n",
595 					musb_ep->end_point.name);
596 				return;
597 			}
598 		}
599 
600 		txstate(musb, req);
601 	}
602 }
603 
604 /* ------------------------------------------------------------ */
605 
606 #ifdef CONFIG_USB_INVENTRA_DMA
607 
608 /* Peripheral rx (OUT) using Mentor DMA works as follows:
609 	- Only mode 0 is used.
610 
611 	- Request is queued by the gadget class driver.
612 		-> if queue was previously empty, rxstate()
613 
614 	- Host sends OUT token which causes an endpoint interrupt
615 	  /\      -> RxReady
616 	  |	      -> if request queued, call rxstate
617 	  |		/\	-> setup DMA
618 	  |		|	     -> DMA interrupt on completion
619 	  |		|		-> RxReady
620 	  |		|		      -> stop DMA
621 	  |		|		      -> ack the read
622 	  |		|		      -> if data recd = max expected
623 	  |		|				by the request, or host
624 	  |		|				sent a short packet,
625 	  |		|				complete the request,
626 	  |		|				and start the next one.
627 	  |		|_____________________________________|
628 	  |					 else just wait for the host
629 	  |					    to send the next OUT token.
630 	  |__________________________________________________|
631 
632  * Non-Mentor DMA engines can of course work differently.
633  */
634 
635 #endif
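
/*
 * Illustrative only (kept under #if 0, not built): the completion rule from
 * the diagram above. example_rx_done() is a hypothetical helper; rxstate()
 * below applies the same test at its end -- a request finishes when the
 * buffer is full or the host sends a short packet.
 */
#if 0
static bool example_rx_done(struct usb_request *request,
		struct musb_ep *musb_ep, u16 len)
{
	/* len: byte count of the packet just unloaded (from RXCOUNT) */
	return request->actual == request->length
		|| len < musb_ep->packet_sz;
}
#endif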
636 
637 /*
638  * Context: controller locked, IRQs blocked, endpoint selected
639  */
640 static void rxstate(struct musb *musb, struct musb_request *req)
641 {
642 	const u8		epnum = req->epnum;
643 	struct usb_request	*request = &req->request;
644 	struct musb_ep		*musb_ep;
645 	void __iomem		*epio = musb->endpoints[epnum].regs;
646 	unsigned		fifo_count = 0;
647 	u16			len;
648 	u16			csr = musb_readw(epio, MUSB_RXCSR);
649 	struct musb_hw_ep	*hw_ep = &musb->endpoints[epnum];
650 	u8			use_mode_1;
651 
652 	if (hw_ep->is_shared_fifo)
653 		musb_ep = &hw_ep->ep_in;
654 	else
655 		musb_ep = &hw_ep->ep_out;
656 
657 	len = musb_ep->packet_sz;
658 
659 	/* Check if EP is disabled */
660 	if (!musb_ep->desc) {
661 		dev_dbg(musb->controller, "ep:%s disabled - ignore request\n",
662 						musb_ep->end_point.name);
663 		return;
664 	}
665 
666 	/* We shouldn't get here while DMA is active, but we do... */
667 	if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
668 		dev_dbg(musb->controller, "DMA pending...\n");
669 		return;
670 	}
671 
672 	if (csr & MUSB_RXCSR_P_SENDSTALL) {
673 		dev_dbg(musb->controller, "%s stalling, RXCSR %04x\n",
674 		    musb_ep->end_point.name, csr);
675 		return;
676 	}
677 
678 	if (is_cppi_enabled() && is_buffer_mapped(req)) {
679 		struct dma_controller	*c = musb->dma_controller;
680 		struct dma_channel	*channel = musb_ep->dma;
681 
682 		/* NOTE:  CPPI won't actually stop advancing the DMA
683 		 * queue after short packet transfers, so this is almost
684 		 * always going to run as IRQ-per-packet DMA so that
685 		 * faults will be handled correctly.
686 		 */
687 		if (c->channel_program(channel,
688 				musb_ep->packet_sz,
689 				!request->short_not_ok,
690 				request->dma + request->actual,
691 				request->length - request->actual)) {
692 
693 			/* make sure that if an rxpkt arrived after the irq,
694 			 * the cppi engine will be ready to take it as soon
695 			 * as DMA is enabled
696 			 */
697 			csr &= ~(MUSB_RXCSR_AUTOCLEAR
698 					| MUSB_RXCSR_DMAMODE);
699 			csr |= MUSB_RXCSR_DMAENAB | MUSB_RXCSR_P_WZC_BITS;
700 			musb_writew(epio, MUSB_RXCSR, csr);
701 			return;
702 		}
703 	}
704 
705 	if (csr & MUSB_RXCSR_RXPKTRDY) {
706 		len = musb_readw(epio, MUSB_RXCOUNT);
707 
708 		/*
709 		 * Enable Mode 1 on RX transfers only when the short_not_ok
710 		 * flag is set. Currently the short_not_ok flag is set only by
711 		 * the file_storage and f_mass_storage drivers.
712 		 */
713 
714 		if (request->short_not_ok && len == musb_ep->packet_sz)
715 			use_mode_1 = 1;
716 		else
717 			use_mode_1 = 0;
718 
719 		if (request->actual < request->length) {
720 #ifdef CONFIG_USB_INVENTRA_DMA
721 			if (is_buffer_mapped(req)) {
722 				struct dma_controller	*c;
723 				struct dma_channel	*channel;
724 				int			use_dma = 0;
725 
726 				c = musb->dma_controller;
727 				channel = musb_ep->dma;
728 
729 	/* We use DMA Req mode 0 in rx_csr, and DMA controller operates in
730 	 * mode 0 only. So we do not get endpoint interrupts due to DMA
731 	 * completion. We only get interrupts from DMA controller.
732 	 *
733 	 * We could operate in DMA mode 1 if we knew the size of the transfer
734 	 * in advance. For mass storage class, request->length = what the host
735 	 * sends, so that'd work.  But for pretty much everything else,
736 	 * request->length is routinely more than what the host sends. For
737 	 * most of these gadgets, end of transfer is signified either by a
738 	 * short packet, or by filling the last byte of the buffer.  (Sending
739 	 * extra data in that last packet should trigger an overflow fault.)
740 	 * But in mode 1, we don't get a DMA completion interrupt for short packets.
741 	 *
742 	 * Theoretically, we could enable DMAReq irq (MUSB_RXCSR_DMAMODE = 1),
743 	 * to get endpoint interrupt on every DMA req, but that didn't seem
744 	 * to work reliably.
745 	 *
746 	 * REVISIT an updated g_file_storage can set req->short_not_ok, which
747 	 * then becomes usable as a runtime "use mode 1" hint...
748 	 */
749 
750 				/* Experimental: Mode1 works with mass storage use cases */
751 				if (use_mode_1) {
752 					csr |= MUSB_RXCSR_AUTOCLEAR;
753 					musb_writew(epio, MUSB_RXCSR, csr);
754 					csr |= MUSB_RXCSR_DMAENAB;
755 					musb_writew(epio, MUSB_RXCSR, csr);
756 
757 					/*
758 					 * this special sequence (enabling and then
759 					 * disabling MUSB_RXCSR_DMAMODE) is required
760 					 * to get DMAReq to activate
761 					 */
762 					musb_writew(epio, MUSB_RXCSR,
763 						csr | MUSB_RXCSR_DMAMODE);
764 					musb_writew(epio, MUSB_RXCSR, csr);
765 
766 				} else {
767 					if (!musb_ep->hb_mult &&
768 						musb_ep->hw_ep->rx_double_buffered)
769 						csr |= MUSB_RXCSR_AUTOCLEAR;
770 					csr |= MUSB_RXCSR_DMAENAB;
771 					musb_writew(epio, MUSB_RXCSR, csr);
772 				}
773 
774 				if (request->actual < request->length) {
775 					int transfer_size = 0;
776 					if (use_mode_1) {
777 						transfer_size = min(request->length - request->actual,
778 								channel->max_len);
779 						musb_ep->dma->desired_mode = 1;
780 					} else {
781 						transfer_size = min(request->length - request->actual,
782 								(unsigned)len);
783 						musb_ep->dma->desired_mode = 0;
784 					}
785 
786 					use_dma = c->channel_program(
787 							channel,
788 							musb_ep->packet_sz,
789 							channel->desired_mode,
790 							request->dma
791 							+ request->actual,
792 							transfer_size);
793 				}
794 
795 				if (use_dma)
796 					return;
797 			}
798 #elif defined(CONFIG_USB_UX500_DMA)
799 			if ((is_buffer_mapped(req)) &&
800 				(request->actual < request->length)) {
801 
802 				struct dma_controller *c;
803 				struct dma_channel *channel;
804 				int transfer_size = 0;
805 
806 				c = musb->dma_controller;
807 				channel = musb_ep->dma;
808 
809 				/* In case first packet is short */
810 				if (len < musb_ep->packet_sz)
811 					transfer_size = len;
812 				else if (request->short_not_ok)
813 					transfer_size =	min(request->length -
814 							request->actual,
815 							channel->max_len);
816 				else
817 					transfer_size = min(request->length -
818 							request->actual,
819 							(unsigned)len);
820 
821 				csr &= ~MUSB_RXCSR_DMAMODE;
822 				csr |= (MUSB_RXCSR_DMAENAB |
823 					MUSB_RXCSR_AUTOCLEAR);
824 
825 				musb_writew(epio, MUSB_RXCSR, csr);
826 
827 				if (transfer_size <= musb_ep->packet_sz) {
828 					musb_ep->dma->desired_mode = 0;
829 				} else {
830 					musb_ep->dma->desired_mode = 1;
831 					/* Mode must be set after DMAENAB */
832 					csr |= MUSB_RXCSR_DMAMODE;
833 					musb_writew(epio, MUSB_RXCSR, csr);
834 				}
835 
836 				if (c->channel_program(channel,
837 							musb_ep->packet_sz,
838 							channel->desired_mode,
839 							request->dma
840 							+ request->actual,
841 							transfer_size))
842 
843 					return;
844 			}
845 #endif	/* Mentor's DMA */
846 
847 			fifo_count = request->length - request->actual;
848 			dev_dbg(musb->controller, "%s OUT/RX pio fifo %d/%d, maxpacket %d\n",
849 					musb_ep->end_point.name,
850 					len, fifo_count,
851 					musb_ep->packet_sz);
852 
853 			fifo_count = min_t(unsigned, len, fifo_count);
854 
855 #ifdef	CONFIG_USB_TUSB_OMAP_DMA
856 			if (tusb_dma_omap() && is_buffer_mapped(req)) {
857 				struct dma_controller *c = musb->dma_controller;
858 				struct dma_channel *channel = musb_ep->dma;
859 				u32 dma_addr = request->dma + request->actual;
860 				int ret;
861 
862 				ret = c->channel_program(channel,
863 						musb_ep->packet_sz,
864 						channel->desired_mode,
865 						dma_addr,
866 						fifo_count);
867 				if (ret)
868 					return;
869 			}
870 #endif
871 			/*
872 			 * Unmap the dma buffer back to cpu if dma channel
873 			 * programming fails. This buffer is mapped if the
874 			 * channel allocation is successful
875 			 */
876 			 if (is_buffer_mapped(req)) {
877 				unmap_dma_buffer(req, musb);
878 
879 				/*
880 				 * Clear DMAENAB and AUTOCLEAR for the
881 				 * PIO mode transfer
882 				 */
883 				csr &= ~(MUSB_RXCSR_DMAENAB | MUSB_RXCSR_AUTOCLEAR);
884 				musb_writew(epio, MUSB_RXCSR, csr);
885 			}
886 
887 			musb_read_fifo(musb_ep->hw_ep, fifo_count, (u8 *)
888 					(request->buf + request->actual));
889 			request->actual += fifo_count;
890 
891 			/* REVISIT if we left anything in the fifo, flush
892 			 * it and report -EOVERFLOW
893 			 */
894 
895 			/* ack the read! */
896 			csr |= MUSB_RXCSR_P_WZC_BITS;
897 			csr &= ~MUSB_RXCSR_RXPKTRDY;
898 			musb_writew(epio, MUSB_RXCSR, csr);
899 		}
900 	}
901 
902 	/* reached the end, or a short packet was detected */
903 	if (request->actual == request->length || len < musb_ep->packet_sz)
904 		musb_g_giveback(musb_ep, request, 0);
905 }
906 
907 /*
908  * Data ready for a request; called from IRQ
909  */
910 void musb_g_rx(struct musb *musb, u8 epnum)
911 {
912 	u16			csr;
913 	struct musb_request	*req;
914 	struct usb_request	*request;
915 	void __iomem		*mbase = musb->mregs;
916 	struct musb_ep		*musb_ep;
917 	void __iomem		*epio = musb->endpoints[epnum].regs;
918 	struct dma_channel	*dma;
919 	struct musb_hw_ep	*hw_ep = &musb->endpoints[epnum];
920 
921 	if (hw_ep->is_shared_fifo)
922 		musb_ep = &hw_ep->ep_in;
923 	else
924 		musb_ep = &hw_ep->ep_out;
925 
926 	musb_ep_select(mbase, epnum);
927 
928 	req = next_request(musb_ep);
929 	if (!req)
930 		return;
931 
932 	request = &req->request;
933 
934 	csr = musb_readw(epio, MUSB_RXCSR);
935 	dma = is_dma_capable() ? musb_ep->dma : NULL;
936 
937 	dev_dbg(musb->controller, "<== %s, rxcsr %04x%s %p\n", musb_ep->end_point.name,
938 			csr, dma ? " (dma)" : "", request);
939 
940 	if (csr & MUSB_RXCSR_P_SENTSTALL) {
941 		csr |= MUSB_RXCSR_P_WZC_BITS;
942 		csr &= ~MUSB_RXCSR_P_SENTSTALL;
943 		musb_writew(epio, MUSB_RXCSR, csr);
944 		return;
945 	}
946 
947 	if (csr & MUSB_RXCSR_P_OVERRUN) {
948 		/* csr |= MUSB_RXCSR_P_WZC_BITS; */
949 		csr &= ~MUSB_RXCSR_P_OVERRUN;
950 		musb_writew(epio, MUSB_RXCSR, csr);
951 
952 		dev_dbg(musb->controller, "%s iso overrun on %p\n", musb_ep->name, request);
953 		if (request->status == -EINPROGRESS)
954 			request->status = -EOVERFLOW;
955 	}
956 	if (csr & MUSB_RXCSR_INCOMPRX) {
957 		/* REVISIT not necessarily an error */
958 		dev_dbg(musb->controller, "%s, incomprx\n", musb_ep->end_point.name);
959 	}
960 
961 	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
962 		/* "should not happen"; likely RXPKTRDY pending for DMA */
963 		dev_dbg(musb->controller, "%s busy, csr %04x\n",
964 			musb_ep->end_point.name, csr);
965 		return;
966 	}
967 
968 	if (dma && (csr & MUSB_RXCSR_DMAENAB)) {
969 		csr &= ~(MUSB_RXCSR_AUTOCLEAR
970 				| MUSB_RXCSR_DMAENAB
971 				| MUSB_RXCSR_DMAMODE);
972 		musb_writew(epio, MUSB_RXCSR,
973 			MUSB_RXCSR_P_WZC_BITS | csr);
974 
975 		request->actual += musb_ep->dma->actual_len;
976 
977 		dev_dbg(musb->controller, "RXCSR%d %04x, dma off, %04x, len %zu, req %p\n",
978 			epnum, csr,
979 			musb_readw(epio, MUSB_RXCSR),
980 			musb_ep->dma->actual_len, request);
981 
982 #if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) || \
983 	defined(CONFIG_USB_UX500_DMA)
984 		/* Autoclear doesn't clear RxPktRdy for short packets */
985 		if ((dma->desired_mode == 0 && !hw_ep->rx_double_buffered)
986 				|| (dma->actual_len
987 					& (musb_ep->packet_sz - 1))) {
988 			/* ack the read! */
989 			csr &= ~MUSB_RXCSR_RXPKTRDY;
990 			musb_writew(epio, MUSB_RXCSR, csr);
991 		}
992 
993 		/* incomplete, and not short? wait for next OUT packet */
994 		if ((request->actual < request->length)
995 				&& (musb_ep->dma->actual_len
996 					== musb_ep->packet_sz)) {
997 			/* In the double-buffered case, continue to unload the
998 			 * FIFO if another Rx packet is present.
999 			 */
1000 			csr = musb_readw(epio, MUSB_RXCSR);
1001 			if ((csr & MUSB_RXCSR_RXPKTRDY) &&
1002 				hw_ep->rx_double_buffered)
1003 				goto exit;
1004 			return;
1005 		}
1006 #endif
1007 		musb_g_giveback(musb_ep, request, 0);
1008 		/*
1009 		 * In the giveback function the MUSB lock is
1010 		 * released and later reacquired. During that
1011 		 * window the INDEX register could be changed
1012 		 * by the gadget_queue function, especially on
1013 		 * SMP systems. Reselect the INDEX to be sure
1014 		 * we are reading/modifying the right registers.
1015 		 */
1016 		musb_ep_select(mbase, epnum);
1017 
1018 		req = next_request(musb_ep);
1019 		if (!req)
1020 			return;
1021 	}
1022 #if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) || \
1023 	defined(CONFIG_USB_UX500_DMA)
1024 exit:
1025 #endif
1026 	/* Analyze request */
1027 	rxstate(musb, req);
1028 }
1029 
1030 /* ------------------------------------------------------------ */
1031 
1032 static int musb_gadget_enable(struct usb_ep *ep,
1033 			const struct usb_endpoint_descriptor *desc)
1034 {
1035 	unsigned long		flags;
1036 	struct musb_ep		*musb_ep;
1037 	struct musb_hw_ep	*hw_ep;
1038 	void __iomem		*regs;
1039 	struct musb		*musb;
1040 	void __iomem	*mbase;
1041 	u8		epnum;
1042 	u16		csr;
1043 	unsigned	tmp;
1044 	int		status = -EINVAL;
1045 
1046 	if (!ep || !desc)
1047 		return -EINVAL;
1048 
1049 	musb_ep = to_musb_ep(ep);
1050 	hw_ep = musb_ep->hw_ep;
1051 	regs = hw_ep->regs;
1052 	musb = musb_ep->musb;
1053 	mbase = musb->mregs;
1054 	epnum = musb_ep->current_epnum;
1055 
1056 	spin_lock_irqsave(&musb->lock, flags);
1057 
1058 	if (musb_ep->desc) {
1059 		status = -EBUSY;
1060 		goto fail;
1061 	}
1062 	musb_ep->type = usb_endpoint_type(desc);
1063 
1064 	/* check direction and (later) maxpacket size against endpoint */
1065 	if (usb_endpoint_num(desc) != epnum)
1066 		goto fail;
1067 
1068 	/* REVISIT this rules out high bandwidth periodic transfers */
1069 	tmp = usb_endpoint_maxp(desc);
1070 	if (tmp & ~0x07ff) {
1071 		int ok;
1072 
1073 		if (usb_endpoint_dir_in(desc))
1074 			ok = musb->hb_iso_tx;
1075 		else
1076 			ok = musb->hb_iso_rx;
1077 
1078 		if (!ok) {
1079 			dev_dbg(musb->controller, "no support for high bandwidth ISO\n");
1080 			goto fail;
1081 		}
1082 		musb_ep->hb_mult = (tmp >> 11) & 3;
1083 	} else {
1084 		musb_ep->hb_mult = 0;
1085 	}
1086 
1087 	musb_ep->packet_sz = tmp & 0x7ff;
1088 	tmp = musb_ep->packet_sz * (musb_ep->hb_mult + 1);
1089 
1090 	/* enable the interrupts for the endpoint, set the endpoint
1091 	 * packet size (or fail), set the mode, clear the fifo
1092 	 */
1093 	musb_ep_select(mbase, epnum);
1094 	if (usb_endpoint_dir_in(desc)) {
1095 		u16 int_txe = musb_readw(mbase, MUSB_INTRTXE);
1096 
1097 		if (hw_ep->is_shared_fifo)
1098 			musb_ep->is_in = 1;
1099 		if (!musb_ep->is_in)
1100 			goto fail;
1101 
1102 		if (tmp > hw_ep->max_packet_sz_tx) {
1103 			dev_dbg(musb->controller, "packet size beyond hardware FIFO size\n");
1104 			goto fail;
1105 		}
1106 
1107 		int_txe |= (1 << epnum);
1108 		musb_writew(mbase, MUSB_INTRTXE, int_txe);
1109 
1110 		/* REVISIT if can_bulk_split(), use by updating "tmp";
1111 		 * likewise high bandwidth periodic tx
1112 		 */
1113 		/* Set TXMAXP with the FIFO size of the endpoint
1114 		 * to disable double buffering mode.
1115 		 */
1116 		if (musb->double_buffer_not_ok)
1117 			musb_writew(regs, MUSB_TXMAXP, hw_ep->max_packet_sz_tx);
1118 		else
1119 			musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz
1120 					| (musb_ep->hb_mult << 11));
1121 
1122 		csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG;
1123 		if (musb_readw(regs, MUSB_TXCSR)
1124 				& MUSB_TXCSR_FIFONOTEMPTY)
1125 			csr |= MUSB_TXCSR_FLUSHFIFO;
1126 		if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
1127 			csr |= MUSB_TXCSR_P_ISO;
1128 
1129 		/* set twice in case of double buffering */
1130 		musb_writew(regs, MUSB_TXCSR, csr);
1131 		/* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
1132 		musb_writew(regs, MUSB_TXCSR, csr);
1133 
1134 	} else {
1135 		u16 int_rxe = musb_readw(mbase, MUSB_INTRRXE);
1136 
1137 		if (hw_ep->is_shared_fifo)
1138 			musb_ep->is_in = 0;
1139 		if (musb_ep->is_in)
1140 			goto fail;
1141 
1142 		if (tmp > hw_ep->max_packet_sz_rx) {
1143 			dev_dbg(musb->controller, "packet size beyond hardware FIFO size\n");
1144 			goto fail;
1145 		}
1146 
1147 		int_rxe |= (1 << epnum);
1148 		musb_writew(mbase, MUSB_INTRRXE, int_rxe);
1149 
1150 		/* REVISIT if can_bulk_combine() use by updating "tmp"
1151 		 * likewise high bandwidth periodic rx
1152 		 */
1153 		/* Set RXMAXP with the FIFO size of the endpoint
1154 		 * to disable double buffering mode.
1155 		 */
1156 		if (musb->double_buffer_not_ok)
1157 			musb_writew(regs, MUSB_RXMAXP, hw_ep->max_packet_sz_tx);
1158 		else
1159 			musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz
1160 					| (musb_ep->hb_mult << 11));
1161 
1162 		/* force shared fifo to OUT-only mode */
1163 		if (hw_ep->is_shared_fifo) {
1164 			csr = musb_readw(regs, MUSB_TXCSR);
1165 			csr &= ~(MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY);
1166 			musb_writew(regs, MUSB_TXCSR, csr);
1167 		}
1168 
1169 		csr = MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_CLRDATATOG;
1170 		if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
1171 			csr |= MUSB_RXCSR_P_ISO;
1172 		else if (musb_ep->type == USB_ENDPOINT_XFER_INT)
1173 			csr |= MUSB_RXCSR_DISNYET;
1174 
1175 		/* set twice in case of double buffering */
1176 		musb_writew(regs, MUSB_RXCSR, csr);
1177 		musb_writew(regs, MUSB_RXCSR, csr);
1178 	}
1179 
1180 	/* NOTE:  all the I/O code _should_ work fine without DMA, in case
1181 	 * for some reason you run out of channels here.
1182 	 */
1183 	if (is_dma_capable() && musb->dma_controller) {
1184 		struct dma_controller	*c = musb->dma_controller;
1185 
1186 		musb_ep->dma = c->channel_alloc(c, hw_ep,
1187 				(desc->bEndpointAddress & USB_DIR_IN));
1188 	} else
1189 		musb_ep->dma = NULL;
1190 
1191 	musb_ep->desc = desc;
1192 	musb_ep->busy = 0;
1193 	musb_ep->wedged = 0;
1194 	status = 0;
1195 
1196 	pr_debug("%s periph: enabled %s for %s %s, %smaxpacket %d\n",
1197 			musb_driver_name, musb_ep->end_point.name,
1198 			({ char *s; switch (musb_ep->type) {
1199 			case USB_ENDPOINT_XFER_BULK:	s = "bulk"; break;
1200 			case USB_ENDPOINT_XFER_INT:	s = "int"; break;
1201 			default:			s = "iso"; break;
1202 			}; s; }),
1203 			musb_ep->is_in ? "IN" : "OUT",
1204 			musb_ep->dma ? "dma, " : "",
1205 			musb_ep->packet_sz);
1206 
1207 	schedule_work(&musb->irq_work);
1208 
1209 fail:
1210 	spin_unlock_irqrestore(&musb->lock, flags);
1211 	return status;
1212 }
1213 
1214 /*
1215  * Disable an endpoint, flushing all queued requests.
1216  */
1217 static int musb_gadget_disable(struct usb_ep *ep)
1218 {
1219 	unsigned long	flags;
1220 	struct musb	*musb;
1221 	u8		epnum;
1222 	struct musb_ep	*musb_ep;
1223 	void __iomem	*epio;
1224 	int		status = 0;
1225 
1226 	musb_ep = to_musb_ep(ep);
1227 	musb = musb_ep->musb;
1228 	epnum = musb_ep->current_epnum;
1229 	epio = musb->endpoints[epnum].regs;
1230 
1231 	spin_lock_irqsave(&musb->lock, flags);
1232 	musb_ep_select(musb->mregs, epnum);
1233 
1234 	/* zero the endpoint sizes */
1235 	if (musb_ep->is_in) {
1236 		u16 int_txe = musb_readw(musb->mregs, MUSB_INTRTXE);
1237 		int_txe &= ~(1 << epnum);
1238 		musb_writew(musb->mregs, MUSB_INTRTXE, int_txe);
1239 		musb_writew(epio, MUSB_TXMAXP, 0);
1240 	} else {
1241 		u16 int_rxe = musb_readw(musb->mregs, MUSB_INTRRXE);
1242 		int_rxe &= ~(1 << epnum);
1243 		musb_writew(musb->mregs, MUSB_INTRRXE, int_rxe);
1244 		musb_writew(epio, MUSB_RXMAXP, 0);
1245 	}
1246 
1247 	musb_ep->desc = NULL;
1248 #ifndef __UBOOT__
1249 	musb_ep->end_point.desc = NULL;
1250 #endif
1251 
1252 	/* abort all pending DMA and requests */
1253 	nuke(musb_ep, -ESHUTDOWN);
1254 
1255 	schedule_work(&musb->irq_work);
1256 
1257 	spin_unlock_irqrestore(&(musb->lock), flags);
1258 
1259 	dev_dbg(musb->controller, "%s\n", musb_ep->end_point.name);
1260 
1261 	return status;
1262 }
1263 
1264 /*
1265  * Allocate a request for an endpoint.
1266  * Reused by ep0 code.
1267  */
1268 struct usb_request *musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
1269 {
1270 	struct musb_ep		*musb_ep = to_musb_ep(ep);
1271 	struct musb		*musb = musb_ep->musb;
1272 	struct musb_request	*request = NULL;
1273 
1274 	request = kzalloc(sizeof *request, gfp_flags);
1275 	if (!request) {
1276 		dev_dbg(musb->controller, "not enough memory\n");
1277 		return NULL;
1278 	}
1279 
1280 	request->request.dma = DMA_ADDR_INVALID;
1281 	request->epnum = musb_ep->current_epnum;
1282 	request->ep = musb_ep;
1283 
1284 	return &request->request;
1285 }
1286 
1287 /*
1288  * Free a request
1289  * Reused by ep0 code.
1290  */
1291 void musb_free_request(struct usb_ep *ep, struct usb_request *req)
1292 {
1293 	kfree(to_musb_request(req));
1294 }
1295 
1296 static LIST_HEAD(buffers);
1297 
1298 struct free_record {
1299 	struct list_head	list;
1300 	struct device		*dev;
1301 	unsigned		bytes;
1302 	dma_addr_t		dma;
1303 };
1304 
1305 /*
1306  * Context: controller locked, IRQs blocked.
1307  */
1308 void musb_ep_restart(struct musb *musb, struct musb_request *req)
1309 {
1310 	dev_dbg(musb->controller, "<== %s request %p len %u on hw_ep%d\n",
1311 		req->tx ? "TX/IN" : "RX/OUT",
1312 		&req->request, req->request.length, req->epnum);
1313 
1314 	musb_ep_select(musb->mregs, req->epnum);
1315 	if (req->tx)
1316 		txstate(musb, req);
1317 	else
1318 		rxstate(musb, req);
1319 }
1320 
1321 static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
1322 			gfp_t gfp_flags)
1323 {
1324 	struct musb_ep		*musb_ep;
1325 	struct musb_request	*request;
1326 	struct musb		*musb;
1327 	int			status = 0;
1328 	unsigned long		lockflags;
1329 
1330 	if (!ep || !req)
1331 		return -EINVAL;
1332 	if (!req->buf)
1333 		return -ENODATA;
1334 
1335 	musb_ep = to_musb_ep(ep);
1336 	musb = musb_ep->musb;
1337 
1338 	request = to_musb_request(req);
1339 	request->musb = musb;
1340 
1341 	if (request->ep != musb_ep)
1342 		return -EINVAL;
1343 
1344 	dev_dbg(musb->controller, "<== to %s request=%p\n", ep->name, req);
1345 
1346 	/* request is mine now... */
1347 	request->request.actual = 0;
1348 	request->request.status = -EINPROGRESS;
1349 	request->epnum = musb_ep->current_epnum;
1350 	request->tx = musb_ep->is_in;
1351 
1352 	map_dma_buffer(request, musb, musb_ep);
1353 
1354 	spin_lock_irqsave(&musb->lock, lockflags);
1355 
1356 	/* don't queue if the ep is down */
1357 	if (!musb_ep->desc) {
1358 		dev_dbg(musb->controller, "req %p queued to %s while ep %s\n",
1359 				req, ep->name, "disabled");
1360 		status = -ESHUTDOWN;
1361 		goto cleanup;
1362 	}
1363 
1364 	/* add request to the list */
1365 	list_add_tail(&request->list, &musb_ep->req_list);
1366 
1367 	/* if this is the head of the queue, start i/o ... */
1368 	if (!musb_ep->busy && &request->list == musb_ep->req_list.next)
1369 		musb_ep_restart(musb, request);
1370 
1371 cleanup:
1372 	spin_unlock_irqrestore(&musb->lock, lockflags);
1373 	return status;
1374 }
1375 
1376 static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *request)
1377 {
1378 	struct musb_ep		*musb_ep = to_musb_ep(ep);
1379 	struct musb_request	*req = to_musb_request(request);
1380 	struct musb_request	*r;
1381 	unsigned long		flags;
1382 	int			status = 0;
1383 	struct musb		*musb = musb_ep->musb;
1384 
1385 	if (!ep || !request || to_musb_request(request)->ep != musb_ep)
1386 		return -EINVAL;
1387 
1388 	spin_lock_irqsave(&musb->lock, flags);
1389 
1390 	list_for_each_entry(r, &musb_ep->req_list, list) {
1391 		if (r == req)
1392 			break;
1393 	}
1394 	if (r != req) {
1395 		dev_dbg(musb->controller, "request %p not queued to %s\n", request, ep->name);
1396 		status = -EINVAL;
1397 		goto done;
1398 	}
1399 
1400 	/* if the hardware doesn't have the request, easy ... */
1401 	if (musb_ep->req_list.next != &req->list || musb_ep->busy)
1402 		musb_g_giveback(musb_ep, request, -ECONNRESET);
1403 
1404 	/* ... else abort the dma transfer ... */
1405 	else if (is_dma_capable() && musb_ep->dma) {
1406 		struct dma_controller	*c = musb->dma_controller;
1407 
1408 		musb_ep_select(musb->mregs, musb_ep->current_epnum);
1409 		if (c->channel_abort)
1410 			status = c->channel_abort(musb_ep->dma);
1411 		else
1412 			status = -EBUSY;
1413 		if (status == 0)
1414 			musb_g_giveback(musb_ep, request, -ECONNRESET);
1415 	} else {
1416 		/* NOTE: by sticking to easily tested hardware/driver states,
1417 		 * we leave counting of in-flight packets imprecise.
1418 		 */
1419 		musb_g_giveback(musb_ep, request, -ECONNRESET);
1420 	}
1421 
1422 done:
1423 	spin_unlock_irqrestore(&musb->lock, flags);
1424 	return status;
1425 }
1426 
1427 /*
1428  * Set or clear the halt bit of an endpoint. A halted endpoint won't tx/rx any
1429  * data but will still queue requests.
1430  *
1431  * exported to ep0 code
1432  */
1433 static int musb_gadget_set_halt(struct usb_ep *ep, int value)
1434 {
1435 	struct musb_ep		*musb_ep = to_musb_ep(ep);
1436 	u8			epnum = musb_ep->current_epnum;
1437 	struct musb		*musb = musb_ep->musb;
1438 	void __iomem		*epio = musb->endpoints[epnum].regs;
1439 	void __iomem		*mbase;
1440 	unsigned long		flags;
1441 	u16			csr;
1442 	struct musb_request	*request;
1443 	int			status = 0;
1444 
1445 	if (!ep)
1446 		return -EINVAL;
1447 	mbase = musb->mregs;
1448 
1449 	spin_lock_irqsave(&musb->lock, flags);
1450 
1451 	if (musb_ep->type == USB_ENDPOINT_XFER_ISOC) {
1452 		status = -EINVAL;
1453 		goto done;
1454 	}
1455 
1456 	musb_ep_select(mbase, epnum);
1457 
1458 	request = next_request(musb_ep);
1459 	if (value) {
1460 		if (request) {
1461 			dev_dbg(musb->controller, "request in progress, cannot halt %s\n",
1462 			    ep->name);
1463 			status = -EAGAIN;
1464 			goto done;
1465 		}
1466 		/* Cannot portably stall with non-empty FIFO */
1467 		if (musb_ep->is_in) {
1468 			csr = musb_readw(epio, MUSB_TXCSR);
1469 			if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
1470 				dev_dbg(musb->controller, "FIFO busy, cannot halt %s\n", ep->name);
1471 				status = -EAGAIN;
1472 				goto done;
1473 			}
1474 		}
1475 	} else
1476 		musb_ep->wedged = 0;
1477 
1478 	/* set/clear the stall and toggle bits */
1479 	dev_dbg(musb->controller, "%s: %s stall\n", ep->name, value ? "set" : "clear");
1480 	if (musb_ep->is_in) {
1481 		csr = musb_readw(epio, MUSB_TXCSR);
1482 		csr |= MUSB_TXCSR_P_WZC_BITS
1483 			| MUSB_TXCSR_CLRDATATOG;
1484 		if (value)
1485 			csr |= MUSB_TXCSR_P_SENDSTALL;
1486 		else
1487 			csr &= ~(MUSB_TXCSR_P_SENDSTALL
1488 				| MUSB_TXCSR_P_SENTSTALL);
1489 		csr &= ~MUSB_TXCSR_TXPKTRDY;
1490 		musb_writew(epio, MUSB_TXCSR, csr);
1491 	} else {
1492 		csr = musb_readw(epio, MUSB_RXCSR);
1493 		csr |= MUSB_RXCSR_P_WZC_BITS
1494 			| MUSB_RXCSR_FLUSHFIFO
1495 			| MUSB_RXCSR_CLRDATATOG;
1496 		if (value)
1497 			csr |= MUSB_RXCSR_P_SENDSTALL;
1498 		else
1499 			csr &= ~(MUSB_RXCSR_P_SENDSTALL
1500 				| MUSB_RXCSR_P_SENTSTALL);
1501 		musb_writew(epio, MUSB_RXCSR, csr);
1502 	}
1503 
1504 	/* maybe start the first request in the queue */
1505 	if (!musb_ep->busy && !value && request) {
1506 		dev_dbg(musb->controller, "restarting the request\n");
1507 		musb_ep_restart(musb, request);
1508 	}
1509 
1510 done:
1511 	spin_unlock_irqrestore(&musb->lock, flags);
1512 	return status;
1513 }
1514 
1515 #ifndef __UBOOT__
1516 /*
1517  * Set the halt feature; clear-halt requests are ignored
1518  */
1519 static int musb_gadget_set_wedge(struct usb_ep *ep)
1520 {
1521 	struct musb_ep		*musb_ep = to_musb_ep(ep);
1522 
1523 	if (!ep)
1524 		return -EINVAL;
1525 
1526 	musb_ep->wedged = 1;
1527 
1528 	return usb_ep_set_halt(ep);
1529 }
1530 #endif
1531 
1532 static int musb_gadget_fifo_status(struct usb_ep *ep)
1533 {
1534 	struct musb_ep		*musb_ep = to_musb_ep(ep);
1535 	void __iomem		*epio = musb_ep->hw_ep->regs;
1536 	int			retval = -EINVAL;
1537 
1538 	if (musb_ep->desc && !musb_ep->is_in) {
1539 		struct musb		*musb = musb_ep->musb;
1540 		int			epnum = musb_ep->current_epnum;
1541 		void __iomem		*mbase = musb->mregs;
1542 		unsigned long		flags;
1543 
1544 		spin_lock_irqsave(&musb->lock, flags);
1545 
1546 		musb_ep_select(mbase, epnum);
1547 		/* FIXME return zero unless RXPKTRDY is set */
1548 		retval = musb_readw(epio, MUSB_RXCOUNT);
1549 
1550 		spin_unlock_irqrestore(&musb->lock, flags);
1551 	}
1552 	return retval;
1553 }
1554 
1555 static void musb_gadget_fifo_flush(struct usb_ep *ep)
1556 {
1557 	struct musb_ep	*musb_ep = to_musb_ep(ep);
1558 	struct musb	*musb = musb_ep->musb;
1559 	u8		epnum = musb_ep->current_epnum;
1560 	void __iomem	*epio = musb->endpoints[epnum].regs;
1561 	void __iomem	*mbase;
1562 	unsigned long	flags;
1563 	u16		csr, int_txe;
1564 
1565 	mbase = musb->mregs;
1566 
1567 	spin_lock_irqsave(&musb->lock, flags);
1568 	musb_ep_select(mbase, (u8) epnum);
1569 
1570 	/* disable interrupts */
1571 	int_txe = musb_readw(mbase, MUSB_INTRTXE);
1572 	musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));
1573 
1574 	if (musb_ep->is_in) {
1575 		csr = musb_readw(epio, MUSB_TXCSR);
1576 		if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
1577 			csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_P_WZC_BITS;
1578 			/*
1579 			 * Setting both TXPKTRDY and FLUSHFIFO makes the
1580 			 * controller interrupt the current FIFO loading,
1581 			 * but not flush the packets already loaded.
1582 			 */
1583 			csr &= ~MUSB_TXCSR_TXPKTRDY;
1584 			musb_writew(epio, MUSB_TXCSR, csr);
1585 			/* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
1586 			musb_writew(epio, MUSB_TXCSR, csr);
1587 		}
1588 	} else {
1589 		csr = musb_readw(epio, MUSB_RXCSR);
1590 		csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_P_WZC_BITS;
1591 		musb_writew(epio, MUSB_RXCSR, csr);
1592 		musb_writew(epio, MUSB_RXCSR, csr);
1593 	}
1594 
1595 	/* re-enable interrupt */
1596 	musb_writew(mbase, MUSB_INTRTXE, int_txe);
1597 	spin_unlock_irqrestore(&musb->lock, flags);
1598 }
1599 
1600 static const struct usb_ep_ops musb_ep_ops = {
1601 	.enable		= musb_gadget_enable,
1602 	.disable	= musb_gadget_disable,
1603 	.alloc_request	= musb_alloc_request,
1604 	.free_request	= musb_free_request,
1605 	.queue		= musb_gadget_queue,
1606 	.dequeue	= musb_gadget_dequeue,
1607 	.set_halt	= musb_gadget_set_halt,
1608 #ifndef __UBOOT__
1609 	.set_wedge	= musb_gadget_set_wedge,
1610 #endif
1611 	.fifo_status	= musb_gadget_fifo_status,
1612 	.fifo_flush	= musb_gadget_fifo_flush
1613 };
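
/*
 * Illustrative only (kept under #if 0, not built): how a gadget function
 * driver reaches the ops above through the standard usb_ep API. The names
 * my_ep, my_complete, example_submit, buf and len are all hypothetical.
 */
#if 0
static void my_complete(struct usb_ep *ep, struct usb_request *req)
{
	/* invoked from musb_g_giveback(), with the musb lock dropped */
}

static int example_submit(struct usb_ep *my_ep, void *buf, unsigned len)
{
	struct usb_request *req;

	/* ->alloc_request, i.e. musb_alloc_request() */
	req = usb_ep_alloc_request(my_ep, GFP_ATOMIC);
	if (!req)
		return -ENOMEM;

	req->buf = buf;
	req->length = len;
	req->complete = my_complete;

	/* ->queue, i.e. musb_gadget_queue(): maps the buffer for DMA and
	 * starts I/O if this request is at the head of the queue */
	return usb_ep_queue(my_ep, req, GFP_ATOMIC);
}
#endif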
1614 
1615 /* ----------------------------------------------------------------------- */
1616 
1617 static int musb_gadget_get_frame(struct usb_gadget *gadget)
1618 {
1619 	struct musb	*musb = gadget_to_musb(gadget);
1620 
1621 	return (int)musb_readw(musb->mregs, MUSB_FRAME);
1622 }
1623 
1624 static int musb_gadget_wakeup(struct usb_gadget *gadget)
1625 {
1626 #ifndef __UBOOT__
1627 	struct musb	*musb = gadget_to_musb(gadget);
1628 	void __iomem	*mregs = musb->mregs;
1629 	unsigned long	flags;
1630 	int		status = -EINVAL;
1631 	u8		power, devctl;
1632 	int		retries;
1633 
1634 	spin_lock_irqsave(&musb->lock, flags);
1635 
1636 	switch (musb->xceiv->state) {
1637 	case OTG_STATE_B_PERIPHERAL:
1638 		/* NOTE:  OTG state machine doesn't include B_SUSPENDED;
1639 		 * that's part of the standard usb 1.1 state machine, and
1640 		 * doesn't affect OTG transitions.
1641 		 */
1642 		if (musb->may_wakeup && musb->is_suspended)
1643 			break;
1644 		goto done;
1645 	case OTG_STATE_B_IDLE:
1646 		/* Start SRP ... OTG not required. */
1647 		devctl = musb_readb(mregs, MUSB_DEVCTL);
1648 		dev_dbg(musb->controller, "Sending SRP: devctl: %02x\n", devctl);
1649 		devctl |= MUSB_DEVCTL_SESSION;
1650 		musb_writeb(mregs, MUSB_DEVCTL, devctl);
1651 		devctl = musb_readb(mregs, MUSB_DEVCTL);
1652 		retries = 100;
1653 		while (!(devctl & MUSB_DEVCTL_SESSION)) {
1654 			devctl = musb_readb(mregs, MUSB_DEVCTL);
1655 			if (retries-- < 1)
1656 				break;
1657 		}
1658 		retries = 10000;
1659 		while (devctl & MUSB_DEVCTL_SESSION) {
1660 			devctl = musb_readb(mregs, MUSB_DEVCTL);
1661 			if (retries-- < 1)
1662 				break;
1663 		}
1664 
1665 		spin_unlock_irqrestore(&musb->lock, flags);
1666 		otg_start_srp(musb->xceiv->otg);
1667 		spin_lock_irqsave(&musb->lock, flags);
1668 
1669 		/* Block idling for at least 1s */
1670 		musb_platform_try_idle(musb,
1671 			jiffies + msecs_to_jiffies(1 * HZ));
1672 
1673 		status = 0;
1674 		goto done;
1675 	default:
1676 		dev_dbg(musb->controller, "Unhandled wake: %s\n",
1677 			otg_state_string(musb->xceiv->state));
1678 		goto done;
1679 	}
1680 
1681 	status = 0;
1682 
1683 	power = musb_readb(mregs, MUSB_POWER);
1684 	power |= MUSB_POWER_RESUME;
1685 	musb_writeb(mregs, MUSB_POWER, power);
1686 	dev_dbg(musb->controller, "issue wakeup\n");
1687 
1688 	/* FIXME do this next chunk in a timer callback, no udelay */
1689 	mdelay(2);
1690 
1691 	power = musb_readb(mregs, MUSB_POWER);
1692 	power &= ~MUSB_POWER_RESUME;
1693 	musb_writeb(mregs, MUSB_POWER, power);
1694 done:
1695 	spin_unlock_irqrestore(&musb->lock, flags);
1696 	return status;
1697 #else
1698 	return 0;
1699 #endif
1700 }
1701 
1702 static int
1703 musb_gadget_set_self_powered(struct usb_gadget *gadget, int is_selfpowered)
1704 {
1705 	struct musb	*musb = gadget_to_musb(gadget);
1706 
1707 	musb->is_self_powered = !!is_selfpowered;
1708 	return 0;
1709 }
1710 
1711 static void musb_pullup(struct musb *musb, int is_on)
1712 {
1713 	u8 power;
1714 
1715 	power = musb_readb(musb->mregs, MUSB_POWER);
1716 	if (is_on)
1717 		power |= MUSB_POWER_SOFTCONN;
1718 	else
1719 		power &= ~MUSB_POWER_SOFTCONN;
1720 
1721 	/* FIXME if on, HdrcStart; if off, HdrcStop */
1722 
1723 	dev_dbg(musb->controller, "gadget D+ pullup %s\n",
1724 		is_on ? "on" : "off");
1725 	musb_writeb(musb->mregs, MUSB_POWER, power);
1726 }
1727 
1728 #if 0
1729 static int musb_gadget_vbus_session(struct usb_gadget *gadget, int is_active)
1730 {
1731 	dev_dbg(musb->controller, "<= %s =>\n", __func__);
1732 
1733 	/*
1734 	 * FIXME iff driver's softconnect flag is set (as it is during probe,
1735 	 * though that can clear it), just musb_pullup().
1736 	 */
1737 
1738 	return -EINVAL;
1739 }
1740 #endif
1741 
1742 static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA)
1743 {
1744 #ifndef __UBOOT__
1745 	struct musb	*musb = gadget_to_musb(gadget);
1746 
1747 	if (!musb->xceiv->set_power)
1748 		return -EOPNOTSUPP;
1749 	return usb_phy_set_power(musb->xceiv, mA);
1750 #else
1751 	return 0;
1752 #endif
1753 }
1754 
1755 static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on)
1756 {
1757 	struct musb	*musb = gadget_to_musb(gadget);
1758 	unsigned long	flags;
1759 
1760 	is_on = !!is_on;
1761 
1762 	pm_runtime_get_sync(musb->controller);
1763 
1764 	/* NOTE: this assumes we are sensing vbus; we'd rather
1765 	 * not pullup unless the B-session is active.
1766 	 */
1767 	spin_lock_irqsave(&musb->lock, flags);
1768 	if (is_on != musb->softconnect) {
1769 		musb->softconnect = is_on;
1770 		musb_pullup(musb, is_on);
1771 	}
1772 	spin_unlock_irqrestore(&musb->lock, flags);
1773 
1774 	pm_runtime_put(musb->controller);
1775 
1776 	return 0;
1777 }
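
/*
 * Re the NOTE above: "sensing vbus" can be made explicit by checking the
 * VBUS level in DEVCTL before connecting, the same test musb_g_suspend()
 * uses below.  Hedged sketch of such a guard:
 */
#if 0
	u8 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);

	if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)	/* VBUS valid */
		musb_pullup(musb, 1);
#endif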
1778 
1779 #ifndef __UBOOT__
1780 static int musb_gadget_start(struct usb_gadget *g,
1781 		struct usb_gadget_driver *driver);
1782 static int musb_gadget_stop(struct usb_gadget *g,
1783 		struct usb_gadget_driver *driver);
1784 #else
1785 static int musb_gadget_stop(struct usb_gadget *g)
1786 {
1787 	struct musb	*musb = gadget_to_musb(g);
1788 
1789 	musb_stop(musb);
1790 	return 0;
1791 }
1792 #endif
1793 
1794 static const struct usb_gadget_ops musb_gadget_operations = {
1795 	.get_frame		= musb_gadget_get_frame,
1796 	.wakeup			= musb_gadget_wakeup,
1797 	.set_selfpowered	= musb_gadget_set_self_powered,
1798 	/* .vbus_session		= musb_gadget_vbus_session, */
1799 	.vbus_draw		= musb_gadget_vbus_draw,
1800 	.pullup			= musb_gadget_pullup,
1801 #ifndef __UBOOT__
1802 	.udc_start		= musb_gadget_start,
1803 	.udc_stop		= musb_gadget_stop,
1804 #else
1805 	.udc_start		= musb_gadget_start,
1806 	.udc_stop		= musb_gadget_stop,
1807 #endif
1808 };
1809 
1810 /* ----------------------------------------------------------------------- */
1811 
1812 /* Registration */
1813 
1814 /* Only this registration code "knows" the rule (from USB standards)
1815  * about there being only one external upstream port.  It assumes
1816  * all peripheral ports are external...
1817  */
1818 
1819 #ifndef __UBOOT__
1820 static void musb_gadget_release(struct device *dev)
1821 {
1822 	/* kref_put(WHAT) */
1823 	dev_dbg(dev, "%s\n", __func__);
1824 }
1825 #endif
1826 
1827 
1828 static void __devinit
1829 init_peripheral_ep(struct musb *musb, struct musb_ep *ep, u8 epnum, int is_in)
1830 {
1831 	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
1832 
1833 	memset(ep, 0, sizeof *ep);
1834 
1835 	ep->current_epnum = epnum;
1836 	ep->musb = musb;
1837 	ep->hw_ep = hw_ep;
1838 	ep->is_in = is_in;
1839 
1840 	INIT_LIST_HEAD(&ep->req_list);
1841 
1842 	sprintf(ep->name, "ep%d%s", epnum,
1843 			(!epnum || hw_ep->is_shared_fifo) ? "" : (
1844 				is_in ? "in" : "out"));
1845 	ep->end_point.name = ep->name;
1846 	INIT_LIST_HEAD(&ep->end_point.ep_list);
1847 	if (!epnum) {
1848 		ep->end_point.maxpacket = 64;
1849 		ep->end_point.ops = &musb_g_ep0_ops;
1850 		musb->g.ep0 = &ep->end_point;
1851 	} else {
1852 		if (is_in)
1853 			ep->end_point.maxpacket = hw_ep->max_packet_sz_tx;
1854 		else
1855 			ep->end_point.maxpacket = hw_ep->max_packet_sz_rx;
1856 		ep->end_point.ops = &musb_ep_ops;
1857 		list_add_tail(&ep->end_point.ep_list, &musb->g.ep_list);
1858 	}
1859 }
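
/*
 * Resulting names, for illustration: endpoint 0 is "ep0"; a split-FIFO
 * endpoint 1 yields "ep1in" and "ep1out"; a shared-FIFO endpoint 1 is
 * just "ep1".
 */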
1860 
1861 /*
1862  * Initialize the endpoints exposed to peripheral drivers, with backlinks
1863  * to the rest of the driver state.
1864  */
1865 static inline void __devinit musb_g_init_endpoints(struct musb *musb)
1866 {
1867 	u8			epnum;
1868 	struct musb_hw_ep	*hw_ep;
1869 	unsigned		count = 0;
1870 
1871 	/* initialize endpoint list just once */
1872 	INIT_LIST_HEAD(&(musb->g.ep_list));
1873 
1874 	for (epnum = 0, hw_ep = musb->endpoints;
1875 			epnum < musb->nr_endpoints;
1876 			epnum++, hw_ep++) {
1877 		if (hw_ep->is_shared_fifo /* || !epnum */) {
1878 			init_peripheral_ep(musb, &hw_ep->ep_in, epnum, 0);
1879 			count++;
1880 		} else {
1881 			if (hw_ep->max_packet_sz_tx) {
1882 				init_peripheral_ep(musb, &hw_ep->ep_in,
1883 							epnum, 1);
1884 				count++;
1885 			}
1886 			if (hw_ep->max_packet_sz_rx) {
1887 				init_peripheral_ep(musb, &hw_ep->ep_out,
1888 							epnum, 0);
1889 				count++;
1890 			}
1891 		}
1892 	}
1893 }
1894 
1895 /* called once during driver setup to initialize and link into
1896  * the driver model; memory is zeroed.
1897  */
1898 int __devinit musb_gadget_setup(struct musb *musb)
1899 {
1900 	int status;
1901 
1902 	/* REVISIT minor race:  if (erroneously) setting up two
1903 	 * musb peripherals at the same time, only the bus lock
1904 	 * is probably held.
1905 	 */
1906 
1907 	musb->g.ops = &musb_gadget_operations;
1908 #ifndef __UBOOT__
1909 	musb->g.max_speed = USB_SPEED_HIGH;
1910 #endif
1911 	musb->g.speed = USB_SPEED_UNKNOWN;
1912 
1913 #ifndef __UBOOT__
1914 	/* this "gadget" abstracts/virtualizes the controller */
1915 	dev_set_name(&musb->g.dev, "gadget");
1916 	musb->g.dev.parent = musb->controller;
1917 	musb->g.dev.dma_mask = musb->controller->dma_mask;
1918 	musb->g.dev.release = musb_gadget_release;
1919 #endif
1920 	musb->g.name = musb_driver_name;
1921 
1922 #ifndef __UBOOT__
1923 	if (is_otg_enabled(musb))
1924 		musb->g.is_otg = 1;
1925 #endif
1926 
1927 	musb_g_init_endpoints(musb);
1928 
1929 	musb->is_active = 0;
1930 	musb_platform_try_idle(musb, 0);
1931 
1932 #ifndef __UBOOT__
1933 	status = device_register(&musb->g.dev);
1934 	if (status != 0) {
1935 		put_device(&musb->g.dev);
1936 		return status;
1937 	}
1938 	status = usb_add_gadget_udc(musb->controller, &musb->g);
1939 	if (status)
1940 		goto err;
1941 #endif
1942 
1943 	return 0;
1944 #ifndef __UBOOT__
1945 err:
1946 	musb->g.dev.parent = NULL;
1947 	device_unregister(&musb->g.dev);
1948 	return status;
1949 #endif
1950 }
1951 
1952 void musb_gadget_cleanup(struct musb *musb)
1953 {
1954 #ifndef __UBOOT__
1955 	usb_del_gadget_udc(&musb->g);
1956 	if (musb->g.dev.parent)
1957 		device_unregister(&musb->g.dev);
1958 #endif
1959 }
1960 
1961 /*
1962  * Register the gadget driver. Used by gadget drivers when
1963  * registering themselves with the controller.
1964  *
1965  * -EINVAL something went wrong (not the driver's fault)
1966  * -EBUSY another gadget is already using the controller
1967  * -ENOMEM no memory to perform the operation
1968  *
1969  * @param driver the gadget driver
1970  * @return <0 if error, 0 if everything is fine
1971  */
1972 #ifndef __UBOOT__
1973 static int musb_gadget_start(struct usb_gadget *g,
1974 		struct usb_gadget_driver *driver)
1975 #else
1976 int musb_gadget_start(struct usb_gadget *g,
1977 		struct usb_gadget_driver *driver)
1978 #endif
1979 {
1980 	struct musb		*musb = gadget_to_musb(g);
1981 #ifndef __UBOOT__
1982 	struct usb_otg		*otg = musb->xceiv->otg;
1983 #endif
1984 	unsigned long		flags;
1985 	int			retval = -EINVAL;
1986 
1987 #ifndef __UBOOT__
1988 	if (driver->max_speed < USB_SPEED_HIGH)
1989 		goto err0;
1990 #endif
1991 
1992 	pm_runtime_get_sync(musb->controller);
1993 
1994 #ifndef __UBOOT__
1995 	dev_dbg(musb->controller, "registering driver %s\n", driver->function);
1996 #endif
1997 
1998 	musb->softconnect = 0;
1999 	musb->gadget_driver = driver;
2000 
2001 	spin_lock_irqsave(&musb->lock, flags);
2002 	musb->is_active = 1;
2003 
2004 #ifndef __UBOOT__
2005 	otg_set_peripheral(otg, &musb->g);
2006 	musb->xceiv->state = OTG_STATE_B_IDLE;
2007 
2008 	/*
2009 	 * FIXME this ignores the softconnect flag.  Drivers are
2010 	 * allowed to hold the peripheral inactive until, for example,
2011 	 * userspace hooks up printer hardware or DSP codecs, so
2012 	 * hosts only see fully functional devices.
2013 	 */
2014 
2015 	if (!is_otg_enabled(musb))
2016 #endif
2017 		musb_start(musb);
2018 
2019 	spin_unlock_irqrestore(&musb->lock, flags);
2020 
2021 #ifndef __UBOOT__
2022 	if (is_otg_enabled(musb)) {
2023 		struct usb_hcd	*hcd = musb_to_hcd(musb);
2024 
2025 		dev_dbg(musb->controller, "OTG startup...\n");
2026 
2027 		/* REVISIT:  funcall to other code, which also
2028 		 * handles power budgeting ... this way also
2029 		 * ensures HdrcStart is indirectly called.
2030 		 */
2031 		retval = usb_add_hcd(musb_to_hcd(musb), 0, 0);
2032 		if (retval < 0) {
2033 			dev_dbg(musb->controller, "add_hcd failed, %d\n", retval);
2034 			goto err2;
2035 		}
2036 
2037 		if ((musb->xceiv->last_event == USB_EVENT_ID)
2038 					&& otg->set_vbus)
2039 			otg_set_vbus(otg, 1);
2040 
2041 		hcd->self.uses_pio_for_control = 1;
2042 	}
2043 	if (musb->xceiv->last_event == USB_EVENT_NONE)
2044 		pm_runtime_put(musb->controller);
2045 #endif
2046 
2047 	return 0;
2048 
2049 #ifndef __UBOOT__
2050 err2:
2051 	if (!is_otg_enabled(musb))
2052 		musb_stop(musb);
2053 err0:
2054 	return retval;
2055 #endif
2056 }
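
/*
 * To summarize the two paths above: without OTG the controller is started
 * immediately (musb_start()); with OTG an HCD is registered as well, VBUS
 * may be driven if the last transceiver event was USB_EVENT_ID, and host
 * side EP0 control transfers are forced to PIO.
 */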
2057 
2058 #ifndef __UBOOT__
2059 static void stop_activity(struct musb *musb, struct usb_gadget_driver *driver)
2060 {
2061 	int			i;
2062 	struct musb_hw_ep	*hw_ep;
2063 
2064 	/* don't disconnect if it's not connected */
2065 	if (musb->g.speed == USB_SPEED_UNKNOWN)
2066 		driver = NULL;
2067 	else
2068 		musb->g.speed = USB_SPEED_UNKNOWN;
2069 
2070 	/* deactivate the hardware */
2071 	if (musb->softconnect) {
2072 		musb->softconnect = 0;
2073 		musb_pullup(musb, 0);
2074 	}
2075 	musb_stop(musb);
2076 
2077 	/* killing any outstanding requests will quiesce the driver;
2078 	 * then report disconnect
2079 	 */
2080 	if (driver) {
2081 		for (i = 0, hw_ep = musb->endpoints;
2082 				i < musb->nr_endpoints;
2083 				i++, hw_ep++) {
2084 			musb_ep_select(musb->mregs, i);
2085 			if (hw_ep->is_shared_fifo /* || !epnum */) {
2086 				nuke(&hw_ep->ep_in, -ESHUTDOWN);
2087 			} else {
2088 				if (hw_ep->max_packet_sz_tx)
2089 					nuke(&hw_ep->ep_in, -ESHUTDOWN);
2090 				if (hw_ep->max_packet_sz_rx)
2091 					nuke(&hw_ep->ep_out, -ESHUTDOWN);
2092 			}
2093 		}
2094 	}
2095 }
2096 
2097 /*
2098  * Unregister the gadget driver. Used by gadget drivers when
2099  * unregistering themselves from the controller.
2100  *
2101  * @param driver the gadget driver to unregister
2102  */
2103 static int musb_gadget_stop(struct usb_gadget *g,
2104 		struct usb_gadget_driver *driver)
2105 {
2106 	struct musb	*musb = gadget_to_musb(g);
2107 	unsigned long	flags;
2108 
2109 	if (musb->xceiv->last_event == USB_EVENT_NONE)
2110 		pm_runtime_get_sync(musb->controller);
2111 
2112 	/*
2113 	 * REVISIT always use otg_set_peripheral() here too;
2114 	 * this needs to shut down the OTG engine.
2115 	 */
2116 
2117 	spin_lock_irqsave(&musb->lock, flags);
2118 
2119 	musb_hnp_stop(musb);
2120 
2121 	(void) musb_gadget_vbus_draw(&musb->g, 0);
2122 
2123 	musb->xceiv->state = OTG_STATE_UNDEFINED;
2124 	stop_activity(musb, driver);
2125 	otg_set_peripheral(musb->xceiv->otg, NULL);
2126 
2127 	dev_dbg(musb->controller, "unregistering driver %s\n", driver->function);
2128 
2129 	musb->is_active = 0;
2130 	musb_platform_try_idle(musb, 0);
2131 	spin_unlock_irqrestore(&musb->lock, flags);
2132 
2133 	if (is_otg_enabled(musb)) {
2134 		usb_remove_hcd(musb_to_hcd(musb));
2135 		/* FIXME we need to be able to register another
2136 		 * gadget driver here and have everything work;
2137 		 * that currently misbehaves.
2138 		 */
2139 	}
2140 
2141 	if (!is_otg_enabled(musb))
2142 		musb_stop(musb);
2143 
2144 	pm_runtime_put(musb->controller);
2145 
2146 	return 0;
2147 }
2148 #endif
2149 
2150 /* ----------------------------------------------------------------------- */
2151 
2152 /* lifecycle operations called through plat_uds.c */
2153 
2154 void musb_g_resume(struct musb *musb)
2155 {
2156 #ifndef __UBOOT__
2157 	musb->is_suspended = 0;
2158 	switch (musb->xceiv->state) {
2159 	case OTG_STATE_B_IDLE:
2160 		break;
2161 	case OTG_STATE_B_WAIT_ACON:
2162 	case OTG_STATE_B_PERIPHERAL:
2163 		musb->is_active = 1;
2164 		if (musb->gadget_driver && musb->gadget_driver->resume) {
2165 			spin_unlock(&musb->lock);
2166 			musb->gadget_driver->resume(&musb->g);
2167 			spin_lock(&musb->lock);
2168 		}
2169 		break;
2170 	default:
2171 		WARNING("unhandled RESUME transition (%s)\n",
2172 				otg_state_string(musb->xceiv->state));
2173 	}
2174 #endif
2175 }
2176 
2177 /* called when SOF packets stop for 3+ msec */
2178 void musb_g_suspend(struct musb *musb)
2179 {
2180 #ifndef __UBOOT__
2181 	u8	devctl;
2182 
2183 	devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
2184 	dev_dbg(musb->controller, "devctl %02x\n", devctl);
2185 
2186 	switch (musb->xceiv->state) {
2187 	case OTG_STATE_B_IDLE:
2188 		if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
2189 			musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
2190 		break;
2191 	case OTG_STATE_B_PERIPHERAL:
2192 		musb->is_suspended = 1;
2193 		if (musb->gadget_driver && musb->gadget_driver->suspend) {
2194 			spin_unlock(&musb->lock);
2195 			musb->gadget_driver->suspend(&musb->g);
2196 			spin_lock(&musb->lock);
2197 		}
2198 		break;
2199 	default:
2200 		/* REVISIT if B_HOST, clear DEVCTL.HOSTREQ;
2201 		 * A_PERIPHERAL may need care too
2202 		 */
2203 		WARNING("unhandled SUSPEND transition (%s)\n",
2204 				otg_state_string(musb->xceiv->state));
2205 	}
2206 #endif
2207 }
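
/*
 * Note the unlock/lock bracketing above: the gadget driver's suspend and
 * resume hooks run with musb->lock dropped.  A hypothetical function
 * driver's hook, for illustration only:
 */
#if 0
static void my_function_suspend(struct usb_gadget *g)
{
	struct my_dev	*dev = get_gadget_data(g);

	/* the bus is quiet; stop queuing new requests until resume */
	dev->suspended = 1;
}
#endif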
2208 
2209 /* Called during SRP */
2210 void musb_g_wakeup(struct musb *musb)
2211 {
2212 	musb_gadget_wakeup(&musb->g);
2213 }
2214 
2215 /* called when VBUS drops below session threshold, and in other cases */
2216 void musb_g_disconnect(struct musb *musb)
2217 {
2218 	void __iomem	*mregs = musb->mregs;
2219 	u8	devctl = musb_readb(mregs, MUSB_DEVCTL);
2220 
2221 	dev_dbg(musb->controller, "devctl %02x\n", devctl);
2222 
2223 	/* clear HR */
2224 	musb_writeb(mregs, MUSB_DEVCTL, devctl & MUSB_DEVCTL_SESSION);
2225 
2226 	/* don't draw vbus until new b-default session */
2227 	(void) musb_gadget_vbus_draw(&musb->g, 0);
2228 
2229 	musb->g.speed = USB_SPEED_UNKNOWN;
2230 	if (musb->gadget_driver && musb->gadget_driver->disconnect) {
2231 		spin_unlock(&musb->lock);
2232 		musb->gadget_driver->disconnect(&musb->g);
2233 		spin_lock(&musb->lock);
2234 	}
2235 
2236 #ifndef __UBOOT__
2237 	switch (musb->xceiv->state) {
2238 	default:
2239 		dev_dbg(musb->controller, "Unhandled disconnect %s, setting a_idle\n",
2240 			otg_state_string(musb->xceiv->state));
2241 		musb->xceiv->state = OTG_STATE_A_IDLE;
2242 		MUSB_HST_MODE(musb);
2243 		break;
2244 	case OTG_STATE_A_PERIPHERAL:
2245 		musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
2246 		MUSB_HST_MODE(musb);
2247 		break;
2248 	case OTG_STATE_B_WAIT_ACON:
2249 	case OTG_STATE_B_HOST:
2250 	case OTG_STATE_B_PERIPHERAL:
2251 	case OTG_STATE_B_IDLE:
2252 		musb->xceiv->state = OTG_STATE_B_IDLE;
2253 		break;
2254 	case OTG_STATE_B_SRP_INIT:
2255 		break;
2256 	}
2257 #endif
2258 
2259 	musb->is_active = 0;
2260 }
2261 
2262 void musb_g_reset(struct musb *musb)
2263 __releases(musb->lock)
2264 __acquires(musb->lock)
2265 {
2266 	void __iomem	*mbase = musb->mregs;
2267 	u8		devctl = musb_readb(mbase, MUSB_DEVCTL);
2268 	u8		power;
2269 
2270 #ifndef __UBOOT__
2271 	dev_dbg(musb->controller, "<== %s addr=%x driver '%s'\n",
2272 			(devctl & MUSB_DEVCTL_BDEVICE)
2273 				? "B-Device" : "A-Device",
2274 			musb_readb(mbase, MUSB_FADDR),
2275 			musb->gadget_driver
2276 				? musb->gadget_driver->driver.name
2277 				: NULL
2278 			);
2279 #endif
2280 
2281 	/* report disconnect, if we didn't already (flushing EP state) */
2282 	if (musb->g.speed != USB_SPEED_UNKNOWN)
2283 		musb_g_disconnect(musb);
2284 
2285 	/* clear HR */
2286 	else if (devctl & MUSB_DEVCTL_HR)
2287 		musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION);
2288 
2289 
2290 	/* what speed did we negotiate? */
2291 	power = musb_readb(mbase, MUSB_POWER);
2292 	musb->g.speed = (power & MUSB_POWER_HSMODE)
2293 			? USB_SPEED_HIGH : USB_SPEED_FULL;
2294 
2295 	/* start in USB_STATE_DEFAULT */
2296 	musb->is_active = 1;
2297 	musb->is_suspended = 0;
2298 	MUSB_DEV_MODE(musb);
2299 	musb->address = 0;
2300 	musb->ep0_state = MUSB_EP0_STAGE_SETUP;
2301 
2302 	musb->may_wakeup = 0;
2303 	musb->g.b_hnp_enable = 0;
2304 	musb->g.a_alt_hnp_support = 0;
2305 	musb->g.a_hnp_support = 0;
2306 
2307 #ifndef __UBOOT__
2308 	/* Normal reset, as B-Device;
2309 	 * or else after HNP, as A-Device
2310 	 */
2311 	if (devctl & MUSB_DEVCTL_BDEVICE) {
2312 		musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
2313 		musb->g.is_a_peripheral = 0;
2314 	} else if (is_otg_enabled(musb)) {
2315 		musb->xceiv->state = OTG_STATE_A_PERIPHERAL;
2316 		musb->g.is_a_peripheral = 1;
2317 	} else
2318 		WARN_ON(1);
2319 
2320 	/* start with default limits on VBUS power draw */
2321 	(void) musb_gadget_vbus_draw(&musb->g,
2322 			is_otg_enabled(musb) ? 8 : 100);
2323 #endif
2324 }
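
/*
 * The speed latched above is what function drivers key their descriptor
 * choices off after reset; hedged illustration (bulk maxpacket is 512
 * bytes at high speed, 64 at full speed per the USB 2.0 spec):
 */
#if 0
	if (gadget->speed == USB_SPEED_HIGH)
		bulk_maxpacket = 512;
	else
		bulk_maxpacket = 64;
#endif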
2325